Flowise

Форк
0
475 строк · 21.6 Кб
1
{
2
    "description": "Stateless query engine designed to answer questions over your data using LlamaIndex",
3
    "categories": "ChatAnthropic,Compact and Refine,Pinecone,LlamaIndex",
4
    "badge": "NEW",
5
    "framework": "LlamaIndex",
6
    "nodes": [
7
        {
8
            "width": 300,
9
            "height": 382,
10
            "id": "queryEngine_0",
11
            "position": {
12
                "x": 1407.9610494306783,
13
                "y": 241.12144405808692
14
            },
15
            "type": "customNode",
16
            "data": {
17
                "id": "queryEngine_0",
18
                "label": "Query Engine",
19
                "version": 2,
20
                "name": "queryEngine",
21
                "type": "QueryEngine",
22
                "baseClasses": ["QueryEngine", "BaseQueryEngine"],
23
                "tags": ["LlamaIndex"],
24
                "category": "Engine",
25
                "description": "Simple query engine built to answer questions over your data, without memory",
26
                "inputParams": [
27
                    {
28
                        "label": "Return Source Documents",
29
                        "name": "returnSourceDocuments",
30
                        "type": "boolean",
31
                        "optional": true,
32
                        "id": "queryEngine_0-input-returnSourceDocuments-boolean"
33
                    }
34
                ],
35
                "inputAnchors": [
36
                    {
37
                        "label": "Vector Store Retriever",
38
                        "name": "vectorStoreRetriever",
39
                        "type": "VectorIndexRetriever",
40
                        "id": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever"
41
                    },
42
                    {
43
                        "label": "Response Synthesizer",
44
                        "name": "responseSynthesizer",
45
                        "type": "ResponseSynthesizer",
46
                        "description": "ResponseSynthesizer is responsible for sending the query, nodes, and prompt templates to the LLM to generate a response. See <a target=\"_blank\" href=\"https://ts.llamaindex.ai/modules/low_level/response_synthesizer\">more</a>",
47
                        "optional": true,
48
                        "id": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer"
49
                    }
50
                ],
51
                "inputs": {
52
                    "vectorStoreRetriever": "{{pineconeLlamaIndex_0.data.instance}}",
53
                    "responseSynthesizer": "{{compactrefineLlamaIndex_0.data.instance}}",
54
                    "returnSourceDocuments": true
55
                },
56
                "outputAnchors": [
57
                    {
58
                        "id": "queryEngine_0-output-queryEngine-QueryEngine|BaseQueryEngine",
59
                        "name": "queryEngine",
60
                        "label": "QueryEngine",
61
                        "type": "QueryEngine | BaseQueryEngine"
62
                    }
63
                ],
64
                "outputs": {},
65
                "selected": false
66
            },
67
            "selected": false,
68
            "positionAbsolute": {
69
                "x": 1407.9610494306783,
70
                "y": 241.12144405808692
71
            },
72
            "dragging": false
73
        },
74
        {
75
            "width": 300,
76
            "height": 585,
77
            "id": "pineconeLlamaIndex_0",
78
            "position": {
79
                "x": 977.3886641397302,
80
                "y": -261.2253031641797
81
            },
82
            "type": "customNode",
83
            "data": {
84
                "id": "pineconeLlamaIndex_0",
85
                "label": "Pinecone",
86
                "version": 1,
87
                "name": "pineconeLlamaIndex",
88
                "type": "Pinecone",
89
                "baseClasses": ["Pinecone", "VectorIndexRetriever"],
90
                "tags": ["LlamaIndex"],
91
                "category": "Vector Stores",
92
                "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
93
                "inputParams": [
94
                    {
95
                        "label": "Connect Credential",
96
                        "name": "credential",
97
                        "type": "credential",
98
                        "credentialNames": ["pineconeApi"],
99
                        "id": "pineconeLlamaIndex_0-input-credential-credential"
100
                    },
101
                    {
102
                        "label": "Pinecone Index",
103
                        "name": "pineconeIndex",
104
                        "type": "string",
105
                        "id": "pineconeLlamaIndex_0-input-pineconeIndex-string"
106
                    },
107
                    {
108
                        "label": "Pinecone Namespace",
109
                        "name": "pineconeNamespace",
110
                        "type": "string",
111
                        "placeholder": "my-first-namespace",
112
                        "additionalParams": true,
113
                        "optional": true,
114
                        "id": "pineconeLlamaIndex_0-input-pineconeNamespace-string"
115
                    },
116
                    {
117
                        "label": "Pinecone Metadata Filter",
118
                        "name": "pineconeMetadataFilter",
119
                        "type": "json",
120
                        "optional": true,
121
                        "additionalParams": true,
122
                        "id": "pineconeLlamaIndex_0-input-pineconeMetadataFilter-json"
123
                    },
124
                    {
125
                        "label": "Top K",
126
                        "name": "topK",
127
                        "description": "Number of top results to fetch. Default to 4",
128
                        "placeholder": "4",
129
                        "type": "number",
130
                        "additionalParams": true,
131
                        "optional": true,
132
                        "id": "pineconeLlamaIndex_0-input-topK-number"
133
                    }
134
                ],
135
                "inputAnchors": [
136
                    {
137
                        "label": "Document",
138
                        "name": "document",
139
                        "type": "Document",
140
                        "list": true,
141
                        "optional": true,
142
                        "id": "pineconeLlamaIndex_0-input-document-Document"
143
                    },
144
                    {
145
                        "label": "Chat Model",
146
                        "name": "model",
147
                        "type": "BaseChatModel_LlamaIndex",
148
                        "id": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex"
149
                    },
150
                    {
151
                        "label": "Embeddings",
152
                        "name": "embeddings",
153
                        "type": "BaseEmbedding_LlamaIndex",
154
                        "id": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex"
155
                    }
156
                ],
157
                "inputs": {
158
                    "document": "",
159
                    "model": "{{chatAnthropic_LlamaIndex_0.data.instance}}",
160
                    "embeddings": "{{openAIEmbedding_LlamaIndex_0.data.instance}}",
161
                    "pineconeIndex": "",
162
                    "pineconeNamespace": "",
163
                    "pineconeMetadataFilter": "",
164
                    "topK": ""
165
                },
166
                "outputAnchors": [
167
                    {
168
                        "name": "output",
169
                        "label": "Output",
170
                        "type": "options",
171
                        "options": [
172
                            {
173
                                "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever",
174
                                "name": "retriever",
175
                                "label": "Pinecone Retriever",
176
                                "type": "Pinecone | VectorIndexRetriever"
177
                            },
178
                            {
179
                                "id": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorStoreIndex",
180
                                "name": "vectorStore",
181
                                "label": "Pinecone Vector Store Index",
182
                                "type": "Pinecone | VectorStoreIndex"
183
                            }
184
                        ],
185
                        "default": "retriever"
186
                    }
187
                ],
188
                "outputs": {
189
                    "output": "retriever"
190
                },
191
                "selected": false
192
            },
193
            "selected": false,
194
            "positionAbsolute": {
195
                "x": 977.3886641397302,
196
                "y": -261.2253031641797
197
            },
198
            "dragging": false
199
        },
200
        {
201
            "width": 300,
202
            "height": 334,
203
            "id": "openAIEmbedding_LlamaIndex_0",
204
            "position": {
205
                "x": 529.8690713844503,
206
                "y": -18.955726653613254
207
            },
208
            "type": "customNode",
209
            "data": {
210
                "id": "openAIEmbedding_LlamaIndex_0",
211
                "label": "OpenAI Embedding",
212
                "version": 2,
213
                "name": "openAIEmbedding_LlamaIndex",
214
                "type": "OpenAIEmbedding",
215
                "baseClasses": ["OpenAIEmbedding", "BaseEmbedding_LlamaIndex", "BaseEmbedding"],
216
                "tags": ["LlamaIndex"],
217
                "category": "Embeddings",
218
                "description": "OpenAI Embedding specific for LlamaIndex",
219
                "inputParams": [
220
                    {
221
                        "label": "Connect Credential",
222
                        "name": "credential",
223
                        "type": "credential",
224
                        "credentialNames": ["openAIApi"],
225
                        "id": "openAIEmbedding_LlamaIndex_0-input-credential-credential"
226
                    },
227
                    {
228
                        "label": "Model Name",
229
                        "name": "modelName",
230
                        "type": "asyncOptions",
231
                        "loadMethod": "listModels",
232
                        "default": "text-embedding-ada-002",
233
                        "id": "openAIEmbedding_LlamaIndex_0-input-modelName-options"
234
                    },
235
                    {
236
                        "label": "Timeout",
237
                        "name": "timeout",
238
                        "type": "number",
239
                        "optional": true,
240
                        "additionalParams": true,
241
                        "id": "openAIEmbedding_LlamaIndex_0-input-timeout-number"
242
                    },
243
                    {
244
                        "label": "BasePath",
245
                        "name": "basepath",
246
                        "type": "string",
247
                        "optional": true,
248
                        "additionalParams": true,
249
                        "id": "openAIEmbedding_LlamaIndex_0-input-basepath-string"
250
                    }
251
                ],
252
                "inputAnchors": [],
253
                "inputs": {
254
                    "timeout": "",
255
                    "basepath": "",
256
                    "modelName": "text-embedding-ada-002"
257
                },
258
                "outputAnchors": [
259
                    {
260
                        "id": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
261
                        "name": "openAIEmbedding_LlamaIndex",
262
                        "label": "OpenAIEmbedding",
263
                        "type": "OpenAIEmbedding | BaseEmbedding_LlamaIndex | BaseEmbedding"
264
                    }
265
                ],
266
                "outputs": {},
267
                "selected": false
268
            },
269
            "selected": false,
270
            "positionAbsolute": {
271
                "x": 529.8690713844503,
272
                "y": -18.955726653613254
273
            },
274
            "dragging": false
275
        },
276
        {
277
            "width": 300,
278
            "height": 749,
279
            "id": "compactrefineLlamaIndex_0",
280
            "position": {
281
                "x": 170.71031618977543,
282
                "y": -33.83233752386292
283
            },
284
            "type": "customNode",
285
            "data": {
286
                "id": "compactrefineLlamaIndex_0",
287
                "label": "Compact and Refine",
288
                "version": 1,
289
                "name": "compactrefineLlamaIndex",
290
                "type": "CompactRefine",
291
                "baseClasses": ["CompactRefine", "ResponseSynthesizer"],
292
                "tags": ["LlamaIndex"],
293
                "category": "Response Synthesizer",
294
                "description": "CompactRefine is a slight variation of Refine that first compacts the text chunks into the smallest possible number of chunks.",
295
                "inputParams": [
296
                    {
297
                        "label": "Refine Prompt",
298
                        "name": "refinePrompt",
299
                        "type": "string",
300
                        "rows": 4,
301
                        "default": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:",
302
                        "warning": "Prompt can contain no variables, or up to 3 variables. Variables must be {existingAnswer}, {context} and {query}",
303
                        "optional": true,
304
                        "id": "compactrefineLlamaIndex_0-input-refinePrompt-string"
305
                    },
306
                    {
307
                        "label": "Text QA Prompt",
308
                        "name": "textQAPrompt",
309
                        "type": "string",
310
                        "rows": 4,
311
                        "default": "Context information is below.\n---------------------\n{context}\n---------------------\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}\nAnswer:",
312
                        "warning": "Prompt can contain no variables, or up to 2 variables. Variables must be {context} and {query}",
313
                        "optional": true,
314
                        "id": "compactrefineLlamaIndex_0-input-textQAPrompt-string"
315
                    }
316
                ],
317
                "inputAnchors": [],
318
                "inputs": {
319
                    "refinePrompt": "The original query is as follows: {query}\nWe have provided an existing answer: {existingAnswer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context}\n------------\nGiven the new context, refine the original answer to better answer the query. If the context isn't useful, return the original answer.\nRefined Answer:",
320
                    "textQAPrompt": "Context information:\n<context>\n{context}\n</context>\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query}"
321
                },
322
                "outputAnchors": [
323
                    {
324
                        "id": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer",
325
                        "name": "compactrefineLlamaIndex",
326
                        "label": "CompactRefine",
327
                        "type": "CompactRefine | ResponseSynthesizer"
328
                    }
329
                ],
330
                "outputs": {},
331
                "selected": false
332
            },
333
            "selected": false,
334
            "positionAbsolute": {
335
                "x": 170.71031618977543,
336
                "y": -33.83233752386292
337
            },
338
            "dragging": false
339
        },
340
        {
341
            "width": 300,
342
            "height": 529,
343
            "id": "chatAnthropic_LlamaIndex_0",
344
            "position": {
345
                "x": 521.3530883359147,
346
                "y": -584.8241219614786
347
            },
348
            "type": "customNode",
349
            "data": {
350
                "id": "chatAnthropic_LlamaIndex_0",
351
                "label": "ChatAnthropic",
352
                "version": 3.0,
353
                "name": "chatAnthropic_LlamaIndex",
354
                "type": "ChatAnthropic",
355
                "baseClasses": ["ChatAnthropic", "BaseChatModel_LlamaIndex"],
356
                "tags": ["LlamaIndex"],
357
                "category": "Chat Models",
358
                "description": "Wrapper around ChatAnthropic LLM specific for LlamaIndex",
359
                "inputParams": [
360
                    {
361
                        "label": "Connect Credential",
362
                        "name": "credential",
363
                        "type": "credential",
364
                        "credentialNames": ["anthropicApi"],
365
                        "id": "chatAnthropic_LlamaIndex_0-input-credential-credential"
366
                    },
367
                    {
368
                        "label": "Model Name",
369
                        "name": "modelName",
370
                        "type": "asyncOptions",
371
                        "loadMethod": "listModels",
372
                        "default": "claude-3-haiku",
373
                        "id": "chatAnthropic_LlamaIndex_0-input-modelName-options"
374
                    },
375
                    {
376
                        "label": "Temperature",
377
                        "name": "temperature",
378
                        "type": "number",
379
                        "step": 0.1,
380
                        "default": 0.9,
381
                        "optional": true,
382
                        "id": "chatAnthropic_LlamaIndex_0-input-temperature-number"
383
                    },
384
                    {
385
                        "label": "Max Tokens",
386
                        "name": "maxTokensToSample",
387
                        "type": "number",
388
                        "step": 1,
389
                        "optional": true,
390
                        "additionalParams": true,
391
                        "id": "chatAnthropic_LlamaIndex_0-input-maxTokensToSample-number"
392
                    },
393
                    {
394
                        "label": "Top P",
395
                        "name": "topP",
396
                        "type": "number",
397
                        "step": 0.1,
398
                        "optional": true,
399
                        "additionalParams": true,
400
                        "id": "chatAnthropic_LlamaIndex_0-input-topP-number"
401
                    }
402
                ],
403
                "inputAnchors": [],
404
                "inputs": {
405
                    "modelName": "claude-2",
406
                    "temperature": 0.9,
407
                    "maxTokensToSample": "",
408
                    "topP": ""
409
                },
410
                "outputAnchors": [
411
                    {
412
                        "id": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex",
413
                        "name": "chatAnthropic_LlamaIndex",
414
                        "label": "ChatAnthropic",
415
                        "type": "ChatAnthropic | BaseChatModel_LlamaIndex"
416
                    }
417
                ],
418
                "outputs": {},
419
                "selected": false
420
            },
421
            "selected": false,
422
            "positionAbsolute": {
423
                "x": 521.3530883359147,
424
                "y": -584.8241219614786
425
            },
426
            "dragging": false
427
        }
428
    ],
429
    "edges": [
430
        {
431
            "source": "pineconeLlamaIndex_0",
432
            "sourceHandle": "pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever",
433
            "target": "queryEngine_0",
434
            "targetHandle": "queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever",
435
            "type": "buttonedge",
436
            "id": "pineconeLlamaIndex_0-pineconeLlamaIndex_0-output-retriever-Pinecone|VectorIndexRetriever-queryEngine_0-queryEngine_0-input-vectorStoreRetriever-VectorIndexRetriever",
437
            "data": {
438
                "label": ""
439
            }
440
        },
441
        {
442
            "source": "openAIEmbedding_LlamaIndex_0",
443
            "sourceHandle": "openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding",
444
            "target": "pineconeLlamaIndex_0",
445
            "targetHandle": "pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
446
            "type": "buttonedge",
447
            "id": "openAIEmbedding_LlamaIndex_0-openAIEmbedding_LlamaIndex_0-output-openAIEmbedding_LlamaIndex-OpenAIEmbedding|BaseEmbedding_LlamaIndex|BaseEmbedding-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-embeddings-BaseEmbedding_LlamaIndex",
448
            "data": {
449
                "label": ""
450
            }
451
        },
452
        {
453
            "source": "compactrefineLlamaIndex_0",
454
            "sourceHandle": "compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer",
455
            "target": "queryEngine_0",
456
            "targetHandle": "queryEngine_0-input-responseSynthesizer-ResponseSynthesizer",
457
            "type": "buttonedge",
458
            "id": "compactrefineLlamaIndex_0-compactrefineLlamaIndex_0-output-compactrefineLlamaIndex-CompactRefine|ResponseSynthesizer-queryEngine_0-queryEngine_0-input-responseSynthesizer-ResponseSynthesizer",
459
            "data": {
460
                "label": ""
461
            }
462
        },
463
        {
464
            "source": "chatAnthropic_LlamaIndex_0",
465
            "sourceHandle": "chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex",
466
            "target": "pineconeLlamaIndex_0",
467
            "targetHandle": "pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
468
            "type": "buttonedge",
469
            "id": "chatAnthropic_LlamaIndex_0-chatAnthropic_LlamaIndex_0-output-chatAnthropic_LlamaIndex-ChatAnthropic|BaseChatModel_LlamaIndex-pineconeLlamaIndex_0-pineconeLlamaIndex_0-input-model-BaseChatModel_LlamaIndex",
470
            "data": {
471
                "label": ""
472
            }
473
        }
474
    ]
475
}
476

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.