Flowise

Conversational Retrieval QA Chain.json 
755 lines · 33.5 KB
{
    "description": "Text file QnA using conversational retrieval QA chain",
    "categories": "TextFile,ChatOpenAI,Conversational Retrieval QA Chain,Pinecone,Langchain",
    "badge": "POPULAR",
    "framework": "Langchain",
    "nodes": [
        {
            "width": 300,
            "height": 329,
            "id": "openAIEmbeddings_0",
            "position": {
                "x": 795.6162477805387,
                "y": 603.260214150876
            },
            "type": "customNode",
            "data": {
                "id": "openAIEmbeddings_0",
                "label": "OpenAI Embeddings",
                "version": 3,
                "name": "openAIEmbeddings",
                "type": "OpenAIEmbeddings",
                "baseClasses": ["OpenAIEmbeddings", "Embeddings"],
                "category": "Embeddings",
                "description": "OpenAI API to generate embeddings for a given text",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["openAIApi"],
                        "id": "openAIEmbeddings_0-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "asyncOptions",
                        "loadMethod": "listModels",
                        "default": "text-embedding-ada-002",
                        "id": "openAIEmbeddings_0-input-modelName-options"
                    },
                    {
                        "label": "Strip New Lines",
                        "name": "stripNewLines",
                        "type": "boolean",
                        "optional": true,
                        "additionalParams": true,
                        "id": "openAIEmbeddings_0-input-stripNewLines-boolean"
                    },
                    {
                        "label": "Batch Size",
                        "name": "batchSize",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "openAIEmbeddings_0-input-batchSize-number"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "openAIEmbeddings_0-input-timeout-number"
                    },
                    {
                        "label": "BasePath",
                        "name": "basepath",
                        "type": "string",
                        "optional": true,
                        "additionalParams": true,
                        "id": "openAIEmbeddings_0-input-basepath-string"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "stripNewLines": "",
                    "batchSize": "",
                    "timeout": "",
                    "basepath": "",
                    "modelName": "text-embedding-ada-002"
                },
                "outputAnchors": [
                    {
                        "id": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings",
                        "name": "openAIEmbeddings",
                        "label": "OpenAIEmbeddings",
                        "type": "OpenAIEmbeddings | Embeddings"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 795.6162477805387,
                "y": 603.260214150876
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 429,
            "id": "recursiveCharacterTextSplitter_0",
            "position": {
                "x": 406.08456707531263,
                "y": 197.66460328693972
            },
            "type": "customNode",
            "data": {
                "id": "recursiveCharacterTextSplitter_0",
                "label": "Recursive Character Text Splitter",
                "version": 2,
                "name": "recursiveCharacterTextSplitter",
                "type": "RecursiveCharacterTextSplitter",
                "baseClasses": ["RecursiveCharacterTextSplitter", "TextSplitter"],
                "category": "Text Splitters",
                "description": "Split documents recursively by different characters - starting with \"\\n\\n\", then \"\\n\", then \" \"",
                "inputParams": [
                    {
                        "label": "Chunk Size",
                        "name": "chunkSize",
                        "type": "number",
                        "default": 1000,
                        "optional": true,
                        "id": "recursiveCharacterTextSplitter_0-input-chunkSize-number"
                    },
                    {
                        "label": "Chunk Overlap",
                        "name": "chunkOverlap",
                        "type": "number",
                        "optional": true,
                        "id": "recursiveCharacterTextSplitter_0-input-chunkOverlap-number"
                    },
                    {
                        "label": "Custom Separators",
                        "name": "separators",
                        "type": "string",
                        "rows": 4,
                        "description": "Array of custom separators to determine when to split the text, will override the default separators",
                        "placeholder": "[\"|\", \"##\", \">\", \"-\"]",
                        "additionalParams": true,
                        "optional": true,
                        "id": "recursiveCharacterTextSplitter_0-input-separators-string"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "chunkSize": 1000,
                    "chunkOverlap": ""
                },
                "outputAnchors": [
                    {
                        "id": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter",
                        "name": "recursiveCharacterTextSplitter",
                        "label": "RecursiveCharacterTextSplitter",
                        "type": "RecursiveCharacterTextSplitter | TextSplitter"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 406.08456707531263,
                "y": 197.66460328693972
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 419,
            "id": "textFile_0",
            "position": {
                "x": 786.5497697231324,
                "y": 140.09563157584407
            },
            "type": "customNode",
            "data": {
                "id": "textFile_0",
                "label": "Text File",
                "version": 3,
                "name": "textFile",
                "type": "Document",
                "baseClasses": ["Document"],
                "category": "Document Loaders",
                "description": "Load data from text files",
                "inputParams": [
                    {
                        "label": "Txt File",
                        "name": "txtFile",
                        "type": "file",
                        "fileType": ".txt, .html, .aspx, .asp, .cpp, .c, .cs, .css, .go, .h, .java, .js, .less, .ts, .php, .proto, .python, .py, .rst, .ruby, .rb, .rs, .scala, .sc, .scss, .sol, .sql, .swift, .markdown, .md, .tex, .ltx, .vb, .xml",
                        "id": "textFile_0-input-txtFile-file"
                    },
                    {
                        "label": "Metadata",
                        "name": "metadata",
                        "type": "json",
                        "optional": true,
                        "additionalParams": true,
                        "id": "textFile_0-input-metadata-json"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Text Splitter",
                        "name": "textSplitter",
                        "type": "TextSplitter",
                        "optional": true,
                        "id": "textFile_0-input-textSplitter-TextSplitter"
                    }
                ],
                "inputs": {
                    "textSplitter": "{{recursiveCharacterTextSplitter_0.data.instance}}",
                    "metadata": ""
                },
                "outputAnchors": [
                    {
                        "name": "output",
                        "label": "Output",
                        "type": "options",
                        "options": [
                            {
                                "id": "textFile_0-output-document-Document|json",
                                "name": "document",
                                "label": "Document",
                                "type": "Document | json"
                            },
                            {
                                "id": "textFile_0-output-text-string|json",
                                "name": "text",
                                "label": "Text",
                                "type": "string | json"
                            }
                        ],
                        "default": "document"
                    }
                ],
                "outputs": {
                    "output": "document"
                },
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 786.5497697231324,
                "y": 140.09563157584407
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 480,
            "id": "conversationalRetrievalQAChain_0",
            "position": {
                "x": 1558.6564094656787,
                "y": 386.60217819991124
            },
            "type": "customNode",
            "data": {
                "id": "conversationalRetrievalQAChain_0",
                "label": "Conversational Retrieval QA Chain",
                "version": 3,
                "name": "conversationalRetrievalQAChain",
                "type": "ConversationalRetrievalQAChain",
                "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
                "category": "Chains",
                "description": "Document QA - built on RetrievalQAChain to provide a chat history component",
                "inputParams": [
                    {
                        "label": "Return Source Documents",
                        "name": "returnSourceDocuments",
                        "type": "boolean",
                        "optional": true,
                        "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean"
                    },
                    {
                        "label": "Rephrase Prompt",
                        "name": "rephrasePrompt",
                        "type": "string",
                        "description": "Using previous chat history, rephrase question into a standalone question",
                        "warning": "Prompt must include input variables: {chat_history} and {question}",
                        "rows": 4,
                        "additionalParams": true,
                        "optional": true,
                        "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
                        "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string"
                    },
                    {
                        "label": "Response Prompt",
                        "name": "responsePrompt",
                        "type": "string",
                        "description": "Taking the rephrased question, search for answer from the provided context",
                        "warning": "Prompt must include input variable: {context}",
                        "rows": 4,
                        "additionalParams": true,
                        "optional": true,
                        "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.",
                        "id": "conversationalRetrievalQAChain_0-input-responsePrompt-string"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Chat Model",
                        "name": "model",
                        "type": "BaseChatModel",
                        "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel"
                    },
                    {
                        "label": "Vector Store Retriever",
                        "name": "vectorStoreRetriever",
                        "type": "BaseRetriever",
                        "id": "conversationalRetrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever"
                    },
                    {
                        "label": "Memory",
                        "name": "memory",
                        "type": "BaseMemory",
                        "optional": true,
                        "description": "If left empty, a default BufferMemory will be used",
                        "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
                    },
                    {
                        "label": "Input Moderation",
                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                        "name": "inputModeration",
                        "type": "Moderation",
                        "optional": true,
                        "list": true,
                        "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
                    }
                ],
                "inputs": {
                    "inputModeration": "",
                    "model": "{{chatOpenAI_0.data.instance}}",
                    "vectorStoreRetriever": "{{pinecone_0.data.instance}}",
                    "memory": "",
                    "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
                    "responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer."
                },
                "outputAnchors": [
                    {
                        "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|Runnable",
                        "name": "conversationalRetrievalQAChain",
                        "label": "ConversationalRetrievalQAChain",
                        "type": "ConversationalRetrievalQAChain | BaseChain | Runnable"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "positionAbsolute": {
                "x": 1558.6564094656787,
                "y": 386.60217819991124
            },
            "selected": false
        },
        {
            "width": 300,
            "height": 574,
            "id": "chatOpenAI_0",
            "position": {
                "x": 1194.3554779412727,
                "y": -46.74877201166788
            },
            "type": "customNode",
            "data": {
                "id": "chatOpenAI_0",
                "label": "ChatOpenAI",
                "version": 6.0,
                "name": "chatOpenAI",
                "type": "ChatOpenAI",
                "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
                "category": "Chat Models",
                "description": "Wrapper around OpenAI large language models that use the Chat endpoint",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["openAIApi"],
                        "id": "chatOpenAI_0-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "asyncOptions",
                        "loadMethod": "listModels",
                        "default": "gpt-3.5-turbo",
                        "id": "chatOpenAI_0-input-modelName-options"
                    },
                    {
                        "label": "Temperature",
                        "name": "temperature",
                        "type": "number",
                        "step": 0.1,
                        "default": 0.9,
                        "optional": true,
                        "id": "chatOpenAI_0-input-temperature-number"
                    },
                    {
                        "label": "Max Tokens",
                        "name": "maxTokens",
                        "type": "number",
                        "step": 1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-maxTokens-number"
                    },
                    {
                        "label": "Top Probability",
                        "name": "topP",
                        "type": "number",
                        "step": 0.1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-topP-number"
                    },
                    {
                        "label": "Frequency Penalty",
                        "name": "frequencyPenalty",
                        "type": "number",
                        "step": 0.1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-frequencyPenalty-number"
                    },
                    {
                        "label": "Presence Penalty",
                        "name": "presencePenalty",
                        "type": "number",
                        "step": 0.1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-presencePenalty-number"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "step": 1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-timeout-number"
                    },
                    {
                        "label": "BasePath",
                        "name": "basepath",
                        "type": "string",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-basepath-string"
                    },
                    {
                        "label": "BaseOptions",
                        "name": "baseOptions",
                        "type": "json",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-baseOptions-json"
                    },
                    {
                        "label": "Allow Image Uploads",
                        "name": "allowImageUploads",
                        "type": "boolean",
                        "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
                        "default": false,
                        "optional": true,
                        "id": "chatOpenAI_0-input-allowImageUploads-boolean"
                    },
                    {
                        "label": "Image Resolution",
                        "description": "This parameter controls the resolution in which the model views the image.",
                        "name": "imageResolution",
                        "type": "options",
                        "options": [
                            {
                                "label": "Low",
                                "name": "low"
                            },
                            {
                                "label": "High",
                                "name": "high"
                            },
                            {
                                "label": "Auto",
                                "name": "auto"
                            }
                        ],
                        "default": "low",
                        "optional": false,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-imageResolution-options"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Cache",
                        "name": "cache",
                        "type": "BaseCache",
                        "optional": true,
                        "id": "chatOpenAI_0-input-cache-BaseCache"
                    }
                ],
                "inputs": {
                    "cache": "",
                    "modelName": "gpt-3.5-turbo-16k",
                    "temperature": 0.9,
                    "maxTokens": "",
                    "topP": "",
                    "frequencyPenalty": "",
                    "presencePenalty": "",
                    "timeout": "",
                    "basepath": "",
                    "baseOptions": "",
                    "allowImageUploads": true,
                    "imageResolution": "low"
                },
                "outputAnchors": [
                    {
                        "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
                        "name": "chatOpenAI",
                        "label": "ChatOpenAI",
                        "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1194.3554779412727,
                "y": -46.74877201166788
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 555,
            "id": "pinecone_0",
            "position": {
                "x": 1192.4771449209463,
                "y": 552.43946147251
            },
            "type": "customNode",
            "data": {
                "id": "pinecone_0",
                "label": "Pinecone",
                "version": 2,
                "name": "pinecone",
                "type": "Pinecone",
                "baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
                "category": "Vector Stores",
                "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["pineconeApi"],
                        "id": "pinecone_0-input-credential-credential"
                    },
                    {
                        "label": "Pinecone Index",
                        "name": "pineconeIndex",
                        "type": "string",
                        "id": "pinecone_0-input-pineconeIndex-string"
                    },
                    {
                        "label": "Pinecone Namespace",
                        "name": "pineconeNamespace",
                        "type": "string",
                        "placeholder": "my-first-namespace",
                        "additionalParams": true,
                        "optional": true,
                        "id": "pinecone_0-input-pineconeNamespace-string"
                    },
                    {
                        "label": "Pinecone Metadata Filter",
                        "name": "pineconeMetadataFilter",
                        "type": "json",
                        "optional": true,
                        "additionalParams": true,
                        "id": "pinecone_0-input-pineconeMetadataFilter-json"
                    },
                    {
                        "label": "Top K",
                        "name": "topK",
                        "description": "Number of top results to fetch. Default to 4",
                        "placeholder": "4",
                        "type": "number",
                        "additionalParams": true,
                        "optional": true,
                        "id": "pinecone_0-input-topK-number"
                    },
                    {
                        "label": "Search Type",
                        "name": "searchType",
                        "type": "options",
                        "default": "similarity",
                        "options": [
                            {
                                "label": "Similarity",
                                "name": "similarity"
                            },
                            {
                                "label": "Max Marginal Relevance",
                                "name": "mmr"
                            }
                        ],
                        "additionalParams": true,
                        "optional": true,
                        "id": "pinecone_0-input-searchType-options"
                    },
                    {
                        "label": "Fetch K (for MMR Search)",
                        "name": "fetchK",
                        "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
                        "placeholder": "20",
                        "type": "number",
                        "additionalParams": true,
                        "optional": true,
                        "id": "pinecone_0-input-fetchK-number"
                    },
                    {
                        "label": "Lambda (for MMR Search)",
                        "name": "lambda",
                        "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
                        "placeholder": "0.5",
                        "type": "number",
                        "additionalParams": true,
                        "optional": true,
                        "id": "pinecone_0-input-lambda-number"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Document",
                        "name": "document",
                        "type": "Document",
                        "list": true,
                        "optional": true,
                        "id": "pinecone_0-input-document-Document"
                    },
                    {
                        "label": "Embeddings",
                        "name": "embeddings",
                        "type": "Embeddings",
                        "id": "pinecone_0-input-embeddings-Embeddings"
                    }
                ],
                "inputs": {
                    "document": ["{{textFile_0.data.instance}}"],
                    "embeddings": "{{openAIEmbeddings_0.data.instance}}",
                    "pineconeIndex": "",
                    "pineconeNamespace": "",
                    "pineconeMetadataFilter": "",
                    "topK": "",
                    "searchType": "similarity",
                    "fetchK": "",
                    "lambda": ""
                },
                "outputAnchors": [
                    {
                        "name": "output",
                        "label": "Output",
                        "type": "options",
                        "options": [
                            {
                                "id": "pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever",
                                "name": "retriever",
                                "label": "Pinecone Retriever",
                                "type": "Pinecone | VectorStoreRetriever | BaseRetriever"
                            },
                            {
                                "id": "pinecone_0-output-vectorStore-Pinecone|VectorStore",
                                "name": "vectorStore",
                                "label": "Pinecone Vector Store",
                                "type": "Pinecone | VectorStore"
                            }
                        ],
                        "default": "retriever"
                    }
                ],
                "outputs": {
                    "output": "retriever"
                },
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1192.4771449209463,
                "y": 552.43946147251
            },
            "dragging": false
        }
    ],
    "edges": [
        {
            "source": "recursiveCharacterTextSplitter_0",
            "sourceHandle": "recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter",
            "target": "textFile_0",
            "targetHandle": "textFile_0-input-textSplitter-TextSplitter",
            "type": "buttonedge",
            "id": "recursiveCharacterTextSplitter_0-recursiveCharacterTextSplitter_0-output-recursiveCharacterTextSplitter-RecursiveCharacterTextSplitter|TextSplitter-textFile_0-textFile_0-input-textSplitter-TextSplitter",
            "data": {
                "label": ""
            }
        },
        {
            "source": "textFile_0",
            "sourceHandle": "textFile_0-output-document-Document|json",
            "target": "pinecone_0",
            "targetHandle": "pinecone_0-input-document-Document",
            "type": "buttonedge",
            "id": "textFile_0-textFile_0-output-document-Document|json-pinecone_0-pinecone_0-input-document-Document",
            "data": {
                "label": ""
            }
        },
        {
            "source": "openAIEmbeddings_0",
            "sourceHandle": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings",
            "target": "pinecone_0",
            "targetHandle": "pinecone_0-input-embeddings-Embeddings",
            "type": "buttonedge",
            "id": "openAIEmbeddings_0-openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings-pinecone_0-pinecone_0-input-embeddings-Embeddings",
            "data": {
                "label": ""
            }
        },
        {
            "source": "pinecone_0",
            "sourceHandle": "pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever",
            "target": "conversationalRetrievalQAChain_0",
            "targetHandle": "conversationalRetrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever",
            "type": "buttonedge",
            "id": "pinecone_0-pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever",
            "data": {
                "label": ""
            }
        },
        {
            "source": "chatOpenAI_0",
            "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
            "target": "conversationalRetrievalQAChain_0",
            "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel",
            "type": "buttonedge",
            "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel",
            "data": {
                "label": ""
            }
        }
    ]
}
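
For reference, once this template is imported into a Flowise instance and saved as a chatflow, it can be queried over Flowise's prediction REST endpoint. The sketch below is a minimal example, not part of the template itself: the base URL, chatflow ID, and API key are placeholder assumptions, and while the POST /api/v1/prediction/{id} endpoint and the "question" payload field follow the Flowise prediction API, the exact response fields ("text", "sourceDocuments") should be checked against the version you run.

# Minimal sketch: query a Flowise chatflow built from this template.
# Assumptions: Flowise runs at http://localhost:3000, CHATFLOW_ID and
# API_KEY are placeholders, and the response field names match the
# Flowise version in use.
import requests

BASE_URL = "http://localhost:3000"      # your Flowise instance
CHATFLOW_ID = "<your-chatflow-id>"      # copied from the Flowise UI
API_KEY = "<your-api-key>"              # only needed if the chatflow is protected

def ask(question: str) -> dict:
    """Send a question to the Conversational Retrieval QA chatflow."""
    resp = requests.post(
        f"{BASE_URL}/api/v1/prediction/{CHATFLOW_ID}",
        headers={"Authorization": f"Bearer {API_KEY}"},
        json={"question": question},
        timeout=60,
    )
    resp.raise_for_status()
    return resp.json()

if __name__ == "__main__":
    answer = ask("What does the uploaded text file say about pricing?")
    print(answer.get("text"))
    # If "Return Source Documents" is enabled on the chain node, the
    # response is expected to also carry a "sourceDocuments" list.
    for doc in answer.get("sourceDocuments", []):
        print(doc.get("metadata"))

Note that the ingestion branch of the flow (Text File → Recursive Character Text Splitter → Pinecone with OpenAI Embeddings) only runs when the document is upserted from Flowise, so the OpenAI and Pinecone credentials and the Pinecone index referenced by the nodes must be configured before querying.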
