Flowise

Flowise Docs QnA.json
{
    "description": "Flowise Docs Github QnA using conversational retrieval QA chain",
    "categories": "Memory Vector Store,Github Loader,ChatOpenAI,Conversational Retrieval QA Chain,Langchain",
    "badge": "POPULAR",
    "framework": "Langchain",
    "nodes": [
        {
            "width": 300,
            "height": 376,
            "id": "markdownTextSplitter_0",
            "position": {
                "x": 1081.1540334344143,
                "y": -113.73571627207801
            },
            "type": "customNode",
            "data": {
                "id": "markdownTextSplitter_0",
                "label": "Markdown Text Splitter",
                "name": "markdownTextSplitter",
                "version": 1,
                "type": "MarkdownTextSplitter",
                "baseClasses": ["MarkdownTextSplitter", "RecursiveCharacterTextSplitter", "TextSplitter", "BaseDocumentTransformer"],
                "category": "Text Splitters",
                "description": "Split your content into documents based on the Markdown headers",
                "inputParams": [
                    {
                        "label": "Chunk Size",
                        "name": "chunkSize",
                        "type": "number",
                        "default": 1000,
                        "optional": true,
                        "id": "markdownTextSplitter_0-input-chunkSize-number"
                    },
                    {
                        "label": "Chunk Overlap",
                        "name": "chunkOverlap",
                        "type": "number",
                        "optional": true,
                        "id": "markdownTextSplitter_0-input-chunkOverlap-number"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "chunkSize": "4000",
                    "chunkOverlap": ""
                },
                "outputAnchors": [
                    {
                        "id": "markdownTextSplitter_0-output-markdownTextSplitter-MarkdownTextSplitter|RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer",
                        "name": "markdownTextSplitter",
                        "label": "MarkdownTextSplitter",
                        "type": "MarkdownTextSplitter | RecursiveCharacterTextSplitter | TextSplitter | BaseDocumentTransformer"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1081.1540334344143,
                "y": -113.73571627207801
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 405,
            "id": "memoryVectorStore_0",
            "position": {
                "x": 1844.88052464165,
                "y": 484.60473328470243
            },
            "type": "customNode",
            "data": {
                "id": "memoryVectorStore_0",
                "label": "In-Memory Vector Store",
                "name": "memoryVectorStore",
                "version": 1,
                "type": "Memory",
                "baseClasses": ["Memory", "VectorStoreRetriever", "BaseRetriever"],
                "category": "Vector Stores",
                "description": "In-memory vectorstore that stores embeddings and does an exact, linear search for the most similar embeddings.",
                "inputParams": [
                    {
                        "label": "Top K",
                        "name": "topK",
                        "description": "Number of top results to fetch. Default to 4",
                        "placeholder": "4",
                        "type": "number",
                        "optional": true,
                        "id": "memoryVectorStore_0-input-topK-number"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Document",
                        "name": "document",
                        "type": "Document",
                        "list": true,
                        "id": "memoryVectorStore_0-input-document-Document"
                    },
                    {
                        "label": "Embeddings",
                        "name": "embeddings",
                        "type": "Embeddings",
                        "id": "memoryVectorStore_0-input-embeddings-Embeddings"
                    }
                ],
                "inputs": {
                    "document": ["{{github_0.data.instance}}"],
                    "embeddings": "{{openAIEmbeddings_0.data.instance}}",
                    "topK": ""
                },
                "outputAnchors": [
                    {
                        "name": "output",
                        "label": "Output",
                        "type": "options",
                        "options": [
                            {
                                "id": "memoryVectorStore_0-output-retriever-Memory|VectorStoreRetriever|BaseRetriever",
                                "name": "retriever",
                                "label": "Memory Retriever",
                                "type": "Memory | VectorStoreRetriever | BaseRetriever"
                            },
                            {
                                "id": "memoryVectorStore_0-output-vectorStore-Memory|VectorStore",
                                "name": "vectorStore",
                                "label": "Memory Vector Store",
                                "type": "Memory | VectorStore"
                            }
                        ],
                        "default": "retriever"
                    }
                ],
                "outputs": {
                    "output": "retriever"
                },
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1844.88052464165,
                "y": 484.60473328470243
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 479,
            "id": "conversationalRetrievalQAChain_0",
            "position": {
                "x": 2311.697827287373,
                "y": 228.14841720207832
            },
            "type": "customNode",
            "data": {
                "id": "conversationalRetrievalQAChain_0",
                "label": "Conversational Retrieval QA Chain",
                "name": "conversationalRetrievalQAChain",
                "version": 3,
                "type": "ConversationalRetrievalQAChain",
                "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain", "Runnable"],
                "category": "Chains",
                "description": "Document QA - built on RetrievalQAChain to provide a chat history component",
                "inputParams": [
                    {
                        "label": "Return Source Documents",
                        "name": "returnSourceDocuments",
                        "type": "boolean",
                        "optional": true,
                        "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean"
                    },
                    {
                        "label": "Rephrase Prompt",
                        "name": "rephrasePrompt",
                        "type": "string",
                        "description": "Using previous chat history, rephrase question into a standalone question",
                        "warning": "Prompt must include input variables: {chat_history} and {question}",
                        "rows": 4,
                        "additionalParams": true,
                        "optional": true,
                        "default": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
                        "id": "conversationalRetrievalQAChain_0-input-rephrasePrompt-string"
                    },
                    {
                        "label": "Response Prompt",
                        "name": "responsePrompt",
                        "type": "string",
                        "description": "Taking the rephrased question, search for answer from the provided context",
                        "warning": "Prompt must include input variable: {context}",
                        "rows": 4,
                        "additionalParams": true,
                        "optional": true,
                        "default": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.",
                        "id": "conversationalRetrievalQAChain_0-input-responsePrompt-string"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Chat Model",
                        "name": "model",
                        "type": "BaseChatModel",
                        "id": "conversationalRetrievalQAChain_0-input-model-BaseChatModel"
                    },
                    {
                        "label": "Vector Store Retriever",
                        "name": "vectorStoreRetriever",
                        "type": "BaseRetriever",
                        "id": "conversationalRetrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever"
                    },
                    {
                        "label": "Memory",
                        "name": "memory",
                        "type": "BaseMemory",
                        "optional": true,
                        "description": "If left empty, a default BufferMemory will be used",
                        "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
                    },
                    {
                        "label": "Input Moderation",
                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                        "name": "inputModeration",
                        "type": "Moderation",
                        "optional": true,
                        "list": true,
                        "id": "conversationalRetrievalQAChain_0-input-inputModeration-Moderation"
                    }
                ],
                "inputs": {
                    "inputModeration": "",
                    "model": "{{chatOpenAI_0.data.instance}}",
                    "vectorStoreRetriever": "{{memoryVectorStore_0.data.instance}}",
                    "memory": "",
                    "returnSourceDocuments": true,
                    "rephrasePrompt": "Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone Question:",
                    "responsePrompt": "You are a helpful assistant. Using the provided context, answer the user's question to the best of your ability using the resources provided.\nIf there is nothing in the context relevant to the question at hand, just say \"Hmm, I'm not sure.\" Don't try to make up an answer.\n------------\n{context}\n------------\nREMEMBER: If there is no relevant information within the context, just say \"Hmm, I'm not sure.\" Don't try to make up an answer."
                },
                "outputAnchors": [
                    {
                        "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain|Runnable",
                        "name": "conversationalRetrievalQAChain",
                        "label": "ConversationalRetrievalQAChain",
                        "type": "ConversationalRetrievalQAChain | BaseChain | Runnable"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "dragging": false,
            "positionAbsolute": {
                "x": 2311.697827287373,
                "y": 228.14841720207832
            }
        },
        {
            "width": 300,
            "height": 673,
            "id": "github_0",
            "position": {
                "x": 1460.1858988997,
                "y": -137.83585695472374
            },
            "type": "customNode",
            "data": {
                "id": "github_0",
                "label": "Github",
                "name": "github",
                "version": 2,
                "type": "Document",
                "baseClasses": ["Document"],
                "category": "Document Loaders",
                "description": "Load data from a GitHub repository",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "description": "Only needed when accessing private repo",
                        "optional": true,
                        "credentialNames": ["githubApi"],
                        "id": "github_0-input-credential-credential"
                    },
                    {
                        "label": "Repo Link",
                        "name": "repoLink",
                        "type": "string",
                        "placeholder": "https://github.com/FlowiseAI/Flowise",
                        "id": "github_0-input-repoLink-string"
                    },
                    {
                        "label": "Branch",
                        "name": "branch",
                        "type": "string",
                        "default": "main",
                        "id": "github_0-input-branch-string"
                    },
                    {
                        "label": "Recursive",
                        "name": "recursive",
                        "type": "boolean",
                        "optional": true,
                        "id": "github_0-input-recursive-boolean"
                    },
                    {
                        "label": "Max Concurrency",
                        "name": "maxConcurrency",
                        "type": "number",
                        "step": 1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "github_0-input-maxConcurrency-number"
                    },
                    {
                        "label": "Ignore Paths",
                        "name": "ignorePath",
                        "type": "string",
                        "description": "An array of paths to be ignored",
                        "placeholder": "[\"*.md\"]",
                        "rows": 4,
                        "optional": true,
                        "additionalParams": true,
                        "id": "github_0-input-ignorePath-string"
                    },
                    {
                        "label": "Max Retries",
                        "name": "maxRetries",
                        "description": "The maximum number of retries that can be made for a single call, with an exponential backoff between each attempt. Defaults to 2.",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "github_0-input-maxRetries-number"
                    },
                    {
                        "label": "Metadata",
                        "name": "metadata",
                        "type": "json",
                        "optional": true,
                        "additionalParams": true,
                        "id": "github_0-input-metadata-json"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Text Splitter",
                        "name": "textSplitter",
                        "type": "TextSplitter",
                        "optional": true,
                        "id": "github_0-input-textSplitter-TextSplitter"
                    }
                ],
                "inputs": {
                    "repoLink": "https://github.com/FlowiseAI/FlowiseDocs",
                    "branch": "main",
                    "recursive": true,
                    "textSplitter": "{{markdownTextSplitter_0.data.instance}}",
                    "metadata": ""
                },
                "outputAnchors": [
                    {
                        "id": "github_0-output-github-Document",
                        "name": "github",
                        "label": "Document",
                        "type": "Document"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1460.1858988997,
                "y": -137.83585695472374
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 522,
            "id": "chatOpenAI_0",
            "position": {
                "x": 1857.367353502965,
                "y": -104.25095383414119
            },
            "type": "customNode",
            "data": {
                "id": "chatOpenAI_0",
                "label": "ChatOpenAI",
                "name": "chatOpenAI",
                "version": 6.0,
                "type": "ChatOpenAI",
                "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
                "category": "Chat Models",
                "description": "Wrapper around OpenAI large language models that use the Chat endpoint",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["openAIApi"],
                        "id": "chatOpenAI_0-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "asyncOptions",
                        "loadMethod": "listModels",
                        "default": "gpt-3.5-turbo",
                        "id": "chatOpenAI_0-input-modelName-options"
                    },
                    {
                        "label": "Temperature",
                        "name": "temperature",
                        "type": "number",
                        "default": 0.9,
                        "optional": true,
                        "id": "chatOpenAI_0-input-temperature-number"
                    },
                    {
                        "label": "Max Tokens",
                        "name": "maxTokens",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-maxTokens-number"
                    },
                    {
                        "label": "Top Probability",
                        "name": "topP",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-topP-number"
                    },
                    {
                        "label": "Frequency Penalty",
                        "name": "frequencyPenalty",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-frequencyPenalty-number"
                    },
                    {
                        "label": "Presence Penalty",
                        "name": "presencePenalty",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-presencePenalty-number"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-timeout-number"
                    },
                    {
                        "label": "BasePath",
                        "name": "basepath",
                        "type": "string",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-basepath-string"
                    },
                    {
                        "label": "BaseOptions",
                        "name": "baseOptions",
                        "type": "json",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-baseOptions-json"
                    },
                    {
                        "label": "Allow Image Uploads",
                        "name": "allowImageUploads",
                        "type": "boolean",
                        "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
                        "default": false,
                        "optional": true,
                        "id": "chatOpenAI_0-input-allowImageUploads-boolean"
                    },
                    {
                        "label": "Image Resolution",
                        "description": "This parameter controls the resolution in which the model views the image.",
                        "name": "imageResolution",
                        "type": "options",
                        "options": [
                            {
                                "label": "Low",
                                "name": "low"
                            },
                            {
                                "label": "High",
                                "name": "high"
                            },
                            {
                                "label": "Auto",
                                "name": "auto"
                            }
                        ],
                        "default": "low",
                        "optional": false,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-imageResolution-options"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Cache",
                        "name": "cache",
                        "type": "BaseCache",
                        "optional": true,
                        "id": "chatOpenAI_0-input-cache-BaseCache"
                    }
                ],
                "inputs": {
                    "modelName": "gpt-3.5-turbo",
                    "temperature": 0.9,
                    "maxTokens": "",
                    "topP": "",
                    "frequencyPenalty": "",
                    "presencePenalty": "",
                    "timeout": "",
                    "basepath": "",
                    "baseOptions": "",
                    "allowImageUploads": true,
                    "imageResolution": "low"
                },
                "outputAnchors": [
                    {
                        "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
                        "name": "chatOpenAI",
                        "label": "ChatOpenAI",
                        "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1857.367353502965,
                "y": -104.25095383414119
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 328,
            "id": "openAIEmbeddings_0",
            "position": {
                "x": 1299.9983863833309,
                "y": 581.8406384863323
            },
            "type": "customNode",
            "data": {
                "id": "openAIEmbeddings_0",
                "label": "OpenAI Embeddings",
                "name": "openAIEmbeddings",
                "version": 3,
                "type": "OpenAIEmbeddings",
                "baseClasses": ["OpenAIEmbeddings", "Embeddings"],
                "category": "Embeddings",
                "description": "OpenAI API to generate embeddings for a given text",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["openAIApi"],
                        "id": "openAIEmbeddings_0-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "asyncOptions",
                        "loadMethod": "listModels",
                        "default": "text-embedding-ada-002",
                        "id": "openAIEmbeddings_0-input-modelName-options"
                    },
                    {
                        "label": "Strip New Lines",
                        "name": "stripNewLines",
                        "type": "boolean",
                        "optional": true,
                        "additionalParams": true,
                        "id": "openAIEmbeddings_0-input-stripNewLines-boolean"
                    },
                    {
                        "label": "Batch Size",
                        "name": "batchSize",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "openAIEmbeddings_0-input-batchSize-number"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "openAIEmbeddings_0-input-timeout-number"
                    },
                    {
                        "label": "BasePath",
                        "name": "basepath",
                        "type": "string",
                        "optional": true,
                        "additionalParams": true,
                        "id": "openAIEmbeddings_0-input-basepath-string"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "stripNewLines": "",
                    "batchSize": "",
                    "timeout": "",
                    "basepath": "",
                    "modelName": "text-embedding-ada-002"
                },
                "outputAnchors": [
                    {
                        "id": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings",
                        "name": "openAIEmbeddings",
                        "label": "OpenAIEmbeddings",
                        "type": "OpenAIEmbeddings | Embeddings"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "dragging": false,
            "positionAbsolute": {
                "x": 1299.9983863833309,
                "y": 581.8406384863323
            }
        }
    ],
    "edges": [
        {
            "source": "memoryVectorStore_0",
            "sourceHandle": "memoryVectorStore_0-output-retriever-Memory|VectorStoreRetriever|BaseRetriever",
            "target": "conversationalRetrievalQAChain_0",
            "targetHandle": "conversationalRetrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever",
            "type": "buttonedge",
            "id": "memoryVectorStore_0-memoryVectorStore_0-output-retriever-Memory|VectorStoreRetriever|BaseRetriever-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever",
            "data": {
                "label": ""
            }
        },
        {
            "source": "markdownTextSplitter_0",
            "sourceHandle": "markdownTextSplitter_0-output-markdownTextSplitter-MarkdownTextSplitter|RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer",
            "target": "github_0",
            "targetHandle": "github_0-input-textSplitter-TextSplitter",
            "type": "buttonedge",
            "id": "markdownTextSplitter_0-markdownTextSplitter_0-output-markdownTextSplitter-MarkdownTextSplitter|RecursiveCharacterTextSplitter|TextSplitter|BaseDocumentTransformer-github_0-github_0-input-textSplitter-TextSplitter",
            "data": {
                "label": ""
            }
        },
        {
            "source": "github_0",
            "sourceHandle": "github_0-output-github-Document",
            "target": "memoryVectorStore_0",
            "targetHandle": "memoryVectorStore_0-input-document-Document",
            "type": "buttonedge",
            "id": "github_0-github_0-output-github-Document-memoryVectorStore_0-memoryVectorStore_0-input-document-Document",
            "data": {
                "label": ""
            }
        },
        {
            "source": "chatOpenAI_0",
            "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
            "target": "conversationalRetrievalQAChain_0",
            "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseChatModel",
            "type": "buttonedge",
            "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseChatModel",
            "data": {
                "label": ""
            }
        },
        {
            "source": "openAIEmbeddings_0",
            "sourceHandle": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings",
            "target": "memoryVectorStore_0",
            "targetHandle": "memoryVectorStore_0-input-embeddings-Embeddings",
            "type": "buttonedge",
            "id": "openAIEmbeddings_0-openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings-memoryVectorStore_0-memoryVectorStore_0-input-embeddings-Embeddings",
            "data": {
                "label": ""
            }
        }
    ]
}
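
Usage note: once this template is imported into a running Flowise instance, the resulting chatflow (GitHub loader -> Markdown splitter -> OpenAI embeddings -> in-memory vector store -> conversational retrieval QA chain) can be queried through Flowise's prediction REST API. The snippet below is a minimal sketch, not part of the template itself: the base URL http://localhost:3000 and the chatflow ID are placeholders, and any API-key header depends on how your Flowise deployment is secured.

import requests

# Placeholder values -- replace with your Flowise base URL and the ID
# assigned to the imported "Flowise Docs QnA" chatflow.
FLOWISE_URL = "http://localhost:3000"
CHATFLOW_ID = "<your-chatflow-id>"

def ask(question: str) -> dict:
    """Send a question to the chatflow and return the JSON response."""
    resp = requests.post(
        f"{FLOWISE_URL}/api/v1/prediction/{CHATFLOW_ID}",
        json={"question": question},
        timeout=120,
    )
    resp.raise_for_status()
    return resp.json()

if __name__ == "__main__":
    answer = ask("How do I deploy Flowise with Docker?")
    # With "returnSourceDocuments": true in the chain, the response is expected
    # to include the matched documentation chunks alongside the answer text.
    print(answer.get("text"))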