{
    "description": "Generate image using Replicate Stability text-to-image generative AI model",
    "badge": "NEW",
    "categories": "Replicate,ChatOpenAI,LLM Chain,Langchain",
    "framework": "Langchain",
    "nodes": [
        {
            "width": 300,
            "height": 475,
            "id": "promptTemplate_0",
            "position": {
                "x": 366.28009688480114,
                "y": 183.05394484895152
            },
            "type": "customNode",
            "data": {
                "id": "promptTemplate_0",
                "label": "Prompt Template",
                "version": 1,
                "name": "promptTemplate",
                "type": "PromptTemplate",
                "baseClasses": ["PromptTemplate", "BaseStringPromptTemplate", "BasePromptTemplate"],
                "category": "Prompts",
                "description": "Schema to represent a basic prompt for an LLM",
                "inputParams": [
                    {
                        "label": "Template",
                        "name": "template",
                        "type": "string",
                        "rows": 4,
                        "placeholder": "What is a good name for a company that makes {product}?",
                        "id": "promptTemplate_0-input-template-string"
                    },
                    {
                        "label": "Format Prompt Values",
                        "name": "promptValues",
                        "type": "json",
                        "optional": true,
                        "acceptVariable": true,
                        "list": true,
                        "id": "promptTemplate_0-input-promptValues-json"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "template": "{query}",
                    "promptValues": "{\"query\":\"{{question}}\"}"
                },
                "outputAnchors": [
                    {
                        "id": "promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate",
                        "name": "promptTemplate",
                        "label": "PromptTemplate",
                        "type": "PromptTemplate | BaseStringPromptTemplate | BasePromptTemplate"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 366.28009688480114,
                "y": 183.05394484895152
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 475,
            "id": "promptTemplate_1",
            "position": {
                "x": 1391.1872909364881,
                "y": 274.0360952991433
            },
            "type": "customNode",
            "data": {
                "id": "promptTemplate_1",
                "label": "Prompt Template",
                "version": 1,
                "name": "promptTemplate",
                "type": "PromptTemplate",
                "baseClasses": ["PromptTemplate", "BaseStringPromptTemplate", "BasePromptTemplate", "Runnable"],
                "category": "Prompts",
                "description": "Schema to represent a basic prompt for an LLM",
                "inputParams": [
                    {
                        "label": "Template",
                        "name": "template",
                        "type": "string",
                        "rows": 4,
                        "placeholder": "What is a good name for a company that makes {product}?",
                        "id": "promptTemplate_1-input-template-string"
                    },
                    {
                        "label": "Format Prompt Values",
                        "name": "promptValues",
                        "type": "json",
                        "optional": true,
                        "acceptVariable": true,
                        "list": true,
                        "id": "promptTemplate_1-input-promptValues-json"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "template": "Reply with nothing else but the following:\n![]({text})",
                    "promptValues": "{\"text\":\"{{llmChain_0.data.instance}}\"}"
                },
                "outputAnchors": [
                    {
                        "id": "promptTemplate_1-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable",
                        "name": "promptTemplate",
                        "label": "PromptTemplate",
                        "type": "PromptTemplate | BaseStringPromptTemplate | BasePromptTemplate | Runnable"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1391.1872909364881,
                "y": 274.0360952991433
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 577,
            "id": "replicate_0",
            "position": {
                "x": 700.5657822436667,
                "y": -192.57827891379952
            },
            "type": "customNode",
            "data": {
                "id": "replicate_0",
                "label": "Replicate",
                "version": 2,
                "name": "replicate",
                "type": "Replicate",
                "baseClasses": ["Replicate", "BaseChatModel", "LLM", "BaseLLM", "BaseLanguageModel", "Runnable"],
                "category": "LLMs",
                "description": "Use Replicate to run open source models on cloud",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["replicateApi"],
                        "id": "replicate_0-input-credential-credential"
                    },
                    {
                        "label": "Model",
                        "name": "model",
                        "type": "string",
                        "placeholder": "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
                        "optional": true,
                        "id": "replicate_0-input-model-string"
                    },
                    {
                        "label": "Temperature",
                        "name": "temperature",
                        "type": "number",
                        "step": 0.1,
                        "description": "Adjusts randomness of outputs, greater than 1 is random and 0 is deterministic, 0.75 is a good starting value.",
                        "default": 0.7,
                        "optional": true,
                        "id": "replicate_0-input-temperature-number"
                    },
                    {
                        "label": "Max Tokens",
                        "name": "maxTokens",
                        "type": "number",
                        "step": 1,
                        "description": "Maximum number of tokens to generate. A word is generally 2-3 tokens",
                        "optional": true,
                        "additionalParams": true,
                        "id": "replicate_0-input-maxTokens-number"
                    },
                    {
                        "label": "Top Probability",
                        "name": "topP",
                        "type": "number",
                        "step": 0.1,
                        "description": "When decoding text, samples from the top p percentage of most likely tokens; lower to ignore less likely tokens",
                        "optional": true,
                        "additionalParams": true,
                        "id": "replicate_0-input-topP-number"
                    },
                    {
                        "label": "Repetition Penalty",
                        "name": "repetitionPenalty",
                        "type": "number",
                        "step": 0.1,
                        "description": "Penalty for repeated words in generated text; 1 is no penalty, values greater than 1 discourage repetition, less than 1 encourage it. (minimum: 0.01; maximum: 5)",
                        "optional": true,
                        "additionalParams": true,
                        "id": "replicate_0-input-repetitionPenalty-number"
                    },
                    {
                        "label": "Additional Inputs",
                        "name": "additionalInputs",
                        "type": "json",
                        "description": "Each model has different parameters, refer to the specific model accepted inputs. For example: <a target=\"_blank\" href=\"https://replicate.com/a16z-infra/llama13b-v2-chat/api#inputs\">llama13b-v2</a>",
                        "additionalParams": true,
                        "optional": true,
                        "id": "replicate_0-input-additionalInputs-json"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Cache",
                        "name": "cache",
                        "type": "BaseCache",
                        "optional": true,
                        "id": "replicate_0-input-cache-BaseCache"
                    }
                ],
                "inputs": {
                    "cache": "",
                    "model": "stability-ai/sdxl:af1a68a271597604546c09c64aabcd7782c114a63539a4a8d14d1eeda5630c33",
                    "temperature": 0.7,
                    "maxTokens": "",
                    "topP": "",
                    "repetitionPenalty": "",
                    "additionalInputs": ""
                },
                "outputAnchors": [
                    {
                        "id": "replicate_0-output-replicate-Replicate|BaseChatModel|LLM|BaseLLM|BaseLanguageModel|Runnable",
                        "name": "replicate",
                        "label": "Replicate",
                        "type": "Replicate | BaseChatModel | LLM | BaseLLM | BaseLanguageModel | Runnable"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 700.5657822436667,
                "y": -192.57827891379952
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 456,
            "id": "llmChain_0",
            "position": {
                "x": 1045.7783277092838,
                "y": 242.08205161173464
            },
            "type": "customNode",
            "data": {
                "id": "llmChain_0",
                "label": "LLM Chain",
                "version": 3,
                "name": "llmChain",
                "type": "LLMChain",
                "baseClasses": ["LLMChain", "BaseChain", "Runnable"],
                "category": "Chains",
                "description": "Chain to run queries against LLMs",
                "inputParams": [
                    {
                        "label": "Chain Name",
                        "name": "chainName",
                        "type": "string",
                        "placeholder": "Name Your Chain",
                        "optional": true,
                        "id": "llmChain_0-input-chainName-string"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Language Model",
                        "name": "model",
                        "type": "BaseLanguageModel",
                        "id": "llmChain_0-input-model-BaseLanguageModel"
                    },
                    {
                        "label": "Prompt",
                        "name": "prompt",
                        "type": "BasePromptTemplate",
                        "id": "llmChain_0-input-prompt-BasePromptTemplate"
                    },
                    {
                        "label": "Output Parser",
                        "name": "outputParser",
                        "type": "BaseLLMOutputParser",
                        "optional": true,
                        "id": "llmChain_0-input-outputParser-BaseLLMOutputParser"
                    },
                    {
                        "label": "Input Moderation",
                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                        "name": "inputModeration",
                        "type": "Moderation",
                        "optional": true,
                        "list": true,
                        "id": "llmChain_0-input-inputModeration-Moderation"
                    }
                ],
                "inputs": {
                    "model": "{{replicate_0.data.instance}}",
                    "prompt": "{{promptTemplate_0.data.instance}}",
                    "outputParser": "",
                    "chainName": "",
                    "inputModeration": ""
                },
                "outputAnchors": [
                    {
                        "name": "output",
                        "label": "Output",
                        "type": "options",
                        "options": [
                            {
                                "id": "llmChain_0-output-llmChain-LLMChain|BaseChain|Runnable",
                                "name": "llmChain",
                                "label": "LLM Chain",
                                "type": "LLMChain | BaseChain | Runnable"
                            },
                            {
                                "id": "llmChain_0-output-outputPrediction-string|json",
                                "name": "outputPrediction",
                                "label": "Output Prediction",
                                "type": "string | json"
                            }
                        ],
                        "default": "llmChain"
                    }
                ],
                "outputs": {
                    "output": "outputPrediction"
                },
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1045.7783277092838,
                "y": 242.08205161173464
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 456,
            "id": "llmChain_1",
            "position": {
                "x": 1769.7463380379868,
                "y": 194.56291579865376
            },
            "type": "customNode",
            "data": {
                "id": "llmChain_1",
                "label": "LLM Chain",
                "version": 3,
                "name": "llmChain",
                "type": "LLMChain",
                "baseClasses": ["LLMChain", "BaseChain", "Runnable"],
                "category": "Chains",
                "description": "Chain to run queries against LLMs",
                "inputParams": [
                    {
                        "label": "Chain Name",
                        "name": "chainName",
                        "type": "string",
                        "placeholder": "Name Your Chain",
                        "optional": true,
                        "id": "llmChain_1-input-chainName-string"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Language Model",
                        "name": "model",
                        "type": "BaseLanguageModel",
                        "id": "llmChain_1-input-model-BaseLanguageModel"
                    },
                    {
                        "label": "Prompt",
                        "name": "prompt",
                        "type": "BasePromptTemplate",
                        "id": "llmChain_1-input-prompt-BasePromptTemplate"
                    },
                    {
                        "label": "Output Parser",
                        "name": "outputParser",
                        "type": "BaseLLMOutputParser",
                        "optional": true,
                        "id": "llmChain_1-input-outputParser-BaseLLMOutputParser"
                    },
                    {
                        "label": "Input Moderation",
                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                        "name": "inputModeration",
                        "type": "Moderation",
                        "optional": true,
                        "list": true,
                        "id": "llmChain_1-input-inputModeration-Moderation"
                    }
                ],
                "inputs": {
                    "model": "{{chatOpenAI_0.data.instance}}",
                    "prompt": "{{promptTemplate_1.data.instance}}",
                    "outputParser": "",
                    "chainName": "",
                    "inputModeration": ""
                },
                "outputAnchors": [
                    {
                        "name": "output",
                        "label": "Output",
                        "type": "options",
                        "options": [
                            {
                                "id": "llmChain_1-output-llmChain-LLMChain|BaseChain|Runnable",
                                "name": "llmChain",
                                "label": "LLM Chain",
                                "type": "LLMChain | BaseChain | Runnable"
                            },
                            {
                                "id": "llmChain_1-output-outputPrediction-string|json",
                                "name": "outputPrediction",
                                "label": "Output Prediction",
                                "type": "string | json"
                            }
                        ],
                        "default": "llmChain"
                    }
                ],
                "outputs": {
                    "output": "llmChain"
                },
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1769.7463380379868,
                "y": 194.56291579865376
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 574,
            "id": "chatOpenAI_0",
            "position": {
                "x": 1390.9908731749008,
                "y": -332.0609187416074
            },
            "type": "customNode",
            "data": {
                "id": "chatOpenAI_0",
                "label": "ChatOpenAI",
                "version": 6.0,
                "name": "chatOpenAI",
                "type": "ChatOpenAI",
                "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"],
                "category": "Chat Models",
                "description": "Wrapper around OpenAI large language models that use the Chat endpoint",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["openAIApi"],
                        "id": "chatOpenAI_0-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "asyncOptions",
                        "loadMethod": "listModels",
                        "default": "gpt-3.5-turbo",
                        "id": "chatOpenAI_0-input-modelName-options"
                    },
                    {
                        "label": "Temperature",
                        "name": "temperature",
                        "type": "number",
                        "step": 0.1,
                        "default": 0.9,
                        "optional": true,
                        "id": "chatOpenAI_0-input-temperature-number"
                    },
                    {
                        "label": "Max Tokens",
                        "name": "maxTokens",
                        "type": "number",
                        "step": 1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-maxTokens-number"
                    },
                    {
                        "label": "Top Probability",
                        "name": "topP",
                        "type": "number",
                        "step": 0.1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-topP-number"
                    },
                    {
                        "label": "Frequency Penalty",
                        "name": "frequencyPenalty",
                        "type": "number",
                        "step": 0.1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-frequencyPenalty-number"
                    },
                    {
                        "label": "Presence Penalty",
                        "name": "presencePenalty",
                        "type": "number",
                        "step": 0.1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-presencePenalty-number"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "step": 1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-timeout-number"
                    },
                    {
                        "label": "BasePath",
                        "name": "basepath",
                        "type": "string",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-basepath-string"
                    },
                    {
                        "label": "BaseOptions",
                        "name": "baseOptions",
                        "type": "json",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-baseOptions-json"
                    },
                    {
                        "label": "Allow Image Uploads",
                        "name": "allowImageUploads",
                        "type": "boolean",
                        "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
                        "default": false,
                        "optional": true,
                        "id": "chatOpenAI_0-input-allowImageUploads-boolean"
                    },
                    {
                        "label": "Image Resolution",
                        "description": "This parameter controls the resolution in which the model views the image.",
                        "name": "imageResolution",
                        "type": "options",
                        "options": [
                            {
                                "label": "Low",
                                "name": "low"
                            },
                            {
                                "label": "High",
                                "name": "high"
                            },
                            {
                                "label": "Auto",
                                "name": "auto"
                            }
                        ],
                        "default": "low",
                        "optional": false,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-imageResolution-options"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Cache",
                        "name": "cache",
                        "type": "BaseCache",
                        "optional": true,
                        "id": "chatOpenAI_0-input-cache-BaseCache"
                    }
                ],
                "inputs": {
                    "cache": "",
                    "modelName": "gpt-3.5-turbo",
                    "temperature": "0",
                    "maxTokens": "",
                    "topP": "",
                    "frequencyPenalty": "",
                    "presencePenalty": "",
                    "timeout": "",
                    "basepath": "",
                    "baseOptions": "",
                    "allowImageUploads": true,
                    "imageResolution": "low"
                },
                "outputAnchors": [
                    {
                        "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
                        "name": "chatOpenAI",
                        "label": "ChatOpenAI",
                        "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1390.9908731749008,
                "y": -332.0609187416074
            },
            "dragging": false
        }
    ],
    "edges": [
        {
            "source": "promptTemplate_0",
            "sourceHandle": "promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate",
            "target": "llmChain_0",
            "targetHandle": "llmChain_0-input-prompt-BasePromptTemplate",
            "type": "buttonedge",
            "id": "promptTemplate_0-promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate-llmChain_0-llmChain_0-input-prompt-BasePromptTemplate",
            "data": {
                "label": ""
            }
        },
        {
            "source": "replicate_0",
            "sourceHandle": "replicate_0-output-replicate-Replicate|BaseChatModel|LLM|BaseLLM|BaseLanguageModel|Runnable",
            "target": "llmChain_0",
            "targetHandle": "llmChain_0-input-model-BaseLanguageModel",
            "type": "buttonedge",
            "id": "replicate_0-replicate_0-output-replicate-Replicate|BaseChatModel|LLM|BaseLLM|BaseLanguageModel|Runnable-llmChain_0-llmChain_0-input-model-BaseLanguageModel",
            "data": {
                "label": ""
            }
        },
        {
            "source": "promptTemplate_1",
            "sourceHandle": "promptTemplate_1-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable",
            "target": "llmChain_1",
            "targetHandle": "llmChain_1-input-prompt-BasePromptTemplate",
            "type": "buttonedge",
            "id": "promptTemplate_1-promptTemplate_1-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate|Runnable-llmChain_1-llmChain_1-input-prompt-BasePromptTemplate",
            "data": {
                "label": ""
            }
        },
        {
            "source": "chatOpenAI_0",
            "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
            "target": "llmChain_1",
            "targetHandle": "llmChain_1-input-model-BaseLanguageModel",
            "type": "buttonedge",
            "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-llmChain_1-llmChain_1-input-model-BaseLanguageModel",
            "data": {
                "label": ""
            }
        },
        {
            "source": "llmChain_0",
            "sourceHandle": "llmChain_0-output-outputPrediction-string|json",
            "target": "promptTemplate_1",
            "targetHandle": "promptTemplate_1-input-promptValues-json",
            "type": "buttonedge",
            "id": "llmChain_0-llmChain_0-output-outputPrediction-string|json-promptTemplate_1-promptTemplate_1-input-promptValues-json",
            "data": {
                "label": ""
            }
        }
    ]
}