{
    "description": "Use Replicate API that runs Llama 13b v2 model with LLMChain",
    "categories": "Replicate,LLM Chain,Langchain",
    "framework": "Langchain",
    "nodes": [
        {
            "width": 300,
            "height": 475,
            "id": "promptTemplate_0",
            "position": {
                "x": 269.2203229225663,
                "y": 129.02909641085535
            },
            "type": "customNode",
            "data": {
                "id": "promptTemplate_0",
                "label": "Prompt Template",
                "version": 1,
                "name": "promptTemplate",
                "type": "PromptTemplate",
                "baseClasses": ["PromptTemplate", "BaseStringPromptTemplate", "BasePromptTemplate"],
                "category": "Prompts",
                "description": "Schema to represent a basic prompt for an LLM",
                "inputParams": [
                    {
                        "label": "Template",
                        "name": "template",
                        "type": "string",
                        "rows": 4,
                        "placeholder": "What is a good name for a company that makes {product}?",
                        "id": "promptTemplate_0-input-template-string"
                    },
                    {
                        "label": "Format Prompt Values",
                        "name": "promptValues",
                        "type": "json",
                        "optional": true,
                        "acceptVariable": true,
                        "list": true,
                        "id": "promptTemplate_0-input-promptValues-json"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "template": "Assistant: You are a helpful assistant. You do not respond as 'User' or pretend to be 'User'. You only respond once as Assistant.\nUser: {query}\nAssistant:",
                    "promptValues": "{\"query\":\"{{question}}\"}"
                },
                "outputAnchors": [
                    {
                        "id": "promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate",
                        "name": "promptTemplate",
                        "label": "PromptTemplate",
                        "type": "PromptTemplate | BaseStringPromptTemplate | BasePromptTemplate"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 269.2203229225663,
                "y": 129.02909641085535
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 577,
            "id": "replicate_0",
            "position": {
                "x": 623.313978186024,
                "y": -142.92788335022428
            },
            "type": "customNode",
            "data": {
                "id": "replicate_0",
                "label": "Replicate",
                "version": 2,
                "name": "replicate",
                "type": "Replicate",
                "baseClasses": ["Replicate", "BaseChatModel", "LLM", "BaseLLM", "BaseLanguageModel", "Runnable"],
                "category": "LLMs",
                "description": "Use Replicate to run open source models on cloud",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["replicateApi"],
                        "id": "replicate_0-input-credential-credential"
                    },
                    {
                        "label": "Model",
                        "name": "model",
                        "type": "string",
                        "placeholder": "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
                        "optional": true,
                        "id": "replicate_0-input-model-string"
                    },
                    {
                        "label": "Temperature",
                        "name": "temperature",
                        "type": "number",
                        "step": 0.1,
                        "description": "Adjusts randomness of outputs, greater than 1 is random and 0 is deterministic, 0.75 is a good starting value.",
                        "default": 0.7,
                        "optional": true,
                        "id": "replicate_0-input-temperature-number"
                    },
                    {
                        "label": "Max Tokens",
                        "name": "maxTokens",
                        "type": "number",
                        "step": 1,
                        "description": "Maximum number of tokens to generate. A word is generally 2-3 tokens",
                        "optional": true,
                        "additionalParams": true,
                        "id": "replicate_0-input-maxTokens-number"
                    },
                    {
                        "label": "Top Probability",
                        "name": "topP",
                        "type": "number",
                        "step": 0.1,
                        "description": "When decoding text, samples from the top p percentage of most likely tokens; lower to ignore less likely tokens",
                        "optional": true,
                        "additionalParams": true,
                        "id": "replicate_0-input-topP-number"
                    },
                    {
                        "label": "Repetition Penalty",
                        "name": "repetitionPenalty",
                        "type": "number",
                        "step": 0.1,
                        "description": "Penalty for repeated words in generated text; 1 is no penalty, values greater than 1 discourage repetition, less than 1 encourage it. (minimum: 0.01; maximum: 5)",
                        "optional": true,
                        "additionalParams": true,
                        "id": "replicate_0-input-repetitionPenalty-number"
                    },
                    {
                        "label": "Additional Inputs",
                        "name": "additionalInputs",
                        "type": "json",
                        "description": "Each model has different parameters, refer to the specific model accepted inputs. For example: <a target=\"_blank\" href=\"https://replicate.com/a16z-infra/llama13b-v2-chat/api#inputs\">llama13b-v2</a>",
                        "additionalParams": true,
                        "optional": true,
                        "id": "replicate_0-input-additionalInputs-json"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Cache",
                        "name": "cache",
                        "type": "BaseCache",
                        "optional": true,
                        "id": "replicate_0-input-cache-BaseCache"
                    }
                ],
                "inputs": {
                    "model": "a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
                    "temperature": 0.7,
                    "maxTokens": "",
                    "topP": "",
                    "repetitionPenalty": "",
                    "additionalInputs": ""
                },
                "outputAnchors": [
                    {
                        "id": "replicate_0-output-replicate-Replicate|BaseChatModel|LLM|BaseLLM|BaseLanguageModel|Runnable",
                        "name": "replicate",
                        "label": "Replicate",
                        "type": "Replicate | BaseChatModel | LLM | BaseLLM | BaseLanguageModel | Runnable"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 623.313978186024,
                "y": -142.92788335022428
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 456,
            "id": "llmChain_0",
            "position": {
                "x": 1013.8484815418046,
                "y": 298.7146179121001
            },
            "type": "customNode",
            "data": {
                "id": "llmChain_0",
                "label": "LLM Chain",
                "version": 3,
                "name": "llmChain",
                "type": "LLMChain",
                "baseClasses": ["LLMChain", "BaseChain", "Runnable"],
                "category": "Chains",
                "description": "Chain to run queries against LLMs",
                "inputParams": [
                    {
                        "label": "Chain Name",
                        "name": "chainName",
                        "type": "string",
                        "placeholder": "Name Your Chain",
                        "optional": true,
                        "id": "llmChain_0-input-chainName-string"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Language Model",
                        "name": "model",
                        "type": "BaseLanguageModel",
                        "id": "llmChain_0-input-model-BaseLanguageModel"
                    },
                    {
                        "label": "Prompt",
                        "name": "prompt",
                        "type": "BasePromptTemplate",
                        "id": "llmChain_0-input-prompt-BasePromptTemplate"
                    },
                    {
                        "label": "Output Parser",
                        "name": "outputParser",
                        "type": "BaseLLMOutputParser",
                        "optional": true,
                        "id": "llmChain_0-input-outputParser-BaseLLMOutputParser"
                    },
                    {
                        "label": "Input Moderation",
                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                        "name": "inputModeration",
                        "type": "Moderation",
                        "optional": true,
                        "list": true,
                        "id": "llmChain_0-input-inputModeration-Moderation"
                    }
                ],
                "inputs": {
                    "model": "{{replicate_0.data.instance}}",
                    "prompt": "{{promptTemplate_0.data.instance}}",
                    "outputParser": "",
                    "chainName": "",
                    "inputModeration": ""
                },
                "outputAnchors": [
                    {
                        "name": "output",
                        "label": "Output",
                        "type": "options",
                        "options": [
                            {
                                "id": "llmChain_0-output-llmChain-LLMChain|BaseChain|Runnable",
                                "name": "llmChain",
                                "label": "LLM Chain",
                                "type": "LLMChain | BaseChain | Runnable"
                            },
                            {
                                "id": "llmChain_0-output-outputPrediction-string|json",
                                "name": "outputPrediction",
                                "label": "Output Prediction",
                                "type": "string | json"
                            }
                        ],
                        "default": "llmChain"
                    }
                ],
                "outputs": {
                    "output": "llmChain"
                },
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1013.8484815418046,
                "y": 298.7146179121001
            },
            "dragging": false
        }
    ],
    "edges": [
        {
            "source": "replicate_0",
            "sourceHandle": "replicate_0-output-replicate-Replicate|BaseChatModel|LLM|BaseLLM|BaseLanguageModel|Runnable",
            "target": "llmChain_0",
            "targetHandle": "llmChain_0-input-model-BaseLanguageModel",
            "type": "buttonedge",
            "id": "replicate_0-replicate_0-output-replicate-Replicate|BaseChatModel|LLM|BaseLLM|BaseLanguageModel|Runnable-llmChain_0-llmChain_0-input-model-BaseLanguageModel",
            "data": {
                "label": ""
            }
        },
        {
            "source": "promptTemplate_0",
            "sourceHandle": "promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate",
            "target": "llmChain_0",
            "targetHandle": "llmChain_0-input-prompt-BasePromptTemplate",
            "type": "buttonedge",
            "id": "promptTemplate_0-promptTemplate_0-output-promptTemplate-PromptTemplate|BaseStringPromptTemplate|BasePromptTemplate-llmChain_0-llmChain_0-input-prompt-BasePromptTemplate",
            "data": {
                "label": ""
            }
        }
    ]
}