# promptflow examples (scraped page header removed: repository title / fork and line-count chrome)
# Generate Readme file for the examples folder
2
import json
3
from pathlib import Path
4
import workflow_generator
5
import readme_generator
6
from jinja2 import Environment, FileSystemLoader
7
from ghactions_driver.readme_step import ReadmeStepsManage
8
from operator import itemgetter
9
import argparse
10
import sys
11
import os
12
import re
13

14
BRANCH = "main"
15

16

17
def get_notebook_readme_description(notebook) -> str:
    """Return the readme description stored at .metadata.description of a notebook.

    Prints a notice and returns "" when the notebook cannot be read or the
    metadata key is not set.
    """
    try:
        raw = Path(notebook).read_text(encoding="utf-8")
        return json.loads(raw)["metadata"]["description"]
    except Exception:
        print(f"{notebook} metadata description not set")
        return ""
29

30

31
def get_notebook_buildDoc_description(notebook) -> dict:
    """Return the build-doc settings stored at .metadata.build_doc of a notebook.

    Fixes: the return annotation previously said ``str`` although the function
    returns the build_doc mapping (or an empty dict), and the docstring was
    copy-pasted from the description getter.

    Prints a notice and returns {} when the notebook cannot be read or the
    build_doc key is not set.
    """
    try:
        # read in notebook as raw JSON
        with open(notebook, "r", encoding="utf-8") as f:
            data = json.load(f)
        return data["metadata"]["build_doc"]
    except Exception:
        print(f"{notebook} metadata build_doc not set")
        return {}
43

44

45
def get_readme_description_first_sentence(readme) -> str:
    """Extract the first sentence of the first paragraph of a README file.

    Skips the first line (assumed to be the title), markdown headings, and
    front-matter/metadata lines, then accumulates text until the first period
    or until a blank line follows some accumulated text. Returns "" on any
    read error.

    Fixes: previously the loop never terminated when the file ended before a
    sentence was accumulated (``readline()`` returns "" forever at EOF and the
    ``sentence != ""`` guard never fired) — e.g. a README containing only
    headings hung the generator. We now break on raw EOF.
    """
    # Front-matter / metadata prefixes to skip while scanning.
    metadata_prefixes = (
        "---",
        "resources:",
        "title:",
        "cloud:",
        "category:",
        "weight:",
    )
    try:
        with open(readme, "r", encoding="utf-8") as f:
            f.readline()  # skip the first (title) line
            sentence = ""
            while True:
                line = f.readline()
                # EOF: readline() returns "" forever, so stop here to avoid an
                # infinite loop when no sentence text was found (bug fix).
                if not line:
                    break
                if line.startswith("#"):
                    line = ""
                # skip metadata section
                if line.startswith(metadata_prefixes):
                    line = ""
                if line.strip() == "" and sentence != "":
                    break
                elif "." in line:
                    sentence += " " + line.split(".")[0].strip()
                    break
                else:
                    if sentence == "":
                        sentence += line.strip()
                    elif line.strip() != "":
                        sentence += " " + line.strip()
            return sentence
    except Exception:
        print(f"Error during reading {readme}")
        return ""
82

83

84
def _telemetry_index_fields(telemetry) -> dict:
    """Collect the optional index metadata (title/cloud/category/weight)
    shared by workflow and readme telemetry objects, substituting defaults
    for attributes that are not set."""
    return {
        "title": telemetry.title.capitalize()
        if hasattr(telemetry, "title")
        else "Empty title",
        "cloud": telemetry.cloud.capitalize()
        if hasattr(telemetry, "cloud")
        else "NOT DEFINED",
        "category": telemetry.category.capitalize()
        if hasattr(telemetry, "category")
        else "General",
        "weight": telemetry.weight if hasattr(telemetry, "weight") else 0,
    }


def write_readme(workflow_telemetries, readme_telemetries):
    """Render examples/README.md and docs/tutorials/index.md.

    Buckets every workflow notebook and readme by its folder prefix, feeds the
    buckets into the README jinja2 template, then builds the tutorial index
    (|Area|Cloud|Category|Sample|Description|) from the same items plus the
    readmes that have no generated workflow.

    Fixes: ``no_workflow_readmes`` was previously initialized *inside* the
    workflow loop, so an empty ``workflow_telemetries`` raised NameError when
    the readme loop tried to append to it.
    """
    global BRANCH

    ReadmeStepsManage.git_base_dir()
    readme_file = Path(ReadmeStepsManage.git_base_dir()) / "examples/README.md"

    # One bucket per README section; each collects readmes and notebooks.
    quickstarts = {"readmes": [], "notebooks": []}
    tutorials = {"readmes": [], "notebooks": []}
    flex_flows = {"readmes": [], "notebooks": []}
    prompty = {"readmes": [], "notebooks": []}
    flows = {"readmes": [], "notebooks": []}
    evaluations = {"readmes": [], "notebooks": []}
    chats = {"readmes": [], "notebooks": []}
    toolusecases = {"readmes": [], "notebooks": []}
    connections = {"readmes": [], "notebooks": []}

    # Folder-prefix -> bucket routing shared by both loops below
    # (tutorials are handled separately because of the quickstart split).
    routes = [
        ("examples/flows/standard", flows),
        ("examples/connections", connections),
        ("examples/flows/evaluation", evaluations),
        ("examples/flows/chat", chats),
        ("examples/flex-flows", flex_flows),
        ("examples/prompty", prompty),
        ("examples/tools/use-cases", toolusecases),
    ]

    def dispatch(folder, name, kind, item):
        """Append *item* to the bucket whose prefix matches *folder*;
        kind is "notebooks" or "readmes"."""
        if folder.startswith("examples/tutorials"):
            bucket = quickstarts if "quickstart" in name else tutorials
            bucket[kind].append(item)
            return
        for prefix, bucket in routes:
            if folder.startswith(prefix):
                bucket[kind].append(item)
                return
        print(f"Unknown workflow type: {folder}")

    # Readmes without a generated workflow; tutorial-index candidates only.
    # Bug fix: must be initialized before (not inside) the workflow loop.
    no_workflow_readmes = []

    for workflow_telemetry in workflow_telemetries:
        notebook_name = f"{workflow_telemetry.name}.ipynb"
        gh_working_dir = workflow_telemetry.gh_working_dir
        pipeline_name = workflow_telemetry.workflow_name
        yaml_name = f"{pipeline_name}.yml"

        # For workflows, open the ipynb as raw json and read the description
        # at .metadata.description (and build_doc at .metadata.build_doc).
        description = get_notebook_readme_description(workflow_telemetry.notebook)
        build_doc = get_notebook_buildDoc_description(workflow_telemetry.notebook)
        notebook_path = gh_working_dir.replace("examples/", "") + f"/{notebook_name}"
        default_workflow_item = {
            "name": notebook_name,
            "path": notebook_path,
            "pipeline_name": pipeline_name,
            "yaml_name": yaml_name,
            "description": description,
            "build_doc": build_doc,
            **_telemetry_index_fields(workflow_telemetry),
        }
        dispatch(gh_working_dir, notebook_name, "notebooks", default_workflow_item)

    for readme_telemetry in readme_telemetries:
        if readme_telemetry.readme_name.endswith("README.md"):
            notebook_name = readme_telemetry.readme_folder.split("/")[-1]
        else:
            notebook_name = readme_telemetry.readme_name.split("/")[-1].replace(
                ".md", ""
            )
        notebook_path = readme_telemetry.readme_name.replace("examples/", "")
        description = get_readme_description_first_sentence(
            readme_telemetry.readme_name
        )
        if not hasattr(readme_telemetry, "workflow_name"):
            # No CI workflow was generated for this readme; keep it only as a
            # tutorial-index candidate.
            no_workflow_readmes.append(
                {
                    "name": notebook_name,
                    "path": notebook_path,
                    "description": description,
                    **_telemetry_index_fields(readme_telemetry),
                }
            )
            continue

        default_readme_item = {
            "name": notebook_name,
            "path": notebook_path,
            "pipeline_name": readme_telemetry.workflow_name,
            "yaml_name": f"{readme_telemetry.workflow_name}.yml",
            "description": description,
            **_telemetry_index_fields(readme_telemetry),
        }
        dispatch(
            readme_telemetry.readme_folder,
            notebook_name,
            "readmes",
            default_readme_item,
        )

    quickstarts["notebooks"] = sorted(
        quickstarts["notebooks"],
        key=itemgetter("name"),
        reverse=True,
    )

    # Template context for README.md.jinja2.
    replacement = {
        "branch": BRANCH,
        "tutorials": tutorials,
        "flex_flows": flex_flows,
        "prompty": prompty,
        "flows": flows,
        "evaluations": evaluations,
        "chats": chats,
        "toolusecases": toolusecases,
        "connections": connections,
        "quickstarts": quickstarts,
    }

    print("writing README.md...")
    env = Environment(
        loader=FileSystemLoader(
            Path(ReadmeStepsManage.git_base_dir())
            / "scripts/readme/ghactions_driver/readme_templates"
        )
    )
    template = env.get_template("README.md.jinja2")
    with open(readme_file, "w") as f:
        f.write(template.render(replacement))
    print(f"finished writing {str(readme_file)}")

    # Build the tutorial index table out of replacement:
    # |Area|Cloud|Category|Sample|Description|
    new_items = []
    for row in replacement.keys():
        if row == "branch":
            continue
        for item in replacement[row]["notebooks"]:
            item["url"] = (
                f"https://github.com/microsoft/promptflow/blob/main/examples/{item['path']}"
            )
            item["area"] = "SDK"
            # Push azure notebooks to the end of their category.
            if "azure" in item["name"].lower():
                item["weight"] += 1000
            new_items.append(item)
        for item in replacement[row]["readmes"]:
            if item.get("category", "General") == "General":
                print(
                    f"Tutorial Index: Skipping {item['path']} for not having a category"
                )
                continue
            item["url"] = (
                f"https://github.com/microsoft/promptflow/blob/main/examples/{item['path']}"
            )
            item["area"] = "CLI"
            new_items.append(item)
    for item in no_workflow_readmes:
        if not item["path"].startswith("tutorials"):
            print(f"Tutorial Index: Skipping {item['path']} for not being in tutorials")
            continue
        if item.get("category", "General") == "General":
            print(f"Tutorial Index: Skipping {item['path']} for not having a category")
            continue
        item["url"] = (
            f"https://github.com/microsoft/promptflow/blob/main/examples/{item['path']}"
        )
        item["area"] = "CLI"
        new_items.append(item)

    def by_category(category):
        """Items of one category, ordered by their weight."""
        return sorted(
            (item for item in new_items if item["category"] == category),
            key=lambda x: x["weight"],
        )

    # Fixed category order for the tutorial index.
    real_new_items = [
        *by_category("Tracing"),
        *by_category("Prompty"),
        *by_category("Flow"),
        *by_category("Deployment"),
        *by_category("Rag"),
    ]
    tutorial_items = {"items": real_new_items}
    tutorial_index_file = (
        Path(ReadmeStepsManage.git_base_dir()) / "docs/tutorials/index.md"
    )
    template_tutorial = env.get_template("tutorial_index.md.jinja2")
    with open(tutorial_index_file, "w") as f:
        f.write(template_tutorial.render(tutorial_items))
    print(f"Tutorial Index: finished writing {str(tutorial_index_file)}")
370

371

372
def expand_path_filter(path_filter) -> list:
    """Split a workflow path filter like "[a, b]" into its individual paths,
    expanding the "examples/*requirements.txt" glob into the two concrete
    requirements files used by CI."""
    items = [piece for piece in re.split(r"\[|,| |\]", path_filter) if piece]
    expanded = []
    for item in items:
        if item == "examples/*requirements.txt":
            expanded.append("examples/requirements.txt")
            expanded.append("examples/dev_requirements.txt")
        else:
            expanded.append(item)
    return expanded


def main(check):
    """Generate workflows and README files for the examples folder.

    When *check* is True, printing is suppressed and a mapping of
    workflow_name -> list of path filters is returned; otherwise returns "".

    Fixes: the readme loop previously tested
    ``hasattr(readme_telemetry, "workflow_name")`` on the *list* instead of
    the loop variable, which always failed, so readme workflows never
    appeared in the check output.
    """
    if check:
        # Disable print while the generators run.
        sys.stdout = open(os.devnull, "w")

    input_glob = ["examples/**/*.ipynb"]
    workflow_telemetry = []
    workflow_generator.main(input_glob, workflow_telemetry, check=check)

    input_glob_readme = [
        "examples/flows/**/README.md",
        "examples/flex-flows/**/README.md",
        "examples/prompty/**/README.md",
        "examples/connections/**/README.md",
        "examples/tutorials/**/*.md",
        "examples/tools/use-cases/**/README.md",
    ]
    # exclude the readme since this is 3p integration folder, pipeline generation is not included
    input_glob_readme_exclude = ["examples/flows/integrations/**/README.md"]
    readme_telemetry = []
    readme_generator.main(
        input_glob_readme, input_glob_readme_exclude, readme_telemetry
    )

    write_readme(workflow_telemetry, readme_telemetry)

    if not check:
        return ""

    output_object = {}
    for workflow in workflow_telemetry:
        output_object[workflow.workflow_name] = expand_path_filter(
            workflow.path_filter
        )
    for readme in readme_telemetry:
        # Bug fix: check the loop variable, not the enclosing list.
        if not hasattr(readme, "workflow_name"):
            continue
        output_object[readme.workflow_name] = expand_path_filter(readme.path_filter)
    # re-enable output
    sys.stdout = sys.__stdout__
    return output_object
435

436

437
if __name__ == "__main__":
    # Command-line entry point: `-c`/`--check` switches the run into
    # check mode, which reports affected files as JSON on stdout.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-c", "--check", action="store_true", help="Check what file is affected"
    )
    parsed = arg_parser.parse_args()
    print(json.dumps(main(parsed.check)))
446

# (scraped page footer removed: GitVerse cookie-consent banner, not part of this source file)