paddlenlp
175 lines · 6.6 KB
1# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14"""
15RUN PaddleNLP CI Case
16"""
17import os
18import re
19import subprocess
20import sys
21
22
def get_mode_info(case_path):
    """
    Scan a model case directory and classify its executable python files.

    Walks ``case_path``; top-level ``.py`` files are bucketed by filename
    pattern (prepare / train / finetune / eval / predict / export_model /
    run_*), and the first ``.py`` file found under either
    ``deploy/paddle_inference`` or ``deploy/python`` is recorded as the
    inference entry point.

    Args:
        case_path (str): model directory path (relative to PaddleNLP root).

    Returns:
        dict: model_info with keys ``path``, ``deploy_path``,
        ``prepare_exec_file``, ``train_exec_file`` (list of files),
        ``eval_exec_file``, ``predict_exec_file``, ``export_exec_file``
        and ``infer_exec_file``; unset entries stay ``None`` (or ``[]``).

    Example:
        pegasus = {
            "path": "applications/text_summarization/pegasus/",
            "deploy_path": "deploy/paddle_inference/",
            "prepare_exec_file": "run_prepare.py",
            "train_exec_file": ["train.py"],
            "eval_exec_file": None,
            "predict_exec_file": "predict.py",
            "export_exec_file": "export_model.py",
            "infer_exec_file": "inference_pegasus.py",
        }
    """
    model_info = {
        "path": case_path,
        "deploy_path": None,
        "prepare_exec_file": None,
        "train_exec_file": [],
        "eval_exec_file": None,
        "predict_exec_file": None,
        "export_exec_file": None,
        "infer_exec_file": None,
    }
    # Loop-invariant deploy locations, hoisted out of the os.walk loop.
    # Built with "/" concatenation so they compare equal to os.walk roots
    # on the Linux CI hosts this script targets.
    infer_deploy_path = case_path + "/deploy/paddle_inference"
    python_deploy_path = case_path + "/deploy/python"

    for root, dirs, files in os.walk(case_path):
        if files and root == case_path:
            for file in files:
                # TODO .sh file incompatible windows
                if file.split(".")[-1] == "py":
                    # Plain substring checks replace the original
                    # re.compile("x.py").findall(file) calls, whose
                    # unescaped "." made the dot a one-char wildcard.
                    if "prepare.py" in file:
                        model_info["prepare_exec_file"] = file
                    elif "train.py" in file:
                        model_info["train_exec_file"].append(file)
                    elif "finetune" in file:
                        model_info["train_exec_file"].append(file)
                    elif "eval.py" in file:
                        model_info["eval_exec_file"] = file
                    elif "predict.py" in file:
                        model_info["predict_exec_file"] = file
                    elif "export_model.py" in file:
                        model_info["export_exec_file"] = file
                    elif "run_" in file:
                        model_info["train_exec_file"].append(file)
                    else:
                        continue
        elif files and root == infer_deploy_path:
            for file in files:
                if file.split(".")[-1] == "py":
                    model_info["deploy_path"] = "deploy/paddle_inference"
                    model_info["infer_exec_file"] = file
        elif files and root == python_deploy_path:
            for file in files:
                if file.split(".")[-1] == "py":
                    model_info["deploy_path"] = "deploy/python"
                    model_info["infer_exec_file"] = file

    print("model_info", model_info)
    return model_info
91
92
def save_log(exit_code, output, case_name, file_name):
    """
    Append a step's console output to a result-named log file.

    The log is written to <root>/model_logs/ and suffixed ``_SUCCESS`` or
    ``_FAIL`` based on ``exit_code``, so CI can spot failures by filename.

    Args:
        exit_code (int): shell exit status of the step (0 means success).
        output (str): captured stdout/stderr of the step.
        case_name (str): model case name, used as the log-file prefix.
        file_name (str): executed script name (without extension).
    """
    # Hard-coded CI workspace root; logs are harvested from model_logs/.
    root_path = "/workspace/PaddleNLP"
    # The two original branches were identical except for the suffix.
    status = "SUCCESS" if exit_code == 0 else "FAIL"
    log_file = root_path + "/model_logs/" + case_name + "_" + file_name + "_" + status + ".log"
    print("{} {}".format(file_name, status))
    with open(log_file, "a") as flog:
        flog.write("%s" % (output))
109
110
def run_normal_case(case_path):
    """
    Run the standard CI pipeline for one model case.

    Pipeline (each step optional, depending on which scripts exist in the
    case directory): prepare -> train -> eval -> predict -> export ->
    python inference. Each step's exit status and captured output are
    persisted via save_log(); missing steps are reported as skipped.

    Args:
        case_path (str): model path based PaddleNLP from git diff.

    Side effects:
        Changes the working directory to ``case_path`` and launches
        subprocesses; log files are written under the CI workspace.
    """
    case_name = case_path.split("/")[-1]
    model_info = get_mode_info(case_path)
    # NOTE: fixed the original misspelled local name `depoly_path`.
    deploy_path = model_info["deploy_path"]
    prepare_exec_file = model_info["prepare_exec_file"]
    eval_exec_file = model_info["eval_exec_file"]
    predict_exec_file = model_info["predict_exec_file"]
    export_exec_file = model_info["export_exec_file"]
    infer_exec_file = model_info["infer_exec_file"]

    os.chdir(case_path)

    if prepare_exec_file:
        prepare_output = subprocess.getstatusoutput("python %s " % (prepare_exec_file))
        save_log(prepare_output[0], prepare_output[1], case_name, prepare_exec_file.split(".")[0])

    if model_info["train_exec_file"]:
        for train_file in model_info["train_exec_file"]:
            # Tiny smoke-training run: 2 steps only, checkpoint to ./output/.
            train_output = subprocess.getstatusoutput(
                "python -m paddle.distributed.launch %s --device gpu --max_steps 2 \
                --save_steps 2 --output_dir ./output/"
                % (train_file)
            )
            save_log(train_output[0], train_output[1], case_name, train_file.split(".")[0])
    else:
        print("Train Skipped")

    if eval_exec_file:
        eval_output = subprocess.getstatusoutput("python %s --init_checkpoint_dir ./output/" % (eval_exec_file))
        save_log(eval_output[0], eval_output[1], case_name, eval_exec_file.split(".")[0])
    else:
        # Fixed typo in the original message ("Evalation").
        print("Evaluation Skipped")
    if predict_exec_file:
        predict_output = subprocess.getstatusoutput("python %s --init_checkpoint_dir ./output/" % (predict_exec_file))
        save_log(predict_output[0], predict_output[1], case_name, predict_exec_file.split(".")[0])
    else:
        print("Predict Skipped")
    if export_exec_file:
        export_output = subprocess.getstatusoutput(
            "python %s --export_output_dir ./inference_model/" % (export_exec_file)
        )
        save_log(export_output[0], export_output[1], case_name, export_exec_file.split(".")[0])
    else:
        print("Export model Skipped")
    if infer_exec_file:
        # Inference runs from inside the deploy dir, hence the ../../ path.
        infer_output = subprocess.getstatusoutput(
            "cd %s && python %s --inference_model_dir ../../inference_model/" % (deploy_path, infer_exec_file)
        )
        save_log(infer_output[0], infer_output[1], case_name, infer_exec_file.split(".")[0])
    else:
        print("python inference Skipped")
167
168
if __name__ == "__main__":
    # Usage: python <this_script> <case_path>
    # e.g. path = "applications/text_summarization/pegasus"
    path = sys.argv[1]
    if os.path.isdir(path):
        run_normal_case(path)
    else:
        # Fixed typos in the original message ("not model file path, skepped ").
        print("not a model file path, skipped")
176