pytorch
163 lines · 5.7 KB
1"""Scribe Uploader for Pytorch Benchmark Data
2
3Currently supports data in pytest-benchmark format but can be extended.
4
5New fields can be added just by modifying the schema in this file, schema
6checking is only here to encourage reusing existing fields and avoiding typos.
7"""
8
9import argparse10import json11import os12import subprocess13import time14from collections import defaultdict15
16import requests17
18
class ScribeUploader:
    """Uploads structured messages to a Scribe logging category.

    Subclasses must set ``self.schema``, a dict with keys ``"normal"``,
    ``"int"`` and ``"float"`` listing the allowed field names for each
    bucket.  Messages are validated against that schema and then sent
    either through the internal ``scribe_cat`` CLI or the public Graph
    API endpoint, selected by environment variables.
    """

    def __init__(self, category):
        # Scribe category that every message from this uploader is logged under.
        self.category = category

    def format_message(self, field_dict):
        """Validate *field_dict* against the schema and bucket each field.

        Returns a dict shaped ``{"normal": {...}, "int": {...}, "float": {...}}``
        with values coerced to ``str``/``int``/``float`` respectively.

        Raises:
            ValueError: if the required ``time`` field is missing, or if a
                field is not declared in the schema.
        """
        # Explicit raise instead of `assert`: asserts are stripped when the
        # interpreter runs with -O, which would silently skip validation.
        if "time" not in field_dict:
            raise ValueError("Missing required Scribe field 'time'")
        message = defaultdict(dict)
        for field, value in field_dict.items():
            if field in self.schema["normal"]:
                message["normal"][field] = str(value)
            elif field in self.schema["int"]:
                message["int"][field] = int(value)
            elif field in self.schema["float"]:
                message["float"][field] = float(value)
            else:
                # Unknown fields are rejected on purpose: the schema exists to
                # encourage reuse of existing fields and to catch typos.
                raise ValueError(
                    f"Field {field} is not currently used, be intentional about adding new fields"
                )
        return message

    def _upload_intern(self, messages):
        # Internal upload path: shell out to the `scribe_cat` CLI once per
        # message.  check=True surfaces a non-zero exit status instead of
        # silently dropping the data point.
        for m in messages:
            json_str = json.dumps(m)
            cmd = ["scribe_cat", self.category, json_str]
            subprocess.run(cmd, check=True)

    def upload(self, messages):
        """Upload *messages*, choosing the transport from the environment.

        Uses the internal ``scribe_cat`` path when ``SCRIBE_INTERN`` is set;
        otherwise POSTs to the Graph API using the token from
        ``SCRIBE_GRAPHQL_ACCESS_TOKEN``.

        Raises:
            ValueError: if no access token is available for the HTTP path.
            requests.HTTPError: if the HTTP upload returns an error status.
        """
        if os.environ.get("SCRIBE_INTERN"):
            return self._upload_intern(messages)
        access_token = os.environ.get("SCRIBE_GRAPHQL_ACCESS_TOKEN")
        if not access_token:
            raise ValueError("Can't find access token from environment variable")
        url = "https://graph.facebook.com/scribe_logs"
        r = requests.post(
            url,
            data={
                "access_token": access_token,
                "logs": json.dumps(
                    [
                        {
                            "category": self.category,
                            "message": json.dumps(message),
                            "line_escape": False,
                        }
                        for message in messages
                    ]
                ),
            },
        )
        print(r.text)
        r.raise_for_status()
71
class PytorchBenchmarkUploader(ScribeUploader):
    """Scribe uploader preconfigured for the pytorch benchmarks category."""

    def __init__(self):
        super().__init__("perfpipe_pytorch_benchmarks")
        # Field schema: decides how each field is coerced before upload
        # (str for "normal", int for "int", float for "float").
        self.schema = {
            "int": [
                "time",
                "rounds",
            ],
            "normal": [
                "benchmark_group",
                "benchmark_name",
                "benchmark_executor",
                "benchmark_fuser",
                "benchmark_class",
                "benchmark_time",
                "pytorch_commit_id",
                "pytorch_branch",
                "pytorch_commit_time",
                "pytorch_version",
                "pytorch_git_dirty",
                "machine_kernel",
                "machine_processor",
                "machine_hostname",
                "circle_build_num",
                "circle_project_reponame",
            ],
            "float": [
                "stddev",
                "min",
                "median",
                "max",
                "mean",
            ],
        }

    def post_pytest_benchmarks(self, pytest_json):
        """Translate a pytest-benchmark JSON report into Scribe messages and upload them.

        Args:
            pytest_json: parsed output of ``pytest --benchmark-json=...``,
                expected to carry ``machine_info``, ``commit_info``,
                ``datetime`` and a ``benchmarks`` list.
        """
        machine = pytest_json["machine_info"]
        commit = pytest_json["commit_info"]
        # One shared timestamp so all rows of this report land together.
        now = int(time.time())
        batch = []
        for bench in pytest_json["benchmarks"]:
            params = bench["params"]
            stats = bench["stats"]
            # Rebuild the name as "test[net_name]", discarding any other
            # parametrization that pytest folded into the original name.
            base = bench["name"].split("[")[0]
            fields = {
                "time": now,
                "benchmark_group": bench["group"],
                "benchmark_name": f"{base}[{params['net_name']}]",
                "benchmark_executor": params["executor"],
                "benchmark_fuser": params["fuser"],
                "benchmark_class": bench["fullname"],
                "benchmark_time": pytest_json["datetime"],
                "pytorch_commit_id": commit["id"],
                "pytorch_branch": commit["branch"],
                "pytorch_commit_time": commit["time"],
                "pytorch_version": None,
                "pytorch_git_dirty": commit["dirty"],
                "machine_kernel": machine["release"],
                "machine_processor": machine["processor"],
                "machine_hostname": machine["node"],
                "circle_build_num": os.environ.get("CIRCLE_BUILD_NUM"),
                "circle_project_reponame": os.environ.get(
                    "CIRCLE_PROJECT_REPONAME"
                ),
                "stddev": stats["stddev"],
                "rounds": stats["rounds"],
                "min": stats["min"],
                "median": stats["median"],
                "max": stats["max"],
                "mean": stats["mean"],
            }
            batch.append(self.format_message(fields))
        self.upload(batch)
150
if __name__ == "__main__":
    # CLI entry point: parse the report location and push its contents to Scribe.
    cli = argparse.ArgumentParser(description=__doc__)
    # Accept both dashed and underscored spellings of the flag; argparse
    # exposes it as `pytest_bench_json` either way.
    cli.add_argument(
        "--pytest-bench-json",
        "--pytest_bench_json",
        type=argparse.FileType("r"),
        help="Upload json data formatted by pytest-benchmark module",
    )
    parsed = cli.parse_args()
    report_file = parsed.pytest_bench_json
    if report_file:
        uploader = PytorchBenchmarkUploader()
        uploader.post_pytest_benchmarks(json.load(report_file))