json_benchmark.py

#!/usr/bin/env python

import multiprocessing
import time
import redis
import sys
import argparse
from urllib.parse import urlparse
import os
from collections import defaultdict
import math

'''
Run a JSON benchmark for 3 commands:
    JSON.SET
    JSON.GET
    JSON.TYPE
We want to measure the overall time it takes
to save and access keys that contain
JSON values with this benchmark.
It also verifies that the basic functionality
of the JSON type works correctly.
'''
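
# Example invocation (a sketch based on the argparse flags defined under
# __main__ below; --pipeline 10 is an arbitrary illustration, the other
# values are the script's defaults):
#
#   ./json_benchmark.py --count 100000 --workers 8 --pipeline 10 \
#       --uri redis://localhost:6379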

def ping(r):
    r.ping()

def jsonset(r, i):
    key = "json-{}".format(i)
    r.execute_command('JSON.SET', key, '.', '{"a":123456, "b": "hello", "nested": {"abc": "ffffff", "bfb": null}}')


def jsonget(r, i):
    key = "json-{}".format(i)
    r.execute_command('JSON.GET', key, '$.a', '$..abc')

def jsontype(r, i):
    key = "json-{}".format(i)
    r.execute_command('JSON.TYPE', key, '$.a')

def runWorker(ctx):
    wpid = os.getpid()
    print('{} '.format(wpid))

    # Histogram of per-operation latencies, keyed by millisecond bucket.
    rep = defaultdict(int)
    r = redis.StrictRedis(host=ctx['host'], port=ctx['port'])
    work = ctx['work']
    if ctx['pipeline'] == 0:
        total_count = int(ctx['count'])
        for i in range(0, total_count):
            s0 = time.time()
            jsonset(r, i)
            s1 = time.time() - s0
            bin = int(math.floor(s1 * 1000)) + 1
            rep[bin] += 1
        for i in range(0, total_count):
            s0 = time.time()
            jsonget(r, i)
            s1 = time.time() - s0
            bin = int(math.floor(s1 * 1000)) + 1
            rep[bin] += 1
        for i in range(0, total_count):
            s0 = time.time()
            jsontype(r, i)
            s1 = time.time() - s0
            bin = int(math.floor(s1 * 1000)) + 1
            rep[bin] += 1
    else:
        for i in range(0, ctx['count'], ctx['pipeline']):
            p = r.pipeline()
            s0 = time.time()
            for j in range(0, ctx['pipeline']):
                work(p, i + j)  # pass the key index; work() expects (client, i)
            p.execute()
            s1 = time.time() - s0
            bin = int(math.floor(s1 * 1000)) + 1
            rep[bin] += ctx['pipeline']

    return rep

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='ReJSON Benchmark', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-c', '--count', type=int, default=100000, help='total number of operations')
    parser.add_argument('-p', '--pipeline', type=int, default=0, help='pipeline size')
    parser.add_argument('-w', '--workers', type=int, default=8, help='number of worker processes')
    parser.add_argument('-u', '--uri', type=str, default='redis://localhost:6379', help='Redis server URI')
    args = parser.parse_args()
    uri = urlparse(args.uri)

    r = redis.Redis(host=uri.hostname, port=uri.port)

    s0 = time.time()
    ctx = {
        # Integer division so each worker gets a whole per-worker share
        # that works with range() in runWorker.
        'count': args.count // args.workers,
        'pipeline': args.pipeline,
        'host': uri.hostname,
        'port': uri.port,
        'work': jsonset,
    }

    print('Starting workers: ')
    p = multiprocessing.Pool(args.workers)
    results = p.map(runWorker, (ctx, ) * args.workers)
    print("")
    sys.stdout.flush()

    s1 = time.time() - s0
    # Merge the per-worker latency histograms.
    agg = defaultdict(int)
    for res in results:
        for k, v in res.items():
            agg[k] += v

    print()
    count = args.count * 3
    print(f'Count: {args.count}, Workers: {args.workers}, Pipeline: {args.pipeline}')
    print(f'Using hiredis: {redis.utils.HIREDIS_AVAILABLE}')
    print(f'Runtime: {round(s1, 2):,} seconds')
    print(f'Throughput: {round(count/s1, 2):,} requests per second')
    for k, v in sorted(agg.items()):
        perc = 100.0 * v / count
        print(f'{perc:.4f}% <= {k:,} milliseconds')