glusterfs

Форк
0
/
volume.rc 
1026 строк · 25.6 Кб
1
# Print the value of a "<field>: <value>" header line from
# `gluster volume info <vol>`.
# $1 - volume name
# $2 - field label (e.g. "Status", "Type")
function volinfo_field()
{
    local vol=$1;
    local field=$2;

    # Select the matching header line, then strip everything up to the
    # final ": " separator, leaving just the value.
    $CLI volume info $vol | sed -n "/^$field: /{s/.*: //;p}";
}
8

9
# Print the current value of volume option $2 on volume $1, as reported
# by `gluster volume get`.
function volume_get_field()
{
    local vol=$1
    local field=$2
    # The last line of the output holds "<option> <value>"; print the
    # second column of that line.
    $CLI volume get $vol $field | tail -n 1 | awk '{print $2}'
}
15

16

17
# Count the bricks of volume $1 by counting "BrickN: " lines in the
# `volume info` output.
function brick_count()
{
    local vol=$1;

    $CLI volume info $vol | grep -E "^Brick[0-9]+: " | wc -l;
}
23

24
# Count "<status>1" entries in `gluster --xml volume status` output.
# With no argument, counts every online entry (bricks plus daemons).
# With $1 set to a daemon label (e.g. "Self-heal"), counts only online
# entries within the 5 lines following that label in the XML stream.
function check_brick_status() {
       # Fixed: 'cmd' was a global leak; also drop the useless echo-of-
       # backticks wrapper and quote $daemon so a multi-word label works.
       local cmd="gluster --xml volume status"
       local daemon=$1

       if [[ -z $daemon ]]
        then
                $cmd | grep '<status>1' | wc -l
       else
                $cmd | grep -A 5 "$daemon" | grep '<status>1' | wc -l
       fi
}
35

36
# Count online bricks only: total online entries from `volume status`
# minus one entry per auxiliary daemon type that reports a status.
function online_brick_count ()
{
       local v1=0
       local v2=0
       local v3=0
       local v4=0
       # Fixed: v6 and v7 were assigned without 'local' and leaked into
       # the caller's scope, unlike v1..v5.
       local v5=0
       local v6=0
       local v7=0
       local tot=0

       #First count total Number of bricks and then subtract daemon status
       v1=$(check_brick_status)
       v2=$(check_brick_status "Self-heal")
       v3=$(check_brick_status "Quota")
       v4=$(check_brick_status "Snapshot")
       v5=$(check_brick_status "Tier")
       v6=$(check_brick_status "Scrubber")
       v7=$(check_brick_status "Bitrot")

       tot=$((v1-v2-v3-v4-v5-v6-v7))
       echo $tot
}
58

59

60
# Print 1 if brick $3 on host $2 of volume $1 is online, 0 if not
# (parsed from the XML <status> element of `volume status`).
function brick_up_status {
        local vol=$1
        local host=$2
        local brick=$3
        local xml_out
        xml_out=$($CLI volume status $vol $host:$brick --xml)
        printf '%s\n' "$xml_out" | sed -ne 's/.*<status>\([01]\)<\/status>/\1/p'
}
66

67
function volume_option()
68
{
69
        local vol=$1
70
        local key=$2
71
        $CLI volume info $vol | egrep "^$key: " | cut -f2 -d' ';
72
}
73

74
function rebalanced_size_field {
75
        $CLI volume rebalance $1 status | awk '{print $3}' | sed -n 3p
76
}
77

78
function rebalance_status_field {
79
        $CLI volume rebalance $1 status | awk '{print $7}' | sed -n 3p
80
}
81

82
function rebalanced_files_field {
83
        $CLI volume rebalance $1 status | awk '{print $2}' | sed -n 3p
84
}
85

86
function rebalance_failed_field {
87
        $CLI volume rebalance $1 status | awk '{print $5}' | sed -n 3p
88
}
89

90
function fix-layout_status_field {
91
        #The fix-layout status can be up to 3 words, (ex:'fix-layout in progress'), hence the awk-print $2 thru $4.
92
        #But if the status is less than 3 words, it also prints the next field i.e the run_time_in_secs.(ex:'completed 3.00').
93
        #So we trim the numbers out with  `tr`. Finally remove the trailing white spaces with sed. What we get is one of the
94
        #strings in the 'cli_vol_task_status_str' char array of cli-rpc-ops.c
95

96
        $CLI volume rebalance $1 status | awk '{print $2,$3,$4}' |sed -n 3p |tr -d '[^0-9+\.]'|sed 's/ *$//g'
97
}
98

99
function remove_brick_status_completed_field {
100
        local vol=$1
101
        local brick_list=$2
102
        $CLI volume remove-brick $vol $brick_list status | awk '{print $7}' | sed -n 3p
103
}
104

105
function get_mount_process_pid {
106
        local vol=$1
107
        local mnt=$2
108
        ps auxww | grep glusterfs | grep -E "volfile-id[ =]/?$vol .*$mnt" | awk '{print $2}' | head -1
109
}
110

111
function get_nfs_pid ()
112
{
113
        ps auxww | grep "volfile-id\ gluster\/nfs" | awk '{print $2}' | head -1
114
}
115

116
function read_nfs_pidfile ()
117
{
118
        echo `cat $GLUSTERD_PIDFILEDIR/nfs/nfs.pid`
119
}
120

121
# Remove any statedump files previously generated for process $1
# (files matching *<pid>.dump.* under $statedumpdir).
function cleanup_statedump {
       # Fixed: 'pid' was assigned without 'local' and leaked into the
       # caller's scope.
       local pid=$1
       # NOTE(review): the glob '*$pid.dump.*' also matches pids that end
       # with the same digits (pid 23 matches *123.dump.*) -- preserved
       # as-is since tests rely only on exact-pid cleanup.
       rm -f "$statedumpdir"/*"$pid".dump.*
       #.vimrc friendly comment */
}
126

127
# Wait up to $1 seconds for a statedump of process $2 to be fully
# written under $statedumpdir. Echoes the statedump path on success,
# or "nostatedump" on timeout.
function wait_statedump_ready {
        local maxtime="${1}000000000"
        local pid="$2"
        local deadline="$(($(date +%s%N) + maxtime))"
        local fname

        # Fixed: the old '[[ "$a" < "$b" ]]' compared the nanosecond
        # timestamps as strings; use a numeric comparison instead.
        while (( $(date +%s%N) < deadline )); do
                fname="$statedumpdir/$(ls $statedumpdir | grep -E "\.$pid\.dump\.")"
                if [[ -f "$fname" ]]; then
                        # The dump is only complete once the trailer line
                        # has been written.
                        if grep -q "^DUMP-END-TIME" "$fname"; then
                                echo $fname
                                return
                        fi
                fi
                sleep 0.1
        done

        echo "nostatedump"
}
147

148
# Trigger a statedump of process $1 via SIGUSR1 and wait for it to be
# written. Echoes the statedump path, or "nostatedump" on timeout.
function generate_statedump {
        # Fixed: 'pid' was assigned without 'local' and leaked into the
        # caller's scope.
        local pid=$1
        #remove old stale statedumps
        cleanup_statedump $pid
        kill -USR1 $pid
        wait_statedump_ready 3 $pid
}
155

156
function generate_mount_statedump {
157
        local vol=$1
158
        local mnt=$2
159
        generate_statedump $(get_mount_process_pid $vol $mnt)
160
}
161

162
function cleanup_mount_statedump {
163
        local vol=$1
164
        cleanup_statedump $(get_mount_process_pid $vol)
165
}
166

167
function snap_client_connected_status {
168
         local vol=$1
169
         local fpath=$(generate_mount_statedump $vol)
170
         up=$(grep -a -A1 xlator.protocol.client.$vol-snapd-client.priv $fpath | tail -1 | cut -f 2 -d'=')
171
         rm -f $fpath
172
         echo "$up"
173
}
174

175
function _afr_child_up_status {
176
        local vol=$1
177
        #brick_id is (brick-num in volume info - 1)
178
        local brick_id=$2
179
        local gen_state_dump=$3
180
        local fpath=$($gen_state_dump $vol)
181
        up=$(grep -a -B1 trusted.afr.$vol-client-$brick_id $fpath | head -1 | cut -f2 -d'=')
182
        rm -f $fpath
183
        echo "$up"
184
}
185

186
# Print the child_up[$3] value of replica xlator $2, read from the
# .meta private file under mount point $1.
function afr_child_up_status_meta {
        local mnt=$1
        local repl=$2
        local child=$3
        local priv=$mnt/.meta/graphs/active/$repl/private
        # Build the line-anchored pattern inside awk instead of piping
        # through a separate grep.
        awk -v idx="$child" '$0 ~ ("^child_up\\[" idx "\\]") {print $3}' $priv
}
192

193
# Print the "connected" value of client xlator $2, read from the .meta
# private file under mount point $1.
function client_connected_status_meta {
        local mnt=$1
        local client=$2
        awk '/connected/ {print $3}' $mnt/.meta/graphs/active/$client/private
}
198

199
function afr_child_up_status {
200
        local vol=$1
201
        #brick_id is (brick-num in volume info - 1)
202
        local brick_id=$2
203
        _afr_child_up_status $vol $brick_id generate_mount_statedump
204
}
205

206
function ec_get_info {
207
        local vol=$1
208
        local dist_id=$2
209
        local key=$3
210
        local fpath=$4
211
        local value=$(sed -n "/^\[cluster\/disperse\.$vol-disperse-$dist_id\]/,/^\[/{s/^$key=\(.*\)/\1/p;}" $fpath | head -1)
212
        rm -f $fpath
213
        echo "$value"
214
}
215

216
function ec_child_up_status {
217
        local vol=$1
218
        local dist_id=$2
219
        local brick_id=$(($3 + 1))
220
        local mnt=$4
221
        local mask=$(ec_get_info $vol $dist_id "childs_up_mask" $(generate_mount_statedump $vol $mnt))
222
        echo "${mask: -$brick_id:1}"
223
}
224

225
function ec_child_up_count {
226
        local vol=$1
227
        local dist_id=$2
228
        local mnt=$3
229
        ec_get_info $vol $dist_id "childs_up" $(generate_mount_statedump $vol $mnt)
230
}
231

232
function ec_child_up_status_shd {
233
        local vol=$1
234
        local dist_id=$2
235
        local brick_id=$(($3 + 1))
236
        local mask=$(ec_get_info $vol $dist_id "childs_up_mask" $(generate_shd_statedump $vol))
237
        echo "${mask: -$brick_id:1}"
238
}
239

240
function ec_child_up_count_shd {
241
        local vol=$1
242
        local dist_id=$2
243
        ec_get_info $vol $dist_id "childs_up" $(generate_shd_statedump $vol)
244
}
245

246
function get_shd_process_pid {
247
        local vol=$1
248
        ps auxww | grep "process-name\ glustershd" | awk '{print $2}' | head -1
249
}
250

251
function generate_shd_statedump {
252
        local vol=$1
253
        generate_statedump $(get_shd_process_pid $vol)
254
}
255

256
function generate_nfs_statedump {
257
        generate_statedump $(get_nfs_pid)
258
}
259

260
function generate_brick_statedump {
261
        local vol=$1
262
        local host=$2
263
        local brick=$3
264
        generate_statedump $(get_brick_pid $vol $host $brick)
265
}
266

267
function afr_child_up_status_in_shd {
268
        local vol=$1
269
        #brick_id is (brick-num in volume info - 1)
270
        local brick_id=$2
271
        _afr_child_up_status $vol $brick_id generate_shd_statedump
272
}
273

274
function afr_child_up_status_in_nfs {
275
        local vol=$1
276
        #brick_id is (brick-num in volume info - 1)
277
        local brick_id=$2
278
        _afr_child_up_status $vol $brick_id generate_nfs_statedump
279
}
280

281
function nfs_up_status {
282
        gluster volume status | grep "NFS Server" | awk '{print $7}'
283
}
284

285
function glustershd_up_status {
286
        gluster volume status | grep "Self-heal Daemon" | awk '{print $7}'
287
}
288

289
function quotad_up_status {
290
        gluster volume status | grep "Quota Daemon" | awk '{print $7}'
291
}
292

293
function get_glusterd_pid {
294
        pgrep '^glusterd$' | head -1
295
}
296

297
function get_brick_pidfile {
298
        local vol=$1
299
        local host=$2
300
        local brick=$3
301
        local brick_hiphenated=$(echo $brick | tr '/' '-')
302
        echo $GLUSTERD_PIDFILEDIR/vols/$vol/${host}${brick_hiphenated}.pid
303
}
304

305
function get_brick_pid {
306
	cat $(get_brick_pidfile $*)
307
}
308

309
function kill_brick {
310
        local vol=$1
311
        local host=$2
312
        local brick=$3
313

314
	local pidfile=$(get_brick_pidfile $vol $host $brick)
315
	local cmdline="/proc/$(cat $pidfile)/cmdline"
316
	local socket=$(cat $cmdline | tr '\0' '\n' | grep '\.socket$')
317

318
	gf_attach -d $socket $brick
319

320
        local deadline="$(($(date +%s%N) + ${PROCESS_UP_TIMEOUT}000000000))"
321
        while [[ "$(date +%s%N)" < "$deadline" ]]; do
322
                if [[ "$(brick_up_status $vol $host $brick)" == "0" ]]; then
323
                        # The brick termination code is run from an
324
                        # asynchronous thread, so even after glusterd
325
                        # considers it stopped, the brick may still be
326
                        # alive. We need to make sure it's stopped before
327
                        # returning, otherwise an immediate restart could
328
                        # fail. Unfortunately there's no easy way to know
329
                        # when the brick has really been stopped. For now
330
                        # just add some delay.
331
                        sleep 1
332
                        break
333
                fi
334
        done
335
}
336

337
function check_option_help_presence {
338
        local option=$1
339
        $CLI volume set help | grep "^Option:" | grep -w $option
340
}
341

342
# Fetch the AFR changelog xattr $2 of file $1 in hex.
# Echoes an all-zero changelog when the xattr is absent.
function afr_get_changelog_xattr {
        local file=$1
        local xkey=$2
        # Split declaration from assignment so the pipeline status is not
        # masked by 'local'.
        local xval
        xval=$(getfattr -n $xkey -e hex $file 2>/dev/null | grep "$xkey" | cut -f2 -d'=')
        # Fixed: quote $xval -- the old unquoted test only worked by
        # accident of test(1)'s one-argument rule and would error out on
        # any multi-word value.
        if [ -z "$xval" ]; then
                xval="0x000000000000000000000000"
        fi
        echo $xval
}
351

352
function get_pending_heal_count {
353
        local vol=$1
354
        gluster volume heal $vol info | grep "Number of entries" | awk '{ sum+=$4} END {print sum}'
355
}
356

357
function afr_get_split_brain_count {
358
        local vol=$1
359
        gluster volume heal $vol info split-brain | grep "Number of entries in split-brain" | awk '{ sum+=$6} END {print sum}'
360
}
361

362
function afr_get_index_path {
363
        local brick_path=$1
364
        echo "$brick_path/.glusterfs/indices/xattrop"
365
}
366

367
function afr_get_num_indices_in_brick {
368
        local brick_path=$1
369
        echo $(ls $(afr_get_index_path $brick_path) | grep -v xattrop | wc -l)
370
}
371

372
function gf_get_gfid_xattr {
373
        file=$1
374
        getfattr -n trusted.gfid -e hex $file 2>/dev/null | grep "trusted.gfid" | cut -f2 -d'='
375
}
376

377
# Convert a hex gfid xattr value (0x + 32 hex digits) to the canonical
# dash-separated UUID string form.
function gf_gfid_xattr_to_str {
        xval=$1
        printf '%s-%s-%s-%s-%s\n' "${xval:2:8}" "${xval:10:4}" "${xval:14:4}" "${xval:18:4}" "${xval:22:12}"
}
381

382
function get_text_xattr {
383
        local key=$1
384
        local path=$2
385
        getfattr -h -d -m. -e text $path 2>/dev/null | grep -a $key | cut -f2 -d'='
386
}
387

388
function get_gfid2path {
389
        local path=$1
390
        getfattr -h --only-values -n glusterfs.gfidtopath $path 2>/dev/null
391
}
392

393
function get_mdata {
394
        local path=$1
395
        getfattr -h -e hex -n trusted.glusterfs.mdata $path 2>/dev/null | grep "trusted.glusterfs.mdata" | cut -f2 -d'='
396
}
397

398
function get_mdata_count {
399
    getfattr -d -m . -e hex $@ 2>/dev/null | grep mdata | wc -l
400
}
401

402
function get_mdata_uniq_count {
403
    getfattr -d -m . -e hex $@ 2>/dev/null | grep mdata | uniq | wc -l
404
}
405

406
function get_xattr_key {
407
        local key=$1
408
        local path=$2
409
        getfattr -h -d -m. -e text $path 2>/dev/null | grep -a $key | cut -f1 -d'='
410
}
411

412
function gf_check_file_opened_in_brick {
413
        vol=$1
414
        host=$2
415
        brick=$3
416
        realpath=$4
417
        ls -l /proc/$(get_brick_pid $vol $host $brick)/fd | grep "${realpath}$" 2>&1 > /dev/null
418
        if [ $? -eq 0 ]; then
419
                echo "Y"
420
        else
421
                echo "N"
422
        fi
423
}
424

425
function gf_open_file_count_in_brick {
426
        vol=$1
427
        host=$2
428
        brick=$3
429
        realpath=$4
430
        ls -l /proc/$(get_brick_pid $vol $host $brick)/fd | grep "${realpath}$" | wc -l
431
}
432

433
function gf_get_gfid_backend_file_path {
434
        brickpath=$1
435
        filepath_in_brick=$2
436
        gfid=$(gf_get_gfid_xattr "$brickpath/$filepath_in_brick")
437
        gfidstr=$(gf_gfid_xattr_to_str $gfid)
438
        echo "$brickpath/.glusterfs/${gfidstr:0:2}/${gfidstr:2:2}/$gfidstr"
439
}
440

441
function gf_rm_file_and_gfid_link {
442
        brickpath=$1
443
        filepath_in_brick=$2
444
        rm -f $(gf_get_gfid_backend_file_path $brickpath $filepath_in_brick)
445
        rm -f "$brickpath/$filepath_in_brick"
446
}
447

448

449
function gd_is_replace_brick_completed {
450
        local host=$1
451
        local vol=$2
452
        local src_brick=$3
453
        local dst_brick=$4
454
        $CLI volume replace-brick $vol $src_brick $dst_brick status | grep -i "Migration complete"
455
        if [ $? -eq 0 ]; then
456
                echo "Y"
457
        else
458
                echo "N"
459
        fi
460
}
461

462
function dht_get_layout {
463
        local my_xa=trusted.glusterfs.dht
464
        getfattr -d -e hex -n $my_xa $1 2> /dev/null | grep "$my_xa=" | cut -d= -f2
465
}
466

467
# Extract one 8-hex-digit section of the AFR changelog xattr $2 on $1.
# $3 selects the section: "data", "metadata" or "entry".
# Echoes "error" for an unknown section type.
function afr_get_specific_changelog_xattr ()
{
        local path=$1
        local key=$2
        local type=$3
        local specific_changelog=""

        changelog_xattr=$(afr_get_changelog_xattr "$path" "$key")
        if [ "$type" == "data" ]; then
                specific_changelog=${changelog_xattr:2:8}
        elif [ "$type" == "metadata" ]; then
                specific_changelog=${changelog_xattr:10:8}
        elif [ "$type" == "entry" ]; then
                specific_changelog=${changelog_xattr:18:8}
        else
                # Fixed: was assigned to the misspelled variable
                # 'specific_changlog', so "error" was silently dropped and
                # an empty string echoed instead.
                specific_changelog="error"
        fi

        echo $specific_changelog
}
487
##
488
 # query pathinfo xattr and extract POSIX pathname(s)
489
 ##
490
function get_backend_paths {
491
       local path=$1
492

493
       getfattr -m . -n trusted.glusterfs.pathinfo $path | tr ' ' '\n' | sed -n 's/<POSIX.*:.*:\(.*\)>.*/\1/p'
494
}
495

496
#Gets the xattr value in hex, also removed 0x in front of the value
497
# Print the hex value (without the leading "0x") of xattr $1 on path $2.
function get_hex_xattr {
        local key=$1
        local path=$2
        # Fixed: the declared locals were never used; the body referenced
        # raw $1/$2, defeating the self-documenting names.
        getfattr -d -m. -e hex "$path" 2>/dev/null | grep "$key" | cut -f2 -d'=' | cut -f2 -d'x'
}
502

503
function cumulative_stat_count {
504
    echo "$1" | grep "Cumulative Stats:" | wc -l
505
}
506

507
function incremental_stat_count {
508
    echo "$1" | grep "Interval$2Stats:" | wc -l
509
}
510

511
function cleared_stat_count {
512
    echo "$1" | grep "Cleared stats." | wc -l
513
}
514

515
function data_read_count {
516
    echo "$1" | grep "Data Read:$2bytes" | wc -l
517
}
518

519
function data_written_count {
520
    echo "$1" | grep "Data Written:$2bytes" | wc -l
521
}
522

523
# Echo "1" if file $1 occupies fewer allocated bytes (blocks * block
# size) than its apparent size -- i.e. it is sparse -- else "0".
function has_holes {
        local delta=$(( $(stat -c '%b*%B-%s' $1) ))
        if [ "$delta" -lt 0 ]; then
                echo "1"
        else
                echo "0"
        fi
}
531

532
# Run "volume <operation>" concurrently against $2 volumes and wait for
# all of them to finish.
# $1 - operation (start/stop/delete), $2 - volume count, $3 - optional
# "force". Resolves per-instance CLI_<i> and volume name V<i-1>
# variables via bash indirection.
function do_volume_operations() {
        local operation=$1
        local count=$2
        local force=$3

        local pids=()
        local cli
        local v
        local i

        for i in $(seq 1 $count); do
                cli="CLI_$i"
                # Fixed: use shell arithmetic instead of forking expr.
                v="V$((i - 1))"
                ${!cli} volume $operation ${!v} $force &
                pids[$i]=$!
        done

        for i in $(seq 1 $count); do
                wait ${pids[$i]}
        done
}
552

553
function start_volumes() {
554
        do_volume_operations start $1
555
}
556

557
function stop_volumes() {
558
        do_volume_operations stop $1
559
}
560

561
function start_force_volumes() {
562
        do_volume_operations start $1 force
563
}
564

565
function stop_force_volumes() {
566
        do_volume_operations stop $1 force
567
}
568

569
function delete_volumes() {
570
        do_volume_operations delete $1
571
}
572

573
# Echo "Y" if volume $1 exists (volume info succeeds), else "N".
function volume_exists() {
        if $CLI volume info $1 > /dev/null 2>&1; then
                echo "Y"
        else
                echo "N"
        fi
}
581

582
function killall_gluster() {
583
        terminate_pids $(process_pids gluster)
584
        find $GLUSTERD_PIDFILEDIR -name '*.pid' | xargs rm -f
585
}
586

587
function afr_get_index_count {
588
        local brick=$1
589
        ls $1/.glusterfs/indices/xattrop | grep -v xattrop | wc -l
590
}
591

592
function landfill_entry_count {
593
        local brick=$1
594
        ls $brick/.glusterfs/landfill | wc -l
595
}
596

597
function path_exists {
598
        stat $1
599
        if [ $? -eq 0 ]; then echo "Y"; else echo "N"; fi
600
}
601

602
function path_size {
603
        local size=$(stat -c %s $1)
604
        if [ $? -eq 0 ]; then echo $size; else echo ""; fi
605
}
606

607
function force_umount {
608
        ${UMOUNT_F} $*
609
        if [ $? -eq 0 ]; then echo "Y"; else echo "N"; fi
610
}
611

612
function assign_gfid {
613
        local gfid=$1
614
        local file=$2
615
        setfattr -n trusted.gfid -v $1 $2
616
}
617

618
function get_random_gfid {
619
        echo "0x"$(uuidgen | awk -F '-' 'BEGIN {OFS=""} {print $1,$2,$3,$4,$5}')
620
}
621

622
function volgen_volume_exists {
623
        local volfile="$1"
624
        local xl_vol="$2"
625
        local xl_type="$3"
626
        local xl_feature="$4"
627
        xl=$(sed -e "/./{H;\$!d;}" -e "x;/volume $xl_vol/!d;/type $xl_type\/$xl_feature/!d" $volfile)
628
        if [ -z "$xl" ];
629
        then
630
                echo "N"
631
        else
632
                echo "Y"
633
        fi
634
}
635

636
function volgen_volume_option {
637
        local volfile="$1"
638
        local xl_vol="$2"
639
        local xl_type="$3"
640
        local xl_feature="$4"
641
        local xl_option="$5"
642
        sed -e "/./{H;\$!d;}" -e "x;/volume $xl_vol/!d;/type $xl_type\/$xl_feature/!d;/option $xl_option/!d" $volfile | grep " $xl_option " | awk '{print $3}'
643
}
644

645
function mount_get_option_value {
646
        local m=$1
647
        local subvol=$2
648
        local key=$3
649

650
        grep -w "$3" $m/.meta/graphs/active/$subvol/private | awk '{print $3}'
651
}
652

653
function get_volume_mark {
654
        getfattr -n trusted.glusterfs.volume-mark -ehex $1 | sed -n 's/^trusted.glusterfs.volume-mark=0x//p' | cut -b5-36 | sed 's/\([a-f0-9]\{8\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)/\1-\2-\3-\4-/'
655
}
656

657
# setup geo-rep in a single a node.
658

659
function setup_georep {
660

661
    $CLI volume create $GMV0 replica 2  $H0:$B0/${GMV0}{1,2,3,4};
662

663
    $CLI volume start $GMV0
664

665
    $CLI volume create $GSV0 replica 2  $H0:$B0/${GSV0}{1,2,3,4};
666

667
    $CLI volume start $GSV0
668

669
    $CLI system:: execute gsec_create
670

671
    $CLI volume geo-rep $GMV0 $H0::$GSV0 create push-pem
672

673
    $CLI volume geo-rep $GMV0 $H0::$GSV0 start
674

675
    sleep 80 # after start geo-rep takes a minute to get stable
676

677
}
678

679

680
# stop and delete geo-rep session
681

682
function cleanup_georep {
683

684
    $CLI volume geo-rep $GMV0 $H0::$GSV0 stop
685

686
    $CLI volume geo-rep $GMV0 $H0::$GSV0 delete
687
}
688

689
function num_graphs
690
{
691
    local mountpoint=$1
692
    echo `ls $mountpoint/.meta/graphs/ | grep -v active | wc -l`
693
}
694

695
function get_aux()
{
## Check whether an auxiliary mount exists. Echoes "0" when a glusterfs
## process matching the pid recorded in ${V0}$aux_suffix.pid is alive,
## "1" otherwise (no pidfile, or the process is gone).
local aux_suffix=$1
local rundir=$(gluster --print-statedumpdir)
local pidfile="${rundir}/${V0}$aux_suffix.pid"
if [ -f $pidfile ];
then
        # Fixed: read the suffixed pidfile we just tested for; the old
        # code read the unsuffixed ${V0}.pid, which may not exist.
        local pid=$(cat $pidfile)
        pidof glusterfs 2>&1 | grep -w $pid > /dev/null

        if [ $? -eq 0 ]
        then
                echo "0"
        else
                echo "1"
        fi
else
        echo "1"
fi
}
716

717
function get_list_aux()
718
{
719
# check for quota list aux mount
720
	get_aux "_quota_list"
721
}
722

723
function get_limit_aux()
724
{
725
# check for quota list aux mount
726
	get_aux "_quota_limit"
727
}
728

729
function check_for_xattr {
730
        local xattr=$1
731
        local filepath=$2
732
        getfattr -n $xattr $filepath 2>/dev/null | grep "$xattr" | cut -f1 -d'='
733
}
734

735
function get_bitd_count {
736
        ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | wc -l
737
}
738

739
function get_scrubd_count {
740
        ps auxww | grep glusterfs | grep scrub.pid | grep -v grep | wc -l
741
}
742

743
function get_quarantine_count {
744
        ls -l "$1/.glusterfs/quarantine" | wc -l
745
}
746

747
function get_quotad_count {
748
        ps auxww | grep glusterfs | grep quotad.pid | grep -v grep | wc -l
749
}
750

751
function get_nfs_count {
752
        ps auxww | grep glusterfs | grep nfs.pid | grep -v grep | wc -l
753
}
754

755
function get_snapd_count {
756
        ps auxww | grep glusterfs | grep snapd.pid | grep -v grep | wc -l
757
}
758

759
function drop_cache() {
760
	case $OSTYPE in
761
	Linux)
762
		echo 3 > /proc/sys/vm/drop_caches
763
		;;
764
	*)
765
		# fail but flush caches
766
		( cd $1 && umount $1 2>/dev/null )
767
		;;
768
	esac
769
}
770

771
# Print column $2 of the `volume quota list` row matching path $1.
function quota_list_field () {
        local QUOTA_PATH=$1
        local FIELD=$2

        # Pass the column number into awk as a variable instead of
        # building the program text via shell interpolation.
        $CLI volume quota $V0 list $QUOTA_PATH | grep $QUOTA_PATH | awk -v f="$FIELD" '{print $f}'
}
778

779
function quota_object_list_field () {
780
        local QUOTA_PATH=$1
781
        local FIELD=$2
782
        local awk_arg="{print \$$FIELD}"
783

784
        $CLI volume quota $V0 list-objects $QUOTA_PATH | grep $QUOTA_PATH | awk "$awk_arg"
785
}
786

787
function quotausage()
788
{
789
        quota_list_field $1 4
790
}
791

792
function quota_hard_limit()
793
{
794
        quota_list_field $1 2
795
}
796

797
function quota_soft_limit()
798
{
799
        quota_list_field $1 3
800
}
801

802
function quota_sl_exceeded()
803
{
804
        quota_list_field $1 6
805
}
806

807
function quota_hl_exceeded()
808
{
809
        quota_list_field $1 7
810

811
}
812

813
function quota_object_hard_limit()
814
{
815
        quota_object_list_field $1 2
816
}
817

818
function scrub_status()
819
{
820
        local vol=$1;
821
        local field=$2;
822

823
        $CLI volume bitrot $vol scrub status | grep "^$field: " | sed 's/.*: //';
824
}
825

826
function get_gfid_string {
827
        local path=$1;
828
        getfattr -n glusterfs.gfid.string $1 2>/dev/null \
829
                    | grep glusterfs.gfid.string | cut -d '"' -f 2
830
}
831

832
function file_all_zeroes {
833
        < $1 tr -d '\0' | read -n 1 || echo 1
834
}
835

836
function get_hard_link_count {
837
        local path=$1;
838
        stat -c %h $path
839
}
840

841
function count_sh_entries()
842
{
843
    ls $1/.glusterfs/indices/xattrop | grep -v "xattrop-" | wc -l
844
}
845

846
function check_brick_multiplex() {
847
        cnt="$(ls /var/log/glusterfs/bricks|wc -l)"
848
        local ret=$($CLI volume info|grep "cluster.brick-multiplex"|cut -d" " -f2)
849
        local bcnt="$(brick_count)"
850

851
        if [ $bcnt -ne 1 ]; then
852
           if [ "$ret" = "on" ] || [ $cnt -eq 1 ]; then
853
              echo "Y"
854
           else
855
              echo "N"
856
           fi
857
        else
858
           echo "N"
859
        fi
860
}
861

862
function get_value_from_brick_statedump {
863
    local vol="$1"
864
    local host="$2"
865
    local brick="$3"
866
    local key="$4"
867

868
    local statedump="$(generate_brick_statedump $vol $host $brick)"
869
    value="$(grep "$key" $statedump | cut -f2 -d'=' | tail -1)"
870

871
    rm -f "$statedump"
872
    echo "$value"
873
}
874

875
function get_fd_count {
876
        local vol=$1
877
        local host=$2
878
        local brick=$3
879
        local fname=$4
880
        local val="$(check_brick_multiplex)"
881
        local gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brick/$fname))
882
        local statedump=$(generate_brick_statedump $vol $host $brick)
883
        if [ $val == "N" ]; then
884
            count=$(grep "gfid=$gfid_str" $statedump -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
885
        else
886
            count=$(grep "${brick}.active.1" -A3 $statedump | grep "gfid=$gfid_str" -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
887
        fi
888
# If no information is found for a given gfid, it means it has not been
889
# accessed, so it doesn't have any open fd. In this case we return 0.
890
        count="${count:-0}"
891
        rm -f $statedump
892
        echo $count
893
}
894

895

896
function get_active_fd_count {
897
        local vol=$1
898
        local host=$2
899
        local brick=$3
900
        local fname=$4
901
        local val="$(check_brick_multiplex)"
902
        local gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brick/$fname))
903
        local statedump=$(generate_brick_statedump $vol $host $brick)
904
        if [ $val == "N" ]; then
905
            count=$(grep "gfid=$gfid_str" $statedump -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
906
        else
907
            count=$(grep "${brick}.active.1" -A3 $statedump | grep "gfid=$gfid_str" -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
908
        fi
909
        rm -f $statedump
910
        echo $count
911
}
912

913
function get_mount_active_size_value {
914
        local vol=$1
915
        local mount=$2
916
        local statedump=$(generate_mount_statedump $vol $mount)
917
        local val=$(grep "active_size" $statedump | cut -f2 -d'=' | tail -1)
918
        rm -f $statedump
919
        echo $val
920
}
921

922
function get_mount_lru_size_value {
923
        local vol=$1
924
        local mount=$2
925
        local statedump=$(generate_mount_statedump $vol $mount)
926
        local val=$(grep "lru_size" $statedump | cut -f2 -d'=' | tail -1)
927
        rm -f $statedump
928
        echo $val
929
}
930

931
function check_changelog_op {
932
        local clog_path=$1
933
        local op=$2
934

935
        $PYTHON $(dirname $0)/../../utils/changelogparser.py ${clog_path}/CHANGELOG | grep "$op" | wc -l
936
}
937

938
function processed_changelogs {
939
        local processed_dir=$1
940
        count=$(ls -l $processed_dir | grep CHANGELOG | wc -l)
941
        if [ $count -gt 0 ];
942
        then
943
            echo "Y"
944
        else
945
            echo "N"
946
        fi
947
}
948

949
function volgen_check_ancestry {
        #Returns Y if ancestor_xl is an ancestor of $child_xl according to the volfile
        # (i.e. the child's "type <type>/<name>" line appears earlier in
        # the file than the ancestor's).
        local volfile="$1"

        local child_xl_type="$2"
        local child_xl="$3"

        local ancestor_xl_type="$4"
        local ancestor_xl="$5"

        # Fixed: the shell variables were inside a single-quoted awk
        # program, so awk received them literally and the patterns never
        # matched. Pass the pattern in with -v instead.
        child_linenum=$(awk -v pat="type $child_xl_type/$child_xl" '$0 ~ pat {print FNR}' $volfile)
        ancestor_linenum=$(awk -v pat="type $ancestor_xl_type/$ancestor_xl" '$0 ~ pat {print FNR}' $volfile)

        if [ $child_linenum -lt $ancestor_linenum ];
        then
                echo "Y"
        else
                echo "N"
        fi
}
969

970
function get_shd_mux_pid {
971
   local volume=$1
972
   pid=`$CLI volume status $volume shd | awk '/Self-heal/{print $8}'`
973
   echo $pid
974
}
975

976
function shd_count {
977
   ps aux | grep "glustershd" | grep -v grep | wc -l
978
}
979

980
function number_healer_threads_shd {
981
   local pid=$(get_shd_mux_pid $1)
982
   pstack $pid | grep $2 | wc -l
983
}
984

985
function get_mtime {
986
    local time=$(get-mdata-xattr -m $1)
987
    if [ $time == "-1" ];
988
    then
989
        echo $(stat -c %Y $1)
990
    else
991
        echo $time
992
    fi
993
}
994

995
function get_ctime {
996
    local time=$(get-mdata-xattr -c $1)
997
    if [ $time == "-1" ];
998
    then
999
        echo $(stat -c %Z $1)
1000
    else
1001
        echo $time
1002
    fi
1003
}
1004

1005
function get_atime {
1006
    local time=$(get-mdata-xattr -a $1)
1007
    if [ $time == "-1" ];
1008
    then
1009
        echo $(stat -c %X $1)
1010
    else
1011
        echo $time
1012
    fi
1013
}
1014

1015
# Run "$CLI $1 --xml", pretty-print the XML, and echo the lines
# containing tag $2 with the <$2> / </$2> markup stripped.
function get-xml()
{
        # Fixed: the sed script was single-quoted, so <"$2"> was matched
        # literally (never present in real XML) and the tags were never
        # stripped. Double-quote so $2 expands into the pattern.
        $CLI $1 --xml | xmllint --format - | grep $2 | sed "s/\(<$2>\|<\/$2>\)//g"
}
1019

1020
function logging_time_check()
1021
{
1022
    local logdir=$1
1023
    local logfile=`echo ${0##*/}`_glusterd1.log
1024

1025
    cat $logdir/1/$logfile | tail -n 2 | head -n 1 | grep $(date +%H:%M) | wc -l
1026
}
1027

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.