1
function volinfo_field()
{
        # Print the value of one "Key: value" field from 'volume info'.
        # $1 - volume name, $2 - field label (e.g. "Status").
        local vol=$1
        local field=$2

        $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
}
9
function volume_get_field()
{
        # Print the effective value of option $2 on volume $1 via
        # 'volume get'; the value is the second column of the last row.
        local vol=$1
        local field=$2

        $CLI volume get $vol $field | tail -1 | awk '{print $2}'
}
21
$CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
24
function check_brick_status() {
        # Count '<status>1' (online) entries in the XML volume status.
        # With no argument count every online entry; with a daemon name
        # ($1, e.g. "Self-heal") count only entries near that daemon's
        # XML section.
        cmd="gluster --xml volume status"
        local daemon=$1

        if [[ -z $daemon ]]
        then
                echo `$cmd | grep '<status>1' | wc -l`
        else
                echo `$cmd | grep -A 5 ${daemon} | grep '<status>1' | wc -l`
        fi
}
36
function online_brick_count ()
{
        # First count total number of online entries, then subtract the
        # per-daemon online counts so only real bricks remain.
        local v1 v2 v3 v4 v5 v6 v7 tot

        v1=`check_brick_status`
        v2=`check_brick_status "Self-heal"`
        v3=`check_brick_status "Quota"`
        v4=`check_brick_status "Snapshot"`
        v5=`check_brick_status "Tier"`
        v6=`check_brick_status "Scrubber"`
        v7=`check_brick_status "Bitrot"`
        tot=$((v1-v2-v3-v4-v5-v6-v7))
        echo $tot
}
60
function brick_up_status {
        # Print 1 if brick $3 of volume $1 on host $2 is online, 0 if not,
        # by extracting the <status> element from the XML status output.
        local vol=$1
        local host=$2
        local brick=$3

        $CLI volume status $vol $host:$brick --xml | sed -ne 's/.*<status>\([01]\)<\/status>/\1/p'
}
67
function volume_option()
71
$CLI volume info $vol | egrep "^$key: " | cut -f2 -d' ';
74
function rebalanced_size_field {
75
$CLI volume rebalance $1 status | awk '{print $3}' | sed -n 3p
78
function rebalance_status_field {
79
$CLI volume rebalance $1 status | awk '{print $7}' | sed -n 3p
82
function rebalanced_files_field {
83
$CLI volume rebalance $1 status | awk '{print $2}' | sed -n 3p
86
function rebalance_failed_field {
87
$CLI volume rebalance $1 status | awk '{print $5}' | sed -n 3p
90
function fix-layout_status_field {
91
#The fix-layout status can be up to 3 words, (ex:'fix-layout in progress'), hence the awk-print $2 thru $4.
92
#But if the status is less than 3 words, it also prints the next field i.e the run_time_in_secs.(ex:'completed 3.00').
93
#So we trim the numbers out with `tr`. Finally remove the trailing white spaces with sed. What we get is one of the
94
#strings in the 'cli_vol_task_status_str' char array of cli-rpc-ops.c
96
$CLI volume rebalance $1 status | awk '{print $2,$3,$4}' |sed -n 3p |tr -d '[^0-9+\.]'|sed 's/ *$//g'
99
function remove_brick_status_completed_field {
102
$CLI volume remove-brick $vol $brick_list status | awk '{print $7}' | sed -n 3p
105
function get_mount_process_pid {
108
ps auxww | grep glusterfs | grep -E "volfile-id[ =]/?$vol .*$mnt" | awk '{print $2}' | head -1
111
function get_nfs_pid ()
113
ps auxww | grep "volfile-id\ gluster\/nfs" | awk '{print $2}' | head -1
116
function read_nfs_pidfile ()
118
echo `cat $GLUSTERD_PIDFILEDIR/nfs/nfs.pid`
121
function cleanup_statedump {
123
rm -f $statedumpdir/*$pid.dump.*
124
#.vimrc friendly comment */
127
function wait_statedump_ready {
128
local maxtime="${1}000000000"
130
local deadline="$(($(date +%s%N) + maxtime))"
133
while [[ "$(date +%s%N)" < "$deadline" ]]; do
134
fname="$statedumpdir/$(ls $statedumpdir | grep -E "\.$pid\.dump\.")"
135
if [[ -f "$fname" ]]; then
136
grep "^DUMP-END-TIME" "$fname" >/dev/null
137
if [[ $? -eq 0 ]]; then
148
function generate_statedump {
150
#remove old stale statedumps
151
cleanup_statedump $pid
153
wait_statedump_ready 3 $pid
156
function generate_mount_statedump {
159
generate_statedump $(get_mount_process_pid $vol $mnt)
162
function cleanup_mount_statedump {
164
cleanup_statedump $(get_mount_process_pid $vol)
167
function snap_client_connected_status {
169
local fpath=$(generate_mount_statedump $vol)
170
up=$(grep -a -A1 xlator.protocol.client.$vol-snapd-client.priv $fpath | tail -1 | cut -f 2 -d'=')
175
function _afr_child_up_status {
177
#brick_id is (brick-num in volume info - 1)
179
local gen_state_dump=$3
180
local fpath=$($gen_state_dump $vol)
181
up=$(grep -a -B1 trusted.afr.$vol-client-$brick_id $fpath | head -1 | cut -f2 -d'=')
186
function afr_child_up_status_meta {
190
grep -E "^child_up\[$child\]" $mnt/.meta/graphs/active/$repl/private | awk '{print $3}'
193
function client_connected_status_meta {
196
grep "connected" $mnt/.meta/graphs/active/$client/private | awk '{print $3}'
199
function afr_child_up_status {
201
#brick_id is (brick-num in volume info - 1)
203
_afr_child_up_status $vol $brick_id generate_mount_statedump
206
function ec_get_info {
211
local value=$(sed -n "/^\[cluster\/disperse\.$vol-disperse-$dist_id\]/,/^\[/{s/^$key=\(.*\)/\1/p;}" $fpath | head -1)
216
function ec_child_up_status {
219
local brick_id=$(($3 + 1))
221
local mask=$(ec_get_info $vol $dist_id "childs_up_mask" $(generate_mount_statedump $vol $mnt))
222
echo "${mask: -$brick_id:1}"
225
function ec_child_up_count {
229
ec_get_info $vol $dist_id "childs_up" $(generate_mount_statedump $vol $mnt)
232
function ec_child_up_status_shd {
235
local brick_id=$(($3 + 1))
236
local mask=$(ec_get_info $vol $dist_id "childs_up_mask" $(generate_shd_statedump $vol))
237
echo "${mask: -$brick_id:1}"
240
function ec_child_up_count_shd {
243
ec_get_info $vol $dist_id "childs_up" $(generate_shd_statedump $vol)
246
function get_shd_process_pid {
248
ps auxww | grep "process-name\ glustershd" | awk '{print $2}' | head -1
251
function generate_shd_statedump {
253
generate_statedump $(get_shd_process_pid $vol)
256
function generate_nfs_statedump {
257
generate_statedump $(get_nfs_pid)
260
function generate_brick_statedump {
264
generate_statedump $(get_brick_pid $vol $host $brick)
267
function afr_child_up_status_in_shd {
269
#brick_id is (brick-num in volume info - 1)
271
_afr_child_up_status $vol $brick_id generate_shd_statedump
274
function afr_child_up_status_in_nfs {
276
#brick_id is (brick-num in volume info - 1)
278
_afr_child_up_status $vol $brick_id generate_nfs_statedump
281
function nfs_up_status {
282
gluster volume status | grep "NFS Server" | awk '{print $7}'
285
function glustershd_up_status {
286
gluster volume status | grep "Self-heal Daemon" | awk '{print $7}'
289
function quotad_up_status {
290
gluster volume status | grep "Quota Daemon" | awk '{print $7}'
293
function get_glusterd_pid {
        # PID of the running glusterd process (exact process-name match);
        # prints nothing if glusterd is not running.
        pgrep '^glusterd$' | head -1
}
297
function get_brick_pidfile {
        # Compose the glusterd pidfile path for brick $3 of volume $1 on
        # host $2. Glusterd flattens the brick path by turning '/' into '-'.
        local vol=$1 host=$2 brick=$3
        local brick_dashed=${brick//\//-}

        echo $GLUSTERD_PIDFILEDIR/vols/$vol/${host}${brick_dashed}.pid
}
305
function get_brick_pid {
306
cat $(get_brick_pidfile $*)
314
local pidfile=$(get_brick_pidfile $vol $host $brick)
315
local cmdline="/proc/$(cat $pidfile)/cmdline"
316
local socket=$(cat $cmdline | tr '\0' '\n' | grep '\.socket$')
318
gf_attach -d $socket $brick
320
local deadline="$(($(date +%s%N) + ${PROCESS_UP_TIMEOUT}000000000))"
321
while [[ "$(date +%s%N)" < "$deadline" ]]; do
322
if [[ "$(brick_up_status $vol $host $brick)" == "0" ]]; then
323
# The brick termination code is run from an
324
# asynchronous thread, so even after glusterd
325
# considers it stopped, the brick may still be
326
# alive. We need to make sure it's stopped before
327
# returning, otherwise an immediate restart could
328
# fail. Unfortunately there's no easy way to know
329
# when the brick has really been stopped. For now
330
# just add some delay.
337
function check_option_help_presence {
339
$CLI volume set help | grep "^Option:" | grep -w $option
342
function afr_get_changelog_xattr {
345
local xval=$(getfattr -n $xkey -e hex $file 2>/dev/null | grep "$xkey" | cut -f2 -d'=')
346
if [ -z $xval ]; then
347
xval="0x000000000000000000000000"
352
function get_pending_heal_count {
354
gluster volume heal $vol info | grep "Number of entries" | awk '{ sum+=$4} END {print sum}'
357
function afr_get_split_brain_count {
359
gluster volume heal $vol info split-brain | grep "Number of entries in split-brain" | awk '{ sum+=$6} END {print sum}'
362
function afr_get_index_path {
        # Print the AFR xattrop index directory that lives inside the
        # brick's hidden .glusterfs tree. $1 - brick path.
        local brick_path=$1

        echo "$brick_path/.glusterfs/indices/xattrop"
}
367
function afr_get_num_indices_in_brick {
369
echo $(ls $(afr_get_index_path $brick_path) | grep -v xattrop | wc -l)
372
function gf_get_gfid_xattr {
374
getfattr -n trusted.gfid -e hex $file 2>/dev/null | grep "trusted.gfid" | cut -f2 -d'='
377
function gf_gfid_xattr_to_str {
        # Convert a 0x-prefixed 32-hex-digit gfid xattr value into the
        # canonical dashed UUID form (8-4-4-4-12).
        local hex=$1

        echo "${hex:2:8}-${hex:10:4}-${hex:14:4}-${hex:18:4}-${hex:22:12}"
}
382
function get_text_xattr {
385
getfattr -h -d -m. -e text $path 2>/dev/null | grep -a $key | cut -f2 -d'='
388
function get_gfid2path {
390
getfattr -h --only-values -n glusterfs.gfidtopath $path 2>/dev/null
395
getfattr -h -e hex -n trusted.glusterfs.mdata $path 2>/dev/null | grep "trusted.glusterfs.mdata" | cut -f2 -d'='
398
function get_mdata_count {
399
getfattr -d -m . -e hex $@ 2>/dev/null | grep mdata | wc -l
402
function get_mdata_uniq_count {
403
getfattr -d -m . -e hex $@ 2>/dev/null | grep mdata | uniq | wc -l
406
function get_xattr_key {
409
getfattr -h -d -m. -e text $path 2>/dev/null | grep -a $key | cut -f1 -d'='
412
function gf_check_file_opened_in_brick {
417
ls -l /proc/$(get_brick_pid $vol $host $brick)/fd | grep "${realpath}$" 2>&1 > /dev/null
418
if [ $? -eq 0 ]; then
425
function gf_open_file_count_in_brick {
430
ls -l /proc/$(get_brick_pid $vol $host $brick)/fd | grep "${realpath}$" | wc -l
433
function gf_get_gfid_backend_file_path {
436
gfid=$(gf_get_gfid_xattr "$brickpath/$filepath_in_brick")
437
gfidstr=$(gf_gfid_xattr_to_str $gfid)
438
echo "$brickpath/.glusterfs/${gfidstr:0:2}/${gfidstr:2:2}/$gfidstr"
441
function gf_rm_file_and_gfid_link {
444
rm -f $(gf_get_gfid_backend_file_path $brickpath $filepath_in_brick)
445
rm -f "$brickpath/$filepath_in_brick"
449
function gd_is_replace_brick_completed {
454
$CLI volume replace-brick $vol $src_brick $dst_brick status | grep -i "Migration complete"
455
if [ $? -eq 0 ]; then
462
function dht_get_layout {
463
local my_xa=trusted.glusterfs.dht
464
getfattr -d -e hex -n $my_xa $1 2> /dev/null | grep "$my_xa=" | cut -d= -f2
467
function afr_get_specific_changelog_xattr ()
{
        # Extract one 8-hex-digit counter from the AFR changelog xattr of
        # $1 (file path) / $2 (xattr key). $3 selects which counter:
        # "data" (bytes 2-9), "metadata" (10-17) or "entry" (18-25).
        # Any other type prints "error".
        local path=$1
        local key=$2
        local type=$3
        local specific_changelog=""
        local changelog_xattr=""

        changelog_xattr=$(afr_get_changelog_xattr "$path" "$key")
        if [ "$type" == "data" ]; then
                specific_changelog=${changelog_xattr:2:8}
        elif [ "$type" == "metadata" ]; then
                specific_changelog=${changelog_xattr:10:8}
        elif [ "$type" == "entry" ]; then
                specific_changelog=${changelog_xattr:18:8}
        else
                # BUG FIX: the original assigned the misspelled variable
                # 'specific_changlog', so "error" was silently dropped and
                # an empty string was echoed for unknown types.
                specific_changelog="error"
        fi

        echo $specific_changelog
}
488
# query pathinfo xattr and extract POSIX pathname(s)
490
function get_backend_paths {
493
getfattr -m . -n trusted.glusterfs.pathinfo $path | tr ' ' '\n' | sed -n 's/<POSIX.*:.*:\(.*\)>.*/\1/p'
496
#Gets the xattr value in hex, also removed 0x in front of the value
497
function get_hex_xattr {
500
getfattr -d -m. -e hex $2 2>/dev/null | grep $1 | cut -f2 -d'=' | cut -f2 -d'x'
503
function cumulative_stat_count {
        # Count "Cumulative Stats:" section headers inside the profile
        # output passed as $1.
        printf '%s\n' "$1" | grep "Cumulative Stats:" | wc -l
}
507
function incremental_stat_count {
        # Count "Interval<$2>Stats:" section headers in the profile output
        # $1; $2 is the text between "Interval" and "Stats:" (e.g. " 1 ").
        printf '%s\n' "$1" | grep "Interval$2Stats:" | wc -l
}
511
function cleared_stat_count {
        # Count "Cleared stats." confirmations in the CLI output $1.
        printf '%s\n' "$1" | grep "Cleared stats." | wc -l
}
515
function data_read_count {
        # Count "Data Read:<$2>bytes" lines in profile output $1; $2 is
        # the byte-count text including surrounding spaces (e.g. " 0 ").
        printf '%s\n' "$1" | grep "Data Read:$2bytes" | wc -l
}
519
function data_written_count {
        # Count "Data Written:<$2>bytes" lines in profile output $1; $2 is
        # the byte-count text including surrounding spaces (e.g. " 0 ").
        printf '%s\n' "$1" | grep "Data Written:$2bytes" | wc -l
}
524
if [ $((`stat -c '%b*%B-%s' $1`)) -lt 0 ];
532
function do_volume_operations() {
541
for i in `seq 1 $count`; do
544
${!cli} volume $operation ${!v} $force &
548
for i in `seq 1 $count`; do
553
function start_volumes() {
        # Start $1 volumes concurrently (thin wrapper over
        # do_volume_operations).
        do_volume_operations start $1
}
557
function stop_volumes() {
        # Stop $1 volumes concurrently (thin wrapper over
        # do_volume_operations).
        do_volume_operations stop $1
}
561
function start_force_volumes() {
        # Force-start $1 volumes concurrently (thin wrapper over
        # do_volume_operations).
        do_volume_operations start $1 force
}
565
function stop_force_volumes() {
        # Force-stop $1 volumes concurrently (thin wrapper over
        # do_volume_operations).
        do_volume_operations stop $1 force
}
569
function delete_volumes() {
        # Delete $1 volumes concurrently (thin wrapper over
        # do_volume_operations).
        do_volume_operations delete $1
}
573
function volume_exists() {
        # Print "Y" if 'volume info $1' succeeds (volume exists), else "N".
        $CLI volume info $1 > /dev/null 2>&1
        if [ $? -eq 0 ]; then
                echo "Y"
        else
                echo "N"
        fi
}
582
function killall_gluster() {
583
terminate_pids $(process_pids gluster)
584
find $GLUSTERD_PIDFILEDIR -name '*.pid' | xargs rm -f
587
function afr_get_index_count {
        # Number of pending-heal index entries under brick path $1,
        # excluding the xattrop base-file entry itself.
        ls $1/.glusterfs/indices/xattrop | grep -v xattrop | wc -l
}
592
function landfill_entry_count {
        # Number of entries waiting in the brick's landfill (deferred
        # deletion) directory. $1 - brick path.
        local brick=$1

        ls $brick/.glusterfs/landfill | wc -l
}
597
function path_exists {
599
if [ $? -eq 0 ]; then echo "Y"; else echo "N"; fi
603
local size=$(stat -c %s $1)
604
if [ $? -eq 0 ]; then echo $size; else echo ""; fi
607
function force_umount {
609
if [ $? -eq 0 ]; then echo "Y"; else echo "N"; fi
612
function assign_gfid {
615
setfattr -n trusted.gfid -v $1 $2
618
function get_random_gfid {
619
echo "0x"$(uuidgen | awk -F '-' 'BEGIN {OFS=""} {print $1,$2,$3,$4,$5}')
622
function volgen_volume_exists {
626
local xl_feature="$4"
627
xl=$(sed -e "/./{H;\$!d;}" -e "x;/volume $xl_vol/!d;/type $xl_type\/$xl_feature/!d" $volfile)
636
function volgen_volume_option {
640
local xl_feature="$4"
642
sed -e "/./{H;\$!d;}" -e "x;/volume $xl_vol/!d;/type $xl_type\/$xl_feature/!d;/option $xl_option/!d" $volfile | grep " $xl_option " | awk '{print $3}'
645
function mount_get_option_value {
650
grep -w "$3" $m/.meta/graphs/active/$subvol/private | awk '{print $3}'
653
function get_volume_mark {
654
getfattr -n trusted.glusterfs.volume-mark -ehex $1 | sed -n 's/^trusted.glusterfs.volume-mark=0x//p' | cut -b5-36 | sed 's/\([a-f0-9]\{8\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)/\1-\2-\3-\4-/'
657
# setup geo-rep in a single a node.
659
function setup_georep {
661
$CLI volume create $GMV0 replica 2 $H0:$B0/${GMV0}{1,2,3,4};
663
$CLI volume start $GMV0
665
$CLI volume create $GSV0 replica 2 $H0:$B0/${GSV0}{1,2,3,4};
667
$CLI volume start $GSV0
669
$CLI system:: execute gsec_create
671
$CLI volume geo-rep $GMV0 $H0::$GSV0 create push-pem
673
$CLI volume geo-rep $GMV0 $H0::$GSV0 start
675
sleep 80 # after start geo-rep takes a minute to get stable
680
# stop and delete geo-rep session
682
function cleanup_georep {
684
$CLI volume geo-rep $GMV0 $H0::$GSV0 stop
686
$CLI volume geo-rep $GMV0 $H0::$GSV0 delete
692
echo `ls $mountpoint/.meta/graphs/ | grep -v active | wc -l`
697
##Check if a auxiliary mount is there
699
local rundir=$(gluster --print-statedumpdir)
700
local pidfile="${rundir}/${V0}$aux_suffix.pid"
703
local pid=$(cat ${rundir}/${V0}.pid)
704
pidof glusterfs 2>&1 | grep -w $pid > /dev/null
717
function get_list_aux()
719
# check for quota list aux mount
720
get_aux "_quota_list"
723
function get_limit_aux()
725
# check for quota list aux mount
726
get_aux "_quota_limit"
729
function check_for_xattr {
732
getfattr -n $xattr $filepath 2>/dev/null | grep "$xattr" | cut -f1 -d'='
735
function get_bitd_count {
736
ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | wc -l
739
function get_scrubd_count {
740
ps auxww | grep glusterfs | grep scrub.pid | grep -v grep | wc -l
743
function get_quarantine_count {
744
ls -l "$1/.glusterfs/quarantine" | wc -l
747
function get_quotad_count {
748
ps auxww | grep glusterfs | grep quotad.pid | grep -v grep | wc -l
751
function get_nfs_count {
752
ps auxww | grep glusterfs | grep nfs.pid | grep -v grep | wc -l
755
function get_snapd_count {
756
ps auxww | grep glusterfs | grep snapd.pid | grep -v grep | wc -l
759
function drop_cache() {
762
echo 3 > /proc/sys/vm/drop_caches
765
# fail but flush caches
766
( cd $1 && umount $1 2>/dev/null )
771
function quota_list_field () {
774
local awk_arg="{print \$$FIELD}"
776
$CLI volume quota $V0 list $QUOTA_PATH | grep $QUOTA_PATH | awk "$awk_arg"
779
function quota_object_list_field () {
782
local awk_arg="{print \$$FIELD}"
784
$CLI volume quota $V0 list-objects $QUOTA_PATH | grep $QUOTA_PATH | awk "$awk_arg"
789
quota_list_field $1 4
792
function quota_hard_limit()
{
        # Hard limit for path $1: column 2 of 'volume quota list'.
        quota_list_field $1 2
}
797
function quota_soft_limit()
{
        # Soft limit for path $1: column 3 of 'volume quota list'.
        quota_list_field $1 3
}
802
function quota_sl_exceeded()
804
quota_list_field $1 6
807
function quota_hl_exceeded()
809
quota_list_field $1 7
813
function quota_object_hard_limit()
815
quota_object_list_field $1 2
818
function scrub_status()
823
$CLI volume bitrot $vol scrub status | grep "^$field: " | sed 's/.*: //';
826
function get_gfid_string {
828
getfattr -n glusterfs.gfid.string $1 2>/dev/null \
829
| grep glusterfs.gfid.string | cut -d '"' -f 2
832
function file_all_zeroes {
        # Print 1 when file $1 contains only NUL bytes (or is empty):
        # stripping '\0' leaves nothing, so 'read' fails and the '||'
        # branch fires. A file with any other byte prints nothing.
        tr -d '\0' < $1 | read -n 1 || echo 1
}
836
function get_hard_link_count {
841
function count_sh_entries()
{
        # Count self-heal index entries under brick path $1, skipping the
        # "xattrop-<uuid>" base link.
        ls $1/.glusterfs/indices/xattrop | grep -v "xattrop-" | wc -l
}
846
function check_brick_multiplex() {
847
cnt="$(ls /var/log/glusterfs/bricks|wc -l)"
848
local ret=$($CLI volume info|grep "cluster.brick-multiplex"|cut -d" " -f2)
849
local bcnt="$(brick_count)"
851
if [ $bcnt -ne 1 ]; then
852
if [ "$ret" = "on" ] || [ $cnt -eq 1 ]; then
862
function get_value_from_brick_statedump {
868
local statedump="$(generate_brick_statedump $vol $host $brick)"
869
value="$(grep "$key" $statedump | cut -f2 -d'=' | tail -1)"
875
function get_fd_count {
880
local val="$(check_brick_multiplex)"
881
local gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brick/$fname))
882
local statedump=$(generate_brick_statedump $vol $host $brick)
883
if [ $val == "N" ]; then
884
count=$(grep "gfid=$gfid_str" $statedump -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
886
count=$(grep "${brick}.active.1" -A3 $statedump | grep "gfid=$gfid_str" -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
888
# If no information is found for a given gfid, it means it has not been
889
# accessed, so it doesn't have any open fd. In this case we return 0.
896
function get_active_fd_count {
901
local val="$(check_brick_multiplex)"
902
local gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brick/$fname))
903
local statedump=$(generate_brick_statedump $vol $host $brick)
904
if [ $val == "N" ]; then
905
count=$(grep "gfid=$gfid_str" $statedump -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
907
count=$(grep "${brick}.active.1" -A3 $statedump | grep "gfid=$gfid_str" -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
913
function get_mount_active_size_value {
916
local statedump=$(generate_mount_statedump $vol $mount)
917
local val=$(grep "active_size" $statedump | cut -f2 -d'=' | tail -1)
922
function get_mount_lru_size_value {
925
local statedump=$(generate_mount_statedump $vol $mount)
926
local val=$(grep "lru_size" $statedump | cut -f2 -d'=' | tail -1)
931
function check_changelog_op {
935
$PYTHON $(dirname $0)/../../utils/changelogparser.py ${clog_path}/CHANGELOG | grep "$op" | wc -l
938
function processed_changelogs {
939
local processed_dir=$1
940
count=$(ls -l $processed_dir | grep CHANGELOG | wc -l)
949
function volgen_check_ancestry {
        # Returns Y if $ancestor_xl is an ancestor of $child_xl according
        # to the volfile (in a volfile, children appear before ancestors,
        # so the child's 'type' line number must be smaller).
        # $1 - volfile, $2/$3 - child type/name, $4/$5 - ancestor type/name.
        local volfile="$1"
        local child_xl_type="$2"
        local child_xl="$3"
        local ancestor_xl_type="$4"
        local ancestor_xl="$5"

        # BUG FIX: the awk patterns were single-quoted, so $child_xl_type
        # etc. were passed literally to awk and never matched any line.
        # Double quotes let the shell expand them into the pattern.
        child_linenum=$(awk "/type $child_xl_type\/$child_xl/ {print FNR}" $volfile)
        ancestor_linenum=$(awk "/type $ancestor_xl_type\/$ancestor_xl/ {print FNR}" $volfile)

        if [ $child_linenum -lt $ancestor_linenum ];
        then
                echo "Y"
        else
                echo "N"
        fi
}
970
function get_shd_mux_pid {
972
pid=`$CLI volume status $volume shd | awk '/Self-heal/{print $8}'`
977
ps aux | grep "glustershd" | grep -v grep | wc -l
980
function number_healer_threads_shd {
981
local pid=$(get_shd_mux_pid $1)
982
pstack $pid | grep $2 | wc -l
986
local time=$(get-mdata-xattr -m $1)
987
if [ $time == "-1" ];
989
echo $(stat -c %Y $1)
996
local time=$(get-mdata-xattr -c $1)
997
if [ $time == "-1" ];
999
echo $(stat -c %Z $1)
1006
local time=$(get-mdata-xattr -a $1)
1007
if [ $time == "-1" ];
1009
echo $(stat -c %X $1)
1017
$CLI $1 --xml | xmllint --format - | grep $2 | sed 's/\(<"$2">\|<\/"$2">\)//g'
1020
function logging_time_check()
1023
local logfile=`echo ${0##*/}`_glusterd1.log
1025
cat $logdir/1/$logfile | tail -n 2 | head -n 1 | grep $(date +%H:%M) | wc -l