#!/bin/bash
#
# Shard xlator regression test: verify the active-inode count on a FUSE
# mount drops by the expected amount after deleting a sharded file.
#
# A 23 MB file with a 4 MB shard-block-size produces 1 base file + 5 shards.
# After `rm`, the 6 inodes for the file go away, but shard's deletion
# machinery creates one .remove_me entry, so the net change is -5 + 1.

# Harness helpers: TEST, EXPECT_WITHIN, cleanup, $CLI, $V0, $H0, $B0, $M0,
# force_umount, get_mount_active_size_value, UMOUNT_TIMEOUT.
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc

cleanup

# Seconds to wait for the shard inode count to settle after the delete.
SHARD_COUNT_TIME=5

TEST glusterd
TEST pidof glusterd

# Single-brick volume with sharding enabled at a 4 MB block size.
TEST $CLI volume create $V0 $H0:$B0/${V0}0
TEST $CLI volume set $V0 features.shard on
TEST $CLI volume set $V0 features.shard-block-size 4MB
TEST $CLI volume start $V0

TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0

# 23 MB / 4 MB block size => base file + 5 shard files.
TEST dd if=/dev/zero conv=fsync of=$M0/one-plus-five-shards bs=1M count=23

ACTIVE_INODES_BEFORE=$(get_mount_active_size_value $V0)
TEST rm -f $M0/one-plus-five-shards
# Expect 5 inodes less. But one inode more than before because .remove_me would be created.
EXPECT_WITHIN $SHARD_COUNT_TIME $((ACTIVE_INODES_BEFORE - 5 + 1)) get_mount_active_size_value $V0 $M0

EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0

cleanup