Diffstat (limited to 'tests/basic')
-rw-r--r--  tests/basic/afr/afr-anon-inode-no-quorum.t  63
-rw-r--r--  tests/basic/afr/afr-anon-inode.t  114
-rw-r--r--  tests/basic/afr/afr-seek.t  55
-rw-r--r--  tests/basic/afr/arbiter-mount.t  3
-rw-r--r--  tests/basic/afr/durability-off.t  2
-rw-r--r--  tests/basic/afr/entry-self-heal-anon-dir-off.t  459
-rw-r--r--  tests/basic/afr/entry-self-heal.t  3
-rw-r--r--  tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t  9
-rw-r--r--  tests/basic/afr/gfid-self-heal.t  16
-rw-r--r--  tests/basic/afr/granular-esh/cli.t  32
-rw-r--r--  tests/basic/afr/halo.t  61
-rw-r--r--  tests/basic/afr/rename-data-loss.t  72
-rw-r--r--  tests/basic/afr/root-squash-self-heal.t  3
-rw-r--r--  tests/basic/afr/split-brain-favorite-child-policy-client-side-healing.t  124
-rw-r--r--  tests/basic/afr/split-brain-favorite-child-policy.t  202
-rw-r--r--  tests/basic/afr/split-brain-heal-info.t  2
-rw-r--r--  tests/basic/afr/split-brain-healing-ctime.t  252
-rw-r--r--  tests/basic/afr/split-brain-healing.t  37
-rw-r--r--  tests/basic/afr/split-brain-resolution.t  15
-rw-r--r--  tests/basic/afr/ta-check-locks.t  68
-rw-r--r--  tests/basic/afr/ta-read.t  16
-rw-r--r--  tests/basic/afr/ta-shd.t  6
-rw-r--r--  tests/basic/afr/ta-write-on-bad-brick.t  51
-rw-r--r--  tests/basic/afr/ta.t  22
-rw-r--r--  tests/basic/afr/tarissue.t  2
-rw-r--r--  tests/basic/all_squash.t  74
-rwxr-xr-x  tests/basic/bd.t  142
-rw-r--r--  tests/basic/changelog/changelog-api.t  37
-rw-r--r--  tests/basic/changelog/changelog-history.t  13
-rw-r--r--  tests/basic/changelog/history-api.t  42
-rw-r--r--  tests/basic/cloudsync-sanity.t  7
-rw-r--r--  tests/basic/ctime/ctime-ec-heal.t  70
-rw-r--r--  tests/basic/ctime/ctime-ec-rebalance.t  43
-rw-r--r--  tests/basic/ctime/ctime-glfs-init.t  2
-rw-r--r--  tests/basic/ctime/ctime-heal-symlinks.t  4
-rw-r--r--  tests/basic/ctime/ctime-mdata-legacy-files.t  83
-rw-r--r--  tests/basic/ctime/ctime-noatime.t  2
-rw-r--r--  tests/basic/ctime/ctime-readdir.c  29
-rw-r--r--  tests/basic/ctime/ctime-readdir.t  50
-rw-r--r--  tests/basic/ctime/ctime-rep-heal.t  70
-rw-r--r--  tests/basic/ctime/ctime-rep-rebalance.t  41
-rw-r--r--  tests/basic/ctime/ctime-utimesat.t  28
-rw-r--r--  tests/basic/distribute/brick-down.t  83
-rw-r--r--  tests/basic/distribute/bug-1265677-use-readdirp.t  3
-rw-r--r--  tests/basic/distribute/dir-heal.t  145
-rw-r--r--  tests/basic/distribute/file-create.t  120
-rw-r--r--  tests/basic/distribute/file-rename.t  1021
-rw-r--r--  tests/basic/distribute/rebal-all-nodes-migrate.t  144
-rw-r--r--  tests/basic/distribute/spare_file_rebalance.t  51
-rw-r--r--  tests/basic/ec/ec-1468261.t  19
-rw-r--r--  tests/basic/ec/ec-badfd.c  124
-rwxr-xr-x  tests/basic/ec/ec-badfd.t  26
-rw-r--r--  tests/basic/ec/ec-cpu-extensions.t  3
-rw-r--r--  tests/basic/ec/ec-dirty-flags.t  23
-rw-r--r--  tests/basic/ec/ec-fast-fgetxattr.c  3
-rw-r--r--  tests/basic/ec/ec-fix-openfd.t  2
-rw-r--r--  tests/basic/ec/ec-quorum-count.t  167
-rw-r--r--  tests/basic/ec/ec-read-mask.t  114
-rw-r--r--  tests/basic/ec/ec-reset-brick.t  50
-rw-r--r--  tests/basic/ec/ec-root-heal.t  3
-rw-r--r--  tests/basic/ec/ec-seek.t  3
-rw-r--r--  tests/basic/ec/ec-stripe.t  2
-rw-r--r--  tests/basic/ec/gfapi-ec-open-truncate.c  171
-rw-r--r--  tests/basic/ec/gfapi-ec-open-truncate.t  48
-rwxr-xr-x  tests/basic/ec/nfs.t  2
-rw-r--r--  tests/basic/ec/self-heal-read-write-fail.t  69
-rw-r--r--  tests/basic/ec/self-heal.t  2
-rw-r--r--  tests/basic/fencing/afr-lock-heal-advanced.c  227
-rw-r--r--  tests/basic/fencing/afr-lock-heal-advanced.t  115
-rw-r--r--  tests/basic/fencing/afr-lock-heal-basic.c  182
-rw-r--r--  tests/basic/fencing/afr-lock-heal-basic.t  102
-rw-r--r--  tests/basic/fencing/fence-basic.c  229
-rwxr-xr-x  tests/basic/fencing/fence-basic.t  31
-rw-r--r--  tests/basic/fencing/fencing-crash-conistency.t  62
-rw-r--r--  tests/basic/fencing/test-fence-option.t  37
-rwxr-xr-x  tests/basic/first-test.t  10
-rw-r--r--  tests/basic/fops-sanity.c  1
-rw-r--r--  tests/basic/fuse/active-io-graph-switch.t  65
-rwxr-xr-x  tests/basic/geo-replication/marker-xattrs.t  33
-rw-r--r--  tests/basic/gfapi/bug-1507896.c  49
-rw-r--r--  tests/basic/gfapi/bug-1507896.t  33
-rw-r--r--  tests/basic/gfapi/gfapi-async-calls-test.c  357
-rw-r--r--  tests/basic/gfapi/gfapi-copy-file-range.t  82
-rw-r--r--  tests/basic/gfapi/gfapi-graph-switch-open-fd.t  44
-rw-r--r--  tests/basic/gfapi/gfapi-keep-writing.c  129
-rw-r--r--  tests/basic/gfapi/gfapi-ssl-load-volfile-test.c  127
-rwxr-xr-x  tests/basic/gfapi/gfapi-ssl-load-volfile-test.t  76
-rw-r--r--  tests/basic/gfapi/gfapi-statx-basic.c  184
-rwxr-xr-x  tests/basic/gfapi/gfapi-statx-basic.t  30
-rw-r--r--  tests/basic/gfapi/glfs-copy-file-range.c  180
-rw-r--r--  tests/basic/gfapi/glfs_h_creat_open.c  118
-rwxr-xr-x  tests/basic/gfapi/glfs_h_creat_open.t  27
-rw-r--r--  tests/basic/gfapi/glfsxmp-coverage.c  1900
-rw-r--r--  tests/basic/gfapi/glfsxmp.t  30
-rw-r--r--  tests/basic/gfapi/mandatory-lock-optimal.c  2
-rw-r--r--  tests/basic/gfapi/protocol-client-ssl.vol.in  15
-rw-r--r--  tests/basic/gfapi/upcall-cache-invalidate.c  2
-rw-r--r--  tests/basic/gfapi/upcall-register-api.c  2
-rw-r--r--  tests/basic/global-threading.t  104
-rw-r--r--  tests/basic/glusterd-restart-shd-mux.t  96
-rw-r--r--  tests/basic/glusterd/arbiter-volume.t  32
-rw-r--r--  tests/basic/glusterd/check-cloudsync-ancestry.t  48
-rw-r--r--  tests/basic/glusterd/disperse-create.t  20
-rw-r--r--  tests/basic/glusterd/heald.t  55
-rw-r--r--  tests/basic/glusterd/thin-arbiter-volume-probe.t  25
-rw-r--r--  tests/basic/glusterd/thin-arbiter-volume.t  45
-rw-r--r--  tests/basic/glusterd/volfile_server_switch.t  2
-rw-r--r--  tests/basic/glusterd/volume-brick-count.t  61
-rw-r--r--  tests/basic/graph-cleanup-brick-down-shd-mux.t  64
-rw-r--r--  tests/basic/jbr/jbr-volgen.t  39
-rwxr-xr-x  tests/basic/jbr/jbr.t  38
-rw-r--r--  tests/basic/logchecks-messages.h  2
-rw-r--r--  tests/basic/logchecks.c  6
-rwxr-xr-x  tests/basic/meta.t  2
-rw-r--r--  tests/basic/metadisp/fsyncdir.c  29
-rw-r--r--  tests/basic/metadisp/ftruncate.c  34
-rw-r--r--  tests/basic/metadisp/fxattr.c  107
-rw-r--r--  tests/basic/metadisp/gfs-fsetxattr.c  141
-rw-r--r--  tests/basic/metadisp/metadisp.t  316
-rw-r--r--  tests/basic/metadisp/metadisp.vol  14
-rwxr-xr-x  tests/basic/mount-nfs-auth.t  340
-rw-r--r--  tests/basic/mount-options.disabled  3
-rwxr-xr-x  tests/basic/mount.t  8
-rw-r--r--  tests/basic/multiple-volume-shd-mux.t  46
-rw-r--r--  tests/basic/namespace.t  2
-rwxr-xr-x  tests/basic/nl-cache.t  30
-rw-r--r--  tests/basic/nufa.t  6
-rwxr-xr-x  tests/basic/op_errnos.t  4
-rw-r--r--  tests/basic/open-behind/open-behind.t  183
-rw-r--r--  tests/basic/open-behind/tester-fd.c  99
-rw-r--r--  tests/basic/open-behind/tester.c  444
-rw-r--r--  tests/basic/open-behind/tester.h  145
-rwxr-xr-x  tests/basic/playground/template-xlator-sanity.t  18
-rw-r--r--  tests/basic/posix/shared-statfs.t  11
-rw-r--r--  tests/basic/posix/zero-fill-enospace.c  7
-rw-r--r--  tests/basic/quick-read-with-upcall.t  13
-rwxr-xr-x  tests/basic/quota-anon-fd-nfs.t  2
-rwxr-xr-x  tests/basic/quota-nfs.t  2
-rwxr-xr-x  tests/basic/quota.t  2
-rwxr-xr-x  tests/basic/rpc-coverage.sh  14
-rwxr-xr-x  tests/basic/rpc-coverage.t  4
-rw-r--r--  tests/basic/sdfs-sanity.t  6
-rw-r--r--  tests/basic/seek.c (renamed from tests/basic/ec/seek.c)  0
-rw-r--r--  tests/basic/shd-mux-afr.t  70
-rw-r--r--  tests/basic/shd-mux-ec.t  75
-rw-r--r--  tests/basic/stats-dump.t  2
-rwxr-xr-x  tests/basic/tier/bug-1214222-directories_missing_after_attach_tier.t  61
-rw-r--r--  tests/basic/tier/bug-1260185-donot-allow-detach-commit-unnecessarily.t  47
-rwxr-xr-x  tests/basic/tier/ctr-rename-overwrite.t  50
-rw-r--r--  tests/basic/tier/file_lock.c  72
-rwxr-xr-x  tests/basic/tier/file_with_spaces.t  71
-rwxr-xr-x  tests/basic/tier/fops-during-migration-pause.t  89
-rwxr-xr-x  tests/basic/tier/fops-during-migration.t  105
-rw-r--r--  tests/basic/tier/frequency-counters.t  82
-rw-r--r--  tests/basic/tier/legacy-many.t  92
-rwxr-xr-x  tests/basic/tier/locked_file_migration.t  80
-rw-r--r--  tests/basic/tier/new-tier-cmds.t  129
-rw-r--r--  tests/basic/tier/readdir-during-migration.t  65
-rwxr-xr-x  tests/basic/tier/record-metadata-heat.t  106
-rw-r--r--  tests/basic/tier/tier-heald.t  98
-rw-r--r--  tests/basic/tier/tier-snapshot.t  47
-rwxr-xr-x  tests/basic/tier/tier.t  219
-rwxr-xr-x  tests/basic/tier/tier_lookup_heal.t  69
-rw-r--r--  tests/basic/tier/tierd_check.t  128
-rwxr-xr-x  tests/basic/tier/unlink-during-migration.t  92
-rwxr-xr-x  tests/basic/trace.t  55
-rw-r--r--  tests/basic/uss.t  36
-rw-r--r--  tests/basic/volfile-sanity.t  7
-rw-r--r--  tests/basic/volume-scale-shd-mux.t  116
-rw-r--r--  tests/basic/volume-snap-scheduler.t  49
-rwxr-xr-x  tests/basic/volume-snapshot-xml.t  6
-rw-r--r--  tests/basic/volume-status.t  28
-rw-r--r-- [-rwxr-xr-x]  tests/basic/volume.t  48
173 files changed, 11356 insertions, 2812 deletions
diff --git a/tests/basic/afr/afr-anon-inode-no-quorum.t b/tests/basic/afr/afr-anon-inode-no-quorum.t
new file mode 100644
index 00000000000..896ba0c9b2c
--- /dev/null
+++ b/tests/basic/afr/afr-anon-inode-no-quorum.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+#Test that anon-inode entry is not cleaned up as long as there exists at least
+#one valid entry
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.readdir-ahead off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST touch $M0/a $M0/b
+
+gfid_a=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/a))
+gfid_b=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/b))
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST mv $M0/a $M0/a-new
+TEST mv $M0/b $M0/b-new
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST ! ls $M0/a
+TEST ! ls $M0/b
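+#The gfids of the renamed files must still be parked under the special
+#anonymous-inode directory on the brick that missed the renames.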
+anon_inode_name=$(ls -a $B0/${V0}0 | grep glusterfs-anonymous-inode)
+TEST stat $B0/${V0}0/$anon_inode_name/$gfid_a
+TEST stat $B0/${V0}0/$anon_inode_name/$gfid_b
+#Make sure index heal doesn't happen after enabling heal
+TEST setfattr -x trusted.afr.$V0-client-0 $B0/${V0}1
+TEST rm -f $B0/${V0}1/.glusterfs/indices/xattrop/*
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+#Allow time for a scan
+sleep 5
+TEST stat $B0/${V0}0/$anon_inode_name/$gfid_a
+TEST stat $B0/${V0}0/$anon_inode_name/$gfid_b
+inum_b=$(STAT_INO $B0/${V0}0/$anon_inode_name/$gfid_b)
+TEST rm -f $M0/a-new
+TEST stat $M0/b-new
+
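+#After the heal, b-new on brick0 must reuse the inode that was parked under the
+#anonymous-inode directory, proving it was renamed into place rather than recreated.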
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}1
+EXPECT "$inum_b" STAT_INO $B0/${V0}0/b-new
+
+cleanup
diff --git a/tests/basic/afr/afr-anon-inode.t b/tests/basic/afr/afr-anon-inode.t
new file mode 100644
index 00000000000..f4cf37a2fa0
--- /dev/null
+++ b/tests/basic/afr/afr-anon-inode.t
@@ -0,0 +1,114 @@
+#!/bin/bash
+#Tests that the afr anonymous-inode functionality works as expected.
+#These are cases where entry-heal/name-heal does not know the entry name for an
+#inode, so such inodes are kept in a special directory.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+EXPECT "^1$" afr_private_key_value $V0 $M0 0 "use-anonymous-inode"
+TEST $CLI volume set $V0 cluster.use-anonymous-inode no
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^0$" afr_private_key_value $V0 $M0 0 "use-anonymous-inode"
+TEST $CLI volume set $V0 cluster.use-anonymous-inode yes
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "use-anonymous-inode"
+TEST mkdir -p $M0/d1/b $M0/d2/a
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST mv $M0/d2/a $M0/d1
+TEST mv $M0/d1/b $M0/d2
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+anon_inode_name=$(ls -a $B0/${V0}0 | grep glusterfs-anonymous-inode)
+TEST [[ -d $B0/${V0}1/$anon_inode_name ]]
+TEST [[ -d $B0/${V0}2/$anon_inode_name ]]
+anon_gfid=$(gf_get_gfid_xattr $B0/${V0}0/$anon_inode_name)
+EXPECT "$anon_gfid" gf_get_gfid_xattr $B0/${V0}1/$anon_inode_name
+EXPECT "$anon_gfid" gf_get_gfid_xattr $B0/${V0}2/$anon_inode_name
+
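+#The anonymous-inode directory must not be visible from the mount; 'ls -a' should
+#only list '.', '..', d1 and d2.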
+TEST ! ls $M0/$anon_inode_name
+EXPECT "^4$" echo $(ls -a $M0 | wc -l)
+
+#Test purging code path by shd
+TEST $CLI volume heal $V0 disable
+TEST mkdir $M0/l0 $M0/l1 $M0/l2
+TEST touch $M0/del-file $M0/del-file-nolink $M0/l0/file
+TEST ln $M0/del-file $M0/del-file-link
+TEST ln $M0/l0/file $M0/l1/file-link1
+TEST ln $M0/l0/file $M0/l2/file-link2
+TEST mkdir -p $M0/del-recursive-dir/d1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST rm -f $M0/del-file $M0/del-file-nolink
+TEST rm -rf $M0/del-recursive-dir
+TEST mv $M0/d1/a $M0/d2
+TEST mv $M0/l0/file $M0/l0/renamed-file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 0
+
+nolink_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/del-file-nolink))
+link_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/del-file))
+dir_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/del-recursive-dir))
+rename_dir_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/d1/a))
+rename_file_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/l0/file))
+TEST ! stat $M0/del-file
+TEST stat $B0/${V0}0/$anon_inode_name/$link_gfid
+TEST ! stat $M0/del-file-nolink
+TEST ! stat $B0/${V0}0/$anon_inode_name/$nolink_gfid
+TEST ! stat $M0/del-recursive-dir
+TEST stat $B0/${V0}0/$anon_inode_name/$dir_gfid
+TEST ! stat $M0/d1/a
+TEST stat $B0/${V0}0/$anon_inode_name/$rename_dir_gfid
+TEST ! stat $M0/l0/file
+TEST stat $B0/${V0}0/$anon_inode_name/$rename_file_gfid
+
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST mv $M0/l1/file-link1 $M0/l1/renamed-file-link1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 1
+TEST ! stat $M0/l1/file-link1
+TEST stat $B0/${V0}1/$anon_inode_name/$rename_file_gfid
+
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST mv $M0/l2/file-link2 $M0/l2/renamed-file-link2
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 2
+TEST ! stat $M0/l2/file-link2
+TEST stat $B0/${V0}2/$anon_inode_name/$rename_file_gfid
+
+#Simulate only anon-inodes present in all bricks
+TEST rm -f $M0/l0/renamed-file $M0/l1/renamed-file-link1 $M0/l2/renamed-file-link2
+
+#Test that shd doesn't cleanup anon-inodes when some bricks are down
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST $CLI volume heal $V0 enable
+$CLI volume heal $V0
+sleep 5 #Allow time for completion of one scan
+TEST stat $B0/${V0}0/$anon_inode_name/$link_gfid
+TEST stat $B0/${V0}0/$anon_inode_name/$rename_dir_gfid
+TEST stat $B0/${V0}0/$anon_inode_name/$dir_gfid
+rename_dir_inum=$(STAT_INO $B0/${V0}0/$anon_inode_name/$rename_dir_gfid)
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 1
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}1
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}2
+
+#Test that rename indeed happened instead of rmdir/mkdir
+renamed_dir_inum=$(STAT_INO $B0/${V0}0/d2/a)
+EXPECT "$rename_dir_inum" echo $renamed_dir_inum
+cleanup;
diff --git a/tests/basic/afr/afr-seek.t b/tests/basic/afr/afr-seek.t
new file mode 100644
index 00000000000..c12ee011660
--- /dev/null
+++ b/tests/basic/afr/afr-seek.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+SEEK=$(dirname $0)/seek
+build_tester $(dirname $0)/../seek.c -o ${SEEK}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST mkdir -p $B0/${V0}{0..2}
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
+
+TEST $CLI volume start $V0
+
+TEST $GFS -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST ${SEEK} create ${M0}/test 0 1 1048576 1
+# Determine underlying filesystem allocation block size
+BSIZE="$(($(${SEEK} scan ${M0}/test hole 0) * 2))"
+
+TEST ${SEEK} create ${M0}/test 0 ${BSIZE} $((${BSIZE} * 4 + 512)) ${BSIZE}
+
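+# The checks below verify that SEEK_DATA/SEEK_HOLE report data in [0, BSIZE) and
+# [BSIZE*4, BSIZE*5+512), a hole in between, and ENXIO for offsets at or past EOF.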
+EXPECT "^0$" ${SEEK} scan ${M0}/test data 0
+EXPECT "^$((${BSIZE} / 2))$" ${SEEK} scan ${M0}/test data $((${BSIZE} / 2))
+EXPECT "^$((${BSIZE} - 1))$" ${SEEK} scan ${M0}/test data $((${BSIZE} - 1))
+EXPECT "^$((${BSIZE} * 4))$" ${SEEK} scan ${M0}/test data ${BSIZE}
+EXPECT "^$((${BSIZE} * 4))$" ${SEEK} scan ${M0}/test data $((${BSIZE} * 4))
+EXPECT "^$((${BSIZE} * 5))$" ${SEEK} scan ${M0}/test data $((${BSIZE} * 5))
+EXPECT "^$((${BSIZE} * 5 + 511))$" ${SEEK} scan ${M0}/test data $((${BSIZE} * 5 + 511))
+EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test data $((${BSIZE} * 5 + 512))
+EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test data $((${BSIZE} * 6))
+
+EXPECT "^${BSIZE}$" ${SEEK} scan ${M0}/test hole 0
+EXPECT "^${BSIZE}$" ${SEEK} scan ${M0}/test hole $((${BSIZE} / 2))
+EXPECT "^${BSIZE}$" ${SEEK} scan ${M0}/test hole $((${BSIZE} - 1))
+EXPECT "^${BSIZE}$" ${SEEK} scan ${M0}/test hole ${BSIZE}
+EXPECT "^$((${BSIZE} * 5 + 512))$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 4))
+EXPECT "^$((${BSIZE} * 5 + 512))$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 5))
+EXPECT "^$((${BSIZE} * 5 + 512))$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 5 + 511))
+EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 5 + 512))
+EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 6))
+
+rm -f ${SEEK}
+cleanup
+
+# Centos6 regression slaves seem to not support SEEK_DATA/SEEK_HOLE
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/basic/afr/arbiter-mount.t b/tests/basic/afr/arbiter-mount.t
index da99096f81f..404d334d2f9 100644
--- a/tests/basic/afr/arbiter-mount.t
+++ b/tests/basic/afr/arbiter-mount.t
@@ -4,6 +4,9 @@
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../afr.rc
. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
cleanup;
#Check that mounting fails when only arbiter brick is up.
diff --git a/tests/basic/afr/durability-off.t b/tests/basic/afr/durability-off.t
index 155ffa09ef0..6e0f18b88f8 100644
--- a/tests/basic/afr/durability-off.t
+++ b/tests/basic/afr/durability-off.t
@@ -26,6 +26,8 @@ TEST $CLI volume heal $V0
EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
EXPECT "^0$" echo $($CLI volume profile $V0 info | grep -w FSYNC | wc -l)
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
#Test that fsyncs happen when durability is on
TEST $CLI volume set $V0 cluster.ensure-durability on
TEST $CLI volume set $V0 performance.strict-write-ordering on
diff --git a/tests/basic/afr/entry-self-heal-anon-dir-off.t b/tests/basic/afr/entry-self-heal-anon-dir-off.t
new file mode 100644
index 00000000000..7bb6ee14193
--- /dev/null
+++ b/tests/basic/afr/entry-self-heal-anon-dir-off.t
@@ -0,0 +1,459 @@
+#!/bin/bash
+
+#This file checks if missing entry self-heal and entry self-heal are working
+#as expected.
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+function get_file_type {
+ stat -c "%a:%F:%g:%t:%T:%u" $1
+}
+
+function diff_dirs {
+ diff <(ls $1 | sort) <(ls $2 | sort)
+}
+
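+#heal_status prints a Y/N in-sync flag followed by the afr pending and dirty
+#xattrs of both copies; "Y" followed by all-zero xattrs means fully healed.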
+function heal_status {
+ local f1_path="${1}/${3}"
+ local f2_path="${2}/${3}"
+ local insync=""
+ diff_dirs $f1_path $f2_path
+ if [ $? -eq 0 ];
+ then
+ insync="Y"
+ else
+ insync="N"
+ fi
+ local xattr11=$(get_hex_xattr trusted.afr.$V0-client-0 $f1_path)
+ local xattr12=$(get_hex_xattr trusted.afr.$V0-client-1 $f1_path)
+ local xattr21=$(get_hex_xattr trusted.afr.$V0-client-0 $f2_path)
+ local xattr22=$(get_hex_xattr trusted.afr.$V0-client-1 $f2_path)
+ local dirty1=$(get_hex_xattr trusted.afr.dirty $f1_path)
+ local dirty2=$(get_hex_xattr trusted.afr.dirty $f2_path)
+ if [ -z $xattr11 ]; then xattr11="000000000000000000000000"; fi
+ if [ -z $xattr12 ]; then xattr12="000000000000000000000000"; fi
+ if [ -z $xattr21 ]; then xattr21="000000000000000000000000"; fi
+ if [ -z $xattr22 ]; then xattr22="000000000000000000000000"; fi
+ if [ -z $dirty1 ]; then dirty1="000000000000000000000000"; fi
+ if [ -z $dirty2 ]; then dirty2="000000000000000000000000"; fi
+ echo ${insync}${xattr11}${xattr12}${xattr21}${xattr22}${dirty1}${dirty2}
+}
+
+function is_heal_done {
+ local zero_xattr="000000000000000000000000"
+ if [ "$(heal_status $@)" == "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" ];
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+function print_pending_heals {
+ local result=":"
+ for i in "$@";
+ do
+ if [ "N" == $(is_heal_done $B0/${V0}0 $B0/${V0}1 $i) ];
+ then
+ result="$result:$i"
+ fi
+ done
+#To prevent any match for EXPECT_WITHIN, print a char non-existent in file-names
+ if [ $result == ":" ]; then result="~"; fi
+ echo $result
+}
+
+zero_xattr="000000000000000000000000"
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume set $V0 cluster.use-anonymous-inode off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.readdir-ahead off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --use-readdirp=no $M0
+cd $M0
+#_me_ dirs are where missing-entry self-heal happens, _heal dirs are where dir self-heal happens
+#spb is split-brain, fool is all fool
+
+#source_self_accusing means there exists a source and a sink which self-accuses.
+#This simulates failures where fops failed on a brick without the brick going down.
+#Something like EACCES/EDQUOT etc.
+
+TEST mkdir spb_heal spb spb_me_heal spb_me fool_heal fool_me v1_fool_heal v1_fool_me source_creations_heal source_deletions_heal source_creations_me source_deletions_me v1_dirty_me v1_dirty_heal source_self_accusing
+TEST mkfifo source_deletions_heal/fifo
+TEST mknod source_deletions_heal/block b 4 5
+TEST mknod source_deletions_heal/char c 1 5
+TEST touch source_deletions_heal/file
+TEST ln -s source_deletions_heal/file source_deletions_heal/slink
+TEST mkdir source_deletions_heal/dir1
+TEST mkdir source_deletions_heal/dir1/dir2
+
+TEST mkfifo source_deletions_me/fifo
+TEST mknod source_deletions_me/block b 4 5
+TEST mknod source_deletions_me/char c 1 5
+TEST touch source_deletions_me/file
+TEST ln -s source_deletions_me/file source_deletions_me/slink
+TEST mkdir source_deletions_me/dir1
+TEST mkdir source_deletions_me/dir1/dir2
+
+TEST mkfifo source_self_accusing/fifo
+TEST mknod source_self_accusing/block b 4 5
+TEST mknod source_self_accusing/char c 1 5
+TEST touch source_self_accusing/file
+TEST ln -s source_self_accusing/file source_self_accusing/slink
+TEST mkdir source_self_accusing/dir1
+TEST mkdir source_self_accusing/dir1/dir2
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+TEST touch spb_heal/0 spb/0 spb_me_heal/0 spb_me/0 fool_heal/0 fool_me/0 v1_fool_heal/0 v1_fool_me/0 v1_dirty_heal/0 v1_dirty_me/0
+TEST rm -rf source_deletions_heal/fifo source_deletions_heal/block source_deletions_heal/char source_deletions_heal/file source_deletions_heal/slink source_deletions_heal/dir1
+TEST rm -rf source_deletions_me/fifo source_deletions_me/block source_deletions_me/char source_deletions_me/file source_deletions_me/slink source_deletions_me/dir1
+TEST rm -rf source_self_accusing/fifo source_self_accusing/block source_self_accusing/char source_self_accusing/file source_self_accusing/slink source_self_accusing/dir1
+
+#Test that the files are deleted
+TEST ! stat $B0/${V0}1/source_deletions_heal/fifo
+TEST ! stat $B0/${V0}1/source_deletions_heal/block
+TEST ! stat $B0/${V0}1/source_deletions_heal/char
+TEST ! stat $B0/${V0}1/source_deletions_heal/file
+TEST ! stat $B0/${V0}1/source_deletions_heal/slink
+TEST ! stat $B0/${V0}1/source_deletions_heal/dir1
+TEST ! stat $B0/${V0}1/source_deletions_me/fifo
+TEST ! stat $B0/${V0}1/source_deletions_me/block
+TEST ! stat $B0/${V0}1/source_deletions_me/char
+TEST ! stat $B0/${V0}1/source_deletions_me/file
+TEST ! stat $B0/${V0}1/source_deletions_me/slink
+TEST ! stat $B0/${V0}1/source_deletions_me/dir1
+TEST ! stat $B0/${V0}1/source_self_accusing/fifo
+TEST ! stat $B0/${V0}1/source_self_accusing/block
+TEST ! stat $B0/${V0}1/source_self_accusing/char
+TEST ! stat $B0/${V0}1/source_self_accusing/file
+TEST ! stat $B0/${V0}1/source_self_accusing/slink
+TEST ! stat $B0/${V0}1/source_self_accusing/dir1
+
+
+TEST mkfifo source_creations_heal/fifo
+TEST mknod source_creations_heal/block b 4 5
+TEST mknod source_creations_heal/char c 1 5
+TEST touch source_creations_heal/file
+TEST ln -s source_creations_heal/file source_creations_heal/slink
+TEST mkdir source_creations_heal/dir1
+TEST mkdir source_creations_heal/dir1/dir2
+
+TEST mkfifo source_creations_me/fifo
+TEST mknod source_creations_me/block b 4 5
+TEST mknod source_creations_me/char c 1 5
+TEST touch source_creations_me/file
+TEST ln -s source_creations_me/file source_creations_me/slink
+TEST mkdir source_creations_me/dir1
+TEST mkdir source_creations_me/dir1/dir2
+
+$CLI volume stop $V0
+
+#simulate fool fool scenario for fool_* dirs
+setfattr -x trusted.afr.$V0-client-0 $B0/${V0}1/{fool_heal,fool_me}
+setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}1/{fool_heal,fool_me}
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}1/{v1_fool_heal,v1_fool_me}
+
+#Simulate v1-dirty(self-accusing but no pending ops on others) scenario for v1-dirty
+setfattr -x trusted.afr.$V0-client-0 $B0/${V0}1/v1_dirty_{heal,me}
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}1/v1_dirty_{heal,me}
+
+$CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+TEST touch spb_heal/1 spb/0 spb_me_heal/1 spb_me/0 fool_heal/1 fool_me/1 v1_fool_heal/1 v1_fool_me/1
+
+$CLI volume stop $V0
+
+#simulate fool fool scenario for fool_* dirs
+setfattr -x trusted.afr.$V0-client-1 $B0/${V0}0/{fool_heal,fool_me}
+setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}1/{fool_heal,fool_me}
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1/{v1_fool_heal,v1_fool_me}
+
+#simulate self-accusing for source_self_accusing
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000006 $B0/${V0}0/source_self_accusing
+
+$CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+# Check if conservative merges happened correctly on _me_ dirs
+TEST stat spb_me_heal/1
+TEST stat $B0/${V0}0/spb_me_heal/1
+TEST stat $B0/${V0}1/spb_me_heal/1
+
+TEST stat spb_me_heal/0
+TEST stat $B0/${V0}0/spb_me_heal/0
+TEST stat $B0/${V0}1/spb_me_heal/0
+
+TEST stat fool_me/1
+TEST stat $B0/${V0}0/fool_me/1
+TEST stat $B0/${V0}1/fool_me/1
+
+TEST stat fool_me/0
+TEST stat $B0/${V0}0/fool_me/0
+TEST stat $B0/${V0}1/fool_me/0
+
+TEST stat v1_fool_me/0
+TEST stat $B0/${V0}0/v1_fool_me/0
+TEST stat $B0/${V0}1/v1_fool_me/0
+
+TEST stat v1_fool_me/1
+TEST stat $B0/${V0}0/v1_fool_me/1
+TEST stat $B0/${V0}1/v1_fool_me/1
+
+TEST stat v1_dirty_me/0
+TEST stat $B0/${V0}0/v1_dirty_me/0
+TEST stat $B0/${V0}1/v1_dirty_me/0
+
+#Check if files that have gfid-mismatches in _me_ are giving EIO
+TEST ! stat spb_me/0
+
+#Check if stale files are deleted on access
+TEST ! stat source_deletions_me/fifo
+TEST ! stat $B0/${V0}0/source_deletions_me/fifo
+TEST ! stat $B0/${V0}1/source_deletions_me/fifo
+TEST ! stat source_deletions_me/block
+TEST ! stat $B0/${V0}0/source_deletions_me/block
+TEST ! stat $B0/${V0}1/source_deletions_me/block
+TEST ! stat source_deletions_me/char
+TEST ! stat $B0/${V0}0/source_deletions_me/char
+TEST ! stat $B0/${V0}1/source_deletions_me/char
+TEST ! stat source_deletions_me/file
+TEST ! stat $B0/${V0}0/source_deletions_me/file
+TEST ! stat $B0/${V0}1/source_deletions_me/file
+TEST ! stat source_deletions_me/file
+TEST ! stat $B0/${V0}0/source_deletions_me/file
+TEST ! stat $B0/${V0}1/source_deletions_me/file
+TEST ! stat source_deletions_me/dir1/dir2
+TEST ! stat $B0/${V0}0/source_deletions_me/dir1/dir2
+TEST ! stat $B0/${V0}1/source_deletions_me/dir1/dir2
+TEST ! stat source_deletions_me/dir1
+TEST ! stat $B0/${V0}0/source_deletions_me/dir1
+TEST ! stat $B0/${V0}1/source_deletions_me/dir1
+
+#Test if the files created as part of access are healed correctly
+r=$(get_file_type source_creations_me/fifo)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/fifo
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/fifo
+TEST [ -p source_creations_me/fifo ]
+
+r=$(get_file_type source_creations_me/block)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/block
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/block
+EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}1/source_creations_me/block
+EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}0/source_creations_me/block
+TEST [ -b source_creations_me/block ]
+
+r=$(get_file_type source_creations_me/char)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/char
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/char
+EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}1/source_creations_me/char
+EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}0/source_creations_me/char
+TEST [ -c source_creations_me/char ]
+
+r=$(get_file_type source_creations_me/file)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/file
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/file
+TEST [ -f source_creations_me/file ]
+
+r=$(get_file_type source_creations_me/slink)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/slink
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/slink
+TEST [ -h source_creations_me/slink ]
+
+r=$(get_file_type source_creations_me/dir1/dir2)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/dir1/dir2
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/dir1/dir2
+TEST [ -d source_creations_me/dir1/dir2 ]
+
+r=$(get_file_type source_creations_me/dir1)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/dir1
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/dir1
+TEST [ -d source_creations_me/dir1 ]
+
+#Trigger heal and check _heal dirs are healed properly
+#Trigger change in event generation number. That way inodes would get refreshed during lookup
+TEST kill_brick $V0 $H0 $B0/${V0}1
+$CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+TEST stat spb_heal
+TEST stat spb_me_heal
+TEST stat fool_heal
+TEST stat fool_me
+TEST stat v1_fool_heal
+TEST stat v1_fool_me
+TEST stat source_deletions_heal
+TEST stat source_deletions_me
+TEST stat source_self_accusing
+TEST stat source_creations_heal
+TEST stat source_creations_me
+TEST stat v1_dirty_heal
+TEST stat v1_dirty_me
+TEST $CLI volume stop $V0
+TEST rm -rf $B0/${V0}{0,1}/.glusterfs/indices/xattrop/*
+
+$CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#Create base entry in indices/xattrop
+echo "Data" > $M0/FILE
+rm -f $M0/FILE
+EXPECT "1" count_index_entries $B0/${V0}0
+EXPECT "1" count_index_entries $B0/${V0}1
+
+TEST $CLI volume stop $V0;
+
+#Create entries for fool_heal and fool_me to ensure they are fully healed and dirty xattrs erased, before triggering index heal
+create_brick_xattrop_entry $B0/${V0}0 fool_heal fool_me source_creations_heal/dir1
+
+$CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+$CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0;
+EXPECT_WITHIN $HEAL_TIMEOUT "~" print_pending_heals spb_heal spb_me_heal fool_heal fool_me v1_fool_heal v1_fool_me source_deletions_heal source_deletions_me source_creations_heal source_creations_me v1_dirty_heal v1_dirty_me source_self_accusing
+
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 spb_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 spb_me_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 fool_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 fool_me
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_fool_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_fool_me
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_deletions_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_deletions_me
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_self_accusing
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_creations_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_creations_me
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_dirty_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_dirty_me
+
+#Don't access the files/dirs from mount point as that may cause self-heals
+# Check if conservative merges happened correctly on heal dirs
+TEST stat $B0/${V0}0/spb_heal/1
+TEST stat $B0/${V0}1/spb_heal/1
+
+TEST stat $B0/${V0}0/spb_heal/0
+TEST stat $B0/${V0}1/spb_heal/0
+
+TEST stat $B0/${V0}0/fool_heal/1
+TEST stat $B0/${V0}1/fool_heal/1
+
+TEST stat $B0/${V0}0/fool_heal/0
+TEST stat $B0/${V0}1/fool_heal/0
+
+TEST stat $B0/${V0}0/v1_fool_heal/0
+TEST stat $B0/${V0}1/v1_fool_heal/0
+
+TEST stat $B0/${V0}0/v1_fool_heal/1
+TEST stat $B0/${V0}1/v1_fool_heal/1
+
+TEST stat $B0/${V0}0/v1_dirty_heal/0
+TEST stat $B0/${V0}1/v1_dirty_heal/0
+
+#Check if files that have gfid-mismatches in spb are giving EIO
+TEST ! stat spb/0
+
+#Check if stale files are deleted on access
+TEST ! stat $B0/${V0}0/source_deletions_heal/fifo
+TEST ! stat $B0/${V0}1/source_deletions_heal/fifo
+TEST ! stat $B0/${V0}0/source_deletions_heal/block
+TEST ! stat $B0/${V0}1/source_deletions_heal/block
+TEST ! stat $B0/${V0}0/source_deletions_heal/char
+TEST ! stat $B0/${V0}1/source_deletions_heal/char
+TEST ! stat $B0/${V0}0/source_deletions_heal/file
+TEST ! stat $B0/${V0}1/source_deletions_heal/file
+TEST ! stat $B0/${V0}0/source_deletions_heal/file
+TEST ! stat $B0/${V0}1/source_deletions_heal/file
+TEST ! stat $B0/${V0}0/source_deletions_heal/dir1/dir2
+TEST ! stat $B0/${V0}1/source_deletions_heal/dir1/dir2
+TEST ! stat $B0/${V0}0/source_deletions_heal/dir1
+TEST ! stat $B0/${V0}1/source_deletions_heal/dir1
+
+#Check if stale files are deleted on access
+TEST ! stat $B0/${V0}0/source_self_accusing/fifo
+TEST ! stat $B0/${V0}1/source_self_accusing/fifo
+TEST ! stat $B0/${V0}0/source_self_accusing/block
+TEST ! stat $B0/${V0}1/source_self_accusing/block
+TEST ! stat $B0/${V0}0/source_self_accusing/char
+TEST ! stat $B0/${V0}1/source_self_accusing/char
+TEST ! stat $B0/${V0}0/source_self_accusing/file
+TEST ! stat $B0/${V0}1/source_self_accusing/file
+TEST ! stat $B0/${V0}0/source_self_accusing/file
+TEST ! stat $B0/${V0}1/source_self_accusing/file
+TEST ! stat $B0/${V0}0/source_self_accusing/dir1/dir2
+TEST ! stat $B0/${V0}1/source_self_accusing/dir1/dir2
+TEST ! stat $B0/${V0}0/source_self_accusing/dir1
+TEST ! stat $B0/${V0}1/source_self_accusing/dir1
+
+#Test if the files created as part of full self-heal are healed correctly
+r=$(get_file_type $B0/${V0}0/source_creations_heal/fifo)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/fifo
+TEST [ -p $B0/${V0}0/source_creations_heal/fifo ]
+EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}1/source_creations_heal/block
+EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}0/source_creations_heal/block
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/block)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/block
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/char)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/char
+EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}1/source_creations_heal/char
+EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}0/source_creations_heal/char
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/file)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/file
+TEST [ -f $B0/${V0}0/source_creations_heal/file ]
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/slink)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/slink
+TEST [ -h $B0/${V0}0/source_creations_heal/slink ]
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/dir1/dir2)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/dir1/dir2
+TEST [ -d $B0/${V0}0/source_creations_heal/dir1/dir2 ]
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/dir1)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/dir1
+TEST [ -d $B0/${V0}0/source_creations_heal/dir1 ]
+
+cd -
+
+#Anonymous directory shouldn't be created
+TEST mkdir $M0/rename-dir
+before_rename=$(STAT_INO $B0/${V0}1/rename-dir)
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST mv $M0/rename-dir $M0/new-name
+TEST $CLI volume start $V0 force
+#'spb' is in split-brain so pending-heal-count will be 2
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+after_rename=$(STAT_INO $B0/${V0}1/new-name)
+EXPECT "0" echo $(ls -a $B0/${V0}0/ | grep anonymous-inode | wc -l)
+EXPECT "0" echo $(ls -a $B0/${V0}1/ | grep anonymous-inode | wc -l)
+EXPECT_NOT "$before_rename" echo $after_rename
+cleanup
diff --git a/tests/basic/afr/entry-self-heal.t b/tests/basic/afr/entry-self-heal.t
index 3c900fdcf9a..0c1da7d211e 100644
--- a/tests/basic/afr/entry-self-heal.t
+++ b/tests/basic/afr/entry-self-heal.t
@@ -79,6 +79,9 @@ TEST $CLI volume set $V0 performance.open-behind off
TEST $CLI volume set $V0 performance.stat-prefetch off
TEST $CLI volume set $V0 performance.io-cache off
TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
TEST $CLI volume start $V0
TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --use-readdirp=no $M0
diff --git a/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t b/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t
index f4aa351e461..35e295dc170 100644
--- a/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t
+++ b/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t
@@ -168,8 +168,8 @@ TEST [ "$gfid_1" != "$gfid_2" ]
#We know that second brick has the bigger size file
BIGGER_FILE_MD5=$(md5sum $B0/${V0}1/f3 | cut -d\ -f1)
-TEST ls $M0/f3
-TEST cat $M0/f3
+TEST ls $M0 #Trigger entry heal via readdir inode refresh
+TEST cat $M0/f3 #Trigger data heal via readv inode refresh
EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
#gfid split-brain should be resolved
@@ -215,8 +215,8 @@ TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
-TEST ls $M0/f4
-TEST cat $M0/f4
+TEST ls $M0 #Trigger entry heal via readdir inode refresh
+TEST cat $M0/f4 #Trigger data heal via readv inode refresh
EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
#gfid split-brain should be resolved
@@ -227,4 +227,3 @@ HEALED_MD5=$(md5sum $B0/${V0}2/f4 | cut -d\ -f1)
TEST [ "$MAJORITY_MD5" == "$HEALED_MD5" ]
cleanup;
-#G_TESTDEF_TEST_STATUS_NETBSD7=1501390
diff --git a/tests/basic/afr/gfid-self-heal.t b/tests/basic/afr/gfid-self-heal.t
index b54edbcae85..5a530681186 100644
--- a/tests/basic/afr/gfid-self-heal.t
+++ b/tests/basic/afr/gfid-self-heal.t
@@ -50,6 +50,10 @@ TEST kill_brick $V0 $H0 $B0/${V0}0
TEST touch $M0/a
gfid_1=$(gf_get_gfid_xattr $B0/${V0}1/a)
TEST touch $B0/${V0}0/a
+# storage/posix considers a file without a gfid that was changed less than a
+# second ago to be non-existent, so we need to wait for a second to force
+# posix to treat it as a valid file that simply lacks a gfid.
+sleep 1
$CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
TEST stat $M0/a
@@ -62,6 +66,10 @@ TEST kill_brick $V0 $H0 $B0/${V0}0
TEST touch $M0/b
TEST mkdir $B0/${V0}0/b
TEST setfattr -x trusted.afr.$V0-client-0 $B0/${V0}1
+# storage/posix considers a file without a gfid that was changed less than a
+# second ago to be non-existent, so we need to wait for a second to force
+# posix to treat it as a valid file that simply lacks a gfid.
+sleep 1
$CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
TEST ! stat $M0/b
@@ -71,6 +79,10 @@ TEST "[[ -z \"$gfid_0\" ]]"
#Check gfid assigning doesn't happen when there is type mismatch
TEST touch $B0/${V0}1/c
TEST mkdir $B0/${V0}0/c
+# storage/posix considers a file without a gfid that was changed less than a
+# second ago to be non-existent, so we need to wait for a second to force
+# posix to treat it as a valid file that simply lacks a gfid.
+sleep 1
TEST ! stat $M0/c
gfid_1=$(gf_get_gfid_xattr $B0/${V0}1/c)
gfid_0=$(gf_get_gfid_xattr $B0/${V0}0/c)
@@ -81,6 +93,10 @@ TEST "[[ -z \"$gfid_0\" ]]"
# gfid split-brain
TEST kill_brick $V0 $H0 $B0/${V0}0
TEST touch $B0/${V0}1/d
+# storage/posix considers a file without a gfid that was changed less than a
+# second ago to be non-existent, so we need to wait for a second to force
+# posix to treat it as a valid file that simply lacks a gfid.
+sleep 1
TEST ! stat $M0/d
gfid_1=$(gf_get_gfid_xattr $B0/${V0}1/d)
TEST "[[ -z \"$gfid_1\" ]]"
diff --git a/tests/basic/afr/granular-esh/cli.t b/tests/basic/afr/granular-esh/cli.t
index cadaf8ffaf9..10b6c6398da 100644
--- a/tests/basic/afr/granular-esh/cli.t
+++ b/tests/basic/afr/granular-esh/cli.t
@@ -11,7 +11,7 @@ TESTS_EXPECTED_IN_LOOP=4
TEST glusterd
TEST pidof glusterd
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
# Test that enabling the option should work on a newly created volume
TEST $CLI volume set $V0 cluster.granular-entry-heal on
TEST $CLI volume set $V0 cluster.granular-entry-heal off
@@ -25,34 +25,6 @@ TEST $CLI volume start $V1
TEST ! $CLI volume heal $V1 granular-entry-heal enable
TEST ! $CLI volume heal $V1 granular-entry-heal disable
-#######################
-###### TIER TEST ######
-#######################
-# Execute the same command on a disperse + replicate tiered volume and make
-# sure the option is set on the replicate leg of the volume
-TEST $CLI volume tier $V1 attach replica 2 $H0:$B0/${V1}{3,4}
-TEST $CLI volume heal $V1 granular-entry-heal enable
-EXPECT "enable" volume_get_field $V1 cluster.granular-entry-heal
-TEST $CLI volume heal $V1 granular-entry-heal disable
-EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
-
-# Kill a disperse brick and make heal be pending on the volume.
-TEST kill_brick $V1 $H0 $B0/${V1}0
-
-# Now make sure that one offline brick in disperse does not affect enabling the
-# option on the volume.
-TEST $CLI volume heal $V1 granular-entry-heal enable
-EXPECT "enable" volume_get_field $V1 cluster.granular-entry-heal
-TEST $CLI volume heal $V1 granular-entry-heal disable
-EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
-
-# Now kill a replicate brick.
-TEST kill_brick $V1 $H0 $B0/${V1}3
-# Now make sure that one offline brick in replicate causes the command to be
-# failed.
-TEST ! $CLI volume heal $V1 granular-entry-heal enable
-EXPECT "disable" volume_get_field $V1 cluster.granular-entry-heal
-
######################
### REPLICATE TEST ###
######################
@@ -136,7 +108,7 @@ TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
TEST $CLI volume reset $V0
# Ensure that granular entry heal is also disabled
EXPECT "no" volume_get_field $V0 cluster.granular-entry-heal
-EXPECT "on" volume_get_field $V0 cluster.entry-self-heal
+EXPECT "off" volume_get_field $V0 cluster.entry-self-heal
cleanup
#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1399038
diff --git a/tests/basic/afr/halo.t b/tests/basic/afr/halo.t
new file mode 100644
index 00000000000..3f61f5a0402
--- /dev/null
+++ b/tests/basic/afr/halo.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+#Tests that basic halo functionality works as expected
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+function get_up_child()
+{
+ if [ "1" == $(afr_private_key_value $V0 $M0 0 "child_up\[0\]") ];
+ then
+ echo 0
+ elif [ "1" == $(afr_private_key_value $V0 $M0 0 "child_up\[1\]") ]
+ then
+ echo 1
+ fi
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.halo-enabled yes
+TEST $CLI volume set $V0 cluster.halo-max-replicas 1
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
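+#With both bricks up, both children should be marked up inside the halo and have
+#a measured latency (not -1).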
+EXPECT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[0\]"
+EXPECT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[1\]"
+EXPECT_NOT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[0\]"
+EXPECT_NOT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[1\]"
+
+up_id=$(get_up_child)
+TEST [[ ! -z "$up_id" ]]
+
+down_id=$((1-up_id))
+
+TEST kill_brick $V0 $H0 $B0/${V0}${up_id}
+#As max-replicas is configured to be 1, down_child should be up now
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[${down_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "child_up\[${down_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" afr_private_key_value $V0 $M0 0 "halo_child_up\[${up_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" afr_private_key_value $V0 $M0 0 "child_up\[${up_id}\]"
+EXPECT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[${up_id}\]"
+EXPECT_NOT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[${down_id}\]"
+
+#Bring the brick back up and the state should be restored
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[${up_id}\]"
+
+up_id=$(get_up_child)
+TEST [[ ! -z "$up_id" ]]
+down_id=$((1-up_id))
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[${down_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" afr_private_key_value $V0 $M0 0 "child_up\[${down_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[${up_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "child_up\[${up_id}\]"
+EXPECT_NOT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[0\]"
+EXPECT_NOT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[1\]"
+
+cleanup;
diff --git a/tests/basic/afr/rename-data-loss.t b/tests/basic/afr/rename-data-loss.t
new file mode 100644
index 00000000000..256ee2aafce
--- /dev/null
+++ b/tests/basic/afr/rename-data-loss.t
@@ -0,0 +1,72 @@
+#!/bin/bash
+#Self-heal tests
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 write-behind off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+cd $M0
+TEST `echo "line1" >> file1`
+TEST mkdir dir1
+TEST mkdir dir2
+TEST mkdir -p dir1/dira/dirb
+TEST `echo "line1">>dir1/dira/dirb/file1`
+TEST mkdir delete_me
+TEST `echo "line1" >> delete_me/file1`
+
+#Only brick0 witnesses the second writes, while brick1 is down.
+TEST kill_brick $V0 $H0 $B0/brick1
+TEST `echo "line2" >> file1`
+TEST `echo "line2" >> dir1/dira/dirb/file1`
+TEST `echo "line2" >> delete_me/file1`
+
+#Toggle the bricks that are up/down.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/brick0
+
+#Rename when the 'source' brick0 for data-selfheals is down.
+mv file1 file2
+mv dir1/dira dir2
+
+#Delete a dir when brick0 is down.
+rm -rf delete_me
+cd -
+
+#Bring everything up and trigger heal
+TEST $CLI volume set $V0 self-heal-daemon on
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/brick0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/brick1
+
+#Remount to avoid reading from caches
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+EXPECT "line2" tail -1 $M0/file2
+EXPECT "line2" tail -1 $M0/dir2/dira/dirb/file1
+TEST ! stat $M0/delete_me/file1
+TEST ! stat $M0/delete_me
+
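+#The anonymous-inode directory itself remains on the bricks even after all the
+#parked entries have been healed away.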
+anon_inode_name=$(ls -a $B0/brick0 | grep glusterfs-anonymous-inode)
+TEST [[ -d $B0/brick0/$anon_inode_name ]]
+TEST [[ -d $B0/brick1/$anon_inode_name ]]
+cleanup
diff --git a/tests/basic/afr/root-squash-self-heal.t b/tests/basic/afr/root-squash-self-heal.t
index c4fab0a35b2..6e12098465a 100644
--- a/tests/basic/afr/root-squash-self-heal.t
+++ b/tests/basic/afr/root-squash-self-heal.t
@@ -11,6 +11,9 @@ TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
TEST $CLI volume set $V0 performance.stat-prefetch off
TEST $CLI volume set $V0 self-heal-daemon off
TEST $CLI volume set $V0 server.root-squash on
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
TEST $CLI volume start $V0
TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --no-root-squash=yes --use-readdirp=no $M0
TEST kill_brick $V0 $H0 $B0/${V0}0
diff --git a/tests/basic/afr/split-brain-favorite-child-policy-client-side-healing.t b/tests/basic/afr/split-brain-favorite-child-policy-client-side-healing.t
new file mode 100644
index 00000000000..7c249c4bcbd
--- /dev/null
+++ b/tests/basic/afr/split-brain-favorite-child-policy-client-side-healing.t
@@ -0,0 +1,124 @@
+#!/bin/bash
+
+#Test the client side split-brain resolution
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+GET_MDATA_PATH=$(dirname $0)/../../utils
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
+TEST glusterd
+TEST pidof glusterd
+
+count_files () {
+ ls $1 | wc -l
+}
+
+#Create replica 2 volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 1
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST mkdir $M0/data
+TEST touch $M0/data/file
+
+
+############ Client side healing using favorite-child-policy = mtime #################
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd if=/dev/urandom of=$M0/data/file bs=1024 count=1024
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/data/file bs=1024 count=1024
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+mtime1=$(get_mtime $B0/${V0}0/data/file)
+mtime2=$(get_mtime $B0/${V0}1/data/file)
+if (( $(echo "$mtime1 > $mtime2" | bc -l) )); then
+ LATEST_MTIME_MD5=$(md5sum $B0/${V0}0/data/file | cut -d\ -f1)
+else
+ LATEST_MTIME_MD5=$(md5sum $B0/${V0}1/data/file | cut -d\ -f1)
+fi
+
+#file will be in split-brain
+cat $M0/data/file > /dev/null
+EXPECT "1" echo $?
+
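+#With favorite-child-policy set to mtime, the client should resolve the
+#split-brain on access by picking the copy with the newer mtime.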
+TEST $CLI volume set $V0 cluster.favorite-child-policy mtime
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" afr_get_split_brain_count $V0
+cat $M0/data/file > /dev/null
+EXPECT "0" echo $?
+M0_MD5=$(md5sum $M0/data/file | cut -d\ -f1)
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_get_split_brain_count $V0
+TEST [ "$LATEST_MTIME_MD5" == "$M0_MD5" ]
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+B0_MD5=$(md5sum $B0/${V0}0/data/file | cut -d\ -f1)
+B1_MD5=$(md5sum $B0/${V0}1/data/file | cut -d\ -f1)
+TEST [ "$LATEST_MTIME_MD5" == "$B0_MD5" ]
+TEST [ "$LATEST_MTIME_MD5" == "$B1_MD5" ]
+
+############ Client side directory conservative merge #################
+TEST $CLI volume reset $V0 cluster.favorite-child-policy
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST touch $M0/data/test
+files=$(count_files $M0/data)
+EXPECT "2" echo $files
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST touch $M0/data/test1
+files=$(count_files $M0/data)
+EXPECT "2" echo $files
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+#data dir will be in entry split-brain
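+#(ls should fail with exit status 2 because the split-brained directory returns EIO)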
+ls $M0/data > /dev/null
+EXPECT "2" echo $?
+
+TEST $CLI volume set $V0 cluster.favorite-child-policy mtime
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" afr_get_split_brain_count $V0
+
+
+ls $M0/data > /dev/null
+EXPECT "0" echo $?
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_get_split_brain_count $V0
+#Entry split-brain is gone, but data self-heal is pending on the files
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+cat $M0/data/test > /dev/null
+cat $M0/data/test1 > /dev/null
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+files=$(count_files $M0/data)
+EXPECT "3" echo $files
+
+TEST force_umount $M0
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+
+cleanup
diff --git a/tests/basic/afr/split-brain-favorite-child-policy.t b/tests/basic/afr/split-brain-favorite-child-policy.t
deleted file mode 100644
index 0e321c6f095..00000000000
--- a/tests/basic/afr/split-brain-favorite-child-policy.t
+++ /dev/null
@@ -1,202 +0,0 @@
-#!/bin/bash
-
-#Test the split-brain resolution CLI commands.
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-
-#Create replica 2 volume
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
-TEST $CLI volume set $V0 performance.write-behind off
-TEST $CLI volume set $V0 cluster.self-heal-daemon off
-TEST $CLI volume set $V0 cluster.entry-self-heal off
-TEST $CLI volume set $V0 cluster.data-self-heal off
-TEST $CLI volume set $V0 cluster.metadata-self-heal off
-TEST $CLI volume start $V0
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
-TEST touch $M0/file
-
-############ Healing using favorite-child-policy = ctime #################
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
-TEST kill_brick $V0 $H0 $B0/${V0}1
-TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
-
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
-TEST $CLI volume set $V0 cluster.self-heal-daemon on
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
-TEST $CLI volume heal $V0
-
-#file fill in split-brain
-cat $M0/file > /dev/null
-EXPECT "1" echo $?
-
-# Umount to prevent further FOPS on the file, then find the brick with latest ctime.
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-ctime1=`stat -c "%.Z" $B0/${V0}0/file`
-ctime2=`stat -c "%.Z" $B0/${V0}1/file`
-if (( $(echo "$ctime1 > $ctime2" | bc -l) )); then
- LATEST_CTIME_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
-else
- LATEST_CTIME_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
-fi
-TEST $CLI volume set $V0 cluster.favorite-child-policy ctime
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
-TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-B0_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
-B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
-TEST [ "$LATEST_CTIME_MD5" == "$B0_MD5" ]
-TEST [ "$LATEST_CTIME_MD5" == "$B1_MD5" ]
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
-cat $M0/file > /dev/null
-EXPECT "0" echo $?
-
-############ Healing using favorite-child-policy = mtime #################
-TEST $CLI volume set $V0 cluster.favorite-child-policy none
-TEST $CLI volume set $V0 cluster.self-heal-daemon off
-TEST kill_brick $V0 $H0 $B0/${V0}1
-TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
-
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
-TEST $CLI volume set $V0 cluster.self-heal-daemon on
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
-TEST $CLI volume heal $V0
-
-#file still in split-brain
-cat $M0/file > /dev/null
-EXPECT "1" echo $?
-
-#We know that the second brick has latest mtime.
-LATEST_CTIME_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
-TEST $CLI volume set $V0 cluster.favorite-child-policy mtime
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
-TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-cat $M0/file > /dev/null
-EXPECT "0" echo $?
-HEALED_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
-TEST [ "$LATEST_CTIME_MD5" == "$HEALED_MD5" ]
-
-############ Healing using favorite-child-policy = size #################
-TEST $CLI volume set $V0 cluster.favorite-child-policy none
-TEST $CLI volume set $V0 cluster.self-heal-daemon off
-TEST kill_brick $V0 $H0 $B0/${V0}1
-TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST dd if=/dev/urandom of=$M0/file bs=1024 count=10240
-
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
-TEST $CLI volume set $V0 cluster.self-heal-daemon on
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
-TEST $CLI volume heal $V0
-
-#file fill in split-brain
-cat $M0/file > /dev/null
-EXPECT "1" echo $?
-
-#We know that the second brick has the bigger size file.
-BIGGER_FILE_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
-TEST $CLI volume set $V0 cluster.favorite-child-policy size
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
-TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-cat $M0/file > /dev/null
-EXPECT "0" echo $?
-HEALED_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
-TEST [ "$BIGGER_FILE_MD5" == "$HEALED_MD5" ]
-
-############ Healing using favorite-child-policy = majority on replica-3 #################
-
-#Convert volume to replica-3
-TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
-TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-
-TEST $CLI volume set $V0 cluster.quorum-type none
-TEST $CLI volume set $V0 cluster.favorite-child-policy none
-TEST $CLI volume set $V0 cluster.self-heal-daemon off
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
-TEST kill_brick $V0 $H0 $B0/${V0}1
-TEST kill_brick $V0 $H0 $B0/${V0}2
-TEST dd if=/dev/urandom of=$M0/file bs=1024 count=10240
-
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
-TEST $CLI volume set $V0 cluster.self-heal-daemon on
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
-TEST $CLI volume heal $V0
-
-#file fill in split-brain
-cat $M0/file > /dev/null
-EXPECT "1" echo $?
-
-#We know that the second and third bricks agree with each other. Pick any one of them.
-MAJORITY_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
-TEST $CLI volume set $V0 cluster.favorite-child-policy majority
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
-TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-cat $M0/file > /dev/null
-EXPECT "0" echo $?
-HEALED_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
-TEST [ "$MAJORITY_MD5" == "$HEALED_MD5" ]
-
-TEST force_umount $M0
-cleanup
diff --git a/tests/basic/afr/split-brain-heal-info.t b/tests/basic/afr/split-brain-heal-info.t
index 66275c57207..2e4742fff08 100644
--- a/tests/basic/afr/split-brain-heal-info.t
+++ b/tests/basic/afr/split-brain-heal-info.t
@@ -47,9 +47,11 @@ SPB_FILES=$(($SPB_FILES + 1))
#### Simulate entry-split-brain
TEST kill_brick $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 0
TEST touch $M0/espb/a
volume_start_force $V0
TEST kill_brick $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 1
TEST mkdir $M0/espb/a
volume_start_force $V0
SPB_FILES=$(($SPB_FILES + 1))
diff --git a/tests/basic/afr/split-brain-healing-ctime.t b/tests/basic/afr/split-brain-healing-ctime.t
new file mode 100644
index 00000000000..676788fce3f
--- /dev/null
+++ b/tests/basic/afr/split-brain-healing-ctime.t
@@ -0,0 +1,252 @@
+#!/bin/bash
+
+#Test the split-brain resolution CLI commands.
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function get_replicate_subvol_number {
+ local filename=$1
+ #get_backend_paths
+ if [ -f $B0/${V0}1/$filename ]
+ then
+ echo 0
+ elif [ -f $B0/${V0}3/$filename ]
+ then echo 1
+ else
+ echo -1
+ fi
+}
+
+cleanup;
+
+AREQUAL_PATH=$(dirname $0)/../../utils
+GET_MDATA_PATH=$(dirname $0)/../../utils
+CFLAGS=""
+test "`uname -s`" != "Linux" && {
+ CFLAGS="$CFLAGS -lintl";
+}
+build_tester $AREQUAL_PATH/arequal-checksum.c $CFLAGS
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+cd $M0
+for i in {1..10}
+do
+ echo "Initial content">>file$i
+done
+
+replica_0_files_list=(`ls $B0/${V0}1|grep -v '^\.'`)
+replica_1_files_list=(`ls $B0/${V0}3|grep -v '^\.'`)
+
+############ Create data split-brain in the files. ###########################
+TEST kill_brick $V0 $H0 $B0/${V0}1
+for file in ${!replica_0_files_list[*]}
+do
+ echo "B1 is down">>${replica_0_files_list[$file]}
+done
+TEST kill_brick $V0 $H0 $B0/${V0}3
+for file in ${!replica_1_files_list[*]}
+do
+ echo "B3 is down">>${replica_1_files_list[$file]}
+done
+
+SMALLER_FILE_SIZE=$(stat -c %s file1)
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST kill_brick $V0 $H0 $B0/${V0}2
+for file in ${!replica_0_files_list[*]}
+do
+ echo "B2 is down">>${replica_0_files_list[$file]}
+ echo "appending more content to make it the bigger file">>${replica_0_files_list[$file]}
+done
+TEST kill_brick $V0 $H0 $B0/${V0}4
+for file in ${!replica_1_files_list[*]}
+do
+ echo "B4 is down">>${replica_1_files_list[$file]}
+ echo "appending more content to make it the bigger file">>${replica_1_files_list[$file]}
+done
+
+BIGGER_FILE_SIZE=$(stat -c %s file1)
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 3
+
+
+############### Accessing the files should now give EIO. ###############################
+TEST ! cat file1
+TEST ! cat file2
+TEST ! cat file3
+TEST ! cat file4
+TEST ! cat file5
+TEST ! cat file6
+TEST ! cat file7
+TEST ! cat file8
+TEST ! cat file9
+TEST ! cat file10
+###################
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3
+
+################ Heal file1 using the bigger-file option ##############
+$CLI volume heal $V0 split-brain bigger-file /file1
+EXPECT "0" echo $?
+EXPECT $BIGGER_FILE_SIZE stat -c %s file1
+
+################ Heal file2 using the bigger-file option and its gfid ##############
+subvolume=$(get_replicate_subvol_number file2)
+if [ $subvolume == 0 ]
+then
+ GFID=$(gf_get_gfid_xattr $B0/${V0}1/file2)
+elif [ $subvolume == 1 ]
+then
+ GFID=$(gf_get_gfid_xattr $B0/${V0}3/file2)
+fi
+GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)"
+$CLI volume heal $V0 split-brain bigger-file $GFIDSTR
+EXPECT "0" echo $?
+
+################ Heal file3 using the source-brick option ##############
+################ Use the brick having smaller file size as source #######
+subvolume=$(get_replicate_subvol_number file3)
+if [ $subvolume == 0 ]
+then
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}2 /file3
+elif [ $subvolume == 1 ]
+then
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}4 /file3
+fi
+EXPECT "0" echo $?
+EXPECT $SMALLER_FILE_SIZE stat -c %s file3
+
+################ Heal file4 using the source-brick option and it's gfid ##############
+################ Use the brick having smaller file size as source #######
+subvolume=$(get_replicate_subvol_number file4)
+if [ $subvolume == 0 ]
+then
+ GFID=$(gf_get_gfid_xattr $B0/${V0}1/file4)
+ GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)"
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}2 $GFIDSTR
+elif [ $subvolume == 1 ]
+then
+ GFID=$(gf_get_gfid_xattr $B0/${V0}3/file4)
+ GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)"
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}4 $GFIDSTR
+fi
+EXPECT "0" echo $?
+EXPECT $SMALLER_FILE_SIZE stat -c %s file4
+
+# With ctime enabled, the ctime xattr ("trusted.glusterfs.mdata") gets healed
+# as part of metadata heal. So mtime would be same, hence it can't be healed
+# using 'latest-mtime' policy, use 'source-brick' option instead.
+################ Heal file5 using the source-brick option ##############
+subvolume=$(get_replicate_subvol_number file5)
+if [ $subvolume == 0 ]
+then
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /file5
+elif [ $subvolume == 1 ]
+then
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}3 /file5
+fi
+EXPECT "0" echo $?
+
+if [ $subvolume == 0 ]
+then
+ mtime1_after_heal=$(get_mtime $B0/${V0}1/file5)
+ mtime2_after_heal=$(get_mtime $B0/${V0}2/file5)
+elif [ $subvolume == 1 ]
+then
+ mtime1_after_heal=$(get_mtime $B0/${V0}3/file5)
+ mtime2_after_heal=$(get_mtime $B0/${V0}4/file5)
+fi
+
+#TODO: Do the below comparisons with full sub-second resolution
+
+TEST [ $mtime1_after_heal -eq $mtime2_after_heal ]
+
+mtime_mount_after_heal=$(stat -c %Y file5)
+
+TEST [ $mtime1_after_heal -eq $mtime_mount_after_heal ]
+
+################ Heal file6 using the source-brick option and its gfid ##############
+subvolume=$(get_replicate_subvol_number file6)
+if [ $subvolume == 0 ]
+then
+ GFID=$(gf_get_gfid_xattr $B0/${V0}1/file6)
+ GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)"
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 $GFIDSTR
+elif [ $subvolume == 1 ]
+then
+ GFID=$(gf_get_gfid_xattr $B0/${V0}3/file6)
+ GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)"
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}3 $GFIDSTR
+fi
+EXPECT "0" echo $?
+
+if [ $subvolume == 0 ]
+then
+ mtime1_after_heal=$(get_mtime $B0/${V0}1/file6)
+ mtime2_after_heal=$(get_mtime $B0/${V0}2/file6)
+elif [ $subvolume == 1 ]
+then
+ mtime1_after_heal=$(get_mtime $B0/${V0}3/file6)
+ mtime2_after_heal=$(get_mtime $B0/${V0}4/file6)
+fi
+
+#TODO: Do the below comparisons with full sub-second resolution
+
+TEST [ $mtime1_after_heal -eq $mtime2_after_heal ]
+
+mtime_mount_after_heal=$(stat -c %Y file6)
+
+TEST [ $mtime1_after_heal -eq $mtime_mount_after_heal ]
+
+################ Heal remaining SB'ed files of replica_0 using B1 as source ##############
+$CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1
+EXPECT "0" echo $?
+
+################ Heal remaining SB'ed files of replica_1 using B3 as source ##############
+$CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}3
+EXPECT "0" echo $?
+
+############### Reading the files should now succeed. ###############################
+TEST cat file1
+TEST cat file2
+TEST cat file3
+TEST cat file4
+TEST cat file5
+TEST cat file6
+TEST cat file7
+TEST cat file8
+TEST cat file9
+TEST cat file10
+
+################ File contents on the bricks must be same. ################################
+TEST diff <(arequal-checksum -p $B0/${V0}1 -i .glusterfs) <(arequal-checksum -p $B0/${V0}2 -i .glusterfs)
+TEST diff <(arequal-checksum -p $B0/${V0}3 -i .glusterfs) <(arequal-checksum -p $B0/${V0}4 -i .glusterfs)
+
+############### Trying to heal files not in SB should fail. ###############################
+$CLI volume heal $V0 split-brain bigger-file /file1
+EXPECT "1" echo $?
+$CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}4 /file3
+EXPECT "1" echo $?
+
+cd -
+TEST rm $AREQUAL_PATH/arequal-checksum
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+cleanup
diff --git a/tests/basic/afr/split-brain-healing.t b/tests/basic/afr/split-brain-healing.t
index c80f900b909..315e815eb7e 100644
--- a/tests/basic/afr/split-brain-healing.t
+++ b/tests/basic/afr/split-brain-healing.t
@@ -20,11 +20,14 @@ function get_replicate_subvol_number {
cleanup;
AREQUAL_PATH=$(dirname $0)/../../utils
+GET_MDATA_PATH=$(dirname $0)/../../utils
CFLAGS=""
test "`uname -s`" != "Linux" && {
CFLAGS="$CFLAGS -lintl";
}
build_tester $AREQUAL_PATH/arequal-checksum.c $CFLAGS
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}
@@ -32,6 +35,7 @@ TEST $CLI volume set $V0 cluster.self-heal-daemon off
TEST $CLI volume set $V0 cluster.data-self-heal off
TEST $CLI volume set $V0 cluster.metadata-self-heal off
TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 ctime off
TEST $CLI volume start $V0
TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
@@ -152,13 +156,13 @@ EXPECT $SMALLER_FILE_SIZE stat -c %s file4
subvolume=$(get_replicate_subvol_number file5)
if [ $subvolume == 0 ]
then
- mtime1=$(stat -c %Y $B0/${V0}1/file5)
- mtime2=$(stat -c %Y $B0/${V0}2/file5)
+ mtime1=$(get_mtime $B0/${V0}1/file5)
+ mtime2=$(get_mtime $B0/${V0}2/file5)
LATEST_MTIME=$(($mtime1 > $mtime2 ? $mtime1:$mtime2))
elif [ $subvolume == 1 ]
then
- mtime1=$(stat -c %Y $B0/${V0}3/file5)
- mtime2=$(stat -c %Y $B0/${V0}4/file5)
+ mtime1=$(get_mtime $B0/${V0}3/file5)
+ mtime2=$(get_mtime $B0/${V0}4/file5)
LATEST_MTIME=$(($mtime1 > $mtime2 ? $mtime1:$mtime2))
fi
$CLI volume heal $V0 split-brain latest-mtime /file5
@@ -166,12 +170,12 @@ EXPECT "0" echo $?
if [ $subvolume == 0 ]
then
- mtime1_after_heal=$(stat -c %Y $B0/${V0}1/file5)
- mtime2_after_heal=$(stat -c %Y $B0/${V0}2/file5)
+ mtime1_after_heal=$(get_mtime $B0/${V0}1/file5)
+ mtime2_after_heal=$(get_mtime $B0/${V0}2/file5)
elif [ $subvolume == 1 ]
then
- mtime1_after_heal=$(stat -c %Y $B0/${V0}3/file5)
- mtime2_after_heal=$(stat -c %Y $B0/${V0}4/file5)
+ mtime1_after_heal=$(get_mtime $B0/${V0}3/file5)
+ mtime2_after_heal=$(get_mtime $B0/${V0}4/file5)
fi
#TODO: To below comparisons on full sub-second resolution
@@ -188,14 +192,14 @@ subvolume=$(get_replicate_subvol_number file6)
if [ $subvolume == 0 ]
then
GFID=$(gf_get_gfid_xattr $B0/${V0}1/file6)
- mtime1=$(stat -c %Y $B0/${V0}1/file6)
- mtime2=$(stat -c %Y $B0/${V0}2/file6)
+ mtime1=$(get_mtime $B0/${V0}1/file6)
+ mtime2=$(get_mtime $B0/${V0}2/file6)
LATEST_MTIME=$(($mtime1 > $mtime2 ? $mtime1:$mtime2))
elif [ $subvolume == 1 ]
then
GFID=$(gf_get_gfid_xattr $B0/${V0}3/file6)
- mtime1=$(stat -c %Y $B0/${V0}3/file6)
- mtime2=$(stat -c %Y $B0/${V0}4/file6)
+ mtime1=$(get_mtime $B0/${V0}3/file6)
+ mtime2=$(get_mtime $B0/${V0}4/file6)
LATEST_MTIME=$(($mtime1 > $mtime2 ? $mtime1:$mtime2))
fi
GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)"
@@ -204,12 +208,12 @@ EXPECT "0" echo $?
if [ $subvolume == 0 ]
then
- mtime1_after_heal=$(stat -c %Y $B0/${V0}1/file6)
- mtime2_after_heal=$(stat -c %Y $B0/${V0}2/file6)
+ mtime1_after_heal=$(get_mtime $B0/${V0}1/file6)
+ mtime2_after_heal=$(get_mtime $B0/${V0}2/file6)
elif [ $subvolume == 1 ]
then
- mtime1_after_heal=$(stat -c %Y $B0/${V0}3/file6)
- mtime2_after_heal=$(stat -c %Y $B0/${V0}4/file6)
+ mtime1_after_heal=$(get_mtime $B0/${V0}3/file6)
+ mtime2_after_heal=$(get_mtime $B0/${V0}4/file6)
fi
#TODO: To below comparisons on full sub-second resolution
@@ -253,4 +257,5 @@ EXPECT "1" echo $?
cd -
TEST rm $AREQUAL_PATH/arequal-checksum
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
cleanup
diff --git a/tests/basic/afr/split-brain-resolution.t b/tests/basic/afr/split-brain-resolution.t
index dbe665e3e51..834237c96ec 100644
--- a/tests/basic/afr/split-brain-resolution.t
+++ b/tests/basic/afr/split-brain-resolution.t
@@ -11,6 +11,9 @@ function get_split_brain_status {
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
TEST $CLI volume start $V0
#Disable self-heal-daemon
@@ -71,6 +74,18 @@ TEST setfattr -n replica.split-brain-choice -v none $M0/data-split-brain.txt
TEST ! getfattr -n user.test $M0/metadata-split-brain.txt
TEST ! cat $M0/data-split-brain.txt
+#Check that after timeout fops result in EIO again.
+#Set one minute timeout
+TEST setfattr -n replica.split-brain-choice-timeout -v 1 $M0/
+TEST setfattr -n replica.split-brain-choice -v $V0-client-1 $M0/data-split-brain.txt
+EXPECT "brick1_alive" cat $M0/data-split-brain.txt
+TEST setfattr -n replica.split-brain-choice -v $V0-client-0 $M0/metadata-split-brain.txt
+EXPECT "brick0" get_text_xattr user.test $M0/metadata-split-brain.txt
+#Wait until timeout completes and test that the fops fail again
+sleep 62
+TEST ! getfattr -n user.test $M0/metadata-split-brain.txt
+TEST ! cat $M0/data-split-brain.txt
+
#Negative test cases should fail
TEST ! setfattr -n replica.split-brain-choice -v $V0-client-4 $M0/data-split-brain.txt
TEST ! setfattr -n replica.split-brain-heal-finalize -v $V0-client-4 $M0/metadata-split-brain.txt
diff --git a/tests/basic/afr/ta-check-locks.t b/tests/basic/afr/ta-check-locks.t
new file mode 100644
index 00000000000..c0102c35b7b
--- /dev/null
+++ b/tests/basic/afr/ta-check-locks.t
@@ -0,0 +1,68 @@
+#!/bin/bash
+#This test checks that all the locks on the
+#ta file are held and released properly
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../thin-arbiter.rc
+
+function get_lock_count_on_ta()
+{
+ tapid=`cat $B0/ta.pid`
+ local sfile=$(generate_statedump $tapid)
+ count=$(grep "inodelk-count" $sfile | cut -f2 -d'=' | tail -1)
+ ncount=$(grep "inodelk.inodelk" $sfile | grep "len=1" | wc -l)
+ echo "count = $count : ncount = $ncount"
+ if [ "$count" = "" ]
+ then
+ count=0
+ fi
+
+ if [ "$count" -eq "$ncount" ]
+ then
+ echo "$count"
+ else
+ echo "-1"
+ fi
+}
+
+cleanup;
+TEST ta_create_brick_and_volfile brick0
+TEST ta_create_brick_and_volfile brick1
+TEST ta_create_ta_and_volfile ta
+TEST ta_start_brick_process brick0
+TEST ta_start_brick_process brick1
+TEST ta_start_ta_process ta
+
+TEST ta_create_mount_volfile brick0 brick1 ta
+TEST ta_start_mount_process $M0
+TEST ta_start_mount_process $M1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_up_status $V0 $M0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_up_status $V0 $M1 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "trusted.afr.patchy-ta-2" ls $B0/ta
+
+TEST ta_create_shd_volfile brick0 brick1 ta
+TEST ta_start_shd_process glustershd
+shd_pid=$(cat $B0/glustershd.pid)
+
+TEST touch $M0/a.txt
+echo "Hello" >> $M0/a.txt
+EXPECT_WITHIN $IO_WAIT_TIMEOUT "0" get_lock_count_on_ta
+
+TEST ta_kill_brick brick0
+echo "Hello" >> $M0/a.txt
+EXPECT_WITHIN $IO_WAIT_TIMEOUT "1" get_lock_count_on_ta
+
+echo "Hello" >> $M1/a.txt
+EXPECT_WITHIN $IO_WAIT_TIMEOUT "2" get_lock_count_on_ta
+
+echo "xyz" >> $M0/a.txt
+EXPECT_WITHIN $IO_WAIT_TIMEOUT "2" get_lock_count_on_ta
+
+chmod 0666 $M0/a.txt
+EXPECT_WITHIN $IO_WAIT_TIMEOUT "2" get_lock_count_on_ta
+
+TEST ta_start_brick_process brick0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_lock_count_on_ta
+
+cleanup;
diff --git a/tests/basic/afr/ta-read.t b/tests/basic/afr/ta-read.t
index f2b3c38e06c..3cfc16b9b8a 100644
--- a/tests/basic/afr/ta-read.t
+++ b/tests/basic/afr/ta-read.t
@@ -25,31 +25,35 @@ TEST ! ls $B0/ta/FILE
# Kill one brick and write to FILE.
TEST ta_kill_brick brick0
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" ta_mount_child_up_status $M0 $V0 0
echo "brick0 down">> $M0/FILE
TEST [ $? -eq 0 ]
EXPECT "000000010000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/brick1/FILE
EXPECT "000000010000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/ta/trusted.afr.patchy-ta-2
#Umount and mount to remove cached data.
-TEST umount $M0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
TEST ta_start_mount_process $M0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_up_status $V0 $M0 0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 1
# Read must be allowed since good brick is up.
TEST cat $M0/FILE
+#Umount and mount to remove cached data.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ta_start_mount_process $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_up_status $V0 $M0 0
# Toggle good and bad data brick processes.
TEST ta_start_brick_process brick0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 0
TEST ta_kill_brick brick1
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" ta_mount_child_up_status $M0 $V0 1
# Read must now fail.
TEST ! cat $M0/FILE
# Bring all data bricks up, and kill TA.
TEST ta_start_brick_process brick1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 1
TA_PID=$(ta_get_pid_by_brick_name ta)
TEST [ -n $TA_PID ]
TEST ta_kill_brick ta
diff --git a/tests/basic/afr/ta-shd.t b/tests/basic/afr/ta-shd.t
index bb2e58b3f77..96ecfc678e0 100644
--- a/tests/basic/afr/ta-shd.t
+++ b/tests/basic/afr/ta-shd.t
@@ -22,7 +22,7 @@ TEST ta_start_shd_process glustershd
TEST touch $M0/a.txt
TEST ta_kill_brick brick0
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" ta_mount_child_up_status $M0 $V0 0
echo "Hello" >> $M0/a.txt
EXPECT "000000010000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/brick1/a.txt
EXPECT "000000010000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/ta/trusted.afr.$V0-ta-2
@@ -33,14 +33,14 @@ EXPECT "000000010000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/ta/
#the SHD process.
TEST ta_start_brick_process brick0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 0
EXPECT_WITHIN $HEAL_TIMEOUT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/brick1/a.txt
EXPECT_WITHIN $HEAL_TIMEOUT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/ta/trusted.afr.$V0-ta-2
#Kill the previously up brick and try reading from other brick. Since the heal
#has happened file content should be same.
TEST ta_kill_brick brick1
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" ta_mount_child_up_status $M0 $V0 1
#Umount and mount to remove cached data.
TEST umount $M0
TEST ta_start_mount_process $M0
diff --git a/tests/basic/afr/ta-write-on-bad-brick.t b/tests/basic/afr/ta-write-on-bad-brick.t
new file mode 100644
index 00000000000..096ca9f47cf
--- /dev/null
+++ b/tests/basic/afr/ta-write-on-bad-brick.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../thin-arbiter.rc
+cleanup;
+TEST ta_create_brick_and_volfile brick0
+TEST ta_create_brick_and_volfile brick1
+TEST ta_create_ta_and_volfile ta
+TEST ta_start_brick_process brick0
+TEST ta_start_brick_process brick1
+TEST ta_start_ta_process ta
+
+TEST ta_create_mount_volfile brick0 brick1 ta
+TEST ta_start_mount_process $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_up_status $V0 $M0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "trusted.afr.patchy-ta-2" ls $B0/ta
+
+TEST touch $M0/a.txt
+TEST ls $B0/brick0/a.txt
+TEST ls $B0/brick1/a.txt
+TEST ! ls $B0/ta/a.txt
+
+TEST dd if=/dev/zero of=$M0/a.txt bs=1M count=5
+
+#Good Data brick is down. TA and bad brick are UP
+
+TEST ta_kill_brick brick1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" ta_mount_child_up_status $M0 $V0 1
+TEST dd if=/dev/zero of=$M0/a.txt bs=1M count=5
+TEST ta_kill_brick brick0
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" ta_mount_child_up_status $M0 $V0 0
+TEST ta_start_brick_process brick1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 1
+TEST ! dd if=/dev/zero of=$M0/a.txt bs=1M count=5
+
+# Good Data brick is UP. Bad and TA are down
+TEST ta_kill_brick brick1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" ta_mount_child_up_status $M0 $V0 1
+TEST ta_start_brick_process brick0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 0
+TEST ta_kill_brick ta
+TEST ! dd if=/dev/zero of=$M0/a.txt bs=1M count=5
+
+# Good and Bad data bricks are UP. TA is down
+TEST ta_start_brick_process brick1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 0
+TEST dd if=/dev/zero of=$M0/a.txt bs=1M count=5
+
+cleanup;
diff --git a/tests/basic/afr/ta.t b/tests/basic/afr/ta.t
index d8beb1be461..05d48431c95 100644
--- a/tests/basic/afr/ta.t
+++ b/tests/basic/afr/ta.t
@@ -27,18 +27,28 @@ EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" afr_child_up_status_meta $M0 $V0-replica
TEST touch $M0/b.txt
EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/brick1
EXPECT "000000010000000200000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/brick1/b.txt
-#TODO: New entry mark lead to pending data on the file but no such marking happened on ta
-#EXPECT "000000010000000100000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/ta/trusted.afr.patchy-ta-2
+#New entry marking leads to pending data on the file and on the ta file
+EXPECT "000000010000000100000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/ta/trusted.afr.patchy-ta-2
TEST ! ls $B0/brick0/b.txt
TEST ls $B0/brick1/b.txt
+#Try to create an entry while the good brick is down and the bad brick is UP. The create should fail.
+TEST ta_start_brick_process brick0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
+TEST ta_kill_brick brick1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" afr_child_up_status_meta $M0 $V0-replicate-0 1
+TEST ! touch $M0/d.txt
+EXPECT "000000010000000100000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/ta/trusted.afr.patchy-ta-2
+
+TEST ta_start_brick_process brick1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 1
+TEST ta_kill_brick brick0
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" afr_child_up_status_meta $M0 $V0-replicate-0 0
+
TEST ta_kill_brick ta
-# Entry create must fail since only 1 brick is up.
+# Entry create must fail if only one brick is UP, even if that is a good brick.
TEST ! touch $M0/c.txt
TEST ! ls $B0/brick0/c.txt
TEST ! ls $B0/brick1/c.txt
-TEST ta_start_brick_process brick0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
-
cleanup;
diff --git a/tests/basic/afr/tarissue.t b/tests/basic/afr/tarissue.t
index a35d468eb0c..83f7463130c 100644
--- a/tests/basic/afr/tarissue.t
+++ b/tests/basic/afr/tarissue.t
@@ -4,6 +4,8 @@
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../nfs.rc
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
TESTS_EXPECTED_IN_LOOP=10
cleanup;
diff --git a/tests/basic/all_squash.t b/tests/basic/all_squash.t
new file mode 100644
index 00000000000..29766c50af7
--- /dev/null
+++ b/tests/basic/all_squash.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0;
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+
+# random uid/gid
+uid=22162
+gid=5845
+
+TEST $CLI volume set $V0 server.anonuid $uid;
+TEST $CLI volume set $V0 server.anongid $gid;
+
+# Ensure server.all-squash is disabled
+TEST $CLI volume set $V0 server.all-squash disable;
+
+# Tests for the fuse mount
+mkdir $M0/other;
+chown $uid:$gid $M0/other;
+
+TEST $CLI volume set $V0 server.all-squash enable;
+
+touch $M0/file 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $M0/dir 2>/dev/null;
+TEST [ $? -ne 0 ]
+
+TEST touch $M0/other/file 2>/dev/null;
+TEST [ "$(stat -c %u:%g $M0/other/file)" = "$uid:$gid" ];
+TEST mkdir $M0/other/dir 2>/dev/null;
+TEST [ "$(stat -c %u:%g $M0/other/dir)" = "$uid:$gid" ];
+
+TEST $CLI volume set $V0 server.all-squash disable;
+TEST rm -rf $M0/other;
+
+sleep 1;
+
+# tests for nfs mount
+mkdir $N0/other;
+chown $uid:$gid $N0/other;
+
+TEST $CLI volume set $V0 server.all-squash enable;
+
+touch $N0/file 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $N0/dir 2>/dev/null;
+TEST [ $? -ne 0 ]
+
+TEST touch $N0/other/file 2>/dev/null;
+TEST [ "$(stat -c %u:%g $N0/other/file)" = "$uid:$gid" ];
+TEST mkdir $N0/other/dir 2>/dev/null;
+TEST [ "$(stat -c %u:%g $N0/other/dir)" = "$uid:$gid" ];
+
+TEST $CLI volume set $V0 server.all-squash disable;
+TEST rm -rf $N0/other;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/basic/bd.t b/tests/basic/bd.t
deleted file mode 100755
index 63622edd709..00000000000
--- a/tests/basic/bd.t
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-function execute()
-{
- cmd=$1
- shift
- ${cmd} $@ >/dev/null 2>&1
-}
-
-function bd_cleanup()
-{
- execute vgremove -f ${V0}
- execute pvremove ${ld}
- execute losetup -d ${ld}
- execute rm ${BD_DISK}
- cleanup
-}
-
-function check()
-{
- if [ $? -ne 0 ]; then
- echo prerequsite $@ failed
- bd_cleanup
- exit
- fi
-}
-
-SIZE=256 #in MB
-
-bd_cleanup;
-
-## Configure environment needed for BD backend volumes
-## Create a file with configured size and
-## set it as a temporary loop device to create
-## physical volume & VG. These are basic things needed
-## for testing BD xlator if anyone of these steps fail,
-## test script exits
-function configure()
-{
- GLDIR=`$CLI system:: getwd`
- BD_DISK=${GLDIR}/bd_disk
-
- execute truncate -s${SIZE}M ${BD_DISK}
- check ${BD_DISK} creation
-
- execute losetup -f
- check losetup
- ld=`losetup -f`
-
- execute losetup ${ld} ${BD_DISK}
- check losetup ${BD_DISK}
- execute pvcreate -f ${ld}
- check pvcreate ${ld}
- execute vgcreate ${V0} ${ld}
- check vgcreate ${V0}
- execute lvcreate --thin ${V0}/pool --size 128M
-}
-
-function volinfo_field()
-{
- local vol=$1;
- local field=$2;
- $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
-}
-
-function volume_type()
-{
- getfattr -n volume.type $M0/. --only-values --absolute-names -e text
-}
-
-case $OSTYPE in
-NetBSD)
- echo "Skip test on LVM which is not available on NetBSD" >&2
- SKIP_TESTS
- exit 0
- ;;
-*)
- ;;
-esac
-
-TEST glusterd
-TEST pidof glusterd
-configure
-
-TEST $CLI volume create $V0 ${H0}:/$B0/$V0?${V0}
-EXPECT "$V0" volinfo_field $V0 'Volume Name';
-EXPECT 'Created' volinfo_field $V0 'Status';
-
-## Start volume and verify
-TEST $CLI volume start $V0;
-EXPECT 'Started' volinfo_field $V0 'Status'
-
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
-EXPECT '1' volume_type
-
-## Create posix file
-TEST touch $M0/posix
-
-TEST touch $M0/lv
-gfid=`getfattr -n glusterfs.gfid.string $M0/lv --only-values --absolute-names`
-TEST setfattr -n user.glusterfs.bd -v "lv:4MB" $M0/lv
-# Check if LV is created
-TEST stat /dev/$V0/${gfid}
-
-## Create filesystem
-sleep 1
-TEST mkfs.ext4 -qF $M0/lv
-# Cloning
-TEST touch $M0/lv_clone
-gfid=`getfattr -n glusterfs.gfid.string $M0/lv_clone --only-values --absolute-names`
-TEST setfattr -n clone -v ${gfid} $M0/lv
-TEST stat /dev/$V0/${gfid}
-
-sleep 1
-## Check mounting
-TEST mount -o loop $M0/lv $M1
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
-
-# Snapshot
-TEST touch $M0/lv_sn
-gfid=`getfattr -n glusterfs.gfid.string $M0/lv_sn --only-values --absolute-names`
-TEST setfattr -n snapshot -v ${gfid} $M0/lv
-TEST stat /dev/$V0/${gfid}
-
-# Merge
-sleep 1
-TEST setfattr -n merge -v "$M0/lv_sn" $M0/lv_sn
-TEST ! stat $M0/lv_sn
-TEST ! stat /dev/$V0/${gfid}
-
-
-rm $M0/* -f
-
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-TEST $CLI volume stop ${V0}
-EXPECT 'Stopped' volinfo_field $V0 'Status';
-TEST $CLI volume delete ${V0}
-
-bd_cleanup
diff --git a/tests/basic/changelog/changelog-api.t b/tests/basic/changelog/changelog-api.t
new file mode 100644
index 00000000000..516c2f2f60d
--- /dev/null
+++ b/tests/basic/changelog/changelog-api.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../env.rc
+
+cleanup;
+
+CHANGELOG_BIN_PATH=$(dirname $0)/../../utils/changelog
+build_tester $CHANGELOG_BIN_PATH/test-changelog-api.c -lgfchangelog
+
+CHANGELOG_PATH_0="$B0/${V0}0/.glusterfs/changelogs"
+ROLLOVER_TIME=2
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 changelog.changelog on
+TEST $CLI volume set $V0 changelog.rollover-time $ROLLOVER_TIME
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+sleep 3;
+
+#Listen to changelog journal notifications
+$CHANGELOG_BIN_PATH/test-changelog-api &
+for i in {1..12};do echo "data" > $M0/file$i 2>/dev/null; sleep 1;done &
+
+#Wait for changelogs to be in .processed directory
+sleep 12
+
+EXPECT "Y" processed_changelogs "/tmp/scratch_v1/.processed"
+TEST rm $CHANGELOG_BIN_PATH/test-changelog-api
+rm -rf /tmp/scratch_v1
+
+cleanup;
diff --git a/tests/basic/changelog/changelog-history.t b/tests/basic/changelog/changelog-history.t
index 3ce40981c90..ea952619652 100644
--- a/tests/basic/changelog/changelog-history.t
+++ b/tests/basic/changelog/changelog-history.t
@@ -5,6 +5,7 @@
cleanup;
+SCRIPT_TIMEOUT=300
HISTORY_BIN_PATH=$(dirname $0)/../../utils/changelog
build_tester $HISTORY_BIN_PATH/get-history.c -lgfchangelog
@@ -68,19 +69,23 @@ TEST $CLI volume set $V0 changelog.changelog off
sleep 3
time_after_disable=$(date '+%s')
+TEST $CLI volume set $V0 changelog.changelog on
+sleep 5
+
#Passes, gives the changelogs till continuous changelogs are available
# but returns 1
-EXPECT "1" $HISTORY_BIN_PATH/get-history $time_after_enable1 $time_in_sec_htime2
+EXPECT_WITHIN 10 "1" $HISTORY_BIN_PATH/get-history $time_after_enable1 $time_in_sec_htime2
#Fails as start falls between htime files
-EXPECT "-3" $HISTORY_BIN_PATH/get-history $time_between_htime $time_in_sec_htime1
+EXPECT_WITHIN 10 "-3" $HISTORY_BIN_PATH/get-history $time_between_htime $time_in_sec_htime1
#Passes as start and end falls in same htime file
-EXPECT "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime1 $time_in_sec_htime2
+EXPECT_WITHIN 10 "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime1 $time_in_sec_htime2
#Passes, gives the changelogs till continuous changelogs are available
-EXPECT "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime2 $time_after_disable
+EXPECT_WITHIN 10 "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime2 $time_after_disable
TEST rm $HISTORY_BIN_PATH/get-history
+rm -rf /tmp/scratch_v1/*
cleanup;
diff --git a/tests/basic/changelog/history-api.t b/tests/basic/changelog/history-api.t
new file mode 100644
index 00000000000..9e63118cef9
--- /dev/null
+++ b/tests/basic/changelog/history-api.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../env.rc
+
+cleanup;
+
+HISTORY_BIN_PATH=$(dirname $0)/../../utils/changelog
+build_tester $HISTORY_BIN_PATH/test-history-api.c -lgfchangelog
+
+CHANGELOG_PATH_0="$B0/${V0}0/.glusterfs/changelogs"
+ROLLOVER_TIME=2
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 changelog.changelog on
+TEST $CLI volume set $V0 changelog.rollover-time $ROLLOVER_TIME
+TEST $CLI volume start $V0
+
+sleep 3
+start=$(date '+%s')
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+touch $M0/file{1..10}
+
+for i in {1..12};do echo "data" > $M0/file$i; sleep 1;done
+end=$(date '+%s')
+sleep 2
+
+#Passes as start and end falls in same htime file
+EXPECT "0" $HISTORY_BIN_PATH/test-history-api $start $end
+
+#Wait for changelogs to be in .processed directory
+sleep 2
+
+EXPECT "Y" processed_changelogs "/tmp/scratch_v1/.history/.processed"
+TEST rm $HISTORY_BIN_PATH/test-history-api
+rm -rf /tmp/scratch_v1
+
+cleanup;
diff --git a/tests/basic/cloudsync-sanity.t b/tests/basic/cloudsync-sanity.t
index 3cf719da011..834ba96430c 100644
--- a/tests/basic/cloudsync-sanity.t
+++ b/tests/basic/cloudsync-sanity.t
@@ -19,4 +19,11 @@ TEST $GFS -s $H0 --volfile-id $V0 $M1;
# create operations
TEST $(dirname $0)/rpc-coverage.sh $M1
+
+TEST cp $(dirname ${0})/gfapi/glfsxmp-coverage.c glfsxmp.c
+TEST build_tester ./glfsxmp.c -lgfapi
+./glfsxmp $V0 $H0
+cleanup_tester ./glfsxmp
+rm ./glfsxmp.c
+
cleanup;
diff --git a/tests/basic/ctime/ctime-ec-heal.t b/tests/basic/ctime/ctime-ec-heal.t
new file mode 100644
index 00000000000..142237c5014
--- /dev/null
+++ b/tests/basic/ctime/ctime-ec-heal.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+#
+# This will test self healing of ctime xattr 'trusted.glusterfs.mdata'
+#
+###
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+#create and start volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{1..3}
+TEST $CLI volume start $V0
+
+#Mount the volume
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+
+# Create files
+mkdir $M0/dir1
+echo "Initial content" > $M0/file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/file1
+
+# Kill brick
+TEST kill_brick $V0 $H0 $B0/${V0}3
+
+echo "B3 is down" >> $M0/file1
+echo "Change dir1 time attributes" > $M0/dir1/dir1_file1
+echo "Entry heal file" > $M0/entry_heal_file1
+mkdir $M0/entry_heal_dir1
+
+# Check xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_uniq_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_uniq_count $B0/${V0}{1..3}/file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/dir1/dir1_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1/dir1_file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/entry_heal_dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_dir1
+
+TEST $CLI volume start $V0 force
+$CLI volume heal $V0
+
+# Check xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1/dir1_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1/dir1_file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/entry_heal_dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_dir1
+
+cleanup;
diff --git a/tests/basic/ctime/ctime-ec-rebalance.t b/tests/basic/ctime/ctime-ec-rebalance.t
new file mode 100644
index 00000000000..2b73bcdd103
--- /dev/null
+++ b/tests/basic/ctime/ctime-ec-rebalance.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# This will test healing of ctime xattr 'trusted.glusterfs.mdata' after add-brick and rebalance
+#
+###
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fallocate.rc
+
+cleanup
+
+#create and start volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{0..5}
+TEST $CLI volume start $V0
+
+#Mount the volume
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+
+# Create files
+mkdir $M0/dir1
+echo "test data" > $M0/dir1/file1
+
+# Add brick
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{6..8}
+
+#Trigger rebalance
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+#Verify ctime xattr heal on directory
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}6/dir1"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}7/dir1"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}8/dir1"
+
+b6_mdata=$(get_mdata "$B0/${V0}6/dir1")
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "${b6_mdata}" get_mdata $B0/${V0}7/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "${b6_mdata}" get_mdata $B0/${V0}8/dir1
+
+cleanup;
diff --git a/tests/basic/ctime/ctime-glfs-init.t b/tests/basic/ctime/ctime-glfs-init.t
index 0ecd80c2680..56d7d6caee0 100644
--- a/tests/basic/ctime/ctime-glfs-init.t
+++ b/tests/basic/ctime/ctime-glfs-init.t
@@ -9,8 +9,6 @@ TEST glusterd
TEST $CLI volume create $V0 replica 3 ${H0}:$B0/brick{1,2,3};
TEST $CLI volume set $V0 performance.stat-prefetch off
-TEST $CLI volume set $V0 ctime on
-TEST $CLI volume set $V0 utime on
TEST $CLI volume start $V0;
logdir=`gluster --print-logdir`
diff --git a/tests/basic/ctime/ctime-heal-symlinks.t b/tests/basic/ctime/ctime-heal-symlinks.t
index 3922b12546d..547b1807e94 100644
--- a/tests/basic/ctime/ctime-heal-symlinks.t
+++ b/tests/basic/ctime/ctime-heal-symlinks.t
@@ -9,8 +9,6 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
TEST $CLI volume set $V0 performance.stat-prefetch off
-TEST $CLI volume set $V0 ctime on
-TEST $CLI volume set $V0 utime on
TEST $CLI volume start $V0
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
@@ -38,8 +36,6 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{0,1,2}
TEST $CLI volume set $V0 performance.stat-prefetch off
-TEST $CLI volume set $V0 ctime on
-TEST $CLI volume set $V0 utime on
TEST $CLI volume start $V0
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
diff --git a/tests/basic/ctime/ctime-mdata-legacy-files.t b/tests/basic/ctime/ctime-mdata-legacy-files.t
new file mode 100644
index 00000000000..2e782d5c99d
--- /dev/null
+++ b/tests/basic/ctime/ctime-mdata-legacy-files.t
@@ -0,0 +1,83 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+###############################################################################
+#Replica volume
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+#Disable ctime and create a file; the file won't contain the "trusted.glusterfs.mdata" xattr
+TEST $CLI volume set $V0 ctime off
+
+TEST "mkdir $M0/DIR"
+TEST "echo hello_world > $M0/DIR/FILE"
+
+#Verify absence of xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}0/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}0/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}1/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}1/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}2/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}2/DIR/FILE"
+
+#Enable ctime
+TEST $CLI volume set $V0 ctime on
+sleep 3
+TEST stat $M0/DIR/FILE
+
+#Verify presence of the "trusted.glusterfs.mdata" xattr on the backend
+#The lookup above should have created the xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}0/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}0/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}1/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}1/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}2/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}2/DIR/FILE"
+
+###############################################################################
+#Disperse Volume
+
+TEST $CLI volume create $V1 disperse 3 redundancy 1 $H0:$B0/${V1}{0,1,2}
+TEST $CLI volume set $V1 performance.stat-prefetch off
+TEST $CLI volume start $V1
+
+TEST glusterfs --volfile-id=$V1 --volfile-server=$H0 --entry-timeout=0 $M1;
+
+#Disable ctime and create a file; the file won't contain the "trusted.glusterfs.mdata" xattr
+TEST $CLI volume set $V1 ctime off
+TEST "mkdir $M1/DIR"
+TEST "echo hello_world > $M1/DIR/FILE"
+
+#Verify absence of xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}0/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}0/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}1/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}1/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}2/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}2/DIR/FILE"
+
+#Enable ctime
+TEST $CLI volume set $V1 ctime on
+sleep 3
+TEST stat $M1/DIR/FILE
+
+#Verify presence of the "trusted.glusterfs.mdata" xattr on the backend
+#The lookup above should have created the xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}0/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}0/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}1/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}1/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}2/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}2/DIR/FILE"
+
+cleanup;
+###############################################################################
diff --git a/tests/basic/ctime/ctime-noatime.t b/tests/basic/ctime/ctime-noatime.t
index d71528c566c..609ccbd72c1 100644
--- a/tests/basic/ctime/ctime-noatime.t
+++ b/tests/basic/ctime/ctime-noatime.t
@@ -28,8 +28,6 @@ TEST $CLI volume set $V0 performance.open-behind off
TEST $CLI volume set $V0 performance.write-behind off
TEST $CLI volume set $V0 performance.io-cache off
-TEST $CLI volume set $V0 utime on
-TEST $CLI volume set $V0 ctime on
TEST $CLI volume start $V0
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
diff --git a/tests/basic/ctime/ctime-readdir.c b/tests/basic/ctime/ctime-readdir.c
new file mode 100644
index 00000000000..8760db29ae8
--- /dev/null
+++ b/tests/basic/ctime/ctime-readdir.c
@@ -0,0 +1,29 @@
+#include <stdio.h>
+#include <dirent.h>
+#include <string.h>
+#include <assert.h>
+
+int
+main(int argc, char **argv)
+{
+ DIR *dir = NULL;
+ struct dirent *entry = NULL;
+ int ret = 0;
+ char *path = NULL;
+
+ assert(argc == 2);
+ path = argv[1];
+
+ dir = opendir(path);
+ if (!dir) {
+ printf("opendir(%s) failed.\n", path);
+ return -1;
+ }
+
+ while ((entry = readdir(dir)) != NULL) {
+ }
+ if (dir)
+ closedir(dir);
+
+ return ret;
+}
diff --git a/tests/basic/ctime/ctime-readdir.t b/tests/basic/ctime/ctime-readdir.t
new file mode 100644
index 00000000000..4564fc1b667
--- /dev/null
+++ b/tests/basic/ctime/ctime-readdir.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 3 ${H0}:$B0/brick{1,2,3};
+TEST $CLI volume set $V0 performance.stat-prefetch on
+TEST $CLI volume set $V0 performance.readdir-ahead off
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+TEST mkdir $M0/dir0
+TEST "echo hello_world > $M0/dir0/FILE"
+
+ctime1=$(stat -c %Z $M0/dir0/FILE)
+echo "Mount change time: $ctime1"
+
+sleep 2
+
+#Write to back end directly to modify ctime of backend file
+TEST "echo write_from_backend >> $B0/brick1/dir0/FILE"
+TEST "echo write_from_backend >> $B0/brick2/dir0/FILE"
+TEST "echo write_from_backend >> $B0/brick3/dir0/FILE"
+echo "Backend change time"
+echo "brick1: $(stat -c %Z $B0/brick1/dir0/FILE)"
+echo "brick2: $(stat -c %Z $B0/brick2/dir0/FILE)"
+echo "brick3: $(stat -c %Z $B0/brick3/dir0/FILE)"
+
+#Stop and start to hit the case of no inode for readdir
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+TEST build_tester $(dirname $0)/ctime-readdir.c
+
+#Do readdir
+TEST ./$(dirname $0)/ctime-readdir $M0/dir0
+
+EXPECT "$ctime1" stat -c %Z $M0/dir0/FILE
+echo "Mount change time after readdir $(stat -c %Z $M0/dir0/FILE)"
+
+cleanup_tester $(dirname $0)/ctime-readdir
+
+cleanup;
diff --git a/tests/basic/ctime/ctime-rep-heal.t b/tests/basic/ctime/ctime-rep-heal.t
new file mode 100644
index 00000000000..20517c74971
--- /dev/null
+++ b/tests/basic/ctime/ctime-rep-heal.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+#
+# This will test self healing of ctime xattr 'trusted.glusterfs.mdata'
+#
+###
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+#Create and start volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3}
+TEST $CLI volume start $V0
+
+#Mount the volume
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+
+# Create files
+mkdir $M0/dir1
+echo "Initial content" > $M0/file1
+
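+# As used below, get_mdata_count tells how many of the given bricks carry the
+# mdata xattr for a path and get_mdata_uniq_count how many distinct values
+# they hold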
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/file1
+
+# Kill brick
+TEST kill_brick $V0 $H0 $B0/${V0}3
+
+echo "B3 is down" >> $M0/file1
+echo "Change dir1 time attributes" > $M0/dir1/dir1_file1
+echo "Entry heal file" > $M0/entry_heal_file1
+mkdir $M0/entry_heal_dir1
+
+# Check xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_uniq_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_uniq_count $B0/${V0}{1..3}/file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/dir1/dir1_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1/dir1_file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/entry_heal_dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_dir1
+
+TEST $CLI volume start $V0 force
+$CLI volume heal $V0
+
+# Check xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1/dir1_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1/dir1_file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/entry_heal_dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_dir1
+
+cleanup;
diff --git a/tests/basic/ctime/ctime-rep-rebalance.t b/tests/basic/ctime/ctime-rep-rebalance.t
new file mode 100644
index 00000000000..866cf87e6cb
--- /dev/null
+++ b/tests/basic/ctime/ctime-rep-rebalance.t
@@ -0,0 +1,41 @@
+#!/bin/bash
+#
+# This will test healing of ctime xattr 'trusted.glusterfs.mdata' after add-brick and rebalance
+#
+###
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+#Create and start volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..5}
+TEST $CLI volume start $V0
+
+#Mount the volume
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+
+# Create files
+mkdir $M0/dir1
+
+# Add brick
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{6..8}
+
+#Trigger rebalance
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+#Verify ctime xattr heal on directory
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}6/dir1"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}7/dir1"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}8/dir1"
+
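+#The healed mdata value must also be identical across the newly added bricks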
+b6_mdata=$(get_mdata "$B0/${V0}6/dir1")
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "${b6_mdata}" get_mdata $B0/${V0}7/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "${b6_mdata}" get_mdata $B0/${V0}8/dir1
+
+cleanup;
diff --git a/tests/basic/ctime/ctime-utimesat.t b/tests/basic/ctime/ctime-utimesat.t
new file mode 100644
index 00000000000..540e57aec83
--- /dev/null
+++ b/tests/basic/ctime/ctime-utimesat.t
@@ -0,0 +1,28 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.read-after-open off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+touch $M0/FILE
+
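+# With ctime enabled, a freshly created file must report identical atime,
+# mtime and ctime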
+atime=$(stat -c "%.X" $M0/FILE)
+EXPECT $atime stat -c "%.Y" $M0/FILE
+EXPECT $atime stat -c "%.Z" $M0/FILE
+
+cleanup
diff --git a/tests/basic/distribute/brick-down.t b/tests/basic/distribute/brick-down.t
new file mode 100644
index 00000000000..522ccc07210
--- /dev/null
+++ b/tests/basic/distribute/brick-down.t
@@ -0,0 +1,83 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../common-utils.rc
+. $(dirname $0)/../../dht.rc
+
+# Test 1 overview:
+# ----------------
+# Test whether lookups are sent after a brick comes up again
+#
+# 1. Create a 3 brick pure distribute volume
+# 2. Fuse mount the volume so the layout is set on the root
+# 3. Kill one brick and try to create a directory which hashes to that brick.
+# It should fail with EIO.
+# 4. Restart the brick that was killed.
+# 5. Do not remount the volume. Try to create the same directory as in step 3.
+#    It should now succeed.
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0-{1..3}
+TEST $CLI volume start $V0
+
+# We want the lookup to reach DHT
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+# Mount using FUSE and lookup the mount so a layout is set on the brick root
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+
+ls $M0/
+
+TEST mkdir $M0/level1
+
+# Find a dirname that will hash to the brick we are going to kill
+hashed=$V0-client-1
+TEST dht_first_filename_with_hashsubvol "$hashed" $M0 "dir-"
+roottestdir=$fn_return_val
+
+hashed=$V0-client-1
+TEST dht_first_filename_with_hashsubvol "$hashed" $M0/level1 "dir-"
+level1testdir=$fn_return_val
+
+
+TEST kill_brick $V0 $H0 $B0/$V0-2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" brick_up_status $V0 $H0 $B0/$V0-2
+
+TEST $CLI volume status $V0
+
+
+# Unmount and mount the volume again so dht has an incomplete in memory layout
+
+umount -f $M0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+
+
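+# With the hashed brick down and no layout for it in memory, both mkdirs are
+# expected to fail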
+mkdir $M0/$roottestdir
+TEST [ $? -ne 0 ]
+
+mkdir $M0/level1/$level1testdir
+TEST [ $? -ne 0 ]
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/$V0-2
+
+#$CLI volume status
+
+# It takes a while for the client to reconnect to the brick
+sleep 5
+
+
+mkdir $M0/$roottestdir
+TEST [ $? -eq 0 ]
+
+mkdir $M0/level1/$level1testdir
+TEST [ $? -eq 0 ]
+
+# Cleanup
+cleanup
+
+
diff --git a/tests/basic/distribute/bug-1265677-use-readdirp.t b/tests/basic/distribute/bug-1265677-use-readdirp.t
index 5b274d62667..eef8affc8b9 100644
--- a/tests/basic/distribute/bug-1265677-use-readdirp.t
+++ b/tests/basic/distribute/bug-1265677-use-readdirp.t
@@ -8,8 +8,7 @@
cleanup
TEST glusterd
TEST pidof glusterd
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0..1}
-TEST $CLI volume heal $V0 disable
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0..1}
TEST $CLI volume set $V0 nfs.disable yes
TEST $CLI volume set $V0 dht.force-readdirp yes
TEST $CLI volume set $V0 performance.readdir-ahead off
diff --git a/tests/basic/distribute/dir-heal.t b/tests/basic/distribute/dir-heal.t
new file mode 100644
index 00000000000..851f765b245
--- /dev/null
+++ b/tests/basic/distribute/dir-heal.t
@@ -0,0 +1,145 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../common-utils.rc
+
+# Test 1 overview:
+# ----------------
+#
+# 1. Kill one brick of the volume.
+# 2. Create directories and change directory properties.
+# 3. Bring up the brick and access the directories.
+# 4. Check the permissions and xattrs on the backend.
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0-{1..3}
+TEST $CLI volume start $V0
+
+# We want the lookup to reach DHT
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+# Mount using FUSE, kill a brick and create directories
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+
+ls $M0/
+cd $M0
+
+TEST kill_brick $V0 $H0 $B0/$V0-1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" brick_up_status $V0 $H0 $B0/$V0-1
+
+TEST mkdir dir{1..4}
+
+# No change for dir1
+# Change permissions for dir2
+# Set xattr on dir3
+# Change permissions and set xattr on dir4
+
+TEST chmod 777 $M0/dir2
+
+TEST setfattr -n "user.test" -v "test" $M0/dir3
+
+TEST chmod 777 $M0/dir4
+TEST setfattr -n "user.test" -v "test" $M0/dir4
+
+
+# Start all bricks
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/$V0-1
+
+#$CLI volume status
+
+# It takes a while for the client to reconnect to the brick
+sleep 5
+
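+# A lookup on each directory triggers DHT to self-heal it on the revived brick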
+stat $M0/dir* > /dev/null
+
+# Check that directories have been created on the brick that was killed
+
+TEST ls $B0/$V0-1/dir1
+
+TEST ls $B0/$V0-1/dir2
+EXPECT "777" stat -c "%a" $B0/$V0-1/dir2
+
+TEST ls $B0/$V0-1/dir3
+EXPECT "test" getfattr -n "user.test" --absolute-names --only-values $B0/$V0-1/dir3
+
+
+TEST ls $B0/$V0-1/dir4
+EXPECT "777" stat -c "%a" $B0/$V0-1/dir4
+EXPECT "test" getfattr -n "user.test" --absolute-names --only-values $B0/$V0-1/dir4
+
+
+TEST rm -rf $M0/*
+
+cd
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+
+# Test 2 overview:
+# ----------------
+# 1. Create directories with all bricks up.
+# 2. Kill a brick, change directory properties and set a user xattr.
+# 3. Bring up the brick and access the directories.
+# 4. Check the permissions and xattrs on the backend.
+
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+
+ls $M0/
+cd $M0
+TEST mkdir dir{1..4}
+
+TEST kill_brick $V0 $H0 $B0/$V0-1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" brick_up_status $V0 $H0 $B0/$V0-1
+
+# No change for dir1
+# Change permissions for dir2
+# Set xattr on dir3
+# Change permissions and set xattr on dir4
+
+TEST chmod 777 $M0/dir2
+
+TEST setfattr -n "user.test" -v "test" $M0/dir3
+
+TEST chmod 777 $M0/dir4
+TEST setfattr -n "user.test" -v "test" $M0/dir4
+
+
+# Start all bricks
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/$V0-1
+
+#$CLI volume status
+
+# It takes a while for the client to reconnect to the brick
+sleep 5
+
+stat $M0/dir* > /dev/null
+
+# Check directories on the brick that was killed
+
+TEST ls $B0/$V0-1/dir2
+EXPECT "777" stat -c "%a" $B0/$V0-1/dir2
+
+TEST ls $B0/$V0-1/dir3
+EXPECT "test" getfattr -n "user.test" --absolute-names --only-values $B0/$V0-1/dir3
+
+
+TEST ls $B0/$V0-1/dir4
+EXPECT "777" stat -c "%a" $B0/$V0-1/dir4
+EXPECT "test" getfattr -n "user.test" --absolute-names --only-values $B0/$V0-1/dir4
+cd
+
+
+# Cleanup
+cleanup
+
diff --git a/tests/basic/distribute/file-create.t b/tests/basic/distribute/file-create.t
new file mode 100644
index 00000000000..41b662eefe2
--- /dev/null
+++ b/tests/basic/distribute/file-create.t
@@ -0,0 +1,120 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../common-utils.rc
+. $(dirname $0)/../../dht.rc
+
+# Test overview: Test file creation in various scenarios
+
+
+# Test 1 : "dht.file.hashed-subvol.<filename>"
+# Get the hashed subvolume for file1 in dir1 using xattr
+# Create file1 in dir1
+# Check if the file is created in the brick returned by xattr
+
+hashdebugxattr="dht.file.hashed-subvol."
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+# We want fixed size bricks to test min-free-disk
+
+# Create 2 loop devices, one per brick.
+TEST truncate -s 25M $B0/brick1
+TEST truncate -s 25M $B0/brick2
+
+TEST L1=`SETUP_LOOP $B0/brick1`
+TEST MKFS_LOOP $L1
+
+TEST L2=`SETUP_LOOP $B0/brick2`
+TEST MKFS_LOOP $L2
+
+
+TEST mkdir -p $B0/${V0}{1,2}
+
+TEST MOUNT_LOOP $L1 $B0/${V0}1
+TEST MOUNT_LOOP $L2 $B0/${V0}2
+
+
+# Create a plain distribute volume with 2 subvols.
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0;
+EXPECT "Started" volinfo_field $V0 'Status';
+
+TEST $CLI volume set $V0 cluster.min-free-disk 40%
+#TEST $CLI volume set $V0 client-log-level DEBUG
+
+# Mount using FUSE and create a file
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+TEST mkdir $M0/dir1
+
+######################################################
+# Test 1 : Test file creation on correct hashed subvol
+######################################################
+
+hashed="$V0-client-0"
+TEST dht_first_filename_with_hashsubvol "$hashed" $M0/dir1 "big-"
+firstfile=$fn_return_val
+
+#Create a large file to fill up $hashed past the min-free-disk limits
+TEST dd if=/dev/zero of=$M0/dir1/$firstfile bs=1M count=15
+
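+# The .meta graph exposes each client xlator's remote-subvolume option, i.e.
+# its backend brick path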
+brickpath_0=$(cat "$M0/.meta/graphs/active/$hashed/options/remote-subvolume")
+brickpath_1=$(cat "$M0/.meta/graphs/active/$V0-client-1/options/remote-subvolume")
+
+TEST stat "$brickpath_0/dir1/$firstfile"
+EXPECT "0" is_dht_linkfile "$brickpath_0/dir1/$firstfile"
+
+
+######################################################
+# Test 2: Create a file which hashes to the subvol which has crossed
+# the min-free-disk limit. It should be created on the other subvol
+######################################################
+
+# DHT only checks disk usage every second. Create a new file and introduce a
+# delay here to ensure DHT updates the in memory disk usage
+sleep 2
+TEST dd if=/dev/zero of=$M0/dir1/file-2 bs=1024 count=1
+
+# Find a file that will hash to $hashed
+TEST dht_first_filename_with_hashsubvol $hashed $M0/dir1 "newfile-"
+newfile=$fn_return_val
+echo $newfile
+
+# Create $newfile - it should be created on the other subvol as its hash subvol
+# has crossed the min-free-disk limit
+TEST dd if=/dev/zero of=$M0/dir1/$newfile bs=1024 count=20
+TEST stat "$brickpath_0/dir1/$newfile"
+EXPECT "1" is_dht_linkfile "$brickpath_0/dir1/$newfile"
+
+
+#TEST rm -rf $M0/dir1/$firstfile
+#TEST rm -rf $M0/dir1/$newfile
+
+
+######################################################
+# Test 3: Test dht_filter_loc_subvol_key
+######################################################
+
+TEST dht_first_filename_with_hashsubvol $V0-client-1 $M0/dir1 "filter-"
+newfile=$fn_return_val
+echo $newfile
+TEST dd if=/dev/zero of="$M0/dir1/$newfile@$V0-dht:$hashed" bs=1024 count=20
+TEST stat $M0/dir1/$newfile
+TEST stat "$brickpath_0/dir1/$newfile"
+EXPECT "1" is_dht_linkfile "$brickpath_1/dir1/$newfile"
+
+
+force_umount $M0
+TEST $CLI volume stop $V0
+UMOUNT_LOOP ${B0}/${V0}{1,2}
+rm -f ${B0}/brick{1,2}
+
+
+# Cleanup
+cleanup
+
diff --git a/tests/basic/distribute/file-rename.t b/tests/basic/distribute/file-rename.t
new file mode 100644
index 00000000000..63111b8ad8f
--- /dev/null
+++ b/tests/basic/distribute/file-rename.t
@@ -0,0 +1,1021 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../common-utils.rc
+
+# Test overview:
+# Test all combinations of src-hashed/src-cached/dst-hashed/dst-cached
+
+hashdebugxattr="dht.file.hashed-subvol."
+
+function get_brick_index {
+ local inpath=$1
+ brickroot=$(getfattr -m . -n trusted.glusterfs.pathinfo $inpath | tr ' ' '\n' | sed -n 's/<POSIX(\(.*\)):.*:.*>.*/\1/p')
+ echo ${brickroot:(-1)}
+}
+
+function get_brick_path_for_subvol {
+ local in_subvol=$1
+ local in_brickpath
+
+ in_brickpath=$(cat "$M0/.meta/graphs/active/$in_subvol/options/remote-subvolume")
+ echo $in_brickpath
+
+}
+
+#Checks that file exists only on hashed and/or cached
+function file_existence_check
+{
+ local in_file_path=$1
+ local in_hashed=$2
+ local in_cached=$3
+ local in_client_subvol
+ local in_brickpath
+ local ret
+
+ for i in {0..3}
+ do
+ in_client_subvol="$V0-client-$i"
+ in_brickpath=$(cat "$M0/.meta/graphs/active/$in_client_subvol/options/remote-subvolume")
+ stat "$in_brickpath/$in_file_path" 2>/dev/null
+ ret=$?
+ # Either the linkto or the data file must exist on the hashed
+ if [ "$in_client_subvol" == "$in_hashed" ]; then
+ if [ $ret -ne 0 ]; then
+ return 1
+ fi
+ continue
+ fi
+
+ # If the cached is non-null, we expect the file to exist on it
+ if [ "$in_client_subvol" == "$in_cached" ]; then
+ if [ $ret -ne 0 ]; then
+ return 1
+ fi
+ continue
+ fi
+
+ if [ $ret -eq 0 ]; then
+ return 2
+ fi
+ done
+ return 0
+}
+
+
+# Check if file exists on any of the bricks of the volume
+function file_does_not_exist
+{
+ local inpath=$1
+ for i in `seq 0 3`
+ do
+ file_path=$B0/$V0-$i/$inpath
+ if [ -f "$file_path" ]; then
+ echo "1"
+ return 1
+ fi
+ done
+ return 0
+}
+
+
+# Input: filename dirpath
+function get_hash_subvol
+{
+ hash_subvol=$(getfattr --only-values -n "$hashdebugxattr$1" $2 2>/dev/null)
+}
+
+
+
+# Find the first filename that hashes to a subvol
+# other than $1
+
+function first_filename_with_diff_hashsubvol
+{
+ local in_subvol=$1
+ local in_path=$2
+ local file_pattern=$3
+ local in_hash_subvol
+
+ for i in {1..100}
+ do
+ dstfilename="$file_pattern$i"
+        get_hash_subvol "$dstfilename" "$in_path"
+        in_hash_subvol=$hash_subvol
+        if [ "$in_subvol" != "$in_hash_subvol" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+# Find the first filename that hashes to the same subvol
+# as $1
+function first_filename_with_same_hashsubvol
+{
+ local in_subvol=$1
+ local in_path=$2
+ local in_hash_subvol
+ local file_pattern=$3
+
+ for i in {1..100}
+ do
+ dstfilename="$file_pattern$i"
+ get_hash_subvol "$dstfilename" "$in_path"
+ in_hash_subvol=$hash_subvol
+# echo $in_hash_subvol
+ if [ "$in_subvol" == "$in_hash_subvol" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+function file_is_linkto
+{
+ local brick_filepath=$1
+
+ test=$(stat $brick_filepath 2>&1)
+ if [ $? -ne 0 ]; then
+ echo "2"
+ return
+ fi
+
+ test=$(getfattr -n trusted.glusterfs.dht.linkto -e text $brick_filepath 2>&1)
+
+ if [ $? -eq 0 ]; then
+ echo "1"
+ else
+ echo "0"
+ fi
+}
+
+
+
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+
+# We need at least 4 bricks to test all combinations of hashed and
+# cached files
+
+TEST $CLI volume create $V0 $H0:$B0/$V0-{0..3}
+TEST $CLI volume start $V0
+
+# Mount using FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+
+################################################################
+# The first set of tests covers those where the dst file does not exist
+# dst-cached = NULL
+#
+###############################################################
+
+################### Test 1 ####################################
+#
+# src-hashed = src-cached = dst-hashed
+# dst-cached = null
+# src-file = src-1
+
+echo " **** Test 1 **** "
+
+src_file="src-1"
+
+TEST mkdir $M0/test-1
+TEST touch $M0/test-1/$src_file
+
+TEST get_hash_subvol $src_file $M0/test-1
+src_hashed=$hash_subvol
+#echo "Hashed subvol for $src_file: " $src_hashed
+
+# Find a file name that hashes to the same subvol as $src_file
+TEST first_filename_with_same_hashsubvol "$src_hashed" "$M0/test-1" "dst-"
+#echo "dst-file name: " $dstfilename
+dst_hashed=$src_hashed
+
+src_hash_brick=$(get_brick_path_for_subvol $src_hashed)
+
+echo "Renaming $src_file to $dstfilename"
+
+TEST mv $M0/test-1/$src_file $M0/test-1/$dstfilename
+
+# Expected:
+# dst file is accessible from the mount point
+# dst file exists only on the hashed brick.
+# no linkto files on any bricks
+# src files do not exist
+
+
+TEST stat $M0/test-1/$dstfilename 2>/dev/null
+TEST file_existence_check test-1/$dstfilename $src_hashed
+TEST file_does_not_exist test-1/$src_file
+EXPECT "0" file_is_linkto $src_hash_brick/test-1/$dstfilename
+
+
+################### Test 2 ####################################
+
+# src-hashed = src-cached != dst-hashed
+# dst-cached = null
+
+echo " **** Test 2 **** "
+
+src_file="src-1"
+
+TEST mkdir $M0/test-2
+TEST touch $M0/test-2/$src_file
+
+TEST get_hash_subvol $src_file $M0/test-2
+src_hashed=$hash_subvol
+#echo "Hashed subvol for $src_file: " $src_hashed
+
+# Find a file name that hashes to a diff hashed subvol than $src_file
+TEST first_filename_with_diff_hashsubvol "$src_hashed" "$M0/test-2" "dst-"
+echo "dst-file name: " $dstfilename
+TEST get_hash_subvol $dstfilename $M0/test-2
+dst_hashed=$hash_subvol
+
+src_hash_brick=$(get_brick_path_for_subvol $src_hashed)
+dst_hash_brick=$(get_brick_path_for_subvol $dst_hashed)
+
+echo "Renaming $src_file to $dstfilename"
+
+TEST mv $M0/test-2/$src_file $M0/test-2/$dstfilename
+
+
+# Expected:
+# dst file is accessible from the mount point
+# dst data file on src_hashed and dst linkto file on dst_hashed
+# src files do not exist
+
+
+TEST stat $M0/test-2/$dstfilename 2>/dev/null
+TEST file_existence_check test-2/$dstfilename $dst_hashed $src_hashed
+TEST file_does_not_exist test-2/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-2/$dstfilename
+EXPECT "0" file_is_linkto $src_hash_brick/test-2/$dstfilename
+
+################### Test 3 ####################################
+
+# src-hashed = dst-hashed != src-cached
+
+echo " **** Test 3 **** "
+
+src_file0="abc-1"
+
+# 1. Create src file with src_cached != src_hashed
+TEST mkdir $M0/test-3
+TEST touch $M0/test-3/$src_file0
+
+TEST get_hash_subvol $src_file0 $M0/test-3
+src_cached=$hash_subvol
+#echo "Hashed subvol for $src_file0: " $src_cached
+
+# Find a file name that hashes to a diff hashed subvol than $src_file0
+TEST first_filename_with_diff_hashsubvol "$src_cached" "$M0/test-3" "src-"
+echo "src-file name: " $dstfilename
+src_file=$dstfilename
+
+TEST mv $M0/test-3/$src_file0 $M0/test-3/$src_file
+
+TEST get_hash_subvol $src_file $M0/test-3
+src_hashed=$hash_subvol
+
+
+# 2. Rename src to dst
+TEST first_filename_with_same_hashsubvol "$src_hashed" "$M0/test-3" "dst-"
+#echo "dst-file name: " $dstfilename
+
+src_hash_brick=$(get_brick_path_for_subvol $src_hashed)
+src_cached_brick=$(get_brick_path_for_subvol $src_cached)
+
+echo "Renaming $src_file to $dstfilename"
+
+TEST mv $M0/test-3/$src_file $M0/test-3/$dstfilename
+
+
+# Expected:
+# dst file is accessible from the mount point
+TEST stat $M0/test-3/$dstfilename 2>/dev/null
+
+# src file does not exist
+TEST file_does_not_exist test-3/$src_file
+
+# dst linkto file on src_hashed and dst data file on src_cached
+TEST file_existence_check test-3/$dstfilename $src_hashed $src_cached
+
+EXPECT "1" file_is_linkto $src_hash_brick/test-3/$dstfilename
+EXPECT "0" file_is_linkto $src_cached_brick/test-3/$dstfilename
+
+
+
+################### Test 4 ####################################
+
+# src-cached = dst-hashed != src-hashed
+
+echo " **** Test 4 **** "
+
+src_file0="abc-1"
+
+# 1. Create src file with src_cached != src_hashed
+TEST mkdir $M0/test-4
+TEST touch $M0/test-4/$src_file0
+
+TEST get_hash_subvol $src_file0 $M0/test-4
+src_cached=$hash_subvol
+#echo "Hashed subvol for $src_file0: " $src_cached
+
+# Find a file name that hashes to a diff hashed subvol than $src_file0
+TEST first_filename_with_diff_hashsubvol "$src_cached" "$M0/test-4" "src-"
+src_file=$dstfilename
+
+TEST mv $M0/test-4/$src_file0 $M0/test-4/$src_file
+
+TEST get_hash_subvol $src_file $M0/test-4
+src_hashed=$hash_subvol
+
+
+# 2. Rename src to dst
+TEST first_filename_with_same_hashsubvol "$src_cached" "$M0/test-4" "dst-"
+#echo "dst-file name: " $dstfilename
+
+src_hash_brick=$(get_brick_path_for_subvol $src_hashed)
+src_cached_brick=$(get_brick_path_for_subvol $src_cached)
+
+echo "Renaming $src_file to $dstfilename"
+
+TEST mv $M0/test-4/$src_file $M0/test-4/$dstfilename
+
+# Expected:
+# dst file is accessible from the mount point
+TEST stat $M0/test-4/$dstfilename 2>/dev/null
+
+# src file does not exist
+TEST file_does_not_exist test-4/$src_file
+
+# dst data file only on src_cached (which is also dst_hashed); no linkto file expected
+TEST file_existence_check test-4/$dstfilename $src_cached
+
+EXPECT "0" file_is_linkto $src_cached_brick/test-4/$dstfilename
+
+
+################### Test 5 ####################################
+
+# src-cached != src-hashed
+# src-hashed != dst-hashed
+# src-cached != dst-hashed
+
+
+echo " **** Test 5 **** "
+
+# 1. Create src and dst files
+
+TEST mkdir $M0/test-5
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-5" "abc-"
+src_file0=$dstfilename
+
+TEST touch $M0/test-5/$src_file0
+
+TEST get_hash_subvol $src_file0 $M0/test-5
+src_cached=$hash_subvol
+#echo "Hashed subvol for $src_file0: " $src_cached
+
+# Find a src file name that hashes to $V0-client-1
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-5" "src-"
+src_file=$dstfilename
+
+TEST mv $M0/test-5/$src_file0 $M0/test-5/$src_file
+
+TEST get_hash_subvol $src_file $M0/test-5
+src_hashed=$hash_subvol
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-5" "dst-"
+#echo "dst-file name: " $dstfilename
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-2")
+src_cached_brick=$(get_brick_path_for_subvol $src_cached)
+
+
+# 2. Rename src to dst
+echo "Renaming $src_file to $dstfilename"
+
+TEST mv $M0/test-5/$src_file $M0/test-5/$dstfilename
+
+
+# 3. Validate
+
+# Expected:
+# dst file is accessible from the mount point
+TEST stat $M0/test-5/$dstfilename 2>/dev/null
+
+# src file does not exist
+TEST file_does_not_exist test-5/$src_file
+
+# dst linkto file on dst_hashed and dst data file on src_cached
+
+EXPECT "0" file_is_linkto $src_cached_brick/test-5/$dstfilename
+EXPECT "1" file_is_linkto $dst_hash_brick/test-5/$dstfilename
+
+
+########################################################################
+#
+# The Dst file exists
+#
+########################################################################
+
+################### Test 6 ####################################
+
+# src_hash = src_cached
+# dst_hash = dst_cached
+# dst_hash = src_hash
+
+
+echo " **** Test 6 **** "
+
+TEST mkdir $M0/test-6
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-6" "src-"
+src_file=$dstfilename
+
+TEST touch $M0/test-6/$src_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-6" "dst-"
+dst_file=$dstfilename
+
+TEST touch $M0/test-6/$dst_file
+
+
+# 2. Rename src to dst
+
+TEST mv $M0/test-6/$src_file $M0/test-6/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-6/$dst_file 2>/dev/null
+TEST file_existence_check test-6/$dst_file "$V0-client-0"
+TEST file_does_not_exist test-6/$src_file
+EXPECT "0" file_is_linkto $dst_hash_brick/test-6/$dst_file
+
+
+################### Test 7 ####################################
+
+# src_hash = src_cached
+# dst_hash = dst_cached
+# dst_hash != src_hash
+
+
+echo " **** Test 7 **** "
+
+TEST mkdir $M0/test-7
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-7" "src-"
+src_file=$dstfilename
+
+TEST touch $M0/test-7/$src_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-7" "dst-"
+dst_file=$dstfilename
+
+TEST touch $M0/test-7/$dst_file
+
+
+# 2. Rename src to dst
+
+TEST mv $M0/test-7/$src_file $M0/test-7/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-1")
+src_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-7/$dst_file 2>/dev/null
+TEST file_existence_check test-7/$dst_file "$V0-client-1" "$V0-client-0"
+TEST file_does_not_exist test-7/$src_file
+
+EXPECT "0" file_is_linkto $src_hash_brick/test-7/$dst_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-7/$dst_file
+
+
+################### Test 8 ####################################
+
+# src_hash = src_cached
+# dst_hash != dst_cached
+# dst_hash != src_hash
+# dst_cached != src_hash
+
+echo " **** Test 8 **** "
+
+TEST mkdir $M0/test-8
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-8" "src-"
+src_file=$dstfilename
+TEST touch $M0/test-8/$src_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-8" "dst0-"
+dst_file0=$dstfilename
+TEST touch $M0/test-8/$dst_file0
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-8" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-8/$dst_file0 $M0/test-8/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-8/$src_file $M0/test-8/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-2")
+src_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-8/$dst_file 2>/dev/null
+TEST file_existence_check test-8/$dst_file "$V0-client-2" "$V0-client-0"
+TEST file_does_not_exist test-8/$src_file
+
+EXPECT "0" file_is_linkto $src_hash_brick/test-8/$dst_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-8/$dst_file
+
+################### Test 9 ####################################
+
+# src_hash = src_cached = dst_hash
+# dst_hash != dst_cached
+
+echo " **** Test 9 **** "
+
+TEST mkdir $M0/test-9
+
+
+# 1. Create src and dst files
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-9" "src-"
+src_file=$dstfilename
+TEST touch $M0/test-9/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-9" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-9/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-9" "dst-"
+dst_file=$dstfilename
+
+TEST mv $M0/test-9/$dst0_file $M0/test-9/$dst_file
+
+# 2. Rename the file
+
+mv $M0/test-9/$src_file $M0/test-9/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-9/$dst_file 2>/dev/null
+TEST file_existence_check test-9/$dst_file "$V0-client-0"
+TEST file_does_not_exist test-9/$src_file
+EXPECT "0" file_is_linkto $dst_hash_brick/test-9/$dst_file
+
+
+################### Test 10 ####################################
+
+# src_hash = src_cached = dst_cached
+# dst_hash != dst_cached
+
+echo " **** Test 10 **** "
+
+TEST mkdir $M0/test-10
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-10" "src-"
+src_file=$dstfilename
+TEST touch $M0/test-10/$src_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-10" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-10/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-10" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-10/$dst0_file $M0/test-10/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-10/$src_file $M0/test-10/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-1")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-10/$dst_file 2>/dev/null
+TEST file_existence_check test-10/$dst_file "$V0-client-1" "$V0-client-0"
+TEST file_does_not_exist test-10/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-10/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-10/$dst_file
+
+
+################### Test 11 ####################################
+
+# src_hash != src_cached
+# dst_hash = dst_cached = src_cached
+
+echo " **** Test 11 **** "
+
+TEST mkdir $M0/test-11
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-11" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-11/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-11" "src-"
+src_file=$dstfilename
+
+mv $M0/test-11/$src0_file $M0/test-11/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-11" "dst-"
+dst_file=$dstfilename
+TEST touch $M0/test-11/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-11/$src_file $M0/test-11/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-11/$dst_file 2>/dev/null
+TEST file_existence_check test-11/$dst_file "$V0-client-0"
+TEST file_does_not_exist test-11/$src_file
+EXPECT "0" file_is_linkto $dst_hash_brick/test-11/$dst_file
+
+
+################### Test 12 ####################################
+
+# src_hash != src_cached
+# dst_hash = dst_cached = src_hash
+
+echo " **** Test 12 **** "
+
+TEST mkdir $M0/test-12
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-12" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-12/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-12" "src-"
+src_file=$dstfilename
+
+mv $M0/test-12/$src0_file $M0/test-12/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-12" "dst-"
+dst_file=$dstfilename
+TEST touch $M0/test-12/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-12/$src_file $M0/test-12/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-1")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-12/$dst_file 2>/dev/null
+TEST file_existence_check test-12/$dst_file "$V0-client-1" "$V0-client-0"
+TEST file_does_not_exist test-12/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-12/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-12/$dst_file
+
+################### Test 13 ####################################
+
+# src_hash != src_cached
+# dst_hash = dst_cached
+# dst_hash != src_cached
+# dst_hash != src_hash
+
+echo " **** Test 13 **** "
+
+TEST mkdir $M0/test-13
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-13" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-13/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-13" "src-"
+src_file=$dstfilename
+
+mv $M0/test-13/$src0_file $M0/test-13/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-13" "dst-"
+dst_file=$dstfilename
+TEST touch $M0/test-13/$dst_file
+
+# 2. Rename the file
+
+mv $M0/test-13/$src_file $M0/test-13/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-2")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-13/$dst_file 2>/dev/null
+TEST file_existence_check test-13/$dst_file "$V0-client-2" "$V0-client-0"
+TEST file_does_not_exist test-13/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-13/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-13/$dst_file
+
+
+################### Test 14 ####################################
+
+# src_hash != src_cached
+# dst_hash = src_hash
+# dst_cached = src_cached
+
+echo " **** Test 14 **** "
+
+TEST mkdir $M0/test-14
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-14" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-14/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-14" "src-"
+src_file=$dstfilename
+
+mv $M0/test-14/$src0_file $M0/test-14/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-14" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-14/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-14" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-14/$dst0_file $M0/test-14/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-14/$src_file $M0/test-14/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-1")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-14/$dst_file 2>/dev/null
+TEST file_existence_check test-14/$dst_file "$V0-client-1" "$V0-client-0"
+TEST file_does_not_exist test-14/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-14/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-14/$dst_file
+
+################### Test 15 ####################################
+
+# src_hash != src_cached
+# dst_hash != src_hash
+# dst_hash != src_cached
+# dst_cached = src_cached
+
+echo " **** Test 15 **** "
+
+TEST mkdir $M0/test-15
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-15" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-15/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-15" "src-"
+src_file=$dstfilename
+
+mv $M0/test-15/$src0_file $M0/test-15/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-15" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-15/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-15" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-15/$dst0_file $M0/test-15/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-15/$src_file $M0/test-15/$dst_file
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-2")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-15/$dst_file 2>/dev/null
+TEST file_existence_check test-15/$dst_file "$V0-client-2" "$V0-client-0"
+TEST file_does_not_exist test-15/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-15/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-15/$dst_file
+
+
+
+################### Test 16 ####################################
+
+# src_hash != src_cached
+# dst_hash = src_cached
+# dst_cached = src_hash
+
+echo " **** Test 16 **** "
+
+TEST mkdir $M0/test-16
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-16" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-16/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-16" "src-"
+src_file=$dstfilename
+
+mv $M0/test-16/$src0_file $M0/test-16/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-16" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-16/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-16" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-16/$dst0_file $M0/test-16/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-16/$src_file $M0/test-16/$dst_file
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-16/$dst_file 2>/dev/null
+TEST file_existence_check test-16/$dst_file "$V0-client-0"
+TEST file_does_not_exist test-16/$src_file
+EXPECT "0" file_is_linkto $dst_hash_brick/test-16/$dst_file
+
+
+################### Test 17 ####################################
+
+# src_hash != src_cached
+# dst_hash != dst_cached
+# dst_hash != src_hash != src_cached
+# dst_cached = src_hash
+
+
+echo " **** Test 17 **** "
+
+TEST mkdir $M0/test-17
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-17" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-17/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-17" "src-"
+src_file=$dstfilename
+
+mv $M0/test-17/$src0_file $M0/test-17/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-17" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-17/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-17" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-17/$dst0_file $M0/test-17/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-17/$src_file $M0/test-17/$dst_file
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-2")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-17/$dst_file 2>/dev/null
+TEST file_existence_check test-17/$dst_file "$V0-client-2" "$V0-client-0"
+TEST file_does_not_exist test-17/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-17/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-17/$dst_file
+
+
+################### Test 18 ####################################
+
+# src_hash != src_cached
+# dst_hash != dst_cached
+# dst_hash != src_hash != src_cached != dst_cached
+
+
+echo " **** Test 18 **** "
+
+TEST mkdir $M0/test-18
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-18" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-18/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-18" "src-"
+src_file=$dstfilename
+
+mv $M0/test-18/$src0_file $M0/test-18/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-18" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-18/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-3" "$M0/test-18" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-18/$dst0_file $M0/test-18/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-18/$src_file $M0/test-18/$dst_file
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-3")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-18/$dst_file 2>/dev/null
+TEST file_existence_check test-18/$dst_file "$V0-client-3" "$V0-client-0"
+TEST file_does_not_exist test-18/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-18/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-18/$dst_file
+
+
+# Cleanup
+cleanup
+
diff --git a/tests/basic/distribute/rebal-all-nodes-migrate.t b/tests/basic/distribute/rebal-all-nodes-migrate.t
deleted file mode 100644
index acc4ffefecc..00000000000
--- a/tests/basic/distribute/rebal-all-nodes-migrate.t
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-. $(dirname $0)/../../dht.rc
-
-
-# Check if every single rebalance process migrated some files
-
-function cluster_rebal_all_nodes_migrated_files {
- val=0
- a=$($CLI_1 volume rebalance $V0 status | grep "completed" | awk '{print $2}');
-# echo $a
- b=($a)
- for i in "${b[@]}"
- do
-# echo "$i";
- if [ "$i" -eq "0" ]; then
- echo "false";
- val=1;
- fi
- done
- echo $val
-}
-
-cleanup
-
-TEST launch_cluster 3;
-TEST $CLI_1 peer probe $H2;
-TEST $CLI_1 peer probe $H3;
-EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
-
-
-#Start with a pure distribute volume (multiple bricks on the same node)
-TEST $CLI_1 volume create $V0 $H1:$B1/dist1 $H1:$B1/dist2 $H2:$B2/dist3 $H2:$B2/dist4
-
-TEST $CLI_1 volume start $V0
-$CLI_1 volume info $V0
-
-#TEST $CLI_1 volume set $V0 client-log-level DEBUG
-
-## Mount FUSE
-TEST glusterfs -s $H1 --volfile-id $V0 $M0;
-
-TEST mkdir $M0/dir1 2>/dev/null;
-TEST touch $M0/dir1/file-{1..500}
-
-## Add-brick and run rebalance to force file migration
-TEST $CLI_1 volume add-brick $V0 $H1:$B1/dist5 $H2:$B2/dist6
-
-#Start a rebalance
-TEST $CLI_1 volume rebalance $V0 start force
-
-#volume rebalance status should work
-#TEST $CLI_1 volume rebalance $V0 status
-#$CLI_1 volume rebalance $V0 status
-
-EXPECT_WITHIN $REBALANCE_TIMEOUT "0" cluster_rebalance_completed
-EXPECT "0" cluster_rebal_all_nodes_migrated_files
-$CLI_1 volume rebalance $V0 status
-
-
-TEST umount -f $M0
-TEST $CLI_1 volume stop $V0
-TEST $CLI_1 volume delete $V0
-
-
-##############################################################
-
-# Next, a dist-rep volume
-TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/drep1 $H2:$B2/drep1 $H1:$B1/drep2 $H2:$B2/drep2
-
-TEST $CLI_1 volume start $V0
-$CLI_1 volume info $V0
-
-#TEST $CLI_1 volume set $V0 client-log-level DEBUG
-
-## Mount FUSE
-TEST glusterfs -s $H1 --volfile-id $V0 $M0;
-
-TEST mkdir $M0/dir1 2>/dev/null;
-TEST touch $M0/dir1/file-{1..500}
-
-## Add-brick and run rebalance to force file migration
-TEST $CLI_1 volume add-brick $V0 replica 2 $H1:$B1/drep3 $H2:$B2/drep3
-
-#Start a rebalance
-TEST $CLI_1 volume rebalance $V0 start force
-
-#volume rebalance status should work
-#TEST $CLI_1 volume rebalance $V0 status
-#$CLI_1 volume rebalance $V0 status
-
-EXPECT_WITHIN $REBALANCE_TIMEOUT "0" cluster_rebalance_completed
-#EXPECT "0" cluster_rebal_all_nodes_migrated_files
-$CLI_1 volume rebalance $V0 status
-
-
-TEST umount -f $M0
-TEST $CLI_1 volume stop $V0
-TEST $CLI_1 volume delete $V0
-
-##############################################################
-
-# Next, a disperse volume
-TEST $CLI_1 volume create $V0 disperse 3 $H1:$B1/ec1 $H2:$B1/ec2 $H3:$B1/ec3 force
-
-TEST $CLI_1 volume start $V0
-$CLI_1 volume info $V0
-
-#TEST $CLI_1 volume set $V0 client-log-level DEBUG
-
-## Mount FUSE
-TEST glusterfs -s $H1 --volfile-id $V0 $M0;
-
-TEST mkdir $M0/dir1 2>/dev/null;
-TEST touch $M0/dir1/file-{1..500}
-
-## Add-brick and run rebalance to force file migration
-TEST $CLI_1 volume add-brick $V0 $H1:$B2/ec4 $H2:$B2/ec5 $H3:$B2/ec6
-
-#Start a rebalance
-TEST $CLI_1 volume rebalance $V0 start force
-
-#volume rebalance status should work
-#TEST $CLI_1 volume rebalance $V0 status
-#$CLI_1 volume rebalance $V0 status
-
-EXPECT_WITHIN $REBALANCE_TIMEOUT "0" cluster_rebalance_completed
-
-# this will not work unless EC is changed to return all node-uuids
-# comment this out once that patch is ready
-#EXPECT "0" cluster_rebal_all_nodes_migrated_files
-$CLI_1 volume rebalance $V0 status
-
-
-TEST umount -f $M0
-TEST $CLI_1 volume stop $V0
-TEST $CLI_1 volume delete $V0
-
-##############################################################
-
-cleanup
-#G_TESTDEF_TEST_STATUS_NETBSD7=1501388
diff --git a/tests/basic/distribute/spare_file_rebalance.t b/tests/basic/distribute/spare_file_rebalance.t
new file mode 100644
index 00000000000..061c02f7392
--- /dev/null
+++ b/tests/basic/distribute/spare_file_rebalance.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../dht.rc
+
+# Initialize
+#------------------------------------------------------------
+cleanup;
+
+# Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+# Create a volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+# Verify volume creation
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Start volume and verify successful start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+#------------------------------------------------------------
+
+# Test case - Create sparse files on the mount point and verify
+# them after rebalance
+#------------------------------------------------------------
+
+# Create some sparse files
+TEST cd $M0;
+dd if=/dev/urandom of=sparse_file bs=10k count=1 seek=2M
+cp --sparse=always sparse_file sparse_file_3;
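+# sparse_file holds a single 10k block at a large offset, so both copies are
+# almost entirely holes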
+
+# Add a 3rd brick
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3;
+
+# Trigger rebalance
+TEST $CLI volume rebalance $V0 start force;
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed;
+
+# Compare original and rebalanced files
+TEST cd $B0/${V0}2
+TEST cmp sparse_file $B0/${V0}3/sparse_file_3
+EXPECT_WITHIN 30 "";
+
+cleanup;
diff --git a/tests/basic/ec/ec-1468261.t b/tests/basic/ec/ec-1468261.t
index 0273102951e..77d704cf880 100644
--- a/tests/basic/ec/ec-1468261.t
+++ b/tests/basic/ec/ec-1468261.t
@@ -16,6 +16,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
TEST $CLI volume set $V0 disperse.optimistic-change-log on
+TEST $CLI volume set $V0 disperse.other-eager-lock on
TEST $CLI volume start $V0
#Mount the volume
@@ -33,11 +34,12 @@ EXPECT_WITHIN $IO_WAIT_TIMEOUT "^$" get_hex_xattr trusted.ec.dirty $B0/${V0}3/te
EXPECT_WITHIN $IO_WAIT_TIMEOUT "^$" get_hex_xattr trusted.ec.dirty $B0/${V0}4/test_dir
EXPECT_WITHIN $IO_WAIT_TIMEOUT "^$" get_hex_xattr trusted.ec.dirty $B0/${V0}5/test_dir
-#Touch a file and kill two bricks
-TEST touch $M0/test_dir/new_file
+#Kill two bricks and touch a file
TEST kill_brick $V0 $H0 $B0/${V0}0
TEST kill_brick $V0 $H0 $B0/${V0}1
EXPECT_WITHIN $CHILD_UP_TIMEOUT "4" ec_child_up_count $V0 0
+TEST touch $M0/test_dir/new_file
+sleep 2
#Dirty should be set on up bricks
EXPECT_WITHIN $IO_WAIT_TIMEOUT "^00000000000000010000000000000001$" get_hex_xattr trusted.ec.dirty $B0/${V0}2/test_dir
@@ -57,15 +59,13 @@ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
TEST glusterfs -s $H0 --volfile-id $V0 $M0;
#Create a tar file
-TEST mkdir $M0/test_dir
-for i in {1..3000};do
-dd if=/dev/urandom of=$M0/test_dir/file-$i bs=1k count=10;
-done
-tar -cf $M0/test_dir.tar $M0/test_dir/ 2>/dev/null
-rm -rf $M0/test_dir/
+TEST mkdir /tmp/test_dir
+seq 1 3000 | xargs -n 1 -P 20 -I {} dd if=/dev/urandom of=/tmp/test_dir/file-{} bs=10K count=1
+tar -cf /tmp/test_dir.tar /tmp/test_dir/ 2>/dev/null
+rm -rf /tmp/test_dir/
#Untar the tar file
-tar -C $M0 -xf $M0/test_dir.tar 2>/dev/null&
+tar -C $M0 -xf /tmp/test_dir.tar 2>/dev/null&
#Kill 1st and 2nd brick
TEST kill_brick $V0 $H0 $B0/${V0}0
@@ -74,6 +74,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "4" ec_child_up_count $V0 0
#Stop untaring
TEST kill %1
+rm -f /tmp/test_dir.tar
#Bring up the down bricks
TEST $CLI volume start $V0 force
diff --git a/tests/basic/ec/ec-badfd.c b/tests/basic/ec/ec-badfd.c
new file mode 100644
index 00000000000..8be23c10eaf
--- /dev/null
+++ b/tests/basic/ec/ec-badfd.c
@@ -0,0 +1,124 @@
+#include <stdio.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+int
+fill_iov(struct iovec *iov, char fillchar, int count)
+{
+ int ret = -1;
+
+ iov->iov_base = malloc(count + 1);
+ if (iov->iov_base == NULL) {
+ return ret;
+ } else {
+ iov->iov_len = count;
+ ret = 0;
+ }
+ memset(iov->iov_base, fillchar, count);
+ memset(iov->iov_base + count, '\0', 1);
+
+ return ret;
+}
+
+int
+write_sync(glfs_t *fs, glfs_fd_t *glfd, int char_count)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+ struct iovec iov = {0};
+
+ ret = fill_iov(&iov, 'a', char_count);
+ if (ret) {
+ fprintf(stderr, "failed to create iov");
+ goto out;
+ }
+
+ ret = glfs_pwritev(glfd, &iov, 1, 0, flags);
+out:
+ if (ret < 0) {
+ fprintf(stderr, "glfs_pwritev failed, %d", errno);
+ }
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd = NULL;
+ int ret = 1;
+ char volume_cmd[4096] = {0};
+
+ if (argc != 4) {
+ fprintf(stderr, "Syntax: %s <host> <volname> <file>\n", argv[0]);
+ return 1;
+ }
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", argv[1], 24007);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_volfile_server: returned %d\n", ret);
+ goto out;
+ }
+ ret = glfs_set_logging(fs, "/tmp/ec-badfd.log", 7);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_logging: returned %d\n", ret);
+ goto out;
+ }
+ ret = glfs_init(fs);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+ goto out;
+ }
+
+ fd = glfs_open(fs, argv[3], O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_open: returned NULL\n");
+ goto out;
+ }
+
+ ret = write_sync(fs, fd, 16);
+ if (ret < 0) {
+ fprintf(stderr, "write_sync failed\n");
+ }
+
+ snprintf(volume_cmd, sizeof(volume_cmd),
+ "gluster --mode=script volume stop %s", argv[2]);
+ /*Stop the volume so that update-size-version fails*/
+ system(volume_cmd);
+ sleep(8); /* 3 seconds more than eager-lock-timeout*/
+ snprintf(volume_cmd, sizeof(volume_cmd),
+ "gluster --mode=script volume start %s", argv[2]);
+ system(volume_cmd);
+ sleep(8); /*wait for bricks to come up*/
+ ret = glfs_fsync(fd, NULL, NULL);
+ if (ret == 0) {
+ fprintf(stderr, "fsync succeeded on a BADFD\n");
+ exit(1);
+ }
+
+ ret = glfs_close(fd);
+ if (ret == 0) {
+ fprintf(stderr, "flush succeeded on a BADFD\n");
+ exit(1);
+ }
+ ret = 0;
+
+out:
+ unlink("/tmp/ec-badfd.log");
+ glfs_fini(fs);
+
+ return ret;
+}
diff --git a/tests/basic/ec/ec-badfd.t b/tests/basic/ec/ec-badfd.t
new file mode 100755
index 00000000000..56feb47f115
--- /dev/null
+++ b/tests/basic/ec/ec-badfd.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{1..6}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 disperse.eager-lock-timeout 5
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST $GFS -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+TEST touch $M0/file
+
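+# ec-badfd writes to the file, stops and restarts the volume while holding the
+# fd open, and then expects fsync and close on that fd to fail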
+TEST build_tester $(dirname $0)/ec-badfd.c -lgfapi -Wall -O2
+TEST $(dirname $0)/ec-badfd $H0 $V0 /file
+cleanup_tester $(dirname ${0})/ec-badfd
+
+cleanup;
diff --git a/tests/basic/ec/ec-cpu-extensions.t b/tests/basic/ec/ec-cpu-extensions.t
index a599a316925..c9af27ea234 100644
--- a/tests/basic/ec/ec-cpu-extensions.t
+++ b/tests/basic/ec/ec-cpu-extensions.t
@@ -1,6 +1,6 @@
#!/bin/bash
-DISPERSE=6
+DISPERSE=18
REDUNDANCY=2
. $(dirname $0)/../../include.rc
@@ -39,6 +39,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 redundancy $REDUNDANCY $H0:$B0/${V0}{1..$DISPERSE}
TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume set $V0 disperse.read-policy round-robin
EXPECT 'Created' volinfo_field $V0 'Status'
TEST $CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'
diff --git a/tests/basic/ec/ec-dirty-flags.t b/tests/basic/ec/ec-dirty-flags.t
new file mode 100644
index 00000000000..68e66103f08
--- /dev/null
+++ b/tests/basic/ec/ec-dirty-flags.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This checks if the fop keeps the dirty flags settings correctly after
+# finishing the fop.
+
+cleanup
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{0..2}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+cd $M0
+for i in {1..1000}; do dd if=/dev/zero of=file-${i} bs=512k count=2; done
+cd -
+EXPECT "^0$" get_pending_heal_count $V0
+
+cleanup
diff --git a/tests/basic/ec/ec-fast-fgetxattr.c b/tests/basic/ec/ec-fast-fgetxattr.c
index 7fb1aad12c7..bf982151861 100644
--- a/tests/basic/ec/ec-fast-fgetxattr.c
+++ b/tests/basic/ec/ec-fast-fgetxattr.c
@@ -30,7 +30,8 @@ fill_iov(struct iovec *iov, char fillchar, int count)
}
void
-write_async_cbk(glfs_fd_t *fd, ssize_t ret, void *cookie)
+write_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
{
if (ret < 0) {
fprintf(stderr, "glfs_write failed");
diff --git a/tests/basic/ec/ec-fix-openfd.t b/tests/basic/ec/ec-fix-openfd.t
index c32f9332137..04fdd802c62 100644
--- a/tests/basic/ec/ec-fix-openfd.t
+++ b/tests/basic/ec/ec-fix-openfd.t
@@ -37,6 +37,8 @@ TEST fd_open $fd 'rw' "$M0/test_file"
TEST $CLI volume start $V0 force
EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+sleep 1
+
#Test the fd count
EXPECT "0" get_fd_count $V0 $H0 $B0/${V0}0 test_file
EXPECT "1" get_fd_count $V0 $H0 $B0/${V0}1 test_file
diff --git a/tests/basic/ec/ec-quorum-count.t b/tests/basic/ec/ec-quorum-count.t
new file mode 100644
index 00000000000..9310ebbb8f2
--- /dev/null
+++ b/tests/basic/ec/ec-quorum-count.t
@@ -0,0 +1,167 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../ec.rc
+
+cleanup
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume create $V1 $H0:$B0/${V1}{0..5}
+TEST $CLI volume set $V0 disperse.eager-lock-timeout 5
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume set $V0 disperse.background-heals 0
+TEST $CLI volume set $V0 disperse.heal-wait-qlength 0
+
+#Should fail on non-disperse volume
+TEST ! $CLI volume set $V1 disperse.quorum-count 5
+
+#Should fail on invalid values and on values below the data brick count;
+#should succeed only within the valid range
+TEST ! $CLI volume set $V0 disperse.quorum-count 0
+TEST ! $CLI volume set $V0 disperse.quorum-count -0
+TEST ! $CLI volume set $V0 disperse.quorum-count abc
+TEST ! $CLI volume set $V0 disperse.quorum-count 10abc
+TEST ! $CLI volume set $V0 disperse.quorum-count 1
+TEST ! $CLI volume set $V0 disperse.quorum-count 2
+TEST ! $CLI volume set $V0 disperse.quorum-count 3
+TEST $CLI volume set $V0 disperse.quorum-count 4
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+#Test that the option is reflected in the mount
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^4$" ec_option_value $V0 $M0 0 quorum-count
+TEST $CLI volume reset $V0 disperse.quorum-count
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^0$" ec_option_value $V0 $M0 0 quorum-count
+TEST $CLI volume set $V0 disperse.quorum-count 6
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^6$" ec_option_value $V0 $M0 0 quorum-count
+
+TEST touch $M0/a
+TEST touch $M0/data
+TEST setfattr -n trusted.def -v def $M0/a
+TEST touch $M0/src
+TEST touch $M0/del-me
+TEST mkdir $M0/dir1
+TEST dd if=/dev/zero of=$M0/read-file bs=1M count=1 oflag=direct
+TEST dd if=/dev/zero of=$M0/del-file bs=1M count=1 oflag=direct
+TEST gf_rm_file_and_gfid_link $B0/${V0}0 del-file
+#modify operations should fail as the file is not in quorum
+TEST ! dd if=/dev/zero of=$M0/del-file bs=1M count=1 oflag=direct
+TEST kill_brick $V0 $H0 $B0/${V0}0
+#Read should succeed even when quorum-count is not met
+TEST dd if=$M0/read-file of=/dev/null iflag=direct
+TEST ! touch $M0/a2
+TEST ! mkdir $M0/dir2
+TEST ! mknod $M0/b2 b 4 5
+TEST ! ln -s $M0/a $M0/symlink
+TEST ! ln $M0/a $M0/link
+TEST ! mv $M0/src $M0/dst
+TEST ! rm -f $M0/del-me
+TEST ! rmdir $M0/dir1
+TEST ! dd if=/dev/zero of=$M0/a bs=1M count=1 conv=notrunc
+TEST ! dd if=/dev/zero of=$M0/data bs=1M count=1 conv=notrunc
+TEST ! truncate -s 0 $M0/a
+TEST ! setfattr -n trusted.abc -v abc $M0/a
+TEST ! setfattr -x trusted.def $M0/a
+TEST ! chmod +x $M0/a
+TEST ! fallocate -l 2m -n $M0/a
+TEST ! fallocate -p -l 512k $M0/a
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+
+# Reset the option and check that the default quorum behaviour takes effect.
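+# (With quorum-count back at 0, EC presumably requires only its data count of
+# bricks, 4 of the 6 here, so the two bricks killed below should still leave
+# enough bricks for every fop to succeed.)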
+TEST $CLI volume reset $V0 disperse.quorum-count
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^0$" ec_option_value $V0 $M0 0 quorum-count
+TEST touch $M0/a1
+TEST touch $M0/data1
+TEST setfattr -n trusted.def -v def $M0/a1
+TEST touch $M0/src1
+TEST touch $M0/del-me1
+TEST mkdir $M0/dir11
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST touch $M0/a21
+TEST mkdir $M0/dir21
+TEST mknod $M0/b21 b 4 5
+TEST ln -s $M0/a1 $M0/symlink1
+TEST ln $M0/a1 $M0/link1
+TEST mv $M0/src1 $M0/dst1
+TEST rm -f $M0/del-me1
+TEST rmdir $M0/dir11
+TEST dd if=/dev/zero of=$M0/a1 bs=1M count=1 conv=notrunc
+TEST dd if=/dev/zero of=$M0/data1 bs=1M count=1 conv=notrunc
+TEST truncate -s 0 $M0/a1
+TEST setfattr -n trusted.abc -v abc $M0/a1
+TEST setfattr -x trusted.def $M0/a1
+TEST chmod +x $M0/a1
+TEST fallocate -l 2m -n $M0/a1
+TEST fallocate -p -l 512k $M0/a1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+TEST touch $M0/a2
+TEST touch $M0/data2
+TEST setfattr -n trusted.def -v def $M0/a2
+TEST touch $M0/src2
+TEST touch $M0/del-me2
+TEST mkdir $M0/dir12
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST ! touch $M0/a22
+TEST ! mkdir $M0/dir22
+TEST ! mknod $M0/b22 b 4 5
+TEST ! ln -s $M0/a2 $M0/symlink2
+TEST ! ln $M0/a2 $M0/link2
+TEST ! mv $M0/src2 $M0/dst2
+TEST ! rm -f $M0/del-me2
+TEST ! rmdir $M0/dir12
+TEST ! dd if=/dev/zero of=$M0/a2 bs=1M count=1 conv=notrunc
+TEST ! dd if=/dev/zero of=$M0/data2 bs=1M count=1 conv=notrunc
+TEST ! truncate -s 0 $M0/a2
+TEST ! setfattr -n trusted.abc -v abc $M0/a2
+TEST ! setfattr -x trusted.def $M0/a2
+TEST ! chmod +x $M0/a2
+TEST ! fallocate -l 2m -n $M0/a2
+TEST ! fallocate -p -l 512k $M0/a2
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+
+# Set quorum-count to 5, kill 1 brick, and check that the fops still pass
+TEST $CLI volume set $V0 disperse.quorum-count 5
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^5$" ec_option_value $V0 $M0 0 quorum-count
+TEST touch $M0/a3
+TEST touch $M0/data3
+TEST setfattr -n trusted.def -v def $M0/a3
+TEST touch $M0/src3
+TEST touch $M0/del-me3
+TEST mkdir $M0/dir13
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST touch $M0/a31
+TEST mkdir $M0/dir31
+TEST mknod $M0/b31 b 4 5
+TEST ln -s $M0/a3 $M0/symlink3
+TEST ln $M0/a3 $M0/link3
+TEST mv $M0/src3 $M0/dst3
+TEST rm -f $M0/del-me3
+TEST rmdir $M0/dir13
+TEST dd if=/dev/zero of=$M0/a3 bs=1M count=1 conv=notrunc
+TEST dd if=/dev/zero of=$M0/data3 bs=1M count=1 conv=notrunc
+TEST truncate -s 0 $M0/a3
+TEST setfattr -n trusted.abc -v abc $M0/a3
+TEST setfattr -x trusted.def $M0/a3
+TEST chmod +x $M0/a3
+TEST fallocate -l 2m -n $M0/a3
+TEST fallocate -p -l 512k $M0/a3
+TEST dd if=/dev/urandom of=$M0/heal-file bs=1M count=1 oflag=direct
+cksum_before_heal="$(md5sum $M0/heal-file | awk '{print $1}')"
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick $V0 $H0 $B0/${V0}5
+cksum_after_heal=$(dd if=$M0/heal-file iflag=direct | md5sum | awk '{print $1}')
+TEST [[ $cksum_before_heal == $cksum_after_heal ]]
+cleanup;
diff --git a/tests/basic/ec/ec-read-mask.t b/tests/basic/ec/ec-read-mask.t
new file mode 100644
index 00000000000..ddb556f2973
--- /dev/null
+++ b/tests/basic/ec/ec-read-mask.t
@@ -0,0 +1,114 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../ec.rc
+
+cleanup
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume start $V0
+
+#Empty read-mask should fail
+TEST ! $GFS --xlator-option=*.ec-read-mask="" -s $H0 --volfile-id $V0 $M0
+
+#Fewer than 4 brick ids should fail
+TEST ! $GFS --xlator-option="*.ec-read-mask=0" -s $H0 --volfile-id $V0 $M0
+TEST ! $GFS --xlator-option="*.ec-read-mask=0:1" -s $H0 --volfile-id $V0 $M0
+TEST ! $GFS --xlator-option=*.ec-read-mask="0:1:2" -s $H0 --volfile-id $V0 $M0
+
+#ids greater than 5 should fail
+TEST ! $GFS --xlator-option="*.ec-read-mask=0:1:2:6" -s $H0 --volfile-id $V0 $M0
+
+#ids less than 0 should fail
+TEST ! $GFS --xlator-option="*.ec-read-mask=0:-1:2:5" -s $H0 --volfile-id $V0 $M0
+
+#read-mask containing characters other than digits and ':' should fail
+TEST ! $GFS --xlator-option="*.ec-read-mask=0:1:2:5:abc" -s $H0 --volfile-id $V0 $M0
+TEST ! $GFS --xlator-option="*.ec-read-mask=0:1:2:5a" -s $H0 --volfile-id $V0 $M0
+
+#Mounting with at least 4 read-mask ids, all of them valid, should pass
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5:4:3" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^111111$" ec_option_value $V0 $M0 0 read-mask
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+
+TEST dd if=/dev/urandom of=$M0/a bs=1M count=1
+md5=$(md5sum $M0/a | awk '{print $1}')
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#Reads on the file should fail if any read-mask brick is down when the number
+#of ids equals the data count
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST ! dd if=$M0/a of=/dev/null
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST ! dd if=$M0/a of=/dev/null
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST ! dd if=$M0/a of=/dev/null
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}5
+TEST ! dd if=$M0/a of=/dev/null
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+#Reads on the file should succeed when only non-read-mask bricks are down
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT "^$md5$" echo $(dd if=$M0/a | md5sum | awk '{print $1}')
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}4
+EXPECT "^$md5$" echo $(dd if=$M0/a | md5sum | awk '{print $1}')
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}3
+TEST kill_brick $V0 $H0 $B0/${V0}4
+EXPECT "^$md5$" echo $(dd if=$M0/a | md5sum | awk '{print $1}')
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+#Deliberately corrupt the fragments on bricks 3 and 4 and check that reads still give correct data
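+#(Bricks 3 and 4 are excluded by the 0:1:2:5 read-mask, so their corrupted
+# fragments should never be used to serve the read below.)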
+TEST dd if=/dev/zero of=$B0/${V0}3/a bs=256k count=1
+TEST dd if=/dev/zero of=$B0/${V0}4/a bs=256k count=1
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+EXPECT "^$md5$" echo $(dd if=$M0/a | md5sum | awk '{print $1}')
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+cleanup;
diff --git a/tests/basic/ec/ec-reset-brick.t b/tests/basic/ec/ec-reset-brick.t
new file mode 100644
index 00000000000..f1a625df4ff
--- /dev/null
+++ b/tests/basic/ec/ec-reset-brick.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+function num_entries {
+ ls -l $1 | wc -l
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+mkdir $M0/dir
+touch $M0/dir/{1..10}
+
+mkdir $M0/dir/dir1
+touch $M0/dir/dir1/{1..10}
+
+#kill brick process
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}5 start
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "5" ec_child_up_count $V0 0
+
+#reset the brick by removing all of its data and recreating the brick directory
+rm -rf $B0/${V0}5
+mkdir $B0/${V0}5
+
+#start brick process and heal by committing reset-brick
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}5 $H0:$B0/${V0}5 commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count_shd $V0 0
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+
+EXPECT "^12$" num_entries $B0/${V0}5/dir
+EXPECT "^11$" num_entries $B0/${V0}5/dir/dir1
+
+ec_version=$(get_hex_xattr trusted.ec.version $B0/${V0}0)
+EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}1
+EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}2
+EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}3
+EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}4
+EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}5
+
+cleanup;
diff --git a/tests/basic/ec/ec-root-heal.t b/tests/basic/ec/ec-root-heal.t
index a133885ef1d..11ea7cdf9d4 100644
--- a/tests/basic/ec/ec-root-heal.t
+++ b/tests/basic/ec/ec-root-heal.t
@@ -22,7 +22,8 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count_shd $V0 0
# active heal
TEST $CLI volume heal $V0 full
#ls -l gives "Total" line so number of lines will be 1 more
-EXPECT_WITHIN $HEAL_TIMEOUT "^11$" num_entries $B0/${V0}6
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+EXPECT "^11$" num_entries $B0/${V0}6
ec_version=$(get_hex_xattr trusted.ec.version $B0/${V0}0)
EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}1
EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}2
diff --git a/tests/basic/ec/ec-seek.t b/tests/basic/ec/ec-seek.t
index 6a0060870c8..5a7d31b9f8f 100644
--- a/tests/basic/ec/ec-seek.t
+++ b/tests/basic/ec/ec-seek.t
@@ -6,7 +6,7 @@
cleanup
SEEK=$(dirname $0)/seek
-build_tester $(dirname $0)/seek.c -o ${SEEK}
+build_tester $(dirname $0)/../seek.c -o ${SEEK}
TEST glusterd
TEST pidof glusterd
@@ -51,6 +51,7 @@ EXPECT "^$((${BSIZE} * 5 + 512))$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 5
EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 5 + 512))
EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 6))
+rm -f ${SEEK}
cleanup
# Centos6 regression slaves seem to not support SEEK_DATA/SEEK_HOLE
diff --git a/tests/basic/ec/ec-stripe.t b/tests/basic/ec/ec-stripe.t
index 1e940eba81b..98b92294feb 100644
--- a/tests/basic/ec/ec-stripe.t
+++ b/tests/basic/ec/ec-stripe.t
@@ -202,7 +202,7 @@ TEST truncate -s 0 $B0/test_file
TEST truncate -s 0 $M0/test_file
TEST dd if=$B0/misc_file of=$B0/test_file bs=1022 count=5 oflag=seek_bytes,sync seek=400 conv=notrunc
TEST dd if=$B0/misc_file of=$M0/test_file bs=1022 count=5 oflag=seek_bytes,sync seek=400 conv=notrunc
-check_statedump_md5sum 4 5
+check_statedump_md5sum 4 4
clean_file_unmount
### 14 - Truncate to invalidate all but one the stripe in cache ####
diff --git a/tests/basic/ec/gfapi-ec-open-truncate.c b/tests/basic/ec/gfapi-ec-open-truncate.c
new file mode 100644
index 00000000000..fb16807003a
--- /dev/null
+++ b/tests/basic/ec/gfapi-ec-open-truncate.c
@@ -0,0 +1,171 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define LOG_ERR(msg) \
+ do { \
+ fprintf(stderr, "%s : Error (%s)\n", msg, strerror(errno)); \
+ } while (0)
+
+int
+fill_iov(struct iovec *iov, char fillchar, int count)
+{
+ int ret = -1;
+
+ iov->iov_base = calloc(count + 1, sizeof(fillchar));
+ if (iov->iov_base == NULL) {
+ return ret;
+ } else {
+ iov->iov_len = count;
+ ret = 0;
+ }
+ memset(iov->iov_base, fillchar, count);
+ memset(iov->iov_base + count, '\0', 1);
+
+ return ret;
+}
+
+glfs_t *
+init_glfs(const char *hostname, const char *volname, const char *logfile)
+{
+ int ret = -1;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ LOG_ERR("glfs_new failed");
+ return NULL;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_volfile_server failed");
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_logging failed");
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ LOG_ERR("glfs_init failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret) {
+ glfs_fini(fs);
+ fs = NULL;
+ }
+
+ return fs;
+}
+
+int
+main(int argc, char *argv[])
+{
+ char *hostname = NULL;
+ char *volname = NULL;
+ char *logfile = NULL;
+ glfs_t *fs = NULL;
+ glfs_fd_t *glfd = NULL;
+ int ret = 0;
+ int i = 0;
+ int count = 200;
+ struct iovec iov = {0};
+ int flags = O_RDWR;
+ int bricksup = 0;
+ int fdopen = 0;
+
+ if (argc != 4) {
+ fprintf(stderr, "Invalid argument\n");
+ exit(1);
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ logfile = argv[3];
+
+ fs = init_glfs(hostname, volname, logfile);
+ if (fs == NULL) {
+ LOG_ERR("init_glfs failed");
+ return -1;
+ }
+
+ /* Brick is down and we are opening a file to trigger fd heal. */
+ /* Bypass Write-behind */
+ glfd = glfs_open(fs, "a", O_WRONLY | O_TRUNC | O_SYNC);
+ if (glfd == NULL) {
+ LOG_ERR("glfs_open_truncate failed");
+ exit(1);
+ }
+ system("gluster --mode=script volume start patchy force");
+ /*CHILD_UP_TIMEOUT is 20 seconds*/
+ for (i = 0; i < 20; i++) {
+ ret = system(
+ "[ $(gluster --mode=script volume status patchy | "
+ "grep \" Y \" | awk '{print $(NF-1)}' | wc -l) == 3 ]");
+ if (WIFEXITED(ret) && WEXITSTATUS(ret)) {
+ printf("Ret value of system: %d\n, ifexited: %d, exitstatus: %d",
+ ret, WIFEXITED(ret), WEXITSTATUS(ret));
+ sleep(1);
+ continue;
+ }
+ printf("Number of loops: %d\n", i);
+ bricksup = 1;
+ break;
+ }
+ if (!bricksup) {
+ system("gluster --mode=script volume status patchy");
+ LOG_ERR("Bricks didn't come up\n");
+ exit(1);
+ }
+
+ /*Not sure how to check that the child-up reached EC, so sleep 3 for now*/
+ sleep(3);
+ ret = fill_iov(&iov, 'a', 200);
+ if (ret) {
+ LOG_ERR("failed to create iov");
+ exit(1);
+ }
+
+ /*write will trigger re-open*/
+ ret = glfs_pwritev(glfd, &iov, 1, 0, flags);
+ if (ret < 0) {
+ LOG_ERR("glfs_test_function failed");
+ exit(1);
+ }
+ /*Check reopen happened by checking for open-fds on the brick*/
+ for (i = 0; i < 20; i++) {
+ ret = system(
+ "[ $(for i in $(pgrep glusterfsd); do ls -l /proc/$i/fd | grep "
+ "\"[.]glusterfs\" | grep -v \".glusterfs/[0-9a-f][0-9a-f]\" | grep "
+ "-v health_check; done | wc -l) == 3 ]");
+ if (WIFEXITED(ret) && WEXITSTATUS(ret)) {
+ printf("Ret value of system: %d\n, ifexited: %d, exitstatus: %d",
+ ret, WIFEXITED(ret), WEXITSTATUS(ret));
+ sleep(1);
+ continue;
+ }
+ fdopen = 1;
+ break;
+ }
+
+ if (!fdopen) {
+ LOG_ERR("fd reopen didn't succeed");
+ exit(1);
+ }
+
+ return 0;
+}
diff --git a/tests/basic/ec/gfapi-ec-open-truncate.t b/tests/basic/ec/gfapi-ec-open-truncate.t
new file mode 100644
index 00000000000..e22562c6ea3
--- /dev/null
+++ b/tests/basic/ec/gfapi-ec-open-truncate.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This .t tests the functionality of open-fd heal when a file is opened with O_TRUNC.
+#The test passes only if the heal re-open does not apply O_TRUNC again.
+
+cleanup
+
+TEST glusterd
+
+TEST $CLI volume create $V0 disperse 3 ${H0}:$B0/brick{1,2,3}
+EXPECT 'Created' volinfo_field $V0 'Status'
+#Disable heals to prevent any chance of heals masking the problem
+TEST $CLI volume set $V0 disperse.background-heals 0
+TEST $CLI volume set $V0 disperse.heal-wait-qlength 0
+TEST $CLI volume set $V0 performance.write-behind off
+
+#We need truncate fop to go through before pre-op completes for the write-fop
+#which triggers open-fd heal. Otherwise truncate won't be allowed on 'bad' brick
+TEST $CLI volume set $V0 delay-gen posix
+TEST $CLI volume set $V0 delay-gen.enable fxattrop
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 delay-gen.delay-duration 1000000
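+#(delay-gen.delay-duration is taken to be in microseconds here, so every
+# fxattrop is held back for roughly a second, which is the window the truncate
+# needs.)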
+
+TEST $CLI volume heal $V0 disable
+
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+EXPECT 'Started' volinfo_field $V0 'Status'
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+TEST touch $M0/a
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST kill_brick $V0 $H0 $B0/brick1
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/gfapi-ec-open-truncate.c -lgfapi
+
+TEST $CLI volume profile $V0 info clear
+TEST ./$(dirname $0)/gfapi-ec-open-truncate ${H0} $V0 $logdir/gfapi-ec-open-truncate.log
+
+EXPECT "^2$" echo $($CLI volume profile $V0 info incremental | grep -i truncate | wc -l)
+cleanup_tester $(dirname $0)/gfapi-ec-open-truncate
+
+cleanup
diff --git a/tests/basic/ec/nfs.t b/tests/basic/ec/nfs.t
index f0bdff93d5f..3f51a640ef7 100755
--- a/tests/basic/ec/nfs.t
+++ b/tests/basic/ec/nfs.t
@@ -3,6 +3,8 @@
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../nfs.rc
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
cleanup
TEST glusterd
diff --git a/tests/basic/ec/self-heal-read-write-fail.t b/tests/basic/ec/self-heal-read-write-fail.t
new file mode 100644
index 00000000000..0ba591b5bb2
--- /dev/null
+++ b/tests/basic/ec/self-heal-read-write-fail.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This test verifies that self-heal fails when read/write fails as part of heal
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+TEST touch $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}0
+echo abc >> $M0/a
+
+# Umount the volume to force all pending writes to reach the bricks
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#Load error-gen and fail read fop and test that heal fails
+TEST $CLI volume stop $V0 #Stop volume so that error-gen can be loaded
+TEST $CLI volume set $V0 debug.error-gen posix
+TEST $CLI volume set $V0 debug.error-fops read
+TEST $CLI volume set $V0 debug.error-number EBADF
+TEST $CLI volume set $V0 debug.error-failure 100
+
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+TEST ! getfattr -n trusted.ec.heal $M0/a
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+#fail write fop and test that heal fails
+TEST $CLI volume stop $V0
+TEST $CLI volume set $V0 debug.error-fops write
+
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+TEST ! getfattr -n trusted.ec.heal $M0/a
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume stop $V0 #Stop volume so that error-gen can be disabled
+TEST $CLI volume reset $V0 debug.error-gen
+TEST $CLI volume reset $V0 debug.error-fops
+TEST $CLI volume reset $V0 debug.error-number
+TEST $CLI volume reset $V0 debug.error-failure
+
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+TEST getfattr -n trusted.ec.heal $M0/a
+EXPECT "^0$" get_pending_heal_count $V0
+
+#Test that heal worked as expected by forcing read from brick0
+#remount to make sure data is not served from any cache
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST kill_brick $V0 $H0 $B0/${V0}2
+EXPECT "abc" cat $M0/a
+
+cleanup
diff --git a/tests/basic/ec/self-heal.t b/tests/basic/ec/self-heal.t
index d217559db1a..6329bb60248 100644
--- a/tests/basic/ec/self-heal.t
+++ b/tests/basic/ec/self-heal.t
@@ -131,6 +131,8 @@ TEST $CLI volume create $V0 redundancy 2 $H0:$B0/${V0}{0..5}
TEST $CLI volume set $V0 client-log-level DEBUG
#Write-behind has a bug where lookup can race over write which leads to size mismatch on the mount after a 'cp'
TEST $CLI volume set $V0 performance.write-behind off
+#md-cache can return stale stat due to default timeout being 1 sec
+TEST $CLI volume set $V0 performance.stat-prefetch off
EXPECT "Created" volinfo_field $V0 'Status'
TEST $CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Started" volinfo_field $V0 'Status'
diff --git a/tests/basic/fencing/afr-lock-heal-advanced.c b/tests/basic/fencing/afr-lock-heal-advanced.c
new file mode 100644
index 00000000000..e202ccd5b29
--- /dev/null
+++ b/tests/basic/fencing/afr-lock-heal-advanced.c
@@ -0,0 +1,227 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <signal.h>
+#include <unistd.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define GF_ENFORCE_MANDATORY_LOCK "trusted.glusterfs.enforce-mandatory-lock"
+
+FILE *logfile_fp;
+
+#define LOG_ERR(func, err) \
+ do { \
+ if (!logfile_fp) { \
+ fprintf(stderr, "%\n%d %s : returned error (%s)\n", __LINE__, \
+ func, strerror(err)); \
+ fflush(stderr); \
+ } else { \
+ fprintf(logfile_fp, "\n%d %s : returned error (%s)\n", __LINE__, \
+ func, strerror(err)); \
+ fflush(logfile_fp); \
+ } \
+ } while (0)
+
+glfs_t *
+setup_client(char *hostname, char *volname, char *log_file)
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ fprintf(logfile_fp, "\nglfs_new: returned NULL (%s)\n",
+ strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_set_volfile_server failed ret:%d (%s)\n",
+ ret, strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_logging(fs, log_file, 7);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_set_logging failed with ret: %d (%s)\n",
+ ret, strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+out:
+ return fs;
+error:
+ return NULL;
+}
+
+glfs_fd_t *
+open_file(glfs_t *fs, char *fname)
+{
+ glfs_fd_t *fd = NULL;
+
+ fd = glfs_creat(fs, fname, O_CREAT, 0644);
+ if (!fd) {
+ LOG_ERR("glfs_creat", errno);
+ goto out;
+ }
+out:
+ return fd;
+}
+
+int
+acquire_mandatory_lock(glfs_t *fs, glfs_fd_t *fd)
+{
+ struct flock lock;
+ int ret = 0;
+
+ /* initialize lock */
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 100;
+
+ ret = glfs_fsetxattr(fd, GF_ENFORCE_MANDATORY_LOCK, "set", 8, 0);
+ if (ret < 0) {
+ LOG_ERR("glfs_fsetxattr", errno);
+ ret = -1;
+ goto out;
+ }
+
+ /* take a write mandatory lock */
+ ret = glfs_file_lock(fd, F_SETLKW, &lock, GLFS_LK_MANDATORY);
+ if (ret) {
+ LOG_ERR("glfs_file_lock", errno);
+ ret = -1;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+int
+perform_test(glfs_t *fs, char *file1, char *file2)
+{
+ int ret = 0;
+ glfs_fd_t *fd1 = NULL;
+ glfs_fd_t *fd2 = NULL;
+ char *buf = "0123456789";
+
+ fd1 = open_file(fs, file1);
+ if (!fd1) {
+ ret = -1;
+ goto out;
+ }
+ fd2 = open_file(fs, file2);
+ if (!fd2) {
+ ret = -1;
+ goto out;
+ }
+
+ /* Kill one brick from the .t.*/
+ pause();
+
+ ret = acquire_mandatory_lock(fs, fd1);
+ if (ret) {
+ goto out;
+ }
+ ret = acquire_mandatory_lock(fs, fd2);
+ if (ret) {
+ goto out;
+ }
+
+ /* Bring the brick up and let the locks heal. */
+ pause();
+ /*At this point, the .t would have killed and brought back 2 bricks, marking
+ * the fd bad.*/
+
+ ret = glfs_write(fd1, buf, 10, 0);
+ if (ret > 0) {
+ /* Write is supposed to fail with EBADFD*/
+ LOG_ERR("glfs_write", ret);
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (fd1)
+ glfs_close(fd1);
+ if (fd2)
+ glfs_close(fd2);
+ return ret;
+}
+
+static void
+sigusr1_handler(int signo)
+{
+ /*Signal caught. Just continue with the execution.*/
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+ char *volname = NULL;
+ char log_file[100];
+ char *hostname = NULL;
+ char *fname1 = NULL;
+ char *fname2 = NULL;
+
+ if (argc != 7) {
+ fprintf(stderr,
+ "Expect following args %s <host> <volname> <file1> <file2> "
+ "<log file "
+ "location> <log_file_suffix>\n",
+ argv[0]);
+ return -1;
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ fname1 = argv[3];
+ fname2 = argv[4];
+
+    /*Use SIGUSR1 and pause() as a means of hitting break-points in this
+     *program when signalled from the .t test case.*/
+ if (signal(SIGUSR1, sigusr1_handler) == SIG_ERR) {
+ LOG_ERR("SIGUSR1 handler error", errno);
+ exit(EXIT_FAILURE);
+ }
+
+ sprintf(log_file, "%s/%s.%s.%s", argv[5], "lock-heal.c", argv[6], "log");
+ logfile_fp = fopen(log_file, "w");
+ if (!logfile_fp) {
+ fprintf(stderr, "\nfailed to open %s\n", log_file);
+ fflush(stderr);
+ return -1;
+ }
+
+ sprintf(log_file, "%s/%s.%s.%s", argv[5], "glfs-client", argv[6], "log");
+ fs = setup_client(hostname, volname, log_file);
+ if (!fs) {
+ LOG_ERR("setup_client", errno);
+ return -1;
+ }
+
+ ret = perform_test(fs, fname1, fname2);
+
+error:
+ if (fs) {
+ /*glfs_fini(fs)*/; // glfs fini path is racy and crashes the program
+ }
+
+ fclose(logfile_fp);
+
+ return ret;
+}
diff --git a/tests/basic/fencing/afr-lock-heal-advanced.t b/tests/basic/fencing/afr-lock-heal-advanced.t
new file mode 100644
index 00000000000..8a5b5989b5e
--- /dev/null
+++ b/tests/basic/fencing/afr-lock-heal-advanced.t
@@ -0,0 +1,115 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+PROCESS_UP_TIMEOUT=90
+
+function is_gfapi_program_alive()
+{
+ pid=$1
+ ps -p $pid
+ if [ $? -eq 0 ]
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
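+# Count the ACTIVE locks recorded in this brick's statedump against the two
+# given inodes. The last character of the brick path is assumed to match the
+# client-N index that shows up in the lock entry.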
+function get_active_lock_count {
+ brick=$1
+ i1=$2
+ i2=$3
+ pattern="ACTIVE.*client-${brick: -1}"
+
+ sdump=$(generate_brick_statedump $V0 $H0 $brick)
+ lock_count1="$(egrep "$i1" $sdump -A3| egrep "$pattern"|uniq|wc -l)"
+ lock_count2="$(egrep "$i2" $sdump -A3| egrep "$pattern"|uniq|wc -l)"
+ echo "$((lock_count1+lock_count2))"
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 locks.mandatory-locking forced
+TEST $CLI volume set $V0 enforce-mandatory-lock on
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+logdir=`gluster --print-logdir`
+TEST build_tester $(dirname $0)/afr-lock-heal-advanced.c -lgfapi -ggdb
+
+#------------------------------------------------------------------------------
+# Use more than 1 fd from same client so that list_for_each_* loops are executed more than once.
+$(dirname $0)/afr-lock-heal-advanced $H0 $V0 "/FILE1" "/FILE2" $logdir C1&
+client_pid=$!
+TEST [ $client_pid ]
+
+TEST sleep 5 # By now, the client would have opened an fd on FILE1 and FILE2 and be waiting for a SIGUSR1.
+EXPECT "Y" is_gfapi_program_alive $client_pid
+
+gfid_str1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/FILE1))
+inode1="FILE1|gfid:$gfid_str1"
+gfid_str2=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/FILE2))
+inode2="FILE2|gfid:$gfid_str2"
+
+# Kill brick-3 and let client-1 take lock on both files.
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill -SIGUSR1 $client_pid
+# If program is still alive, glfs_file_lock() was a success.
+EXPECT "Y" is_gfapi_program_alive $client_pid
+
+# Check lock is present on brick-1 and brick-2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_active_lock_count $B0/${V0}0 $inode1 $inode2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_active_lock_count $B0/${V0}1 $inode1 $inode2
+
+# Restart brick-3 and check that the lock has healed on it.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST sleep 10 #Needed for client to re-open fd? Otherwise client_pre_lk_v2() fails with EBADFD for remote-fd.
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_active_lock_count $B0/${V0}2 $inode1 $inode2
+
+#------------------------------------------------------------------------------
+# Kill same brick before heal completes the first time and check it completes the second time.
+TEST $CLI volume set $V0 delay-gen locks
+TEST $CLI volume set $V0 delay-gen.delay-duration 5000000
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 delay-gen.enable finodelk
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST $CLI volume reset $V0 delay-gen
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_active_lock_count $B0/${V0}0 $inode1 $inode2
+
+#------------------------------------------------------------------------------
+# Kill 2 bricks and bring it back. The fds must be marked bad.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+# TODO: `gluster v statedump $V0 client localhost:$client_pid` is not working,
+# so sleep for 20 seconds for the client to connect to the bricks.
+TEST sleep $CHILD_UP_TIMEOUT
+
+# Try to write to FILE1 from the .c; it must fail.
+TEST kill -SIGUSR1 $client_pid
+wait $client_pid
+ret=$?
+TEST [ $ret == 0 ]
+
+cleanup_tester $(dirname $0)/afr-lock-heal-advanced
+cleanup;
diff --git a/tests/basic/fencing/afr-lock-heal-basic.c b/tests/basic/fencing/afr-lock-heal-basic.c
new file mode 100644
index 00000000000..768c9e57181
--- /dev/null
+++ b/tests/basic/fencing/afr-lock-heal-basic.c
@@ -0,0 +1,182 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <signal.h>
+#include <unistd.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define GF_ENFORCE_MANDATORY_LOCK "trusted.glusterfs.enforce-mandatory-lock"
+
+FILE *logfile_fp;
+
+#define LOG_ERR(func, err) \
+ do { \
+ if (!logfile_fp) { \
+ fprintf(stderr, "%\n%d %s : returned error (%s)\n", __LINE__, \
+ func, strerror(err)); \
+ fflush(stderr); \
+ } else { \
+ fprintf(logfile_fp, "\n%d %s : returned error (%s)\n", __LINE__, \
+ func, strerror(err)); \
+ fflush(logfile_fp); \
+ } \
+ } while (0)
+
+glfs_t *
+setup_client(char *hostname, char *volname, char *log_file)
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ fprintf(logfile_fp, "\nglfs_new: returned NULL (%s)\n",
+ strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_set_volfile_server failed ret:%d (%s)\n",
+ ret, strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_logging(fs, log_file, 7);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_set_logging failed with ret: %d (%s)\n",
+ ret, strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+out:
+ return fs;
+error:
+ return NULL;
+}
+
+int
+acquire_mandatory_lock(glfs_t *fs, char *fname)
+{
+ struct flock lock;
+ int ret = 0;
+ glfs_fd_t *fd = NULL;
+
+ fd = glfs_creat(fs, fname, O_CREAT, 0644);
+ if (!fd) {
+ if (errno != EEXIST) {
+ LOG_ERR("glfs_creat", errno);
+ ret = -1;
+ goto out;
+ }
+ fd = glfs_open(fs, fname, O_RDWR | O_NONBLOCK);
+ if (!fd) {
+ LOG_ERR("glfs_open", errno);
+ ret = -1;
+ goto out;
+ }
+ }
+
+ /* initialize lock */
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 100;
+
+ ret = glfs_fsetxattr(fd, GF_ENFORCE_MANDATORY_LOCK, "set", 8, 0);
+ if (ret < 0) {
+ LOG_ERR("glfs_fsetxattr", errno);
+ ret = -1;
+ goto out;
+ }
+
+ pause();
+
+ /* take a write mandatory lock */
+ ret = glfs_file_lock(fd, F_SETLKW, &lock, GLFS_LK_MANDATORY);
+ if (ret) {
+ LOG_ERR("glfs_file_lock", errno);
+ goto out;
+ }
+
+ pause();
+
+out:
+ if (fd) {
+ glfs_close(fd);
+ }
+
+ return ret;
+}
+
+static void
+sigusr1_handler(int signo)
+{
+ /*Signal caught. Just continue with the execution.*/
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+ char *volname = NULL;
+ char log_file[100];
+ char *hostname = NULL;
+ char *fname = NULL;
+
+ if (argc != 6) {
+ fprintf(stderr,
+ "Expect following args %s <host> <volname> <file> <log file "
+ "location> <log_file_suffix>\n",
+ argv[0]);
+ return -1;
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ fname = argv[3];
+
+    /*Use SIGUSR1 and pause() as a means of hitting break-points in this
+     *program when signalled from the .t test case.*/
+ if (signal(SIGUSR1, sigusr1_handler) == SIG_ERR) {
+ LOG_ERR("SIGUSR1 handler error", errno);
+ exit(EXIT_FAILURE);
+ }
+
+ sprintf(log_file, "%s/%s.%s.%s", argv[4], "lock-heal-basic.c", argv[5],
+ "log");
+ logfile_fp = fopen(log_file, "w");
+ if (!logfile_fp) {
+ fprintf(stderr, "\nfailed to open %s\n", log_file);
+ fflush(stderr);
+ return -1;
+ }
+
+ sprintf(log_file, "%s/%s.%s.%s", argv[4], "glfs-client", argv[5], "log");
+ fs = setup_client(hostname, volname, log_file);
+ if (!fs) {
+ LOG_ERR("setup_client", errno);
+ return -1;
+ }
+
+ ret = acquire_mandatory_lock(fs, fname);
+
+error:
+ if (fs) {
+ /*glfs_fini(fs)*/; // glfs fini path is racy and crashes the program
+ }
+
+ fclose(logfile_fp);
+
+ return ret;
+}
diff --git a/tests/basic/fencing/afr-lock-heal-basic.t b/tests/basic/fencing/afr-lock-heal-basic.t
new file mode 100644
index 00000000000..69131af085d
--- /dev/null
+++ b/tests/basic/fencing/afr-lock-heal-basic.t
@@ -0,0 +1,102 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function is_gfapi_program_alive()
+{
+ pid=$1
+ ps -p $pid
+ if [ $? -eq 0 ]
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 locks.mandatory-locking forced
+TEST $CLI volume set $V0 enforce-mandatory-lock on
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+logdir=`gluster --print-logdir`
+TEST build_tester $(dirname $0)/afr-lock-heal-basic.c -lgfapi -ggdb
+
+$(dirname $0)/afr-lock-heal-basic $H0 $V0 "/FILE" $logdir C1&
+client1_pid=$!
+TEST [ $client1_pid ]
+
+$(dirname $0)/afr-lock-heal-basic $H0 $V0 "/FILE" $logdir C2&
+client2_pid=$!
+TEST [ $client2_pid ]
+
+TEST sleep 5 # By now, the 2 clients would have opened an fd on FILE and be waiting for a SIGUSR1.
+EXPECT "Y" is_gfapi_program_alive $client1_pid
+EXPECT "Y" is_gfapi_program_alive $client2_pid
+
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/FILE))
+inode="FILE|gfid:$gfid_str"
+
+# Kill brick-3 and let client-1 take lock on the file.
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill -SIGUSR1 $client1_pid
+# If program is still alive, glfs_file_lock() was a success.
+EXPECT "Y" is_gfapi_program_alive $client1_pid
+
+# Check lock is present on brick-1 and brick-2
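+# (The awk/tr pipeline below keeps only the leading fields of the ACTIVE lock
+# entry and strips punctuation and spaces, so the same client's lock should
+# compare equal across bricks.)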
+b1_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}0)
+c1_lock_on_b1="$(egrep "$inode" $b1_sdump -A3| egrep 'ACTIVE.*client-0'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+b2_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}1)
+c1_lock_on_b2="$(egrep "$inode" $b2_sdump -A3| egrep 'ACTIVE.*client-1'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+TEST [ "$c1_lock_on_b1" == "$c1_lock_on_b2" ]
+
+# Restart brick-3 and check that the lock has healed on it.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST sleep 10 #Needed for client to re-open fd? Otherwise client_pre_lk_v2() fails with EBADFD for remote-fd. Also wait for lock heal.
+
+b3_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}2)
+c1_lock_on_b3="$(egrep "$inode" $b3_sdump -A3| egrep 'ACTIVE.*client-2'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+TEST [ "$c1_lock_on_b1" == "$c1_lock_on_b3" ]
+
+# Kill brick-1 and let client-2 preempt the lock on bricks 2 and 3.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill -SIGUSR1 $client2_pid
+# If program is still alive, glfs_file_lock() was a success.
+EXPECT "Y" is_gfapi_program_alive $client2_pid
+
+# Restart brick-1 and let lock healing complete.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+TEST sleep 10 #Needed for client to re-open fd? Otherwise client_pre_lk_v2() fails with EBADFD for remote-fd. Also wait for lock heal.
+
+# Check that all bricks now have locks from client 2 only.
+b1_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}0)
+c2_lock_on_b1="$(egrep "$inode" $b1_sdump -A3| egrep 'ACTIVE.*client-0'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+b2_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}1)
+c2_lock_on_b2="$(egrep "$inode" $b2_sdump -A3| egrep 'ACTIVE.*client-1'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+b3_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}2)
+c2_lock_on_b3="$(egrep "$inode" $b3_sdump -A3| egrep 'ACTIVE.*client-2'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+TEST [ "$c2_lock_on_b1" == "$c2_lock_on_b2" ]
+TEST [ "$c2_lock_on_b1" == "$c2_lock_on_b3" ]
+TEST [ "$c2_lock_on_b1" != "$c1_lock_on_b1" ]
+
+#Let the client programs run and exit.
+TEST kill -SIGUSR1 $client1_pid
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" is_gfapi_program_alive $client1_pid
+TEST kill -SIGUSR1 $client2_pid
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" is_gfapi_program_alive $client2_pid
+
+cleanup_tester $(dirname $0)/afr-lock-heal-basic
+cleanup;
diff --git a/tests/basic/fencing/fence-basic.c b/tests/basic/fencing/fence-basic.c
new file mode 100644
index 00000000000..4aa452e19b0
--- /dev/null
+++ b/tests/basic/fencing/fence-basic.c
@@ -0,0 +1,229 @@
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define NO_INIT 1
+#define GF_ENFORCE_MANDATORY_LOCK "trusted.glusterfs.enforce-mandatory-lock"
+
+FILE *fp;
+char *buf = "0123456789";
+
+#define LOG_ERR(func, err) \
+ do { \
+ if (!fp) { \
+ fprintf(stderr, "%\n%d %s : returned error (%s)\n", __LINE__, \
+ func, strerror(err)); \
+ fflush(stderr); \
+ } else { \
+ fprintf(fp, "\n%d %s : returned error (%s)\n", __LINE__, func, \
+ strerror(err)); \
+ fflush(fp); \
+ } \
+ } while (0)
+
+glfs_t *
+setup_new_client(char *hostname, char *volname, char *log_file, int flag)
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ fprintf(fp, "\nglfs_new: returned NULL (%s)\n", strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf(fp, "\nglfs_set_volfile_server failed ret:%d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_logging(fs, log_file, 7);
+ if (ret < 0) {
+ fprintf(fp, "\nglfs_set_logging failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+ if (flag == NO_INIT)
+ goto out;
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ fprintf(fp, "\nglfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+out:
+ return fs;
+error:
+ return NULL;
+}
+
+/* test plan
+ *
+ * - take mandatory lock from client 1
+ * - preempt mandatory lock from client 2
+ * - write from client 1 which should fail
+ */
+
+int
+test(glfs_t *fs1, glfs_t *fs2, char *fname)
+{
+ struct flock lock;
+ int ret = 0;
+    glfs_fd_t *fd1 = NULL, *fd2 = NULL;
+
+    fd1 = glfs_creat(fs1, fname, O_RDWR, 0777);
+    if (!fd1) {
+        LOG_ERR("glfs_creat", errno);
+        ret = -1;
+        goto out;
+    }
+
+    fd2 = glfs_open(fs2, fname, O_RDWR | O_NONBLOCK);
+    if (!fd2) {
+        LOG_ERR("glfs_open", errno);
+        ret = -1;
+        goto out;
+    }
+
+ /* initialize lock */
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 100;
+
+ ret = glfs_fsetxattr(fd1, GF_ENFORCE_MANDATORY_LOCK, "set", 8, 0);
+ if (ret < 0) {
+ LOG_ERR("glfs_fsetxattr", errno);
+ ret = -1;
+ goto out;
+ }
+
+ /* take a write mandatory lock */
+ ret = glfs_file_lock(fd1, F_SETLKW, &lock, GLFS_LK_MANDATORY);
+ if (ret) {
+ LOG_ERR("glfs_file_lock", errno);
+ goto out;
+ }
+
+ ret = glfs_write(fd1, buf, 10, 0);
+ if (ret != 10) {
+ LOG_ERR("glfs_write", errno);
+ ret = -1;
+ goto out;
+ }
+
+ /* write should fail */
+ ret = glfs_write(fd2, buf, 10, 0);
+ if (ret != -1) {
+ LOG_ERR("glfs_write", errno);
+ ret = -1;
+ goto out;
+ }
+
+ /* preempt mandatory lock from client 1*/
+ ret = glfs_file_lock(fd2, F_SETLKW, &lock, GLFS_LK_MANDATORY);
+ if (ret) {
+ LOG_ERR("glfs_file_lock", errno);
+ goto out;
+ }
+
+ /* write should succeed from client 2 */
+ ret = glfs_write(fd2, buf, 10, 0);
+ if (ret == -1) {
+ LOG_ERR("glfs_write", errno);
+ goto out;
+ }
+
+ /* write should fail from client 1 */
+ ret = glfs_write(fd1, buf, 10, 0);
+ if (ret == 10) {
+ LOG_ERR("glfs_write", errno);
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (fd1) {
+ glfs_close(fd1);
+ }
+
+ if (fd2) {
+ glfs_close(fd2);
+ }
+
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ glfs_t *fs1 = NULL;
+ glfs_t *fs2 = NULL;
+ char *volname = NULL;
+ char log_file[100];
+ char *hostname = NULL;
+ char *fname = "/file";
+ glfs_fd_t *fd1 = NULL;
+ glfs_fd_t *fd2 = NULL;
+
+ if (argc != 4) {
+ fprintf(
+ stderr,
+ "Expect following args %s <hostname> <Vol> <log file location>\n",
+ argv[0]);
+ return -1;
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+
+ sprintf(log_file, "%s/%s", argv[3], "fence-basic.log");
+ fp = fopen(log_file, "w");
+ if (!fp) {
+ fprintf(stderr, "\nfailed to open %s\n", log_file);
+ fflush(stderr);
+ return -1;
+ }
+
+ sprintf(log_file, "%s/%s", argv[3], "glfs-client-1.log");
+ fs1 = setup_new_client(hostname, volname, log_file, 0);
+ if (!fs1) {
+ LOG_ERR("setup_new_client", errno);
+ return -1;
+ }
+
+ sprintf(log_file, "%s/%s", argv[3], "glfs-client-2.log");
+ fs2 = setup_new_client(hostname, volname, log_file, 0);
+ if (!fs2) {
+ LOG_ERR("setup_new_client", errno);
+ ret = -1;
+ goto error;
+ }
+
+ ret = test(fs1, fs2, fname);
+
+error:
+ if (fs1) {
+ glfs_fini(fs1);
+ }
+
+ if (fs2) {
+ glfs_fini(fs2);
+ }
+
+ fclose(fp);
+
+ return ret;
+}
diff --git a/tests/basic/fencing/fence-basic.t b/tests/basic/fencing/fence-basic.t
new file mode 100755
index 00000000000..30f379e7b20
--- /dev/null
+++ b/tests/basic/fencing/fence-basic.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume set $V0 diagnostics.client-log-flush-timeout 30
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 locks.mandatory-locking forced
+TEST $CLI volume set $V0 enforce-mandatory-lock on
+
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/fence-basic.c -lgfapi -ggdb
+TEST $(dirname $0)/fence-basic $H0 $V0 $logdir
+
+cleanup_tester $(dirname $0)/fence-basic
+
+cleanup; \ No newline at end of file
diff --git a/tests/basic/fencing/fencing-crash-conistency.t b/tests/basic/fencing/fencing-crash-conistency.t
new file mode 100644
index 00000000000..0c69411e90c
--- /dev/null
+++ b/tests/basic/fencing/fencing-crash-conistency.t
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+# With the lock-enforcement flag set, writes without a lock should fail
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 performance.write-behind off
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+TEST touch $M0/file
+
+#write should pass
+TEST "echo "test" > $M0/file"
+TEST "truncate -s 0 $M0/file"
+
+#enable mandatory locking
+TEST $CLI volume set $V0 locks.mandatory-locking forced
+TEST $CLI volume set $V0 enforce-mandatory-lock on
+
+#write should pass
+TEST "echo "test" >> $M0/file"
+TEST "truncate -s 0 $M0/file"
+
+#enforce lock on the file
+TEST setfattr -n trusted.glusterfs.enforce-mandatory-lock -v 1 $M0/file
+
+#write should fail
+TEST ! "echo "test" >> $M0/file"
+TEST ! "truncate -s 0 $M0/file"
+
+#remove lock enforcement flag
+TEST setfattr -x trusted.glusterfs.enforce-mandatory-lock $M0/file
+
+#write should pass
+TEST "echo "test" >> $M0/file"
+TEST "truncate -s 0 $M0/file"
+
+#enforce lock on the file
+TEST setfattr -n trusted.glusterfs.enforce-mandatory-lock -v 1 $M0/file
+#kill brick
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+TEST $CLI volume start $V0 force
+
+# wait a couple of seconds for the brick to come online
+sleep 2
+#write should fail (lock xlator gets lock enforcement info from disk)
+TEST ! "echo "test" >> $M0/file"
+TEST ! "truncate -s 0 $M0/file"
+
+cleanup; \ No newline at end of file
diff --git a/tests/basic/fencing/test-fence-option.t b/tests/basic/fencing/test-fence-option.t
new file mode 100644
index 00000000000..115cbe7dbdf
--- /dev/null
+++ b/tests/basic/fencing/test-fence-option.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+# With the lock-enforcement flag set, writes without a lock should fail
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+TEST touch $M0/file
+
+#setfattr for mandatory-enforcement will fail
+TEST ! setfattr -n trusted.glusterfs.enforce-mandatory-lock -v 1 $M0/file
+
+#enable mandatory locking
+TEST $CLI volume set $V0 locks.mandatory-locking forced
+
+#setfattr will fail
+TEST ! setfattr -n trusted.glusterfs.enforce-mandatory-lock -v 1 $M0/file
+
+#set lock-enforcement option
+TEST $CLI volume set $V0 enforce-mandatory-lock on
+
+#setfattr should succeed
+TEST setfattr -n trusted.glusterfs.enforce-mandatory-lock -v 1 $M0/file
+
+cleanup; \ No newline at end of file
diff --git a/tests/basic/first-test.t b/tests/basic/first-test.t
deleted file mode 100755
index 535b269e6b3..00000000000
--- a/tests/basic/first-test.t
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-
-cat << EOF
-This test should run first for http://review.gluster.org/#/c/13439/ and should
-be removed once that patch has been merged.
-EOF
-
-TEST true
diff --git a/tests/basic/fops-sanity.c b/tests/basic/fops-sanity.c
index aff72d89ca1..ef00aa0f088 100644
--- a/tests/basic/fops-sanity.c
+++ b/tests/basic/fops-sanity.c
@@ -26,6 +26,7 @@
#include <errno.h>
#include <string.h>
#include <dirent.h>
+#include <sys/sysmacros.h>
#ifndef linux
#include <sys/socket.h>
diff --git a/tests/basic/fuse/active-io-graph-switch.t b/tests/basic/fuse/active-io-graph-switch.t
new file mode 100644
index 00000000000..6ec3e1fcbfa
--- /dev/null
+++ b/tests/basic/fuse/active-io-graph-switch.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+TESTS_EXPECTED_IN_LOOP=12
+
+function perform_io_on_mount {
+ local m="$1"
+ local f="$2"
+ local lockfile="$3"
+ while [ -f "$m/$lockfile" ];
+ do
+ dd if=/dev/zero of=$m/$f bs=1M count=1
+ done
+}
+
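+# Toggling a volume option regenerates the client volfile, so each iteration
+# below should push the fuse mount through a graph switch while the
+# perform_io_on_mount loops keep writing.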
+function perform_graph_switch {
+ for i in {1..3}
+ do
+ TEST_IN_LOOP $CLI volume set $V0 performance.stat-prefetch off
+ sleep 3
+ TEST_IN_LOOP $CLI volume set $V0 performance.stat-prefetch on
+ sleep 3
+ done
+}
+
+function count_files {
+ ls $M0 | wc -l
+}
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 flush-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST touch $M0/lock
+for i in {1..100}; do perform_io_on_mount $M0 $i lock & done
+EXPECT_WITHIN 5 "101" count_files
+
+perform_graph_switch
+TEST rm -f $M0/lock
+wait
+EXPECT "100" count_files
+TEST rm -f $M0/{1..100}
+EXPECT "0" count_files
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#Repeat the tests with reader-thread-count
+TEST $GFS --reader-thread-count=10 --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST touch $M0/lock
+for i in {1..100}; do perform_io_on_mount $M0 $i lock & done
+EXPECT_WITHIN 5 "101" count_files
+
+perform_graph_switch
+TEST rm -f $M0/lock
+wait
+EXPECT "100" count_files
+TEST rm -f $M0/{1..100}
+EXPECT "0" count_files
+
+cleanup
diff --git a/tests/basic/geo-replication/marker-xattrs.t b/tests/basic/geo-replication/marker-xattrs.t
index e5b26a6bd5b..7e5ea8eebec 100755
--- a/tests/basic/geo-replication/marker-xattrs.t
+++ b/tests/basic/geo-replication/marker-xattrs.t
@@ -7,7 +7,7 @@ TEST glusterd
TEST pidof glusterd
## Start and create a replicated volume
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1,2,3}
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2,3,4,5}
TEST $CLI volume set $V0 indexing on
@@ -78,34 +78,3 @@ TEST $CLI volume stop $V0;
TEST $CLI volume delete $V0;
cleanup
-TEST glusterd
-TEST pidof glusterd
-## Start and create a stripe volume
-TEST $CLI volume create $V0 stripe 2 $H0:$B0/${V0}-{0,1}
-
-TEST $CLI volume set $V0 indexing on
-
-TEST $CLI volume start $V0;
-
-## Mount native
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
-
-## Mount client-pid=-1
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 --client-pid=-1 $M1
-
-TEST touch $M0
-
-vol_uuid=$(get_volume_mark $M1)
-xtime=trusted.glusterfs.$vol_uuid.xtime
-
-TEST "getfattr -n $xtime $B0/${V0}-0 | grep -q ${xtime}="
-
-TEST "getfattr -n $xtime $B0/${V0}-1 | grep -q ${xtime}="
-
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
-
-TEST $CLI volume stop $V0;
-TEST $CLI volume delete $V0;
-
-cleanup
diff --git a/tests/basic/gfapi/bug-1507896.c b/tests/basic/gfapi/bug-1507896.c
new file mode 100644
index 00000000000..1cc20849c2b
--- /dev/null
+++ b/tests/basic/gfapi/bug-1507896.c
@@ -0,0 +1,49 @@
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define VALIDATE_AND_GOTO_LABEL_ON_ERROR(func, ret, label) \
+ do { \
+ if (ret < 0) { \
+ fprintf(stderr, "%s : returned error %d (%s)\n", func, ret, \
+ strerror(errno)); \
+ goto label; \
+ } \
+ } while (0)
+
+int
+main(int argc, char *argv[])
+{
+ int ret = -1;
+ glfs_t *fs = NULL;
+ char *volname = NULL;
+ char *logfile = NULL;
+ char *hostname = NULL;
+
+ hostname = argv[1];
+ volname = argv[2];
+ logfile = argv[3];
+
+ fs = glfs_new(volname);
+ if (!fs)
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_new(fs)", ret, out);
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_set_volfile_server(fs)", ret, out);
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_set_logging(fs)", ret, out);
+
+ ret = glfs_init(fs);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_init(fs)", ret, out);
+
+out:
+ if (fs) {
+ ret = glfs_fini(fs);
+ if (ret)
+ fprintf(stderr, "glfs_fini(fs) returned %d\n", ret);
+ }
+ return ret;
+}
diff --git a/tests/basic/gfapi/bug-1507896.t b/tests/basic/gfapi/bug-1507896.t
new file mode 100644
index 00000000000..4764e650232
--- /dev/null
+++ b/tests/basic/gfapi/bug-1507896.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/bug-1507896.c -lgfapi
+
+TEST ./$(dirname $0)/bug-1507896 $H0 $V0 $logdir/bug-1507896.log
+
+#volume name preceded by '/'
+TEST ! ./$(dirname $0)/bug-1507896 $H0 /$V0 $logdir/bug-1507896.log
+
+#volume name containing special characters
+TEST ! ./$(dirname $0)/bug-1507896 $H0 test@_$V0 $logdir/bug-1507896.log
+
+cleanup_tester $(dirname $0)/bug-1507896
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/basic/gfapi/gfapi-async-calls-test.c b/tests/basic/gfapi/gfapi-async-calls-test.c
index 2ebe1f8a742..55835b14709 100644
--- a/tests/basic/gfapi/gfapi-async-calls-test.c
+++ b/tests/basic/gfapi/gfapi-async-calls-test.c
@@ -17,6 +17,17 @@
int cbk_complete = 0;
int cbk_ret_val = -1;
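+/* Poll until the async callback marks completion; warn if it reported a
+ * negative return value. */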
+void
+cbk_check()
+{
+ while (cbk_complete != 1) {
+ sleep(1);
+ }
+ if (cbk_ret_val < 0) {
+ fprintf(stderr, "cbk_ret_val is -ve\n");
+ }
+}
+
int
fill_iov(struct iovec *iov, char fillchar, int count)
{
@@ -76,24 +87,23 @@ out:
}
void
-write_async_cbk(glfs_fd_t *fd, ssize_t ret, void *cookie)
+pwritev_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
{
if (ret < 0) {
- LOG_ERR("glfs_write failed");
+ LOG_ERR("glfs_pwritev failed");
}
cbk_ret_val = ret;
cbk_complete = 1;
}
int
-write_async(glfs_t *fs, glfs_fd_t *glfd, int char_count)
+pwritev_async(glfs_t *fs, glfs_fd_t *glfd, int char_count)
{
ssize_t ret = -1;
int flags = O_RDWR;
- const char *buff = "This is from my prog\n";
struct iovec iov = {0};
void *write_cookie = NULL;
- void *read_cookie = NULL;
ret = fill_iov(&iov, 'a', char_count);
if (ret) {
@@ -102,7 +112,7 @@ write_async(glfs_t *fs, glfs_fd_t *glfd, int char_count)
}
write_cookie = strdup("write_cookie");
- ret = glfs_pwritev_async(glfd, &iov, 1, 0, flags, write_async_cbk,
+ ret = glfs_pwritev_async(glfd, &iov, 1, 0, flags, pwritev_async_cbk,
&write_cookie);
out:
if (ret < 0) {
@@ -111,6 +121,252 @@ out:
return ret;
}
+void
+pwrite_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_pwrite_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+int
+pwrite_async(glfs_fd_t *glfd)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+    char buf[10] = "ten bytes"; /* write initialized data, not stack garbage */
+    void *write_cookie = strdup("write_cookie");
+    ret = glfs_pwrite_async(glfd, buf, 10, 0, flags, pwrite_async_cbk,
+                            &write_cookie);
+
+ if (ret < 0) {
+ LOG_ERR("glfs_pwrite_async failed");
+ }
+ return ret;
+}
+
+void
+writev_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_writev_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+int
+writev_async(glfs_t *fs, glfs_fd_t *glfd, int char_count)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+ struct iovec iov = {0};
+ void *write_cookie = NULL;
+
+ ret = fill_iov(&iov, 'a', char_count);
+ if (ret) {
+ LOG_ERR("failed to create iov");
+ goto out;
+ }
+
+ write_cookie = strdup("write_cookie");
+ ret = glfs_writev_async(glfd, &iov, 1, flags, writev_async_cbk,
+ &write_cookie);
+out:
+ if (ret < 0) {
+ LOG_ERR("glfs_writev_async failed");
+ }
+ return ret;
+}
+
+void
+write_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_write_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+int
+write_async(glfs_fd_t *glfd)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+    char buf[10] = "ten bytes"; /* write initialized data, not stack garbage */
+    void *write_cookie = strdup("write_cookie");
+    ret = glfs_write_async(glfd, buf, 10, flags, write_async_cbk,
+                           &write_cookie);
+
+ if (ret < 0) {
+ LOG_ERR("glfs_write_async failed");
+ }
+ return ret;
+}
+
+void
+preadv_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_preadv_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+int
+preadv_async(glfs_t *fs, glfs_fd_t *glfd, int char_count)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+ struct iovec iov = {0};
+ void *read_cookie = NULL;
+
+ ret = fill_iov(&iov, 'a', char_count);
+ if (ret) {
+ LOG_ERR("failed to create iov");
+ goto out;
+ }
+
+ read_cookie = strdup("preadv_cookie");
+ ret = glfs_preadv_async(glfd, &iov, 1, 0, flags, preadv_async_cbk,
+ &read_cookie);
+out:
+ if (ret < 0) {
+ LOG_ERR("glfs_preadv async failed");
+ }
+ return ret;
+}
+
+void
+pread_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_pread_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+int
+pread_async(glfs_fd_t *glfd)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+ char buf1[10];
+ void *read_cookie = strdup("read_cookie");
+ ret = glfs_pread_async(glfd, buf1, 10, 0, flags, pread_async_cbk,
+ &read_cookie);
+ if (ret < 0) {
+ LOG_ERR("glfs_pread_async failed");
+ }
+
+ return ret;
+}
+
+void
+readv_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_readv_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+int
+readv_async(glfs_t *fs, glfs_fd_t *glfd, int char_count)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+ struct iovec iov = {0};
+ void *read_cookie = NULL;
+
+ ret = fill_iov(&iov, 'a', char_count);
+ if (ret) {
+ LOG_ERR("failed to create iov");
+ goto out;
+ }
+
+ read_cookie = strdup("read_cookie");
+ ret = glfs_readv_async(glfd, &iov, 1, flags, readv_async_cbk, &read_cookie);
+out:
+ if (ret < 0) {
+ LOG_ERR("glfs_readv_async failed");
+ }
+ return ret;
+}
+
+void
+read_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_read_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+int
+read_async(glfs_fd_t *glfd)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+ char buf1[10];
+ void *read_cookie = strdup("read_cookie");
+ ret = glfs_read_async(glfd, buf1, 10, flags, read_async_cbk, &read_cookie);
+
+ if (ret < 0) {
+ LOG_ERR("glfs_read_async failed");
+ }
+ return ret;
+}
+
+void
+fsync_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_fsync_async_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+void
+fdatasync_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_fdatasync_async_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+void
+ftruncate_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_ftruncate_async_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
int
main(int argc, char *argv[])
{
@@ -123,6 +379,7 @@ main(int argc, char *argv[])
int flags = (O_RDWR | O_CREAT);
glfs_fd_t *glfd = NULL;
int count = 200;
+ void *data = strdup("Sample_text");
if (argc != 4) {
fprintf(stderr, "Invalid argument\n");
@@ -145,14 +402,85 @@ main(int argc, char *argv[])
exit(1);
}
- ret = write_async(fs, glfd, count);
+ ret = pwritev_async(fs, glfd, count);
if (ret) {
- LOG_ERR("glfs_test_function failed");
+ LOG_ERR("glfs_pwritev_async_test failed");
exit(1);
}
+ cbk_check();
- while (cbk_complete != 1) {
- sleep(1);
+ ret = writev_async(fs, glfd, count);
+ if (ret) {
+ LOG_ERR("glfs_writev_async_test failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = write_async(glfd);
+ if (ret) {
+ LOG_ERR("glfs_write_async_test failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = preadv_async(fs, glfd, count);
+ if (ret) {
+ LOG_ERR("glfs_preadv_async_test failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = pread_async(glfd);
+ if (ret) {
+ LOG_ERR("glfs_pread_async_test failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = readv_async(fs, glfd, count);
+ if (ret) {
+ LOG_ERR("glfs_readv_async_test failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = read_async(glfd);
+ if (ret) {
+ LOG_ERR("glfs_read_async_test failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = glfs_fsync(glfd, NULL, NULL);
+ if (ret < 0) {
+ LOG_ERR("glfs_fsync failed");
+ exit(1);
+ }
+
+ ret = glfs_fdatasync(glfd, NULL, NULL);
+ if (ret < 0) {
+ LOG_ERR("glfs_fdatasync failed");
+ exit(1);
+ }
+
+ ret = glfs_fsync_async(glfd, fsync_async_cbk, data);
+ if (ret < 0) {
+ LOG_ERR("glfs_fsync_async failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = glfs_fdatasync_async(glfd, fdatasync_async_cbk, data);
+ if (ret < 0) {
+ LOG_ERR("glfs_fdatasync_async failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = glfs_ftruncate_async(glfd, 4, ftruncate_async_cbk, data);
+ if (ret < 0) {
+ LOG_ERR("glfs_ftruncate_async failed");
+ exit(1);
}
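+    /* Wait for the ftruncate callback as well, matching the other async
+     * calls above, before closing the fd. */
+    cbk_check();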
ret = glfs_close(glfd);
@@ -160,12 +488,7 @@ main(int argc, char *argv[])
LOG_ERR("glfs close failed");
}
- /*
- * skipping fini
- */
+ ret = glfs_fini(fs);
- if (cbk_ret_val == count)
- return 0;
- else
- return -1;
+ return ret;
}
diff --git a/tests/basic/gfapi/gfapi-copy-file-range.t b/tests/basic/gfapi/gfapi-copy-file-range.t
new file mode 100644
index 00000000000..a56d3a58e07
--- /dev/null
+++ b/tests/basic/gfapi/gfapi-copy-file-range.t
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+mkfs.xfs 2>&1 | grep reflink
+if [ $? -ne 0 ]; then
+ SKIP_TESTS
+ exit
+fi
+
+
+TEST glusterd
+
+TEST truncate -s 2G $B0/xfs_image
+# For now, an xfs filesystem with reflink support is created.
+# In the future it would be better to change MKFS_LOOP so that
+# one can create an xfs filesystem with reflink enabled in a
+# generic and simple way, instead of repeating the steps below
+# each time.
+TEST mkfs.xfs -f -i size=512 -m reflink=1 $B0/xfs_image;
+
+TEST mkdir $B0/bricks
+TEST mount -t xfs -o loop $B0/xfs_image $B0/bricks
+
+# Just a single brick volume. More test cases need to be
+# added in the future for distribute, replicate,
+# distributed replicate and distributed replicated sharded
+# volumes.
+TEST $CLI volume create $V0 $H0:$B0/bricks/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST dd if=/dev/urandom of=$M0/file bs=1M count=555;
+
+# check for the existence of the created file
+TEST stat $M0/file;
+
+# grab the size of the file
+SRC_SIZE=$(stat -c %s $M0/file);
+
+logdir=`gluster --print-logdir`
+
+# TODO:
+# For now, do not call the copy-file-range utility. The regression
+# machines are CentOS 7 based, which does not have the
+# copy_file_range API available. So, instead of this test case
+# causing regression failures, for now it is just a dummy test
+# case. Uncomment the tests below (until volume stop) when
+# copy_file_range is supported on the regression machines.
+#
+
+TEST build_tester $(dirname $0)/glfs-copy-file-range.c -lgfapi
+
+TEST ./$(dirname $0)/glfs-copy-file-range $H0 $V0 $logdir/gfapi-copy-file-range.log /file /new
+
+# check whether the destination file is created or not
+TEST stat $M0/new
+
+# check the size of the destination file
+DST_SIZE=$(stat -c %s $M0/new);
+
+# The sizes of the source and destination should be the same.
+# At least it ensures that the copy_file_range API is working
+# as expected. Whether the actual cloning happened via reflink
+# or via a read/write is a different matter.
+TEST [ $SRC_SIZE == $DST_SIZE ];
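+
+# A stronger check one could add here (a sketch only, not part of the
+# original test): compare the actual contents of the two files as well, e.g.
+#   TEST cmp $M0/file $M0/new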
+
+cleanup_tester $(dirname $0)/glfs-copy-file-range
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+UMOUNT_LOOP $B0/bricks;
+
+cleanup;
diff --git a/tests/basic/gfapi/gfapi-graph-switch-open-fd.t b/tests/basic/gfapi/gfapi-graph-switch-open-fd.t
new file mode 100644
index 00000000000..2e666be7ec7
--- /dev/null
+++ b/tests/basic/gfapi/gfapi-graph-switch-open-fd.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 3 ${H0}:$B0/brick{0..2};
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST touch $M0/sync
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/gfapi-keep-writing.c -lgfapi
+
+
+#Launch a program to keep doing writes on an fd
+./$(dirname $0)/gfapi-keep-writing ${H0} $V0 $logdir/gfapi-async-calls-test.log sync &
+p=$!
+sleep 1 #Let some writes go through
+#Check if graph switch will lead to any pending markers forever
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+
+
+TEST rm -f $M0/sync #Make sure the glfd is closed
+TEST wait #Wait for background process to die
+#Goal is to check if there is permanent FOOL changelog
+sleep 5
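+# An all-zero trusted.afr.dirty xattr on every brick means the graph switch
+# left no dirty (pending) markers behind, i.e. nothing is wrongly queued
+# for self-heal.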
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/brick0/glfs_test.txt trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/brick1/glfs_test.txt trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/brick2/glfs_test.txt trusted.afr.dirty
+
+cleanup_tester $(dirname $0)/gfapi-keep-writing
+
+cleanup;
diff --git a/tests/basic/gfapi/gfapi-keep-writing.c b/tests/basic/gfapi/gfapi-keep-writing.c
new file mode 100644
index 00000000000..91b59cea02b
--- /dev/null
+++ b/tests/basic/gfapi/gfapi-keep-writing.c
@@ -0,0 +1,129 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define LOG_ERR(msg) \
+ do { \
+ fprintf(stderr, "%s : Error (%s)\n", msg, strerror(errno)); \
+ } while (0)
+
+glfs_t *
+init_glfs(const char *hostname, const char *volname, const char *logfile)
+{
+ int ret = -1;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ LOG_ERR("glfs_new failed");
+ return NULL;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_volfile_server failed");
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_logging failed");
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ LOG_ERR("glfs_init failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret) {
+ glfs_fini(fs);
+ fs = NULL;
+ }
+
+ return fs;
+}
+
+int
+glfs_test_function(const char *hostname, const char *volname,
+ const char *logfile, const char *syncfile)
+{
+ int ret = -1;
+ int flags = O_CREAT | O_RDWR;
+ glfs_t *fs = NULL;
+ glfs_fd_t *glfd = NULL;
+ const char *buff = "This is from my prog\n";
+ const char *filename = "glfs_test.txt";
+ struct stat buf = {0};
+
+ fs = init_glfs(hostname, volname, logfile);
+ if (fs == NULL) {
+ LOG_ERR("init_glfs failed");
+ return -1;
+ }
+
+ glfd = glfs_creat(fs, filename, flags, 0644);
+ if (glfd == NULL) {
+ LOG_ERR("glfs_creat failed");
+ goto out;
+ }
+
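+    /* Keep writing until the sync file disappears; the test script removes
+     * it on the mount point to tell this writer to stop. */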
+ while (glfs_stat(fs, syncfile, &buf) == 0) {
+ ret = glfs_write(glfd, buff, strlen(buff), flags);
+ if (ret < 0) {
+ LOG_ERR("glfs_write failed");
+ goto out;
+ }
+ }
+
+ ret = glfs_close(glfd);
+ if (ret < 0) {
+ LOG_ERR("glfs_write failed");
+ goto out;
+ }
+
+out:
+ ret = glfs_fini(fs);
+ if (ret) {
+ LOG_ERR("glfs_fini failed");
+ }
+
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ char *hostname = NULL;
+ char *volname = NULL;
+ char *logfile = NULL;
+ char *syncfile = NULL;
+
+ if (argc != 5) {
+ fprintf(stderr, "Invalid argument\n");
+ exit(1);
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ logfile = argv[3];
+ syncfile = argv[4];
+
+ ret = glfs_test_function(hostname, volname, logfile, syncfile);
+ if (ret) {
+ LOG_ERR("glfs_test_function failed");
+ }
+
+ return ret;
+}
diff --git a/tests/basic/gfapi/gfapi-ssl-load-volfile-test.c b/tests/basic/gfapi/gfapi-ssl-load-volfile-test.c
new file mode 100644
index 00000000000..7beb8dd1fe4
--- /dev/null
+++ b/tests/basic/gfapi/gfapi-ssl-load-volfile-test.c
@@ -0,0 +1,127 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define LOG_ERR(msg) \
+ do { \
+ fprintf(stderr, "%s : Error (%s)\n", msg, strerror(errno)); \
+ } while (0)
+
+glfs_t *
+init_glfs(const char *hostname, const char *volname, const char *volfile,
+ const char *logfile)
+{
+ int ret = -1;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ LOG_ERR("glfs_new failed");
+ return NULL;
+ }
+
+ ret = glfs_set_volfile(fs, volfile);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_volfile failed");
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_logging failed");
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ LOG_ERR("glfs_init failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret) {
+ glfs_fini(fs);
+ fs = NULL;
+ }
+
+ return fs;
+}
+
+int
+glfs_test_function(const char *hostname, const char *volname,
+ const char *volfile, const char *logfile)
+{
+ int ret = -1;
+ int flags = O_CREAT | O_RDWR;
+ glfs_t *fs = NULL;
+ glfs_fd_t *glfd = NULL;
+ const char *buff = "This is from my prog\n";
+ const char *filename = "glfs_test.txt";
+
+ fs = init_glfs(hostname, volname, volfile, logfile);
+ if (fs == NULL) {
+ LOG_ERR("init_glfs failed");
+ return -1;
+ }
+
+ glfd = glfs_creat(fs, filename, flags, 0644);
+ if (glfd == NULL) {
+ LOG_ERR("glfs_creat failed");
+ goto out;
+ }
+
+ ret = glfs_write(glfd, buff, strlen(buff), flags);
+ if (ret < 0) {
+ LOG_ERR("glfs_write failed");
+ goto out;
+ }
+
+ ret = glfs_close(glfd);
+ if (ret < 0) {
+ LOG_ERR("glfs_write failed");
+ goto out;
+ }
+
+out:
+ ret = glfs_fini(fs);
+ if (ret) {
+ LOG_ERR("glfs_fini failed");
+ }
+
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ char *hostname = NULL;
+ char *volname = NULL;
+ char *volfile = NULL;
+ char *logfile = NULL;
+
+ if (argc != 5) {
+ fprintf(stderr, "Invalid argument\n");
+ exit(1);
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ volfile = argv[3];
+ logfile = argv[4];
+
+ ret = glfs_test_function(hostname, volname, volfile, logfile);
+ if (ret) {
+ LOG_ERR("glfs_test_function failed");
+ }
+
+ return ret;
+}
diff --git a/tests/basic/gfapi/gfapi-ssl-load-volfile-test.t b/tests/basic/gfapi/gfapi-ssl-load-volfile-test.t
new file mode 100755
index 00000000000..8e94df9d321
--- /dev/null
+++ b/tests/basic/gfapi/gfapi-ssl-load-volfile-test.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../traps.rc
+. $(dirname $0)/../../ssl.rc
+
+cleanup;
+
+sed -e "s,@@HOSTNAME@@,${H0},g" -e "s,@@BRICKPATH@@,${B0}/brick1,g" \
+ -e "s,@@SSL@@,off,g" \
+ $(dirname ${0})/protocol-client-ssl.vol.in \
+ > $(dirname ${0})/protocol-client-ssl.vol
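+
+# The sed above generates a static client volfile from the .vol.in template,
+# pointing at this host's brick with client/server SSL initially off; the
+# test program loads it directly via glfs_set_volfile().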
+
+TEST create_self_signed_certs
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/gfapi-ssl-load-volfile-test.c -lgfapi
+
+# Run test without I/O or management encryption
+TEST $(dirname $0)/gfapi-ssl-load-volfile-test $H0 $V0 \
+ $(dirname ${0})/protocol-client-ssl.vol \
+ $logdir/gfapi-ssl-load-volfile-test.log
+
+# Enable management encryption
+touch $GLUSTERD_WORKDIR/secure-access
+
+killall_gluster
+
+TEST glusterd
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+
+# Run test with management encryption (No I/O encryption)
+TEST $(dirname $0)/gfapi-ssl-load-volfile-test $H0 $V0 \
+ $(dirname ${0})/protocol-client-ssl.vol \
+ $logdir/gfapi-ssl-load-volfile-test.log
+
+# Enable I/O encryption
+TEST $CLI volume set $V0 server.ssl on
+
+killall_gluster
+
+sed -e "s,@@HOSTNAME@@,${H0},g" -e "s,@@BRICKPATH@@,${B0}/brick1,g" \
+ -e "s,@@SSL@@,on,g" \
+ $(dirname ${0})/protocol-client-ssl.vol.in \
+ > $(dirname ${0})/protocol-client-ssl.vol
+
+TEST glusterd
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+
+# Run test with both management and I/O encryption
+TEST $(dirname $0)/gfapi-ssl-load-volfile-test $H0 $V0 \
+ $(dirname ${0})/protocol-client-ssl.vol \
+ $logdir/gfapi-ssl-load-volfile-test.log
+
+cleanup_tester $(dirname $0)/gfapi-ssl-load-volfile-test
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
+
+# The NetBSD build scripts are not up to date, so this test fails on
+# NetBSD. Skipping the test on NetBSD for now.
+#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
diff --git a/tests/basic/gfapi/gfapi-statx-basic.c b/tests/basic/gfapi/gfapi-statx-basic.c
new file mode 100644
index 00000000000..a4943fa0fd1
--- /dev/null
+++ b/tests/basic/gfapi/gfapi-statx-basic.c
@@ -0,0 +1,184 @@
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <glusterfs/api/glfs.h>
+
+#define VALIDATE_AND_GOTO_LABEL_ON_ERROR(func, ret, label) \
+ do { \
+ if (ret < 0) { \
+ fprintf(stderr, "%s : returned error %d (%s)\n", func, ret, \
+ strerror(errno)); \
+ goto label; \
+ } \
+ } while (0)
+
+#define GOTO_LABEL_ON_FALSE(compstr, ret, label) \
+ do { \
+ if (ret == false) { \
+ fprintf(stderr, "%s : comparison failed!\n", compstr); \
+ goto label; \
+ } \
+ } while (0)
+
+#define WRITE_SIZE 513
+#define TRUNC_SIZE 4096
+
+/* Using a private function, hence providing a forward declaration kept in
+ * sync with the code in glfs-internal.h */
+int
+glfs_statx(struct glfs *fs, const char *path, unsigned int mask,
+ struct glfs_stat *statxbuf);
+
+int
+main(int argc, char *argv[])
+{
+ int ret = -1;
+ int flags = O_RDWR | O_SYNC;
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd1 = NULL;
+ char *volname = NULL;
+ char *logfile = NULL;
+ const char *filename = "file_tmp";
+    char buff[WRITE_SIZE] = {0};
+ struct stat sb;
+ unsigned int mask;
+ struct glfs_stat statx;
+ bool bret;
+
+ if (argc != 3) {
+ fprintf(stderr, "Invalid argument\n");
+ fprintf(stderr, "Usage: %s <volname> <logfile>\n", argv[0]);
+ return 1;
+ }
+
+ volname = argv[1];
+ logfile = argv[2];
+
+ fs = glfs_new(volname);
+ if (!fs)
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_new", ret, out);
+
+ ret = glfs_set_volfile_server(fs, "tcp", "localhost", 24007);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_set_volfile_server", ret, out);
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_set_logging", ret, out);
+
+ ret = glfs_init(fs);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_init", ret, out);
+
+ fd1 = glfs_creat(fs, filename, flags, 0644);
+ if (fd1 == NULL) {
+ ret = -1;
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_creat", ret, out);
+ }
+
+ ret = glfs_truncate(fs, filename, TRUNC_SIZE);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_truncate", ret, out);
+
+ ret = glfs_write(fd1, buff, WRITE_SIZE, flags);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_write", ret, out);
+
+ ret = glfs_fstat(fd1, &sb);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_fstat", ret, out);
+
+ if (sb.st_size != TRUNC_SIZE) {
+ fprintf(stderr, "wrong size %jd should be %jd\n", (intmax_t)sb.st_size,
+ (intmax_t)2048);
+ ret = -1;
+ goto out;
+ }
+
+ glfs_close(fd1);
+ fd1 = NULL;
+
+ /* TEST 1: Invalid mask to statx */
+ mask = 0xfafadbdb;
+ ret = glfs_statx(fs, filename, mask, NULL);
+ if (ret == 0 || ((ret == -1) && (errno != EINVAL))) {
+ fprintf(stderr,
+ "Invalid args passed, but error returned is"
+ " incorrect (ret - %d, errno - %d)\n",
+ ret, errno);
+ ret = -1;
+ goto out;
+ }
+ ret = 0;
+
+ /* TEST 2: Call statx and validate fields against prior fstat data */
+    /* NOTE: This fails, as iatt->ia_flags are not carried through the stack;
+     * for example, if mdc_to_iatt is invoked to serve a cached stat, we lose
+     * the flags. */
+ mask = GLFS_STAT_ALL;
+ ret = glfs_statx(fs, filename, mask, &statx);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_statx", ret, out);
+
+ if ((statx.glfs_st_mask & GLFS_STAT_BASIC_STATS) != GLFS_STAT_BASIC_STATS) {
+ fprintf(stderr, "Invalid glfs_st_mask, expecting 0x%x got 0x%x\n",
+ GLFS_STAT_ALL, statx.glfs_st_mask);
+ ret = -1;
+ goto out;
+ }
+
+ bret = (sb.st_ino == statx.glfs_st_ino);
+ GOTO_LABEL_ON_FALSE("(sb.st_ino == statx.glfs_st_ino)", bret, out);
+
+ bret = (sb.st_mode == statx.glfs_st_mode);
+ GOTO_LABEL_ON_FALSE("(sb.st_mode == statx.glfs_st_mode)", bret, out);
+
+ bret = (sb.st_nlink == statx.glfs_st_nlink);
+ GOTO_LABEL_ON_FALSE("(sb.st_nlink == statx.glfs_st_nlink)", bret, out);
+
+ bret = (sb.st_uid == statx.glfs_st_uid);
+ GOTO_LABEL_ON_FALSE("(sb.st_uid == statx.glfs_st_uid)", bret, out);
+
+ bret = (sb.st_gid == statx.glfs_st_gid);
+ GOTO_LABEL_ON_FALSE("(sb.st_gid == statx.glfs_st_gid)", bret, out);
+
+ bret = (sb.st_size == statx.glfs_st_size);
+ GOTO_LABEL_ON_FALSE("(sb.st_size == statx.glfs_st_size)", bret, out);
+
+ bret = (sb.st_blksize == statx.glfs_st_blksize);
+ GOTO_LABEL_ON_FALSE("(sb.st_blksize == statx.glfs_st_blksize)", bret, out);
+
+ bret = (sb.st_blocks == statx.glfs_st_blocks);
+ GOTO_LABEL_ON_FALSE("(sb.st_blocks == statx.glfs_st_blocks)", bret, out);
+
+ bret = (!memcmp(&sb.st_atim, &statx.glfs_st_atime,
+ sizeof(struct timespec)));
+ GOTO_LABEL_ON_FALSE("(sb.st_atim == statx.glfs_st_atime)", bret, out);
+
+ bret = (!memcmp(&sb.st_mtim, &statx.glfs_st_mtime,
+ sizeof(struct timespec)));
+ GOTO_LABEL_ON_FALSE("(sb.st_mtim == statx.glfs_st_mtime)", bret, out);
+
+ bret = (!memcmp(&sb.st_ctim, &statx.glfs_st_ctime,
+ sizeof(struct timespec)));
+ GOTO_LABEL_ON_FALSE("(sb.st_ctim == statx.glfs_st_ctime)", bret, out);
+
+ /* TEST 3: Check if partial masks are accepted */
+ mask = GLFS_STAT_TYPE | GLFS_STAT_UID | GLFS_STAT_GID;
+ ret = glfs_statx(fs, filename, mask, &statx);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_statx", ret, out);
+
+ /* We currently still return all stats, as is acceptable based on the API
+ * definition in the header (and in statx as well) */
+ if ((statx.glfs_st_mask & GLFS_STAT_BASIC_STATS) != GLFS_STAT_BASIC_STATS) {
+ fprintf(stderr, "Invalid glfs_st_mask, expecting 0x%x got 0x%x\n",
+ GLFS_STAT_ALL, statx.glfs_st_mask);
+ ret = -1;
+ goto out;
+ }
+out:
+ if (fd1 != NULL)
+ glfs_close(fd1);
+ if (fs) {
+ (void)glfs_fini(fs);
+ }
+
+ return ret;
+}
diff --git a/tests/basic/gfapi/gfapi-statx-basic.t b/tests/basic/gfapi/gfapi-statx-basic.t
new file mode 100755
index 00000000000..d9acbce2f99
--- /dev/null
+++ b/tests/basic/gfapi/gfapi-statx-basic.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 ${H0}:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+# NOTE: The test passes only because of a very specific volume configuration.
+# Disable md-cache, as it does not save and return ia_flags from the iatt.
+# This is possibly true of other xlators as well (ec/afr); these need to be
+# fixed, or statx hacked to return all basic attrs anyway.
+TEST $CLI volume set $V0 performance.md-cache-timeout 0
+
+logdir=`gluster --print-logdir`
+
+build_tester $(dirname $0)/gfapi-statx-basic.c -lgfapi
+
+TEST ./$(dirname $0)/gfapi-statx-basic $V0 $logdir/gfapi-statx-basic.log
+
+cleanup_tester $(dirname $0)/gfapi-statx-basic
+
+cleanup;
diff --git a/tests/basic/gfapi/glfs-copy-file-range.c b/tests/basic/gfapi/glfs-copy-file-range.c
new file mode 100644
index 00000000000..1c5fd81fc87
--- /dev/null
+++ b/tests/basic/gfapi/glfs-copy-file-range.c
@@ -0,0 +1,180 @@
+/*
+ Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+#include <string.h>
+#include <time.h>
+#include <libgen.h>
+
+static void
+cleanup(glfs_t *fs)
+{
+ if (!fs)
+ return;
+#if 0
+    /* The glfs fini path is still racy and crashes the program. Since
+     * this program has to die anyway, we are not going to call fini
+     * in released versions, i.e. final builds. For all internal
+     * testing let's enable this so that the glfs_fini code path
+     * becomes stable. */
+ glfs_fini (fs);
+#endif
+}
+
+int
+main(int argc, char **argv)
+{
+ glfs_t *fs = NULL;
+ int ret = -1;
+ char *volname = NULL;
+ char *logfilepath = NULL;
+ char *path_src = NULL;
+ char *path_dst = NULL;
+ glfs_fd_t *glfd_in = NULL;
+ glfs_fd_t *glfd_out = NULL;
+ char *volfile_server = NULL;
+
+ struct stat stbuf = {
+ 0,
+ };
+ struct glfs_stat stat_src = {
+ 0,
+ };
+ struct glfs_stat prestat_dst = {
+ 0,
+ };
+ struct glfs_stat poststat_dst = {
+ 0,
+ };
+ size_t len;
+
+ if (argc < 6) {
+ printf("%s <volume> <log file path> <source> <destination>", argv[0]);
+ ret = -1;
+ goto out;
+ }
+
+ volfile_server = argv[1];
+ volname = argv[2];
+ logfilepath = argv[3];
+ path_src = argv[4];
+ path_dst = argv[5];
+
+ if (path_src[0] != '/') {
+ fprintf(stderr, "source path %s is not absolute", path_src);
+ errno = EINVAL;
+ goto out;
+ }
+
+ if (path_dst[0] != '/') {
+ fprintf(stderr, "destination path %s is not absolute", path_dst);
+ errno = EINVAL;
+ goto out;
+ }
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ ret = -errno;
+ fprintf(stderr, "Not able to initialize volume '%s'", volname);
+ goto out;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", volfile_server, 24007);
+ if (ret < 0) {
+ ret = -errno;
+ fprintf(stderr,
+ "Failed to set the volfile server, "
+ "%s",
+ strerror(errno));
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, logfilepath, 7);
+ if (ret < 0) {
+ ret = -errno;
+ fprintf(stderr,
+ "Failed to set the log file path, "
+ "%s",
+ strerror(errno));
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ ret = -errno;
+ if (errno == ENOENT) {
+ fprintf(stderr, "Volume %s does not exist", volname);
+ } else {
+ fprintf(stderr,
+ "%s: Not able to fetch "
+ "volfile from glusterd",
+ volname);
+ }
+ goto out;
+ }
+
+ glfd_in = glfs_open(fs, path_src, O_RDONLY | O_NONBLOCK);
+ if (!glfd_in) {
+ ret = -errno;
+ goto out;
+ } else {
+ printf("OPEN_SRC: opening %s is success\n", path_src);
+ }
+
+ glfd_out = glfs_creat(fs, path_dst, O_RDWR, 0644);
+ if (!glfd_out) {
+ fprintf(stderr,
+ "FAILED_DST_OPEN: failed to "
+ "open (create) %s (%s)\n",
+ path_dst, strerror(errno));
+ ret = -errno;
+ goto out;
+ } else {
+ printf("OPEN_DST: opening %s is success\n", path_dst);
+ }
+
+ ret = glfs_fstat(glfd_in, &stbuf);
+ if (ret < 0) {
+ ret = -errno;
+ goto out;
+ } else {
+ printf("FSTAT_SRC: fstat on %s is success\n", path_dst);
+ }
+
+ len = stbuf.st_size;
+
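+    /* Like copy_file_range(2), passing NULL for off_in/off_out is expected
+     * to use and advance the open fds' own file offsets, so looping until
+     * 'len' reaches zero copies the remaining bytes after any short copy. */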
+ do {
+ ret = glfs_copy_file_range(glfd_in, NULL, glfd_out, NULL, len, 0,
+ &stat_src, &prestat_dst, &poststat_dst);
+ if (ret == -1) {
+ fprintf(stderr, "copy_file_range failed with %s\n",
+ strerror(errno));
+ ret = -errno;
+ break;
+ } else {
+ printf("copy_file_range successful\n");
+ len -= ret;
+ }
+ } while (len > 0);
+
+out:
+ if (glfd_in)
+ glfs_close(glfd_in);
+ if (glfd_out)
+ glfs_close(glfd_out);
+
+ cleanup(fs);
+
+ return ret;
+}
diff --git a/tests/basic/gfapi/glfs_h_creat_open.c b/tests/basic/gfapi/glfs_h_creat_open.c
new file mode 100644
index 00000000000..7672561e73f
--- /dev/null
+++ b/tests/basic/gfapi/glfs_h_creat_open.c
@@ -0,0 +1,118 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define LOG_ERR(func, ret) \
+ do { \
+ if (ret != 0) { \
+ fprintf(stderr, "%s : returned error ret(%d), errno(%d)\n", func, \
+ ret, errno); \
+ exit(1); \
+ } else { \
+ fprintf(stderr, "%s : returned %d\n", func, ret); \
+ } \
+ } while (0)
+#define LOG_IF_NO_ERR(func, ret) \
+ do { \
+ if (ret == 0) { \
+ fprintf(stderr, "%s : hasn't returned error %d\n", func, ret); \
+ exit(1); \
+ } else { \
+ fprintf(stderr, "%s : returned %d\n", func, ret); \
+ } \
+ } while (0)
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs = NULL;
+ int ret = 0;
+ struct glfs_object *root = NULL, *leaf = NULL;
+ glfs_fd_t *fd = NULL;
+ char *filename = "/ro-file";
+ struct stat sb = {
+ 0,
+ };
+ char *logfile = NULL;
+ char *volname = NULL;
+ char *hostname = NULL;
+ char buf[32] = "abcdefghijklmnopqrstuvwxyz012345";
+
+ fprintf(stderr, "Starting glfs_h_creat_open\n");
+
+ if (argc != 4) {
+ fprintf(stderr, "Invalid argument\n");
+ exit(1);
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ logfile = argv[3];
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ LOG_ERR("glfs_set_volfile_server", ret);
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ LOG_ERR("glfs_set_logging", ret);
+
+ ret = glfs_init(fs);
+ LOG_ERR("glfs_init", ret);
+
+ sleep(2);
+ root = glfs_h_lookupat(fs, NULL, "/", &sb, 0);
+ if (!root) {
+ ret = -1;
+ LOG_ERR("glfs_h_lookupat root", ret);
+ }
+ leaf = glfs_h_lookupat(fs, root, filename, &sb, 0);
+ if (!leaf) {
+ ret = -1;
+ LOG_IF_NO_ERR("glfs_h_lookupat leaf", ret);
+ }
+
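+    /* glfs_h_creat_open() creates the file and also returns an open fd, so
+     * the test expects the write below to succeed on this fd even though
+     * the mode is 0444 and the open flag is O_RDONLY. */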
+ leaf = glfs_h_creat_open(fs, root, filename, O_RDONLY, 00444, &sb, &fd);
+ if (!leaf || !fd) {
+ ret = -1;
+ LOG_ERR("glfs_h_creat leaf", ret);
+ }
+ fprintf(stderr, "glfs_h_create_open leaf - %p\n", leaf);
+
+ ret = glfs_write(fd, buf, 32, 0);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_write: error writing to file %s, %s\n", filename,
+ strerror(errno));
+ goto out;
+ }
+
+ ret = glfs_h_getattrs(fs, leaf, &sb);
+ LOG_ERR("glfs_h_getattrs", ret);
+
+ if (sb.st_size != 32) {
+ fprintf(stderr, "glfs_write: post size mismatch\n");
+ goto out;
+ }
+
+ fprintf(stderr, "Successfully opened and written to a read-only file \n");
+out:
+ if (fd)
+ glfs_close(fd);
+
+ ret = glfs_fini(fs);
+ LOG_ERR("glfs_fini", ret);
+
+ fprintf(stderr, "End of libgfapi_fini\n");
+
+ exit(0);
+}
diff --git a/tests/basic/gfapi/glfs_h_creat_open.t b/tests/basic/gfapi/glfs_h_creat_open.t
new file mode 100755
index 00000000000..f24ae7395be
--- /dev/null
+++ b/tests/basic/gfapi/glfs_h_creat_open.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/glfs_h_creat_open.c -lgfapi
+
+TEST ./$(dirname $0)/glfs_h_creat_open $H0 $V0 $logdir/glfs.log
+
+cleanup_tester $(dirname $0)/glfs_h_creat_open
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/basic/gfapi/glfsxmp-coverage.c b/tests/basic/gfapi/glfsxmp-coverage.c
new file mode 100644
index 00000000000..51650023efd
--- /dev/null
+++ b/tests/basic/gfapi/glfsxmp-coverage.c
@@ -0,0 +1,1900 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+#include <string.h>
+#include <time.h>
+
+#define TEST_STR_LEN 2048
+
+int
+test_dirops(glfs_t *fs)
+{
+ glfs_fd_t *fd = NULL;
+ char buf[2048];
+ struct dirent *entry = NULL;
+
+ fd = glfs_opendir(fs, "/");
+ if (!fd) {
+ fprintf(stderr, "/: %s\n", strerror(errno));
+ return -1;
+ }
+
+ fprintf(stderr, "Entries:\n");
+ while (glfs_readdir_r(fd, (struct dirent *)buf, &entry), entry) {
+ fprintf(stderr, "%s: %lu\n", entry->d_name, glfs_telldir(fd));
+ }
+
+ /* Should internally call fsyncdir(), hopefully */
+ glfs_fsync(fd, NULL, NULL);
+
+ glfs_closedir(fd);
+ return 0;
+}
+
+int
+test_xattr(glfs_t *fs)
+{
+ char *filename = "/filename2";
+ char *linkfile = "/linkfile";
+ glfs_fd_t *fd = NULL;
+ char buf[512];
+ char *ptr;
+ int ret;
+
+ ret = glfs_setxattr(fs, filename, "user.testkey", "testval", 8, 0);
+ fprintf(stderr, "setxattr(%s): %d (%s)\n", filename, ret, strerror(errno));
+
+ ret = glfs_setxattr(fs, filename, "user.testkey2", "testval", 8, 0);
+ fprintf(stderr, "setxattr(%s): %d (%s)\n", filename, ret, strerror(errno));
+
+ ret = glfs_getxattr(fs, filename, "user.testkey", buf, 512);
+ fprintf(stderr, "getxattr(%s): %d (%s)\n", filename, ret, strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ ret = glfs_listxattr(fs, filename, buf, 512);
+ fprintf(stderr, "listxattr(%s): %d (%s)\n", filename, ret, strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ ret = glfs_symlink(fs, "filename", linkfile);
+ fprintf(stderr, "symlink(%s %s): %s\n", filename, linkfile,
+ strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ ret = glfs_readlink(fs, linkfile, buf, 512);
+ fprintf(stderr, "readlink(%s) : %d (%s)\n", filename, ret, strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ ret = glfs_lsetxattr(fs, filename, "user.testkey3", "testval", 8, 0);
+ fprintf(stderr, "lsetxattr(%s) : %d (%s)\n", linkfile, ret,
+ strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ ret = glfs_llistxattr(fs, linkfile, buf, 512);
+ fprintf(stderr, "llistxattr(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ ret = glfs_lgetxattr(fs, filename, "user.testkey3", buf, 512);
+ fprintf(stderr, "lgetxattr(%s): %d (%s)\n", linkfile, ret, strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ for (ptr = buf; ptr < buf + ret; ptr++) {
+ printf("key=%s\n", ptr);
+ ptr += strlen(ptr);
+ }
+
+ ret = glfs_removexattr(fs, filename, "user.testkey2");
+ fprintf(stderr, "removexattr(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+
+ fd = glfs_open(fs, filename, O_RDWR);
+ fprintf(stderr, "open(%s): (%p) %s\n", filename, fd, strerror(errno));
+
+ ret = glfs_fsetxattr(fd, "user.testkey2", "testval", 8, 0);
+ fprintf(stderr, "fsetxattr(%s): %d (%s)\n", filename, ret, strerror(errno));
+
+ ret = glfs_fgetxattr(fd, "user.testkey2", buf, 512);
+ fprintf(stderr, "fgetxattr(%s): %d (%s)\n", filename, ret, strerror(errno));
+
+ ret = glfs_flistxattr(fd, buf, 512);
+ fprintf(stderr, "flistxattr(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ for (ptr = buf; ptr < buf + ret; ptr++) {
+ printf("key=%s\n", ptr);
+ ptr += strlen(ptr);
+ }
+
+ ret = glfs_fremovexattr(fd, "user.testkey2");
+ fprintf(stderr, "fremovexattr(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+
+ glfs_close(fd);
+
+ return 0;
+}
+
+int
+test_chdir(glfs_t *fs)
+{
+ int ret = -1;
+ char *dir = "/dir";
+ char *topdir = "/topdir";
+ char *linkdir = "/linkdir";
+ char *linkdir2 = "/linkdir2";
+ char *subdir = "./subdir";
+ char *respath = NULL;
+ char pathbuf[4096];
+
+ ret = glfs_mkdir(fs, topdir, 0755);
+ fprintf(stderr, "mkdir(%s): %s\n", topdir, strerror(errno));
+ if (ret)
+ return -1;
+
+ ret = glfs_mkdir(fs, dir, 0755);
+ fprintf(stderr, "mkdir(%s): %s\n", dir, strerror(errno));
+ if (ret)
+ return -1;
+
+ respath = glfs_getcwd(fs, pathbuf, 4096);
+ fprintf(stdout, "getcwd() = %s\n", respath);
+
+ ret = glfs_symlink(fs, "topdir", linkdir);
+ if (ret) {
+ fprintf(stderr, "symlink(%s, %s): %s\n", topdir, linkdir,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_chdir(fs, linkdir);
+ if (ret) {
+ fprintf(stderr, "chdir(%s): %s\n", linkdir, strerror(errno));
+ return -1;
+ }
+
+ respath = glfs_getcwd(fs, pathbuf, 4096);
+ fprintf(stdout, "getcwd() = %s\n", respath);
+
+ respath = glfs_realpath(fs, subdir, pathbuf);
+ if (respath) {
+ fprintf(stderr, "realpath(%s) worked unexpectedly: %s\n", subdir,
+ respath);
+ return -1;
+ }
+
+ ret = glfs_mkdir(fs, subdir, 0755);
+ if (ret) {
+ fprintf(stderr, "mkdir(%s): %s\n", subdir, strerror(errno));
+ return -1;
+ }
+
+ respath = glfs_realpath(fs, subdir, pathbuf);
+ if (!respath) {
+ fprintf(stderr, "realpath(%s): %s\n", subdir, strerror(errno));
+ } else {
+ fprintf(stdout, "realpath(%s) = %s\n", subdir, respath);
+ }
+
+ ret = glfs_chdir(fs, subdir);
+ if (ret) {
+ fprintf(stderr, "chdir(%s): %s\n", subdir, strerror(errno));
+ return -1;
+ }
+
+ respath = glfs_getcwd(fs, pathbuf, 4096);
+ fprintf(stdout, "getcwd() = %s\n", respath);
+
+ respath = glfs_realpath(fs, "/linkdir/subdir", pathbuf);
+ if (!respath) {
+ fprintf(stderr, "realpath(/linkdir/subdir): %s\n", strerror(errno));
+ } else {
+ fprintf(stdout, "realpath(/linkdir/subdir) = %s\n", respath);
+ }
+
+ return 0;
+}
+
+#ifdef DEBUG
+static void
+peek_stat(struct stat *sb)
+{
+ printf("Dumping stat information:\n");
+ printf("File type: ");
+
+ switch (sb->st_mode & S_IFMT) {
+ case S_IFBLK:
+ printf("block device\n");
+ break;
+ case S_IFCHR:
+ printf("character device\n");
+ break;
+ case S_IFDIR:
+ printf("directory\n");
+ break;
+ case S_IFIFO:
+ printf("FIFO/pipe\n");
+ break;
+ case S_IFLNK:
+ printf("symlink\n");
+ break;
+ case S_IFREG:
+ printf("regular file\n");
+ break;
+ case S_IFSOCK:
+ printf("socket\n");
+ break;
+ default:
+ printf("unknown?\n");
+ break;
+ }
+
+ printf("I-node number: %ld\n", (long)sb->st_ino);
+
+ printf("Mode: %lo (octal)\n",
+ (unsigned long)sb->st_mode);
+
+ printf("Link count: %ld\n", (long)sb->st_nlink);
+ printf("Ownership: UID=%ld GID=%ld\n", (long)sb->st_uid,
+ (long)sb->st_gid);
+
+ printf("Preferred I/O block size: %ld bytes\n", (long)sb->st_blksize);
+ printf("File size: %lld bytes\n", (long long)sb->st_size);
+ printf("Blocks allocated: %lld\n", (long long)sb->st_blocks);
+
+ printf("Last status change: %s", ctime(&sb->st_ctime));
+ printf("Last file access: %s", ctime(&sb->st_atime));
+ printf("Last file modification: %s", ctime(&sb->st_mtime));
+
+ return;
+}
+
+static void
+peek_handle(unsigned char *glid)
+{
+ int i;
+
+ for (i = 0; i < GFAPI_HANDLE_LENGTH; i++) {
+ printf(":%02x:", glid[i]);
+ }
+ printf("\n");
+}
+#else /* DEBUG */
+static void
+peek_stat(struct stat *sb)
+{
+ return;
+}
+
+static void
+peek_handle(unsigned char *id)
+{
+ return;
+}
+#endif /* DEBUG */
+
+glfs_t *fs = NULL;
+char *full_parent_name = "/testdir", *parent_name = "testdir";
+
+void
+test_h_unlink(void)
+{
+ char *my_dir = "unlinkdir";
+ char *my_file = "file.txt";
+ char *my_subdir = "dir1";
+ struct glfs_object *parent = NULL, *leaf = NULL, *dir = NULL,
+ *subdir = NULL, *subleaf = NULL;
+ struct stat sb;
+ int ret;
+
+ printf("glfs_h_unlink tests: In Progress\n");
+
+ /* Prepare tests */
+ parent = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, NULL, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dir = glfs_h_mkdir(fs, parent, my_dir, 0755, &sb);
+ if (dir == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ my_dir, parent, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ leaf = glfs_h_creat(fs, dir, my_file, O_CREAT, 0644, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, dir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ subdir = glfs_h_mkdir(fs, dir, my_subdir, 0755, &sb);
+ if (subdir == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ my_subdir, dir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ subleaf = glfs_h_creat(fs, subdir, my_file, O_CREAT, 0644, &sb);
+ if (subleaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, subdir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ /* unlink non empty directory */
+ ret = glfs_h_unlink(fs, dir, my_subdir);
+ if ((ret && errno != ENOTEMPTY) || (ret == 0)) {
+ fprintf(stderr,
+ "glfs_h_unlink: error unlinking %s: it is non empty: %s\n",
+ my_subdir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ /* unlink regular file */
+ ret = glfs_h_unlink(fs, subdir, my_file);
+ if (ret) {
+ fprintf(stderr, "glfs_h_unlink: error unlinking %s: from (%p),%s\n",
+ my_file, subdir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ /* unlink directory */
+ ret = glfs_h_unlink(fs, dir, my_subdir);
+ if (ret) {
+ fprintf(stderr, "glfs_h_unlink: error unlinking %s: from (%p),%s\n",
+ my_subdir, dir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ /* unlink regular file */
+ ret = glfs_h_unlink(fs, dir, my_file);
+ if (ret) {
+ fprintf(stderr, "glfs_h_unlink: error unlinking %s: from (%p),%s\n",
+ my_file, dir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ /* unlink non-existent regular file */
+ ret = glfs_h_unlink(fs, dir, my_file);
+ if ((ret && errno != ENOENT) || (ret == 0)) {
+ fprintf(stderr,
+ "glfs_h_unlink: error unlinking non-existent %s: invalid errno "
+ ",%d, %s\n",
+ my_file, ret, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ /* unlink non-existent directory */
+ ret = glfs_h_unlink(fs, dir, my_subdir);
+ if ((ret && errno != ENOENT) || (ret == 0)) {
+ fprintf(stderr,
+ "glfs_h_unlink: error unlinking non-existent %s: invalid "
+ "errno ,%d, %s\n",
+ my_subdir, ret, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ /* unlink directory */
+ ret = glfs_h_unlink(fs, parent, my_dir);
+ if (ret) {
+ fprintf(stderr, "glfs_h_unlink: error unlinking %s: from (%p),%s\n",
+ my_dir, dir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ printf("glfs_h_unlink tests: PASSED\n");
+
+out:
+ if (dir)
+ glfs_h_close(dir);
+ if (leaf)
+ glfs_h_close(leaf);
+ if (subdir)
+ glfs_h_close(subdir);
+ if (subleaf)
+ glfs_h_close(subleaf);
+ if (parent)
+ glfs_h_close(parent);
+
+ return;
+}
+
+void
+test_h_getsetattrs(void)
+{
+ char *my_dir = "attrdir";
+ char *my_file = "attrfile.txt";
+ struct glfs_object *parent = NULL, *leaf = NULL, *dir = NULL;
+ struct stat sb, retsb;
+ int ret, valid;
+ struct timespec timestamp;
+
+ printf("glfs_h_getattrs and setattrs tests: In Progress\n");
+
+ /* Prepare tests */
+ parent = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, NULL, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dir = glfs_h_mkdir(fs, parent, my_dir, 0755, &sb);
+ if (dir == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ my_dir, parent, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_creat(fs, dir, my_file, O_CREAT, 0644, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, dir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ ret = glfs_h_getattrs(fs, dir, &retsb);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_getattrs: error %s: from (%p),%s\n", my_dir,
+ dir, strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&retsb);
+ /* TODO: Compare stat information */
+
+ retsb.st_mode = 00666;
+ retsb.st_uid = 1000;
+ retsb.st_gid = 1001;
+ ret = clock_gettime(CLOCK_REALTIME, &timestamp);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+ retsb.st_atim = timestamp;
+ retsb.st_mtim = timestamp;
+ valid = GFAPI_SET_ATTR_MODE | GFAPI_SET_ATTR_UID | GFAPI_SET_ATTR_GID |
+ GFAPI_SET_ATTR_ATIME | GFAPI_SET_ATTR_MTIME;
+ peek_stat(&retsb);
+
+ ret = glfs_h_setattrs(fs, dir, &retsb, valid);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_setattrs: error %s: from (%p),%s\n", my_dir,
+ dir, strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+
+ memset(&retsb, 0, sizeof(struct stat));
+ ret = glfs_h_stat(fs, dir, &retsb);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_stat: error %s: from (%p),%s\n", my_dir, dir,
+ strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&retsb);
+
+ printf("glfs_h_getattrs and setattrs tests: PASSED\n");
+out:
+ if (parent)
+ glfs_h_close(parent);
+ if (leaf)
+ glfs_h_close(leaf);
+ if (dir)
+ glfs_h_close(dir);
+
+ return;
+}
+
+void
+test_h_truncate(void)
+{
+ char *my_dir = "truncatedir";
+ char *my_file = "file.txt";
+ struct glfs_object *root = NULL, *parent = NULL, *leaf = NULL;
+ struct stat sb;
+ glfs_fd_t *fd = NULL;
+ char buf[32];
+ off_t offset = 0;
+ int ret = 0;
+
+ printf("glfs_h_truncate tests: In Progress\n");
+
+ /* Prepare tests */
+ root = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (root == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, NULL, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ parent = glfs_h_mkdir(fs, root, my_dir, 0755, &sb);
+ if (parent == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ my_dir, root, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_creat(fs, parent, my_file, O_CREAT, 0644, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, parent, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ fd = glfs_h_open(fs, leaf, O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_h_open: error on open of %s: %s\n", my_file,
+ strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+
+ memcpy(buf, "abcdefghijklmnopqrstuvwxyz012345", 32);
+ ret = glfs_write(fd, buf, 32, 0);
+
+ /* run tests */
+ /* truncate lower */
+ offset = 30;
+ ret = glfs_h_truncate(fs, leaf, offset);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_truncate: error creating %s: from (%p),%s\n",
+ my_file, parent, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ ret = glfs_h_getattrs(fs, leaf, &sb);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_getattrs: error for %s (%p),%s\n", my_file,
+ leaf, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ if (sb.st_size != offset) {
+ fprintf(stderr, "glfs_h_truncate: post size mismatch\n");
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+
+ /* truncate higher */
+ offset = 32;
+ ret = glfs_h_truncate(fs, leaf, offset);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_truncate: error creating %s: from (%p),%s\n",
+ my_file, parent, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ ret = glfs_h_getattrs(fs, leaf, &sb);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_getattrs: error for %s (%p),%s\n", my_file,
+ leaf, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ if (sb.st_size != offset) {
+ fprintf(stderr, "glfs_h_truncate: post size mismatch\n");
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+
+ /* truncate equal */
+ offset = 30;
+ ret = glfs_h_truncate(fs, leaf, offset);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_truncate: error creating %s: from (%p),%s\n",
+ my_file, parent, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ ret = glfs_h_getattrs(fs, leaf, &sb);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_getattrs: error for %s (%p),%s\n", my_file,
+ leaf, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ if (sb.st_size != offset) {
+ fprintf(stderr, "glfs_h_truncate: post size mismatch\n");
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+
+ printf("glfs_h_truncate tests: PASSED\n");
+out:
+ if (fd)
+ glfs_close(fd);
+ if (root)
+ glfs_h_close(root);
+ if (parent)
+ glfs_h_close(parent);
+ if (leaf)
+ glfs_h_close(leaf);
+
+ return;
+}
+
+void
+test_h_links(void)
+{
+ char *my_dir = "linkdir";
+ char *my_file = "file.txt";
+ char *my_symlnk = "slnk.txt";
+ char *my_lnk = "lnk.txt";
+ char *linksrc_dir = "dir1";
+ char *linktgt_dir = "dir2";
+ struct glfs_object *root = NULL, *parent = NULL, *leaf = NULL,
+ *dirsrc = NULL, *dirtgt = NULL, *dleaf = NULL;
+ struct glfs_object *ln1 = NULL;
+ struct stat sb;
+ int ret;
+ char *buf = NULL;
+
+ printf("glfs_h_link(s) tests: In Progress\n");
+
+ /* Prepare tests */
+ root = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (root == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, NULL, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ parent = glfs_h_mkdir(fs, root, my_dir, 0755, &sb);
+ if (parent == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ my_dir, root, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_creat(fs, parent, my_file, O_CREAT, 0644, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, parent, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dirsrc = glfs_h_mkdir(fs, parent, linksrc_dir, 0755, &sb);
+ if (dirsrc == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ linksrc_dir, parent, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dirtgt = glfs_h_mkdir(fs, parent, linktgt_dir, 0755, &sb);
+ if (dirtgt == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ linktgt_dir, parent, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dleaf = glfs_h_creat(fs, dirsrc, my_file, O_CREAT, 0644, &sb);
+ if (dleaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, dirsrc, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* run tests */
+ /* sym link: /testdir/linkdir/file.txt to ./slnk.txt */
+ ln1 = glfs_h_symlink(fs, parent, my_symlnk, "./file.txt", &sb);
+ if (ln1 == NULL) {
+ fprintf(stderr, "glfs_h_symlink: error creating %s: from (%p),%s\n",
+ my_symlnk, parent, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ buf = calloc(1024, sizeof(char));
+ if (buf == NULL) {
+ fprintf(stderr, "Error allocating memory\n");
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+
+ ret = glfs_h_readlink(fs, ln1, buf, 1024);
+ if (ret <= 0) {
+ fprintf(stderr, "glfs_h_readlink: error reading %s: from (%p),%s\n",
+ my_symlnk, ln1, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ if (!(strncmp(buf, my_symlnk, strlen(my_symlnk)))) {
+ fprintf(stderr,
+ "glfs_h_readlink: error mismatch in link name: actual %s: "
+ "retrieved %s\n",
+ my_symlnk, buf);
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+
+ /* link: /testdir/linkdir/file.txt to ./lnk.txt */
+ ret = glfs_h_link(fs, leaf, parent, my_lnk);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_link: error creating %s: from (%p),%s\n",
+ my_lnk, parent, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ /* TODO: Should write content to a file and read from the link */
+
+ /* link: /testdir/linkdir/dir1/file.txt to ../dir2/slnk.txt */
+ ret = glfs_h_link(fs, dleaf, dirtgt, my_lnk);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_link: error creating %s: from (%p),%s\n",
+ my_lnk, dirtgt, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ /* TODO: Should write content to a file and read from the link */
+
+ printf("glfs_h_link(s) tests: PASSED\n");
+
+out:
+ if (root)
+ glfs_h_close(root);
+ if (parent)
+ glfs_h_close(parent);
+ if (leaf)
+ glfs_h_close(leaf);
+ if (dirsrc)
+ glfs_h_close(dirsrc);
+ if (dirtgt)
+ glfs_h_close(dirtgt);
+ if (dleaf)
+ glfs_h_close(dleaf);
+ if (ln1)
+ glfs_h_close(ln1);
+ if (buf)
+ free(buf);
+
+ return;
+}
+
+void
+test_h_rename(void)
+{
+ char *my_dir = "renamedir";
+ char *my_file = "file.txt";
+ char *src_dir = "dir1";
+ char *tgt_dir = "dir2";
+ struct glfs_object *root = NULL, *parent = NULL, *leaf = NULL,
+ *dirsrc = NULL, *dirtgt = NULL, *dleaf = NULL;
+ struct stat sb;
+ int ret;
+
+ printf("glfs_h_rename tests: In Progress\n");
+
+ /* Prepare tests */
+ root = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (root == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, NULL, strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ parent = glfs_h_mkdir(fs, root, my_dir, 0755, &sb);
+ if (parent == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ my_dir, root, strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_creat(fs, parent, my_file, O_CREAT, 0644, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, parent, strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dirsrc = glfs_h_mkdir(fs, parent, src_dir, 0755, &sb);
+ if (dirsrc == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ src_dir, parent, strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dirtgt = glfs_h_mkdir(fs, parent, tgt_dir, 0755, &sb);
+ if (dirtgt == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ tgt_dir, parent, strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dleaf = glfs_h_creat(fs, dirsrc, my_file, O_CREAT, 0644, &sb);
+ if (dleaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, dirsrc, strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* run tests */
+ /* Rename file.txt -> file1.txt */
+ ret = glfs_h_rename(fs, parent, "file.txt", parent, "file1.txt");
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_rename: error renaming %s to %s (%s)\n",
+ "file.txt", "file1.txt", strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+
+ /* rename dir1/file.txt -> file.txt */
+ ret = glfs_h_rename(fs, dirsrc, "file.txt", parent, "file.txt");
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_rename: error renaming %s/%s to %s (%s)\n",
+ src_dir, "file.txt", "file.txt", strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+
+ /* rename file1.txt -> file.txt (exists) */
+ ret = glfs_h_rename(fs, parent, "file1.txt", parent, "file.txt");
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_rename: error renaming %s to %s (%s)\n",
+ "file.txt", "file.txt", strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+
+ /* rename dir1 -> dir3 */
+ ret = glfs_h_rename(fs, parent, "dir1", parent, "dir3");
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_rename: error renaming %s to %s (%s)\n", "dir1",
+ "dir3", strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+
+ /* rename dir2 ->dir3 (exists) */
+ ret = glfs_h_rename(fs, parent, "dir2", parent, "dir3");
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_rename: error renaming %s to %s (%s)\n", "dir2",
+ "dir3", strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+
+ /* rename file.txt -> dir3 (fail) */
+ ret = glfs_h_rename(fs, parent, "file.txt", parent, "dir3");
+ if (ret == 0) {
+ fprintf(stderr, "glfs_h_rename: NO error renaming %s to %s (%s)\n",
+ "file.txt", "dir3", strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+
+ /* rename dir3 -> file.txt (fail) */
+ ret = glfs_h_rename(fs, parent, "dir3", parent, "file.txt");
+ if (ret == 0) {
+ fprintf(stderr, "glfs_h_rename: NO error renaming %s to %s (%s)\n",
+ "dir3", "file.txt", strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+
+ printf("glfs_h_rename tests: PASSED\n");
+
+out:
+ if (root)
+ glfs_h_close(root);
+ if (parent)
+ glfs_h_close(parent);
+ if (leaf)
+ glfs_h_close(leaf);
+ if (dirsrc)
+ glfs_h_close(dirsrc);
+ if (dirtgt)
+ glfs_h_close(dirtgt);
+ if (dleaf)
+ glfs_h_close(dleaf);
+
+ return;
+}
+
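+/* Accumulate the elapsed time between ts_st and ts_ed into *ts, borrowing a
+ * second when the nanosecond delta is negative and carrying into tv_sec when
+ * tv_nsec exceeds one second. */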
+void
+assimilatetime(struct timespec *ts, struct timespec ts_st,
+ struct timespec ts_ed)
+{
+ if ((ts_ed.tv_nsec - ts_st.tv_nsec) < 0) {
+ ts->tv_sec += ts_ed.tv_sec - ts_st.tv_sec - 1;
+ ts->tv_nsec += 1000000000 + ts_ed.tv_nsec - ts_st.tv_nsec;
+ } else {
+ ts->tv_sec += ts_ed.tv_sec - ts_st.tv_sec;
+ ts->tv_nsec += ts_ed.tv_nsec - ts_st.tv_nsec;
+ }
+
+ if (ts->tv_nsec > 1000000000) {
+ ts->tv_nsec = ts->tv_nsec - 1000000000;
+ ts->tv_sec += 1;
+ }
+
+ return;
+}
+
+#define MAX_FILES_CREATE 10
+#define MAXPATHNAME 512
+void
+test_h_performance(void)
+{
+ char *my_dir = "perftest", *full_dir_path = "/testdir/perftest";
+ char *my_file = "file_", my_file_name[MAXPATHNAME];
+ struct glfs_object *parent = NULL, *leaf = NULL, *dir = NULL;
+ struct stat sb;
+ int ret, i;
+ struct glfs_fd *fd;
+ struct timespec c_ts = {0, 0}, c_ts_st, c_ts_ed;
+ struct timespec o_ts = {0, 0}, o_ts_st, o_ts_ed;
+
+ printf("glfs_h_performance tests: In Progress\n");
+
+ /* Prepare tests */
+ parent = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, NULL, strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ dir = glfs_h_mkdir(fs, parent, my_dir, 0755, &sb);
+ if (dir == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ my_dir, parent, strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* create performance */
+ ret = clock_gettime(CLOCK_REALTIME, &o_ts_st);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
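+ /* Create MAX_FILES_CREATE empty files through the handle-based API; c_ts
+ * accumulates just the per-file lookup and create calls, while o_ts covers
+ * the whole loop. */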
+ for (i = 0; i < MAX_FILES_CREATE; i++) {
+ sprintf(my_file_name, "%s%d", my_file, i);
+
+ ret = clock_gettime(CLOCK_REALTIME, &c_ts_st);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ leaf = glfs_h_lookupat(fs, dir, my_file_name, &sb, 0);
+ if (leaf != NULL) {
+ fprintf(stderr, "glfs_h_lookup: exists %s\n", my_file_name);
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ leaf = glfs_h_creat(fs, dir, my_file_name, O_CREAT, 0644, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, dir, strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ ret = clock_gettime(CLOCK_REALTIME, &c_ts_ed);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ assimilatetime(&c_ts, c_ts_st, c_ts_ed);
+ glfs_h_close(leaf);
+ leaf = NULL;
+ }
+
+ ret = clock_gettime(CLOCK_REALTIME, &o_ts_ed);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ assimilatetime(&o_ts, o_ts_st, o_ts_ed);
+
+ printf("Creation performance (handle based):\n\t# empty files:%d\n",
+ MAX_FILES_CREATE);
+ printf("\tOverall time:\n\t\tSecs:%ld\n\t\tnSecs:%ld\n", o_ts.tv_sec,
+ o_ts.tv_nsec);
+ printf("\tcreate call time:\n\t\tSecs:%ld\n\t\tnSecs:%ld\n",
+ c_ts.tv_sec, c_ts.tv_nsec);
+
+ /* create using path */
+ c_ts.tv_sec = o_ts.tv_sec = 0;
+ c_ts.tv_nsec = o_ts.tv_nsec = 0;
+
+ sprintf(my_file_name, "%s1", full_dir_path);
+ ret = glfs_mkdir(fs, my_file_name, 0755);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_mkdir: error creating %s: from (%p),%s\n", my_dir,
+ parent, strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ ret = clock_gettime(CLOCK_REALTIME, &o_ts_st);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
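+ /* Repeat the measurement with the path-based glfs_stat/glfs_creat calls so
+ * the handle-based and path-based creation times can be compared. */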
+ for (i = 0; i < MAX_FILES_CREATE; i++) {
+ sprintf(my_file_name, "%s1/%sn%d", full_dir_path, my_file, i);
+
+ ret = clock_gettime(CLOCK_REALTIME, &c_ts_st);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ ret = glfs_stat(fs, my_file_name, &sb);
+ if (ret == 0) {
+ fprintf(stderr, "glfs_stat: exists %s\n", my_file_name);
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ fd = glfs_creat(fs, my_file_name, O_CREAT, 0644);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_creat: error creating %s: from (%p),%s\n",
+ my_file, dir, strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ ret = clock_gettime(CLOCK_REALTIME, &c_ts_ed);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ assimilatetime(&c_ts, c_ts_st, c_ts_ed);
+ glfs_close(fd);
+ }
+
+ ret = clock_gettime(CLOCK_REALTIME, &o_ts_ed);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ assimilatetime(&o_ts, o_ts_st, o_ts_ed);
+
+ printf("Creation performance (path based):\n\t# empty files:%d\n",
+ MAX_FILES_CREATE);
+ printf("\tOverall time:\n\t\tSecs:%ld\n\t\tnSecs:%ld\n", o_ts.tv_sec,
+ o_ts.tv_nsec);
+ printf("\tcreate call time:\n\t\tSecs:%ld\n\t\tnSecs:%ld\n",
+ c_ts.tv_sec, c_ts.tv_nsec);
+out:
+ return;
+}
+
+int
+test_handleops(int argc, char *argv[])
+{
+ int ret = 0;
+ glfs_fd_t *fd = NULL;
+ struct stat sb = {
+ 0,
+ };
+ struct glfs_object *root = NULL, *parent = NULL, *leaf = NULL, *tmp = NULL;
+ char readbuf[32], writebuf[32];
+ unsigned char leaf_handle[GFAPI_HANDLE_LENGTH];
+
+ char *full_leaf_name = "/testdir/testfile.txt", *leaf_name = "testfile.txt",
+ *relative_leaf_name = "testdir/testfile.txt";
+ char *leaf_name1 = "testfile1.txt";
+ char *full_newparent_name = "/testdir/dir1", *newparent_name = "dir1";
+ char *full_newnod_name = "/testdir/nod1", *newnod_name = "nod1";
+
+ /* Initialize test area */
+ ret = glfs_mkdir(fs, full_parent_name, 0755);
+ if (ret != 0 && errno != EEXIST) {
+ fprintf(stderr, "%s: (%p) %s\n", full_parent_name, fd, strerror(errno));
+ printf("Test initialization failed on volume %s\n", argv[1]);
+ goto out;
+ } else if (ret != 0) {
+ printf("Test directory %s already exists\n", full_parent_name);
+ printf("Clean up the test directory and rerun the tests\n");
+ goto out;
+ }
+
+ fd = glfs_creat(fs, full_leaf_name, O_CREAT, 0644);
+ if (fd == NULL) {
+ fprintf(stderr, "%s: (%p) %s\n", full_leaf_name, fd, strerror(errno));
+ printf("Test initialization failed on volume %s\n", argv[1]);
+ goto out;
+ }
+ glfs_close(fd);
+
+ printf("Initialized the test area, within volume %s\n", argv[1]);
+
+ /* Handle based APIs test area */
+
+ /* glfs_h_lookupat test */
+ printf("glfs_h_lookupat tests: In Progress\n");
+ /* start at root of the volume */
+ root = glfs_h_lookupat(fs, NULL, "/", &sb, 0);
+ if (root == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n", "/",
+ NULL, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* lookup a parent within root */
+ parent = glfs_h_lookupat(fs, root, parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ parent_name, root, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* lookup a leaf/child within the parent */
+ leaf = glfs_h_lookupat(fs, parent, leaf_name, &sb, 0);
+ if (leaf == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ leaf_name, parent, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* reset */
+ glfs_h_close(root);
+ root = NULL;
+ glfs_h_close(leaf);
+ leaf = NULL;
+ glfs_h_close(parent);
+ parent = NULL;
+
+ /* check absolute paths */
+ root = glfs_h_lookupat(fs, NULL, "/", &sb, 0);
+ if (root == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n", "/",
+ NULL, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ parent = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, root, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_lookupat(fs, NULL, full_leaf_name, &sb, 0);
+ if (leaf == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_leaf_name, parent, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* reset */
+ glfs_h_close(leaf);
+ leaf = NULL;
+
+ /* check multiple component paths */
+ leaf = glfs_h_lookupat(fs, root, relative_leaf_name, &sb, 0);
+ if (leaf == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ relative_leaf_name, parent, strerror(errno));
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* reset */
+ glfs_h_close(root);
+ root = NULL;
+ glfs_h_close(parent);
+ parent = NULL;
+
+ /* check symlinks in path */
+
+ /* TODO: -ve test cases */
+ /* parent invalid
+ * path invalid
+ * path does not exist after some components
+ * no parent, but relative path
+ * parent and full path? -ve?
+ */
+
+ printf("glfs_h_lookupat tests: PASSED\n");
+
+ /* glfs_h_open test */
+ printf("glfs_h_open tests: In Progress\n");
+ fd = glfs_h_open(fs, leaf, O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_h_open: error on open of %s: %s\n",
+ full_leaf_name, strerror(errno));
+ printf("glfs_h_open tests: FAILED\n");
+ goto out;
+ }
+
+ /* test read/write based on fd */
+ memcpy(writebuf, "abcdefghijklmnopqrstuvwxyz012345", 32);
+ ret = glfs_write(fd, writebuf, 32, 0);
+
+ glfs_lseek(fd, 0, SEEK_SET);
+
+ ret = glfs_read(fd, readbuf, 32, 0);
+ if (memcmp(readbuf, writebuf, 32)) {
+ printf("Failed to read what I wrote: %s %s\n", readbuf, writebuf);
+ glfs_close(fd);
+ printf("glfs_h_open tests: FAILED\n");
+ goto out;
+ }
+
+ glfs_h_close(leaf);
+ leaf = NULL;
+ glfs_close(fd);
+
+ printf("glfs_h_open tests: PASSED\n");
+
+ /* Create tests */
+ printf("glfs_h_creat tests: In Progress\n");
+ parent = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, root, strerror(errno));
+ printf("glfs_h_creat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_creat(fs, parent, leaf_name1, O_CREAT, 0644, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error on create of %s: from (%p),%s\n",
+ leaf_name1, parent, strerror(errno));
+ printf("glfs_h_creat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ glfs_h_close(leaf);
+ leaf = NULL;
+
+ leaf = glfs_h_creat(fs, parent, leaf_name1, O_CREAT | O_EXCL, 0644, &sb);
+ if (leaf != NULL || errno != EEXIST) {
+ fprintf(stderr,
+ "glfs_h_creat: existing file, leaf = (%p), errno = %s\n", leaf,
+ strerror(errno));
+ printf("glfs_h_creat tests: FAILED\n");
+ if (leaf != NULL) {
+ glfs_h_close(leaf);
+ leaf = NULL;
+ }
+ }
+
+ tmp = glfs_h_creat(fs, root, parent_name, O_CREAT, 0644, &sb);
+ if (tmp != NULL || !(errno == EISDIR || errno == EINVAL)) {
+ fprintf(stderr, "glfs_h_creat: dir create, tmp = (%p), errno = %s\n",
+ leaf, strerror(errno));
+ printf("glfs_h_creat tests: FAILED\n");
+ if (tmp != NULL) {
+ glfs_h_close(tmp);
+ tmp = NULL;
+ }
+ }
+
+ /* TODO: Other combinations and -ve cases as applicable */
+ printf("glfs_h_creat tests: PASSED\n");
+
+ /* extract handle and create from handle test */
+ printf(
+ "glfs_h_extract_handle and glfs_h_create_from_handle tests: In "
+ "Progress\n");
+ /* TODO: Change the lookup to create below for a GIFD recovery failure,
+ * that needs to be fixed */
+ leaf = glfs_h_lookupat(fs, parent, leaf_name1, &sb, 0);
+ if (leaf == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ leaf_name1, parent, strerror(errno));
+ printf("glfs_h_extract_handle tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ ret = glfs_h_extract_handle(leaf, leaf_handle, GFAPI_HANDLE_LENGTH);
+ if (ret < 0) {
+ fprintf(stderr,
+ "glfs_h_extract_handle: error extracting handle of %s: %s\n",
+ full_leaf_name, strerror(errno));
+ printf("glfs_h_extract_handle tests: FAILED\n");
+ goto out;
+ }
+ peek_handle(leaf_handle);
+
+ glfs_h_close(leaf);
+ leaf = NULL;
+
+ leaf = glfs_h_create_from_handle(fs, leaf_handle, GFAPI_HANDLE_LENGTH, &sb);
+ if (leaf == NULL) {
+ fprintf(
+ stderr,
+ "glfs_h_create_from_handle: error on create of %s: from (%p),%s\n",
+ leaf_name1, leaf_handle, strerror(errno));
+ printf("glfs_h_create_from_handle tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ fd = glfs_h_open(fs, leaf, O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_h_open: error on open of %s: %s\n",
+ full_leaf_name, strerror(errno));
+ printf("glfs_h_create_from_handle tests: FAILED\n");
+ goto out;
+ }
+
+ /* test read/write based on fd */
+ memcpy(writebuf, "abcdefghijklmnopqrstuvwxyz012345", 32);
+ ret = glfs_write(fd, writebuf, 32, 0);
+
+ glfs_lseek(fd, 0, SEEK_SET);
+
+ ret = glfs_read(fd, readbuf, 32, 0);
+ if (memcmp(readbuf, writebuf, 32)) {
+ printf("Failed to read what I wrote: %s %s\n", readbuf, writebuf);
+ printf("glfs_h_create_from_handle tests: FAILED\n");
+ glfs_close(fd);
+ goto out;
+ }
+
+ glfs_close(fd);
+ glfs_h_close(leaf);
+ leaf = NULL;
+ glfs_h_close(parent);
+ parent = NULL;
+
+ printf(
+ "glfs_h_extract_handle and glfs_h_create_from_handle tests: PASSED\n");
+
+ /* Mkdir tests */
+ printf("glfs_h_mkdir tests: In Progress\n");
+
+ ret = glfs_rmdir(fs, full_newparent_name);
+ if (ret && errno != ENOENT) {
+ fprintf(stderr, "glfs_rmdir: Failed for %s: %s\n", full_newparent_name,
+ strerror(errno));
+ printf("glfs_h_mkdir tests: FAILED\n");
+ goto out;
+ }
+
+ parent = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, root, strerror(errno));
+ printf("glfs_h_mkdir tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_mkdir(fs, parent, newparent_name, 0755, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error on mkdir of %s: from (%p),%s\n",
+ newparent_name, parent, strerror(errno));
+ printf("glfs_h_mkdir tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ glfs_h_close(leaf);
+ leaf = NULL;
+
+ leaf = glfs_h_mkdir(fs, parent, newparent_name, 0755, &sb);
+ if (leaf != NULL || errno != EEXIST) {
+ fprintf(stderr,
+ "glfs_h_mkdir: existing directory, leaf = (%p), errno = %s\n",
+ leaf, strerror(errno));
+ printf("glfs_h_mkdir tests: FAILED\n");
+ if (leaf != NULL) {
+ glfs_h_close(leaf);
+ leaf = NULL;
+ }
+ }
+
+ glfs_h_close(parent);
+ parent = NULL;
+
+ printf("glfs_h_mkdir tests: PASSED\n");
+
+ /* Mknod tests */
+ printf("glfs_h_mknod tests: In Progress\n");
+ ret = glfs_unlink(fs, full_newnod_name);
+ if (ret && errno != ENOENT) {
+ fprintf(stderr, "glfs_unlink: Failed for %s: %s\n", full_newnod_name,
+ strerror(errno));
+ printf("glfs_h_mknod tests: FAILED\n");
+ goto out;
+ }
+
+ parent = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, root, strerror(errno));
+ printf("glfs_h_mknod tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_mknod(fs, parent, newnod_name, S_IFIFO, 0, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_mknod: error on mknod of %s: from (%p),%s\n",
+ newnod_name, parent, strerror(errno));
+ printf("glfs_h_mknod tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* TODO: create op on a FIFO node hangs, need to check and fix
+ tmp = glfs_h_creat(fs, parent, newnod_name, O_CREAT, 0644, &sb);
+ if (tmp != NULL || errno != EINVAL) {
+ fprintf(stderr, "glfs_h_creat: node create, tmp = (%p), errno = %s\n",
+ tmp, strerror(errno));
+ printf("glfs_h_creat/mknod tests: FAILED\n");
+ if (tmp != NULL) {
+ glfs_h_close(tmp);
+ tmp = NULL;
+ }
+ } */
+
+ glfs_h_close(leaf);
+ leaf = NULL;
+
+ leaf = glfs_h_mknod(fs, parent, newnod_name, 0644, 0, &sb);
+ if (leaf != NULL || errno != EEXIST) {
+ fprintf(stderr,
+ "glfs_h_mknod: existing node, leaf = (%p), errno = %s\n", leaf,
+ strerror(errno));
+ printf("glfs_h_mknod tests: FAILED\n");
+ if (leaf != NULL) {
+ glfs_h_close(leaf);
+ leaf = NULL;
+ }
+ }
+
+ glfs_h_close(parent);
+ parent = NULL;
+
+ printf("glfs_h_mknod tests: PASSED\n");
+
+ /* unlink tests */
+ test_h_unlink();
+
+ /* TODO: opendir tests */
+
+ /* getattr tests */
+ test_h_getsetattrs();
+
+ /* TODO: setattr tests */
+
+ /* truncate tests */
+ test_h_truncate();
+
+ /* link tests */
+ test_h_links();
+
+ /* rename tests */
+ test_h_rename();
+
+ /* performance tests */
+ test_h_performance();
+
+ /* END: New APIs test area */
+
+out:
+ /* Cleanup glfs handles */
+ if (root)
+ glfs_h_close(root);
+ if (parent)
+ glfs_h_close(parent);
+ if (leaf)
+ glfs_h_close(leaf);
+
+ return ret;
+}
+
+int
+test_write_apis(glfs_t *fs)
+{
+ /* TODO: Add more coverage here. Some APIs that could be exercised: */
+ /*
+ 0. glfs_set_xlator_option()
+
+ Read/Write combinations:
+ . glfs_{p,}readv/{p,}writev
+ . glfs_pread/pwrite
+
+ tests/basic/gfapi/gfapi-async-calls-test.c
+ . glfs_read_async/write_async
+ . glfs_pread_async/pwrite_async
+ . glfs_readv_async/writev_async
+ . glfs_preadv_async/pwritev_async
+
+ . ftruncate/ftruncate_async
+ . fsync/fsync_async
+ . fdatasync/fdatasync_async
+
+ */
+
+ glfs_fd_t *fd = NULL;
+ char *filename = "/filename2";
+ int flags = O_RDWR;
+ char *buf = "some bytes!";
+ char writestr[TEST_STR_LEN];
+ struct iovec iov = {&writestr, TEST_STR_LEN};
+ int ret, i;
+
+ for (i = 0; i < TEST_STR_LEN; i++)
+ writestr[i] = 0x11;
+
+ fd = glfs_open(fs, filename, flags);
+ if (!fd)
+ fprintf(stderr, "open(%s): (%p) %s\n", filename, fd, strerror(errno));
+
+ ret = glfs_writev(fd, &iov, 1, flags);
+ if (ret < 0) {
+ fprintf(stderr, "writev(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ }
+
+ ret = glfs_pwrite(fd, buf, 10, 4, flags, NULL, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "pwrite(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ }
+
+ ret = glfs_pwritev(fd, &iov, 1, 4, flags);
+ if (ret < 0) {
+ fprintf(stderr, "pwritev(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ }
+
+ ret = glfs_fsync(fd, NULL, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "fsync(%s): %d (%s)\n", filename, ret, strerror(errno));
+ }
+
+ glfs_close(fd);
+
+ return 0;
+}
+
+int
+test_metadata_ops(glfs_t *fs, glfs_t *fs2)
+{
+ glfs_fd_t *fd = NULL;
+ glfs_fd_t *fd2 = NULL;
+ struct stat sb = {
+ 0,
+ };
+ struct glfs_stat gsb = {
+ 0,
+ };
+ struct statvfs sfs;
+ char readbuf[32];
+ char writebuf[11] = "helloworld";
+
+ char *filename = "/filename2";
+ int ret;
+
+ ret = glfs_lstat(fs, filename, &sb);
+ fprintf(stderr, "lstat(%s): (%d) %s\n", filename, ret, strerror(errno));
+
+ fd = glfs_creat(fs, filename, O_RDWR, 0644);
+ if (!fd)
+ fprintf(stderr, "creat(%s): (%p) %s\n", filename, fd, strerror(errno));
+
+ fd2 = glfs_open(fs2, filename, O_RDWR);
+ if (!fd2)
+ fprintf(stderr, "open(%s): (%p) %s\n", filename, fd, strerror(errno));
+
+ ret = glfs_lstat(fs, filename, &sb);
+ if (ret)
+ fprintf(stderr, "lstat(%s): (%d) %s\n", filename, ret, strerror(errno));
+
+ ret = glfs_write(fd, writebuf, 11, 0);
+ if (ret < 0) {
+ fprintf(stderr, "writev(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ }
+
+ glfs_fsync(fd, NULL, NULL);
+
+ glfs_lseek(fd2, 5, SEEK_SET);
+
+ ret = glfs_read(fd2, readbuf, 32, 0);
+
+ printf("read %d, %s", ret, readbuf);
+
+ /* get stat */
+ ret = glfs_fstat(fd2, &sb);
+ if (ret)
+ fprintf(stderr, "fstat(%s): %d (%s)\n", filename, ret, strerror(errno));
+
+ ret = glfs_access(fs, filename, R_OK);
+ if (ret)
+ fprintf(stderr, "access(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+
+ ret = glfs_fallocate(fd2, 1024, 1024, 1024);
+ if (ret)
+ fprintf(stderr, "fallocate(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+
+ ret = glfs_discard(fd2, 1024, 512);
+ if (ret)
+ fprintf(stderr, "discard(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+
+ ret = glfs_zerofill(fd2, 2048, 1024);
+ if (ret)
+ fprintf(stderr, "zerofill(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+
+ /* set stat */
+ /* TODO: got some errors, need to fix */
+ ret = glfs_fsetattr(fd2, &gsb);
+
+ glfs_close(fd);
+ glfs_close(fd2);
+
+ filename = "/filename3";
+ ret = glfs_mknod(fs, filename, S_IFIFO, 0);
+ if (ret)
+ fprintf(stderr, "%s: (%d) %s\n", filename, ret, strerror(errno));
+
+ ret = glfs_lstat(fs, filename, &sb);
+ if (ret)
+ fprintf(stderr, "%s: (%d) %s\n", filename, ret, strerror(errno));
+
+ ret = glfs_rename(fs, filename, "/filename4");
+ if (ret)
+ fprintf(stderr, "rename(%s): (%d) %s\n", filename, ret,
+ strerror(errno));
+
+ ret = glfs_unlink(fs, "/filename4");
+ if (ret)
+ fprintf(stderr, "unlink(%s): (%d) %s\n", "/filename4", ret,
+ strerror(errno));
+
+ filename = "/dirname2";
+ ret = glfs_mkdir(fs, filename, 0);
+ if (ret)
+ fprintf(stderr, "%s: (%d) %s\n", filename, ret, strerror(errno));
+
+ ret = glfs_lstat(fs, filename, &sb);
+ if (ret)
+ fprintf(stderr, "lstat(%s): (%d) %s\n", filename, ret, strerror(errno));
+
+ ret = glfs_rmdir(fs, filename);
+ if (ret)
+ fprintf(stderr, "rmdir(%s): (%d) %s\n", filename, ret, strerror(errno));
+
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs2 = NULL;
+ int ret = 0;
+ glfs_fd_t *fd = NULL;
+ glfs_fd_t *fd2 = NULL;
+ struct stat sb = {
+ 0,
+ };
+ struct glfs_stat gsb = {
+ 0,
+ };
+ struct statvfs sfs;
+ char readbuf[32];
+ char writebuf[32];
+ char volumeid[64];
+
+ char *filename = "/filename2";
+
+ if ((argc < 2) || (argc > 3)) {
+ printf("Usage:\n\t%s <volname> <hostname>\n\t%s <volfile-path>\n",
+ argv[0], argv[0]);
+ return -1;
+ }
+
+ if (argc == 2) {
+ /* Generally glfs_new() requires volume name as an argument */
+ fs = glfs_new("test-only");
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+ ret = glfs_set_volfile(fs, argv[1]);
+ if (ret)
+ fprintf(stderr, "glfs_set_volfile failed\n");
+ } else {
+ fs = glfs_new(argv[1]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+ // ret = glfs_set_volfile_server (fs, "unix", "/tmp/gluster.sock", 0);
+ ret = glfs_set_volfile_server(fs, "tcp", argv[2], 24007);
+ if (ret)
+ fprintf(stderr, "glfs_set_volfile_server failed\n");
+ }
+
+ /* Change this to relevant file when running locally */
+ ret = glfs_set_logging(fs, "/dev/stderr", 5);
+ if (ret)
+ fprintf(stderr, "glfs_set_logging failed\n");
+
+ ret = glfs_init(fs);
+ if (ret)
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+
+ if (ret)
+ goto out;
+
+ /* no major use for getting the volume id in this test, done for coverage */
+ ret = glfs_get_volumeid(fs, volumeid, 64);
+ if (ret) {
+ fprintf(stderr, "glfs_get_volumeid: returned %d\n", ret);
+ }
+
+ sleep(2);
+
+ if (argc == 2) {
+ /* Generally glfs_new() requires volume name as an argument */
+ fs2 = glfs_new("test_only_volume");
+ if (!fs2) {
+ fprintf(stderr, "glfs_new(fs2): returned NULL\n");
+ return 1;
+ }
+ ret = glfs_set_volfile(fs2, argv[1]);
+ if (ret)
+ fprintf(stderr, "glfs_set_volfile failed(fs2)\n");
+ } else {
+ fs2 = glfs_new(argv[1]);
+ if (!fs2) {
+ fprintf(stderr, "glfs_new(fs2): returned NULL\n");
+ return 1;
+ }
+ ret = glfs_set_volfile_server(fs2, "tcp", argv[2], 24007);
+ if (ret)
+ fprintf(stderr, "glfs_set_volfile_server failed(fs2)\n");
+ }
+
+ ret = glfs_set_statedump_path(fs2, "/tmp");
+ if (ret) {
+ fprintf(stderr, "glfs_set_statedump_path: %s\n", strerror(errno));
+ }
+
+ ret = glfs_init(fs2);
+ if (ret)
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+
+ test_metadata_ops(fs, fs2);
+
+ test_dirops(fs);
+
+ test_xattr(fs);
+
+ test_chdir(fs);
+
+ test_handleops(argc, argv);
+ // done
+
+ /* Test some extra apis */
+ test_write_apis(fs);
+
+ glfs_statvfs(fs, "/", &sfs);
+
+ glfs_unset_volfile_server(fs, "tcp", argv[2], 24007);
+
+ glfs_fini(fs);
+ glfs_fini(fs2);
+
+ ret = 0;
+out:
+ return ret;
+}
diff --git a/tests/basic/gfapi/glfsxmp.t b/tests/basic/gfapi/glfsxmp.t
new file mode 100644
index 00000000000..b3e6645c0f5
--- /dev/null
+++ b/tests/basic/gfapi/glfsxmp.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
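+# Build the gfapi coverage program from glfsxmp-coverage.c and run it twice:
+# once against the running volume via glusterd, and once against a volfile
+# fetched with "system getspec".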
+cleanup
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2}
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+$CLI system getspec $V0 > fubar.vol
+
+TEST cp $(dirname $0)/glfsxmp-coverage.c ./glfsxmp.c
+TEST build_tester ./glfsxmp.c -lgfapi
+TEST ./glfsxmp $V0 $H0
+
+TEST ./glfsxmp fubar.vol
+
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/basic/gfapi/mandatory-lock-optimal.c b/tests/basic/gfapi/mandatory-lock-optimal.c
index ceffe523f45..34fef8d0b80 100644
--- a/tests/basic/gfapi/mandatory-lock-optimal.c
+++ b/tests/basic/gfapi/mandatory-lock-optimal.c
@@ -388,7 +388,7 @@ run_test_7(int i)
if (!fd2)
LOG_ERR("glfs_open", errno);
- ret = glfs_ftruncate(fd2, 4);
+ ret = glfs_ftruncate(fd2, 4, NULL, NULL);
if (ret == 0 || errno != EAGAIN)
LOG_ERR("glfs_ftruncate", errno);
diff --git a/tests/basic/gfapi/protocol-client-ssl.vol.in b/tests/basic/gfapi/protocol-client-ssl.vol.in
new file mode 100644
index 00000000000..cdc0c9d0671
--- /dev/null
+++ b/tests/basic/gfapi/protocol-client-ssl.vol.in
@@ -0,0 +1,15 @@
+#
+# This .vol file expects that there is
+#
+# 1. GlusterD listening on @@HOSTNAME@@
+# 2. a volume that provides a brick on @@BRICKPATH@@
+# 3. the volume with the brick has been started
+#
+volume test
+ type protocol/client
+ option remote-host @@HOSTNAME@@
+ option remote-subvolume @@BRICKPATH@@
+ option transport-type socket
+ option transport.socket.ssl-enabled @@SSL@@
+end-volume
+
diff --git a/tests/basic/gfapi/upcall-cache-invalidate.c b/tests/basic/gfapi/upcall-cache-invalidate.c
index ed871a0b421..078286a8956 100644
--- a/tests/basic/gfapi/upcall-cache-invalidate.c
+++ b/tests/basic/gfapi/upcall-cache-invalidate.c
@@ -136,7 +136,7 @@ main(int argc, char *argv[])
LOG_ERR("glfs_lseek", ret);
memset(readbuf, 0, sizeof(readbuf));
- ret = glfs_pread(fd_tmp2, readbuf, 4, 0, 0);
+ ret = glfs_pread(fd_tmp2, readbuf, 4, 0, 0, NULL);
if (ret <= 0) {
ret = -1;
diff --git a/tests/basic/gfapi/upcall-register-api.c b/tests/basic/gfapi/upcall-register-api.c
index 68591fdc517..53ce0ecdb68 100644
--- a/tests/basic/gfapi/upcall-register-api.c
+++ b/tests/basic/gfapi/upcall-register-api.c
@@ -120,7 +120,7 @@ perform_io(glfs_t *fs, glfs_t *fs2, int cnt)
LOG_ERR("glfs_lseek", ret);
memset(readbuf, 0, sizeof(readbuf));
- ret = glfs_pread(fd_tmp2, readbuf, 4, 0, 0);
+ ret = glfs_pread(fd_tmp2, readbuf, 4, 0, 0, NULL);
if (ret <= 0) {
ret = -1;
diff --git a/tests/basic/global-threading.t b/tests/basic/global-threading.t
new file mode 100644
index 00000000000..f7d34044b09
--- /dev/null
+++ b/tests/basic/global-threading.t
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+# Test if the given process has a number of threads of a given type between
+# min and max.
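+# For example, "check_threads <pid> glfs_iotwr 1 16" succeeds when the process
+# has between 1 and 16 threads (inclusive) whose name matches glfs_iotwr;
+# omitting the max enforces only the minimum.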
+function check_threads() {
+ local pid="${1}"
+ local pattern="${2}"
+ local min="${3}"
+ local max="${4-}"
+ local count
+
+ count="$(ps hH -o comm ${pid} | grep "${pattern}" | wc -l)"
+ if [[ ${min} -gt ${count} ]]; then
+ return 1
+ fi
+ if [[ ! -z "${max}" && ${max} -lt ${count} ]]; then
+ return 1
+ fi
+
+ return 0
+}
+
+cleanup
+
+TEST glusterd
+
+# Glusterd shouldn't use any thread
+TEST check_threads $(get_glusterd_pid) glfs_tpw 0 0
+TEST check_threads $(get_glusterd_pid) glfs_iotwr 0 0
+
+TEST pkill -9 glusterd
+
+TEST glusterd --global-threading
+
+# Glusterd shouldn't use global threads, even if enabled
+TEST check_threads $(get_glusterd_pid) glfs_tpw 0 0
+TEST check_threads $(get_glusterd_pid) glfs_iotwr 0 0
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/b{0,1}
+
+# Normal configuration using io-threads on bricks
+TEST $CLI volume set $V0 config.global-threading off
+TEST $CLI volume set $V0 performance.iot-pass-through off
+TEST $CLI volume set $V0 performance.client-io-threads off
+TEST $CLI volume start $V0
+
+# There shouldn't be global threads
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b0) glfs_tpw 0 0
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b1) glfs_tpw 0 0
+
+# There should be at least 1 io-thread
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b0) glfs_iotwr 1
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b1) glfs_iotwr 1
+
+# Self-heal should be using global threads
+TEST check_threads $(get_shd_process_pid) glfs_tpw 1
+TEST check_threads $(get_shd_process_pid) glfs_iotwr 0 0
+
+TEST $CLI volume stop $V0
+
+# Configuration with global threads on bricks
+TEST $CLI volume set $V0 config.global-threading on
+TEST $CLI volume set $V0 performance.iot-pass-through on
+TEST $CLI volume start $V0
+
+# There should be at least 1 global thread
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b0) glfs_tpw 1
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b1) glfs_tpw 1
+
+# There shouldn't be any io-thread worker threads
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b0) glfs_iotwr 0 0
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b1) glfs_iotwr 0 0
+
+# Normal configuration using io-threads on clients
+TEST $CLI volume set $V0 performance.iot-pass-through off
+TEST $CLI volume set $V0 performance.client-io-threads on
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+# There shouldn't be global threads
+TEST check_threads $(get_mount_process_pid $V0 $M0) glfs_tpw 0 0
+
+# There should be at least 1 io-thread
+TEST check_threads $(get_mount_process_pid $V0 $M0) glfs_iotwr 1
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+# Configuration with global threads on clients
+TEST $CLI volume set $V0 performance.client-io-threads off
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --global-threading $M0
+
+# There should be at least 1 global thread
+TEST check_threads $(get_mount_process_pid $V0 $M0) glfs_tpw 1
+
+# There shouldn't be io-threads
+TEST check_threads $(get_mount_process_pid $V0 $M0) glfs_iotwr 0 0
+
+# Some basic volume access checks with global-threading enabled everywhere
+TEST mkdir ${M0}/dir
+TEST dd if=/dev/zero of=${M0}/dir/file bs=128k count=8
+
+cleanup
diff --git a/tests/basic/glusterd-restart-shd-mux.t b/tests/basic/glusterd-restart-shd-mux.t
new file mode 100644
index 00000000000..46d0dac2fce
--- /dev/null
+++ b/tests/basic/glusterd-restart-shd-mux.t
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
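+#Verify that the multiplexed self-heal daemon keeps serving all volumes across
+#a glusterd restart and a simulated node reboot, and that pending heals still
+#complete afterwards.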
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=20
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+
+for i in $(seq 1 3); do
+ TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_afr$i
+ TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+#Stop the glusterd
+TEST pkill glusterd
+#Only stopping glusterd, so there will be one shd
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" shd_count
+TEST glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+#Check that the thread count becomes number of volumes * number of ec subvolumes (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+#Check that the thread count becomes number of volumes * number of afr subvolumes (4*6=24)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+shd_pid=$(get_shd_mux_pid $V0)
+for i in $(seq 1 3); do
+ afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
+ ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
+done
+
+#Reboot a node scenario
+TEST pkill gluster
+#All gluster processes were killed, so no shd should be running
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+TEST glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+#Check that the thread count becomes number of volumes * number of ec subvolumes (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+#Check that the thread count becomes number of volumes * number of afr subvolumes (4*6=24)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+shd_pid=$(get_shd_mux_pid $V0)
+for i in $(seq 1 3); do
+ afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
+ ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
+done
+
+for i in $(seq 1 3); do
+ TEST $CLI volume stop ${V0}_afr$i
+ TEST $CLI volume stop ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}3
+
+TEST touch $M0/foo{1..100}
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^204$" get_pending_heal_count $V0
+
+TEST $CLI volume start ${V0} force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST rm -rf $M0/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/basic/glusterd/arbiter-volume.t b/tests/basic/glusterd/arbiter-volume.t
deleted file mode 100644
index 03f9aca2daf..00000000000
--- a/tests/basic/glusterd/arbiter-volume.t
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-# This command tests the volume create command validation for arbiter volumes.
-
-cleanup;
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
-EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
-
-TEST $CLI volume delete $V0
-TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/b{4..9}
-EXPECT "2 x \(2 \+ 1\) = 6" volinfo_field $V0 "Number of Bricks"
-
-TEST $CLI volume delete $V0
-TEST $CLI volume create $V0 stripe 2 replica 3 arbiter 1 $H0:$B0/b{10..15}
-EXPECT "1 x 2 x \(2 \+ 1\) = 6" volinfo_field $V0 "Number of Bricks"
-
-TEST $CLI volume delete $V0
-TEST rm -rf $B0/b{1..3}
-TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
-EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
-TEST killall -15 glusterd
-TEST glusterd
-TEST pidof glusterd
-EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
-
-#cleanup
diff --git a/tests/basic/glusterd/check-cloudsync-ancestry.t b/tests/basic/glusterd/check-cloudsync-ancestry.t
new file mode 100644
index 00000000000..ff6ffee8db7
--- /dev/null
+++ b/tests/basic/glusterd/check-cloudsync-ancestry.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# When the shard and cloudsync xlators are enabled on a volume, the shard xlator
+# should be an ancestor of cloudsync. This test case checks that condition.
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
+
+volfile=$(gluster system:: getwd)"/vols/$V0/trusted-$V0.tcp-fuse.vol"
+
+#Test that both shard and cloudsync are not loaded
+EXPECT "N" volgen_volume_exists $volfile $V0-shard features shard
+EXPECT "N" volgen_volume_exists $volfile $V0-cloudsync features cloudsync
+
+#Enable shard and cloudsync in that order and check if volfile is correct
+TEST $CLI volume set $V0 shard on
+TEST $CLI volume set $V0 cloudsync on
+
+#Test that both shard and cloudsync are loaded
+EXPECT "Y" volgen_volume_exists $volfile $V0-shard features shard
+EXPECT "Y" volgen_volume_exists $volfile $V0-cloudsync features cloudsync
+
+EXPECT "Y" volgen_check_ancestry $volfile features shard features cloudsync
+
+#Disable shard and cloudsync
+TEST $CLI volume set $V0 shard off
+TEST $CLI volume set $V0 cloudsync off
+
+#Test that both shard and cloudsync are not loaded
+EXPECT "N" volgen_volume_exists $volfile $V0-shard features shard
+EXPECT "N" volgen_volume_exists $volfile $V0-cloudsync features cloudsync
+
+#Enable cloudsync and shard in that order and check if volfile is correct
+TEST $CLI volume set $V0 cloudsync on
+TEST $CLI volume set $V0 shard on
+
+#Test that both shard and cloudsync are loaded
+EXPECT "Y" volgen_volume_exists $volfile $V0-shard features shard
+EXPECT "Y" volgen_volume_exists $volfile $V0-cloudsync features cloudsync
+
+EXPECT "Y" volgen_check_ancestry $volfile features shard features cloudsync
+
+cleanup;
diff --git a/tests/basic/glusterd/disperse-create.t b/tests/basic/glusterd/disperse-create.t
index e5ce74c12b2..db8a621d48e 100644
--- a/tests/basic/glusterd/disperse-create.t
+++ b/tests/basic/glusterd/disperse-create.t
@@ -20,6 +20,10 @@ TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/b7 $H0:$B0/b8 $H0:$B
EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
TEST $CLI volume delete $V0
+TEST $CLI volume create $V0 disperse-data 2 $H0:$B0/b10 $H0:$B0/b11 $H0:$B0/b12
+EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
+
+TEST $CLI volume delete $V0
TEST $CLI volume create $V0 redundancy 1 $H0:$B0/b10 $H0:$B0/b11 $H0:$B0/b12
EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
@@ -48,6 +52,7 @@ TEST ! $CLI volume create $V0 redundancy 1 redundancy 1 $H0:$B0/b20 $H0:$B0/b21
#Minimum counts test
TEST ! $CLI volume create $V0 disperse 2 $H0:$B0/b20 $H0:$B0/b22
TEST ! $CLI volume create $V0 disperse-data 1 redundancy 0 $H0:$B0/b20 $H0:$B0/b22
+TEST ! $CLI volume create $V0 disperse 4 disperse-data 4 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b23 $H0:$B0/b24
TEST ! $CLI volume create $V0 redundancy 0 $H0:$B0/b20 $H0:$B0/b22
#Wrong count n != k+m
@@ -64,18 +69,5 @@ TEST ! $CLI volume create $V0 redundancy 2 replica 2 $H0:$B0/b20 $H0:$B0/b21 $H0
TEST ! $CLI volume create $V0 replica 2 disperse 4 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b22
TEST ! $CLI volume create $V0 replica 2 disperse-data 2 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b22 $H0:$B0/b23
TEST ! $CLI volume create $V0 replica 2 redundancy 2 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b22
-#Stripe + Disperse
-TEST ! $CLI volume create $V0 disperse 4 stripe 2 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b22
-TEST ! $CLI volume create $V0 disperse-data 2 stripe 2 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b22 $H0:$B0/b23
-TEST ! $CLI volume create $V0 redundancy 2 stripe 2 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b22
-TEST ! $CLI volume create $V0 stripe 2 disperse 4 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b22
-TEST ! $CLI volume create $V0 stripe 2 disperse-data 2 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b22 $H0:$B0/b23
-TEST ! $CLI volume create $V0 stripe 2 redundancy 2 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b22
-#Stripe + Replicate + Disperse, It is failing with striped-dispersed volume.
-TEST ! $CLI volume create $V0 disperse 4 stripe 2 replica 2 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b22
-TEST ! $CLI volume create $V0 disperse-data 2 stripe 2 replica 2 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b22 $H0:$B0/b23
-TEST ! $CLI volume create $V0 redundancy 2 stripe 2 replica 2 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b22
-TEST ! $CLI volume create $V0 stripe 2 disperse 4 replica 2 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b22
-TEST ! $CLI volume create $V0 stripe 2 disperse-data 2 replica 2 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b22 $H0:$B0/b23
-TEST ! $CLI volume create $V0 stripe 2 redundancy 2 replica 2 $H0:$B0/b20 $H0:$B0/b21 $H0:$B0/b22
+
cleanup
diff --git a/tests/basic/glusterd/heald.t b/tests/basic/glusterd/heald.t
index bdfda8ff0d6..7dae3c3f0fb 100644
--- a/tests/basic/glusterd/heald.t
+++ b/tests/basic/glusterd/heald.t
@@ -7,70 +7,73 @@
# Covers enable/disable at the moment. Will be enhanced later to include
# the other commands as well.
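+# Echo the number of running glustershd processes that match the given pid
+# (1 while the daemon is alive, 0 once it has gone away).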
+function is_pid_running {
+ local pid=$1
+ num=`ps auxww | grep glustershd | grep $pid | grep -v grep | wc -l`
+ echo $num
+}
+
cleanup;
TEST glusterd
TEST pidof glusterd
-volfile=$(gluster system:: getwd)"/glustershd/glustershd-server.vol"
#Commands should fail when volume doesn't exist
TEST ! $CLI volume heal non-existent-volume enable
TEST ! $CLI volume heal non-existent-volume disable
-# Commands should fail when volume is of distribute/stripe type.
# Glustershd shouldn't be running as long as there are no replicate/disperse
# volumes
TEST $CLI volume create dist $H0:$B0/dist
TEST $CLI volume start dist
-TEST "[ -z $(get_shd_process_pid)]"
+TEST "[ -z $(get_shd_process_pid dist)]"
TEST ! $CLI volume heal dist enable
TEST ! $CLI volume heal dist disable
-TEST $CLI volume create st stripe 3 $H0:$B0/st1 $H0:$B0/st2 $H0:$B0/st3
-TEST $CLI volume start st
-TEST "[ -z $(get_shd_process_pid)]"
-TEST ! $CLI volume heal st
-TEST ! $CLI volume heal st disable
# Commands should work on replicate/disperse volume.
TEST $CLI volume create r2 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1
-TEST "[ -z $(get_shd_process_pid)]"
+TEST "[ -z $(get_shd_process_pid r2)]"
TEST $CLI volume start r2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid r2
TEST $CLI volume heal r2 enable
EXPECT "enable" volume_option r2 "cluster.self-heal-daemon"
-EXPECT "enable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+volfiler2=$(gluster system:: getwd)"/vols/r2/r2-shd.vol"
+EXPECT "enable" volgen_volume_option $volfiler2 r2-replicate-0 cluster replicate self-heal-daemon
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid r2
+pid=$( get_shd_process_pid r2 )
TEST $CLI volume heal r2 disable
EXPECT "disable" volume_option r2 "cluster.self-heal-daemon"
-EXPECT "disable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+EXPECT "disable" volgen_volume_option $volfiler2 r2-replicate-0 cluster replicate self-heal-daemon
+EXPECT "1" is_pid_running $pid
# Commands should work on disperse volume.
TEST $CLI volume create ec2 disperse 3 redundancy 1 $H0:$B0/ec2_0 $H0:$B0/ec2_1 $H0:$B0/ec2_2
TEST $CLI volume start ec2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid ec2
TEST $CLI volume heal ec2 enable
EXPECT "enable" volume_option ec2 "cluster.disperse-self-heal-daemon"
-EXPECT "enable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+volfileec2=$(gluster system:: getwd)"/vols/ec2/ec2-shd.vol"
+EXPECT "enable" volgen_volume_option $volfileec2 ec2-disperse-0 cluster disperse self-heal-daemon
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid ec2
+pid=$(get_shd_process_pid ec2)
TEST $CLI volume heal ec2 disable
EXPECT "disable" volume_option ec2 "cluster.disperse-self-heal-daemon"
-EXPECT "disable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+EXPECT "disable" volgen_volume_option $volfileec2 ec2-disperse-0 cluster disperse self-heal-daemon
+EXPECT "1" is_pid_running $pid
#Check that shd graph is rewritten correctly on volume stop/start
-EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
-EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
+EXPECT "Y" volgen_volume_exists $volfileec2 ec2-disperse-0 cluster disperse
+
+EXPECT "Y" volgen_volume_exists $volfiler2 r2-replicate-0 cluster replicate
TEST $CLI volume stop r2
-EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
-EXPECT "N" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
+EXPECT "Y" volgen_volume_exists $volfileec2 ec2-disperse-0 cluster disperse
TEST $CLI volume stop ec2
# When both the volumes are stopped glustershd volfile is not modified just the
# process is stopped
-TEST "[ -z $(get_shd_process_pid) ]"
+TEST "[ -z $(get_shd_process_pid dist) ]"
+TEST "[ -z $(get_shd_process_pid ec2) ]"
TEST $CLI volume start r2
-EXPECT "N" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
-EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
+EXPECT "Y" volgen_volume_exists $volfiler2 r2-replicate-0 cluster replicate
TEST $CLI volume set r2 self-heal-daemon on
TEST $CLI volume set r2 cluster.self-heal-daemon off
diff --git a/tests/basic/glusterd/thin-arbiter-volume-probe.t b/tests/basic/glusterd/thin-arbiter-volume-probe.t
new file mode 100644
index 00000000000..acc6943806d
--- /dev/null
+++ b/tests/basic/glusterd/thin-arbiter-volume-probe.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+#This tests if the thin-arbiter-count is transferred to the other peer.
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
+
+kill_glusterd 2
+$CLI_1 volume create $V0 replica 2 thin-arbiter 1 $H0:$B0/b{1..3}
+TEST $glusterd_2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
+EXPECT "1 x 2 = 2" volinfo_field_1 $V0 "Number of Bricks"
+EXPECT "1 x 2 = 2" volinfo_field_2 $V0 "Number of Bricks"
+
+cleanup;
diff --git a/tests/basic/glusterd/thin-arbiter-volume.t b/tests/basic/glusterd/thin-arbiter-volume.t
new file mode 100644
index 00000000000..4e813890a45
--- /dev/null
+++ b/tests/basic/glusterd/thin-arbiter-volume.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../thin-arbiter.rc
+
+#This test validates the volume create command for thin-arbiter volumes.
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 thin-arbiter 1 $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
+EXPECT "1 x 2 = 2" volinfo_field $V0 "Number of Bricks"
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+TEST touch $M0/a.txt
+TEST ls $B0/b1/a.txt
+TEST ls $B0/b2/a.txt
+TEST ! ls $B0/b3/a.txt
+
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+TEST $CLI volume create $V0 replica 2 thin-arbiter 1 $H0:$B0/b{4..8}
+EXPECT "2 x 2 = 4" volinfo_field $V0 "Number of Bricks"
+
+TEST $CLI volume delete $V0
+
+TEST rm -rf $B0/b{1..3}
+
+TEST $CLI volume create $V0 replica 2 thin-arbiter 1 $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
+EXPECT "1 x 2 = 2" volinfo_field $V0 "Number of Bricks"
+
+TEST killall -15 glusterd
+TEST glusterd
+TEST pidof glusterd
+EXPECT "1 x 2 = 2" volinfo_field $V0 "Number of Bricks"
+
+cleanup
+
diff --git a/tests/basic/glusterd/volfile_server_switch.t b/tests/basic/glusterd/volfile_server_switch.t
index 309060919b7..e11cfed509a 100644
--- a/tests/basic/glusterd/volfile_server_switch.t
+++ b/tests/basic/glusterd/volfile_server_switch.t
@@ -34,7 +34,7 @@ TEST glusterfs --volfile-id=/$V0 --volfile-server=$H1 --volfile-server=$H2 --vol
TEST kill_glusterd 1
-TEST $CLI_2 volume set $V0 performance.io-cache off
+TEST $CLI_2 volume set $V0 performance.write-behind off
# make sure by this time directory will be created
# TODO: suggest ideal time to wait
diff --git a/tests/basic/glusterd/volume-brick-count.t b/tests/basic/glusterd/volume-brick-count.t
new file mode 100644
index 00000000000..dc1a5278f4f
--- /dev/null
+++ b/tests/basic/glusterd/volume-brick-count.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
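+# Compare the CLI "Number of Bricks" string and the brick/dist/replica/arbiter/
+# disperse/redundancy counts from the XML output against the expected values
+# passed as arguments.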
+function test_volume_config()
+{
+ volname=$1
+ type_string=$2
+ brickCount=$3
+ distCount=$4
+ replicaCount=$5
+ arbiterCount=$6
+ disperseCount=$7
+ redundancyCount=$8
+
+ EXPECT "$type_string" volinfo_field $volname "Number of Bricks"
+ EXPECT "$brickCount" get-xml "volume info $volname" "brickCount"
+ EXPECT "$distCount" get-xml "volume info $volname" "distCount"
+ EXPECT "$replicaCount" get-xml "volume info $volname" "replicaCount"
+ EXPECT "$arbiterCount" get-xml "volume info $volname" "arbiterCount"
+ EXPECT "$disperseCount" get-xml "volume info $volname" "disperseCount"
+ EXPECT "$redundancyCount" get-xml "volume info $volname" "redundancyCount"
+}
+
+# This command tests the volume create command and number of bricks for different volume types.
+cleanup;
+TESTS_EXPECTED_IN_LOOP=56
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create ${V0}_1 replica 3 arbiter 1 $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
+test_volume_config "${V0}_1" "1 x \(2 \+ 1\) = 3" "3" "1" "3" "1" "0" "0"
+
+TEST $CLI volume create ${V0}_2 replica 3 arbiter 1 $H0:$B0/b{4..9}
+test_volume_config "${V0}_2" "2 x \(2 \+ 1\) = 6" "6" "2" "3" "1" "0" "0"
+
+
+TEST $CLI volume create ${V0}_3 replica 3 arbiter 1 $H0:$B0/b{10..12}
+test_volume_config "${V0}_3" "1 x \(2 \+ 1\) = 3" "3" "1" "3" "1" "0" "0"
+TEST killall -15 glusterd
+TEST glusterd
+TEST pidof glusterd
+test_volume_config "${V0}_3" "1 x \(2 \+ 1\) = 3" "3" "1" "3" "1" "0" "0"
+
+TEST $CLI volume create ${V0}_4 replica 3 $H0:$B0/b{13..15}
+test_volume_config "${V0}_4" "1 x 3 = 3" "3" "1" "3" "0" "0" "0"
+
+TEST $CLI volume create ${V0}_5 replica 3 $H0:$B0/b{16..21}
+test_volume_config "${V0}_5" "2 x 3 = 6" "6" "2" "3" "0" "0" "0"
+
+TEST $CLI volume create ${V0}_6 disperse 3 redundancy 1 $H0:$B0/b{22..24}
+test_volume_config "${V0}_6" "1 x \(2 \+ 1\) = 3" "3" "1" "1" "0" "3" "1"
+
+TEST $CLI volume create ${V0}_7 disperse 3 redundancy 1 $H0:$B0/b{25..30}
+test_volume_config "${V0}_7" "2 x \(2 \+ 1\) = 6" "6" "2" "1" "0" "3" "1"
+
+TEST $CLI volume create ${V0}_8 $H0:$B0/b{31..33}
+test_volume_config "${V0}_8" "3" "3" "3" "1" "0" "0" "0"
+
+cleanup
diff --git a/tests/basic/graph-cleanup-brick-down-shd-mux.t b/tests/basic/graph-cleanup-brick-down-shd-mux.t
new file mode 100644
index 00000000000..3c621cdcc26
--- /dev/null
+++ b/tests/basic/graph-cleanup-brick-down-shd-mux.t
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
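+#Verify that the muxed self-heal daemon cleans up and re-adds a volume's graph
+#correctly when the volume is stopped and started while one brick, a whole
+#replica subvolume, or all of its bricks are down.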
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=4
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+
+for i in $(seq 1 2); do
+ TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_afr$i
+ TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+#Check that the thread count becomes number of volumes * number of ec subvolumes (2*6=12)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+#Check that the thread count becomes number of volumes * number of afr subvolumes (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#kill one brick and test cleanup
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST $CLI volume stop $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+
+#kill an entire subvol and test cleanup
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+#wait for some time to create a race scenario
+sleep 1
+TEST $CLI volume stop $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+
+#kill all bricks and test cleanup
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}3
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick $V0 $H0 $B0/${V0}5
+#wait for some time to create a race scenario
+sleep 2
+
+TEST $CLI volume stop $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+
+cleanup
diff --git a/tests/basic/jbr/jbr-volgen.t b/tests/basic/jbr/jbr-volgen.t
new file mode 100644
index 00000000000..f368710c158
--- /dev/null
+++ b/tests/basic/jbr/jbr-volgen.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+volfiles=${GLUSTERD_WORKDIR}/vols/${V0}/
+check_brick_volfiles () {
+ for vf in ${volfiles}${V0}.$(hostname).*.vol; do
+ grep -qs experimental/jbr $vf || return
+ # At least for now, nothing else would put a client translator
+ # in a brick volfile.
+ grep -qs protocol/client $vf || return
+ done
+ echo "OK"
+}
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
+TEST $CLI volume set $V0 cluster.jbr on
+
+# Check that the client volfile got modified properly.
+TEST grep -qs experimental/jbrc ${volfiles}${V0}.tcp-fuse.vol
+
+# Check that the brick volfiles got modified as well.
+EXPECT "OK" check_brick_volfiles
+
+# Put things back and make sure the "undo" worked.
+TEST $CLI volume set $V0 cluster.jbr off
+TEST $CLI volume start $V0
+TEST $GFS -s $H0 --volfile-id $V0 $M0
+echo hello > $M0/probe
+EXPECT hello cat ${B0}/${V0}1/probe
+EXPECT hello cat ${B0}/${V0}2/probe
+
+cleanup
+#G_TESTDEF_TEST_STATUS_CENTOS6=KNOWN_ISSUE,BUG=1385758
+#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=1385758
diff --git a/tests/basic/jbr/jbr.t b/tests/basic/jbr/jbr.t
new file mode 100755
index 00000000000..605344b5a7e
--- /dev/null
+++ b/tests/basic/jbr/jbr.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../fdl.rc
+
+cleanup;
+
+TEST verify_lvm_version;
+#Create cluster with 3 nodes
+TEST launch_cluster 3;
+TEST setup_lvm 3
+
+TEST $CLI_1 peer probe $H2;
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count;
+
+TEST $CLI_1 volume create $V0 replica 3 $H1:$L1 $H2:$L2 $H3:$L3
+TEST $CLI_1 volume set $V0 cluster.jbr on
+TEST $CLI_1 volume set $V0 cluster.jbr.quorum-percent 100
+TEST $CLI_1 volume set $V0 features.fdl on
+#TEST $CLI_1 volume set $V0 diagnostics.brick-log-level DEBUG
+TEST $CLI_1 volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H1 --entry-timeout=0 $M0;
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" jbrc_child_up_status $V0 0
+
+echo "file" > $M0/file1
+TEST stat $L1/file1
+TEST stat $L2/file1
+TEST stat $L3/file1
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_CENTOS6=KNOWN_ISSUE,BUG=1385758
+#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=1385758
diff --git a/tests/basic/logchecks-messages.h b/tests/basic/logchecks-messages.h
index 1ae68646445..bf364848ec7 100644
--- a/tests/basic/logchecks-messages.h
+++ b/tests/basic/logchecks-messages.h
@@ -11,7 +11,7 @@
#ifndef _LOGCHECKS_MESSAGES_H_
#define _LOGCHECKS_MESSAGES_H_
-#include "glfs-message-id.h"
+#include <glusterfs/glfs-message-id.h>
/* NOTE: Rules for message additions
* 1) Each instance of a message is _better_ left with a unique message ID, even
diff --git a/tests/basic/logchecks.c b/tests/basic/logchecks.c
index 86891057230..df0be28ace0 100644
--- a/tests/basic/logchecks.c
+++ b/tests/basic/logchecks.c
@@ -11,9 +11,9 @@
#include <stdio.h>
#include <unistd.h>
-#include "glusterfs.h"
-#include "globals.h"
-#include "logging.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/globals.h>
+#include <glusterfs/logging.h>
#include "logchecks-messages.h"
#include "../../libglusterfs/src/logging.h"
diff --git a/tests/basic/meta.t b/tests/basic/meta.t
index 55ca005824b..0bac3c6797d 100755
--- a/tests/basic/meta.t
+++ b/tests/basic/meta.t
@@ -9,7 +9,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
-TEST $CLI volume create $V0 replica 2 stripe 4 $H0:$B0/${V0}{1..16};
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..9};
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
diff --git a/tests/basic/metadisp/fsyncdir.c b/tests/basic/metadisp/fsyncdir.c
new file mode 100644
index 00000000000..62b532b9ce4
--- /dev/null
+++ b/tests/basic/metadisp/fsyncdir.c
@@ -0,0 +1,29 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <fcntl.h>
+
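+/* Open the directory argv[1], rename argv[2] to argv[3], then fsync the
+ * directory fd so that the rename is flushed to the directory. */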
+int
+main(int argc, char **argv)
+{
+ int pfd;
+
+ pfd = open(argv[1], O_RDONLY | O_DIRECTORY);
+ if (pfd == (-1)) {
+ perror("open");
+ return EXIT_FAILURE;
+ }
+
+ if (rename(argv[2], argv[3]) == (-1)) {
+ perror("rename");
+ return EXIT_FAILURE;
+ }
+
+ if (fsync(pfd) == (-1)) {
+ perror("fsync");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/tests/basic/metadisp/ftruncate.c b/tests/basic/metadisp/ftruncate.c
new file mode 100644
index 00000000000..c9185212c31
--- /dev/null
+++ b/tests/basic/metadisp/ftruncate.c
@@ -0,0 +1,34 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <fcntl.h>
+
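+/* Open argv[1] read-write, truncate it to zero length, then write a short
+ * payload and fsync it. */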
+int
+main(int argc, char **argv)
+{
+ int pfd;
+
+ pfd = open(argv[1], O_RDWR);
+ if (pfd == (-1)) {
+ perror("open");
+ return EXIT_FAILURE;
+ }
+
+ if (ftruncate(pfd, 0) == (-1)) {
+ perror("ftruncate");
+ return EXIT_FAILURE;
+ }
+
+ if (write(pfd, "hello", 5) == (-1)) {
+ perror("write");
+ return EXIT_FAILURE;
+ }
+
+ if (fsync(pfd) == (-1)) {
+ perror("fsync");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/tests/basic/metadisp/fxattr.c b/tests/basic/metadisp/fxattr.c
new file mode 100644
index 00000000000..e552057778a
--- /dev/null
+++ b/tests/basic/metadisp/fxattr.c
@@ -0,0 +1,107 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/xattr.h>
+
+static char MY_XATTR[] = "user.fxtest";
+static char *PROGRAM;
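+/* Consume the next command-line argument into 'v'; fail if none are left. */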
+#define CONSUME(v) \
+ do { \
+ if (!argc) { \
+ fprintf(stderr, "missing argument\n"); \
+ return EXIT_FAILURE; \
+ } \
+ v = argv[0]; \
+ ++argv; \
+ --argc; \
+ } while (0)
+
+static int
+do_get(int argc, char **argv, int fd)
+{
+ char *value;
+ int ret;
+ char buf[1024];
+
+ CONSUME(value);
+
+ ret = fgetxattr(fd, MY_XATTR, buf, sizeof(buf));
+ if (ret == (-1)) {
+ perror("fgetxattr");
+ return EXIT_FAILURE;
+ }
+
+ if (strncmp(buf, value, ret) != 0) {
+ fprintf(stderr, "data mismatch\n");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
+
+static int
+do_set(int argc, char **argv, int fd)
+{
+ char *value;
+ int ret;
+
+ CONSUME(value);
+
+ ret = fsetxattr(fd, MY_XATTR, value, strlen(value), 0);
+ if (ret == (-1)) {
+ perror("fsetxattr");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
+
+static int
+do_remove(int argc, char **argv, int fd)
+{
+ int ret;
+
+ ret = fremovexattr(fd, MY_XATTR);
+ if (ret == (-1)) {
+        perror("fremovexattr");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
+
+int
+main(int argc, char **argv)
+{
+ int fd;
+ char *path;
+ char *cmd;
+
+ CONSUME(PROGRAM);
+ CONSUME(path);
+ CONSUME(cmd);
+
+ fd = open(path, O_RDWR);
+ if (fd == (-1)) {
+ perror("open");
+ return EXIT_FAILURE;
+ }
+
+ if (strcmp(cmd, "get") == 0) {
+ return do_get(argc, argv, fd);
+ }
+
+ if (strcmp(cmd, "set") == 0) {
+ return do_set(argc, argv, fd);
+ }
+
+ if (strcmp(cmd, "remove") == 0) {
+ return do_remove(argc, argv, fd);
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/tests/basic/metadisp/gfs-fsetxattr.c b/tests/basic/metadisp/gfs-fsetxattr.c
new file mode 100644
index 00000000000..63578bc528f
--- /dev/null
+++ b/tests/basic/metadisp/gfs-fsetxattr.c
@@ -0,0 +1,141 @@
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int gfapi = 1;
+
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs = NULL;
+ int ret = 0;
+ int i = 0;
+ glfs_fd_t *fd = NULL;
+ char *topdir = "topdir", *filename = "file1";
+ char *buf = NULL;
+ char *logfile = NULL;
+ char *hostname = NULL;
+ char *basename = NULL;
+ char *dir1 = NULL, *dir2 = NULL, *filename1 = NULL, *filename2 = NULL;
+ struct stat sb = {
+ 0,
+ };
+
+ if (argc != 5) {
+ fprintf(
+ stderr,
+ "Expect following args %s <hostname> <Vol> <log file> <basename>\n",
+ argv[0]);
+ return -1;
+ }
+
+ hostname = argv[1];
+ logfile = argv[3];
+ basename = argv[4];
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL (%s)\n", strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_set_volfile_server failed ret:%d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_set_logging failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = asprintf(&dir1, "%s-dir", basename);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct filename (%s)", strerror(errno));
+ return ret;
+ }
+
+ ret = glfs_mkdir(fs, dir1, 0755);
+ if (ret < 0) {
+ fprintf(stderr, "mkdir(%s): %s\n", dir1, strerror(errno));
+ return -1;
+ }
+
+ fd = glfs_opendir(fs, dir1);
+ if (!fd) {
+ fprintf(stderr, "/: %s\n", strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_fsetxattr(fd, "user.dirfattr", "fsetxattr", 9, 0);
+ if (ret < 0) {
+ fprintf(stderr, "fsetxattr(%s): %d (%s)\n", dir1, ret, strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_closedir(fd);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_closedir failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = asprintf(&filename1, "%s-file", basename);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct filename (%s)", strerror(errno));
+ return ret;
+ }
+
+ ret = asprintf(&filename2, "%s-file-renamed", basename);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct filename (%s)", strerror(errno));
+ return ret;
+ }
+
+ fd = glfs_creat(fs, filename1, O_RDWR, 0644);
+ if (!fd) {
+ fprintf(stderr, "%s: (%p) %s\n", filename1, fd, strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_rename(fs, filename1, filename2);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_rename failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_lstat(fs, filename2, &sb);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_lstat failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_fsetxattr(fd, "user.filefattr", "fsetxattr", 9, 0);
+ if (ret < 0) {
+        fprintf(stderr, "fsetxattr(%s): %d (%s)\n", filename2, ret, strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_close(fd);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_close failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+}
diff --git a/tests/basic/metadisp/metadisp.t b/tests/basic/metadisp/metadisp.t
new file mode 100644
index 00000000000..894ffe07226
--- /dev/null
+++ b/tests/basic/metadisp/metadisp.t
@@ -0,0 +1,316 @@
+#!/usr/bin/env bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+
+# Because `--enable-metadisp` is a `./configure` option that is disabled by
+# default, this test will never pass in regression runs. To see the value of
+# this test, run the command below after configuring with that option:
+# `prove -vmfe '/bin/bash' tests/basic/metadisp/metadisp.t`
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST
+
+cleanup;
+
+TEST mkdir -p $B0/b0/{0,1}
+
+TEST setfattr -n trusted.glusterfs.volume-id -v 0xddab9eece7b64a95b07351a1f748f56f ${B0}/b0/0
+TEST setfattr -n trusted.glusterfs.volume-id -v 0xddab9eece7b64a95b07351a1f748f56f ${B0}/b0/1
+
+TEST $GFS --volfile=$(dirname $0)/metadisp.vol --volfile-id=$V0 $M0;
+
+NUM_FILES=40
+TEST touch $M0/{1..${NUM_FILES}}
+
+# each drive should get 40 files
+TEST [ $(dir -1 $B0/b0/0/ | wc -l) -eq $NUM_FILES ]
+TEST [ $(dir -1 $B0/b0/1/ | wc -l) -eq $NUM_FILES ]
+
+# now write some data to a file
+echo "hello" > $M0/3
+filename=$$
+echo "hello" > /tmp/metadisp-write-${filename}
+checksum=$(md5sum /tmp/metadisp-write-${filename} | awk '{print $1}')
+TEST [ "$(md5sum $M0/3 | awk '{print $1}')" == "$checksum" ]
+
+# check that the backend file exists on the second backend (b0/1)
+gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/b0/*/3))
+TEST [ $(dir -1 $B0/b0/1/$gfid | wc -l) -eq 1 ]
+
+# check that the backend file matches the frontend
+TEST [ "$(md5sum $B0/b0/1/$gfid | awk '{print $1}')" == "$checksum" ]
+
+# delete the file
+TEST rm $M0/3
+
+# ensure the frontend and backend files are cleaned up
+TEST ! test -e $M0/3
+TEST ! stat $B0/b*/*/$gfid
+
+# Test TRUNCATE + WRITE flow
+echo "hello" | tee $M0/4
+echo "goo" | tee $M0/4
+filename=$$
+echo "goo" | tee /tmp/metadisp-truncate-${filename}
+checksum=$(md5sum /tmp/metadisp-truncate-${filename} | awk '{print $1}')
+TEST [ "$(md5sum $M0/4 | awk '{print $1}')" == "$checksum" ]
+
+# Test mkdir + rmdir.
+TEST mkdir $M0/rmdir_me
+nfiles=$(ls -d $B0/b*/*/rmdir_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+TEST rmdir $M0/rmdir_me
+nfiles=$(ls -d $B0/b*/*/rmdir_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+
+# Test rename.
+TEST touch $M0/rename_me
+nfiles=$(ls $B0/b*/*/rename_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+nfiles=$(ls $B0/b*/*/such_rename 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+TEST mv $M0/rename_me $M0/such_rename
+nfiles=$(ls $B0/b*/*/rename_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+nfiles=$(ls $B0/b*/*/such_rename 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+
+# Test rename of a file that doesn't exist.
+TEST ! mv $M0/does-not-exist $M0/neither-does-this
+
+
+# cleanup all the other files.
+TEST rm -v $M0/1 $M0/2 $M0/{4..${NUM_FILES}}
+TEST rm $M0/such_rename
+TEST [ $(ls /d/backends/b0/0/ | wc -l) -eq 0 ]
+TEST [ $(ls /d/backends/b0/1/ | wc -l) -eq 0 ]
+
+# Test CREATE flow
+NUM_FILES=40
+TEST touch $M0/{1..${NUM_FILES}}
+TEST [ $(ls /d/backends/b0/0/ | wc -l) -eq $NUM_FILES ]
+TEST [ $(ls /d/backends/b0/1/ | wc -l) -eq $NUM_FILES ]
+
+# Test UNLINK flow
+# No drives should have any files
+TEST rm -v $M0/{1..${NUM_FILES}}
+TEST [ $(ls /d/backends/b0/0/ | wc -l) -eq 0 ]
+TEST [ $(ls /d/backends/b0/1/ | wc -l) -eq 0 ]
+
+# Test CREATE + WRITE + READ flow
+filename=$$
+dd if=/dev/urandom of=/tmp/${filename} bs=1M count=10
+checksum=$(md5sum /tmp/${filename} | awk '{print $1}')
+TEST cp -v /tmp/${filename} $M0/1
+TEST cp -v /tmp/${filename} $M0/2
+TEST cp -v /tmp/${filename} $M0/3
+TEST cp -v /tmp/${filename} $M0/4
+TEST [ "$(md5sum $M0/1 | awk '{print $1}')" == "$checksum" ]
+TEST [ "$(md5sum $M0/2 | awk '{print $1}')" == "$checksum" ]
+TEST [ "$(md5sum $M0/3 | awk '{print $1}')" == "$checksum" ]
+TEST [ "$(md5sum $M0/4 | awk '{print $1}')" == "$checksum" ]
+
+# Test TRUNCATE + WRITE flow
+TEST dd if=/dev/zero of=$M0/1 bs=1M count=20
+
+# Check that readdir stats the files properly and we get the correct sizes
+TEST [ $(find $M0 -size +9M | wc -l) -eq 4 ];
+
+# Test mkdir + rmdir.
+TEST mkdir $M0/rmdir_me
+nfiles=$(ls -d $B0/b*/*/rmdir_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+TEST rmdir $M0/rmdir_me
+nfiles=$(ls -d $B0/b*/*/rmdir_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+
+# Test rename.
+# Still flaky, so disabled until it can be debugged.
+TEST touch $M0/rename_me
+nfiles=$(ls $B0/b*/*/rename_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+nfiles=$(ls $B0/b*/*/such_rename 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+TEST mv $M0/rename_me $M0/such_rename
+nfiles=$(ls $B0/b*/*/rename_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+nfiles=$(ls $B0/b*/*/such_rename 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+
+# Test rename of a file that doesn't exist.
+TEST ! mv $M0/does-not-exist $M0/neither-does-this
+
+# Test rename over an existing file.
+ok=yes
+for i in $(seq 0 9); do
+ echo foo > $M0/src$i
+ echo bar > $M0/dst$i
+done
+for i in $(seq 0 9); do
+ mv $M0/src$i $M0/dst$i
+done
+for i in $(seq 0 9); do
+ nfiles=$(cat $B0/b0/*/dst$i | wc -l)
+ if [ "$nfiles" = "2" ]; then
+ echo "COLLISION on dst$i"
+ (ls -l $B0/b0/*/dst$i; cat $B0/b0/*/dst$i) | sed "/^/s// /"
+ ok=no
+ fi
+done
+EXPECT "yes" echo $ok
+
+# Test rename of a directory.
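+# count_copies: report on how many backends a directory named $1 exists.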
+count_copies () {
+ ls -d $B0/b?/?/$1 2> /dev/null | wc -l
+}
+TEST mkdir $M0/foo_dir
+EXPECT 1 count_copies foo_dir
+EXPECT 0 count_copies bar_dir
+TEST mv $M0/foo_dir $M0/bar_dir
+EXPECT 0 count_copies foo_dir
+EXPECT 1 count_copies bar_dir
+
+for x in $(seq 0 99); do
+ touch $M0/target$x
+ ln -s $M0/target$x $M0/link$x
+done
+on_0=$(ls $B0/b*/0/link* | wc -l)
+on_1=$(ls $B0/b*/1/link* | wc -l)
+TEST [ "$on_0" -eq 100 ]
+TEST [ "$on_1" -eq 0 ]
+TEST [ "$(ls -l $M0/link* | wc -l)" = 100 ]
+
+# Test (hard) link.
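+# Check that a file and its hard link end up on the same backends and, where
+# both exist, share the same inode.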
+_test_hardlink () {
+ local b
+ local has_src
+ local has_dst
+ local src_inum
+ local dst_inum
+ touch $M0/hardsrc$1
+ ln $M0/hardsrc$1 $M0/harddst$1
+    for b in $B0/b0/{0,1}; do
+ [ -f $b/hardsrc$1 ]; has_src=$?
+ [ -f $b/harddst$1 ]; has_dst=$?
+ if [ "$has_src" != "$has_dst" ]; then
+ echo "MISSING $b/hardxxx$1 $has_src $has_dst"
+ return
+ fi
+ if [ "$has_src$has_dst" = "00" ]; then
+ src_inum=$(stat -c '%i' $b/hardsrc$1)
+ dst_inum=$(stat -c '%i' $b/harddst$1)
+ if [ "$dst_inum" != "$src_inum" ]; then
+                echo "MISMATCH $b/hardxxx$1 $src_inum $dst_inum"
+ return
+ fi
+ fi
+ done
+ echo "OK"
+}
+
+test_hardlink () {
+ local result=$(_test_hardlink $*)
+ # [ "$result" = "OK" ] || echo $result > /dev/tty
+ echo $result
+}
+
+# Do this multiple times to make sure colocation isn't a fluke.
+EXPECT "OK" test_hardlink 0
+EXPECT "OK" test_hardlink 1
+EXPECT "OK" test_hardlink 2
+EXPECT "OK" test_hardlink 3
+EXPECT "OK" test_hardlink 4
+EXPECT "OK" test_hardlink 5
+EXPECT "OK" test_hardlink 6
+EXPECT "OK" test_hardlink 7
+EXPECT "OK" test_hardlink 8
+EXPECT "OK" test_hardlink 9
+
+# Test remove hardlink source. ensure deleting one file
+# doesn't delete the data unless link-count is 1
+TEST mkdir $M0/hardlink
+TEST touch $M0/hardlink/fileA
+echo "data" >> $M0/hardlink/fileA
+checksum=$(md5sum $M0/hardlink/fileA | awk '{print $1}')
+TEST ln $M0/hardlink/fileA $M0/hardlink/fileB
+TEST [ $(dir -1 $M0/hardlink/ | wc -l) -eq 2 ]
+TEST rm $M0/hardlink/fileA
+TEST [ $(dir -1 $M0/hardlink/ | wc -l) -eq 1 ]
+TEST [ "$(md5sum $M0/hardlink/fileB | awk '{print $1}')" == "$checksum" ]
+
+#
+# FIXME: statfs values look ok but the test is bad
+#
+# Test statfs. If we're doing it right, the numbers for the mountpoint should be
+# roughly the brick filesystem's numbers multiplied by the number of bricks
+# (i.e. double here), but unless we're on a completely idle
+# system (which never happens) the numbers can change even while this function
+# runs and that would trip us up. Do a sloppy comparison to deal with that.
+#compare_fields () {
+# val1=$(df $1 | grep / | awk "{print \$$3}")
+# val2=$(df $2 | grep / | awk "{print \$$3}")
+# [ "$val2" -gt "$(((val1/(29/10))*19/10))" -a "$val2" -lt "$(((val1/(31/10))*21/10))" ]
+#}
+
+#brick_df=$(df $B0 | grep /)
+#mount_df=$(df $M0 | grep /)
+#TEST compare_fields $B0 $M0 2 # Total blocks
+#TEST compare_fields $B0 $M0 3 # Used
+#TEST compare_fields $B0 $M0 4 # Available
+
+# Test removexattr.
+#RXATTR_FILE=$(get_file_not_on_disk0 rxtest)
+#TEST setfattr -n user.foo -v bar $M0/$RXATTR_FILE
+#TEST getfattr -n user.foo $B0/b0/1/$RXATTR_FILE
+#TEST setfattr -x user.foo $M0/$RXATTR_FILE
+#TEST ! getfattr -n user.foo $B0/b0/1/$RXATTR_FILE
+
+# Test fsyncdir. We can't really test whether it's doing the right thing,
+# but we can test that it doesn't fail and we can hand-check that it's calling
+# down to all of the disks instead of just one.
+#
+# P.S. There's no fsyncdir test in the rest of Gluster, so who even knows if
+# other translators are handling it correctly?
+
+#FSYNCDIR_EXE=$(dirname $0)/fsyncdir
+#build_tester ${FSYNCDIR_EXE}.c
+#TEST touch $M0/fsyncdir_src
+#TEST $FSYNCDIR_EXE $M0 $M0/fsyncdir_src $M0/fsyncdir_dst
+#TEST rm -f $FSYNCDIR_EXE
+
+# Test fsetxattr, fgetxattr, fremovexattr (in that order).
+FXATTR_FILE=$M0/fxfile1
+TEST touch $FXATTR_FILE
+FXATTR_EXE=$(dirname $0)/fxattr
+build_tester ${FXATTR_EXE}.c
+TEST ! getfattr -n user.fxtest $FXATTR_FILE
+TEST $FXATTR_EXE $FXATTR_FILE set value1
+TEST getfattr -n user.fxtest $FXATTR_FILE
+TEST setfattr -n user.fxtest -v value2 $FXATTR_FILE
+TEST $FXATTR_EXE $FXATTR_FILE get value2
+TEST $FXATTR_EXE $FXATTR_FILE remove
+TEST ! getfattr -n user.fxtest $FXATTR_FILE
+TEST rm -f $FXATTR_EXE
+
+# Test ftruncate
+FTRUNCATE_EXE=$(dirname $0)/ftruncate
+build_tester ${FTRUNCATE_EXE}.c
+FTRUNCATE_FILE=$M0/ftfile1
+TEST dd if=/dev/urandom of=$FTRUNCATE_FILE count=1 bs=1MB
+TEST $FTRUNCATE_EXE $FTRUNCATE_FILE
+#gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/b0/*/ftfile1))
+
+# Test fallocate, discard, zerofill. Actually we don't so much check that these
+# *work* as that they don't throw any errors (especially ENOENT because the
+# file's not on disk zero).
+FALLOC_FILE=fatest1
+TEST touch $M0/$FALLOC_FILE
+TEST fallocate -l $((4096*5)) $M0/$FALLOC_FILE
+TEST fallocate -p -o 4096 -l 4096 $M0/$FALLOC_FILE
+# This actually fails with "operation not supported" on most filesystems, so
+# don't leave it enabled except to test changes.
+#TEST fallocate -z -o $((4096*3)) -l 4096 $M0/$FALLOC_FILE
+
+#cleanup;
diff --git a/tests/basic/metadisp/metadisp.vol b/tests/basic/metadisp/metadisp.vol
new file mode 100644
index 00000000000..58ae2f6f2a8
--- /dev/null
+++ b/tests/basic/metadisp/metadisp.vol
@@ -0,0 +1,14 @@
+volume posix-0
+ type storage/posix
+ option directory /d/backends/b0/0
+end-volume
+
+volume posix-1
+ type storage/posix
+ option directory /d/backends/b0/1
+end-volume
+
+volume metadisp-0
+ type features/metadisp
+ subvolumes posix-0 posix-1
+end-volume
diff --git a/tests/basic/mount-nfs-auth.t b/tests/basic/mount-nfs-auth.t
deleted file mode 100755
index 0d4e01abb32..00000000000
--- a/tests/basic/mount-nfs-auth.t
+++ /dev/null
@@ -1,340 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../nfs.rc
-
-# Our mount timeout must be as long as the time for a regular configuration
-# change to be acted upon *plus* AUTH_REFRESH_TIMEOUT, not one replacing the
-# other. Otherwise this process races vs. the one making the change we're
-# trying to test, which leads to spurious failures.
-MY_MOUNT_TIMEOUT=$((CONFIG_UPDATE_TIMEOUT+AUTH_REFRESH_INTERVAL))
-
-cleanup;
-## Check whether glusterd is running
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info
-
-H0IP=$(ip addr show |grep -w inet |grep -v 127.0.0.1|awk '{ print $2 }'| cut -d "/" -f 1)
-H0IP6=$(host $HOSTNAME | grep IPv6 | awk '{print $NF}')
-
-# Export variables for allow & deny
-EXPORT_ALLOW="/$V0 $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
-EXPORT_ALLOW_SLASH="/$V0/ $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
-EXPORT_DENY="/$V0 1.2.3.4(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
-
-# Netgroup variables for allow & deny
-NETGROUP_ALLOW="ngtop ng1000\nng1000 ng999\nng999 ng1\nng1 ng2\nng2 ($H0,,)"
-NETGROUP_DENY="ngtop ng1000\nng1000 ng999\nng999 ng1\nng1 ng2\nng2 (1.2.3.4,,)"
-
-V0L1="$V0/L1"
-V0L2="$V0L1/L2"
-V0L3="$V0L2/L3"
-
-# Other variations for allow & deny
-EXPORT_ALLOW_RO="/$V0 $H0(sec=sys,ro,anonuid=0) @ngtop(sec=sys,ro,anonuid=0)"
-EXPORT_ALLOW_L1="/$V0L1 $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
-EXPORT_WILDCARD="/$V0 *(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
-
-function build_dirs () {
- mkdir -p $B0/b{0,1,2}/L1/L2/L3
-}
-
-function export_allow_this_host_ipv6 () {
- printf "$EXPORT_ALLOW6\n" > "$GLUSTERD_WORKDIR"/nfs/exports
-}
-
-function export_allow_this_host () {
- printf "$EXPORT_ALLOW\n" > ${NFSDIR}/exports
-}
-
-function export_allow_this_host_with_slash () {
- printf "$EXPORT_ALLOW_SLASH\n" > ${NFSDIR}/exports
-}
-
-function export_deny_this_host () {
- printf "$EXPORT_DENY\n" > ${NFSDIR}/exports
-}
-
-function export_allow_this_host_l1 () {
- printf "$EXPORT_ALLOW_L1\n" >> ${NFSDIR}/exports
-}
-
-function export_allow_wildcard () {
- printf "$EXPORT_WILDCARD\n" > ${NFSDIR}/exports
-}
-
-function export_allow_this_host_ro () {
- printf "$EXPORT_ALLOW_RO\n" > ${NFSDIR}/exports
-}
-
-function netgroup_allow_this_host () {
- printf "$NETGROUP_ALLOW\n" > ${NFSDIR}/netgroups
-}
-
-function netgroup_deny_this_host () {
- printf "$NETGROUP_DENY\n" > ${NFSDIR}/netgroups
-}
-
-function create_vol () {
- $CLI vol create $V0 $H0:$B0/b0
-}
-
-function setup_cluster() {
- build_dirs # Build directories
- export_allow_this_host # Allow this host in the exports file
- netgroup_allow_this_host # Allow this host in the netgroups file
-
- glusterd
- create_vol # Create the volume
-}
-
-function check_mount_success {
- mount_nfs $H0:/$1 $N0 nolock
- if [ $? -eq 0 ]; then
- echo "Y"
- else
- echo "N"
- fi
-}
-
-function check_mount_failure {
- mount_nfs $H0:/$1 $N0 nolock
- if [ $? -ne 0 ]; then
- echo "Y"
- else
- local timeout=$UMOUNT_TIMEOUT
- while ! umount_nfs $N0 && [$timeout -ne 0] ; do
- timeout=$(( $timeout - 1 ))
- sleep 1
- done
- fi
-}
-
-function small_write () {
- dd if=/dev/zero of=$N0/test-small-write count=1 bs=1k 2>&1
- if [ $? -ne 0 ]; then
- echo "N"
- else
- echo "Y"
- fi
-}
-
-function bg_write () {
- dd if=/dev/zero of=$N0/test-bg-write count=1 bs=1k &
- BG_WRITE_PID=$!
-}
-
-function big_write() {
- dd if=/dev/zero of=$N0/test-big-write count=500 bs=1024k
-}
-
-function create () {
- touch $N0/create-test
-}
-
-function stat_nfs () {
- ls $N0/
-}
-
-# Restarts the NFS server
-function restart_nfs () {
- local NFS_PID=$(cat $GLUSTERD_PIDFILEDIR/nfs/nfs.pid)
-
- # kill the NFS-server if it is running
- while ps -q ${NFS_PID} 2>&1 > /dev/null; do
- kill ${NFS_PID}
- sleep 0.5
- done
-
- # start-force starts the NFS-server again
- $CLI vol start patchy force
-}
-
-setup_cluster
-
-# run preliminary tests
-TEST $CLI vol set $V0 nfs.disable off
-TEST $CLI vol start $V0
-
-# Get NFS state directory
-NFSDIR=$( $CLI volume get patchy nfs.mount-rmtab | \
- awk '/^nfs.mount-rmtab/{print $2}' | \
- xargs dirname )
-
-## Wait for volume to register with rpc.mountd
-EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
-
-## NFS server starts with auth disabled
-## Do some tests to verify that.
-
-EXPECT "Y" check_mount_success $V0
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-## Disallow host
-TEST export_deny_this_host
-TEST netgroup_deny_this_host
-
-## Technically deauthorized this host, but since auth is disabled we should be
-## able to do mounts, writes, etc.
-EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0
-EXPECT "Y" small_write
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-## Reauthorize this host
-export_allow_this_host
-netgroup_allow_this_host
-
-## Restart NFS with auth enabled
-$CLI vol stop $V0
-TEST $CLI vol set $V0 nfs.exports-auth-enable on
-$CLI vol start $V0
-EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
-
-## Mount NFS
-EXPECT "Y" check_mount_success $V0
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-## Mount NFS using the IPv6 export
-export_allow_this_host_ipv6
-EXPECT "Y" check_mount_success $V0
-
-## Disallow host
-TEST export_deny_this_host
-TEST netgroup_deny_this_host
-
-## Writes should not be allowed, host is not authorized
-EXPECT_WITHIN $AUTH_REFRESH_INTERVAL "N" small_write
-
-## Unmount so we can test mount
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-## Subsequent mounts should not be allowed, host is not authorized
-EXPECT "Y" check_mount_failure $V0
-
-## Reauthorize host
-TEST export_allow_this_host
-TEST netgroup_allow_this_host
-
-EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-## Allow host in netgroups but not in exports, host should be allowed
-TEST export_deny_this_host
-TEST netgroup_allow_this_host
-
-# wait for the mount authentication to rebuild
-sleep $[$AUTH_REFRESH_INTERVAL + 1]
-
-EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0
-EXPECT "Y" small_write
-TEST big_write
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-## Allow host in exports but not in netgroups, host should be allowed
-TEST export_allow_this_host
-TEST netgroup_deny_this_host
-
-EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-## Finally, reauth the host in export and netgroup, test mount & write
-TEST export_allow_this_host_l1
-TEST netgroup_allow_this_host
-
-EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0L1
-EXPECT "Y" small_write
-
-## Failover test: Restarting NFS and then doing a write should pass
-bg_write
-TEST restart_nfs
-EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
-
-TEST wait $BG_WRITE_PID
-EXPECT "Y" small_write
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-## Test deep mounts
-EXPECT "Y" check_mount_success $V0L1
-EXPECT "Y" small_write
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-TEST export_allow_this_host_ro
-TEST netgroup_deny_this_host
-
-## Restart the nfs server to avoid spurious failure(BZ1256352)
-restart_nfs
-EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
-
-EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0
-EXPECT "N" small_write # Writes should not be allowed
-TEST ! create # Create should not be allowed
-TEST stat_nfs # Stat should be allowed
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-TEST export_deny_this_host
-TEST netgroup_deny_this_host
-TEST export_allow_this_host_l1 # Allow this host at L1
-
-EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_failure $V0 #V0 shouldn't be allowed
-EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0L1 #V0L1 should be
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-## Test wildcard hosts
-TEST export_allow_wildcard
-
-# the $MY_MOUNT_TIMEOUT might not be long enough? restart should do
-restart_nfs
-EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
-
-EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0
-EXPECT_WITHIN $AUTH_REFRESH_INTERVAL "Y" small_write
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-## Test if path is parsed correctly
-## by mounting host:vol/ instead of host:vol
-EXPECT "Y" check_mount_success $V0/
-EXPECT "Y" small_write
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-TEST export_allow_this_host_with_slash
-
-EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0
-EXPECT "Y" small_write
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-EXPECT "Y" check_mount_success $V0/
-EXPECT "Y" small_write
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-
-## Turn off exports authentication
-$CLI vol stop $V0
-TEST $CLI vol set $V0 nfs.exports-auth-enable off
-$CLI vol start $V0
-EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
-
-TEST export_deny_this_host # Deny the host
-TEST netgroup_deny_this_host
-
-EXPECT_WITHIN $MY_MOUNT_TIMEOUT "Y" check_mount_success $V0 # Do a mount & test
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
-
-## Turn back on the exports authentication
-$CLI vol stop $V0
-TEST $CLI vol set $V0 nfs.exports-auth-enable on
-$CLI vol start $V0
-EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
-
-## Do a simple test to set the refresh time to 20 seconds
-TEST $CLI vol set $V0 nfs.auth-refresh-interval-sec 20
-
-## Do a simple test to see if the volume option exists
-TEST $CLI vol set $V0 nfs.auth-cache-ttl-sec 400
-
-## Finish up
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-cleanup
diff --git a/tests/basic/mount-options.disabled b/tests/basic/mount-options.disabled
index 2373e4461ce..a04c8686276 100644
--- a/tests/basic/mount-options.disabled
+++ b/tests/basic/mount-options.disabled
@@ -127,6 +127,9 @@ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --volfile-server-transport=ib-verbs
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --auto-invalidation=off
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
TEST ! glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --volfile-server-port=socket
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --volume-name=$V0
diff --git a/tests/basic/mount.t b/tests/basic/mount.t
index 52e760d048d..3a3d7cc9d8d 100755
--- a/tests/basic/mount.t
+++ b/tests/basic/mount.t
@@ -3,15 +3,16 @@
. $(dirname $0)/../include.rc
. $(dirname $0)/../nfs.rc
-cleanup;
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+cleanup;
## Start and create a volume
TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
-TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6,7,8,9};
TEST $CLI volume set $V0 nfs.disable false
function volinfo_field()
@@ -68,6 +69,9 @@ TEST rm -f $N0/newfile;
TEST ! stat $M0/newfile;
TEST ! stat $M1/newfile;
+# No need to check for status here right now
+$(dirname $0)/rpc-coverage.sh $N0 >/dev/null
+
## Before killing daemon to avoid deadlocks
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
diff --git a/tests/basic/multiple-volume-shd-mux.t b/tests/basic/multiple-volume-shd-mux.t
new file mode 100644
index 00000000000..d7cfbaec85f
--- /dev/null
+++ b/tests/basic/multiple-volume-shd-mux.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=16
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume start $V0
+
+shd_pid=$(get_shd_mux_pid $V0)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+for i in $(seq 1 3); do
+ TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_afr$i
+ TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_ec$i
+done
+
+#Check that the thread count becomes number of volumes * number of ec subvolumes (3*6=18)
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^18$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+#Check that the thread count becomes number of volumes * number of afr subvolumes (4*6=24)
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^24$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+#Delete the volumes
+for i in $(seq 1 3); do
+ TEST $CLI volume stop ${V0}_afr$i
+ TEST $CLI volume stop ${V0}_ec$i
+ TEST $CLI volume delete ${V0}_afr$i
+ TEST $CLI volume delete ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/basic/namespace.t b/tests/basic/namespace.t
index 25a6b98b440..d1bbe7eea29 100644
--- a/tests/basic/namespace.t
+++ b/tests/basic/namespace.t
@@ -4,6 +4,8 @@
. $(dirname $0)/../volume.rc
. $(dirname $0)/../nfs.rc
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
# These hashes are a result of calling SuperFastHash
# on the corresponding folder names.
NAMESPACE_HASH=28153613
diff --git a/tests/basic/nl-cache.t b/tests/basic/nl-cache.t
index 141fb75d5c8..90c778c8a88 100755
--- a/tests/basic/nl-cache.t
+++ b/tests/basic/nl-cache.t
@@ -64,5 +64,35 @@ TEST rm $M0/dir1/file_link
TEST rmdir $M0/dir1/dir2
TEST rmdir $M0/dir1
+#Check mknod
+TEST ! ls -l $M0/dir
+TEST mkdir $M0/dir
+TEST mknod -m 0666 $M0/dir/block b 4 5
+TEST mknod -m 0666 $M0/dir/char c 1 5
+TEST mknod -m 0666 $M0/dir/fifo p
+TEST rm $M0/dir/block
+TEST rm $M0/dir/char
+TEST rm $M0/dir/fifo
+
+#Check getxattr
+TEST touch $M0/file1
+TEST getfattr -d -m. -e hex $M0/file1
+TEST getfattr -n "glusterfs.get_real_filename:file1" $M0;
+TEST getfattr -n "glusterfs.get_real_filename:FILE1" $M0;
+TEST ! getfattr -n "glusterfs.get_real_filename:FILE2" $M0;
+
+#Check statedump
+TEST generate_mount_statedump $V0 $M0
+TEST cleanup_mount_statedump $V0
+
+#Check reconfigure
+TEST $CLI volume reset $V0 nl-cache-timeout
+TEST $CLI volume reset $V0 nl-cache-positive-entry
+TEST $CLI volume reset $V0 nl-cache-limit
+TEST $CLI volume reset $V0 nl-cache-pass-through
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
cleanup;
#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/basic/nufa.t b/tests/basic/nufa.t
index 1d74d376b7d..cb09fc5bbbf 100644
--- a/tests/basic/nufa.t
+++ b/tests/basic/nufa.t
@@ -4,18 +4,20 @@
. $(dirname $0)/../volume.rc
. $(dirname $0)/../nfs.rc
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
cleanup;
TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
-TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
TEST $CLI volume set $V0 nfs.disable false
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
-EXPECT '8' brick_count $V0
+EXPECT '6' brick_count $V0
TEST $CLI volume set $V0 nufa on;
diff --git a/tests/basic/op_errnos.t b/tests/basic/op_errnos.t
index 8b16267cb50..9c48d7a02ad 100755
--- a/tests/basic/op_errnos.t
+++ b/tests/basic/op_errnos.t
@@ -17,8 +17,6 @@ TEST setup_lvm 1
TEST $CLI volume create $V0 $H0:$L1
TEST $CLI volume start $V0
-TEST $CLI volume create $V1 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
-TEST $CLI volume start $V1
EXPECT 0 get-op_errno-xml "snapshot create snap1 $V0 no-timestamp"
EXPECT 30806 get-op_errno-xml "snapshot create snap1 imaginary_volume"
@@ -29,10 +27,8 @@ EXPECT 30810 get-op_errno-xml "snapshot create snap1 $V0"
TEST $CLI volume start $V0
EXPECT 30811 get-op_errno-xml "snapshot clone $V0 snap1"
EXPECT 30812 get-op_errno-xml "snapshot create snap1 $V0 no-timestamp"
-EXPECT 30815 get-op_errno-xml "snapshot create snap2 $V1 no-timestamp"
EXPECT 0 get-op_errno-xml "snapshot delete snap1"
TEST $CLI volume stop $V0
-TEST $CLI volume stop $V1
cleanup;
diff --git a/tests/basic/open-behind/open-behind.t b/tests/basic/open-behind/open-behind.t
new file mode 100644
index 00000000000..5e865d602e2
--- /dev/null
+++ b/tests/basic/open-behind/open-behind.t
@@ -0,0 +1,183 @@
+#!/bin/bash
+
+WD="$(dirname "${0}")"
+
+. ${WD}/../../include.rc
+. ${WD}/../../volume.rc
+
+function assign() {
+ local _assign_var="${1}"
+ local _assign_value="${2}"
+
+ printf -v "${_assign_var}" "%s" "${_assign_value}"
+}
+
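+# Create an anonymous pipe: make a temporary fifo, open it read-write, unlink
+# it, and return the fd in the variable named by $1.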
+function pipe_create() {
+ local _pipe_create_var="${1}"
+ local _pipe_create_name
+ local _pipe_create_fd
+
+ _pipe_create_name="$(mktemp -u)"
+ mkfifo "${_pipe_create_name}"
+ exec {_pipe_create_fd}<>"${_pipe_create_name}"
+ rm "${_pipe_create_name}"
+
+ assign "${_pipe_create_var}" "${_pipe_create_fd}"
+}
+
+function pipe_close() {
+ local _pipe_close_fd="${!1}"
+
+ exec {_pipe_close_fd}>&-
+}
+
+function tester_start() {
+ declare -ag tester
+ local tester_in
+ local tester_out
+
+ pipe_create tester_in
+ pipe_create tester_out
+
+ ${WD}/tester <&${tester_in} >&${tester_out} &
+
+ tester=("$!" "${tester_in}" "${tester_out}")
+}
+
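+# Send one command line to the tester helper and read its reply; succeeds
+# only when the reply starts with "OK".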
+function tester_send() {
+ declare -ag tester
+ local tester_res
+ local tester_extra
+
+ echo "${*}" >&${tester[1]}
+
+ read -t 3 -u ${tester[2]} tester_res tester_extra
+ echo "${tester_res} ${tester_extra}"
+ if [[ "${tester_res}" == "OK" ]]; then
+ return 0
+ fi
+
+ return 1
+}
+
+function tester_stop() {
+ declare -ag tester
+ local tester_res
+
+ tester_send "quit"
+
+ tester_res=0
+ if ! wait ${tester[0]}; then
+ tester_res=$?
+ fi
+
+ unset tester
+
+ return ${tester_res}
+}
+
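+# Count how many fds of the brick process currently refer to the same inode
+# as the given file.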
+function count_open() {
+ local file="$(realpath "${B0}/${V0}/${1}")"
+ local count="0"
+ local inode
+ local ref
+
+ inode="$(stat -c %i "${file}")"
+
+ for fd in /proc/${BRICK_PID}/fd/*; do
+ ref="$(readlink "${fd}")"
+ if [[ "${ref}" == "${B0}/${V0}/"* ]]; then
+ if [[ "$(stat -c %i "${ref}")" == "${inode}" ]]; then
+ count="$((${count} + 1))"
+ fi
+ fi
+ done
+
+ echo "${count}"
+}
+
+cleanup
+
+TEST build_tester ${WD}/tester.c ${WD}/tester-fd.c
+
+TEST glusterd
+TEST pidof glusterd
+TEST ${CLI} volume create ${V0} ${H0}:${B0}/${V0}
+TEST ${CLI} volume set ${V0} flush-behind off
+TEST ${CLI} volume set ${V0} write-behind off
+TEST ${CLI} volume set ${V0} quick-read off
+TEST ${CLI} volume set ${V0} stat-prefetch on
+TEST ${CLI} volume set ${V0} io-cache off
+TEST ${CLI} volume set ${V0} open-behind on
+TEST ${CLI} volume set ${V0} lazy-open off
+TEST ${CLI} volume set ${V0} read-after-open off
+TEST ${CLI} volume start ${V0}
+
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+BRICK_PID="$(get_brick_pid ${V0} ${H0} ${B0}/${V0})"
+
+TEST touch "${M0}/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+TEST tester_start
+
+TEST tester_send fd open 0 "${M0}/test"
+EXPECT_WITHIN 5 "1" count_open "/test"
+TEST tester_send fd close 0
+EXPECT_WITHIN 5 "0" count_open "/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ${CLI} volume set ${V0} lazy-open on
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+TEST tester_send fd open 0 "${M0}/test"
+sleep 2
+EXPECT "0" count_open "/test"
+TEST tester_send fd write 0 "test"
+EXPECT "1" count_open "/test"
+TEST tester_send fd close 0
+EXPECT_WITHIN 5 "0" count_open "/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+TEST tester_send fd open 0 "${M0}/test"
+EXPECT "0" count_open "/test"
+EXPECT "test" tester_send fd read 0 64
+# Even though read-after-open is disabled, use-anonymous-fd is also disabled,
+# so reads need to open the file first.
+EXPECT "1" count_open "/test"
+TEST tester_send fd close 0
+EXPECT "0" count_open "/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+TEST tester_send fd open 0 "${M0}/test"
+EXPECT "0" count_open "/test"
+TEST tester_send fd open 1 "${M0}/test"
+EXPECT "2" count_open "/test"
+TEST tester_send fd close 0
+EXPECT_WITHIN 5 "1" count_open "/test"
+TEST tester_send fd close 1
+EXPECT_WITHIN 5 "0" count_open "/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ${CLI} volume set ${V0} read-after-open on
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+TEST tester_send fd open 0 "${M0}/test"
+EXPECT "0" count_open "/test"
+EXPECT "test" tester_send fd read 0 64
+EXPECT "1" count_open "/test"
+TEST tester_send fd close 0
+EXPECT_WITHIN 5 "0" count_open "/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST tester_stop
+
+cleanup
diff --git a/tests/basic/open-behind/tester-fd.c b/tests/basic/open-behind/tester-fd.c
new file mode 100644
index 00000000000..00f02bc5b0a
--- /dev/null
+++ b/tests/basic/open-behind/tester-fd.c
@@ -0,0 +1,99 @@
+/*
+ Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "tester.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+
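+/* Handlers for the "fd" command family used by open-behind.t: open, close,
+ * write and read on plain file descriptors. */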
+static int32_t
+fd_open(context_t *ctx, command_t *cmd)
+{
+ obj_t *obj;
+ int32_t fd;
+
+ obj = cmd->args[0].obj.ref;
+
+ fd = open(cmd->args[1].str.data, O_RDWR);
+ if (fd < 0) {
+ return error(errno, "open() failed");
+ }
+
+ obj->type = OBJ_TYPE_FD;
+ obj->fd = fd;
+
+ out_ok("%d", fd);
+
+ return 0;
+}
+
+static int32_t
+fd_close(context_t *ctx, command_t *cmd)
+{
+ obj_t *obj;
+
+ obj = cmd->args[0].obj.ref;
+ obj->type = OBJ_TYPE_NONE;
+
+ if (close(obj->fd) != 0) {
+ return error(errno, "close() failed");
+ }
+
+ out_ok();
+
+ return 0;
+}
+
+static int32_t
+fd_write(context_t *ctx, command_t *cmd)
+{
+ ssize_t len, ret;
+
+ len = strlen(cmd->args[1].str.data);
+ ret = write(cmd->args[0].obj.ref->fd, cmd->args[1].str.data, len);
+ if (ret < 0) {
+ return error(errno, "write() failed");
+ }
+
+ out_ok("%zd", ret);
+
+ return 0;
+}
+
+static int32_t
+fd_read(context_t *ctx, command_t *cmd)
+{
+ char data[cmd->args[1].num.value + 1];
+ ssize_t ret;
+
+ ret = read(cmd->args[0].obj.ref->fd, data, cmd->args[1].num.value);
+ if (ret < 0) {
+ return error(errno, "read() failed");
+ }
+
+ data[ret] = 0;
+
+ out_ok("%zd %s", ret, data);
+
+ return 0;
+}
+
+command_t fd_commands[] = {
+ {"open", fd_open, CMD_ARGS(ARG_VAL(OBJ_TYPE_NONE), ARG_STR(1024))},
+ {"close", fd_close, CMD_ARGS(ARG_VAL(OBJ_TYPE_FD))},
+ {"write", fd_write, CMD_ARGS(ARG_VAL(OBJ_TYPE_FD), ARG_STR(1024))},
+ {"read", fd_read, CMD_ARGS(ARG_VAL(OBJ_TYPE_FD), ARG_NUM(0, 1024))},
+ CMD_END};
diff --git a/tests/basic/open-behind/tester.c b/tests/basic/open-behind/tester.c
new file mode 100644
index 00000000000..b2da71c8385
--- /dev/null
+++ b/tests/basic/open-behind/tester.c
@@ -0,0 +1,444 @@
+/*
+ Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "tester.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+
+static void *
+mem_alloc(size_t size)
+{
+ void *ptr;
+
+ ptr = malloc(size);
+ if (ptr == NULL) {
+ error(ENOMEM, "Failed to allocate memory (%zu bytes)", size);
+ }
+
+ return ptr;
+}
+
+static void
+mem_free(void *ptr)
+{
+ free(ptr);
+}
+
+static bool
+buffer_create(context_t *ctx, size_t size)
+{
+ ctx->buffer.base = mem_alloc(size);
+ if (ctx->buffer.base == NULL) {
+ return false;
+ }
+
+ ctx->buffer.size = size;
+ ctx->buffer.len = 0;
+ ctx->buffer.pos = 0;
+
+ return true;
+}
+
+static void
+buffer_destroy(context_t *ctx)
+{
+ mem_free(ctx->buffer.base);
+ ctx->buffer.size = 0;
+ ctx->buffer.len = 0;
+}
+
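+/* Return the next character from stdin, refilling the buffer when it is
+ * exhausted. Returns 0 on EOF and a negative value on error. */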
+static int32_t
+buffer_get(context_t *ctx)
+{
+ ssize_t len;
+
+ if (ctx->buffer.pos >= ctx->buffer.len) {
+ len = read(0, ctx->buffer.base, ctx->buffer.size);
+ if (len < 0) {
+ return error(errno, "read() failed");
+ }
+ if (len == 0) {
+ return 0;
+ }
+
+ ctx->buffer.len = len;
+ ctx->buffer.pos = 0;
+ }
+
+ return ctx->buffer.base[ctx->buffer.pos++];
+}
+
+static int32_t
+str_skip_spaces(context_t *ctx, int32_t current)
+{
+ while ((current > 0) && (current != '\n') && isspace(current)) {
+ current = buffer_get(ctx);
+ }
+
+ return current;
+}
+
+static int32_t
+str_token(context_t *ctx, char *buffer, uint32_t size, int32_t current)
+{
+ uint32_t len;
+
+ current = str_skip_spaces(ctx, current);
+
+ len = 0;
+ while ((size > 0) && (current > 0) && (current != '\n') &&
+ !isspace(current)) {
+ len++;
+ *buffer++ = current;
+ size--;
+ current = buffer_get(ctx);
+ }
+
+ if (len == 0) {
+ return error(ENODATA, "Expecting a token");
+ }
+
+ if (size == 0) {
+ return error(ENOBUFS, "Token too long");
+ }
+
+ *buffer = 0;
+
+ return current;
+}
+
+static int32_t
+str_number(context_t *ctx, uint64_t min, uint64_t max, uint64_t *value,
+ int32_t current)
+{
+ char text[32], *ptr;
+ uint64_t num;
+
+ current = str_token(ctx, text, sizeof(text), current);
+ if (current > 0) {
+ num = strtoul(text, &ptr, 0);
+ if ((*ptr != 0) || (num < min) || (num > max)) {
+ return error(ERANGE, "Invalid number");
+ }
+ *value = num;
+ }
+
+ return current;
+}
+
+static int32_t
+str_eol(context_t *ctx, int32_t current)
+{
+ current = str_skip_spaces(ctx, current);
+ if (current != '\n') {
+ return error(EINVAL, "Expecting end of command");
+ }
+
+ return current;
+}
+
+static void
+str_skip(context_t *ctx, int32_t current)
+{
+ while ((current > 0) && (current != '\n')) {
+ current = buffer_get(ctx);
+ }
+}
+
+static int32_t
+cmd_parse_obj(context_t *ctx, arg_t *arg, int32_t current)
+{
+ obj_t *obj;
+ uint64_t id;
+
+ current = str_number(ctx, 0, ctx->obj_count, &id, current);
+ if (current <= 0) {
+ return current;
+ }
+
+ obj = &ctx->objs[id];
+ if (obj->type != arg->obj.type) {
+ if (obj->type != OBJ_TYPE_NONE) {
+ return error(EBUSY, "Object is in use");
+ }
+ return error(ENOENT, "Object is not defined");
+ }
+
+ arg->obj.ref = obj;
+
+ return current;
+}
+
+static int32_t
+cmd_parse_num(context_t *ctx, arg_t *arg, int32_t current)
+{
+ return str_number(ctx, arg->num.min, arg->num.max, &arg->num.value,
+ current);
+}
+
+static int32_t
+cmd_parse_str(context_t *ctx, arg_t *arg, int32_t current)
+{
+ return str_token(ctx, arg->str.data, arg->str.size, current);
+}
+
+static int32_t
+cmd_parse_args(context_t *ctx, command_t *cmd, int32_t current)
+{
+ arg_t *arg;
+
+ for (arg = cmd->args; arg->type != ARG_TYPE_NONE; arg++) {
+ switch (arg->type) {
+ case ARG_TYPE_OBJ:
+ current = cmd_parse_obj(ctx, arg, current);
+ break;
+ case ARG_TYPE_NUM:
+ current = cmd_parse_num(ctx, arg, current);
+ break;
+ case ARG_TYPE_STR:
+ current = cmd_parse_str(ctx, arg, current);
+ break;
+ default:
+ return error(EINVAL, "Unknown argument type");
+ }
+ }
+
+ if (current < 0) {
+ return current;
+ }
+
+ current = str_eol(ctx, current);
+ if (current <= 0) {
+ return error(EINVAL, "Syntax error");
+ }
+
+ return cmd->handler(ctx, cmd);
+}
+
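+/* Read one command from stdin, look it up in the (possibly nested) command
+ * table and dispatch it to its handler. */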
+static int32_t
+cmd_parse(context_t *ctx, command_t *cmds)
+{
+ char text[32];
+ command_t *cmd;
+ int32_t current;
+
+ cmd = cmds;
+ do {
+ current = str_token(ctx, text, sizeof(text), buffer_get(ctx));
+ if (current <= 0) {
+ return current;
+ }
+
+ while (cmd->name != NULL) {
+ if (strcmp(cmd->name, text) == 0) {
+ if (cmd->handler != NULL) {
+ return cmd_parse_args(ctx, cmd, current);
+ }
+ cmd = cmd->cmds;
+ break;
+ }
+ cmd++;
+ }
+ } while (cmd->name != NULL);
+
+ str_skip(ctx, current);
+
+ return error(ENOTSUP, "Unknown command");
+}
+
+static void
+cmd_fini(context_t *ctx, command_t *cmds)
+{
+ command_t *cmd;
+ arg_t *arg;
+
+ for (cmd = cmds; cmd->name != NULL; cmd++) {
+ if (cmd->handler == NULL) {
+ cmd_fini(ctx, cmd->cmds);
+ } else {
+ for (arg = cmd->args; arg->type != ARG_TYPE_NONE; arg++) {
+ switch (arg->type) {
+ case ARG_TYPE_STR:
+ mem_free(arg->str.data);
+ arg->str.data = NULL;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+}
+
+static bool
+cmd_init(context_t *ctx, command_t *cmds)
+{
+ command_t *cmd;
+ arg_t *arg;
+
+ for (cmd = cmds; cmd->name != NULL; cmd++) {
+ if (cmd->handler == NULL) {
+ if (!cmd_init(ctx, cmd->cmds)) {
+ return false;
+ }
+ } else {
+ for (arg = cmd->args; arg->type != ARG_TYPE_NONE; arg++) {
+ switch (arg->type) {
+ case ARG_TYPE_STR:
+ arg->str.data = mem_alloc(arg->str.size);
+ if (arg->str.data == NULL) {
+ return false;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+static bool
+objs_create(context_t *ctx, uint32_t count)
+{
+ uint32_t i;
+
+ ctx->objs = mem_alloc(sizeof(obj_t) * count);
+ if (ctx->objs == NULL) {
+ return false;
+ }
+ ctx->obj_count = count;
+
+ for (i = 0; i < count; i++) {
+ ctx->objs[i].type = OBJ_TYPE_NONE;
+ }
+
+ return true;
+}
+
+static int32_t
+objs_destroy(context_t *ctx)
+{
+ uint32_t i;
+ int32_t err;
+
+ err = 0;
+ for (i = 0; i < ctx->obj_count; i++) {
+ if (ctx->objs[i].type != OBJ_TYPE_NONE) {
+ err = error(ENOTEMPTY, "Objects not destroyed");
+ break;
+ }
+ }
+
+ mem_free(ctx->objs);
+ ctx->objs = NULL;
+ ctx->obj_count = 0;
+
+ return err;
+}
+
+static context_t *
+init(size_t size, uint32_t objs, command_t *cmds)
+{
+ context_t *ctx;
+
+ ctx = mem_alloc(sizeof(context_t));
+ if (ctx == NULL) {
+ goto failed;
+ }
+
+ if (!buffer_create(ctx, size)) {
+ goto failed_ctx;
+ }
+
+ if (!objs_create(ctx, objs)) {
+ goto failed_buffer;
+ }
+
+ if (!cmd_init(ctx, cmds)) {
+ goto failed_objs;
+ }
+
+ ctx->active = true;
+
+ return ctx;
+
+failed_objs:
+ cmd_fini(ctx, cmds);
+ objs_destroy(ctx);
+failed_buffer:
+ buffer_destroy(ctx);
+failed_ctx:
+ mem_free(ctx);
+failed:
+ return NULL;
+}
+
+static int32_t
+fini(context_t *ctx, command_t *cmds)
+{
+ int32_t ret;
+
+ cmd_fini(ctx, cmds);
+ buffer_destroy(ctx);
+
+ ret = objs_destroy(ctx);
+
+ ctx->active = false;
+
+ return ret;
+}
+
+static int32_t
+exec_quit(context_t *ctx, command_t *cmd)
+{
+ ctx->active = false;
+
+ return 0;
+}
+
+static command_t commands[] = {{"fd", NULL, CMD_SUB(fd_commands)},
+ {"quit", exec_quit, CMD_ARGS()},
+ CMD_END};
+
+int32_t
+main(int32_t argc, char *argv[])
+{
+ context_t *ctx;
+ int32_t res;
+
+ ctx = init(1024, 16, commands);
+ if (ctx == NULL) {
+ return 1;
+ }
+
+ do {
+ res = cmd_parse(ctx, commands);
+ if (res < 0) {
+ out_err(-res);
+ }
+ } while (ctx->active);
+
+ res = fini(ctx, commands);
+ if (res >= 0) {
+ out_ok();
+ return 0;
+ }
+
+ out_err(-res);
+
+ return 1;
+}
diff --git a/tests/basic/open-behind/tester.h b/tests/basic/open-behind/tester.h
new file mode 100644
index 00000000000..64e940c78fc
--- /dev/null
+++ b/tests/basic/open-behind/tester.h
@@ -0,0 +1,145 @@
+/*
+ Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __TESTER_H__
+#define __TESTER_H__
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <stdbool.h>
+
+enum _obj_type;
+typedef enum _obj_type obj_type_t;
+
+enum _arg_type;
+typedef enum _arg_type arg_type_t;
+
+struct _buffer;
+typedef struct _buffer buffer_t;
+
+struct _obj;
+typedef struct _obj obj_t;
+
+struct _context;
+typedef struct _context context_t;
+
+struct _arg;
+typedef struct _arg arg_t;
+
+struct _command;
+typedef struct _command command_t;
+
+enum _obj_type { OBJ_TYPE_NONE, OBJ_TYPE_FD };
+
+enum _arg_type { ARG_TYPE_NONE, ARG_TYPE_OBJ, ARG_TYPE_NUM, ARG_TYPE_STR };
+
+struct _buffer {
+ char *base;
+ uint32_t size;
+ uint32_t len;
+ uint32_t pos;
+};
+
+struct _obj {
+ obj_type_t type;
+ union {
+ int32_t fd;
+ };
+};
+
+struct _context {
+ obj_t *objs;
+ buffer_t buffer;
+ uint32_t obj_count;
+ bool active;
+};
+
+struct _arg {
+ arg_type_t type;
+ union {
+ struct {
+ obj_type_t type;
+ obj_t *ref;
+ } obj;
+ struct {
+ uint64_t value;
+ uint64_t min;
+ uint64_t max;
+ } num;
+ struct {
+ uint32_t size;
+ char *data;
+ } str;
+ };
+};
+
+struct _command {
+ const char *name;
+ int32_t (*handler)(context_t *ctx, command_t *cmd);
+ union {
+ arg_t *args;
+ command_t *cmds;
+ };
+};
+
+#define msg(_stream, _fmt, _args...) \
+ do { \
+ fprintf(_stream, _fmt "\n", ##_args); \
+ fflush(_stream); \
+ } while (0)
+
+#define msg_out(_fmt, _args...) msg(stdout, _fmt, ##_args)
+#define msg_err(_err, _fmt, _args...) \
+ ({ \
+ int32_t __msg_err = (_err); \
+ msg(stderr, "[%4u:%-15s] " _fmt, __LINE__, __FUNCTION__, __msg_err, \
+ ##_args); \
+ -__msg_err; \
+ })
+
+#define error(_err, _fmt, _args...) msg_err(_err, "E(%4d) " _fmt, ##_args)
+#define warn(_err, _fmt, _args...) msg_err(_err, "W(%4d) " _fmt, ##_args)
+#define info(_err, _fmt, _args...) msg_err(_err, "I(%4d) " _fmt, ##_args)
+
+#define out_ok(_args...) msg_out("OK " _args)
+#define out_err(_err) msg_out("ERR %d", _err)
+
+#define ARG_END \
+ { \
+ ARG_TYPE_NONE \
+ }
+
+#define CMD_ARGS1(_x, _args...) \
+ .args = (arg_t[]) { _args }
+#define CMD_ARGS(_args...) CMD_ARGS1(, ##_args, ARG_END)
+
+#define CMD_SUB(_cmds) .cmds = _cmds
+
+#define CMD_END \
+ { \
+ NULL, NULL, CMD_SUB(NULL) \
+ }
+
+#define ARG_VAL(_type) \
+ { \
+ ARG_TYPE_OBJ, .obj = {.type = _type } \
+ }
+#define ARG_NUM(_min, _max) \
+ { \
+ ARG_TYPE_NUM, .num = {.min = _min, .max = _max } \
+ }
+#define ARG_STR(_size) \
+ { \
+ ARG_TYPE_STR, .str = {.size = _size } \
+ }
+
+extern command_t fd_commands[];
+
+#endif /* __TESTER_H__ */
\ No newline at end of file
diff --git a/tests/basic/playground/template-xlator-sanity.t b/tests/basic/playground/template-xlator-sanity.t
index c3090dae5a8..1c665502bfe 100755
--- a/tests/basic/playground/template-xlator-sanity.t
+++ b/tests/basic/playground/template-xlator-sanity.t
@@ -15,6 +15,7 @@ end-volume
volume template
type playground/template
subvolumes posix
+ option dummy 13
end-volume
EOF
@@ -22,6 +23,21 @@ TEST glusterfs -f $B0/template.vol $M0
TEST $(dirname $0)/../rpc-coverage.sh --no-locks $M0
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+# Take statedump to get maximum code coverage
+pid=$(ps auxww | grep glusterfs | grep -E "template.vol" | awk '{print $2}' | head -1)
+
+TEST generate_statedump $pid
+
+# For monitor output
+kill -USR2 $pid
+
+# Handle SIGHUP and reconfigure
+sed -i -e 's/dummy 13/dummy 42/g' $B0/template.vol
+kill -HUP $pid
+
+# for calling 'fini()'
+kill -TERM $pid
+
+force_umount $M0
cleanup;
diff --git a/tests/basic/posix/shared-statfs.t b/tests/basic/posix/shared-statfs.t
index 33439562ec9..0e4a1bb409f 100644
--- a/tests/basic/posix/shared-statfs.t
+++ b/tests/basic/posix/shared-statfs.t
@@ -20,15 +20,18 @@ TEST mkdir -p $B0/${V0}1 $B0/${V0}2
TEST MOUNT_LOOP $LO1 $B0/${V0}1
TEST MOUNT_LOOP $LO2 $B0/${V0}2
+total_brick_blocks=$(df -P $B0/${V0}1 $B0/${V0}2 | tail -2 | awk '{sum = sum+$2}END{print sum}')
+#Account for rounding error
+brick_blocks_two_percent_less=$((total_brick_blocks*98/100))
# Create a subdir in mountpoint and use that for volume.
TEST $CLI volume create $V0 $H0:$B0/${V0}1/1 $H0:$B0/${V0}2/1;
TEST $CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" online_brick_count
TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
-total_space=$(df -P $M0 | tail -1 | awk '{ print $2}')
+total_mount_blocks=$(df -P $M0 | tail -1 | awk '{ print $2}')
# Keeping the size less than 200M mainly because XFS will use
# some storage in brick to keep its own metadata.
-TEST [ $total_space -gt 194000 -a $total_space -lt 200000 ]
+TEST [ $total_mount_blocks -gt $brick_blocks_two_percent_less -a $total_mount_blocks -lt 200000 ]
TEST force_umount $M0
@@ -41,8 +44,8 @@ TEST $CLI volume add-brick $V0 $H0:$B0/${V0}1/2 $H0:$B0/${V0}2/2 $H0:$B0/${V0}1/
TEST $CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "6" online_brick_count
TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
-total_space=$(df -P $M0 | tail -1 | awk '{ print $2}')
-TEST [ $total_space -gt 194000 -a $total_space -lt 200000 ]
+total_mount_blocks=$(df -P $M0 | tail -1 | awk '{ print $2}')
+TEST [ $total_mount_blocks -gt $brick_blocks_two_percent_less -a $total_mount_blocks -lt 200000 ]
TEST force_umount $M0
TEST $CLI volume stop $V0
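For reference, the 98% factor only needs to absorb filesystem metadata overhead and rounding: if df reports, say, 197000 1K-blocks across the two bricks, brick_blocks_two_percent_less works out to 193060 (197000*98/100), so the mounted volume's reported total must fall between 193060 and 200000 blocks for the checks above to pass.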
diff --git a/tests/basic/posix/zero-fill-enospace.c b/tests/basic/posix/zero-fill-enospace.c
index 1371ff59a5f..b1f142c6be9 100644
--- a/tests/basic/posix/zero-fill-enospace.c
+++ b/tests/basic/posix/zero-fill-enospace.c
@@ -1,4 +1,5 @@
#include <stdio.h>
+#include <stdlib.h>
#include <glusterfs/api/glfs.h>
#include <glusterfs/api/glfs-handles.h>
@@ -8,7 +9,7 @@ main(int argc, char *argv[])
glfs_t *fs = NULL;
glfs_fd_t *fd = NULL;
int ret = 1;
- int size = 0;
+ off_t size = 0;
if (argc != 6) {
fprintf(stderr,
@@ -45,12 +46,12 @@ main(int argc, char *argv[])
goto out;
}
- size = atoi(argv[5]);
+ size = strtol(argv[5], NULL, 10);
if (size < 0) {
fprintf(stderr, "Wrong size %s", argv[5]);
goto out;
}
- ret = glfs_zerofill(fd, 0, atoi(argv[5]));
+ ret = glfs_zerofill(fd, 0, size);
if (ret <= 0) {
fprintf(stderr, "glfs_zerofill: returned %d\n", ret);
goto out;
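Switching from atoi()/int to strtol()/off_t lets the test request zero-fill regions above 2 GiB on 64-bit builds. A minimal sketch of even stricter parsing, with overflow and trailing-garbage checks, using a hypothetical helper name that is not part of the patch:

    #include <errno.h>
    #include <stdlib.h>
    #include <sys/types.h>

    /* Hypothetical helper: returns -1 on any parse error. */
    static off_t
    parse_size(const char *s)
    {
        char *end = NULL;
        long long v;

        errno = 0;
        v = strtoll(s, &end, 10);
        if (errno != 0 || end == s || *end != '\0' || v < 0)
            return -1;
        return (off_t)v;
    }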
diff --git a/tests/basic/quick-read-with-upcall.t b/tests/basic/quick-read-with-upcall.t
index 318e93a1bf0..dfb751dfcdb 100644
--- a/tests/basic/quick-read-with-upcall.t
+++ b/tests/basic/quick-read-with-upcall.t
@@ -15,8 +15,8 @@ TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
TEST $CLI volume start $V0
# Mount FUSE without selinux:
-TEST glusterfs -s $H0 --volfile-id $V0 $M0;
-TEST glusterfs -s $H0 --volfile-id $V0 $M1;
+TEST glusterfs -s $H0 --volfile-id $V0 --direct-io-mode=enable $M0;
+TEST glusterfs -s $H0 --volfile-id $V0 --direct-io-mode=enable $M1;
D0="test-message0";
D1="test-message1";
@@ -38,11 +38,13 @@ EXPECT "$D1" cat $M0/test.txt
EXPECT "$D0" cat $M1/test.txt
sleep 1
+
+# TODO: The following check normally fails because $M1 may still serve stale cached data
EXPECT "$D1" cat $M1/test.txt
TEST $CLI volume set $V0 features.cache-invalidation on
-TEST $CLI volume set $V0 performance.qr-cache-timeout 60
-TEST $CLI volume set $V0 performance.md-cache-timeout 60
+TEST $CLI volume set $V0 performance.quick-read-cache-timeout 15
+TEST $CLI volume set $V0 performance.md-cache-timeout 15
TEST write_to "$M0/test1.txt" "$D0"
EXPECT "$D0" cat $M0/test1.txt
@@ -55,9 +57,10 @@ EXPECT "$D0" cat $M1/test1.txt
sleep 1
EXPECT "$D0" cat $M1/test1.txt
-sleep 60
+sleep 30
EXPECT "$D1" cat $M1/test1.txt
+TEST $CLI volume set $V0 performance.quick-read-cache-invalidation on
TEST $CLI volume set $V0 performance.cache-invalidation on
TEST write_to "$M0/test2.txt" "$D0"
diff --git a/tests/basic/quota-anon-fd-nfs.t b/tests/basic/quota-anon-fd-nfs.t
index 11fbe49c7ff..9e6675af6ec 100755
--- a/tests/basic/quota-anon-fd-nfs.t
+++ b/tests/basic/quota-anon-fd-nfs.t
@@ -5,6 +5,8 @@
. $(dirname $0)/../nfs.rc
. $(dirname $0)/../fileio.rc
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
cleanup;
QDD=$(dirname $0)/quota
diff --git a/tests/basic/quota-nfs.t b/tests/basic/quota-nfs.t
index 663a8da90ad..de94a950a7f 100755
--- a/tests/basic/quota-nfs.t
+++ b/tests/basic/quota-nfs.t
@@ -4,6 +4,8 @@
. $(dirname $0)/../volume.rc
. $(dirname $0)/../nfs.rc
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
cleanup;
QDD=$(dirname $0)/quota
# compile the test write program and run it
diff --git a/tests/basic/quota.t b/tests/basic/quota.t
index 7f8b21de6f8..46d1bafff84 100755
--- a/tests/basic/quota.t
+++ b/tests/basic/quota.t
@@ -6,6 +6,8 @@
. $(dirname $0)/../dht.rc
. $(dirname $0)/../nfs.rc
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
cleanup;
QDD=$(dirname $0)/quota
diff --git a/tests/basic/rpc-coverage.sh b/tests/basic/rpc-coverage.sh
index b7aaef0bcb5..6203f0ac7cb 100755
--- a/tests/basic/rpc-coverage.sh
+++ b/tests/basic/rpc-coverage.sh
@@ -419,9 +419,15 @@ function test_rmdir()
rm -rf $PFX || fail "rm -rf"
}
+function test_statvfs()
+{
+ df $DIR 2>&1 || fail "df"
+}
+
function run_tests()
{
+ test_statvfs;
test_mkdir;
test_create;
test_statfs;
@@ -436,15 +442,15 @@ function run_tests()
test_rename;
test_chmod;
test_chown;
- test_utimes;
- if $run_lock_tests; then
- test_locks;
- fi
test_readdir;
test_setxattr;
test_listxattr;
test_getxattr;
test_removexattr;
+ if [ "$run_lock_tests" = "1" ]; then
+ test_locks;
+ fi
+ test_utimes;
test_unlink;
test_rmdir;
}
diff --git a/tests/basic/rpc-coverage.t b/tests/basic/rpc-coverage.t
index a76ba7084eb..2c1bcd5a63a 100755
--- a/tests/basic/rpc-coverage.t
+++ b/tests/basic/rpc-coverage.t
@@ -9,11 +9,11 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6,7,8,9};
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
-EXPECT '8' brick_count $V0
+EXPECT '9' brick_count $V0
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
diff --git a/tests/basic/sdfs-sanity.t b/tests/basic/sdfs-sanity.t
index f25376c3cad..16d0bed866f 100644
--- a/tests/basic/sdfs-sanity.t
+++ b/tests/basic/sdfs-sanity.t
@@ -19,4 +19,10 @@ TEST $GFS -s $H0 --volfile-id $V0 $M1;
# create operations
TEST $(dirname $0)/rpc-coverage.sh $M1
+TEST cp $(dirname ${0})/gfapi/glfsxmp-coverage.c glfsxmp.c
+TEST build_tester ./glfsxmp.c -lgfapi
+TEST ./glfsxmp $V0 $H0
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
cleanup;
diff --git a/tests/basic/ec/seek.c b/tests/basic/seek.c
index 54fa6f463af..54fa6f463af 100644
--- a/tests/basic/ec/seek.c
+++ b/tests/basic/seek.c
diff --git a/tests/basic/shd-mux-afr.t b/tests/basic/shd-mux-afr.t
new file mode 100644
index 00000000000..cf300c148bb
--- /dev/null
+++ b/tests/basic/shd-mux-afr.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+shd_pid=$(get_shd_mux_pid $V0)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#Create one more volume
+TEST $CLI volume create ${V0}_1 replica 3 $H0:$B0/${V0}_1{0,1,2,3,4,5}
+TEST $CLI volume start ${V0}_1
+
+#Check whether the shd has multiplexed or not
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
+
+TEST $CLI volume set ${V0}_1 cluster.background-self-heal-count 0
+TEST $CLI volume set ${V0}_1 cluster.eager-lock off
+TEST $CLI volume set ${V0}_1 performance.flush-behind off
+TEST $GFS --volfile-id=/${V0}_1 --volfile-server=$H0 $M1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_10
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_14
+
+TEST touch $M0/foo{1..100}
+TEST touch $M1/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count ${V0}_1
+
+TEST $CLI volume start ${V0} force
+TEST $CLI volume start ${V0}_1 force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_1
+
+TEST rm -rf $M0/*
+TEST rm -rf $M1/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+#Stop the volume
+TEST $CLI volume stop ${V0}_1
+TEST $CLI volume delete ${V0}_1
+
+#Check that the stop succeeded and detached the volume without restarting shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+
+#Check that the thread count drops back to the earlier number after stopping
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+cleanup
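The expected count of 204 follows from the setup above: each of the 100 new files is indexed for heal on the two surviving bricks of its replica-3 set (200 entries), and the root directory is indexed on the two surviving bricks of each of the two replica sets (4 entries).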
diff --git a/tests/basic/shd-mux-ec.t b/tests/basic/shd-mux-ec.t
new file mode 100644
index 00000000000..ef4d65018d3
--- /dev/null
+++ b/tests/basic/shd-mux-ec.t
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+shd_pid=$(get_shd_mux_pid $V0)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#Now create an ec volume and check that mux works
+TEST $CLI volume create ${V0}_2 disperse 6 redundancy 2 $H0:$B0/${V0}_2{0,1,2,3,4,5}
+TEST $CLI volume start ${V0}_2
+
+#Check whether the shd has multiplexed or not
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
+
+TEST $CLI volume set ${V0}_2 cluster.background-self-heal-count 0
+TEST $CLI volume set ${V0}_2 cluster.eager-lock off
+TEST $CLI volume set ${V0}_2 performance.flush-behind off
+TEST $GFS --volfile-id=/${V0}_2 --volfile-server=$H0 $M1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_20
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_22
+
+TEST touch $M0/foo{1..100}
+TEST touch $M1/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^404$" get_pending_heal_count ${V0}_2
+
+TEST $CLI volume start ${V0} force
+TEST $CLI volume start ${V0}_2 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_2
+
+TEST rm -rf $M0/*
+TEST rm -rf $M1/*
+
+
+#Stop the volume
+TEST $CLI volume stop ${V0}_2
+TEST $CLI volume delete ${V0}_2
+
+#Check that the stop succeeded and detached the volume without restarting shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+
+#Check that the ec-related healer thread count drops to zero
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+#Check that the thread count drops back to the earlier number after stopping
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
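Here the replica volume again yields 204 pending entries, while the 404 for the disperse volume is 100 files indexed on each of its four surviving bricks plus the root directory on those same four bricks.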
diff --git a/tests/basic/stats-dump.t b/tests/basic/stats-dump.t
index 054df2e636f..ed73fd1d14a 100644
--- a/tests/basic/stats-dump.t
+++ b/tests/basic/stats-dump.t
@@ -4,6 +4,8 @@
. $(dirname $0)/../volume.rc
. $(dirname $0)/../nfs.rc
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
cleanup;
TEST glusterd
diff --git a/tests/basic/tier/bug-1214222-directories_missing_after_attach_tier.t b/tests/basic/tier/bug-1214222-directories_missing_after_attach_tier.t
deleted file mode 100755
index f9166d7d0e2..00000000000
--- a/tests/basic/tier/bug-1214222-directories_missing_after_attach_tier.t
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../tier.rc
-
-LAST_BRICK=3
-CACHE_BRICK_FIRST=4
-CACHE_BRICK_LAST=5
-DEMOTE_TIMEOUT=12
-PROMOTE_TIMEOUT=5
-
-
-LAST_BRICK=1
-CACHE_BRICK=2
-DEMOTE_TIMEOUT=12
-PROMOTE_TIMEOUT=5
-MIGRATION_TIMEOUT=10
-cleanup
-
-
-TEST glusterd
-
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0..$LAST_BRICK}
-TEST $CLI volume start $V0
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
-
-# Basic operations.
-cd $M0
-TEST stat .
-TEST mkdir d1
-TEST [ -d d1 ]
-TEST touch file1
-TEST [ -e file1 ]
-
-TEST $CLI volume tier $V0 attach replica 2 $H0:$B0/${V0}$CACHE_BRICK_FIRST $H0:$B0/${V0}$CACHE_BRICK_LAST
-TEST $CLI volume set $V0 features.ctr-enabled on
-
-#check whether the directory's and files are present on mount or not.
-TEST [ -d d1 ]
-TEST [ -e file1 ]
-
-cd
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0;
-
-tier_status ()
-{
- $CLI volume tier $V0 detach status | grep progress | wc -l
-}
-
-TEST $CLI volume tier $V0 detach start
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" tier_status
-TEST $CLI volume tier $V0 detach commit
-
-EXPECT "0" confirm_tier_removed ${V0}${CACHE_BRICK_FIRST}
-
-EXPECT_WITHIN $REBALANCE_TIMEOUT "0" confirm_vol_stopped $V0
-
-cleanup;
-
-#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/basic/tier/bug-1260185-donot-allow-detach-commit-unnecessarily.t b/tests/basic/tier/bug-1260185-donot-allow-detach-commit-unnecessarily.t
deleted file mode 100644
index 6efbe32f121..00000000000
--- a/tests/basic/tier/bug-1260185-donot-allow-detach-commit-unnecessarily.t
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-
-## Test case for BZ: 1260185
-## Do not allow detach-tier commit without "force" option or without
-## user have not started "detach-tier start" operation
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-. $(dirname $0)/../../tier.rc
-
-cleanup;
-
-## Start glusterd
-TEST glusterd;
-TEST pidof glusterd;
-
-## Lets create and start the volume
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2}
-TEST $CLI volume start $V0
-
-## Perform attach-tier operation on volume $V0
-TEST $CLI volume tier $V0 attach $H0:$B0/${V0}{3..4}
-
-## detach-tier commit operation without force option on volume $V0
-## should not succeed
-TEST ! $CLI --mode=script volume tier $V0 detach commit
-
-## detach-tier commit operation with force option on volume $V0
-## should succeed
-TEST $CLI volume tier $V0 detach force
-
-sleep 3
-
-## Again performing attach-tier operation on volume $V0
-TEST $CLI volume tier $V0 attach $H0:$B0/${V0}{5..6}
-
-## Do detach-tier start on volume $V0
-TEST $CLI volume tier $V0 detach start
-
-## Now detach-tier commit on volume $V0 should succeed.
-## wait for the detach to complete
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_commit_for_single_node
-
-cleanup;
-
-#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=1517961
diff --git a/tests/basic/tier/ctr-rename-overwrite.t b/tests/basic/tier/ctr-rename-overwrite.t
deleted file mode 100755
index 73ee7581338..00000000000
--- a/tests/basic/tier/ctr-rename-overwrite.t
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../tier.rc
-
-LAST_BRICK=1
-CACHE_BRICK_FIRST=4
-CACHE_BRICK_LAST=5
-
-DEMOTE_FREQ=5
-PROMOTE_FREQ=5
-
-cleanup
-
-# Start glusterd
-TEST glusterd
-TEST pidof glusterd
-
-# Set-up tier cluster
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0..$LAST_BRICK}
-TEST $CLI volume start $V0
-TEST $CLI volume tier $V0 attach replica 2 $H0:$B0/${V0}$CACHE_BRICK_FIRST $H0:$B0/${V0}$CACHE_BRICK_LAST
-
-TEST $CLI volume set $V0 cluster.tier-demote-frequency $DEMOTE_FREQ
-TEST $CLI volume set $V0 cluster.tier-promote-frequency $PROMOTE_FREQ
-
-# Start and mount the volume after enabling CTR
-TEST $CLI volume set $V0 features.ctr-enabled on
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
-
-# create two files
-echo "hello world" > $M0/file1
-echo "hello world" > $M0/file2
-
-# db in hot brick shows 4 record. 2 for file1 and 2 for file2
-ENTRY_COUNT=$(echo "select * from gf_file_tb; select * from gf_flink_tb;" | \
- sqlite3 $B0/${V0}5/.glusterfs/${V0}5.db | wc -l )
-TEST [ $ENTRY_COUNT -eq 4 ]
-
-#overwrite file2 with file1
-mv -f $M0/file1 $M0/file2
-
-# Now the db in hot tier should have only 2 records for file1.
-ENTRY_COUNT=$(echo "select * from gf_file_tb; select * from gf_flink_tb;" | \
- sqlite3 $B0/${V0}5/.glusterfs/${V0}5.db | wc -l )
-TEST [ $ENTRY_COUNT -eq 2 ]
-
-cleanup
-#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
diff --git a/tests/basic/tier/file_lock.c b/tests/basic/tier/file_lock.c
deleted file mode 100644
index 20fdbc0f668..00000000000
--- a/tests/basic/tier/file_lock.c
+++ /dev/null
@@ -1,72 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <fcntl.h>
-
-void
-usage(void)
-{
- printf("Usage: testlock <filepath> [R|W]\n");
- return;
-}
-
-int
-main(int argc, char *argv[])
-{
- char *file_path = NULL;
- int fd = -1;
- struct flock lock = {0};
- int ret = -1;
- int c = 0;
-
- if (argc != 3) {
- usage();
- exit(1);
- }
-
- file_path = argv[1];
- fd = open(file_path, O_RDWR);
-
- if (-1 == fd) {
- printf("Failed to open file %s. %m\n", file_path);
- exit(1);
- }
-
- /* TODO: Check for invalid input*/
-
- if (!strcmp(argv[2], "W")) {
- lock.l_type = F_WRLCK;
- printf("Taking write lock\n");
-
- } else {
- lock.l_type = F_RDLCK;
- printf("Taking read lock\n");
- }
-
- lock.l_whence = SEEK_SET;
- lock.l_start = 0;
- lock.l_len = 0;
- lock.l_pid = getpid();
-
- printf("Acquiring lock on %s\n", file_path);
- ret = fcntl(fd, F_SETLK, &lock);
- if (ret) {
- printf("Failed to acquire lock on %s (%m)\n", file_path);
- close(fd);
- exit(1);
- }
-
- sleep(10);
-
- /*Unlock*/
-
- printf("Releasing lock on %s\n", file_path);
- lock.l_type = F_UNLCK;
- ret = fcntl(fd, F_SETLK, &lock);
- if (ret) {
- printf("Failed to release lock on %s (%m)\n", file_path);
- }
-
- close(fd);
- return ret;
-}
diff --git a/tests/basic/tier/file_with_spaces.t b/tests/basic/tier/file_with_spaces.t
deleted file mode 100755
index 919b900c730..00000000000
--- a/tests/basic/tier/file_with_spaces.t
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../tier.rc
-
-NUM_BRICKS=3
-DEMOTE_FREQ=5
-DEMOTE_TIMEOUT=10
-PROMOTE_FREQ=5
-
-FILE_SPACE="Testing filenames with spaces.log"
-
-
-# Creates a tiered volume with pure distribute hot and cold tiers
-# Both hot and cold tiers will have an equal number of bricks.
-
-function create_dist_tier_vol () {
- mkdir $B0/cold
- mkdir $B0/hot
- TEST $CLI volume create $V0 $H0:$B0/cold/${V0}{0..$1}
- TEST $CLI volume set $V0 performance.quick-read off
- TEST $CLI volume set $V0 performance.io-cache off
- TEST $CLI volume set $V0 features.ctr-enabled on
- TEST $CLI volume start $V0
- TEST $CLI volume tier $V0 attach $H0:$B0/hot/${V0}{0..$1}
- TEST $CLI volume set $V0 cluster.tier-demote-frequency $DEMOTE_FREQ
- TEST $CLI volume set $V0 cluster.tier-promote-frequency $PROMOTE_FREQ
- TEST $CLI volume set $V0 cluster.read-freq-threshold 0
- TEST $CLI volume set $V0 cluster.write-freq-threshold 0
- TEST $CLI volume set $V0 cluster.tier-mode test
-}
-
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info
-
-
-#Create and start a tiered volume
-create_dist_tier_vol $NUM_BRICKS
-
-# Mount FUSE
-TEST glusterfs -s $H0 --volfile-id $V0 $M0
-
-
-# The file will be created on the hot tier
-
-touch "$M0/$FILE_SPACE"
-
-# Get the path of the file on the hot tier
-HPATH=`find $B0/hot/ -name "$FILE_SPACE"`
-echo "File path on hot tier: "$HPATH
-
-EXPECT "yes" exists_and_regular_file $HPATH
-
-# Wait for the tier process to demote the file
-sleep $DEMOTE_TIMEOUT
-
-# Get the path of the file on the cold tier
-CPATH=`find $B0/cold/ -name "$FILE_SPACE"`
-echo "File path on cold tier: "$CPATH
-
-EXPECT "yes" exists_and_regular_file $CPATH
-
-cleanup;
-
-#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
diff --git a/tests/basic/tier/fops-during-migration-pause.t b/tests/basic/tier/fops-during-migration-pause.t
deleted file mode 100755
index 46fc6e445a5..00000000000
--- a/tests/basic/tier/fops-during-migration-pause.t
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../tier.rc
-
-NUM_BRICKS=3
-DEMOTE_FREQ=10
-PROMOTE_FREQ=10
-
-TEST_STR="Testing write and truncate fops on tier migration"
-
-function is_sticky_set () {
- echo $1
- if [ -k $1 ];
- then
- echo "yes"
- else
- echo "no"
- fi
-}
-
-
-# Creates a tiered volume with pure distribute hot and cold tiers
-# Both hot and cold tiers will have an equal number of bricks.
-
-function create_dist_tier_vol () {
- mkdir $B0/cold
- mkdir $B0/hot
- TEST $CLI volume create $V0 $H0:$B0/cold/${V0}{0..$1}
- TEST $CLI volume set $V0 performance.quick-read off
- TEST $CLI volume set $V0 performance.io-cache off
- TEST $CLI volume set $V0 features.ctr-enabled on
- TEST $CLI volume start $V0
- TEST $CLI volume tier $V0 attach $H0:$B0/hot/${V0}{0..$1}
- TEST $CLI volume set $V0 cluster.tier-demote-frequency $DEMOTE_FREQ
- TEST $CLI volume set $V0 cluster.tier-promote-frequency $PROMOTE_FREQ
- TEST $CLI volume set $V0 cluster.read-freq-threshold 0
- TEST $CLI volume set $V0 cluster.write-freq-threshold 0
- TEST $CLI volume set $V0 cluster.tier-mode test
-}
-
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info
-
-
-#Create and start a tiered volume
-create_dist_tier_vol $NUM_BRICKS
-
-# Mount FUSE
-TEST glusterfs -s $H0 --volfile-id $V0 $M0
-
-TEST mkdir $M0/dir1
-
-# Create a large file (800MB), so that rebalance takes time
-# The file will be created on the hot tier
-sleep_until_mid_cycle $DEMOTE_FREQ
-dd if=/dev/zero of=$M0/dir1/FILE1 bs=256k count=5120
-
-# Get the path of the file on the hot tier
-HPATH=`find $B0/hot/ -name FILE1`
-echo "File path on hot tier: "$HPATH
-
-
-# Wait for the tier process to demote the file
-EXPECT_WITHIN $REBALANCE_TIMEOUT "yes" is_sticky_set $HPATH
-
-TEST $CLI volume set $V0 cluster.tier-pause on
-
-# Wait for the tier process to finish migrating the file
-EXPECT_WITHIN $REBALANCE_TIMEOUT "no" is_sticky_set $HPATH
-
-# Get the path of the file on the cold tier
-CPATH=`find $B0/cold/ -name FILE1`
-
-# make sure destination is empty
-TEST ! test -s $CPATH
-
-# make sure source exists and not empty
-TEST test -s $HPATH
-
-cleanup;
-
-#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
diff --git a/tests/basic/tier/fops-during-migration.t b/tests/basic/tier/fops-during-migration.t
deleted file mode 100755
index 458c01e93c5..00000000000
--- a/tests/basic/tier/fops-during-migration.t
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../tier.rc
-
-
-NUM_BRICKS=3
-DEMOTE_FREQ=5
-PROMOTE_FREQ=5
-
-TEST_STR="Testing write and truncate fops on tier migration"
-
-
-# Creates a tiered volume with pure distribute hot and cold tiers
-# Both hot and cold tiers will have an equal number of bricks.
-
-function create_dist_tier_vol () {
- mkdir $B0/cold
- mkdir $B0/hot
- TEST $CLI volume create $V0 $H0:$B0/cold/${V0}{0..$1}
- TEST $CLI volume set $V0 performance.quick-read off
- TEST $CLI volume set $V0 performance.io-cache off
- TEST $CLI volume set $V0 features.ctr-enabled on
- TEST $CLI volume set $V0 cluster.force-migration on
- TEST $CLI volume start $V0
- TEST $CLI volume tier $V0 attach $H0:$B0/hot/${V0}{0..$1}
- TEST $CLI volume set $V0 cluster.tier-demote-frequency $DEMOTE_FREQ
- TEST $CLI volume set $V0 cluster.tier-promote-frequency $PROMOTE_FREQ
- TEST $CLI volume set $V0 cluster.read-freq-threshold 0
- TEST $CLI volume set $V0 cluster.write-freq-threshold 0
- TEST $CLI volume set $V0 cluster.tier-mode test
-}
-
-
-# Checks that the contents of the file matches the input string
-#$1 : file_path
-#$2 : comparison string
-
-function check_file_content () {
- contents=`cat $1`
- echo $contents
- if [ "$contents" = "$2" ]; then
- echo "1"
- else
- echo "0"
- fi
-}
-
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-
-#Create and start a tiered volume
-create_dist_tier_vol $NUM_BRICKS
-
-# Mount FUSE
-TEST glusterfs -s $H0 --volfile-id $V0 $M0
-
-$CLI volume set $V0 diagnostics.client-log-level DEBUG
-
-TEST mkdir $M0/dir1
-
-# Create a large file (320MB), so that rebalance takes time
-# The file will be created on the hot tier
-
-dd if=/dev/zero of=$M0/dir1/FILE1 bs=64k count=5120
-
-# Get the path of the file on the hot tier
-HPATH=`find $B0/hot/ -name FILE1`
-echo "File path on hot tier: "$HPATH
-
-
-# Wait for the tier process to demote the file
-EXPECT_WITHIN $REBALANCE_TIMEOUT "yes" is_sticky_set $HPATH
-
-# Get the path of the file on the cold tier
-CPATH=`find $B0/cold/ -name FILE1`
-echo "File path on cold tier: "$CPATH
-
-# Test setxattr
-TEST setfattr -n "user.test_xattr" -v "qwerty" $M0/dir1/FILE1
-
-# Change the file contents while it is being migrated
-echo $TEST_STR > $M0/dir1/FILE1
-
-# The file contents should have changed even if the file
-# is not done migrating
-EXPECT "1" check_file_content $M0/dir1/FILE1 "$TEST_STR"
-
-
-# Wait for the tier process to finish migrating the file
-EXPECT_WITHIN $REBALANCE_TIMEOUT "no" is_sticky_set $CPATH
-
-# The file contents should have changed
-EXPECT "1" check_file_content $M0/dir1/FILE1 "$TEST_STR"
-
-
-TEST getfattr -n "user.test_xattr" $M0/dir1/FILE1
-
-cleanup;
-
-#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/basic/tier/frequency-counters.t b/tests/basic/tier/frequency-counters.t
deleted file mode 100644
index 08e05df12e2..00000000000
--- a/tests/basic/tier/frequency-counters.t
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../tier.rc
-
-
-NUM_BRICKS=3
-DEMOTE_FREQ=10
-PROMOTE_FREQ=10
-NUM_FILES=5
-TEST_DIR=test
-# Creates a tiered volume with pure distribute hot and cold tiers
-# Both hot and cold tiers will have an equal number of bricks.
-
-function create_dist_vol () {
- mkdir $B0/cold
- mkdir $B0/hot
- TEST $CLI volume create $V0 $H0:$B0/cold/${V0}{0..$1}
- TEST $CLI volume set $V0 performance.quick-read off
- TEST $CLI volume set $V0 performance.io-cache off
- TEST $CLI volume start $V0
-}
-
-function create_dist_tier_vol () {
- TEST $CLI volume tier $V0 attach $H0:$B0/hot/${V0}{0..$1}
- TEST $CLI volume set $V0 cluster.tier-mode test
- TEST $CLI volume set $V0 cluster.tier-demote-frequency $DEMOTE_FREQ
- TEST $CLI volume set $V0 cluster.tier-promote-frequency $PROMOTE_FREQ
- TEST $CLI volume set $V0 features.record-counters on
- TEST $CLI volume set $V0 cluster.read-freq-threshold 2
- TEST $CLI volume set $V0 cluster.write-freq-threshold 2
-}
-
-cleanup;
-
-
-TEST glusterd
-
-#Create and start a tiered volume
-create_dist_vol $NUM_BRICKS
-
-# Mount FUSE
-TEST glusterfs -s $H0 --volfile-id $V0 $M0
-
-# create some files
-mkdir $M0/$TEST_DIR
-cd $M0/${TEST_DIR}
-
-date > file1
-touch file2
-
-# attach tier
-create_dist_tier_vol $NUM_BRICKS
-
-sleep_until_mid_cycle $PROMOTE_FREQ
-
-# check if promotion on single hit, should fail
-date >> file2
-cat file1
-drop_cache $M0
-sleep $PROMOTE_FREQ
-EXPECT "0" check_counters 0 0
-
-# check if promotion on double hit, should suceed
-sleep_until_mid_cycle $PROMOTE_FREQ
-date >> file2
-drop_cache $M0
-cat file1
-date >> file2
-drop_cache $M0
-cat file1
-
-EXPECT_WITHIN $PROMOTE_FREQ "0" check_counters 2 0
-
-TEST ! $CLI volume set $V0 features.record-counters off
-
-cd /
-
-cleanup
-
-#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
diff --git a/tests/basic/tier/legacy-many.t b/tests/basic/tier/legacy-many.t
deleted file mode 100644
index 5795428c794..00000000000
--- a/tests/basic/tier/legacy-many.t
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../tier.rc
-
-
-LAST_BRICK=3
-CACHE_BRICK_FIRST=4
-CACHE_BRICK_LAST=5
-DEMOTE_TIMEOUT=12
-PROMOTE_TIMEOUT=12
-MIGRATION_TIMEOUT=10
-DEMOTE_FREQ=60
-PROMOTE_FREQ=10
-TEST_DIR="test_files"
-NUM_FILES=15
-
-function read_all {
- for file in *
- do
- cat $file
- done
-}
-
-function tier_status () {
- $CLI volume tier $V0 status | grep "success" | wc -l
-}
-
-cleanup
-
-TEST glusterd
-TEST pidof glusterd
-
-# Create distributed replica volume
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0..$LAST_BRICK}
-TEST $CLI volume start $V0
-
-TEST $CLI volume set $V0 performance.quick-read off
-TEST $CLI volume set $V0 performance.io-cache off
-TEST $CLI volume set $V0 features.ctr-enabled on
-
-
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
-
-# Create a number of "legacy" files before attaching tier
-mkdir $M0/${TEST_DIR}
-cd $M0/${TEST_DIR}
-TEST create_many_files file $NUM_FILES
-wait
-
-# Attach tier
-TEST $CLI volume tier $V0 attach replica 2 $H0:$B0/${V0}$CACHE_BRICK_FIRST $H0:$B0/${V0}$CACHE_BRICK_LAST
-
-TEST $CLI volume set $V0 cluster.tier-mode test
-TEST $CLI volume set $V0 cluster.tier-demote-frequency $DEMOTE_FREQ
-TEST $CLI volume set $V0 cluster.tier-promote-frequency $PROMOTE_FREQ
-TEST $CLI volume set $V0 cluster.read-freq-threshold 0
-TEST $CLI volume set $V0 cluster.write-freq-threshold 0
-
-# wait a little for lookup heal to finish
-wait_for_tier_start
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_status
-
-# make sure fix layout completed
-CPATH=$B0/${V0}0
-echo $CPATH > /tmp/out
-TEST getfattr -n "trusted.tier.fix.layout.complete" $CPATH
-
-# Read "legacy" files
-drop_cache $M0
-
-sleep_until_mid_cycle $DEMOTE_FREQ
-
-TEST read_all
-
-# Test to make sure files were promoted as expected
-sleep $PROMOTE_TIMEOUT
-EXPECT_WITHIN $PROMOTE_TIMEOUT "0" check_counters $NUM_FILES 0
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" detach_start $V0
-EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}${CACHE_BRICK_FIRST}"
-
-TEST $CLI volume tier $V0 detach commit
-
-# fix layout flag should be cleared
-TEST ! getfattr -n "trusted.tier.fix.layout.complete" $CPATH
-
-cd;
-cleanup
-#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
diff --git a/tests/basic/tier/locked_file_migration.t b/tests/basic/tier/locked_file_migration.t
deleted file mode 100755
index 7fb17171046..00000000000
--- a/tests/basic/tier/locked_file_migration.t
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../tier.rc
-
-
-NUM_BRICKS=3
-DEMOTE_FREQ=7
-PROMOTE_FREQ=30
-DEMOTE_TIMEOUT=15
-
-TEST_STR="Testing write and truncate fops on tier migration"
-
-
-# Creates a tiered volume with pure distribute hot and cold tiers
-# Both hot and cold tiers will have an equal number of bricks.
-
-function create_dist_tier_vol () {
- mkdir $B0/cold
- mkdir $B0/hot
- TEST $CLI volume create $V0 $H0:$B0/cold/${V0}{0..$1}
- TEST $CLI volume set $V0 performance.quick-read off
- TEST $CLI volume set $V0 performance.io-cache off
- TEST $CLI volume set $V0 features.ctr-enabled on
- TEST $CLI volume start $V0
- TEST $CLI volume tier $V0 attach $H0:$B0/hot/${V0}{0..$1}
- TEST $CLI volume set $V0 cluster.tier-demote-frequency $DEMOTE_FREQ
- TEST $CLI volume set $V0 cluster.tier-promote-frequency $PROMOTE_FREQ
-
-#We don't want promotes to happen in this test
- TEST $CLI volume set $V0 cluster.read-freq-threshold 10
- TEST $CLI volume set $V0 cluster.write-freq-threshold 10
- TEST $CLI volume set $V0 cluster.tier-mode test
-}
-
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info
-
-
-# Create and start a tiered volume
-create_dist_tier_vol $NUM_BRICKS
-
-# Mount FUSE
-TEST glusterfs -s $H0 --volfile-id $V0 $M0
-
-TEST mkdir $M0/dir1
-build_tester $(dirname $0)/file_lock.c -o file_lock
-cp $(dirname $0)/file_lock $M0/file_lock
-
-# The files will be created on the hot tier
-touch $M0/dir1/FILE1
-touch $M0/dir1/FILE2
-
-# For FILE1, take a POSIX write lock on the entire file.
-# Don't take a lock on FILE2
-
-./file_lock $M0/dir1/FILE1 W &
-
-sleep $DEMOTE_FREQ
-
-# Wait for the tier process to demote the file
-# Only FILE2 and file_lock should be demoted
-# FILE1 should be skipped because of the lock held
-# on it
-
-EXPECT_WITHIN $DEMOTE_TIMEOUT "0" check_counters 0 2
-
-sleep 10
-
-rm $(dirname $0)/file_lock
-
-cleanup;
-
-#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
diff --git a/tests/basic/tier/new-tier-cmds.t b/tests/basic/tier/new-tier-cmds.t
deleted file mode 100644
index b9c9390536f..00000000000
--- a/tests/basic/tier/new-tier-cmds.t
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../tier.rc
-. $(dirname $0)/../../cluster.rc
-
-
-# Creates a tiered volume with pure distribute hot and cold tiers
-# Both hot and cold tiers will have an equal number of bricks.
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-function create_dist_tier_vol () {
- TEST $CLI_1 volume create $V0 disperse 6 redundancy 2 $H1:$B1/${V0}_b1 $H2:$B2/${V0}_b2 $H3:$B3/${V0}_b3 $H1:$B1/${V0}_b4 $H2:$B2/${V0}_b5 $H3:$B3/${V0}_b6
- TEST $CLI_1 volume start $V0
- TEST $CLI_1 volume tier $V0 attach replica 2 $H1:$B1/${V0}_h1 $H2:$B2/${V0}_h2 $H3:$B3/${V0}_h3 $H1:$B1/${V0}_h4 $H2:$B2/${V0}_h5 $H3:$B3/${V0}_h6
-}
-
-function tier_daemon_status {
- local _VAR=CLI_$1
- local xpath_sel='//node[hostname="Tier Daemon"][path="localhost"]/status'
- ${!_VAR} --xml volume status $V0 \
- | xmllint --xpath "$xpath_sel" - \
- | sed -n '/.*<status>\([0-9]*\).*/s//\1/p'
-}
-
-function detach_xml_status {
- $CLI_1 volume tier $V0 detach status --xml | sed -n \
- '/.*<opErrstr>Detach tier status successful/p' | wc -l
-}
-
-cleanup;
-
-#setup cluster and test volume
-TEST launch_cluster 3; # start 3-node virtual cluster
-TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
-TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
-
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
-
-#Create and start a tiered volume
-create_dist_tier_vol
-
-########### check failure for older commands #############
-
-TEST ! $CLI_1 volume rebalance $V0 tier status
-
-# failure for older command can be removed in 3.11
-
-##########################################################
-
-#Issue detach tier on the tiered volume
-#Will throw error saying detach tier not started
-
-EXPECT "Tier command failed" $CLI_1 volume tier $V0 detach status
-
-EXPECT "0" detach_xml_status
-
-#kill a node
-TEST kill_node 2
-
-#check if we have the rest of the node available printed in the output of detach status
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_status_node_down
-
-TEST $glusterd_2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
-
-#after starting detach tier the detach tier status should display the status
-sleep 2
-$CLI_1 volume status
-TEST $CLI_1 volume tier $V0 detach start
-
-EXPECT "1" detach_xml_status
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_status
-
-#kill a node
-TEST kill_node 2
-
-#check if we have the rest of the node available printed in the output of detach status
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_status_node_down
-
-TEST $glusterd_2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
-# Make sure we check that the *bricks* are up and not just the node. >:-(
-EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H2 $B2/${V0}_b2
-EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 cluster_brick_up_status 1 $V0 $H2 $B2/${V0}_h2
-
-# Parsing normal output doesn't work because of line-wrap issues on our
-# regression machines, and the version of xmllint there doesn't support --xpath
-# so we can't do it that way either. In short, there's no way for us to detect
-# when we can stop waiting, so we just have to wait the maximum time every time
-# and hope any failures will show up later in the script.
-sleep $PROCESS_UP_TIMEOUT
-#XPECT_WITHIN $PROCESS_UP_TIMEOUT 1 tier_daemon_status 2
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_status
-
-TEST $CLI_1 volume tier $V0 detach stop
-
-#If detach tier is stopped the detach tier command will fail
-
-EXPECT "Tier command failed" $CLI_1 volume tier $V0 detach status
-
-TEST $CLI_1 volume tier $V0 detach start
-
-#wait for the detach to complete
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_commit
-
-#If detach tier is committed then the detach status should fail throwing an error
-#saying its not a tiered volume
-
-EXPECT "Tier command failed" $CLI_1 volume tier $V0 detach status
-
-########### check failure for older commands #############
-
-TEST ! $CLI_1 volume rebalance $V0 tier start
-
-# failure for older command can be removed in 3.11
-
-##########################################################
-cleanup;
-
-#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
diff --git a/tests/basic/tier/readdir-during-migration.t b/tests/basic/tier/readdir-during-migration.t
deleted file mode 100644
index 292ca882b79..00000000000
--- a/tests/basic/tier/readdir-during-migration.t
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../tier.rc
-
-
-NUM_BRICKS=3
-DEMOTE_FREQ=5
-PROMOTE_FREQ=5
-NUM_FILES=30
-TEST_DIR=test
-# Creates a tiered volume with pure distribute hot and cold tiers
-# Both hot and cold tiers will have an equal number of bricks.
-
-function create_dist_tier_vol () {
- mkdir $B0/cold
- mkdir $B0/hot
- TEST $CLI volume create $V0 $H0:$B0/cold/${V0}{0..$1}
- TEST $CLI volume set $V0 performance.quick-read off
- TEST $CLI volume set $V0 performance.io-cache off
- TEST $CLI volume start $V0
- TEST $CLI volume tier $V0 attach $H0:$B0/hot/${V0}{0..$1}
- TEST $CLI volume set $V0 cluster.tier-mode test
- TEST $CLI volume set $V0 cluster.tier-demote-frequency $DEMOTE_FREQ
- TEST $CLI volume set $V0 cluster.tier-promote-frequency $PROMOTE_FREQ
- TEST $CLI volume set $V0 cluster.read-freq-threshold 0
- TEST $CLI volume set $V0 cluster.write-freq-threshold 0
-}
-
-function check_file_count() {
- if [ $(ls -1 | wc -l) == $1 ]; then
- echo "1"
- else
- echo "0"
- fi
-}
-
-cleanup;
-
-
-TEST glusterd
-
-#Create and start a tiered volume
-create_dist_tier_vol $NUM_BRICKS
-
-# Mount FUSE
-TEST glusterfs -s $H0 --volfile-id $V0 $M0
-
-# Create a number of "legacy" files before attaching tier
-mkdir $M0/${TEST_DIR}
-cd $M0/${TEST_DIR}
-TEST create_many_files tfile $NUM_FILES
-
-EXPECT "1" check_file_count $NUM_FILES
-
-sleep $DEMOTE_FREQ
-
-EXPECT "1" check_file_count $NUM_FILES
-
-cd /
-
-cleanup;
-
-#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
diff --git a/tests/basic/tier/record-metadata-heat.t b/tests/basic/tier/record-metadata-heat.t
deleted file mode 100755
index f6f35a83744..00000000000
--- a/tests/basic/tier/record-metadata-heat.t
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../tier.rc
-
-NUM_BRICKS=3
-DEMOTE_FREQ=5
-DEMOTE_TIMEOUT=10
-PROMOTE_FREQ=5
-
-FILE="file1.txt"
-FILE_LINK="file2.txt"
-
-# Creates a tiered volume with pure distribute hot and cold tiers
-# Both hot and cold tiers will have an equal number of bricks.
-
-function create_dist_tier_vol () {
- mkdir $B0/cold
- mkdir $B0/hot
- TEST $CLI volume create $V0 $H0:$B0/cold/${V0}{0..$1}
- TEST $CLI volume set $V0 performance.quick-read off
- TEST $CLI volume set $V0 performance.io-cache off
- TEST $CLI volume set $V0 features.ctr-enabled on
- TEST $CLI volume start $V0
- TEST $CLI volume tier $V0 attach $H0:$B0/hot/${V0}{0..$1}
- TEST $CLI volume set $V0 cluster.tier-mode test
- TEST $CLI volume set $V0 cluster.tier-demote-frequency $DEMOTE_FREQ
- TEST $CLI volume set $V0 cluster.tier-promote-frequency $PROMOTE_FREQ
- TEST $CLI volume set $V0 cluster.read-freq-threshold 4
- TEST $CLI volume set $V0 cluster.write-freq-threshold 4
-}
-
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-
-#Create and start a tiered volume
-create_dist_tier_vol $NUM_BRICKS
-
-# Mount FUSE
-TEST glusterfs -s $H0 --volfile-id $V0 $M0
-
-
-# The file will be created on the hot tier
-touch "$M0/$FILE"
-
-# Get the path of the file on the hot tier
-HPATH=`find $B0/hot/ -name "$FILE"`
-echo "File path on hot tier: "$HPATH
-
-############################################
-# as per the changes on b8b050c3
-# To test the xttr set by EC
-TEST ! getfattr -n "trusted.ec.size" $HPATH
-############################################
-
-# Expecting the file to be on the hot tier
-EXPECT "yes" exists_and_regular_file $HPATH
-
-sleep_until_mid_cycle $DEMOTE_FREQ
-
-# Try to heat the file using 5 metadata operations
-# WITHOUT setting ctr-record-metadata-heat on
-touch "$M0/$FILE"
-chmod +x "$M0/$FILE"
-chown root "$M0/$FILE"
-ln "$M0/$FILE" "$M0/$FILE_LINK"
-rm -rf "$M0/$FILE_LINK"
-
-# Wait for the tier process to demote the file
-sleep $DEMOTE_TIMEOUT
-
-# Get the path of the file on the cold tier
-CPATH=`find $B0/cold/ -name "$FILE"`
-echo "File path on cold tier: "$CPATH
-
-# Expecting the file to be on cold tier
-EXPECT "yes" exists_and_regular_file $CPATH
-
-#Set ctr-record-metadata-heat on
-TEST $CLI volume set $V0 ctr-record-metadata-heat on
-
-sleep_until_mid_cycle $DEMOTE_FREQ
-
-# Heating the file using 5 metadata operations
-touch "$M0/$FILE"
-chmod +x "$M0/$FILE"
-chown root "$M0/$FILE"
-ln "$M0/$FILE" "$M0/$FILE_LINK"
-rm -rf "$M0/$FILE_LINK"
-
-# Wait for the tier process to demote the file
-sleep $DEMOTE_TIMEOUT
-
-# Get the path of the file on the hot tier
-echo "File path on hot tier: "$HPATH
-
-# Expecting the file to be on the hot tier
-EXPECT "yes" exists_and_regular_file $HPATH
-
-cleanup;
-
-#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/basic/tier/tier-heald.t b/tests/basic/tier/tier-heald.t
deleted file mode 100644
index a8e634fb37b..00000000000
--- a/tests/basic/tier/tier-heald.t
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-# This test contains volume heal commands handled by glusterd.
-# Covers enable/disable at the moment. Will be enhanced later to include
-# the other commands as well.
-
-cleanup;
-TEST glusterd
-TEST pidof glusterd
-
-volfile=$(gluster system:: getwd)"/glustershd/glustershd-server.vol"
-
-# Commands should fail when both tiers are not of distribute type.
-# Glustershd shouldn't be running as long as there are no replicate/disperse
-# volumes
-TEST $CLI volume create dist_tier $H0:$B0/cold
-TEST $CLI volume start dist_tier
-TEST $CLI volume tier dist_tier attach $H0:$B0/hot
-
-TEST "[ -z $(get_shd_process_pid)]"
-TEST ! $CLI volume heal dist_tier enable
-TEST ! $CLI volume heal dist_tier disable
-
-# Commands should work on replicate/disperse volume.
-TEST $CLI volume create r2 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1
-TEST "[ -z $(get_shd_process_pid)]"
-TEST $CLI volume start r2
-
-TEST $CLI volume tier r2 attach $H0:$B0/r2_hot
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
-TEST $CLI volume heal r2 enable
-EXPECT "enable" volume_option r2 "cluster.self-heal-daemon"
-EXPECT "enable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
-TEST $CLI volume heal r2 disable
-EXPECT "disable" volume_option r2 "cluster.self-heal-daemon"
-EXPECT "disable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
-# Commands should work on disperse volume.
-TEST $CLI volume create ec2 disperse 3 redundancy 1 $H0:$B0/ec2_0 $H0:$B0/ec2_1 $H0:$B0/ec2_2
-TEST $CLI volume start ec2
-
-TEST $CLI volume tier ec2 attach replica 2 $H0:$B0/ec2_hot{1..4}
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
-TEST $CLI volume heal ec2 enable
-EXPECT "enable" volume_option ec2 "cluster.disperse-self-heal-daemon"
-EXPECT "enable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
-TEST $CLI volume heal ec2 disable
-EXPECT "disable" volume_option ec2 "cluster.disperse-self-heal-daemon"
-EXPECT "disable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
-
-#Check that shd graph is rewritten correctly on volume stop/start
-EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
-EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
-TEST $CLI volume stop r2
-EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
-EXPECT "N" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
-TEST $CLI volume stop ec2
-# When both the volumes are stopped glustershd volfile is not modified just the
-# process is stopped
-TEST "[ -z $(get_shd_process_pid) ]"
-
-TEST $CLI volume start r2
-EXPECT "N" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
-EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
-
-TEST $CLI volume start ec2
-
-EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
-EXPECT "Y" volgen_volume_exists $volfile ec2-replicate-0 cluster replicate
-
-TEST $CLI volume tier ec2 detach force
-
-EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
-EXPECT "N" volgen_volume_exists $volfile ec2-replicate-0 cluster replicate
-
-TEST $CLI volume set r2 self-heal-daemon on
-TEST $CLI volume set r2 cluster.self-heal-daemon off
-TEST ! $CLI volume set ec2 self-heal-daemon off
-TEST ! $CLI volume set ec2 cluster.self-heal-daemon on
-TEST ! $CLI volume set dist self-heal-daemon off
-TEST ! $CLI volume set dist cluster.self-heal-daemon on
-
-TEST $CLI volume set ec2 disperse-self-heal-daemon off
-TEST $CLI volume set ec2 cluster.disperse-self-heal-daemon on
-TEST ! $CLI volume set r2 disperse-self-heal-daemon on
-TEST ! $CLI volume set r2 cluster.disperse-self-heal-daemon off
-TEST ! $CLI volume set dist disperse-self-heal-daemon off
-TEST ! $CLI volume set dist cluster.disperse-self-heal-daemon on
-
-cleanup
-#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
diff --git a/tests/basic/tier/tier-snapshot.t b/tests/basic/tier/tier-snapshot.t
deleted file mode 100644
index 8747c5dc05d..00000000000
--- a/tests/basic/tier/tier-snapshot.t
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../snapshot.rc
-
-cleanup;
-
-TEST init_n_bricks 4;
-TEST setup_lvm 4;
-
-TEST glusterd;
-
-TEST $CLI volume create $V0 replica 2 $H0:$L1 $H0:$L2 ;
-
-TEST $CLI volume start $V0;
-
-TEST $CLI volume tier $V0 attach replica 2 $H0:$L3 $H0:$L4 ;
-
-TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
-
-for i in {1..10} ; do echo "file" > $M0/file$i ; done
-
-TEST $CLI snapshot config activate-on-create enable
-
-TEST $CLI snapshot create snap1 $V0 no-timestamp;
-
-for i in {11..20} ; do echo "file" > $M0/file$i ; done
-
-TEST $CLI snapshot create snap2 $V0 no-timestamp;
-
-mkdir $M0/dir1;
-mkdir $M0/dir2;
-
-for i in {1..10} ; do echo "foo" > $M0/dir1/foo$i ; done
-for i in {1..10} ; do echo "foo" > $M0/dir2/foo$i ; done
-
-TEST $CLI snapshot create snap3 $V0 no-timestamp;
-
-for i in {11..20} ; do echo "foo" > $M0/dir1/foo$i ; done
-for i in {11..20} ; do echo "foo" > $M0/dir2/foo$i ; done
-
-TEST $CLI snapshot create snap4 $V0 no-timestamp;
-
-TEST $CLI snapshot delete all;
-
-cleanup;
-#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/basic/tier/tier.t b/tests/basic/tier/tier.t
deleted file mode 100755
index 1798541becd..00000000000
--- a/tests/basic/tier/tier.t
+++ /dev/null
@@ -1,219 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../tier.rc
-
-LAST_BRICK=3
-CACHE_BRICK_FIRST=4
-CACHE_BRICK_LAST=5
-DEMOTE_TIMEOUT=12
-PROMOTE_TIMEOUT=5
-MIGRATION_TIMEOUT=10
-DEMOTE_FREQ=4
-PROMOTE_FREQ=12
-
-function file_on_slow_tier {
- found=0
-
- for i in `seq 0 $LAST_BRICK`; do
- test -e "$B0/${V0}${i}/$1" && found=1 && break;
- done
-
- if [ "$found" == "1" ]
- then
- slow_hash1=$2
- slow_hash2=$(fingerprint "$B0/${V0}${i}/$1")
-
- if [ "$slow_hash1" == "$slow_hash2" ]
- then
- echo "0"
- else
- echo "2"
- fi
- else
- echo "1"
- fi
-
- # temporarily disable non-Linux tests.
- case $OSTYPE in
- NetBSD | FreeBSD | Darwin)
- echo "0"
- ;;
- esac
-}
-
-function file_on_fast_tier {
- found=0
-
- for j in `seq $CACHE_BRICK_FIRST $CACHE_BRICK_LAST`; do
- test -e "$B0/${V0}${j}/$1" && found=1 && break;
- done
-
-
- if [ "$found" == "1" ]
- then
- fast_hash1=$2
- fast_hash2=$(fingerprint "$B0/${V0}${j}/$1")
-
- if [ "$fast_hash1" == "$fast_hash2" ]
- then
- echo "0"
- else
- echo "2"
- fi
- else
- echo "1"
- fi
-}
-
-
-cleanup
-
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0..$LAST_BRICK}
-# testing bug 1215122, ie should fail if replica count and bricks are not compatible.
-
-TEST ! $CLI volume tier $V0 attach replica 5 $H0:$B0/${V0}$CACHE_BRICK_FIRST $H0:$B0/${V0}$CACHE_BRICK_LAST
-
-TEST $CLI volume start $V0
-
-# The following two commands instigate a graph switch. Do them
-# before attaching the tier. If done on a tiered volume the rebalance
-# daemon will terminate and must be restarted manually.
-TEST $CLI volume set $V0 performance.quick-read off
-TEST $CLI volume set $V0 performance.io-cache off
-
-#Not a tier volume
-TEST ! $CLI volume set $V0 cluster.tier-demote-frequency 4
-
-#testing bug #1228112, glusterd crashed when trying to detach-tier commit force on a non-tiered volume.
-TEST ! $CLI volume tier $V0 detach commit force
-
-TEST $CLI volume tier $V0 attach replica 2 $H0:$B0/${V0}$CACHE_BRICK_FIRST $H0:$B0/${V0}$CACHE_BRICK_LAST
-
-TEST $CLI volume set $V0 cluster.tier-mode test
-
-# create a file, make sure it can be deleted after attach tier.
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
-cd $M0
-TEST touch delete_me.txt
-TEST rm -f delete_me.txt
-
-# confirm watermark CLI works
-TEST $CLI volume set $V0 cluster.watermark-hi 85
-TEST $CLI volume set $V0 cluster.watermark-low 75
-TEST $CLI volume set $V0 cluster.tier-max-mb 1000
-TEST $CLI volume set $V0 cluster.tier-max-files 1000
-TEST $CLI volume set $V0 cluster.tier-max-promote-file-size 1000
-TEST ! $CLI volume set $V0 cluster.tier-max-files -3
-TEST ! $CLI volume set $V0 cluster.watermark-low 90
-TEST ! $CLI volume set $V0 cluster.watermark-hi 75
-TEST ! $CLI volume set $V0 cluster.read-freq-threshold -12
-TEST ! $CLI volume set $V0 cluster.write-freq-threshold -12
-
-#check for watermark reset
-TEST $CLI volume set $V0 cluster.watermark-low 10
-TEST $CLI volume set $V0 cluster.watermark-hi 30
-TEST ! $CLI volume reset $V0 cluster.watermark-low
-TEST $CLI volume reset $V0 cluster.watermark-hi
-TEST $CLI volume reset $V0 cluster.watermark-low
-
-# stop the volume and restart it. The rebalance daemon should restart.
-cd /tmp
-umount $M0
-TEST $CLI volume stop $V0
-TEST $CLI volume start $V0
-
-wait_for_tier_start
-
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
-cd $M0
-
-sleep_first_cycle $DEMOTE_FREQ
-$CLI volume tier $V0 status
-
-#Tier options expect non-negative value
-TEST ! $CLI volume set $V0 cluster.tier-promote-frequency -1
-
-#Tier options expect non-negative value
-TEST ! $CLI volume set $V0 cluster.read-freq-threshold qwerty
-
-
-TEST $CLI volume set $V0 cluster.tier-demote-frequency $DEMOTE_FREQ
-TEST $CLI volume set $V0 cluster.tier-promote-frequency $PROMOTE_FREQ
-TEST $CLI volume set $V0 cluster.read-freq-threshold 0
-TEST $CLI volume set $V0 cluster.write-freq-threshold 0
-
-# Basic operations.
-TEST stat .
-TEST mkdir d1
-TEST [ -d d1 ]
-TEST touch d1/file1
-TEST mkdir d1/d2
-TEST [ -d d1/d2 ]
-TEST find d1
-mkdir /tmp/d1
-
-# Create a file. It should be on the fast tier.
-uuidgen > /tmp/d1/data.txt
-md5data=$(fingerprint /tmp/d1/data.txt)
-mv /tmp/d1/data.txt ./d1/data.txt
-
-TEST file_on_fast_tier d1/data.txt $md5data
-
-uuidgen > /tmp/d1/data2.txt
-md5data2=$(fingerprint /tmp/d1/data2.txt)
-cp /tmp/d1/data2.txt ./d1/data2.txt
-
-#File with spaces and special characters.
-SPACE_FILE="file with spaces & $peci@l ch@r@cter$ @!@$%^$#@^^*&%$#$%.txt"
-
-uuidgen > "/tmp/d1/$SPACE_FILE"
-md5space=$(fingerprint "/tmp/d1/$SPACE_FILE")
-mv "/tmp/d1/$SPACE_FILE" "./d1/$SPACE_FILE"
-
-# Check auto-demotion on write new.
-sleep $DEMOTE_TIMEOUT
-
-# Check auto-promotion on write append.
-UUID=$(uuidgen)
-echo $UUID >> /tmp/d1/data2.txt
-md5data2=$(fingerprint /tmp/d1/data2.txt)
-
-sleep_until_mid_cycle $DEMOTE_FREQ
-drop_cache $M0
-
-echo $UUID >> ./d1/data2.txt
-cat "./d1/$SPACE_FILE"
-
-sleep $PROMOTE_TIMEOUT
-sleep $DEMOTE_FREQ
-EXPECT_WITHIN $DEMOTE_TIMEOUT "0" check_counters 2 6
-
-# stop gluster, when it comes back info file should have tiered volume
-killall glusterd
-TEST glusterd
-
-EXPECT "0" file_on_slow_tier d1/data.txt $md5data
-EXPECT "0" file_on_slow_tier d1/data2.txt $md5data2
-EXPECT "0" file_on_slow_tier "./d1/$SPACE_FILE" $md5space
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" detach_start $V0
-EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}${CACHE_BRICK_FIRST}"
-
-TEST $CLI volume tier $V0 detach commit
-
-EXPECT "0" confirm_tier_removed ${V0}${CACHE_BRICK_FIRST}
-
-confirm_vol_stopped $V0
-
-cd;
-
-cleanup
-rm -rf /tmp/d1
-
-
-#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
diff --git a/tests/basic/tier/tier_lookup_heal.t b/tests/basic/tier/tier_lookup_heal.t
deleted file mode 100755
index c7c7f27e8de..00000000000
--- a/tests/basic/tier/tier_lookup_heal.t
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-LAST_BRICK=1
-CACHE_BRICK_FIRST=2
-CACHE_BRICK_LAST=3
-PROMOTE_TIMEOUT=5
-
-function file_on_fast_tier {
- local ret="1"
-
- s1=$(md5sum $1)
- s2=$(md5sum $B0/${V0}${CACHE_BRICK_FIRST}/$1)
-
- if [ -e $B0/${V0}${CACHE_BRICK_FIRST}/$1 ] && ! [ "$s1" == "$s2" ]; then
- echo "0"
- else
- echo "1"
- fi
-}
-
-cleanup
-
-
-TEST glusterd
-
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0..$LAST_BRICK}
-TEST $CLI volume start $V0
-TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
-
-# Create files before CTR xlator is on.
-cd $M0
-TEST stat .
-TEST touch file1
-TEST stat file1
-
-#Attach tier and switch ON CTR Xlator.
-TEST $CLI volume tier $V0 attach replica 2 $H0:$B0/${V0}$CACHE_BRICK_FIRST $H0:$B0/${V0}$CACHE_BRICK_LAST
-TEST $CLI volume set $V0 features.ctr-enabled on
-TEST $CLI volume set $V0 cluster.tier-demote-frequency 4
-TEST $CLI volume set $V0 cluster.tier-promote-frequency 4
-TEST $CLI volume set $V0 cluster.read-freq-threshold 0
-TEST $CLI volume set $V0 cluster.write-freq-threshold 0
-TEST $CLI volume set $V0 performance.quick-read off
-TEST $CLI volume set $V0 performance.io-cache off
-TEST $CLI volume set $V0 cluster.tier-mode test
-
-#The lookup should heal the database.
-TEST ls file1
-
-# gf_file_tb and gf_flink_tb should NOT be empty
-ENTRY_COUNT=$(echo "select * from gf_file_tb; select * from gf_flink_tb;" | \
- sqlite3 $B0/${V0}$LAST_BRICK/.glusterfs/${V0}$LAST_BRICK.db | wc -l )
-TEST [ $ENTRY_COUNT -eq 2 ]
-
-# Heat-up the file
-uuidgen > file1
-sleep 5
-
-#Check if the file is promoted
-EXPECT_WITHIN $PROMOTE_TIMEOUT "0" file_on_fast_tier file1
-
-cd;
-
-cleanup;
-
-#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/basic/tier/tierd_check.t b/tests/basic/tier/tierd_check.t
deleted file mode 100644
index 5701fa98960..00000000000
--- a/tests/basic/tier/tierd_check.t
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../tier.rc
-. $(dirname $0)/../../cluster.rc
-
-
-# Creates a tiered volume with pure distribute hot and cold tiers
-# Both hot and cold tiers will have an equal number of bricks.
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-function create_dist_tier_vol () {
- TEST $CLI_1 volume create $V0 $H1:$B1/${V0} $H2:$B2/${V0}
- TEST $CLI_1 volume start $V0
- TEST $CLI_1 volume tier $V0 attach $H1:$B1/${V0}_h1 $H2:$B2/${V0}_h2
-}
-
-function tier_status () {
- #$CLI_1 volume tier $V0 status | grep progress | wc -l
- # I don't want to disable the entire test, but this part of it seems
- # highly suspect. *Why* do we always expect the number of lines to be
- # exactly two? What would it mean for it to be otherwise? Are we
- # checking *correctness* of the result, or merely its *consistency*
- # with what was observed at some unspecified time in the past? Does
- # this check only serve to inhibit actual improvements? Until someone
- # can answer these questions and explain why a hard-coded "2" is less
- # arbitrary than what was here before, we might as well disable this
- # part of the test.
- echo "2"
-}
-
-function tier_daemon_kill () {
-pkill -f "tierd/$V0"
-echo "$?"
-}
-
-cleanup;
-
-#setup cluster and test volume
-TEST launch_cluster 3; # start 3-node virtual cluster
-TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
-TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
-
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
-
-#Create and start a tiered volume
-create_dist_tier_vol
-
-wait_for_tier_start
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 tier_daemon_check
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 tier_daemon_kill
-
-TEST $CLI_1 volume tier $V0 start
-
-wait_for_tier_start
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" tier_daemon_check
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" tier_daemon_kill
-
-TEST $CLI_3 volume tier $V0 start force
-
-wait_for_tier_start
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" tier_daemon_check
-
-#The pattern progress should occur twice only.
-#it shouldn't come up on the third node without tierd even
-#after the tier start force is issued on the node without
-#tierd
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
-
-#kill the node on which tier is not supposed to run
-TEST kill_node 3
-
-#bring the node back, it should not have tierd running on it
-TEST $glusterd_3;
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
-
-#after volume restart, check for tierd
-
-TEST $CLI_3 volume stop $V0
-
-TEST $CLI_3 volume start $V0
-
-wait_for_tier_start
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
-
-#check for detach start and stop
-
-TEST $CLI_3 volume tier $V0 detach start
-
-TEST $CLI_3 volume tier $V0 detach stop
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
-
-TEST $CLI_1 volume tier $V0 start force
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" tier_daemon_check
-
-# To test for detach start fail while the brick is down
-
-TEST pkill -f "$B1/$V0"
-
-TEST ! $CLI_1 volume tier $V0 detach start
-
-cleanup
-# This test isn't worth keeping. Besides the totally arbitrary tier_status
-# checks mentioned above, someone direct-coded pkill to kill bricks instead of
-# using the volume.rc function we already had. I can't be bothered fixing that,
-# and the next thing, and the next thing, unless there's a clear benefit to
-# doing so, and AFAICT the success or failure of this test tells us nothing
-# useful. Therefore, it's disabled until further notice.
-#G_TESTDEF_TEST_STATUS_CENTOS6=KNOWN_ISSUE,BUG=000000
-#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
diff --git a/tests/basic/tier/unlink-during-migration.t b/tests/basic/tier/unlink-during-migration.t
deleted file mode 100755
index 1330092177f..00000000000
--- a/tests/basic/tier/unlink-during-migration.t
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../tier.rc
-
-
-DEMOTE_FREQ=5
-PROMOTE_FREQ=5
-
-function create_dist_rep_vol () {
- mkdir $B0/cold
- mkdir $B0/hot
- TEST $CLI volume create $V0 replica 2 $H0:$B0/cold/${V0}{0..3}
- TEST $CLI volume set $V0 performance.quick-read off
- TEST $CLI volume set $V0 performance.io-cache off
- TEST $CLI volume set $V0 features.ctr-enabled on
- TEST $CLI volume start $V0
-}
-
-function attach_dist_rep_tier () {
- TEST $CLI volume tier $V0 attach replica 2 $H0:$B0/hot/${V0}{0..3}
- TEST $CLI volume set $V0 cluster.tier-demote-frequency $DEMOTE_FREQ
- TEST $CLI volume set $V0 cluster.tier-promote-frequency $PROMOTE_FREQ
- TEST $CLI volume set $V0 cluster.read-freq-threshold 0
- TEST $CLI volume set $V0 cluster.write-freq-threshold 0
- TEST $CLI volume set $V0 cluster.tier-mode test
-}
-
-cleanup;
-
-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume info
-
-
-#Create and start a volume
-create_dist_rep_vol
-
-# Mount FUSE
-TEST glusterfs -s $H0 --volfile-id $V0 $M0
-
-# Create a large file (320MB), so that rebalance takes time
-TEST dd if=/dev/zero of=$M0/foo bs=64k count=5120
-
-# Get the path of the file on the cold tier
-CPATH=`find $B0/cold/ -name foo`
-echo "File path on cold tier: "$CPATH
-
-#Now attach the tier
-attach_dist_rep_tier
-
-#Write into the file to promote it
-echo "good morning">>$M0/foo
-
-# Wait for the tier process to promote the file
-EXPECT_WITHIN $REBALANCE_TIMEOUT "yes" is_sticky_set $CPATH
-
-# Get the path of the file on the hot tier
-HPATH=`find $B0/hot/ -name foo`
-
-echo "File path on hot tier: "$HPATH
-TEST rm -rf $M0/foo
-TEST ! stat $HPATH
-TEST ! stat $CPATH
-
-#unlink during demotion
-HPATH="";
-CPATH="";
-
-# Create a large file (320MB), so that rebalance takes time
-TEST dd if=/dev/zero of=$M0/foo1 bs=64k count=5120
-
-# Get the path of the file on the hot tier
-HPATH=`find $B0/hot/ -name foo1`
-echo "File path on hot tier : "$HPATH
-
-EXPECT_WITHIN $REBALANCE_TIMEOUT "yes" is_sticky_set $HPATH
-
-# Get the path of the file on the cold tier
-CPATH=`find $B0/cold/ -name foo1`
-echo "File path on cold tier : "$CPATH
-
-TEST rm -rf $M0/foo1
-
-TEST ! stat $HPATH
-TEST ! stat $CPATH
-
-cleanup;
-
-#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
diff --git a/tests/basic/trace.t b/tests/basic/trace.t
new file mode 100755
index 00000000000..01e7c9e0a25
--- /dev/null
+++ b/tests/basic/trace.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST mkdir -p $B0/single-brick
+cat > $B0/template.vol <<EOF
+volume posix
+ type storage/posix
+ option directory $B0/single-brick
+end-volume
+
+volume trace
+ type debug/trace
+ option log-file yes
+ option log-history yes
+ subvolumes posix
+end-volume
+EOF
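+# The volfile above stacks debug/trace directly on top of storage/posix, so
+# every FOP issued through the mount below gets logged by the trace xlator.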
+
+TEST glusterfs -f $B0/template.vol $M0
+
+TEST $(dirname $0)/rpc-coverage.sh --no-locks $M0
+
+# Take a statedump to get maximum code coverage
+pid=$(ps auxww | grep glusterfs | grep -E "template.vol" | awk '{print $2}' | head -1)
+
+TEST generate_statedump $pid
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+# Now, use the glusterd way of enabling trace
+TEST glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+TEST $CLI volume set $V0 debug.trace marker
+TEST $CLI volume set $V0 debug.log-file yes
+#TEST $CLI volume set $V0 debug.log-history yes
+
+TEST $CLI volume start $V0;
+
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
+
+TEST $(dirname $0)/rpc-coverage.sh --no-locks $M1
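+
+# Build and run the gfapi coverage sample against the volume to exercise
+# the gfapi code paths as well.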
+cp $(dirname ${0})/gfapi/glfsxmp-coverage.c ./glfsxmp.c
+build_tester ./glfsxmp.c -lgfapi
+./glfsxmp $V0 $H0 > /dev/null
+cleanup_tester ./glfsxmp
+rm ./glfsxmp.c
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+cleanup;
diff --git a/tests/basic/uss.t b/tests/basic/uss.t
index 40e556e372e..09dd00ef995 100644
--- a/tests/basic/uss.t
+++ b/tests/basic/uss.t
@@ -6,6 +6,8 @@
. $(dirname $0)/../fileio.rc
. $(dirname $0)/../nfs.rc
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
function check_readonly()
{
$@ 2>&1 | grep -q 'Read-only file system'
@@ -34,6 +36,7 @@ TEST glusterd;
TEST pidof glusterd;
TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
+
TEST $CLI volume set $V0 nfs.disable false
@@ -372,6 +375,15 @@ TEST rm -f $M0/aaa;
TEST $CLI snapshot delete snap6;
+# drop the caches so that the dentry for "snap6"
+# is forgotten from the client cache.
+drop_cache $M0
+
+EXPECT_WITHIN 30 "5" count_snaps $M0;
+
+# This should fail, as snap6 just got deleted.
+TEST ! stat $M0/.history/snap6
+
TEST $CLI snapshot create snap6 $V0 no-timestamp
TEST ls $M0/.history;
@@ -382,4 +394,28 @@ TEST ls $M0/.history/snap6/;
TEST ! stat $M0/.history/snap6/aaa;
+TEST stat $M0
+
+# Done with the tests; start cleaning up.
+TEST $CLI volume set $V0 features.uss disable
+
+TEST $CLI snapshot delete snap6;
+
+TEST $CLI snapshot delete snap5;
+
+TEST $CLI snapshot delete snap4;
+
+TEST $CLI snapshot delete snap3;
+
+TEST $CLI snapshot delete snap2;
+
+TEST $CLI snapshot delete snap1;
+
+# the nfs client has already been unmounted at line 333
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $CLI volume stop $V0
+
+TEST $CLI volume delete $V0
+
cleanup;
diff --git a/tests/basic/volfile-sanity.t b/tests/basic/volfile-sanity.t
index 04d1d2869c2..ef2f9344468 100644
--- a/tests/basic/volfile-sanity.t
+++ b/tests/basic/volfile-sanity.t
@@ -12,8 +12,11 @@ TEST $CLI volume create $V0 $H0:$B0/${V0}1
killall glusterd
-## Mount FUSE with caching disabled (read-write)
-TEST $GFS -f /var/lib/glusterd/vols/${V0}/${V0}.${H0}.*.vol
+# The client tries to connect to port 24007 by default.
+# Start the server on that port so that the client
+# mount below can connect and work successfully.
+TEST $GFS --xlator-option "${V0}-server.transport.socket.listen-port=24007" \
+ -f /var/lib/glusterd/vols/${V0}/${V0}.${H0}.*.vol
TEST $GFS -f /var/lib/glusterd/vols/${V0}/${V0}.tcp-fuse.vol $M0
TEST $(df -h $M0 | grep -q ${V0})
diff --git a/tests/basic/volume-scale-shd-mux.t b/tests/basic/volume-scale-shd-mux.t
new file mode 100644
index 00000000000..102de22468e
--- /dev/null
+++ b/tests/basic/volume-scale-shd-mux.t
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=6
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+
+for i in $(seq 1 2); do
+ TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_afr$i
+ TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_ec$i
+done
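+
+# At this point there are three replica-3 volumes ($V0, ${V0}_afr1, ${V0}_afr2)
+# and two disperse (4+2) volumes (${V0}_ec1, ${V0}_ec2), each with 6 bricks,
+# all served by a single multiplexed self-heal daemon.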
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+#Check that the thread count becomes number of ec volumes * bricks per volume (2*6=12)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+#Check that the thread count becomes number of afr volumes * bricks per volume (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{6,7,8};
+#Check that the thread count becomes number of afr volumes * bricks per volume plus 3 additional threads for the newly added bricks (3*6+3=21)
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^21$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#Remove the bricks and check that the removal succeeds
+$CLI volume remove-brick $V0 $H0:$B0/${V0}{6,7,8} force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "glusterfs_graph_cleanup"
+TEST $CLI volume add-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5};
+#Check that the thread count becomes number of ec volumes * bricks per volume plus 6 additional threads for the newly added bricks (2*6+6=18)
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+
+#Remove the bricks and check that the removal succeeds
+$CLI volume remove-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5} force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+
+
+for i in $(seq 1 2); do
+ TEST $CLI volume stop ${V0}_afr$i
+ TEST $CLI volume stop ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+
+TEST touch $M0/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+
+TEST $CLI volume start ${V0} force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST rm -rf $M0/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+shd_pid=$(get_shd_mux_pid $V0)
+TEST $CLI volume create ${V0}_distribute1 $H0:$B0/${V0}_distribute10
+TEST $CLI volume start ${V0}_distribute1
+
+#Creating a non-replicate/non-ec volume should not have any effect on shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+EXPECT "^${shd_pid}$" get_shd_mux_pid $V0
+
+TEST mkdir $B0/add/
+#Now convert the distributed volume to replicate
+TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^9$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#scale down the volume
+TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#Before stopping the process, make sure there are no pending cleanup threads hanging
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "glusterfs_graph_cleanup"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+TEST rm -rf $B0/add/2 $B0/add/3
+
+#Now convert the distributed volume back to replicate and make sure that a new shd is spawned
+TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3};
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $HEAL_TIMEOUT "^3$" number_healer_threads_shd ${V0}_distribute1 "afr_shd_index_healer"
+
+#Now convert the replica volume to distribute again and make sure the shd is now stopped
+TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
+TEST rm -rf $B0/add/
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
+
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1708929
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=1708929
diff --git a/tests/basic/volume-snap-scheduler.t b/tests/basic/volume-snap-scheduler.t
new file mode 100644
index 00000000000..a638c5cc46a
--- /dev/null
+++ b/tests/basic/volume-snap-scheduler.t
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${GMV0}{1,2,3,4};
+TEST $CLI volume start $V0
+
+## Create, start and mount meta_volume, as
+## snap_scheduler expects shared storage to be enabled.
+## This test is very basic in nature: it does not create any snapshot,
+## and its purpose is to validate the snap scheduling commands.
+
+TEST $CLI volume create $META_VOL replica 3 $H0:$B0/${META_VOL}{1,2,3};
+TEST $CLI volume start $META_VOL
+TEST mkdir -p $META_MNT
+TEST glusterfs -s $H0 --volfile-id $META_VOL $META_MNT
+
+## Function to count lines matching a given key in 'snap_scheduler.py status' output
+function check_status_scheduler()
+{
+ local key=$1
+ snap_scheduler.py status | grep -F "$key" | wc -l
+}
+
+## Basic snap_scheduler command tests: init/enable/disable/list
+
+TEST snap_scheduler.py init
+
+TEST snap_scheduler.py enable
+
+EXPECT 1 check_status_scheduler "Enabled"
+
+TEST snap_scheduler.py disable
+
+EXPECT 1 check_status_scheduler "Disabled"
+
+TEST snap_scheduler.py list
+
+TEST $CLI volume stop $V0;
+
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/basic/volume-snapshot-xml.t b/tests/basic/volume-snapshot-xml.t
index 3ba25f4ddbb..ff63b54538d 100755
--- a/tests/basic/volume-snapshot-xml.t
+++ b/tests/basic/volume-snapshot-xml.t
@@ -1,13 +1,9 @@
#!/bin/bash
. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
. $(dirname $0)/../snapshot.rc
-function get-xml()
-{
- $CLI $1 --xml | xmllint --format - | grep $2 | sed 's/\(<"$2">\|<\/"$2">\)//g'
-}
-
cleanup;
TEST verify_lvm_version;
TEST glusterd;
diff --git a/tests/basic/volume-status.t b/tests/basic/volume-status.t
index 8cea5c7530a..01d7ebf6c07 100644
--- a/tests/basic/volume-status.t
+++ b/tests/basic/volume-status.t
@@ -4,6 +4,8 @@
. $(dirname $0)/../volume.rc
. $(dirname $0)/../nfs.rc
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
cleanup;
function gluster_client_list_status () {
@@ -22,7 +24,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
-TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
TEST $CLI volume set $V0 nfs.disable false
TEST $CLI volume start $V0;
@@ -32,10 +34,11 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" nfs_up_status
## Mount FUSE
TEST $GFS -s $H0 --volfile-id $V0 $M0;
+TEST touch $M0/file{1..20}
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "8" gluster_fd_status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "6" gluster_fd_status
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1024" gluster_inode_status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "768" gluster_inode_status
##Disabling this test until the client-list command works for brick-multiplexing
#EXPECT_WITHIN $PROCESS_UP_TIMEOUT "7" gluster_client_list_status
@@ -55,6 +58,8 @@ function test_nfs_cmds () {
for cmd in ${nfs_cmds[@]}; do
$CLI volume status $V0 nfs $cmd
(( ret += $? ))
+ $CLI volume status $V0 nfs $cmd --xml
+ (( ret += $? ))
done
return $ret
}
@@ -65,6 +70,8 @@ function test_shd_cmds () {
for cmd in ${shd_cmds[@]}; do
$CLI volume status $V0 shd $cmd
(( ret += $? ))
+ $CLI volume status $V0 shd $cmd --xml
+ (( ret += $? ))
done
return $ret
}
@@ -76,14 +83,29 @@ function test_brick_cmds () {
for i in {1..2}; do
$CLI volume status $V0 $H0:$B0/${V0}$i $cmd
(( ret += $? ))
+ $CLI volume status $V0 $H0:$B0/${V0}$i $cmd --xml
+ (( ret += $? ))
done
done
return $ret
}
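+
+# Run each generic 'volume status <cmd>' variant in both plain and --xml form,
+# accumulating the exit codes; any non-zero status fails the TEST below.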
+function test_status_cmds () {
+ local ret=0
+ declare -a cmds=("detail" "clients" "mem" "inode" "fd" "callpool" "tasks" "client-list")
+ for cmd in ${cmds[@]}; do
+ $CLI volume status $V0 $cmd
+ (( ret += $? ))
+ $CLI volume status $V0 $cmd --xml
+ (( ret += $? ))
+ done
+ return $ret
+}
+
TEST test_shd_cmds;
TEST test_nfs_cmds;
TEST test_brick_cmds;
+TEST test_status_cmds;
## Before killing daemon to avoid deadlocks
diff --git a/tests/basic/volume.t b/tests/basic/volume.t
index 23b740af1ed..27fe093d07d 100755..100644
--- a/tests/basic/volume.t
+++ b/tests/basic/volume.t
@@ -9,26 +9,52 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
-TEST $CLI volume create $V0 replica 2 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
-
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
-EXPECT '8' brick_count $V0
+EXPECT '6' brick_count $V0
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
-TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{9,10,11,12};
-EXPECT '12' brick_count $V0
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{9,10,11};
+EXPECT '9' brick_count $V0
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}{1,2,3} force;
+EXPECT '6' brick_count $V0
+
+TEST $CLI volume top $V0 read-perf bs 4096 count 1000
+TEST $CLI volume top $V0 write-perf bs 1048576 count 2
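+
+# 'volume top read-perf' and 'volume top write-perf' run a brick-level
+# throughput measurement; they are exercised here only for command coverage
+# and their output is not validated.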
+
+TEST touch $M0/foo
+
+# The statedump path must be a directory; setting it to a file path should fail
+
+TEST ! $CLI v set $V0 server.statedump-path $M0/foo;
+EXPECT '/var/run/gluster' $CLI v get $V0 server.statedump-path
+
+#Set the statedump path to an existing directory, which should succeed
+TEST mkdir $D0/level;
+TEST $CLI v set $V0 server.statedump-path $D0/level
+EXPECT '/level' volinfo_field $V0 'server.statedump-path'
+
+ret=$(ls $D0/level | wc -l);
+TEST [ $ret == 0 ]
+TEST $CLI v statedump $V0;
+ret=$(ls $D0/level | wc -l);
+TEST ! [ $ret == 0 ]
+
+#Set the statedump path to a non-existing directory, which should fail
+TEST ! $CLI v set $V0 server.statedump-path /root/test
+EXPECT '/level' volinfo_field $V0 'server.statedump-path'
-TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}{1,2,3,4} force;
-EXPECT '8' brick_count $V0
+TEST rm -rf $D0/level
-TEST $CLI volume stop $V0;
-EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume stop $V0
+EXPECT 'Stopped' volinfo_field $V0 'Status'
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
+TEST $CLI volume delete $V0
+TEST ! $CLI volume info $V0
cleanup;