Diffstat (limited to 'tests/line-coverage')
-rw-r--r--  tests/line-coverage/afr-heal-info.t                     43
-rwxr-xr-x  tests/line-coverage/arbiter-coverage.t                  32
-rw-r--r--  tests/line-coverage/cli-peer-and-volume-operations.t   135
-rw-r--r--  tests/line-coverage/cli-volume-top-profile-coverage.t   62
-rwxr-xr-x  tests/line-coverage/errorgen-coverage.t                 42
-rw-r--r--  tests/line-coverage/log-and-brick-ops-negative-case.t   82
-rwxr-xr-x  tests/line-coverage/meta-max-coverage.t                 33
-rw-r--r--  tests/line-coverage/namespace-linecoverage.t            39
-rwxr-xr-x  tests/line-coverage/old-protocol.t                      37
-rwxr-xr-x  tests/line-coverage/quiesce-coverage.t                  44
-rw-r--r--  tests/line-coverage/shard-coverage.t                    33
-rw-r--r--  tests/line-coverage/some-features-in-libglusterfs.t     67
-rw-r--r--  tests/line-coverage/volfile-with-all-graph-syntax.t     73
13 files changed, 722 insertions(+), 0 deletions(-)
diff --git a/tests/line-coverage/afr-heal-info.t b/tests/line-coverage/afr-heal-info.t
new file mode 100644
index 00000000000..182665917c4
--- /dev/null
+++ b/tests/line-coverage/afr-heal-info.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+#Exercise the 'volume heal info' CLI variants (xml, summary, split-brain)
+#while I/O runs in the background, with locking-scheme granular
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+
+function write_and_del_file {
+ dd of=$M0/a.txt if=/dev/zero bs=1024k count=100
+ rm -f $M0/b.txt
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 locking-scheme granular
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST touch $M0/a.txt $M0/b.txt
+write_and_del_file &
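+# background I/O runs in parallel with the heal-info commands below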
+touch $B0/f1 $B0/f2
+
+# All of the above is similar to basic/afr/heal-info.t
+
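+# Exercise the heal-info command variants, with and without --xml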
+TEST $CLI volume heal $V0 enable
+TEST $CLI volume heal $V0 info --xml
+TEST $CLI volume heal $V0 info summary
+TEST $CLI volume heal $V0 info summary --xml
+TEST $CLI volume heal $V0 info split-brain
+TEST $CLI volume heal $V0 info split-brain --xml
+
+TEST $CLI volume heal $V0 statistics heal-count
+
+# This may fail as the file is not in split-brain
+$CLI volume heal $V0 split-brain latest-mtime /a.txt
+
+TEST $CLI volume heal $V0 disable
+
+TEST $CLI volume stop $V0
+cleanup;
diff --git a/tests/line-coverage/arbiter-coverage.t b/tests/line-coverage/arbiter-coverage.t
new file mode 100755
index 00000000000..82b470141b5
--- /dev/null
+++ b/tests/line-coverage/arbiter-coverage.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 arbiter 1 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
+
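+# Build the gfapi coverage client locally so everything stays inside the
+# tests directory; the two runs below may fail, hence no TEST wrapper.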
+cp $(dirname ${0})/../basic/gfapi/glfsxmp-coverage.c glfsxmp.c
+build_tester ./glfsxmp.c -lgfapi
+$(dirname $0)/../basic/rpc-coverage.sh $M1 >/dev/null
+./glfsxmp $V0 $H0 >/dev/null
+
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
+## Finish up
+TEST $CLI volume stop $V0;
+
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/line-coverage/cli-peer-and-volume-operations.t b/tests/line-coverage/cli-peer-and-volume-operations.t
new file mode 100644
index 00000000000..0cf8dbe81f9
--- /dev/null
+++ b/tests/line-coverage/cli-peer-and-volume-operations.t
@@ -0,0 +1,135 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+. $(dirname $0)/../volume.rc
+
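+# count the peers that node $1 reports as 'Peer in Cluster (Connected)'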
+function peer_count {
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup
+
+TEST launch_cluster 3
+
+TEST $CLI_1 system uuid reset
+
+## basic peer commands
+TEST $CLI_1 peer probe $H2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
+
+#probe an unreachable node
+TEST kill_glusterd 3
+TEST ! $CLI_1 peer probe $H3
+
+#detach a node which is not part of the cluster
+TEST ! $CLI_1 peer detach $H3
+TEST ! $CLI_1 peer detach $H3 force
+
+TEST start_glusterd 3
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 3
+
+# probe a node which is already part of the cluster
+TEST $CLI_1 peer probe $H3
+
+#probe an invalid address
+TEST ! $CLI_1 peer probe 1024.1024.1024.1024
+
+TEST $CLI_1 pool list
+
+TEST $CLI_1 --help
+TEST $CLI_1 --version
+TEST $CLI_1 --print-logdir
+TEST $CLI_1 --print-statedumpdir
+
+# try an unrecognised command
+TEST ! $CLI_1 volume
+TEST pidof glusterd
+
+## all help commands
+TEST $CLI_1 global help
+TEST $CLI_1 help
+
+TEST $CLI_1 peer help
+TEST $CLI_1 volume help
+TEST $CLI_1 volume bitrot help
+TEST $CLI_1 volume quota help
+TEST $CLI_1 snapshot help
+
+## volume operations
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
+# create a volume with already existing volume name
+TEST ! $CLI_1 volume create $V0 $H1:$B1/$V1 $H2:$B2/$V1
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+# Mount the volume and create files
+TEST glusterfs -s $H1 --volfile-id $V0 $M1
+TEST touch $M1/file{1..100}
+
+#fails because $V0 is a plain distribute volume, hence not shd-compatible
+TEST ! $CLI_1 volume status $V0 shd
+
+#test explicitly provided options
+TEST $CLI_1 --timeout=120 --log-level=INFO volume status
+
+#change the timezone to a different one, to check the localtime-logging feature
+TEST export TZ='Asia/Kolkata'
+TEST restart_glusterd 1
+
+#localtime logging enable
+TEST $CLI_1 volume set all cluster.localtime-logging enable
+EXPECT '1' logging_time_check $LOGDIR
+
+#localtime logging disable
+TEST $CLI_1 volume set all cluster.localtime-logging disable
+EXPECT '0' logging_time_check $LOGDIR
+
+#changing timezone back to original timezone
+TEST export TZ='UTC'
+
+#negative tests for volume options
+#'set' option to enable quota/inode-quota is now deprecated
+TEST ! $CLI_1 volume set $V0 quota enable
+TEST ! $CLI_1 volume set $V0 inode-quota enable
+
+#invalid transport type 'rcp'
+TEST ! $CLI_1 volume set $V0 config.transport rcp
+
+#'op-version' option is not valid for a single volume
+TEST ! $CLI_1 volume set $V0 cluster.op-version 72000
+
+#'op-version' option can't be used with any other option
+TEST ! $CLI_1 volume set all cluster.localtime-logging disable cluster.op-version 72000
+
+#invalid format of 'op-version'
+TEST ! $CLI_1 volume set all cluster.op-version 72-000
+
+#provided 'op-version' value is greater than max allowed op-version
+op_version=$($CLI_1 volume get all cluster.max-op-version | awk 'NR==3 {print$2}')
+op_version=$((op_version+1000)) #this can be any number greater than 0
+TEST ! $CLI_1 volume set all cluster.op-version $op_version
+
+#provided 'op-version' value cannot be less than the current cluster op-version value
+TEST ! $CLI_1 volume set all cluster.op-version 00000
+
+# system commands
+TEST $CLI_1 system help
+TEST $CLI_1 system uuid get
+TEST $CLI_1 system getspec $V0
+TEST $CLI_1 system getwd
+TEST $CLI_1 system fsm log
+
+# These may fail, but they cover xdr functions and some
+# more code in cli/glusterd
+$CLI_1 system:: mount test local:/$V0
+$CLI_1 system:: umount $M0 lazy
+$CLI_1 system:: copy file options
+$CLI_1 system:: portmap brick2port $H0:$B0/brick
+$CLI_1 system:: uuid reset
+
+cleanup
diff --git a/tests/line-coverage/cli-volume-top-profile-coverage.t b/tests/line-coverage/cli-volume-top-profile-coverage.t
new file mode 100644
index 00000000000..35713c26faa
--- /dev/null
+++ b/tests/line-coverage/cli-volume-top-profile-coverage.t
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+. $(dirname $0)/../volume.rc
+
+cleanup
+
+# Creating cluster
+TEST launch_cluster 3
+
+# Probing peers
+TEST $CLI_1 peer probe $H2
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 3
+
+# Creating a volume and starting it.
+TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+TEST glusterfs -s $H1 --volfile-id $V0 $M1
+TEST touch $M1/file{1..100}
+
+# Testing volume top command with and without xml output
+function test_volume_top_cmds () {
+ local ret=0
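+ # accumulate the exit statuses; any failure makes the enclosing TEST fail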
+ declare -a top_cmds=("read" "open" "write" "opendir" "readdir")
+ for cmd in "${top_cmds[@]}"; do
+ $CLI_1 volume top $V0 $cmd
+ (( ret += $? ))
+ $CLI_1 volume top $V0 clear
+ (( ret += $? ))
+ $CLI_1 volume top $V0 $cmd --xml
+ (( ret += $? ))
+ $CLI_1 volume top $V0 $cmd brick $H1:$B1/$V0
+ (( ret += $? ))
+ $CLI_1 volume top $V0 clear brick $H1:$B1/$V0
+ (( ret += $? ))
+ $CLI_1 volume top $V0 $cmd brick $H1:$B1/$V0 --xml
+ (( ret += $? ))
+ done
+ return $ret
+}
+
+# Testing volume profile command with and without xml
+function test_volume_profile_cmds () {
+ local ret=0
+ declare -a profile_cmds=("start" "info" "info peek" "info cumulative" "info clear" "info incremental peek" "stop")
+ for cmd in "${profile_cmds[@]}"; do
+ $CLI_1 volume profile $V0 $cmd
+ (( ret += $? ))
+ $CLI_1 volume profile $V0 $cmd --xml
+ (( ret += $? ))
+ done
+ return $ret
+}
+
+TEST test_volume_top_cmds;
+TEST test_volume_profile_cmds;
+
+cleanup
diff --git a/tests/line-coverage/errorgen-coverage.t b/tests/line-coverage/errorgen-coverage.t
new file mode 100755
index 00000000000..f4622428d79
--- /dev/null
+++ b/tests/line-coverage/errorgen-coverage.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+# Raised because this test runs 10 iterations of rpc-coverage and glfsxmp against error-gen
+SCRIPT_TIMEOUT=600
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+TEST $CLI volume set $V0 error-gen posix;
+TEST $CLI volume set $V0 debug.error-failure 3%;
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
+
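+# Build the gfapi coverage client locally so everything stays inside the
+# tests directory.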
+cp $(dirname ${0})/../basic/gfapi/glfsxmp-coverage.c glfsxmp.c
+build_tester ./glfsxmp.c -lgfapi
+for i in $(seq 1 10); do
+ # with error-gen loaded, these two commands may fail, so
+ # there is no need to test them for success
+ $(dirname $0)/../basic/rpc-coverage.sh $M1 >/dev/null
+ ./glfsxmp $V0 $H0 >/dev/null
+done
+
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
+## Finish up
+TEST $CLI volume stop $V0;
+
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/line-coverage/log-and-brick-ops-negative-case.t b/tests/line-coverage/log-and-brick-ops-negative-case.t
new file mode 100644
index 00000000000..d86cb452282
--- /dev/null
+++ b/tests/line-coverage/log-and-brick-ops-negative-case.t
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup
+TEST glusterd
+TEST pidof glusterd
+
+#create volumes
+TEST $CLI volume create ${V0}_1 $H0:$B0/v{1..2}
+
+TEST $CLI volume create ${V0}_2 replica 3 arbiter 1 $H0:$B0/v{3..5}
+
+TEST $CLI volume create ${V0}_3 disperse 3 redundancy 1 $H0:$B0/v{6..8}
+TEST $CLI volume start ${V0}_3
+EXPECT 'Started' volinfo_field ${V0}_3 'Status'
+
+TEST $CLI volume create ${V0}_4 replica 3 $H0:$B0/v{9..14}
+TEST $CLI volume start ${V0}_4
+EXPECT 'Started' volinfo_field ${V0}_4 'Status'
+
+#log rotate option
+#provided volume does not exist
+TEST ! $CLI volume log ${V0}_5 rotate
+
+#volume must be started before using log rotate option
+TEST ! $CLI volume log ${V0}_1 rotate
+TEST $CLI volume start ${V0}_1
+EXPECT 'Started' volinfo_field ${V0}_1 'Status'
+
+#incorrect brick provided for the volume
+TEST ! $CLI volume log ${V0}_1 rotate $H0:$B0/v15
+
+#add-brick operations
+#volume must be in started state to increase the replica count
+TEST ! $CLI volume add-brick ${V0}_2 replica 4 $H0:$B0/v15
+TEST $CLI volume start ${V0}_2
+EXPECT 'Started' volinfo_field ${V0}_2 'Status'
+
+#incorrect number of bricks for a replica 4 volume
+TEST ! $CLI volume add-brick ${V0}_1 replica 4 $H0:$B0/v15
+
+#replica count provided is less than the current replica count
+TEST ! $CLI volume add-brick ${V0}_2 replica 2 $H0:$B0/v15
+
+#converting a dispersed volume to replicated-dispersed is not possible
+TEST ! $CLI volume add-brick ${V0}_3 replica 2 $H0:$B0/v15
+
+#remove-brick operations
+#replica count option provided for dispersed vol
+TEST ! $CLI volume remove-brick ${V0}_3 replica 2 $H0:$B0/v8 start
+
+#given replica count is greater than the current replica count
+TEST ! $CLI volume remove-brick ${V0}_2 replica 4 $H0:$B0/v5 start
+
+#the number of bricks to be removed must be a multiple of the replica count
+TEST ! $CLI volume remove-brick ${V0}_2 replica 3 $H0:$B0/v{3..4} start
+
+#too few bricks given to reduce the replica count
+TEST ! $CLI volume remove-brick ${V0}_2 replica 1 $H0:$B0/v3 start
+
+#bricks should be from different subvolumes
+TEST ! $CLI volume remove-brick ${V0}_4 replica 2 $H0:$B0/v{13..14} start
+
+#arbiter must be removed to reduce replica count
+TEST ! $CLI volume remove-brick ${V0}_2 replica 1 $H0:$B0/v{3..4} start
+
+#removal of bricks is not allowed without reducing the replica count explicitly
+TEST ! $CLI volume remove-brick ${V0}_2 replica 3 $H0:$B0/v{3..5} start
+
+#incorrect brick for given vol
+TEST ! $CLI volume remove-brick ${V0}_1 $H0:$B0/v15 start
+
+#removing all the bricks is not allowed
+TEST ! $CLI volume remove-brick ${V0}_1 $H0:$B0/v{1..2} start
+
+#volume must not be in stopped state while removing bricks
+TEST $CLI volume stop ${V0}_1
+TEST ! $CLI volume remove-brick ${V0}_1 $H0:$B0/v1 start
+
+cleanup
\ No newline at end of file
diff --git a/tests/line-coverage/meta-max-coverage.t b/tests/line-coverage/meta-max-coverage.t
new file mode 100755
index 00000000000..1cc07610aa7
--- /dev/null
+++ b/tests/line-coverage/meta-max-coverage.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}
+TEST $CLI volume start $V0;
+
+## Mount FUSE
+TEST $GFS -s $H0 --volfile-id $V0 $M1
+
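+# .meta is the virtual directory exposed by the meta xlator (a /proc-like interface)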
+TEST stat $M1/.meta/
+
+# expect failures in rpc-coverage.sh execution; the result is ignored.
+res=$($(dirname $0)/../basic/rpc-coverage.sh $M1/.meta)
+
+
+# Expect errors here, hence no need to 'check for success'
+for file in $(find $M1/.meta -type f -print); do
+ cat $file >/dev/null
+ echo 1 > $file
+ echo hello > $file
+done
+
+TEST umount $M1
+
+cleanup;
diff --git a/tests/line-coverage/namespace-linecoverage.t b/tests/line-coverage/namespace-linecoverage.t
new file mode 100644
index 00000000000..8de6a0f279b
--- /dev/null
+++ b/tests/line-coverage/namespace-linecoverage.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+cleanup;
+
+TEST glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5,6,7,8}
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 cluster.read-subvolume-index 0
+TEST $CLI volume set $V0 features.tag-namespaces on
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 storage.build-pgfid on
+
+sleep 2
+
+## Mount FUSE
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
+
+
+mkdir -p $M1/namespace
+
+# subvol_1 = bar, subvol_2 = foo, subvol_3 = hey
+# Test create, write (tagged by loc, fd respectively).
+touch $M1/namespace/{bar,foo,hey}
+
+open $M1/namespace/hey
+
+## TODO: the best way to increase coverage is a gfapi program
+## which covers the maximum number of fops
+TEST $(dirname $0)/../basic/rpc-coverage.sh $M1
+
+TEST cp $(dirname ${0})/../basic/gfapi/glfsxmp-coverage.c glfsxmp.c
+TEST build_tester ./glfsxmp.c -lgfapi
+TEST ./glfsxmp $V0 $H0
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
+cleanup;
diff --git a/tests/line-coverage/old-protocol.t b/tests/line-coverage/old-protocol.t
new file mode 100755
index 00000000000..5676e5636db
--- /dev/null
+++ b/tests/line-coverage/old-protocol.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '6' brick_count $V0
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+file="/var/lib/glusterd/vols/$V0/trusted-$V0.tcp-fuse.vol"
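+# Inject 'option testing.old-protocol true' into the generated client volfile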
+sed -i -e 's$send-gids true$send-gids true\n option testing.old-protocol true$g' $file
+
+## Mount FUSE
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
+
+## TODO: the best way to increase coverage is a gfapi program
+## which covers the maximum number of fops
+TEST $(dirname $0)/../basic/rpc-coverage.sh $M1
+
+TEST cp $(dirname ${0})/../basic/gfapi/glfsxmp-coverage.c glfsxmp.c
+TEST build_tester ./glfsxmp.c -lgfapi
+TEST ./glfsxmp $V0 $H0
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
+cleanup;
diff --git a/tests/line-coverage/quiesce-coverage.t b/tests/line-coverage/quiesce-coverage.t
new file mode 100755
index 00000000000..ca29343451e
--- /dev/null
+++ b/tests/line-coverage/quiesce-coverage.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '6' brick_count $V0
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+file="/var/lib/glusterd/vols/$V0/trusted-$V0.tcp-fuse.vol"
+
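+# Append a quiesce xlator on top of the client graph so its code paths are exercised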
+cat >> ${file} <<EOF
+
+volume quiesce
+ type features/quiesce
+ subvolumes ${V0}
+end-volume
+EOF
+
+## Mount FUSE
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
+
+## TODO: the best way to increase coverage is a gfapi program
+## which covers the maximum number of fops
+TEST $(dirname $0)/../basic/rpc-coverage.sh $M1
+
+TEST cp $(dirname ${0})/../basic/gfapi/glfsxmp-coverage.c glfsxmp.c
+TEST build_tester ./glfsxmp.c -lgfapi
+TEST ./glfsxmp $V0 $H0
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
+cleanup;
diff --git a/tests/line-coverage/shard-coverage.t b/tests/line-coverage/shard-coverage.t
new file mode 100644
index 00000000000..1797999c146
--- /dev/null
+++ b/tests/line-coverage/shard-coverage.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick
+TEST $CLI volume set $V0 features.shard on
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+# Copy the file locally and build it here, so that everything remains
+# inside the tests directory.
+TEST cp $(dirname ${0})/../basic/gfapi/glfsxmp-coverage.c glfsxmp.c
+TEST build_tester ./glfsxmp.c -lgfapi
+TEST ./glfsxmp $V0 $H0
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
+
+TEST $(dirname $0)/../basic/rpc-coverage.sh $M1
+
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/line-coverage/some-features-in-libglusterfs.t b/tests/line-coverage/some-features-in-libglusterfs.t
new file mode 100644
index 00000000000..5719c4e039c
--- /dev/null
+++ b/tests/line-coverage/some-features-in-libglusterfs.t
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+function grep_string {
+ local f=$1
+ local string=$2
+ # The output of the test script also shows up in the log; ignore those lines.
+ echo $(grep ${string} ${f} | grep -v "++++++" | wc -l)
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}
+TEST $CLI volume set $V0 client-log-level TRACE
+TEST $CLI volume start $V0;
+
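+# Use a dedicated log file so the signal handlers' log messages can be grepped below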
+log_file="$(gluster --print-logdir)/gluster.log"
+## Mount FUSE
+TEST $GFS -s $H0 --log-file $log_file --volfile-id $V0 $M1
+
+## Cover 'monitoring.c' here
+pgrep 'glusterfs' | xargs kill -USR2
+
+EXPECT_WITHIN 2 1 grep_string $log_file 'sig:USR2'
+
+## Also cover statedump
+pgrep 'glusterfs' | xargs kill -USR1
+
+EXPECT_WITHIN 2 1 grep_string $log_file 'sig:USR1'
+
+## Also cover SIGHUP
+pgrep 'glusterfs' | xargs kill -HUP
+
+EXPECT_WITHIN 2 1 grep_string $log_file 'sig:HUP'
+
+## Also cover SIGTERM
+pgrep 'glusterfs' | xargs kill -TERM
+
+EXPECT_WITHIN 2 1 grep_string $log_file 'cleanup_and_exit'
+
+# The SIGTERM above should have terminated the client and unmounted $M1.
+# force_umount $M1
+
+# TODO: the section below is commented out because sending SIGABRT makes the
+# process dump core, and our regression treats any test that leaves a core
+# as a failure.
+# FIXME: figure out a way to run this test: this part of the code executes
+# only on a coredump and is critical for debugging, so it must be kept
+# working.
+
+# # Restart client
+# TEST $GFS -s $H0 --log-file $log_file --volfile-id $V0 $M1
+#
+# ## Also cover SIGABRT
+# pgrep 'glusterfs ' | xargs kill -ABRT
+#
+# TEST [ 1 -eq $(grep 'pending frames' $log_file | wc -l) ]
+
+TEST rm $log_file
+
+cleanup;
diff --git a/tests/line-coverage/volfile-with-all-graph-syntax.t b/tests/line-coverage/volfile-with-all-graph-syntax.t
new file mode 100644
index 00000000000..b137432cceb
--- /dev/null
+++ b/tests/line-coverage/volfile-with-all-graph-syntax.t
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST mkdir -p $B0/test
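+# A hand-written volfile exercising single-line strings, multi-line strings
+# and back-tick substitution in the volfile parser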
+cat > $B0/test.vol <<EOF
+volume test
+ type storage/posix
+ option directory $B0/test
+ option multiple-line-string "I am
+ testing a feature of volfile graph.l"
+ option single-line-string "this is running on $H0"
+ option option-with-back-tick `date +%Y%m%d`
+end-volume
+EOF
+
+# This should succeed, but it will have some unknown options, which is OK.
+TEST glusterfs -f $B0/test.vol $M0;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0;
+
+# This should not succeed: 'end-volume' is missing
+cat > $B0/test.vol <<EOF
+volume test
+ type storage/posix
+EOF
+TEST ! glusterfs -f $B0/test.vol $M0;
+
+
+# This should not succeed: no 'volume <name>' declaration
+cat > $B0/test.vol <<EOF
+ type storage/posix
+end-volume
+EOF
+TEST ! glusterfs -f $B0/test.vol $M0;
+
+# This should not succeed: no 'type' given for the volume
+cat > $B0/test.vol <<EOF
+volume test
+end-volume
+EOF
+TEST ! glusterfs -f $B0/test.vol $M0;
+
+# This should not succeed: malformed 'option' line (and no 'type' given)
+cat > $B0/test.vol <<EOF
+volume test
+ option test and test
+end-volume
+EOF
+TEST ! glusterfs -f $B0/test.vol $M0;
+
+# This should not succeed: 'subvolumes' has no argument (and no 'type' given)
+cat > $B0/test.vol <<EOF
+volume test
+ subvolumes
+end-volume
+EOF
+TEST ! glusterfs -f $B0/test.vol $M0;
+
+# This should not succeed: 'new-option' is not a recognised volfile keyword
+cat > $B0/test.vol <<EOF
+volume test
+ type storage/posix
+ new-option key value
+ option directory $B0/test
+end-volume
+EOF
+TEST ! glusterfs -f $B0/test.vol $M0;
+
+cleanup;