Diffstat (limited to 'tests/bugs')
-rw-r--r--  tests/bugs/access-control/bug-1051896.c  94
-rw-r--r--  tests/bugs/access-control/bug-1051896.t  34
-rw-r--r--  tests/bugs/access-control/bug-1387241.c  18
-rw-r--r--  tests/bugs/access-control/bug-1387241.t  36
-rw-r--r--  tests/bugs/access-control/bug-887098-gmount-crash.t  42
-rw-r--r--  tests/bugs/access-control/bug-958691.t  52
-rwxr-xr-x  tests/bugs/bitrot/1207029-bitrot-daemon-should-start-on-valid-node.t  57
-rw-r--r--  tests/bugs/bitrot/1209751-bitrot-scrub-tunable-reset.t  48
-rw-r--r--  tests/bugs/bitrot/1209752-volume-status-should-show-bitrot-scrub-info.t  70
-rw-r--r--  tests/bugs/bitrot/1209818-vol-info-show-scrub-process-properly.t  48
-rw-r--r--  tests/bugs/bitrot/bug-1210684-scrub-pause-resume-error-handling.t  39
-rw-r--r--  tests/bugs/bitrot/bug-1227996.t  57
-rw-r--r--  tests/bugs/bitrot/bug-1228680.t  48
-rw-r--r--  tests/bugs/bitrot/bug-1229134-bitd-not-support-vol-set.t  38
-rw-r--r--  tests/bugs/bitrot/bug-1245981.t  55
-rw-r--r--  tests/bugs/bitrot/bug-1288490.t  48
-rwxr-xr-x  tests/bugs/bug-1064147.t  72
-rw-r--r--  tests/bugs/bug-1110262.t  72
-rw-r--r--  tests/bugs/bug-1138841.t  25
-rwxr-xr-x  tests/bugs/bug-1258069.t  30
-rw-r--r--  tests/bugs/bug-1368312.t  86
-rw-r--r--  tests/bugs/bug-1371806.t  81
-rw-r--r--  tests/bugs/bug-1371806_1.t  48
-rw-r--r--  tests/bugs/bug-1371806_2.t  52
-rw-r--r--  tests/bugs/bug-1371806_3.t  63
-rw-r--r--  tests/bugs/bug-1371806_acl.t  96
-rw-r--r--  tests/bugs/bug-1584517.t  70
-rw-r--r--  tests/bugs/bug-1620580.t  67
-rw-r--r--  tests/bugs/bug-1694920.t  63
-rw-r--r--  tests/bugs/bug-1702299.t  67
-rwxr-xr-x  tests/bugs/changelog/bug-1208470.t  40
-rw-r--r--  tests/bugs/changelog/bug-1211327.t  46
-rw-r--r--  tests/bugs/changelog/bug-1225542.t  30
-rw-r--r--  tests/bugs/changelog/bug-1321955.t  60
-rw-r--r--  tests/bugs/cli/bug-1004218.t  26
-rw-r--r--  tests/bugs/cli/bug-1022905.t  37
-rw-r--r--  tests/bugs/cli/bug-1030580.t  53
-rw-r--r--  tests/bugs/cli/bug-1047378.t  12
-rw-r--r--  tests/bugs/cli/bug-1047416.t  71
-rw-r--r--  tests/bugs/cli/bug-1077682.t  24
-rwxr-xr-x  tests/bugs/cli/bug-1087487.t  23
-rw-r--r--  tests/bugs/cli/bug-1113476.t  45
-rw-r--r--  tests/bugs/cli/bug-1169302.c  79
-rwxr-xr-x  tests/bugs/cli/bug-1169302.t  55
-rwxr-xr-x  tests/bugs/cli/bug-1320388.t  44
-rw-r--r--  tests/bugs/cli/bug-1353156-get-state-cli-validations.t  147
-rw-r--r--  tests/bugs/cli/bug-1378842-volume-get-all.t  23
-rw-r--r--  tests/bugs/cli/bug-764638.t  13
-rwxr-xr-x  tests/bugs/cli/bug-822830.t  64
-rw-r--r--  tests/bugs/cli/bug-867252.t  41
-rwxr-xr-x  tests/bugs/cli/bug-921215.t  13
-rw-r--r--  tests/bugs/cli/bug-949298.t  12
-rw-r--r--  tests/bugs/cli/bug-961307.t  20
-rwxr-xr-x  tests/bugs/cli/bug-969193.t  13
-rw-r--r--  tests/bugs/cli/bug-977246.t  21
-rw-r--r--  tests/bugs/cli/bug-982174.t  36
-rw-r--r--  tests/bugs/cli/bug-983317-volume-get.t  45
-rw-r--r--  tests/bugs/core/949327.t  23
-rw-r--r--  tests/bugs/core/brick-mux-fd-cleanup.t  78
-rw-r--r--  tests/bugs/core/bug-1110917.t  39
-rw-r--r--  tests/bugs/core/bug-1111557.t  12
-rw-r--r--  tests/bugs/core/bug-1117951.t  24
-rw-r--r--  tests/bugs/core/bug-1119582.t  24
-rw-r--r--  tests/bugs/core/bug-1135514-allow-setxattr-with-null-value.t  18
-rwxr-xr-x  tests/bugs/core/bug-1168803-snapd-option-validation-fix.t  30
-rwxr-xr-x  tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t  42
-rw-r--r--  tests/bugs/core/bug-1421721-mpx-toggle.t  25
-rw-r--r--  tests/bugs/core/bug-1432542-mpx-restart-crash.t  116
-rw-r--r--  tests/bugs/core/bug-1650403.t  113
-rw-r--r--  tests/bugs/core/bug-1699025-brick-mux-detach-brick-fd-issue.t  33
-rw-r--r--  tests/bugs/core/bug-834465.c  60
-rwxr-xr-x  tests/bugs/core/bug-834465.t  51
-rw-r--r--  tests/bugs/core/bug-845213.t  19
-rw-r--r--  tests/bugs/core/bug-903336.t  13
-rwxr-xr-x  tests/bugs/core/bug-908146.t  30
-rw-r--r--  tests/bugs/core/bug-913544.t  24
-rwxr-xr-x  tests/bugs/core/bug-924075.t  23
-rwxr-xr-x  tests/bugs/core/bug-927616.t  65
-rw-r--r--  tests/bugs/core/bug-949242.t  55
-rw-r--r--  tests/bugs/core/bug-986429.t  19
-rwxr-xr-x  tests/bugs/core/io-stats-1322825.t  67
-rwxr-xr-x  tests/bugs/core/log-bug-1362520.t  43
-rwxr-xr-x  tests/bugs/ctime/issue-832.t  32
-rw-r--r--  tests/bugs/distribute/bug-1042725.t  49
-rwxr-xr-x  tests/bugs/distribute/bug-1066798.t  88
-rwxr-xr-x  tests/bugs/distribute/bug-1086228.t  34
-rwxr-xr-x  tests/bugs/distribute/bug-1088231.t  173
-rw-r--r--  tests/bugs/distribute/bug-1099890.t  130
-rwxr-xr-x  tests/bugs/distribute/bug-1125824.t  103
-rwxr-xr-x  tests/bugs/distribute/bug-1161156.t  58
-rwxr-xr-x  tests/bugs/distribute/bug-1161311.t  164
-rw-r--r--  tests/bugs/distribute/bug-1190734.t  93
-rw-r--r--  tests/bugs/distribute/bug-1193636.c  68
-rw-r--r--  tests/bugs/distribute/bug-1193636.t  74
-rwxr-xr-x  tests/bugs/distribute/bug-1204140.t  22
-rw-r--r--  tests/bugs/distribute/bug-1247563.t  62
-rw-r--r--  tests/bugs/distribute/bug-1368012.t  51
-rw-r--r--  tests/bugs/distribute/bug-1389697.t  42
-rw-r--r--  tests/bugs/distribute/bug-1543279.t  67
-rw-r--r--  tests/bugs/distribute/bug-1600379.t  54
-rw-r--r--  tests/bugs/distribute/bug-1667804.t  63
-rwxr-xr-x  tests/bugs/distribute/bug-1786679.t  69
-rwxr-xr-x  tests/bugs/distribute/bug-853258.t  47
-rw-r--r--  tests/bugs/distribute/bug-860663.c  41
-rw-r--r--  tests/bugs/distribute/bug-860663.t  56
-rw-r--r--  tests/bugs/distribute/bug-862967.t  59
-rwxr-xr-x  tests/bugs/distribute/bug-882278.t  73
-rwxr-xr-x  tests/bugs/distribute/bug-884455.t  84
-rwxr-xr-x  tests/bugs/distribute/bug-884597.t  173
-rwxr-xr-x  tests/bugs/distribute/bug-907072.t  49
-rwxr-xr-x  tests/bugs/distribute/bug-912564.t  92
-rwxr-xr-x  tests/bugs/distribute/bug-915554.t  76
-rwxr-xr-x  tests/bugs/distribute/bug-921408.t  90
-rwxr-xr-x  tests/bugs/distribute/bug-924265.t  35
-rw-r--r--  tests/bugs/distribute/bug-961615.t  34
-rwxr-xr-x  tests/bugs/distribute/bug-973073.t  48
-rwxr-xr-x  tests/bugs/distribute/issue-1327.t  33
-rwxr-xr-x  tests/bugs/distribute/overlap.py  59
-rw-r--r--  tests/bugs/ec/bug-1161621.t  42
-rw-r--r--  tests/bugs/ec/bug-1161886.c  53
-rw-r--r--  tests/bugs/ec/bug-1161886.t  44
-rw-r--r--  tests/bugs/ec/bug-1179050.t  23
-rw-r--r--  tests/bugs/ec/bug-1187474.t  43
-rw-r--r--  tests/bugs/ec/bug-1188145.t  50
-rw-r--r--  tests/bugs/ec/bug-1227869.t  33
-rw-r--r--  tests/bugs/ec/bug-1236065.t  95
-rw-r--r--  tests/bugs/ec/bug-1251446.t  50
-rwxr-xr-x  tests/bugs/ec/bug-1304988.t  42
-rw-r--r--  tests/bugs/ec/bug-1547662.t  41
-rw-r--r--  tests/bugs/ec/bug-1699866-check-reopen-fd.t  34
-rw-r--r--  tests/bugs/ec/bug-1708156-honor-inodelk-contention-notify-on-partial-locks.t  54
-rwxr-xr-x  tests/bugs/error-gen/bug-767095.t  51
-rw-r--r--  tests/bugs/fuse/bug-1030208.t  35
-rw-r--r--  tests/bugs/fuse/bug-1126048.c  43
-rwxr-xr-x  tests/bugs/fuse/bug-1126048.t  30
-rw-r--r--  tests/bugs/fuse/bug-1283103.t  59
-rw-r--r--  tests/bugs/fuse/bug-1309462.t  50
-rw-r--r--  tests/bugs/fuse/bug-1336818.t  52
-rwxr-xr-x  tests/bugs/fuse/bug-858215.t  79
-rw-r--r--  tests/bugs/fuse/bug-858488-min-free-disk.t  108
-rwxr-xr-x  tests/bugs/fuse/bug-924726.t  45
-rw-r--r--  tests/bugs/fuse/bug-963678.t  57
-rwxr-xr-x  tests/bugs/fuse/bug-983477.t  53
-rw-r--r--  tests/bugs/fuse/bug-985074.t  54
-rwxr-xr-x  tests/bugs/fuse/many-groups-for-acl.t  123
-rwxr-xr-x  tests/bugs/fuse/setup.sh  20
-rwxr-xr-x  tests/bugs/fuse/teardown.sh  5
-rw-r--r--  tests/bugs/geo-replication/bug-1111490.t  35
-rw-r--r--  tests/bugs/geo-replication/bug-1296496.t  44
-rwxr-xr-x  tests/bugs/geo-replication/bug-877293.t  41
-rw-r--r--  tests/bugs/gfapi/bug-1032894.t  33
-rw-r--r--  tests/bugs/gfapi/bug-1093594.c  311
-rwxr-xr-x  tests/bugs/gfapi/bug-1093594.t  22
-rwxr-xr-x  tests/bugs/gfapi/bug-1319374-THIS-crash.t  27
-rw-r--r--  tests/bugs/gfapi/bug-1319374.c  131
-rw-r--r--  tests/bugs/gfapi/bug-1447266/1460514.c  150
-rw-r--r--  tests/bugs/gfapi/bug-1447266/1460514.t  26
-rw-r--r--  tests/bugs/gfapi/bug-1447266/bug-1447266.c  107
-rw-r--r--  tests/bugs/gfapi/bug-1447266/bug-1447266.t  60
-rw-r--r--  tests/bugs/gfapi/bug-1630804/gfapi-bz1630804.c  112
-rw-r--r--  tests/bugs/gfapi/bug-1630804/gfapi-bz1630804.t  25
-rw-r--r--  tests/bugs/gfapi/glfs_vol_set_IO_ERR.c  163
-rwxr-xr-x  tests/bugs/gfapi/glfs_vol_set_IO_ERR.t  20
-rwxr-xr-x  tests/bugs/glusterd/859927/repl.t  59
-rw-r--r--  tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t  110
-rw-r--r--  tests/bugs/glusterd/brick-mux-validation-in-cluster.t  108
-rw-r--r--  tests/bugs/glusterd/brick-mux-validation.t  104
-rw-r--r--  tests/bugs/glusterd/brick-mux.t  81
-rw-r--r--  tests/bugs/glusterd/brick-order-check-add-brick.t  61
-rwxr-xr-x  tests/bugs/glusterd/bug-1070734.t  80
-rw-r--r--  tests/bugs/glusterd/bug-1085330-and-bug-916549.t  93
-rwxr-xr-x  tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t  93
-rw-r--r--  tests/bugs/glusterd/bug-1238706-daemons-stop-on-peer-cleanup.t  44
-rw-r--r--  tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t  38
-rw-r--r--  tests/bugs/glusterd/bug-1482906-peer-file-blank-line.t  29
-rw-r--r--  tests/bugs/glusterd/bug-1595320.t  93
-rw-r--r--  tests/bugs/glusterd/bug-1696046.t  113
-rw-r--r--  tests/bugs/glusterd/bug-1699339.t  73
-rw-r--r--  tests/bugs/glusterd/bug-1720566.t  50
-rw-r--r--  tests/bugs/glusterd/bug-824753-file-locker.c  46
-rwxr-xr-x  tests/bugs/glusterd/bug-824753.t  45
-rw-r--r--  tests/bugs/glusterd/bug-948729/bug-948729-force.t  103
-rw-r--r--  tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t  77
-rw-r--r--  tests/bugs/glusterd/bug-948729/bug-948729.t  80
-rw-r--r--  tests/bugs/glusterd/bug-949930.t  29
-rw-r--r--  tests/bugs/glusterd/check_elastic_server.t  63
-rw-r--r--  tests/bugs/glusterd/daemon-log-level-option.t  93
-rw-r--r--  tests/bugs/glusterd/df-results-post-replace-brick-operations.t  61
-rw-r--r--  tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t  71
-rw-r--r--  tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t  115
-rw-r--r--  tests/bugs/glusterd/optimized-basic-testcases.t  305
-rw-r--r--  tests/bugs/glusterd/quorum-validation.t  122
-rw-r--r--  tests/bugs/glusterd/rebalance-in-cluster.t  52
-rw-r--r--  tests/bugs/glusterd/rebalance-operations-in-single-node.t  131
-rw-r--r--  tests/bugs/glusterd/remove-brick-in-cluster.t  60
-rw-r--r--  tests/bugs/glusterd/remove-brick-testcases.t  119
-rw-r--r--  tests/bugs/glusterd/remove-brick-validation.t  68
-rw-r--r--  tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t  80
-rw-r--r--  tests/bugs/glusterd/replace-brick-operations.t  48
-rw-r--r--  tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t  63
-rw-r--r--  tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t  54
-rw-r--r--  tests/bugs/glusterd/snapshot-operations.t  50
-rw-r--r--  tests/bugs/glusterd/sync-post-glusterd-restart.t  54
-rw-r--r--  tests/bugs/glusterd/validating-options-for-replicated-volume.t  142
-rw-r--r--  tests/bugs/glusterd/validating-server-quorum.t  125
-rwxr-xr-x  tests/bugs/glusterfs-server/bug-852147.t  85
-rwxr-xr-x  tests/bugs/glusterfs-server/bug-861542.t  50
-rwxr-xr-x  tests/bugs/glusterfs-server/bug-864222.t  30
-rw-r--r--  tests/bugs/glusterfs-server/bug-873549.t  17
-rwxr-xr-x  tests/bugs/glusterfs-server/bug-877992.t  63
-rwxr-xr-x  tests/bugs/glusterfs-server/bug-887145.t  99
-rw-r--r--  tests/bugs/glusterfs-server/bug-889996.t  19
-rwxr-xr-x  tests/bugs/glusterfs-server/bug-904300.t  65
-rw-r--r--  tests/bugs/glusterfs-server/bug-905864.c  83
-rw-r--r--  tests/bugs/glusterfs-server/bug-905864.t  32
-rwxr-xr-x  tests/bugs/glusterfs-server/bug-912297.t  44
-rw-r--r--  tests/bugs/glusterfs/bug-1482528.t  100
-rwxr-xr-x  tests/bugs/glusterfs/bug-811493.t  18
-rwxr-xr-x  tests/bugs/glusterfs/bug-844688.t  55
-rw-r--r--  tests/bugs/glusterfs/bug-848251.t  52
-rwxr-xr-x  tests/bugs/glusterfs/bug-853690.t  91
-rw-r--r--  tests/bugs/glusterfs/bug-856455.t  43
-rw-r--r--  tests/bugs/glusterfs/bug-860297.t  13
-rw-r--r--  tests/bugs/glusterfs/bug-861015-index.t  36
-rw-r--r--  tests/bugs/glusterfs/bug-861015-log.t  29
-rw-r--r--  tests/bugs/glusterfs/bug-866459.t  44
-rw-r--r--  tests/bugs/glusterfs/bug-867253.t  64
-rw-r--r--  tests/bugs/glusterfs/bug-869724.t  37
-rwxr-xr-x  tests/bugs/glusterfs/bug-872923.t  59
-rw-r--r--  tests/bugs/glusterfs/bug-873962-spb.t  40
-rwxr-xr-x  tests/bugs/glusterfs/bug-873962.t  107
-rwxr-xr-x  tests/bugs/glusterfs/bug-879490.t  37
-rwxr-xr-x  tests/bugs/glusterfs/bug-879494.t  37
-rwxr-xr-x  tests/bugs/glusterfs/bug-892730.t  77
-rw-r--r--  tests/bugs/glusterfs/bug-893338.t  34
-rwxr-xr-x  tests/bugs/glusterfs/bug-893378.t  75
-rw-r--r--  tests/bugs/glusterfs/bug-895235.t  23
-rwxr-xr-x  tests/bugs/glusterfs/bug-896431.t  89
-rwxr-xr-x  tests/bugs/glusterfs/bug-902610.t  66
-rw-r--r--  tests/bugs/glusterfs/bug-906646.t  97
-rw-r--r--  tests/bugs/glusterfs/getlk_owner.c  124
-rw-r--r--  tests/bugs/heal-symlinks.t  65
-rw-r--r--  tests/bugs/index/bug-1559004-EMLINK-handling.t  91
-rw-r--r--  tests/bugs/io-cache/bug-858242.c  84
-rwxr-xr-x  tests/bugs/io-cache/bug-858242.t  28
-rw-r--r--  tests/bugs/io-cache/bug-read-hang.c  125
-rwxr-xr-x  tests/bugs/io-cache/bug-read-hang.t  34
-rwxr-xr-x  tests/bugs/io-stats/bug-1598548.t  41
-rwxr-xr-x  tests/bugs/logging/bug-823081.t  41
-rwxr-xr-x  tests/bugs/md-cache/afr-stale-read.t  44
-rwxr-xr-x  tests/bugs/md-cache/bug-1211863.t  69
-rwxr-xr-x  tests/bugs/md-cache/bug-1211863_unlink.t  49
-rw-r--r--  tests/bugs/md-cache/bug-1476324.t  27
-rwxr-xr-x  tests/bugs/md-cache/bug-1632503.t  24
-rw-r--r--  tests/bugs/md-cache/bug-1726205.t  22
-rwxr-xr-x  tests/bugs/md-cache/setxattr-prepoststat.t  38
-rwxr-xr-x  tests/bugs/nfs/bug-1053579.t  114
-rw-r--r--  tests/bugs/nfs/bug-1143880-fix-gNFSd-auth-crash.t  24
-rw-r--r--  tests/bugs/nfs/bug-1157223-symlink-mounting.t  126
-rw-r--r--  tests/bugs/nfs/bug-1161092-nfs-acls.t  39
-rwxr-xr-x  tests/bugs/nfs/bug-1166862.t  69
-rw-r--r--  tests/bugs/nfs/bug-1210338.c  31
-rw-r--r--  tests/bugs/nfs/bug-1210338.t  30
-rwxr-xr-x  tests/bugs/nfs/bug-1302948.t  13
-rwxr-xr-x  tests/bugs/nfs/bug-847622.t  39
-rwxr-xr-x  tests/bugs/nfs/bug-877885.t  39
-rwxr-xr-x  tests/bugs/nfs/bug-904065.t  100
-rwxr-xr-x  tests/bugs/nfs/bug-915280.t  54
-rwxr-xr-x  tests/bugs/nfs/bug-970070.t  13
-rwxr-xr-x  tests/bugs/nfs/bug-974972.t  41
-rw-r--r--  tests/bugs/nfs/showmount-many-clients.t  43
-rwxr-xr-x  tests/bugs/nfs/socket-as-fifo.py  33
-rw-r--r--  tests/bugs/nfs/socket-as-fifo.t  25
-rw-r--r--  tests/bugs/nfs/subdir-trailing-slash.t  32
-rwxr-xr-x  tests/bugs/nfs/zero-atime.t  33
-rwxr-xr-x  tests/bugs/nl-cache/bug-1451588.t  25
-rw-r--r--  tests/bugs/posix/bug-1034716.t  60
-rwxr-xr-x  tests/bugs/posix/bug-1040275-brick-uid-reset-on-volume-restart.t  60
-rwxr-xr-x  tests/bugs/posix/bug-1113960.t  98
-rwxr-xr-x  tests/bugs/posix/bug-1122028.t  51
-rw-r--r--  tests/bugs/posix/bug-1175711.c  37
-rwxr-xr-x  tests/bugs/posix/bug-1175711.t  30
-rw-r--r--  tests/bugs/posix/bug-1360679.t  36
-rwxr-xr-x  tests/bugs/posix/bug-1619720.t  58
-rw-r--r--  tests/bugs/posix/bug-1651445.t  54
-rw-r--r--  tests/bugs/posix/bug-765380.t  39
-rwxr-xr-x  tests/bugs/posix/bug-990028.t  157
-rw-r--r--  tests/bugs/posix/bug-gfid-path.t  70
-rw-r--r--  tests/bugs/posix/disallow-gfid-volumeid-fremovexattr.c  104
-rwxr-xr-x  tests/bugs/posix/disallow-gfid-volumeid-fremovexattr.t  21
-rw-r--r--  tests/bugs/posix/disallow-gfid-volumeid-removexattr.t  26
-rw-r--r--  tests/bugs/protocol/bug-1321578.t  82
-rw-r--r--  tests/bugs/protocol/bug-1390914.t  36
-rw-r--r--  tests/bugs/protocol/bug-1433815-auth-allow.t  40
-rwxr-xr-x  tests/bugs/protocol/bug-762989.t  40
-rwxr-xr-x  tests/bugs/protocol/bug-808400-dist.t  32
-rw-r--r--  tests/bugs/protocol/bug-808400-fcntl.c  124
-rw-r--r--  tests/bugs/protocol/bug-808400-flock.c  100
-rwxr-xr-x  tests/bugs/protocol/bug-808400-repl.t  31
-rwxr-xr-x  tests/bugs/protocol/bug-808400.t  35
-rwxr-xr-x  tests/bugs/quick-read/bug-846240.t  59
-rwxr-xr-x  tests/bugs/quick-read/bz1523599/bz1523599.t  32
-rw-r--r--  tests/bugs/quick-read/bz1523599/test_bz1523599.c  198
-rw-r--r--  tests/bugs/quota/afr-quota-xattr-mdata-heal.t  140
-rw-r--r--  tests/bugs/quota/bug-1035576.t  53
-rw-r--r--  tests/bugs/quota/bug-1038598.t  52
-rw-r--r--  tests/bugs/quota/bug-1087198.t  86
-rwxr-xr-x  tests/bugs/quota/bug-1104692.t  26
-rw-r--r--  tests/bugs/quota/bug-1153964.t  81
-rw-r--r--  tests/bugs/quota/bug-1178130.t  44
-rw-r--r--  tests/bugs/quota/bug-1235182.t  61
-rw-r--r--  tests/bugs/quota/bug-1243798.t  46
-rw-r--r--  tests/bugs/quota/bug-1250582-volume-reset-should-not-remove-quota-quota-deem-statfs.t  53
-rw-r--r--  tests/bugs/quota/bug-1260545.t  53
-rw-r--r--  tests/bugs/quota/bug-1287996.t  21
-rw-r--r--  tests/bugs/quota/bug-1292020.t  28
-rw-r--r--  tests/bugs/quota/bug-1293601.t  33
-rw-r--r--  tests/bugs/read-only/bug-1134822-read-only-default-in-graph.t  67
-rw-r--r--  tests/bugs/readdir-ahead/bug-1390050.c  72
-rw-r--r--  tests/bugs/readdir-ahead/bug-1390050.t  29
-rwxr-xr-x  tests/bugs/readdir-ahead/bug-1436090.t  44
-rwxr-xr-x  tests/bugs/readdir-ahead/bug-1439640.t  31
-rwxr-xr-x  tests/bugs/readdir-ahead/bug-1446516.t  21
-rwxr-xr-x  tests/bugs/readdir-ahead/bug-1512437.t  23
-rw-r--r--  tests/bugs/readdir-ahead/bug-1670253-consistent-metadata.t  23
-rw-r--r--  tests/bugs/replicate/886998/strict-readdir.t  52
-rwxr-xr-x  tests/bugs/replicate/bug-1015990-rep.t  61
-rwxr-xr-x  tests/bugs/replicate/bug-1015990.t  69
-rw-r--r--  tests/bugs/replicate/bug-1032927.t  32
-rwxr-xr-x  tests/bugs/replicate/bug-1037501.t  104
-rwxr-xr-x  tests/bugs/replicate/bug-1046624.t  47
-rw-r--r--  tests/bugs/replicate/bug-1058797.t  48
-rw-r--r--  tests/bugs/replicate/bug-1101647.t  31
-rw-r--r--  tests/bugs/replicate/bug-1130892.t  76
-rw-r--r--  tests/bugs/replicate/bug-1132102.t  28
-rw-r--r--  tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t  56
-rw-r--r--  tests/bugs/replicate/bug-1139230.t  58
-rw-r--r--  tests/bugs/replicate/bug-1180545.t  77
-rw-r--r--  tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t  57
-rw-r--r--  tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t  42
-rw-r--r--  tests/bugs/replicate/bug-1238398-split-brain-resolution.t  51
-rw-r--r--  tests/bugs/replicate/bug-1238508-self-heal.t  51
-rw-r--r--  tests/bugs/replicate/bug-1250170-fsync.c  56
-rw-r--r--  tests/bugs/replicate/bug-1250170-fsync.t  35
-rw-r--r--  tests/bugs/replicate/bug-1266876-allow-reset-brick-for-same-path.t  54
-rw-r--r--  tests/bugs/replicate/bug-1292379.t  58
-rw-r--r--  tests/bugs/replicate/bug-1297695.t  43
-rw-r--r--  tests/bugs/replicate/bug-1305031-block-reads-on-metadata-sbrain.t  40
-rw-r--r--  tests/bugs/replicate/bug-1325792.t  25
-rw-r--r--  tests/bugs/replicate/bug-1335652.t  29
-rw-r--r--  tests/bugs/replicate/bug-1340623-mkdir-fails-remove-brick-started.t  46
-rw-r--r--  tests/bugs/replicate/bug-1341650.t  63
-rw-r--r--  tests/bugs/replicate/bug-1363721.t  118
-rw-r--r--  tests/bugs/replicate/bug-1365455.t  54
-rw-r--r--  tests/bugs/replicate/bug-1386188-sbrain-fav-child.t  82
-rw-r--r--  tests/bugs/replicate/bug-1402730.t  47
-rw-r--r--  tests/bugs/replicate/bug-1408712.t  101
-rw-r--r--  tests/bugs/replicate/bug-1417522-block-split-brain-resolution.t  69
-rw-r--r--  tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t  79
-rw-r--r--  tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t  46
-rw-r--r--  tests/bugs/replicate/bug-1448804-check-quorum-type-values.t  47
-rw-r--r--  tests/bugs/replicate/bug-1473026.t  31
-rw-r--r--  tests/bugs/replicate/bug-1477169-entry-selfheal-rename.t  52
-rw-r--r--  tests/bugs/replicate/bug-1480525.t  18
-rw-r--r--  tests/bugs/replicate/bug-1493415-gfid-heal.t  78
-rw-r--r--  tests/bugs/replicate/bug-1498570-client-iot-graph-check.t  48
-rwxr-xr-x  tests/bugs/replicate/bug-1539358-split-brain-detection.t  89
-rw-r--r--  tests/bugs/replicate/bug-1561129-enospc.t  24
-rw-r--r--  tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t  72
-rw-r--r--  tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t  128
-rw-r--r--  tests/bugs/replicate/bug-1626994-info-split-brain.t  62
-rw-r--r--  tests/bugs/replicate/bug-1637249-gfid-heal.t  149
-rw-r--r--  tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t  45
-rw-r--r--  tests/bugs/replicate/bug-1655050-dir-sbrain-size-policy.t  55
-rwxr-xr-x  tests/bugs/replicate/bug-1655052-sbrain-policy-same-size.t  55
-rw-r--r--  tests/bugs/replicate/bug-1655854-support-dist-to-rep3-arb-conversion.t  95
-rw-r--r--  tests/bugs/replicate/bug-1657783-do-not-update-read-subvol-on-rename-link.t  40
-rw-r--r--  tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t  38
-rwxr-xr-x  tests/bugs/replicate/bug-1696599-io-hang.t  47
-rw-r--r--  tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t  136
-rw-r--r--  tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t  116
-rw-r--r--  tests/bugs/replicate/bug-1728770-pass-xattrs.t  52
-rw-r--r--  tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t  102
-rw-r--r--  tests/bugs/replicate/bug-1744548-heal-timeout.t  47
-rw-r--r--  tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t  89
-rw-r--r--  tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t  111
-rw-r--r--  tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t  74
-rw-r--r--  tests/bugs/replicate/bug-1801624-entry-heal.t  58
-rw-r--r--  tests/bugs/replicate/bug-765564.t  86
-rwxr-xr-x  tests/bugs/replicate/bug-767585-gfid.t  42
-rwxr-xr-x  tests/bugs/replicate/bug-802417.t  120
-rw-r--r--  tests/bugs/replicate/bug-821056.t  52
-rwxr-xr-x  tests/bugs/replicate/bug-830665.t  127
-rwxr-xr-x  tests/bugs/replicate/bug-859581.t  53
-rwxr-xr-x  tests/bugs/replicate/bug-865825.t  82
-rw-r--r--  tests/bugs/replicate/bug-880898.t  30
-rw-r--r--  tests/bugs/replicate/bug-884328.t  12
-rw-r--r--  tests/bugs/replicate/bug-886998.t  52
-rw-r--r--  tests/bugs/replicate/bug-888174.t  62
-rw-r--r--  tests/bugs/replicate/bug-913051.t  56
-rw-r--r--  tests/bugs/replicate/bug-916226.t  26
-rw-r--r--  tests/bugs/replicate/bug-918437-sh-mtime.t  71
-rwxr-xr-x  tests/bugs/replicate/bug-921231.t  31
-rw-r--r--  tests/bugs/replicate/bug-957877.t  33
-rw-r--r--  tests/bugs/replicate/bug-976800.t  29
-rwxr-xr-x  tests/bugs/replicate/bug-977797.t  96
-rw-r--r--  tests/bugs/replicate/bug-978794.t  29
-rwxr-xr-x  tests/bugs/replicate/bug-979365.t  47
-rwxr-xr-x  tests/bugs/replicate/bug-986905.t  27
-rw-r--r--  tests/bugs/replicate/issue-1254-prioritize-enospc.t  80
-rw-r--r--  tests/bugs/replicate/mdata-heal-no-xattrs.t  59
-rw-r--r--  tests/bugs/replicate/ta-inode-refresh-read.t  40
-rwxr-xr-x  tests/bugs/rpc/bug-1043886.t  58
-rwxr-xr-x  tests/bugs/rpc/bug-847624.t  29
-rw-r--r--  tests/bugs/rpc/bug-884452.t  47
-rwxr-xr-x  tests/bugs/rpc/bug-921072.t  132
-rwxr-xr-x  tests/bugs/rpc/bug-954057.t  63
-rw-r--r--  tests/bugs/shard/bug-1245547.t  35
-rw-r--r--  tests/bugs/shard/bug-1248887.t  39
-rw-r--r--  tests/bugs/shard/bug-1250855.t  34
-rw-r--r--  tests/bugs/shard/bug-1251824.t  109
-rw-r--r--  tests/bugs/shard/bug-1256580.t  34
-rw-r--r--  tests/bugs/shard/bug-1258334.t  40
-rw-r--r--  tests/bugs/shard/bug-1259651.t  40
-rw-r--r--  tests/bugs/shard/bug-1260637.t  42
-rw-r--r--  tests/bugs/shard/bug-1261773.t  14
-rw-r--r--  tests/bugs/shard/bug-1272986.t  35
-rw-r--r--  tests/bugs/shard/bug-1342298.t  23
-rw-r--r--  tests/bugs/shard/bug-1468483.t  58
-rw-r--r--  tests/bugs/shard/bug-1488546.t  25
-rw-r--r--  tests/bugs/shard/bug-1568521-EEXIST.t  91
-rw-r--r--  tests/bugs/shard/bug-1568521.t  53
-rw-r--r--  tests/bugs/shard/bug-1605056-2.t  34
-rw-r--r--  tests/bugs/shard/bug-1605056.t  63
-rw-r--r--  tests/bugs/shard/bug-1669077.t  29
-rw-r--r--  tests/bugs/shard/bug-1696136-lru-limit-equals-deletion-rate.t  34
-rw-r--r--  tests/bugs/shard/bug-1696136.c  122
-rw-r--r--  tests/bugs/shard/bug-1696136.t  33
-rw-r--r--  tests/bugs/shard/bug-1705884.t  32
-rw-r--r--  tests/bugs/shard/bug-1738419.t  29
-rw-r--r--  tests/bugs/shard/bug-shard-discard.c  70
-rw-r--r--  tests/bugs/shard/bug-shard-discard.t  65
-rw-r--r--  tests/bugs/shard/bug-shard-zerofill.c  60
-rw-r--r--  tests/bugs/shard/bug-shard-zerofill.t  46
-rw-r--r--  tests/bugs/shard/configure-lru-limit.t  52
-rw-r--r--  tests/bugs/shard/issue-1243.t  43
-rw-r--r--  tests/bugs/shard/issue-1281.t  34
-rw-r--r--  tests/bugs/shard/issue-1425.t  45
-rw-r--r--  tests/bugs/shard/parallel-truncate-read.t  48
-rw-r--r--  tests/bugs/shard/shard-append-test.c  183
-rw-r--r--  tests/bugs/shard/shard-append-test.t  32
-rw-r--r--  tests/bugs/shard/shard-fallocate.c  113
-rw-r--r--  tests/bugs/shard/shard-inode-refcount-test.t  30
-rw-r--r--  tests/bugs/shard/unlinks-and-renames.t  333
-rw-r--r--  tests/bugs/shard/zero-flag.t  76
-rwxr-xr-x  tests/bugs/snapshot/bug-1045333.t  44
-rwxr-xr-x  tests/bugs/snapshot/bug-1049834.t  44
-rw-r--r--  tests/bugs/snapshot/bug-1064768.t  20
-rw-r--r--  tests/bugs/snapshot/bug-1087203.t  103
-rwxr-xr-x  tests/bugs/snapshot/bug-1090042.t  26
-rw-r--r--  tests/bugs/snapshot/bug-1109770.t  65
-rw-r--r--  tests/bugs/snapshot/bug-1109889.t  74
-rwxr-xr-x  tests/bugs/snapshot/bug-1111041.t  40
-rw-r--r--  tests/bugs/snapshot/bug-1112613.t  49
-rw-r--r--  tests/bugs/snapshot/bug-1113975.t  38
-rw-r--r--  tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t  36
-rwxr-xr-x  tests/bugs/snapshot/bug-1157991.t  30
-rwxr-xr-x  tests/bugs/snapshot/bug-1162462.t  38
-rwxr-xr-x  tests/bugs/snapshot/bug-1162498.t  56
-rwxr-xr-x  tests/bugs/snapshot/bug-1166197.t  52
-rw-r--r--  tests/bugs/snapshot/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t  205
-rw-r--r--  tests/bugs/snapshot/bug-1168875.t  96
-rw-r--r--  tests/bugs/snapshot/bug-1178079.t  24
-rw-r--r--  tests/bugs/snapshot/bug-1202436-calculate-quota-cksum-during-snap-restore.t  37
-rw-r--r--  tests/bugs/snapshot/bug-1205592.t  30
-rw-r--r--  tests/bugs/snapshot/bug-1227646.t  31
-rwxr-xr-x  tests/bugs/snapshot/bug-1232430.t  22
-rwxr-xr-x  tests/bugs/snapshot/bug-1250387.t  26
-rw-r--r--  tests/bugs/snapshot/bug-1260848.t  28
-rwxr-xr-x  tests/bugs/snapshot/bug-1275616.t  50
-rw-r--r--  tests/bugs/snapshot/bug-1279327.t  29
-rw-r--r--  tests/bugs/snapshot/bug-1316437.t  29
-rw-r--r--  tests/bugs/snapshot/bug-1322772-real-path-fix-for-snapshot.t  56
-rwxr-xr-x  tests/bugs/snapshot/bug-1399598-uss-with-ssl.t  108
-rw-r--r--  tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t  133
-rw-r--r--  tests/bugs/snapshot/bug-1512451-snapshot-creation-failed-after-brick-reset.t  39
-rw-r--r--  tests/bugs/snapshot/bug-1597662.t  58
-rw-r--r--  tests/bugs/snapshot/bug-1618004-fix-memory-corruption-in-snap-import.t  48
-rw-r--r--  tests/bugs/stripe/bug-1002207.t  55
-rw-r--r--  tests/bugs/stripe/bug-1111454.t  20
-rwxr-xr-x  tests/bugs/trace/bug-797171.t  41
-rwxr-xr-x  tests/bugs/transport/bug-873367.t  45
-rw-r--r--  tests/bugs/unclassified/bug-1034085.t  31
-rw-r--r--  tests/bugs/unclassified/bug-1357397.t  35
-rw-r--r--  tests/bugs/unclassified/bug-874498.t  64
-rw-r--r--  tests/bugs/unclassified/bug-991622.t  35
-rwxr-xr-x  tests/bugs/upcall/bug-1227204.t  29
-rwxr-xr-x  tests/bugs/upcall/bug-1369430.t  43
-rwxr-xr-x  tests/bugs/upcall/bug-1394131.t  29
-rwxr-xr-x  tests/bugs/upcall/bug-1422776.t  30
-rwxr-xr-x  tests/bugs/upcall/bug-1458127.t  36
-rwxr-xr-x  tests/bugs/upcall/bug-upcall-stat.t  39
-rw-r--r--  tests/bugs/write-behind/bug-1058663.c  123
-rw-r--r--  tests/bugs/write-behind/bug-1058663.t  28
-rw-r--r--  tests/bugs/write-behind/bug-1279730.c  149
-rwxr-xr-x  tests/bugs/write-behind/bug-1279730.t  37
-rw-r--r--  tests/bugs/write-behind/issue-884.c  267
-rwxr-xr-x  tests/bugs/write-behind/issue-884.t  40
508 files changed, 29988 insertions, 0 deletions
diff --git a/tests/bugs/access-control/bug-1051896.c b/tests/bugs/access-control/bug-1051896.c
new file mode 100644
index 00000000000..31799d97a71
--- /dev/null
+++ b/tests/bugs/access-control/bug-1051896.c
@@ -0,0 +1,94 @@
+#include <sys/param.h>
+#include <sys/stat.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <grp.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+#include <assert.h>
+#include <sys/types.h>
+#include <utime.h>
+#include <sys/acl.h>
+
+int
+do_setfacl(const char *path, const char *options, const char *textacl)
+{
+    int r;
+    int type;
+    acl_t acl;
+    int dob;
+    int dok;
+    int dom;
+    struct stat st;
+    char textmode[30];
+
+    r = 0;
+    dob = strchr(options, 'b') != (char *)NULL;
+    dok = strchr(options, 'k') != (char *)NULL;
+    dom = strchr(options, 'm') != (char *)NULL;
+    if ((dom && !textacl) ||
+        (!dom && (textacl || (!dok && !dob) || strchr(options, 'd')))) {
+        errno = EBADRQC; /* "bad request" */
+        r = -1;
+    } else {
+        if (dob || dok) {
+            r = acl_delete_def_file(path);
+        }
+        if (dob && !r) {
+            if (!stat(path, &st)) {
+                sprintf(textmode, "u::%c%c%c,g::%c%c%c,o::%c%c%c",
+                        (st.st_mode & 0400 ? 'r' : '-'),
+                        (st.st_mode & 0200 ? 'w' : '-'),
+                        (st.st_mode & 0100 ? 'x' : '-'),
+                        (st.st_mode & 0040 ? 'r' : '-'),
+                        (st.st_mode & 0020 ? 'w' : '-'),
+                        (st.st_mode & 0010 ? 'x' : '-'),
+                        (st.st_mode & 004 ? 'r' : '-'),
+                        (st.st_mode & 002 ? 'w' : '-'),
+                        (st.st_mode & 001 ? 'x' : '-'));
+                acl = acl_from_text(textmode);
+                if (acl) {
+                    r = acl_set_file(path, ACL_TYPE_ACCESS, acl);
+                    acl_free(acl);
+                } else
+                    r = -1;
+            } else
+                r = -1;
+        }
+        if (!r && dom) {
+            if (strchr(options, 'd'))
+                type = ACL_TYPE_DEFAULT;
+            else
+                type = ACL_TYPE_ACCESS;
+            acl = acl_from_text(textacl);
+            if (acl) {
+                r = acl_set_file(path, type, acl);
+                acl_free(acl);
+            } else
+                r = -1;
+        }
+    }
+    if (r)
+        r = -errno;
+    return r;
+}
+
+int
+main(int argc, char *argv[])
+{
+    int rc = 0;
+
+    if (argc != 4) {
+        fprintf(stderr, "usage: ./setfacl_test <path> <options> <textacl>\n");
+        return 0;
+    }
+    rc = do_setfacl(argv[1], argv[2], argv[3]);
+    if (rc != 0) {
+        fprintf(stderr, "do_setfacl failed: %s\n", strerror(errno));
+        return rc;
+    }
+    return 0;
+}
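The option letters accepted by do_setfacl() mirror setfacl(1): 'b' resets the ACL back to the plain mode bits, 'k' removes the default ACL, 'm' modifies entries, and 'd' targets the default ACL. A rough shell equivalent using the standard ACL tools (paths hypothetical, not part of the test):

    setfacl -b /mnt/test/file1                        # 'b': drop ACL entries, keep mode bits
    setfacl -k /mnt/test/dir1                         # 'k': drop the default ACL
    setfacl -m u::rw-,g::r--,o::r-- /mnt/test/file1   # 'm': modify the access ACL
    setfacl -d -m u::rwx /mnt/test/dir1               # 'd' + 'm': modify the default ACL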
diff --git a/tests/bugs/access-control/bug-1051896.t b/tests/bugs/access-control/bug-1051896.t
new file mode 100644
index 00000000000..870ede7db21
--- /dev/null
+++ b/tests/bugs/access-control/bug-1051896.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+case $OSTYPE in
+NetBSD)
+ echo "Skip test on ACL which are not available on NetBSD" >&2
+ SKIP_TESTS
+ exit 0
+ ;;
+*)
+ ;;
+esac
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+TEST $CLI volume start $V0;
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --acl -s $H0 --volfile-id $V0 $M0;
+
+TEST touch $M0/file1;
+
+TEST $CC $(dirname $0)/bug-1051896.c -o $(dirname $0)/bug-1051896 -lacl
+TEST ! $(dirname $0)/bug-1051896 $M0/file1 m 'u::r,u::w,g::r--,o::r--'
+TEST rm -f $(dirname $0)/bug-1051896
+
+cleanup
diff --git a/tests/bugs/access-control/bug-1387241.c b/tests/bugs/access-control/bug-1387241.c
new file mode 100644
index 00000000000..e2e843a2fda
--- /dev/null
+++ b/tests/bugs/access-control/bug-1387241.c
@@ -0,0 +1,18 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+int
+main(int argc, char *argv[])
+{
+    int ret = EXIT_FAILURE;
+    int fd = open(argv[1], O_RDONLY | O_TRUNC);
+
+    /* open(2) returns -1 on failure; 0 is a valid descriptor */
+    if (fd >= 0) {
+        ret = EXIT_SUCCESS;
+        close(fd);
+    }
+
+    return ret;
+}
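Since the helper opens with O_RDONLY | O_TRUNC, a stricter variant of the test could also assert that the open actually truncated the file (on Linux, O_TRUNC takes effect even for read-only opens). A hedged sketch reusing the variables of the .t below:

    echo data > $M0/file1
    TEST $(dirname $0)/bug-1387241 $M0/file1   # open(O_RDONLY|O_TRUNC) must succeed
    EXPECT "0" stat -c %s $M0/file1            # and should leave the file empty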
diff --git a/tests/bugs/access-control/bug-1387241.t b/tests/bugs/access-control/bug-1387241.t
new file mode 100644
index 00000000000..2efd80547d6
--- /dev/null
+++ b/tests/bugs/access-control/bug-1387241.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+case $OSTYPE in
+NetBSD)
+ echo "Skip test on ACL which are not available on NetBSD" >&2
+ SKIP_TESTS
+ exit 0
+ ;;
+*)
+ ;;
+esac
+
+#cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+TEST $CLI volume start $V0;
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --acl -s $H0 --volfile-id $V0 $M0;
+
+TEST touch $M0/file1;
+
+TEST $CC $(dirname $0)/bug-1387241.c -o $(dirname $0)/bug-1387241
+
+TEST $(dirname $0)/bug-1387241 $M0/file1
+
+TEST rm -f $(dirname $0)/bug-1387241
+
+#cleanup
diff --git a/tests/bugs/access-control/bug-887098-gmount-crash.t b/tests/bugs/access-control/bug-887098-gmount-crash.t
new file mode 100644
index 00000000000..ba9937bd5bf
--- /dev/null
+++ b/tests/bugs/access-control/bug-887098-gmount-crash.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs -s $H0 --volfile-id=$V0 --acl $M0
+MOUNT_PID=$(get_mount_process_pid $V0)
+
+for i in {1..25};
+do
+    mkdir $M0/tmp_$i && cat /etc/hosts > $M0/tmp_$i/file
+    cp -RPp $M0/tmp_$i $M0/newtmp_$i && cat /etc/hosts > $M0/newtmp_$i/newfile
+done
+
+EXPECT "$MOUNT_PID" get_mount_process_pid $V0
+TEST rm -rf $M0/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/access-control/bug-958691.t b/tests/bugs/access-control/bug-958691.t
new file mode 100644
index 00000000000..8b70607bdbb
--- /dev/null
+++ b/tests/bugs/access-control/bug-958691.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+
+# Tests for the fuse mount
+TEST mkdir $M0/dir;
+TEST chmod 1777 $M0/dir;
+TEST touch $M0/dir/file{1,2};
+
+TEST $CLI volume set $V0 server.root-squash enable;
+
+mv $M0/dir/file1 $M0/dir/file11 2>/dev/null;
+TEST [ $? -ne 0 ];
+
+TEST $CLI volume set $V0 server.root-squash disable;
+TEST rm -rf $M0/dir;
+
+sleep 1;
+
+# Tests for the NFS mount
+TEST mkdir $N0/dir;
+TEST chmod 1777 $N0/dir;
+TEST touch $N0/dir/file{1,2};
+
+TEST $CLI volume set $V0 server.root-squash enable;
+
+mv $N0/dir/file1 $N0/dir/file11 2>/dev/null;
+TEST [ $? -ne 0 ];
+
+TEST $CLI volume set $V0 server.root-squash disable;
+TEST rm -rf $N0/dir;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
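The failing mv calls are the crux of this test: with server.root-squash enabled, root is mapped to the anonymous uid, so it can no longer rename entries that real root created earlier inside the sticky (mode 1777) directory. A minimal sketch of the same check, with a hypothetical volume name:

    gluster volume set myvol server.root-squash enable
    mv /mnt/myvol/dir/file1 /mnt/myvol/dir/file11   # run as root: expected to fail
    echo $?                                         # non-zero confirms the squash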
diff --git a/tests/bugs/bitrot/1207029-bitrot-daemon-should-start-on-valid-node.t b/tests/bugs/bitrot/1207029-bitrot-daemon-should-start-on-valid-node.t
new file mode 100755
index 00000000000..691ebc303e4
--- /dev/null
+++ b/tests/bugs/bitrot/1207029-bitrot-daemon-should-start-on-valid-node.t
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+## Test case for bitrot
+## The bitd daemon should not start on a node that does not have any brick
+
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+function get_bitd_count {
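+    # Count running bitd processes: glusterfs instances whose command line
+    # references bitd.pid.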
+    ps auxw | grep glusterfs | grep bitd.pid | grep -v grep | wc -l
+}
+
+## Start a 2 node virtual cluster
+TEST launch_cluster 2;
+
+## Peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+## Create a volume that has bricks on only one node
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H1:$B1/${V0}1
+
+## Start the volume
+TEST $CLI_1 volume start $V0
+
+## Enable bitrot on volume from 2nd node.
+TEST $CLI_2 volume bitrot $V0 enable
+
+## The bitd daemon should run only on nodes that host a brick. Only node1
+## has bricks here, so the bitrot daemon count should be 1.
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+## The bitd daemon should not run on the 2nd node, and glusterd should not
+## create a bitrot volfile there. The check below verifies this by getting
+## the working directory of the 2nd node (which has no brick) and doing a
+## stat on the bitrot volfile, which must not exist.
+
+cur_wrk_dir2=$($CLI_2 system:: getwd)
+TEST ! stat $cur_wrk_dir2/bitd/bitd-server.vol
+
+
+## The bitd daemon should run on the 1st node, and glusterd should create a
+## bitrot volfile there. The check below verifies this by getting the
+## working directory of the 1st node (which hosts the bricks) and doing a
+## stat on the bitrot volfile, which must exist.
+
+cur_wrk_dir1=$($CLI_1 system:: getwd)
+TEST stat $cur_wrk_dir1/bitd/bitd-server.vol
+
+cleanup;
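The volfile checks above rely on glusterd writing per-daemon volfiles under its working directory, which 'gluster system:: getwd' reports. A minimal single-node sketch of the same probe (assuming bitrot is enabled and the node hosts a brick):

    wd=$(gluster system:: getwd)
    stat "$wd/bitd/bitd-server.vol"   # present only on nodes that host a brick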
diff --git a/tests/bugs/bitrot/1209751-bitrot-scrub-tunable-reset.t b/tests/bugs/bitrot/1209751-bitrot-scrub-tunable-reset.t
new file mode 100644
index 00000000000..919ffc3ba62
--- /dev/null
+++ b/tests/bugs/bitrot/1209751-bitrot-scrub-tunable-reset.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+## Test case for bitrot
+## Restarting glusterd should not reset bitrot tunable values to their defaults
+
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Let's create and start the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+
+## Enable bitrot on volume $V0
+TEST $CLI volume bitrot $V0 enable
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+## Set bitrot scrub-throttle value to lazy
+TEST $CLI volume bitrot $V0 scrub-throttle lazy
+
+## Set bitrot scrub-frequency value to monthly
+TEST $CLI volume bitrot $V0 scrub-frequency monthly
+
+## Set bitrot scrubber to pause state
+TEST $CLI volume bitrot $V0 scrub pause
+
+## restart glusterd process
+pkill glusterd;
+TEST glusterd;
+TEST pidof glusterd;
+
+## All the bitrot scrub tunable values should be restored after the restart.
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
+EXPECT 'lazy' volinfo_field $V0 'features.scrub-throttle';
+EXPECT 'monthly' volinfo_field $V0 'features.scrub-freq';
+EXPECT 'pause' volinfo_field $V0 'features.scrub';
+EXPECT 'on' volinfo_field $V0 'features.bitrot';
+
+cleanup;
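The pattern used here (set a tunable, restart glusterd, re-read the value from volume info) is the generic way to verify option persistence. A hedged sketch with a hypothetical volume name:

    gluster volume bitrot myvol scrub-throttle lazy
    pkill glusterd && glusterd                                  # bounce the management daemon
    gluster volume info myvol | grep features.scrub-throttle   # expect: lazy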
diff --git a/tests/bugs/bitrot/1209752-volume-status-should-show-bitrot-scrub-info.t b/tests/bugs/bitrot/1209752-volume-status-should-show-bitrot-scrub-info.t
new file mode 100644
index 00000000000..6101910666c
--- /dev/null
+++ b/tests/bugs/bitrot/1209752-volume-status-should-show-bitrot-scrub-info.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+## Test case for bitrot
+## gluster volume status command should show status of bitrot daemon
+
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+
+## Start a 2 node virtual cluster
+TEST launch_cluster 2;
+
+## Peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+## Let's create and start the volume
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
+TEST $CLI_1 volume start $V0
+
+## Enable bitrot on volume $V0
+TEST $CLI_1 volume bitrot $V0 enable
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_bitd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_scrubd_count
+
+## From node 1, the gluster volume status command should show the status of
+## the bitrot daemons on all nodes. There are 2 nodes in the cluster, each
+## hosting one brick, so there should be 2 bitrot daemons running.
+
+bitd=$($CLI_1 volume status $V0 | grep "Bitrot Daemon" | grep -v grep | wc -l)
+TEST [ "$bitd" -eq 2 ];
+
+
+
+## From node 2, the gluster volume status command should show the status of
+## the scrubber daemons on all nodes. There are 2 nodes in the cluster, each
+## hosting one brick, so there should be 2 scrubber daemons running.
+
+scrub=$($CLI_2 volume status $V0 | grep "Scrubber Daemon" | grep -v grep | \
+ wc -l)
+TEST [ "$scrub" -eq 2 ];
+
+
+
+## From node 1, 'gluster volume status <VOLNAME> scrub' should print the
+## status of only the scrubber daemons. There should be 2 scrubber daemons
+## in total, one per node.
+
+scrub=$($CLI_1 volume status $V0 scrub | grep "Scrubber Daemon" | \
+ grep -v grep | wc -l)
+TEST [ "$scrub" -eq 2 ];
+
+
+
+## From node 2, 'gluster volume status <VOLNAME> bitd' should print the
+## status of only the bitd daemons. There should be 2 bitd daemons in
+## total, one per node.
+
+bitd=$($CLI_2 volume status $V0 bitd | grep "Bitrot Daemon" | \
+ grep -v grep | wc -l)
+TEST [ "$bitd" -eq 2 ];
+
+
+cleanup;
diff --git a/tests/bugs/bitrot/1209818-vol-info-show-scrub-process-properly.t b/tests/bugs/bitrot/1209818-vol-info-show-scrub-process-properly.t
new file mode 100644
index 00000000000..4fe02dc7f63
--- /dev/null
+++ b/tests/bugs/bitrot/1209818-vol-info-show-scrub-process-properly.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+## Test case for bitrot.
+## volume info should not show 'features.scrub: resume' if scrub process is
+## resumed from paused state.
+
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Let's create and start the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+
+## Enable bitrot on volume $V0
+TEST $CLI volume bitrot $V0 enable
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+## Set bitrot scrubber process to pause state
+TEST $CLI volume bitrot $V0 scrub pause
+
+## gluster volume info should show features.scrub as 'pause'.
+EXPECT 'pause' volinfo_field $V0 'features.scrub';
+
+
+## Resume scrub process on volume $V0
+TEST $CLI volume bitrot $V0 scrub resume
+
+## gluster volume info should show features.scrub as 'Active'.
+EXPECT 'Active' volinfo_field $V0 'features.scrub';
+
+
+## Disable bitrot on volume $V0
+TEST $CLI volume bitrot $V0 disable
+
+## gluster volume info should show features.scrub as 'Inactive'.
+EXPECT 'Inactive' volinfo_field $V0 'features.scrub';
+
+
+cleanup;
diff --git a/tests/bugs/bitrot/bug-1210684-scrub-pause-resume-error-handling.t b/tests/bugs/bitrot/bug-1210684-scrub-pause-resume-error-handling.t
new file mode 100644
index 00000000000..b15b908d21a
--- /dev/null
+++ b/tests/bugs/bitrot/bug-1210684-scrub-pause-resume-error-handling.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+## Test case for bitrot BZ:1210684
+## The bitrot scrub pause/resume option should give a proper error if the
+## scrubber is already paused/resumed and the admin retries the same
+## operation on the volume.
+
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Let's create and start the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2}
+TEST $CLI volume start $V0
+
+## Enable bitrot for volume $V0
+TEST $CLI volume bitrot $V0 enable
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+## Pause scrubber operation on volume $V0
+TEST $CLI volume bitrot $V0 scrub pause
+
+## Pausing the scrubber again should not succeed and should give an error
+TEST ! $CLI volume bitrot $V0 scrub pause
+
+## Resume scrubber operation on volume $V0
+TEST $CLI volume bitrot $V0 scrub resume
+
+## Resuming the scrubber again should not succeed and should give an error
+TEST ! $CLI volume bitrot $V0 scrub resume
+
+cleanup;
diff --git a/tests/bugs/bitrot/bug-1227996.t b/tests/bugs/bitrot/bug-1227996.t
new file mode 100644
index 00000000000..121c7b5f279
--- /dev/null
+++ b/tests/bugs/bitrot/bug-1227996.t
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+## Test case for bitrot
+## Tunable object-signing wait time for bitrot.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+SLEEP_TIME=3
+
+cleanup;
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Let's create and start the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+## Enable bitrot on volume $V0
+TEST $CLI volume bitrot $V0 enable
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+# wait a bit for oneshot crawler to finish
+sleep $SLEEP_TIME
+
+## Set object expiry time value
+TEST $CLI volume bitrot $V0 signing-time $SLEEP_TIME
+
+## Mount the volume
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+# create and check object signature
+fname="$M0/filezero"
+echo "ZZZ" > $fname
+
+# wait till the object is signed
+sleep `expr $SLEEP_TIME \* 2`
+
+backpath=$(get_backend_paths $fname)
+TEST getfattr -m . -n trusted.bit-rot.signature $backpath
+
+## for now just remove the signature xattr to test for signing
+## upon truncate()
+TEST setfattr -x trusted.bit-rot.signature $backpath
+
+## overwrite the file (truncate(), write())
+echo "XYX" > $fname
+
+# wait till the object is signed
+sleep `expr $SLEEP_TIME \* 2`
+
+# test for new signature
+TEST getfattr -m . -n trusted.bit-rot.signature $backpath
+
+cleanup;
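The fixed sleeps make this test timing-sensitive. The signature xattr can instead be polled for, which is the approach later bitrot tests take with EXPECT_WITHIN. A hedged sketch using the same helpers:

    backpath=$(get_backend_paths "$fname")
    # poll until the signer has stamped the object instead of sleeping blindly
    until getfattr -m . -n trusted.bit-rot.signature "$backpath" >/dev/null 2>&1; do
        sleep 1
    done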
diff --git a/tests/bugs/bitrot/bug-1228680.t b/tests/bugs/bitrot/bug-1228680.t
new file mode 100644
index 00000000000..23db9d5e208
--- /dev/null
+++ b/tests/bugs/bitrot/bug-1228680.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+## Test case for bitrot
+## Tunable object-signing wait time for bitrot.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+SLEEP_TIME=3
+
+cleanup;
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Let's create and start the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+
+## Enable bitrot on volume $V0
+TEST $CLI volume bitrot $V0 enable
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+# wait a bit for oneshot crawler to finish
+sleep $SLEEP_TIME
+
+## negative test
+TEST ! $CLI volume bitrot $V0 signing-time -100
+
+## Set the object expiry (signing) time to $SLEEP_TIME seconds.
+TEST $CLI volume bitrot $V0 signing-time $SLEEP_TIME
+
+## Mount the volume
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+# create and check object signature
+fname="$M0/filezero"
+echo "ZZZ" > $fname
+
+# wait till the object is signed
+sleep `expr $SLEEP_TIME \* 2`
+
+backpath=$(get_backend_paths $fname)
+TEST getfattr -m . -n trusted.bit-rot.signature $backpath
+
+cleanup;
diff --git a/tests/bugs/bitrot/bug-1229134-bitd-not-support-vol-set.t b/tests/bugs/bitrot/bug-1229134-bitd-not-support-vol-set.t
new file mode 100644
index 00000000000..471471f4b6b
--- /dev/null
+++ b/tests/bugs/bitrot/bug-1229134-bitd-not-support-vol-set.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+## Test case for bitrot BZ:1229134
+## 'gluster volume set <VOLNAME> bitrot *' used to succeed, although
+## enabling bitrot through 'volume set' is not supported.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Let's create and start the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2}
+TEST $CLI volume start $V0
+
+## The 'gluster volume set <VOLNAME>' commands for bitrot options should fail.
+TEST ! $CLI volume set $V0 bitrot enable
+TEST ! $CLI volume set $V0 bitrot disable
+TEST ! $CLI volume set $V0 scrub-frequency daily
+TEST ! $CLI volume set $V0 scrub pause
+TEST ! $CLI volume set $V0 scrub-throttle lazy
+
+
+## The 'gluster volume bitrot <VOLNAME> *' commands should succeed.
+TEST $CLI volume bitrot $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+TEST $CLI volume bitrot $V0 scrub pause
+TEST $CLI volume bitrot $V0 scrub-frequency daily
+TEST $CLI volume bitrot $V0 scrub-throttle lazy
+
+cleanup;
+
diff --git a/tests/bugs/bitrot/bug-1245981.t b/tests/bugs/bitrot/bug-1245981.t
new file mode 100644
index 00000000000..f3955256b01
--- /dev/null
+++ b/tests/bugs/bitrot/bug-1245981.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+## Test case for bitrot
+## Tunable object-signing wait time for bitrot.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+SLEEP_TIME=5
+
+cleanup;
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Let's create and start the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 network.inode-lru-limit 1
+## Enable bitrot on volume $V0
+TEST $CLI volume bitrot $V0 enable
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+# wait a bit for oneshot crawler to finish
+sleep 2;
+
+## Set object expiry time value
+TEST $CLI volume bitrot $V0 signing-time $SLEEP_TIME
+
+## Mount the volume
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+# create and check object signature
+fname="$M0/filezero"
+echo "ZZZ" > $fname
+echo "123" > $M0/new_file;
+
+touch $M0/1
+touch $M0/2
+touch $M0/3
+touch $M0/4
+touch $M0/5
+
+# wait till the object is signed
+sleep `expr $SLEEP_TIME \* 2`
+
+backpath=$(get_backend_paths $fname)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.bit-rot.signature' check_for_xattr 'trusted.bit-rot.signature' $backpath
+
+backpath=$(get_backend_paths $M0/new_file)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.bit-rot.signature' check_for_xattr 'trusted.bit-rot.signature' $backpath
+
+cleanup;
diff --git a/tests/bugs/bitrot/bug-1288490.t b/tests/bugs/bitrot/bug-1288490.t
new file mode 100644
index 00000000000..5f67f4a6ec5
--- /dev/null
+++ b/tests/bugs/bitrot/bug-1288490.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume start $V0
+
+TEST $CLI volume bitrot $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST dd if=/dev/urandom of=$M0/FILE bs=1024 count=1
+
+# corrupt data -- append 2 bytes
+echo -n "~~" >> $B0/brick0/FILE
+# manually set bad-file xattr
+TEST setfattr -n trusted.bit-rot.bad-file -v 0x3100 $B0/brick0/FILE
+
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+# trigger lookup
+TEST stat $M0/FILE
+
+# extend the file
+TEST dd if=/dev/urandom of=$M0/FILE bs=1024 count=1 oflag=append conv=notrunc
+
+# check backend file size
+EXPECT "1026" stat -c "%s" $B0/brick0/FILE
+EXPECT "2048" stat -c "%s" $B0/brick1/FILE
+
+# check file size on mount
+EXPECT "2048" stat -c "%s" $M0/FILE
+
+TEST umount $M0
+cleanup
diff --git a/tests/bugs/bug-1064147.t b/tests/bugs/bug-1064147.t
new file mode 100755
index 00000000000..27ffde4eb44
--- /dev/null
+++ b/tests/bugs/bug-1064147.t
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+# Initialize
+#------------------------------------------------------------
+cleanup;
+
+# Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+# Create a volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
+
+# Verify volume creation
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Start volume and verify successful start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+#------------------------------------------------------------
+
+# Test case 1 - Subvolume down + Healing
+#------------------------------------------------------------
+# Kill 2nd brick process
+TEST kill_brick $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "1" online_brick_count
+
+# Change root permissions
+TEST chmod 444 $M0
+
+# Store permission for comparison
+TEST permission_new=`stat -c "%A" $M0`
+
+# Bring up the killed brick process
+TEST $CLI volume start $V0 force
+
+# Perform lookup
+sleep 5
+TEST ls $M0
+
+# Check brick permissions
+TEST brick_perm=`stat -c "%A" $B0/${V0}2`
+TEST [ ${brick_perm} = ${permission_new} ]
+#------------------------------------------------------------
+
+# Test case 2 - Add-brick + Healing
+#------------------------------------------------------------
+# Change root permissions
+TEST chmod 777 $M0
+
+# Store permission for comparison
+TEST permission_new_2=`stat -c "%A" $M0`
+
+# Add a 3rd brick
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3
+
+# Perform lookup
+sleep 5
+TEST ls $M0
+
+# Check permissions on the new brick
+TEST brick_perm2=`stat -c "%A" $B0/${V0}3`
+
+TEST [ ${brick_perm2} = ${permission_new_2} ]
+
+cleanup;
diff --git a/tests/bugs/bug-1110262.t b/tests/bugs/bug-1110262.t
new file mode 100644
index 00000000000..90b101fc98d
--- /dev/null
+++ b/tests/bugs/bug-1110262.t
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../traps.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+## Let's create the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 nfs.disable false
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+
+#Do some operation on the mount so that kill_brick is guaranteed to happen
+#_after_ the first lookup on root, once dht has a proper layout on it.
+#Otherwise the mkdir done in later stages of the script might fail due to
+#the lack of a layout on "/", as dht self-heal won't proceed if any of its
+#subvolumes are down.
+TEST ls $M0
+#kill one of the brick process
+TEST kill_brick $V0 $H0 $B0/${V0}2
+
+cleanup_user_group () {
+    userdel --force dev
+    groupdel QA
+}
+push_trapfunc cleanup_user_group
+
+#create a user and group
+TEST useradd dev
+TEST groupadd QA
+
+#create a new directory now with special user, group and mode bits
+mkdir -m 7777 $M0/dironedown
+TEST chown dev $M0/dironedown
+TEST chgrp QA $M0/dironedown
+
+#store the permissions for comparison
+permission_onedown=`ls -l $M0 | grep dironedown | awk '{print $1}'`
+
+#Now bring up the brick process
+TEST $CLI volume start $V0 force
+
+#The update of directory attrs happens on the revalidate path, so the
+#update will happen within at most 2 lookups.
+sleep 5
+TEST ls $M0/dironedown;
+
+#check directory that was created post brick going down
+TEST brick_perm=`ls -l $B0/${V0}2 | grep dironedown | awk '{print $1}'`
+TEST echo $brick_perm;
+TEST [ ${brick_perm} = ${permission_onedown} ]
+uid=`ls -l $B0/${V0}2 | grep dironedown | awk '{print $3}'`
+TEST echo $uid
+TEST [ $uid = dev ]
+gid=`ls -l $B0/${V0}2 | grep dironedown | awk '{print $4}'`
+TEST echo $gid
+TEST [ $gid = QA ]
+
+cleanup
diff --git a/tests/bugs/bug-1138841.t b/tests/bugs/bug-1138841.t
new file mode 100644
index 00000000000..abec5e89d56
--- /dev/null
+++ b/tests/bugs/bug-1138841.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../dht.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+## Create a volume and set auth.allow using a CIDR-format IP
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 auth.allow 127.0.0.1/20
+TEST $CLI volume start $V0
+
+
+## mount the volume and create a file on the mount point
+
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+TEST touch $M0/tmp1
+
+## Stop the volume and do the cleanup
+
+TEST $CLI volume stop $V0
+cleanup
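The point of this test is that auth.allow accepts CIDR notation rather than only literal addresses. A hedged manual check (subnet and volume name hypothetical):

    gluster volume set myvol auth.allow 192.168.1.0/24
    gluster volume info myvol | grep auth.allow   # expect the CIDR value back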
diff --git a/tests/bugs/bug-1258069.t b/tests/bugs/bug-1258069.t
new file mode 100755
index 00000000000..b87ecbf2fe8
--- /dev/null
+++ b/tests/bugs/bug-1258069.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+TEST $CLI volume start $V0
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+TEST mount_nfs $H0:/$V0 $N0 nolock
+TEST mkdir -p $N0/a/b/c
+TEST umount_nfs $N0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+rmdir $M0/a/b/c
+mkdir $M0/a/b/c
+TEST mount_nfs $H0:/$V0/a/b/c $N0 nolock
+TEST umount_nfs $N0
+TEST umount $M0
+
+cleanup
diff --git a/tests/bugs/bug-1368312.t b/tests/bugs/bug-1368312.t
new file mode 100644
index 00000000000..c60d562bbd7
--- /dev/null
+++ b/tests/bugs/bug-1368312.t
@@ -0,0 +1,86 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+cleanup;
+
+function compare_get_split_brain_status {
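+    # Echo 0 if $choice appears in the file's replica.split-brain-status
+    # xattr value, 1 otherwise.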
+    local path=$1
+    local choice=$2
+    echo `getfattr -n replica.split-brain-status $path` | cut -f2 -d"=" | sed -e 's/^"//' -e 's/"$//' | grep $choice
+    if [ $? -ne 0 ]
+    then
+        echo 1
+    else
+        echo 0
+    fi
+
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume start $V0
+
+#Disable self-heal-daemon
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+TEST mkdir $M0/tmp1
+
+#Create metadata split-brain
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST chmod 666 $M0/tmp1
+TEST $CLI volume start $V0 force
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST chmod 757 $M0/tmp1
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 3
+
+EXPECT 2 get_pending_heal_count $V0
+
+
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST chmod 755 $M0/tmp1
+TEST $CLI volume start $V0 force
+TEST kill_brick $V0 $H0 $B0/${V0}5
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 4
+
+TEST chmod 766 $M0/tmp1
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 4
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 5
+
+EXPECT 4 get_pending_heal_count $V0
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST chmod 765 $M0/tmp1
+TEST $CLI volume start $V0 force
+TEST kill_brick $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+TEST chmod 756 $M0/tmp1
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+EXPECT 6 get_pending_heal_count $V0
+
+cd $M0
+EXPECT 0 compare_get_split_brain_status ./tmp1 patchy-client-0
+EXPECT 0 compare_get_split_brain_status ./tmp1 patchy-client-1
+EXPECT 0 compare_get_split_brain_status ./tmp1 patchy-client-2
+EXPECT 0 compare_get_split_brain_status ./tmp1 patchy-client-3
+EXPECT 0 compare_get_split_brain_status ./tmp1 patchy-client-4
+EXPECT 0 compare_get_split_brain_status ./tmp1 patchy-client-5
+
+cd -
+cleanup
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/bugs/bug-1371806.t b/tests/bugs/bug-1371806.t
new file mode 100644
index 00000000000..08180525650
--- /dev/null
+++ b/tests/bugs/bug-1371806.t
@@ -0,0 +1,81 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../dht.rc
+cleanup;
+
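+# Print the value of the user.foo xattr on <path>; e.g. getfattr output
+# 'user.foo="abc"' is reduced to plain 'abc'.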
+function get_getfattr {
+ local path=$1
+ echo `getfattr -n user.foo $path` | cut -f2 -d"=" | sed -e 's/^"//' -e 's/"$//'
+}
+
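+# Set user.foo=newabc on ./tmp1..tmp10, counting successes in $succ and
+# failures in $fail; with a brick down, sets on dirs whose hashed subvol is
+# unavailable are expected to fail.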
+function set_fattr {
+ for i in `seq 1 10`
+ do
+ setfattr -n user.foo -v "newabc" ./tmp${i}
+ if [ "$?" = "0" ]
+ then
+ succ=$((succ+1))
+ else
+ fail=$((fail+1))
+ fi
+ done
+}
+
+
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume start $V0
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "6" online_brick_count
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+cd $M0
+TEST mkdir tmp{1..10}
+
+##First set user.foo xattr with value abc on all dirs
+
+TEST setfattr -n user.foo -v "abc" ./tmp{1..10}
+EXPECT "abc" get_getfattr ./tmp{1..10}
+EXPECT "abc" get_getfattr $B0/${V0}5/tmp{1..10}
+
+TEST kill_brick $V0 $H0 $B0/${V0}5
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "5" online_brick_count
+
+succ=0 fail=0
+## set user.foo xattr with value newabc after killing one brick
+set_fattr
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "6" online_brick_count
+
+cd -
+TEST umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+cd $M0
+## At this point the dht code will heal the xattr on the down brick only for
+## those dirs whose hashed subvol was up at the time of the xattr update
+TEST stat ./tmp{1..10}
+
+## Count the user.foo xattr value with abc on mount point and compare with fail value
+count=`getfattr -n user.foo ./tmp{1..10} | grep "user.foo" | grep -iw "abc" | wc -l`
+EXPECT "$fail" echo $count
+
+## Count the user.foo xattr value with newabc on mount point and compare with succ value
+count=`getfattr -n user.foo ./tmp{1..10} | grep "user.foo" | grep -iw "newabc" | wc -l`
+EXPECT "$succ" echo $count
+
+## Count the user.foo xattr value with abc on the brick and compare with the fail value
+count=`getfattr -n user.foo $B0/${V0}5/tmp{1..10} | grep "user.foo" | grep -iw "abc" | wc -l`
+EXPECT "$fail" echo $count
+
+## Count the user.foo xattr value with newabc on brick and compare with succ value
+count=`getfattr -n user.foo $B0/${V0}5/tmp{1..10} | grep "user.foo" | grep -iw "newabc" | wc -l`
+EXPECT "$succ" echo $count
+
+
+cd -
+cleanup
+exit
diff --git a/tests/bugs/bug-1371806_1.t b/tests/bugs/bug-1371806_1.t
new file mode 100644
index 00000000000..df19a8c1c2a
--- /dev/null
+++ b/tests/bugs/bug-1371806_1.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../dht.rc
+cleanup;
+
+function get_getfattr {
+ local path=$1
+ echo `getfattr -n user.foo $path` | cut -f2 -d"=" | sed -e 's/^"//' -e 's/"$//'
+}
+
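+# Strip the internal trusted.glusterfs.dht.mds xattr from tmp1..tmp10 under
+# the given brick path, so later operations run without a pre-existing mds
+# xattr.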
+function remove_mds_xattr {
+
+ for i in `seq 1 10`
+ do
+ setfattr -x trusted.glusterfs.dht.mds $1/tmp${i} 2> /dev/null
+ done
+}
+
+
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+cd $M0
+TEST mkdir tmp{1..10}
+
+##Remove the internal mds xattr from all directories
+remove_mds_xattr $B0/${V0}0
+remove_mds_xattr $B0/${V0}1
+remove_mds_xattr $B0/${V0}2
+remove_mds_xattr $B0/${V0}3
+
+cd -
+umount $M0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+cd $M0
+
+TEST setfattr -n user.foo -v "abc" ./tmp{1..10}
+EXPECT "abc" get_getfattr ./tmp{1..10}
+
+cd -
+cleanup
diff --git a/tests/bugs/bug-1371806_2.t b/tests/bugs/bug-1371806_2.t
new file mode 100644
index 00000000000..e6aa8e7c1ad
--- /dev/null
+++ b/tests/bugs/bug-1371806_2.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../dht.rc
+cleanup;
+
+function get_getfattr {
+ local path=$1
+ echo `getfattr -n user.foo $path` | cut -f2 -d"=" | sed -e 's/^"//' -e 's/"$//'
+}
+
+function remove_mds_xattr {
+
+ for i in `seq 1 10`
+ do
+ setfattr -x trusted.glusterfs.dht.mds $1/tmp${i} 2> /dev/null
+ done
+}
+
+
+
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 --attribute-timeout=0 $M0;
+cd $M0
+TEST mkdir tmp{1..10}
+
+##Remove the internal mds xattr from all directories
+remove_mds_xattr $B0/${V0}0
+remove_mds_xattr $B0/${V0}1
+remove_mds_xattr $B0/${V0}2
+remove_mds_xattr $B0/${V0}3
+
+##First set user.foo xattr with value abc on all dirs
+
+TEST setfattr -n user.foo -v "abc" ./tmp{1..10}
+EXPECT "abc" get_getfattr ./tmp{1..10}
+EXPECT "abc" get_getfattr $B0/${V0}0/tmp{1..10}
+EXPECT "abc" get_getfattr $B0/${V0}1/tmp{1..10}
+EXPECT "abc" get_getfattr $B0/${V0}2/tmp{1..10}
+EXPECT "abc" get_getfattr $B0/${V0}3/tmp{1..10}
+
+cd -
+TEST umount $M0
+
+cd -
+cleanup
+exit
diff --git a/tests/bugs/bug-1371806_3.t b/tests/bugs/bug-1371806_3.t
new file mode 100644
index 00000000000..cb13f37c737
--- /dev/null
+++ b/tests/bugs/bug-1371806_3.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../dht.rc
+cleanup;
+
+function get_getfattr {
+ local path=$1
+ echo `getfattr -n user.foo $path` | cut -f2 -d"=" | sed -e 's/^"//' -e 's/"$//'
+}
+
+function set_fattr {
+ for i in `seq 1 10`
+ do
+ setfattr -n user.foo -v "newabc" ./tmp${i}
+ if [ "$?" = "0" ]
+ then
+ succ=$((succ+1))
+ else
+ fail=$((fail+1))
+ fi
+ done
+}
+
+
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 --attribute-timeout=0 $M0;
+
+cd $M0
+TEST mkdir tmp{1..10}
+
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "3" online_brick_count
+
+succ=0 fail=0
+## set user.foo xattr with value newabc after killing one brick
+set_fattr
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "4" online_brick_count
+
+cd -
+TEST umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 --attribute-timeout=0 $M0;
+
+cd $M0
+## At this point the dht code will heal the xattr on the down brick only for
+## those dirs whose hashed subvol was up at the time of the xattr update
+TEST stat ./tmp{1..10}
+
+
+## Count the user.foo xattr value with newabc on brick and compare with succ value
+count=`getfattr -n user.foo $B0/${V0}3/tmp{1..10} | grep "user.foo" | grep -iw "newabc" | wc -l`
+EXPECT "$succ" echo $count
+
+
+cd -
+cleanup
+exit
diff --git a/tests/bugs/bug-1371806_acl.t b/tests/bugs/bug-1371806_acl.t
new file mode 100644
index 00000000000..c39165628cc
--- /dev/null
+++ b/tests/bugs/bug-1371806_acl.t
@@ -0,0 +1,96 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+TEST useradd tmpuser
+
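+# Grant tmpuser rw on ./tmp1..tmp10 via setfacl, counting successes in $succ
+# and failures in $fail.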
+function set_facl_user {
+ for i in `seq 1 10`
+ do
+ setfacl -m u:tmpuser:rw ./tmp${i}
+ if [ "$?" = "0" ]
+ then
+ succ=$((succ+1))
+ else
+ fail=$((fail+1))
+ fi
+ done
+}
+
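+# Set a default ACL (other: rw) on ./tmp1..tmp10, counting successes in
+# $succ1 and failures in $fail1.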
+function set_facl_default {
+ for i in `seq 1 10`
+ do
+ setfacl -m d:o:rw ./tmp${i}
+ if [ "$?" = "0" ]
+ then
+ succ1=$((succ1+1))
+ else
+ fail1=$((fail1+1))
+ fi
+ done
+}
+
+
+
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+TEST $CLI volume start $V0
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "6" online_brick_count
+
+TEST glusterfs --volfile-id=$V0 --acl --volfile-server=$H0 --entry-timeout=0 $M0;
+
+cd $M0
+TEST mkdir tmp{1..10}
+TEST setfacl -m u:tmpuser:rwx ./tmp{1..10}
+count=`getfacl -p $M0/tmp{1..10} | grep -c "user:tmpuser:rwx"`
+EXPECT "10" echo $count
+TEST setfacl -m d:o:rwx ./tmp{1..10}
+count=`getfacl -p $M0/tmp{1..10} | grep -c "default:other::rwx"`
+EXPECT "10" echo $count
+count=`getfacl -p $B0/${V0}5/tmp{1..10} | grep -c "user:tmpuser:rwx"`
+EXPECT "10" echo $count
+count=`getfacl -p $B0/${V0}5/tmp{1..10} | grep -c "default:other::rwx"`
+EXPECT "10" echo $count
+
+
+TEST kill_brick $V0 $H0 $B0/${V0}5
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "5" online_brick_count
+
+succ=0 fail=0
+## Update acl attributes on the dirs after killing one brick
+set_facl_user
+succ1=0 fail1=0
+set_facl_default
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "6" online_brick_count
+
+cd -
+TEST umount $M0
+TEST glusterfs --volfile-id=$V0 --acl --volfile-server=$H0 --entry-timeout=0 $M0;
+
+cd $M0
+## At this point dht will heal the xattrs on the down brick only for those
+## dirs whose hashed subvol was up at the time the xattrs were updated
+TEST stat ./tmp{1..10}
+
+# Make sure to send a write and read on the file inside mount
+echo "helloworld" > ./tmp1/file
+TEST cat ./tmp1/file
+
+## Compare succ value with updated acl attributes
+count=`getfacl -p $B0/${V0}5/tmp{1..10} | grep -c "user:tmpuser:rw-"`
+EXPECT "$succ" echo $count
+
+
+count=`getfacl -p $B0/${V0}5/tmp{1..10} | grep -c "default:other::rw-"`
+EXPECT "$succ1" echo $count
+
+cd -
+userdel --force tmpuser
+
+cleanup
diff --git a/tests/bugs/bug-1584517.t b/tests/bugs/bug-1584517.t
new file mode 100644
index 00000000000..7f48015a034
--- /dev/null
+++ b/tests/bugs/bug-1584517.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../dht.rc
+cleanup;
+#This test case verifies that directory attributes (uid/gid/permissions)
+#are healed after a brick is stopped and started. To verify this, the test
+#changes the attributes of the directory while one DHT subvolume and one
+#AFR child are down. After the volume is started with force and a lookup
+#is run, the attributes should be healed on the restarted bricks at the
+#backend.
+
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume start $V0
+TEST useradd dev -M
+TEST groupadd QA
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+TEST mkdir $M0/dironedown
+
+TEST kill_brick $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "5" online_brick_count
+
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "4" online_brick_count
+
+TEST kill_brick $V0 $H0 $B0/${V0}4
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "3" online_brick_count
+
+TEST kill_brick $V0 $H0 $B0/${V0}5
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "2" online_brick_count
+
+TEST chown dev $M0/dironedown
+TEST chgrp QA $M0/dironedown
+TEST chmod 777 $M0/dironedown
+
+#store the permissions for comparison
+permission_onedown=`ls -l $M0 | grep dironedown | awk '{print $1}'`
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "6" online_brick_count
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+#Run lookup two times to hit the revalidate code path in dht
+#so that the user attributes are healed
+
+TEST ls $M0/dironedown
+
+#check the attributes that were set after the bricks went down
+TEST brick_perm=`ls -l $B0/${V0}3 | grep dironedown | awk '{print $1}'`
+TEST echo $brick_perm
+TEST [ ${brick_perm} = ${permission_onedown} ]
+uid=`ls -l $B0/${V0}3 | grep dironedown | awk '{print $3}'`
+TEST echo $uid
+TEST [ $uid = dev ]
+gid=`ls -l $B0/${V0}3 | grep dironedown | awk '{print $4}'`
+TEST echo $gid
+TEST [ $gid = QA ]
+
+TEST umount $M0
+userdel --force dev
+groupdel QA
+
+cleanup
+exit
+
diff --git a/tests/bugs/bug-1620580.t b/tests/bugs/bug-1620580.t
new file mode 100644
index 00000000000..0c74d4a6089
--- /dev/null
+++ b/tests/bugs/bug-1620580.t
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+## Lets create volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+
+#do some operation on mount, so that kill_brick is guaranteed to be
+#done _after_ first lookup on root
+
+TEST ls $M0
+TEST touch $M0/file
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+# Case of same volume name, but different bricks
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{3,4};
+TEST $CLI volume start $V0;
+
+# Give time for 'reconnect' to happen
+sleep 4
+
+TEST ! ls $M0
+TEST ! touch $M0/file1
+
+# Case of same brick, but different volume (i.e., recreated).
+TEST $CLI volume create $V1 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V1;
+
+# Give time for 'reconnect' to happen
+sleep 4
+TEST ! ls $M0
+TEST ! touch $M0/file2
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST $CLI volume stop $V1
+TEST $CLI volume delete $V1
+
+# Case of same brick and same volume name, but a recreated volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
+TEST $CLI volume start $V0;
+
+# Give time for 'reconnect' to happen
+sleep 4
+TEST ! ls $M0
+TEST ! touch $M0/file3
+
+
+cleanup
diff --git a/tests/bugs/bug-1694920.t b/tests/bugs/bug-1694920.t
new file mode 100644
index 00000000000..5bf93c92f94
--- /dev/null
+++ b/tests/bugs/bug-1694920.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+SCRIPT_TIMEOUT=300
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../fileio.rc
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0};
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume start $V0
+TEST $GFS -s $H0 --volfile-id=$V0 $M0;
+
+TEST touch $M0/a
+
+#When all bricks are up, lock and unlock should succeed
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST fd_close $fd1
+
+#When all bricks are down, lock/unlock should fail
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST $CLI volume stop $V0
+TEST ! flock -x $fd1
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" client_connected_status_meta $M0 $V0-client-0
+TEST fd_close $fd1
+
+#When a brick goes down and comes back up operations on fd which had locks on it should succeed by default
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST $CLI volume stop $V0
+sleep 2
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" client_connected_status_meta $M0 $V0-client-0
+TEST fd_write $fd1 "data"
+TEST fd_close $fd1
+
+#When a brick goes down and comes back up operations on fd which had locks on it should fail when client.strict-locks is on
+TEST $CLI volume set $V0 client.strict-locks on
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST $CLI volume stop $V0
+sleep 2
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" client_connected_status_meta $M0 $V0-client-0
+TEST ! fd_write $fd1 "data"
+TEST fd_close $fd1
+
+cleanup
diff --git a/tests/bugs/bug-1702299.t b/tests/bugs/bug-1702299.t
new file mode 100644
index 00000000000..1cff2ed5d3d
--- /dev/null
+++ b/tests/bugs/bug-1702299.t
@@ -0,0 +1,67 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../dht.rc
+cleanup;
+
+function get_getfattr {
+ local path=$1
+ echo `getfattr -n user.foo $path` | cut -f2 -d"=" | sed -e 's/^"//' -e 's/"$//'
+}
+
+function set_fattr {
+ for i in `seq 1 10`
+ do
+ setfattr -n user.foo -v "newabc" ./tmp${i}
+ if [ "$?" = "0" ]
+ then
+ succ=$((succ+1))
+ else
+ fail=$((fail+1))
+ fi
+ done
+}
+
+
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 --attribute-timeout=0 $M0;
+
+cd $M0
+TEST mkdir tmp{1..10}
+
+succ=0 fail=0
+## set user.foo xattr with value newabc; all bricks are up, so every set
+## is expected to succeed
+set_fattr
+count=10
+EXPECT "$succ" echo $count
+count=0
+EXPECT "$fail" echo $count
+
+cd -
+
+# Add-brick
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{4,5}
+
+cd $M0
+## At this point the dht code will heal the xattr onto the newly added
+## bricks during lookup
+TEST stat ./tmp{1..10}
+
+
+## Count the user.foo xattr value with newabc on brick and compare with succ value
+count=`getfattr -n user.foo $B0/${V0}4/tmp{1..10} | grep "user.foo" | grep -iw "newabc" | wc -l`
+EXPECT "$succ" echo $count
+
+## Count the user.foo xattr value with newabc on brick and compare with succ value
+count=`getfattr -n user.foo $B0/${V0}5/tmp{1..10} | grep "user.foo" | grep -iw "newabc" | wc -l`
+EXPECT "$succ" echo $count
+
+
+cd -
+TEST umount $M0
+cleanup
diff --git a/tests/bugs/changelog/bug-1208470.t b/tests/bugs/changelog/bug-1208470.t
new file mode 100755
index 00000000000..526f8f20612
--- /dev/null
+++ b/tests/bugs/changelog/bug-1208470.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+## Testcase:
+## Avoid creating any EMPTY changelog (over the changelog rollover time)
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../changelog.rc
+cleanup;
+
+## override current changelog rollover-time
+## to avoid sleeping for a long duration.
+CL_RO_TIME=5
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 $H0:$B0/$V0"1";
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Set changelog ON
+TEST $CLI volume set $V0 changelog.changelog on;
+
+EXPECT 1 count_changelog_files $B0/${V0}1
+
+## Set changelog rollover time
+TEST $CLI volume set $V0 changelog.rollover-time $CL_RO_TIME;
+
+## Wait for changelog rollover time
+sleep $CL_RO_TIME
+
+## NO additional empty changelogs created
+EXPECT 1 count_changelog_files $B0/${V0}1
diff --git a/tests/bugs/changelog/bug-1211327.t b/tests/bugs/changelog/bug-1211327.t
new file mode 100644
index 00000000000..a849ec3981f
--- /dev/null
+++ b/tests/bugs/changelog/bug-1211327.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+#Testcase:
+#On brick restart, new HTIME.TSTAMP file should not be created.
+#But on changelog disable/enable HTIME.TSTAMP should be created.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../changelog.rc
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 $H0:$B0/$V0"1";
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume set $V0 changelog.changelog on;
+##Let changelog init complete before killing gluster processes
+sleep 1
+
+TEST killall_gluster;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "0" online_brick_count
+
+TEST glusterd;
+TEST pidof glusterd;
+
+##Let the brick processes start
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+
+##On brick restart only one HTIME should be found.
+EXPECT 1 count_htime_files;
+
+##On changelog disable/enable, new HTIME should be created.
+TEST $CLI volume set $V0 changelog.changelog off;
+TEST $CLI volume set $V0 changelog.changelog on;
+EXPECT 2 count_htime_files;
+
+cleanup;
diff --git a/tests/bugs/changelog/bug-1225542.t b/tests/bugs/changelog/bug-1225542.t
new file mode 100644
index 00000000000..a646aa88014
--- /dev/null
+++ b/tests/bugs/changelog/bug-1225542.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+#Testcase:
+#On snapshot, notify changelog to reconfigure upon explicit rollover,
+#irrespective of any failures, and send any error back to barrier.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+TEST verify_lvm_version
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST setup_lvm 1
+
+## Create a volume
+TEST $CLI volume create $V0 $H0:$L1
+
+## Start volume and verify
+TEST $CLI volume start $V0
+
+TEST $CLI volume set $V0 changelog.changelog on
+##Wait for changelog init to complete.
+sleep 1
+
+## Take snapshot
+TEST $CLI snapshot create snap1 $V0
+
+cleanup;
diff --git a/tests/bugs/changelog/bug-1321955.t b/tests/bugs/changelog/bug-1321955.t
new file mode 100644
index 00000000000..9e3752b1728
--- /dev/null
+++ b/tests/bugs/changelog/bug-1321955.t
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+#This file checks if missing entry self-heal and entry self-heal are working
+#as expected.
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 changelog.changelog enable
+TEST $CLI volume set $V0 changelog.capture-del-path on
+TEST $CLI volume start $V0
+
+#Mount the volume
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+#Create files
+TEST touch $M0/file1
+TEST mkdir $M0/dir1
+TEST touch $M0/dir1/file1
+
+#Check for presence of files
+TEST stat $B0/${V0}0/dir1/file1
+TEST stat $B0/${V0}1/dir1/file1
+TEST stat $B0/${V0}0/file1
+TEST stat $B0/${V0}1/file1
+
+#Kill brick1
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+#Del dir1/file1
+TEST rm -f $M0/dir1/file1
+
+#file1 should be present in brick1 and not in brick0
+TEST ! stat $B0/${V0}0/dir1/file1
+TEST stat $B0/${V0}1/dir1/file1
+
+#Bring up the brick which is down
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+#Initiate heal
+TEST $CLI volume heal $V0
+
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+#dir1/file1 in brick1 should be deleted
+TEST ! stat $B0/${V0}1/dir1/file1
+
+#file1 under root should not be deleted in brick1
+TEST stat $B0/${V0}1/file1
+
+cleanup;
diff --git a/tests/bugs/cli/bug-1004218.t b/tests/bugs/cli/bug-1004218.t
new file mode 100644
index 00000000000..ab8307d6405
--- /dev/null
+++ b/tests/bugs/cli/bug-1004218.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+# Test if only a single xml document is generated by 'status all'
+# when a volume is not started
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create ${V0}1 $H0:$B0/${V0}1{1,2}
+TEST $CLI volume create ${V0}2 $H0:$B0/${V0}2{1,2}
+
+TEST $CLI volume start ${V0}1
+
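+# xmllint fails to parse concatenated XML documents, so piping through it
+# catches the case where 'status all' emits more than one document.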
+function test_status_all ()
+{
+ $CLI volume status all --xml | xmllint -format -
+}
+
+TEST test_status_all
+
+TEST $CLI volume stop ${V0}1
+
+cleanup
diff --git a/tests/bugs/cli/bug-1022905.t b/tests/bugs/cli/bug-1022905.t
new file mode 100644
index 00000000000..ee629e970d9
--- /dev/null
+++ b/tests/bugs/cli/bug-1022905.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Create a volume
+TEST glusterd;
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1};
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Volume start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Enable a protected and a resettable/unprotected option
+TEST $CLI volume quota $V0 enable
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+
+## The reset command resets only the unprotected option(s), so it succeeds.
+TEST $CLI volume reset $V0;
+
+## Set an unprotected option
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+
+## Now 1 protected and 1 unprotected options are set
+## Reset force should succeed
+TEST $CLI volume reset $V0 force;
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/cli/bug-1030580.t b/tests/bugs/cli/bug-1030580.t
new file mode 100644
index 00000000000..ac8b1d8f6db
--- /dev/null
+++ b/tests/bugs/cli/bug-1030580.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
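+# Write 128MB to the mount so that the profile counters have data to report.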
+function write_to_file {
+ dd of=$M0/1 if=/dev/zero bs=1024k count=128 oflag=append >/dev/null 2>&1
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+# Increase the json stats dump interval so that it doesn't interfere with the test.
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 3600
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+# Clear the profile info collected so far.
+TEST $CLI volume profile $V0 info clear
+
+# Verify 'volume profile info' prints both cumulative and incremental stats
+write_to_file &
+wait
+output=$($CLI volume profile $V0 info)
+EXPECT 2 cumulative_stat_count "$output"
+EXPECT 2 incremental_stat_count "$output" ' 0 '
+
+# Verify 'volume profile info incremental' prints incremental stats only
+write_to_file &
+wait
+output=$($CLI volume profile $V0 info incremental)
+EXPECT 0 cumulative_stat_count "$output"
+EXPECT 2 incremental_stat_count "$output" ' 1 '
+
+# Verify 'volume profile info cumulative' prints cumulative stats only
+write_to_file &
+wait
+output=$($CLI volume profile $V0 info cumulative)
+EXPECT 2 cumulative_stat_count "$output"
+EXPECT 0 incremental_stat_count "$output" '.*'
+
+# Verify the 'volume profile info cumulative' command above didn't alter
+# the interval id
+write_to_file &
+wait
+output=$($CLI volume profile $V0 info incremental)
+EXPECT 0 cumulative_stat_count "$output"
+EXPECT 2 incremental_stat_count "$output" ' 2 '
+
+cleanup;
diff --git a/tests/bugs/cli/bug-1047378.t b/tests/bugs/cli/bug-1047378.t
new file mode 100644
index 00000000000..33ee6be3d8b
--- /dev/null
+++ b/tests/bugs/cli/bug-1047378.t
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST "echo volume list | $CLI --xml | xmllint --format -"
+
+cleanup
diff --git a/tests/bugs/cli/bug-1047416.t b/tests/bugs/cli/bug-1047416.t
new file mode 100644
index 00000000000..864301034c9
--- /dev/null
+++ b/tests/bugs/cli/bug-1047416.t
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function write_to_file {
+ dd of=$M0/1 if=/dev/zero bs=1024k count=128 oflag=append >/dev/null 2>&1
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+# Increase the json stats dump interval so that it doesn't interfere with the test.
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 3600
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+# Clear the profile info collected so far.
+TEST $CLI volume profile $V0 info clear
+
+# Verify 'volume profile info' prints both cumulative and incremental stats
+write_to_file &
+wait
+output=$($CLI volume profile $V0 info)
+EXPECT 2 cumulative_stat_count "$output"
+EXPECT 2 incremental_stat_count "$output" ' 0 '
+
+# Verify 'volume profile info peek' prints both cumulative and incremental stats
+# without clearing incremental stats
+write_to_file &
+wait
+output=$($CLI volume profile $V0 info peek)
+EXPECT 2 cumulative_stat_count "$output"
+EXPECT 2 incremental_stat_count "$output" ' 1 '
+
+write_to_file &
+wait
+output=$($CLI volume profile $V0 info peek)
+EXPECT 2 cumulative_stat_count "$output"
+EXPECT 2 incremental_stat_count "$output" ' 1 '
+
+# Verify 'volume profile info incremental peek' prints incremental stats only
+# without clearing incremental stats
+write_to_file &
+wait
+output=$($CLI volume profile $V0 info incremental peek)
+EXPECT 0 cumulative_stat_count "$output"
+EXPECT 2 incremental_stat_count "$output" ' 1 '
+
+write_to_file &
+wait
+output=$($CLI volume profile $V0 info incremental peek)
+EXPECT 0 cumulative_stat_count "$output"
+EXPECT 2 incremental_stat_count "$output" ' 1 '
+
+# Verify 'volume profile info clear' clears both incremental and cumulative stats
+write_to_file &
+wait
+output=$($CLI volume profile $V0 info clear)
+EXPECT 2 cleared_stat_count "$output"
+
+output=$($CLI volume profile $V0 info)
+EXPECT 2 cumulative_stat_count "$output"
+EXPECT 2 incremental_stat_count "$output" ' 0 '
+EXPECT 4 data_read_count "$output" ' 0 '
+EXPECT 4 data_written_count "$output" ' 0 '
+
+cleanup;
diff --git a/tests/bugs/cli/bug-1077682.t b/tests/bugs/cli/bug-1077682.t
new file mode 100644
index 00000000000..eab5d86d04b
--- /dev/null
+++ b/tests/bugs/cli/bug-1077682.t
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3,4}
+TEST $CLI volume start $V0
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" \
+"$H0:$B0/${V0}3"
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 commit
+TEST killall glusterd
+TEST glusterd
+
+cleanup
diff --git a/tests/bugs/cli/bug-1087487.t b/tests/bugs/cli/bug-1087487.t
new file mode 100755
index 00000000000..0659ffab684
--- /dev/null
+++ b/tests/bugs/cli/bug-1087487.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
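+# Print only the first line of the rebalance-start output so EXPECT can
+# match the success message.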
+function rebalance_start {
+ $CLI volume rebalance $1 start | head -1;
+}
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+
+TEST $CLI volume start $V0
+
+EXPECT "volume rebalance: $V0: success: Rebalance on $V0 has \
+been started successfully. Use rebalance status command to \
+check status of the rebalance process." rebalance_start $V0
+
+cleanup;
diff --git a/tests/bugs/cli/bug-1113476.t b/tests/bugs/cli/bug-1113476.t
new file mode 100644
index 00000000000..fc7dbca537d
--- /dev/null
+++ b/tests/bugs/cli/bug-1113476.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
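+# Print the value of option <var> as shown in 'volume info', empty if the
+# option is not displayed.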
+function volinfo_validate ()
+{
+ local var=$1
+ $CLI volume info $V0 | grep "^$var" | sed 's/.*: //'
+}
+
+cleanup;
+
+TEST verify_lvm_version
+TEST glusterd
+TEST pidof glusterd
+TEST setup_lvm 1
+
+TEST $CLI volume create $V0 $H0:$L1
+TEST $CLI volume start $V0
+
+EXPECT '' volinfo_validate 'snap-max-hard-limit'
+EXPECT '' volinfo_validate 'snap-max-soft-limit'
+EXPECT '' volinfo_validate 'auto-delete'
+
+TEST $CLI snapshot config snap-max-hard-limit 100
+TEST $CLI snapshot config $V0 snap-max-hard-limit 50
+EXPECT '' volinfo_validate 'snap-max-hard-limit'
+EXPECT '' volinfo_validate 'snap-max-soft-limit'
+EXPECT '' volinfo_validate 'auto-delete'
+
+TEST $CLI snapshot config snap-max-soft-limit 50
+EXPECT '' volinfo_validate 'snap-max-hard-limit'
+EXPECT '' volinfo_validate 'snap-max-soft-limit'
+EXPECT '' volinfo_validate 'auto-delete'
+
+TEST $CLI snapshot config auto-delete enable
+EXPECT '' volinfo_validate 'snap-max-hard-limit'
+EXPECT '' volinfo_validate 'snap-max-soft-limit'
+EXPECT 'enable' volinfo_validate 'auto-delete'
+
+cleanup;
+
+
diff --git a/tests/bugs/cli/bug-1169302.c b/tests/bugs/cli/bug-1169302.c
new file mode 100644
index 00000000000..7c6b5fbf856
--- /dev/null
+++ b/tests/bugs/cli/bug-1169302.c
@@ -0,0 +1,79 @@
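+/* Minimal gfapi client used by bug-1169302.t: it creates a file on the given
+ * volume and then idles until SIGTERM, giving the test a live gfapi process
+ * to take a statedump of. */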
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+int keep_running = 1;
+
+void
+stop_running(int sig)
+{
+ if (sig == SIGTERM)
+ keep_running = 0;
+}
+
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs = NULL;
+ int ret = 0;
+ glfs_fd_t *fd = NULL;
+ char *filename = NULL;
+ char *logfile = NULL;
+ char *host = NULL;
+
+ if (argc != 5) {
+ return -1;
+ }
+
+ host = argv[2];
+ logfile = argv[3];
+ filename = argv[4];
+
+ /* setup signal handler for exiting */
+ signal(SIGTERM, stop_running);
+
+ fs = glfs_new(argv[1]);
+ if (!fs) {
+ return -1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", host, 24007);
+ if (ret < 0) {
+ return -1;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ return -1;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ return -1;
+ }
+
+ fd = glfs_creat(fs, filename, O_RDWR, 0644);
+ if (!fd) {
+ return -1;
+ }
+
+ /* sleep until SIGTERM has been received */
+ while (keep_running) {
+ sleep(1);
+ }
+
+ ret = glfs_close(fd);
+ if (ret < 0) {
+ return -1;
+ }
+
+ ret = glfs_fini(fs);
+ if (ret < 0) {
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/bugs/cli/bug-1169302.t b/tests/bugs/cli/bug-1169302.t
new file mode 100755
index 00000000000..19660e033a8
--- /dev/null
+++ b/tests/bugs/cli/bug-1169302.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
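+# Count peers that are connected and in the cluster.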
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+cleanup
+
+#setup cluster and test volume
+TEST launch_cluster 3; # start 3-node virtual cluster
+TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
+TEST $CLI_1 volume start $V0
+
+# test CLI parameter acceptance
+TEST $CLI_1 volume statedump $V0
+TEST $CLI_2 volume statedump $V0
+TEST $CLI_3 volume statedump $V0
+TEST ! $CLI_1 volume statedump $V0 client $H2:0
+TEST ! $CLI_2 volume statedump $V0 client $H2:-1
+TEST $CLI_3 volume statedump $V0 client $H2:765
+TEST ! $CLI_1 volume statedump $V0 client $H2:
+TEST ! $CLI_2 volume statedump $V0 client
+TEST ! $CLI_3 volume statedump $V0 client $H2 $GFAPI_PID
+
+# build and run a gfapi application for triggering a statedump
+logdir=`gluster --print-logdir`
+STATEDUMP_TIMEOUT=60
+
+build_tester $(dirname $0)/bug-1169302.c -lgfapi
+$(dirname $0)/bug-1169302 $V0 $H1 $logdir/bug-1169302.log testfile & GFAPI_PID=$!
+
+cleanup_statedump
+
+# Take the statedump of the process connected to $H1; it should match the
+# hostname or IP-address with the connection from the bug-1169302 executable.
+# In our CI it seems not possible to use $H0, 'localhost', $(hostname --fqdn)
+# or even "127.0.0.1"....
+sleep 2
+host=`netstat -nap | grep $GFAPI_PID | grep 24007 | awk '{print $4}' | cut -d: -f1`
+TEST $CLI_3 volume statedump $V0 client $host:$GFAPI_PID
+EXPECT_WITHIN $STATEDUMP_TIMEOUT "Y" path_exists $statedumpdir/glusterdump.$GFAPI_PID*
+
+kill $GFAPI_PID
+
+cleanup_statedump
+cleanup_tester $(dirname $0)/bug-1169302
+cleanup
\ No newline at end of file
diff --git a/tests/bugs/cli/bug-1320388.t b/tests/bugs/cli/bug-1320388.t
new file mode 100755
index 00000000000..e719fc59033
--- /dev/null
+++ b/tests/bugs/cli/bug-1320388.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This test enables management SSL and then tests the
+# heal info command.
+
+for d in /etc/ssl /etc/openssl /usr/local/etc/openssl ; do
+ if test -d $d ; then
+ SSL_BASE=$d
+ break
+ fi
+done
+
+SSL_KEY=$SSL_BASE/glusterfs.key
+SSL_CERT=$SSL_BASE/glusterfs.pem
+SSL_CA=$SSL_BASE/glusterfs.ca
+
+cleanup;
+rm -f $SSL_BASE/glusterfs.*
+touch "$GLUSTERD_WORKDIR"/secure-access
+
+TEST openssl genrsa -out $SSL_KEY 2048
+TEST openssl req -new -x509 -key $SSL_KEY -subj /CN=Anyone -out $SSL_CERT
+ln $SSL_CERT $SSL_CA
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume set $V0 disperse.eager-lock off
+TEST $CLI volume set $V0 disperse.other-eager-lock off
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^6$" ec_child_up_count $V0 0
+touch $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}5
+echo abc > $M0/a
+EXPECT_WITHIN $HEAL_TIMEOUT "^5$" get_pending_heal_count $V0 #One for each active brick
+$CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^6$" ec_child_up_count $V0 0
+TEST gluster volume heal $V0 info
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0 #One for each active brick
+cleanup;
diff --git a/tests/bugs/cli/bug-1353156-get-state-cli-validations.t b/tests/bugs/cli/bug-1353156-get-state-cli-validations.t
new file mode 100644
index 00000000000..a4556c9c997
--- /dev/null
+++ b/tests/bugs/cli/bug-1353156-get-state-cli-validations.t
@@ -0,0 +1,147 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../traps.rc
+
+cleanup;
+
+ODIR="/var/tmp/gdstates/"
+NOEXDIR="/var/tmp/gdstatesfoo/"
+
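+# The helpers below pick individual words (by position) out of CLI error
+# messages so that EXPECT can match them.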
+function get_daemon_not_supported_part {
+ echo $1
+}
+
+function get_usage_part {
+ echo $7
+}
+
+function get_directory_doesnt_exist_part {
+ echo $1
+}
+
+function get_parsing_arguments_part {
+ echo $1
+}
+
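+# Run the given get-state command, check that its output announces where the
+# state was dumped, and verify the dump file exists before removing it.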
+function positive_test {
+ local text=$("$@")
+ echo $text > /dev/stderr
+ (echo -n $text | grep -qs ' state dumped to ') || return 1
+ local opath=$(echo -n $text | awk '{print $5}')
+ [ -r $opath ] || return 1
+ rm -f $opath
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST mkdir -p $ODIR
+
+push_trapfunc rm -rf $ODIR
+
+TEST $CLI volume create $V0 disperse $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
+TEST $CLI volume start $V0
+
+TEST setup_lvm 1
+TEST $CLI volume create $V1 $H0:$L1;
+TEST $CLI volume start $V1
+
+TEST $CLI snapshot create ${V1}_snap $V1
+
+TEST positive_test $CLI get-state
+
+TEST positive_test $CLI get-state glusterd
+
+TEST ! $CLI get-state glusterfsd;
+ERRSTR=$($CLI get-state glusterfsd 2>&1 >/dev/null);
+EXPECT 'glusterd' get_daemon_not_supported_part $ERRSTR;
+EXPECT 'Usage:' get_usage_part $ERRSTR;
+
+TEST positive_test $CLI get-state file gdstate
+
+TEST positive_test $CLI get-state glusterd file gdstate
+
+TEST ! $CLI get-state glusterfsd file gdstate;
+ERRSTR=$($CLI get-state glusterfsd file gdstate 2>&1 >/dev/null);
+EXPECT 'glusterd' get_daemon_not_supported_part $ERRSTR;
+EXPECT 'Usage:' get_usage_part $ERRSTR;
+
+TEST positive_test $CLI get-state odir $ODIR
+
+TEST positive_test $CLI get-state glusterd odir $ODIR
+
+TEST positive_test $CLI get-state odir $ODIR file gdstate
+
+TEST positive_test $CLI get-state glusterd odir $ODIR file gdstate
+
+TEST positive_test $CLI get-state detail
+
+TEST positive_test $CLI get-state glusterd detail
+
+TEST positive_test $CLI get-state odir $ODIR detail
+
+TEST positive_test $CLI get-state glusterd odir $ODIR detail
+
+TEST positive_test $CLI get-state glusterd odir $ODIR file gdstate detail
+
+TEST positive_test $CLI get-state volumeoptions
+
+TEST positive_test $CLI get-state glusterd volumeoptions
+
+TEST positive_test $CLI get-state odir $ODIR volumeoptions
+
+TEST positive_test $CLI get-state glusterd odir $ODIR volumeoptions
+
+TEST positive_test $CLI get-state glusterd odir $ODIR file gdstate volumeoptions
+
+TEST ! $CLI get-state glusterfsd odir $ODIR;
+ERRSTR=$($CLI get-state glusterfsd odir $ODIR 2>&1 >/dev/null);
+EXPECT 'glusterd' get_daemon_not_supported_part $ERRSTR;
+EXPECT 'Usage:' get_usage_part $ERRSTR;
+
+TEST ! $CLI get-state glusterfsd odir $ODIR file gdstate;
+ERRSTR=$($CLI get-state glusterfsd odir $ODIR file gdstate 2>&1 >/dev/null);
+EXPECT 'glusterd' get_daemon_not_supported_part $ERRSTR;
+EXPECT 'Usage:' get_usage_part $ERRSTR;
+
+TEST ! $CLI get-state glusterfsd odir $NOEXDIR file gdstate;
+ERRSTR=$($CLI get-state glusterfsd odir $NOEXDIR file gdstate 2>&1 >/dev/null);
+EXPECT 'glusterd' get_daemon_not_supported_part $ERRSTR;
+EXPECT 'Usage:' get_usage_part $ERRSTR;
+
+TEST ! $CLI get-state odir $NOEXDIR;
+ERRSTR=$($CLI get-state odir $NOEXDIR 2>&1 >/dev/null);
+EXPECT 'Failed' get_directory_doesnt_exist_part $ERRSTR;
+
+TEST ! $CLI get-state odir $NOEXDIR file gdstate;
+ERRSTR=$($CLI get-state odir $NOEXDIR file gdstate 2>&1 >/dev/null);
+EXPECT 'Failed' get_directory_doesnt_exist_part $ERRSTR;
+
+TEST ! $CLI get-state foo bar;
+ERRSTR=$($CLI get-state foo bar 2>&1 >/dev/null);
+EXPECT 'glusterd' get_daemon_not_supported_part $ERRSTR;
+EXPECT 'Usage:' get_usage_part $ERRSTR;
+
+TEST ! $CLI get-state glusterd foo bar;
+ERRSTR=$($CLI get-state glusterd foo bar 2>&1 >/dev/null);
+EXPECT 'Problem' get_parsing_arguments_part $ERRSTR;
+
+TEST ! $CLI get-state glusterd detail file gdstate;
+ERRSTR=$($CLI get-state glusterd detail file gdstate 2>&1 >/dev/null);
+EXPECT 'Problem' get_parsing_arguments_part $ERRSTR;
+
+TEST ! $CLI get-state glusterd foo bar detail;
+ERRSTR=$($CLI get-state glusterd foo bar detail 2>&1 >/dev/null);
+EXPECT 'Problem' get_parsing_arguments_part $ERRSTR;
+
+TEST ! $CLI get-state glusterd volumeoptions file gdstate;
+ERRSTR=$($CLI get-state glusterd volumeoptions file gdstate 2>&1 >/dev/null);
+EXPECT 'Problem' get_parsing_arguments_part $ERRSTR;
+
+TEST ! $CLI get-state glusterd foo bar volumeoptions;
+ERRSTR=$($CLI get-state glusterd foo bar volumeoptions 2>&1 >/dev/null);
+EXPECT 'Problem' get_parsing_arguments_part $ERRSTR;
+
+cleanup;
diff --git a/tests/bugs/cli/bug-1378842-volume-get-all.t b/tests/bugs/cli/bug-1378842-volume-get-all.t
new file mode 100644
index 00000000000..be41f25b000
--- /dev/null
+++ b/tests/bugs/cli/bug-1378842-volume-get-all.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume set all server-quorum-ratio 80
+
+# Execute volume get without an explicit option; this should fail
+TEST ! $CLI volume get all
+
+# Execute volume get with an explicit global option
+TEST $CLI volume get all server-quorum-ratio
+EXPECT '80' volume_get_field all 'cluster.server-quorum-ratio'
+
+# Execute volume get with 'all'
+TEST $CLI volume get all all
+
+cleanup;
+
diff --git a/tests/bugs/cli/bug-764638.t b/tests/bugs/cli/bug-764638.t
new file mode 100644
index 00000000000..ffc613409d6
--- /dev/null
+++ b/tests/bugs/cli/bug-764638.t
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI pool list;
+TEST $CLI pool list --xml;
+
+cleanup;
diff --git a/tests/bugs/cli/bug-822830.t b/tests/bugs/cli/bug-822830.t
new file mode 100755
index 00000000000..a9904854110
--- /dev/null
+++ b/tests/bugs/cli/bug-822830.t
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting nfs.rpc-auth-reject as 192.{}.1.2
+TEST ! $CLI volume set $V0 nfs.rpc-auth-reject 192.{}.1.2
+EXPECT '' volinfo_field $V0 'nfs.rpc-auth-reject';
+
+# Setting nfs.rpc-auth-allow as a.a.
+TEST ! $CLI volume set $V0 nfs.rpc-auth-allow a.a.
+EXPECT '' volinfo_field $V0 'nfs.rpc-auth-allow';
+
+## Setting nfs.rpc-auth-reject as 192.*..*
+TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.*..*
+EXPECT '192.*..*' volinfo_field $V0 'nfs.rpc-auth-reject';
+
+# Setting nfs.rpc-auth-allow as a.a
+TEST $CLI volume set $V0 nfs.rpc-auth-allow a.a
+EXPECT 'a.a' volinfo_field $V0 'nfs.rpc-auth-allow';
+
+# Setting nfs.rpc-auth-allow as *.redhat.com
+TEST $CLI volume set $V0 nfs.rpc-auth-allow *.redhat.com
+EXPECT '\*.redhat.com' volinfo_field $V0 'nfs.rpc-auth-allow';
+
+# Setting nfs.rpc-auth-allow as 192.168.10.[1-5]
+TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.10.[1-5]
+EXPECT '192.168.10.\[1-5]' volinfo_field $V0 'nfs.rpc-auth-allow';
+
+# Setting nfs.rpc-auth-allow as 192.168.70.?
+TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.70.?
+EXPECT '192.168.70.?' volinfo_field $V0 'nfs.rpc-auth-allow';
+
+# Setting nfs.rpc-auth-reject as 192.168.10.5/16
+TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.168.10.5/16
+EXPECT '192.168.10.5/16' volinfo_field $V0 'nfs.rpc-auth-reject';
+
+## Setting nfs.rpc-auth-reject as 192.*.*
+TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.*.*
+EXPECT '192.*.*' volinfo_field $V0 'nfs.rpc-auth-reject';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/cli/bug-867252.t b/tests/bugs/cli/bug-867252.t
new file mode 100644
index 00000000000..ccc33d82a0f
--- /dev/null
+++ b/tests/bugs/cli/bug-867252.t
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+
+
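+# Print the value of <field> from 'volume info <vol>'.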
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+
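+# Count the BrickN: lines in 'volume info <vol>'.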
+function brick_count()
+{
+ local vol=$1;
+
+ $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
+}
+
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '1' brick_count $V0
+
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2;
+EXPECT '2' brick_count $V0
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force;
+EXPECT '1' brick_count $V0
+
+cleanup;
diff --git a/tests/bugs/cli/bug-921215.t b/tests/bugs/cli/bug-921215.t
new file mode 100755
index 00000000000..02532562cff
--- /dev/null
+++ b/tests/bugs/cli/bug-921215.t
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+# This is the test case for bug 921215 "Can not create volume with a . in the name"
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST ! $CLI volume create $V0.temp replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+
+cleanup;
diff --git a/tests/bugs/cli/bug-949298.t b/tests/bugs/cli/bug-949298.t
new file mode 100644
index 00000000000..e0692f0c157
--- /dev/null
+++ b/tests/bugs/cli/bug-949298.t
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI --xml volume info $V0
+
+cleanup;
diff --git a/tests/bugs/cli/bug-961307.t b/tests/bugs/cli/bug-961307.t
new file mode 100644
index 00000000000..602a6e34bce
--- /dev/null
+++ b/tests/bugs/cli/bug-961307.t
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+REPLICA=2
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica $REPLICA $H0:$B0/${V0}-00 $H0:$B0/${V0}-01 $H0:$B0/${V0}-10 $H0:$B0/${V0}-11
+TEST $CLI volume start $V0
+
+var1=$($CLI volume remove-brick $H0:$B0/${V0}-00 $H0:$B0/${V0}-01 start 2>&1)
+var2="volume remove-brick start: failed: Volume $H0:$B0/${V0}-00 does not exist"
+
+EXPECT "$var2" echo "$var1"
+cleanup;
diff --git a/tests/bugs/cli/bug-969193.t b/tests/bugs/cli/bug-969193.t
new file mode 100755
index 00000000000..dd6d7cdf100
--- /dev/null
+++ b/tests/bugs/cli/bug-969193.t
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+# Test that "system getspec" works without op_version problems.
+
+. $(dirname $0)/../../include.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+TEST $CLI volume create $V0 $H0:$B0/brick1
+TEST $CLI system getspec $V0
+cleanup;
diff --git a/tests/bugs/cli/bug-977246.t b/tests/bugs/cli/bug-977246.t
new file mode 100644
index 00000000000..bb8d6328e8b
--- /dev/null
+++ b/tests/bugs/cli/bug-977246.t
@@ -0,0 +1,21 @@
+#! /bin/bash
+
+# This test checks that address validation correctly catches hostnames
+# with consecutive dots, such as 'example..org', as invalid
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1
+TEST $CLI volume info $V0
+TEST $CLI volume start $V0
+
+TEST ! $CLI volume set $V0 auth.allow example..org
+
+TEST $CLI volume stop $V0
+
+cleanup;
diff --git a/tests/bugs/cli/bug-982174.t b/tests/bugs/cli/bug-982174.t
new file mode 100644
index 00000000000..067e5b97c17
--- /dev/null
+++ b/tests/bugs/cli/bug-982174.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+# Test to check log-level option validation
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#Check that incorrect log-level keywords do not crash the CLI
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2
+TEST $CLI volume start $V0
+
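+# Try to set the client log level and print 'success' or 'failed' from the
+# CLI output.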
+function set_log_level_status {
+ local level=$1
+ $CLI volume set $V0 diagnostics.client-log-level $level 2>&1 |grep -oE 'success|failed'
+}
+
+
+LOG_LEVEL="trace"
+EXPECT "failed" set_log_level_status $LOG_LEVEL
+
+
+LOG_LEVEL="error-gen"
+EXPECT "failed" set_log_level_status $LOG_LEVEL
+
+
+LOG_LEVEL="TRACE"
+EXPECT "success" set_log_level_status $LOG_LEVEL
+
+EXPECT "$LOG_LEVEL" echo `$CLI volume info | grep diagnostics | awk '{print $2}'`
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/cli/bug-983317-volume-get.t b/tests/bugs/cli/bug-983317-volume-get.t
new file mode 100644
index 00000000000..c793bbc9f0c
--- /dev/null
+++ b/tests/bugs/cli/bug-983317-volume-get.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Set a volume option
+TEST $CLI volume set $V0 open-behind on
+TEST $CLI volume start $V0
+
+TEST $CLI volume set all server-quorum-ratio 80
+
+TEST $CLI volume set $V0 user.metadata 'dummy'
+
+# Execute volume get without an explicit option; this should fail
+TEST ! $CLI volume get $V0
+
+# Execute volume get with an explicit option
+TEST $CLI volume get $V0 open-behind
+EXPECT 'on' volume_get_field $V0 'open-behind'
+
+# Execute volume get with 'all'
+TEST $CLI volume get $V0 all
+
+# Check if volume get can display correct global options values as well
+EXPECT '80' volume_get_field $V0 'server-quorum-ratio'
+
+# Check user.* options can also be retrieved using volume get
+EXPECT 'dummy' volume_get_field $V0 'user.metadata'
+
+TEST $CLI volume set all brick-multiplex enable
+EXPECT 'enable' volume_get_field $V0 'brick-multiplex'
+
+TEST $CLI volume set all brick-multiplex disable
+EXPECT 'disable' volume_get_field $V0 'brick-multiplex'
+
+# Setting a cluster-level option on a single volume should fail
+TEST ! $CLI volume set $V0 brick-multiplex enable
+
diff --git a/tests/bugs/core/949327.t b/tests/bugs/core/949327.t
new file mode 100644
index 00000000000..6b8033a5c85
--- /dev/null
+++ b/tests/bugs/core/949327.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
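+# Count /tmp/tmp.* files; the test verifies that starting a volume does not
+# leave new ones behind.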
+function tmp_file_count()
+{
+ echo $(ls -lh /tmp/tmp.* 2>/dev/null | wc -l)
+}
+
+
+old_count=$(tmp_file_count);
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+new_count=$(tmp_file_count);
+
+TEST [ "$old_count" -eq "$new_count" ]
+
+cleanup
diff --git a/tests/bugs/core/brick-mux-fd-cleanup.t b/tests/bugs/core/brick-mux-fd-cleanup.t
new file mode 100644
index 00000000000..de11c177b8a
--- /dev/null
+++ b/tests/bugs/core/brick-mux-fd-cleanup.t
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This .t tests that the fds from the client are closed on the brick when
+#'gluster volume stop' is executed in a brick-mux setup.
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+function keep_fd_open {
+#This function has to be run as a background job because opening the fds in
+#the foreground and running commands leads to flush calls on these fds,
+#which makes it very difficult to create the race where fds are left
+#open even after the brick dies.
+ exec 5>$M1/a
+ exec 6>$M1/b
+ while [ -f $M0/a ]; do sleep 1; done
+}
+
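+# Count open fds of the given brick process whose targets match <pattern>.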
+function count_open_files {
+ local brick_pid="$1"
+ local pattern="$2"
+ ls -l /proc/$brick_pid/fd | grep -i "$pattern" | wc -l
+}
+
+TEST $CLI volume set all cluster.brick-multiplex on
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume create $V1 replica 2 $H0:$B0/${V1}{2,3}
+#Have the same configuration on both volumes so that their bricks are multiplexed
+#Delay the flush fop by a second
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume heal $V1 disable
+TEST $CLI volume set $V0 delay-gen posix
+TEST $CLI volume set $V0 delay-gen.enable flush
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 delay-gen.delay-duration 1000000
+TEST $CLI volume set $V1 delay-gen posix
+TEST $CLI volume set $V1 delay-gen.enable flush
+TEST $CLI volume set $V1 delay-gen.delay-percentage 100
+TEST $CLI volume set $V1 delay-gen.delay-duration 1000000
+
+TEST $CLI volume start $V0
+TEST $CLI volume start $V1
+
+TEST $GFS -s $H0 --volfile-id=$V0 --direct-io-mode=enable $M0
+TEST $GFS -s $H0 --volfile-id=$V1 --direct-io-mode=enable $M1
+
+TEST touch $M0/a
+keep_fd_open &
+TEST $CLI volume profile $V1 start
+brick_pid=$(get_brick_pid $V1 $H0 $B0/${V1}2)
+TEST count_open_files $brick_pid "$B0/${V1}2/a"
+TEST count_open_files $brick_pid "$B0/${V1}2/b"
+TEST count_open_files $brick_pid "$B0/${V1}3/a"
+TEST count_open_files $brick_pid "$B0/${V1}3/b"
+
+#If any flush fops other than the one at cleanup are introduced into the
+#system, they interfere with the race, so test for that
+EXPECT "^0$" echo "$($CLI volume profile $V1 info incremental | grep -i flush | wc -l)"
+#Stop the volume
+TEST $CLI volume stop $V1
+
+#Wait for the resources of volume V1 to be cleaned up
+EXPECT_WITHIN $GRAPH_SWITCH_TIMEOUT "^0$" count_open_files $brick_pid "$B0/${V1}2/a"
+EXPECT_WITHIN $GRAPH_SWITCH_TIMEOUT "^0$" count_open_files $brick_pid "$B0/${V1}2/b"
+EXPECT_WITHIN $GRAPH_SWITCH_TIMEOUT "^0$" count_open_files $brick_pid "$B0/${V1}3/a"
+EXPECT_WITHIN $GRAPH_SWITCH_TIMEOUT "^0$" count_open_files $brick_pid "$B0/${V1}3/b"
+
+TEST rm -f $M0/a #Exit keep_fd_open()
+wait
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+cleanup
diff --git a/tests/bugs/core/bug-1110917.t b/tests/bugs/core/bug-1110917.t
new file mode 100644
index 00000000000..c4b04fbf2c7
--- /dev/null
+++ b/tests/bugs/core/bug-1110917.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2;
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+TEST $CLI volume set $V0 changelog on
+TEST $CLI volume set $V0 changelog.fsync-interval 1
+
+# perform I/O in the background
+f=$(basename `mktemp -t ${0##*/}.XXXXXX`)
+dd if=/dev/urandom of=$M0/$f count=100000 bs=4k &
+
+# this is the best we can do without inducing _error points_ in the code;
+# without the patch, reconfigure() would hang...
+TEST $CLI volume set $V0 changelog.rollover-time `expr $((RANDOM % 9)) + 1`
+TEST $CLI volume set $V0 changelog.rollover-time `expr $((RANDOM % 9)) + 1`
+
+TEST $CLI volume set $V0 changelog off
+TEST $CLI volume set $V0 changelog on
+TEST $CLI volume set $V0 changelog off
+TEST $CLI volume set $V0 changelog on
+
+TEST $CLI volume set $V0 changelog.rollover-time `expr $((RANDOM % 9)) + 1`
+TEST $CLI volume set $V0 changelog.rollover-time `expr $((RANDOM % 9)) + 1`
+
+# if there's a deadlock, this would hang
+wait;
+
+cleanup;
diff --git a/tests/bugs/core/bug-1111557.t b/tests/bugs/core/bug-1111557.t
new file mode 100644
index 00000000000..4ed45761bce
--- /dev/null
+++ b/tests/bugs/core/bug-1111557.t
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0}
+TEST $CLI volume set $V0 diagnostics.brick-log-buf-size 0
+TEST ! $CLI volume set $V0 diagnostics.brick-log-buf-size -0
+cleanup
diff --git a/tests/bugs/core/bug-1117951.t b/tests/bugs/core/bug-1117951.t
new file mode 100644
index 00000000000..b484fee2fe4
--- /dev/null
+++ b/tests/bugs/core/bug-1117951.t
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/brick
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume start $V0
+
+# Running with a locale not using '.' as decimal separator should work
+export LC_NUMERIC=sv_SE.utf8
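+# (sv_SE renders decimals with ',', e.g. 1.5 prints as "1,5")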
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+# As should a locale using '.' as a decimal separator
+export LC_NUMERIC=C
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+cleanup
diff --git a/tests/bugs/core/bug-1119582.t b/tests/bugs/core/bug-1119582.t
new file mode 100644
index 00000000000..c30057c2b2c
--- /dev/null
+++ b/tests/bugs/core/bug-1119582.t
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+. $(dirname $0)/../../nfs.rc
+
+cleanup;
+
+TEST glusterd;
+
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+
+TEST $CLI volume set $V0 features.uss disable;
+
+TEST killall glusterd;
+
+rm -f $GLUSTERD_WORKDIR/vols/$V0/snapd.info
+
+TEST glusterd
+
+cleanup ;
diff --git a/tests/bugs/core/bug-1135514-allow-setxattr-with-null-value.t b/tests/bugs/core/bug-1135514-allow-setxattr-with-null-value.t
new file mode 100644
index 00000000000..d26aa561321
--- /dev/null
+++ b/tests/bugs/core/bug-1135514-allow-setxattr-with-null-value.t
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#Test
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST touch $M0/file
+TEST setfattr -n user.attribute1 $M0/file
+TEST getfattr -n user.attribute1 $M0/file
+cleanup
+
diff --git a/tests/bugs/core/bug-1168803-snapd-option-validation-fix.t b/tests/bugs/core/bug-1168803-snapd-option-validation-fix.t
new file mode 100755
index 00000000000..89b015d6374
--- /dev/null
+++ b/tests/bugs/core/bug-1168803-snapd-option-validation-fix.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+## Test case for BZ-1168803 - snapd option validation should not fail if the
+#snapd is not running
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## create volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 features.uss enable
+
+## Now set another volume option, this should not fail
+TEST $CLI volume set $V0 performance.io-cache off
+
+## start the volume
+TEST $CLI volume start $V0
+
+## Kill snapd daemon and then try to stop the volume which should not fail
+kill $(ps aux | grep glusterfsd | grep snapd | awk '{print $2}')
+
+TEST $CLI volume stop $V0
+
+cleanup;
diff --git a/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t b/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
new file mode 100755
index 00000000000..a1b9a851bf7
--- /dev/null
+++ b/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+FILE_COUNT=500
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 cluster.shd-wait-qlength 100
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+for i in `seq 1 $FILE_COUNT`; do touch $M0/file$i; done
+TEST kill_brick $V0 $H0 $B0/${V0}1
+for i in `seq 1 $FILE_COUNT`; do echo hello>$M0/file$i; chmod -x $M0/file$i; done
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+EXPECT "$FILE_COUNT" get_pending_heal_count $V0
+TEST $CLI volume set $V0 self-heal-daemon on
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+TEST $CLI volume set $V0 self-heal-daemon off
+EXPECT_NOT "^0$" get_pending_heal_count $V0
+TEST $CLI volume set $V0 self-heal-daemon on
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+TEST umount $M0
+cleanup;
diff --git a/tests/bugs/core/bug-1421721-mpx-toggle.t b/tests/bugs/core/bug-1421721-mpx-toggle.t
new file mode 100644
index 00000000000..231be5b81a0
--- /dev/null
+++ b/tests/bugs/core/bug-1421721-mpx-toggle.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+write_a_file () {
+ echo $1 > $2
+}
+
+TEST glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+
+TEST $CLI volume set all cluster.brick-multiplex on
+TEST $CLI volume start $V0
+
+TEST $GFS -s $H0 --volfile-id=$V0 $M0
+TEST write_a_file "hello" $M0/a_file
+
+TEST force_umount $M0
+TEST $CLI volume stop $V0
+
+TEST $CLI volume set all cluster.brick-multiplex off
+TEST $CLI volume start $V0
+
+cleanup
diff --git a/tests/bugs/core/bug-1432542-mpx-restart-crash.t b/tests/bugs/core/bug-1432542-mpx-restart-crash.t
new file mode 100644
index 00000000000..2793d7008e1
--- /dev/null
+++ b/tests/bugs/core/bug-1432542-mpx-restart-crash.t
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+SCRIPT_TIMEOUT=800
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../traps.rc
+
+cleanup;
+
+NUM_VOLS=15
+MOUNT_BASE=$(dirname $M0)
+
+# GlusterD reports that bricks are started when in fact their attach requests
+# might still need to be retried. That's a bit of a hack, but there's no
+# feasible way to wait at that point (in attach_brick) and the rest of the
+# code is unprepared to deal with transient errors so the whole "brick start"
+# would fail. Meanwhile, glusterfsd can only handle attach requests at a
+# rather slow rate. After GlusterD tries to start a couple of hundred bricks,
+# glusterfsd can fall behind and we start getting mount failures. Arguably,
+# those are spurious because we will eventually catch up. We're just not
+# ready *yet*. More to the point, even if the errors aren't spurious that's
+# not what we're testing right now. Therefore, we give glusterfsd a bit more
+# breathing room for this test than we would otherwise.
+MOUNT_TIMEOUT=15
+
+get_brick_base () {
+ printf "%s/vol%02d" $B0 $1
+}
+
+get_mount_point () {
+ printf "%s/vol%02d" $MOUNT_BASE $1
+}
+
+function count_up_bricks {
+ vol=$1;
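+    # each running brick (and the self-heal daemon) contributes one
+    # '<status>1' element to the XML status output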
+ $CLI --xml volume status $vol | grep '<status>1' | wc -l
+}
+
+create_volume () {
+
+ local vol_name=$(printf "%s-vol%02d" $V0 $1)
+
+ local brick_base=$(get_brick_base $1)
+ local cmd="$CLI volume create $vol_name replica 3"
+ local b
+ for b in $(seq 0 5); do
+ local this_brick=${brick_base}/brick$b
+ mkdir -p $this_brick
+ cmd="$cmd $H0:$this_brick"
+ done
+ TEST $cmd
+ TEST $CLI volume start $vol_name
+ # check for 6 bricks and 1 shd daemon to be up and running
+ EXPECT_WITHIN 120 7 count_up_bricks $vol_name
+ local mount_point=$(get_mount_point $1)
+ mkdir -p $mount_point
+ TEST $GFS -s $H0 --volfile-id=$vol_name $mount_point
+}
+
+cleanup_func () {
+ local v
+ for v in $(seq 1 $NUM_VOLS); do
+ local mount_point=$(get_mount_point $v)
+ force_umount $mount_point
+ rm -rf $mount_point
+ local vol_name=$(printf "%s-vol%02d" $V0 $v)
+ $CLI volume stop $vol_name
+ $CLI volume delete $vol_name
+ rm -rf $(get_brick_base $1) &
+ done &> /dev/null
+ wait
+}
+push_trapfunc cleanup_func
+
+TEST glusterd
+TEST $CLI volume set all cluster.brick-multiplex on
+
+# Our infrastructure can't handle an arithmetic expression here. The formula
+# is (NUM_VOLS-1)*6 because it sees each TEST/EXPECT once but needs the other
+# NUM_VOLS-1 and there are 6 such statements in each iteration.
+TESTS_EXPECTED_IN_LOOP=84
+for i in $(seq 1 $NUM_VOLS); do
+ starttime="$(date +%s)";
+
+ create_volume $i
+ TEST dd if=/dev/zero of=$(get_mount_point $i)/a_file bs=4k count=1
+ # Unmounting to reduce memory footprint on regression hosts
+ mnt_point=$(get_mount_point $i)
+ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $mnt_point
+ endtime=$(expr $(date +%s) - $starttime)
+
+ echo "Memory Used after $i volumes : $(pmap -x $(pgrep glusterfsd) | grep total)"
+ echo "Thread Count after $i volumes: $(ps -T -p $(pgrep glusterfsd) | wc -l)"
+ echo "Time taken : ${endtime} seconds"
+done
+
+echo "=========="
+echo "List of all the threads in the Brick process"
+ps -T -p $(pgrep glusterfsd)
+echo "=========="
+
+# Kill glusterd, and wait a bit for all traces to disappear.
+TEST killall -9 glusterd
+sleep 5
+TEST killall -9 glusterfsd
+sleep 5
+
+# Restart glusterd. This is where the brick daemon supposedly dumps core,
+# though I (jdarcy) have yet to see that. Again, give it a while to settle,
+# just to be sure.
+TEST glusterd
+
+cleanup_func
+trap - EXIT
+cleanup
diff --git a/tests/bugs/core/bug-1650403.t b/tests/bugs/core/bug-1650403.t
new file mode 100644
index 00000000000..43d09bc8bd9
--- /dev/null
+++ b/tests/bugs/core/bug-1650403.t
@@ -0,0 +1,113 @@
+#!/bin/bash
+
+SCRIPT_TIMEOUT=500
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../traps.rc
+
+cleanup;
+
+NUM_VOLS=5
+MOUNT_BASE=$(dirname $M0)
+
+# GlusterD reports that bricks are started when in fact their attach requests
+# might still need to be retried. That's a bit of a hack, but there's no
+# feasible way to wait at that point (in attach_brick) and the rest of the
+# code is unprepared to deal with transient errors so the whole "brick start"
+# would fail. Meanwhile, glusterfsd can only handle attach requests at a
+# rather slow rate. After GlusterD tries to start a couple of hundred bricks,
+# glusterfsd can fall behind and we start getting mount failures. Arguably,
+# those are spurious because we will eventually catch up. We're just not
+# ready *yet*. More to the point, even if the errors aren't spurious that's
+# not what we're testing right now. Therefore, we give glusterfsd a bit more
+# breathing room for this test than we would otherwise.
+MOUNT_TIMEOUT=15
+
+get_brick_base () {
+ printf "%s/vol%02d" $B0 $1
+}
+
+get_mount_point () {
+ printf "%s/vol%02d" $MOUNT_BASE $1
+}
+
+function count_up_bricks {
+ vol=$1;
+ $CLI --xml volume status $vol | grep '<status>1' | wc -l
+}
+
+create_volume () {
+
+ local vol_name=$(printf "%s-vol%02d" $V0 $1)
+
+ local brick_base=$(get_brick_base $1)
+ local cmd="$CLI volume create $vol_name replica 3"
+ local b
+ for b in $(seq 0 5); do
+ local this_brick=${brick_base}/brick$b
+ mkdir -p $this_brick
+ cmd="$cmd $H0:$this_brick"
+ done
+ TEST $cmd
+ TEST $CLI volume start $vol_name
+ # check for 6 bricks and 1 shd daemon to be up and running
+ EXPECT_WITHIN 120 7 count_up_bricks $vol_name
+ local mount_point=$(get_mount_point $1)
+ mkdir -p $mount_point
+ TEST $GFS -s $H0 --volfile-id=$vol_name $mount_point
+}
+
+cleanup_func () {
+ local v
+ for v in $(seq 1 $NUM_VOLS); do
+ local mount_point=$(get_mount_point $v)
+ force_umount $mount_point
+ rm -rf $mount_point
+ local vol_name=$(printf "%s-vol%02d" $V0 $v)
+ $CLI volume stop $vol_name
+ $CLI volume delete $vol_name
+ rm -rf $(get_brick_base $1) &
+ done &> /dev/null
+ wait
+}
+push_trapfunc cleanup_func
+
+TEST glusterd
+TEST $CLI volume set all cluster.brick-multiplex on
+
+# Our infrastructure can't handle an arithmetic expression here. The formula
+# is (NUM_VOLS-1)*6 because it sees each TEST/EXPECT once but needs the other
+# NUM_VOLS-1 and there are 6 such statements in each iteration.
+TESTS_EXPECTED_IN_LOOP=24
+for i in $(seq 1 $NUM_VOLS); do
+ create_volume $i
+ TEST dd if=/dev/zero of=$(get_mount_point $i)/a_file bs=4k count=1
+ # Unmounting to reduce memory footprint on regression hosts
+ mnt_point=$(get_mount_point $i)
+ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $mnt_point
+done
+
+glustershd_pid=`ps auxwww | grep glustershd | grep -v grep | awk -F " " '{print $2}'`
+TEST [ $glustershd_pid != 0 ]
+start=`pmap -x $glustershd_pid | grep total | awk -F " " '{print $4}'`
+echo "Memory consumption for glustershd process"
+for i in $(seq 1 50); do
+ pmap -x $glustershd_pid | grep total
+ for j in $(seq 1 $NUM_VOLS); do
+ vol_name=$(printf "%s-vol%02d" $V0 $j)
+ gluster v set $vol_name cluster.self-heal-daemon off > /dev/null
+ gluster v set $vol_name cluster.self-heal-daemon on > /dev/null
+ done
+done
+
+end=`pmap -x $glustershd_pid | grep total | awk -F " " '{print $4}'`
+diff=$((end-start))
+
+# If memory consumption grows by more than 10M it means there is a leak in
+# the reconfigure code path
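+# (pmap -x reports sizes in kilobytes, so a diff under 10000 keeps the
+# growth below ~10MB)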
+
+TEST [ $diff -lt 10000 ]
+
+trap - EXIT
+cleanup
diff --git a/tests/bugs/core/bug-1699025-brick-mux-detach-brick-fd-issue.t b/tests/bugs/core/bug-1699025-brick-mux-detach-brick-fd-issue.t
new file mode 100644
index 00000000000..1acbaa8dc0b
--- /dev/null
+++ b/tests/bugs/core/bug-1699025-brick-mux-detach-brick-fd-issue.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+cleanup
+
+#bug-1444596 - validating brick mux
+
+TEST glusterd
+TEST $CLI volume create $V0 $H0:$B0/brick{0,1}
+TEST $CLI volume create $V1 $H0:$B0/brick{2,3}
+
+TEST $CLI volume set all cluster.brick-multiplex on
+
+TEST $CLI volume start $V0
+TEST $CLI volume start $V1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 1 count_brick_processes
+
+TEST $CLI volume stop $V1
+# When the brick daemon initializes it always keeps the standard fds
+# (0, 1, 2) open on /dev/null, so after stopping one volume only those
+# three fds should remain open
+nofds=$(ls -lrth /proc/`pgrep glusterfsd`/fd | grep dev/null | wc -l)
+TEST [ $((nofds)) -eq 3 ]
+
+cleanup
diff --git a/tests/bugs/core/bug-834465.c b/tests/bugs/core/bug-834465.c
new file mode 100644
index 00000000000..33dd270b112
--- /dev/null
+++ b/tests/bugs/core/bug-834465.c
@@ -0,0 +1,60 @@
+#include <sys/file.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+int
+main(int argc, char *argv[])
+{
+ int fd = -1;
+ char *filename = NULL;
+ struct flock lock = {
+ 0,
+ };
+ int i = 0;
+ int ret = -1;
+
+ if (argc != 2) {
+ fprintf(stderr, "Usage: %s <filename> ", argv[0]);
+ goto out;
+ }
+
+ filename = argv[1];
+
+ fd = open(filename, O_RDWR | O_CREAT, 0);
+ if (fd < 0) {
+ fprintf(stderr, "open (%s) failed (%s)\n", filename, strerror(errno));
+ goto out;
+ }
+
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 1;
+ lock.l_len = 1;
+
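+    /* Lock and unlock the same byte range repeatedly; each cycle should
+     * allocate and free one fd lock-context node on the client, so the
+     * allocation count in the statedump must not grow across the run. */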
+ while (i < 100) {
+ lock.l_type = F_WRLCK;
+ ret = fcntl(fd, F_SETLK, &lock);
+ if (ret < 0) {
+ fprintf(stderr, "fcntl setlk failed (%s)\n", strerror(errno));
+ goto out;
+ }
+
+ lock.l_type = F_UNLCK;
+ ret = fcntl(fd, F_SETLK, &lock);
+ if (ret < 0) {
+ fprintf(stderr, "fcntl setlk failed (%s)\n", strerror(errno));
+ goto out;
+ }
+
+ i++;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
diff --git a/tests/bugs/core/bug-834465.t b/tests/bugs/core/bug-834465.t
new file mode 100755
index 00000000000..996248d4116
--- /dev/null
+++ b/tests/bugs/core/bug-834465.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+MOUNTDIR=$M0;
+
+#memory-accounting is enabled by default
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR;
+
+sdump1=$(generate_mount_statedump $V0);
+nalloc1=0
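+# extract num_allocs for the fd lock-context memory type from the statedump,
+# leaving it at 0 when the section is absent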
+grep -A3 "fuse - usage-type gf_common_mt_fd_lk_ctx_node_t" $sdump1
+if [ $? -eq '0' ]
+then
+ nalloc1=`grep -A3 "fuse - usage-type gf_common_mt_fd_lk_ctx_node_t" $sdump1 | grep -E "^num_allocs" | cut -d '=' -f2`
+fi
+
+build_tester $(dirname $0)/bug-834465.c
+
+TEST $(dirname $0)/bug-834465 $M0/testfile
+
+sdump2=$(generate_mount_statedump $V0);
+nalloc2=0
+grep -A3 "fuse - usage-type gf_common_mt_fd_lk_ctx_node_t" $sdump2
+if [ $? -eq '0' ]
+then
+ nalloc2=`grep -A3 "fuse - usage-type gf_common_mt_fd_lk_ctx_node_t" $sdump2 | grep -E "^num_allocs" | cut -d '=' -f2`
+fi
+
+TEST [ $nalloc1 -eq $nalloc2 ];
+
+TEST rm -rf $MOUNTDIR/*
+TEST rm -rf $(dirname $0)/bug-834465
+cleanup_mount_statedump $V0
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR
+
+cleanup;
diff --git a/tests/bugs/core/bug-845213.t b/tests/bugs/core/bug-845213.t
new file mode 100644
index 00000000000..136e4126d14
--- /dev/null
+++ b/tests/bugs/core/bug-845213.t
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+## Create and start a volume with aio enabled
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 remote-dio enable;
+TEST $CLI volume set $V0 network.remote-dio disable;
+
+cleanup;
+
diff --git a/tests/bugs/core/bug-903336.t b/tests/bugs/core/bug-903336.t
new file mode 100644
index 00000000000..b52c1a4758e
--- /dev/null
+++ b/tests/bugs/core/bug-903336.t
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST setfattr -n trusted.io-stats-dump -v /tmp $M0
+cleanup
diff --git a/tests/bugs/core/bug-908146.t b/tests/bugs/core/bug-908146.t
new file mode 100755
index 00000000000..327be6e54bc
--- /dev/null
+++ b/tests/bugs/core/bug-908146.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M1 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable
+
+TEST touch $M0/a
+
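+# open an fd to the same file from each of the two mounts; the brick should
+# then have two fds open on it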
+exec 4>"$M0/a"
+exec 5>"$M1/a"
+EXPECT "2" get_fd_count $V0 $H0 $B0/${V0}0 a
+
+exec 4>&-
+EXPECT "1" get_fd_count $V0 $H0 $B0/${V0}0 a
+
+exec 5>&-
+EXPECT "0" get_fd_count $V0 $H0 $B0/${V0}0 a
+
+cleanup
diff --git a/tests/bugs/core/bug-913544.t b/tests/bugs/core/bug-913544.t
new file mode 100644
index 00000000000..af421722590
--- /dev/null
+++ b/tests/bugs/core/bug-913544.t
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#simulate a split-brain of a file and do truncate. This should not crash the mount point
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+cd $M0
+TEST touch a
+#simulate no-changelog data split-brain
+echo "abc" > $B0/${V0}1/a
+echo "abcd" > $B0/${V0}0/a
+TEST truncate -s 0 a
+TEST ls
+cd
+
+cleanup
diff --git a/tests/bugs/core/bug-924075.t b/tests/bugs/core/bug-924075.t
new file mode 100755
index 00000000000..61ce0f18286
--- /dev/null
+++ b/tests/bugs/core/bug-924075.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#FIXME: there is another patch which moves the following function into
+#include.rc
+function process_leak_count ()
+{
+ local pid=$1;
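+    # the count of deleted-but-still-open fds becomes the exit status, so
+    # TEST passes only when there are no leaked fds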
+ return $(ls -lh /proc/$pid/fd | grep "(deleted)" | wc -l)
+}
+
+TEST glusterd;
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+TEST $CLI volume start $V0;
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+mount_pid=$(get_mount_process_pid $V0);
+TEST process_leak_count $mount_pid;
+
+cleanup;
diff --git a/tests/bugs/core/bug-927616.t b/tests/bugs/core/bug-927616.t
new file mode 100755
index 00000000000..18257131ac7
--- /dev/null
+++ b/tests/bugs/core/bug-927616.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 performance.open-behind off;
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+
+TEST mkdir $M0/dir;
+
+mkdir $M0/other;
+cp /etc/passwd $M0/;
+cp $M0/passwd $M0/file;
+chmod 600 $M0/file;
+
+chown -R nfsnobody:nfsnobody $M0/dir;
+
+TEST $CLI volume set $V0 server.root-squash on;
+
+sleep 1;
+
+# tests should fail.
+touch $M0/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+touch $N0/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $M0/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $N0/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+
+TEST $CLI volume set $V0 server.root-squash off;
+
+sleep 1;
+
+# tests should pass.
+touch $M0/foo 2>/dev/null;
+TEST [ $? -eq 0 ]
+touch $N0/bar 2>/dev/null;
+TEST [ $? -eq 0 ]
+mkdir $M0/new 2>/dev/null;
+TEST [ $? -eq 0 ]
+mkdir $N0/old 2>/dev/null;
+TEST [ $? -eq 0 ]
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/core/bug-949242.t b/tests/bugs/core/bug-949242.t
new file mode 100644
index 00000000000..5e916cbdbe6
--- /dev/null
+++ b/tests/bugs/core/bug-949242.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+#
+# Bug 949242 - Test basic fallocate functionality.
+#
+# Run several commands to verify basic fallocate functionality. We verify that
+# fallocate creates and allocates blocks to a file. We also verify that the keep
+# size option does not modify the file size.
+###
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../fallocate.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+# check for fallocate support before continuing the test
+require_fallocate -l 1m -n $M0/file && rm -f $M0/file
+
+# fallocate a file and verify blocks are allocated
+TEST fallocate -l 1m $M0/file
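+# stat %b is the allocated block count and %B the size of each block, so
+# their product is the allocated size in bytes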
+blksz=`stat -c %b $M0/file`
+nblks=`stat -c %B $M0/file`
+TEST [ $(($blksz * $nblks)) -eq 1048576 ]
+
+TEST unlink $M0/file
+
+# truncate a file to a fixed size, fallocate and verify that the size does not
+# change
+TEST truncate -s 1M $M0/file
+TEST fallocate -l 2m -n $M0/file
+blksz=`stat -c %b $M0/file`
+nblks=`stat -c %B $M0/file`
+sz=`stat -c %s $M0/file`
+TEST [ $sz -eq 1048576 ]
+# Note that gluster currently incorporates a hack to limit the number of blocks
+# reported as allocated to the file by the file size. We have allocated beyond the
+# file size here. Just check for non-zero allocation to avoid setting a land mine
+# for if/when that behavior might change.
+TEST [ ! $(($blksz * $nblks)) -eq 0 ]
+
+TEST unlink $M0/file
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/core/bug-986429.t b/tests/bugs/core/bug-986429.t
new file mode 100644
index 00000000000..e512301775f
--- /dev/null
+++ b/tests/bugs/core/bug-986429.t
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+## This tests failover achieved by providing multiple
+## servers from the trusted pool for fetching volume
+## specification
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s non-existent -s $H0 --volfile-id=/$V0 $M0
+
+cleanup;
diff --git a/tests/bugs/core/io-stats-1322825.t b/tests/bugs/core/io-stats-1322825.t
new file mode 100755
index 00000000000..53f2d040daa
--- /dev/null
+++ b/tests/bugs/core/io-stats-1322825.t
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+# Test details:
+# This is to test that the io-stat-dump xattr is not set on the brick,
+# against the path that is used to trigger the stats dump.
+# Additionally it tests that as many io-stat dumps are generated as there
+# are io-stats xlators in the graphs (one in the client graph plus one per
+# brick graph, i.e. 5 for this 4-brick volume)
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+# Covering replication and distribution in the test
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..4}
+TEST $CLI volume start $V0
+TEST $GFS -s $H0 --volfile-id $V0 $M0
+
+# Generate some activity for the stats to produce something useful
+TEST $CLI volume profile $V0 start
+TEST mkdir $M0/dir1
+
+# Generate the stat dump across the io-stat instances
+TEST setfattr -n trusted.io-stats-dump -v io-stats-1322825 $M0
+
+# Check if $M0 is clean w.r.t xattr information
+# TODO: if there are better ways to check we really get no attr error, please
+# correct the following.
+getfattr -n trusted.io-stats-dump $B0/${V0}1 2>&1 | grep -qi "no such attribute"
+ret=$(echo $?)
+EXPECT 0 echo $ret
+getfattr -n trusted.io-stats-dump $B0/${V0}2 2>&1 | grep -qi "no such attribute"
+ret=$(echo $?)
+EXPECT 0 echo $ret
+getfattr -n trusted.io-stats-dump $B0/${V0}3 2>&1 | grep -qi "no such attribute"
+ret=$(echo $?)
+EXPECT 0 echo $ret
+getfattr -n trusted.io-stats-dump $B0/${V0}4 2>&1 | grep -qi "no such attribute"
+ret=$(echo $?)
+EXPECT 0 echo $ret
+
+# Check if we have 5 io-stat dump files in /var/run/gluster
+EXPECT 5 ls -1 /var/run/gluster/io-stats-1322825*
+# Cleanup the 5 generated files
+rm -f /var/run/gluster/io-stats-1322825*
+
+# Rinse and repeat above for a directory
+TEST setfattr -n trusted.io-stats-dump -v io-stats-1322825 $M0/dir1
+getfattr -n trusted.io-stats-dump $B0/${V0}1/dir1 2>&1 | grep -qi "no such attribute"
+ret=$(echo $?)
+EXPECT 0 echo $ret
+getfattr -n trusted.io-stats-dump $B0/${V0}2/dir1 2>&1 | grep -qi "no such attribute"
+ret=$(echo $?)
+EXPECT 0 echo $ret
+getfattr -n trusted.io-stats-dump $B0/${V0}3/dir1 2>&1 | grep -qi "no such attribute"
+ret=$(echo $?)
+EXPECT 0 echo $ret
+getfattr -n trusted.io-stats-dump $B0/${V0}4/dir1 2>&1 | grep -qi "no such attribute"
+ret=$(echo $?)
+EXPECT 0 echo $ret
+
+EXPECT 5 ls -1 /var/run/gluster/io-stats-1322825*
+rm -f /var/run/gluster/io-stats-1322825*
+
+cleanup;
diff --git a/tests/bugs/core/log-bug-1362520.t b/tests/bugs/core/log-bug-1362520.t
new file mode 100755
index 00000000000..cde854c3349
--- /dev/null
+++ b/tests/bugs/core/log-bug-1362520.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+#. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1};
+TEST $CLI volume start $V0
+
+# Mount FUSE without selinux:
+TEST glusterfs -s $H0 --volfile-id $V0 $@ $M0
+
+#Get the client log file
+log_wd=$(gluster --print-logdir)
+log_id=${M0:1} # Remove initial slash
+log_id=${log_id//\//-} # Replace remaining slashes with dashes
+log_file=$log_wd/$log_id.log
+
+#Set the client xlator log-level to TRACE and check if the TRACE logs get
+#printed
+TEST setfattr -n trusted.glusterfs.$V0-client-0.set-log-level -v TRACE $M0
+TEST ! stat $M0/xyz
+grep -q " T \[rpc-clnt\.c" $log_file
+res=$?
+EXPECT '0' echo $res
+
+#Set the client xlator log-level to INFO and make sure the TRACE logs do
+#not get printed
+echo > $log_file
+TEST setfattr -n trusted.glusterfs.$V0-client-0.set-log-level -v INFO $M0
+TEST ! stat $M0/xyz
+grep -q " T \[rpc-clnt\.c" $log_file
+res=$?
+EXPECT_NOT '0' echo $res
+
+cleanup;
diff --git a/tests/bugs/ctime/issue-832.t b/tests/bugs/ctime/issue-832.t
new file mode 100755
index 00000000000..740f731ab73
--- /dev/null
+++ b/tests/bugs/ctime/issue-832.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../traps.rc
+
+#Trigger the trusted.glusterfs.mdata setting codepath and see that things work as expected
+cleanup
+
+TEST_USER=test-ctime-user
+TEST_UID=27341
+
+TEST useradd -o -M -u ${TEST_UID} ${TEST_USER}
+push_trapfunc "userdel --force ${TEST_USER}"
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+
+$GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+echo abc > $M0/test
+TEST chmod 755 $M0/
+TEST chmod 744 $M0/test
+TEST setfattr -x trusted.glusterfs.mdata $B0/$V0/test
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+$GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+su ${TEST_USER} -c "cat $M0/test"
+TEST getfattr -n trusted.glusterfs.mdata $B0/$V0/test
+
+cleanup
diff --git a/tests/bugs/distribute/bug-1042725.t b/tests/bugs/distribute/bug-1042725.t
new file mode 100644
index 00000000000..5497eb8bc00
--- /dev/null
+++ b/tests/bugs/distribute/bug-1042725.t
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
+TEST $CLI volume start $V0
+
+# Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+#Create files
+TEST mkdir $M0/foo
+TEST touch $M0/foo/{1..20}
+for file in {1..20}; do
+ ln $M0/foo/$file $M0/foo/${file}_linkfile;
+done
+
+#Stop one of the bricks
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}1
+
+rm -rf $M0/foo 2>/dev/null
+TEST stat $M0/foo
+
+touch $M0/foo/{1..20} 2>/dev/null
+touch $M0/foo/{1..20}_linkfile 2>/dev/null
+
+TEST $CLI volume start $V0 force;
+sleep 5
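+#after the force start every name should be listed exactly once; duplicates
+#would mean stale linkto files are being exposed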
+function verify_duplicate {
+ count=`ls $M0/foo | sort | uniq --repeated | grep [0-9] -c`
+ echo $count
+}
+EXPECT 0 verify_duplicate
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/distribute/bug-1066798.t b/tests/bugs/distribute/bug-1066798.t
new file mode 100755
index 00000000000..03de970a637
--- /dev/null
+++ b/tests/bugs/distribute/bug-1066798.t
@@ -0,0 +1,88 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=200
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+## Let's create a volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+
+############################################################
+#TEST_PLAN#
+#Create a file
+#Store the hashed brick information
+#Create hard links to it
+#Remove the hashed brick
+#Check that all the hardlinks are migrated to the "OTHER" brick
+#Check also in the mount point for all the files
+#Check there are no failures or skips during migration
+############################################################
+
+TEST touch $M0/file1;
+
+file_perm=`ls -l $M0/file1 | grep file1 | awk '{print $1}'`;
+
+if [ -f $B0/${V0}1/file1 ]
+then
+ HASHED=$B0/${V0}1
+ OTHER=$B0/${V0}2
+else
+ HASHED=$B0/${V0}2
+ OTHER=$B0/${V0}1
+fi
+
+#create fifty hard links
+for i in {1..50};
+do
+TEST_IN_LOOP ln $M0/file1 $M0/link$i;
+done
+
+
+TEST $CLI volume remove-brick $V0 $H0:${HASHED} start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:${HASHED}";
+
+#check consistency in mount point
+#And also check all the links are migrated to OTHER
+for i in {1..50}
+do
+TEST_IN_LOOP [ -f ${OTHER}/link${i} ];
+TEST_IN_LOOP [ -f ${M0}/link${i} ];
+done;
+
+#check in OTHER that all the files have proper permissions (meaning no
+#linkto files)
+
+for i in {1..50}
+do
+link_perm=`ls -l $OTHER | grep -w link${i} | awk '{print $1}'`;
+TEST_IN_LOOP [ "${file_perm}" == "${link_perm}" ]
+
+done
+
+#check that remove-brick status does not report any failed or skipped files
+
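+#fields 5 and 6 of the "completed" status line hold the failed and skipped
+#file counts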
+var=`$CLI volume remove-brick $V0 $H0:${HASHED} status | grep completed`
+
+TEST [ `echo $var | awk '{print $5}'` = "0" ]
+TEST [ `echo $var | awk '{print $6}'` = "0" ]
+
+cleanup
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/bugs/distribute/bug-1086228.t b/tests/bugs/distribute/bug-1086228.t
new file mode 100755
index 00000000000..e14ea572b61
--- /dev/null
+++ b/tests/bugs/distribute/bug-1086228.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+. $(dirname $0)/../../dht.rc
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
+TEST $CLI volume start $V0;
+TEST glusterfs --direct-io-mode=yes --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+echo "D" > $M0/file1;
+TEST chmod +st $M0/file1;
+
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}"3"
+TEST $CLI volume rebalance $V0 start force
+
+EXPECT_WITHIN "10" "0" rebalance_completed
+count=0
+for i in `ls $B0/$V0"3"`;
+ do
+ var=`stat -c %A $B0/$V0"3"/$i | cut -c 4`;
+ echo $B0/$V0"3"/$i $var
+ if [ "$var" != "S" ]; then
+ count=$((count + 1))
+ fi
+ done
+
+TEST [[ $count == 0 ]]
+cleanup
diff --git a/tests/bugs/distribute/bug-1088231.t b/tests/bugs/distribute/bug-1088231.t
new file mode 100755
index 00000000000..8d4d1db73a5
--- /dev/null
+++ b/tests/bugs/distribute/bug-1088231.t
@@ -0,0 +1,173 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+. $(dirname $0)/../../dht.rc
+
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.randomize-hash-range-by-gfid on
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --aux-gfid-mount --volfile-server=$H0 $M0
+TEST mkdir $M0/a
+
+
+## Bug Description: The dht_discover code path is triggered when the lookup
+## performed is a nameless lookup. At the end of such a lookup, even if it
+## finds that a self-heal is needed to fix the layout, it won't heal, because
+## the healing code path was not invoked for nameless lookups.
+
+## What to test: With the patch, even in the case of a nameless lookup, if
+## the layout needs to be fixed it will be fixed wherever the lookup is
+## successful, and no directory will be created for subvols returning ENOENT,
+## as it is a nameless lookup.
+
+gfid_with_hyphen=`getfattr -n glusterfs.gfid.string $M0/a 2>/dev/null \
+ | grep glusterfs.gfid.string | cut -d '"' -f 2`
+
+TEST setfattr -x trusted.glusterfs.dht $B0/$V0"0"/a
+
+## The new healing code doesn't attempt healing if the inode is already
+## populated. So, unmount and remount before we do the stat.
+TEST umount $M0
+TEST glusterfs --volfile-id=/$V0 --aux-gfid-mount --volfile-server=$H0 $M0
+
+TEST stat $M0/.gfid/$gfid_with_hyphen
+
+## Assuming that we have two bricks, we can have two permutations of layout
+## Case 1: Brick - A Brick - B
+## 0 - 50 51-100
+##
+## Case 2: Brick - A Brick - B
+## 51 - 100 0 - 50
+##
+## To ensure layout is assigned properly, the following tests should be
+## performed.
+##
+## Case 1: Layout_b0_s = 0; Layout_b0_e = 50, Layout_b1_s=51,
+## Layout_b1_e = 100;
+##
+## layout_b1_s = layout_b0_e + 1;
+## layout_b0_s = layout_b1_e + 1; but b0_s is 0, so change to 101
+## then compare
+## Case 2: Layout_b0_s = 51, Layout_b0_e = 100, Layout_b1_s=0,
+## Layout_b1_e = 51
+##
+## layout_b0_s = Layout_b1_e + 1;
+## layout_b1_s = Layout_b0_e + 1; but b1_s is 0, so change to 101.
+
+
+##Extract Layout
+echo `get_layout $B0/$V0"0"/a`
+echo `get_layout $B0/$V0"1"/a`
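+## characters 19-26 and 27-34 of the hex xattr value hold the start and end
+## of each brick's hash range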
+layout_b0_s=`get_layout $B0/$V0"0"/a | cut -c19-26`
+layout_b0_e=`get_layout $B0/$V0"0"/a | cut -c27-34`
+layout_b1_s=`get_layout $B0/$V0"1"/a | cut -c19-26`
+layout_b1_e=`get_layout $B0/$V0"1"/a | cut -c27-34`
+
+
+##Add 0x to perform hex arithmetic
+layout_b0_s="0x"$layout_b0_s
+layout_b0_e="0x"$layout_b0_e
+layout_b1_s="0x"$layout_b1_s
+layout_b1_e="0x"$layout_b1_e
+
+
+## Logic of converting starting layout "0" to "Max_value of layout + 1"
+comp1=$(($layout_b0_s + 0))
+if [ "$comp1" == "0" ];then
+ comp1=4294967296
+fi
+
+comp2=$(($layout_b1_s + 0))
+if [ "$comp2" == "0" ];then
+ comp2=4294967296
+fi
+
+diff1=$(($layout_b0_e + 1))
+diff2=$(($layout_b1_e + 1))
+
+
+healed=0
+
+if [ "$comp1" == "$diff1" ] && [ "$comp2" == "$diff2" ]; then
+ healed=$(($healed + 1))
+fi
+
+if [ "$comp1" == "$diff2" ] && [ "$comp2" == "$diff1" ]; then
+ healed=$(($healed + 1))
+fi
+
+TEST [ $healed == 1 ]
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.randomize-hash-range-by-gfid on
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --aux-gfid-mount --volfile-server=$H0 $M0
+TEST mkdir $M0/a
+
+gfid_with_hyphen=`getfattr -n glusterfs.gfid.string $M0/a 2>/dev/null \
+ | grep glusterfs.gfid.string | cut -d '"' -f 2`
+
+TEST setfattr -x trusted.glusterfs.dht $B0/$V0"0"/a
+TEST setfattr -x trusted.glusterfs.dht $B0/$V0"1"/a
+
+## The new healing code doesn't attempt healing if the inode is already
+## populated. So, unmount and remount before we do the stat.
+TEST umount $M0
+TEST glusterfs --volfile-id=/$V0 --aux-gfid-mount --volfile-server=$H0 $M0
+
+TEST stat $M0/.gfid/$gfid_with_hyphen
+
+##Extract Layout
+
+layout_b0_s=`get_layout $B0/$V0"0"/a | cut -c19-26`
+layout_b0_e=`get_layout $B0/$V0"0"/a | cut -c27-34`
+layout_b1_s=`get_layout $B0/$V0"1"/a | cut -c19-26`
+layout_b1_e=`get_layout $B0/$V0"1"/a | cut -c27-34`
+
+
+##Add 0x to perform hex arithmetic
+layout_b0_s="0x"$layout_b0_s
+layout_b0_e="0x"$layout_b0_e
+layout_b1_s="0x"$layout_b1_s
+layout_b1_e="0x"$layout_b1_e
+
+
+
+## Logic of converting starting layout "0" to "Max_value of layout + 1"
+comp1=$(($layout_b0_s + 0))
+if [ "$comp1" == "0" ];then
+ comp1=4294967296
+fi
+
+comp2=$(($layout_b1_s + 0))
+if [ "$comp2" == "0" ];then
+ comp2=4294967296
+fi
+
+diff1=$(($layout_b0_e + 1))
+diff2=$(($layout_b1_e + 1))
+
+
+healed=0
+
+if [ "$comp1" == "$diff1" ] && [ "$comp2" == "$diff2" ]; then
+ healed=$(($healed + 1))
+fi
+
+if [ "$comp1" == "$diff2" ] && [ "$comp2" == "$diff1" ]; then
+ healed=$(($healed + 1))
+fi
+
+TEST [ $healed == 1 ]
+cleanup
+
diff --git a/tests/bugs/distribute/bug-1099890.t b/tests/bugs/distribute/bug-1099890.t
new file mode 100644
index 00000000000..1a19ba880c0
--- /dev/null
+++ b/tests/bugs/distribute/bug-1099890.t
@@ -0,0 +1,130 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../dht.rc
+
+## TO-DO: Fix the following once the dht du refresh interval issue is fixed:
+## 1. Do away with sleep(1).
+## 2. Do away with creation of empty files.
+
+cleanup;
+
+QDD=$(dirname $0)/quota
+# compile the test write program and run it
+build_tester $(dirname $0)/../../basic/quota.c -o $QDD
+
+TEST glusterd;
+TEST pidof glusterd;
+
+# Create 2 loop devices, one per brick.
+TEST truncate -s 100M $B0/brick1
+TEST truncate -s 100M $B0/brick2
+
+TEST L1=`SETUP_LOOP $B0/brick1`
+TEST MKFS_LOOP $L1
+
+TEST L2=`SETUP_LOOP $B0/brick2`
+TEST MKFS_LOOP $L2
+
+TEST mkdir -p $B0/${V0}{1,2}
+
+TEST MOUNT_LOOP $L1 $B0/${V0}1
+TEST MOUNT_LOOP $L2 $B0/${V0}2
+
+# Create a plain distribute volume with 2 subvols.
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+TEST $CLI volume start $V0;
+EXPECT "Started" volinfo_field $V0 'Status';
+
+TEST $CLI volume quota $V0 enable;
+
+TEST $CLI volume set $V0 features.quota-deem-statfs on
+
+TEST $CLI volume quota $V0 limit-usage / 150MB;
+
+TEST $CLI volume set $V0 cluster.min-free-disk 50%
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+
+# Make sure quota-deem-statfs is working as expected
+EXPECT "150M" echo `df -h $M0 -P | tail -1 | awk {'print $2'}`
+
+# Create a new file 'foo' under the root of the volume, which hashes to subvol-0
+# of DHT, that consumes 40M
+TEST $QDD $M0/foo 256 160
+
+TEST stat $B0/${V0}1/foo
+TEST ! stat $B0/${V0}2/foo
+
+# Create a new file 'bar' under the root of the volume, which hashes to subvol-1
+# of DHT, that consumes 40M
+TEST $QDD $M0/bar 256 160
+
+TEST ! stat $B0/${V0}1/bar
+TEST stat $B0/${V0}2/bar
+
+# Touch a zero-byte file on the root of the volume to make sure the statfs data
+# on DHT is refreshed
+sleep 1;
+TEST touch $M0/empty1;
+
+# At this point, the available space on each subvol {60M,60M} is greater than
+# their min-free-disk {50M,50M}, but if this bug still exists, then
+# the total available space on the volume as perceived by DHT should be less
+# than min-free-disk, i.e.,
+#
+# consumed space returned per subvol by quota = (40M + 40M) = 80M
+#
+# Therefore, consumed space per subvol computed by DHT WITHOUT the fix would be:
+# (80M/150M)*100 = 53%
+#
+# Available space per subvol as perceived by DHT with the bug = 47%
+# which is less than min-free-disk
+
+# Now I create a file that hashes to subvol-1 (counting from 0) of DHT.
+# If this bug still exists,then DHT should be routing this creation to subvol-0.
+# If this bug is fixed, then DHT should be routing the creation to subvol-1 only
+# as it has more than min-free-disk space available.
+
+TEST $QDD $M0/file 1 1
+sleep 1;
+TEST ! stat $B0/${V0}1/file
+TEST stat $B0/${V0}2/file
+
+# Touch another zero-byte file on the root of the volume to refresh statfs
+# values stored by DHT.
+
+TEST touch $M0/empty2;
+
+# Now I create a new file that hashes to subvol-0, at the end of which, there
+# will be less than min-free-disk space available on it.
+TEST $QDD $M0/fil 256 80
+sleep 1;
+TEST stat $B0/${V0}1/fil
+TEST ! stat $B0/${V0}2/fil
+
+# Touch to refresh statfs info cached by DHT
+
+TEST touch $M0/empty3;
+
+# Now I create a file that hashes to subvol-0 but since it has less than
+# min-free-disk space available, its data will be cached on subvol-1.
+
+TEST $QDD $M0/zz 256 20
+
+TEST stat $B0/${V0}1/zz
+TEST stat $B0/${V0}2/zz
+
+EXPECT "$V0-client-1" dht_get_linkto_target "$B0/${V0}1/zz"
+
+EXPECT "1" is_dht_linkfile "$B0/${V0}1/zz"
+
+force_umount $M0
+TEST $CLI volume stop $V0
+UMOUNT_LOOP ${B0}/${V0}{1,2}
+rm -f ${B0}/brick{1,2}
+
+rm -f $QDD
+cleanup
diff --git a/tests/bugs/distribute/bug-1125824.t b/tests/bugs/distribute/bug-1125824.t
new file mode 100755
index 00000000000..7e401092273
--- /dev/null
+++ b/tests/bugs/distribute/bug-1125824.t
@@ -0,0 +1,103 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+create_files () {
+ for i in {1..10}; do
+ orig=$(printf %s/file%04d $1 $i)
+ echo "This is file $i" > $orig
+ done
+ for i in {1..10}; do
+ mkdir $(printf %s/dir%04d $1 $i)
+ done
+ sync
+}
+
+create_dirs () {
+ for i in {1..10}; do
+ mkdir $(printf %s/dir%04d $1 $i)
+ create_files $(printf %s/dir%04d $1 $i)
+ done
+ sync
+}
+
+stat_files () {
+ for i in {1..10}; do
+ orig=$(printf %s/file%04d $1 $i)
+ stat $orig
+ done
+ for i in {1..10}; do
+ stat $(printf %s/dir%04d $1 $i)
+ done
+ sync
+}
+
+stat_dirs () {
+ for i in {1..10}; do
+ stat $(printf %s/dir%04d $1 $i)
+ stat_files $(printf %s/dir%04d $1 $i)
+ done
+ sync
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '4' brick_count $V0
+
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0
+
+# Create and populate the NFS inode tables
+TEST create_dirs $N0
+TEST stat_dirs $N0
+
+# add-bricks changing the state of the volume where some bricks
+# would have some directories and others would not
+TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}{5,6,7,8}
+
+# After this, dht_access used to create a mess for directories, which is
+# fixed with this commit. The issues could range from getting ENOENT or
+# ESTALE, or entries missing, to directories not having complete
+# layouts.
+TEST cd $N0
+TEST ls -lR
+
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+# tests to check, post rebalance, that layouts and entries are fine and
+# accessible over NFS, then clear the volume
+TEST ls -lR
+rm -rf ./*
+# There are additional bugs where NFS+DHT does not delete all entries
+# on an rm -rf, so we do an additional rm -rf to ensure everything is
+# gone and that we were hitting this transient issue, rather than a bad
+# directory layout cached in memory
+TEST rm -rf ./*
+
+# Get out of the mount, so that umount can work
+TEST cd /
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/distribute/bug-1161156.t b/tests/bugs/distribute/bug-1161156.t
new file mode 100755
index 00000000000..2b9e15407ca
--- /dev/null
+++ b/tests/bugs/distribute/bug-1161156.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+QDD=$(dirname $0)/quota
+# compile the test write program and run it
+build_tester $(dirname $0)/../../basic/quota.c -o $QDD
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6};
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+# Testing with NFS for no particular reason
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+TEST mount_nfs $H0:/$V0 $N0
+mydir="dir"
+TEST mkdir -p $N0/$mydir
+TEST mkdir -p $N0/newdir
+
+TEST $QDD $N0/$mydir/file 256 40
+
+TEST $CLI volume quota $V0 enable
+TEST $CLI volume quota $V0 limit-usage / 20MB
+TEST $CLI volume quota $V0 limit-usage /newdir 5MB
+TEST $CLI volume quota $V0 soft-timeout 0
+TEST $CLI volume quota $V0 hard-timeout 0
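+# zero soft/hard timeouts make quota validate usage on every operation
+# instead of relying on cached values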
+
+TEST $QDD $N0/$mydir/newfile_1 256 20
+# wait for write behind to complete.
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "15.0MB" quotausage "/"
+TEST ! $QDD $N0/$mydir/newfile_2 256 40
+
+# Test rename within a directory. It should pass even when the
+# corresponding directory quota is filled.
+TEST mv $N0/dir/file $N0/dir/newfile_3
+
+# rename should fail here with disk quota exceeded
+TEST ! mv $N0/dir/newfile_3 $N0/newdir/
+
+umount_nfs $N0
+TEST $CLI volume stop $V0
+
+rm -f $QDD
+
+cleanup;
diff --git a/tests/bugs/distribute/bug-1161311.t b/tests/bugs/distribute/bug-1161311.t
new file mode 100755
index 00000000000..62796068928
--- /dev/null
+++ b/tests/bugs/distribute/bug-1161311.t
@@ -0,0 +1,164 @@
+#!/bin/bash
+
+SCRIPT_TIMEOUT=350
+
+# This tests that hard links to a file are preserved when new links are
+# created while the file is undergoing migration
+
+# --- Improvements and other tests ---
+## Fail rebalance of the large file for which links are created during P1/2
+### phases of migration
+## Start with multiple hard links to the file and then create more during P1/2
+### phases of migration
+## Test the same with NFS as the mount rather than FUSE
+## Create links when file is under P2 of migration specifically
+## Test with quota, to error out during hard link creation (if possible)
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+TEST truncate -s 10GB $B0/brick1
+TEST truncate -s 10GB $B0/brick2
+TEST truncate -s 10GB $B0/brick3
+
+TEST LO1=`SETUP_LOOP $B0/brick1`
+TEST MKFS_LOOP $LO1
+
+TEST LO2=`SETUP_LOOP $B0/brick2`
+TEST MKFS_LOOP $LO2
+
+TEST LO3=`SETUP_LOOP $B0/brick3`
+TEST MKFS_LOOP $LO3
+
+TEST mkdir -p $B0/${V0}1 $B0/${V0}2 $B0/${V0}3
+
+
+TEST MOUNT_LOOP $LO1 $B0/${V0}1
+TEST MOUNT_LOOP $LO2 $B0/${V0}2
+TEST MOUNT_LOOP $LO3 $B0/${V0}3
+
+checksticky () {
+ i=0;
+ while [ ! -k $1 ]; do
+ sleep 1
+ i=$((i+1));
+ # Try for 10 seconds to get the sticky bit state
+ # else fail the test, as we may never see it
+ if [[ $i == 10 ]]; then
+ return $i
+ fi
+ echo "Waiting... $i"
+ done
+ echo "Done... got out @ $i"
+ return 0
+}
+
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3};
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '3' brick_count $V0
+
+TEST $CLI volume set $V0 parallel-readdir on
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount FUSE with caching disabled (read-write)
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+# Create directories to hold the links
+TEST mkdir $M0/dir1
+TEST mkdir -p $M0/dir2/dir3
+
+# Create a large file (8 GB), so that rebalance takes time
+# Since we really don't care about the contents of the file, we use fallocate
+# to generate the file much faster. We could also use truncate, which is even
+# faster, but rebalance could take advantage of a sparse file and migrate it
+# in an optimized way, but we don't want a fast migration.
+TEST fallocate -l 8G $M0/dir1/FILE2
+
+# Rename the file to create a linkto, for rebalance to
+# act on the file
+## FILE1 and FILE2 hash to 678b1c4a and e22c1ada, so they fall
+## into separate bricks when the brick count is 3
+TEST mv $M0/dir1/FILE2 $M0/dir1/FILE1
+
+brick_loc=$(get_backend_paths $M0/dir1/FILE1)
+
+# unmount and remount the volume
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+# Start the rebalance
+TEST $CLI volume rebalance $V0 start force
+
+# Wait for FILE to get the sticky bit on, so that file is under
+# active rebalance, before creating the links
+TEST checksticky $brick_loc
+
+# Create the links
+## FILE3, FILE5 and FILE7 have hashes c8c91469, 566d26ce and 22ce7eba,
+## which fall into separate bricks on a 3-brick layout
+cd $M0
+TEST ln ./dir1/FILE1 ./dir1/FILE7
+TEST ln ./dir1/FILE1 ./dir1/FILE5
+TEST ln ./dir1/FILE1 ./dir1/FILE3
+
+TEST ln ./dir1/FILE1 ./dir2/FILE7
+TEST ln ./dir1/FILE1 ./dir2/FILE5
+TEST ln ./dir1/FILE1 ./dir2/FILE3
+
+TEST ln ./dir1/FILE1 ./dir2/dir3/FILE7
+TEST ln ./dir1/FILE1 ./dir2/dir3/FILE5
+TEST ln ./dir1/FILE1 ./dir2/dir3/FILE3
+cd /
+
+# Ideally for this test to have done its job, the file should still be
+# under migration, so check the sticky bit again
+TEST checksticky $brick_loc
+
+# Wait for rebalance to complete
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+# Check if all files are clean and migrated right
+## stat on the original file should show linkcount of 10
+linkcountsrc=$(stat -c %h $M0/dir1/FILE1)
+TEST [[ $linkcountsrc == 10 ]]
+
+## inode and size of every file should be same as original file
+inodesrc=$(stat -c %i $M0/dir1/FILE1)
+TEST [[ $(stat -c %i $M0/dir1/FILE3) == $inodesrc ]]
+TEST [[ $(stat -c %i $M0/dir1/FILE5) == $inodesrc ]]
+TEST [[ $(stat -c %i $M0/dir1/FILE7) == $inodesrc ]]
+
+TEST [[ $(stat -c %i $M0/dir2/FILE3) == $inodesrc ]]
+TEST [[ $(stat -c %i $M0/dir2/FILE5) == $inodesrc ]]
+TEST [[ $(stat -c %i $M0/dir2/FILE7) == $inodesrc ]]
+
+TEST [[ $(stat -c %i $M0/dir2/dir3/FILE3) == $inodesrc ]]
+TEST [[ $(stat -c %i $M0/dir2/dir3/FILE5) == $inodesrc ]]
+TEST [[ $(stat -c %i $M0/dir2/dir3/FILE7) == $inodesrc ]]
+
+# Check, newer link creations
+cd $M0
+TEST ln ./dir1/FILE1 ./FILE1
+TEST ln ./dir2/FILE3 ./FILE3
+TEST ln ./dir2/dir3/FILE5 ./FILE5
+TEST ln ./dir1/FILE7 ./FILE7
+cd /
+linkcountsrc=$(stat -c %h $M0/dir1/FILE1)
+TEST [[ $linkcountsrc == 14 ]]
+
+
+# Stop the volume
+TEST $CLI volume stop $V0;
+
+UMOUNT_LOOP ${B0}/${V0}{1..3}
+rm -f ${B0}/brick{1..3}
+cleanup;
diff --git a/tests/bugs/distribute/bug-1190734.t b/tests/bugs/distribute/bug-1190734.t
new file mode 100644
index 00000000000..9256088f7a0
--- /dev/null
+++ b/tests/bugs/distribute/bug-1190734.t
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+BRICK_COUNT=3
+FILE_COUNT=100
+FILE_COUNT_TIME=5
+
+function create_files {
+ rm -rf $2
+ mkdir $2
+ for i in `seq 1 $1`; do
+ touch $2/file_$i
+ done
+}
+
+function get_file_count {
+ ls $1/file_[0-9]* | wc -l
+}
+
+function reset {
+ $CLI volume stop $V0
+ ${UMOUNT_F} $1
+ $CLI volume delete $V0
+}
+
+function start_mount_fuse {
+ $CLI volume start $V0
+ [ $? -ne 0 ] && return 1
+
+ $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+ [ $? -ne 0 ] && return 1
+
+ create_files $FILE_COUNT $M0/$1
+ [ $? -ne 0 ] && return 1
+
+ return 0
+}
+
+function start_mount_nfs {
+ $CLI volume start $V0
+ [ $? -ne 0 ] && return 1
+
+ sleep 3
+ mount_nfs $H0:/$V0 $N0
+ [ $? -ne 0 ] && return 1
+
+ create_files $FILE_COUNT $N0/$1
+ [ $? -ne 0 ] && return 1
+
+ return 0
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+# Test 1-2 Create replicated volume
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \
+ $H0:$B0/${V0}2 $H0:$B0/${V0}3 $H0:$B0/${V0}4 $H0:$B0/${V0}5
+TEST $CLI volume set $V0 nfs.disable false
+
+# ------- test 1: AFR, fuse + remove bricks
+
+TEST start_mount_fuse test1
+EXPECT_WITHIN $FILE_COUNT_TIME $FILE_COUNT get_file_count $M0/test1
+TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{2,3} start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$B0/${V0}2 $H0:$B0/${V0}3"
+TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{2,3} commit
+EXPECT_WITHIN $FILE_COUNT_TIME $FILE_COUNT get_file_count $M0/test1
+reset $M0
+
+# ------- test 2: AFR, nfs + remove bricks
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \
+ $H0:$B0/${V0}2 $H0:$B0/${V0}3 $H0:$B0/${V0}4 $H0:$B0/${V0}5
+TEST $CLI volume set $V0 nfs.disable false
+
+TEST start_mount_nfs test2
+EXPECT_WITHIN $FILE_COUNT_TIME $FILE_COUNT get_file_count $N0/test2
+TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}2 $H0:$B0/${V0}3 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$B0/${V0}2 $H0:$B0/${V0}3"
+TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}{2,3} commit
+EXPECT_WITHIN $FILE_COUNT_TIME $FILE_COUNT get_file_count $N0/test2
+reset $N0
+
+cleanup
diff --git a/tests/bugs/distribute/bug-1193636.c b/tests/bugs/distribute/bug-1193636.c
new file mode 100644
index 00000000000..ea3f79a4e06
--- /dev/null
+++ b/tests/bugs/distribute/bug-1193636.c
@@ -0,0 +1,68 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/xattr.h>
+#include <fcntl.h>
+#include <string.h>
+
+#define MY_XATTR_NAME "user.ftest"
+#define MY_XATTR_VAL "ftestval"
+
+void
+usage(void)
+{
+ printf("Usage : bug-1193636 <filename> <xattr_name> <op>\n");
+ printf(" op : 0 - set, 1 - remove\n");
+}
+
+int
+main(int argc, char **argv)
+{
+ int fd;
+ int err = 0;
+ char *xattr_name = NULL;
+ int op = 0;
+
+ if (argc != 4) {
+ usage();
+ exit(1);
+ }
+
+ op = atoi(argv[3]);
+
+ if ((op != 0) && (op != 1)) {
+ printf("Invalid operation specified.\n");
+ usage();
+ exit(1);
+ }
+
+ xattr_name = argv[2];
+
+ fd = open(argv[1], O_RDWR);
+ if (fd == -1) {
+ printf("Failed to open file %s\n", argv[1]);
+ exit(1);
+ }
+
+ if (!op) {
+ err = fsetxattr(fd, xattr_name, MY_XATTR_VAL, strlen(MY_XATTR_VAL) + 1,
+ XATTR_CREATE);
+
+ if (err) {
+ printf("Failed to set xattr %s: %m\n", xattr_name);
+ exit(1);
+ }
+
+ } else {
+ err = fremovexattr(fd, xattr_name);
+
+ if (err) {
+ printf("Failed to remove xattr %s: %m\n", xattr_name);
+ exit(1);
+ }
+ }
+
+ close(fd);
+
+ return 0;
+}
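
This helper exercises the fd-based xattr calls (fsetxattr/fremovexattr) rather than the path-based ones that the setfattr CLI uses. A hedged sketch of building and driving it by hand outside the harness (scratch paths are illustrative):

    gcc -o bug-1193636 bug-1193636.c
    touch /tmp/target
    ./bug-1193636 /tmp/target user.demo 0    # op 0: fsetxattr on the open fd
    getfattr -n user.demo /tmp/target        # should show a user.demo value ("ftestval")
    ./bug-1193636 /tmp/target user.demo 1    # op 1: fremovexattr on the open fd
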
diff --git a/tests/bugs/distribute/bug-1193636.t b/tests/bugs/distribute/bug-1193636.t
new file mode 100644
index 00000000000..b377910336e
--- /dev/null
+++ b/tests/bugs/distribute/bug-1193636.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+
+checksticky () {
+ i=0;
+ while [ ! -k $1 ]; do
+ sleep 1
+ i=$((i+1));
+ if [[ $i == 10 ]]; then
+ return $i
+ fi
+ echo "Waiting... $i"
+ done
+ echo "done ...got out @ $i"
+ return 0
+}
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3};
+TEST $CLI volume start $V0
+
+# Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+TEST mkdir $M0/dir1
+
+# Create a large file (1GB), so that rebalance takes time
+dd if=/dev/zero of=$M0/dir1/FILE2 bs=64k count=10240
+
+# Rename the file to create a linkto, for rebalance to
+# act on the file
+TEST mv $M0/dir1/FILE2 $M0/dir1/FILE1
+
+brick_loc=$(get_backend_paths $M0/dir1/FILE1)
+
+build_tester $(dirname $0)/bug-1193636.c
+
+TEST $CLI volume rebalance $V0 start force
+
+TEST checksticky $brick_loc
+
+TEST setfattr -n "user.test1" -v "test1" $M0/dir1/FILE1
+TEST setfattr -n "user.test2" -v "test1" $M0/dir1/FILE1
+TEST setfattr -n "user.test3" -v "test1" $M0/dir1/FILE1
+
+TEST $(dirname $0)/bug-1193636 $M0/dir1/FILE1 user.fsetx 0
+TEST $(dirname $0)/bug-1193636 $M0/dir1/FILE1 user.fremx 0
+
+TEST getfattr -n "user.fremx" $M0/dir1/FILE1
+TEST setfattr -x "user.test2" $M0/dir1/FILE1
+
+
+TEST $(dirname $0)/bug-1193636 $M0/dir1/FILE1 user.fremx 1
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+TEST getfattr -n "user.fsetx" $M0/dir1/FILE1
+TEST getfattr -n "user.test1" $M0/dir1/FILE1
+TEST ! getfattr -n "user.test2" $M0/dir1/FILE1
+TEST ! getfattr -n "user.fremx" $M0/dir1/FILE1
+TEST getfattr -n "user.test3" $M0/dir1/FILE1
+
+
+cleanup;
diff --git a/tests/bugs/distribute/bug-1204140.t b/tests/bugs/distribute/bug-1204140.t
new file mode 100755
index 00000000000..050c069ea30
--- /dev/null
+++ b/tests/bugs/distribute/bug-1204140.t
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../dht.rc
+
+cleanup;
+
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+TEST touch $M0/new.txt;
+TEST getfattr -n "glusterfs.get_real_filename:NEW.txt" $M0;
+TEST ! getfattr -n "glusterfs.get_realfilename:NEXT.txt" $M0;
+
+
+cleanup;
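
The getfattr calls above poke DHT's virtual glusterfs.get_real_filename:<name> key, which resolves <name> case-insensitively within the directory and returns the on-disk spelling (the main consumer is Samba's case-insensitive lookup path). A sketch against a hypothetical mount at /mnt/gv0:

    touch /mnt/gv0/new.txt
    getfattr -n glusterfs.get_real_filename:NEW.txt /mnt/gv0      # succeeds with value "new.txt"
    getfattr -n glusterfs.get_real_filename:missing.txt /mnt/gv0  # fails: no matching entry
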
diff --git a/tests/bugs/distribute/bug-1247563.t b/tests/bugs/distribute/bug-1247563.t
new file mode 100644
index 00000000000..a2fc722896f
--- /dev/null
+++ b/tests/bugs/distribute/bug-1247563.t
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+is_sticky_set () {
+ echo $1
+ if [ -k $1 ];
+ then
+ echo "yes"
+ else
+ echo "no"
+ fi
+}
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3};
+TEST $CLI volume start $V0
+
+# Mount FUSE
+TEST glusterfs --acl -s $H0 --volfile-id $V0 $M0
+
+TEST mkdir $M0/dir1
+
+echo "Testing pacls on rebalance" > $M0/dir1/FILE1
+
+FPATH1=`find $B0/ -name FILE1`
+
+# Rename the file to create a linkto, for rebalance to
+# act on the file
+
+TEST mv $M0/dir1/FILE1 $M0/dir1/FILE2
+FPATH2=`find $B0/ -perm 1000 -name FILE2`
+
+setfacl -m user:root:rwx $M0/dir1/FILE2
+
+# Run rebalance without the force option to skip
+# the file migration
+TEST $CLI volume rebalance $V0 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+#Check that the file has been skipped, i.e., the linkto still exists
+EXPECT "yes" is_sticky_set $FPATH2
+
+
+#The linkto file should not have any posix acls set
+COUNT=`getfacl $FPATH2 |grep -c "user:root:rwx"`
+EXPECT "0" echo $COUNT
+
+cleanup;
+
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
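
is_sticky_set works because a skipped migration leaves the DHT linkto behind: a zero-byte, mode-01000 (sticky) entry on the brick whose trusted.glusterfs.dht.linkto xattr names the subvolume that actually holds the data. A hedged sketch for inspecting a brick directly (the brick path is illustrative):

    BRICK=/d/backends/patchy1
    find $BRICK -type f -perm 1000 -size 0          # candidate linkto files
    getfattr -n trusted.glusterfs.dht.linkto -e text $BRICK/dir1/FILE2
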
diff --git a/tests/bugs/distribute/bug-1368012.t b/tests/bugs/distribute/bug-1368012.t
new file mode 100644
index 00000000000..0b626353aab
--- /dev/null
+++ b/tests/bugs/distribute/bug-1368012.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function get_permission {
+ stat -c "%A" $1
+}
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+## Lets create volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" online_brick_count
+TEST $CLI volume set $V0 performance.stat-prefetch off
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+
+##Test case: Add-brick
+#------------------------------------------------------------
+#change the permission of the volume root (should propagate to both bricks)
+TEST chmod 444 $M0
+
+#store permission for comparison
+TEST permission_root=`stat -c "%A" $M0`
+TEST echo $permission_root
+#Add-brick
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" online_brick_count
+
+#Allow one lookup to happen
+TEST ls $M0
+#Generate another lookup
+echo 3 > /proc/sys/vm/drop_caches
+TEST ls $M0
+#check root permission
+EXPECT_WITHIN "5" $permission_root get_permission $M0
+#check permission on the new-brick
+EXPECT $permission_root get_permission $B0/${V0}3
+cleanup
diff --git a/tests/bugs/distribute/bug-1389697.t b/tests/bugs/distribute/bug-1389697.t
new file mode 100644
index 00000000000..0d428b8d9d2
--- /dev/null
+++ b/tests/bugs/distribute/bug-1389697.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+
+cleanup
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/b1 $H1:$B1/b2 $H2:$B2/b3
+TEST $CLI_1 volume start $V0
+
+#Start a fix-layout
+TEST $CLI_1 volume rebalance $V0 fix-layout start
+
+#volume rebalance status should work
+TEST $CLI_1 volume rebalance $V0 status
+$CLI_1 volume rebalance $V0 status
+
+$CLI_1 volume rebalance $V0 status 2>&1 | grep "fix-layout"
+val=$?
+TEST [ $val -eq 0 ];
+
+#Start a remove brick for the brick on H2
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/b3 start
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/b3 status
+
+#Check remove brick status from H1
+$CLI_1 volume remove-brick $V0 $H2:$B2/b3 status 2>&1 | grep "fix-layout"
+val=$?
+TEST [ $val -eq 1 ];
+
+$CLI_1 volume remove-brick $V0 $H2:$B2/b3 status
+$CLI_2 volume remove-brick $V0 $H2:$B2/b3 status
+
+
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/b3 stop
+
+cleanup
diff --git a/tests/bugs/distribute/bug-1543279.t b/tests/bugs/distribute/bug-1543279.t
new file mode 100644
index 00000000000..47b8b4a4a95
--- /dev/null
+++ b/tests/bugs/distribute/bug-1543279.t
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../dht.rc
+
+TESTS_EXPECTED_IN_LOOP=44
+SCRIPT_TIMEOUT=600
+
+rename_files() {
+ MOUNT=$1
+ ITERATIONS=$2
+ for i in $(seq 1 $ITERATIONS); do uuid="`uuidgen`"; echo "some data" > $MOUNT/test$uuid; mv $MOUNT/test$uuid $MOUNT/test -f || return $?; done
+}
+
+run_test_for_volume() {
+ VOLUME=$1
+ ITERATIONS=$2
+ TEST_IN_LOOP $CLI volume start $VOLUME
+
+ TEST_IN_LOOP glusterfs -s $H0 --volfile-id $VOLUME $M0
+ TEST_IN_LOOP glusterfs -s $H0 --volfile-id $VOLUME $M1
+ TEST_IN_LOOP glusterfs -s $H0 --volfile-id $VOLUME $M2
+ TEST_IN_LOOP glusterfs -s $H0 --volfile-id $VOLUME $M3
+
+ rename_files $M0 $ITERATIONS &
+ M0_RENAME_PID=$!
+
+ rename_files $M1 $ITERATIONS &
+ M1_RENAME_PID=$!
+
+ rename_files $M2 $ITERATIONS &
+ M2_RENAME_PID=$!
+
+ rename_files $M3 $ITERATIONS &
+ M3_RENAME_PID=$!
+
+ TEST_IN_LOOP wait $M0_RENAME_PID
+ TEST_IN_LOOP wait $M1_RENAME_PID
+ TEST_IN_LOOP wait $M2_RENAME_PID
+ TEST_IN_LOOP wait $M3_RENAME_PID
+
+ TEST_IN_LOOP $CLI volume stop $VOLUME
+ TEST_IN_LOOP $CLI volume delete $VOLUME
+ umount $M0 $M1 $M2 $M3
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0..8} force
+run_test_for_volume $V0 200
+
+TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0..8} force
+run_test_for_volume $V0 200
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..8} force
+run_test_for_volume $V0 200
+
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5} force
+run_test_for_volume $V0 200
+
+cleanup
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
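
TESTS_EXPECTED_IN_LOOP=44 is bookkeeping for the harness: run_test_for_volume issues 11 TEST_IN_LOOP assertions (start, four mounts, four waits, stop, delete) and runs four times, and 4 x 11 = 44. Any test that asserts inside a function or loop follows the same pattern, sketched here assuming the harness's TEST_IN_LOOP helper:

    TESTS_EXPECTED_IN_LOOP=2      # must match the number of TEST_IN_LOOP calls exactly
    check_mount() {
        TEST_IN_LOOP touch $1/probe   # counted against TESTS_EXPECTED_IN_LOOP
        TEST_IN_LOOP rm -f $1/probe
    }
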
diff --git a/tests/bugs/distribute/bug-1600379.t b/tests/bugs/distribute/bug-1600379.t
new file mode 100644
index 00000000000..8d2f6154100
--- /dev/null
+++ b/tests/bugs/distribute/bug-1600379.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# Initialize
+#------------------------------------------------------------
+cleanup;
+
+# Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+# Create a volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
+
+# Verify volume creation
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Start volume and verify successful start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+#------------------------------------------------------------
+
+# Test case - Remove xattr from killed brick on lookup
+#------------------------------------------------------------
+# Create a dir and set custom xattr
+TEST mkdir $M0/testdir
+TEST setfattr -n user.attr -v val $M0/testdir
+xattr_val=`getfattr --only-values -n user.attr $B0/${V0}2/testdir 2>/dev/null`;
+TEST [ "${xattr_val}" = "val" ];
+
+# Kill 2nd brick process
+TEST kill_brick $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "1" online_brick_count
+
+# Remove custom xattr
+TEST setfattr -x user.attr $M0/testdir
+
+# Bring up the killed brick process
+TEST $CLI volume start $V0 force
+
+# Perform lookup
+sleep 5
+TEST ls $M0/testdir
+
+# Check brick xattrs
+xattr_val_2=`getfattr -d $B0/${V0}2/testdir 2>/dev/null`;
+TEST [ -z "${xattr_val_2}" ];
+
+cleanup;
diff --git a/tests/bugs/distribute/bug-1667804.t b/tests/bugs/distribute/bug-1667804.t
new file mode 100644
index 00000000000..3f7c43111d7
--- /dev/null
+++ b/tests/bugs/distribute/bug-1667804.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../dht.rc
+
+function confirm_all_linkto_files ()
+{
+ inpath=$1
+ for infile in $inpath/*
+ do
+ echo $infile
+ ret1=$(is_dht_linkfile $infile)
+ if [ "$ret1" -eq 0 ]; then
+ echo "$infile is not a linkto file"
+ echo 0
+ return
+ fi
+ done
+ echo 1
+}
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
+TEST $CLI volume start $V0
+
+# Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+#Create files and rename them in order to create linkto files
+TEST mkdir -p $M0/dir0/dir1
+TEST touch $M0/dir0/dir1/file-{1..50}
+
+for i in {1..50}; do
+ mv $M0/dir0/dir1/file-$i $M0/dir0/dir1/nfile-$i;
+done
+
+#Remove the second brick to force the creation of linkto files
+#on the removed brick
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}2"
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop
+
+EXPECT "1" confirm_all_linkto_files $B0/${V0}2/dir0/dir1
+
+#Modify the xattrs of the linkto files on the removed brick to point to itself.
+
+target=$(cat $M0/.meta/graphs/active/$V0-dht/subvolumes/1/name)
+
+setfattr -n trusted.glusterfs.dht.linkto -v "$target\0" $B0/${V0}2/dir0/dir1/nfile*
+
+
+TEST rm -rf $M0/dir0
+
+cleanup;
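
The notable trick above is reading the client's own view of the graph through the meta xlator: every FUSE mount exposes a virtual .meta tree, and .../subvolumes/<n>/name yields the subvolume name that a linkto xattr must reference. A sketch, assuming a mounted client at $M0 on a two-brick DHT volume:

    # enumerate the DHT subvolume names the client knows about
    for n in 0 1; do
        cat $M0/.meta/graphs/active/$V0-dht/subvolumes/$n/name
    done
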
diff --git a/tests/bugs/distribute/bug-1786679.t b/tests/bugs/distribute/bug-1786679.t
new file mode 100755
index 00000000000..219ce51c8a9
--- /dev/null
+++ b/tests/bugs/distribute/bug-1786679.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+SCRIPT_TIMEOUT=250
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../dht.rc
+
+
+# create 2 subvols
+# create a dir
+# create a file
+# change layout
+# remove the file
+# execute create from a different mount
+# Without the patch, the file will be present on both of the bricks
+
+cleanup
+
+function get_layout () {
+
+layout=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1 | grep dht | gawk -F"=" '{print $2}'`
+
+echo $layout
+
+}
+
+function set_layout()
+{
+ setfattr -n "trusted.glusterfs.dht" -v $1 $2
+}
+
+TEST glusterd
+TEST pidof glusterd
+
+BRICK1=$B0/${V0}-0
+BRICK2=$B0/${V0}-1
+
+TEST $CLI volume create $V0 $H0:$BRICK1 $H0:$BRICK2
+TEST $CLI volume start $V0
+
+# Mount FUSE and create symlink
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file
+TEST ! stat "$BRICK1/dir/file"
+TEST stat "$BRICK2/dir/file"
+
+layout1="$(get_layout "$BRICK1/dir")"
+layout2="$(get_layout "$BRICK2/dir")"
+
+TEST set_layout $layout1 "$BRICK2/dir"
+TEST set_layout $layout2 "$BRICK1/dir"
+
+TEST rm $M0/dir/file -f
+TEST gluster v set $V0 client-log-level DEBUG
+
+#Without the patch in place, this client will create the file in $BRICK2,
+#which will lead to the file being present on both bricks when a new client
+#creates the file with the same name
+TEST touch $M0/dir/file
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M1
+TEST touch $M1/dir/file
+
+TEST stat "$BRICK1/dir/file"
+TEST ! stat "$BRICK2/dir/file"
+
+cleanup
diff --git a/tests/bugs/distribute/bug-853258.t b/tests/bugs/distribute/bug-853258.t
new file mode 100755
index 00000000000..6817d9e2cd3
--- /dev/null
+++ b/tests/bugs/distribute/bug-853258.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+mkdir -p $B0/${V0}0
+mkdir -p $B0/${V0}1
+mkdir -p $B0/${V0}2
+mkdir -p $B0/${V0}3
+
+# Create and start a volume.
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 cluster.weighted-rebalance off
+
+# Force assignment of initial ranges.
+TEST $CLI volume rebalance $V0 fix-layout start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" fix-layout_status_field $V0
+
+# Get the original values.
+xattrs=""
+for i in $(seq 0 2); do
+ xattrs="$xattrs $(dht_get_layout $B0/${V0}$i)"
+done
+
+# Expand the volume and force assignment of new ranges.
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" online_brick_count
+# Force assignment of initial ranges.
+TEST $CLI volume rebalance $V0 fix-layout start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" fix-layout_status_field $V0
+
+for i in $(seq 0 3); do
+ xattrs="$xattrs $(dht_get_layout $B0/${V0}$i)"
+done
+
+overlap=$( $PYTHON $(dirname $0)/overlap.py $xattrs)
+# 2863311531 = 0xaaaaaaab = 2/3 overlap
+TEST [ "$overlap" -ge 2863311531 ]
+
+cleanup
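
The threshold above requires that at least two thirds of the hash space stays assigned to the same bricks after the fourth brick is added; per the inline comment, 0xaaaaaaab is 2/3 of the 32-bit hash space, rounded up. Quick check with shell arithmetic:

    echo $(( 0xaaaaaaab ))            # 2863311531
    echo $(( 2 * (1 << 32) / 3 ))     # 2863311530, i.e. floor(2/3 * 2^32)
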
diff --git a/tests/bugs/distribute/bug-860663.c b/tests/bugs/distribute/bug-860663.c
new file mode 100644
index 00000000000..ca0c31ffe8f
--- /dev/null
+++ b/tests/bugs/distribute/bug-860663.c
@@ -0,0 +1,41 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <string.h>
+#include <err.h>
+#include <sys/param.h>
+
+int
+main(int argc, char **argv)
+{
+ char *basepath;
+ char path[MAXPATHLEN + 1];
+ unsigned int count;
+ int i, fd;
+
+ if (argc != 3)
+ errx(1, "usage: %s path count", argv[0]);
+
+ basepath = argv[1];
+ count = atoi(argv[2]);
+
+ if (count > 999999)
+ errx(1, "count too big");
+
+ if (strlen(basepath) > MAXPATHLEN - 6)
+ errx(1, "path too long");
+
+ for (i = 0; i < count; i++) {
+ (void)sprintf(path, "%s%06d", basepath, i);
+
+ fd = open(path, O_CREAT | O_RDWR, 0644);
+ if (fd == -1)
+ err(1, "create %s failed", path);
+
+ if (close(fd) != 0)
+ warn("close %s failed", path);
+ }
+
+ return 0;
+}
diff --git a/tests/bugs/distribute/bug-860663.t b/tests/bugs/distribute/bug-860663.t
new file mode 100644
index 00000000000..59b8223ef3f
--- /dev/null
+++ b/tests/bugs/distribute/bug-860663.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+function file_count()
+{
+ val=1
+
+ if [ "$1" == "$2" ]
+ then
+ val=0
+ fi
+ echo $val
+}
+
+BRICK_COUNT=3
+
+build_tester $(dirname $0)/bug-860663.c
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST $(dirname $0)/bug-860663 $M0/files 1000
+
+ORIG_FILE_COUNT=`ls -l $M0 | wc -l`;
+TEST [ $ORIG_FILE_COUNT -ge 1000 ]
+
+# Kill a brick process
+kill_brick $V0 $H0 $B0/${V0}1
+
+TEST $CLI volume rebalance $V0 fix-layout start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout failed" fix-layout_status_field $V0;
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+# Unmount and remount to make sure we're doing fresh lookups.
+TEST umount $M0
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+NEW_FILE_COUNT=`ls -l $M0 | wc -l`;
+
+EXPECT "0" file_count $ORIG_FILE_COUNT $NEW_FILE_COUNT
+
+rm -f $(dirname $0)/bug-860663
+cleanup;
diff --git a/tests/bugs/distribute/bug-862967.t b/tests/bugs/distribute/bug-862967.t
new file mode 100644
index 00000000000..2fb0848bd7c
--- /dev/null
+++ b/tests/bugs/distribute/bug-862967.t
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function uid_gid_compare()
+{
+ val=1
+
+ if [ "$1" == "$3" ]
+ then
+ if [ "$2" == "$4" ]
+ then
+ val=0
+ fi
+ fi
+ echo "$val"
+}
+
+BRICK_COUNT=3
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs --attribute-timeout=0 --entry-timeout=0 --gid-timeout=-1 -s $H0 --volfile-id $V0 $M0;
+
+# change dir permissions
+mkdir $M0/dir;
+chown 1:1 $M0/dir;
+
+# Kill a brick process
+
+kill_brick $V0 $H0 $B0/${V0}2
+# change dir ownership
+NEW_UID=36;
+NEW_GID=36;
+chown $NEW_UID:$NEW_GID $M0/dir;
+
+# bring the brick back up
+TEST $CLI volume start $V0 force
+
+sleep 10;
+
+ls -l $M0/dir;
+
+# check if uid/gid is healed on backend brick which was taken down
+BACKEND_UID=`stat -c %u $B0/${V0}2/dir`;
+BACKEND_GID=`stat -c %g $B0/${V0}2/dir`;
+
+EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID
+
+cleanup;
diff --git a/tests/bugs/distribute/bug-882278.t b/tests/bugs/distribute/bug-882278.t
new file mode 100755
index 00000000000..8cb51474720
--- /dev/null
+++ b/tests/bugs/distribute/bug-882278.t
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup
+
+# Is there a good reason to require --fqdn elsewhere? It's worse than useless
+# here.
+H0=$(hostname -s)
+
+function recreate {
+ # The rm is necessary so we don't get fooled by leftovers from old runs.
+ rm -rf $1 && mkdir -p $1
+}
+
+function count_lines {
+ grep "$1" $2/* | wc -l
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+TEST recreate ${B0}/${V0}-0
+TEST recreate ${B0}/${V0}-1
+TEST $CLI volume create $V0 $H0:$B0/${V0}-{0,1}
+TEST $CLI volume set $V0 cluster.nufa on
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount native
+special_option="--xlator-option ${V0}-dht.local-volume-name=${V0}-client-1"
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $special_option $M0
+
+## Create a bunch of test files.
+for i in $(seq 0 99); do
+ echo hello > $(printf $M0/file%02d $i)
+done
+
+## Make sure the files went to the right place. There might be link files in
+## the other brick, but they won't have any contents.
+EXPECT "0" count_lines hello ${B0}/${V0}-0
+EXPECT "100" count_lines hello ${B0}/${V0}-1
+
+if [ "$EXIT_EARLY" = "1" ]; then
+ exit 0;
+fi
+
+## Finish up
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/distribute/bug-884455.t b/tests/bugs/distribute/bug-884455.t
new file mode 100755
index 00000000000..59413cd5408
--- /dev/null
+++ b/tests/bugs/distribute/bug-884455.t
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../dht.rc
+
+cleanup;
+
+function layout_compare()
+{
+ res=0
+
+ if [ "$1" == "$2" ]
+ then
+ res=1
+ fi
+ if [ "$1" == "$3" ]
+ then
+ res=1
+ fi
+ if [ "$2" == "$3" ]
+ then
+ res=1
+ fi
+
+ echo $res
+}
+
+function get_layout()
+{
+ layout1=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1|grep dht |cut -d = -f2`
+ layout2=`getfattr -n trusted.glusterfs.dht -e hex $2 2>&1|grep dht |cut -d = -f2`
+ layout3=`getfattr -n trusted.glusterfs.dht -e hex $3 2>&1|grep dht |cut -d = -f2`
+
+ ret=$(layout_compare $layout1 $layout2 $layout3)
+
+ if [ $ret -ne 0 ]
+ then
+ echo 1
+ else
+ echo 0
+ fi
+
+}
+
+BRICK_COUNT=3
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+## set subvols-per-dir option
+TEST $CLI volume set $V0 subvols-per-directory 2
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST mkdir $M0/dir{1..10} 2>/dev/null;
+
+## Add-brick and run rebalance to force a re-write of the layout
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}2
+sleep 5;
+
+## trigger dir self heal on client
+TEST ls -l $M0 2>/dev/null;
+
+TEST $CLI volume rebalance $V0 start force
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed
+
+## check for layout overlaps.
+EXPECT "0" get_layout $B0/${V0}0 $B0/${V0}1 $B0/${V0}2
+EXPECT "0" get_layout $B0/${V0}0/dir1 $B0/${V0}1/dir1 $B0/${V0}2/dir1
+EXPECT "0" get_layout $B0/${V0}0/dir2 $B0/${V0}1/dir2 $B0/${V0}2/dir2
+EXPECT "0" get_layout $B0/${V0}0/dir3 $B0/${V0}1/dir3 $B0/${V0}2/dir3
+EXPECT "0" get_layout $B0/${V0}0/dir4 $B0/${V0}1/dir4 $B0/${V0}2/dir4
+EXPECT "0" get_layout $B0/${V0}0/dir5 $B0/${V0}1/dir5 $B0/${V0}2/dir5
+EXPECT "0" get_layout $B0/${V0}0/dir6 $B0/${V0}1/dir6 $B0/${V0}2/dir6
+EXPECT "0" get_layout $B0/${V0}0/dir7 $B0/${V0}1/dir7 $B0/${V0}2/dir7
+EXPECT "0" get_layout $B0/${V0}0/dir8 $B0/${V0}1/dir8 $B0/${V0}2/dir8
+EXPECT "0" get_layout $B0/${V0}0/dir9 $B0/${V0}1/dir9 $B0/${V0}2/dir9
+EXPECT "0" get_layout $B0/${V0}0/dir10 $B0/${V0}1/dir10 $B0/${V0}2/dir10
+
+cleanup;
diff --git a/tests/bugs/distribute/bug-884597.t b/tests/bugs/distribute/bug-884597.t
new file mode 100755
index 00000000000..d6a2c65f370
--- /dev/null
+++ b/tests/bugs/distribute/bug-884597.t
@@ -0,0 +1,173 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../dht.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+BRICK_COUNT=3
+function uid_gid_compare()
+{
+ val=1
+
+ if [ "$1" == "$3" ]
+ then
+ if [ "$2" == "$4" ]
+ then
+ val=0
+ fi
+ fi
+ echo "$val"
+}
+
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs --attribute-timeout=0 --entry-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+i=1
+NEW_UID=36
+NEW_GID=36
+
+TEST touch $M0/$i
+
+chown $NEW_UID:$NEW_GID $M0/$i
+## rename till file gets a linkfile
+
+has_link=0
+while [ $i -lt 100 ]
+do
+ mv $M0/$i $M0/$(( $i+1 ))
+ if [ $? -ne 0 ]
+ then
+ break
+ fi
+ let i++
+ file_has_linkfile $i
+ has_link=$?
+ if [ $has_link -eq 2 ]
+ then
+ break;
+ fi
+done
+
+TEST [ $has_link -eq 2 ]
+
+get_hashed_brick $i
+cached=$?
+
+# check if uid/gid on linkfile is created with correct uid/gid
+BACKEND_UID=`stat -c %u $B0/${V0}$cached/$i`;
+BACKEND_GID=`stat -c %g $B0/${V0}$cached/$i`;
+
+EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID
+
+# remove linkfile from backend, and trigger a lookup heal. uid/gid should match
+rm -rf $B0/${V0}$cached/$i
+
+# without an unmount, we are not able to trigger a lookup-based heal
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+## Mount FUSE
+TEST glusterfs --attribute-timeout=0 --entry-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+lookup=`ls -l $M0/$i 2>/dev/null`
+
+# check if uid/gid on linkfile is created with correct uid/gid
+BACKEND_UID=`stat -c %u $B0/${V0}$cached/$i`;
+BACKEND_GID=`stat -c %g $B0/${V0}$cached/$i`;
+
+EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID
+# create hardlinks. Make sure a linkfile gets created
+
+i=1
+NEW_UID=36
+NEW_GID=36
+
+TEST touch $M0/file
+chown $NEW_UID:$NEW_GID $M0/file;
+
+## ln till file gets a linkfile
+
+has_link=0
+while [ $i -lt 100 ]
+do
+ ln $M0/file $M0/link$i
+ if [ $? -ne 0 ]
+ then
+ break
+ fi
+ file_has_linkfile link$i
+ has_link=$?
+ if [ $has_link -eq 2 ]
+ then
+ break;
+ fi
+ let i++
+done
+
+TEST [ $has_link -eq 2 ]
+
+get_hashed_brick link$i
+cached=$?
+
+# check if uid/gid on linkfile is created with correct uid/gid
+BACKEND_UID=`stat -c %u $B0/${V0}$cached/link$i`;
+BACKEND_GID=`stat -c %g $B0/${V0}$cached/link$i`;
+
+EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID
+
+## UID/GID creation as different user
+i=1
+NEW_UID=36
+NEW_GID=36
+
+TEST touch $M0/user_file1
+TEST chown $NEW_UID:$NEW_GID $M0/user_file1;
+
+## Give permission on volume, so that different users can perform rename
+
+TEST chmod 0777 $M0
+
+## Add a user known as ABC and perform renames
+TEST `useradd -M ABC 2>/dev/null`
+
+TEST cd $M0
+## rename as different user till file gets a linkfile
+
+has_link=0
+while [ $i -lt 100 ]
+do
+ su -m ABC -c "mv $M0/user_file$i $M0/user_file$(( $i+1 ))"
+ if [ $? -ne 0 ]
+ then
+ break
+ fi
+ let i++
+ file_has_linkfile user_file$i
+ has_link=$?
+ if [ $has_link -eq 2 ]
+ then
+ break;
+ fi
+done
+
+TEST [ $has_link -eq 2 ]
+
+## del user ABC
+TEST userdel ABC
+
+get_hashed_brick user_file$i
+cached=$?
+
+# check if uid/gid on linkfile is created with correct uid/gid
+BACKEND_UID=`stat -c %u $B0/${V0}$cached/user_file$i`;
+BACKEND_GID=`stat -c %g $B0/${V0}$cached/user_file$i`;
+
+EXPECT "0" uid_gid_compare $NEW_UID $NEW_GID $BACKEND_UID $BACKEND_GID
+cleanup;
diff --git a/tests/bugs/distribute/bug-907072.t b/tests/bugs/distribute/bug-907072.t
new file mode 100755
index 00000000000..a4d98831380
--- /dev/null
+++ b/tests/bugs/distribute/bug-907072.t
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../fileio.rc
+. $(dirname $0)/../../dht.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1,2,3};
+TEST $CLI volume start $V0;
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST mkdir $M0/test;
+
+# Extract the layout sans the commit hash
+OLD_LAYOUT0=`get_layout $B0/${V0}0/test | cut -c11-34`;
+OLD_LAYOUT1=`get_layout $B0/${V0}1/test | cut -c11-34`;
+OLD_LAYOUT2=`get_layout $B0/${V0}2/test | cut -c11-34`;
+OLD_LAYOUT3=`get_layout $B0/${V0}3/test | cut -c11-34`;
+
+TEST killall glusterfsd;
+
+# Delete directory on one brick
+TEST rm -rf $B0/${V0}1/test;
+
+# And only layout xattr on another brick
+TEST setfattr -x trusted.glusterfs.dht $B0/${V0}2/test;
+
+TEST $CLI volume start $V0 force;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+TEST stat $M0/test;
+
+# Extract the layout sans the commit hash
+NEW_LAYOUT0=`get_layout $B0/${V0}0/test | cut -c11-34`;
+NEW_LAYOUT1=`get_layout $B0/${V0}1/test | cut -c11-34`;
+NEW_LAYOUT2=`get_layout $B0/${V0}2/test | cut -c11-34`;
+NEW_LAYOUT3=`get_layout $B0/${V0}3/test | cut -c11-34`;
+
+EXPECT $OLD_LAYOUT0 echo $NEW_LAYOUT0;
+EXPECT $OLD_LAYOUT1 echo $NEW_LAYOUT1;
+EXPECT $OLD_LAYOUT2 echo $NEW_LAYOUT2;
+EXPECT $OLD_LAYOUT3 echo $NEW_LAYOUT3;
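
The cut -c11-34 trick assumes get_layout (from dht.rc) prints the xattr as "0x" followed by 32 hex digits: four 32-bit words, of which the first is the commit hash that rebalance regenerates, so the comparison keeps only the remaining words (range start and end sit in the last two; the exact meaning of the second word is my reading, not spelled out here). A sketch slicing the same fields with bash substrings, given such a string:

    layout=0x00000002000000003fffffff7ffffffe   # illustrative value
    echo "${layout:2:8}"     # word 1: commit hash (volatile across rebalances)
    echo "${layout:18:8}"    # word 3: hash range start
    echo "${layout:26:8}"    # word 4: hash range end
    echo "${layout:10:24}"   # the slice that cut -c11-34 extracts
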
diff --git a/tests/bugs/distribute/bug-912564.t b/tests/bugs/distribute/bug-912564.t
new file mode 100755
index 00000000000..d437728f83b
--- /dev/null
+++ b/tests/bugs/distribute/bug-912564.t
@@ -0,0 +1,92 @@
+#!/bin/bash
+
+# Test that the rsync and "extra" regexes cause rename-in-place without
+# creating linkfiles, when they're supposed to. Without the regex we'd have a
+# 1/4 chance of each file being assigned to the right place, so with 16 files
+# we have a 1/2^32 chance of getting the correct result by accident.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function count_linkfiles {
+ local i
+ local count=0
+ for i in $(seq $2 $3); do
+ x=$(find $1$i -perm -1000 | wc -l)
+ # Divide by two because of the .glusterfs links.
+ count=$((count+x/2))
+ done
+ echo $count
+}
+
+# This function only exists to get around quoting difficulties in TEST.
+function set_regex {
+ $CLI volume set $1 cluster.extra-hash-regex '^foo(.+)bar$'
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+mkdir -p $B0/${V0}0
+mkdir -p $B0/${V0}1
+mkdir -p $B0/${V0}2
+mkdir -p $B0/${V0}3
+
+# Create and start a volume.
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \
+ $H0:$B0/${V0}2 $H0:$B0/${V0}3
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
+
+# Mount it.
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+# Make sure the rsync regex works, by verifying that no linkfiles are
+# created.
+rm -f $M0/file*
+for i in $(seq 0 15); do
+ fn=$(printf file%x $i)
+ tmp_fn=$(printf .%s.%d $fn $RANDOM)
+ echo testing > $M0/$tmp_fn
+ mv $M0/$tmp_fn $M0/$fn
+done
+lf=$(count_linkfiles $B0/$V0 0 3)
+TEST [ "$lf" -eq "0" ]
+
+# Make sure that linkfiles *are* created for normal files.
+rm -f $M0/file*
+for i in $(seq 0 15); do
+ fn=$(printf file%x $i)
+ tmp_fn=$(printf foo%sbar $fn)
+ echo testing > $M0/$tmp_fn
+ mv $M0/$tmp_fn $M0/$fn
+done
+lf=$(count_linkfiles $B0/$V0 0 3)
+TEST [ "$lf" -ne "0" ]
+
+# Make sure that setting an extra regex suppresses the linkfiles.
+TEST set_regex $V0
+rm -f $M0/file*
+for i in $(seq 0 15); do
+ fn=$(printf file%x $i)
+ tmp_fn=$(printf foo%sbar $fn)
+ echo testing > $M0/$tmp_fn
+ mv $M0/$tmp_fn $M0/$fn
+done
+lf=$(count_linkfiles $B0/$V0 0 3)
+TEST [ "$lf" -eq "0" ]
+
+# Re-test the rsync regex, to make sure the extra one didn't break it.
+rm -f $M0/file*
+for i in $(seq 0 15); do
+ fn=$(printf file%x $i)
+ tmp_fn=$(printf .%s.%d $fn $RANDOM)
+ echo testing > $M0/$tmp_fn
+ mv $M0/$tmp_fn $M0/$fn
+done
+lf=$(count_linkfiles $B0/$V0 0 3)
+TEST [ "$lf" -eq "0" ]
+
+cleanup
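
The first loop mimics rsync, which writes to a temporary .<name>.<random> and renames it over <name>; DHT's built-in rsync regex (by default something like ^\.(.+)\.[^.]+$, stated here from memory) strips the wrapper before hashing, so the temp and final names land on the same subvolume and the rename needs no linkfile. The foo...bar shape only stops producing linkfiles once the custom cluster.extra-hash-regex above is set. The two name shapes, in isolation:

    fn=file0a
    printf '.%s.%d\n' "$fn" $RANDOM    # rsync-style temp name, e.g. .file0a.12345
    printf 'foo%sbar\n' "$fn"          # matched only by the extra regex '^foo(.+)bar$'
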
diff --git a/tests/bugs/distribute/bug-915554.t b/tests/bugs/distribute/bug-915554.t
new file mode 100755
index 00000000000..1f59008c56f
--- /dev/null
+++ b/tests/bugs/distribute/bug-915554.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+#
+# Bug <915554>
+#
+# This test checks for a condition where a rebalance migrates a file and does
+# not preserve the original file size. This can occur due to hole preservation
+# logic in the file migration code. If a file size is aligned to a disk sector
+# boundary (512b) and the tail portion of the file is zero-filled, the file
+# may end up truncated to the end of the last data region in the file.
+#
+###
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../dht.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+BRICK_COUNT=3
+# create, start and mount a two brick DHT volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume start $V0
+
+TEST glusterfs --attribute-timeout=0 --entry-timeout=0 --gid-timeout=-1 -s $H0 --volfile-id $V0 $M0;
+
+i=1
+# Write some data to a file and extend such that the file is sparse to a sector
+# aligned boundary.
+echo test > $M0/$i
+TEST truncate -s 1M $M0/$i
+
+# cache the original size
+SIZE1=`stat -c %s $M0/$i`
+
+# rename till file gets a linkfile
+
+while [ $i -lt 100 ]
+do
+ test=`mv $M0/$i $M0/$(( $i+1 )) 2>/dev/null`
+ if [ $? -ne 0 ]
+ then
+ echo "rename failed"
+ break
+ fi
+ let i++
+ file_has_linkfile $i
+ has_link=$?
+ if [ $has_link -eq 2 ]
+ then
+ break;
+ fi
+done
+
+# start a rebalance (force option to override checks) to trigger migration of
+# file
+
+TEST $CLI volume rebalance $V0 start force
+
+# wait for rebalance to complete, up to $REBALANCE_TIMEOUT seconds
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed
+
+# validate the file size after the migration
+SIZE2=`stat -c %s $M0/$i`
+
+TEST [ $SIZE1 -eq $SIZE2 ]
+
+TEST rm -f $M0/$i
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
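
The setup depends on "truncate -s 1M" extending the file with a trailing hole, so the apparent size (%s) is sector-aligned while almost no blocks are allocated; the bug under test is the migration logic truncating the file back to its last data byte. A standalone illustration:

    echo test > /tmp/sparse
    truncate -s 1M /tmp/sparse
    stat -c 'apparent=%s blocks=%b' /tmp/sparse   # apparent=1048576, very few blocks
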
diff --git a/tests/bugs/distribute/bug-921408.t b/tests/bugs/distribute/bug-921408.t
new file mode 100755
index 00000000000..559114bb85a
--- /dev/null
+++ b/tests/bugs/distribute/bug-921408.t
@@ -0,0 +1,90 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../fileio.rc
+. $(dirname $0)/../../dht.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+wait_check_status ()
+{
+ n=0
+ while [ $n -lt $1 ]
+ do
+ ret=$(rebalance_completed)
+ if [ $ret == "0" ]
+ then
+ return 0;
+ else
+ sleep 1
+ n=`expr $n + 1`;
+ fi
+ done
+ return 1;
+}
+
+addbr_rebal_till_layout_change()
+{
+ val=1
+ l=$1
+ i=1
+ while [ $i -lt 5 ]
+ do
+ $CLI volume add-brick $V0 $H0:$B0/${V0}$l &>/dev/null
+ $CLI volume rebalance $V0 fix-layout start &>/dev/null
+ wait_check_status $REBALANCE_TIMEOUT
+ if [ $? -eq 1 ]
+ then
+ break
+ fi
+ NEW_LAYOUT=`get_layout $B0/${V0}0 | cut -c11-34`
+ if [ $OLD_LAYOUT == $NEW_LAYOUT ]
+ then
+ i=`expr $i + 1`;
+ l=`expr $l + 1`;
+ else
+ val=0
+ break
+ fi
+ done
+ return $val
+}
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 subvols-per-directory 1
+TEST $CLI volume start $V0
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST mkdir $M0/test
+TEST touch $M0/test/test
+
+fd=`fd_available`
+TEST fd_open $fd "rw" $M0/test/test
+
+OLD_LAYOUT=`get_layout $B0/${V0}0 | cut -c11-34`
+
+addbr_rebal_till_layout_change 1
+
+TEST [ $? -eq 0 ]
+
+for i in $(seq 1 1000)
+do
+ ls -l $M0/ >/dev/null
+ ret=$?
+ if [ $ret != 0 ]
+ then
+ break
+ fi
+done
+
+TEST [ $ret == 0 ];
+TEST fd_close $fd;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/distribute/bug-924265.t b/tests/bugs/distribute/bug-924265.t
new file mode 100755
index 00000000000..67c21de97cb
--- /dev/null
+++ b/tests/bugs/distribute/bug-924265.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Test that setting cluster.dht-xattr-name works, and that DHT consistently
+# uses the specified name instead of the default.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# We only care about the exit code, so keep it quiet.
+function silent_getfattr {
+ getfattr $* &> /dev/null
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+mkdir -p $B0/${V0}0
+
+# Create a volume and set the option.
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 cluster.dht-xattr-name trusted.foo.bar
+
+# Start and mount the volume.
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+# Create a directory and make sure it has the right xattr.
+mkdir $M0/test
+TEST ! silent_getfattr -n trusted.glusterfs.dht $B0/${V0}0/test
+TEST silent_getfattr -n trusted.foo.bar $B0/${V0}0/test
+
+cleanup
diff --git a/tests/bugs/distribute/bug-961615.t b/tests/bugs/distribute/bug-961615.t
new file mode 100644
index 00000000000..00938e8fa9b
--- /dev/null
+++ b/tests/bugs/distribute/bug-961615.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This test checks that an extra fd_unref does not happen in dht's rebalance
+#migration completion check code path
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST touch $M0/1
+#This rename creates a linkfile for '10' on the other subvolume.
+TEST mv $M0/1 $M0/10
+#Let's keep writing to the file, which will trigger the rebalance completion check
+dd if=/dev/zero of=$M0/10 bs=1k &
+bg_pid=$!
+#Now rebalance force will migrate file '10'
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+#If the bug exists, the mount would have crashed by now
+TEST ls $M0
+kill -9 $bg_pid > /dev/null 2>&1
+wait > /dev/null 2>&1
+cleanup
diff --git a/tests/bugs/distribute/bug-973073.t b/tests/bugs/distribute/bug-973073.t
new file mode 100755
index 00000000000..cdb785abac0
--- /dev/null
+++ b/tests/bugs/distribute/bug-973073.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../dht.rc
+
+## Steps followed are the ones described in the bugzilla report
+
+cleanup;
+
+function get_layout()
+{
+ layout1=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1`
+
+ if [ $? -ne 0 ]
+ then
+ echo 1
+ else
+ echo 0
+ fi
+
+}
+
+BRICK_COUNT=3
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
+
+## remove-brick status == rebalance_status
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" remove_brick_completed
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop
+
+TEST $CLI volume rebalance $V0 fix-layout start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed
+
+TEST mkdir $M0/dir 2>/dev/null;
+
+EXPECT "0" get_layout $B0/${V0}2/dir
+cleanup;
diff --git a/tests/bugs/distribute/issue-1327.t b/tests/bugs/distribute/issue-1327.t
new file mode 100755
index 00000000000..acd8c8c6614
--- /dev/null
+++ b/tests/bugs/distribute/issue-1327.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+SCRIPT_TIMEOUT=250
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../dht.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+BRICK1=$B0/${V0}-0
+BRICK2=$B0/${V0}-1
+
+TEST $CLI volume create $V0 $H0:$BRICK1 $H0:$BRICK2
+TEST $CLI volume start $V0
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST mkdir $M0/dir
+
+#remove the dir from one of the bricks
+TEST rmdir $BRICK2/dir
+
+#wait out the cache timeout so that a fresh lookup is triggered
+sleep 2
+
+TEST ls $M0/dir
+
+TEST stat $BRICK2/dir
+
+cleanup
diff --git a/tests/bugs/distribute/overlap.py b/tests/bugs/distribute/overlap.py
new file mode 100755
index 00000000000..2813979787b
--- /dev/null
+++ b/tests/bugs/distribute/overlap.py
@@ -0,0 +1,59 @@
+
+from __future__ import print_function
+import sys
+
+def calculate_one (ov, nv):
+ old_start = int(ov[18:26], 16)
+ old_end = int(ov[26:34], 16)
+ new_start = int(nv[18:26], 16)
+ new_end = int(nv[26:34], 16)
+ if (new_end < old_start) or (new_start > old_end):
+ #print '%s, %s -> ZERO' % (ov, nv)
+ return 0
+ all_start = max(old_start, new_start)
+ all_end = min(old_end, new_end)
+ #print '%s, %s -> %08x' % (ov, nv, all_end - all_start + 1)
+ return all_end - all_start + 1
+
+def calculate_all (values):
+ total = 0
+ nv_index = len(values) // 2
+ for old_val in values[:nv_index]:
+ new_val = values[nv_index]
+ nv_index += 1
+ total += calculate_one(old_val, new_val)
+ return total
+
+"""
+test1_vals = [
+ '0x0000000000000000000000003fffffff', # first quarter
+ '0x0000000000000000400000007fffffff', # second quarter
+ '0x000000000000000080000000ffffffff', # second half
+ '0x00000000000000000000000055555554', # first third
+ '0x000000000000000055555555aaaaaaa9', # second third
+ '0x0000000000000000aaaaaaaaffffffff', # last third
+]
+
+test2_vals = [
+ '0x0000000000000000000000003fffffff', # first quarter
+ '0x0000000000000000400000007fffffff', # second quarter
+ '0x000000000000000080000000ffffffff', # second half
+ '0x00000000000000000000000055555554', # first third
+ # Next two are (incorrectly) swapped.
+ '0x0000000000000000aaaaaaaaffffffff', # last third
+ '0x000000000000000055555555aaaaaaa9', # second third
+]
+
+print('%08x' % calculate_one(test1_vals[0], test1_vals[3]))
+print('%08x' % calculate_one(test1_vals[1], test1_vals[4]))
+print('%08x' % calculate_one(test1_vals[2], test1_vals[5]))
+print('= %08x' % calculate_all(test1_vals))
+print('%08x' % calculate_one(test2_vals[0], test2_vals[3]))
+print('%08x' % calculate_one(test2_vals[1], test2_vals[4]))
+print('%08x' % calculate_one(test2_vals[2], test2_vals[5]))
+print('= %08x' % calculate_all(test2_vals))
+"""
+
+if __name__ == '__main__':
+ # Return decimal so bash can reason about it.
+ print('%d' % calculate_all(sys.argv[1:]))
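
calculate_all splits its arguments in half, treats the first half as the old per-brick layouts and the second half as the new ones, pairs them positionally, and sums the widths of the range intersections. Reusing two of the commented-out vectors as a worked example (first and second quarters old, first and second thirds new):

    python overlap.py \
        0x0000000000000000000000003fffffff \
        0x0000000000000000400000007fffffff \
        0x00000000000000000000000055555554 \
        0x000000000000000055555555aaaaaaa9
    # pairs (old1,new1) and (old2,new2): 0x40000000 + 0x2aaaaaab,
    # printed in decimal for bash: 1789569707
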
diff --git a/tests/bugs/ec/bug-1161621.t b/tests/bugs/ec/bug-1161621.t
new file mode 100644
index 00000000000..84361e440dc
--- /dev/null
+++ b/tests/bugs/ec/bug-1161621.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+. $(dirname $0)/../../traps.rc
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 5 redundancy 2 $H0:$B0/${V0}{0..4}
+EXPECT "Created" volinfo_field $V0 'Status'
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Started" volinfo_field $V0 'Status'
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "5" ec_child_up_count $V0 0
+
+tmpdir=$(mktemp -d -t ${0##*/}.XXXXXX)
+push_trapfunc "rm -rf $tmpdir"
+
+TEST dd if=/dev/urandom of=$tmpdir/file bs=1234 count=20
+cs=$(sha1sum $tmpdir/file | awk '{ print $1 }')
+# Test O_APPEND on create
+TEST dd if=$tmpdir/file of=$M0/file bs=1234 count=10 oflag=append
+# Test O_APPEND on open
+TEST dd if=$tmpdir/file of=$M0/file bs=1234 skip=10 oflag=append conv=notrunc
+EXPECT "$cs" echo $(sha1sum $M0/file | awk '{ print $1 }')
+
+# Fill a file with ff (I don't use 0's because empty holes created by an
+# incorrect offset will be returned as 0's and won't be detected)
+dd if=/dev/zero bs=24680 count=1000 | tr '\0' '\377' >$tmpdir/shared
+cs=$(sha1sum $tmpdir/shared | awk '{ print $1 }')
+# Test concurrent writes to the same file using O_APPEND
+dd if=$tmpdir/shared of=$M0/shared bs=123400 count=100 oflag=append conv=notrunc &
+dd if=$tmpdir/shared of=$M1/shared bs=123400 count=100 oflag=append conv=notrunc &
+wait
+
+EXPECT "24680000" stat -c "%s" $M0/shared
+EXPECT "$cs" echo $(sha1sum $M0/shared | awk '{ print $1 }')
+
+cleanup
diff --git a/tests/bugs/ec/bug-1161886.c b/tests/bugs/ec/bug-1161886.c
new file mode 100644
index 00000000000..1f12650ea6d
--- /dev/null
+++ b/tests/bugs/ec/bug-1161886.c
@@ -0,0 +1,53 @@
+
+#include <stdio.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd = NULL;
+ int ret = 1;
+
+ if (argc != 4) {
+ fprintf(stderr, "Syntax: %s <host> <volname> <file>\n", argv[0]);
+ return 1;
+ }
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", argv[1], 24007);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_volfile_server: returned %d\n", ret);
+ goto out;
+ }
+ ret = glfs_set_logging(fs, "/dev/null", 7);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_logging: returned %d\n", ret);
+ goto out;
+ }
+ ret = glfs_init(fs);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+ goto out;
+ }
+
+ fd = glfs_open(fs, argv[3], O_RDWR | O_TRUNC);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_open: returned NULL\n");
+ goto out;
+ }
+ glfs_close(fd);
+
+ ret = 0;
+
+out:
+ glfs_fini(fs);
+
+ return ret;
+}
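
The matching .t builds this helper with build_tester and -lgfapi; done by hand, the equivalent is roughly the following (host and volume names are placeholders):

    gcc -o bug-1161886 bug-1161886.c -lgfapi
    ./bug-1161886 myhost myvol /file2    # glfs_open(..., O_RDWR | O_TRUNC) on the volume
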
diff --git a/tests/bugs/ec/bug-1161886.t b/tests/bugs/ec/bug-1161886.t
new file mode 100644
index 00000000000..e7322210a3e
--- /dev/null
+++ b/tests/bugs/ec/bug-1161886.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function check_ec_size
+{
+ local res
+
+ for i in {0..2}; do
+ res=$(( `getfattr -n trusted.ec.size -e hex $B0/$V0$i/$1 | sed -n "s/^trusted.ec.size=//p"` ))
+ if [ "x$res" == "x" -o "$res" != "$2" ]; then
+ echo "N"
+ return 0
+ fi
+ done
+ echo "Y"
+ return 0
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 redundancy 1 $H0:$B0/${V0}{0..2}
+EXPECT "Created" volinfo_field $V0 'Status'
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Started" volinfo_field $V0 'Status'
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+
+TEST dd if=/dev/zero of=$M0/file1 bs=4k count=1
+TEST mv $M0/file1 $M0/file2
+TEST ! stat $M0/file1
+TEST stat $M0/file2
+
+TEST build_tester $(dirname $0)/bug-1161886.c -lgfapi -Wall -O2
+TEST $(dirname $0)/bug-1161886 $H0 $V0 /file2
+EXPECT "^0$" stat -c "%s" $M0/file2
+EXPECT "^Y$" check_ec_size file2 0
+
+rm -f $(dirname $0)/bug-1161886
+
+cleanup
diff --git a/tests/bugs/ec/bug-1179050.t b/tests/bugs/ec/bug-1179050.t
new file mode 100644
index 00000000000..ea2d7b39838
--- /dev/null
+++ b/tests/bugs/ec/bug-1179050.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 redundancy 1 $H0:$B0/${V0}{0..2}
+EXPECT "Created" volinfo_field $V0 'Status'
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Started" volinfo_field $V0 'Status'
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+
+TEST $CLI volume clear-locks $V0 / kind all inode
+
+sleep 1
+
+EXPECT "3" ec_child_up_count $V0 0
+
+cleanup
diff --git a/tests/bugs/ec/bug-1187474.t b/tests/bugs/ec/bug-1187474.t
new file mode 100644
index 00000000000..e6344c26e73
--- /dev/null
+++ b/tests/bugs/ec/bug-1187474.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+function check_dir()
+{
+ local count
+
+ count=`ls $1 | grep "dir.[0-9]*" | wc -l`
+ if [[ $count -eq 100 ]]; then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 redundancy 2 $H0:$B0/${V0}{0..5}
+EXPECT "Created" volinfo_field $V0 'Status'
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Started" volinfo_field $V0 'Status'
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+TEST mount_nfs $H0:/$V0 $N0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+TEST mkdir $M0/dir.{1..100}
+
+sleep 2
+
+EXPECT "Y" check_dir $N0
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+cleanup
diff --git a/tests/bugs/ec/bug-1188145.t b/tests/bugs/ec/bug-1188145.t
new file mode 100644
index 00000000000..aa3a59bc62f
--- /dev/null
+++ b/tests/bugs/ec/bug-1188145.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function create_dirs()
+{
+ local stop=$1
+ local idx
+ local res
+
+ res=0
+ idx=1
+ while [[ -f ${stop} ]]; do
+ mkdir $M0/${idx}
+ if [[ $? -ne 0 ]]; then
+ res=1
+ break;
+ fi
+ idx=$(( idx + 1 ))
+ done
+
+ return ${res}
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 redundancy 2 $H0:$B0/${V0}{0..5}
+EXPECT "Created" volinfo_field $V0 'Status'
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Started" volinfo_field $V0 'Status'
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+name=`mktemp -t ${0##*/}.XXXXXX`
+create_dirs ${name} &
+pid=$!
+
+sleep 2
+TEST $CLI volume set $V0 uss on
+sleep 5
+TEST $CLI volume set $V0 uss off
+sleep 5
+
+TEST rm -f ${name}
+TEST wait ${pid}
+
+cleanup
diff --git a/tests/bugs/ec/bug-1227869.t b/tests/bugs/ec/bug-1227869.t
new file mode 100644
index 00000000000..00fad825fae
--- /dev/null
+++ b/tests/bugs/ec/bug-1227869.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+QDD=$(dirname $0)/quota
+# compile the test write program and run it
+build_tester $(dirname $0)/../../basic/quota.c -o $QDD
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 3 $H0:$B0/${V0}{1..3}
+EXPECT "Created" volinfo_field $V0 'Status'
+
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Started" volinfo_field $V0 'Status'
+
+TEST $CLI volume quota $V0 enable
+TEST $CLI volume quota $V0 limit-usage / 100MB
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST $QDD $M0/file 256 40
+
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "10.0MB" quotausage "/"
+
+EXPECT "0" echo $(df -k $M0 | grep -q '10240 '; echo $?)
+EXPECT "0" echo $(df -k $M0 | grep -q '92160 '; echo $?)
+
+rm -f $QDD
+cleanup
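
The two df greps encode the quota arithmetic: the writer pushes what quotausage reports as 10.0MB (the helper's arguments suggest 256 KB x 40), against a 100 MB limit, so the quota-faked filesystem reports 10240 KB used and 92160 KB free out of 102400 KB. As shell arithmetic:

    echo $(( 256 * 40 ))         # 10240 KB written (the reported 10.0MB)
    echo $(( 100 * 1024 ))       # 102400 KB quota-limited size
    echo $(( 102400 - 10240 ))   # 92160 KB left, the second df pattern
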
diff --git a/tests/bugs/ec/bug-1236065.t b/tests/bugs/ec/bug-1236065.t
new file mode 100644
index 00000000000..9181e73ec19
--- /dev/null
+++ b/tests/bugs/ec/bug-1236065.t
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+SCRIPT_TIMEOUT=400
+
+cleanup
+
+ec_test_dir=$M0/test
+
+function ec_test_generate_src()
+{
+ mkdir -p $ec_test_dir
+ for i in `seq 0 19`; do
+ dd if=/dev/zero of=$ec_test_dir/$i.c bs=1024 count=2
+ done
+}
+
+function ec_test_make()
+{
+ for i in `ls *.c`; do
+ file=`basename $i`
+ filename=${file%.*}
+ cp $i $filename.o
+ done
+}
+
+## step 1
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 7 redundancy 3 $H0:$B0/${V0}{0..6}
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "7" ec_child_up_count $V0 0
+
+## step 2
+TEST ec_test_generate_src
+
+cd $ec_test_dir
+TEST ec_test_make
+
+## step 3
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+EXPECT '5' online_brick_count
+
+TEST rm -f *.o
+TEST ec_test_make
+
+## step 4
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "7" online_brick_count
+
+# active heal
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+TEST $CLI volume heal $V0 full
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST rm -f *.o
+TEST ec_test_make
+
+## step 5
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT '5' online_brick_count
+
+TEST rm -f *.o
+TEST ec_test_make
+
+EXPECT '5' online_brick_count
+
+## step 6
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "7" online_brick_count
+
+# self-healing
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+TEST $CLI volume heal $V0 full
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST rm -f *.o
+TEST ec_test_make
+
+TEST pidof glusterd
+EXPECT "$V0" volinfo_field $V0 'Volume Name'
+EXPECT 'Started' volinfo_field $V0 'Status'
+EXPECT '7' online_brick_count
+## cleanup
+cd
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST rm -rf $B0/*
+
+cleanup;
diff --git a/tests/bugs/ec/bug-1251446.t b/tests/bugs/ec/bug-1251446.t
new file mode 100644
index 00000000000..5779c2e973b
--- /dev/null
+++ b/tests/bugs/ec/bug-1251446.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 4 redundancy 1 $H0:$B0/${V0}{0..3}
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "4" ec_child_up_count $V0 0
+
+TEST dd if=/dev/urandom of=$M0/test1 bs=1024k count=2
+cs=$(sha1sum $M0/test1 | awk '{ print $1 }')
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+EXPECT '3' online_brick_count
+
+TEST cp $M0/test1 $M0/test2
+EXPECT "$cs" echo $(sha1sum $M0/test2 | awk '{ print $1 }')
+
+TEST $CLI volume start $V0 force
+EXPECT '4' online_brick_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+TEST $CLI volume heal $V0 full
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "699392" stat -c "%s" $B0/${V0}0/test2
+
+# force cache clear
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "4" ec_child_up_count $V0 0
+
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT '3' online_brick_count
+
+EXPECT "$cs" echo $(sha1sum $M0/test2 | awk '{ print $1 }')
+
+## cleanup
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
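
The 699392 on-brick size follows from EC geometry: disperse 4 redundancy 1 leaves 3 data bricks, so each brick stores roughly a third of the 2 MiB file, rounded up to a 512-byte fragment alignment (the alignment is inferred from the asserted value). Checking with shell arithmetic:

    echo $(( 2 * 1024 * 1024 ))                         # 2097152-byte source file
    echo $(( (2097152 + 3*512 - 1) / (3*512) * 512 ))   # 699392 bytes per data brick
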
diff --git a/tests/bugs/ec/bug-1304988.t b/tests/bugs/ec/bug-1304988.t
new file mode 100755
index 00000000000..334e5c25932
--- /dev/null
+++ b/tests/bugs/ec/bug-1304988.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This test renames files back and forth between dir and test in a loop while
+# an add-brick and a rebalance are started; none of these operations should hang
+
+cleanup;
+
+function rename_files {
+while :
+do
+ for i in {1..100}; do mv $M0/dir/file-$i $M0/test/newfile-$i; done
+ for i in {1..100}; do mv $M0/test/newfile-$i $M0/dir/file-$i; done
+done
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+mkdir $M0/dir
+mkdir $M0/test
+touch $M0/dir/file-{1..100}
+rename_files &
+back_pid1=$!;
+echo "Started rename $back_pid1"
+
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{6..11}
+TEST $CLI volume rebalance $V0 start force
+
+#Test if rebalance completed with success.
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+echo "rebalance done..."
+kill $back_pid1
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1332022
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=1332022
diff --git a/tests/bugs/ec/bug-1547662.t b/tests/bugs/ec/bug-1547662.t
new file mode 100644
index 00000000000..5748218587e
--- /dev/null
+++ b/tests/bugs/ec/bug-1547662.t
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# Immediately after replace-brick, trusted.ec.version will be absent, so if it
+# is present we can assume that heal was started on root
+function root_heal_attempted {
+ if [ -z $(get_hex_xattr trusted.ec.version $1) ]; then
+ echo "N"
+ else
+ echo "Y"
+ fi
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST ${CLI} volume create ${V0} disperse 6 redundancy 2 ${H0}:${B0}/${V0}{0..5}
+TEST ${CLI} volume start ${V0}
+TEST ${GFS} --volfile-server ${H0} --volfile-id ${V0} ${M0}
+EXPECT_WITHIN ${CHILD_UP_TIMEOUT} "6" ec_child_up_count ${V0} 0
+
+TEST mkdir ${M0}/base
+TEST mkdir ${M0}/base/dir.{1,2}
+TEST mkdir ${M0}/base/dir.{1,2}/dir.{1,2}
+TEST mkdir ${M0}/base/dir.{1,2}/dir.{1,2}/dir.{1,2}
+TEST mkdir ${M0}/base/dir.{1,2}/dir.{1,2}/dir.{1,2}/dir.{1,2}
+TEST mkdir ${M0}/base/dir.{1,2}/dir.{1,2}/dir.{1,2}/dir.{1,2}/dir.{1,2}
+TEST mkdir ${M0}/base/dir.{1,2}/dir.{1,2}/dir.{1,2}/dir.{1,2}/dir.{1,2}/dir.{1,2}
+
+TEST ${CLI} volume replace-brick ${V0} ${H0}:${B0}/${V0}5 ${H0}:${B0}/${V0}6 commit force
+EXPECT_WITHIN ${CHILD_UP_TIMEOUT} "6" ec_child_up_count ${V0} 0
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "Y" glustershd_up_status
+EXPECT_WITHIN ${CHILD_UP_TIMEOUT} "6" ec_child_up_count_shd ${V0} 0
+EXPECT_WITHIN ${HEAL_TIMEOUT} "Y" root_heal_attempted ${B0}/${V0}6
+EXPECT_WITHIN ${HEAL_TIMEOUT} "^0$" get_pending_heal_count ${V0}
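+# Six levels of dir.{1,2} give 2+4+8+16+32+64 = 126 directories; with 'base'
+# itself that is 127 entries healed onto the replaced brick.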
+EXPECT "^127$" echo $(find ${B0}/${V0}6/base -type d | wc -l)
+
+cleanup;
diff --git a/tests/bugs/ec/bug-1699866-check-reopen-fd.t b/tests/bugs/ec/bug-1699866-check-reopen-fd.t
new file mode 100644
index 00000000000..4386d010318
--- /dev/null
+++ b/tests/bugs/ec/bug-1699866-check-reopen-fd.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume set $V0 disperse.background-heals 0
+TEST $CLI volume set $V0 write-behind off
+TEST $CLI volume set $V0 open-behind off
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+TEST mkdir -p $M0/dir
+
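+# Open an fd while one brick is down, then replace that brick; the write that
+# follows must transparently reopen the fd on the new brick to succeed.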
+fd="$(fd_available)"
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "5" ec_child_up_count $V0 0
+
+TEST fd_open ${fd} rw $M0/dir/test
+TEST fd_write ${fd} "test1"
+TEST $CLI volume replace-brick ${V0} $H0:$B0/${V0}0 $H0:$B0/${V0}0_1 commit force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+TEST fd_write ${fd} "test2"
+TEST fd_close ${fd}
+
+cleanup
diff --git a/tests/bugs/ec/bug-1708156-honor-inodelk-contention-notify-on-partial-locks.t b/tests/bugs/ec/bug-1708156-honor-inodelk-contention-notify-on-partial-locks.t
new file mode 100644
index 00000000000..67fdb184b46
--- /dev/null
+++ b/tests/bugs/ec/bug-1708156-honor-inodelk-contention-notify-on-partial-locks.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function do_ls() {
+ local dir="${1}"
+ local i
+
+ for i in {1..50}; do
+ ls -l $M0/${dir} >/dev/null &
+ ls -l $M1/${dir} >/dev/null &
+ ls -l $M2/${dir} >/dev/null &
+ ls -l $M3/${dir} >/dev/null &
+ done
+ wait
+}
+
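+# Echo the wall-clock time of "${@}" in milliseconds ('real' from time -p
+# scaled by 1000).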
+function measure_time() {
+ {
+ LC_ALL=C
+ time -p "${@}"
+ } 2>&1 | awk '/^real/ { print $2 * 1000 }'
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+
+TEST $CLI volume set $V0 disperse.eager-lock on
+TEST $CLI volume set $V0 disperse.other-eager-lock on
+TEST $CLI volume set $V0 features.locks-notify-contention on
+TEST $CLI volume set $V0 disperse.eager-lock-timeout 10
+TEST $CLI volume set $V0 disperse.other-eager-lock-timeout 10
+
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M1
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M2
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0 $M1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0 $M2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0 $M3
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file.{1..10}
+
+# Run multiple 'ls' concurrently from multiple clients so that they collide and
+# cause partial locks.
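+# With a 10s eager-lock timeout, ignoring contention notifications on partial
+# locks would presumably stall the listings until the timer expired; finishing
+# well under 10s shows the locks are handed over on contention instead.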
+TEST [[ $(measure_time do_ls dir) -lt 10000 ]]
+
+cleanup
diff --git a/tests/bugs/error-gen/bug-767095.t b/tests/bugs/error-gen/bug-767095.t
new file mode 100755
index 00000000000..6cc254f559d
--- /dev/null
+++ b/tests/bugs/error-gen/bug-767095.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+dump_dir='/tmp/gerrit_glusterfs'
+TEST mkdir -p $dump_dir;
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 error-gen posix;
+TEST $CLI volume set $V0 server.statedump-path $dump_dir;
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
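+# Grab the brick pid from 'volume status' and send SIGUSR1, which makes the
+# process write a statedump to the configured statedump-path.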
+TEST PID=`gluster --xml volume status $V0 | grep -A 8 ${V0}1 | grep '<pid>' | cut -d '>' -f 2 | cut -d '<' -f 1`
+TEST kill -USR1 $PID;
+sleep 2;
+for file_name in $(ls $dump_dir)
+do
+ TEST grep "error-gen.priv" $dump_dir/$file_name;
+done
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+TEST rm -rf $dump_dir;
+
+cleanup;
diff --git a/tests/bugs/fuse/bug-1030208.t b/tests/bugs/fuse/bug-1030208.t
new file mode 100644
index 00000000000..526283cf101
--- /dev/null
+++ b/tests/bugs/fuse/bug-1030208.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+#Test case: Hardlink test
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
+TEST $CLI volume start $V0
+
+# Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+#Create a file and perform fop on a DIR
+TEST touch $M0/foo
+TEST ls $M0/
+
+#Create hardlink
+TEST ln $M0/foo $M0/bar
+
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/fuse/bug-1126048.c b/tests/bugs/fuse/bug-1126048.c
new file mode 100644
index 00000000000..19165ecf6f7
--- /dev/null
+++ b/tests/bugs/fuse/bug-1126048.c
@@ -0,0 +1,43 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+
+/*
+ * This program opens a file and, to trigger a migration failure, unlinks the
+ * file and performs a graph switch (via the command passed in argv). If
+ * everything goes fine, fsync should fail without crashing the mount process.
+ */
+int
+main(int argc, char **argv)
+{
+ int ret = 0;
+ int fd = 0;
+ char *cmd = argv[1];
+ struct stat stbuf = {
+ 0,
+ };
+
+ printf("cmd is: %s\n", cmd);
+ fd = open("a.txt", O_CREAT | O_RDWR, 0644);
+ if (fd < 0)
+ printf("open failed: %s\n", strerror(errno));
+
+ ret = unlink("a.txt");
+ if (ret < 0)
+ printf("unlink failed: %s\n", strerror(errno));
+    if (write(fd, "abc", 3) < 0)
+        printf("write failed: %s\n", strerror(errno));
+ system(cmd);
+ sleep(1); /* No way to confirm graph switch so sleep 1 */
+ ret = fstat(fd, &stbuf);
+ if (ret < 0)
+ printf("fstat failed %\n", strerror(errno));
+ ret = fsync(fd);
+ if (ret < 0)
+ printf("Not able to fsync %s\n", strerror(errno));
+ return 0;
+}
diff --git a/tests/bugs/fuse/bug-1126048.t b/tests/bugs/fuse/bug-1126048.t
new file mode 100755
index 00000000000..5e2ed293cd3
--- /dev/null
+++ b/tests/bugs/fuse/bug-1126048.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
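+# The helper unlinks an open file, triggers a graph switch via add-brick and
+# then fsyncs; the fsync should fail with EBADF instead of crashing the mount.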
+function grep_for_ebadf {
+ $M0/bug-1126048 "gluster --mode=script --wignore volume add-brick $V0 $H0:$B0/brick2" | grep -i "Bad file descriptor"
+}
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=yes
+
+build_tester $(dirname $0)/bug-1126048.c
+
+TEST cp $(dirname $0)/bug-1126048 $M0
+cd $M0
+TEST grep_for_ebadf
+TEST ls -l $M0
+cd -
+TEST rm -f $(dirname $0)/bug-1126048
+cleanup;
diff --git a/tests/bugs/fuse/bug-1283103.t b/tests/bugs/fuse/bug-1283103.t
new file mode 100644
index 00000000000..56612534cb9
--- /dev/null
+++ b/tests/bugs/fuse/bug-1283103.t
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+#
+# https://bugzilla.redhat.com/show_bug.cgi?id=1283103
+#
+# Test that it is possible to set and get security.*
+# xattrs other than security.selinux, irrespective of
+# whether the mount was done with --selinux. This is
+# for example important for Samba to be able to store
+# the Windows-level acls in the security.NTACL xattr
+# when the acl_xattr vfs module is used.
+#
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
+TEST $CLI volume start $V0
+
+# Mount FUSE without selinux:
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+TESTFILE="$M0/testfile"
+TEST touch ${TESTFILE}
+
+TEST echo "setfattr -n security.foobar -v value ${TESTFILE}"
+TEST setfattr -n security.foobar -v value ${TESTFILE}
+TEST getfattr -n security.foobar ${TESTFILE}
+TEST setfattr -x security.foobar ${TESTFILE}
+
+# We cannot currently test the security.selinux xattrs
+# since the kernel intercepts them.
+# see https://bugzilla.redhat.com/show_bug.cgi?id=1272868
+#TEST ! getfattr -n security.selinux ${TESTFILE}
+#TEST ! setfattr -n security.selinux -v value ${TESTFILE}
+
+TEST umount $M0
+
+# Mount FUSE with selinux:
+TEST glusterfs -s $H0 --volfile-id $V0 --selinux $M0
+
+TEST setfattr -n security.foobar -v value ${TESTFILE}
+TEST getfattr -n security.foobar ${TESTFILE}
+TEST setfattr -x security.foobar ${TESTFILE}
+
+# We cannot currently test the security.selinux xattrs
+# since the kernel intercepts them.
+# see https://bugzilla.redhat.com/show_bug.cgi?id=1272868
+#TEST setfattr -n security.selinux -v value ${TESTFILE}
+#TEST getfattr -n security.selinux ${TESTFILE}
+
+cleanup;
diff --git a/tests/bugs/fuse/bug-1309462.t b/tests/bugs/fuse/bug-1309462.t
new file mode 100644
index 00000000000..975d72d82ed
--- /dev/null
+++ b/tests/bugs/fuse/bug-1309462.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+#
+# https://bugzilla.redhat.com/show_bug.cgi?id=1309462
+# Test the new fuse mount option --capability.
+# Set/get xattr requests on security.capability should be sent
+# down from fuse only if the --selinux or --capability option
+# is used for mounting.
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+
+#Create a distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
+TEST $CLI volume start $V0
+
+# Mount FUSE without selinux:
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+TESTFILE="$M0/testfile"
+TEST touch ${TESTFILE}
+
+TEST ! setfattr -n security.capability -v value ${TESTFILE}
+TEST ! getfattr -n security.capability ${TESTFILE}
+
+TEST umount $M0
+
+# Mount FUSE with selinux:
+TEST glusterfs -s $H0 --volfile-id $V0 --selinux $M0
+
+TEST setfattr -n security.capability -v value ${TESTFILE}
+TEST getfattr -n security.capability ${TESTFILE}
+TEST setfattr -x security.capability ${TESTFILE}
+
+TEST umount $M0
+
+# Mount FUSE with capability:
+TEST glusterfs -s $H0 --volfile-id $V0 --capability $M0
+
+TEST setfattr -n security.capability -v value ${TESTFILE}
+TEST getfattr -n security.capability ${TESTFILE}
+TEST setfattr -x security.capability ${TESTFILE}
+
+TEST umount $M0
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=1581735
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1581735
\ No newline at end of file
diff --git a/tests/bugs/fuse/bug-1336818.t b/tests/bugs/fuse/bug-1336818.t
new file mode 100644
index 00000000000..53286521742
--- /dev/null
+++ b/tests/bugs/fuse/bug-1336818.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+#Test case: OOM score adjust
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+# Prepare
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+# Basic check
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST umount $M0
+
+# Check valid value (< 0)
+TEST glusterfs --oom-score-adj=-1000 -s $H0 --volfile-id $V0 $M0
+TEST umount $M0
+
+# Check valid value (> 0)
+TEST glusterfs --oom-score-adj=1000 -s $H0 --volfile-id $V0 $M0
+TEST umount $M0
+
+# Check valid value (= 0)
+TEST glusterfs --oom-score-adj=0 -s $H0 --volfile-id $V0 $M0
+TEST umount $M0
+
+# Check invalid value (no value given)
+TEST ! glusterfs --oom-score-adj -s $H0 --volfile-id $V0 $M0
+
+# Check invalid value (< OOM_SCORE_ADJ_MIN)
+TEST ! glusterfs --oom-score-adj=-1001 -s $H0 --volfile-id $V0 $M0
+
+# Check invalid value (> OOM_SCORE_ADJ_MAX)
+TEST ! glusterfs --oom-score-adj=1001 -s $H0 --volfile-id $V0 $M0
+
+# Check invalid value (float)
+TEST ! glusterfs --oom-score-adj=12.34 -s $H0 --volfile-id $V0 $M0
+
+# Check invalid value (non-integer string)
+TEST ! glusterfs --oom-score-adj=qwerty -s $H0 --volfile-id $V0 $M0
+
+cleanup;
diff --git a/tests/bugs/fuse/bug-858215.t b/tests/bugs/fuse/bug-858215.t
new file mode 100755
index 00000000000..95999f6ad24
--- /dev/null
+++ b/tests/bugs/fuse/bug-858215.t
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+TEST $CLI volume set $V0 nfs.disable off
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --event-history=on -s $H0 --volfile-id $V0 $M0;
+
+## Check that the fops have been saved in the event-history
+TEST ! stat $M0/newfile;
+TEST touch $M0/newfile;
+TEST stat $M0/newfile;
+TEST rm $M0/newfile;
+
+nfs_pid=$(cat $GLUSTERD_PIDFILEDIR/nfs/nfs.pid || echo -1);
+glustershd_pid=`ps auxwww | grep glustershd | grep -v grep | awk -F " " '{print $2}'`
+TEST [ $glustershd_pid != 0 ];
+pids=$(pidof glusterfs);
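+# Pick the fuse mount process: the only glusterfs pid that is neither the
+# NFS server nor the self-heal daemon.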
+for i in $pids
+do
+ if [ $i -ne $nfs_pid ] && [ $i -ne $glustershd_pid ]; then
+ mount_pid=$i;
+ break;
+ fi
+done
+
+dump_dir='/tmp/gerrit_glusterfs'
+cat >$statedumpdir/glusterdump.options <<EOF
+all=yes
+path=$dump_dir
+EOF
+
+TEST mkdir -p $dump_dir;
+TEST kill -USR1 $mount_pid;
+sleep 2;
+for file_name in $(ls $dump_dir)
+do
+ TEST grep -q "xlator.mount.fuse.history" $dump_dir/$file_name;
+done
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+TEST rm -rf $dump_dir;
+TEST rm $statedumpdir/glusterdump.options;
+
+cleanup;
diff --git a/tests/bugs/fuse/bug-858488-min-free-disk.t b/tests/bugs/fuse/bug-858488-min-free-disk.t
new file mode 100644
index 00000000000..635dc04d1e6
--- /dev/null
+++ b/tests/bugs/fuse/bug-858488-min-free-disk.t
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+## Lets create partitions for bricks
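+## This test drives gfapi through graph switches: the helper binary loops over
+## metadata-heavy fops and large reads/writes while toggling stat-prefetch
+## (see bug-1093594.c).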
+TEST truncate -s 100M $B0/brick1
+TEST truncate -s 200M $B0/brick2
+TEST LO1=`SETUP_LOOP $B0/brick1`
+TEST MKFS_LOOP $LO1
+TEST LO2=`SETUP_LOOP $B0/brick2`
+TEST MKFS_LOOP $LO2
+TEST mkdir -p $B0/${V0}1 $B0/${V0}2
+TEST MOUNT_LOOP $LO1 $B0/${V0}1
+TEST MOUNT_LOOP $LO2 $B0/${V0}2
+
+## Lets create volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs -s $H0 --volfile-id=$V0 --acl $M0
+## Real test starts here
+## ----------------------------------------------------------------------------
+
+MINFREEDISKVALUE=90%
+
+## Set min free disk to MINFREEDISKVALUE percent
+TEST $CLI volume set $V0 cluster.min-free-disk $MINFREEDISKVALUE
+
+## We need to have file name to brick map based on hash.
+## We will use this info in test case 0.
+i=1
+CONTINUE=2
+BRICK1FILE=0
+BRICK2FILE=0
+while [[ $CONTINUE -ne 0 ]]
+do
+ dd if=/dev/zero of=$M0/file$i.data bs=1024 count=1024 1>/dev/null 2>&1
+
+ if [[ -e $B0/${V0}1/file$i.data && $BRICK1FILE = "0" ]]
+ then
+ BRICK1FILE=file$i.data
+ CONTINUE=$(( $CONTINUE - 1 ))
+ fi
+
+ if [[ -e $B0/${V0}2/file$i.data && $BRICK2FILE = "0" ]]
+ then
+ BRICK2FILE=file$i.data
+ CONTINUE=$(( $CONTINUE - 1 ))
+ fi
+
+ rm $M0/file$i.data
+ let i++
+done
+
+
+## Bring free space on one of the bricks to less than minfree value by
+## creating one big file.
+dd if=/dev/zero of=$M0/fillonebrick.data bs=1024 count=25600 1>/dev/null 2>&1
+
+#Lets find out where it was created
+if [ -f $B0/${V0}1/fillonebrick.data ]
+then
+ FILETOCREATE=$BRICK1FILE
+ OTHERBRICK=$B0/${V0}2
+else
+ FILETOCREATE=$BRICK2FILE
+ OTHERBRICK=$B0/${V0}1
+fi
+
+##--------------------------------TEST CASE 0-----------------------------------
+## If we try to create a file which should go into full brick as per hash, it
+## should go into the other brick instead.
+
+## Before that let us create files just to make gluster refresh the stat
+## Using touch so it should not change the disk usage stats
+for k in {1..20};
+do
+ touch $M0/dummyfile$k
+done
+
+dd if=/dev/zero of=$M0/$FILETOCREATE bs=1024 count=2048 1>/dev/null 2>&1
+TEST [ -e $OTHERBRICK/$FILETOCREATE ]
+## Done testing, lets clean up
+TEST rm -rf $M0/*
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+$CLI volume delete $V0;
+
+UMOUNT_LOOP ${B0}/${V0}{1,2}
+rm -f ${B0}/brick{1,2}
+
+cleanup;
diff --git a/tests/bugs/fuse/bug-924726.t b/tests/bugs/fuse/bug-924726.t
new file mode 100755
index 00000000000..2d3c7680798
--- /dev/null
+++ b/tests/bugs/fuse/bug-924726.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+TESTS_EXPECTED_IN_LOOP=10
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function get_socket_count() {
+ netstat -nap | grep $1 | wc -l
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST ls $M0
+
+GLFS_MNT_PID=`ps ax | grep "glusterfs -s $H0 \-\-volfile\-id $V0 $M0" | sed -e "s/^ *\([0-9]*\).*/\1/g"`
+
+SOCKETS_BEFORE_SWITCH=`netstat -nap | grep $GLFS_MNT_PID | grep ESTABLISHED | wc -l`
+
+for i in $(seq 1 5); do
+ TEST_IN_LOOP $CLI volume set $V0 performance.write-behind off;
+ sleep 1;
+ TEST_IN_LOOP $CLI volume set $V0 performance.write-behind on;
+ sleep 1;
+done
+
+SOCKETS_AFTER_SWITCH=`netstat -nap | grep $GLFS_MNT_PID | grep ESTABLISHED | wc -l`
+
+# The currently active graph is not cleaned up until some operation happens on
+# the mount point, hence there is one extra graph (and one extra socket).
+TEST [ $SOCKETS_AFTER_SWITCH = `expr $SOCKETS_BEFORE_SWITCH + 1` ]
+
+cleanup;
diff --git a/tests/bugs/fuse/bug-963678.t b/tests/bugs/fuse/bug-963678.t
new file mode 100644
index 00000000000..006181f26e1
--- /dev/null
+++ b/tests/bugs/fuse/bug-963678.t
@@ -0,0 +1,57 @@
+#!/bin/bash
+#
+# Bug 963678 - Test discard functionality
+#
+# Test that basic discard (hole punch) functionality works via the fallocate
+# command line tool. Hole punch deallocates a region of a file, creating a hole
+# and a zero-filled data region. We verify that hole punch works, frees blocks
+# and that subsequent reads do not read stale data (caches are invalidated).
+#
+# NOTE: fuse fallocate is known to be broken with regard to cache invalidation
+# up to 3.9.0 kernels. Therefore, FOPEN_KEEP_CACHE is not used in this
+# test (opens will invalidate the fuse cache).
+###
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../fallocate.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+# check for fallocate and hole punch support
+require_fallocate -l 1m $M0/file
+require_fallocate -p -l 512k $M0/file && rm -f $M0/file
+
+# allocate some blocks, punch a hole and verify block allocation
+TEST fallocate -l 1m $M0/file
+blksz=`stat -c %B $M0/file`
+nblks=`stat -c %b $M0/file`
+TEST [ $(($blksz * $nblks)) -ge 1048576 ]
+TEST fallocate -p -o 512k -l 128k $M0/file
+
+nblks=`stat -c %b $M0/file`
+# allow some room for xattr blocks
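+# 917504 = 1048576 - 131072, i.e. the original 1m minus the 128k hole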
+TEST [ $(($blksz * $nblks)) -lt $((917504 + 16384)) ]
+TEST unlink $M0/file
+
+# write some data, punch a hole and verify the file content changes
+TEST dd if=/dev/urandom of=$M0/file bs=1024k count=1
+TEST cp $M0/file $M0/file.copy.pre
+TEST fallocate -p -o 512k -l 128k $M0/file
+TEST cp $M0/file $M0/file.copy.post
+TEST ! cmp $M0/file.copy.pre $M0/file.copy.post
+TEST unlink $M0/file
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/fuse/bug-983477.t b/tests/bugs/fuse/bug-983477.t
new file mode 100755
index 00000000000..41ddd9e55a9
--- /dev/null
+++ b/tests/bugs/fuse/bug-983477.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This script checks that the use-readdirp mount option works as expected
+
+function get_use_readdirp_value {
+ local vol=$1
+ local statedump=$(generate_mount_statedump $vol)
+ sleep 1
+ local val=$(grep "use_readdirp=" $statedump | cut -f2 -d'=' | tail -1)
+ rm -f $statedump
+ echo $val
+}
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}
+TEST $CLI volume start $V0
+#If readdirp is enabled statedump should reflect it
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp=yes
+TEST cd $M0
+EXPECT "1" get_use_readdirp_value $V0
+TEST cd -
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#If readdirp is disabled the statedump should reflect it
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp=no
+TEST cd $M0
+EXPECT "0" get_use_readdirp_value $V0
+TEST cd -
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#Since the value is optional for this argument, specifying just "--use-readdirp" should turn it `on`, not `off`
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp
+TEST cd $M0
+EXPECT "1" get_use_readdirp_value $V0
+TEST cd -
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#By default it is enabled.
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST cd $M0
+EXPECT "1" get_use_readdirp_value $V0
+TEST cd -
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#Invalid values for use-readdirp should not be accepted
+TEST ! glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp=please-fail
+
+cleanup
diff --git a/tests/bugs/fuse/bug-985074.t b/tests/bugs/fuse/bug-985074.t
new file mode 100644
index 00000000000..ffa6df54144
--- /dev/null
+++ b/tests/bugs/fuse/bug-985074.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+#
+# Bug 985074 - Verify stale inode/dentry mappings are cleaned out.
+#
+# This test verifies that an inode/dentry mapping for a file removed via a
+# separate mount point is cleaned up appropriately. We create a file and hard
+# link from client 1. Next we remove the link via client 2. Finally, from client
+# 1 we attempt to rename the original filename to the name of the just removed
+# hard link.
+#
+# If the inode is not unlinked properly, the removed directory entry can resolve
+# to an inode (on the client that never saw the rm) that ends up passed down
+# through the lookup call. If md-cache holds valid metadata on the inode (due to
+# a large timeout value or recent lookup on the valid name), it is tricked into
+# returning a successful lookup that should have returned ENOENT. This manifests
+# as an error from the mv command in the following test sequence because file
+# and file.link resolve to the same file:
+#
+# # mv /mnt/glusterfs/0/file /mnt/glusterfs/0/file.link
+# mv: `/mnt/glusterfs/0/file' and `/mnt/glusterfs/0/file.link' are the same file
+#
+###
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M1 --entry-timeout=0 --attribute-timeout=0
+
+TEST touch $M0/file
+TEST ln $M0/file $M0/file.link
+TEST ls -ali $M0 $M1
+TEST rm -f $M1/file.link
+TEST ls -ali $M0 $M1
+
+TEST mv $M0/file $M0/file.link
+TEST stat $M0/file.link
+TEST ! stat $M0/file
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/fuse/many-groups-for-acl.t b/tests/bugs/fuse/many-groups-for-acl.t
new file mode 100755
index 00000000000..a51b1bc7267
--- /dev/null
+++ b/tests/bugs/fuse/many-groups-for-acl.t
@@ -0,0 +1,123 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+cleanup
+
+# prepare the users and groups
+NEW_USER=bug1246275
+NEW_UID=1246275
+NEW_GID=1246275
+LAST_GID=1246403
+NEW_GIDS=${NEW_GID}
+
+# OS-specific overrides
+case $OSTYPE in
+NetBSD|Darwin)
+ # no ACLs, and only NGROUPS_MAX=16 secondary groups are supported
+ SKIP_TESTS
+ exit 0
+ ;;
+FreeBSD)
+ # NGROUPS_MAX=1023 (FreeBSD>=8.0), we can afford 200 groups
+ ;;
+Linux)
+ # NGROUPS_MAX=65536, we can afford 200 groups
+ ;;
+*)
+ ;;
+esac
+
+# create a user that belongs to many groups
+for GID in $(seq -f '%6.0f' ${NEW_GID} ${LAST_GID})
+do
+ groupadd -o -g ${GID} ${NEW_USER}-${GID}
+ NEW_GIDS="${NEW_GIDS},${NEW_USER}-${GID}"
+done
+TEST useradd -o -M -u ${NEW_UID} -g ${NEW_GID} -G ${NEW_USER}-${NEW_GIDS} ${NEW_USER}
+
+# Linux < 3.8 exports only first 32 gids of pid to userspace
+kernel_exports_few_gids=0
+if [ "$OSTYPE" = Linux ] && \
+ su -m ${NEW_USER} -c "grep ^Groups: /proc/self/status | wc -w | xargs -I@ expr @ - 1 '<' $LAST_GID - $NEW_GID + 1" > /dev/null; then
+ kernel_exports_few_gids=1
+fi
+
+# preparation done, start the tests
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create ${V0} ${H0}:${B0}/${V0}1
+TEST $CLI volume set $V0 nfs.disable off
+# disable manage-gids on the server-side for now, gets enabled later
+TEST $CLI volume set ${V0} server.manage-gids off
+TEST $CLI volume start ${V0}
+
+# This is just a synchronization hack to make sure the bricks are
+# up before going on.
+EXPECT_WITHIN ${NFS_EXPORT_TIMEOUT} "1" is_nfs_export_available
+
+# mount the volume with POSIX ACL support, without --resolve-gids
+TEST glusterfs --acl --volfile-id=/${V0} --volfile-server=${H0} ${M0}
+
+# create some directories for testing
+TEST mkdir ${M0}/first-32-gids-1
+TEST setfacl -m g:${NEW_UID}:rwx ${M0}/first-32-gids-1
+TEST mkdir ${M0}/first-32-gids-2
+TEST setfacl -m g:$[NEW_UID+16]:rwx ${M0}/first-32-gids-2
+TEST mkdir ${M0}/gid-64
+TEST setfacl -m g:$[NEW_UID+64]:rwx ${M0}/gid-64
+TEST mkdir ${M0}/gid-120
+TEST setfacl -m g:$[NEW_UID+120]:rwx ${M0}/gid-120
+
+su -m ${NEW_USER} -c "touch ${M0}/first-32-gids-1/success > /dev/null"
+TEST [ $? -eq 0 ]
+
+su -m ${NEW_USER} -c "touch ${M0}/first-32-gids-2/success > /dev/null"
+TEST [ $? -eq 0 ]
+
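+# Without --resolve-gids only the first 32 gids reach the server on older
+# Linux kernels, so this touch succeeds (exit 0) exactly when
+# kernel_exports_few_gids is 0.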
+su -m ${NEW_USER} -c "touch ${M0}/gid-64/success--if-all-gids-exported > /dev/null"
+TEST [ $? -eq $kernel_exports_few_gids ]
+
+su -m ${NEW_USER} -c "touch ${M0}/gid-120/failure > /dev/null"
+TEST [ $? -ne 0 ]
+
+# unmount and remount with --resolve-gids
+EXPECT_WITHIN ${UMOUNT_TIMEOUT} "Y" force_umount ${M0}
+TEST glusterfs --acl --resolve-gids --volfile-id=/${V0} --volfile-server=${H0} ${M0}
+
+su -m ${NEW_USER} -c "touch ${M0}/gid-64/success > /dev/null"
+TEST [ $? -eq 0 ]
+
+su -m ${NEW_USER} -c "touch ${M0}/gid-120/failure > /dev/null"
+TEST [ $? -ne 0 ]
+
+# enable server-side resolving of the groups
+# stopping and starting is not really needed, but it prevents races
+TEST $CLI volume stop ${V0}
+TEST $CLI volume set ${V0} server.manage-gids on
+TEST $CLI volume start ${V0}
+EXPECT_WITHIN ${NFS_EXPORT_TIMEOUT} "1" is_nfs_export_available
+
+# unmount and remount to prevent more race conditions on test systems
+EXPECT_WITHIN ${UMOUNT_TIMEOUT} "Y" force_umount ${M0}
+TEST glusterfs --acl --resolve-gids --volfile-id=/${V0} --volfile-server=${H0} ${M0}
+
+su -m ${NEW_USER} -c "touch ${M0}/gid-120/success > /dev/null"
+TEST [ $? -eq 0 ]
+
+# cleanup
+userdel --force ${NEW_USER}
+for GID in $(seq -f '%6.0f' ${NEW_GID} ${LAST_GID})
+do
+ groupdel ${NEW_USER}-${GID}
+done
+
+EXPECT_WITHIN ${UMOUNT_TIMEOUT} "Y" force_umount ${M0}
+
+TEST ${CLI} volume stop ${V0}
+TEST ${CLI} volume delete ${V0}
+
+cleanup
diff --git a/tests/bugs/fuse/setup.sh b/tests/bugs/fuse/setup.sh
new file mode 100755
index 00000000000..d44d8dba027
--- /dev/null
+++ b/tests/bugs/fuse/setup.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+#. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
+TEST $CLI volume start $V0
+
+# Mount FUSE without selinux:
+TEST glusterfs -s $H0 --volfile-id $V0 $@ $M0
+
+echo "Gluster started and volume '$V0' mounted under '$M0'"
diff --git a/tests/bugs/fuse/teardown.sh b/tests/bugs/fuse/teardown.sh
new file mode 100755
index 00000000000..5d57974e1f9
--- /dev/null
+++ b/tests/bugs/fuse/teardown.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
diff --git a/tests/bugs/geo-replication/bug-1111490.t b/tests/bugs/geo-replication/bug-1111490.t
new file mode 100644
index 00000000000..9686fb5a0ef
--- /dev/null
+++ b/tests/bugs/geo-replication/bug-1111490.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume start $V0
+
+# mount with auxiliary gfid mount
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 --aux-gfid-mount
+
+# create file with specific gfid
+uuid=`uuidgen`
+TEST chown 10:10 $M0
+EXPECT "File creation OK" $PYTHON $(dirname $0)/../../utils/gfid-access.py \
+ $M0 ROOT file0 $uuid file 10 10 0644
+
+# check gfid
+EXPECT "$uuid" getfattr --only-values -n glusterfs.gfid.string $M0/file0
+
+# unmount and mount again so as to start with a fresh inode table
+# or use another mount...
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0 --aux-gfid-mount
+
+# touch the file again (gfid-access.py handles errno)
+EXPECT "File creation OK" $PYTHON $(dirname $0)/../../utils/gfid-access.py \
+ $M0 ROOT file0 $uuid file 10 10 0644
+
+cleanup;
diff --git a/tests/bugs/geo-replication/bug-1296496.t b/tests/bugs/geo-replication/bug-1296496.t
new file mode 100644
index 00000000000..a157be7849a
--- /dev/null
+++ b/tests/bugs/geo-replication/bug-1296496.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+## Start and create a replicated volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+
+TEST $CLI volume set $V0 indexing on
+
+TEST $CLI volume start $V0;
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+## Mount client-pid=-1
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 --client-pid=-1 $M1
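+# (client-pid=-1 marks the mount as an internal, gsyncd-style client, which is
+# why the marker xattrs are visible here but not on the regular mount)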
+
+TEST touch $M0
+
+vol_uuid=$(gluster vol info $V0 | grep "Volume ID" | awk '{print $3}')
+
+xtime="trusted.glusterfs.$vol_uuid.xtime"
+
+#TEST xtime
+TEST ! getfattr -n $xtime $M0
+TEST getfattr -n $xtime $B0/${V0}-0
+TEST getfattr -n $xtime $B0/${V0}-1
+
+#TEST stime
+slave_uuid=$(uuidgen)
+stime="trusted.glusterfs.$vol_uuid.$slave_uuid.stime"
+TEST setfattr -n $stime -v "0xFFFE" $B0/${V0}-0
+TEST setfattr -n $stime -v "0xFFFF" $B0/${V0}-1
+
+TEST ! getfattr -n $stime $M0
+TEST getfattr -n $stime $M1
+
+cleanup;
diff --git a/tests/bugs/geo-replication/bug-877293.t b/tests/bugs/geo-replication/bug-877293.t
new file mode 100755
index 00000000000..c5205e8109e
--- /dev/null
+++ b/tests/bugs/geo-replication/bug-877293.t
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+TEST glusterd
+TEST pidof glusterd
+
+## Start and create a replicated volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+
+TEST $CLI volume set $V0 indexing on
+
+TEST $CLI volume start $V0;
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+## Mount client-pid=-1
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 --client-pid=-1 $M1
+
+TEST touch $M0
+
+vol_uuid=`getfattr -n trusted.glusterfs.volume-mark -ehex $M1 | sed -n 's/^trusted.glusterfs.volume-mark=0x//p' | cut -b5-36 | sed 's/\([a-f0-9]\{8\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)/\1-\2-\3-\4-/'`
+xtime=trusted.glusterfs.$vol_uuid.xtime
+
+TEST "getfattr -n $xtime $B0/${V0}-0 | grep -q ${xtime}="
+
+TEST kill_brick $V0 $H0 $B0/${V0}-0
+
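+# The xtime xattr must still be present on the surviving brick after one
+# replica goes down.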
+TEST "getfattr -n $xtime $B0/${V0}-1 | grep -q ${xtime}="
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup
diff --git a/tests/bugs/gfapi/bug-1032894.t b/tests/bugs/gfapi/bug-1032894.t
new file mode 100644
index 00000000000..88d110136e2
--- /dev/null
+++ b/tests/bugs/gfapi/bug-1032894.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#Check stale indices are deleted as part of self-heal-daemon crawl.
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+cd $M0
+TEST mkdir a
+cd a
+TEST kill_brick $V0 $H0 $B0/${V0}0
+# Create stale indices
+for i in {1..10}; do echo abc > $i; done
+for i in {1..10}; do rm -f $i; done
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+
+#Since the maximum depth of the directory structure that needs healing is 2,
+#trigger two self-heals. That should make sure the heal is complete.
+TEST $CLI volume heal $V0
+
+EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_index_count $B0/${V0}1
+cleanup
diff --git a/tests/bugs/gfapi/bug-1093594.c b/tests/bugs/gfapi/bug-1093594.c
new file mode 100644
index 00000000000..f7a06dd5ba8
--- /dev/null
+++ b/tests/bugs/gfapi/bug-1093594.c
@@ -0,0 +1,311 @@
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define WRITE_SIZE (128 * 1024)
+#define READ_WRITE_LOOP 100
+#define FOP_LOOP_COUNT 20
+#define TEST_CASE_LOOP 20
+
+int gfapi = 1;
+static int extension = 1;
+
+static int
+large_number_of_fops(glfs_t *fs)
+{
+ int ret = 0;
+ int i = 0;
+ glfs_fd_t *fd = NULL;
+ glfs_fd_t *fd1 = NULL;
+ char *dir1 = NULL, *dir2 = NULL, *filename1 = NULL, *filename2 = NULL;
+ char *buf = NULL;
+ struct stat sb = {
+ 0,
+ };
+
+ for (i = 0; i < FOP_LOOP_COUNT; i++) {
+ ret = asprintf(&dir1, "dir%d", extension);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct filename (%s)", strerror(errno));
+ return ret;
+ }
+
+ extension++;
+
+ ret = glfs_mkdir(fs, dir1, 0755);
+ if (ret < 0) {
+ fprintf(stderr, "mkdir(%s): %s\n", dir1, strerror(errno));
+ return -1;
+ }
+
+ fd = glfs_opendir(fs, dir1);
+ if (!fd) {
+ fprintf(stderr, "/: %s\n", strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_fsetxattr(fd, "user.dirfattr", "fsetxattr", 8, 0);
+ if (ret < 0) {
+ fprintf(stderr, "fsetxattr(%s): %d (%s)\n", dir1, ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_closedir(fd);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_closedir failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_rmdir(fs, dir1);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_unlink failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = asprintf(&filename1, "file%d", extension);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct filename (%s)", strerror(errno));
+ return ret;
+ }
+
+ ret = asprintf(&filename2, "file-%d", extension);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct filename (%s)", strerror(errno));
+ return ret;
+ }
+
+ extension++;
+
+ fd = glfs_creat(fs, filename1, O_RDWR, 0644);
+ if (!fd) {
+ fprintf(stderr, "%s: (%p) %s\n", filename1, fd, strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_rename(fs, filename1, filename2);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_rename failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_lstat(fs, filename2, &sb);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_lstat failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_close(fd);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_close failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_unlink(fs, filename2);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_unlink failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+    }
+
+    return 0;
+}
+
+static int
+large_read_write(glfs_t *fs)
+{
+ int ret = 0;
+ int j = 0;
+ glfs_fd_t *fd = NULL;
+ glfs_fd_t *fd1 = NULL;
+ char *filename = NULL;
+ char *buf = NULL;
+
+ ret = asprintf(&filename, "filerw%d", extension);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct filename (%s)", strerror(errno));
+ return ret;
+ }
+
+ extension++;
+
+ fd = glfs_creat(fs, filename, O_RDWR, 0644);
+ if (!fd) {
+ fprintf(stderr, "%s: (%p) %s\n", filename, fd, strerror(errno));
+ return -1;
+ }
+
+ buf = (char *)malloc(WRITE_SIZE);
+ memset(buf, '-', WRITE_SIZE);
+
+ for (j = 0; j < READ_WRITE_LOOP; j++) {
+ ret = glfs_write(fd, buf, WRITE_SIZE, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Write(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ return ret;
+ }
+ }
+
+ fd1 = glfs_open(fs, filename, O_RDWR);
+ if (fd1 < 0) {
+ fprintf(stderr, "Open(%s): %d (%s)\n", filename, ret, strerror(errno));
+ return -1;
+ }
+
+ glfs_lseek(fd1, 0, SEEK_SET);
+ for (j = 0; j < READ_WRITE_LOOP; j++) {
+ ret = glfs_read(fd1, buf, WRITE_SIZE, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Read(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ return ret;
+ }
+ }
+
+ for (j = 0; j < READ_WRITE_LOOP; j++) {
+ ret = glfs_write(fd1, buf, WRITE_SIZE, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Write(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ return ret;
+ }
+ }
+
+ glfs_close(fd);
+ glfs_close(fd1);
+ ret = glfs_unlink(fs, filename);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_unlink failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ free(buf);
+    free(filename);
+
+    return 0;
+}
+
+static int
+volfile_change(const char *volname)
+{
+ int ret = 0;
+ char *cmd = NULL, *cmd1 = NULL;
+
+ ret = asprintf(&cmd, "gluster volume set %s stat-prefetch off", volname);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct cli command string (%s)",
+ strerror(errno));
+ return ret;
+ }
+
+ ret = asprintf(&cmd1, "gluster volume set %s stat-prefetch on", volname);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct cli command string (%s)",
+ strerror(errno));
+ return ret;
+ }
+
+ ret = system(cmd);
+ if (ret < 0) {
+ fprintf(stderr, "stat-prefetch off on (%s) failed", volname);
+ return ret;
+ }
+
+ ret = system(cmd1);
+ if (ret < 0) {
+ fprintf(stderr, "stat-prefetch on on (%s) failed", volname);
+ return ret;
+ }
+
+ free(cmd);
+ free(cmd1);
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs = NULL;
+ int ret = 0;
+ int i = 0;
+ glfs_fd_t *fd = NULL;
+ glfs_fd_t *fd1 = NULL;
+ char *topdir = "topdir", *filename = "file1";
+ char *buf = NULL;
+ char *logfile = NULL;
+ char *hostname = NULL;
+
+ if (argc != 4) {
+ fprintf(stderr,
+ "Expect following args %s <hostname> <Vol> <log file>\n",
+ argv[0]);
+ return -1;
+ }
+
+ hostname = argv[1];
+ logfile = argv[3];
+
+ for (i = 0; i < TEST_CASE_LOOP; i++) {
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL (%s)\n", strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_set_volfile_server failed ret:%d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_set_logging failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = large_number_of_fops(fs);
+ if (ret < 0)
+ return -1;
+
+ ret = large_read_write(fs);
+ if (ret < 0)
+ return -1;
+
+ ret = volfile_change(argv[2]);
+ if (ret < 0)
+ return -1;
+
+ ret = large_number_of_fops(fs);
+ if (ret < 0)
+ return -1;
+
+ ret = large_read_write(fs);
+ if (ret < 0)
+ return -1;
+
+ ret = glfs_fini(fs);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_fini failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+ }
+ return 0;
+}
diff --git a/tests/bugs/gfapi/bug-1093594.t b/tests/bugs/gfapi/bug-1093594.t
new file mode 100755
index 00000000000..0c220b3b007
--- /dev/null
+++ b/tests/bugs/gfapi/bug-1093594.t
@@ -0,0 +1,22 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0;
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/bug-1093594.c -lgfapi
+TEST $(dirname $0)/bug-1093594 $H0 $V0 $logdir/bug-1093594.log
+
+cleanup_tester $(dirname $0)/bug-1093594
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1371541
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=1371541
diff --git a/tests/bugs/gfapi/bug-1319374-THIS-crash.t b/tests/bugs/gfapi/bug-1319374-THIS-crash.t
new file mode 100755
index 00000000000..8d3db421c88
--- /dev/null
+++ b/tests/bugs/gfapi/bug-1319374-THIS-crash.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume set $V0 diagnostics.client-log-flush-timeout 30
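+# The helper sleeps for 31 seconds so that the 30s log-flush timer fires after
+# one of the glfs contexts has already been finalized (see bug-1319374.c).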
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/bug-1319374.c -lgfapi
+TEST $(dirname $0)/bug-1319374 $H0 $V0 $logdir/bug-1319374.log
+
+cleanup_tester $(dirname $0)/bug-1319374
+
+cleanup;
diff --git a/tests/bugs/gfapi/bug-1319374.c b/tests/bugs/gfapi/bug-1319374.c
new file mode 100644
index 00000000000..ea0dfb6b0f2
--- /dev/null
+++ b/tests/bugs/gfapi/bug-1319374.c
@@ -0,0 +1,131 @@
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define NO_INIT 1
+
+glfs_t *
+setup_new_client(char *hostname, char *volname, char *log_file, int flag)
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ fprintf(stderr, "\nglfs_new: returned NULL (%s)\n", strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf(stderr, "\nglfs_set_volfile_server failed ret:%d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_logging(fs, log_file, 7);
+ if (ret < 0) {
+ fprintf(stderr, "\nglfs_set_logging failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+ if (flag == NO_INIT)
+ goto out;
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ fprintf(stderr, "\nglfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+out:
+ return fs;
+error:
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ glfs_t *fs1 = NULL;
+ glfs_t *fs2 = NULL;
+ glfs_t *fs3 = NULL;
+ char *volname = NULL;
+ char *log_file = NULL;
+ char *hostname = NULL;
+
+ if (argc != 4) {
+ fprintf(
+ stderr,
+ "Expect following args %s <hostname> <Vol> <log file location>\n",
+ argv[0]);
+ return -1;
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ log_file = argv[3];
+
+ fs1 = setup_new_client(hostname, volname, log_file, NO_INIT);
+ if (!fs1) {
+ fprintf(stderr, "\nsetup_new_client: returned NULL (%s)\n",
+ strerror(errno));
+ goto error;
+ }
+
+ fs2 = setup_new_client(hostname, volname, log_file, 0);
+ if (!fs2) {
+ fprintf(stderr, "\nsetup_new_client: returned NULL (%s)\n",
+ strerror(errno));
+ goto error;
+ }
+
+ fs3 = setup_new_client(hostname, volname, log_file, 0);
+ if (!fs3) {
+ fprintf(stderr, "\nsetup_new_client: returned NULL (%s)\n",
+ strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_fini(fs3);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_fini failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+    /* The crash is seen in gf_log_flush_timeout_cbk(), which gets triggered
+     * when the 30s timer expires, hence the sleep of 31s.
+     */
+ sleep(31);
+ ret = glfs_fini(fs2);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_fini failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_init(fs1);
+ if (ret < 0) {
+ fprintf(stderr, "\nglfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_fini(fs1);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_fini failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+ return 0;
+error:
+ return -1;
+}
diff --git a/tests/bugs/gfapi/bug-1447266/1460514.c b/tests/bugs/gfapi/bug-1447266/1460514.c
new file mode 100644
index 00000000000..c721559a668
--- /dev/null
+++ b/tests/bugs/gfapi/bug-1447266/1460514.c
@@ -0,0 +1,150 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define LOG_ERR(func, ret) \
+ do { \
+ if (ret != 0) { \
+ fprintf(stderr, "%s : returned error %d (%s)\n", func, ret, \
+ strerror(errno)); \
+ goto out; \
+ } else { \
+ fprintf(stderr, "%s : returned %d\n", func, ret); \
+ } \
+ } while (0)
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+ struct glfs_object *root = NULL, *dir = NULL, *subdir = NULL;
+ struct stat sb = {
+ 0,
+ };
+ char *dirname = "dir";
+ char *subdirname = "subdir";
+ char *logfile = NULL;
+ char *volname = NULL;
+ char *hostname = NULL;
+ unsigned char subdir_handle[GFAPI_HANDLE_LENGTH] = {'\0'};
+
+ if (argc != 4) {
+ fprintf(stderr, "Invalid argument\n");
+ exit(1);
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ logfile = argv[3];
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ LOG_ERR("glfs_set_volfile_server", ret);
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ LOG_ERR("glfs_set_logging", ret);
+
+ ret = glfs_init(fs);
+ LOG_ERR("first attempt glfs_init", ret);
+
+ root = glfs_h_lookupat(fs, NULL, "/", &sb, 0);
+ if (root == NULL) {
+ fprintf(stderr, "glfs_h_lookupat: error on lookup of / ,%s\n",
+ strerror(errno));
+ goto out;
+ }
+ dir = glfs_h_mkdir(fs, root, dirname, 0644, &sb);
+ if (dir == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error on directory creation dir ,%s\n",
+ strerror(errno));
+ goto out;
+ }
+ subdir = glfs_h_mkdir(fs, root, subdirname, 0644, &sb);
+ if (subdir == NULL) {
+ fprintf(stderr,
+ "glfs_h_mkdir: error on directory creation subdir ,%s\n",
+ strerror(errno));
+ goto out;
+ }
+ ret = glfs_h_extract_handle(subdir, subdir_handle, GFAPI_HANDLE_LENGTH);
+ if (ret < 0) {
+ fprintf(stderr,
+ "glfs_h_extract_handle: error extracting handle of %s: %s\n",
+ subdirname, strerror(errno));
+ goto out;
+ }
+
+ glfs_h_close(subdir);
+ subdir = NULL;
+ glfs_h_close(dir);
+ dir = NULL;
+
+ if (fs) {
+ ret = glfs_fini(fs);
+ fprintf(stderr, "glfs_fini(fs) returned %d \n", ret);
+ }
+
+ fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ ret = -1;
+ goto out;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ LOG_ERR("glfs_set_volfile_server", ret);
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ LOG_ERR("glfs_set_logging", ret);
+
+ ret = glfs_init(fs);
+ LOG_ERR("second attempt glfs_init", ret);
+
+ subdir = glfs_h_create_from_handle(fs, subdir_handle, GFAPI_HANDLE_LENGTH,
+ &sb);
+ if (subdir == NULL) {
+ fprintf(
+ stderr,
+ "glfs_h_create_from_handle: error on create of %s: from (%p),%s\n",
+ subdirname, subdir_handle, strerror(errno));
+ goto out;
+ }
+ dir = glfs_h_lookupat(fs, subdir, "..", &sb, 0);
+ if (dir == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on directory lookup dir using .. ,%s\n",
+ strerror(errno));
+ goto out;
+ }
+
+out:
+ if (subdir)
+ glfs_h_close(subdir);
+ if (dir)
+ glfs_h_close(dir);
+
+ if (fs) {
+ ret = glfs_fini(fs);
+ fprintf(stderr, "glfs_fini(fs) returned %d \n", ret);
+ }
+
+ if (ret)
+ exit(1);
+ exit(0);
+}
diff --git a/tests/bugs/gfapi/bug-1447266/1460514.t b/tests/bugs/gfapi/bug-1447266/1460514.t
new file mode 100644
index 00000000000..594af75cae2
--- /dev/null
+++ b/tests/bugs/gfapi/bug-1447266/1460514.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/1460514.c -lgfapi -o $(dirname $0)/1460514
+TEST ./$(dirname $0)/1460514 $H0 $V0 $logdir/1460514.log
+
+cleanup_tester $(dirname $0)/1460514
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/gfapi/bug-1447266/bug-1447266.c b/tests/bugs/gfapi/bug-1447266/bug-1447266.c
new file mode 100644
index 00000000000..2b7e2d627fe
--- /dev/null
+++ b/tests/bugs/gfapi/bug-1447266/bug-1447266.c
@@ -0,0 +1,107 @@
+#include <glusterfs/api/glfs.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#define TOTAL_ARGS 4
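+/*
+ * Resolve the given path via glfs_chdir/glfs_getcwd/glfs_realpath/glfs_stat;
+ * the driver script feeds in ".", ".." and .snaps combinations to exercise
+ * gfapi path resolution.
+ */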
+int
+main(int argc, char *argv[])
+{
+    char *cwd = (char *)malloc(PATH_MAX * sizeof(char));
+ char *resolved = NULL;
+ char *result = NULL;
+ char *buf = NULL;
+ struct stat st;
+ char *path = NULL;
+ int ret;
+
+ if (argc != TOTAL_ARGS) {
+ printf(
+ "Please give all required command line args.\n"
+ "Format : <volname> <server_ip> <path_name>\n");
+ goto out;
+ }
+
+ glfs_t *fs = glfs_new(argv[1]);
+
+ if (fs == NULL) {
+ printf("glfs_new: %s\n", strerror(errno));
+ /* No need to fail the test for this error */
+ ret = 0;
+ goto out;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", argv[2], 24007);
+ if (ret) {
+ printf("glfs_set_volfile_server: %s\n", strerror(errno));
+ /* No need to fail the test for this error */
+ ret = 0;
+ goto out;
+ }
+
+ path = argv[3];
+
+ ret = glfs_set_logging(fs, "/tmp/gfapi.log", 7);
+ if (ret) {
+ printf("glfs_set_logging: %s\n", strerror(errno));
+ /* No need to fail the test for this error */
+ ret = 0;
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret) {
+ printf("glfs_init: %s\n", strerror(errno));
+ /* No need to fail the test for this error */
+ ret = 0;
+ goto out;
+ }
+
+ sleep(1);
+
+ ret = glfs_chdir(fs, path);
+ if (ret) {
+ printf("glfs_chdir: %s\n", strerror(errno));
+ goto out;
+ }
+
+ buf = glfs_getcwd(fs, cwd, PATH_MAX);
+    if (buf == NULL) {
+ printf("glfs_getcwd: %s\n", strerror(errno));
+ goto out;
+ }
+
+ printf("\ncwd = %s\n\n", cwd);
+
+ result = glfs_realpath(fs, path, resolved);
+ if (result == NULL) {
+ printf("glfs_realpath: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = glfs_stat(fs, path, &st);
+ if (ret) {
+ printf("glfs_stat: %s\n", strerror(errno));
+ goto out;
+ }
+ if (cwd)
+ free(cwd);
+
+ result = glfs_realpath(fs, path, resolved);
+ if (result == NULL) {
+ printf("glfs_realpath: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = glfs_fini(fs);
+ if (ret) {
+ printf("glfs_fini: %s\n", strerror(errno));
+ /* No need to fail the test for this error */
+ ret = 0;
+ goto out;
+ }
+
+ printf("\n");
+out:
+ return ret;
+}
diff --git a/tests/bugs/gfapi/bug-1447266/bug-1447266.t b/tests/bugs/gfapi/bug-1447266/bug-1447266.t
new file mode 100644
index 00000000000..45547f4f0e7
--- /dev/null
+++ b/tests/bugs/gfapi/bug-1447266/bug-1447266.t
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../snapshot.rc
+
+cleanup;
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+
+TEST glusterd;
+
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
+TEST $CLI volume set $V0 nfs.disable false
+
+
+TEST $CLI volume start $V0;
+
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+for i in {1..10} ; do echo "file" > $M0/file$i ; done
+
+# Create file and hard-links
+TEST touch $M0/f1
+TEST mkdir $M0/dir
+TEST ln $M0/f1 $M0/f2
+TEST ln $M0/f1 $M0/dir/f3
+
+TEST $CLI snapshot config activate-on-create enable
+TEST $CLI volume set $V0 features.uss enable;
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp;
+
+for i in {11..20} ; do echo "file" > $M0/file$i ; done
+
+TEST $CLI snapshot create snap2 $V0 no-timestamp;
+TEST build_tester $(dirname $0)/bug-1447266.c -lgfapi
+
+#Testing starts from here
+
+TEST $(dirname $0)/bug-1447266 $V0 $H0 "/.."
+TEST $(dirname $0)/bug-1447266 $V0 $H0 "/."
+TEST $(dirname $0)/bug-1447266 $V0 $H0 "/../."
+TEST $(dirname $0)/bug-1447266 $V0 $H0 "/../.."
+TEST $(dirname $0)/bug-1447266 $V0 $H0 "/dir/../."
+#Since dir1 is not present, this test should fail
+TEST ! $(dirname $0)/bug-1447266 $V0 $H0 "/dir/../dir1"
+TEST $(dirname $0)/bug-1447266 $V0 $H0 "/dir/.."
+TEST $(dirname $0)/bug-1447266 $V0 $H0 "/.snaps"
+TEST $(dirname $0)/bug-1447266 $V0 $H0 "/.snaps/."
+#Since snap3 is not present, this test should fail
+TEST ! $(dirname $0)/bug-1447266 $V0 $H0 "/.snaps/.././snap3"
+TEST $(dirname $0)/bug-1447266 $V0 $H0 "/.snaps/../."
+TEST $(dirname $0)/bug-1447266 $V0 $H0 "/.snaps/./snap1/./../snap1/dir/."
+
+cleanup_tester $(dirname $0)/bug-1447266
+cleanup;
diff --git a/tests/bugs/gfapi/bug-1630804/gfapi-bz1630804.c b/tests/bugs/gfapi/bug-1630804/gfapi-bz1630804.c
new file mode 100644
index 00000000000..d151784627c
--- /dev/null
+++ b/tests/bugs/gfapi/bug-1630804/gfapi-bz1630804.c
@@ -0,0 +1,112 @@
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define VALIDATE_AND_GOTO_LABEL_ON_ERROR(func, ret, label) \
+ do { \
+ if (ret < 0) { \
+ fprintf(stderr, "%s : returned error %d (%s)\n", func, ret, \
+ strerror(errno)); \
+ goto label; \
+ } \
+ } while (0)
+
+int
+main(int argc, char *argv[])
+{
+ int ret = -1;
+ int flags = O_WRONLY | O_CREAT | O_TRUNC;
+ int do_write = 0;
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd1 = NULL;
+ glfs_fd_t *fd2 = NULL;
+ char *volname = NULL;
+ char *logfile = NULL;
+ const char *dirname = "/some_dir1";
+ const char *filename = "/some_dir1/testfile";
+ const char *short_filename = "testfile";
+ struct stat sb;
+ char buf[512];
+ struct dirent *entry = NULL;
+
+ if (argc != 4) {
+ fprintf(stderr, "Invalid argument\n");
+ fprintf(stderr, "Usage: %s <volname> <logfile> <do-write [0/1]\n",
+ argv[0]);
+ return 1;
+ }
+
+ volname = argv[1];
+ logfile = argv[2];
+ do_write = atoi(argv[3]);
+
+ fs = glfs_new(volname);
+ if (!fs)
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_new", ret, out);
+
+ ret = glfs_set_volfile_server(fs, "tcp", "localhost", 24007);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_set_volfile_server", ret, out);
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_set_logging", ret, out);
+
+ ret = glfs_init(fs);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_init", ret, out);
+
+ ret = glfs_mkdir(fs, dirname, 0755);
+ if (ret && errno != EEXIST)
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_mkdir", ret, out);
+
+ fd1 = glfs_creat(fs, filename, flags, 0644);
+ if (fd1 == NULL) {
+ ret = -1;
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_creat", ret, out);
+ }
+
+ if (do_write) {
+ ret = glfs_write(fd1, "hello world", 11, 0);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_write", ret, out);
+ }
+
+ fd2 = glfs_opendir(fs, dirname);
+ if (fd2 == NULL) {
+ ret = -1;
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_opendir", ret, out);
+ }
+
+ do {
+ ret = glfs_readdirplus_r(fd2, &sb, (struct dirent *)buf, &entry);
+ if (entry != NULL) {
+ if (!strcmp(entry->d_name, short_filename)) {
+ if (sb.st_mode == 0) {
+ fprintf(
+ stderr,
+ "Mode bits are incorrect: d_name - %s, st_mode - %jd\n",
+ entry->d_name, (intmax_t)sb.st_mode);
+ ret = -1;
+ goto out;
+ }
+ }
+ }
+ } while (entry != NULL);
+
+out:
+ if (fd1 != NULL)
+ glfs_close(fd1);
+ if (fd2 != NULL)
+ glfs_closedir(fd2);
+
+ if (fs) {
+ /*
+ * If this fails (as it does on Special Snowflake NetBSD for no
+ * good reason), it shouldn't affect the result of the test.
+ */
+ (void)glfs_fini(fs);
+ }
+
+ return ret;
+}
diff --git a/tests/bugs/gfapi/bug-1630804/gfapi-bz1630804.t b/tests/bugs/gfapi/bug-1630804/gfapi-bz1630804.t
new file mode 100644
index 00000000000..ac59aeeb47b
--- /dev/null
+++ b/tests/bugs/gfapi/bug-1630804/gfapi-bz1630804.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 ${H0}:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/gfapi-bz1630804.c -lgfapi
+
+TEST ./$(dirname $0)/gfapi-bz1630804 $V0 $logdir/gfapi-bz1630804.log 0
+TEST ./$(dirname $0)/gfapi-bz1630804 $V0 $logdir/gfapi-bz1630804.log 1
+
+cleanup_tester $(dirname $0)/gfapi-bz1630804
+
+cleanup;
diff --git a/tests/bugs/gfapi/glfs_vol_set_IO_ERR.c b/tests/bugs/gfapi/glfs_vol_set_IO_ERR.c
new file mode 100644
index 00000000000..f38f01144d3
--- /dev/null
+++ b/tests/bugs/gfapi/glfs_vol_set_IO_ERR.c
@@ -0,0 +1,163 @@
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define WRITE_SIZE (128)
+
+glfs_t *
+setup_new_client(char *hostname, char *volname, char *log_file)
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ fprintf(stderr, "\nglfs_new: returned NULL (%s)\n", strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf(stderr, "\nglfs_set_volfile_server failed ret:%d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_logging(fs, log_file, 7);
+ if (ret < 0) {
+ fprintf(stderr, "\nglfs_set_logging failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ fprintf(stderr, "\nglfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+ return fs;
+error:
+ return NULL;
+}
+
+int
+write_something(glfs_t *fs)
+{
+ glfs_fd_t *fd = NULL;
+ char *buf = NULL;
+ int ret = 0;
+ int j = 0;
+
+ fd = glfs_creat(fs, "filename", O_RDWR, 0644);
+ if (!fd) {
+ fprintf(stderr, "%s: (%p) %s\n", "filename", fd, strerror(errno));
+ return -1;
+ }
+
+ buf = (char *)malloc(WRITE_SIZE);
+ if (!buf) {
+ fprintf(stderr, "malloc(%d) failed (%s)\n", WRITE_SIZE, strerror(errno));
+ glfs_close(fd);
+ return -1;
+ }
+ memset(buf, '-', WRITE_SIZE);
+
+ for (j = 0; j < 4; j++) {
+ ret = glfs_write(fd, buf, WRITE_SIZE, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Write(%s): %d (%s)\n", "filename", ret,
+ strerror(errno));
+ break;
+ }
+ glfs_lseek(fd, 0, SEEK_SET);
+ }
+
+ free(buf);
+ glfs_close(fd);
+ return (ret < 0) ? ret : 0;
+}
+
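+/*
+ * Toggle the quick-read option on -> off -> on so that the connected
+ * gfapi client receives successive volfile changes and switches graphs.
+ */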
+static int
+volfile_change(const char *volname)
+{
+ int ret = 0;
+ char *cmd = NULL, *cmd1 = NULL;
+
+ ret = asprintf(&cmd, "gluster volume set %s quick-read on", volname);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct cli command string (%s)",
+ strerror(errno));
+ return ret;
+ }
+
+ ret = asprintf(&cmd1, "gluster volume set %s quick-read off", volname);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct cli command string (%s)",
+ strerror(errno));
+ return ret;
+ }
+
+ ret = system(cmd);
+ if (ret) {
+ fprintf(stderr, "enabling quick-read on (%s) failed", volname);
+ return -1;
+ }
+
+ ret = system(cmd1);
+ if (ret) {
+ fprintf(stderr, "disabling quick-read on (%s) failed", volname);
+ return -1;
+ }
+
+ ret = system(cmd);
+ if (ret) {
+ fprintf(stderr, "re-enabling quick-read on (%s) failed", volname);
+ return -1;
+ }
+
+ free(cmd);
+ free(cmd1);
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+
+ if (argc != 4) {
+ fprintf(
+ stderr,
+ "Expect following args %s <hostname> <Vol> <log file location>\n",
+ argv[0]);
+ return -1;
+ }
+
+ fs = setup_new_client(argv[1], argv[2], argv[3]);
+ if (!fs)
+ goto error;
+
+ ret = volfile_change(argv[2]);
+ if (ret < 0)
+ goto error;
+
+ /* This delay is required because the volfile change takes a while to
+ * reach this gfapi client and for the client to process the graph
+ * switch. Without it the issue cannot be reproduced, and hence cannot
+ * be tested.
+ */
+ sleep(10);
+
+ ret = write_something(fs);
+ if (ret < 0)
+ goto error;
+
+ ret = glfs_fini(fs);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_fini failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+ return 0;
+error:
+ return -1;
+}
diff --git a/tests/bugs/gfapi/glfs_vol_set_IO_ERR.t b/tests/bugs/gfapi/glfs_vol_set_IO_ERR.t
new file mode 100755
index 00000000000..5893ef273bd
--- /dev/null
+++ b/tests/bugs/gfapi/glfs_vol_set_IO_ERR.t
@@ -0,0 +1,20 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0;
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/glfs_vol_set_IO_ERR.c -lgfapi
+TEST $(dirname $0)/glfs_vol_set_IO_ERR $H0 $V0 $logdir/glfs_vol_set_IO_ERR.log
+
+cleanup_tester $(dirname $0)/glfs_vol_set_IO_ERR
+cleanup;
diff --git a/tests/bugs/glusterd/859927/repl.t b/tests/bugs/glusterd/859927/repl.t
new file mode 100755
index 00000000000..6e7c23b5b1d
--- /dev/null
+++ b/tests/bugs/glusterd/859927/repl.t
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd
+
+#Tests for data-self-heal-algorithm option
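+# The helper below kills one brick, rewrites the file so the replicas
+# diverge, and restarts the killed brick so self-heal has pending work.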
+function create_setup_for_self_heal {
+ file=$1
+ kill_brick $V0 $H0 $B0/${V0}1
+ dd of=$file if=/dev/urandom bs=1024k count=1 > /dev/null 2>&1
+ $CLI volume start $V0 force
+}
+
+function test_write {
+ dd of=$M0/a if=/dev/urandom bs=1k count=1 > /dev/null 2>&1
+}
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 client-log-level DEBUG
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0;
+
+touch $M0/a
+
+TEST $CLI volume set $V0 cluster.data-self-heal-algorithm full
+EXPECT full volume_option $V0 cluster.data-self-heal-algorithm
+create_setup_for_self_heal $M0/a
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+cat $file > /dev/null 2>&1
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
+TEST cmp $B0/${V0}1/a $B0/${V0}2/a
+
+TEST $CLI volume set $V0 cluster.data-self-heal-algorithm diff
+EXPECT diff volume_option $V0 cluster.data-self-heal-algorithm
+create_setup_for_self_heal $M0/a
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+cat $file > /dev/null 2>&1
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
+TEST cmp $B0/${V0}1/a $B0/${V0}2/a
+
+TEST $CLI volume reset $V0 cluster.data-self-heal-algorithm
+create_setup_for_self_heal $M0/a
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+cat $file > /dev/null 2>&1
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
+TEST cmp $B0/${V0}1/a $B0/${V0}2/a
+
+TEST ! $CLI volume set $V0 cluster.data-self-heal-algorithm ""
+
+cleanup;
diff --git a/tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t b/tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t
new file mode 100644
index 00000000000..95d0eb69ac1
--- /dev/null
+++ b/tests/bugs/glusterd/add-brick-and-validate-replicated-volume-options.t
@@ -0,0 +1,110 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
+
+#bug-1102656 - validating volume top command
+
+TEST $CLI volume top $V0 open
+TEST ! $CLI volume top $V0 open brick $H0:/tmp/brick
+TEST $CLI volume top $V0 read
+
+TEST $CLI volume status
+
+#bug-1002556
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}3
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks';
+
+TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}3 force
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
+TEST killall glusterd
+TEST glusterd
+
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks';
+
+#bug-1406411 - fail add-brick when replica count changes
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+#add-brick should fail
+TEST ! $CLI_NO_FORCE volume add-brick $V0 replica 3 $H0:$B0/${V0}3
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}3
+
+TEST $CLI volume create $V1 $H0:$B0/${V1}{1,2};
+TEST $CLI volume start $V1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
+TEST kill_brick $V1 $H0 $B0/${V1}1
+
+#add-brick should fail
+TEST ! $CLI_NO_FORCE volume add-brick $V1 replica 2 $H0:$B0/${V1}{3,4}
+
+TEST $CLI volume start $V1 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
+
+TEST $CLI volume add-brick $V1 replica 2 $H0:$B0/${V1}{3,4}
+
+#bug-905307 - validate cluster.post-op-delay-secs option
+
+#Strings should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs abc
+
+#Negative ints should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs -1
+
+#INT_MAX+1 should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 2147483648
+
+#floats should not be accepted.
+TEST ! $CLI volume set $V0 cluster.post-op-delay-secs 1.25
+
+#min val 0 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 0
+EXPECT "0" volume_option $V0 cluster.post-op-delay-secs
+
+#max val 2147483647 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147483647
+EXPECT "2147483647" volume_option $V0 cluster.post-op-delay-secs
+
+#A mid-range value such as 2147 should be accepted
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 2147
+EXPECT "2147" volume_option $V0 cluster.post-op-delay-secs
+
+#bug-1265479 - validate-replica-volume-options
+
+#Setting data-self-heal option on for distribute-replicate volume
+TEST $CLI volume set $V1 data-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.data-self-heal';
+TEST $CLI volume set $V1 cluster.data-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.data-self-heal';
+
+#Setting metadata-self-heal option on for distribute-replicate volume
+TEST $CLI volume set $V1 metadata-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.metadata-self-heal';
+TEST $CLI volume set $V1 cluster.metadata-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.metadata-self-heal';
+
+#Setting entry-self-heal option on for distribute-replicate volume
+TEST $CLI volume set $V1 entry-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.entry-self-heal';
+TEST $CLI volume set $V1 cluster.entry-self-heal on
+EXPECT 'on' volinfo_field $V1 'cluster.entry-self-heal';
+
+cleanup
diff --git a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
new file mode 100644
index 00000000000..b6af487a791
--- /dev/null
+++ b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
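+# Count the distinct real brick PIDs reported in the XML 'volume status' output.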
+function count_brick_pids {
+ $CLI_1 --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+ | grep -v "N/A" | sort | uniq | wc -l
+}
+
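+# Count brick entries whose PID is reported as -1, i.e. shown as N/A.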
+function count_N/A_brick_pids {
+ $CLI_1 --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+ | grep -- '\-1' | sort | uniq | wc -l
+}
+
+function check_peers {
+ $CLI_2 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 3
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TEST $CLI_1 volume set all cluster.brick-multiplex on
+#bug-1609163 - bricks of normal volume should not attach to bricks of gluster_shared_storage volume
+
+##Create, start and mount meta_volume i.e., shared_storage
+TEST $CLI_1 volume create $META_VOL replica 3 $H1:$B1/${META_VOL}1 $H2:$B2/${META_VOL}1 $H3:$B3/${META_VOL}1
+TEST $CLI_1 volume start $META_VOL
+TEST mkdir -p $META_MNT
+TEST glusterfs -s $H1 --volfile-id $META_VOL $META_MNT
+
+TEST $CLI_1 volume info gluster_shared_storage
+
+EXPECT 3 count_brick_processes
+
+#create and start a new volume
+TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/${V0}{1..3} $H2:$B2/${V0}{1..3}
+TEST $CLI_1 volume start $V0
+
+# bricks of normal volume should not attach to bricks of gluster_shared_storage volume
+EXPECT 5 count_brick_processes
+
+#bug-1549996 - stale brick processes on the nodes after volume deletion
+
+TEST $CLI_1 volume create $V1 replica 3 $H1:$B1/${V1}{1..3} $H2:$B2/${V1}{1..3}
+TEST $CLI_1 volume start $V1
+
+EXPECT 5 count_brick_processes
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume stop $V1
+
+EXPECT 3 count_brick_processes
+
+TEST $CLI_1 volume stop $META_VOL
+
+TEST $CLI_1 volume delete $META_VOL
+TEST $CLI_1 volume delete $V0
+TEST $CLI_1 volume delete $V1
+
+#bug-1773856 - Brick process fails to come up with brickmux on
+
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1 $H3:$B3/${V0}1 force
+TEST $CLI_1 volume start $V0
+
+
+EXPECT 3 count_brick_processes
+
+#create and start a new volume
+TEST $CLI_1 volume create $V1 $H1:$B1/${V1}2 $H2:$B2/${V1}2 $H3:$B3/${V1}2 force
+TEST $CLI_1 volume start $V1
+
+EXPECT 3 count_brick_processes
+
+V2=patchy2
+TEST $CLI_1 volume create $V2 $H1:$B1/${V2}3 $H2:$B2/${V2}3 $H3:$B3/${V2}3 force
+TEST $CLI_1 volume start $V2
+
+EXPECT 3 count_brick_processes
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_brick_pids
+
+TEST kill_node 1
+
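+# Give the remaining nodes time to detect that node 1 has gone down.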
+sleep 10
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+$CLI_2 volume set $V0 performance.readdir-ahead on
+$CLI_2 volume set $V1 performance.readdir-ahead on
+
+TEST $glusterd_1;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 count_N/A_brick_pids
+
+cleanup;
diff --git a/tests/bugs/glusterd/brick-mux-validation.t b/tests/bugs/glusterd/brick-mux-validation.t
new file mode 100644
index 00000000000..61b0455f9a8
--- /dev/null
+++ b/tests/bugs/glusterd/brick-mux-validation.t
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../traps.rc
+. $(dirname $0)/../../volume.rc
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+function count_brick_pids {
+ $CLI --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+ | grep -v "N/A" | sort | uniq | wc -l
+}
+
+cleanup;
+
+#bug-1451248 - validate brick mux after glusterd reboot
+
+TEST glusterd
+TEST $CLI volume set all cluster.brick-multiplex on
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3}
+TEST $CLI volume start $V0
+
+EXPECT 1 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count
+
+pkill gluster
+TEST glusterd
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count
+
+TEST $CLI volume create $V1 $H0:$B0/${V1}{1..3}
+TEST $CLI volume start $V1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
+
+#bug-1560957 - brick status goes offline after remove-brick followed by add-brick
+
+pkill glusterd
+TEST glusterd
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 force
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}1_new force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
+
+#bug-1446172 - reset brick with brick multiplexing enabled
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+# Create files
+for i in {1..5}
+do
+ echo $i > $M0/file$i.txt
+done
+
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1_new start
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 5 online_brick_count
+EXPECT 1 count_brick_processes
+
+# Negative case with brick killed but volume-id xattr present
+TEST ! $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit
+
+# reset-brick commit force should work and should bring up the brick
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1_new $H0:$B0/${V0}1_new commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
+EXPECT 1 count_brick_processes
+TEST glusterfs --volfile-id=$V1 --volfile-server=$H0 $M1;
+# Create files
+for i in {1..5}
+do
+ echo $i > $M1/file$i.txt
+done
+
+TEST $CLI volume reset-brick $V1 $H0:$B0/${V1}1 start
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 5 online_brick_count
+EXPECT 1 count_brick_processes
+
+# Simulate reset disk
+for i in {1..5}
+do
+ rm -rf $B0/${V1}1/file$i.txt
+done
+
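+# Removing the volume-id and gfid xattrs makes the brick look like a freshly
+# formatted disk.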
+setfattr -x trusted.glusterfs.volume-id $B0/${V1}1
+setfattr -x trusted.gfid $B0/${V1}1
+
+# Test reset-brick commit. Using CLI_IGNORE_PARTITION since normal CLI uses
+# the --wignore flag that essentially makes the command act like "commit force"
+TEST $CLI_IGNORE_PARTITION volume reset-brick $V1 $H0:$B0/${V1}1 $H0:$B0/${V1}1 commit
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
+EXPECT 1 count_brick_processes
+
+cleanup;
diff --git a/tests/bugs/glusterd/brick-mux.t b/tests/bugs/glusterd/brick-mux.t
new file mode 100644
index 00000000000..927940534c1
--- /dev/null
+++ b/tests/bugs/glusterd/brick-mux.t
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+cleanup
+
+#bug-1444596 - validating brick mux
+
+TEST glusterd -LDEBUG
+TEST $CLI volume create $V0 $H0:$B0/brick{0,1}
+TEST $CLI volume create $V1 $H0:$B0/brick{2,3}
+
+TEST $CLI volume set all cluster.brick-multiplex on
+
+TEST $CLI volume start $V0
+TEST $CLI volume start $V1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 1 count_brick_processes
+
+#bug-1499509 - stop all the bricks when a brick process is killed
+kill -9 $(pgrep glusterfsd)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 online_brick_count
+
+TEST $CLI volume start $V0 force
+TEST $CLI volume start $V1 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+
+
+pkill glusterd
+TEST glusterd
+
+#Check brick status after restarting glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 1 count_brick_processes
+
+TEST $CLI volume set $V1 performance.io-cache-size 32MB
+TEST $CLI volume stop $V1
+TEST $CLI volume start $V1
+
+#Check the number of brick processes after changing an option
+EXPECT 2 count_brick_processes
+
+pkill glusterd
+TEST glusterd
+
+#Brick status should not be N/A after restarting glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 2 count_brick_processes
+
+pkill glusterd
+TEST glusterd
+
+#Brick status should not be N/A after restarting glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 2 count_brick_processes
+
+#bug-1444596_brick_mux_posix_hlth_chk_status
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST rm -rf $B0/brick{0,1}
+
+#Check the number of brick processes after removing the bricks from the back-end
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 online_brick_count
+
+TEST glusterfs -s $H0 --volfile-id $V1 $M0
+TEST touch $M0/file{1..10}
+
+pkill glusterd
+TEST glusterd -LDEBUG
+sleep 5
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 online_brick_count
+
+cleanup
+
diff --git a/tests/bugs/glusterd/brick-order-check-add-brick.t b/tests/bugs/glusterd/brick-order-check-add-brick.t
new file mode 100644
index 00000000000..0be31dac768
--- /dev/null
+++ b/tests/bugs/glusterd/brick-order-check-add-brick.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST verify_lvm_version;
+#Create cluster with 3 nodes
+TEST launch_cluster 3 -NO_DEBUG -NO_FORCE
+TEST setup_lvm 3
+
+TEST $CLI_1 peer probe $H2
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TEST $CLI_1 volume create $V0 replica 3 $H1:$L1/$V0 $H2:$L2/$V0 $H3:$L3/$V0
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#add-brick with or without mentioning the replica count should not fail
+TEST $CLI_1 volume add-brick $V0 replica 3 $H1:$L1/${V0}_1 $H2:$L2/${V0}_1 $H3:$L3/${V0}_1
+EXPECT '2 x 3 = 6' volinfo_field $V0 'Number of Bricks'
+
+TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_2 $H2:$L2/${V0}_2 $H3:$L3/${V0}_2
+EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
+
+#adding bricks from the same host should fail the brick order check
+TEST ! $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5
+EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
+
+#adding bricks from the same host with force should succeed
+TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5 force
+EXPECT '4 x 3 = 12' volinfo_field $V0 'Number of Bricks'
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$L1/${V0}1 $H2:$L2/${V0}1
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks'
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#Add-brick with increasing replica count
+TEST $CLI_1 volume add-brick $V0 replica 3 $H3:$L3/${V0}1
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+
+#Add-brick with increasing replica count from the same host should fail
+TEST ! $CLI_1 volume add-brick $V0 replica 5 $H1:$L1/${V0}2 $H1:$L1/${V0}3
+
+#adding multiple bricks from the same host should fail the brick order check
+TEST ! $CLI_1 volume add-brick $V0 replica 3 $H1:$L1/${V0}{4..6} $H2:$L2/${V0}{7..9}
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1070734.t b/tests/bugs/glusterd/bug-1070734.t
new file mode 100755
index 00000000000..0afcb3b37b3
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1070734.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+## Let's create a volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 nfs.disable false
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0;
+
+############################################################################
+#TEST-PLAN:
+#Create a directory DIR and a file inside DIR
+#Check which brick the file hashes to
+#Delete the directory so it can be recreated after the remove-brick
+#Remove the brick the file hashed to
+#After remove-brick status says completed, recreate the same directory \
+#DIR and file
+#Check that the file now falls on the other brick
+#Check that the other brick gets the full layout and the removed brick \
+#gets the zeroed layout
+############################################################################
+
+TEST mkdir $N0/DIR;
+
+TEST touch $N0/DIR/file;
+
+if [ -f $B0/${V0}1/DIR/file ]
+then
+ HASHED=$B0/${V0}1;
+ OTHERBRICK=$B0/${V0}2;
+else
+ HASHED=$B0/${V0}2;
+ OTHERBRICK=$B0/${V0}1;
+fi
+
+TEST rm -f $N0/DIR/file;
+TEST rmdir $N0/DIR;
+TEST $CLI volume remove-brick $V0 $H0:${HASHED} start;
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" \
+"$H0:${HASHED}";
+
+TEST mkdir $N0/DIR;
+TEST touch $N0/DIR/file;
+
+#Check now the file should fall in to OTHERBRICK
+TEST [ -f ${OTHERBRICK}/DIR/file ]
+
+#Check that DIR on HASHED got the zeroed layout and that \
+#OTHERBRICK got the full layout
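+# dht_get_layout prints the brick's dht layout xattr; 'cut -c 19-' keeps only
+# the trailing start/stop hash range compared below.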
+shorter_layout () {
+ dht_get_layout $1 | cut -c 19-
+}
+EXPECT "0000000000000000" shorter_layout $HASHED/DIR ;
+EXPECT "00000000ffffffff" shorter_layout $OTHERBRICK/DIR;
+
+## Unmount before killing the daemon to avoid deadlocks
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1085330-and-bug-916549.t b/tests/bugs/glusterd/bug-1085330-and-bug-916549.t
new file mode 100644
index 00000000000..892a30d74ea
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1085330-and-bug-916549.t
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+STR="1234567890"
+volname="Vol"
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+#testcase: bug-1085330
+
+# Construct a volname string that is more than 256 characters
+for i in {1..30}
+do
+ volname+=$STR
+done
+# Now $volname is more than 256 chars
+
+TEST ! $CLI volume create $volname $H0:$B0/${volname}{1,2};
+
+TEST $CLI volume info;
+
+# Construct a brick string that is more than 256 characters
+volname="Vol1234"
+brick="brick"
+for i in {1..30}
+do
+ brick+=$STR
+done
+# Now $brick is more than 256 chars
+
+TEST ! $CLI volume create $volname $H0:$B0/$brick;
+
+TEST $CLI volume info;
+
+# Now try to create a volume with a couple of bricks where strlen(volname) = 128
+# and strlen(brick) = 128
+# The command should still fail, as strlen(volfile path) > 256
+
+volname="Volume-0"
+brick="brick-00"
+STR="12345678"
+
+for i in {1..15}
+do
+ volname+=$STR
+ brick+=$STR
+done
+TEST ! $CLI volume create $volname $H0:$B0/$brick;
+
+TEST $CLI volume info;
+
+# Test case with a brick path of 255 chars ending in a trailing "/"
+brick=""
+STR1="12345678"
+volname="vol"
+
+for i in {1..31}
+do
+ brick+=$STR1
+done
+brick+="123456/"
+
+echo $brick | wc -c
+# Now $brick is exactly 255 chars, with a trailing "/" at the end
+# This will still fail, as the volfile path exceeds _POSIX_PATH_MAX chars
+
+TEST ! $CLI volume create $volname $H0:$B0/$brick;
+
+TEST $CLI volume info;
+
+# Positive test case
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+
+TEST $CLI volume info;
+
+TEST $CLI volume start $V0;
+
+#testcase: bug-916549
+
+pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/);
+brick_pid=$(cat $GLUSTERD_PIDFILEDIR/vols/$V0/$pid_file);
+
+kill -SIGKILL $brick_pid;
+TEST $CLI volume start $V0 force;
+TEST process_leak_count $(pidof glusterd);
+
+cleanup
+
diff --git a/tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t b/tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t
new file mode 100755
index 00000000000..9ac9d8fedd9
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1091935-brick-order-check-from-cli-to-glusterd.t
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+## Let's create partitions for the bricks
+TEST truncate -s 100M $B0/brick1
+TEST truncate -s 200M $B0/brick2
+TEST truncate -s 200M $B0/brick3
+TEST truncate -s 200M $B0/brick4
+
+
+TEST LO1=`SETUP_LOOP $B0/brick1`
+TEST LO2=`SETUP_LOOP $B0/brick2`
+TEST LO3=`SETUP_LOOP $B0/brick3`
+TEST LO4=`SETUP_LOOP $B0/brick4`
+
+
+TEST MKFS_LOOP $LO1
+TEST MKFS_LOOP $LO2
+TEST MKFS_LOOP $LO3
+TEST MKFS_LOOP $LO4
+
+TEST mkdir -p ${B0}/${V0}{0..3}
+
+
+TEST MOUNT_LOOP $LO1 $B0/${V0}0
+TEST MOUNT_LOOP $LO2 $B0/${V0}1
+TEST MOUNT_LOOP $LO3 $B0/${V0}2
+TEST MOUNT_LOOP $LO4 $B0/${V0}3
+
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+
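+# Strip --wignore so the brick-order checks below are actually enforced.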
+CLI_1_WITHOUT_WIGNORE=$(echo $CLI_1 | sed 's/ --wignore//')
+
+
+# Creating a volume with a non-resolvable hostname
+TEST ! $CLI_1_WITHOUT_WIGNORE volume create $V0 replica 2 \
+ $H1:$B0/${V0}0/brick redhat:$B0/${V0}1/brick \
+ $H1:$B0/${V0}2/brick redhat:$B0/${V0}3/brick
+
+
+#Workaround for Bug:1091935
+#Failure to create volume above leaves 1st brick with xattrs.
+rm -rf $B0/${V0}{0..3}/brick;
+
+
+# Creating a distribute-replicate volume with a bad brick order. It will
+# fail due to the bad brick order.
+TEST ! $CLI_1_WITHOUT_WIGNORE volume create $V0 replica 2 \
+ $H1:$B0/${V0}0/brick $H1:$B0/${V0}1/brick \
+ $H1:$B0/${V0}2/brick $H1:$B0/${V0}3/brick
+
+
+
+#Workaround for Bug:1091935
+#Failure to create volume above leaves 1st brick with xattrs.
+rm -rf $B0/${V0}{0..3}/brick;
+
+
+
+# Test for positive case, volume create should pass for
+# resolved hostnames and bricks in order.
+TEST $CLI_1_WITHOUT_WIGNORE volume create $V0 replica 2 \
+ $H1:$B0/${V0}0/brick $H2:$B0/${V0}1/brick \
+ $H1:$B0/${V0}2/brick $H2:$B0/${V0}3/brick
+
+# Delete the volume as we want to reuse bricks
+TEST $CLI_1_WITHOUT_WIGNORE volume delete $V0
+
+
+# With "force" at the end of the command, the brick-order check for replicate
+# and distribute-replicate volumes is bypassed and the volume is created.
+TEST $CLI_1_WITHOUT_WIGNORE volume create $V0 replica 2 \
+ $H1:$B0/${V0}0/brick $H1:$B0/${V0}1/brick \
+ $H1:$B0/${V0}2/brick $H1:$B0/${V0}3/brick force
+
+# Need to cleanup the loop back devices.
+UMOUNT_LOOP ${B0}/${V0}{0..3}
+rm -f ${B0}/brick{1..4}
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1238706-daemons-stop-on-peer-cleanup.t b/tests/bugs/glusterd/bug-1238706-daemons-stop-on-peer-cleanup.t
new file mode 100644
index 00000000000..7be076caaf3
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1238706-daemons-stop-on-peer-cleanup.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+## Test case for stopping all running daemon services on peer detach.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+
+## Start a 2 node virtual cluster
+TEST launch_cluster 2;
+
+## Peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+
+## Creating and starting volume
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H1:$B1/${V0}1
+TEST $CLI_1 volume start $V0
+
+TEST $CLI_1 volume set $V0 nfs.disable off
+
+## To Do: Add test cases for the quota and snapshot daemons. Currently the
+## quota daemon does not work in the cluster framework, and the snapd daemon
+## starts on only one node in the cluster framework. Add the test cases
+## once patch http://review.gluster.org/#/c/11666/ is merged.
+
+## With 2 nodes, the "nfs" daemon should run on both of them.
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_nfs_count
+
+## Detach 2nd node from the cluster.
+TEST $CLI_1 peer detach $H2;
+
+
+## After detaching the 2nd node, only 1 nfs daemon should be left running.
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_nfs_count
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t b/tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t
new file mode 100644
index 00000000000..c229d4371b6
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1242875-do-not-pass-volinfo-quota.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Let's create volume V0 and start it
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+
+## Let's create volume V1 and start it
+TEST $CLI volume create $V1 $H0:$B0/${V0}2 $H0:$B0/${V0}3
+TEST $CLI volume start $V1
+
+## Enable quota on 2nd volume
+TEST $CLI volume quota $V1 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_quotad_count
+
+## Kill all gluster processes
+pkill gluster;
+
+## There should be no quota daemon running after killing the gluster processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_quotad_count
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## The quotad daemon should start again when glusterd is restarted
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_quotad_count
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1482906-peer-file-blank-line.t b/tests/bugs/glusterd/bug-1482906-peer-file-blank-line.t
new file mode 100644
index 00000000000..967595e4dbb
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1482906-peer-file-blank-line.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+#Test that glusterd tolerates blank lines added to a peer file
+function add_new_line_to_peer_file {
+ UUID_NAME=$($CLI_1 peer status | grep Uuid)
+ PEER_ID=$(echo $UUID_NAME | cut -c 7-)
+ GD_WD=$($CLI_1 system getwd)
+ GD_WD+=/peers/
+ PATH_TO_PEER_FILE=$GD_WD$PEER_ID
+ sed -i '1s/^/\n/gm; $s/$/\n/gm' $PATH_TO_PEER_FILE
+}
+
+cleanup;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+add_new_line_to_peer_file
+
+TEST kill_glusterd 1
+TEST $glusterd_1
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1595320.t b/tests/bugs/glusterd/bug-1595320.t
new file mode 100644
index 00000000000..c10e11821a1
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1595320.t
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup
+
+function count_up_bricks {
+ $CLI --xml volume status $V0 | grep '<status>1' | wc -l
+}
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+# Setup 3 LVMS
+LVM_PREFIX="test"
+TEST init_n_bricks 3
+TEST setup_lvm 3
+
+# Start glusterd
+TEST glusterd
+TEST pidof glusterd
+
+# Create volume and enable brick multiplexing
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3
+TEST $CLI v set all cluster.brick-multiplex on
+
+# Start the volume
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_up_bricks
+EXPECT 1 count_brick_processes
+
+# Capture the brick pid so the process can be killed ungracefully below
+brick_pid=`pgrep glusterfsd`
+
+# Make sure every brick root is held open by the brick process
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L1 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L2 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L3 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+
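+# Record the per-brick pid files; one is copied over another later to
+# exercise the brick attach path.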
+b1_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-1*.pid)
+b2_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-2*.pid)
+b3_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-3*.pid)
+
+kill -9 $brick_pid
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 count_brick_processes
+
+# Unmount 3rd brick root from node
+brick_root=$L3
+_umount_lv 3
+
+# Start the volume; only 2 bricks should start
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks
+EXPECT 1 count_brick_processes
+
+brick_pid=`pgrep glusterfsd`
+
+# Make sure only two brick roots are held open by the brick process
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L1 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L2 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L3 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 0 ]
+
+# Mount the brick root
+TEST mkdir -p $brick_root
+TEST mount -t xfs -o nouuid /dev/test_vg_3/brick_lvm $brick_root
+
+# Copy brick 1's pid file over brick 3's to exercise the brick attach code
+TEST cp $b1_pid_file $b3_pid_file
+
+# Start the volume; all bricks should be up
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_up_bricks
+EXPECT 1 count_brick_processes
+
+# Make sure every brick root is held open by the brick process
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L1 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L2 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L3 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1696046.t b/tests/bugs/glusterd/bug-1696046.t
new file mode 100644
index 00000000000..e1c1eb2ceb9
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1696046.t
@@ -0,0 +1,113 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function count_up_bricks {
+ $CLI --xml volume status $1 | grep '<status>1' | wc -l
+}
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+logdir=`gluster --print-logdir`
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume set all cluster.brick-multiplex on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3};
+TEST $CLI volume create $V1 replica 3 $H0:$B0/${V1}{1,2,3};
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $CLI volume start $V1;
+EXPECT 'Started' volinfo_field $V1 'Status';
+
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks $V1
+
+EXPECT 1 count_brick_processes
+
+# Mount V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
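+# Log file names are derived from the corresponding path, with '/' mapped to '-'.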
+function client-log-file-name()
+{
+ logfilename=$M0".log"
+ echo ${logfilename:1} | tr / -
+}
+
+function brick-log-file-name()
+{
+ logfilename=$B0"/"$V0"1.log"
+ echo ${logfilename:1} | tr / -
+}
+
+log_file=$logdir"/"`client-log-file-name`
+nofdlog=$(cat $log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+brick_log_file=$logdir"/bricks/"`brick-log-file-name`
+nofdlog=$(cat $brick_log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+## Set brick-log-level to DEBUG
+TEST $CLI volume set $V0 diagnostics.brick-log-level DEBUG
+
+# Do some operation
+touch $M0/file1
+
+# Debug messages should exist only for V0. The server xlator is shared in
+# brick-mux mode, so after enabling DEBUG logging some debug messages should
+# be present for the other xlators, such as posix
+
+brick_log_file=$logdir"/bricks/"`brick-log-file-name`
+nofdlog=$(cat $brick_log_file | grep file1 | grep -v server | wc -l)
+TEST [ $((nofdlog)) -ne 0 ]
+
+#Check that no debug logs exist in the client log file
+nofdlog=$(cat $log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+## Set brick-log-level to INFO
+TEST $CLI volume set $V0 diagnostics.brick-log-level INFO
+
+## Set client-log-level to DEBUG
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+
+# Do some operation
+touch $M0/file2
+
+nofdlog=$(cat $brick_log_file | grep " D " | grep file2 | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+nofdlog=$(cat $log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -ne 0 ]
+
+# Unmount V0
+TEST umount $M0
+
+#Mount V1
+TEST glusterfs --volfile-id=$V1 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+#do some operation
+touch $M0/file3
+
+
+# The DEBUG log level is enabled only for V0, so no debug messages specific to
+# the file3 creation should appear, except from the server xlator, which is
+# shared in brick multiplex mode
+nofdlog=$(cat $brick_log_file | grep file3 | grep -v server | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+# Unmount V1
+TEST umount $M0
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-1699339.t b/tests/bugs/glusterd/bug-1699339.t
new file mode 100644
index 00000000000..bb8d4f46eb8
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1699339.t
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+NUM_VOLS=15
+
+
+get_brick_base () {
+ printf "%s/vol%02d" $B0 $1
+}
+
+function count_up_bricks {
+ vol=$1;
+ $CLI_1 --xml volume status $vol | grep '<status>1' | wc -l
+}
+
+create_volume () {
+
+ local vol_name=$(printf "%s-vol%02d" $V0 $1)
+
+ TEST $CLI_1 volume create $vol_name replica 3 $H1:$B1/${vol_name} $H2:$B2/${vol_name} $H3:$B3/${vol_name}
+ TEST $CLI_1 volume start $vol_name
+}
+
+TEST launch_cluster 3
+TEST $CLI_1 volume set all cluster.brick-multiplex on
+
+# The option accepts values in the range from 5 to 200
+TEST ! $CLI_1 volume set all glusterd.vol_count_per_thread 210
+TEST ! $CLI_1 volume set all glusterd.vol_count_per_thread 4
+
+TEST $CLI_1 volume set all glusterd.vol_count_per_thread 5
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+# Our infrastructure can't handle an arithmetic expression here. The formula
+# is (NUM_VOLS-1)*2 because it sees each TEST/EXPECT once but needs the other
+# NUM_VOLS-1, and there are 2 such statements in each iteration.
+TESTS_EXPECTED_IN_LOOP=28
+for i in $(seq 1 $NUM_VOLS); do
+ starttime="$(date +%s)";
+ create_volume $i
+done
+
+TEST kill_glusterd 1
+
+TESTS_EXPECTED_IN_LOOP=4
+for i in `seq 1 3 15`
+do
+vol1=$(printf "%s-vol%02d" $V0 $i)
+TEST $CLI_2 volume set $vol1 performance.readdir-ahead on
+done
+
+# Bring back 1st glusterd
+TEST $glusterd_1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TESTS_EXPECTED_IN_LOOP=4
+for i in `seq 1 3 15`
+do
+vol1=$(printf "%s-vol%02d" $V0 $i)
+EXPECT_WITHIN $PROBE_TIMEOUT "on" volinfo_field_1 $vol1 performance.readdir-ahead
+done
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1720566.t b/tests/bugs/glusterd/bug-1720566.t
new file mode 100644
index 00000000000..99bcf6ff785
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1720566.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+
+cleanup;
+V0="TestLongVolnamec363b7b536700ff06eedeae0dd9037fec363b7b536700ff06eedeae0dd9037fec363b7b536700ff06eedeae0dd9abcd"
+V1="TestLongVolname3102bd28a16c49440bd5210e4ec4d5d93102bd28a16c49440bd5210e4ec4d5d933102bd28a16c49440bd5210e4ebbcd"
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+$CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+$CLI_1 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1
+EXPECT 'Created' cluster_volinfo_field 1 $V1 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V1
+EXPECT 'Started' cluster_volinfo_field 1 $V1 'Status';
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V1 $M1;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST mkdir $M1/dir{1..4};
+TEST touch $M1/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}_1 $H2:$B2/${V0}_1
+TEST $CLI_1 volume add-brick $V1 $H1:$B1/${V1}_1 $H2:$B2/${V1}_1
+
+
+TEST $CLI_1 volume rebalance $V0 start
+TEST $CLI_1 volume rebalance $V1 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V1
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-824753-file-locker.c b/tests/bugs/glusterd/bug-824753-file-locker.c
new file mode 100644
index 00000000000..f5dababad30
--- /dev/null
+++ b/tests/bugs/glusterd/bug-824753-file-locker.c
@@ -0,0 +1,46 @@
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdlib.h>
+
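+/*
+ * Invoked from bug-824753.t as:
+ *   file-locker <volname> <host> <brick-dir> <mount-dir> <file>
+ * Takes a posix write lock on byte 7 of <mount-dir>/<file> and then checks
+ * that 'gluster volume clear-locks' reports the lock on the expected brick.
+ */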
+int
+main(int argc, char *argv[])
+{
+ int fd = -1;
+ int ret = -1;
+ char command[2048] = "";
+ char filepath[255] = "";
+ struct flock fl;
+
+ fl.l_type = F_WRLCK;
+ fl.l_whence = SEEK_SET;
+ fl.l_start = 7;
+ fl.l_len = 1;
+ fl.l_pid = getpid();
+
+ snprintf(filepath, 255, "%s/%s", argv[4], argv[5]);
+
+ fd = open(filepath, O_RDWR);
+
+ if (fd == -1)
+ return -1;
+
+ if (fcntl(fd, F_SETLKW, &fl) == -1) {
+ return -1;
+ }
+
+ snprintf(command, sizeof(command),
+ "gluster volume clear-locks %s /%s kind all posix 0,7-1 |"
+ " grep %s | awk -F'..: ' '{print $1}' | grep %s:%s/%s",
+ argv[1], argv[5], argv[2], argv[2], argv[3], argv[1]);
+
+ ret = system(command);
+ close(fd);
+
+ if (ret)
+ return -1;
+ else
+ return 0;
+}
diff --git a/tests/bugs/glusterd/bug-824753.t b/tests/bugs/glusterd/bug-824753.t
new file mode 100755
index 00000000000..b969e28f35e
--- /dev/null
+++ b/tests/bugs/glusterd/bug-824753.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+touch $M0/file1;
+
+TEST $CC -g $(dirname $0)/bug-824753-file-locker.c -o $(dirname $0)/file-locker
+
+TEST $(dirname $0)/file-locker $V0 $H0 $B0 $M0 file1
+
+## Finish up
+TEST rm -f $(dirname $0)/file-locker
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-948729/bug-948729-force.t b/tests/bugs/glusterd/bug-948729/bug-948729-force.t
new file mode 100644
index 00000000000..f4f71f9a1e2
--- /dev/null
+++ b/tests/bugs/glusterd/bug-948729/bug-948729-force.t
@@ -0,0 +1,103 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+uuid1=`uuidgen`;
+uuid2=`uuidgen`;
+uuid3=`uuidgen`;
+
+V1=patchy1
+V2=patchy2
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+B3=/d/backends/3
+B4=/d/backends/4
+B5=/d/backends/5
+B6=/d/backends/6
+
+mkdir -p $B3 $B4 $B5 $B6
+
+TEST truncate -s 16M $B1/brick1
+TEST truncate -s 16M $B2/brick2
+TEST truncate -s 16M $B3/brick3
+TEST truncate -s 16M $B4/brick4
+TEST truncate -s 16M $B5/brick5
+TEST truncate -s 16M $B6/brick6
+
+TEST LD1=`SETUP_LOOP $B1/brick1`
+TEST MKFS_LOOP $LD1
+TEST LD2=`SETUP_LOOP $B2/brick2`
+TEST MKFS_LOOP $LD2
+TEST LD3=`SETUP_LOOP $B3/brick3`
+TEST MKFS_LOOP $LD3
+TEST LD4=`SETUP_LOOP $B4/brick4`
+TEST MKFS_LOOP $LD4
+TEST LD5=`SETUP_LOOP $B5/brick5`
+TEST MKFS_LOOP $LD5
+TEST LD6=`SETUP_LOOP $B6/brick6`
+TEST MKFS_LOOP $LD6
+
+mkdir -p $B1/$V0 $B2/$V0 $B3/$V0 $B4/$V0 $B5/$V0 $B6/$V0
+
+TEST MOUNT_LOOP $LD1 $B1/$V0
+TEST MOUNT_LOOP $LD2 $B2/$V0
+TEST MOUNT_LOOP $LD3 $B3/$V0
+TEST MOUNT_LOOP $LD4 $B4/$V0
+TEST MOUNT_LOOP $LD5 $B5/$V0
+TEST MOUNT_LOOP $LD6 $B6/$V0
+
+#Case 0: Parent directory of the brick is absent
+TEST ! $CLI1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2 force
+
+#Case 1: File system root is being used as brick directory
+TEST $CLI1 volume create $V0 $H1:$B5/$V0 $H2:$B6/$V0 force
+
+#Case 2: Brick directory contains only one component
+TEST $CLI1 volume create $V1 $H1:/$uuid1 $H2:/$uuid2 force
+
+#Case 3: Sub-directories of the backend FS being used as brick directory
+TEST $CLI1 volume create $V2 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2 force
+
+#add-brick tests
+TEST ! $CLI1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/brick3 force
+TEST $CLI1 volume add-brick $V0 $H1:$B3/$V0 force
+TEST $CLI1 volume add-brick $V1 $H1:/$uuid3 force
+TEST $CLI1 volume add-brick $V2 $H1:$B4/$V0/brick3 force
+
+#####replace-brick tests
+#FIX-ME: replace-brick does not work with the newly introduced cluster test
+#####framework
+
+rmdir /$uuid1 /$uuid2 /$uuid3;
+
+$CLI1 volume stop $V0
+$CLI1 volume stop $V1
+$CLI1 volume stop $V2
+
+UMOUNT_LOOP $B1/$V0
+UMOUNT_LOOP $B2/$V0
+UMOUNT_LOOP $B3/$V0
+UMOUNT_LOOP $B4/$V0
+UMOUNT_LOOP $B5/$V0
+UMOUNT_LOOP $B6/$V0
+
+rm -f $B1/brick1
+rm -f $B2/brick2
+rm -f $B3/brick3
+rm -f $B4/brick4
+rm -f $B5/brick5
+rm -f $B6/brick6
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t b/tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t
new file mode 100644
index 00000000000..18bf9a1c4b6
--- /dev/null
+++ b/tests/bugs/glusterd/bug-948729/bug-948729-mode-script.t
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+uuid1=`uuidgen`;
+uuid2=`uuidgen`;
+uuid3=`uuidgen`;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+B3=/d/backends/3
+mkdir -p $B3
+
+TEST truncate -s 16M $B1/brick1
+TEST truncate -s 16M $B2/brick2
+TEST truncate -s 16M $B3/brick3
+
+TEST LD1=`SETUP_LOOP $B1/brick1`
+TEST MKFS_LOOP $LD1
+TEST LD2=`SETUP_LOOP $B2/brick2`
+TEST MKFS_LOOP $LD2
+TEST LD3=`SETUP_LOOP $B3/brick3`
+TEST MKFS_LOOP $LD3
+
+mkdir -p $B1/$V0 $B2/$V0 $B3/$V0
+
+TEST MOUNT_LOOP $LD1 $B1/$V0
+TEST MOUNT_LOOP $LD2 $B2/$V0
+TEST MOUNT_LOOP $LD3 $B3/$V0
+
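+# Strip --wignore so the brick path checks below are enforced.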
+cli1=$(echo $CLI1 | sed 's/ --wignore//')
+
+#Case 0: Parent directory of the brick is absent
+TEST ! $cli1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2
+
+#Case 1: File system root being used as brick directory
+TEST ! $cli1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+
+#Case 2: Brick directory contains only one component
+TEST ! $cli1 volume create $V0 $H1:/$uuid1 $H2:/$uuid2
+
+#Case 3: Sub-directories of the backend FS being used as brick directory
+TEST $cli1 volume create $V0 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2
+
+#add-brick tests
+TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/brick3
+TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0
+TEST ! $cli1 volume add-brick $V0 $H1:/$uuid3
+TEST $cli1 volume add-brick $V0 $H1:$B3/$V0/brick3
+
+#####replace-brick tests
+#FIX-ME : replace-brick does not currently work in the newly introduced
+#####cluster test framework
+
+$CLI1 volume stop $V0
+
+UMOUNT_LOOP $B1/$V0
+UMOUNT_LOOP $B2/$V0
+UMOUNT_LOOP $B3/$V0
+
+rm -f $B1/brick1
+rm -f $B2/brick2
+rm -f $B3/brick3
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-948729/bug-948729.t b/tests/bugs/glusterd/bug-948729/bug-948729.t
new file mode 100644
index 00000000000..2b574aa1a14
--- /dev/null
+++ b/tests/bugs/glusterd/bug-948729/bug-948729.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+uuid1=`uuidgen`;
+uuid2=`uuidgen`;
+uuid3=`uuidgen`;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+B3=/d/backends/3
+
+mkdir -p $B3
+
+TEST truncate -s 16M $B1/brick1
+TEST truncate -s 16M $B2/brick2
+TEST truncate -s 16M $B3/brick3
+
+TEST LD1=`SETUP_LOOP $B1/brick1`
+TEST MKFS_LOOP $LD1
+TEST LD2=`SETUP_LOOP $B2/brick2`
+TEST MKFS_LOOP $LD2
+TEST LD3=`SETUP_LOOP $B3/brick3`
+TEST MKFS_LOOP $LD3
+
+mkdir -p $B1/$V0 $B2/$V0 $B3/$V0
+
+TEST MOUNT_LOOP $LD1 $B1/$V0
+TEST MOUNT_LOOP $LD2 $B2/$V0
+TEST MOUNT_LOOP $LD3 $B3/$V0
+
+#Tests without options 'mode=script' and 'wignore'
+cli1=$(echo $CLI1 | sed 's/ --mode=script//')
+cli1=$(echo $cli1 | sed 's/ --wignore//')
+#Case 0: Parent directory of the brick is absent
+TEST ! $cli1 volume create $V0 $H1:$B1/$V0/nonexistent/b1 $H2:$B2/$V0/nonexistent/b2
+
+#Case 1: File system root being used as brick directory
+TEST ! $cli1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+
+#Case 2: Brick directory contains only one component
+TEST ! $cli1 volume create $V0 $H1:/$uuid1 $H2:/$uuid2
+
+#Case 3: Sub-directories of the backend FS being used as brick directory
+TEST $cli1 volume create $V0 $H1:$B1/$V0/brick1 $H2:$B2/$V0/brick2
+
+#add-brick tests
+TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0/nonexistent/b3
+TEST ! $cli1 volume add-brick $V0 $H1:$B3/$V0
+TEST ! $cli1 volume add-brick $V0 $H1:/$uuid3
+TEST $cli1 volume add-brick $V0 $H1:$B3/$V0/brick3
+
+#####replace-brick tests
+#FIX-ME: Replace-brick does not work currently in the newly introduced cluster
+#####test framework.
+
+$CLI1 volume stop $V0
+
+UMOUNT_LOOP $B1/$V0
+UMOUNT_LOOP $B2/$V0
+UMOUNT_LOOP $B3/$V0
+
+rm -f $B1/brick1
+rm -f $B2/brick2
+rm -f $B3/brick3
+
+
+cleanup;
diff --git a/tests/bugs/glusterd/bug-949930.t b/tests/bugs/glusterd/bug-949930.t
new file mode 100644
index 00000000000..9a6d38fa37f
--- /dev/null
+++ b/tests/bugs/glusterd/bug-949930.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+V1=patchy2
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume start $V0;
+
+TEST $CLI volume create $V1 $H0:$B0/${V1}{1,2};
+TEST $CLI volume set $V1 nfs.disable off
+TEST $CLI volume start $V1;
+
+TEST ! $CLI volume set $V0 performance.nfs.read-ahead blah
+EXPECT '' volume_option $V0 performance.nfs.read-ahead
+
+TEST $CLI volume set $V0 performance.nfs.read-ahead on
+EXPECT "on" volume_option $V0 performance.nfs.read-ahead
+
+EXPECT '' volume_option $V1 performance.nfs.read-ahead
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/check_elastic_server.t b/tests/bugs/glusterd/check_elastic_server.t
new file mode 100644
index 00000000000..41d2140aa2b
--- /dev/null
+++ b/tests/bugs/glusterd/check_elastic_server.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+function cluster_rebalance_status {
+ local vol=$1
+ $CLI_2 volume status | grep -iw "Rebalance" -A 5 | grep "Status" | sed 's/.*: //'
+}
+
+cleanup;
+TEST launch_cluster 4;
+TEST $CLI_1 peer probe $H2;
+TEST $CLI_1 peer probe $H3;
+TEST $CLI_1 peer probe $H4;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+#Mount invalid volume
+TEST ! glusterfs -s $H1 --volfile-id=${V0}_NA $M0;
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume remove-brick $V0 $H1:$B1/$V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_remove_brick_status_completed_field "$V0 $H1:$B1/$V0"
+
+TEST $CLI_1 volume remove-brick $V0 $H1:$B1/$V0 commit
+
+kill_glusterd 1
+
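+# all 16 files (4 dirs x 4 files) should remain reachable through the mount
+# even though glusterd on node 1 is down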
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+TEST $CLI_2 volume add-brick $V0 $H3:$B3/$V0
+
+TEST $CLI_2 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status $V0
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+TEST $CLI_2 volume add-brick $V0 $H4:$B4/$V0
+
+TEST $CLI_2 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status $V0
+kill_glusterd 2
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/daemon-log-level-option.t b/tests/bugs/glusterd/daemon-log-level-option.t
new file mode 100644
index 00000000000..66e55e3d758
--- /dev/null
+++ b/tests/bugs/glusterd/daemon-log-level-option.t
@@ -0,0 +1,93 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+function Info_messages_count() {
+ local shd_log=$1
+ cat $shd_log | grep " I " | wc -l
+}
+
+function Warning_messages_count() {
+ local shd_log=$1
+ cat $shd_log | grep " W " | wc -l
+}
+
+function Debug_messages_count() {
+ local shd_log=$1
+ cat $shd_log | grep " D " | wc -l
+}
+
+function Trace_messages_count() {
+ local shd_log=$1
+ cat $shd_log | grep " T " | wc -l
+}
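+
+# The helpers above count log lines by severity: gluster log lines carry a
+# single-letter level field, e.g. (illustrative):
+#   [2023-01-01 00:00:00.000000] I [MSGID: 100030] [glusterfsd.c:...] ...
+# so grepping for " I ", " W ", " D " or " T " approximates per-level counts.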
+
+cleanup;
+
+# Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+# set cluster.daemon-log-level option to DEBUG
+TEST $CLI volume set all cluster.daemon-log-level DEBUG
+
+#Create a 3X2 distributed-replicate volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..6};
+TEST $CLI volume start $V0
+
+# log should not have any trace messages
+EXPECT 0 Trace_messages_count "/var/log/glusterfs/glustershd.log"
+
+# stop the volume and remove glustershd log
+TEST $CLI volume stop $V0
+rm -f /var/log/glusterfs/glustershd.log
+
+# set cluster.daemon-log-level option to INFO and start the volume
+TEST $CLI volume set all cluster.daemon-log-level INFO
+TEST $CLI volume start $V0
+
+# log should not have any debug messages
+EXPECT 0 Debug_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any trace messages
+EXPECT 0 Trace_messages_count "/var/log/glusterfs/glustershd.log"
+
+# stop the volume and remove glustershd log
+TEST $CLI volume stop $V0
+rm -f /var/log/glusterfs/glustershd.log
+
+# set cluster.daemon-log-level option to WARNING and start the volume
+TEST $CLI volume set all cluster.daemon-log-level WARNING
+TEST $CLI volume start $V0
+
+# log should not have any info messages
+EXPECT 0 Info_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any debug messages
+EXPECT 0 Debug_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any trace messages
+EXPECT 0 Trace_messages_count "/var/log/glusterfs/glustershd.log"
+
+# stop the volume and remove glustershd log
+TEST $CLI volume stop $V0
+rm -f /var/log/glusterfs/glustershd.log
+
+# set cluster.daemon-log-level option to ERROR and start the volume
+TEST $CLI volume set all cluster.daemon-log-level ERROR
+TEST $CLI volume start $V0
+
+# log should not have any info messages
+EXPECT 0 Info_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any warning messages
+EXPECT 0 Warning_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any debug messages
+EXPECT 0 Debug_messages_count "/var/log/glusterfs/glustershd.log"
+
+# log should not have any trace messages
+EXPECT 0 Trace_messages_count "/var/log/glusterfs/glustershd.log"
+
+cleanup
diff --git a/tests/bugs/glusterd/df-results-post-replace-brick-operations.t b/tests/bugs/glusterd/df-results-post-replace-brick-operations.t
new file mode 100644
index 00000000000..04f75889388
--- /dev/null
+++ b/tests/bugs/glusterd/df-results-post-replace-brick-operations.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+TEST glusterd
+
+#Create brick partitions
+TEST truncate -s 100M $B0/brick1
+TEST truncate -s 100M $B0/brick2
+TEST truncate -s 100M $B0/brick3
+TEST truncate -s 100M $B0/brick4
+TEST truncate -s 100M $B0/brick5
+
+LO1=`SETUP_LOOP $B0/brick1`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO1
+
+LO2=`SETUP_LOOP $B0/brick2`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO2
+
+LO3=`SETUP_LOOP $B0/brick3`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO3
+
+LO4=`SETUP_LOOP $B0/brick4`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO4
+
+LO5=`SETUP_LOOP $B0/brick5`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO5
+
+TEST mkdir -p $B0/${V0}1 $B0/${V0}2 $B0/${V0}3 $B0/${V0}4 $B0/${V0}5
+TEST MOUNT_LOOP $LO1 $B0/${V0}1
+TEST MOUNT_LOOP $LO2 $B0/${V0}2
+TEST MOUNT_LOOP $LO3 $B0/${V0}3
+TEST MOUNT_LOOP $LO4 $B0/${V0}4
+TEST MOUNT_LOOP $LO5 $B0/${V0}5
+
+# create a subdirectory in mount point and use it for volume creation
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}1/brick1 $H0:$B0/${V0}2/brick1 $H0:$B0/${V0}3/brick1
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" online_brick_count
+
+# mount the volume and check the size at mount point
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+total_space=$(df -P $M0 | tail -1 | awk '{ print $2}')
+
+# perform replace brick operations
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1/brick1 $H0:$B0/${V0}4/brick1 commit force
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}2/brick1 $H0:$B0/${V0}5/brick1 commit force
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# check the size at the mount point; it should be the same as before
+total_space_new=$(df -P $M0 | tail -1 | awk '{ print $2}')
+TEST [ $total_space -eq $total_space_new ]
diff --git a/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
new file mode 100644
index 00000000000..8001359e6b3
--- /dev/null
+++ b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
@@ -0,0 +1,71 @@
+#! /bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
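+# helper: the eval indirection expands \$CLI_$1 to $CLI_1/$CLI_2/..., so the
+# caller picks which node's CLI reports the connected-peer count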
+function check_peers {
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup
+
+TEST launch_cluster 3
+
+TEST $CLI_1 peer probe $H2
+
+#bug-1109741 - validate mgmt handshake
+
+TEST ! $CLI_3 peer probe $H1
+
+GD1_WD=$($CLI_1 system getwd)
+OP_VERS_ORIG=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
+
+TEST $CLI_3 system uuid get # Needed for glusterd.info to be created
+
+GD3_WD=$($CLI_3 system getwd)
+TEST sed -rnie "'s/(operating-version=)\w+/\130600/gip'" ${GD3_WD}/glusterd.info
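+# (the sed above rewrites node 3's operating-version to 30600 so that, after
+# the restart below, its op-version no longer matches the cluster and the
+# probe is rejected)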
+
+TEST kill_glusterd 3
+TEST start_glusterd 3
+
+TEST ! $CLI_3 peer probe $H1
+
+OP_VERS_NEW=$(grep 'operating-version' ${GD1_WD}/glusterd.info | cut -d '=' -f 2)
+TEST [[ $OP_VERS_ORIG == $OP_VERS_NEW ]]
+
+#bug-948686 - volume sync after bringing up the killed node
+
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0
+TEST $CLI_1 volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
+TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
+
+#kill a node
+TEST kill_node 3
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers 2
+
+#modify volume config to see change in volume-sync
+TEST $CLI_1 volume set $V0 write-behind off
+#add some files to the volume to see effect of volume-heal cmd
+TEST touch $M0/{1..100};
+TEST $CLI_1 volume stop $V0;
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 'Stopped' volinfo_field_1 $V0 'Status'
+
+TEST $glusterd_3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3
+
+sleep 5
+TEST $CLI_3 volume start $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
+TEST $CLI_2 volume stop $V0;
+TEST $CLI_2 volume delete $V0;
+
+cleanup
diff --git a/tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t b/tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t
new file mode 100644
index 00000000000..99272e14245
--- /dev/null
+++ b/tests/bugs/glusterd/optimized-basic-testcases-in-cluster.t
@@ -0,0 +1,115 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+function peer_count {
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+#bug-1454418 - Setting Port number in specific range
+sysctl net.ipv4.ip_local_reserved_ports="24007-24008,32765-32768,49152-49156"
+
+TEST launch_cluster 4;
+
+#bug-1223213
+
+# Fool the cluster into operating with the 3.5 op-version even though the
+# binary's op-version is > 3.5. This ensures the 3.5 code path is hit, to
+# test that volume status works when a node is upgraded from 3.5 to 3.7 or
+# higher, since the mgmt_v3 lock was introduced in 3.6 and onwards.
+
+GD1_WD=$($CLI_1 system getwd)
+$CLI_1 system uuid get
+Old_op_version=$(cat ${GD1_WD}/glusterd.info | grep operating-version | cut -d '=' -f 2)
+
+TEST sed -rnie "'s/(operating-version=)\w+/\130500/gip'" ${GD1_WD}/glusterd.info
+
+TEST kill_glusterd 1
+TEST start_glusterd 1
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+
+TEST sed -i "s/30500/${Old_op_version}/g" ${GD1_WD}/glusterd.info
+
+TEST kill_glusterd 1
+TEST start_glusterd 1
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
+
+#bug-1454418
+sysctl net.ipv4.ip_local_reserved_ports="
+"
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+#bug-888752 - volume status --xml from peer in the cluster
+
+TEST $CLI_1 volume status $V0 $H2:$B2/$V0 --xml
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume create $V1 $H1:$B1/$V1
+
+# bug-1635820
+# rebooting a node which doesn't host bricks for any volume:
+# the peer should not go into the rejected state
+TEST kill_glusterd 2
+TEST start_glusterd 2
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
+
+TEST $CLI_1 volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
+
+TEST $CLI_1 volume start $V1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V1 'Status'
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+TEST $CLI_1 peer probe $H4;
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count 1
+
+#bug-1173414 - validate mgmt-v3-remote-lock-failure
+
+for i in {1..20}
+do
+$CLI_1 volume set $V0 diagnostics.client-log-level DEBUG &
+$CLI_1 volume set $V1 barrier on
+$CLI_2 volume set $V0 diagnostics.client-log-level DEBUG &
+$CLI_2 volume set $V1 barrier on
+done
+
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count 1
+TEST $CLI_1 volume status
+TEST $CLI_2 volume status
+
+#bug-1293414 - validate peer detach
+
+# peers hosting bricks cannot be detached
+TEST ! $CLI_4 peer detach $H1
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count 1
+
+# peer not hosting bricks should be detachable
+TEST $CLI_4 peer detach $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+#bug-1344407 - deleting a volume when peer is down should fail
+
+#volume should be stopped before deletion
+TEST $CLI_1 volume stop $V0
+
+TEST kill_glusterd 2
+TEST ! $CLI_1 volume delete $V0
+
+cleanup
diff --git a/tests/bugs/glusterd/optimized-basic-testcases.t b/tests/bugs/glusterd/optimized-basic-testcases.t
new file mode 100644
index 00000000000..b89ca22415e
--- /dev/null
+++ b/tests/bugs/glusterd/optimized-basic-testcases.t
@@ -0,0 +1,305 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
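+# helper: parses the <opRet> element from the CLI's XML output; 0 on success,
+# -1 for a nonexistent volume, as checked below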
+function get_opret_value () {
+ local VOL=$1
+ $CLI volume info $VOL --xml | sed -ne 's/.*<opRet>\([-0-9]*\)<\/opRet>/\1/p'
+}
+
+function check_brick()
+{
+ vol=$1;
+ num=$2
+ $CLI volume info $V0 | grep "Brick$num" | awk '{print $2}';
+}
+
+function brick_count()
+{
+ local vol=$1;
+
+ $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
+}
+
+function get_brick_host_uuid()
+{
+ local vol=$1;
+ local uuid_regex='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
+ local host_uuid_list=$($CLI volume info $vol --xml | grep "brick.uuid" | grep -o -E "$uuid_regex");
+
+ echo $host_uuid_list | awk '{print $1}'
+}
+
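+# helper: SIGUSR1 makes glusterd write a statedump into $statedumpdir; the
+# function then counts "xlator.glusterd.priv" entries in the fresh dump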
+function generate_statedump_and_check_for_glusterd_info {
+ pid=`pidof glusterd`
+ #remove old stale statedumps
+ cleanup_statedump $pid
+ kill -USR1 $pid
+ #Wait till the statedump is generated
+ sleep 1
+ fname=$(ls $statedumpdir | grep -E "\.$pid\.dump\.")
+ cat $statedumpdir/$fname | grep "xlator.glusterd.priv" | wc -l
+}
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+#bug-1238135-lazy-daemon-initialization-on-demand
+
+GDWD=$($CLI system getwd)
+
+# glusterd.info is created on either the first peer probe or volume
+# creation, hence we expect the file to be absent at this point
+TEST ! -e $GDWD/glusterd.info
+
+#bug-913487 - setting volume options before creation of volume should fail
+
+TEST ! $CLI volume set $V0 performance.open-behind off;
+TEST pidof glusterd;
+
+#bug-1433578 - glusterd should not crash after probing an invalid peer
+
+TEST ! $CLI peer probe invalid-peer
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+#bug-1786478 - default volume option after volume reset
+addr_family=`volinfo_field $V0 'transport.address-family'`
+TEST $CLI volume reset $V0
+EXPECT $addr_family volinfo_field $V0 'transport.address-family'
+
+#bug-955588 - uuid validation
+
+uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
+EXPECT $uuid get_brick_host_uuid $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+#bug-958790 - set options from file
+
+touch $GLUSTERD_WORKDIR/groups/test
+echo "read-ahead=off" > $GLUSTERD_WORKDIR/groups/test
+echo "open-behind=off" >> $GLUSTERD_WORKDIR/groups/test
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 group test
+EXPECT "off" volume_option $V0 performance.read-ahead
+EXPECT "off" volume_option $V0 performance.open-behind
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+#bug-1321836 - validate opret value for non existing volume
+
+EXPECT 0 get_opret_value $V0
+EXPECT -1 get_opret_value "novol"
+
+EXPECT '2' brick_count $V0
+
+#bug-862834 - validate brick status
+
+EXPECT "$H0:$B0/${V0}1" check_brick $V0 '1';
+EXPECT "$H0:$B0/${V0}2" check_brick $V0 '2';
+
+TEST ! $CLI volume create $V1 $H0:$B0/${V1}0 $H0:$B0/${V0}1;
+
+#bug-1482344 - setting volume-option-at-cluster-level should not result in glusterd crash
+
+TEST ! $CLI volume set all transport.listen-backlog 128
+
+# Check the volume info output; if glusterd had crashed then this command
+# would fail
+TEST $CLI volume info $V0;
+
+#bug-1002556 and bug-1199451 - command should retrieve current op-version of the node
+TEST $CLI volume get all cluster.op-version
+
+#bug-1315186 - reject-lowering-down-op-version
+
+OP_VERS_ORIG=$(grep 'operating-version' ${GDWD}/glusterd.info | cut -d '=' -f 2)
+OP_VERS_NEW=`expr $OP_VERS_ORIG - 1`
+
+TEST ! $CLI volume set all $V0 cluster.op-version $OP_VERS_NEW
+
+#bug-1022055 - validate log rotate command
+
+TEST ! $CLI volume log rotate $V0;
+TEST $CLI volume log $V0 rotate;
+
+#bug-1092841 - validating barrier enable/disable
+
+TEST $CLI volume barrier $V0 enable;
+TEST ! $CLI volume barrier $V0 enable;
+
+TEST $CLI volume barrier $V0 disable;
+TEST ! $CLI volume barrier $V0 disable;
+
+#bug-1095097 - validate volume profile command
+
+TEST $CLI volume profile $V0 start
+TEST $CLI volume profile $V0 info
+
+#bug-839595 - validate server-quorum options
+
+TEST $CLI volume set $V0 cluster.server-quorum-type server
+EXPECT "server" volume_option $V0 cluster.server-quorum-type
+TEST $CLI volume set $V0 cluster.server-quorum-type none
+EXPECT "none" volume_option $V0 cluster.server-quorum-type
+TEST $CLI volume reset $V0 cluster.server-quorum-type
+TEST ! $CLI volume set $V0 cluster.server-quorum-type abc
+TEST ! $CLI volume set all cluster.server-quorum-type none
+TEST ! $CLI volume set $V0 cluster.server-quorum-ratio 100
+
+TEST ! $CLI volume set all cluster.server-quorum-ratio abc
+TEST ! $CLI volume set all cluster.server-quorum-ratio -1
+TEST ! $CLI volume set all cluster.server-quorum-ratio 100.0000005
+TEST $CLI volume set all cluster.server-quorum-ratio 0
+EXPECT "0" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 100
+EXPECT "100" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 0.0000005
+EXPECT "0.0000005" volume_option $V0 cluster.server-quorum-ratio
+TEST $CLI volume set all cluster.server-quorum-ratio 100%
+EXPECT "100%" volume_option $V0 cluster.server-quorum-ratio
+
+#bug-1265479 - validate-distributed-volume-options
+
+#Setting data-self-heal option on for distribute volume
+TEST ! $CLI volume set $V0 data-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.data-self-heal';
+TEST ! $CLI volume set $V0 cluster.data-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.data-self-heal';
+
+#Setting metadata-self-heal option on for distribute volume
+TEST ! $CLI volume set $V0 metadata-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';
+TEST ! $CLI volume set $V0 cluster.metadata-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.metadata-self-heal';
+
+#Setting entry-self-heal option on for distribute volume
+TEST ! $CLI volume set $V0 entry-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.entrydata-self-heal';
+TEST ! $CLI volume set $V0 cluster.entry-self-heal on
+EXPECT '' volinfo_field $V0 'cluster.entrydata-self-heal';
+
+#bug-1163108 - validate min-free-disk-option
+
+## Setting invalid value for option cluster.min-free-disk should fail
+TEST ! $CLI volume set $V0 min-free-disk ""
+TEST ! $CLI volume set $V0 min-free-disk 143.!/12
+TEST ! $CLI volume set $V0 min-free-disk 123%
+TEST ! $CLI volume set $V0 min-free-disk 194.34%
+
+## Setting fractional value as a size (unit is byte) for option
+## cluster.min-free-disk should fail
+TEST ! $CLI volume set $V0 min-free-disk 199.051
+TEST ! $CLI volume set $V0 min-free-disk 111.999
+
+## Setting valid value for option cluster.min-free-disk should pass
+TEST $CLI volume set $V0 min-free-disk 12%
+TEST $CLI volume set $V0 min-free-disk 56.7%
+TEST $CLI volume set $V0 min-free-disk 120
+TEST $CLI volume set $V0 min-free-disk 369.0000
+
+#bug-1179175-uss-option-validation
+
+## Set the features.uss option to non-boolean values. These non-boolean
+## values for the features.uss option should fail.
+TEST ! $CLI volume set $V0 features.uss abcd
+TEST ! $CLI volume set $V0 features.uss '#$#$'
+TEST ! $CLI volume set $V0 features.uss 2324
+
+## Setting other options with valid value. These options should succeed.
+TEST $CLI volume set $V0 barrier enable
+TEST $CLI volume set $V0 ping-timeout 60
+
+## Set features.uss option with valid boolean value. It should succeed.
+TEST $CLI volume set $V0 features.uss enable
+TEST $CLI volume set $V0 features.uss disable
+
+
+#bug-1209329 - daemon-svcs-on-reset-volume
+
+##enable bitrot and verify whether bitd is running
+TEST $CLI volume bitrot $V0 enable
+EXPECT 'on' volinfo_field $V0 'features.bitrot'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+##Do reset force, which sets the bitrot options back to default
+TEST $CLI volume reset $V0 force;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_bitd_count
+
+##enable the uss option and verify whether snapd is running
+TEST $CLI volume set $V0 features.uss on
+EXPECT 'on' volinfo_field $V0 'features.uss'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count
+
+##Do reset force, which sets the uss options back to default
+TEST $CLI volume reset $V0 force;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count
+
+##verify that nfs is initially disabled by default
+EXPECT "0" get_nfs_count
+
+##enable nfs and verify
+TEST $CLI volume set $V0 nfs.disable off
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+EXPECT "1" get_nfs_count
+
+##Do reset force, which sets the nfs options back to default
+TEST $CLI volume reset $V0 force;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count
+
+##enable the uss option and verify whether snapd is running
+TEST $CLI volume set $V0 features.uss on
+EXPECT 'on' volinfo_field $V0 'features.uss'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_snapd_count
+
+##Disable the uss option using the set command and verify snapd goes down
+TEST $CLI volume set $V0 features.uss off
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_snapd_count
+
+##enable nfs.disable and verify
+TEST $CLI volume set $V0 nfs.disable on
+EXPECT 'on' volinfo_field $V0 'nfs.disable'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_nfs_count
+
+## disable nfs.disable option using set command
+TEST $CLI volume set $V0 nfs.disable off
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_nfs_count
+
+TEST $CLI volume info;
+TEST $CLI volume create $V1 $H0:$B0/${V1}1
+TEST $CLI volume start $V1
+pkill glusterd;
+pkill glusterfsd;
+TEST glusterd
+TEST $CLI volume status $V1
+
+#bug-853601 - Avoid using /var/lib/glusterd as a brick
+TEST ! $CLI volume create "test" $H0:/var/lib/glusterd
+TEST ! $CLI volume create "test" $H0:/var/lib/glusterd force
+TEST ! $CLI volume create "test" $H0:/var/lib/glusterd/abc
+TEST ! $CLI volume create "test" $H0:/var/lib/glusterd/abc force
+mkdir -p /xyz/var/lib/glusterd/abc
+
+#bug 1716812 - volfile should be created with transport type both
+TEST $CLI volume create "test" transport tcp,rdma $H0:/xyz/var/lib/glusterd/abc
+EXPECT 'Created' volinfo_field "test" 'Status';
+
+#While taking a statedump, there is a TRY_LOCK on call_frame, which may
+#cause a failure, so add an EXPECT_WITHIN
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" generate_statedump_and_check_for_glusterd_info
+
+cleanup_statedump `pidof glusterd`
+cleanup
diff --git a/tests/bugs/glusterd/quorum-validation.t b/tests/bugs/glusterd/quorum-validation.t
new file mode 100644
index 00000000000..3cc3351b43b
--- /dev/null
+++ b/tests/bugs/glusterd/quorum-validation.t
@@ -0,0 +1,122 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+TEST launch_cluster 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+TEST $CLI_1 volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
+
+#bug-1177132 - sync server quorum options when a node is brought up
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 52
+
+#Bring down 2nd glusterd
+TEST kill_glusterd 2
+EXPECT_WITHIN $PROBE_TIMEOUT 0 peer_count
+
+#bug-1104642 - sync server quorum options when a node is brought up
+#set the volume all options from the 1st glusterd
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
+
+# Now quorum is not met. Add-brick, remove-brick and volume-set commands
+# (commands based on the syncop framework) should fail
+TEST ! $CLI_1 volume add-brick $V0 $H1:$B1/${V0}2
+TEST ! $CLI_1 volume remove-brick $V0 $H1:$B1/${V0}0 start
+TEST ! $CLI_1 volume set $V0 barrier enable
+
+#quorum is not met, rebalance/profile start should fail
+TEST ! $CLI_1 volume rebalance $V0 start
+TEST ! $CLI_1 volume profile $V0 start
+
+#bug-1690753 - volume stop should fail when quorum is not met
+TEST ! $CLI_1 volume stop $V0
+
+#Bring back the 2nd glusterd
+TEST $glusterd_2
+
+#verify whether the value has been synced
+EXPECT_WITHIN $PROBE_TIMEOUT "80" volinfo_field_1 all cluster.server-quorum-ratio
+EXPECT_WITHIN $PROBE_TIMEOUT '1' peer_count
+EXPECT_WITHIN $PROBE_TIMEOUT "80" volinfo_field_2 all cluster.server-quorum-ratio
+
+# Now quorum is met.
+# Add-brick, remove-brick and volume-set commands should succeed
+TEST $CLI_1 volume add-brick $V0 $H2:$B2/${V0}2
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 start
+TEST $CLI_1 volume set $V0 barrier enable
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 stop
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
+
+## Stop the volume
+TEST $CLI_1 volume stop $V0
+
+## Bring down 2nd glusterd
+TEST kill_glusterd 2
+
+## Now quorum is not met. Starting the volume on the 1st node should not succeed
+TEST ! $CLI_1 volume start $V0
+
+## Bring back 2nd glusterd
+TEST $glusterd_2
+
+# After the 2nd glusterd comes back, there will be 2 nodes in the cluster
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+## Now quorum is met. Starting the volume on the 1st node should succeed.
+TEST $CLI_1 volume start $V0
+
+# Now re-execute the same profile command and this time it should succeed
+TEST $CLI_1 volume profile $V0 start
+
+#bug-1352277
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
+
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type none
+
+# Bring down all the gluster processes
+TEST killall_gluster
+
+#bring back 1st glusterd and check whether the brick process comes back
+TEST $glusterd_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+
+#enabling quorum should bring down the brick
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+
+TEST $glusterd_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
+
+#bug-1367478 - brick processes should not be up when quorum is not met
+TEST $CLI_1 volume create $V1 $H1:$B1/${V1}1 $H2:$B2/${V1}2
+TEST $CLI_1 volume start $V1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H1 $B1/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H2 $B2/${V1}2
+
+# Restart 2nd glusterd
+TEST kill_glusterd 2
+TEST $glusterd_2
+
+# Check if all bricks are up
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H1 $B1/${V1}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V1 $H2 $B2/${V1}2
+
+cleanup
diff --git a/tests/bugs/glusterd/rebalance-in-cluster.t b/tests/bugs/glusterd/rebalance-in-cluster.t
new file mode 100644
index 00000000000..469ec6cd48e
--- /dev/null
+++ b/tests/bugs/glusterd/rebalance-in-cluster.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
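+# helper: prints field 7 of the third line of the rebalance status output,
+# i.e. this node's status column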
+function rebalance_status_field_1 {
+ $CLI_1 volume rebalance $1 status | awk '{print $7}' | sed -n 3p
+}
+
+cleanup;
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+$CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+#bug-1231437
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
+
+TEST $CLI_1 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
+
+#bug-1764119 - rebalance status should display detailed info when any of the nodes is down
+TEST kill_glusterd 2
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field_1 $V0
+
+TEST start_glusterd 2
+#bug-1245142
+
+$CLI_1 volume rebalance $V0 start &
+#kill glusterd2 after the request is sent, so that the callback is invoked
+#with rpc->status set to failure; a delay of roughly 1s is introduced to hit
+#this scenario
+sleep 1
+kill_glusterd 2
+#check glusterd commands are working after rebalance start command
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/rebalance-operations-in-single-node.t b/tests/bugs/glusterd/rebalance-operations-in-single-node.t
new file mode 100644
index 00000000000..ef85887f440
--- /dev/null
+++ b/tests/bugs/glusterd/rebalance-operations-in-single-node.t
@@ -0,0 +1,131 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
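+# helper: prints one column of the rebalance status line; the indices used
+# below are 2=rebalanced files, 3=size, 4=scanned, 5=failures, 6=skipped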
+function get_rebalanced_info()
+{
+ local rebal_info_key=$2
+ $CLI volume rebalance $1 status | awk '{print $'$rebal_info_key'}' |sed -n 3p| sed 's/ *$//g'
+}
+
+volname="StartMigrationDuringRebalanceTest"
+TEST glusterd
+TEST pidof glusterd;
+
+TEST $CLI volume info;
+TEST $CLI volume create $volname $H0:$B0/${volname}{1..4};
+TEST $CLI volume start $volname;
+
+#bug-1046308 - validate rebalance on a specified volume name
+TEST $CLI volume rebalance $volname start;
+
+#bug-1089668 - validation of rebalance status and remove brick status
+#bug-963541 - after a remove-brick start, rebalance/remove-brick start without committing should fail
+
+TEST ! $CLI volume remove-brick $volname $H0:$B0/${volname}1 status
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $volname
+
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}1 start
+TEST ! $CLI volume rebalance $volname start
+TEST ! $CLI volume rebalance $volname status
+TEST ! $CLI volume remove-brick $volname $H0:$B0/${volname}2 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field \
+"$volname" "$H0:$B0/${volname}1"
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}1 commit
+
+TEST $CLI volume rebalance $volname start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $volname
+TEST $CLI volume rebalance $volname stop
+
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}2 start
+TEST $CLI volume remove-brick $volname $H0:$B0/${volname}2 stop
+
+#bug-1351021-rebalance-info-post-glusterd-restart
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3};
+TEST $CLI volume start $V0;
+
+#Mount volume and create data
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+TEST mkdir $M0/dir{1..10}
+TEST touch $M0/dir{1..10}/file{1..10}
+
+# Add-brick and start rebalance
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4
+TEST $CLI volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+#Rebalance info before glusterd restart
+OLD_REBAL_FILES=$(get_rebalanced_info $V0 2)
+OLD_SIZE=$(get_rebalanced_info $V0 3)
+OLD_SCANNED=$(get_rebalanced_info $V0 4)
+OLD_FAILURES=$(get_rebalanced_info $V0 5)
+OLD_SKIPPED=$(get_rebalanced_info $V0 6)
+
+
+pkill glusterd;
+pkill glusterfsd;
+TEST glusterd
+
+#Rebalance info after glusterd restart
+NEW_REBAL_FILES=$(get_rebalanced_info $V0 2)
+NEW_SIZE=$(get_rebalanced_info $V0 3)
+NEW_SCANNED=$(get_rebalanced_info $V0 4)
+NEW_FAILURES=$(get_rebalanced_info $V0 5)
+NEW_SKIPPED=$(get_rebalanced_info $V0 6)
+#Check rebalance info before and after glusterd restart
+TEST [ $OLD_REBAL_FILES == $NEW_REBAL_FILES ]
+TEST [ $OLD_SIZE == $NEW_SIZE ]
+TEST [ $OLD_SCANNED == $NEW_SCANNED ]
+TEST [ $OLD_FAILURES == $NEW_FAILURES ]
+TEST [ $OLD_SKIPPED == $NEW_SKIPPED ]
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#bug-1004744 - validation of rebalance fix layout
+
+TEST $CLI volume start $V0 force
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+for i in `seq 11 20`;
+do
+ mkdir $M0/dir_$i
+ echo file>$M0/dir_$i/file_$i
+ for j in `seq 1 100`;
+ do
+ mkdir $M0/dir_$i/dir_$j
+ echo file>$M0/dir_$i/dir_$j/file_$j
+ done
+done
+
+#add 2 bricks
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{5,6};
+
+#perform rebalance fix-layout
+TEST $CLI volume rebalance $V0 fix-layout start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" fix-layout_status_field $V0;
+
+#bug-1075087 - rebalance post add brick
+TEST mkdir $M0/dir{21..30};
+TEST touch $M0/dir{21..30}/files{1..10};
+
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{7,8}
+
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN 180 "completed" rebalance_status_field $V0
+
+TEST pkill gluster
+TEST glusterd
+TEST pidof glusterd
+
+# status should be "completed" immediately after glusterd has respawned.
+EXPECT_WITHIN 20 "completed" rebalance_status_field $V0
+
+cleanup
diff --git a/tests/bugs/glusterd/remove-brick-in-cluster.t b/tests/bugs/glusterd/remove-brick-in-cluster.t
new file mode 100644
index 00000000000..de94220a906
--- /dev/null
+++ b/tests/bugs/glusterd/remove-brick-in-cluster.t
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+TEST launch_cluster 2;
+
+#bug-1047955 - remove brick from new peer in cluster
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}{1,2,3,4}
+TEST $CLI_1 volume start $V0;
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_2 volume remove-brick $V0 $H1:$B1/${V0}{3,4} start;
+TEST $CLI_2 volume info
+
+#bug-964059 - volume status post remove brick start
+TEST $CLI_1 volume create $V1 $H1:$B1/${V1}0 $H2:$B2/${V1}1
+TEST $CLI_1 volume start $V1
+TEST $CLI_1 volume remove-brick $V1 $H2:$B2/${V1}1 start
+TEST $CLI_1 volume status
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+#bug-1230121 - decrease replica count by remove-brick and increase it by add-brick
+## Creating a 2x3 replicate volume
+TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/brick1 $H2:$B2/brick2 \
+ $H1:$B1/brick3 $H2:$B2/brick4 \
+ $H1:$B1/brick5 $H2:$B2/brick6
+
+## Start the volume
+TEST $CLI_1 volume start $V0
+
+## Shrinking volume replica 2x3 to 2x2 by performing remove-brick operation.
+TEST $CLI_1 volume remove-brick $V0 replica 2 $H1:$B1/brick1 $H2:$B2/brick6 force
+
+## Shrinking volume replica 2x2 to 1x2 by performing remove-brick operation
+TEST $CLI_1 volume remove-brick $V0 replica 2 $H1:$B1/brick3 $H2:$B2/brick2 force
+
+## Shrinking volume replica from 1x2 to 1x1 by performing remove-brick operation
+TEST $CLI_1 volume remove-brick $V0 replica 1 $H1:$B1/brick5 force
+
+
+### Expanding volume replica by performing add-brick operation.
+
+## Expand volume replica from 1x1 to 1x2 by performing an add-brick operation
+TEST $CLI_1 volume add-brick $V0 replica 2 $H1:$B1/brick5 force
+
+## Expand volume replica from 1x2 to 2x2 by performing an add-brick operation
+TEST $CLI_1 volume add-brick $V0 replica 2 $H1:$B1/brick3 $H2:$B2/brick2 force
+
+## Expand volume replica from 2x2 to 2x3 by performing an add-brick operation
+TEST $CLI_1 volume add-brick $V0 replica 3 $H1:$B1/brick1 $H2:$B2/brick6 force
+
+cleanup
+
diff --git a/tests/bugs/glusterd/remove-brick-testcases.t b/tests/bugs/glusterd/remove-brick-testcases.t
new file mode 100644
index 00000000000..2f982d5266f
--- /dev/null
+++ b/tests/bugs/glusterd/remove-brick-testcases.t
@@ -0,0 +1,119 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function brick_count()
+{
+ local vol=$1;
+
+ $CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..5}
+TEST $CLI volume start $V0
+
+#bug-1225716 - remove-brick on a brick which is down should fail
+#kill a brick process
+kill_brick $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status $V0 $H0 $B0/${V0}1
+
+#remove-brick start should fail as the brick is down
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+#remove-brick start should succeed as the brick is up
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}1"
+
+#kill a brick process
+kill_brick $V0 $H0 $B0/${V0}1
+
+#remove-brick commit should pass even if the brick is down
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 commit
+
+#bug-1121584 - brick-existing-validation-for-remove-brick-status-stop
+## Start remove-brick operation on the volume
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
+
+## Giving a non-existent brick to the remove-brick status/stop command
+## should return an error.
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/ABCD status
+TEST ! $CLI volume remove-brick $V0 $H0:$B0/ABCD stop
+
+## Giving a brick which is part of the volume to the remove-brick status/stop
+## command should print the statistics of the remove-brick operation or stop
+## the operation, respectively.
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 status
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop
+
+#bug-878004 - validate remove brick force
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force;
+EXPECT '3' brick_count $V0
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 force;
+EXPECT '2' brick_count $V0
+
+#bug-1027171 - Do not allow commit if the bricks are not decommissioned
+#Remove bricks and commit without starting
+function remove_brick_commit_status {
+ $CLI volume remove-brick $V0 \
+ $H0:$B0/${V0}4 commit 2>&1 |grep -oE "success|decommissioned"
+}
+EXPECT "decommissioned" remove_brick_commit_status;
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+
+#Create a 2X3 distributed-replicate volume
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..6};
+TEST $CLI volume start $V0
+
+#Try to reduce replica count with start option
+function remove_brick_start_status {
+ $CLI volume remove-brick $V0 replica 2 \
+ $H0:$B0/${V0}3 $H0:$B0/${V0}6 start 2>&1 |grep -oE "success|failed"
+}
+EXPECT "failed" remove_brick_start_status;
+
+#Remove bricks with commit option
+function remove_brick_commit_status2 {
+ $CLI volume remove-brick $V0 replica 2 \
+ $H0:$B0/${V0}3 $H0:$B0/${V0}6 commit 2>&1 |
+ grep -oE "success|decommissioned"
+}
+EXPECT "decommissioned" remove_brick_commit_status2;
+TEST $CLI volume info $V0
+
+#bug-1040408 - reduce replica count of distributed replicate volume
+
+# Reduce to 2x2 volume by specifying bricks in reverse order
+function remove_brick_status {
+ $CLI volume remove-brick $V0 replica 2 \
+ $H0:$B0/${V0}6 $H0:$B0/${V0}3 force 2>&1 |grep -oE "success|failed"
+}
+EXPECT "success" remove_brick_status;
+TEST $CLI volume info $V0
+
+#bug-1120647 - remove brick validation
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}{4..5} start
+EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}5"
+EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0 $H0:$B0/${V0}4"
+TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}{4..5} commit
+TEST $CLI volume remove-brick $V0 replica 1 $H0:$B0/${V0}2 force
+
+cleanup
diff --git a/tests/bugs/glusterd/remove-brick-validation.t b/tests/bugs/glusterd/remove-brick-validation.t
new file mode 100644
index 00000000000..a0ff4ff6a24
--- /dev/null
+++ b/tests/bugs/glusterd/remove-brick-validation.t
@@ -0,0 +1,68 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function peer_count {
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+## start a 3 node virtual cluster
+TEST launch_cluster 3;
+
+## peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+
+#testcase: bug-1245045-remove-brick-validation
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+kill_glusterd 2
+
+#remove-brick should fail as the peer hosting the brick is down
+TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
+
+TEST $glusterd_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+#volume status should work
+TEST $CLI_2 volume status
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 3
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
+kill_glusterd 2
+
+#remove-brick commit should fail as the peer hosting the brick is down
+TEST ! $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} commit
+
+TEST $glusterd_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+
+#volume status should work
+TEST $CLI_2 volume status
+
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} stop
+
+kill_glusterd 3
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0} start
+
+TEST start_glusterd 3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+TEST $CLI_3 volume status
+
+cleanup
diff --git a/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t b/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
new file mode 100644
index 00000000000..00beab59137
--- /dev/null
+++ b/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a 3X2 distributed-replicate volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..6};
+TEST $CLI volume start $V0
+
+#bug-974007 - remove multiple replica pairs in a single remove-brick command
+# Mount FUSE and create files
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/file{1..10}
+
+# Remove bricks from two sub-volumes to make it a 1x2 vol.
+# Bricks in question are given in a random order but from the same subvols.
+function remove_brick_start_status {
+ $CLI volume remove-brick $V0 \
+ $H0:$B0/${V0}6 $H0:$B0/${V0}1 \
+ $H0:$B0/${V0}2 $H0:$B0/${V0}5 start 2>&1 |grep -oE "success|failed"
+}
+EXPECT "success" remove_brick_start_status;
+
+# Wait for rebalance to complete
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$B0/${V0}6 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}5"
+
+# Check commit status
+function remove_brick_commit_status {
+ $CLI volume remove-brick $V0 \
+ $H0:$B0/${V0}6 $H0:$B0/${V0}1 \
+ $H0:$B0/${V0}2 $H0:$B0/${V0}5 commit 2>&1 |grep -oE "success|failed"
+}
+EXPECT "success" remove_brick_commit_status;
+
+
+# Check the volume type
+EXPECT "Replicate" echo `$CLI volume info |grep Type |awk '{print $2}'`
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#bug-961669 - remove brick start should fail when reducing the replica count
+
+#Create a 3x3 dist-rep volume
+TEST $CLI volume create $V1 replica 3 $H0:$B0/${V1}{0,1,2,3,4,5,6,7,8};
+TEST $CLI volume start $V1
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "9" brick_count ${V1}
+
+# Mount FUSE and create file/directory
+TEST glusterfs -s $H0 --volfile-id $V1 $M0
+TEST touch $M0/zerobytefile.txt
+TEST mkdir $M0/test_dir
+TEST dd if=/dev/zero of=$M0/file bs=1024 count=1024
+
+function remove_brick_start {
+ $CLI volume remove-brick $V1 replica 2 $H0:$B0/${V1}{1,4,7} start 2>&1|grep -oE 'success|failed'
+}
+
+function remove_brick {
+ $CLI volume remove-brick $V1 replica 2 $H0:$B0/${V1}{1,4,7} force 2>&1|grep -oE 'success|failed'
+}
+
+#remove-brick start variant
+#Actual message displayed at cli is:
+#"volume remove-brick start: failed: Rebalancing not needed when reducing replica count. Try without the 'start' option"
+EXPECT "failed" remove_brick_start;
+
+#remove-brick commit-force
+#Actual message displayed at cli is:
+#"volume remove-brick commit force: success"
+EXPECT "success" remove_brick
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+cleanup;
diff --git a/tests/bugs/glusterd/replace-brick-operations.t b/tests/bugs/glusterd/replace-brick-operations.t
new file mode 100644
index 00000000000..044aa3d6c6d
--- /dev/null
+++ b/tests/bugs/glusterd/replace-brick-operations.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+## Test case for BZ: 1094119 Remove replace-brick support from gluster
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+# Start glusterd
+TEST glusterd
+TEST pidof glusterd
+
+## Let's create and start the volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
+TEST $CLI volume start $V0
+
+#bug-1094119-remove-replace-brick-support-from-glusterd
+
+## With this patch, replace-brick only accepts the following command:
+## volume replace-brick <VOLNAME> <SOURCE-BRICK> <NEW-BRICK> {commit force}
+## Any other form of the replace-brick command will fail.
+
+TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 start
+TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 status
+TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 abort
+
+
+## the replace-brick commit force command should succeed
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}2 $H0:$B0/${V0}3 commit force
+
+#bug-1242543-replace-brick validation
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Replace brick1 without killing
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1_new commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST kill_brick $V0 $H0 $B0/${V0}1_new
+
+# Replace brick1 after killing the brick
+TEST $CLI volume replace-brick $V0 $H0:$B0/${V0}1_new $H0:$B0/${V0}1_newer commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+cleanup;
diff --git a/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
new file mode 100644
index 00000000000..e6e65c48456
--- /dev/null
+++ b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function shd_up_status_1 {
+ $CLI_1 volume status | grep "localhost" | grep "Self-heal Daemon" | awk '{print $7}'
+}
+
+function shd_up_status_2 {
+ $CLI_2 volume status | grep "localhost" | grep "Self-heal Daemon" | awk '{print $7}'
+}
+
+function get_shd_pid_2 {
+ $CLI_2 volume status | grep "localhost" | grep "Self-heal Daemon" | awk '{print $8}'
+}
+
+cleanup;
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+TEST launch_cluster 3
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$B0/${V0} $H2:$B0/${V0}
+TEST $CLI_1 volume start $V0
+
+#testcase: bug-1507466 - validate reset-brick commit force
+# Negative case with brick not killed && volume-id xattrs present
+TEST ! $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force
+
+TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} start
+# Now test if reset-brick commit force works
+TEST $CLI_1 volume reset-brick $V0 $H1:$B0/${V0} $H1:$B0/${V0} commit force
+
+#testcase: bug-1383893 - shd should not come up after restarting the peer glusterd
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B0/${V0}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B0/${V0}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_2
+
+# Bring down shd on 2nd node
+kill -15 $(get_shd_pid_2)
+
+# Bring down glusterd on 1st node
+TEST kill_glusterd 1
+
+#Bring back 1st glusterd
+TEST $glusterd_1
+
+# We need to wait till PROCESS_UP_TIMEOUT and then check that the shd service
+# has started on node 2, because once glusterd regains quorum it will restart
+# all volume-level daemons
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_2
+
+cleanup;
diff --git a/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
new file mode 100644
index 00000000000..a871e112d87
--- /dev/null
+++ b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
@@ -0,0 +1,54 @@
+#! /bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+count=`$CLI_3 peer status | grep 'Peer in Cluster (Connected)' | wc -l`
+echo $count
+}
+
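+# helper: counts glustershd processes matching the given node in the ps
+# output; each node should run exactly one shd once its glusterd is up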
+function check_shd {
+ps aux | grep $1 | grep glustershd | wc -l
+}
+
+cleanup
+
+
+TEST launch_cluster 6
+
+TESTS_EXPECTED_IN_LOOP=25
+for i in $(seq 2 6); do
+ hostname="H$i"
+ TEST $CLI_1 peer probe ${!hostname}
+done
+
+
+EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers;
+for i in $(seq 1 5); do
+
+ TEST $CLI_1 volume create ${V0}_$i replica 3 $H1:$B1/${V0}_$i $H2:$B2/${V0}_$i $H3:$B3/${V0}_$i $H4:$B4/${V0}_$i $H5:$B5/${V0}_$i $H6:$B6/${V0}_$i
+ TEST $CLI_1 volume start ${V0}_$i force
+
+done
+
+#kill a node
+TEST kill_node 3
+
+TEST $glusterd_3;
+EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_shd $H3
+
+for i in $(seq 1 5); do
+
+ TEST $CLI_1 volume stop ${V0}_$i
+ TEST $CLI_1 volume delete ${V0}_$i
+
+done
+
+for i in $(seq 1 6); do
+ hostname="H$i"
+ EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 0 check_shd ${!hostname}
+done
+cleanup
diff --git a/tests/bugs/glusterd/snapshot-operations.t b/tests/bugs/glusterd/snapshot-operations.t
new file mode 100644
index 00000000000..4705577d741
--- /dev/null
+++ b/tests/bugs/glusterd/snapshot-operations.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+
+cleanup;
+
+TEST verify_lvm_version
+TEST launch_cluster 3;
+TEST setup_lvm 3;
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$L1 $H2:$L2
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#bug-1318591 - skip-non-directories-inside-vols
+
+b="B1"
+TEST touch ${!b}/glusterd/vols/file
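+# (${!b} is indirect expansion of $B1, node 1's backend directory; the stray
+# regular file under vols/ checks that glusterd skips non-directories there)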
+
+TEST $CLI_1 snapshot create snap1 $V0 no-timestamp;
+
+TEST touch ${!b}/glusterd/snaps/snap1/file
+
+#bug-1322145 - peer hosting snapshotted bricks should not be detachable
+
+kill_glusterd 2
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume replace-brick $V0 $H2:$L2 $H3:$L3 commit force
+
+# peer hosting snapshotted bricks should not be detachable
+TEST ! $CLI_1 peer detach $H2
+
+TEST killall_gluster
+TEST $glusterd_1
+TEST $glusterd_2
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/sync-post-glusterd-restart.t b/tests/bugs/glusterd/sync-post-glusterd-restart.t
new file mode 100644
index 00000000000..de3dff715ab
--- /dev/null
+++ b/tests/bugs/glusterd/sync-post-glusterd-restart.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
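+# helper: reads the value column from the last line of 'volume get' output,
+# as reported by node 2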
+function volume_get_field()
+{
+ local vol=$1
+ local field=$2
+ $CLI_2 volume get $vol $field | tail -1 | awk '{print $2}'
+}
+
+cleanup
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+TEST $CLI_1 volume set $V0 performance.readdir-ahead on
+
+# Bring down 2nd glusterd
+TEST kill_glusterd 2
+
+##bug-1420637 and bug-1323287 - sync post glusterd restart
+
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 60
+TEST $CLI_1 volume set $V0 performance.readdir-ahead off
+TEST $CLI_1 volume set $V0 performance.write-behind off
+
+# Bring back 2nd glusterd
+TEST $glusterd_2
+
+# After the 2nd glusterd comes back, there will be 2 nodes in the cluster
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+#bug-1420637-volume sync post glusterd restart
+
+EXPECT_WITHIN $PROBE_TIMEOUT "60" volinfo_field_2 all cluster.server-quorum-ratio
+EXPECT_WITHIN $PROBE_TIMEOUT "off" volinfo_field_2 $V0 performance.readdir-ahead
+
+#bug-1323287
+EXPECT_WITHIN $PROBE_TIMEOUT 'off' volume_get_field $V0 'write-behind'
+
+#bug-1213295 - volume stop should not crash glusterd post glusterd restart
+
+TEST $CLI_2 volume stop $V0
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1
+
+cleanup
diff --git a/tests/bugs/glusterd/validating-options-for-replicated-volume.t b/tests/bugs/glusterd/validating-options-for-replicated-volume.t
new file mode 100644
index 00000000000..ddc80b17870
--- /dev/null
+++ b/tests/bugs/glusterd/validating-options-for-replicated-volume.t
@@ -0,0 +1,142 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+## start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+#bug-1314649 - validate group virt
+TEST $CLI volume set $V0 group virt;
+
+#bug-765230 - remove-quota-related-option-after-disabling-quota
+## setting soft-timeout as 20
+TEST $CLI volume set $V0 features.soft-timeout 20
+EXPECT '20' volinfo_field $V0 'features.soft-timeout';
+
+## enabling features.quota-deem-statfs
+TEST ! $CLI volume set $V0 features.quota-deem-statfs on
+EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
+
+## enabling quota
+TEST $CLI volume quota $V0 enable
+EXPECT 'on' volinfo_field $V0 'features.quota'
+
+## setting soft-timeout as 20
+TEST $CLI volume set $V0 features.soft-timeout 20
+EXPECT '20' volinfo_field $V0 'features.soft-timeout';
+
+## enabling features.quota-deem-statfs
+TEST $CLI volume set $V0 features.quota-deem-statfs on
+EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
+
+## disabling quota
+TEST $CLI volume quota $V0 disable
+EXPECT 'off' volinfo_field $V0 'features.quota'
+EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
+EXPECT '' volinfo_field $V0 'features.soft-timeout'
+
+## setting soft-timeout as 30
+TEST $CLI volume set $V0 features.soft-timeout 30
+EXPECT '30' volinfo_field $V0 'features.soft-timeout';
+
+## disabling features.quota-deem-statfs
+TEST ! $CLI volume set $V0 features.quota-deem-statfs off
+EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
+
+TEST ! $CLI volume set $V0 statedump-path ""
+TEST ! $CLI volume set $V0 statedump-path " "
+TEST $CLI volume set $V0 statedump-path "/home/"
+EXPECT "/home/" volume_option $V0 server.statedump-path
+
+TEST ! $CLI volume set $V0 background-self-heal-count ""
+TEST ! $CLI volume set $V0 background-self-heal-count " "
+TEST $CLI volume set $V0 background-self-heal-count 10
+EXPECT "10" volume_option $V0 cluster.background-self-heal-count
+
+TEST ! $CLI volume set $V0 io-cache-size ""
+TEST ! $CLI volume set $V0 io-cache-size " "
+TEST $CLI volume set $V0 io-cache-size 64MB
+EXPECT "64MB" volume_option $V0 performance.io-cache-size
+
+TEST ! $CLI volume set $V0 quick-read-cache-size ""
+TEST ! $CLI volume set $V0 quick-read-cache-size " "
+TEST $CLI volume set $V0 quick-read-cache-size 512MB
+EXPECT "512MB" volume_option $V0 performance.quick-read-cache-size
+
+TEST ! $CLI volume set $V0 self-heal-daemon ""
+TEST ! $CLI volume set $V0 self-heal-daemon " "
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT "on" volume_option $V0 cluster.self-heal-daemon
+
+TEST ! $CLI volume set $V0 read-subvolume ""
+TEST ! $CLI volume set $V0 read-subvolume " "
+TEST $CLI volume set $V0 read-subvolume $V0-client-0
+EXPECT "$V0-client-0" volume_option $V0 cluster.read-subvolume
+
+TEST ! $CLI volume set $V0 data-self-heal-algorithm ""
+TEST ! $CLI volume set $V0 data-self-heal-algorithm " "
+TEST ! $CLI volume set $V0 data-self-heal-algorithm on
+TEST $CLI volume set $V0 data-self-heal-algorithm full
+EXPECT "full" volume_option $V0 cluster.data-self-heal-algorithm
+
+TEST ! $CLI volume set $V0 min-free-inodes ""
+TEST ! $CLI volume set $V0 min-free-inodes " "
+TEST $CLI volume set $V0 min-free-inodes 60%
+EXPECT "60%" volume_option $V0 cluster.min-free-inodes
+
+TEST ! $CLI volume set $V0 min-free-disk ""
+TEST ! $CLI volume set $V0 min-free-disk " "
+TEST $CLI volume set $V0 min-free-disk 60%
+EXPECT "60%" volume_option $V0 cluster.min-free-disk
+
+TEST $CLI volume set $V0 min-free-disk 120
+EXPECT "120" volume_option $V0 cluster.min-free-disk
+
+TEST ! $CLI volume set $V0 frame-timeout ""
+TEST ! $CLI volume set $V0 frame-timeout " "
+TEST $CLI volume set $V0 frame-timeout 0
+EXPECT "0" volume_option $V0 network.frame-timeout
+
+TEST ! $CLI volume set $V0 auth.allow ""
+TEST ! $CLI volume set $V0 auth.allow " "
+TEST $CLI volume set $V0 auth.allow 192.168.122.1
+EXPECT "192.168.122.1" volume_option $V0 auth.allow
+
+#bug-782095 - validate performance cache min/max size value
+
+## setting performance cache min size as 2MB
+TEST $CLI volume set $V0 performance.cache-min-file-size 2MB
+EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## setting performance cache max size as 20MB
+TEST $CLI volume set $V0 performance.cache-max-file-size 20MB
+EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
+
+## setting performance cache min size to 25MB must fail (greater than the max)
+TEST ! $CLI volume set $V0 performance.cache-min-file-size 25MB
+EXPECT '2MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## performance cache min size can be set as long as it is less than the max size
+TEST $CLI volume set $V0 performance.cache-min-file-size 15MB
+EXPECT '15MB' volinfo_field $V0 'performance.cache-min-file-size';
+
+## setting cache-max-file-size to 10MB must fail since it is below the 15MB min
+TEST ! $CLI volume set $V0 cache-max-file-size 10MB
+EXPECT '20MB' volinfo_field $V0 'performance.cache-max-file-size';
+
+## finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup
diff --git a/tests/bugs/glusterd/validating-server-quorum.t b/tests/bugs/glusterd/validating-server-quorum.t
new file mode 100644
index 00000000000..ae7d83fd81c
--- /dev/null
+++ b/tests/bugs/glusterd/validating-server-quorum.t
@@ -0,0 +1,125 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
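+# echo df's exit status for $1: 0 if the mount is usable, non-zero otherwise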
+function check_fs {
+ df $1 &> /dev/null
+ echo $?
+}
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 3
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+# Let's create the volume
+TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/${V0}1 $H2:$B2/${V0}2 $H3:$B3/${V0}3
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+
+# Start the volume
+TEST $CLI_1 volume start $V0
+
+#bug-1345727 - bricks should be down when quorum is not met
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H3 $B3/${V0}3
+
+# Bring down glusterd on 2nd node
+TEST kill_glusterd 2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST kill_glusterd 3
+EXPECT_WITHIN $PROBE_TIMEOUT 0 peer_count
+
+# Server quorum is not met. Brick on 1st node must be down
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+
+# Set the quorum ratio to 95, meaning 95% or more of the nodes in the cluster
+# must be available for volume operations to be allowed,
+# i.e. server-side quorum is met when the number of available nodes is
+# greater than or equal to 'quorum-ratio' times the number of nodes in the
+# cluster.
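+# For example, in this 3-node cluster a ratio of 95 requires
+# 0.95 * 3 = 2.85, i.e. all 3 nodes, to be up for quorum to be met.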
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 95
+
+#bug-1483058 - replace-brick should fail when quorum is not met
+TEST ! $CLI_1 volume replace-brick $V0 $H2:$B2/${V0}2 $H1:$B1/${V0}2_new commit force
+
+#Bring back 2nd glusterd
+TEST $glusterd_2
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+# Server quorum is still not met. Bricks should be down on 1st and 2nd nodes
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}2
+
+# Bring back 3rd glusterd
+TEST $glusterd_3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+# Server quorum is met now. Bricks should be up on all nodes.
+# Check from the 3rd instance of glusterd so that the 3rd node finishes all
+# its handshakes and then reports back the brick status.
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H2 $B2/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H3 $B3/${V0}3
+
+# Check from 1st instance of glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H2 $B2/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 3 $V0 $H3 $B3/${V0}3
+
+# TODO: commit fe71ee7 introduced a 1-sec delay so that shd connect and
+# disconnect events are serially processed during a restart of the shd daemon.
+# This opened a race: while the big lock is being released, a command that
+# sneaks in and acquires the big lock may end up working on a stale volinfo.
+# We need to find a better way to fix this.
+
+sleep 3
+
+# quorum is met. replace-brick will execute successfully
+EXPECT_WITHIN $PEER_SYNC_TIMEOUT 0 attempt_replace_brick 1 $V0 $H2:$B2/${V0}2 $H2:$B2/${V0}2_new
+
+TEST $CLI_1 volume reset all
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}2_new
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H3 $B3/${V0}3
+
+
+#bug-913555 - volume should become unwritable when quorum is not met
+
+TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
+
+# Kill one pseudo-node, make sure the others survive and volume stays up.
+TEST kill_node 3;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H2 $B2/${V0}2_new
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
+
+# Kill another pseudo-node; quorum is lost, so the last brick dies and the volume goes down.
+TEST kill_node 2;
+EXPECT_WITHIN $PROBE_TIMEOUT 0 check_peers
+# with quorum lost, the brick process (glusterfsd) on the surviving node must be dead
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" cluster_brick_up_status 1 $V0 $H1 $B1/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_fs $M0;
+
+TEST $glusterd_2;
+TEST $glusterd_3;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
+
+cleanup
diff --git a/tests/bugs/glusterfs-server/bug-852147.t b/tests/bugs/glusterfs-server/bug-852147.t
new file mode 100755
index 00000000000..75db2a26e05
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-852147.t
@@ -0,0 +1,85 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+logdir=`gluster --print-logdir`"/bricks"
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+touch $M0/file1;
+
+TEST $CLI volume set $V0 performance.cache-max-file-size 20MB
+TEST $CLI volume set $V0 performance.cache-min-file-size 10MB
+
+EXPECT "20MB" volinfo_field $V0 'performance.cache-max-file-size';
+EXPECT "10MB" volinfo_field $V0 'performance.cache-min-file-size';
+
+#Performing volume reset and verifying.
+TEST $CLI volume reset $V0
+EXPECT "" volinfo_field $V0 'performance.cache-max-file-size';
+EXPECT "" volinfo_field $V0 'performance.cache-min-file-size';
+
+# Verifying volume profile start, info and stop
+EXPECT "Starting volume profile on $V0 has been successful " $CLI volume profile $V0 start
+
+function vol_prof_info()
+{
+ $CLI volume profile $V0 info | grep Brick | wc -l
+}
+EXPECT "6" vol_prof_info
+
+EXPECT "Stopping volume profile on $V0 has been successful " $CLI volume profile $V0 stop
+
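+# Brick log file name: the brick's absolute path with the leading '/'
+# stripped, the remaining '/' characters mapped to '-', and a ".log" suffix.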
+function log-file-name()
+{
+ logfilename=$B0"/"$V0"1.log"
+ echo ${logfilename:1} | tr / -
+}
+
+function file-size()
+{
+ ls -lrt $1 | awk '{print $5}'
+}
+
+#Finding the current log file's size
+log_file=$logdir"/"`log-file-name`
+log_file_size=`file-size $log_file`
+
+#Removing the old backup log files
+ren_file=$log_file".*"
+rm -rf $ren_file
+
+#Initiating log rotate
+TEST $CLI volume log $V0 rotate
+
+#Capturing new log file's size
+new_file_size=`file-size $log_file`
+
+#Verifying the size of the new log file and the creation of the backup log file
+TEST ! [ $new_file_size -eq $log_file_size ]
+TEST ls -lrt $ren_file
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterfs-server/bug-861542.t b/tests/bugs/glusterfs-server/bug-861542.t
new file mode 100755
index 00000000000..60d1b132fb4
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-861542.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+# A distributed volume with a single brick was chosen solely for the ease of
+# implementing the test case (to be precise, for the ease of extracting the
+# port number).
+TEST $CLI volume create $V0 $H0:$B0/brick0;
+
+TEST $CLI volume start $V0;
+
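+# Print the brick port, parsed from 'volume status' (opt 0) or from
+# 'volume status detail' (any other opt).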
+function port_field()
+{
+ local vol=$1;
+ local opt=$2;
+ if [ $opt -eq '0' ]; then
+ $CLI volume status $vol | grep "brick0" | awk '{print $3}';
+ else
+ $CLI volume status $vol detail | grep "^TCP Port " | awk '{print $4}';
+ fi
+}
+
+function xml_port_field()
+{
+    local vol=$1;
+    local opt=$2;
+    # Find the first occurrence of the string between <port> and </port>
+    $CLI --xml volume status $vol $opt | tr -d '\n' | \
+        sed -rn 's/<port>/&###/;s/<\/port>/###&/;s/^.*###(.*)###.*$/\1/p'
+}
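+# e.g. if the XML contains <port>49152</port>, xml_port_field prints "49152"
+# (49152 is just an illustrative port); for a killed brick the field holds
+# "N/A", which is what the EXPECTs below check.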
+
+TEST $CLI volume status $V0;
+TEST $CLI volume status $V0 detail;
+TEST $CLI --xml volume status $V0;
+TEST $CLI --xml volume status $V0 detail;
+
+# Kill the brick process. After this, the port number for the killed brick process must be "N/A".
+kill `cat $GLUSTERD_PIDFILEDIR/vols/$V0/$H0-d-backends-brick0.pid`
+
+EXPECT "N/A" port_field $V0 '0'; # volume status
+EXPECT "N/A" port_field $V0 '1'; # volume status detail
+
+EXPECT "N/A" xml_port_field $V0 '';
+EXPECT "N/A" xml_port_field $V0 'detail';
+
+cleanup;
diff --git a/tests/bugs/glusterfs-server/bug-864222.t b/tests/bugs/glusterfs-server/bug-864222.t
new file mode 100755
index 00000000000..01a7a4e3afd
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-864222.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/brick0
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock
+cd $N0
+
+TEST ls
+
+TEST $CLI volume set $V0 nfs.enable-ino32 on
+# Main test: directory listing must keep working after enabling enable-ino32 on a live mount.
+TEST ls
+
+cd
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+cleanup
+
diff --git a/tests/bugs/glusterfs-server/bug-873549.t b/tests/bugs/glusterfs-server/bug-873549.t
new file mode 100644
index 00000000000..8b5534728fd
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-873549.t
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd -LDEBUG;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+TEST $CLI volume set $V0 performance.quick-read-cache-size 512MB
+TEST $CLI volume start $V0
+TEST $CLI volume statedump $V0 all
+
+cleanup;
diff --git a/tests/bugs/glusterfs-server/bug-877992.t b/tests/bugs/glusterfs-server/bug-877992.t
new file mode 100755
index 00000000000..300000bcf2c
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-877992.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+
+## Start and create a volume
+TEST glusterd -LDEBUG
+TEST pidof glusterd
+
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+
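+# Install minimal pre/post hook scripts for $event; each records its run in
+# /tmp/pre.out or /tmp/post.out so the test can verify that the hooks fired.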
+function hooks_prep ()
+{
+ local event=$1
+ touch /tmp/pre.out /tmp/post.out
+ touch $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh
+ touch $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh
+
+ printf "#! /bin/bash\necho "$event"Pre > /tmp/pre.out\n" > $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh
+ printf "#! /bin/bash\necho "$event"Post > /tmp/post.out\n" > $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh
+ chmod a+x $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh
+ chmod a+x $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh
+}
+
+function hooks_cleanup ()
+{
+ local event=$1
+ rm /tmp/pre.out /tmp/post.out
+ rm $GLUSTERD_WORKDIR/hooks/1/"$event"/pre/Spre.sh
+ rm $GLUSTERD_WORKDIR/hooks/1/"$event"/post/Spost.sh
+}
+
+## Verify volume is created and its hook scripts ran
+hooks_prep 'create'
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'createPre' cat /tmp/pre.out;
+# Spost.sh runs after S10selinux-label-brick.sh in the create post-hook script
+# list, so allow for the delay in setting the SELinux context on the bricks
+EXPECT_WITHIN 5 'createPost' cat /tmp/post.out;
+hooks_cleanup 'create'
+
+
+## Start volume and verify that its hook scripts ran
+hooks_prep 'start'
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN 5 'startPre' cat /tmp/pre.out;
+EXPECT_WITHIN 5 'startPost' cat /tmp/post.out;
+hooks_cleanup 'start'
+
+cleanup;
diff --git a/tests/bugs/glusterfs-server/bug-887145.t b/tests/bugs/glusterfs-server/bug-887145.t
new file mode 100755
index 00000000000..db2cf3c050b
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-887145.t
@@ -0,0 +1,99 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 performance.open-behind off;
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+
+useradd tmp_user 2>/dev/null 1>/dev/null;
+mkdir $M0/dir;
+mkdir $M0/other;
+cp /etc/passwd $M0/;
+cp $M0/passwd $M0/file;
+chmod 600 $M0/file;
+
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+
+grep nfsnobody /etc/passwd > /dev/null
+if [ $? -eq 1 ]; then
+usr=nobody
+grp=nobody
+else
+usr=nfsnobody
+grp=nfsnobody
+fi
+chown -R $usr:$grp $M0/dir;
+chown -R tmp_user:tmp_user $M0/other;
+
+TEST $CLI volume set $V0 server.root-squash on;
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+# Create files and directories in the root of the glusterfs and nfs mounts.
+# The root is owned by root, so the right behavior is to get EACCES since
+# the fops are executed as nfsnobody/nobody.
+touch $M0/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+touch $N0/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $M0/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $N0/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+cp $M0/file $M0/tmp_file 2>/dev/null;
+TEST [ $? -ne 0 ]
+cp $N0/file $N0/tmp_file 2>/dev/null;
+TEST [ $? -ne 0 ]
+cat $M0/file 2>/dev/null;
+TEST [ $? -ne 0 ]
+# Here the read should be allowed: even though the file "passwd" is owned
+# by root, its permissions allow other users to read it.
+cat $M0/passwd 1>/dev/null;
+TEST [ $? -eq 0 ]
+cat $N0/passwd 1>/dev/null;
+TEST [ $? -eq 0 ]
+
+# Creating files and directories should succeed as the fops are executed
+# inside a directory owned by nfsnobody/nobody.
+TEST touch $M0/dir/file;
+TEST touch $N0/dir/foo;
+TEST mkdir $M0/dir/new;
+TEST mkdir $N0/dir/other;
+TEST rm -f $M0/dir/file $M0/dir/foo;
+TEST rmdir $N0/dir/*;
+
+# Creating files and directories here should fail as the 'other' directory
+# is owned by tmp_user.
+touch $M0/other/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+touch $N0/other/foo 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $M0/other/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $N0/other/new 2>/dev/null;
+TEST [ $? -ne 0 ]
+
+userdel tmp_user;
+rm -rf /home/tmp_user;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterfs-server/bug-889996.t b/tests/bugs/glusterfs-server/bug-889996.t
new file mode 100644
index 00000000000..d7d25c42933
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-889996.t
@@ -0,0 +1,19 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+rm -rf $B0/${V0}1;
+
+TEST ! $CLI volume start $V0;
+EXPECT 0 online_brick_count;
+
+cleanup;
diff --git a/tests/bugs/glusterfs-server/bug-904300.t b/tests/bugs/glusterfs-server/bug-904300.t
new file mode 100755
index 00000000000..95d5d381c8b
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-904300.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+# 1-8
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0;
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST mount_nfs $H0:/$V0 $N0 nolock
+TEST mkdir $N0/dir1
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+#
+# Case 1: Allow "dir1" to be mounted only from 127.0.0.1
+# 9-12
+TEST $CLI volume set $V0 export-dir \""/dir1(127.0.0.1)"\"
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available
+
+TEST mount_nfs localhost:/$V0/dir1 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+#
+# Case 2: Allow "dir1" to be mounted only from 8.8.8.8. This is
+# a negative test case therefore the mount should fail.
+# 13-16
+TEST $CLI volume set $V0 export-dir \""/dir1(8.8.8.8)"\"
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available
+
+TEST ! mount_nfs $H0:/$V0/dir1 $N0 nolock
+TEST ! umount $N0
+
+
+# Case 3: Variation of test case 1. Here we are checking with a hostname
+# instead of an IP address.
+# 17-20
+TEST $CLI volume set $V0 export-dir \""/dir1($H0)"\"
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available
+
+TEST mount_nfs $H0:/$V0/dir1 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+# Case 4: Variation of test case 1. Here we are checking with an IP range.
+# 21-24
+TEST $CLI volume set $V0 export-dir \""/dir1(127.0.0.0/24)"\"
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available
+
+TEST mount_nfs localhost:/$V0/dir1 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+## Finish up
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterfs-server/bug-905864.c b/tests/bugs/glusterfs-server/bug-905864.c
new file mode 100644
index 00000000000..f70003736e7
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-905864.c
@@ -0,0 +1,83 @@
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <pthread.h>
+
+pthread_t th[5] = {0};
+void
+flock_init(struct flock *f, short int type, off_t start, off_t len)
+{
+ f->l_type = type;
+ f->l_start = start;
+ f->l_len = len;
+}
+
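+/* Walk the byte range [start, end) in `step`-sized chunks, issuing F_SETLKW
+ * (when is_set) or F_GETLK for each chunk with the given lock type; returns
+ * non-zero on the first fcntl() failure. */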
+int
+flock_range_in_steps(int fd, int is_set, short l_type, int start, int end,
+ int step)
+{
+ int ret = 0;
+ int i = 0;
+ struct flock f = {
+ 0,
+ };
+
+ for (i = start; i + step < end; i += step) {
+ flock_init(&f, l_type, i, step);
+ ret = fcntl(fd, (is_set) ? F_SETLKW : F_GETLK, &f);
+ if (ret) {
+ perror("fcntl");
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+void *
+random_locker(void *arg)
+{
+ int fd = *(int *)arg;
+    int is_set = 0;
+
+    /* use the thread id to choose a GETLK or SETLK operation */
+ is_set = pthread_self() % 2;
+ (void)flock_range_in_steps(fd, is_set, F_WRLCK, 0, 400, 1);
+
+ return NULL;
+}
+
+int
+main(int argc, char **argv)
+{
+ int fd = -1;
+ int ret = 1;
+ int i = 0;
+ char *fname = NULL;
+
+ if (argc < 2)
+ goto out;
+
+ fname = argv[1];
+ fd = open(fname, O_RDWR);
+ if (fd == -1) {
+ perror("open");
+ goto out;
+ }
+
+ ret = flock_range_in_steps(fd, 1, F_WRLCK, 0, 2000, 2);
+ for (i = 0; i < 5; i++) {
+ pthread_create(&th[i], NULL, random_locker, (void *)&fd);
+ }
+ ret = flock_range_in_steps(fd, 1, F_WRLCK, 0, 2000, 2);
+ for (i = 0; i < 5; i++) {
+ pthread_join(th[i], NULL);
+ }
+out:
+ if (fd != -1)
+ close(fd);
+
+ return ret;
+}
diff --git a/tests/bugs/glusterfs-server/bug-905864.t b/tests/bugs/glusterfs-server/bug-905864.t
new file mode 100644
index 00000000000..44923a85333
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-905864.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+TEST $CLI volume start $V0;
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M1;
+
+TEST touch $M0/file1;
+
+# The following C program tries to open up race(s), if any, in the
+# F_GETLK/F_SETLKW codepaths of the locks xlator.
+TEST $CC -pthread -g3 $(dirname $0)/bug-905864.c -o $(dirname $0)/bug-905864
+
+$(dirname $0)/bug-905864 $M0/file1 &
+$(dirname $0)/bug-905864 $M1/file1;
+wait
+
+TEST rm -f $(dirname $0)/bug-905864
+EXPECT $(brick_count $V0) online_brick_count
+
+cleanup
diff --git a/tests/bugs/glusterfs-server/bug-912297.t b/tests/bugs/glusterfs-server/bug-912297.t
new file mode 100755
index 00000000000..08f5dcea9b9
--- /dev/null
+++ b/tests/bugs/glusterfs-server/bug-912297.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting owner-uid as -12
+TEST ! $CLI volume set $V0 owner-uid -12
+EXPECT '' volinfo_field $V0 'storage.owner-uid'
+
+## Setting owner-gid as -5
+TEST ! $CLI volume set $V0 owner-gid -5
+EXPECT '' volinfo_field $V0 'storage.owner-gid'
+
+## Setting owner-uid as 36
+TEST $CLI volume set $V0 owner-uid 36
+EXPECT '36' volinfo_field $V0 'storage.owner-uid'
+
+## Setting owner-gid as 36
+TEST $CLI volume set $V0 owner-gid 36
+EXPECT '36' volinfo_field $V0 'storage.owner-gid'
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterfs/bug-1482528.t b/tests/bugs/glusterfs/bug-1482528.t
new file mode 100644
index 00000000000..3adf260bdcd
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-1482528.t
@@ -0,0 +1,100 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+
+#Create a distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2}
+TEST $CLI volume start $V0
+
+# Mount FUSE without selinux:
+TEST glusterfs -s $H0 --volfile-id $V0 $@ $M0
+
+TEST touch $M0/default.txt
+EXPECT "644" stat -c %a $M0/default.txt
+
+TEST chmod 0444 $M0/default.txt
+EXPECT "444" stat -c %a $M0/default.txt
+
+TEST mkdir $M0/default
+EXPECT "755" stat -c %a $M0/default
+
+TEST chmod 0444 $M0/default
+EXPECT "444" stat -c %a $M0/default
+
+TEST mkfifo $M0/mkfifo
+EXPECT "644" stat -c %a $M0/mkfifo
+
+TEST mknod $M0/dmknod b 4 5
+EXPECT "644" stat -c %a $M0/dmknod
+
+#Set the create-directory-mask and create-mask options
+TEST $CLI volume set $V0 storage.create-directory-mask 0444
+TEST $CLI volume set $V0 storage.create-mask 0444
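+# The EXPECTs below suggest the mask options are ANDed with the requested
+# mode (e.g. 0644 & 0444 = 0444), while the force-create-mode and
+# force-directory-mode options set further down are ORed in
+# (e.g. 0644 | 0777 = 0777).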
+
+TEST mkdir $M0/create-directory
+EXPECT "444" stat -c %a $M0/create-directory
+
+TEST touch $M0/create-mask.txt
+EXPECT "444" stat -c %a $M0/create-mask.txt
+
+TEST chmod 0777 $M0/create-mask.txt
+EXPECT "444" stat -c %a $M0/create-mask.txt
+
+TEST chmod 0400 $M0/create-mask.txt
+EXPECT "400" stat -c %a $M0/create-mask.txt
+
+TEST chmod 0777 $M0/create-directory
+EXPECT "444" stat -c %a $M0/create-directory
+
+TEST chmod 0400 $M0/create-directory
+EXPECT "400" stat -c %a $M0/create-directory
+
+TEST mkfifo $M0/cfifo
+EXPECT "444" stat -c %a $M0/cfifo
+
+TEST chmod 0777 $M0/cfifo
+EXPECT "444" stat -c %a $M0/cfifo
+
+TEST mknod $M0/cmknod b 4 5
+EXPECT "444" stat -c %a $M0/cmknod
+
+#set force-create-mode and force-directory-mode options
+TEST $CLI volume set $V0 storage.force-create-mode 0777
+TEST $CLI volume set $V0 storage.force-directory-mode 0333
+
+TEST touch $M0/force-create-mode.txt
+EXPECT "777" stat -c %a $M0/force-create-mode.txt
+
+TEST mkdir $M0/force-directory
+EXPECT "777" stat -c %a $M0/force-directory
+
+TEST chmod 0222 $M0/force-create-mode.txt
+EXPECT "777" stat -c %a $M0/force-create-mode.txt
+
+TEST chmod 0222 $M0/force-directory
+EXPECT "333" stat -c %a $M0/force-directory
+
+TEST mkdir $M0/link
+TEST ln -s $M0/force-create-mode.txt $M0/link
+EXPECT "777" stat -c %a $M0/link/force-create-mode.txt
+
+TEST ln $M0/force-create-mode.txt $M0/link/fc.txt
+EXPECT "777" stat -c %a $M0/link/fc.txt
+
+TEST setfacl -m o:r $M0/force-create-mode.txt
+EXPECT "777" stat -c %a $M0/force-create-mode.txt
+
+TEST ln -s $M0/force-directory $M0/link
+EXPECT "777" stat -c %a $M0/link/force-directory
+
+TEST mkfifo $M0/ffifo
+EXPECT "777" stat -c %a $M0/ffifo
+
+TEST mknod $M0/mknod b 4 5
+EXPECT "777" stat -c %a $M0/mknod
diff --git a/tests/bugs/glusterfs/bug-811493.t b/tests/bugs/glusterfs/bug-811493.t
new file mode 100755
index 00000000000..98f7c121a02
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-811493.t
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI system uuid reset;
+
+uuid1=$(grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f 2 -d "=");
+
+TEST $CLI system uuid reset;
+uuid2=$(grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f 2 -d "=");
+
+TEST [ $uuid1 != $uuid2 ]
+
+cleanup
diff --git a/tests/bugs/glusterfs/bug-844688.t b/tests/bugs/glusterfs/bug-844688.t
new file mode 100755
index 00000000000..65f41b342a5
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-844688.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
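+# echo 1 if $text appears in a freshly generated mount statedump, else 0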
+function check_callstack_log {
+ local text=$1
+ statedump_file=$(generate_mount_statedump $V0);
+ grep $text $statedump_file 2>/dev/null 1>/dev/null;
+ if [ $? -eq 0 ]; then
+ echo "1";
+ else
+ echo "0";
+ fi;
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/brick0
+TEST $CLI volume start $V0
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+mount_pid=$(get_mount_process_pid $V0);
+# enable dumping of call stack creation and frame creation times in statedump
+# monitoring is enabled by default
+
+# We want to make sure that there is a pending frame in the gluster stack.
+# For that we create a blocking lock scenario.
+
+TEST touch $M0/lockfile;
+# Open two fd's on the same file
+exec 8>$M0/lockfile;
+exec 9>$M0/lockfile;
+
+# First flock will succeed and the second one will block, hence the background run.
+flock -x 8 ;
+flock -x 9 &
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" check_callstack_log "callstack-creation-time";
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" check_callstack_log "frame-creation-time";
+
+flock -u 8
+flock -u 9;
+
+# Closing the fd's
+exec 8>&-
+exec 9>&-
+
+TEST rm -f $M0/lockfile;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+rm -f $statedumpdir/glusterdump.$mount_pid.*;
+cleanup
diff --git a/tests/bugs/glusterfs/bug-848251.t b/tests/bugs/glusterfs/bug-848251.t
new file mode 100644
index 00000000000..69ffe680f7f
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-848251.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+
+TEST $CLI volume start $V0;
+
+#enable quota
+TEST $CLI volume quota $V0 enable;
+
+#mount on a random dir
+TEST MOUNTDIR="/tmp/$RANDOM"
+TEST mkdir $MOUNTDIR
+TEST glusterfs -s $H0 --volfile-id=$V0 $MOUNTDIR
+
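+# Create $MOUNTDIR/$name and put a 50KB usage limit on it; quota_list prints
+# the third column of 'quota list' for that path (the default 80% soft limit,
+# as the EXPECTs below show).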
+function set_quota(){
+ mkdir "$MOUNTDIR/$name"
+ $CLI volume quota $V0 limit-usage /$name 50KB
+}
+
+function quota_list(){
+ $CLI volume quota $V0 list | grep -- /$name | awk '{print $3}'
+}
+
+TEST name=":d1"
+# directory name containing ':' at the start
+TEST set_quota
+EXPECT "80%" quota_list
+
+TEST name=":d1/d:1"
+# directory name containing ':' in the middle
+TEST set_quota
+EXPECT "80%" quota_list
+
+TEST name=":d1/d:1/d1:"
+#file name containing ':' in the end
+TEST set_quota
+EXPECT "80%" quota_list
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR
+TEST rm -rf $MOUNTDIR
+TEST $CLI volume stop $V0
+
+cleanup;
diff --git a/tests/bugs/glusterfs/bug-853690.t b/tests/bugs/glusterfs/bug-853690.t
new file mode 100755
index 00000000000..59facfcddb0
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-853690.t
@@ -0,0 +1,91 @@
+#!/bin/bash
+#
+# Bug 853690 - Test that short writes do not lead to corruption.
+#
+# Mismanagement of short writes in AFR leads to corruption and immediately
+# detectable split-brain. Write a file to a replica volume using error-gen
+# to cause short writes on one replica.
+#
+# Short writes are also possible during heal. If ignored, the files are marked
+# consistent and silently differ. After reading the file, cause a lookup, wait
+# for self-heal and verify that the afr xattrs do not match.
+#
+########
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST mkdir -p $B0/test{1,2}
+
+# Our graph is a two brick replica with 100% frequency of short writes on one
+# side of the replica. This guarantees a single write fop leads to an out-of-sync
+# situation.
+cat > $B0/test.vol <<EOF
+volume test-posix-0
+ type storage/posix
+ option directory $B0/test1
+end-volume
+
+volume test-error-0
+ type debug/error-gen
+ option failure 100
+ option enable writev
+ option error-no GF_ERROR_SHORT_WRITE
+ subvolumes test-posix-0
+end-volume
+
+volume test-locks-0
+ type features/locks
+ subvolumes test-error-0
+end-volume
+
+volume test-posix-1
+ type storage/posix
+ option directory $B0/test2
+end-volume
+
+volume test-locks-1
+ type features/locks
+ subvolumes test-posix-1
+end-volume
+
+volume test-replicate-0
+ type cluster/replicate
+ option background-self-heal-count 0
+ subvolumes test-locks-0 test-locks-1
+end-volume
+EOF
+
+TEST glusterd
+
+TEST glusterfs --volfile=$B0/test.vol --attribute-timeout=0 --entry-timeout=0 $M0
+
+# Send a single write, guaranteed to be short on one replica, and attempt to
+# read the data back. Failure to detect the short write results in different
+# file sizes and immediate split-brain (EIO).
+TEST dd if=/dev/urandom of=$M0/file bs=128k count=1
+TEST dd if=$M0/file of=/dev/null bs=128k count=1
+########
+#
+# Test self-heal with short writes...
+#
+########
+
+# Cause a lookup and wait a few seconds for posterity. This self-heal also fails
+# due to a short write.
+TEST ls $M0/file
+# Verify the attributes on the healthy replica do not reflect consistency with
+# the other replica.
+xa=`getfattr -n trusted.afr.test-locks-0 -e hex $B0/test2/file 2>&1 | grep = | cut -f2 -d=`
+EXPECT_NOT 0x000000000000000000000000 echo $xa
+
+TEST rm -f $M0/file
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+rm -f $B0/test.vol
+rm -rf $B0/test1 $B0/test2
+
+cleanup;
+
diff --git a/tests/bugs/glusterfs/bug-856455.t b/tests/bugs/glusterfs/bug-856455.t
new file mode 100644
index 00000000000..d02b39bda8e
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-856455.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+BRICK_COUNT=3
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume start $V0
+
+## Mount FUSE with caching disabled
+TEST $GFS -s $H0 --volfile-id $V0 $M0;
+
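+# Count the POSIX backend entries in the pathinfo xattr for $1
+# (one per brick that holds the file or directory).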
+function query_pathinfo()
+{
+ local path=$1;
+ local retval;
+
+ local pathinfo=$(getfattr -n trusted.glusterfs.pathinfo $path);
+ retval=$(echo $pathinfo | grep -o 'POSIX' | wc -l);
+ echo $retval
+}
+
+TEST touch $M0/f00f;
+TEST mkdir $M0/f00d;
+
+# verify pathinfo for a file and directory
+EXPECT 1 query_pathinfo $M0/f00f;
+EXPECT $BRICK_COUNT query_pathinfo $M0/f00d;
+
+# Kill a brick process and then query pathinfo.
+# For directories, pathinfo should list backend paths from the available (up)
+# subvolumes only.
+
+kill_brick $V0 $H0 ${B0}/${V0}1
+
+EXPECT `expr $BRICK_COUNT - 1` query_pathinfo $M0/f00d;
+
+cleanup;
diff --git a/tests/bugs/glusterfs/bug-860297.t b/tests/bugs/glusterfs/bug-860297.t
new file mode 100644
index 00000000000..c2d21553f68
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-860297.t
@@ -0,0 +1,13 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+TEST $CLI volume create $V0 $H0:$B0/brick1
+setfattr -x trusted.glusterfs.volume-id $B0/brick1
+## If the extended attribute trusted.glusterfs.volume-id is not present,
+## the volume should not be able to start.
+TEST ! $CLI volume start $V0;
+cleanup;
diff --git a/tests/bugs/glusterfs/bug-861015-index.t b/tests/bugs/glusterfs/bug-861015-index.t
new file mode 100644
index 00000000000..74ffc45bf2e
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-861015-index.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 ensure-durability off
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}4
+cd $M0
+HEAL_FILES=0
+for i in {1..10}
+do
+ echo "abc" > $i
+ HEAL_FILES=$(($HEAL_FILES+1))
+done
+HEAL_FILES=$(($HEAL_FILES+3)) # the brick root is counted once per replica subvolume (3 of them)
+
+cd ~
+EXPECT "$HEAL_FILES" get_pending_heal_count $V0
+TEST rm -f $M0/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume heal $V0 info
+# Only the root dir should be present in the indices now
+EXPECT "1" afr_get_num_indices_in_brick $B0/${V0}1
+EXPECT "1" afr_get_num_indices_in_brick $B0/${V0}3
+EXPECT "1" afr_get_num_indices_in_brick $B0/${V0}5
+cleanup
diff --git a/tests/bugs/glusterfs/bug-861015-log.t b/tests/bugs/glusterfs/bug-861015-log.t
new file mode 100644
index 00000000000..2f3e0ad14f4
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-861015-log.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+log_wd=$(gluster --print-logdir)
+TEST glusterd
+TEST pidof glusterd
+rm -f $log_wd/glustershd.log
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+cd $M0
+for i in {1..10}
+do
+ dd if=/dev/urandom of=f bs=1024k count=10 2>/dev/null
+done
+
+cd ~
+TEST $CLI volume heal $V0 info
+function count_inode_link_failures {
+ logfile=$1
+ grep "inode link failed on the inode" $logfile | wc -l
+}
+EXPECT "0" count_inode_link_failures $log_wd/glustershd.log
+cleanup
diff --git a/tests/bugs/glusterfs/bug-866459.t b/tests/bugs/glusterfs/bug-866459.t
new file mode 100644
index 00000000000..a6c767b4b0e
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-866459.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+
+## Start and create a volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Create and start a volume with aio enabled
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 linux-aio on
+TEST $CLI volume set $V0 background-self-heal-count 0
+TEST $CLI volume set $V0 performance.stat-prefetch off;
+TEST $CLI volume start $V0
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+dd of=$M0/a if=/dev/urandom bs=1024k count=1 2>&1 > /dev/null
+## Bring a brick down
+TEST kill_brick $V0 $H0 $B0/${V0}1
+## Rewrite the file
+dd of=$M0/a if=/dev/urandom bs=1024k count=1 2>&1 > /dev/null
+TEST $CLI volume start $V0 force
+## Wait for the brick to give CHILD_UP in client protocol
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+md5offile2=`md5sum $B0/${V0}2/a | awk '{print $1}'`
+
+##trigger self-heal
+ls -l $M0/a
+
+EXPECT "$md5offile2" echo `md5sum $B0/${V0}1/a | awk '{print $1}'`
+
+## Finish up
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterfs/bug-867253.t b/tests/bugs/glusterfs/bug-867253.t
new file mode 100644
index 00000000000..8c3c39baace
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-867253.t
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
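+# echo 0 only when both arguments are 0 (both ls invocations succeeded)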
+function file_count()
+{
+ val=1
+
+ if [ "$1" == "0" ]
+ then
+ if [ "$2" == "0" ]
+ then
+ val=0
+ fi
+ fi
+ echo $val
+}
+
+BRICK_COUNT=2
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+## Mount nfs, with nocache option
+TEST mount_nfs $H0:/$V0 $M0 nolock,noac;
+
+touch $M0/files{1..1000};
+
+# Kill a brick process
+kill_brick $V0 $H0 $B0/${V0}0
+
+drop_cache $M0
+
+ls -l $M0 >/dev/null;
+
+NEW_FILE_COUNT=`echo $?`;
+
+TEST $CLI volume start $V0 force
+
+# Kill a brick process
+kill_brick $V0 $H0 $B0/${V0}1
+
+drop_cache $M0
+
+ls -l $M0 >/dev/null;
+
+NEW_FILE_COUNT1=`echo $?`;
+
+EXPECT "0" file_count $NEW_FILE_COUNT $NEW_FILE_COUNT1
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+cleanup
diff --git a/tests/bugs/glusterfs/bug-869724.t b/tests/bugs/glusterfs/bug-869724.t
new file mode 100644
index 00000000000..ca5bb17081c
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-869724.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+
+## Start and create a volume
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1;
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+
+## Make the volume tightly consistent for metadata
+TEST $CLI volume set $V0 performance.stat-prefetch off;
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+touch $M0/test;
+build_tester $(dirname $0)/getlk_owner.c
+
+TEST $(dirname $0)/getlk_owner $M0/test;
+
+rm -f $(dirname $0)/getlk_owner
+cleanup;
+
diff --git a/tests/bugs/glusterfs/bug-872923.t b/tests/bugs/glusterfs/bug-872923.t
new file mode 100755
index 00000000000..00e02c89cbe
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-872923.t
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock
+
+cd $N0
+mkdir test_hardlink_self_heal;
+cd test_hardlink_self_heal;
+
+for i in `seq 1 5`;
+do
+ mkdir dir.$i;
+ for j in `seq 1 10`;
+ do
+ dd if=/dev/zero of=dir.$i/file.$j bs=1k count=$j > /dev/null 2>&1;
+ done;
+done;
+
+cd ..
+TEST kill_brick $V0 $H0 $B0/brick0
+cd test_hardlink_self_heal;
+
+RET=0
+for i in `seq 1 5`;
+do
+ for j in `seq 1 10`;
+ do
+ ln dir.$i/file.$j dir.$i/link_file.$j > /dev/null 2>&1;
+ RET=$?
+ if [ $RET -ne 0 ]; then
+ break;
+ fi
+ done ;
+ if [ $RET -ne 0 ]; then
+ break;
+ fi
+done;
+
+cd
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+EXPECT "0" echo $RET;
+
+cleanup;
diff --git a/tests/bugs/glusterfs/bug-873962-spb.t b/tests/bugs/glusterfs/bug-873962-spb.t
new file mode 100644
index 00000000000..db71cc0f6fe
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-873962-spb.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+touch $M0/a
+
+exec 5<$M0/a
+
+kill_brick $V0 $H0 $B0/${V0}0
+echo "hi" > $M0/a
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+kill_brick $V0 $H0 $B0/${V0}1
+echo "bye" > $M0/a
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST ! cat $M0/a #To mark split-brain
+
+TEST ! read -u 5 line
+exec 5<&-
+
+cleanup;
diff --git a/tests/bugs/glusterfs/bug-873962.t b/tests/bugs/glusterfs/bug-873962.t
new file mode 100755
index 00000000000..7faa9998159
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-873962.t
@@ -0,0 +1,107 @@
+#!/bin/bash
+
+#AFR TEST-IDENTIFIER SPLIT-BRAIN
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
+
+# If we allow self-heal to happen in the background, we'll get spurious
+# failures - especially at the point labeled "FAIL HERE" but
+# occasionally elsewhere. This behavior is very timing-dependent. It
+# doesn't show up in Jenkins, but it does on JD's and KP's machines, and
+# it got sharply worse because of an unrelated fsync change (6ae6f3d)
+# which changed timing. Putting anything at the FAIL HERE marker tends
+# to make it go away most of the time on affected machines, even if the
+# "anything" is unrelated.
+#
+# What's going on is that the I/O on the first mountpoint is allowed to
+# complete even though self-heal is still in progress and the state on
+# disk does not reflect its result. In fact, the state changes during
+# self-heal create the appearance of split brain when the second I/O
+# comes in, so that fails even though we haven't actually been in split
+# brain since the manual xattr operations. By disallowing background
+# self-heal, we ensure that the second I/O can't happen before self-heal
+# is complete, because it has to follow the first I/O which now has to
+# follow self-heal.
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+
+#Make sure self-heal is not triggered when the bricks are re-started
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+TEST touch $M0/a
+TEST touch $M0/b
+TEST touch $M0/c
+TEST touch $M0/d
+echo "1" > $M0/b
+echo "1" > $M0/d
+TEST kill_brick $V0 $H0 $B0/${V0}2
+echo "1" > $M0/a
+echo "1" > $M0/c
+TEST setfattr -n trusted.mdata -v abc $M0/b
+TEST setfattr -n trusted.mdata -v abc $M0/d
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}1
+echo "2" > $M0/a
+echo "2" > $M0/c
+TEST setfattr -n trusted.mdata -v def $M0/b
+TEST setfattr -n trusted.mdata -v def $M0/d
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M1 --direct-io-mode=enable
+
+#Files are in split-brain, so open should fail
+TEST ! cat $M0/a;
+TEST ! cat $M1/a;
+TEST ! cat $M0/b;
+TEST ! cat $M1/b;
+
+#Reset split-brain status
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/a;
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/b;
+
+#The operations should do self-heal and give correct output
+EXPECT "2" cat $M0/a;
+# FAIL HERE - see the comment about cluster.background-self-heal-count above.
+EXPECT "2" cat $M1/a;
+TEST dd if=$M0/b of=/dev/null bs=1024k
+EXPECT "def" getfattr -n trusted.mdata --only-values $M0/b 2>/dev/null
+EXPECT "def" getfattr -n trusted.mdata --only-values $M1/b 2>/dev/null
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M1 --direct-io-mode=enable
+
+#Files are in split-brain, so open should fail
+TEST ! cat $M0/c
+TEST ! cat $M1/c
+TEST ! cat $M0/d
+TEST ! cat $M1/d
+
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/c
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/d
+
+#The operations should NOT do self-heal but give correct output
+EXPECT "2" cat $M0/c
+EXPECT "2" cat $M1/c
+EXPECT "1" cat $M0/d
+EXPECT "1" cat $M1/d
+
+cleanup;
diff --git a/tests/bugs/glusterfs/bug-879490.t b/tests/bugs/glusterfs/bug-879490.t
new file mode 100755
index 00000000000..fb8d4263919
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-879490.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+function peer_probe()
+{
+ $CLI peer probe a.b.c.d --xml | xmllint --format - | grep "<opErrstr>"
+}
+
+EXPECT " <opErrstr>Probe returned with Transport endpoint is not connected</opErrstr>" peer_probe
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterfs/bug-879494.t b/tests/bugs/glusterfs/bug-879494.t
new file mode 100755
index 00000000000..12ee466b33a
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-879494.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+function peer_detach()
+{
+    $CLI peer detach a.b.c.d --xml | xmllint --format - | grep "<opErrstr>"
+}
+
+EXPECT " <opErrstr>a.b.c.d is not part of cluster</opErrstr>" peer_detach
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterfs/bug-892730.t b/tests/bugs/glusterfs/bug-892730.t
new file mode 100755
index 00000000000..a76961134c5
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-892730.t
@@ -0,0 +1,77 @@
+#!/bin/bash
+#
+# Bug 892730 - Verify that afr handles EIO errors from the brick properly.
+#
+# The associated bug describes a problem where EIO errors returned from the
+# local filesystem of a brick that is part of a replica volume are exposed to
+# the user. This test simulates such failures and verifies that the volume
+# operates as expected.
+#
+########
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST mkdir -p $B0/test{1,2}
+
+# The graph is a two brick replica with error-gen enabled on the second brick
+# and configured to return EIO lookup errors 100% of the time. This simulates
+# a brick with a crashed or shut down local filesystem. Note that the order in
+# which errors occur is a factor in reproducing the original bug (error-gen
+# must be enabled in the second brick for this test to be effective).
+
+cat > $B0/test.vol <<EOF
+volume test-posix-0
+ type storage/posix
+ option directory $B0/test1
+end-volume
+
+volume test-locks-0
+ type features/locks
+ subvolumes test-posix-0
+end-volume
+
+volume test-posix-1
+ type storage/posix
+ option directory $B0/test2
+end-volume
+
+volume test-error-1
+ type debug/error-gen
+ option failure 100
+ option enable lookup
+ option error-no EIO
+ subvolumes test-posix-1
+end-volume
+
+volume test-locks-1
+ type features/locks
+ subvolumes test-error-1
+end-volume
+
+volume test-replicate-0
+ type cluster/replicate
+ option background-self-heal-count 0
+ subvolumes test-locks-0 test-locks-1
+end-volume
+EOF
+
+TEST glusterd
+
+TEST glusterfs --volfile=$B0/test.vol --attribute-timeout=0 --entry-timeout=0 $M0
+
+# We should be able to create and remove a file without interference from the
+# "broken" brick.
+
+TEST touch $M0/file
+TEST rm $M0/file
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+rm -f $B0/test.vol
+rm -rf $B0/test1 $B0/test2
+
+cleanup;
+
diff --git a/tests/bugs/glusterfs/bug-893338.t b/tests/bugs/glusterfs/bug-893338.t
new file mode 100644
index 00000000000..b915d3e791e
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-893338.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+
+## Test for symlink success
+TEST touch $M0/reg_file
+TEST ln -s $M0/reg_file $M0/symlink
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterfs/bug-893378.t b/tests/bugs/glusterfs/bug-893378.t
new file mode 100755
index 00000000000..444dafb44bc
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-893378.t
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+BRICK_COUNT=3
+
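+# Return (via $?) the number of bricks on which $1 exists; the indices of the
+# bricks found are recorded in BRICK1, BRICK2, ...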
+function file_has_linkfile()
+{
+ i=0
+ j=0
+ while [ $i -lt $BRICK_COUNT ]
+ do
+ stat=`stat $B0/${V0}$i/$1 2>/dev/null`
+ if [ $? -eq 0 ]
+ then
+ let j++
+ let "BRICK${j}=$i"
+
+ fi
+ let i++
+ done
+ return $j
+}
+
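+# Among the bricks recorded by file_has_linkfile, return the index of the one
+# whose copy lacks the dht.linkto xattr, i.e. the brick holding the actual
+# (cached) data file.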
+function get_cached_brick()
+{
+    local fname=$1
+    i=1
+    while [ $i -lt 3 ]
+    do
+        # BRICK1/BRICK2 hold the brick indices recorded by file_has_linkfile;
+        # use indirect expansion to read them.
+        brick_var="BRICK$i"
+        test=`getfattr -n trusted.glusterfs.dht.linkto -e text $B0/${V0}${!brick_var}/$fname 2>&1`
+        if [ $? -eq 1 ]
+        then
+            cached=${!brick_var}
+            i=$(( $i+3 ))
+        fi
+        let i++
+    done
+
+    return $cached
+}
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs --attribute-timeout=0 --entry-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+## create a linkfile by renaming the file (a linkfile is left behind when the
+## new name hashes to a different subvolume)
+TEST touch $M0/1
+TEST mv $M0/1 $M0/2
+
+file_has_linkfile 2
+has_link=$?
+if [ $has_link -eq 2 ]
+then
+    get_cached_brick 2
+ CACHED=$?
+ # Kill a brick process
+ kill_brick $V0 $H0 $B0/${V0}$CACHED
+fi
+
+## trigger a lookup
+ls -l $M0/2 2>/dev/null
+
+## dd with conv=excl must fail since the file already exists
+
+dd if=/dev/zero of=$M0/2 bs=1 count=1 conv=excl 2>/dev/null
+EXPECT "1" echo $?
+
+cleanup;
diff --git a/tests/bugs/glusterfs/bug-895235.t b/tests/bugs/glusterfs/bug-895235.t
new file mode 100644
index 00000000000..ac9caae9561
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-895235.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 ensure-durability off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+
+TEST gluster volume profile $V0 start
+TEST dd of=$M0/a if=/dev/zero bs=1024k count=1 oflag=append
+finodelk_max_latency=$($CLI volume profile $V0 info | grep FINODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
+
+TEST [ -z "$finodelk_max_latency" ]
+
+cleanup;
diff --git a/tests/bugs/glusterfs/bug-896431.t b/tests/bugs/glusterfs/bug-896431.t
new file mode 100755
index 00000000000..61f71141713
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-896431.t
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting cluster.subvols-per-directory as -5
+TEST ! $CLI volume set $V0 cluster.subvols-per-directory -5
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+TEST ! $CLI volume set $V0 subvols-per-directory -5
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Setting cluster.subvols-per-directory as 0
+TEST ! $CLI volume set $V0 cluster.subvols-per-directory 0
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+TEST ! $CLI volume set $V0 subvols-per-directory 0
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Setting cluster.subvols-per-directory as 4 (more than the 2 available subvolumes) must fail
+TEST ! $CLI volume set $V0 cluster.subvols-per-directory 4
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+TEST ! $CLI volume set $V0 subvols-per-directory 4
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Setting cluster.subvols-per-directory as 2 (the total number of subvolumes)
+TEST $CLI volume set $V0 cluster.subvols-per-directory 2
+EXPECT '2' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Setting cluster.subvols-per-directory as 1
+TEST $CLI volume set $V0 subvols-per-directory 1
+EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
+
+## Start and create a pure replicate volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 8 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'Replicate' volinfo_field $V0 'Type';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Setting cluster.subvols-per-directory as 8 for a replicate volume
+TEST ! $CLI volume set $V0 cluster.subvols-per-directory 8
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+TEST ! $CLI volume set $V0 subvols-per-directory 8
+EXPECT '' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Setting cluster.subvols-per-directory as 1 for a replicate volume
+TEST $CLI volume set $V0 cluster.subvols-per-directory 1
+EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory';
+TEST $CLI volume set $V0 subvols-per-directory 1
+EXPECT '1' volinfo_field $V0 'cluster.subvols-per-directory';
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/glusterfs/bug-902610.t b/tests/bugs/glusterfs/bug-902610.t
new file mode 100755
index 00000000000..112c947e116
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-902610.t
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Layout-spread is set to 3 but only 2 subvols are up, so the layout should split 50-50
+function get_layout()
+{
+ layout1=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1|grep dht |cut -d = -f2`
+ layout1_s=$(echo $layout1 | cut -c 19-26)
+ layout1_e=$(echo $layout1 | cut -c 27-34)
+ #echo "layout1 from $layout1_s to $layout1_e" > /dev/tty
+ layout2=`getfattr -n trusted.glusterfs.dht -e hex $2 2>&1|grep dht |cut -d = -f2`
+ layout2_s=$(echo $layout2 | cut -c 19-26)
+ layout2_e=$(echo $layout2 | cut -c 27-34)
+ #echo "layout2 from $layout2_s to $layout2_e" > /dev/tty
+
+ if [ x"$layout2_s" = x"00000000" ]; then
+ # Reverse so we only have the real logic in one place.
+ tmp_s=$layout1_s
+ tmp_e=$layout1_e
+ layout1_s=$layout2_s
+ layout1_e=$layout2_e
+ layout2_s=$tmp_s
+ layout2_e=$tmp_e
+ fi
+
+ # Figure out where the join point is.
+ target=$( $PYTHON -c "print('%08x' % (0x$layout1_e + 1))")
+ #echo "target for layout2 = $target" > /dev/tty
+
+ # The second layout should cover everything that the first doesn't.
+ if [ x"$layout2_s" = x"$target" -a x"$layout2_e" = x"ffffffff" ]; then
+ return 0
+ fi
+
+ return 1
+}
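+
+# As an illustration (values assumed, not from a real run): with two live
+# subvols the function accepts complementary ranges such as
+# 00000000-7ffffffe and 7fffffff-ffffffff.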
+
+BRICK_COUNT=4
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3
+## set subvols-per-dir option
+TEST $CLI volume set $V0 subvols-per-directory 3
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0 --entry-timeout=0 --attribute-timeout=0;
+
+TEST ls -l $M0
+
+## kill 2 bricks to bring down available subvol < spread count
+kill_brick $V0 $H0 $B0/${V0}2
+kill_brick $V0 $H0 $B0/${V0}3
+
+mkdir $M0/dir1 2>/dev/null
+
+get_layout $B0/${V0}0/dir1 $B0/${V0}1/dir1
+EXPECT "0" echo $?
+
+cleanup;
diff --git a/tests/bugs/glusterfs/bug-906646.t b/tests/bugs/glusterfs/bug-906646.t
new file mode 100644
index 00000000000..37b8fe5c8eb
--- /dev/null
+++ b/tests/bugs/glusterfs/bug-906646.t
@@ -0,0 +1,97 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+REPLICA=2
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica $REPLICA $H0:$B0/${V0}-00 $H0:$B0/${V0}-01 $H0:$B0/${V0}-10 $H0:$B0/${V0}-11
+TEST $CLI volume start $V0
+
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+
+## Mount FUSE with caching disabled
+TEST $GFS -s $H0 --volfile-id $V0 $M0;
+
+function xattr_query_check()
+{
+ local path=$1
+ local xa_name=$2
+
+ local ret=$(getfattr -n $xa_name $path 2>&1 | grep -o "$xa_name: No such attribute" | wc -l)
+ echo $ret
+}
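+
+# xattr_query_check echoes 1 when the xattr is absent on the given backend
+# path (getfattr prints "No such attribute") and 0 when it is present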
+
+function set_xattr()
+{
+ local path=$1
+ local xa_name=$2
+ local xa_val=$3
+
+ setfattr -n $xa_name -v $xa_val $path
+ echo $?
+}
+
+function remove_xattr()
+{
+ local path=$1
+ local xa_name=$2
+
+ setfattr -x $xa_name $path
+ echo $?
+}
+
+f=f00f
+pth=$M0/$f
+
+TEST touch $pth
+
+# fetch backend paths
+backend_paths=`get_backend_paths $pth`
+
+# convert it into an array
+backend_paths_array=($backend_paths)
+
+# set an xattr on this file
+EXPECT 0 set_xattr $pth "trusted.name" "test"
+
+# confirm the set on backend
+EXPECT 0 xattr_query_check ${backend_paths_array[0]} "trusted.name"
+EXPECT 0 xattr_query_check ${backend_paths_array[1]} "trusted.name"
+
+brick_path=`echo ${backend_paths_array[0]} | sed -n 's/\(.*\)\/'$f'/\1/p'`
+brick_id=`$CLI volume info $V0 | grep "Brick[[:digit:]]" | grep -n $brick_path | cut -f1 -d:`
+
+# Kill a brick process
+TEST kill_brick $V0 $H0 $brick_path
+
+# remove the xattr from the mount point
+EXPECT 0 remove_xattr $pth "trusted.name"
+
+# we killed the brick holding ${backend_paths_array[0]}, so the xattr is
+# expected to remain on that backend
+EXPECT 0 xattr_query_check ${backend_paths_array[0]} "trusted.name"
+EXPECT 1 xattr_query_check ${backend_paths_array[1]} "trusted.name"
+
+# restart the brick process
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3
+
+TEST $CLI volume heal $V0
+
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+# check backends - xattr should not be present anywhere
+EXPECT 1 xattr_query_check ${backend_paths_array[0]} "trusted.name"
+EXPECT 1 xattr_query_check ${backend_paths_array[1]} "trusted.name"
+
+cleanup;
diff --git a/tests/bugs/glusterfs/getlk_owner.c b/tests/bugs/glusterfs/getlk_owner.c
new file mode 100644
index 00000000000..cbe277318c1
--- /dev/null
+++ b/tests/bugs/glusterfs/getlk_owner.c
@@ -0,0 +1,124 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+
+#define GETLK_OWNER_CHECK(f, cp, label) \
+ do { \
+ switch (f.l_type) { \
+ case F_RDLCK: \
+ case F_WRLCK: \
+ ret = 1; \
+ goto label; \
+ case F_UNLCK: \
+ if (!are_flocks_sane(&f, &cp)) { \
+ ret = 1; \
+ goto label; \
+ } \
+ break; \
+ } \
+ } while (0)
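+
+/*
+ * The macro fails the test if GETLK reports a conflicting lock (the test
+ * itself owns the locks, so F_UNLCK is expected) or if any field other
+ * than l_type was modified on the way back (see are_flocks_sane below).
+ */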
+
+void
+flock_init(struct flock *f, short int type, off_t start, off_t len)
+{
+ f->l_type = type;
+ f->l_start = start;
+ f->l_len = len;
+}
+
+int
+flock_cp(struct flock *dst, struct flock *src)
+{
+    memcpy((void *)dst, (void *)src, sizeof(struct flock));
+    return 0;
+}
+
+int
+are_flocks_sane(struct flock *src, struct flock *cpy)
+{
+ return ((src->l_whence == cpy->l_whence) &&
+ (src->l_start == cpy->l_start) && (src->l_len == cpy->l_len));
+}
+
+/*
+ * Test description:
+ * SETLK (0,3), F_WRLCK
+ * SETLK (3,3), F_WRLCK
+ *
+ * the following GETLK requests must return the flock struct unmodified,
+ * except for l_type, which must be set to F_UNLCK
+ * GETLK (3,3), F_WRLCK
+ * GETLK (3,3), F_RDLCK
+ *
+ * */
+
+int
+main(int argc, char **argv)
+{
+ int fd = -1;
+ int ret = 1;
+ char *fname = NULL;
+ struct flock f = {
+ 0,
+ };
+ struct flock cp = {
+ 0,
+ };
+
+ if (argc < 2)
+ goto out;
+
+ fname = argv[1];
+ fd = open(fname, O_RDWR);
+ if (fd == -1) {
+ perror("open");
+ goto out;
+ }
+
+ flock_init(&f, F_WRLCK, 0, 3);
+ flock_cp(&cp, &f);
+ ret = fcntl(fd, F_SETLK, &f);
+ if (ret) {
+ perror("fcntl");
+ goto out;
+ }
+ if (!are_flocks_sane(&f, &cp)) {
+ ret = 1;
+ goto out;
+ }
+
+ flock_init(&f, F_WRLCK, 3, 3);
+ flock_cp(&cp, &f);
+ ret = fcntl(fd, F_SETLK, &f);
+ if (ret) {
+ perror("fcntl");
+ goto out;
+ }
+ if (!are_flocks_sane(&f, &cp)) {
+ ret = 1;
+ goto out;
+ }
+
+ flock_init(&f, F_WRLCK, 3, 3);
+ flock_cp(&cp, &f);
+ ret = fcntl(fd, F_GETLK, &f);
+ if (ret) {
+ perror("fcntl");
+ return 1;
+ }
+ GETLK_OWNER_CHECK(f, cp, out);
+
+ flock_init(&f, F_RDLCK, 3, 3);
+ flock_cp(&cp, &f);
+ ret = fcntl(fd, F_GETLK, &f);
+ if (ret) {
+ perror("fcntl");
+ return 1;
+ }
+ GETLK_OWNER_CHECK(f, cp, out);
+
+out:
+ if (fd != -1)
+ close(fd);
+ return ret;
+}
diff --git a/tests/bugs/heal-symlinks.t b/tests/bugs/heal-symlinks.t
new file mode 100644
index 00000000000..ecd2b525be1
--- /dev/null
+++ b/tests/bugs/heal-symlinks.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../afr.rc
+cleanup;
+
+###############################################################################
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+cd $M0
+TEST "echo hello_world > FILE"
+TEST ln -s FILE SOFTLINK
+
+# Remove symlink only (not the .glusterfs entry) and trigger named heal.
+TEST rm -f $B0/${V0}2/SOFTLINK
+TEST stat SOFTLINK
+
+# To heal and clear new-entry mark on source bricks.
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT 2 stat -c %h $B0/${V0}2/SOFTLINK
+EXPECT "hello_world" cat $B0/${V0}2/SOFTLINK
+
+cd -
+cleanup
+###############################################################################
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+cd $M0
+TEST "echo hello_world > FILE"
+TEST ln -s FILE SOFTLINK
+
+# Remove symlink only (not the .glusterfs entry) and trigger named heal.
+TEST rm -f $B0/${V0}2/SOFTLINK
+TEST stat SOFTLINK
+
+# To heal and clear new-entry mark on source bricks.
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT 2 stat -c %h $B0/${V0}2/SOFTLINK
+TEST kill_brick $V0 $H0 $B0/${V0}0
+cd -
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+cd $M0
+EXPECT "hello_world" cat SOFTLINK
+
+cd -
+cleanup
+###############################################################################
diff --git a/tests/bugs/index/bug-1559004-EMLINK-handling.t b/tests/bugs/index/bug-1559004-EMLINK-handling.t
new file mode 100644
index 00000000000..5596fa56c4c
--- /dev/null
+++ b/tests/bugs/index/bug-1559004-EMLINK-handling.t
@@ -0,0 +1,91 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+function create_fake_links() {
+ local dst="$1"
+ local dir="$2"
+ local end=0
+ local start=0
+ local src
+
+ src="$(ls ${dst}/.glusterfs/indices/${dir}/${dir}-* | head -1)"
+ mkdir -p ${dst}/.glusterfs/dummy/${dir}
+ while ln ${src} ${dst}/.glusterfs/dummy/${dir}/link-${end}; do
+ end="$((${end} + 1))"
+ done
+
+ if [[ ${end} -gt 50 ]]; then
+ start="$((${end} - 50))"
+ fi
+ if [[ ${end} -gt 0 ]]; then
+ end="$((${end} - 1))"
+ fi
+
+ for i in $(seq ${start} ${end}); do
+ rm -f ${dst}/.glusterfs/dummy/${dir}/link-${i}
+ done
+}
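+
+# create_fake_links hard-links the base index file until ln fails (EMLINK),
+# then frees the last ~50 links so later index creation hits the limit fast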
+
+function count_fake_links() {
+ local dst="$1"
+ local dir="$2"
+
+ echo "$(find ${dst}/.glusterfs/dummy/${dir}/ -name "link-*" | wc -l)"
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST touch $B0/ext4-1
+TEST touch $B0/ext4-2
+TEST touch $B0/ext4-3
+TEST truncate -s 2GB $B0/ext4-1
+TEST truncate -s 2GB $B0/ext4-2
+TEST truncate -s 2GB $B0/ext4-3
+
+TEST mkfs.ext4 -F $B0/ext4-1
+TEST mkfs.ext4 -F $B0/ext4-2
+TEST mkfs.ext4 -F $B0/ext4-3
+TEST mkdir $B0/ext41
+TEST mkdir $B0/ext42
+TEST mkdir $B0/ext43
+TEST mount -t ext4 -o loop $B0/ext4-1 $B0/ext41
+TEST mount -t ext4 -o loop $B0/ext4-2 $B0/ext42
+TEST mount -t ext4 -o loop $B0/ext4-3 $B0/ext43
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/ext4{1,2,3}
+TEST $CLI volume start $V0
+TEST $CLI volume heal $V0 granular-entry-heal enable
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST kill_brick $V0 $H0 $B0/ext41
+
+# Make sure indices exist and are initialized
+TEST touch $M0/dummy
+
+# Create enough hard links on bricks to make it fail faster. This is much
+# faster than creating ~70000 files on a volume.
+create_fake_links $B0/ext42 xattrop &
+create_fake_links $B0/ext42 entry-changes &
+wait
+count_xattrop="$(count_fake_links $B0/ext42 xattrop)"
+count_entry="$(count_fake_links $B0/ext42 entry-changes)"
+
+TEST mkdir $M0/d{1..10}
+TEST touch $M0/d{1..10}/{1..10}
+
+#On ext4 max number of hardlinks is ~65k, so there should be 2 base index files
+EXPECT "^2$" echo $(ls $B0/ext42/.glusterfs/indices/xattrop | grep xattrop | wc -l)
+EXPECT "^2$" echo $(ls $B0/ext42/.glusterfs/indices/entry-changes | grep entry-changes | wc -l)
+
+#Hard-link counts: count_xattrop/count_entry for the fake links, plus 101
+#for files (100 created above and the dummy), 10 for dirs, 2 for the base
+#indices, and 1 more in xattrop for the root dir
+EXPECT "$((${count_xattrop} + 114))" echo $(ls -l $B0/ext42/.glusterfs/indices/xattrop | grep xattrop | awk '{sum+=$2} END{print sum}')
+EXPECT "$((${count_entry} + 113))" echo $(ls -l $B0/ext42/.glusterfs/indices/entry-changes | grep entry-changes | awk '{sum+=$2} END{print sum}')
+
+cleanup
diff --git a/tests/bugs/io-cache/bug-858242.c b/tests/bugs/io-cache/bug-858242.c
new file mode 100644
index 00000000000..ac87a15533e
--- /dev/null
+++ b/tests/bugs/io-cache/bug-858242.c
@@ -0,0 +1,84 @@
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+int
+main(int argc, char *argv[])
+{
+ char *filename = NULL, *volname = NULL, *cmd = NULL;
+ char buffer[1024] = {
+ 0,
+ };
+ int fd = -1;
+ int ret = -1;
+ struct stat statbuf = {
+ 0,
+ };
+
+ if (argc != 3) {
+ fprintf(stderr, "usage: %s <file-name> <volname>\n", argv[0]);
+ goto out;
+ }
+
+ filename = argv[1];
+ volname = argv[2];
+
+ fd = open(filename, O_RDWR | O_CREAT, 0);
+ if (fd < 0) {
+ fprintf(stderr, "open (%s) failed (%s)\n", filename, strerror(errno));
+ goto out;
+ }
+
+ ret = write(fd, "test-content", 12);
+ if (ret < 0) {
+ fprintf(stderr, "write failed (%s)", strerror(errno));
+ goto out;
+ }
+
+ ret = fsync(fd);
+ if (ret < 0) {
+ fprintf(stderr, "fsync failed (%s)", strerror(errno));
+ goto out;
+ }
+
+ ret = fstat(fd, &statbuf);
+ if (ret < 0) {
+ fprintf(stderr, "fstat failed (%s)", strerror(errno));
+ goto out;
+ }
+
+ ret = asprintf(&cmd, "gluster --mode=script volume stop %s force", volname);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct cli command string (%s)",
+ strerror(errno));
+ goto out;
+ }
+
+ ret = system(cmd);
+ if (ret < 0) {
+ fprintf(stderr, "stopping volume (%s) failed", volname);
+ goto out;
+ }
+
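+    /* give the volume stop a moment to tear down the brick so the read
+     * below cannot be served by a live server */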
+ sleep(3);
+
+ ret = read(fd, buffer, 1024);
+ if (ret >= 0) {
+ fprintf(stderr,
+ "read should've returned error, "
+ "but is successful\n");
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
diff --git a/tests/bugs/io-cache/bug-858242.t b/tests/bugs/io-cache/bug-858242.t
new file mode 100755
index 00000000000..0c8ffb6ab30
--- /dev/null
+++ b/tests/bugs/io-cache/bug-858242.t
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume set $V0 performance.quick-read off
+
+#mount on a random dir
+TEST glusterfs --entry-timeout=3600 --attribute-timeout=3600 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=yes
+
+build_tester $(dirname $0)/bug-858242.c
+
+TEST $(dirname $0)/bug-858242 $M0/testfile $V0
+
+TEST rm -rf $(dirname $0)/bug-858242
+cleanup;
+
diff --git a/tests/bugs/io-cache/bug-read-hang.c b/tests/bugs/io-cache/bug-read-hang.c
new file mode 100644
index 00000000000..e1fae97e7e8
--- /dev/null
+++ b/tests/bugs/io-cache/bug-read-hang.c
@@ -0,0 +1,125 @@
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define NO_INIT 1
+
+int count = 0;
+void
+read_cbk(glfs_fd_t *fd, ssize_t ret, void *data)
+{
+ count++;
+}
+
+glfs_t *
+setup_new_client(char *hostname, char *volname, char *log_file, int flag)
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ fprintf(stderr, "\nglfs_new: returned NULL (%s)\n", strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf(stderr, "\nglfs_set_volfile_server failed ret:%d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_logging(fs, log_file, 7);
+ if (ret < 0) {
+ fprintf(stderr, "\nglfs_set_logging failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+ if (flag == NO_INIT)
+ goto out;
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ fprintf(stderr, "\nglfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+out:
+ return fs;
+error:
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+ struct glfs_fd *fd = NULL;
+ char *volname = NULL;
+ char *log_file = NULL;
+ char *hostname = NULL;
+ char *buf = NULL;
+ struct stat stat;
+
+ if (argc != 4) {
+ fprintf(
+ stderr,
+ "Expect following args %s <hostname> <Vol> <log file location>\n",
+ argv[0]);
+ return -1;
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ log_file = argv[3];
+
+ fs = setup_new_client(hostname, volname, log_file, 0);
+ if (!fs) {
+ fprintf(stderr, "\nsetup_new_client: returned NULL (%s)\n",
+ strerror(errno));
+ goto error;
+ }
+
+ fd = glfs_opendir(fs, "/");
+ if (!fd) {
+ fprintf(stderr, "/: %s\n", strerror(errno));
+ return -1;
+ }
+
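+    /* prime the caches with a readdirplus on the root; the glfs_pread
+     * further down used to hang in this scenario (hence the test name) */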
+ glfs_readdirplus(fd, &stat);
+
+ fd = glfs_open(fs, "/test", O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_open: returned NULL\n");
+ goto error;
+ }
+
+ buf = (char *)malloc(5);
+
+ ret = glfs_pread(fd, buf, 5, 0, 0, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "Read(%s): %d (%s)\n", "test", ret, strerror(errno));
+ return ret;
+ }
+
+ free(buf);
+ glfs_close(fd);
+
+ ret = glfs_fini(fs);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_fini failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ return 0;
+error:
+ return -1;
+}
diff --git a/tests/bugs/io-cache/bug-read-hang.t b/tests/bugs/io-cache/bug-read-hang.t
new file mode 100755
index 00000000000..f8efe281723
--- /dev/null
+++ b/tests/bugs/io-cache/bug-read-hang.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+#. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..2};
+
+TEST $CLI volume set $V0 features.cache-invalidation on
+TEST $CLI volume set $V0 features.cache-invalidation-timeout 600
+TEST $CLI volume set $V0 performance.cache-invalidation on
+TEST $CLI volume set $V0 performance.md-cache-timeout 600
+TEST $CLI volume set $V0 performance.cache-samba-metadata on
+TEST $CLI volume set $V0 open-behind off
+
+logdir=`gluster --print-logdir`
+
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+echo "Hello" > $M0/test
+
+TEST build_tester $(dirname $0)/bug-read-hang.c -lgfapi
+TEST $(dirname $0)/bug-read-hang $H0 $V0 $logdir/bug-read-hang.log
+
+cleanup_tester $(dirname $0)/bug-read-hang
+
+cleanup;
diff --git a/tests/bugs/io-stats/bug-1598548.t b/tests/bugs/io-stats/bug-1598548.t
new file mode 100755
index 00000000000..19b0c053d08
--- /dev/null
+++ b/tests/bugs/io-stats/bug-1598548.t
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+checkdumpthread () {
+ local brick_pid=$(get_brick_pid $1 $2 $3)
+ local thread_count=$(gstack $brick_pid | grep -c _ios_dump_thread)
+ echo $thread_count
+}
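+
+# checkdumpthread counts _ios_dump_thread frames in the brick process: a
+# non-zero stats-dump-interval should spawn exactly one dumper thread and
+# interval 0 should reap it again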
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume start $V0
+
+TEST $CLI volume profile $V0 start
+EXPECT 0 checkdumpthread $V0 $H0 $B0/${V0}0
+
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 3
+EXPECT 1 checkdumpthread $V0 $H0 $B0/${V0}0
+
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 10
+EXPECT 1 checkdumpthread $V0 $H0 $B0/${V0}0
+
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 0
+EXPECT 0 checkdumpthread $V0 $H0 $B0/${V0}0
+
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 7
+EXPECT 1 checkdumpthread $V0 $H0 $B0/${V0}0
+
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 0
+EXPECT 0 checkdumpthread $V0 $H0 $B0/${V0}0
+
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 11
+EXPECT 1 checkdumpthread $V0 $H0 $B0/${V0}0
+
+cleanup;
diff --git a/tests/bugs/logging/bug-823081.t b/tests/bugs/logging/bug-823081.t
new file mode 100755
index 00000000000..bd1965d2d49
--- /dev/null
+++ b/tests/bugs/logging/bug-823081.t
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+cmd_log_history="cmd_history.log"
+V1=patchy2
+
+TEST glusterd
+TEST pidof glusterd
+
+logdir=`gluster --print-logdir`
+function set_tail ()
+{
+ vol=$1;
+ tail_success="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 : SUCCESS"
+ tail_failure="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 : FAILED : Volume $vol already exists"
+ tail_success_force="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 force : SUCCESS"
+ tail_failure_force="volume create $vol $H0:$B0/${vol}1 $H0:$B0/${vol}2 force : FAILED : Volume $vol already exists"
+}
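+
+# set_tail pre-computes the expected cmd_history.log suffixes so that every
+# CLI invocation below can be matched against the last logged line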
+
+set_tail $V0;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 6-`
+TEST [[ \"$tail\" == \"$tail_success\" ]]
+
+TEST ! $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 6-`
+TEST [[ \"$tail\" == \"$tail_failure\" ]]
+
+set_tail $V1;
+TEST gluster volume create $V1 $H0:$B0/${V1}{1,2} force;
+tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 6-`
+TEST [[ \"$tail\" == \"$tail_success_force\" ]]
+
+TEST ! gluster volume create $V1 $H0:$B0/${V1}{1,2} force;
+tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 6-`
+TEST [[ \"$tail\" == \"$tail_failure_force\" ]]
+
+cleanup;
diff --git a/tests/bugs/md-cache/afr-stale-read.t b/tests/bugs/md-cache/afr-stale-read.t
new file mode 100755
index 00000000000..7cee5afe27e
--- /dev/null
+++ b/tests/bugs/md-cache/afr-stale-read.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+#. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..2};
+
+TEST $CLI volume set $V0 features.cache-invalidation on
+TEST $CLI volume set $V0 features.cache-invalidation-timeout 600
+TEST $CLI volume set $V0 performance.cache-invalidation on
+TEST $CLI volume set $V0 performance.md-cache-timeout 600
+TEST $CLI volume set $V0 performance.cache-samba-metadata on
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 read-subvolume $V0-client-0
+TEST $CLI volume set $V0 performance.quick-read off
+
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M1
+
+#Write some data from M0 and read it from M1,
+#so that M1 selects a read subvol, and caches the lookup
+TEST `echo "one" > $M0/file1`
+EXPECT "one" cat $M1/file1
+
+#Fail a few writes from M0 on brick-0; each failure makes
+#upcall on brick-0 invalidate the read subvolume cached by M1.
+TEST chattr +i $B0/${V0}1/file1
+TEST `echo "two" > $M0/file1`
+TEST `echo "three" > $M0/file1`
+TEST `echo "four" > $M0/file1`
+TEST `echo "five" > $M0/file1`
+
+EXPECT_WITHIN $MDC_TIMEOUT "five" cat $M1/file1
+TEST chattr -i $B0/${V0}1/file1
+cleanup;
diff --git a/tests/bugs/md-cache/bug-1211863.t b/tests/bugs/md-cache/bug-1211863.t
new file mode 100755
index 00000000000..ba9bde9fee8
--- /dev/null
+++ b/tests/bugs/md-cache/bug-1211863.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## 1. Start glusterd
+TEST glusterd;
+
+## 2. Create the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3};
+
+## 3. Start the volume
+TEST $CLI volume start $V0
+
+## 4. Increase the md-cache timeout to the maximum
+TEST $CLI volume set $V0 performance.md-cache-timeout 600
+TEST $CLI volume set $V0 performance.xattr-cache-list "user.*"
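+## md-cache caches only the xattrs matching xattr-cache-list, so user.*
+## must be listed for the user.DOSATTRIB checks below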
+
+## 6. Create two gluster mounts
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M1
+
+## 8. Create a file
+TEST touch $M0/file1
+
+## 9. Setxattr from mount-0
+TEST "setfattr -n user.DOSATTRIB -v "abc" $M0/file1"
+## 10. Getxattr from mount-1; this should return the correct value as it is a fresh getxattr
+TEST "getfattr -n user.DOSATTRIB $M1/file1 | grep -q abc"
+
+## 11. Now modify the same xattr from mount-0 again
+TEST "setfattr -n user.DOSATTRIB -v "xyz" $M0/file1"
+## 12. Since the xattr is already cached in mount-1, it returns the old
+ #value until the timeout (600s) expires
+TEST "getfattr -n user.DOSATTRIB $M1/file1 | grep -q abc"
+
+## 13. Unmount to clean all the cache
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+TEST $CLI volume set $V0 features.cache-invalidation on
+TEST $CLI volume set $V0 features.cache-invalidation-timeout 600
+TEST $CLI volume set $V0 performance.cache-invalidation on
+
+## 19. Restart the volume to restart the brick process
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+
+## 21. Create two gluster mounts
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M1
+
+## 23. Repeat the tests 11-14, but this time since cache invalidation is on,
+ #the getxattr will reflect the new value
+TEST "setfattr -n user.DOSATTRIB -v "abc" $M0/file1"
+TEST "getfattr -n user.DOSATTRIB $M1/file1 | grep -q abc"
+TEST "setfattr -n user.DOSATTRIB -v "xyz" $M0/file1"
+sleep 2; #There is a tiny window in which the next getxattr can reach
+         #md-cache before the invalidation triggered by the previous
+         #setxattr does, hence the 2 second sleep.
+         #It must also stay well below 600 (the cache timeout).
+TEST "getfattr -n user.DOSATTRIB $M1/file1 | grep -q xyz"
+
+TEST $CLI volume set $V0 cache-samba-metadata off
+EXPECT 'off' volinfo_field $V0 'performance.cache-samba-metadata'
+
+cleanup;
diff --git a/tests/bugs/md-cache/bug-1211863_unlink.t b/tests/bugs/md-cache/bug-1211863_unlink.t
new file mode 100755
index 00000000000..34392ed919f
--- /dev/null
+++ b/tests/bugs/md-cache/bug-1211863_unlink.t
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+
+## Create the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3};
+
+## Start the volume
+TEST $CLI volume start $V0
+
+## Enable the upcall xlator, and increase the md-cache timeout to max
+TEST $CLI volume set $V0 features.cache-invalidation on
+TEST $CLI volume set $V0 features.cache-invalidation-timeout 600
+TEST $CLI volume set $V0 performance.cache-invalidation on
+TEST $CLI volume set $V0 performance.md-cache-timeout 600
+TEST $CLI volume set $V0 performance.cache-samba-metadata on
+
+## Create two gluster mounts
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M1
+
+## Create files and directories from M0
+TEST mkdir $M0/dir1
+TEST touch $M0/dir1/file{1..5}
+
+## Look up a few files from M1 so that md-cache caches them
+TEST ls -l $M1/dir1/file2
+TEST ls -l $M1/dir1/file3
+
+## Remove the looked up file from M0
+TEST rm $M0/dir1/file2
+TEST mv $M0/dir1/file3 $M0/dir1/file6
+
+## Check if the files are not visible from both M0 and M1
+EXPECT_WITHIN $MDC_TIMEOUT "N" path_exists $M0/dir1/file2
+EXPECT_WITHIN $MDC_TIMEOUT "N" path_exists $M0/dir1/file3
+EXPECT_WITHIN $MDC_TIMEOUT "Y" path_exists $M0/dir1/file6
+
+EXPECT_WITHIN $MDC_TIMEOUT "N" path_exists $M1/dir1/file2
+EXPECT_WITHIN $MDC_TIMEOUT "N" path_exists $M1/dir1/file3
+EXPECT_WITHIN $MDC_TIMEOUT "Y" path_exists $M1/dir1/file6
+
+cleanup;
diff --git a/tests/bugs/md-cache/bug-1476324.t b/tests/bugs/md-cache/bug-1476324.t
new file mode 100644
index 00000000000..c34f412a15e
--- /dev/null
+++ b/tests/bugs/md-cache/bug-1476324.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3};
+
+TEST $CLI volume start $V0
+
+TEST $CLI volume set $V0 performance.md-cache-timeout 600
+TEST $CLI volume set $V0 performance.cache-samba-metadata on
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST touch $M0/file1
+
+TEST "setfattr -n user.DOSATTRIB -v 0sAAOW $M0/file1"
+TEST "getfattr -n user.DOSATTRIB $M0/file1 -e base64 | grep -q 0sAAOW"
+
+TEST "setfattr -n user.DOSATTRIB -v 0x00ff $M0/file1"
+TEST "getfattr -n user.DOSATTRIB $M0/file1 -e hex | grep -q 0x00ff"
+
+cleanup;
diff --git a/tests/bugs/md-cache/bug-1632503.t b/tests/bugs/md-cache/bug-1632503.t
new file mode 100755
index 00000000000..aeb57f65639
--- /dev/null
+++ b/tests/bugs/md-cache/bug-1632503.t
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TESTS_EXPECTED_IN_LOOP=5
+
+TEST glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3};
+
+TEST $CLI volume start $V0
+
+TEST $CLI volume set $V0 performance.md-cache-timeout 600
+TEST $CLI volume set $V0 performance.md-cache-statfs on
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+for i in $(seq 1 5); do
+ TEST_IN_LOOP df $M0;
+done
+
+cleanup;
diff --git a/tests/bugs/md-cache/bug-1726205.t b/tests/bugs/md-cache/bug-1726205.t
new file mode 100644
index 00000000000..795130e9bd8
--- /dev/null
+++ b/tests/bugs/md-cache/bug-1726205.t
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3};
+
+TEST $CLI volume start $V0
+
+TEST $CLI volume set $V0 group samba
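+## 'group samba' applies the packaged samba option set (md-cache and
+## cache-invalidation tunables, among others) in one command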
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST touch $M0/file
+TEST "setfattr -n "user.DosStream.Zone.Identifier:\$DATA" -v '\0' $M0/file"
+TEST "getfattr -n "user.DosStream.Zone.Identifier:\$DATA" -e hex $M0/file | grep -q 0x00"
+
+cleanup;
diff --git a/tests/bugs/md-cache/setxattr-prepoststat.t b/tests/bugs/md-cache/setxattr-prepoststat.t
new file mode 100755
index 00000000000..01fa768299c
--- /dev/null
+++ b/tests/bugs/md-cache/setxattr-prepoststat.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## 1. Start glusterd
+TEST glusterd;
+
+## 2. Create the volume
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+
+TEST $CLI volume set $V0 group metadata-cache
+TEST $CLI volume set $V0 performance.xattr-cache-list "user.*"
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M1
+
+TEST touch $M0/file1
+TEST `echo "abakjshdjahskjdhakjhdskjac" >> $M0/file1`
+size=`stat -c '%s' $M0/file1`
+
+## Setxattr from mount-0
+TEST "setfattr -n user.DOSATTRIB -v "abc" $M0/file1"
+EXPECT $size stat -c '%s' $M0/file1
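+## the cached stat must still show the real size; a bogus prestat/poststat
+## in the setxattr reply would have replaced it in md-cache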
+
+## Getxattr from mount-1, this should return the correct value
+TEST "getfattr -n user.DOSATTRIB $M1/file1 | grep -q abc"
+
+TEST "setfattr -x user.DOSATTRIB $M1/file1"
+EXPECT $size stat -c '%s' $M1/file1
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+cleanup;
diff --git a/tests/bugs/nfs/bug-1053579.t b/tests/bugs/nfs/bug-1053579.t
new file mode 100755
index 00000000000..2f53172e24c
--- /dev/null
+++ b/tests/bugs/nfs/bug-1053579.t
@@ -0,0 +1,114 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup
+
+# prepare the users and groups
+NEW_USER=bug1053579
+NEW_UID=1053579
+NEW_GID=1053579
+LAST_GID=1053779
+NEW_GIDS=${NEW_GID}
+
+# OS-specific overrides
+case $OSTYPE in
+NetBSD|Darwin)
+ # only NGROUPS_MAX=16 secondary groups are supported
+ LAST_GID=1053593
+ ;;
+FreeBSD)
+ # NGROUPS_MAX=1023 (FreeBSD>=8.0), we can afford 200 groups
+ ;;
+Linux)
+ # NGROUPS_MAX=65536, we can afford 200 groups
+ ;;
+*)
+ ;;
+esac
+
+# create a user that belongs to many groups
+for GID in $(seq -f '%6.0f' ${NEW_GID} ${LAST_GID})
+do
+ groupadd -o -g ${GID} ${NEW_USER}-${GID}
+ NEW_GIDS="${NEW_GIDS},${NEW_USER}-${GID}"
+done
+TEST useradd -o -M -u ${NEW_UID} -g ${NEW_GID} -G ${NEW_USER}-${NEW_GIDS} ${NEW_USER}
+
+# preparation done, start the tests
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume set $V0 nfs.server-aux-gids on
+TEST $CLI volume start $V0
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+
+# mount the volume
+TEST mount_nfs $H0:/$V0 $N0 nolock
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+# the actual test, this used to crash
+su -m ${NEW_USER} -c "stat $N0/. > /dev/null"
+TEST [ $? -eq 0 ]
+
+# create a file that only a user in a high-group can access
+echo 'Hello World!' > $N0/README
+chgrp ${LAST_GID} $N0/README
+chmod 0640 $N0/README
+
+#su -m ${NEW_USER} -c "cat $N0/README 2>&1 > /dev/null"
+su -m ${NEW_USER} -c "cat $N0/README"
+ret=$?
+
+case $OSTYPE in
+Linux) # Linux NFS fails with big GID
+ if [ $ret -ne 0 ] ; then
+ res="Y"
+ else
+ res="N"
+ fi
+ ;;
+*) # Other systems should cope better
+ if [ $ret -eq 0 ] ; then
+ res="Y"
+ else
+ res="N"
+ fi
+ ;;
+esac
+TEST [ "x$res" = "xY" ]
+
+# This passes only on build.gluster.org, not reproducible on other machines?!
+#su -m ${NEW_USER} -c "cat $M0/README 2>&1 > /dev/null"
+#TEST [ $? -ne 0 ]
+
+# enable server.manage-gids and things should work
+TEST $CLI volume set $V0 server.manage-gids on
+
+su -m ${NEW_USER} -c "cat $N0/README 2>&1 > /dev/null"
+TEST [ $? -eq 0 ]
+su -m ${NEW_USER} -c "cat $M0/README 2>&1 > /dev/null"
+TEST [ $? -eq 0 ]
+
+# cleanup
+userdel --force ${NEW_USER}
+for GID in $(seq -f '%6.0f' ${NEW_GID} ${LAST_GID})
+do
+ groupdel ${NEW_USER}-${GID}
+done
+
+rm -f $N0/README
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/nfs/bug-1143880-fix-gNFSd-auth-crash.t b/tests/bugs/nfs/bug-1143880-fix-gNFSd-auth-crash.t
new file mode 100644
index 00000000000..c360db4c91c
--- /dev/null
+++ b/tests/bugs/nfs/bug-1143880-fix-gNFSd-auth-crash.t
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume start $V0
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST mount_nfs $H0:/$V0 $N0 nolock
+TEST mkdir -p $N0/foo
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+TEST mount_nfs $H0:/$V0/foo $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+cleanup
diff --git a/tests/bugs/nfs/bug-1157223-symlink-mounting.t b/tests/bugs/nfs/bug-1157223-symlink-mounting.t
new file mode 100644
index 00000000000..dea609ed193
--- /dev/null
+++ b/tests/bugs/nfs/bug-1157223-symlink-mounting.t
@@ -0,0 +1,126 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume info;
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0;
+
+## Wait for volume to register with rpc.mountd
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+## Mount NFS
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+
+mkdir $N0/dir1;
+mkdir $N0/dir2;
+pushd $N0/ ;
+
+##link created using relative path
+ln -s dir1 symlink1;
+
+##relative path contains ".."
+ln -s ../dir1 dir2/symlink2;
+
+##link created using absolute path
+ln -s $N0/dir1 symlink3;
+
+##links pointing to other symlinks
+ln -s symlink1 symlink4
+ln -s symlink3 symlink5
+
+##dead links
+ln -s does/not/exist symlink6
+
+##link whose ".." components point outside the glusterfs export
+ln -s ../../ symlink7
+
+##link pointing to an unauthorized area (.glusterfs)
+ln -s .glusterfs symlink8
+
+popd ;
+
+##Umount the volume
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Mount and umount NFS via directory
+TEST mount_nfs $H0:/$V0/dir1 $N0 nolock;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Mount and umount NFS via symlink1
+TEST mount_nfs $H0:/$V0/symlink1 $N0 nolock;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Mount and umount NFS via symlink2
+TEST mount_nfs $H0:/$V0/dir2/symlink2 $N0 nolock;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Mount NFS via symlink3 should fail
+TEST ! mount_nfs $H0:/$V0/symlink3 $N0 nolock;
+
+## Mount and umount NFS via symlink4
+TEST mount_nfs $H0:/$V0/symlink4 $N0 nolock;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Mount NFS via symlink5 should fail
+TEST ! mount_nfs $H0:/$V0/symlink5 $N0 nolock;
+
+## Mount NFS via symlink6 should fail
+TEST ! mount_nfs $H0:/$V0/symlink6 $N0 nolock;
+
+## Mount NFS via symlink7 should fail
+TEST ! mount_nfs $H0:/$V0/symlink7 $N0 nolock;
+
+## Mount NFS via symlink8 should fail
+TEST ! mount_nfs $H0:/$V0/symlink8 $N0 nolock;
+
+##Similar check for udp mount
+$CLI volume stop $V0
+TEST $CLI volume set $V0 nfs.mount-udp on
+$CLI volume start $V0
+
+## Wait for volume to register with rpc.mountd
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+## Mount and umount NFS via directory
+TEST mount_nfs $H0:/$V0/dir1 $N0 nolock,mountproto=udp,proto=tcp;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Mount and umount NFS via symlink1
+TEST mount_nfs $H0:/$V0/symlink1 $N0 nolock,mountproto=udp,proto=tcp;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Mount and umount NFS via symlink2
+TEST mount_nfs $H0:/$V0/dir2/symlink2 $N0 nolock,mountproto=udp,proto=tcp;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Mount NFS via symlink3 should fail
+TEST ! mount_nfs $H0:/$V0/symlink3 $N0 nolock,mountproto=udp,proto=tcp;
+
+## Mount and umount NFS via symlink4
+TEST mount_nfs $H0:/$V0/symlink4 $N0 nolock,mountproto=udp,proto=tcp;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Mount NFS via symlink5 should fail
+TEST ! mount_nfs $H0:/$V0/symlink5 $N0 nolock,mountproto=udp,proto=tcp;
+
+## Mount NFS via symlink6 should fail
+TEST ! mount_nfs $H0:/$V0/symlink6 $N0 nolock,mountproto=udp,proto=tcp;
+
+##symlink7 is not checked here because, with a UDP mount, ../../ resolves to the root '/'
+
+## Mount NFS via symlink8 should fail
+TEST ! mount_nfs $H0:/$V0/symlink8 $N0 nolock,mountproto=udp,proto=tcp;
+
+rm -rf $H0:$B0/
+cleanup;
diff --git a/tests/bugs/nfs/bug-1161092-nfs-acls.t b/tests/bugs/nfs/bug-1161092-nfs-acls.t
new file mode 100644
index 00000000000..45a22e79336
--- /dev/null
+++ b/tests/bugs/nfs/bug-1161092-nfs-acls.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 nfs.disable false
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+TEST mount_nfs $H0:/$V0 $N0
+
+TEST touch $N0/file1
+TEST chmod 700 $N0/file1
+TEST getfacl $N0/file1
+
+TEST $CLI volume set $V0 root-squash on
+TEST getfacl $N0/file1
+
+TEST umount_nfs $H0:/$V0 $N0
+TEST mount_nfs $H0:/$V0 $N0
+TEST getfacl $N0/file1
+
+## Unmount before killing the daemon to avoid deadlocks
+umount_nfs $N0
+
+cleanup;
+
diff --git a/tests/bugs/nfs/bug-1166862.t b/tests/bugs/nfs/bug-1166862.t
new file mode 100755
index 00000000000..c4f51a2d446
--- /dev/null
+++ b/tests/bugs/nfs/bug-1166862.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+#
+# When nfs.mount-rmtab is disabled, it should not get updated.
+#
+# Based on: bug-904065.t
+#
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+# count the lines printed by a command; echo 0 if no command is given
+function count_lines()
+{
+ if [ -n "$1" ]
+ then
+ $@ 2>/dev/null | wc -l
+ else
+ echo 0
+ fi
+}
+
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1
+EXPECT 'Created' volinfo_field $V0 'Status'
+TEST $CLI volume set $V0 nfs.disable false
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+# glusterfs/nfs needs some time to start up in the background
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+# disable the rmtab by setting it to the magic "/-" value
+TEST $CLI volume set $V0 nfs.mount-rmtab /-
+
+# before mounting the rmtab should be empty
+EXPECT '0' count_lines cat $GLUSTERD_WORKDIR/nfs/rmtab
+
+TEST mount_nfs $H0:/$V0 $N0 nolock
+EXPECT '0' count_lines cat $GLUSTERD_WORKDIR/nfs/rmtab
+
+# showmount should list one client
+EXPECT '1' count_lines showmount --no-headers $H0
+
+# unmount
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+# after resetting the option, the rmtab should get updated again
+TEST $CLI volume reset $V0 nfs.mount-rmtab
+
+# before mounting the rmtab should be empty
+EXPECT '0' count_lines cat $GLUSTERD_WORKDIR/nfs/rmtab
+
+TEST mount_nfs $H0:/$V0 $N0 nolock
+EXPECT '2' count_lines cat $GLUSTERD_WORKDIR/nfs/rmtab
+
+# removing a mount
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+EXPECT '0' count_lines cat $GLUSTERD_WORKDIR/nfs/rmtab
+
+cleanup
diff --git a/tests/bugs/nfs/bug-1210338.c b/tests/bugs/nfs/bug-1210338.c
new file mode 100644
index 00000000000..d4099244176
--- /dev/null
+++ b/tests/bugs/nfs/bug-1210338.c
@@ -0,0 +1,31 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+
+int
+main(int argc, char *argv[])
+{
+ int ret = -1;
+ int fd = -1;
+
+ fd = open(argv[1], O_CREAT | O_EXCL, 0644);
+
+ if (fd == -1) {
+ fprintf(stderr, "creation of the file %s failed (%s)\n", argv[1],
+ strerror(errno));
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+    if (fd != -1)
+ close(fd);
+
+ return ret;
+}
diff --git a/tests/bugs/nfs/bug-1210338.t b/tests/bugs/nfs/bug-1210338.t
new file mode 100644
index 00000000000..b5c9245affd
--- /dev/null
+++ b/tests/bugs/nfs/bug-1210338.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+NFS_SOURCE=$(dirname $0)/bug-1210338.c
+NFS_EXEC=$(dirname $0)/excl_create
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock
+
+build_tester $NFS_SOURCE -o $NFS_EXEC
+TEST [ -e $NFS_EXEC ]
+
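+# an exclusive create (O_CREAT|O_EXCL) of a brand-new file over NFS must succeed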
+TEST $NFS_EXEC $N0/my_file
+
+rm -f $NFS_EXEC;
+
+cleanup
diff --git a/tests/bugs/nfs/bug-1302948.t b/tests/bugs/nfs/bug-1302948.t
new file mode 100755
index 00000000000..a2fb0e68ff0
--- /dev/null
+++ b/tests/bugs/nfs/bug-1302948.t
@@ -0,0 +1,13 @@
+#!/bin/bash
+# TEST the nfs.rdirplus option
+. $(dirname $0)/../../include.rc
+
+cleanup
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 nfs.rdirplus off
+TEST $CLI volume set $V0 nfs.rdirplus on
+cleanup
diff --git a/tests/bugs/nfs/bug-847622.t b/tests/bugs/nfs/bug-847622.t
new file mode 100755
index 00000000000..5ccee722ed9
--- /dev/null
+++ b/tests/bugs/nfs/bug-847622.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+case $OSTYPE in
+NetBSD)
+ echo "Skip test on ACL which are not available on NetBSD" >&2
+ SKIP_TESTS
+ exit 0
+ ;;
+*)
+ ;;
+esac
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/brick0
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock
+cd $N0
+
+# simple getfacl setfacl commands
+TEST touch testfile
+TEST setfacl -m u:14:r testfile
+TEST getfacl testfile
+
+cd
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+cleanup
+
diff --git a/tests/bugs/nfs/bug-877885.t b/tests/bugs/nfs/bug-877885.t
new file mode 100755
index 00000000000..dca315a3d01
--- /dev/null
+++ b/tests/bugs/nfs/bug-877885.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 \
+$M0;
+
+TEST touch $M0/file
+TEST mkdir $M0/dir
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock
+cd $N0
+
+rm -rf * &
+
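+# mount a second client while the recursive remove is still running;
+# retry=0 makes a mount failure surface immediately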
+TEST mount_nfs $H0:/$V0 $N1 retry=0,nolock;
+
+cd;
+
+kill %1;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N1
+
+cleanup
diff --git a/tests/bugs/nfs/bug-904065.t b/tests/bugs/nfs/bug-904065.t
new file mode 100755
index 00000000000..0eba86e7ee8
--- /dev/null
+++ b/tests/bugs/nfs/bug-904065.t
@@ -0,0 +1,100 @@
+#!/bin/bash
+#
+# This test does not use 'showmount' from the nfs-utils package, it would
+# require setting up a portmapper (either rpcbind or portmap, depending on the
+# Linux distribution used for testing). The persistence of the rmtab should not
+# affect the current showmount outputs, so existing regression tests should be
+# sufficient.
+#
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+# count the lines of a file, return 0 if the file does not exist
+function count_lines()
+{
+ if [ -e "$1" ]
+ then
+ wc -l < $1
+ else
+ echo 0
+ fi
+}
+
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1
+EXPECT 'Created' volinfo_field $V0 'Status'
+TEST $CLI volume set $V0 nfs.disable false
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+# glusterfs/nfs needs some time to start up in the background
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+# before mounting the rmtab should be empty
+EXPECT '0' count_lines $GLUSTERD_WORKDIR/nfs/rmtab
+
+TEST mount_nfs $H0:/$V0 $N0 nolock
+# the output would look similar to:
+#
+# hostname-0=172.31.122.104
+# mountpoint-0=/ufo
+#
+EXPECT '2' count_lines $GLUSTERD_WORKDIR/nfs/rmtab
+
+# duplicate mounts should not be recorded (client could have crashed)
+TEST mount_nfs $H0:/$V0 $N1 nolock
+EXPECT '2' count_lines $GLUSTERD_WORKDIR/nfs/rmtab
+
+# removing a mount (even if there are two) should remove the entry
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N1
+EXPECT '0' count_lines $GLUSTERD_WORKDIR/nfs/rmtab
+
+# unmounting the other mount should work flawlessly
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+EXPECT '0' count_lines $GLUSTERD_WORKDIR/nfs/rmtab
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $M0
+
+# we'll create a fake rmtab here, similar to what another storage server would write
+# using an invalid IP address to prevent (unlikely) collisions on the test-machine
+cat << EOF > $M0/rmtab
+hostname-0=127.0.0.256
+mountpoint-0=/ufo
+EOF
+EXPECT '2' count_lines $M0/rmtab
+
+# reconfigure merges the rmtab with the one on the volume
+TEST gluster volume set $V0 nfs.mount-rmtab $M0/rmtab
+
+# glusterfs/nfs needs some time to restart
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+# Apparently "is_nfs_export_available" might return even if the export is
+# not, in fact, available. (eyeroll) Give it a bit of extra time.
+#
+# TBD: fix the broken shell function instead of working around it here
+sleep 5
+
+# a new mount should be added to the rmtab, not overwrite existing ones
+TEST mount_nfs $H0:/$V0 $N0 nolock
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '4' count_lines $M0/rmtab
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+EXPECT '2' count_lines $M0/rmtab
+
+# TODO: nfs/reconfigure() is never called and is therefore disabled. When the
+# NFS-server supports reloading and does not get restarted anymore, we should
+# add a test that includes the merging of entries in the old rmtab with the new
+# rmtab.
+
+cleanup
diff --git a/tests/bugs/nfs/bug-915280.t b/tests/bugs/nfs/bug-915280.t
new file mode 100755
index 00000000000..bd279157c25
--- /dev/null
+++ b/tests/bugs/nfs/bug-915280.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
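+# extract a single "Field: value" line from 'gluster volume info' (a local
+# re-definition of the usual test helper)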
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2;
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 nfs.disable false
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+MOUNTDIR=$N0;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock,timeo=30,retrans=1
+TEST touch $N0/testfile
+
+TEST $CLI volume set $V0 debug.error-gen client
+TEST $CLI volume set $V0 debug.error-fops stat
+TEST $CLI volume set $V0 debug.error-failure 100
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+pid_file=$(read_nfs_pidfile);
+
+getfacl $N0/testfile 2>/dev/null
+
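+# the nfs server must survive the error-gen induced stat failure: its pid
+# after the getfacl must match the one recorded in the pidfile before it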
+nfs_pid=$(get_nfs_pid);
+if [ ! $nfs_pid ]
+then
+ nfs_pid=0;
+fi
+
+TEST [ $nfs_pid -eq $pid_file ]
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR
+
+cleanup;
diff --git a/tests/bugs/nfs/bug-970070.t b/tests/bugs/nfs/bug-970070.t
new file mode 100755
index 00000000000..61be4844e51
--- /dev/null
+++ b/tests/bugs/nfs/bug-970070.t
@@ -0,0 +1,13 @@
+#!/bin/bash
+# TEST the nfs.acl option
+. $(dirname $0)/../../include.rc
+
+cleanup
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 nfs.acl off
+TEST $CLI volume set $V0 nfs.acl on
+cleanup
diff --git a/tests/bugs/nfs/bug-974972.t b/tests/bugs/nfs/bug-974972.t
new file mode 100755
index 00000000000..975c46f85a4
--- /dev/null
+++ b/tests/bugs/nfs/bug-974972.t
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+#This script checks that an nfs mount does not fail lookup on files in split-brain
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0
+TEST touch $N0/1
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}1
+echo abc > $N0/1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" nfs_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 1
+
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}0
+echo def > $N0/1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" nfs_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 1
+
+#Lookup should not fail
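+#Reading must still fail (EIO) because the file is in data split-brain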
+TEST ls $N0/1
+TEST ! cat $N0/1
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+cleanup
diff --git a/tests/bugs/nfs/showmount-many-clients.t b/tests/bugs/nfs/showmount-many-clients.t
new file mode 100644
index 00000000000..c6c9c35d60a
--- /dev/null
+++ b/tests/bugs/nfs/showmount-many-clients.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# The nfs.rpc-auth-allow volume option is used to generate the list of clients
+# that are displayed as able to mount the export. The "group" in the export
+# should be a list of all clients, identified by "name". In previous versions,
+# the "name" was the copied string from nfs.rpc-auth-allow. This is not
+# correct, as the volume option should be parsed and split into different
+# groups.
+#
+# When the single string is passed, this testcase fails when the
+# nfs.rpc-auth-allow volume option is longer than 256 characters. By splitting
+# the groups into their own structures, this testcase passes.
+#
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1
+EXPECT 'Created' volinfo_field $V0 'Status'
+TEST $CLI volume set $V0 nfs.disable false
+
+CLIENTS=$(echo 127.0.0.{1..128} | tr ' ' ,)
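+# this expands to a comma-separated list of 128 addresses, comfortably
+# longer than the 256 characters mentioned above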
+TEST $CLI volume set $V0 nfs.rpc-auth-allow ${CLIENTS}
+TEST $CLI volume set $V0 nfs.rpc-auth-reject all
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+# glusterfs/nfs needs some time to start up in the background
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+# showmount should not timeout (no reply is sent on error)
+TEST showmount -e $H0
+
+cleanup
diff --git a/tests/bugs/nfs/socket-as-fifo.py b/tests/bugs/nfs/socket-as-fifo.py
new file mode 100755
index 00000000000..eb507e1d30b
--- /dev/null
+++ b/tests/bugs/nfs/socket-as-fifo.py
@@ -0,0 +1,33 @@
+#
+# Create a unix domain socket and test if it is a socket (and not a fifo/pipe).
+#
+# Author: Niels de Vos <ndevos@redhat.com>
+#
+
+from __future__ import print_function
+import os
+import stat
+import sys
+import socket
+
+ret = 1
+
+if len(sys.argv) != 2:
+ print('Usage: %s <socket>' % (sys.argv[0]))
+ sys.exit(ret)
+
+path = sys.argv[1]
+
+sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+sock.bind(path)
+
+stbuf = os.stat(path)
+mode = stbuf.st_mode
+
+if stat.S_ISSOCK(mode):
+ ret = 0
+
+sock.close()
+os.unlink(path)
+
+sys.exit(ret)
diff --git a/tests/bugs/nfs/socket-as-fifo.t b/tests/bugs/nfs/socket-as-fifo.t
new file mode 100644
index 00000000000..d9b9e959ce3
--- /dev/null
+++ b/tests/bugs/nfs/socket-as-fifo.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock
+
+# this is the actual test
+TEST $PYTHON $(dirname $0)/socket-as-fifo.py $N0/not-a-fifo.socket
+
+TEST umount_nfs $N0
+
+cleanup
diff --git a/tests/bugs/nfs/subdir-trailing-slash.t b/tests/bugs/nfs/subdir-trailing-slash.t
new file mode 100644
index 00000000000..6a114877ac7
--- /dev/null
+++ b/tests/bugs/nfs/subdir-trailing-slash.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+#
+# Verify that mounting a subdir over NFS works, even with a trailing /
+#
+# For example:
+# mount -t nfs server.example.com:/volume/subdir/
+#
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume set $V0 nfs.disable false
+
+TEST $CLI volume start $V0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+
+TEST mount_nfs $H0:/$V0 $N0 nolock
+TEST mkdir -p $N0/subdir
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+TEST mount_nfs $H0:/$V0/subdir/ $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+cleanup
diff --git a/tests/bugs/nfs/zero-atime.t b/tests/bugs/nfs/zero-atime.t
new file mode 100755
index 00000000000..2a940091ad9
--- /dev/null
+++ b/tests/bugs/nfs/zero-atime.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# posix_do_utimes() sets atime and mtime to the values in the passed IATT. If
+# they are not set, these values are 0 and the atime/mtime get set to the Epoch.
+#
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock
+
+# create a file for testing on the NFS mount
+TEST dd if=/dev/urandom of=$N0/small count=1 bs=1024k
+
+# with the timezone set to UTC, a broken conversion results in atime=0
+TEST TZ=UTC dd if=/dev/urandom of=$N0/small bs=64k count=1 conv=nocreat
+TEST [ "$(stat --format=%X $N0/small)" != "0" ]
+
+TEST rm $N0/small
+
+cleanup
diff --git a/tests/bugs/nl-cache/bug-1451588.t b/tests/bugs/nl-cache/bug-1451588.t
new file mode 100755
index 00000000000..cf07d04c5cc
--- /dev/null
+++ b/tests/bugs/nl-cache/bug-1451588.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0..4}
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI volume set $V0 performance.nl-cache on
+TEST $CLI volume set $V0 features.cache-invalidation on
+TEST $CLI volume set $V0 features.cache-invalidation-timeout 600
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs --volfile-id=/$V0 --aux-gfid-mount --volfile-server=$H0 $M0
+
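+# a stat on a nonexistent gfid via the aux gfid mount must simply fail;
+# with nl-cache enabled this negative lookup is the path being exercised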
+TEST ! stat $M0/.gfid/1901b1a0-c612-46ee-b45a-e8345d5a0b48
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/bugs/posix/bug-1034716.t b/tests/bugs/posix/bug-1034716.t
new file mode 100644
index 00000000000..d36f8b598f4
--- /dev/null
+++ b/tests/bugs/posix/bug-1034716.t
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
+TEST $CLI volume start $V0
+
+# Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+#Create a file; xattr fops are then exercised on the root directory and the file
+TEST touch $M0/foo
+
+function xattr_query_check() {
+ local path=$1
+
+ local ret=`getfattr -m . -d $path 2>&1 | grep -c 'trusted.glusterfs'`
+ echo $ret
+}
+
+function set_xattr() {
+ local path=$1
+ local xa_name=$2
+ local xa_val=$3
+
+ setfattr -n $xa_name -v $xa_val $path
+ echo $?
+}
+
+function remove_xattr() {
+ local path=$1
+ local xa_name=$2
+
+ setfattr -x $xa_name $path
+ echo $?
+}
+
+EXPECT 0 xattr_query_check $M0/
+EXPECT 0 xattr_query_check $M0/foo
+
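+# modifying or removing the protected volume-id xattr must be rejected; the
+# helpers echo setfattr's exit status, so 1 means the operation failed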
+EXPECT 1 set_xattr $M0/ 'trusted.glusterfs.volume-id' 'foo'
+EXPECT 1 remove_xattr $M0/ 'trusted.glusterfs.volume-id'
+
+
+## Finish up
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/posix/bug-1040275-brick-uid-reset-on-volume-restart.t b/tests/bugs/posix/bug-1040275-brick-uid-reset-on-volume-restart.t
new file mode 100755
index 00000000000..3839c6e3380
--- /dev/null
+++ b/tests/bugs/posix/bug-1040275-brick-uid-reset-on-volume-restart.t
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
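+# Bug 1040275: ownership (uid/gid) set on the volume root must survive a
+# volume stop/start; the bricks must not reset it to root:root.
+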
+function get_uid() {
+ stat -c '%u' $1;
+}
+
+function get_gid() {
+ stat -c '%g' $1;
+}
+
+function check_stat() {
+ stat $1
+ echo $?
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '6' brick_count $V0
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+EXPECT 0 get_uid $M0;
+EXPECT 0 get_gid $M0;
+
+TEST chown 100:101 $M0;
+
+EXPECT 100 get_uid $M0;
+EXPECT 101 get_gid $M0;
+
+TEST $CLI volume stop $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" online_brick_count
+
+TEST $CLI volume start $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "6" online_brick_count
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 5
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" check_stat $M0
+EXPECT 100 get_uid $M0;
+EXPECT 101 get_gid $M0;
+
+cleanup;
diff --git a/tests/bugs/posix/bug-1113960.t b/tests/bugs/posix/bug-1113960.t
new file mode 100755
index 00000000000..ee42de2a092
--- /dev/null
+++ b/tests/bugs/posix/bug-1113960.t
@@ -0,0 +1,98 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+NUM_DIRS=50
+NUM_FILES=10
+
+create_dirs () {
+ for (( i=1; i<=$NUM_DIRS; i+=1));do
+ mkdir $1/olddir$i
+ for (( j=0; j<$NUM_FILES; j+=1));do
+ echo "This is file $j in dir $i" > $1/olddir$i/file$j;
+ done
+ done
+        echo "0" > $1/status
+}
+
+move_dirs () {
+ old_path="$1"
+ new_path="$1"
+
+ #Create a deep directory
+ for (( i=$NUM_DIRS; i>=2; i-=1));do
+ mv $1/olddir$i $1/olddir`expr $i - 1` > /dev/null 2>&1;
+ done
+
+ #Start renaming files and dirs so the paths change for the
+ #posix_handle_path calculations
+
+ for (( i=1; i<=$NUM_DIRS; i+=1));do
+ old_path="$new_path/olddir$i"
+ new_path="$new_path/longernamedir$i"
+ mv $old_path $new_path;
+
+ for (( j=0; j<$NUM_FILES; j+=1));do
+ mv $new_path/file$j $new_path/newfile$j > /dev/null 2>&1;
+ done
+ done
+        echo "done" > $1/status
+}
+
+ls_loop () {
+ #Loop until the move_dirs function is done
+ for (( i=0; i<=500; i+=1 )); do
+ ls -lR $1 > /dev/null 2>&1
+ done
+}
+
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3,4};
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '4' brick_count $V0
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount FUSE with caching disabled (read-write)
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+TEST create_dirs $M0
+
+## Mount FUSE with caching disabled (read-write) again
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M1;
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M2;
+
+(ls_loop $M1)&
+ls_pid1=$!
+
+(ls_loop $M2)&
+ls_pid2=$!
+
+
+#Start moving/renaming the directories so the paths change
+TEST move_dirs $M0
+
+EXPECT_WITHIN 180 "done" cat $M0/status
+
+#Kill the ls processes
+
+kill -SIGTERM $ls_pid1
+kill -SIGTERM $ls_pid2
+
+
+#Check if all bricks are still up
+EXPECT '4' online_brick_count
+
+cleanup;
diff --git a/tests/bugs/posix/bug-1122028.t b/tests/bugs/posix/bug-1122028.t
new file mode 100755
index 00000000000..492668cf1dc
--- /dev/null
+++ b/tests/bugs/posix/bug-1122028.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+# Create a single-brick volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+# Start volume
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+# Mount volume over FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/a
+TEST ln $M0/dir/a $M0/dir/b
+
+# Confirm hardlinks
+inum1=$(ls -i $M0/dir/a | cut -d' ' -f1)
+inum2=$(ls -i $M0/dir/b | cut -d' ' -f1)
+TEST [ "$inum1" = "$inum2" ]
+
+# Turn on build-pgfid
+TEST $CLI volume set $V0 build-pgfid on
+EXPECT 'on' volinfo_field $V0 'storage.build-pgfid'
+
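+# The hardlinks were created before build-pgfid was enabled, so they carry no
+# pgfid xattr on the brick; unlinking them should nevertheless succeed
+# (presumably the regression this bug covers).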
+# Unlink files
+TEST unlink $M0/dir/a
+TEST unlink $M0/dir/b
+
+# Unmount
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+# Stop the volume
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+# Delete the volume
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup
diff --git a/tests/bugs/posix/bug-1175711.c b/tests/bugs/posix/bug-1175711.c
new file mode 100644
index 00000000000..8ab193c4014
--- /dev/null
+++ b/tests/bugs/posix/bug-1175711.c
@@ -0,0 +1,37 @@
+#include <stdio.h>
+#include <dirent.h>
+#include <string.h>
+#include <assert.h>
+
+int
+main(int argc, char **argv)
+{
+ DIR *dir = NULL;
+ struct dirent *entry = NULL;
+ int ret = 0;
+ char *path = NULL;
+
+ assert(argc == 2);
+ path = argv[1];
+
+ dir = opendir(path);
+ if (!dir) {
+ printf("opendir(%s) failed.\n", path);
+ return -1;
+ }
+
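+    /* d_type is not mandated by POSIX; glibc defines _DIRENT_HAVE_D_TYPE
+     * when struct dirent provides the field, so only check it in that case */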
+#ifdef _DIRENT_HAVE_D_TYPE
+ while ((entry = readdir(dir)) != NULL) {
+ if (entry->d_type == DT_UNKNOWN) {
+ printf("d_type found to be DT_UNKNOWN\n");
+ ret = -1;
+ break;
+ }
+ }
+#endif
+
+ if (dir)
+ closedir(dir);
+
+ return ret;
+}
diff --git a/tests/bugs/posix/bug-1175711.t b/tests/bugs/posix/bug-1175711.t
new file mode 100755
index 00000000000..f4162544d92
--- /dev/null
+++ b/tests/bugs/posix/bug-1175711.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+# Create, start and mount the volume.
+TEST glusterd;
+TEST $CLI volume create $V0 $H0:$B0/$V0;
+TEST $CLI volume start $V0;
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+
+# Compile the test program
+TEST $CC -Wall $(dirname $0)/bug-1175711.c -o $(dirname $0)/bug-1175711
+
+# Create directory and some entries inside them.
+mkdir -p $M0/dir-bug-1175711
+mkdir -p $M0/dir-bug-1175711/DT_DIR
+touch $M0/dir-bug-1175711/DT_REG
+
+# Invoke the test program and pass path of directory to it.
+TEST $(dirname $0)/bug-1175711 $M0/dir-bug-1175711
+
+# Unmount, stop and delete the volume
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/posix/bug-1360679.t b/tests/bugs/posix/bug-1360679.t
new file mode 100644
index 00000000000..fbb9d027ddb
--- /dev/null
+++ b/tests/bugs/posix/bug-1360679.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+cleanup
+function num_entries {
+ ls -l $1 | wc -l
+}
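+# note: `ls -l` prints a "total" header line, so an empty directory yields 1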
+
+function create_unlink_entry {
+ for i in {0..1}
+ do
+ mkdir -p $B0/${V0}$i/.glusterfs/unlink/{1..3}/{1..10}/1
+ dd if=/dev/urandom of=$B0/${V0}$i/.glusterfs/unlink/file-1 bs=1M count=1
+ dd if=/dev/urandom of=$B0/${V0}$i/.glusterfs/unlink/file-2 bs=1M count=1
+ dd if=/dev/urandom of=$B0/${V0}$i/.glusterfs/unlink/1/file-1 bs=1M count=1
+ dd if=/dev/urandom of=$B0/${V0}$i/.glusterfs/unlink/2/file-1 bs=1M count=1
+ dd if=/dev/urandom of=$B0/${V0}$i/.glusterfs/unlink/3/file-1 bs=1M count=1
+ ln $B0/${V0}$i/.glusterfs/unlink/file-1 $B0/${V0}$i/.glusterfs/unlink/file-link
+ ln -s $B0/${V0}$i/.glusterfs/unlink/1 $B0/${V0}$i/.glusterfs/unlink/link
+ ln -s $B0/${V0}$i/.glusterfs/unlink/2 $B0/${V0}$i/.glusterfs/unlink/link-2
+ done
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume start $V0
+TEST $CLI volume stop $V0
+create_unlink_entry
+TEST $CLI volume start $V0
+EXPECT_WITHIN $UNLINK_TIMEOUT "^1$" num_entries $B0/${V0}0/.glusterfs/unlink/
+EXPECT_WITHIN $UNLINK_TIMEOUT "^1$" num_entries $B0/${V0}1/.glusterfs/unlink/
+cleanup;
diff --git a/tests/bugs/posix/bug-1619720.t b/tests/bugs/posix/bug-1619720.t
new file mode 100755
index 00000000000..bfd304dc809
--- /dev/null
+++ b/tests/bugs/posix/bug-1619720.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../dht.rc
+
+cleanup;
+
+
+# Test steps:
+# The test checks to make sure that the trusted.pgfid.xx xattr is set on
+# both the linkto and data files post the final rename.
+# The test creates files file-1 and file-3 so that src_hashed = dst_hashed,
+# src_cached = dst_cached and xxx_hashed != xxx_cached.
+# It then renames file-1 to file-3 which triggers the posix_mknod call
+# which updates the trusted.pgfid.xx xattr.
+
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 storage.build-pgfid on
+
+## Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST mkdir $M0/tmp
+
+
+
+# Not the best way to do this but I need files which hash to the same subvol and
+# whose cached subvols are the same.
+# In a 2 subvol distributed volume, file-{1,3} hash to the same subvol.
+# file-2 will hash to the other subvol
+
+TEST touch $M0/tmp/file-2
+pgfid_xattr_name=$(getfattr -m "trusted.pgfid.*" $B0/${V0}1/tmp/file-2 | grep "trusted.pgfid")
+echo $pgfid_xattr_name
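+# pgfid_xattr_name should look like trusted.pgfid.<gfid-of-parent-dir>; its
+# value is the file's link count within that parent directory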
+
+
+TEST mv $M0/tmp/file-2 $M0/tmp/file-1
+TEST touch $M0/tmp/file-2
+TEST mv $M0/tmp/file-2 $M0/tmp/file-3
+
+# At this point, both the file-1 and file-3 data files exist on one subvol
+# and both linkto files on the other
+
+TEST mv -f $M0/tmp/file-1 $M0/tmp/file-3
+
+
+TEST getfattr -n $pgfid_xattr_name $B0/${V0}0/tmp/file-3
+TEST getfattr -n $pgfid_xattr_name $B0/${V0}1/tmp/file-3
+
+# Not required for the test, but an extra check if needed:
+# without the fix, the linkto file was not renamed.
+#TEST mv $M0/tmp/file-3 $M0/tmp/file-6
+cleanup;
diff --git a/tests/bugs/posix/bug-1651445.t b/tests/bugs/posix/bug-1651445.t
new file mode 100644
index 00000000000..4d08b69b9b0
--- /dev/null
+++ b/tests/bugs/posix/bug-1651445.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup
+
+TEST verify_lvm_version
+TEST glusterd
+TEST pidof glusterd
+TEST init_n_bricks 3
+TEST setup_lvm 3
+
+TEST $CLI volume create $V0 replica 3 $H0:$L{1,2,3}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+#Setting the reserve as an absolute size
+TEST $CLI volume set $V0 storage.reserve 40MB
+
+#wait 5s to reset disk_space_full flag
+sleep 5
+
+TEST dd if=/dev/zero of=$M0/a bs=100M count=1
+TEST dd if=/dev/zero of=$M0/b bs=10M count=1
+
+# Wait 5s for the disk_space_full flag to update; the thread checks disk
+# space every 5s
+
+sleep 5
+# setup_lvm creates a 150M LVM partition and 40M is reserved, so after
+# consuming more than 110M the next dd should fail
+TEST ! dd if=/dev/zero of=$M0/c bs=5M count=1
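+# overwriting already-allocated data consumes no new blocks, so this write
+# is expected to succeed even with the reserve limit exceeded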
+TEST dd if=/dev/urandom of=$M0/a bs=1022 count=1 oflag=seek_bytes,sync seek=102 conv=notrunc
+
+rm -rf $M0/*
+
+#Setting the size in percent and repeating the above steps
+TEST $CLI volume set $V0 storage.reserve 40
+
+sleep 5
+
+TEST dd if=/dev/zero of=$M0/a bs=80M count=1
+TEST dd if=/dev/zero of=$M0/b bs=10M count=1
+
+sleep 5
+TEST ! dd if=/dev/zero of=$M0/c bs=5M count=1
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/posix/bug-765380.t b/tests/bugs/posix/bug-765380.t
new file mode 100644
index 00000000000..384b8022a42
--- /dev/null
+++ b/tests/bugs/posix/bug-765380.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+REPLICA=2
+
+TEST $CLI volume create $V0 replica $REPLICA $H0:$B0/${V0}00 $H0:$B0/${V0}01 $H0:$B0/${V0}10 $H0:$B0/${V0}11
+TEST $CLI volume start $V0
+
+## Mount FUSE with caching disabled
+TEST $GFS -s $H0 --volfile-id $V0 $M0;
+
+function count_hostname_or_uuid_from_pathinfo()
+{
+ pathinfo=$(getfattr -n trusted.glusterfs.pathinfo $M0/f00f)
+ echo $pathinfo | grep -o $1 | wc -l
+}
+
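+# the pathinfo xattr embeds one <POSIX(...):<host-or-uuid>:<brick-path>>
+# entry per brick holding the file, so a replica-2 file should show the
+# matched token twice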
+TEST touch $M0/f00f
+
+EXPECT $REPLICA count_hostname_or_uuid_from_pathinfo $H0
+
+# turn on node-uuid-pathinfo option
+TEST $CLI volume set $V0 node-uuid-pathinfo on
+
+# do not expect the hostname as part of the pathinfo string
+EXPECT 0 count_hostname_or_uuid_from_pathinfo $H0
+
+uuid=$(grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=)
+
+# ... but expect the uuid $REPLICA times
+EXPECT $REPLICA count_hostname_or_uuid_from_pathinfo $uuid
+
+cleanup;
diff --git a/tests/bugs/posix/bug-990028.t b/tests/bugs/posix/bug-990028.t
new file mode 100755
index 00000000000..bef36a8897d
--- /dev/null
+++ b/tests/bugs/posix/bug-990028.t
@@ -0,0 +1,157 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=153
+
+function __init()
+{
+ TEST glusterd
+ TEST pidof glusterd
+ TEST $CLI volume info;
+
+ TEST $CLI volume create $V0 $H0:$B0/brick
+
+ EXPECT 'Created' volinfo_field $V0 'Status';
+
+ TEST $CLI volume start $V0
+
+ TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+ TEST $CLI volume quota $V0 enable
+}
+
+#CASE-1
+#checking pgfid under same directory
+function links_in_same_directory()
+{
+ # create a file file1
+ TEST touch $M0/file1
+
+ # create 50 hardlinks for file1
+ for i in `seq 2 50`; do
+ TEST_IN_LOOP ln $M0/file1 $M0/file$i
+ done
+
+    # store the pgfid link count of file1 in PGFID_FILE1 [should be 50 now (0x00000032)]
+ PGFID_FILE1=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/file1 2>&1 | grep "trusted.pgfid" | gawk -F '=' '{print $2}'`
+
+    # check that the pgfid (link count) value of each hard link equals file1's
+ for i in `seq 2 50`; do
+ TEMP=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/file$i 2>&1 | grep "trusted.pgfid" | gawk -F '=' '{print $2}'`
+ TEST_IN_LOOP [ $PGFID_FILE1 = $TEMP ]
+ done
+
+    # check that the link count value is 50
+ TEST [ $PGFID_FILE1 = "0x00000032" ]
+
+ # unlink file 2 to 50
+ for i in `seq 2 50`; do
+ TEST_IN_LOOP unlink $M0/file$i;
+ done
+
+    # now check that the pgfid link count is back to 1
+ PGFID_FILE1=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/file1 2>&1 | grep "trusted.pgfid" | gawk -F '=' '{print $2}'`;
+
+ TEST [ $PGFID_FILE1 = "0x00000001" ]
+
+ TEST rm -f $M0/*
+}
+
+##checking pgfid under different directories
+function links_across_directories()
+{
+ TEST mkdir $M0/dir1 $M0/dir2;
+
+ # create a file in dir1
+ TEST touch $M0/dir1/file1;
+
+ # create hard link for file1 in dir2
+ TEST ln $M0/dir1/file1 $M0/dir2/file2;
+
+    #first, check whether there are two pgfid xattrs
+ LINES=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir1/file1 2>&1 | grep "trusted.pgfid" | wc -l`
+ TEST [ $LINES = 2 ]
+
+ for i in $(seq 1 2); do
+ HL=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir$i/file$i 2>&1 | grep "trusted.pgfid" | awk -v n=$i 'NR==n' | cut -d'=' -f2`
+ TEST_IN_LOOP [ $HL = "0x00000001" ]
+ done
+
+    #now unlink file2 and check the pgfid of file1:
+    #1. the number of pgfid xattrs should be one
+    #2. the hard link count should be one
+ TEST unlink $M0/dir2/file2
+
+ LINES=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir1/file1 2>&1 | grep "trusted.pgfid" | wc -l`
+ TEST [ $LINES == 1 ]
+
+    #next, check whether the hard link count is one
+ HL=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir1/file1 2>&1 | grep "trusted.pgfid" | cut -d'=' -f2`
+ TEST [ $HL = "0x00000001" ]
+
+ #rename file under same directory
+
+ TEST touch $M0/r_file1
+ PGFID_rfile1=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/r_file1 2>&1 | grep "trusted.pgfid"`
+
+ #cross check whether hard link count is one
+ HL=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/r_file1 2>&1 | grep "trusted.pgfid" | cut -d'=' -f2`
+
+ TEST [ $HL = "0x00000001" ]
+
+    #now rename the file to r_file2
+ TEST mv $M0/r_file1 $M0/r_file2
+
+    #now check that the pgfid hard link count is still one
+ HL=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/r_file2 2>&1 | grep "trusted.pgfid" | cut -d'=' -f2`
+
+ TEST [ $HL = "0x00000001" ]
+
+ #now move the file to a different directory where it has no hard link and check
+ TEST mkdir $M0/dir3;
+ TEST mv $M0/r_file2 $M0/dir3;
+
+    #now check that the pgfid has changed and the hard link count is one
+ PGFID_newDir=`getfattr -m "trusted.pgfid.*" -de hex $B0/brick/dir3/r_file2 2>&1 | grep "trusted.pgfid"`
+
+ #now the older pgfid and new pgfid shouldn't match
+ TEST [ $PGFID_rfile1 != $PGFID_newDir ]
+
+ HL=`getfattr -m "trusted.pgfid" -de hex $B0/brick/dir3/r_file2 2>&1 | grep "trusted.pgfid" | cut -d'=' -f2`
+ TEST [ $HL = "0x00000001" ]
+
+ TEST touch $M0/dir1/rl_file_1
+ ln $M0/dir1/rl_file_1 $M0/dir2/rl_file_2
+ mv $M0/dir1/rl_file_1 $M0/dir2
+
+    #now there should be just one pgfid xattr on each file
+ for i in $(seq 1 2); do
+        NL=`getfattr -m "trusted.pgfid" -de hex $B0/brick/dir2/rl_file_$i 2>&1 | grep "trusted.pgfid" | wc -l`
+        TEST_IN_LOOP [ $NL = 1 ]
+ done
+
+ #now pgfid of both files should match
+ P_rl_file_1=`getfattr -m "trusted.pgfid" -de hex $B0/brick/dir2/rl_file_1 2>&1 | grep "trusted.pgfid"`
+ P_rl_file_2=`getfattr -m "trusted.pgfid" -de hex $B0/brick/dir2/rl_file_2 2>&1 | grep "trusted.pgfid"`
+ TEST [ $P_rl_file_1 = $P_rl_file_2 ]
+
+    #now the number of hard links should be two for both rl_file_1 and rl_file_2
+ for i in $(seq 1 2); do
+ HL=`getfattr -m "trusted.pgfid" -de hex $B0/brick/dir2/rl_file_$i 2>&1 | grep "trusted.pgfid" | cut -d'=' -f2`
+ TEST_IN_LOOP [ $HL = "0x00000002" ]
+ done
+
+ TEST rm -rf $M0/*
+}
+
+__init;
+links_in_same_directory;
+links_across_directories;
+TEST $CLI volume stop $V0
+
+cleanup
diff --git a/tests/bugs/posix/bug-gfid-path.t b/tests/bugs/posix/bug-gfid-path.t
new file mode 100644
index 00000000000..1bbbe9f0670
--- /dev/null
+++ b/tests/bugs/posix/bug-gfid-path.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+# This test case is for the bug where getfattr of "glusterfs.gfidtopath"
+# succeeded for a file created while the gfid2path option was turned off
+# (default is ON). Ideally the getfattr should fail, as such a file does not
+# have its path(s) stored as an extended attribute (because it was created
+# when the gfid2path option was off).
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3,4};
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '4' brick_count $V0
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST mkdir $M0/dir
+TEST mkdir $M0/new
+TEST mkdir $M0/3
+
+TEST touch $M0/dir/file
+
+# expect success, as gfid2path is enabled by default
+# and the glusterfs.gfidtopath xattr should give the
+# path of the object as its value
+
+TEST getfattr -n glusterfs.gfidtopath $M0/dir/file
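+# (the xattr value is the object's path from the volume root, e.g. "/dir/file")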
+
+# turn off gfid2path feature
+TEST $CLI volume set $V0 storage.gfid2path off
+
+TEST touch $M0/new/foo
+
+# enable gfid2path again. This has to be enabled before
+# trying the getfattr, because a glusterfs.gfidtopath xattr
+# request is handled only if gfid2path is enabled; otherwise
+# getxattr on glusterfs.gfidtopath fails anyway. Here we want
+# getfattr to fail because the file was created while the
+# gfid2path feature was disabled, not because the feature
+# itself is disabled.
+TEST $CLI volume set $V0 storage.gfid2path on
+
+# getfattr should fail as it is attempted on a file
+# which does not have its path stored as an xattr
+# (because the file was created after disabling gfid2path)
+TEST ! getfattr -n glusterfs.gfidtopath $M0/new/foo;
+
+
+
+TEST touch $M0/3/new
+
+# should be successful
+TEST getfattr -n glusterfs.gfidtopath $M0/3/new
+
+TEST rm -rf $M0/*
+
+cleanup;
diff --git a/tests/bugs/posix/disallow-gfid-volumeid-fremovexattr.c b/tests/bugs/posix/disallow-gfid-volumeid-fremovexattr.c
new file mode 100644
index 00000000000..4ed3181d48f
--- /dev/null
+++ b/tests/bugs/posix/disallow-gfid-volumeid-fremovexattr.c
@@ -0,0 +1,104 @@
+#include <glusterfs/api/glfs.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs = NULL;
+ int ret = 0;
+ glfs_fd_t *fd = NULL;
+ char *logfile = NULL;
+ char *hostname = NULL;
+
+ if (argc != 4) {
+ fprintf(stderr,
+                "Usage: %s <hostname> <volname> <log-file>\n",
+ argv[0]);
+ return -1;
+ }
+
+ hostname = argv[1];
+ logfile = argv[3];
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL (%s)\n", strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_set_volfile_server failed ret:%d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_set_logging failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ fd = glfs_opendir(fs, "/");
+ if (!fd) {
+ fprintf(stderr, "glfs_opendir failed with (%s)\n", strerror(errno));
+ return -1;
+ }
+
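+    /* removing the protected gfid/volume-id xattrs must fail with EPERM */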
+ ret = glfs_fremovexattr(fd, "trusted.gfid");
+ if (ret == 0 || errno != EPERM) {
+ fprintf(stderr,
+ "glfs_fremovexattr gfid exited with ret: "
+ "%d (%s)\n",
+ ret, strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_fremovexattr(fd, "trusted.glusterfs.volume-id");
+ if (ret == 0 || errno != EPERM) {
+ fprintf(stderr,
+ "glfs_fremovexattr volume-id exited with ret: "
+ "%d (%s)\n",
+ ret, strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_fsetxattr(fd, "trusted.abc", "abc", 3, 0);
+ if (ret < 0) {
+ fprintf(stderr,
+ "glfs_fsetxattr trusted.abc exited with ret: "
+ "%d (%s)\n",
+ ret, strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_fremovexattr(fd, "trusted.abc");
+ if (ret < 0) {
+ fprintf(stderr,
+ "glfs_fremovexattr trusted.abc exited with "
+ "ret: %d (%s)\n",
+ ret, strerror(errno));
+ return -1;
+ }
+
+ (void)glfs_closedir(fd);
+ ret = glfs_fini(fs);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_fini failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+ return 0;
+}
diff --git a/tests/bugs/posix/disallow-gfid-volumeid-fremovexattr.t b/tests/bugs/posix/disallow-gfid-volumeid-fremovexattr.t
new file mode 100755
index 00000000000..b9fd44ae0d7
--- /dev/null
+++ b/tests/bugs/posix/disallow-gfid-volumeid-fremovexattr.t
@@ -0,0 +1,21 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0};
+TEST $CLI volume start $V0;
+logdir=`gluster --print-logdir`
+
+
+TEST build_tester $(dirname $0)/disallow-gfid-volumeid-fremovexattr.c -lgfapi
+TEST $(dirname $0)/disallow-gfid-volumeid-fremovexattr $H0 $V0 $logdir/disallow-gfid-volumeid-fremovexattr.log
+
+cleanup_tester $(dirname $0)/disallow-gfid-volumeid-fremovexattr
+cleanup;
diff --git a/tests/bugs/posix/disallow-gfid-volumeid-removexattr.t b/tests/bugs/posix/disallow-gfid-volumeid-removexattr.t
new file mode 100644
index 00000000000..d26eb21ccc5
--- /dev/null
+++ b/tests/bugs/posix/disallow-gfid-volumeid-removexattr.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+#This test checks that gfid/volume-id removexattrs are not allowed.
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Basic checks
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+#Create a distributed volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
+TEST $CLI volume start $V0
+
+# Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+TEST ! setfattr -x trusted.gfid $M0
+TEST ! setfattr -x trusted.glusterfs.volume-id $M0
+TEST setfattr -n trusted.abc -v abc $M0
+TEST setfattr -x trusted.abc $M0
+
+cleanup;
diff --git a/tests/bugs/protocol/bug-1321578.t b/tests/bugs/protocol/bug-1321578.t
new file mode 100644
index 00000000000..83904817467
--- /dev/null
+++ b/tests/bugs/protocol/bug-1321578.t
@@ -0,0 +1,82 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+check_mounted () {
+ df | grep $1 | wc -l
+}
+
+CHECK_MOUNT_TIMEOUT=7
+
+TEST glusterd
+TEST $CLI volume create $V0 $H0:$B0/$V0
+
+# Set auth.allow to a dummy hostname so it *doesn't* include ourselves.
+TEST $CLI volume set $V0 auth.allow example.org
+TEST $CLI volume start $V0
+
+# "System getspec" will include the username and password if the request comes
+# from a server (which we are). Unfortunately, this will cause authentication
+# to succeed in auth.login regardless of whether auth.addr is working properly
+# or not, which is useless to us. To get a proper test, strip out those lines.
+$CLI system getspec $V0 | sed -e /username/d -e /password/d > fubar.vol
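+# (the stripped lines are the "option username <uuid>" and "option password
+# <uuid>" entries of the client xlators in the fetched volfile)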
+
+# This mount should fail because auth.allow doesn't include us.
+TEST $GFS -f fubar.vol $M0
+
+EXPECT_WITHIN $CHECK_MOUNT_TIMEOUT 0 check_mounted $M0
+
+# Add a test where only the username is present, but not the password.
+# As explained above, strip the password line from the getspec output so the
+# login module cannot succeed on its own.
+$CLI system getspec $V0 | sed -e /password/d > fubar.vol
+
+# This mount should fail because the volfile carries a username but no password.
+TEST $GFS -f fubar.vol $M0
+
+# If we had DONT_EXPECT_WITHIN we could use that, but we don't.
+EXPECT_WITHIN $CHECK_MOUNT_TIMEOUT 0 check_mounted $M0
+
+# Now test login failure when the server doesn't have the password entry:
+# keep the full client volfile, but strip the password from the server-side
+# brick volfiles below.
+$CLI system getspec $V0 > fubar.vol
+TEST $CLI volume stop $V0
+
+sed -i -e '/password /d' $GLUSTERD_WORKDIR/vols/$V0/$V0.*$V0.vol
+
+TEST $CLI volume start $V0
+
+# This mount should fail because the server side no longer has our password.
+TEST $GFS -f fubar.vol $M0
+
+EXPECT_WITHIN $CHECK_MOUNT_TIMEOUT 0 check_mounted $M0
+
+# Set auth.allow to include us. This mount should therefore succeed.
+TEST $CLI volume set $V0 auth.allow $H0
+$CLI system getspec $V0 | sed -e /password/d > fubar.vol
+
+TEST $GFS -f fubar.vol $M0
+EXPECT_WITHIN $CHECK_MOUNT_TIMEOUT 1 check_mounted $M0
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+# Set auth.reject to include us. This mount should therefore fail.
+TEST $CLI volume stop $V0
+
+TEST $CLI volume set $V0 auth.allow "\*"
+TEST $CLI volume set $V0 auth.reject $H0
+TEST $CLI volume start $V0
+
+# Strip the password so the login module is not in the picture
+$CLI system getspec $V0 | sed -e /password/d > fubar.vol
+
+TEST $GFS -f fubar.vol $M0
+EXPECT_WITHIN $CHECK_MOUNT_TIMEOUT 0 check_mounted $M0
+
+cleanup
diff --git a/tests/bugs/protocol/bug-1390914.t b/tests/bugs/protocol/bug-1390914.t
new file mode 100644
index 00000000000..e3dab92de5a
--- /dev/null
+++ b/tests/bugs/protocol/bug-1390914.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+cleanup;
+
+#test that fops are not wound on an anon fd when the fd is not open on that brick
+TEST glusterd;
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3};
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST $GFS -s $H0 --volfile-id=$V0 --direct-io-mode=enable $M0;
+
+TEST touch $M0/1
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST fd_open 200 'w' "$M0/1"
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#lk should only happen on 2 bricks; if there is a bug, it will plant a lock
+#via an anon fd on the first brick that will never be released, because flush
+#won't be wound below the server xlator for an anon fd
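+# (an anon fd is a server-side descriptor that the brick opens on demand when
+# a client winds an fd-based fop without having the file open on that brick)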
+TEST flock -x -n 200
+TEST fd_close 200
+
+TEST fd_open 200 'w' "$M0/1"
+#this lock will fail if there is a stale lock
+TEST flock -x -n 200
+TEST fd_close 200
+cleanup;
diff --git a/tests/bugs/protocol/bug-1433815-auth-allow.t b/tests/bugs/protocol/bug-1433815-auth-allow.t
new file mode 100644
index 00000000000..a78c0eb7111
--- /dev/null
+++ b/tests/bugs/protocol/bug-1433815-auth-allow.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+check_mounted () {
+ df | grep $1 | wc -l
+}
+
+get_addresses () {
+ ip addr | sed -n '/.*inet \([0-9.]*\).*/s//\1/p' | tr '\n' ','
+}
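+# get_addresses emits every local IPv4 address as one comma-separated list,
+# e.g. "127.0.0.1,192.168.122.10," (note the trailing comma)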
+
+TEST glusterd
+TEST $CLI volume create $V0 $H0:$B0/$V0
+
+# Set auth.allow so it *doesn't* include ourselves.
+TEST $CLI volume set $V0 auth.allow 1.2.3.4
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+
+# "System getspec" will include the username and password if the request comes
+# from a server (which we are). Unfortunately, this will cause authentication
+# to succeed in auth.login regardless of whether auth.addr is working properly
+# or not, which is useless to us. To get a proper test, strip out those lines.
+$CLI system getspec $V0 | sed -e /username/d -e /password/d > fubar.vol
+
+# This mount should fail because auth.allow doesn't include us.
+TEST $GFS -f fubar.vol $M0
+# If we had DONT_EXPECT_WITHIN we could use that, but we don't.
+sleep 10
+EXPECT 0 check_mounted $M0
+
+# Set auth.allow to include us. This mount should therefore succeed.
+TEST $CLI volume set $V0 auth.allow "$(get_addresses)"
+TEST $GFS -f fubar.vol $M0
+sleep 10
+EXPECT 1 check_mounted $M0
+
+cleanup
diff --git a/tests/bugs/protocol/bug-762989.t b/tests/bugs/protocol/bug-762989.t
new file mode 100755
index 00000000000..7d201b78b58
--- /dev/null
+++ b/tests/bugs/protocol/bug-762989.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+# Skip the entire test if ip_local_reserved_ports does not exist
+if [ ! -f /proc/sys/net/ipv4/ip_local_reserved_ports ] ; then
+    echo "Skip test on /proc/sys/net/ipv4/ip_local_reserved_ports, "\
+         "which does not exist on this system" >&2
+ SKIP_TESTS
+ exit 0
+fi
+
+## reserve port 1023
+older_ports=$(cat /proc/sys/net/ipv4/ip_local_reserved_ports);
+echo "1023" > /proc/sys/net/ipv4/ip_local_reserved_ports;
+
+## Start and create a volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+TEST $CLI volume start $V0;
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 \
+$M0;
+
+## Wait for volume to register with rpc.mountd
+sleep 6;
+## check that port 1023 (which has been reserved) is not used by any gluster process
+op=$(netstat -ntp | grep gluster | grep -w 1023);
+EXPECT "" echo $op;
+
+#restore the reserved ports to the older values
+echo $older_ports > /proc/sys/net/ipv4/ip_local_reserved_ports
+
+cleanup;
diff --git a/tests/bugs/protocol/bug-808400-dist.t b/tests/bugs/protocol/bug-808400-dist.t
new file mode 100755
index 00000000000..0df972585c0
--- /dev/null
+++ b/tests/bugs/protocol/bug-808400-dist.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+MOUNTDIR=$M0;
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR;
+
+build_tester $(dirname $0)/bug-808400-flock.c
+build_tester $(dirname $0)/bug-808400-fcntl.c
+
+TEST $(dirname $0)/bug-808400-flock $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind off\'
+TEST $(dirname $0)/bug-808400-fcntl $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind on\'
+
+TEST rm -rf $MOUNTDIR/*
+TEST rm -rf $(dirname $0)/bug-808400-flock $(dirname $0)/bug-808400-fcntl $(dirname $0)/glusterfs.log
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR
+
+cleanup;
diff --git a/tests/bugs/protocol/bug-808400-fcntl.c b/tests/bugs/protocol/bug-808400-fcntl.c
new file mode 100644
index 00000000000..a703ca5c120
--- /dev/null
+++ b/tests/bugs/protocol/bug-808400-fcntl.c
@@ -0,0 +1,124 @@
+#include <sys/file.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/wait.h>
+
+#ifndef linux
+#define fstat64(fd, st) fstat(fd, st)
+#endif
+
+int
+run_child(char *filename)
+{
+ int fd = -1, ret = -1;
+ struct flock lock = {
+ 0,
+ };
+ int ppid = 0;
+
+ fd = open(filename, O_RDWR);
+ if (fd < 0) {
+ fprintf(stderr, "open failed (%s)\n", strerror(errno));
+ goto out;
+ }
+
+ ppid = getppid();
+
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 0;
+
+ ret = fcntl(fd, F_GETLK, &lock);
+ if (ret < 0) {
+ fprintf(stderr, "GETLK failed (%s)\n", strerror(errno));
+ goto out;
+ }
+
+ if ((lock.l_type == F_UNLCK) || (ppid != lock.l_pid)) {
+ fprintf(stderr,
+ "no locks present, though parent has held "
+ "one\n");
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int fd = -1, ret = -1, status = 0;
+ char *filename = NULL, *cmd = NULL;
+ struct stat stbuf = {
+ 0,
+ };
+ struct flock lock = {
+ 0,
+ };
+
+ if (argc != 3) {
+ fprintf(stderr,
+ "Usage: %s <filename> "
+ "<gluster-cmd-to-trigger-graph-switch>\n",
+ argv[0]);
+ goto out;
+ }
+
+ filename = argv[1];
+ cmd = argv[2];
+
+ fd = open(filename, O_RDWR | O_CREAT, 0);
+ if (fd < 0) {
+ fprintf(stderr, "open (%s) failed (%s)\n", filename, strerror(errno));
+ goto out;
+ }
+
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 0;
+
+ ret = fcntl(fd, F_SETLK, &lock);
+ if (ret < 0) {
+ fprintf(stderr, "fcntl failed (%s)\n", strerror(errno));
+ goto out;
+ }
+
+ system(cmd);
+
+ /* wait till graph switch completes */
+ ret = fstat64(fd, &stbuf);
+ if (ret < 0) {
+ fprintf(stderr, "fstat64 failure (%s)\n", strerror(errno));
+ goto out;
+ }
+
+ sleep(10);
+
+ /* By now old-graph would be disconnected and locks should be cleaned
+ * up if they are not migrated. Check that by trying to acquire a lock
+ * on a new fd opened by another process on same file.
+ */
+ ret = fork();
+ if (ret == 0) {
+ ret = run_child(filename);
+ } else {
+ wait(&status);
+ if (WIFEXITED(status)) {
+ ret = WEXITSTATUS(status);
+ } else {
+            ret = 1; /* child did not exit normally */
+ }
+ }
+
+out:
+ return ret;
+}
diff --git a/tests/bugs/protocol/bug-808400-flock.c b/tests/bugs/protocol/bug-808400-flock.c
new file mode 100644
index 00000000000..54a507cc227
--- /dev/null
+++ b/tests/bugs/protocol/bug-808400-flock.c
@@ -0,0 +1,100 @@
+#include <sys/file.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/wait.h>
+
+#ifndef linux
+#define fstat64(fd, st) fstat(fd, st)
+#endif
+
+int
+run_child(char *filename)
+{
+ int fd = -1, ret = -1;
+
+ fd = open(filename, O_RDWR);
+ if (fd < 0) {
+ fprintf(stderr, "open failed (%s)\n", strerror(errno));
+ goto out;
+ }
+
+ ret = flock(fd, LOCK_EX | LOCK_NB);
+ if ((ret == 0) || (errno != EWOULDBLOCK)) {
+ fprintf(stderr,
+ "no locks present, though parent has held "
+ "one\n");
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int fd = -1, ret = -1, status = 0;
+ char *filename = NULL, *cmd = NULL;
+ struct stat stbuf = {
+ 0,
+ };
+
+ if (argc != 3) {
+ fprintf(stderr,
+ "Usage: %s <filename> "
+ "<gluster-cmd-to-trigger-graph-switch>\n",
+ argv[0]);
+ goto out;
+ }
+
+ filename = argv[1];
+ cmd = argv[2];
+
+ fd = open(filename, O_RDWR | O_CREAT, 0);
+ if (fd < 0) {
+ fprintf(stderr, "open (%s) failed (%s)\n", filename, strerror(errno));
+ goto out;
+ }
+
+ ret = flock(fd, LOCK_EX);
+ if (ret < 0) {
+ fprintf(stderr, "flock failed (%s)\n", strerror(errno));
+ goto out;
+ }
+
+ system(cmd);
+
+ /* wait till graph switch completes */
+ ret = fstat64(fd, &stbuf);
+ if (ret < 0) {
+ fprintf(stderr, "fstat64 failure (%s)\n", strerror(errno));
+ goto out;
+ }
+
+ sleep(10);
+
+ /* By now old-graph would be disconnected and locks should be cleaned
+ * up if they are not migrated. Check that by trying to acquire a lock
+ * on a new fd opened by another process on same file
+ */
+ ret = fork();
+ if (ret == 0) {
+ ret = run_child(filename);
+ } else {
+ wait(&status);
+ if (WIFEXITED(status)) {
+ ret = WEXITSTATUS(status);
+ } else {
+            ret = 1; /* child did not exit normally */
+ }
+ }
+
+out:
+ return ret;
+}
diff --git a/tests/bugs/protocol/bug-808400-repl.t b/tests/bugs/protocol/bug-808400-repl.t
new file mode 100755
index 00000000000..611e5ec93b7
--- /dev/null
+++ b/tests/bugs/protocol/bug-808400-repl.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+MOUNTDIR=$M0;
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR;
+
+build_tester $(dirname $0)/bug-808400-flock.c
+build_tester $(dirname $0)/bug-808400-fcntl.c
+
+TEST $(dirname $0)/bug-808400-flock $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind off\'
+TEST $(dirname $0)/bug-808400-fcntl $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind on\'
+
+TEST rm -rf $MOUNTDIR/*
+TEST rm -rf $(dirname $0)/bug-808400-flock $(dirname $0)/bug-808400-fcntl $(dirname $0)/glusterfs.log
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR
+
+cleanup;
diff --git a/tests/bugs/protocol/bug-808400.t b/tests/bugs/protocol/bug-808400.t
new file mode 100755
index 00000000000..4ae1722fca2
--- /dev/null
+++ b/tests/bugs/protocol/bug-808400.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+#mount on a random dir
+TEST MOUNTDIR="/tmp/$RANDOM"
+TEST mkdir $MOUNTDIR
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR;
+
+build_tester $(dirname $0)/bug-808400-flock.c
+build_tester $(dirname $0)/bug-808400-fcntl.c
+
+TEST $(dirname $0)/bug-808400-flock $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind off\'
+TEST $(dirname $0)/bug-808400-fcntl $MOUNTDIR/testfile \'gluster volume set $V0 performance.write-behind on\'
+
+TEST rm -rf $MOUNTDIR/*
+TEST rm -rf $(dirname $0)/bug-808400-flock $(dirname $0)/bug-808400-fcntl $(dirname $0)/glusterfs.log
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR
+TEST rm -rf $MOUNTDIR
+
+cleanup;
diff --git a/tests/bugs/quick-read/bug-846240.t b/tests/bugs/quick-read/bug-846240.t
new file mode 100755
index 00000000000..bb997e10013
--- /dev/null
+++ b/tests/bugs/quick-read/bug-846240.t
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+MOUNTDIR=$M0;
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $MOUNTDIR;
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M1;
+
+TEST touch $M0/testfile;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+# open the file on the next available fd
+TEST fd=`fd_available`;
+TEST fd_open $fd 'w' "$M0/testfile";
+
+# remove the file from the other mount point. If the unlink were sent from
+# $M0 itself, the file would actually be opened by open-behind, which
+# we don't want for this testcase
+TEST rm -f $M1/testfile;
+
+# The command below opens the file and writes to it.
+# Upon open, open-behind unwinds the open call with success.
+# When the write arrives, open-behind actually opens the file
+# and then sends the write on the fd. But before the open is sent,
+# the file has already been removed via the mount $M1, so open() gets an
+# error, and the write call, which was put into a stub (the open had to be
+# sent first), should unwind with the error received by the open call.
+TEST ! fd_write $fd data
+
+TEST fd_close $fd;
+
+TEST rm -rf $MOUNTDIR/*
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $MOUNTDIR
+
+cleanup;
diff --git a/tests/bugs/quick-read/bz1523599/bz1523599.t b/tests/bugs/quick-read/bz1523599/bz1523599.t
new file mode 100755
index 00000000000..5027efe8e9a
--- /dev/null
+++ b/tests/bugs/quick-read/bz1523599/bz1523599.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../../fileio.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/test_bz1523599.c -lgfapi -o $(dirname $0)/test_bz1523599
+TEST ./$(dirname $0)/test_bz1523599 0 $H0 $V0 test_bz1523599 $logdir/bz1523599.log
+TEST ./$(dirname $0)/test_bz1523599 1 $H0 $V0 test_bz1523599 $logdir/bz1523599.log
+TEST ./$(dirname $0)/test_bz1523599 0 $H0 $V0 test_bz1523599 $logdir/bz1523599.log
+TEST ./$(dirname $0)/test_bz1523599 2 $H0 $V0 test_bz1523599 $logdir/bz1523599.log
+
+cleanup_tester $(dirname $0)/test_bz1523599
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
+
diff --git a/tests/bugs/quick-read/bz1523599/test_bz1523599.c b/tests/bugs/quick-read/bz1523599/test_bz1523599.c
new file mode 100644
index 00000000000..5076a9447f3
--- /dev/null
+++ b/tests/bugs/quick-read/bz1523599/test_bz1523599.c
@@ -0,0 +1,198 @@
+/*
+ * ./test_bz1523599 0 vm140-111 gv0 test211 log
+ * ./test_bz1523599 1 vm140-111 gv0 test211 log
+ * Open - Discard - Read - then check the data read to verify that the
+ * initial TEST_STR_LEN/2 bytes read back as zeroes
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <stdlib.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+#include <errno.h>
+#include <sys/uio.h>
+
+#define TEST_STR_LEN 2048
+
+enum fallocate_flag {
+ TEST_WRITE,
+ TEST_DISCARD,
+ TEST_ZEROFILL,
+};
+
+void
+print_str(char *str, int len)
+{
+ int i, addr;
+
+ printf("%07x\t", 0);
+ for (i = 0; i < len; i++) {
+ printf("%02x", str[i]);
+ if (i) {
+ if ((i + 1) % 16 == 0)
+ printf("\n%07x\t", i + 1);
+ else if ((i + 1) % 4 == 0)
+ printf(" ");
+ }
+ }
+ printf("\n");
+}
+
+int
+test_read(char *str, int total_length, int len_zero)
+{
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < len_zero; i++) {
+ if (str[i]) {
+ fprintf(stderr, "char at position %d not zeroed out\n", i);
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ for (i = len_zero; i < total_length; i++) {
+ if (str[i] != 0x11) {
+ fprintf(stderr, "char at position %d does not contain pattern\n",
+ i);
+ ret = -EIO;
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int opcode;
+ char *host_name, *volume_name, *file_path, *glfs_log_path;
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd = NULL;
+ off_t offset = 0;
+ size_t len_zero = TEST_STR_LEN / 2;
+ char writestr[TEST_STR_LEN];
+ char readstr[TEST_STR_LEN];
+ struct iovec iov = {&readstr, TEST_STR_LEN};
+ int i;
+ int ret = 1;
+
+ for (i = 0; i < TEST_STR_LEN; i++)
+ writestr[i] = 0x11;
+ for (i = 0; i < TEST_STR_LEN; i++)
+ readstr[i] = 0x22;
+
+ if (argc != 6) {
+ fprintf(
+ stderr,
+ "Syntax: %s <test type> <host> <volname> <file-path> <log-file>\n",
+ argv[0]);
+ return 1;
+ }
+
+ opcode = atoi(argv[1]);
+ host_name = argv[2];
+ volume_name = argv[3];
+ file_path = argv[4];
+ glfs_log_path = argv[5];
+
+ fs = glfs_new(volume_name);
+ if (!fs) {
+ perror("glfs_new");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", host_name, 24007);
+ if (ret != 0) {
+ perror("glfs_set_volfile_server");
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, glfs_log_path, 7);
+ if (ret != 0) {
+ perror("glfs_set_logging");
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret != 0) {
+ perror("glfs_init");
+ goto out;
+ }
+
+ fd = glfs_creat(fs, file_path, O_RDWR, 0777);
+ if (fd == NULL) {
+ perror("glfs_creat");
+ ret = -1;
+ goto out;
+ }
+
+ switch (opcode) {
+ case TEST_WRITE:
+ fprintf(stderr, "Test Write\n");
+ ret = glfs_write(fd, writestr, TEST_STR_LEN, 0);
+ if (ret < 0) {
+ perror("glfs_write");
+ goto out;
+ } else if (ret != TEST_STR_LEN) {
+ fprintf(stderr, "insufficient data written %d \n", ret);
+ ret = -EIO;
+ goto out;
+ }
+ ret = 0;
+ goto out;
+ case TEST_DISCARD:
+ fprintf(stderr, "Test Discard\n");
+ ret = glfs_discard(fd, offset, len_zero);
+ if (ret < 0) {
+ if (errno == EOPNOTSUPP) {
+ fprintf(stderr, "Operation not supported\n");
+ ret = 0;
+ goto out;
+ }
+ perror("glfs_discard");
+ goto out;
+ }
+ goto test_read;
+ case TEST_ZEROFILL:
+ fprintf(stderr, "Test Zerofill\n");
+ ret = glfs_zerofill(fd, offset, len_zero);
+ if (ret < 0) {
+ if (errno == EOPNOTSUPP) {
+ fprintf(stderr, "Operation not supported\n");
+ ret = 0;
+ goto out;
+ }
+ perror("glfs_zerofill");
+ goto out;
+ }
+ goto test_read;
+ default:
+ ret = -1;
+ fprintf(stderr, "Incorrect test code %d\n", opcode);
+ goto out;
+ }
+
+test_read:
+ ret = glfs_readv(fd, &iov, 1, 0);
+ if (ret < 0) {
+ perror("glfs_readv");
+ goto out;
+ }
+
+ /* printf("Read str\n"); print_str(readstr, TEST_STR_LEN); printf("\n"); */
+ ret = test_read(readstr, TEST_STR_LEN, len_zero);
+
+out:
+ if (fd)
+ glfs_close(fd);
+ glfs_fini(fs);
+
+ if (ret)
+ return -1;
+
+ return 0;
+}
diff --git a/tests/bugs/quota/afr-quota-xattr-mdata-heal.t b/tests/bugs/quota/afr-quota-xattr-mdata-heal.t
new file mode 100644
index 00000000000..ebfa5545728
--- /dev/null
+++ b/tests/bugs/quota/afr-quota-xattr-mdata-heal.t
@@ -0,0 +1,140 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
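+# Metadata self-heal must copy external/user xattrs (e.g. the quota
+# limit-set) but must never copy brick-internal bookkeeping xattrs (quota
+# size/dirty/contri, pgfid); this test seeds differing values on one brick
+# and verifies exactly that.
+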
+cleanup;
+TEST glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST $CLI volume quota $V0 enable
+TEST $CLI volume quota $V0 limit-usage / 1MB
+TEST mkdir $M0/d
+TEST $CLI volume quota $V0 limit-usage /d 1MB
+TEST touch $M0/d/a
+echo abc > $M0/d/a
+
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "512Bytes" quotausage "/"
+
+#Set the acl xattrs directly on the backend; setting them through the mount fails for some reason
+acl_access_val="0x0200000001000600ffffffff04000400ffffffff10000400ffffffff20000400ffffffff"
+acl_file_val="0x0000000400000001ffffffff0006000000000004ffffffff0004000000000010ffffffff0004000000000020ffffffff00040000"
+TEST setfattr -n system.posix_acl_access -v $acl_access_val $B0/${V0}0/d
+TEST setfattr -n trusted.SGI_ACL_FILE -v $acl_file_val $B0/${V0}0/d
+TEST setfattr -n system.posix_acl_access -v $acl_access_val $B0/${V0}1/d
+TEST setfattr -n trusted.SGI_ACL_FILE -v $acl_file_val $B0/${V0}1/d
+TEST setfattr -n trusted.foo -v "baz" $M0/d
+TEST setfattr -n trusted.foo -v "baz" $M0/d/a
+TEST setfattr -n trusted.foo1 -v "baz1" $M0/d
+TEST setfattr -n trusted.foo1 -v "baz1" $M0/d/a
+TEST setfattr -n trusted.foo3 -v "unchanged" $M0/d
+TEST setfattr -n trusted.foo3 -v "unchanged" $M0/d/a
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+#Induce metadata self-heal
+TEST setfattr -n trusted.foo -v "bar" $M0/d
+TEST setfattr -n trusted.foo -v "bar" $M0/d/a
+TEST setfattr -x trusted.foo1 $M0/d
+TEST setfattr -x trusted.foo1 $M0/d/a
+TEST setfattr -n trusted.foo2 -v "bar2" $M0/d
+TEST setfattr -n trusted.foo2 -v "bar2" $M0/d/a
+d_quota_contri=$(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.*.contri")
+d_quota_dirty=$(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.dirty")
+d_quota_limit=$(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.limit-set")
+d_quota_size=$(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.size")
+
+a_pgfid=$(getfattr -d -m . -e hex $B0/${V0}1/d/a | grep -E "trusted.pgfid.")
+
+#Change internal xattrs in the backend, later check that they are not healed
+TEST setfattr -n trusted.glusterfs.quota.00000000-0000-0000-0000-000000000001.contri -v 0x0000000000000400 $B0/${V0}0/d
+TEST setfattr -n trusted.glusterfs.quota.dirty -v 0x0000000000000400 $B0/${V0}0/d
+TEST setfattr -n trusted.glusterfs.quota.limit-set -v 0x0000000000000400 $B0/${V0}0/d #This will be healed, this is external xattr
+TEST setfattr -n trusted.glusterfs.quota.size -v 0x0000000000000400 $B0/${V0}0/d
+TEST setfattr -n $(echo $a_pgfid | cut -f1 -d'=') -v "orphan" $B0/${V0}0/d/a
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+#Check external xattrs match
+EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo)
+EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo)
+TEST ! getfattr -n trusted.foo1 $B0/${V0}0/d
+TEST ! getfattr -n trusted.foo1 $B0/${V0}0/d/a
+EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo3)
+EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo3)
+EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo2)
+EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo2)
+EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}0/d | grep "trusted.glusterfs.quota.limit-set")
+
+EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo)
+EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo)
+TEST ! getfattr -n trusted.foo1 $B0/${V0}1/d
+TEST ! getfattr -n trusted.foo1 $B0/${V0}1/d/a
+EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo3)
+EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo3)
+EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo2)
+EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo2)
+EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep "trusted.glusterfs.quota.limit-set")
+
+#Test that internal xattrs on B0 are not healed
+EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.00000000-0000-0000-0000-000000000001.contri)
+EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.dirty)
+EXPECT "$d_quota_limit" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.limit-set) #This will be healed, this is external xattr
+EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.size)
+EXPECT "$acl_access_val" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep system.posix_acl_access)
+EXPECT "$acl_file_val" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.SGI_ACL_FILE)
+EXPECT "orphan" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep $(echo $a_pgfid | cut -f1 -d'='))
+
+#Test that xattrs didn't go bad in source
+EXPECT "$d_quota_contri" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.*.contri")
+EXPECT "$d_quota_dirty" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.dirty")
+EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.limit-set")
+EXPECT "$d_quota_size" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.size")
+EXPECT "$a_pgfid" echo $(getfattr -d -m . -e hex $B0/${V0}1/d/a | grep -E "trusted.pgfid.")
+EXPECT "$acl_access_val" echo $(getfattr -d -m. -e hex $B0/${V0}1/d | grep system.posix_acl_access)
+EXPECT "$acl_file_val" echo $(getfattr -d -m. -e hex $B0/${V0}1/d | grep trusted.SGI_ACL_FILE)
+
+#Do a lookup; it shouldn't trigger metadata self-heal or heal the xattrs
+EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo)
+EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo)
+TEST ! getfattr -n trusted.foo1 $B0/${V0}0/d
+TEST ! getfattr -n trusted.foo1 $B0/${V0}0/d/a
+EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo3)
+EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo3)
+EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}0/d | grep trusted.foo2)
+EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep trusted.foo2)
+EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}0/d | grep "trusted.glusterfs.quota.limit-set")
+
+EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo)
+EXPECT "bar" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo)
+TEST ! getfattr -n trusted.foo1 $B0/${V0}1/d
+TEST ! getfattr -n trusted.foo1 $B0/${V0}1/d/a
+EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo3)
+EXPECT "unchanged" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo3)
+EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}1/d | grep trusted.foo2)
+EXPECT "bar2" echo $(getfattr -d -m. -e text $B0/${V0}1/d/a | grep trusted.foo2)
+EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep "trusted.glusterfs.quota.limit-set")
+
+#Test that internal xattrs on B0 are not healed
+EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.00000000-0000-0000-0000-000000000001.contri)
+EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.dirty)
+EXPECT "$d_quota_limit" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.limit-set) #This will be healed, this is external xattr
+EXPECT 0x0000000000000400 echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.glusterfs.quota.size)
+EXPECT "orphan" echo $(getfattr -d -m. -e text $B0/${V0}0/d/a | grep $(echo $a_pgfid | cut -f1 -d'='))
+
+#Test that xattrs didn't go bad in source
+EXPECT "$d_quota_contri" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.*.contri")
+EXPECT "$d_quota_dirty" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.dirty")
+EXPECT "$d_quota_limit" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.limit-set")
+EXPECT "$d_quota_size" echo $(getfattr -d -m . -e hex $B0/${V0}1/d | grep -E "trusted.glusterfs.quota.size")
+EXPECT "$a_pgfid" echo $(getfattr -d -m . -e hex $B0/${V0}1/d/a | grep -E "trusted.pgfid.")
+
+EXPECT "$acl_access_val" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep system.posix_acl_access)
+EXPECT "$acl_file_val" echo $(getfattr -d -m. -e hex $B0/${V0}0/d | grep trusted.SGI_ACL_FILE)
+EXPECT "$acl_access_val" echo $(getfattr -d -m. -e hex $B0/${V0}1/d | grep system.posix_acl_access)
+EXPECT "$acl_file_val" echo $(getfattr -d -m. -e hex $B0/${V0}1/d | grep trusted.SGI_ACL_FILE)
+cleanup
diff --git a/tests/bugs/quota/bug-1035576.t b/tests/bugs/quota/bug-1035576.t
new file mode 100644
index 00000000000..cbc1b69ebb3
--- /dev/null
+++ b/tests/bugs/quota/bug-1035576.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This script tests that the limit-set xattr is self-healed on a directory
+#while the quota.size xattr is not
+
+cleanup;
+
+TEST glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+#Let's disable perf xlators so that lookup reaches AFR
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume quota $V0 enable
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+cd $M0
+TEST mkdir $M0/a
+TEST $CLI volume quota $V0 limit-usage /a 1GB
+echo abc > $M0/a/f
+$CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+quota_limit_val1=$(get_hex_xattr trusted.glusterfs.quota.limit-set $B0/${V0}1/a)
+quota_size_val1=$(get_hex_xattr trusted.glusterfs.quota.size $B0/${V0}1/a)
+
+#Trigger entry,metadata self-heal
+TEST ls $M0/a
+
+quota_limit_val0=$(get_hex_xattr trusted.glusterfs.quota.limit-set $B0/${V0}0/a)
+quota_size_val0=$(get_hex_xattr trusted.glusterfs.quota.size $B0/${V0}0/a)
+
+#Test that limit-set xattr is healed
+TEST [ $quota_limit_val0 == $quota_limit_val1 ]
+
+#Since only entry and metadata self-heal have run, the quota size values should not match
+TEST [ $quota_size_val0 != $quota_size_val1 ]
+TEST cat $M0/a/f
+
+#Now that data self-heal is done, the quota size values should match
+quota_size_val0=$(get_hex_xattr trusted.glusterfs.quota.size $B0/${V0}0/a)
+TEST [ $quota_size_val0 == $quota_size_val1 ]
+cleanup
diff --git a/tests/bugs/quota/bug-1038598.t b/tests/bugs/quota/bug-1038598.t
new file mode 100644
index 00000000000..108e14cb8d8
--- /dev/null
+++ b/tests/bugs/quota/bug-1038598.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+QDD=$(dirname $0)/quota
+# compile the test write program and run it
+build_tester $(dirname $0)/../../basic/quota.c -o $QDD
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '2' brick_count $V0
+
+TEST $CLI volume start $V0;
+
+TEST $CLI volume quota $V0 enable
+sleep 5
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST mkdir -p $M0/test_dir
+TEST $CLI volume quota $V0 limit-usage /test_dir 10MB 50
+
+EXPECT "10.0MB" quota_hard_limit "/test_dir";
+EXPECT "50%" quota_soft_limit "/test_dir";
+
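+#(from the expected usages below, the quota helper appears to write <count>
+# blocks of <block-size> KB each, e.g. 256KB x 16 = 4MB; see
+# tests/basic/quota.c for the exact argument semantics)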
+TEST $QDD $M0/test_dir/file1.txt 256 16
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "4.0MB" quotausage "/test_dir";
+EXPECT 'No' quota_sl_exceeded "/test_dir";
+EXPECT 'No' quota_hl_exceeded "/test_dir";
+
+TEST $QDD $M0/test_dir/file1.txt 256 24
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "6.0MB" quotausage "/test_dir";
+EXPECT 'Yes' quota_sl_exceeded "/test_dir";
+EXPECT 'No' quota_hl_exceeded "/test_dir";
+
+#set timeout to 0 so that quota gets enforced without any lag
+TEST $CLI volume set $V0 features.hard-timeout 0
+TEST $CLI volume set $V0 features.soft-timeout 0
+
+TEST ! $QDD $M0/test_dir/file1.txt 256 60
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT 'Yes' quota_sl_exceeded "/test_dir";
+EXPECT 'Yes' quota_hl_exceeded "/test_dir";
+
+rm -f $QDD
+
+cleanup;
diff --git a/tests/bugs/quota/bug-1087198.t b/tests/bugs/quota/bug-1087198.t
new file mode 100644
index 00000000000..618a46b957d
--- /dev/null
+++ b/tests/bugs/quota/bug-1087198.t
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+## The script tests the logging of the quota in the bricks after reaching soft
+## limit of the configured limit.
+##
+## Steps:
+## 1. Create and mount the volume
+## 2. Enable quota and set the limit on 2 directories
+## 3. Write some data to cross the limit
+## 4. Grep the string expected in brick logs
+## 5. Wait for 10 seconds (alert timeout is set to 10s)
+## 6. Repeat 3 and 4.
+## 7. Cleanup
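+##
+## Note: the first write past the soft limit should log "Usage crossed soft
+## limit"; once alert-time (10s here) elapses, subsequent writes should log
+## "Usage is above soft limit". The greps below check for these strings.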
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../fileio.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+QDD=$(dirname $0)/quota
+# compile the test write program and run it
+build_tester $(dirname $0)/../../basic/quota.c -o $QDD
+
+#1
+## Step 1
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick{1..4};
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0;
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 noac,nolock
+
+QUOTA_LIMIT_DIR="quota_limit_dir"
+BRICK_LOG_DIR="`gluster --print-logdir`/bricks"
+
+#9
+TEST mkdir $N0/$QUOTA_LIMIT_DIR
+
+#10
+## Step 2
+TEST $CLI volume quota $V0 enable
+TEST $CLI volume quota $V0 alert-time 10
+TEST $CLI volume quota $V0 hard-timeout 0
+TEST $CLI volume quota $V0 soft-timeout 0
+
+# Set limit to 200KB (204800B)
+TEST $CLI volume quota $V0 limit-usage / 204800B
+TEST $CLI volume quota $V0 limit-usage /$QUOTA_LIMIT_DIR 100KB
+
+#16
+## Step 3 and 4
+TEST $QDD $N0/$QUOTA_LIMIT_DIR/95KB_file 1 95
+#Uncomment the TEST below once bug 1202292 is fixed
+#TEST grep -e "\"Usage crossed soft limit:.*used by /$QUOTA_LIMIT_DIR\"" -- $BRICK_LOG_DIR/*
+
+TEST $QDD $N0/100KB_file 1 100
+#Uncomment the TEST below once bug 1202292 is fixed
+#TEST grep -e "\"Usage crossed soft limit:.*used by /\"" -- $BRICK_LOG_DIR/*
+
+#20
+## Step 5
+TEST sleep 10
+
+## Step 6
+TEST $QDD $N0/$QUOTA_LIMIT_DIR/1KB_file 1 1
+TEST grep -e "\"Usage is above soft limit:.*used by /$QUOTA_LIMIT_DIR\"" -- $BRICK_LOG_DIR/*
+
+#23
+TEST $QDD $N0/1KB_file 1 1
+TEST grep -e "\"Usage is above soft limit:.*used by /\"" -- $BRICK_LOG_DIR/*
+
+#25
+## Step 7
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+TEST $CLI volume stop $V0
+
+rm -f $QDD
+
+cleanup;
diff --git a/tests/bugs/quota/bug-1104692.t b/tests/bugs/quota/bug-1104692.t
new file mode 100755
index 00000000000..9640996135f
--- /dev/null
+++ b/tests/bugs/quota/bug-1104692.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3
+TEST $CLI volume start $V0
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+TEST mkdir -p $M0/limit_one/limit_two/limit_three $M0/limit_four \
+ $M0/limit_one/limit_five
+
+TEST $CLI volume set $V0 server.root-squash on
+TEST $CLI volume quota $V0 enable
+
+TEST $CLI volume quota $V0 limit-usage / 1GB
+TEST $CLI volume quota $V0 limit-usage /limit_one 1GB
+TEST $CLI volume quota $V0 limit-usage /limit_one/limit_two 1GB
+TEST $CLI volume quota $V0 limit-usage /limit_one/limit_two/limit_three 1GB
+TEST $CLI volume quota $V0 limit-usage /limit_four 1GB
+TEST $CLI volume quota $V0 limit-usage /limit_one/limit_five 1GB
+
+cleanup;
diff --git a/tests/bugs/quota/bug-1153964.t b/tests/bugs/quota/bug-1153964.t
new file mode 100644
index 00000000000..2e449d3ba00
--- /dev/null
+++ b/tests/bugs/quota/bug-1153964.t
@@ -0,0 +1,81 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+function rename_loop()
+{
+ local i=0
+ local limit=$1
+ while [ $i -lt $limit ]
+ do
+ j=$[$i + 1]
+ mv $N0/test_dir/file$i $N0/test_dir/file$j
+ if [ "$?" != "0" ]
+ then
+ return 1
+ fi
+ i=$[$i + 1]
+ done
+ return 0
+}
+
+function createFile_and_checkLimit()
+{
+ local count_val=$1;
+ dd if=/dev/zero of="$N0/test_dir/file0" bs=1048576 count=$count_val
+ sleep 3
+ if [ -f $N0/test_dir/file0 ]
+ then
+ rename_loop 10
+ if [ "$?" == "0" ]
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+ fi
+}
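+
+# createFile_and_checkLimit <N> writes an N MB file (bs=1MB, count=N) into
+# test_dir and then renames it 10 times, echoing "Y" only if every rename
+# succeeds; renames must keep working whether usage is below, at or above
+# the limit.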
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+
+TEST $CLI volume quota $V0 enable
+EXPECT 'on' volinfo_field $V0 'features.quota'
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+TEST mkdir -p $N0/test_dir/
+
+# Try to rename the file under various cases and check whether the
+# quota limit is exceeded.
+TEST $CLI volume quota $V0 limit-usage /test_dir 100MB
+# Case1 : If used size is less than hard-limit size
+# Create a 60MB file
+EXPECT 'Y' createFile_and_checkLimit 60
+
+TEST rm -rf $N0/test_dir/*
+# Case2 : If used size is equal to hard-limit size
+# Create a 100MB file
+EXPECT 'Y' createFile_and_checkLimit 100
+
+TEST rm -rf $N0/test_dir/*
+# Case3 : If used size is greater than hard-limit size
+# Create a 110MB file
+EXPECT 'Y' createFile_and_checkLimit 110
+
+# Remove this directory as it was created as part
+# of the above test case
+TEST rm -rf $N0/test_dir/
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+cleanup;
diff --git a/tests/bugs/quota/bug-1178130.t b/tests/bugs/quota/bug-1178130.t
new file mode 100644
index 00000000000..ccd6b792cf8
--- /dev/null
+++ b/tests/bugs/quota/bug-1178130.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+# This regression test tries to ensure that quota usage stays correctly
+# accounted when a file is renamed while one brick is down and the file is
+# later recreated by self-heal.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+QDD=$(dirname $0)/quota
+# compile the test write program and run it
+build_tester $(dirname $0)/../../basic/quota.c -o $QDD
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0;
+
+TEST $CLI volume quota $V0 enable;
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+TEST $CLI volume quota $V0 limit-usage / 500MB
+TEST $CLI volume quota $V0 hard-timeout 0
+TEST $CLI volume quota $V0 soft-timeout 0
+
+TEST $QDD $M0/file 256 40
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "10.0MB" quotausage "/"
+
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST mv $M0/file $M0/file2
+TEST $CLI volume start $V0 force;
+
+#wait for self heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "0" STAT "$B0/${V0}2/file2"
+
+#usage should remain the same after the rename and self-heal operations
+EXPECT "10.0MB" quotausage "/"
+
+rm -f $QDD
+
+cleanup;
diff --git a/tests/bugs/quota/bug-1235182.t b/tests/bugs/quota/bug-1235182.t
new file mode 100644
index 00000000000..6091146cb97
--- /dev/null
+++ b/tests/bugs/quota/bug-1235182.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# This regression test tries to ensure that quota usage stays correctly
+# accounted when files are renamed, both within a directory and across
+# directories, while a write to one of them is still in progress.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+QDD=$(dirname $0)/quota
+# compile the test write program and run it
+build_tester $(dirname $0)/../../basic/quota.c -o $QDD
+
+TEST glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0};
+TEST $CLI volume start $V0;
+
+TEST $CLI volume quota $V0 enable;
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+TEST $CLI volume quota $V0 limit-usage / 1GB
+TEST $CLI volume quota $V0 hard-timeout 0
+TEST $CLI volume quota $V0 soft-timeout 0
+
+TEST mkdir $M0/1
+$QDD $M0/1/f1 256 400&
+PID=$!
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/1/f1
+TESTS_EXPECTED_IN_LOOP=150
+for i in {1..50}; do
+ ii=`expr $i + 1`;
+ touch $M0/$i/f$ii
+ echo Hello > $M0/$i/f$ii
+
+ #rename within same dir
+ TEST_IN_LOOP mv -f $M0/$i/f$i $M0/$i/f$ii;
+
+ #rename to different dir
+ TEST_IN_LOOP mkdir $M0/$ii
+ TEST_IN_LOOP mv -f $M0/$i/f$ii $M0/$ii/f$ii;
+ stat $M0/$ii/f$ii >/dev/null
+done
+
+echo "Wait for process with pid $PID to complete"
+wait $PID
+echo "Process with pid $PID finished"
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/51/f51
+
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "100.0MB" quotausage "/"
+
+rm -f $QDD
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/bugs/quota/bug-1243798.t b/tests/bugs/quota/bug-1243798.t
new file mode 100644
index 00000000000..fa6abeb08fb
--- /dev/null
+++ b/tests/bugs/quota/bug-1243798.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0;
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 noac,nolock
+
+TEST mkdir -p $N0/dir1/dir2
+TEST touch $N0/dir1/dir2/file
+
+TEST $CLI volume quota $V0 enable
+TEST $CLI volume quota $V0 hard-timeout 0
+TEST $CLI volume quota $V0 soft-timeout 0
+TEST $CLI volume quota $V0 limit-objects /dir1 10
+
+TEST stat $N0/dir1/dir2/file
+
+sleep 2
+
+#Remove size and contri xattr from /dir1
+#Remove contri xattr from /dir1/dir2
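+#(the trailing ".1" in these xattr names appears to be a quota version
+# suffix; healing is expected to recreate the xattrs with the same suffix)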
+setfattr -x trusted.glusterfs.quota.size.1 $B0/$V0/dir1
+setfattr -x trusted.glusterfs.quota.00000000-0000-0000-0000-000000000001.contri.1 $B0/$V0/dir1
+contri=$(getfattr -d -m . -e hex $B0/$V0/dir1/dir2 | grep contri | awk -F= '{print $1}')
+setfattr -x $contri $B0/$V0/dir1/dir2
+
+#Initiate healing by writing to a file
+echo Hello > $N0/dir1/dir2/file
+
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "2" quota_object_list_field "/dir1" 5
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+cleanup;
diff --git a/tests/bugs/quota/bug-1250582-volume-reset-should-not-remove-quota-quota-deem-statfs.t b/tests/bugs/quota/bug-1250582-volume-reset-should-not-remove-quota-quota-deem-statfs.t
new file mode 100644
index 00000000000..3b55e739bf9
--- /dev/null
+++ b/tests/bugs/quota/bug-1250582-volume-reset-should-not-remove-quota-quota-deem-statfs.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+# This test ensures that the 'gluster volume reset' command does not remove
+# features.quota-deem-statfs and features.quota.
+# It also tests that features.quota-deem-statfs can be toggled via
+# 'gluster volume set' while quota is enabled.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume quota $V0 enable
+EXPECT 'on' volinfo_field $V0 'features.quota'
+EXPECT 'on' volinfo_field $V0 'features.inode-quota'
+EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
+
+TEST $CLI volume reset $V0
+EXPECT 'on' volinfo_field $V0 'features.quota'
+EXPECT 'on' volinfo_field $V0 'features.inode-quota'
+EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
+
+TEST $CLI volume reset $V0 force
+EXPECT 'on' volinfo_field $V0 'features.quota'
+EXPECT 'on' volinfo_field $V0 'features.inode-quota'
+EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
+
+TEST $CLI volume reset $V0 features.quota-deem-statfs
+EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
+
+TEST $CLI volume set $V0 features.quota-deem-statfs off
+EXPECT 'off' volinfo_field $V0 'features.quota-deem-statfs'
+
+TEST $CLI volume set $V0 features.quota-deem-statfs on
+EXPECT 'on' volinfo_field $V0 'features.quota-deem-statfs'
+
+TEST $CLI volume quota $V0 disable
+EXPECT 'off' volinfo_field $V0 'features.quota'
+EXPECT 'off' volinfo_field $V0 'features.inode-quota'
+EXPECT '' volinfo_field $V0 'features.quota-deem-statfs'
+
+cleanup;
+
diff --git a/tests/bugs/quota/bug-1260545.t b/tests/bugs/quota/bug-1260545.t
new file mode 100644
index 00000000000..46808022f01
--- /dev/null
+++ b/tests/bugs/quota/bug-1260545.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+QDD=$(dirname $0)/quota
+# compile the test write program and run it
+build_tester $(dirname $0)/../../basic/quota.c -o $QDD
+
+TEST glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}2;
+TEST $CLI volume start $V0;
+
+TEST $CLI volume quota $V0 enable;
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+TEST $CLI volume quota $V0 limit-usage / 11MB
+TEST $CLI volume quota $V0 hard-timeout 0
+TEST $CLI volume quota $V0 soft-timeout 0
+
+TEST $QDD $M0/f1 256 40
+
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "10.0MB" quotausage "/"
+
+if [ -f "$B0/${V0}1/f1" ]; then
+ HASHED="$B0/${V0}1"
+ OTHER="$B0/${V0}2"
+else
+ HASHED="$B0/${V0}2"
+ OTHER="$B0/${V0}1"
+fi
+
+TEST $CLI volume remove-brick $V0 $H0:${HASHED} start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:${HASHED}";
+
+#Check consistency on the mount point and also that the file was migrated to OTHER
+TEST [ -f "$OTHER/f1" ];
+TEST [ -f "$M0/f1" ];
+
+#Check that remove-brick status does not show any failed or skipped files
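+#(fields 5 and 6 of the "completed" status line are assumed to be the failed
+# and skipped counts)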
+var=`$CLI volume remove-brick $V0 $H0:${HASHED} status | grep completed`
+TEST [ `echo $var | awk '{print $5}'` = "0" ]
+TEST [ `echo $var | awk '{print $6}'` = "0" ]
+
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "10.0MB" quotausage "/"
+
+rm -f $QDD
+cleanup;
diff --git a/tests/bugs/quota/bug-1287996.t b/tests/bugs/quota/bug-1287996.t
new file mode 100644
index 00000000000..2f46ee1ca2d
--- /dev/null
+++ b/tests/bugs/quota/bug-1287996.t
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 2;
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0
+TEST $CLI_1 volume start $V0
+TEST $CLI_1 volume quota $V0 enable
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
+
+cleanup;
diff --git a/tests/bugs/quota/bug-1292020.t b/tests/bugs/quota/bug-1292020.t
new file mode 100644
index 00000000000..b70047ae3f9
--- /dev/null
+++ b/tests/bugs/quota/bug-1292020.t
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function write_sample_data () {
+ dd if=/dev/zero of=$M0/f1 bs=256k count=400 2>&1 |
+ egrep -i 'exceeded|no space' && echo 'passed'
+}
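+
+# Writing ~100MB against the tiny quota set below (limit-usage / 1) must
+# eventually fail with EDQUOT or ENOSPC; dd's stderr is grepped for either
+# message and "passed" is echoed once the failure is observed.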
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0;
+TEST $CLI volume quota $V0 enable;
+TEST $CLI volume quota $V0 limit-usage / 1
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+
+EXPECT_WITHIN 30 "passed" write_sample_data
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+cleanup;
diff --git a/tests/bugs/quota/bug-1293601.t b/tests/bugs/quota/bug-1293601.t
new file mode 100644
index 00000000000..741758b73f5
--- /dev/null
+++ b/tests/bugs/quota/bug-1293601.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" online_brick_count
+TEST $CLI volume quota $V0 enable
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+for i in {1..512}; do
+ dd if=/dev/zero of=$M0/f$i bs=1k count=1
+done
+
+mkdir $M0/dir1
+for i in {513..1024}; do
+ dd if=/dev/zero of=$M0/dir1/f$i bs=1k count=1
+done
+
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "1.0MB" quotausage "/"
+
+TEST $CLI volume quota $V0 disable
+TEST $CLI volume quota $V0 enable
+
+EXPECT_WITHIN 60 "1.0MB" quotausage "/"
+
+cleanup;
diff --git a/tests/bugs/read-only/bug-1134822-read-only-default-in-graph.t b/tests/bugs/read-only/bug-1134822-read-only-default-in-graph.t
new file mode 100644
index 00000000000..cc27d04656f
--- /dev/null
+++ b/tests/bugs/read-only/bug-1134822-read-only-default-in-graph.t
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+#Test case: This test checks that when a volume is made read-only through
+#           volume set, and its bricks are not restarted, no write operations
+#           can be performed on the volume
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+#Create a volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}0;
+TEST $CLI volume start $V0
+
+# Mount FUSE and create file/directory, create should succeed as the read-only
+# is off by default
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/zerobytefile1.txt
+TEST mkdir $M0/test_dir1
+TEST dd if=/dev/zero of=$M0/file1 bs=1024 count=1024
+
+# turn on read-only option through volume set
+TEST gluster volume set $V0 read-only on
+
+# worm feature can't be enabled if read-only is enabled
+TEST ! gluster volume set $V0 worm on
+
+# turn off read-only option through volume set
+TEST gluster volume set $V0 read-only off
+
+# turn on worm option through volume set
+TEST gluster volume set $V0 worm on
+
+# read-only feature can't be enabled if worm is enabled
+TEST ! gluster volume set $V0 read-only on
+
+
+TEST gluster volume set $V0 worm off
+TEST gluster volume set $V0 read-only on
+
+# Check whether read-operations can be performed or not
+TEST cat $M0/file1
+
+# All write operations should fail now
+TEST ! touch $M0/zerobytefile2.txt
+TEST ! mkdir $M0/test_dir2
+TEST ! dd if=/dev/zero of=$M0/file2 bs=1024 count=1024
+
+# turn off read-only option through volume set
+TEST gluster volume set $V0 read-only off
+
+# All write operations should succeed now
+TEST touch $M0/zerobytefile2.txt
+TEST mkdir $M0/test_dir2
+TEST dd if=/dev/zero of=$M0/file2 bs=1024 count=1024
+
+# Turn on worm
+TEST gluster volume set $V0 worm on
+
+# unlink should fail now
+TEST ! rm -rf $M0/zerobytefile2.txt
+
+cleanup;
diff --git a/tests/bugs/readdir-ahead/bug-1390050.c b/tests/bugs/readdir-ahead/bug-1390050.c
new file mode 100644
index 00000000000..9578df2dd90
--- /dev/null
+++ b/tests/bugs/readdir-ahead/bug-1390050.c
@@ -0,0 +1,72 @@
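+/*
+ * Regression helper: keep a directory stream open on a glusterfs mount, grow
+ * a file inside that directory, then drain readdir() and stat() the file
+ * again. If stat still reports the pre-write size, stale (prefetched)
+ * readdirp attributes were served from the cache and the program exits
+ * non-zero.
+ */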
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <string.h>
+#include <errno.h>
+
+int
+main(int argc, char *argv[])
+{
+ const char *glfs_dir = NULL, *filepath = NULL;
+ DIR *dirfd = NULL;
+ int filefd = 0, ret = 0;
+ struct stat stbuf = {
+ 0,
+ };
+ size_t size_before_write = 0;
+
+ glfs_dir = argv[1];
+ filepath = argv[2];
+ dirfd = opendir(glfs_dir);
+ if (dirfd == NULL) {
+ fprintf(stderr, "opening directory failed (%s)\n", strerror(errno));
+ goto err;
+ }
+
+ filefd = open(filepath, O_RDWR);
+ if (filefd < 0) {
+ fprintf(stderr, "open failed on path %s (%s)\n", filepath,
+ strerror(errno));
+ goto err;
+ }
+
+ ret = stat(filepath, &stbuf);
+ if (ret < 0) {
+ fprintf(stderr, "stat failed on path %s (%s)\n", filepath,
+ strerror(errno));
+ goto err;
+ }
+
+ size_before_write = stbuf.st_size;
+
+    ret = write(filefd, "testdata", strlen("testdata") + 1);
+ if (ret <= 0) {
+ fprintf(stderr, "write failed (%s)\n", strerror(errno));
+ goto err;
+ }
+
+ while (readdir(dirfd)) {
+ /* do nothing */
+ }
+
+ ret = stat(filepath, &stbuf);
+ if (ret < 0) {
+ fprintf(stderr, "stat failed on path %s (%s)\n", strerror(errno));
+ goto err;
+ }
+
+ if (stbuf.st_size == size_before_write) {
+        fprintf(stderr,
+                "file size (%lu) has not changed even after "
+                "it was written to\n",
+                (unsigned long)stbuf.st_size);
+ goto err;
+ }
+
+ return 0;
+err:
+ return -1;
+}
diff --git a/tests/bugs/readdir-ahead/bug-1390050.t b/tests/bugs/readdir-ahead/bug-1390050.t
new file mode 100644
index 00000000000..ab1d7d4ead9
--- /dev/null
+++ b/tests/bugs/readdir-ahead/bug-1390050.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B{0..1}/$V0
+TEST $CLI volume set $V0 readdir-ahead on
+
+DIRECTORY="$M0/subdir1/subdir2"
+
+#Make sure md-cache has a large timeout so it holds the stat from readdirp_cbk in its cache
+TEST $CLI volume set $V0 performance.md-cache-timeout 600
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+rm -rf $M0/*
+TEST mkdir -p $DIRECTORY
+rm -rf $DIRECTORY/*
+TEST touch $DIRECTORY/file{0..10}
+rdd_tester=$(dirname $0)/rdd-tester
+TEST build_tester $(dirname $0)/bug-1390050.c -o $rdd_tester
+TEST $rdd_tester $DIRECTORY $DIRECTORY/file4
+rm -f $rdd_tester
+cleanup;
+
diff --git a/tests/bugs/readdir-ahead/bug-1436090.t b/tests/bugs/readdir-ahead/bug-1436090.t
new file mode 100755
index 00000000000..e0877f15684
--- /dev/null
+++ b/tests/bugs/readdir-ahead/bug-1436090.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+$CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+TEST glusterfs -s $H1 --volfile-id $V0 $M0;
+TEST mkdir $M0/dir1
+
+# Create a large file (4 GB) so that rebalance takes time.
+# Since we really don't care about the contents of the file, we use fallocate
+# to generate the file much faster. We could also use truncate, which is even
+# faster, but rebalance could take advantage of a sparse file and migrate it
+# in an optimized way, and we don't want a fast migration.
+TEST fallocate -l 4G $M0/dir1/foo
+
+TEST mv $M0/dir1/foo $M0/dir1/bar
+
+TEST $CLI_1 volume rebalance $V0 start force
+TEST ! $CLI_1 volume set $V0 parallel-readdir on
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 2 $V0
+TEST $CLI_1 volume set $V0 parallel-readdir on
+TEST mv $M0/dir1/bar $M0/dir1/foo
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs -s $H1 --volfile-id $V0 $M0;
+TEST $CLI_1 volume rebalance $V0 start force
+TEST ln $M0/dir1/foo $M0/dir1/bar
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 2 $V0
+cleanup;
diff --git a/tests/bugs/readdir-ahead/bug-1439640.t b/tests/bugs/readdir-ahead/bug-1439640.t
new file mode 100755
index 00000000000..dcd54076444
--- /dev/null
+++ b/tests/bugs/readdir-ahead/bug-1439640.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B{0..1}/$V0
+TEST $CLI volume set $V0 readdir-ahead on
+TEST $CLI volume start $V0
+
+TEST ! $CLI volume set $V0 parallel-readdir sdf
+
+TEST $CLI volume set $V0 parallel-readdir off
+TEST $CLI volume set $V0 parallel-readdir on
+
+TEST ! $CLI volume set $V0 rda-cache-limit 0
+TEST ! $CLI volume set $V0 rda-cache-limit -634
+TEST ! $CLI volume set $V0 rda-cache-limit 87adh
+TEST ! $CLI volume set $V0 parallel-readdir sdf
+
+TEST ! $CLI volume set $V0 rda-request-size 0
+TEST ! $CLI volume set $V0 rda-request-size -634
+TEST ! $CLI volume set $V0 rda-request-size 87adh
+
+TEST $CLI volume set $V0 rda-cache-limit 10MB
+TEST $CLI volume set $V0 rda-request-size 128KB
+
+cleanup;
diff --git a/tests/bugs/readdir-ahead/bug-1446516.t b/tests/bugs/readdir-ahead/bug-1446516.t
new file mode 100755
index 00000000000..2bf6520d861
--- /dev/null
+++ b/tests/bugs/readdir-ahead/bug-1446516.t
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1..4}
+TEST $CLI volume start $V0
+
+TEST $CLI volume set $V0 parallel-readdir on
+
+TEST $CLI volume set $V0 rda-cache-limit 4GB
+
+TEST $CLI volume set $V0 parallel-readdir off
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+cleanup;
diff --git a/tests/bugs/readdir-ahead/bug-1512437.t b/tests/bugs/readdir-ahead/bug-1512437.t
new file mode 100755
index 00000000000..50eaa7d6696
--- /dev/null
+++ b/tests/bugs/readdir-ahead/bug-1512437.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+
+TEST $CLI volume set $V0 parallel-readdir on
+TEST $CLI volume set $V0 readdir-optimize on
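+# This presumably guards against listings missing freshly created
+# subdirectories after a remount when both options are enabled; the count
+# check below verifies that subdir2 is still listed under subdir1.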
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+TEST mkdir -p $M0/subdir1/subdir2;
+umount $M0
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+count=`ls -1 $M0/subdir1 | wc -l`
+TEST [ $count -eq 1 ]
+
+cleanup;
diff --git a/tests/bugs/readdir-ahead/bug-1670253-consistent-metadata.t b/tests/bugs/readdir-ahead/bug-1670253-consistent-metadata.t
new file mode 100644
index 00000000000..6adfc17c92c
--- /dev/null
+++ b/tests/bugs/readdir-ahead/bug-1670253-consistent-metadata.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 readdir-ahead on #on by default as of this writing
+TEST $CLI volume set $V0 consistent-metadata on
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+TEST touch $M0/FILE
+echo "abc" >> $M0/FILE
+EXPECT "^0$" echo $?
+EXPECT "abc" cat $M0/FILE
+echo "truncate" >$M0/FILE
+EXPECT "^0$" echo $?
+EXPECT "truncate" cat $M0/FILE
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
diff --git a/tests/bugs/replicate/886998/strict-readdir.t b/tests/bugs/replicate/886998/strict-readdir.t
new file mode 100644
index 00000000000..63fe313b201
--- /dev/null
+++ b/tests/bugs/replicate/886998/strict-readdir.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../../volume.rc
+
+function num_files_in_dir {
+ d=$1
+ ls $d | sort | uniq | wc -l
+}
+
+#Basic sanity tests for readdir functionality
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/r2d2_0 $H0:$B0/r2d2_1 $H0:$B0/r2d2_2 $H0:$B0/r2d2_3
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-server=$H0 --volfile-id=/$V0 $M0
+
+TEST touch $M0/{1..100}
+EXPECT "100" num_files_in_dir $M0
+
+TEST kill_brick $V0 $H0 $B0/r2d2_0
+TEST kill_brick $V0 $H0 $B0/r2d2_2
+EXPECT "100" num_files_in_dir $M0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST kill_brick $V0 $H0 $B0/r2d2_1
+TEST kill_brick $V0 $H0 $B0/r2d2_3
+EXPECT "100" num_files_in_dir $M0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 3
+
+TEST $CLI volume set $V0 cluster.strict-readdir on
+EXPECT "on" volinfo_field $V0 cluster.strict-readdir
+TEST kill_brick $V0 $H0 $B0/r2d2_0
+TEST kill_brick $V0 $H0 $B0/r2d2_2
+EXPECT "100" num_files_in_dir $M0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST kill_brick $V0 $H0 $B0/r2d2_1
+TEST kill_brick $V0 $H0 $B0/r2d2_3
+EXPECT "100" num_files_in_dir $M0
+cleanup;
diff --git a/tests/bugs/replicate/bug-1015990-rep.t b/tests/bugs/replicate/bug-1015990-rep.t
new file mode 100755
index 00000000000..ab8166e372a
--- /dev/null
+++ b/tests/bugs/replicate/bug-1015990-rep.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 3
+
+
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST kill_brick $V0 $H0 $B0/$V0"3"
+
+for i in {1..100}; do echo "STRING" > $M0/File$i; done
+
+# Check shd is connected to all up bricks before running statistics command.
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3
+
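+# Sum the "Number of entries:" values reported for brick 1's replica set and
+# compare the total against the xattrop index count on its partner, brick 2.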
+command_output=$(gluster volume heal $V0 statistics heal-count replica $H0:$B0/$V0"1")
+substring="Number of entries:"
+count=0
+while read -r line;
+do
+ if [[ "$line" == *$substring* ]]
+ then
+ value=$(echo $line | cut -f 2 -d :)
+ count=$(($count + $value))
+ fi
+
+done <<< "$command_output"
+
+brick_2_entries_count=$count
+xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2")
+EXPECT $brick_2_entries_count echo $xattrop_count_brick_2
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1015990.t b/tests/bugs/replicate/bug-1015990.t
new file mode 100755
index 00000000000..a8b12f2c202
--- /dev/null
+++ b/tests/bugs/replicate/bug-1015990.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+
+
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+sleep 5
+TEST kill_brick $V0 $H0 $B0/$V0"3"
+sleep 5
+
+for i in {1..100}; do echo "STRING" > $M0/File$i; done
+
+brick_2_sh_entries=$(count_sh_entries $B0/$V0"2")
+brick_4_sh_entries=$(count_sh_entries $B0/$V0"4")
+
+
+command_output=$(gluster volume heal $V0 statistics heal-count)
+
+
+substring="Number of entries:"
+count=0
+while read -r line;
+do
+ if [[ "$line" == *$substring* ]]
+ then
+ value=$(echo $line | cut -f 2 -d :)
+ count=$(($count + $value))
+ fi
+
+done <<< "$command_output"
+
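+# Only the live bricks report entry counts here, in brick order: after the
+# loop $value holds the last reported count (brick 4) and $count the total,
+# so the difference gives brick 2's count.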
+brick_2_entries_count=$(($count-$value))
+brick_4_entries_count=$value
+
+xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2")
+xattrop_count_brick_4=$(count_sh_entries $B0/$V0"4")
+EXPECT $brick_2_entries_count echo $xattrop_count_brick_2
+EXPECT $brick_4_entries_count echo $xattrop_count_brick_4
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0
+
+cleanup;
+
diff --git a/tests/bugs/replicate/bug-1032927.t b/tests/bugs/replicate/bug-1032927.t
new file mode 100644
index 00000000000..eb663d03fed
--- /dev/null
+++ b/tests/bugs/replicate/bug-1032927.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This tests whether pathinfo getxattr fails when one of the bricks is down
+#Let's hope it doesn't
+
+cleanup;
+function get_pathinfo_in_loop {
+ failed=0
+ for i in {1..1000}
+ do
+ getfattr -n trusted.glusterfs.pathinfo $M0 2>/dev/null
+ if [ $? -ne 0 ]; then failed=1;break; fi
+ done
+ return $failed
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+cd $M0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+#When one of the bricks is down, getfattr of pathinfo should not fail.
+#Run the test 1000 times to see if we hit the race.
+TEST get_pathinfo_in_loop
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1037501.t b/tests/bugs/replicate/bug-1037501.t
new file mode 100755
index 00000000000..ce079555b50
--- /dev/null
+++ b/tests/bugs/replicate/bug-1037501.t
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function write_file()
+{
+ path="$1"; shift
+ echo "$*" > "$path"
+}
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+mkdir -p ${B0}/${V0}-2
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+TEST `echo "TEST-FILE" > $M0/File`
+TEST `mkdir $M0/Dir`
+TEST `ln $M0/File $M0/Link`
+TEST `mknod $M0/FIFO p`
+
+TEST $CLI volume add-brick $V0 replica 4 $H0:$B0/$V0-3 force
+TEST $CLI volume add-brick $V0 replica 5 $H0:$B0/$V0-4 force
+TEST $CLI volume add-brick $V0 replica 6 $H0:$B0/$V0-5 force
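+
+# Each add-brick above raises the replica count by one; "heal full" must then
+# recreate every entry type (regular file, hardlink, directory and FIFO) on
+# the new bricks, which the existence/size/link-count/type checks below verify.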
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 5
+TEST gluster volume heal $V0 full
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/File
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/File
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/File
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/File
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/File
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/File
+
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/Link
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/Link
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/Link
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/Link
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/Link
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/Link
+
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/Dir
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/Dir
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/Dir
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/Dir
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/Dir
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/Dir
+
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-0/FIFO
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-1/FIFO
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-2/FIFO
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-3/FIFO
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-4/FIFO
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" path_exists $B0/$V0-5/FIFO
+
+EXPECT 10 stat -c '%s' $B0/$V0-0/File
+EXPECT 10 stat -c '%s' $B0/$V0-1/File
+EXPECT 10 stat -c '%s' $B0/$V0-2/File
+EXPECT 10 stat -c '%s' $B0/$V0-3/File
+EXPECT 10 stat -c '%s' $B0/$V0-4/File
+EXPECT 10 stat -c '%s' $B0/$V0-5/File
+
+EXPECT 3 stat -c '%h' $B0/$V0-0/Link
+EXPECT 3 stat -c '%h' $B0/$V0-1/Link
+EXPECT 3 stat -c '%h' $B0/$V0-2/Link
+EXPECT 3 stat -c '%h' $B0/$V0-3/Link
+EXPECT 3 stat -c '%h' $B0/$V0-4/Link
+EXPECT 3 stat -c '%h' $B0/$V0-5/Link
+
+EXPECT 'directory' stat -c '%F' $B0/$V0-0/Dir
+EXPECT 'directory' stat -c '%F' $B0/$V0-1/Dir
+EXPECT 'directory' stat -c '%F' $B0/$V0-2/Dir
+EXPECT 'directory' stat -c '%F' $B0/$V0-3/Dir
+EXPECT 'directory' stat -c '%F' $B0/$V0-4/Dir
+EXPECT 'directory' stat -c '%F' $B0/$V0-5/Dir
+
+EXPECT 'fifo' stat -c '%F' $B0/$V0-0/FIFO
+EXPECT 'fifo' stat -c '%F' $B0/$V0-1/FIFO
+EXPECT 'fifo' stat -c '%F' $B0/$V0-2/FIFO
+EXPECT 'fifo' stat -c '%F' $B0/$V0-3/FIFO
+EXPECT 'fifo' stat -c '%F' $B0/$V0-4/FIFO
+EXPECT 'fifo' stat -c '%F' $B0/$V0-5/FIFO
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1046624.t b/tests/bugs/replicate/bug-1046624.t
new file mode 100755
index 00000000000..e2762ea6764
--- /dev/null
+++ b/tests/bugs/replicate/bug-1046624.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+## Start and create a volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+
+## Make sure automatic self-heal doesn't perturb our results.
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume set $V0 background-self-heal-count 0
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount native
+TEST ${GFS} --volfile-server=$H0 --volfile-id=$V0 --use-readdirp=no $M0
+
+TEST `echo "TEST-FILE" > $M0/File`
+TEST `mkdir $M0/Dir`
+TEST kill_brick $V0 $H0 $B0/${V0}-0
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 0
+
+TEST `ln -s $M0/File $M0/Link1`
+TEST `ln -s $M0/Dir $M0/Link2`
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+TEST `find $M0/ 2>/dev/null 1>/dev/null`
+TEST `find $M0/ | xargs stat 2>/dev/null 1>/dev/null`
+
+TEST stat $B0/${V0}-0/Link1
+TEST stat $B0/${V0}-0/Link2
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1058797.t b/tests/bugs/replicate/bug-1058797.t
new file mode 100644
index 00000000000..598062a0dab
--- /dev/null
+++ b/tests/bugs/replicate/bug-1058797.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+#Test that the setuid bit is healed correctly.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+#Basic checks
+TEST glusterd
+
+#Create a 1x2 replica volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1};
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+
+# FUSE mount;create a file
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/file
+
+#Kill brick1 and set S_ISUID and S_ISGID bits from mount point
+kill_brick $V0 $H0 $B0/brick1
+TEST chmod +x,+s $M0/file
+
+#Get file permissions from backend brick0 and verify that S_ISUID is indeed set
+file_permissions1=`ls -l $B0/brick0/file | awk '{print $1}'| cut -d. -f1 | cut -d- -f2,3,4,5,6`
+setuid_bit1=`echo $file_permissions1 | cut -b3`
+EXPECT "s" echo $setuid_bit1
+
+#Restart volume and do lookup from mount to trigger heal
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST dd if=$M0/file of=/dev/null
+
+#Get file permissions from healed brick1 and verify that S_ISUID is indeed set
+file_permissions2=`ls -l $B0/brick1/file | awk '{print $1}' | cut -d. -f1 | cut -d- -f2,3,4,5,6`
+setuid_bit2=`echo $file_permissions2 | cut -b3`
+EXPECT "s" echo $setuid_bit2
+
+#Also compare the entire permission string, just to be sure
+EXPECT $file_permissions1 echo $file_permissions2
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1101647.t b/tests/bugs/replicate/bug-1101647.t
new file mode 100644
index 00000000000..708bc1a1e29
--- /dev/null
+++ b/tests/bugs/replicate/bug-1101647.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V0;
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+#Create base entry in indices/xattrop
+echo "Data">$M0/file
+
+TEST $CLI volume heal $V0
+#Entries from indices/xattrop should not be cleared after a heal.
+EXPECT 1 count_index_entries $B0/$V0"1"
+EXPECT 1 count_index_entries $B0/$V0"2"
+
+TEST kill_brick $V0 $H0 $B0/${V0}2
+echo "More data">>$M0/file
+
+EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count|grep "Number of entries:"|head -n1|awk '{print $4}'`
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1130892.t b/tests/bugs/replicate/bug-1130892.t
new file mode 100644
index 00000000000..c7509f33cc2
--- /dev/null
+++ b/tests/bugs/replicate/bug-1130892.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Create a 1X2 replica
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Disable self-heal daemon
+TEST gluster volume set $V0 self-heal-daemon off
+
+# Disable client-side heal
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+
+# Disable all perf-xlators
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+
+# Volume start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+# FUSE Mount
+TEST ${GFS} -s $H0 --volfile-id $V0 $M0
+
+# Create files and dirs
+TEST mkdir -p $M0/one/two/
+TEST `echo "Carpe diem" > $M0/one/two/three`
+
+# Simulate disk-replacement
+TEST kill_brick $V0 $H0 $B0/${V0}-1
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 1
+TEST rm -rf $B0/${V0}-1/one
+TEST rm -rf $B0/${V0}-1/.glusterfs
+
+#Ideally, disk replacement is done using the reset-brick or replace-brick
+#gluster CLI, which will create the .glusterfs folder.
+mkdir $B0/${V0}-1/.glusterfs && chmod 600 $B0/${V0}-1/.glusterfs
+
+# Start force
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST stat $M0/one
+
+sleep 1
+
+# Check pending xattrs
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 data
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 metadata
+
+TEST gluster volume set $V0 self-heal-daemon on
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_file_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two/three
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1132102.t b/tests/bugs/replicate/bug-1132102.t
new file mode 100644
index 00000000000..c7dbbf818aa
--- /dev/null
+++ b/tests/bugs/replicate/bug-1132102.t
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+#This tests that mknod and create fops mark the necessary pending changelogs
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+cd $M0
+TEST mkfifo fifo
+TEST mknod block b 0 0
+TEST touch a
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/fifo trusted.afr.$V0-client-0 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/fifo trusted.afr.$V0-client-0 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/fifo trusted.afr.$V0-client-0 metadata
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/block trusted.afr.$V0-client-0 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/block trusted.afr.$V0-client-0 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/block trusted.afr.$V0-client-0 metadata
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a trusted.afr.$V0-client-0 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a trusted.afr.$V0-client-0 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a trusted.afr.$V0-client-0 metadata
+cleanup
diff --git a/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t b/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
new file mode 100644
index 00000000000..b69a38ae788
--- /dev/null
+++ b/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+#### Test iatt and user xattr heal from lookup code path ####
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2}
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+cd $M0
+TEST touch file
+TEST setfattr -n user.attribute1 -v "value" $B0/brick0/file
+TEST kill_brick $V0 $H0 $B0/brick2
+TEST chmod +x file
+iatt=$(stat -c "%g:%u:%A" file)
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT 2 get_pending_heal_count $V0
+
+#Trigger metadataheal
+TEST stat file
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+#iattrs must be matching
+iatt1=$(stat -c "%g:%u:%A" $B0/brick0/file)
+iatt2=$(stat -c "%g:%u:%A" $B0/brick1/file)
+iatt3=$(stat -c "%g:%u:%A" $B0/brick2/file)
+EXPECT $iatt echo $iatt1
+EXPECT $iatt echo $iatt2
+EXPECT $iatt echo $iatt3
+
+#xattrs must be matching
+xatt1_cnt=$(getfattr -d $B0/brick0/file|wc|awk '{print $1}')
+xatt2_cnt=$(getfattr -d $B0/brick1/file|wc|awk '{print $1}')
+xatt3_cnt=$(getfattr -d $B0/brick2/file|wc|awk '{print $1}')
+EXPECT "$xatt1_cnt" echo $xatt2_cnt
+EXPECT "$xatt1_cnt" echo $xatt3_cnt
+
+#changelogs must be zero
+xattr1=$(get_hex_xattr trusted.afr.$V0-client-2 $B0/brick0/file)
+xattr2=$(get_hex_xattr trusted.afr.$V0-client-2 $B0/brick1/file)
+EXPECT "000000000000000000000000" echo $xattr1
+EXPECT "000000000000000000000000" echo $xattr2
+
+cd -
+cleanup;
diff --git a/tests/bugs/replicate/bug-1139230.t b/tests/bugs/replicate/bug-1139230.t
new file mode 100644
index 00000000000..9ceac6c4f4e
--- /dev/null
+++ b/tests/bugs/replicate/bug-1139230.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+# Create a 1X2 replica
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Volume start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+# FUSE Mount
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+TEST mkdir -p $M0/one
+
+# Kill a brick
+TEST kill_brick $V0 $H0 $B0/${V0}-1
+
+TEST `echo "A long" > $M0/one/two`
+
+# Start force
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_file_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two
+
+# Pending xattrs should be set for all the bricks once self-heal is done
+# Check pending xattrs
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one trusted.afr.$V0-client-1
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one trusted.afr.dirty
+
+TEST `echo "time ago" > $M0/one/three`
+
+# Pending xattrs should be set for all the bricks once transaction is done
+# Check pending xattrs
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one/three trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one/three trusted.afr.$V0-client-1
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one/three trusted.afr.$V0-client-0
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one/three trusted.afr.$V0-client-1
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-0/one/three trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/${V0}-1/one/three trusted.afr.dirty
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1180545.t b/tests/bugs/replicate/bug-1180545.t
new file mode 100644
index 00000000000..5e40edd6c38
--- /dev/null
+++ b/tests/bugs/replicate/bug-1180545.t
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+#Create gfid split-brain of directory and check if conservative merge
+#completes successfully.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+function check_sh_entries() {
+ local expected="$1"
+ local count=
+ local good="0"
+ shift
+
+ for i in $*; do
+ count="$(count_sh_entries $i)"
+ if [[ "x${count}" == "x${expected}" ]]; then
+ good="$((good + 1))"
+ fi
+ done
+ if [[ "x${good}" != "x${last_good}" ]]; then
+ last_good="${good}"
+        # This triggers a sweep of the heal index. However, if more than one
+        # brick tries to heal the same directory at the same time, one takes
+        # the lock and the other gives up, waiting for the next heal cycle,
+        # which is set to 60 seconds (the minimum valid value). So, each time
+        # we detect that one brick has completed the heal, we trigger another.
+        $CLI volume heal $V0
+ fi
+
+ echo "${good}"
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 cluster.heal-timeout 60
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+#Create files with alternate brick down. One file has gfid mismatch.
+TEST mkdir $M0/DIR
+
+TEST kill_brick $V0 $H0 $B0/brick1
+TEST touch $M0/DIR/FILE
+TEST touch $M0/DIR/file{1..5}
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/DIR/FILE
+TEST touch $M0/DIR/file{6..10}
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#Trigger heal and verify number of entries in backend
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+
+last_good=""
+
+EXPECT_WITHIN $HEAL_TIMEOUT "2" check_sh_entries 2 $B0/brick{0,1}
+
+#Two entries for DIR and two for FILE
+EXPECT_WITHIN $HEAL_TIMEOUT "4" get_pending_heal_count $V0
+TEST diff <(ls $B0/brick0/DIR) <(ls $B0/brick1/DIR)
+cleanup
diff --git a/tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t b/tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t
new file mode 100644
index 00000000000..fe8e992e8f8
--- /dev/null
+++ b/tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t
@@ -0,0 +1,57 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+#Stale entries in the xattrop folder for files that do not need heal must be removed during the next index heal crawl.
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0;
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST `echo hello>$M0/datafile`
+TEST touch $M0/mdatafile
+
+#Create split-brain and reset the afr xattrs, so that we have only the entry inside xattrop folder.
+#This is to simulate the case where the brick crashed just before pre-op happened, but index xlator created the entry inside xattrop folder.
+
+#Create data, metadata SB.
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST stat $M0/datafile
+TEST `echo append>>$M0/datafile`
+TEST chmod +x $M0/mdatafile
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 1
+TEST kill_brick $V0 $H0 $B0/$V0"0"
+TEST stat $M0/datafile
+TEST `echo append>>$M0/datafile`
+TEST chmod -x $M0/mdatafile
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 0
+TEST ! cat $M0/datafile
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT '2' count_sh_entries $B0/$V0"0"
+EXPECT_WITHIN $HEAL_TIMEOUT '2' count_sh_entries $B0/$V0"1"
+
+#Reset xattrs and trigger heal.
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}0/datafile
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}1/datafile
+
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}0/mdatafile
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}1/mdatafile
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT '0' count_sh_entries $B0/$V0"0"
+EXPECT_WITHIN $HEAL_TIMEOUT '0' count_sh_entries $B0/$V0"1"
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t b/tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t
new file mode 100644
index 00000000000..6ff471fbf15
--- /dev/null
+++ b/tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+#Allow readdirs to proceed on directories that are in split-brain
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0;
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file{1..5}
+
+#Create entry split-brain
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 1
+TEST touch $M0/dir/FILE
+EXPECT_WITHIN ${UMOUNT_TIMEOUT} "^Y$" force_umount $M0
+TEST $CLI volume start $V0 force
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 1
+TEST kill_brick $V0 $H0 $B0/$V0"0"
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 0
+TEST touch $M0/dir/FILE
+EXPECT_WITHIN ${UMOUNT_TIMEOUT} "^Y$" force_umount $M0
+TEST $CLI volume start $V0 force
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 1
+
+cd $M0/dir
+EXPECT "6" echo $(ls | wc -l)
+TEST ! cat FILE
+TEST `echo hello>hello.txt`
+cd -
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup
diff --git a/tests/bugs/replicate/bug-1238398-split-brain-resolution.t b/tests/bugs/replicate/bug-1238398-split-brain-resolution.t
new file mode 100644
index 00000000000..8ef3aae979f
--- /dev/null
+++ b/tests/bugs/replicate/bug-1238398-split-brain-resolution.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+function get_split_brain_status {
+ local path=$1
+ echo `getfattr -n replica.split-brain-status $path` | cut -f2 -d"=" | sed -e 's/^"//' -e 's/"$//'
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+
+#Disable self-heal-daemon
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+TEST `echo "some-data" > $M0/metadata-split-brain.txt`
+
+#Create metadata split-brain
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST chmod 666 $M0/metadata-split-brain.txt
+
+TEST $CLI volume start $V0 force
+TEST kill_brick $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+TEST chmod 757 $M0/metadata-split-brain.txt
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+EXPECT 2 get_pending_heal_count $V0
+
+#Inspect the file in metadata-split-brain
+EXPECT "data-split-brain:no metadata-split-brain:yes Choices:patchy-client-0,patchy-client-1" get_split_brain_status $M0/metadata-split-brain.txt
+TEST setfattr -n replica.split-brain-choice -v $V0-client-0 $M0/metadata-split-brain.txt
+
+EXPECT "757" stat -c %a $M0/metadata-split-brain.txt
+
+TEST setfattr -n replica.split-brain-choice -v $V0-client-1 $M0/metadata-split-brain.txt
+EXPECT "666" stat -c %a $M0/metadata-split-brain.txt
+
+cleanup;
+
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/bugs/replicate/bug-1238508-self-heal.t b/tests/bugs/replicate/bug-1238508-self-heal.t
new file mode 100644
index 00000000000..24fb07d31f0
--- /dev/null
+++ b/tests/bugs/replicate/bug-1238508-self-heal.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+
+# Disable self-heal-daemon
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+TEST mkdir $M0/olddir;
+TEST `echo "some-data" > $M0/olddir/oldfile`
+
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST mv $M0/olddir/oldfile $M0/olddir/newfile;
+TEST mv $M0/olddir $M0/newdir;
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+# Test if the files are present on both the bricks
+EXPECT "newdir" ls $B0/${V0}0/
+EXPECT "newdir" ls $B0/${V0}1/
+EXPECT "newfile" ls $B0/${V0}0/newdir/
+EXPECT "newfile" ls $B0/${V0}1/newdir/
+
+# Test if gfid-link files in .glusterfs also provide correct info
+brick0gfid=$(gf_get_gfid_backend_file_path $B0/${V0}0 newdir)
+brick1gfid=$(gf_get_gfid_backend_file_path $B0/${V0}1 newdir)
+EXPECT "newfile" ls $brick0gfid
+EXPECT "newfile" ls $brick1gfid
+
+# Test if the files are accessible from the mount
+EXPECT "newdir" ls $M0/
+EXPECT "newfile" ls $M0/newdir
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1250170-fsync.c b/tests/bugs/replicate/bug-1250170-fsync.c
new file mode 100644
index 00000000000..21fd96594aa
--- /dev/null
+++ b/tests/bugs/replicate/bug-1250170-fsync.c
@@ -0,0 +1,59 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+
+int
+main(int argc, char **argv)
+{
+ char *file = NULL;
+ int fd = -1;
+ char *buffer = NULL;
+ size_t buf_size = 0;
+ size_t written = 0;
+ int ret = 0;
+ off_t offset = 0;
+ int i = 0;
+ int loop_count = 5;
+
+ if (argc < 2) {
+ printf("Usage:%s <filename>\n", argv[0]);
+ return -1;
+ }
+
+ file = argv[1];
+ buf_size = 1024;
+ buffer = malloc(buf_size);
+ if (!buffer) {
+ perror("malloc");
+ return -1;
+ }
+ memset(buffer, 'R', buf_size);
+
+ fd = open(file, O_WRONLY);
+ if (fd == -1) {
+ perror("open");
+ return -1;
+ }
+
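+    /* Write 1KB and then seek back to offset 0 each time, so the writes
+     * are non-sequential (overlapping) rather than appending. */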
+ for (i = 0; i < loop_count; i++) {
+ ret = write(fd, buffer, buf_size);
+ if (ret == -1) {
+ perror("write");
+ return ret;
+ } else {
+ written += ret;
+ }
+ offset = lseek(fd, 0, SEEK_SET);
+ }
+
+    free(buffer);
+    close(fd);
+ return 0;
+}
diff --git a/tests/bugs/replicate/bug-1250170-fsync.t b/tests/bugs/replicate/bug-1250170-fsync.t
new file mode 100644
index 00000000000..7a3fdbf8bb5
--- /dev/null
+++ b/tests/bugs/replicate/bug-1250170-fsync.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST gcc $(dirname $0)/bug-1250170-fsync.c -o $(dirname $0)/bug-1250170-fsync
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+TEST touch $M0/file
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST gluster volume profile $V0 start
+#Perform 5 non-sequential writes.
+TEST $(dirname $0)/bug-1250170-fsync $M0/file
+
+#Run profile info initially to filter out the interval statistics in the
+#subsequent runs.
+TEST $CLI volume profile $V0 info
+#We get only cumulative statistics.
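+#In the cumulative statistics, column 8 is the "No. of calls" field of each
+#fop, so summing it gives the total number of WRITE/FSYNC calls.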
+write_count=$($CLI volume profile $V0 info | grep WRITE |awk '{count += $8} END {print count}')
+fsync_count=$($CLI volume profile $V0 info | grep FSYNC |awk '{count += $8} END {print count}')
+
+EXPECT "5" echo $write_count
+TEST [ -z "$fsync_count" ]
+
+TEST $CLI volume profile $V0 stop
+TEST umount $M0
+rm -f $(dirname $0)/bug-1250170-fsync
+cleanup
diff --git a/tests/bugs/replicate/bug-1266876-allow-reset-brick-for-same-path.t b/tests/bugs/replicate/bug-1266876-allow-reset-brick-for-same-path.t
new file mode 100644
index 00000000000..884b7892954
--- /dev/null
+++ b/tests/bugs/replicate/bug-1266876-allow-reset-brick-for-same-path.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+# Create files
+for i in {1..5}
+do
+ echo $i > $M0/file$i.txt
+done
+
+# Negative case with brick not killed && volume-id xattrs present
+TEST ! $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit force
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+# Negative case with brick killed but volume-id xattr present
+TEST ! $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit
+
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1 start
+# Simulate a disk reset
+for i in {1..5}
+do
+ rm -rf $B0/${V0}1/file$i.txt
+done
+for i in {6..10}
+do
+ echo $i > $M0/file$i.txt
+done
+
+# Now reset the brick
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+# Check if entry-heal has happened
+TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1 | sort)
+EXPECT "10" echo $(ls $B0/${V0}1 | wc -l)
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1292379.t b/tests/bugs/replicate/bug-1292379.t
new file mode 100644
index 00000000000..be1bf699173
--- /dev/null
+++ b/tests/bugs/replicate/bug-1292379.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+. $(dirname $0)/../../fileio.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.eager-lock off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST wfd=`fd_available`
+TEST fd_open $wfd "w" $M0/a
+
+TEST fd_write $wfd "abcd"
+
+# Kill brick-0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# While brick-0 is down, rename 'a' to 'b'
+TEST mv $M0/a $M0/b
+
+TEST fd_write $wfd "lmn"
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST fd_write $wfd "pqrs"
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST fd_write $wfd "xyz"
+TEST fd_close $wfd
+
+md5sum_b0=$(md5sum $B0/${V0}0/b | awk '{print $1}')
+
+EXPECT "$md5sum_b0" echo `md5sum $B0/${V0}1/b | awk '{print $1}'`
+
+TEST umount $M0
+cleanup
diff --git a/tests/bugs/replicate/bug-1297695.t b/tests/bugs/replicate/bug-1297695.t
new file mode 100644
index 00000000000..d5c1a214fe2
--- /dev/null
+++ b/tests/bugs/replicate/bug-1297695.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+function write_to_file {
+ dd of=$M0/dir/file if=/dev/urandom bs=1024k count=128 >/dev/null 2>&1
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+
+TEST $CLI volume set $V0 cluster.eager-lock on
+TEST $CLI volume set $V0 post-op-delay-secs 3
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST $CLI volume set $V0 ensure-durability off
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST `echo 'abc' > $M0/dir/file`
+
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+write_to_file &
+#Test if the MAX [F]INODELK fop latency is of the order of seconds.
+EXPECT "^1$" get_pending_heal_count $V0
+inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
+TEST [ -z $inodelk_max_latency ]
+cleanup
diff --git a/tests/bugs/replicate/bug-1305031-block-reads-on-metadata-sbrain.t b/tests/bugs/replicate/bug-1305031-block-reads-on-metadata-sbrain.t
new file mode 100644
index 00000000000..780ddb9250c
--- /dev/null
+++ b/tests/bugs/replicate/bug-1305031-block-reads-on-metadata-sbrain.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Test that for files in metadata-split-brain, we do not wind even a single read.
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST chmod 700 $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST chmod 777 $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST umount $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+lines=$(cat $M0/file | wc -l)
+EXPECT 0 echo $lines
+TEST umount $M0
+cleanup
diff --git a/tests/bugs/replicate/bug-1325792.t b/tests/bugs/replicate/bug-1325792.t
new file mode 100644
index 00000000000..73190e5f341
--- /dev/null
+++ b/tests/bugs/replicate/bug-1325792.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 3
+
+
+EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count replica $H0:$B0/${V0}0 | grep -A 1 ${V0}0 | grep "entries" | wc -l`
+EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count replica $H0:$B0/${V0}1 | grep -A 1 ${V0}1 | grep "entries" | wc -l`
+EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count replica $H0:$B0/${V0}2 | grep -A 1 ${V0}2 | grep "entries" | wc -l`
+EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count replica $H0:$B0/${V0}3 | grep -A 1 ${V0}3 | grep "entries" | wc -l`
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1335652.t b/tests/bugs/replicate/bug-1335652.t
new file mode 100644
index 00000000000..653a1b05ce2
--- /dev/null
+++ b/tests/bugs/replicate/bug-1335652.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 shard on
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+#Kill the zeroth brick so that the 1st and 2nd get marked dirty
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+TEST dd if=/dev/urandom of=$M0/file bs=10MB count=20
+
+#At no point should the value of dirty be greater than 0 on the source bricks
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}1/.shard
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}2/.shard
+
+rm -rf $M0/file;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1340623-mkdir-fails-remove-brick-started.t b/tests/bugs/replicate/bug-1340623-mkdir-fails-remove-brick-started.t
new file mode 100644
index 00000000000..6d177a7d3f8
--- /dev/null
+++ b/tests/bugs/replicate/bug-1340623-mkdir-fails-remove-brick-started.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+## Create a 2x2 volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/r11 $H0:$B0/r12 $H0:$B0/r21 $H0:$B0/r22;
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start the volume
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## for release > 3.7 , gluster nfs is off by default
+TEST $CLI vol set $V0 nfs.disable off;
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+TEST mount_nfs $H0:/$V0 $N0;
+
+## create some directories and files inside mount
+mkdir $N0/io;
+for j in {1..10}; do mkdir $N0/io/b$j; for k in {1..10}; do touch $N0/io/b$j/c$k; done done
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/r11 $H0:$B0/r12 start;
+
+TEST mkdir $N0/dir1;
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$B0/r11 $H0:$B0/r12"
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/r11 $H0:$B0/r12 commit;
+
+TEST mkdir $N0/dir2;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1341650.t b/tests/bugs/replicate/bug-1341650.t
new file mode 100644
index 00000000000..610342ca5bd
--- /dev/null
+++ b/tests/bugs/replicate/bug-1341650.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+TEST $CLI volume quota $V0 enable
+TEST $CLI volume set $V0 quota-deem-statfs on
+TEST $CLI volume set $V0 soft-timeout 0
+TEST $CLI volume set $V0 hard-timeout 0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST mkdir $M0/dir
+TEST $CLI volume quota $V0 limit-objects /dir 10
+
+TEST touch $M0/dir/file1
+TEST touch $M0/dir/file2
+TEST touch $M0/dir/file3
+TEST touch $M0/dir/file4
+TEST touch $M0/dir/file5
+
+# Kill 3rd brick and create entries
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST touch $M0/dir/file6
+TEST touch $M0/dir/file7
+TEST touch $M0/dir/file8
+TEST touch $M0/dir/file9
+
+# The quota object limit is reached. Remove an object for the next create to succeed.
+TEST ! touch $M0/dir/file10
+
+TEST rm $M0/dir/file1
+TEST touch $M0/dir/file10
+
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}0/dir
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}1/dir
+
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+#Check that no conservative merge happened. file1 must not be present on any brick.
+TEST ! stat $B0/${V0}0/dir/file1
+TEST ! stat $B0/${V0}1/dir/file1
+TEST ! stat $B0/${V0}2/dir/file1
+
+TEST umount $M0
+cleanup
diff --git a/tests/bugs/replicate/bug-1363721.t b/tests/bugs/replicate/bug-1363721.t
new file mode 100644
index 00000000000..0ed34d8a4f4
--- /dev/null
+++ b/tests/bugs/replicate/bug-1363721.t
@@ -0,0 +1,120 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+FILE_UPDATE_TIMEOUT=20
+cleanup
+
+function size_increased {
+ local file=$1
+ local size=$2
+ local new_size=$(stat -c%s $file)
+ if [ $new_size -gt $size ];
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
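+# dd (started below) exits as soon as a write fails, so the disappearance of
+# its /proc entry indicates that the write has failed.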
+function has_write_failed {
+ local pid=$1
+ if [ -d /proc/$pid ]; then echo "N"; else echo "Y"; fi
+}
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --direct-io-mode=enable $M0
+
+cd $M0
+
+# Start writing to a file.
+(dd if=/dev/urandom of=$M0/file1 bs=1k 2>/dev/null 1>/dev/null)&
+dd_pid=$!
+
+# Let IO happen
+EXPECT_WITHIN $FILE_UPDATE_TIMEOUT "Y" size_increased file1 0
+
+# Now kill the zeroth brick
+kill_brick $V0 $H0 $B0/${V0}0
+
+# Let IO continue
+EXPECT_WITHIN $FILE_UPDATE_TIMEOUT "Y" size_increased file1 $(stat -c%s file1)
+
+# Now bring the brick back up
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+# Let IO continue
+EXPECT_WITHIN $FILE_UPDATE_TIMEOUT "Y" size_increased file1 $(stat -c%s file1)
+
+# Now kill the first brick
+kill_brick $V0 $H0 $B0/${V0}1
+
+# Let IO continue
+EXPECT_WITHIN $FILE_UPDATE_TIMEOUT "Y" size_increased file1 $(stat -c%s file1)
+
+# Now bring the brick back up
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# Let IO continue for 3 seconds
+sleep 3
+
+# Now kill the second brick
+kill_brick $V0 $H0 $B0/${V0}2
+
+# At this point the write should have failed.
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "Y" has_write_failed $dd_pid
+
+# Also make sure that the second brick is never accused.
+
+md5sum_2=$(md5sum $B0/${V0}2/file1 | awk '{print $1}')
+
+EXPECT_NOT "$md5sum_2" echo `md5sum $B0/${V0}0/file1 | awk '{print $1}'`
+EXPECT_NOT "$md5sum_2" echo `md5sum $B0/${V0}1/file1 | awk '{print $1}'`
+
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}0/file1 trusted.afr.dirty data
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/file1 trusted.afr.dirty data
+
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}0/file1 trusted.afr.$V0-client-2 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/file1 trusted.afr.$V0-client-2 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}2/file1 trusted.afr.$V0-client-2 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}0/file1 trusted.afr.$V0-client-2 metadata
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/file1 trusted.afr.$V0-client-2 metadata
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}2/file1 trusted.afr.$V0-client-2 metadata
+
+# Now bring the brick back up
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Enable shd
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$md5sum_2" echo `md5sum $B0/${V0}0/file1 | awk '{print $1}'`
+EXPECT "$md5sum_2" echo `md5sum $B0/${V0}1/file1 | awk '{print $1}'`
+EXPECT "$md5sum_2" echo `md5sum $B0/${V0}2/file1 | awk '{print $1}'`
+
+cd ~
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1365455.t b/tests/bugs/replicate/bug-1365455.t
new file mode 100644
index 00000000000..1953e2a9327
--- /dev/null
+++ b/tests/bugs/replicate/bug-1365455.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function check_size
+{
+ for i in {1..10}; do
+ size1=`stat -c %s $B0/${V0}0/tmp$i`
+ size2=`stat -c %s $B0/${V0}1/tmp$i`
+ if [[ $size1 -eq 0 ]] || [[ $size2 -eq 0 ]] || [[ $size1 -ne $size2 ]]; then
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0;
+
+TEST $CLI volume start $V0;
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+for i in {1..10}
+do
+ echo abc > $M0/tmp$i
+done
+
+
+# Add Another brick
+TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}1
+
+#Check if self heal daemon has come up
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+
+#Check if self heal daemon is able to see all bricks
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Check if entry-heal has happened
+TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1 | sort)
+
+#Check size of files on bricks
+TEST check_size
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1386188-sbrain-fav-child.t b/tests/bugs/replicate/bug-1386188-sbrain-fav-child.t
new file mode 100644
index 00000000000..d049d95ef9a
--- /dev/null
+++ b/tests/bugs/replicate/bug-1386188-sbrain-fav-child.t
@@ -0,0 +1,84 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST touch $M0/data.txt
+TEST touch $M0/mdata.txt
+
+#Create data and metadata split-brain
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd if=/dev/urandom of=$M0/data.txt bs=1024 count=1024
+TEST setfattr -n user.value -v value1 $M0/mdata.txt
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/data.txt bs=1024 count=1024
+TEST setfattr -n user.value -v value2 $M0/mdata.txt
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+## Check that the file is still in split-brain:
+## I/O fails.
+cat $M0/data.txt > /dev/null
+EXPECT "1" echo $?
+## Pending xattrs blame each other.
+brick0_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/data.txt)
+brick1_pending=$(get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/data.txt)
+TEST [ $brick0_pending -ne "000000000000000000000000" ]
+TEST [ $brick1_pending -ne "000000000000000000000000" ]
+
+## I/O fails.
+getfattr -n user.value $M0/mdata.txt
+EXPECT "1" echo $?
+brick0_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/mdata.txt)
+brick1_pending=$(get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/mdata.txt)
+TEST [ $brick0_pending -ne "000000000000000000000000" ]
+TEST [ $brick1_pending -ne "000000000000000000000000" ]
+
+## Use mtime as the favorite-child policy, so brick0 will be the source.
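+# Note: a trusted.afr.* xattr value packs three 32-bit counters: data,
+# metadata and entry pending counts, in that order.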
+# Set dirty (data part) on the sink brick to check if it is reset later along with the pending xattr.
+TEST setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/${V0}1/data.txt
+# Set dirty (metadata part) on the sink brick to check if it is reset later along with the pending xattr.
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000100000000 $B0/${V0}1/mdata.txt
+
+TEST $CLI volume set $V0 favorite-child-policy mtime
+
+# Reading the file should be allowed and the sink brick's xattrs must be reset.
+cat $M0/data.txt > /dev/null
+EXPECT "0" echo $?
+TEST brick1_pending=$(get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/data.txt)
+TEST brick1_dirty=$(get_hex_xattr trusted.afr.dirty $B0/${V0}1/data.txt)
+TEST [ $brick1_dirty -eq "000000000000000000000000" ]
+TEST [ $brick1_pending -eq "000000000000000000000000" ]
+
+# Accessing the metadata should be allowed and the sink brick's xattrs must be reset.
+EXPECT "value2" echo $(getfattr --only-values -n user.value $M0/mdata.txt)
+TEST brick1_pending=$(get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/mdata.txt)
+TEST brick1_dirty=$(get_hex_xattr trusted.afr.dirty $B0/${V0}1/mdata.txt)
+TEST [ $brick1_dirty -eq "000000000000000000000000" ]
+TEST [ $brick1_pending -eq "000000000000000000000000" ]
+
+#Enable shd and heal the file.
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT 0 get_pending_heal_count $V0
+cleanup;
diff --git a/tests/bugs/replicate/bug-1402730.t b/tests/bugs/replicate/bug-1402730.t
new file mode 100644
index 00000000000..c7866df463b
--- /dev/null
+++ b/tests/bugs/replicate/bug-1402730.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0
+
+TEST mkdir -p $M0/a/b/c
+cd $M0/a/b/c
+
+TEST kill_brick $V0 $H0 $B0/${V0}2
+rm -rf $B0/${V0}2/*
+rm -rf $B0/${V0}2/.glusterfs
+
+#Ideally, disk replacement is done using the reset-brick or replace-brick
+#gluster CLI, which will create the .glusterfs folder.
+mkdir $B0/${V0}2/.glusterfs && chmod 600 $B0/${V0}2/.glusterfs
+
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST touch file
+
+GFID_C=$(get_gfid_string $M0/a/b/c)
+TEST stat $B0/${V0}0/.glusterfs/indices/entry-changes/$GFID_C/file
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$GFID_C/file
+
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}0/a/b/c trusted.afr.$V0-client-2 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a/b/c trusted.afr.$V0-client-2 entry
+
+cd ~
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1408712.t b/tests/bugs/replicate/bug-1408712.t
new file mode 100644
index 00000000000..9499a598ef1
--- /dev/null
+++ b/tests/bugs/replicate/bug-1408712.t
@@ -0,0 +1,103 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=12
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume heal $V0 granular-entry-heal enable
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 performance.flush-behind off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 2
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M1 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M1 $V0-replicate-0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M1 $V0-replicate-0 2
+
+TEST cd $M0
+TEST dd if=/dev/zero of=file bs=1M count=8
+
+# Kill brick-0.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status $V0 $H0 $B0/${V0}0
+
+TEST "dd if=/dev/zero bs=1M count=8 >> file"
+
+FILE_GFID=$(get_gfid_string $M0/file)
+
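+# With granular entry self-heal, names to be healed are indexed per-directory
+# under .glusterfs/indices/entry-changes/<parent-gfid>/.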
+# Test that the index associated with '/.shard' is created on B1 and B2.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+TEST stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+# Check for successful creation of granular entry indices
+for i in {2..3}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+ TEST_IN_LOOP stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+done
+
+TEST cd ~
+TEST md5sum $M1/file
+
+# Test that the index associated with '/.shard' and the created shards do not disappear on B1 and B2.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+TEST stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+for i in {2..3}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+ TEST_IN_LOOP stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+done
+
+# Start the brick that was down
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Enable shd
+TEST gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Now verify that there are no name indices left after self-heal
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+TEST ! stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+
+for i in {2..3}
+do
+ TEST_IN_LOOP ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+ TEST_IN_LOOP ! stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+done
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1417522-block-split-brain-resolution.t b/tests/bugs/replicate/bug-1417522-block-split-brain-resolution.t
new file mode 100644
index 00000000000..d0e2fee8bcd
--- /dev/null
+++ b/tests/bugs/replicate/bug-1417522-block-split-brain-resolution.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST touch $M0/file
+
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=10
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=20
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST ! dd if=$M0/file of=/dev/null
+SOURCE_BRICK_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+
+# Various fav-child policies must not heal the file when some bricks are down.
+TEST $CLI volume set $V0 favorite-child-policy size
+TEST ! dd if=$M0/file of=/dev/null
+TEST $CLI volume set $V0 favorite-child-policy ctime
+TEST ! dd if=$M0/file of=/dev/null
+TEST $CLI volume set $V0 favorite-child-policy mtime
+TEST ! dd if=$M0/file of=/dev/null
+TEST $CLI volume set $V0 favorite-child-policy majority
+TEST ! dd if=$M0/file of=/dev/null
+
+# CLI/mount based split-brain resolution must also not work.
+TEST ! $CLI volume heal $V0 split-brain bigger-file /file
+TEST ! $CLI volume heal $V0 split-brain mtime /file
+TEST ! $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}2 /file1
+
+TEST ! getfattr -n replica.split-brain-status $M0/file
+TEST ! setfattr -n replica.split-brain-choice -v $V0-client-1 $M0/file
+
+# Bring all bricks back up and launch heal.
+TEST $CLI volume set $V0 self-heal-daemon on
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT 0 get_pending_heal_count $V0
+B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+B2_MD5=$(md5sum $B0/${V0}2/file | cut -d\ -f1)
+TEST [ "$SOURCE_BRICK_MD5" == "$B1_MD5" ]
+TEST [ "$SOURCE_BRICK_MD5" == "$B2_MD5" ]
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
+
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
new file mode 100644
index 00000000000..10ce0131f4f
--- /dev/null
+++ b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
@@ -0,0 +1,79 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Disable self-heal-daemon, client-side-heal and set quorum-type to none
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.quorum-type none
+
+#Kill bricks 0 & 1 and create a file to have pending entry for 0 & 1 on brick 2
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+echo "file 1" >> $M0/f1
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+#Kill bricks 1 & 2 and create a file to have pending entry for 1 & 2 on brick 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+echo "file 2" >> $M0/f2
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+#Kill bricks 2 & 0 and create a file to have pending entry for 2 & 0 on brick 1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}0
+echo "file 3" >> $M0/f3
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# We used to kill one brick and check that entry heal does not reset the
+# pending xattrs for the down brick. Now that entry heal requires all bricks
+# to be up, that check has been removed from this .t.
+
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+
+TEST ls $M0
+TEST cat $M0/f1
+TEST cat $M0/f2
+TEST cat $M0/f3
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2
+
+#Check whether all the bricks contains all the 3 files.
+EXPECT "3" echo $(ls $B0/${V0}0 | wc -l)
+EXPECT "3" echo $(ls $B0/${V0}1 | wc -l)
+EXPECT "3" echo $(ls $B0/${V0}2 | wc -l)
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t b/tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t
new file mode 100644
index 00000000000..cdcaf62c925
--- /dev/null
+++ b/tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+NEW_USER=bug1438255
+NEW_UID=1438255
+NEW_GID=1438255
+
+TEST groupadd -o -g ${NEW_GID} ${NEW_USER}-${NEW_GID}
+TEST useradd -o -M -u ${NEW_UID} -g ${NEW_GID} -K MAIL_DIR=/dev/null ${NEW_USER}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+TEST touch $M0/FILE
+TEST kill_brick $V0 $H0 $B0/${V0}2
+chown $NEW_UID:$NEW_GID $M0/FILE
+EXPECT "000000000000000100000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0/FILE
+EXPECT "000000000000000100000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1/FILE
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# setfattr done as NEW_USER fails on the 3rd brick with EPERM but succeeds on
+# the first 2 and hence on the mount.
+su -m bug1438255 -c "setfattr -n user.myattr -v myvalue $M0/FILE"
+TEST [ $? -eq 0 ]
+EXPECT "000000000000000200000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0/FILE
+EXPECT "000000000000000200000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1/FILE
+# Brick 3 does not have any self-blaming pending xattr.
+TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}2/FILE
+
+TEST userdel --force ${NEW_USER}
+TEST groupdel ${NEW_USER}-${NEW_GID}
+cleanup
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/bugs/replicate/bug-1448804-check-quorum-type-values.t b/tests/bugs/replicate/bug-1448804-check-quorum-type-values.t
new file mode 100644
index 00000000000..5bacf3edcfe
--- /dev/null
+++ b/tests/bugs/replicate/bug-1448804-check-quorum-type-values.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0..1}
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Default quorum-type for replica 2 is none. quorum-count is zero but it is not displayed.
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "none" get_quorum_type $M0 $V0 0
+cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep quorum-count
+TEST [ $? -ne 0 ]
+
+# Convert to replica-3.
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Default quorum-type for replica 3 is auto. quorum-count is INT_MAX but it is not displayed.
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "auto" get_quorum_type $M0 $V0 0
+cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep quorum-count
+TEST [ $? -ne 0 ]
+
+# Change the type to fixed.
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+# We haven't set quorum-count yet, so it takes the default value of zero in reconfigure() and hence the quorum-type is displayed as none.
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "none" get_quorum_type $M0 $V0 0
+cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep quorum-count
+TEST [ $? -ne 0 ]
+
+# set quorum-count and check.
+TEST $CLI volume set $V0 cluster.quorum-count 1
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "fixed" get_quorum_type $M0 $V0 0
+EXPECT "1" echo `cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep quorum-count|awk '{print $3}'`
+
+# reset to default values.
+TEST $CLI volume reset $V0 cluster.quorum-type
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "auto" get_quorum_type $M0 $V0 0
+cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep quorum-count
+TEST [ $? -ne 0 ]
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1473026.t b/tests/bugs/replicate/bug-1473026.t
new file mode 100644
index 00000000000..efb3ffa0d39
--- /dev/null
+++ b/tests/bugs/replicate/bug-1473026.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=KNOWN_ISSUE,BUG=1473026
+#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=1473026
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0;
+
+#kill one brick (this has some issue)
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+#kill the brick to be replaced
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# We know this command would fail because the file system is read-only now
+TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}4 commit force
+
+TEST pkill glusterd
+
+# Glusterd should start even though the volume info and brick volfiles don't match
+TEST glusterd
+TEST pidof glusterd
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1477169-entry-selfheal-rename.t b/tests/bugs/replicate/bug-1477169-entry-selfheal-rename.t
new file mode 100644
index 00000000000..bb858a8a63d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1477169-entry-selfheal-rename.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST mkdir -p $M0/d1/dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 $M0/d2
+gfid_d1=$(gf_get_gfid_xattr $B0/${V0}0/d1)
+gfid_d2=$(gf_get_gfid_xattr $B0/${V0}0/d2)
+gfid_dir=$(gf_get_gfid_xattr $B0/${V0}0/d1/dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789)
+
+gfid_str_d1=$(gf_gfid_xattr_to_str $gfid_d1)
+gfid_str_d2=$(gf_gfid_xattr_to_str $gfid_d2)
+gfid_str_d3=$(gf_gfid_xattr_to_str $gfid_dir)
+
+# Kill 3rd brick and rename the dir from mount.
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST mv $M0/d1/dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 $M0/d2
+
+# Bring it back and trigger heal.
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Check that .glusterfs symlink for dir exists and points to d2/dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
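+# For directories, the .glusterfs entry is a symlink of the form
+# ../../<aa>/<bb>/<parent-gfid>/<basename>, so field 5 of the link target is
+# the parent directory's gfid.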
+TEST linkname=$(readlink $B0/${V0}2/.glusterfs/${gfid_str_d3:0:2}/${gfid_str_d3:2:2}/$gfid_str_d3)
+EXPECT "dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" basename $linkname
+TEST parent_dir_gfid_str=$(echo $linkname|cut -d / -f5)
+EXPECT $gfid_str_d2 echo $parent_dir_gfid_str
+
+TEST rmdir $M0/d2/dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
+
+TEST ! stat $B0/${V0}0/.glusterfs/${gfid_str_d3:0:2}/${gfid_str_d3:2:2}/$gfid_str_d3
+TEST ! stat $B0/${V0}1/.glusterfs/${gfid_str_d3:0:2}/${gfid_str_d3:2:2}/$gfid_str_d3
+TEST ! stat $B0/${V0}2/.glusterfs/${gfid_str_d3:0:2}/${gfid_str_d3:2:2}/$gfid_str_d3
+cleanup;
diff --git a/tests/bugs/replicate/bug-1480525.t b/tests/bugs/replicate/bug-1480525.t
new file mode 100644
index 00000000000..7c63bb2e4ea
--- /dev/null
+++ b/tests/bugs/replicate/bug-1480525.t
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+EXPECT_NOT "-1" echo `cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep read_child |awk '{print $3}'`
+TEST $CLI volume set $V0 choose-local off
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "-1" echo `cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep read_child |awk '{print $3}'`
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1493415-gfid-heal.t b/tests/bugs/replicate/bug-1493415-gfid-heal.t
new file mode 100644
index 00000000000..8a79febf4b4
--- /dev/null
+++ b/tests/bugs/replicate/bug-1493415-gfid-heal.t
@@ -0,0 +1,78 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST $CLI volume set $V0 self-heal-daemon off
+
+# Create base entry in indices/xattrop
+echo "Data" > $M0/FILE
+
+#------------------------------------------------------------------------------#
+TEST touch $M0/f1
+gfid_f1=$(gf_get_gfid_xattr $B0/${V0}0/f1)
+gfid_str_f1=$(gf_gfid_xattr_to_str $gfid_f1)
+
+# Remove gfid xattr and .glusterfs hard link from 2nd brick. This simulates a
+# brick crash at the point where file got created but no xattrs were set.
+TEST setfattr -x trusted.gfid $B0/${V0}1/f1
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+# storage/posix treats a file without a gfid that changed less than a second
+# ago as non-existent, so we need to wait in order to force posix to treat
+# this as a valid file that merely lacks a gfid.
+sleep 2
+
+# Assume there were no pending xattrs on the parent dir because the 1st brick
+# crashed too. Name heal from the client must then heal the gfid.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST stat $M0/f1
+EXPECT "$gfid_f1" gf_get_gfid_xattr $B0/${V0}1/f1
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+#------------------------------------------------------------------------------#
+TEST mkdir $M0/dir
+TEST touch $M0/dir/f2
+gfid_f2=$(gf_get_gfid_xattr $B0/${V0}0/dir/f2)
+gfid_str_f2=$(gf_gfid_xattr_to_str $gfid_f2)
+
+# Remove gfid xattr and .glusterfs hard link from 2nd brick. This simulates a
+# brick crash at the point where file got created but no xattrs were set.
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir/f2
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
+
+#Now simulate setting of pending entry xattr on parent dir of 1st brick.
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/dir
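+# create_brick_xattrop_entry (from afr.rc) links the dir's gfid into
+# indices/xattrop so that index heal picks it up.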
+create_brick_xattrop_entry $B0/${V0}0 dir
+
+# storage/posix treats a file without a gfid that changed less than a second
+# ago as non-existent, so we need to wait in order to force posix to treat
+# this as a valid file that merely lacks a gfid.
+sleep 2
+
+#Trigger entry-heal via shd
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$gfid_f2" gf_get_gfid_xattr $B0/${V0}1/dir/f2
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
+
+#------------------------------------------------------------------------------#
+cleanup;
diff --git a/tests/bugs/replicate/bug-1498570-client-iot-graph-check.t b/tests/bugs/replicate/bug-1498570-client-iot-graph-check.t
new file mode 100644
index 00000000000..2b3b3040228
--- /dev/null
+++ b/tests/bugs/replicate/bug-1498570-client-iot-graph-check.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+TESTS_EXPECTED_IN_LOOP=21
+function reset_cluster
+{
+ cleanup
+ TEST glusterd
+ TEST pidof glusterd
+}
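+# check_iot_option <expected-value> <grep-flag>: verifies the value of the
+# client-io-threads option and whether the io-threads xlator appears in the
+# fuse volfile. A grep-flag of 1 asserts that grep succeeds (exit status 0,
+# i.e. the xlator is loaded); 0 asserts that grep fails (not loaded).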
+function check_iot_option
+{
+ local enabled=$1
+ local is_loaded_in_graph=$2
+
+ EXPECT "$enabled" volume_get_field $V0 client-io-threads
+ IOT_STRING="volume\ $V0-io-threads"
+ grep "$IOT_STRING" $GLUSTERD_WORKDIR/vols/$V0/trusted-$V0.tcp-fuse.vol
+ TEST ret=$?
+ EXPECT_NOT "$is_loaded_in_graph" echo $ret
+}
+
+reset_cluster
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+check_iot_option on 1
+
+reset_cluster
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+check_iot_option off 0
+
+reset_cluster
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}1
+check_iot_option off 0
+TEST $CLI volume remove-brick $V0 replica 1 $H0:$B0/${V0}1 force
+check_iot_option on 1
+
+reset_cluster
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..5}
+TEST $CLI volume set $V0 client-io-threads on
+check_iot_option on 1
+TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}2 $H0:$B0/${V0}5 force
+check_iot_option on 1
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1539358-split-brain-detection.t b/tests/bugs/replicate/bug-1539358-split-brain-detection.t
new file mode 100755
index 00000000000..7b71a7a9e7d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1539358-split-brain-detection.t
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+###############################################################################
+# Case of 2 bricks blaming the third and the third blaming the other two.
+
+TEST `echo "hello" >> $M0/file`
+
+# B0 and B2 must blame B1
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST `echo "append" >> $M0/file`
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}0/file trusted.afr.$V0-client-1 data
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}2/file trusted.afr.$V0-client-1 data
+CLIENT_MD5=$(md5sum $M0/file | cut -d\ -f1)
+
+# B1 must blame B0 and B2
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/$V0"1"/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/$V0"1"/file
+
+# Launch heal
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+B0_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+B2_MD5=$(md5sum $B0/${V0}2/file | cut -d\ -f1)
+TEST [ "$CLIENT_MD5" == "$B0_MD5" ]
+TEST [ "$CLIENT_MD5" == "$B1_MD5" ]
+TEST [ "$CLIENT_MD5" == "$B2_MD5" ]
+
+TEST rm $M0/file
+
+###############################################################################
+# Case of each brick blaming the next one in a cyclic manner
+
+TEST `echo "hello" >> $M0/file`
+# Mark cyclic xattrs and modify file content directly on the bricks.
+TEST $CLI volume set $V0 self-heal-daemon off
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/$V0"0"/file
+setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/$V0"0"/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/$V0"1"/file
+setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/$V0"1"/file
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/$V0"2"/file
+setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/$V0"2"/file
+
+TEST `echo "ab" >> $B0/$V0"0"/file`
+TEST `echo "cdef" >> $B0/$V0"1"/file`
+TEST `echo "ghi" >> $B0/$V0"2"/file`
+
+# Add entry to xattrop dir to trigger index heal.
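+# The index xlator keeps a base entry under indices/xattrop; hard-linking the
+# file's gfid name to it is what queues the file for index heal.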
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/file))
+ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
+
+# Launch heal
+TEST $CLI volume set $V0 self-heal-daemon on
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+B0_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+B2_MD5=$(md5sum $B0/${V0}2/file | cut -d\ -f1)
+TEST [ "$B0_MD5" == "$B1_MD5" ]
+TEST [ "$B0_MD5" == "$B2_MD5" ]
+###############################################################################
+cleanup
diff --git a/tests/bugs/replicate/bug-1561129-enospc.t b/tests/bugs/replicate/bug-1561129-enospc.t
new file mode 100644
index 00000000000..1b402fcc781
--- /dev/null
+++ b/tests/bugs/replicate/bug-1561129-enospc.t
@@ -0,0 +1,24 @@
+#!/bin/bash
+#Tests that a write workload hitting ENOSPC doesn't crash the mount
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST truncate -s 128M $B0/xfs_image
+TEST mkfs.xfs -f $B0/xfs_image
+TEST mkdir $B0/bricks
+TEST mount -t xfs -o loop $B0/xfs_image $B0/bricks
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/bricks/brick{0,1,2}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Write 50MB of data, which will try to consume 50x3=150MB on $B0/bricks.
+# Since only ~128MB is available, we hit ENOSPC in the pre-op cbk, which must
+# not crash the mount.
+TEST ! dd if=/dev/zero of=$M0/a bs=1M count=50
+TEST stat $M0/a
+cleanup;
diff --git a/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t b/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
new file mode 100644
index 00000000000..49c4dea4e9c
--- /dev/null
+++ b/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function create_files {
+ local i=1
+ while true
+ do
+ dd if=/dev/zero of=$M0/file$i bs=1M count=10
+ if [ -e $B0/${V0}0/file$i ] || [ -e $B0/${V0}1/file$i ]; then
+ ((i++))
+ else
+ break
+ fi
+ done
+ echo $i
+}
+
+TEST glusterd
+
+#Create brick partitions
+TEST truncate -s 100M $B0/brick0
+TEST truncate -s 100M $B0/brick1
+#Make the 3rd brick larger, to test the scenario where the entry transaction
+#succeeds on only one brick and not on the others.
+TEST truncate -s 110M $B0/brick2
+LO1=`SETUP_LOOP $B0/brick0`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO1
+LO2=`SETUP_LOOP $B0/brick1`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO2
+LO3=`SETUP_LOOP $B0/brick2`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO3
+TEST mkdir -p $B0/${V0}0 $B0/${V0}1 $B0/${V0}2
+TEST MOUNT_LOOP $LO1 $B0/${V0}0
+TEST MOUNT_LOOP $LO2 $B0/${V0}1
+TEST MOUNT_LOOP $LO3 $B0/${V0}2
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+i=$(create_files)
+TEST ! ls $B0/${V0}0/file$i
+TEST ! ls $B0/${V0}1/file$i
+TEST ls $B0/${V0}2/file$i
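+# The entry transaction met quorum failure: the file landed only on the
+# bigger brick, which must therefore carry a non-zero afr dirty xattr.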
+dirty=$(get_hex_xattr trusted.afr.dirty $B0/${V0}2)
+TEST [ "$dirty" != "000000000000000000000000" ]
+
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST rm -f $M0/file1
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+TEST force_umount $M0
+TEST $CLI volume stop $V0
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume delete $V0;
+UMOUNT_LOOP ${B0}/${V0}{0,1,2}
+rm -f ${B0}/brick{0,1,2}
+cleanup;
diff --git a/tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t b/tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t
new file mode 100644
index 00000000000..c6e5459e9a8
--- /dev/null
+++ b/tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t
@@ -0,0 +1,128 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
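+# Verify that <file> carries the same gfid on all 3 bricks and that each copy
+# has a link count of 2, i.e. the .glusterfs gfid hard link exists.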
+function check_gfid_and_link_count
+{
+ local file=$1
+
+ file_gfid_b0=$(gf_get_gfid_xattr $B0/${V0}0/$file)
+ TEST [ ! -z "$file_gfid_b0" ]
+ file_gfid_b1=$(gf_get_gfid_xattr $B0/${V0}1/$file)
+ file_gfid_b2=$(gf_get_gfid_xattr $B0/${V0}2/$file)
+ EXPECT $file_gfid_b0 echo $file_gfid_b1
+ EXPECT $file_gfid_b0 echo $file_gfid_b2
+
+ EXPECT "2" stat -c %h $B0/${V0}0/$file
+ EXPECT "2" stat -c %h $B0/${V0}1/$file
+ EXPECT "2" stat -c %h $B0/${V0}2/$file
+}
+TESTS_EXPECTED_IN_LOOP=30
+
+##############################################################################
+# Test on 1x3 volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+
+# Create files directly in the backend on different bricks
+echo $RANDOM >> $B0/${V0}0/file1
+echo $RANDOM >> $B0/${V0}1/file2
+echo $RANDOM >> $B0/${V0}2/file3
+
+# Wait to avoid the is_fresh_file code path: storage/posix treats a file
+# without a gfid that changed less than a second ago as non-existent.
+sleep 2
+
+# Access them from mount to trigger name + gfid heal.
+TEST stat $M0/file1
+TEST stat $M0/file2
+TEST stat $M0/file3
+
+# Launch index heal to complete any pending data/metadata heals.
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Check each file has a gfid and the .glusterfs hardlink
+check_gfid_and_link_count file1
+check_gfid_and_link_count file2
+check_gfid_and_link_count file3
+
+TEST rm $M0/file1
+TEST rm $M0/file2
+TEST rm $M0/file3
+cleanup;
+
+##############################################################################
+# Test on 1x (2+1) volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+
+# Create files directly in the backend on different bricks
+echo $RANDOM >> $B0/${V0}0/file1
+echo $RANDOM >> $B0/${V0}1/file2
+touch $B0/${V0}2/file3
+
+# Wait to avoid the is_fresh_file code path: storage/posix treats a file
+# without a gfid that changed less than a second ago as non-existent.
+sleep 2
+
+# Access them from mount to trigger name + gfid heal.
+TEST stat $M0/file1
+TEST stat $M0/file2
+
+# Though the file is created on all 3 bricks, lookup will fail as the arbiter
+# blames the other 2 bricks and the arbiter is not 'readable'.
+TEST ! stat $M0/file3
+
+# Launch index heal to complete any pending data/metadata heals.
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Now file3 should be accessible from the mount.
+TEST stat $M0/file3
+
+# Check each file has a gfid and the .glusterfs hardlink
+check_gfid_and_link_count file1
+check_gfid_and_link_count file2
+check_gfid_and_link_count file3
+
+TEST rm $M0/file1
+TEST rm $M0/file2
+TEST rm $M0/file3
+cleanup;
diff --git a/tests/bugs/replicate/bug-1626994-info-split-brain.t b/tests/bugs/replicate/bug-1626994-info-split-brain.t
new file mode 100644
index 00000000000..86bfecb1a9e
--- /dev/null
+++ b/tests/bugs/replicate/bug-1626994-info-split-brain.t
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+# Test to check dirs having dirty xattr do not show up in info split-brain.
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Create base entry in indices/xattrop
+echo "Data" > $M0/FILE
+rm -f $M0/FILE
+EXPECT "1" count_index_entries $B0/${V0}0
+EXPECT "1" count_index_entries $B0/${V0}1
+EXPECT "1" count_index_entries $B0/${V0}2
+
+TEST mkdir $M0/dirty_dir
+TEST mkdir $M0/pending_dir
+
+# Set dirty xattrs on all bricks to simulate the case where the entry
+# transaction succeeded only in the pre-op phase.
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}0/dirty_dir
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}1/dirty_dir
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}2/dirty_dir
+create_brick_xattrop_entry $B0/${V0}0 dirty_dir
+# Should not show up as split-brain.
+EXPECT "0" afr_get_split_brain_count $V0
+
+# The replace/reset-brick case, where the new brick has a dirty xattr and the
+# other 2 bricks blame it, should not be reported as split-brain.
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/${V0}0
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/${V0}1
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}2
+create_brick_xattrop_entry $B0/${V0}0 "/"
+# Should not show up as split-brain.
+EXPECT "0" afr_get_split_brain_count $V0
+
+# Set pending xattrs on all bricks blaming each other to simulate the case of
+# entry split-brain.
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/pending_dir
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/${V0}1/pending_dir
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}2/pending_dir
+create_brick_xattrop_entry $B0/${V0}0 pending_dir
+# Should show up as split-brain.
+EXPECT "1" afr_get_split_brain_count $V0
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1637249-gfid-heal.t b/tests/bugs/replicate/bug-1637249-gfid-heal.t
new file mode 100644
index 00000000000..e824f14531e
--- /dev/null
+++ b/tests/bugs/replicate/bug-1637249-gfid-heal.t
@@ -0,0 +1,149 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1};
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume start $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+###############################################################################
+
+# Test for gfid + name heal when there is no 'source' brick, i.e. parent dir
+# xattrs are in split-brain or have dirty xattrs.
+
+TEST mkdir $M0/dir_pending
+TEST dd if=/dev/urandom of=$M0/dir_pending/file1 bs=1024 count=1024
+TEST mkdir $M0/dir_pending/dir11
+TEST mkdir $M0/dir_dirty
+TEST touch $M0/dir_dirty/file2
+
+# Set pending entry xattrs on dir_pending and remove gfid of entries under it on one brick.
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/dir_pending
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1/dir_pending
+
+gfid_f1=$(gf_get_gfid_xattr $B0/${V0}0/dir_pending/file1)
+gfid_str_f1=$(gf_gfid_xattr_to_str $gfid_f1)
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir_pending/file1
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+gfid_d11=$(gf_get_gfid_xattr $B0/${V0}0/dir_pending/dir11)
+gfid_str_d11=$(gf_gfid_xattr_to_str $gfid_d11)
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir_pending/dir11
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_d11:0:2}/${gfid_str_d11:2:2}/$gfid_str_d11
+
+
+# Set dirty entry xattrs on dir_dirty and remove gfid of entries under it on one brick.
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}1/dir_dirty
+gfid_f2=$(gf_get_gfid_xattr $B0/${V0}0/dir_dirty/file2)
+gfid_str_f2=$(gf_gfid_xattr_to_str $gfid_f2)
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir_dirty/file2
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
+
+# Create a file under dir_pending directly on the backend only on 1 brick
+TEST touch $B0/${V0}1/dir_pending/file3
+
+# Create a file under dir_pending directly on the backend on all bricks
+TEST touch $B0/${V0}0/dir_pending/file4
+TEST touch $B0/${V0}1/dir_pending/file4
+
+# Stop & start the volume and mount client again.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST stat $M0/dir_pending/file1
+EXPECT "$gfid_f1" gf_get_gfid_xattr $B0/${V0}1/dir_pending/file1
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+TEST stat $M0/dir_pending/dir11
+EXPECT "$gfid_d11" gf_get_gfid_xattr $B0/${V0}1/dir_pending/dir11
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_d11:0:2}/${gfid_str_d11:2:2}/$gfid_str_d11
+
+
+TEST stat $M0/dir_dirty/file2
+EXPECT "$gfid_f2" gf_get_gfid_xattr $B0/${V0}1/dir_dirty/file2
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
+
+TEST stat $M0/dir_pending/file3 # This assigns the gfid on the 2nd brick and heals the entry onto the 1st brick.
+gfid_f3=$(gf_get_gfid_xattr $B0/${V0}1/dir_pending/file3)
+TEST [ ! -z "$gfid_f3" ]
+EXPECT "$gfid_f3" gf_get_gfid_xattr $B0/${V0}0/dir_pending/file3
+
+TEST stat $M0/dir_pending/file4
+gfid_f4=$(gf_get_gfid_xattr $B0/${V0}0/dir_pending/file4)
+TEST [ ! -z "$gfid_f4" ]
+EXPECT "$gfid_f4" gf_get_gfid_xattr $B0/${V0}1/dir_pending/file4
+###############################################################################
+
+# Test for gfid + name heal when all bricks are 'source', i.e. parent dir
+# does not have any pending or dirty xattrs.
+
+TEST mkdir $M0/dir_clean
+TEST dd if=/dev/urandom of=$M0/dir_clean/file1 bs=1024 count=1024
+TEST mkdir $M0/dir_clean/dir11
+
+gfid_f1=$(gf_get_gfid_xattr $B0/${V0}0/dir_clean/file1)
+gfid_str_f1=$(gf_gfid_xattr_to_str $gfid_f1)
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir_clean/file1
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+gfid_d11=$(gf_get_gfid_xattr $B0/${V0}0/dir_clean/dir11)
+gfid_str_d11=$(gf_gfid_xattr_to_str $gfid_d11)
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir_clean/dir11
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_d11:0:2}/${gfid_str_d11:2:2}/$gfid_str_d11
+
+# Create a file under dir_clean directly on the backend only on 1 brick
+TEST touch $B0/${V0}1/dir_clean/file3
+
+# Create a file under dir_clean directly on the backend on all bricks
+TEST touch $B0/${V0}0/dir_clean/file4
+TEST touch $B0/${V0}1/dir_clean/file4
+
+# Stop & start the volume and mount client again.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST stat $M0/dir_clean/file1
+EXPECT "$gfid_f1" gf_get_gfid_xattr $B0/${V0}1/dir_clean/file1
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+TEST stat $M0/dir_clean/dir11
+EXPECT "$gfid_d11" gf_get_gfid_xattr $B0/${V0}1/dir_clean/dir11
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_d11:0:2}/${gfid_str_d11:2:2}/$gfid_str_d11
+
+TEST stat $M0/dir_clean/file3 # This assigns the gfid on the 2nd brick and heals the entry onto the 1st brick.
+gfid_f3=$(gf_get_gfid_xattr $B0/${V0}1/dir_clean/file3)
+TEST [ ! -z "$gfid_f3" ]
+EXPECT "$gfid_f3" gf_get_gfid_xattr $B0/${V0}0/dir_clean/file3
+
+TEST stat $M0/dir_clean/file4
+gfid_f4=$(gf_get_gfid_xattr $B0/${V0}0/dir_clean/file4)
+TEST [ ! -z "$gfid_f4" ]
+EXPECT "$gfid_f4" gf_get_gfid_xattr $B0/${V0}1/dir_clean/file4
+###############################################################################
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t b/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t
new file mode 100644
index 00000000000..d7d1f285e01
--- /dev/null
+++ b/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+# Test to check that data self-heal does not leave any stale lock.
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Create base entry in indices/xattrop
+echo "Data" > $M0/FILE
+
+# Kill arbiter brick and write to FILE.
+TEST kill_brick $V0 $H0 $B0/${V0}2
+echo "arbiter down" >> $M0/FILE
+EXPECT 2 get_pending_heal_count $V0
+
+# Bring it back up and let heal complete.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# write to the FILE must succeed.
+echo "this must succeed" >> $M0/FILE
+TEST [ $? -eq 0 ]
+cleanup;
diff --git a/tests/bugs/replicate/bug-1655050-dir-sbrain-size-policy.t b/tests/bugs/replicate/bug-1655050-dir-sbrain-size-policy.t
new file mode 100644
index 00000000000..63f72e86bf6
--- /dev/null
+++ b/tests/bugs/replicate/bug-1655050-dir-sbrain-size-policy.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+#Create replica 2 volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+cd $M0
+TEST mkdir dir
+
+#Create metadata split-brain
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST chmod 757 dir
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST chmod 747 dir
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+#Use size as fav-child policy.
+TEST $CLI volume set $V0 cluster.favorite-child-policy size
+
+#Enable shd and attempt the heal. The size policy must not resolve a
+#directory split-brain, so both entries remain pending.
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+EXPECT_WITHIN $HEAL_TIMEOUT "2" get_pending_heal_count $V0
+
+b1c1dir=$(afr_get_specific_changelog_xattr $B0/${V0}0/dir \
+ trusted.afr.$V0-client-1 "metadata")
+b2c0dir=$(afr_get_specific_changelog_xattr $B0/${V0}1/dir \
+ trusted.afr.$V0-client-0 "metadata")
+
+EXPECT "00000001" echo $b1c1dir
+EXPECT "00000001" echo $b2c0dir
+
+#Finish up
+TEST force_umount $M0
+cleanup;
diff --git a/tests/bugs/replicate/bug-1655052-sbrain-policy-same-size.t b/tests/bugs/replicate/bug-1655052-sbrain-policy-same-size.t
new file mode 100755
index 00000000000..319736e1157
--- /dev/null
+++ b/tests/bugs/replicate/bug-1655052-sbrain-policy-same-size.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+#Test the split-brain resolution CLI commands.
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+#Create replica 2 volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST touch $M0/file
+
+############ Healing using favorite-child-policy = size and size of bricks is same #################
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+#file still in split-brain
+EXPECT_WITHIN $HEAL_TIMEOUT "2" get_pending_heal_count $V0
+cat $M0/file > /dev/null
+EXPECT_NOT "^0$" echo $?
+
+#We know that both bricks have the same file size, so the size policy cannot pick a source.
+TEST $CLI volume set $V0 cluster.favorite-child-policy size
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "2" get_pending_heal_count $V0
+cat $M0/file > /dev/null
+EXPECT_NOT "^0$" echo $?
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup
+
diff --git a/tests/bugs/replicate/bug-1655854-support-dist-to-rep3-arb-conversion.t b/tests/bugs/replicate/bug-1655854-support-dist-to-rep3-arb-conversion.t
new file mode 100644
index 00000000000..783016dc3c0
--- /dev/null
+++ b/tests/bugs/replicate/bug-1655854-support-dist-to-rep3-arb-conversion.t
@@ -0,0 +1,95 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+# Conversion from 2x1 to 2x3
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST mkdir $M0/dir
+TEST dd if=/dev/urandom of=$M0/dir/file bs=100K count=5
+file_md5sum=$(md5sum $M0/dir/file | awk '{print $1}')
+
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{2..5}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}4
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}5
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 5
+
+# Trigger heal and wait for it to complete
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Check whether the directory & file are healed to the newly added bricks
+TEST ls $B0/${V0}2/dir
+TEST ls $B0/${V0}3/dir
+TEST ls $B0/${V0}4/dir
+TEST ls $B0/${V0}5/dir
+
+TEST [ $file_md5sum == $(md5sum $B0/${V0}4/dir/file | awk '{print $1}') ]
+TEST [ $file_md5sum == $(md5sum $B0/${V0}5/dir/file | awk '{print $1}') ]
+
+
+# Conversion from 2x1 to 2x(2+1)
+
+TEST $CLI volume create $V1 $H0:$B0/${V1}{0,1}
+EXPECT 'Created' volinfo_field $V1 'Status';
+TEST $CLI volume start $V1
+EXPECT 'Started' volinfo_field $V1 'Status';
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
+
+TEST $GFS --volfile-id=$V1 --volfile-server=$H0 $M1;
+TEST mkdir $M1/dir
+TEST dd if=/dev/urandom of=$M1/dir/file bs=100K count=5
+file_md5sum=$(md5sum $M1/dir/file | awk '{print $1}')
+
+TEST $CLI volume add-brick $V1 replica 3 arbiter 1 $H0:$B0/${V1}{2..5}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}3
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}4
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}5
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 5
+
+# Trigger heal and wait for it to complete
+TEST $CLI volume heal $V1
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V1
+
+# Check whether the directory & file are healed to the newly added bricks
+TEST ls $B0/${V1}2/dir
+TEST ls $B0/${V1}3/dir
+TEST ls $B0/${V1}4/dir
+TEST ls $B0/${V1}5/dir
+
+EXPECT "0" stat -c %s $B0/${V1}5/dir/file
+TEST [ $file_md5sum == $(md5sum $B0/${V1}4/dir/file | awk '{print $1}') ]
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1657783-do-not-update-read-subvol-on-rename-link.t b/tests/bugs/replicate/bug-1657783-do-not-update-read-subvol-on-rename-link.t
new file mode 100644
index 00000000000..b180f0e1239
--- /dev/null
+++ b/tests/bugs/replicate/bug-1657783-do-not-update-read-subvol-on-rename-link.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 performance.write-behind off
+
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST mkdir $M0/dir
+TEST "echo abc > $M0/file1"
+TEST "echo uvw > $M0/file2"
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST "echo def > $M0/file1"
+TEST "echo xyz > $M0/file2"
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+# Rename file1 and read it. Read should be served from the 3rd brick
+TEST mv $M0/file1 $M0/file3
+EXPECT "def" cat $M0/file3
+
+# Create a link to file2 and read it. Read should be served from the 3rd brick
+TEST ln $M0/file2 $M0/dir/file4
+EXPECT "xyz" cat $M0/dir/file4
+EXPECT "xyz" cat $M0/file2
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t b/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t
new file mode 100644
index 00000000000..78581e99614
--- /dev/null
+++ b/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+CHANGELOG_PATH_0="$B0/${V0}2/.glusterfs/changelogs"
+ROLLOVER_TIME=100
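+# A long rollover interval keeps all ops in a single changelog file so that
+# the check_changelog_op assertion at the end finds the expected record.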
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 changelog.changelog on
+TEST $CLI volume set $V0 changelog.rollover-time $ROLLOVER_TIME
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST dd if=/dev/zero of=$M0/file1 bs=128K count=5
+
+TEST $CLI volume profile $V0 start
+TEST $CLI volume add-brick $V0 replica 3 arbiter 1 $H0:$B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST $CLI volume profile $V0 info
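+# In profile output the 8th whitespace-separated field of a fop row is its
+# call count; summing it across the matching rows gives the TRUNCATE total.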
+truncate_count=$($CLI volume profile $V0 info | grep TRUNCATE | awk '{count += $8} END {print count}')
+
+EXPECT "1" echo $truncate_count
+EXPECT "1" check_changelog_op ${CHANGELOG_PATH_0} "^ D "
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1696599-io-hang.t b/tests/bugs/replicate/bug-1696599-io-hang.t
new file mode 100755
index 00000000000..869cdb94bda
--- /dev/null
+++ b/tests/bugs/replicate/bug-1696599-io-hang.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+#Tests that local structures in afr are removed from granted/blocked list of
+#locks when inodelk fails on all bricks
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3}
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.client-io-threads off
+TEST $CLI volume set $V0 delay-gen locks
+TEST $CLI volume set $V0 delay-gen.delay-duration 5000000
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 delay-gen.enable finodelk
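+# delay-gen now stalls every finodelk by 5 seconds (delay-duration is in
+# microseconds), giving us a window to stop the volume while the write below
+# is still waiting for its lock.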
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST $GFS -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/file
+#Trigger write and stop bricks so inodelks fail on all bricks leading to
+#lock failure condition
+echo abc >> $M0/file &
+
+TEST $CLI volume stop $V0
+TEST $CLI volume reset $V0 delay-gen
+wait
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 2
+#Test that only one write succeeded; this verifies that delay-gen worked as
+#expected
+echo abc >> $M0/file
+EXPECT "abc" cat $M0/file
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t b/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
new file mode 100644
index 00000000000..76d1f2170f2
--- /dev/null
+++ b/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
@@ -0,0 +1,136 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $CLI volume heal $V0 disable
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+###############################################################################
+# Case of 2 bricks blaming the third and the third blaming the other two.
+
+TEST mkdir $M0/dir
+
+# B0 and B2 must blame B1
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST setfattr -n user.metadata -v 1 $M0/dir
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}0/dir trusted.afr.$V0-client-1 metadata
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}2/dir trusted.afr.$V0-client-1 metadata
+CLIENT_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $M0/dir)
+
+# B1 must blame B0 and B2
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000100000000 $B0/$V0"1"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"1"/dir
+
+# Launch heal
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+B0_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}0/dir)
+B1_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}1/dir)
+B2_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}2/dir)
+
+TEST [ "$CLIENT_XATTR" == "$B0_XATTR" ]
+TEST [ "$CLIENT_XATTR" == "$B1_XATTR" ]
+TEST [ "$CLIENT_XATTR" == "$B2_XATTR" ]
+TEST setfattr -x user.metadata $M0/dir
+
+###############################################################################
+# Case of each brick blaming the next one in a cyclic manner
+
+TEST $CLI volume heal $V0 disable
+TEST `echo "hello" >> $M0/dir/file`
+# Mark cyclic xattrs and modify metadata directly on the bricks.
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000100000000 $B0/$V0"0"/dir/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"1"/dir/file
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000100000000 $B0/$V0"2"/dir/file
+
+setfattr -n user.metadata -v 1 $B0/$V0"0"/dir/file
+setfattr -n user.metadata -v 2 $B0/$V0"1"/dir/file
+setfattr -n user.metadata -v 3 $B0/$V0"2"/dir/file
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/file))
+ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+B0_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}0/dir/file)
+B1_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}1/dir/file)
+B2_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}2/dir/file)
+
+TEST [ "$B0_XATTR" == "$B1_XATTR" ]
+TEST [ "$B0_XATTR" == "$B2_XATTR" ]
+TEST rm -f $M0/dir/file
+
+###############################################################################
+# Case of 2 bricks having quorum blaming and the other having only one blaming.
+
+TEST $CLI volume heal $V0 disable
+TEST `echo "hello" >> $M0/dir/file`
+# B0 and B2 must blame B1
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST setfattr -n user.metadata -v 1 $M0/dir/file
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}0/dir/file trusted.afr.$V0-client-1 metadata
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}2/dir/file trusted.afr.$V0-client-1 metadata
+
+# B1 must blame B0 and B2
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000100000000 $B0/$V0"1"/dir/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"1"/dir/file
+
+# B0 must blame B2
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"0"/dir/file
+
+# Modify the metadata directly on the bricks B1 & B2.
+setfattr -n user.metadata -v 2 $B0/$V0"1"/dir/file
+setfattr -n user.metadata -v 3 $B0/$V0"2"/dir/file
+
+# Launch heal
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+B0_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}0/dir/file)
+B1_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}1/dir/file)
+B2_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}2/dir/file)
+
+TEST [ "$B0_XATTR" == "$B1_XATTR" ]
+TEST [ "$B0_XATTR" == "$B2_XATTR" ]
+
+###############################################################################
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
new file mode 100644
index 00000000000..0aeaaafc84c
--- /dev/null
+++ b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume heal $V0 disable
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+
+##########################################################################################
+# GFID link file and the GFID is missing on one brick and all the bricks are being blamed.
+
+TEST touch $M0/dir/file
+#TEST kill_brick $V0 $H0 $B0/$V0"1"
+
+# Blame all the bricks: B2 blames B0, while B0 blames B1 and B2.
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Remove the gfid xattr and the link file on one brick.
+gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file)
+gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file)
+TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file
+TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+
+# Wait for 2 seconds to force posix to consider that this is a valid file but
+# without a gfid.
+sleep 2
+TEST $CLI volume heal $V0
+
+# Heal should not fail, as the file is only missing its gfid xattr and link
+# file; this is not an actual gfid or type mismatch.
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file
+TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+rm -f $M0/dir/file
+
+
+###########################################################################################
+# GFID link file and the GFID is missing on two bricks and all the bricks are being blamed.
+
+TEST $CLI volume heal $V0 disable
+TEST touch $M0/dir/file
+#TEST kill_brick $V0 $H0 $B0/$V0"1"
+
+# Blame all the bricks: B2 blames B0, while B0 blames B1 and B2.
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Remove the gfid xattr and the link file on two bricks.
+gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file)
+gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file)
+TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file
+TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir/file
+TEST rm -f $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+
+# Wait for 2 seconds to force posix to consider that this is a valid file but
+# without a gfid.
+sleep 2
+TEST $CLI volume heal $V0
+
+# Heal should not fail, as the file is only missing its gfid xattr and link
+# file; this is not an actual gfid or type mismatch.
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file
+TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}1/dir/file
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1728770-pass-xattrs.t b/tests/bugs/replicate/bug-1728770-pass-xattrs.t
new file mode 100644
index 00000000000..159c4fcc6a1
--- /dev/null
+++ b/tests/bugs/replicate/bug-1728770-pass-xattrs.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
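+# Helpers that run fops on a brick mount and echo the exit status of the last
+# command; once the disk returns EIO, touch exits with 1 and ls with 2.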
+function fop_on_bad_disk {
+ local path=$1
+ mkdir $path/dir{1..1000} 2>/dev/null
+ mv $path/dir1 $path/newdir
+ touch $path/foo.txt
+ echo $?
+}
+
+function ls_fop_on_bad_disk {
+ local path=$1
+ ls $path
+ echo $?
+}
+
+TEST init_n_bricks 6;
+TEST setup_lvm 6;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 replica 3 $H0:$L1 $H0:$L2 $H0:$L3 $H0:$L4 $H0:$L5 $H0:$L6;
+TEST $CLI volume set $V0 health-check-interval 1000;
+
+TEST $CLI volume start $V0;
+
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+#corrupt last disk
+dd if=/dev/urandom of=/dev/mapper/patchy_snap_vg_6-brick_lvm bs=512K count=200 status=progress && sync
+
+
+# Test that the disk now returns EIO for touch and ls
+EXPECT_WITHIN $DISK_FAIL_TIMEOUT "^1$" fop_on_bad_disk "$L6"
+EXPECT_WITHIN $DISK_FAIL_TIMEOUT "^2$" ls_fop_on_bad_disk "$L6"
+
+TEST touch $M0/foo{1..100}
+TEST $CLI volume remove-brick $V0 replica 3 $H0:$L4 $H0:$L5 $H0:$L6 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$L4 $H0:$L5 $H0:$L6";
+
+#check that remove-brick status does not report any failed or skipped files
+var=`$CLI volume remove-brick $V0 $H0:$L4 $H0:$L5 $H0:$L6 status | grep completed`
+TEST [ `echo $var | awk '{print $5}'` = "0" ]
+TEST [ `echo $var | awk '{print $6}'` = "0" ]
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t b/tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t
new file mode 100644
index 00000000000..14dfae89135
--- /dev/null
+++ b/tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t
@@ -0,0 +1,102 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+function time_stamps_match {
+ path=$1
+ mtime_source_b0=$(get_mtime $B0/${V0}0/$path)
+ atime_source_b0=$(get_atime $B0/${V0}0/$path)
+ mtime_source_b2=$(get_mtime $B0/${V0}2/$path)
+ atime_source_b2=$(get_atime $B0/${V0}2/$path)
+ mtime_sink_b1=$(get_mtime $B0/${V0}1/$path)
+ atime_sink_b1=$(get_atime $B0/${V0}1/$path)
+
+ #The same brick must be the source of heal for both atime and mtime.
+ if [[ ( $mtime_source_b0 -eq $mtime_sink_b1 && $atime_source_b0 -eq $atime_sink_b1 ) || \
+ ( $mtime_source_b2 -eq $mtime_sink_b1 && $atime_source_b2 -eq $atime_sink_b1 ) ]]
+ then
+ echo "Y"
+ else
+ echo "Mtimes: $mtime_source_b0:$mtime_sink_b1:$mtime_source_b2 Atimes: $atime_source_b0:$atime_sink_b1:$atime_source_b2"
+ fi
+
+}
+
+function mtimes_match {
+ path=$1
+ mtime_source_b0=$(get_mtime $B0/${V0}0/$path)
+ mtime_source_b2=$(get_mtime $B0/${V0}2/$path)
+ mtime_sink_b1=$(get_mtime $B0/${V0}1/$path)
+
+    if [[ ( $mtime_source_b0 -eq $mtime_sink_b1 ) || \
+         ( $mtime_source_b2 -eq $mtime_sink_b1 ) ]]
+ then
+ echo "Y"
+ else
+ echo "Mtimes: $mtime_source_b0:$mtime_sink_b1:$mtime_source_b2"
+ fi
+
+}
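+# Both helpers echo "Y" on a match and the raw timestamps otherwise, so a
+# failing EXPECT prints the offending values. Expected usage, as done below:
+#   EXPECT "Y" time_stamps_match DIR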
+
+# Test that the parent dir's timestamps are restored during entry-heal.
+GET_MDATA_PATH=$(dirname $0)/../../utils
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+###############################################################################
+TEST mkdir $M0/DIR
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST touch $M0/DIR/FILE
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+EXPECT "Y" time_stamps_match DIR
+ctime_source1=$(get_ctime $B0/${V0}0/DIR)
+ctime_source2=$(get_ctime $B0/${V0}2/DIR)
+ctime_sink=$(get_ctime $B0/${V0}1/DIR)
+TEST [ $ctime_source1 -eq $ctime_sink ]
+TEST [ $ctime_source2 -eq $ctime_sink ]
+
+
+###############################################################################
+# Repeat the test with ctime feature disabled.
+TEST $CLI volume set $V0 features.ctime off
+TEST mkdir $M0/DIR2
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST touch $M0/DIR2/FILE
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+#Executing parallel heals may change atime after heal, so test just the
+#mtime.
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+EXPECT "Y" mtimes_match DIR2
+
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+cleanup;
diff --git a/tests/bugs/replicate/bug-1744548-heal-timeout.t b/tests/bugs/replicate/bug-1744548-heal-timeout.t
new file mode 100644
index 00000000000..011535066f9
--- /dev/null
+++ b/tests/bugs/replicate/bug-1744548-heal-timeout.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+function get_cumulative_opendir_count {
+#The sed command prints the content between Cumulative and Interval, i.e. it keeps only the Cumulative stats
+ $CLI volume profile $V0 info |sed -n '/^Cumulative/,/^Interval/p'|grep OPENDIR| awk '{print $8}'|tr -d '\n'
+}
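+# Illustrative only: 'volume profile info' emits one OPENDIR line per brick
+# whose 8th column is assumed to be the cumulative call count, so three
+# bricks with 4 opendirs each concatenate to "444".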
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST ! $CLI volume heal $V0
+
+# Enable shd and verify that index crawl is triggered immediately.
+TEST $CLI volume profile $V0 start
+TEST $CLI volume profile $V0 info clear
+TEST $CLI volume heal $V0 enable
+# Each brick does 4 opendirs, one each for the dirty, xattrop, entry-changes
+# and anonymous-inode index directories.
+EXPECT_WITHIN 4 "^444$" get_cumulative_opendir_count
+
+# Check that a change in heal-timeout is honoured immediately.
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+sleep 10
+# Two crawls must have happened.
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^121212$" get_cumulative_opendir_count
+
+# shd must not heal if it is disabled and heal-timeout is changed.
+TEST $CLI volume heal $V0 disable
+#Wait for configuration update and any opendir fops to complete
+sleep 10
+TEST $CLI volume profile $V0 info clear
+TEST $CLI volume set $V0 cluster.heal-timeout 6
+#Better to wait for more than 6 seconds to account for configuration updates
+sleep 10
+COUNT=`$CLI volume profile $V0 info incremental |grep OPENDIR|awk '{print $8}'|tr -d '\n'`
+TEST [ -z "$COUNT" ]
+cleanup;
diff --git a/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t b/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t
new file mode 100644
index 00000000000..96279084065
--- /dev/null
+++ b/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+function check_gfid_and_link_count
+{
+ local file=$1
+
+ file_gfid_b0=$(gf_get_gfid_xattr $B0/${V0}0/$file)
+ TEST [ ! -z $file_gfid_b0 ]
+ file_gfid_b1=$(gf_get_gfid_xattr $B0/${V0}1/$file)
+ file_gfid_b2=$(gf_get_gfid_xattr $B0/${V0}2/$file)
+ EXPECT $file_gfid_b0 echo $file_gfid_b1
+ EXPECT $file_gfid_b0 echo $file_gfid_b2
+
+ EXPECT "2" stat -c %h $B0/${V0}0/$file
+ EXPECT "2" stat -c %h $B0/${V0}1/$file
+ EXPECT "2" stat -c %h $B0/${V0}2/$file
+}
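+# A link count of 2 means each file has exactly its directory entry plus its
+# .glusterfs/<xx>/<yy>/<gfid> hardlink, i.e. heal recreated the gfid link.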
+TESTS_EXPECTED_IN_LOOP=18
+
+################################################################################
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume heal $V0 disable
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST `echo "File 1 " > $M0/dir/file1`
+TEST touch $M0/dir/file{2..4}
+
+# Remove file2 from 1st & 3rd bricks
+TEST rm -f $B0/$V0"0"/dir/file2
+TEST rm -f $B0/$V0"2"/dir/file2
+
+# Remove file3 and the .glusterfs hardlink from 1st & 2nd bricks
+gfid_file3=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file3)
+gfid_str_file3=$(gf_gfid_xattr_to_str $gfid_file3)
+TEST rm $B0/$V0"0"/.glusterfs/${gfid_str_file3:0:2}/${gfid_str_file3:2:2}/$gfid_str_file3
+TEST rm $B0/$V0"1"/.glusterfs/${gfid_str_file3:0:2}/${gfid_str_file3:2:2}/$gfid_str_file3
+TEST rm -f $B0/$V0"0"/dir/file3
+TEST rm -f $B0/$V0"1"/dir/file3
+
+# Remove the .glusterfs hardlink and the gfid xattr of file4 on 3rd brick
+gfid_file4=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file4)
+gfid_str_file4=$(gf_gfid_xattr_to_str $gfid_file4)
+TEST rm $B0/$V0"2"/.glusterfs/${gfid_str_file4:0:2}/${gfid_str_file4:2:2}/$gfid_str_file4
+TEST setfattr -x trusted.gfid $B0/$V0"2"/dir/file4
+
+# B0 and B2 blame each other
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir on first brick.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# All the files must be present on all the bricks after conservative merge and
+# should have the gfid xattr and the .glusterfs hardlink.
+check_gfid_and_link_count dir/file1
+check_gfid_and_link_count dir/file2
+check_gfid_and_link_count dir/file3
+check_gfid_and_link_count dir/file4
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t b/tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t
new file mode 100644
index 00000000000..c1bdf34ee6d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t
@@ -0,0 +1,111 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard enable
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST glusterfs --volfile-server=$H0 --volfile-id=/$V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+#Create split-brain by setting afr xattrs/gfids manually.
+#file1 is non-sharded and will be in data split-brain.
+#file2 will have one shard which will be in data split-brain.
+#file3 will have one shard which will be in gfid split-brain.
+#file4 will have one shard which will be in data & metadata split-brain.
+TEST dd if=/dev/zero of=$M0/file1 bs=1024 count=1024 oflag=direct
+TEST dd if=/dev/zero of=$M0/file2 bs=1M count=6 oflag=direct
+TEST dd if=/dev/zero of=$M0/file3 bs=1M count=6 oflag=direct
+TEST dd if=/dev/zero of=$M0/file4 bs=1M count=6 oflag=direct
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#-------------------------------------------------------------------------------
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}0/file1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}0/file1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}1/file1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}1/file1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}2/file1
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}2/file1
+
+#-------------------------------------------------------------------------------
+gfid_f2=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/file2))
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}0/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}0/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}1/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}1/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}2/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}2/.shard/$gfid_f2.1
+
+#-------------------------------------------------------------------------------
+TESTS_EXPECTED_IN_LOOP=5
+function assign_new_gfid {
+ brickpath=$1
+ filename=$2
+ gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brickpath/$filename))
+ gfid_shard=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brickpath/.shard/$gfid.1))
+
+ TEST rm $brickpath/.glusterfs/${gfid_shard:0:2}/${gfid_shard:2:2}/$gfid_shard
+ TEST setfattr -x trusted.gfid $brickpath/.shard/$gfid.1
+ new_gfid=$(get_random_gfid)
+ new_gfid_str=$(gf_gfid_xattr_to_str $new_gfid)
+ TEST setfattr -n trusted.gfid -v $new_gfid $brickpath/.shard/$gfid.1
+ TEST mkdir -p $brickpath/.glusterfs/${new_gfid_str:0:2}/${new_gfid_str:2:2}
+ TEST ln $brickpath/.shard/$gfid.1 $brickpath/.glusterfs/${new_gfid_str:0:2}/${new_gfid_str:2:2}/$new_gfid_str
+}
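+# Assigning a fresh random gfid to the same shard on bricks 1 and 2 below
+# manufactures a gfid split-brain for that shard against brick 0.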
+assign_new_gfid $B0/$V0"1" file3
+assign_new_gfid $B0/$V0"2" file3
+
+#-------------------------------------------------------------------------------
+gfid_f4=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/file4))
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000100000000 $B0/${V0}0/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000100000000 $B0/${V0}0/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000100000000 $B0/${V0}1/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000100000000 $B0/${V0}1/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000100000000 $B0/${V0}2/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000100000000 $B0/${V0}2/.shard/$gfid_f4.1
+
+#-------------------------------------------------------------------------------
+#Add entry to xattrop dir on first brick and check for split-brain.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+
+gfid_f1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/file1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f1
+
+gfid_f2_shard1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/.shard/$gfid_f2.1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f2_shard1
+
+gfid_f3=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/file3))
+gfid_f3_shard1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/.shard/$gfid_f3.1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f3_shard1
+
+gfid_f4_shard1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/.shard/$gfid_f4.1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f4_shard1
+
+#-------------------------------------------------------------------------------
+#gfid split-brain won't show up in split-brain count.
+EXPECT "3" afr_get_split_brain_count $V0
+EXPECT_NOT "^0$" get_pending_heal_count $V0
+
+#Resolve split-brains
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /file1
+GFIDSTR="gfid:$gfid_f2_shard1"
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 $GFIDSTR
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /.shard/$gfid_f3.1
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /.shard/$gfid_f4.1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+cleanup;
diff --git a/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
new file mode 100644
index 00000000000..7e24eaec03d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup
+
+GET_MDATA_PATH=$(dirname $0)/../../utils
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0..2}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST touch $M0/a
+sleep 1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/a
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+mtime0=$(get_mtime $B0/brick0/a)
+mtime1=$(get_mtime $B0/brick1/a)
+TEST [ $mtime0 -eq $mtime1 ]
+
+ctime0=$(get_ctime $B0/brick0/a)
+ctime1=$(get_ctime $B0/brick1/a)
+TEST [ $ctime0 -eq $ctime1 ]
+
+###############################################################################
+# Repeat the test with ctime feature disabled.
+TEST $CLI volume set $V0 features.ctime off
+
+TEST touch $M0/b
+sleep 1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/b
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+mtime2=$(get_mtime $B0/brick0/b)
+mtime3=$(get_mtime $B0/brick1/b)
+TEST [ $mtime2 -eq $mtime3 ]
+
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+
+TEST force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1801624-entry-heal.t b/tests/bugs/replicate/bug-1801624-entry-heal.t
new file mode 100644
index 00000000000..94b465181fa
--- /dev/null
+++ b/tests/bugs/replicate/bug-1801624-entry-heal.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2}
+TEST $CLI volume set $V0 heal-timeout 5
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+echo "Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+
+# Re-create the file when a brick is down.
+TEST kill_brick $V0 $H0 $B0/brick1
+TEST rm $M0/FILE
+echo "New Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+EXPECT_WITHIN $HEAL_TIMEOUT "4" get_pending_heal_count $V0
+
+# Launching index heal must not reset parent dir afr xattrs or remove granular entry indices.
+$CLI volume heal $V0 # CLI will fail but heal is launched anyway.
+TEST sleep 5 # give index heal a chance to do one run.
+brick0_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/brick0/)
+brick2_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/brick2/)
+TEST [ "$brick0_pending" = "000000000000000000000002" ]
+TEST [ "$brick2_pending" = "000000000000000000000002" ]
+EXPECT "FILE" ls $B0/brick0/.glusterfs/indices/entry-changes/00000000-0000-0000-0000-000000000001/
+EXPECT "FILE" ls $B0/brick2/.glusterfs/indices/entry-changes/00000000-0000-0000-0000-000000000001/
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+$CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+# No gfid split-brain (i.e. EIO) must be seen. Try on a fresh mount to avoid cached values.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST cat $M0/FILE
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
diff --git a/tests/bugs/replicate/bug-765564.t b/tests/bugs/replicate/bug-765564.t
new file mode 100644
index 00000000000..098d225018f
--- /dev/null
+++ b/tests/bugs/replicate/bug-765564.t
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+## Start and create a volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+
+TEST $CLI volume set $V0 performance.io-cache off;
+TEST $CLI volume set $V0 performance.write-behind off;
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+TEST $CLI volume start $V0;
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+#returns success (0) if 'olddir' is absent
+#'olddir' must be absent in both replicas
+function rm_succeeded () {
+    local dir1=$1
+    [[ -d $B0/${V0}-0/$dir1 || -d $B0/${V0}-1/$dir1 ]] && return 1
+    return 0
+}
+
+# returns success (0) if 'newdir' is present
+#'newdir' must be present in both replicas
+function mv_succeeded () {
+    local dir1=$1
+    [[ -d $B0/${V0}-0/$dir1 && -d $B0/${V0}-1/$dir1 ]] && return 0
+    return 1
+}
+
+# echoes 0 on success
+# Only one of rm and mv can succeed. This is captured by the XOR below
+
+function chk_backend_consistency(){
+    local dir1=$1
+    local dir2=$2
+    local rm_status
+    local mv_status
+    rm_succeeded $dir1
+    rm_status=$?
+    mv_succeeded $dir2
+    mv_status=$?
+    if [[ ( $rm_status -eq 0 && $mv_status -ne 0 ) || \
+          ( $rm_status -ne 0 && $mv_status -eq 0 ) ]]; then
+        echo 0
+    else
+        echo 1
+    fi
+}
+
+#concurrent removal/rename of dirs
+function rm_mv_correctness () {
+ ret=0
+ for i in {1..100}; do
+ mkdir $M0/"dir"$i
+ rmdir $M0/"dir"$i &
+ mv $M0/"dir"$i $M0/"adir"$i &
+ wait
+ tmp_ret=$(chk_backend_consistency "dir"$i "adir"$i)
+ (( ret += tmp_ret ))
+ rm -rf $M0/"dir"$i
+ rm -rf $M0/"adir"$i
+ done
+ return $ret
+}
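+# rm_mv_correctness accumulates one failure per inconsistent iteration and
+# returns the total, so "TEST rm_mv_correctness" passes only if every
+# rmdir/mv race left the backend consistent.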
+
+TEST touch $M0/a;
+TEST mv $M0/a $M0/b;
+
+#test rename fop when one of the bricks is down
+kill_brick ${V0} ${H0} ${B0}/${V0}-1;
+TEST touch $M0/h;
+TEST mv $M0/h $M0/1;
+
+TEST $CLI volume start $V0 force;
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1;
+find $M0 2>/dev/null 1>/dev/null;
+find $M0 | xargs stat 2>/dev/null 1>/dev/null;
+
+TEST rm_mv_correctness;
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
+
diff --git a/tests/bugs/replicate/bug-767585-gfid.t b/tests/bugs/replicate/bug-767585-gfid.t
new file mode 100755
index 00000000000..4176aabb544
--- /dev/null
+++ b/tests/bugs/replicate/bug-767585-gfid.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#Test cases to perform gfid-self-heal
+#file 'a' should be assigned a fresh gfid
+#file 'b' should be healed with gfid1 from brick1
+#file 'c' should be healed with gfid2 from brick2
+
+gfid1="0x8428b7193a764bf8be8046fb860b8993"
+gfid2="0x85ad91afa2f74694bf52c3326d048209"
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable
+touch $B0/${V0}0/a $B0/${V0}1/a
+touch $B0/${V0}0/b $B0/${V0}1/b
+touch $B0/${V0}0/c $B0/${V0}1/c
+
+TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}0/b
+TEST setfattr -n trusted.gfid -v $gfid2 $B0/${V0}1/c
+
+sleep 2
+
+TEST stat $M0/a
+TEST stat $M0/b
+TEST stat $M0/c
+
+TEST gf_get_gfid_xattr $B0/${V0}0/a
+TEST gf_get_gfid_xattr $B0/${V0}1/a
+
+EXPECT "$gfid1" gf_get_gfid_xattr $B0/${V0}0/b
+EXPECT "$gfid1" gf_get_gfid_xattr $B0/${V0}1/b
+
+EXPECT "$gfid2" gf_get_gfid_xattr $B0/${V0}0/c
+EXPECT "$gfid2" gf_get_gfid_xattr $B0/${V0}1/c
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-802417.t b/tests/bugs/replicate/bug-802417.t
new file mode 100755
index 00000000000..f213439401e
--- /dev/null
+++ b/tests/bugs/replicate/bug-802417.t
@@ -0,0 +1,120 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function write_file()
+{
+ path="$1"; shift
+ echo "$*" > "$path"
+}
+
+cleanup;
+
+#####################################################
+# We are currently not triggering data heal unless all bricks of the replica
+# are up. We will need to modify this .t once the fix for preventing stale
+# reads from being served to clients for files in spurious split-brain is in.
+# Spurious split-brain here means the afr xattrs indicate split-brain but the
+# file actually is not in split-brain. Self-heal heals such files
+# automatically, but before the heal completes, reads can be served, which
+# needs fixing.
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
+######################################################
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+mkdir -p ${B0}/${V0}-2
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Make sure io-cache and write-behind don't interfere.
+TEST $CLI volume set $V0 performance.io-cache off;
+TEST $CLI volume set $V0 performance.write-behind off;
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+## Make sure automatic self-heal doesn't perturb our results.
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume set $V0 cluster.quorum-type none
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+## Create a file with some recognizably stale data.
+TEST write_file $M0/a_file "old_data"
+
+## Kill two of the bricks and write some newer data.
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}-1
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}-2
+TEST write_file $M0/a_file "new_data"
+
+## Bring all the bricks up and kill one so we do a partial self-heal.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST kill_brick ${V0} ${H0} ${B0}/${V0}-2
+TEST dd if=${M0}/a_file of=/dev/null
+
+
+obs_path_0=${B0}/${V0}-0/a_file
+obs_path_1=${B0}/${V0}-1/a_file
+obs_path_2=${B0}/${V0}-2/a_file
+
+tgt_xattr_0="trusted.afr.${V0}-client-0"
+tgt_xattr_1="trusted.afr.${V0}-client-1"
+tgt_xattr_2="trusted.afr.${V0}-client-2"
+
+actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_0)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+EXPECT_WITHIN $HEAL_TIMEOUT "0x000000000000000000000000" \
+afr_get_changelog_xattr $obs_path_0 $tgt_xattr_1
+
+actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_2)
+EXPECT "0x000000030000000000000000" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_0)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_1)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_1 $tgt_xattr_2)
+EXPECT "0x000000010000000000000000" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_0)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_1)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+actual=$(afr_get_changelog_xattr $obs_path_2 $tgt_xattr_2)
+EXPECT "0x000000000000000000000000|^\$" echo $actual
+
+if [ "$EXIT_EARLY" = "1" ]; then
+ exit 0;
+fi
+
+## Finish up
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-821056.t b/tests/bugs/replicate/bug-821056.t
new file mode 100644
index 00000000000..81186d86309
--- /dev/null
+++ b/tests/bugs/replicate/bug-821056.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind on
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+touch $M0/a
+
+#Open file with fd as 5
+exec 5>$M0/a
+realpath=$(gf_get_gfid_backend_file_path $B0/${V0}0 "a")
+
+kill_brick $V0 $H0 $B0/${V0}0
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+EXPECT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+
+kill_brick $V0 $H0 $B0/${V0}0
+TEST gf_rm_file_and_gfid_link $B0/${V0}0 "a"
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+ls -l $M0/a > /dev/null 2>&1 #Make sure the file is re-created
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/a
+
+for i in {1..1024}; do
+ echo "open sesame" >&5
+done
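+# Writing repeatedly through the still-open fd should force the client to
+# reopen the recreated file on the revived brick, hence the EXPECT_WITHIN
+# below.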
+
+EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+#close the fd
+exec 5>&-
+
+#Check that anon-fd based file is not leaking.
+EXPECT_WITHIN $REOPEN_TIMEOUT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+cleanup;
diff --git a/tests/bugs/replicate/bug-830665.t b/tests/bugs/replicate/bug-830665.t
new file mode 100755
index 00000000000..68180424803
--- /dev/null
+++ b/tests/bugs/replicate/bug-830665.t
@@ -0,0 +1,127 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+function recreate {
+ rm -rf $1 && mkdir -p $1
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+recreate ${B0}/${V0}-0
+recreate ${B0}/${V0}-1
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+TEST $CLI volume set $V0 nfs.disable false
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+#EXPECT_WITHIN fails the test if the command it executes fails. This function
+#returns "" when the file doesn't exist
+function friendly_cat {
+ if [ ! -f $1 ];
+ then
+ echo "";
+ else
+ cat $1;
+ fi
+}
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Make sure stat-prefetch doesn't prevent self-heal checks.
+TEST $CLI volume set $V0 performance.stat-prefetch off;
+
+## Make sure automatic self-heal doesn't perturb our results.
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+## Mount NFS
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+
+## Create some files and directories
+echo "test_data" > $N0/a_file;
+mkdir $N0/a_dir;
+echo "more_test_data" > $N0/a_dir/another_file;
+
+## Unmount and stop the volume.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+TEST $CLI volume stop $V0;
+
+# Recreate the brick. Note that because of http://review.gluster.org/#change,4202
+# we need to preserve and restore the volume ID or else the brick (and thus the
+# entire not-very-HA-any-more volume) won't start. When that bug is fixed, we can
+# remove the [gs]etxattr calls.
+volid=$(getfattr -e hex -n trusted.glusterfs.volume-id $B0/${V0}-0 2> /dev/null \
+ | grep = | cut -d= -f2)
+rm -rf $B0/${V0}-0;
+mkdir $B0/${V0}-0;
+#Ideally, disk replacement is done using reset-brick or replace-brick gluster CLI
+#which will create .glusterfs folder.
+mkdir $B0/${V0}-0/.glusterfs && chmod 600 $B0/${V0}-0/.glusterfs
+
+setfattr -n trusted.glusterfs.volume-id -v $volid $B0/${V0}-0
+
+## Restart and remount. Note that we use actimeo=0 so that the stat calls
+## we need for self-heal don't get blocked by the NFS client.
+TEST $CLI volume start $V0;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock,actimeo=0;
+
+## The Linux NFS client has a really charming habit of caching stuff right
+## after mount, even though we set actimeo=0 above. Life would be much easier
+## if NFS developers cared as much about correctness as they do about shaving
+## a few seconds off of benchmarks.
+ls -l $N0 &> /dev/null;
+sleep 5;
+
+## Force entry self-heal.
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST gluster volume heal $V0 full
+#ls -lR $N0 > /dev/null;
+
+## Do NOT check through the NFS mount here. That will force a new self-heal
+## check, but we want to test whether self-heal already happened.
+
+## Make sure everything's in order on the recreated brick.
+EXPECT_WITHIN $HEAL_TIMEOUT 'test_data' friendly_cat $B0/${V0}-0/a_file;
+EXPECT_WITHIN $HEAL_TIMEOUT 'more_test_data' friendly_cat $B0/${V0}-0/a_dir/another_file;
+
+if [ "$EXIT_EARLY" = "1" ]; then
+ exit 0;
+fi
+
+## Finish up
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-859581.t b/tests/bugs/replicate/bug-859581.t
new file mode 100755
index 00000000000..d8b45a257a1
--- /dev/null
+++ b/tests/bugs/replicate/bug-859581.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs --direct-io-mode=yes --use-readdirp=no --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+mkdir -p $M0/dir1/dir2
+
+TEST rm -f $(gf_get_gfid_backend_file_path $B0/${V0}1 "dir1")
+TEST rmdir $B0/${V0}1/dir1/dir2
+
+TEST stat $M0/dir1/dir2
+
+TEST [ -d $B0/${V0}1/dir1/dir2 ]
+TEST [ ! -d $(gf_get_gfid_backend_file_path $B0/${V0}1 "dir1") ]
+
+# Stop the volume to flush caches and force symlink recreation
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+# Till now, protocol/server was not doing inode linking as part of readdirp.
+# But as part of the user-serviceable snapshots patch, changes were introduced
+# to do inode linking in protocol/server's readdirp. So now, to make sure the
+# gfid handle of dir1 is healed, an explicit lookup has to be sent on it.
+# Otherwise, when ls -l is done just on the mount point $M0, no lookup is sent
+# on the entries received as part of readdirp, because the inodes for those
+# entries were already linked by readdirp itself. So instead of doing
+# "ls -l $M0", it has to be the command below.
+ls -l $M0/dir1;
+
+TEST [ -h $(gf_get_gfid_backend_file_path $B0/${V0}1 "dir1") ]
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
+
diff --git a/tests/bugs/replicate/bug-865825.t b/tests/bugs/replicate/bug-865825.t
new file mode 100755
index 00000000000..ffb2e0f6437
--- /dev/null
+++ b/tests/bugs/replicate/bug-865825.t
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+## Start and create a volume
+mkdir -p ${B0}/${V0}-0
+mkdir -p ${B0}/${V0}-1
+mkdir -p ${B0}/${V0}-2
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}-{0,1,2}
+
+function volinfo_field()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Make sure io-cache and write-behind don't interfere.
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 performance.io-cache off;
+TEST $CLI volume set $V0 performance.quick-read off;
+TEST $CLI volume set $V0 performance.write-behind off;
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+## Make sure automatic self-heal doesn't perturb our results.
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## Mount native
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+
+## Create a file with some recognizable contents.
+echo "test_data" > $M0/a_file;
+
+## Unmount.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+## Mess with the flags as though brick-0 accuses brick-2 while brick-1 is
+## missing its brick-2 changelog altogether.
+value=0x000000010000000000000000
+setfattr -n trusted.afr.${V0}-client-2 -v $value $B0/${V0}-0/a_file
+setfattr -x trusted.afr.${V0}-client-2 $B0/${V0}-1/a_file
+echo "wrong_data" > $B0/${V0}-2/a_file
+
+gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+gluster volume heal $V0 full
+
+## Make sure brick 2 now has the correct contents.
+EXPECT_WITHIN $HEAL_TIMEOUT "test_data" cat $B0/${V0}-2/a_file
+
+if [ "$EXIT_EARLY" = "1" ]; then
+ exit 0;
+fi
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-880898.t b/tests/bugs/replicate/bug-880898.t
new file mode 100644
index 00000000000..660d34ca25f
--- /dev/null
+++ b/tests/bugs/replicate/bug-880898.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+pkill glusterfs
+uuid=""
+for line in $(cat $GLUSTERD_WORKDIR/glusterd.info)
+do
+ if [[ $line == UUID* ]]
+ then
+ uuid=`echo $line | sed -r 's/^.{5}//'`
+ fi
+done
+
+#Command execution should fail reporting that the bricks are not running.
+TEST ! $CLI volume heal $V0 info
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-884328.t b/tests/bugs/replicate/bug-884328.t
new file mode 100644
index 00000000000..acc8e542240
--- /dev/null
+++ b/tests/bugs/replicate/bug-884328.t
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST check_option_help_presence "cluster.quorum-type"
+TEST check_option_help_presence "cluster.quorum-count"
+cleanup;
diff --git a/tests/bugs/replicate/bug-886998.t b/tests/bugs/replicate/bug-886998.t
new file mode 100644
index 00000000000..bcac235ff09
--- /dev/null
+++ b/tests/bugs/replicate/bug-886998.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+# This tests that the replicate trash directory (.landfill) has the following
+# properties.
+# Note: This is for backward compatibility with 3.3 glusterfs.
+# In the latest releases this dir is present inside .glusterfs of the brick.
+# 1) lookup of the trash dir fails
+# 2) readdir does not show this directory
+# 3) Self-heal does not heal these directories.
+gfid1="0xc2e75dde97f346e7842d1076a8e699f8"
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --direct-io-mode=enable
+
+TEST mkdir $B0/${V0}1/.landfill
+TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}1/.landfill
+TEST mkdir $B0/${V0}0/.landfill
+TEST setfattr -n trusted.gfid -v $gfid1 $B0/${V0}0/.landfill
+
+TEST ! stat $M0/.landfill
+EXPECT "" echo $(ls -a $M0 | grep ".landfill")
+
+TEST rmdir $B0/${V0}0/.landfill
+#Force a conservative merge and it should not create .landfill
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}0/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/
+
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/
+
+EXPECT "" echo $(ls -a $M0 | grep ".landfill")
+TEST ! stat $B0/${V0}0/.landfill
+TEST stat $B0/${V0}1/.landfill
+
+#Test that the dir is not deleted even when the xattrs suggest deletion
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}0/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/
+
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}1/
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}1/
+
+EXPECT "" echo $(ls -a $M0 | grep ".landfill")
+TEST ! stat $B0/${V0}0/.landfill
+TEST stat $B0/${V0}1/.landfill
+cleanup;
diff --git a/tests/bugs/replicate/bug-888174.t b/tests/bugs/replicate/bug-888174.t
new file mode 100644
index 00000000000..8c70265513d
--- /dev/null
+++ b/tests/bugs/replicate/bug-888174.t
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This tests if flush and fsync wake up the delayed post-op or not.
+#If it is not woken up, INODELK from the next command waits
+#for post-op-delay secs. There would be a pending changelog even after the
+#command completes.
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1
+
+TEST $CLI volume set $V0 cluster.eager-lock on
+
+TEST $CLI volume set $V0 performance.flush-behind off
+EXPECT "off" volume_option $V0 performance.flush-behind
+
+TEST $CLI volume set $V0 cluster.post-op-delay-secs 3
+EXPECT "3" volume_option $V0 cluster.post-op-delay-secs
+
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0
+
+#Check that INODELK MAX latency is not in the order of seconds
+TEST gluster volume profile $V0 start
+for i in {1..5}
+do
+ echo hi > $M0/a
+done
+#Test if the MAX INODELK fop latency is of the order of seconds.
+inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
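+# (The egrep "[0-9]{7,}" keeps only values of at least 10^7; assuming the
+# profile reports latency in microseconds, that is roughly 10 seconds, so an
+# empty result means no INODELK stalled for seconds.)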
+
+TEST [ -z "$inodelk_max_latency" ]
+
+TEST dd of=$M0/a if=/dev/urandom bs=1024k count=10 conv=fsync
+#Check for no trace of pending changelog. Flush should make sure of it.
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_0/a trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_1/a trusted.afr.dirty
+
+
+dd of=$M0/a if=/dev/urandom bs=1024k count=1024 2>/dev/null &
+p=$!
+#trigger graph switches, tests for fsync not leaving any pending flags
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+
+kill -TERM $p
+#wait for dd to exit
+wait > /dev/null 2>&1
+
+#Goal is to check that no permanent FOOL (i.e. pending) changelog is left
+sleep 5
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_0/a trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/r2_1/a trusted.afr.dirty
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-913051.t b/tests/bugs/replicate/bug-913051.t
new file mode 100644
index 00000000000..6794995e6fe
--- /dev/null
+++ b/tests/bugs/replicate/bug-913051.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+cleanup;
+
+#Test that afr opens the file on bricks that were offline at the time of
+#open, after they come back online. This tests writev and readv triggering
+#open-fd-fix in afr.
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume start $V0
+TEST $GFS --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 --direct-io-mode=enable $M0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/a
+TEST touch $M0/dir/b
+echo abc > $M0/dir/b
+
+TEST wfd=`fd_available`
+TEST fd_open $wfd "w" $M0/dir/a
+TEST rfd=`fd_available`
+TEST fd_open $rfd "r" $M0/dir/b
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#attempt self-heal so that the files are created on brick-0
+
+TEST dd if=$M0/dir/a of=/dev/null bs=1024k
+TEST dd if=$M0/dir/b of=/dev/null bs=1024k
+
+#trigger writev for attempting open-fd-fix in afr
+TEST fd_write $wfd "open sesame"
+
+#trigger readv for attempting open-fd-fix in afr
+TEST fd_cat $rfd
+
+#The brick tracks the files by their gfid links, so resolve those paths first.
+realpatha=$(gf_get_gfid_backend_file_path $B0/${V0}0 "dir/a")
+realpathb=$(gf_get_gfid_backend_file_path $B0/${V0}0 "dir/b")
+EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpatha"
+EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpathb"
+
+TEST fd_close $wfd
+TEST fd_close $rfd
+cleanup;
diff --git a/tests/bugs/replicate/bug-916226.t b/tests/bugs/replicate/bug-916226.t
new file mode 100644
index 00000000000..893905f9a47
--- /dev/null
+++ b/tests/bugs/replicate/bug-916226.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}3
+TEST $CLI volume set $V0 cluster.eager-lock on
+TEST $CLI volume start $V0
+
+## Mount FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST mkdir $M0/dir{1..10};
+TEST touch $M0/dir{1..10}/files{1..10};
+
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4 $H0:$B0/${V0}5
+
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-918437-sh-mtime.t b/tests/bugs/replicate/bug-918437-sh-mtime.t
new file mode 100644
index 00000000000..6a194b14a9b
--- /dev/null
+++ b/tests/bugs/replicate/bug-918437-sh-mtime.t
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function get_mtime {
+ local f=$1
+ stat $f | grep Modify | awk '{print $2 $3}' | cut -f1 -d'.'
+}
+
+function file_exists {
+ if [ -f $1 ]; then echo "Y"; else echo "N"; fi
+}
+cleanup;
+
+## Tests if mtime is correct after self-heal.
+TEST glusterd
+TEST pidof glusterd
+TEST mkdir -p $B0/gfs0/brick0{1,2}
+TEST $CLI volume create $V0 replica 2 transport tcp $H0:$B0/gfs0/brick01 $H0:$B0/gfs0/brick02
+TEST $CLI volume set $V0 nfs.disable on
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --direct-io-mode=enable
+# file 'a' is healed from brick02 to brick01 whereas file 'b' is healed from
+# brick01 to brick02
+
+TEST cp -p /etc/passwd $M0/a
+TEST cp -p /etc/passwd $M0/b
+
+#Store mtimes before self-heals
+TEST modify_atstamp=$(get_mtime $B0/gfs0/brick02/a)
+TEST modify_btstamp=$(get_mtime $B0/gfs0/brick02/b)
+
+TEST $CLI volume stop $V0
+TEST gf_rm_file_and_gfid_link $B0/gfs0/brick01 a
+TEST gf_rm_file_and_gfid_link $B0/gfs0/brick02 b
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+#TODO remove these 2 lines once heal-full is fixed in v2.
+TEST stat $M0/a
+TEST stat $M0/b
+
+TEST gluster volume heal $V0 full
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick01/a
+EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick02/b
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
+
+size=`stat -c '%s' /etc/passwd`
+EXPECT $size stat -c '%s' $B0/gfs0/brick01/a
+
+TEST modify_atstamp1=$(get_mtime $B0/gfs0/brick01/a)
+TEST modify_atstamp2=$(get_mtime $B0/gfs0/brick02/a)
+EXPECT $modify_atstamp echo $modify_atstamp1
+EXPECT $modify_atstamp echo $modify_atstamp2
+
+TEST modify_btstamp1=$(get_mtime $B0/gfs0/brick01/b)
+TEST modify_btstamp2=$(get_mtime $B0/gfs0/brick02/b)
+EXPECT $modify_btstamp echo $modify_btstamp1
+EXPECT $modify_btstamp echo $modify_btstamp2
+cleanup;
diff --git a/tests/bugs/replicate/bug-921231.t b/tests/bugs/replicate/bug-921231.t
new file mode 100755
index 00000000000..81504612f63
--- /dev/null
+++ b/tests/bugs/replicate/bug-921231.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This test writes to the same file with 2 fds and tests that
+# cluster.eager-lock does not cause extra delay because of post-op-delay-secs
+cleanup;
+
+function write_to_file {
+        dd of=$M0/1 if=/dev/zero bs=1024k count=128 oflag=append >/dev/null 2>&1
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+TEST $CLI volume set $V0 cluster.eager-lock on
+TEST $CLI volume set $V0 post-op-delay-secs 3
+TEST $CLI volume set $V0 client-log-level DEBUG
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST $CLI volume set $V0 ensure-durability off
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+write_to_file &
+write_to_file &
+wait
+#Test if the MAX [F]INODELK fop latency is of the order of seconds.
+inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
+TEST [ -z "$inodelk_max_latency" ]
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-957877.t b/tests/bugs/replicate/bug-957877.t
new file mode 100644
index 00000000000..bcce7e3c9e7
--- /dev/null
+++ b/tests/bugs/replicate/bug-957877.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+kill_brick $V0 $H0 $B0/${V0}0
+TEST touch $M0/f1
+TEST setfattr -n "user.foo" -v "test" $M0/f1
+
+BRICK=$B0"/${V0}1"
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+# Wait for self-heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT '0' count_sh_entries $BRICK;
+
+TEST getfattr -n "user.foo" $B0/${V0}0/f1;
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-976800.t b/tests/bugs/replicate/bug-976800.t
new file mode 100644
index 00000000000..27f8b27619e
--- /dev/null
+++ b/tests/bugs/replicate/bug-976800.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This test checks that there are no open fds left on the brick
+# even after the file is closed on the mount. This particular test
+# uses dd with conv=fsync to exercise afr's fsync codepath
+cleanup;
+
+function is_fd_open {
+ local v=$1
+ local h=$2
+ local b=$3
+ local bpid=$(get_brick_pid $v $h $b)
+ ls -l /proc/$bpid/fd | grep -w "\-> $b/1"
+}
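+# is_fd_open greps the brick process's /proc fd table for a symlink to the
+# file, so any output means the brick still holds an fd after the client's
+# close.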
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 ensure-durability off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 flush-behind off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST dd of=$M0/1 if=/dev/zero bs=1k count=1 conv=fsync
+TEST ! is_fd_open $V0 $H0 $B0/${V0}0
+cleanup;
diff --git a/tests/bugs/replicate/bug-977797.t b/tests/bugs/replicate/bug-977797.t
new file mode 100755
index 00000000000..9a8f36c956c
--- /dev/null
+++ b/tests/bugs/replicate/bug-977797.t
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 open-behind off
+TEST $CLI volume set $V0 quick-read off
+TEST $CLI volume set $V0 read-ahead off
+TEST $CLI volume set $V0 write-behind off
+TEST $CLI volume set $V0 io-cache off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+
+TEST mkdir -p $M0/a
+TEST `echo "GLUSTERFS" > $M0/a/file`
+
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+
+TEST chown root $M0/a
+TEST chown root $M0/a/file
+TEST `echo "GLUSTER-FILE-SYSTEM" > $M0/a/file`
+TEST mkdir $M0/a/b
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0;
+
+
+
+TEST kill_brick $V0 $H0 $B0/$V0"2"
+
+TEST chmod 757 $M0/a
+TEST chmod 757 $M0/a/file
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1;
+
+#Trigger entry heal of $M0/a
+getfattr -n user.nosuchattr $M0/a
+dd if=$M0/a/file of=/dev/null bs=1024k
+#read fails, but heal is triggered.
+TEST [ $? -ne 0 ]
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"1"/a/file trusted.afr.$V0-client-0 "data"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"1"/a/file trusted.afr.$V0-client-1 "data"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"2"/a/file trusted.afr.$V0-client-0 "data"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"2"/a/file trusted.afr.$V0-client-1 "data"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"1"/a trusted.afr.$V0-client-0 "entry"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"1"/a trusted.afr.$V0-client-1 "entry"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"2"/a trusted.afr.$V0-client-0 "entry"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"2"/a trusted.afr.$V0-client-1 "entry"
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-978794.t b/tests/bugs/replicate/bug-978794.t
new file mode 100644
index 00000000000..8e43e74bf79
--- /dev/null
+++ b/tests/bugs/replicate/bug-978794.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+
+# This test opens 100 fds and triggers graph switches to verify that the
+# fsync done as part of a graph switch does not cause a crash.
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST touch $M0/{1..100}
+for i in {1..100}; do fd[$i]=`fd_available`; fd_open ${fd[$i]} 'w' $M0/$i; done
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{2,3}
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+TEST cat $M0/{1..100}
+for i in {1..100}; do fd_write ${fd[$i]} 'abc'; done
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{4,5}
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+for i in {1..100}; do fd_write ${fd[$i]} 'abc'; done
+TEST cat $M0/{1..100}
+cleanup
diff --git a/tests/bugs/replicate/bug-979365.t b/tests/bugs/replicate/bug-979365.t
new file mode 100755
index 00000000000..c09c7d51772
--- /dev/null
+++ b/tests/bugs/replicate/bug-979365.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This script checks that the ensure-durability option enables/disables the
+#fsyncs sent by afr
+cleanup;
+
+function num_fsyncs {
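+ # Count FSYNC fop lines in the profile output; zero lines means no
+ # fsyncs reached the bricks.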
+ $CLI volume profile $V0 info | grep -w FSYNC | wc -l
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 ensure-durability on
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd of=$M0/a if=/dev/zero bs=1024k count=10
+#fsyncs take a while to complete.
+sleep 5
+
+# There can be zero or more fsyncs, depending on the order
+# in which the writes reached the server, in turn deciding
+# whether they were treated as "appending" writes or not.
+
+TEST [[ $(num_fsyncs) -ge 0 ]]
+#Stop the volume to erase the profile info of old operations
+TEST $CLI volume profile $V0 stop
+TEST $CLI volume stop $V0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+#Disable ensure-durability now to disable fsyncs in afr.
+TEST $CLI volume set $V0 ensure-durability off
+TEST $CLI volume start $V0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST $CLI volume profile $V0 start
+TEST dd of=$M0/a if=/dev/zero bs=1024k count=10
+#fsyncs take a while to complete.
+sleep 5
+TEST [[ $(num_fsyncs) -eq 0 ]]
+
+cleanup
diff --git a/tests/bugs/replicate/bug-986905.t b/tests/bugs/replicate/bug-986905.t
new file mode 100755
index 00000000000..f4f7386ebc4
--- /dev/null
+++ b/tests/bugs/replicate/bug-986905.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This script checks if hardlinks that are created while a brick is down are
+#healed properly.
+
+cleanup;
+function get_inum {
+ ls -i $1 | awk '{print $1}'
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST touch $M0/a
+TEST ln $M0/a $M0/link_a
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST ls -l $M0
+inum=$(get_inum $B0/${V0}0/a)
+EXPECT "$inum" get_inum $B0/${V0}0/link_a
+cleanup
diff --git a/tests/bugs/replicate/issue-1254-prioritize-enospc.t b/tests/bugs/replicate/issue-1254-prioritize-enospc.t
new file mode 100644
index 00000000000..fab94b71b27
--- /dev/null
+++ b/tests/bugs/replicate/issue-1254-prioritize-enospc.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+function create_bricks {
+ TEST truncate -s 100M $B0/brick0
+ TEST truncate -s 100M $B0/brick1
+ TEST truncate -s 20M $B0/brick2
+ LO1=`SETUP_LOOP $B0/brick0`
+ TEST [ $? -eq 0 ]
+ TEST MKFS_LOOP $LO1
+ LO2=`SETUP_LOOP $B0/brick1`
+ TEST [ $? -eq 0 ]
+ TEST MKFS_LOOP $LO2
+ LO3=`SETUP_LOOP $B0/brick2`
+ TEST [ $? -eq 0 ]
+ TEST MKFS_LOOP $LO3
+ TEST mkdir -p $B0/${V0}0 $B0/${V0}1 $B0/${V0}2
+ TEST MOUNT_LOOP $LO1 $B0/${V0}0
+ TEST MOUNT_LOOP $LO2 $B0/${V0}1
+ TEST MOUNT_LOOP $LO3 $B0/${V0}2
+}
+
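+# Create files until the smallest brick (${V0}2, backed by the 20M loop
+# device) stops receiving new entries, i.e. until it runs out of space.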
+function create_files {
+ local i=1
+ while (true)
+ do
+ touch $M0/file$i
+ if [ -e $B0/${V0}2/file$i ];
+ then
+ ((i++))
+ else
+ break
+ fi
+ done
+}
+
+TESTS_EXPECTED_IN_LOOP=13
+
+#Arbiter volume: Check for ENOSPC when arbiter brick becomes full#
+TEST glusterd
+create_bricks
+TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+create_files
+TEST kill_brick $V0 $H0 $B0/${V0}1
+error1=$(touch $M0/file-1 2>&1)
+EXPECT "No space left on device" echo $error1
+error2=$(mkdir $M0/dir-1 2>&1)
+EXPECT "No space left on device" echo $error2
+error3=$((echo "Test" > $M0/file-3) 2>&1)
+EXPECT "No space left on device" echo $error3
+
+cleanup
+
+#Replica-3 volume: Check for ENOSPC when one of the bricks becomes full#
+#The third brick is kept smaller to simulate a disk-full scenario#
+TEST glusterd
+create_bricks
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+create_files
+TEST kill_brick $V0 $H0 $B0/${V0}1
+error1=$(touch $M0/file-1 2>&1)
+EXPECT "No space left on device" echo $error1
+error2=$(mkdir $M0/dir-1 2>&1)
+EXPECT "No space left on device" echo $error2
+error3=$( (cat /dev/zero > $M0/file1) 2>&1)
+EXPECT "No space left on device" echo $error3
+
+cleanup
diff --git a/tests/bugs/replicate/mdata-heal-no-xattrs.t b/tests/bugs/replicate/mdata-heal-no-xattrs.t
new file mode 100644
index 00000000000..d3b0c504c80
--- /dev/null
+++ b/tests/bugs/replicate/mdata-heal-no-xattrs.t
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+echo "Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+
+# Change permission on brick-0: simulates the case where there is metadata
+# mismatch but no pending xattrs. This brick will become the source for heal.
+TEST chmod +x $B0/$V0"0"/FILE
+
+# Add gfid to xattrop
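+# (hard-linking the file's gfid into the index/xattrop directory marks the
+# file as needing heal, so the self-heal daemon picks it up)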
+xattrop_b0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_b0`
+gfid_str_FILE=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/FILE))
+TEST ln $xattrop_b0/$base_entry_b0 $xattrop_b0/$gfid_str_FILE
+EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Brick-0 should contain xattrs blaming other 2 bricks.
+# The values will be zero because heal is over.
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/FILE
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}0/FILE
+
+# Brick-1 and Brick-2 must not contain any afr xattrs.
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-1 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}2/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-1 $B0/${V0}2/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}2/FILE
+
+# check permission bits.
+EXPECT '755' stat -c %a $B0/${V0}0/FILE
+EXPECT '755' stat -c %a $B0/${V0}1/FILE
+EXPECT '755' stat -c %a $B0/${V0}2/FILE
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
diff --git a/tests/bugs/replicate/ta-inode-refresh-read.t b/tests/bugs/replicate/ta-inode-refresh-read.t
new file mode 100644
index 00000000000..6dd6ff7f163
--- /dev/null
+++ b/tests/bugs/replicate/ta-inode-refresh-read.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# Test read transaction inode refresh logic for thin-arbiter.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../thin-arbiter.rc
+cleanup;
+TEST ta_create_brick_and_volfile brick0
+TEST ta_create_brick_and_volfile brick1
+TEST ta_create_ta_and_volfile ta
+TEST ta_start_brick_process brick0
+TEST ta_start_brick_process brick1
+TEST ta_start_ta_process ta
+
+TEST ta_create_mount_volfile brick0 brick1 ta
+# Set afr xlator options to choose brick0 as read-subvol.
+sed -i '/iam-self-heal-daemon/a \ option read-subvolume-index 0' $B0/mount.vol
+TEST [ $? -eq 0 ]
+sed -i '/iam-self-heal-daemon/a \ option choose-local false' $B0/mount.vol
+TEST [ $? -eq 0 ]
+
+TEST ta_start_mount_process $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_up_status $V0 $M0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "trusted.afr.patchy-ta-2" ls $B0/ta
+
+TEST touch $M0/FILE
+TEST ls $B0/brick0/FILE
+TEST ls $B0/brick1/FILE
+TEST ! ls $B0/ta/FILE
+TEST setfattr -n user.name -v ravi $M0/FILE
+
+# Remove gfid hardlink from brick0 which is the read-subvol for FILE.
+# This triggers inode refresh up on a getfattr and eventually calls
+# afr_ta_read_txn(). Without this patch, afr_ta_read_txn() will again query
+# brick0 causing getfattr to fail.
+TEST rm -f $(gf_get_gfid_backend_file_path $B0/brick0 FILE)
+TEST getfattr -n user.name $M0/FILE
+
+cleanup;
diff --git a/tests/bugs/rpc/bug-1043886.t b/tests/bugs/rpc/bug-1043886.t
new file mode 100755
index 00000000000..c1ea7a71e8b
--- /dev/null
+++ b/tests/bugs/rpc/bug-1043886.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+
+## Mount FUSE with caching disabled
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+## Mount volume as NFS export
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+
+# just a random uid/gid
+uid=22162
+gid=5845
+
+mkdir $N0/other;
+chown $uid:$gid $N0/other;
+
+TEST $CLI volume set $V0 server.root-squash on;
+TEST $CLI volume set $V0 server.anonuid $uid;
+TEST $CLI volume set $V0 server.anongid $gid;
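+# With root-squash on, fops issued as root reach the bricks as $uid:$gid,
+# i.e. the configured server.anonuid/server.anongid.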
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+# Create files and directories in the root of the glusterfs and nfs mounts.
+# The root directory is owned by root, so the correct behavior is EACCES,
+# since the fops are executed as the squashed (anonymous) user.
+touch $M0/file 2>/dev/null;
+TEST [ $? -ne 0 ]
+mkdir $M0/dir 2>/dev/null;
+TEST [ $? -ne 0 ]
+
+# Here file and directory creation should succeed: the 'other' directory is
+# owned by $uid:$gid, matching the configured server.anonuid and server.anongid.
+TEST touch $M0/other/file 2>/dev/null;
+TEST [ "$(stat -c %u:%g $N0/other/file)" = "$uid:$gid" ];
+TEST mkdir $M0/other/dir 2>/dev/null;
+TEST [ "$(stat -c %u:%g $N0/other/dir)" = "$uid:$gid" ];
+
+## Before killing daemon to avoid deadlocks
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/rpc/bug-847624.t b/tests/bugs/rpc/bug-847624.t
new file mode 100755
index 00000000000..fe8fc982887
--- /dev/null
+++ b/tests/bugs/rpc/bug-847624.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup
+
+#1
+TEST glusterd
+TEST pidof glusterd
+#3
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume set $V0 nfs.drc on
+TEST $CLI volume start $V0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock
+cd $N0
+#7
+TEST dbench -t 10 10
+TEST rm -rf $N0/*
+cd
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+#10
+TEST $CLI volume set $V0 nfs.drc-size 10000
+cleanup
diff --git a/tests/bugs/rpc/bug-884452.t b/tests/bugs/rpc/bug-884452.t
new file mode 100644
index 00000000000..c161a68190d
--- /dev/null
+++ b/tests/bugs/rpc/bug-884452.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/{1..10000}
+
+RUN_LS_LOOP_FILE="$M0/run-ls-loop"
+function ls-loop
+{
+ while [ -f $RUN_LS_LOOP_FILE ]; do
+ ls -lR $M0 1>/dev/null 2>&1
+ done;
+}
+
+touch $RUN_LS_LOOP_FILE
+ls-loop &
+
+function vol-status-loop
+{
+ for i in {1..1000}; do
+ $CLI volume status $V0 clients >/dev/null 2>&1
+ if [ $? -ne 0 ]; then
+ return 1
+ fi
+ done;
+
+ return 0
+}
+
+TEST vol-status-loop
+
+rm -f $RUN_LS_LOOP_FILE
+wait
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+cleanup;
diff --git a/tests/bugs/rpc/bug-921072.t b/tests/bugs/rpc/bug-921072.t
new file mode 100755
index 00000000000..ae7eb0101bc
--- /dev/null
+++ b/tests/bugs/rpc/bug-921072.t
@@ -0,0 +1,132 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+#1
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+TEST mount_nfs $H0:/$V0 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+# based on ip addresses (1-4)
+# case 1: allow only localhost ip
+TEST $CLI volume set $V0 nfs.rpc-auth-allow 127.0.0.1
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST mount_nfs localhost:/$V0 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+# case 2: allow only non-localhost ip
+TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.1.1
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+#11
+TEST ! mount_nfs localhost:/$V0 $N0 nolock
+TEST $CLI volume reset $V0 force
+TEST $CLI volume set $V0 nfs.disable off
+# case 3: reject only localhost ip
+TEST $CLI volume set $V0 nfs.rpc-auth-reject 127.0.0.1
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST ! mount_nfs localhost:/$V0 $N0 nolock
+
+# case 4: reject only non-localhost ip
+TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.168.1.1
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST mount_nfs localhost:/$V0 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+
+
+# NEED TO CHECK BOTH IP AND NAME BASED AUTH.
+# CASES WITH NFS.ADDR-NAMELOOKUP ON (5-12)
+TEST $CLI volume reset $V0 force
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume set $V0 nfs.addr-namelookup on
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+#20
+TEST mount_nfs localhost:/$V0 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+# case 5: allow only localhost
+TEST $CLI volume set $V0 nfs.rpc-auth-allow localhost
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST mount_nfs localhost:/$V0 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+# case 6: allow only somehost
+TEST $CLI volume set $V0 nfs.rpc-auth-allow somehost
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST ! mount_nfs localhost:/$V0 $N0 nolock
+
+# case 7: reject only localhost
+TEST $CLI volume reset $V0 force
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume set $V0 nfs.addr-namelookup on
+TEST $CLI volume set $V0 nfs.rpc-auth-reject localhost
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+#30
+TEST ! mount_nfs localhost:/$V0 $N0 nolock
+
+# case 8: reject only somehost
+TEST $CLI volume set $V0 nfs.rpc-auth-reject somehost
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST mount_nfs localhost:/$V0 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+# based on ip addresses: repeat of cases 1-4
+# case 9: allow only localhost ip
+TEST $CLI volume reset $V0 force
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume set $V0 nfs.addr-namelookup on
+TEST $CLI volume set $V0 nfs.rpc-auth-allow 127.0.0.1
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST mount_nfs localhost:/$V0 $N0 nolock
+TEST mkdir -p $N0/subdir
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+# case 10: allow a non-localhost ip
+TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.1.1
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+#41
+TEST ! mount_nfs localhost:/$V0 $N0 nolock
+
+# case 11: reject only localhost ip
+TEST $CLI volume reset $V0 force
+TEST $CLI volume set $V0 nfs.disable off
+TEST $CLI volume set $V0 nfs.addr-namelookup on
+TEST $CLI volume set $V0 nfs.rpc-auth-reject 127.0.0.1
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST ! mount_nfs localhost:/$V0 $N0 nolock
+TEST ! mount_nfs localhost:/$V0/subdir $N0 nolock
+
+# case 12: reject only non-localhost ip
+TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.168.1.1
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
+
+TEST mount_nfs localhost:/$V0 $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+TEST mount_nfs localhost:/$V0/subdir $N0 nolock
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+TEST $CLI volume stop --mode=script $V0
+#52
+TEST $CLI volume delete --mode=script $V0
+cleanup
diff --git a/tests/bugs/rpc/bug-954057.t b/tests/bugs/rpc/bug-954057.t
new file mode 100755
index 00000000000..40acdc2fdc7
--- /dev/null
+++ b/tests/bugs/rpc/bug-954057.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This script checks that the server.root-squash option works as expected
+
+# Note on re-reading $M0/new after enabling root-squash:
+# Since we have read it once, the file is present in various caches.
+# In order to actually fail on the second attempt we must:
+# 1) drop kernel cache
+# 2) make sure FUSE does not cache the entry. This is also
+# in the kernel, but not flushed by a failed umount.
+# Using $GFS enforces this because it sets --entry-timeout=0
+# 3) make sure reading the new permissions does not produce stale
+# information from the glusterfs metadata cache. Setting the volume
+# option performance.stat-prefetch off enforces that.
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST mkdir $M0/nobody
+grep nfsnobody /etc/passwd > /dev/null
+if [ $? -eq 1 ]; then
+usr=nobody
+grp=nobody
+else
+usr=nfsnobody
+grp=nfsnobody
+fi
+TEST chown $usr:$grp $M0/nobody
+TEST `echo "file" >> $M0/file`
+TEST cp $M0/file $M0/new
+TEST chmod 700 $M0/new
+TEST cat $M0/new
+
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 server.root-squash enable
+drop_cache $M0
+TEST ! mkdir $M0/other
+TEST mkdir $M0/nobody/other
+TEST cat $M0/file
+TEST ! cat $M0/new
+TEST `echo "nobody" >> $M0/nobody/file`
+
+#mount the client without root-squashing
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --no-root-squash=yes $M1
+TEST mkdir $M1/m1_dir
+TEST `echo "file" >> $M1/m1_file`
+TEST cp $M0/file $M1/new
+TEST chmod 700 $M1/new
+TEST cat $M1/new
+
+TEST $CLI volume set $V0 server.root-squash disable
+TEST mkdir $M0/other
+TEST cat $M0/new
+
+cleanup
diff --git a/tests/bugs/shard/bug-1245547.t b/tests/bugs/shard/bug-1245547.t
new file mode 100644
index 00000000000..3c46785d10f
--- /dev/null
+++ b/tests/bugs/shard/bug-1245547.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+#Create a file.
+TEST touch $M0/foo
+#Write some data into it.
+TEST `echo "abc" > $M0/foo`
+
+#This should ensure /.shard is created on the bricks.
+TEST stat $B0/${V0}0/.shard
+TEST stat $B0/${V0}1/.shard
+
+#Create a file 'bar' with holes.
+TEST touch $M0/bar
+TEST truncate -s 10G $M0/bar
+#Unlink on such a file should succeed.
+TEST unlink $M0/bar
+
+#Create a file 'baz' with holes.
+TEST touch $M0/baz
+TEST truncate -s 10G $M0/baz
+#A rename whose existing dest is sharded and has holes must succeed.
+TEST mv -f $M0/foo $M0/baz
+
+cleanup
diff --git a/tests/bugs/shard/bug-1248887.t b/tests/bugs/shard/bug-1248887.t
new file mode 100644
index 00000000000..2c51f7ce0e8
--- /dev/null
+++ b/tests/bugs/shard/bug-1248887.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+#Create a file.
+TEST touch $M0/foo
+#Write some data into it.
+TEST `echo "abc" > $M0/foo`
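+# 0x0000000000400000 = 4MB, i.e. the configured shard-block-size. The leading
+# 8 bytes of the file-size xattr hold the logical size in bytes: 4 ("abc\n")
+# after this write, 8 after the append below.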
+EXPECT "0000000000400000" get_hex_xattr trusted.glusterfs.shard.block-size $B0/${V0}0/foo
+EXPECT "0000000000000004000000000000000000000000000000010000000000000000" get_hex_xattr trusted.glusterfs.shard.file-size $B0/${V0}0/foo
+EXPECT "0000000000400000" get_hex_xattr trusted.glusterfs.shard.block-size $B0/${V0}1/foo
+EXPECT "0000000000000004000000000000000000000000000000010000000000000000" get_hex_xattr trusted.glusterfs.shard.file-size $B0/${V0}1/foo
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST `echo "abc" >> $M0/foo`
+EXPECT "0000000000400000" get_hex_xattr trusted.glusterfs.shard.block-size $B0/${V0}1/foo
+EXPECT "0000000000000008000000000000000000000000000000010000000000000000" get_hex_xattr trusted.glusterfs.shard.file-size $B0/${V0}1/foo
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+EXPECT "0000000000400000" get_hex_xattr trusted.glusterfs.shard.block-size $B0/${V0}0/foo
+EXPECT "0000000000000008000000000000000000000000000000010000000000000000" get_hex_xattr trusted.glusterfs.shard.file-size $B0/${V0}0/foo
+EXPECT "0000000000400000" get_hex_xattr trusted.glusterfs.shard.block-size $B0/${V0}1/foo
+EXPECT "0000000000000008000000000000000000000000000000010000000000000000" get_hex_xattr trusted.glusterfs.shard.file-size $B0/${V0}1/foo
+
+cleanup;
diff --git a/tests/bugs/shard/bug-1250855.t b/tests/bugs/shard/bug-1250855.t
new file mode 100644
index 00000000000..b8bc3b42513
--- /dev/null
+++ b/tests/bugs/shard/bug-1250855.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=40
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST mkdir $M0/dir
+
+for i in {1..20}; do
+ TEST_IN_LOOP touch $M0/dir/$i;
+done
+
+TEST $CLI volume set $V0 features.shard on
+
+TEST ls $M0
+TEST ls $M0/dir
+
+for i in {1..10}; do
+ TEST_IN_LOOP mv $M0/dir/$i $M0/dir/$i-sharded;
+done
+
+for i in {11..20}; do
+ TEST_IN_LOOP unlink $M0/dir/$i;
+done
+
+cleanup;
diff --git a/tests/bugs/shard/bug-1251824.t b/tests/bugs/shard/bug-1251824.t
new file mode 100644
index 00000000000..d81685d01de
--- /dev/null
+++ b/tests/bugs/shard/bug-1251824.t
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../common-utils.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST useradd -M test_user 2>/dev/null
+
+# Create 4 files and a directory as root.
+TEST touch $M0/foo
+TEST touch $M0/bar
+TEST touch $M0/baz
+TEST touch $M0/qux
+TEST mkdir $M0/dir
+
+# Change ownership to non-root on foo and bar.
+TEST chown test_user:test_user $M0/foo
+TEST chown test_user:test_user $M0/bar
+
+# Write 6M of data on foo as non-root, 2M overflowing into block-1.
+TEST run_cmd_as_user test_user "dd if=/dev/zero of=$M0/foo bs=1M count=6"
+
+# Ensure owner and group are root on the block-1 shard.
+gfid_foo=$(get_gfid_string $M0/foo)
+
+EXPECT "root" echo `find $B0 -name $gfid_foo.1 | xargs stat -c %U`
+EXPECT "root" echo `find $B0 -name $gfid_foo.1 | xargs stat -c %G`
+
+#Ensure /.shard is owned by root.
+EXPECT "root" echo `find $B0/${V0}0 -name .shard | xargs stat -c %U`
+EXPECT "root" echo `find $B0/${V0}0 -name .shard | xargs stat -c %G`
+EXPECT "root" echo `find $B0/${V0}1 -name .shard | xargs stat -c %U`
+EXPECT "root" echo `find $B0/${V0}1 -name .shard | xargs stat -c %G`
+EXPECT "root" echo `find $B0/${V0}2 -name .shard | xargs stat -c %U`
+EXPECT "root" echo `find $B0/${V0}2 -name .shard | xargs stat -c %G`
+EXPECT "root" echo `find $B0/${V0}3 -name .shard | xargs stat -c %U`
+EXPECT "root" echo `find $B0/${V0}3 -name .shard | xargs stat -c %G`
+
+# Write 6M of data on bar as root.
+TEST dd if=/dev/zero of=$M0/bar bs=1M count=6
+
+# Ensure owner and group are root on the block-1 shard.
+gfid_bar=$(get_gfid_string $M0/bar)
+
+EXPECT "root" echo `find $B0 -name $gfid_bar.1 | xargs stat -c %U`
+EXPECT "root" echo `find $B0 -name $gfid_bar.1 | xargs stat -c %G`
+
+# Write 6M of data on baz as root.
+TEST dd if=/dev/zero of=$M0/baz bs=1M count=6
+
+gfid_baz=$(get_gfid_string $M0/baz)
+
+# Ensure owner and group are root on the block-1 shard.
+EXPECT "root" echo `find $B0 -name $gfid_baz.1 | xargs stat -c %U`
+EXPECT "root" echo `find $B0 -name $gfid_baz.1 | xargs stat -c %G`
+
+# Test to ensure that an unlink from an unauthorized user does not lead to
+# only the shards under /.shard getting unlinked while the unlink of the base
+# file fails with EPERM/EACCES.
+
+TEST ! run_cmd_as_user test_user "unlink $M0/baz"
+TEST find $B0/*/.shard/$gfid_baz.1
+
+# Test to ensure that a rename over an existing sharded dest file, issued by
+# an unauthorized user, does not lead to only the shards under /.shard
+# getting unlinked while the rename of the base file fails with EPERM/EACCES.
+
+TEST ! run_cmd_as_user test_user "mv -f $M0/qux $M0/baz"
+TEST find $B0/*/.shard/$gfid_baz.1
+TEST stat $M0/qux
+
+# Shard translator executes steps in the following order while doing a truncate
+# to a lower size:
+# 1) unlinking shards under /.shard first with frame->root->{uid,gid} being 0,
+# 2) truncate the original file by the right amount.
+# The following two tests ensure that a truncate attempt from an
+# unauthorized user doesn't result in only the shards under /.shard getting
+# removed (since they're performed as root) while step 2) above fails,
+# leaving the file in an inconsistent state.
+
+TEST ! run_cmd_as_user test_user "truncate -s 1M $M0/baz"
+TEST find $B0/*/.shard/$gfid_baz.1
+
+# Perform a cp as a non-root user. This triggers readv() on the first shard
+# of "foo" under /.shard. The read must not fail, provided the shard
+# translator correctly sets frame->root->{uid,gid} to 0 before reading the
+# shard, since it is owned by root.
+TEST chown test_user:test_user $M0/dir
+TEST run_cmd_as_user test_user "cp $M0/foo $M0/dir/quux"
+
+md5sum_foo=$(md5sum $M0/foo | awk '{print $1}')
+EXPECT "$md5sum_foo" echo `md5sum $M0/dir/quux | awk '{print $1}'`
+
+userdel test_user
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/shard/bug-1256580.t b/tests/bugs/shard/bug-1256580.t
new file mode 100644
index 00000000000..279fcc54e48
--- /dev/null
+++ b/tests/bugs/shard/bug-1256580.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file
+
+# Create "file" with holes.
+TEST truncate -s 6M $M0/dir/file
+EXPECT '6291456' stat -c %s $M0/dir/file
+
+# Perform writes that do not cross the 6M boundary
+TEST dd if=/dev/zero of=$M0/dir/file bs=1024 seek=3072 count=2048 conv=notrunc
+
+# Ensure that the file size is 6M (as opposed to 8M that would appear in the
+# presence of this bug).
+EXPECT '6291456' stat -c %s $M0/dir/file
+
+#Extend the write beyond EOF such that it again creates a hole of 1M size
+TEST dd if=/dev/zero of=$M0/dir/file bs=1024 seek=7168 count=2048 conv=notrunc
+
+# Ensure that the file size is not greater than 9M.
+EXPECT '9437184' stat -c %s $M0/dir/file
+cleanup
diff --git a/tests/bugs/shard/bug-1258334.t b/tests/bugs/shard/bug-1258334.t
new file mode 100644
index 00000000000..94ed822aae8
--- /dev/null
+++ b/tests/bugs/shard/bug-1258334.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/foo
+TEST touch $M0/dir/bar
+TEST touch $M0/dir/new
+
+TEST truncate -s 14M $M0/dir/foo
+TEST truncate -s 14M $M0/dir/bar
+
+# Perform writes that fall on the 2nd block of "foo" (counting from 0)
+TEST dd if=/dev/zero of=$M0/dir/foo bs=1024 seek=10240 count=2048 conv=notrunc
+
+# Perform writes that fall on the 2nd block of "bar" (counting from 0)
+TEST dd if=/dev/zero of=$M0/dir/bar bs=1024 seek=10240 count=2048 conv=notrunc
+
+# Now unlink "foo". If the bug exists, it should fail with EINVAL.
+TEST unlink $M0/dir/foo
+
+# Now rename "new" to "bar". If the bug exists, it should fail with EINVAL.
+TEST mv -f $M0/dir/new $M0/dir/bar
+
+TEST dd if=/dev/zero of=$M0/dir/new bs=1024 count=5120
+
+# Now test that this fix does not break unlink of files without holes
+TEST unlink $M0/dir/new
+
+cleanup
diff --git a/tests/bugs/shard/bug-1259651.t b/tests/bugs/shard/bug-1259651.t
new file mode 100644
index 00000000000..72856fdbaad
--- /dev/null
+++ b/tests/bugs/shard/bug-1259651.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file_plain
+
+# Create "file_plain" with holes.
+TEST truncate -s 12M $M0/dir/file_plain
+
+# Perform writes on it that would create holes.
+TEST dd if=/dev/zero of=$M0/dir/file_plain bs=1024 seek=10240 count=1024 conv=notrunc
+
+md5sum_file_plain=$(md5sum $M0/dir/file_plain | awk '{print $1}')
+
+# Now enable sharding on the volume.
+TEST $CLI volume set $V0 features.shard on
+
+# Create a sharded file called "file_sharded"
+TEST touch $M0/dir/file_sharded
+
+# Truncate it to make it sparse
+TEST truncate -s 12M $M0/dir/file_sharded
+
+# Perform writes on it that would create holes in block-0 and block-1.
+TEST dd if=/dev/zero of=$M0/dir/file_sharded bs=1024 seek=10240 count=1024 conv=notrunc
+
+# If this bug is fixed, md5sum of file_sharded and file_plain should be same.
+EXPECT "$md5sum_file_plain" echo `md5sum $M0/dir/file_sharded | awk '{print $1}'`
+
+cleanup
diff --git a/tests/bugs/shard/bug-1260637.t b/tests/bugs/shard/bug-1260637.t
new file mode 100644
index 00000000000..21008ee19dd
--- /dev/null
+++ b/tests/bugs/shard/bug-1260637.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Create a file.
+TEST touch $M0/foo
+
+# Check that the shard xattrs are set in the backend.
+TEST getfattr -n trusted.glusterfs.shard.block-size $B0/${V0}0/foo
+TEST getfattr -n trusted.glusterfs.shard.file-size $B0/${V0}0/foo
+
+# Verify that shard xattrs are not exposed on the mount.
+TEST ! getfattr -n trusted.glusterfs.shard.block-size $M0/foo
+TEST ! getfattr -n trusted.glusterfs.shard.file-size $M0/foo
+
+# Verify that shard xattrs cannot be set from the mount.
+TEST ! setfattr -n trusted.glusterfs.shard.block-size -v "123" $M0/foo
+TEST ! setfattr -n trusted.glusterfs.shard.file-size -v "123" $M0/foo
+
+# Verify that shard xattrs cannot be removed from the mount.
+TEST ! setfattr -x trusted.glusterfs.shard.block-size $M0/foo
+TEST ! setfattr -x trusted.glusterfs.shard.file-size $M0/foo
+
+# Verify that shard xattrs are not listed when listxattr is triggered.
+TEST ! "getfattr -d -m . $M0/foo | grep shard"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/bug-1261773.t b/tests/bugs/shard/bug-1261773.t
new file mode 100644
index 00000000000..46d5a8b91c9
--- /dev/null
+++ b/tests/bugs/shard/bug-1261773.t
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST check_option_help_presence "features.shard"
+TEST check_option_help_presence "features.shard-block-size"
+
+cleanup
diff --git a/tests/bugs/shard/bug-1272986.t b/tests/bugs/shard/bug-1272986.t
new file mode 100644
index 00000000000..66e896ad0c4
--- /dev/null
+++ b/tests/bugs/shard/bug-1272986.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume start $V0
+
+# $M0 is where the reads will be done and $M1 is where files will be created,
+# written to, etc.
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M1
+
+# Write some data into a file, such that its size crosses the shard block size.
+TEST dd if=/dev/urandom of=$M1/file bs=1M count=5 conv=notrunc oflag=direct
+
+md5sum1_reader=$(md5sum $M0/file | awk '{print $1}')
+
+EXPECT "$md5sum1_reader" echo `md5sum $M1/file | awk '{print $1}'`
+
+# Append some more data into the file.
+TEST dd if=/dev/urandom of=$M1/file bs=256k count=1 conv=notrunc oflag=direct
+
+md5sum2_reader=$(dd if=$M0/file iflag=direct bs=256k| md5sum | awk '{print $1}')
+
+# Test to see if the reader refreshes its cache correctly as part of the reads
+# triggered through md5sum. If it does, then the md5sum on the reader and writer
+# must match.
+EXPECT "$md5sum2_reader" echo `md5sum $M1/file | awk '{print $1}'`
+
+cleanup
diff --git a/tests/bugs/shard/bug-1342298.t b/tests/bugs/shard/bug-1342298.t
new file mode 100644
index 00000000000..ecd7720e8db
--- /dev/null
+++ b/tests/bugs/shard/bug-1342298.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+echo a > $M0/a
+TEST dd if=$M0/a of=/dev/null bs=4096 count=1 iflag=direct
+
+cleanup;
diff --git a/tests/bugs/shard/bug-1468483.t b/tests/bugs/shard/bug-1468483.t
new file mode 100644
index 00000000000..e462b8d54d5
--- /dev/null
+++ b/tests/bugs/shard/bug-1468483.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../common-utils.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 16MB
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST dd if=/dev/zero conv=fsync of=$M0/foo bs=1M count=100
+
+#This should ensure /.shard is created on the bricks.
+TEST stat $B0/${V0}0/.shard
+
+gfid_foo=$(get_gfid_string $M0/foo)
+
+TEST stat $B0/${V0}0/.shard/$gfid_foo.1
+TEST stat $B0/${V0}0/.shard/$gfid_foo.2
+TEST stat $B0/${V0}0/.shard/$gfid_foo.3
+TEST stat $B0/${V0}0/.shard/$gfid_foo.4
+TEST stat $B0/${V0}0/.shard/$gfid_foo.5
+TEST stat $B0/${V0}0/.shard/$gfid_foo.6
+
+# For a file with 7 shards, there should be 7 fsyncs on the brick. Without this
+# fix, I was seeing only 1 fsync (on the base shard alone).
+
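+# Field 8 of the profile output line is the number of calls for the fop.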
+EXPECT "7" echo `$CLI volume profile $V0 info incremental | grep -w FSYNC | awk '{print $8}'`
+
+useradd -M test_user 2>/dev/null
+
+TEST touch $M0/bar
+
+# Change ownership to non-root on bar.
+TEST chown test_user:test_user $M0/bar
+
+TEST $CLI volume profile $V0 stop
+TEST $CLI volume profile $V0 start
+
+# Write 100M of data on bar as non-root.
+TEST run_cmd_as_user test_user "dd if=/dev/zero conv=fsync of=$M0/bar bs=1M count=100"
+
+EXPECT "7" echo `$CLI volume profile $V0 info incremental | grep -w FSYNC | awk '{print $8}'`
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+userdel test_user
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/bug-1488546.t b/tests/bugs/shard/bug-1488546.t
new file mode 100644
index 00000000000..60480dc55e5
--- /dev/null
+++ b/tests/bugs/shard/bug-1488546.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 md-cache-timeout 60
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST dd if=/dev/zero of=$M0/file bs=1M count=20
+TEST ln $M0/file $M0/linkey
+
+EXPECT "20971520" stat -c %s $M0/linkey
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/shard/bug-1568521-EEXIST.t b/tests/bugs/shard/bug-1568521-EEXIST.t
new file mode 100644
index 00000000000..2f9f165aa63
--- /dev/null
+++ b/tests/bugs/shard/bug-1568521-EEXIST.t
@@ -0,0 +1,91 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+function get_file_count {
+ ls $1* | wc -l
+}
+
+FILE_COUNT_TIME=5
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+# Unlink a temporary file to trigger creation of .remove_me
+TEST touch $M0/tmp
+TEST unlink $M0/tmp
+
+TEST stat $B0/${V0}0/.shard/.remove_me
+TEST stat $B0/${V0}1/.shard/.remove_me
+
+TEST dd if=/dev/zero of=$M0/dir/file bs=1024 count=9216
+gfid_file=$(get_gfid_string $M0/dir/file)
+
+# Create marker file from the backend to simulate ENODATA.
+touch $B0/${V0}0/.shard/.remove_me/$gfid_file
+touch $B0/${V0}1/.shard/.remove_me/$gfid_file
+
+# Set the block and file size to incorrect values of 64MB and 5MB to simulate
+# the "stale xattrs" case, and confirm that the correct values are set when
+# the actual unlink takes place.
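+# (0x0000000004000000 = 64MB; the leading 8 bytes of the file-size value,
+# 0x0000000000500000, = 5MB)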
+
+TEST setfattr -n trusted.glusterfs.shard.block-size -v 0x0000000004000000 $B0/${V0}0/.shard/.remove_me/$gfid_file
+TEST setfattr -n trusted.glusterfs.shard.block-size -v 0x0000000004000000 $B0/${V0}1/.shard/.remove_me/$gfid_file
+
+TEST setfattr -n trusted.glusterfs.shard.file-size -v 0x0000000000500000000000000000000000000000000000000000000000000000 $B0/${V0}0/.shard/.remove_me/$gfid_file
+TEST setfattr -n trusted.glusterfs.shard.file-size -v 0x0000000000500000000000000000000000000000000000000000000000000000 $B0/${V0}1/.shard/.remove_me/$gfid_file
+
+# Sleep for 2 seconds to prevent posix_gfid_heal() from believing marker file is "fresh" and failing lookup with ENOENT
+sleep 2
+
+TEST unlink $M0/dir/file
+TEST ! stat $B0/${V0}0/dir/file
+TEST ! stat $B0/${V0}1/dir/file
+
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_file
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_file
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_file
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/$gfid_file
+
+##############################
+### Repeat test for rename ###
+##############################
+
+TEST touch $M0/src
+TEST dd if=/dev/zero of=$M0/dir/dst bs=1024 count=9216
+gfid_dst=$(get_gfid_string $M0/dir/dst)
+
+# Create marker file from the backend to simulate ENODATA.
+touch $B0/${V0}0/.shard/.remove_me/$gfid_dst
+touch $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+# Set the block and file size to incorrect values of 64MB and 5MB to simulate
+# the "stale xattrs" case, and confirm that the correct values are set when
+# the actual unlink takes place.
+
+TEST setfattr -n trusted.glusterfs.shard.block-size -v 0x0000000004000000 $B0/${V0}0/.shard/.remove_me/$gfid_dst
+TEST setfattr -n trusted.glusterfs.shard.block-size -v 0x0000000004000000 $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+TEST setfattr -n trusted.glusterfs.shard.file-size -v 0x0000000000500000000000000000000000000000000000000000000000000000 $B0/${V0}0/.shard/.remove_me/$gfid_dst
+TEST setfattr -n trusted.glusterfs.shard.file-size -v 0x0000000000500000000000000000000000000000000000000000000000000000 $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+# Sleep for 2 seconds to prevent posix_gfid_heal() from believing marker file is "fresh" and failing lookup with ENOENT
+sleep 2
+
+TEST mv -f $M0/src $M0/dir/dst
+TEST ! stat $B0/${V0}0/src
+TEST ! stat $B0/${V0}1/src
+
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/$gfid_dst
+
+cleanup
diff --git a/tests/bugs/shard/bug-1568521.t b/tests/bugs/shard/bug-1568521.t
new file mode 100644
index 00000000000..167fb635ac8
--- /dev/null
+++ b/tests/bugs/shard/bug-1568521.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+
+function delete_files {
+ local mountpoint=$1;
+ local value=$2
+ for i in {1..500}; do
+ unlink $mountpoint/file-$i 2>/dev/null 1>/dev/null
+ if [ $? -eq 0 ]; then
+ echo $value >> $B0/output.txt
+ fi
+ done
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 shard-block-size 4MB
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M1
+
+for i in {1..500}; do
+ dd if=/dev/urandom of=$M0/file-$i bs=1M count=2
+done
+
+for i in {1..500}; do
+ stat $M1/file-$i > /dev/null
+done
+
+delete_files $M0 0 &
+delete_files $M1 1 &
+wait
+
+success1=$(grep 0 $B0/output.txt | wc -l);
+success2=$(grep 1 $B0/output.txt | wc -l);
+
+echo "Success1 is $success1";
+echo "Success2 is $success2";
+
+success_total=$((success1 + success2));
+
+EXPECT 500 echo $success_total
+
+cleanup
diff --git a/tests/bugs/shard/bug-1605056-2.t b/tests/bugs/shard/bug-1605056-2.t
new file mode 100644
index 00000000000..a9c10fec3ea
--- /dev/null
+++ b/tests/bugs/shard/bug-1605056-2.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 features.shard-lru-limit 25
+TEST $CLI volume set $V0 performance.write-behind off
+
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Perform a write that would cause 25 shards to be created under .shard
+TEST dd if=/dev/zero of=$M0/foo bs=1M count=104
+
+# Write into another file bar to ensure all of foo's shards are evicted from the lru list of $M0
+TEST dd if=/dev/zero of=$M0/bar bs=1M count=104
+
+# Delete foo from $M0. If there's a bug, the mount will crash.
+TEST unlink $M0/foo
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/bug-1605056.t b/tests/bugs/shard/bug-1605056.t
new file mode 100644
index 00000000000..c2329ea79f8
--- /dev/null
+++ b/tests/bugs/shard/bug-1605056.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+SHARD_COUNT_TIME=5
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 features.shard-lru-limit 25
+TEST $CLI volume set $V0 performance.write-behind off
+
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M1
+
+# Perform a write that would cause 25 shards to be created under .shard
+TEST dd if=/dev/zero of=$M0/foo bs=1M count=104
+
+# Read the file from $M1, indirectly filling up the lru list.
+TEST `cat $M1/foo > /dev/null`
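+# foo spans 26 shards (104MB / 4MB); with shard-lru-limit set to 25, the
+# statedump of $M1 should show only 25 shard inodes retained in the lru list.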
+statedump=$(generate_mount_statedump $V0 $M1)
+sleep 1
+EXPECT "25" echo $(grep "inode-count" $statedump | cut -f2 -d'=' | tail -1)
+rm -f $statedump
+
+# Delete foo from $M0.
+TEST unlink $M0/foo
+
+# Send stat on foo from $M1 to force $M1 to "forget" the inode associated
+# with foo. The ghost shards associated with "foo" remain in $M1's lru list.
+TEST ! stat $M1/foo
+
+# Force the ghost shards of "foo" out of the lru list by looking up more
+# shards through I/O on a file named "bar" from $M1. This would crash if the
+# base inode had been destroyed by now.
+
+TEST dd if=/dev/zero of=$M1/bar bs=1M count=104
+
+###############################################
+#### Now for some inode ref-leak tests ... ####
+###############################################
+
+# Expect there to be 29 active inodes - 26 belonging to "bar", 1 for .shard,
+# 1 for .shard/.remove_me and 1 for '/'
+EXPECT_WITHIN $SHARD_COUNT_TIME `expr 26 + 3` get_mount_active_size_value $V0 $M1
+
+TEST rm -f $M1/bar
+EXPECT_WITHIN $SHARD_COUNT_TIME 3 get_mount_active_size_value $V0 $M1
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/bug-1669077.t b/tests/bugs/shard/bug-1669077.t
new file mode 100644
index 00000000000..8d3a67a36be
--- /dev/null
+++ b/tests/bugs/shard/bug-1669077.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+SHARD_COUNT_TIME=5
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 features.shard-lru-limit 25
+
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# If the bug still exists, the client should crash during the fallocate below
+TEST fallocate -l 200M $M0/foo
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/bug-1696136-lru-limit-equals-deletion-rate.t b/tests/bugs/shard/bug-1696136-lru-limit-equals-deletion-rate.t
new file mode 100644
index 00000000000..3e4a65af19a
--- /dev/null
+++ b/tests/bugs/shard/bug-1696136-lru-limit-equals-deletion-rate.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fallocate.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 features.shard-lru-limit 120
+TEST $CLI volume set $V0 features.shard-deletion-rate 120
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST build_tester $(dirname $0)/bug-1696136.c -lgfapi -Wall -O2
+
+# Create a file
+TEST touch $M0/file1
+
+# Fallocate a 512M (536870912-byte) file. This makes sure the number of participant shards is > lru-limit
+TEST $(dirname $0)/bug-1696136 $H0 $V0 "0" "0" "536870912" /file1 `gluster --print-logdir`/glfs-$V0.log
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+rm -f $(dirname $0)/bug-1696136
+
+cleanup
diff --git a/tests/bugs/shard/bug-1696136.c b/tests/bugs/shard/bug-1696136.c
new file mode 100644
index 00000000000..cb650535b09
--- /dev/null
+++ b/tests/bugs/shard/bug-1696136.c
@@ -0,0 +1,122 @@
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+enum fallocate_flag {
+ TEST_FALLOCATE_NONE,
+ TEST_FALLOCATE_KEEP_SIZE,
+ TEST_FALLOCATE_ZERO_RANGE,
+ TEST_FALLOCATE_PUNCH_HOLE,
+ TEST_FALLOCATE_MAX,
+};
+
+int
+get_fallocate_flag(int opcode)
+{
+ int ret = 0;
+
+ switch (opcode) {
+ case TEST_FALLOCATE_NONE:
+ ret = 0;
+ break;
+ case TEST_FALLOCATE_KEEP_SIZE:
+ ret = FALLOC_FL_KEEP_SIZE;
+ break;
+ case TEST_FALLOCATE_ZERO_RANGE:
+ ret = FALLOC_FL_ZERO_RANGE;
+ break;
+ case TEST_FALLOCATE_PUNCH_HOLE:
+ ret = FALLOC_FL_PUNCH_HOLE;
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 1;
+ int opcode = -1;
+ off_t offset = 0;
+ size_t len = 0;
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd = NULL;
+
+ if (argc != 8) {
+ fprintf(stderr,
+ "Syntax: %s <host> <volname> <opcode> <offset> <len> "
+ "<file-path> <log-file>\n",
+ argv[0]);
+ return 1;
+ }
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
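+    /* 24007 is the default glusterd management port. */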
+ ret = glfs_set_volfile_server(fs, "tcp", argv[1], 24007);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_volfile_server: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, argv[7], 7);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_logging: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+ goto out;
+ }
+
+ opcode = atoi(argv[3]);
+ opcode = get_fallocate_flag(opcode);
+ if (opcode < 0) {
+ fprintf(stderr, "get_fallocate_flag: invalid flag \n");
+ goto out;
+ }
+
+ /* Note that off_t is signed but size_t isn't. */
+ offset = strtol(argv[4], NULL, 10);
+ len = strtoul(argv[5], NULL, 10);
+
+ fd = glfs_open(fs, argv[6], O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_open: returned NULL\n");
+ goto out;
+ }
+
+ ret = glfs_fallocate(fd, opcode, offset, len);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_fallocate: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_unlink(fs, argv[6]);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_unlink: returned %d\n", ret);
+ goto out;
+ }
+ /* Sleep for 3s to give enough time for background deletion to complete
+ * during which if the bug exists, the process will crash.
+ */
+ sleep(3);
+ ret = 0;
+
+out:
+ if (fd)
+ glfs_close(fd);
+ glfs_fini(fs);
+ return ret;
+}
diff --git a/tests/bugs/shard/bug-1696136.t b/tests/bugs/shard/bug-1696136.t
new file mode 100644
index 00000000000..b6dc858f083
--- /dev/null
+++ b/tests/bugs/shard/bug-1696136.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fallocate.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 features.shard-lru-limit 120
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST build_tester $(dirname $0)/bug-1696136.c -lgfapi -Wall -O2
+
+# Create a file
+TEST touch $M0/file1
+
+# Fallocate a 512MB file. At a 4MB shard-block-size this spans 128 blocks, so the number of participant shards exceeds the lru-limit of 120
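+# Helper args: <host> <volname> <opcode> <offset> <len> <file-path> <log-file>;
+# opcode "0" selects a plain fallocate with no FALLOC_FL_* flag.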
+TEST $(dirname $0)/bug-1696136 $H0 $V0 "0" "0" "536870912" /file1 `gluster --print-logdir`/glfs-$V0.log
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+rm -f $(dirname $0)/bug-1696136
+
+cleanup
diff --git a/tests/bugs/shard/bug-1705884.t b/tests/bugs/shard/bug-1705884.t
new file mode 100644
index 00000000000..f6e50376a58
--- /dev/null
+++ b/tests/bugs/shard/bug-1705884.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fallocate.rc
+
+cleanup
+
+require_fallocate -l 1m $M0/file
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST fallocate -l 200M $M0/foo
+EXPECT `echo "$(( ( 200 * 1024 * 1024 ) / 512 ))"` stat -c %b $M0/foo
+TEST truncate -s 0 $M0/foo
+EXPECT "0" stat -c %b $M0/foo
+TEST fallocate -l 100M $M0/foo
+EXPECT `echo "$(( ( 100 * 1024 * 1024 ) / 512 ))"` stat -c %b $M0/foo
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/bug-1738419.t b/tests/bugs/shard/bug-1738419.t
new file mode 100644
index 00000000000..8d0a31d9754
--- /dev/null
+++ b/tests/bugs/shard/bug-1738419.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 network.remote-dio off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.strict-o-direct on
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST dd if=/dev/zero of=$M0/metadata bs=501 count=1
+
+EXPECT "501" echo $("dd" if=$M0/metadata bs=4096 count=1 of=/dev/null iflag=direct 2>&1 | awk '/bytes/ {print $1}')
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/bug-shard-discard.c b/tests/bugs/shard/bug-shard-discard.c
new file mode 100644
index 00000000000..6fa93fb89d1
--- /dev/null
+++ b/tests/bugs/shard/bug-shard-discard.c
@@ -0,0 +1,70 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ off_t off = 0;
+ size_t len = 0;
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd = NULL;
+
+ if (argc != 7) {
+ fprintf(
+ stderr,
+ "Syntax: %s <host> <volname> <file-path> <off> <len> <log-file>\n",
+ argv[0]);
+ return 1;
+ }
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", argv[1], 24007);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_volfile_server: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, argv[6], 7);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_logging: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+ goto out;
+ }
+
+ fd = glfs_open(fs, argv[3], O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_open: returned NULL\n");
+ goto out;
+ }
+
+ /* Note that off_t is signed but size_t isn't. */
+ off = strtol(argv[4], NULL, 10);
+ len = strtoul(argv[5], NULL, 10);
+
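+    /* Punch a hole over [off, off+len); the calling .t expects the file
+     * size to stay unchanged and any shard fully covered by the range to
+     * read back as zeroes. */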
+ ret = glfs_discard(fd, off, len);
+ if (ret <= 0) {
+ fprintf(stderr, "glfs_discard: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (fd)
+ glfs_close(fd);
+ glfs_fini(fs);
+ return ret;
+}
diff --git a/tests/bugs/shard/bug-shard-discard.t b/tests/bugs/shard/bug-shard-discard.t
new file mode 100644
index 00000000000..910ade14801
--- /dev/null
+++ b/tests/bugs/shard/bug-shard-discard.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+FILE_COUNT_TIME=5
+
+function get_shard_count {
+ ls $1/$2.* | wc -l
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0..3}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Create a file.
+TEST touch $M0/foo
+TEST dd if=/dev/urandom of=$M0/foo bs=1M count=10
+
+# This should ensure /.shard is created on the bricks.
+TEST stat $B0/${V0}0/.shard
+TEST stat $B0/${V0}1/.shard
+TEST stat $B0/${V0}2/.shard
+TEST stat $B0/${V0}3/.shard
+
+#Note the size of the file; it should be 10M
+EXPECT '10485760' stat -c %s $M0/foo
+
+gfid_foo=$(get_gfid_string $M0/foo)
+
+TEST build_tester $(dirname $0)/bug-shard-discard.c -lgfapi -Wall -O2
+#Call discard on the file at off=7M and len=3M
+TEST $(dirname $0)/bug-shard-discard $H0 $V0 /foo 7340032 3145728 `gluster --print-logdir`/glfs-$V0.log
+
+#Ensure that discard doesn't change the original size of the file.
+EXPECT '10485760' stat -c %s $M0/foo
+
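+# With a 4MB shard-block-size, the discarded range [7M,10M) fully covers
+# shard .2 ([8M,12M)) but only the tail of shard .1 ([4M,8M)).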
+# Ensure that the last shard is all zero'd out
+EXPECT "1" file_all_zeroes `find $B0 -name $gfid_foo.2`
+EXPECT_NOT "1" file_all_zeroes `find $B0 -name $gfid_foo.1`
+
+# Now unlink the file. And ensure that all shards associated with the file are cleaned up
+TEST unlink $M0/foo
+
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_shard_count $B0/${V0}0/.shard $gfid_foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_shard_count $B0/${V0}1/.shard $gfid_foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_shard_count $B0/${V0}2/.shard $gfid_foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_shard_count $B0/${V0}3/.shard $gfid_foo
+TEST ! stat $M0/foo
+
+#clean up everything
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+TEST rm -f $(dirname $0)/bug-shard-discard
+
+cleanup
diff --git a/tests/bugs/shard/bug-shard-zerofill.c b/tests/bugs/shard/bug-shard-zerofill.c
new file mode 100644
index 00000000000..ed4c8c54dc2
--- /dev/null
+++ b/tests/bugs/shard/bug-shard-zerofill.c
@@ -0,0 +1,60 @@
+#include <stdio.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd = NULL;
+ int ret = 1;
+
+ if (argc != 5) {
+ fprintf(stderr, "Syntax: %s <host> <volname> <file-path> <log-file>\n",
+ argv[0]);
+ return 1;
+ }
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", argv[1], 24007);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_volfile_server: returned %d\n", ret);
+ goto out;
+ }
+ ret = glfs_set_logging(fs, argv[4], 7);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_logging: returned %d\n", ret);
+ goto out;
+ }
+ ret = glfs_init(fs);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+ goto out;
+ }
+
+ fd = glfs_open(fs, argv[3], O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_open: returned NULL\n");
+ goto out;
+ }
+
+ /* Zero-fill "foo" with 10MB of data */
+ ret = glfs_zerofill(fd, 0, 10485760);
+ if (ret <= 0) {
+ fprintf(stderr, "glfs_zerofill: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (fd)
+ glfs_close(fd);
+ glfs_fini(fs);
+ return ret;
+}
diff --git a/tests/bugs/shard/bug-shard-zerofill.t b/tests/bugs/shard/bug-shard-zerofill.t
new file mode 100644
index 00000000000..4a919a24b99
--- /dev/null
+++ b/tests/bugs/shard/bug-shard-zerofill.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0..3}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Create a file.
+TEST touch $M0/foo
+
+gfid_foo=$(get_gfid_string $M0/foo)
+
+TEST build_tester $(dirname $0)/bug-shard-zerofill.c -lgfapi -Wall -O2
+TEST $(dirname $0)/bug-shard-zerofill $H0 $V0 /foo `gluster --print-logdir`/glfs-$V0.log
+
+# This should ensure /.shard is created on the bricks.
+TEST stat $B0/${V0}0/.shard
+TEST stat $B0/${V0}1/.shard
+TEST stat $B0/${V0}2/.shard
+TEST stat $B0/${V0}3/.shard
+
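+# A 10MB zerofill at a 4MB shard-block-size leaves the base file holding
+# the first 4MB, shard .1 the next 4MB, and shard .2 the remaining 2MB.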
+EXPECT "4194304" echo `find $B0 -name $gfid_foo.1 | xargs stat -c %s`
+EXPECT "2097152" echo `find $B0 -name $gfid_foo.2 | xargs stat -c %s`
+
+EXPECT "1" file_all_zeroes $M0/foo
+
+TEST `echo "abc" >> $M0/foo`
+
+EXPECT_NOT "1" file_all_zeroes $M0/foo
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+TEST rm -f $(dirname $0)/bug-shard-zerofill
+
+cleanup
diff --git a/tests/bugs/shard/configure-lru-limit.t b/tests/bugs/shard/configure-lru-limit.t
new file mode 100644
index 00000000000..923a4d8d747
--- /dev/null
+++ b/tests/bugs/shard/configure-lru-limit.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 features.shard-lru-limit 25
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Perform a write that would cause 25 shards to be created, 24 of them under .shard
+TEST dd if=/dev/zero of=$M0/foo bs=1M count=100
+
+statedump=$(generate_mount_statedump $V0)
+sleep 1
+EXPECT "25" echo $(grep "lru-max-limit" $statedump | cut -f2 -d'=' | tail -1)
+
+# The base shard is never added to this list, so the remaining shards account for the 24 inodes in the lru list
+EXPECT "24" echo $(grep "inode-count" $statedump | cut -f2 -d'=' | tail -1)
+
+rm -f $statedump
+
+# Test to ensure there's no "reconfiguration" of the value once set.
+TEST $CLI volume set $V0 features.shard-lru-limit 30
+statedump=$(generate_mount_statedump $V0)
+sleep 1
+EXPECT "25" echo $(grep "lru-max-limit" $statedump | cut -f2 -d'=' | tail -1)
+rm -f $statedump
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
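+# A fresh mount, however, should pick up the new value.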
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+statedump=$(generate_mount_statedump $V0)
+sleep 1
+EXPECT "30" echo $(grep "lru-max-limit" $statedump | cut -f2 -d'=' | tail -1)
+rm -f $statedump
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/issue-1243.t b/tests/bugs/shard/issue-1243.t
new file mode 100644
index 00000000000..ba22d2b74fe
--- /dev/null
+++ b/tests/bugs/shard/issue-1243.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.strict-o-direct on
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST $CLI volume set $V0 md-cache-timeout 10
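+
+# With attributes cached by md-cache for 10s, the xattr operations below
+# must not cause the base file's on-disk size to be cached in place of
+# shard's aggregated file size.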
+
+# Write data into a file such that its size crosses shard-block-size
+TEST dd if=/dev/zero of=$M0/foo bs=1048576 count=8 oflag=direct
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Execute a setxattr on the file.
+TEST setfattr -n trusted.libvirt -v some-value $M0/foo
+
+# Size of the file should be the aggregated size, not the shard-block-size
+EXPECT '8388608' stat -c %s $M0/foo
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Execute a removexattr on the file.
+TEST setfattr -x trusted.libvirt $M0/foo
+
+# Size of the file should be the aggregated size, not the shard-block-size
+EXPECT '8388608' stat -c %s $M0/foo
+cleanup
diff --git a/tests/bugs/shard/issue-1281.t b/tests/bugs/shard/issue-1281.t
new file mode 100644
index 00000000000..9704caa8944
--- /dev/null
+++ b/tests/bugs/shard/issue-1281.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+#Open a file and store descriptor in fd = 5
+exec 5>$M0/foo
+
+#Unlink the same file which is opened in prev step
+TEST unlink $M0/foo
+
+#Write something on the file using the open fd = 5
+echo "issue-1281" >&5
+
+#Write on the descriptor should be successful
+EXPECT 0 echo $?
+
+#Close the fd = 5
+exec 5>&-
+
+cleanup
diff --git a/tests/bugs/shard/issue-1425.t b/tests/bugs/shard/issue-1425.t
new file mode 100644
index 00000000000..bbe82c0e5b2
--- /dev/null
+++ b/tests/bugs/shard/issue-1425.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+FILE_COUNT_TIME=5
+
+function get_file_count {
+ ls $1* | wc -l
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST fallocate -l 20M $M0/foo
+gfid_new=$(get_gfid_string $M0/foo)
+
+# Check for the base shard
+TEST stat $M0/foo
+TEST stat $B0/${V0}0/foo
+
+# There should be 4 associated shards
+EXPECT_WITHIN $FILE_COUNT_TIME 4 get_file_count $B0/${V0}0/.shard/$gfid_new
+
+# There should be 1+4 shards and we expect 4 fewer lookups than on the build without this patch
+EXPECT "21" echo `$CLI volume profile $V0 info incremental | grep -w LOOKUP | awk '{print $8}'`
+
+# Delete the base shard and check shards get cleaned up
+TEST unlink $M0/foo
+
+TEST ! stat $M0/foo
+TEST ! stat $B0/${V0}0/foo
+
+# There should be no shards now
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_new
+cleanup
diff --git a/tests/bugs/shard/parallel-truncate-read.t b/tests/bugs/shard/parallel-truncate-read.t
new file mode 100644
index 00000000000..4de876f58f6
--- /dev/null
+++ b/tests/bugs/shard/parallel-truncate-read.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+#This test crashes if shard's LRU list still contains a shard's inode after
+#the inode is forgotten. The minimum observed time for the crash to occur
+#was 180 seconds.
+
+. $(dirname $0)/../../include.rc
+
+function keep_writing {
+ cd $M0;
+ while [ -f /tmp/parallel-truncate-read ]
+ do
+ dd if=/dev/zero of=file1 bs=1M count=16
+ done
+ cd
+}
+
+function keep_reading {
+ cd $M0;
+ while [ -f /tmp/parallel-truncate-read ]
+ do
+ cat file1 > /dev/null
+ done
+ cd
+}
+
+cleanup;
+
+TEST touch /tmp/parallel-truncate-read
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+keep_writing &
+keep_reading &
+sleep 180
+TEST rm -f /tmp/parallel-truncate-read
+wait
+#test that the mount is operational
+TEST stat $M0
+
+cleanup;
diff --git a/tests/bugs/shard/shard-append-test.c b/tests/bugs/shard/shard-append-test.c
new file mode 100644
index 00000000000..c7debb2b182
--- /dev/null
+++ b/tests/bugs/shard/shard-append-test.c
@@ -0,0 +1,183 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define LOG_ERR(msg) \
+ do { \
+ fprintf(stderr, "%s : Error (%s)\n", msg, strerror(errno)); \
+ } while (0)
+
+/* This test verifies that the shard xlator handles offsets in appending
+ * writes correctly. It performs 1025 writes of 1025 bytes each from 5
+ * threads, where each thread writes a buffer filled with its own distinct
+ * character. At the end it reads the file till EOF and checks that every
+ * 1025-byte read is a single repeated character and that the total content
+ * read is 5*1025*1025 bytes. 1025 bytes is chosen because some writes will
+ * span more than one shard once the file size crosses a shard boundary. */
+pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+int thread_data = '1';
+
+glfs_t *
+init_glfs(const char *hostname, const char *volname, const char *logfile)
+{
+ int ret = -1;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ LOG_ERR("glfs_new failed");
+ return NULL;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_volfile_server failed");
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_logging failed");
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ LOG_ERR("glfs_init failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret) {
+ glfs_fini(fs);
+ fs = NULL;
+ }
+
+ return fs;
+}
+
+void *
+write_data(void *data)
+{
+ char buf[1025] = {0};
+ glfs_fd_t *glfd = NULL;
+ glfs_t *fs = data;
+ int i = 0;
+
+ pthread_mutex_lock(&lock);
+ {
+ memset(buf, thread_data, sizeof(buf));
+ thread_data++;
+ }
+ pthread_mutex_unlock(&lock);
+
+ for (i = 0; i < 1025; i++) {
+ glfd = glfs_creat(fs, "parallel-write.txt", O_WRONLY | O_APPEND,
+ S_IRUSR | S_IWUSR | O_SYNC);
+ if (!glfd) {
+ LOG_ERR("Failed to create file");
+ exit(1);
+ }
+
+ if (glfs_write(glfd, buf, sizeof(buf), 0) < 0) {
+ LOG_ERR("Failed to write to file");
+ exit(1);
+ }
+ if (glfs_close(glfd) != 0) {
+ LOG_ERR("Failed to close file");
+ exit(1);
+ }
+ }
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t tid[5] = {0};
+ char buf[1025] = {0};
+ char cmp_buf[1025] = {0};
+ int ret = 0;
+ char *hostname = NULL;
+ char *volname = NULL;
+ char *logfile = NULL;
+ glfs_t *fs = NULL;
+ glfs_fd_t *glfd = NULL;
+ ssize_t bytes_read = 0;
+ ssize_t total_bytes_read = 0;
+ int i = 0;
+
+ if (argc != 4) {
+ fprintf(stderr, "Invalid argument\n");
+ exit(1);
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ logfile = argv[3];
+
+ fs = init_glfs(hostname, volname, logfile);
+ if (fs == NULL) {
+ LOG_ERR("init_glfs failed");
+ return -1;
+ }
+
+ for (i = 0; i < 5; i++) {
+ pthread_create(&tid[i], NULL, write_data, fs);
+ }
+
+ for (i = 0; i < 5; i++) {
+ pthread_join(tid[i], NULL);
+ }
+ glfd = glfs_open(fs, "parallel-write.txt", O_RDONLY);
+ if (!glfd) {
+ LOG_ERR("Failed to open file for reading");
+ exit(1);
+ }
+
+ while ((bytes_read = glfs_read(glfd, buf, sizeof(buf), 0)) > 0) {
+ if (bytes_read != sizeof(buf)) {
+ fprintf(stderr,
+ "Didn't read complete data read: %zd "
+ "expected: %lu",
+ bytes_read, sizeof(buf));
+ exit(1);
+ }
+
+ total_bytes_read += bytes_read;
+ if (buf[0] < '1' || buf[0] >= thread_data) {
+ fprintf(stderr, "Invalid character found: %c", buf[0]);
+ exit(1);
+ }
+ memset(cmp_buf, buf[0], sizeof(cmp_buf));
+ if (memcmp(cmp_buf, buf, sizeof(cmp_buf))) {
+ LOG_ERR("Data corrupted");
+ exit(1);
+ }
+ memset(cmp_buf, 0, sizeof(cmp_buf));
+ }
+
+ if (total_bytes_read != 5 * 1025 * 1025) {
+ fprintf(stderr,
+ "Failed to read what is written, read: %zd, "
+ "expected %d",
+ total_bytes_read, 5 * 1025 * 1025);
+ exit(1);
+ }
+
+ if (glfs_close(glfd) != 0) {
+ LOG_ERR("Failed to close");
+ exit(1);
+ }
+ return 0;
+}
diff --git a/tests/bugs/shard/shard-append-test.t b/tests/bugs/shard/shard-append-test.t
new file mode 100644
index 00000000000..f8719f2a2c1
--- /dev/null
+++ b/tests/bugs/shard/shard-append-test.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 3 ${H0}:$B0/brick{1,2,3};
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+
+#Uncomment the following line after shard-queuing is implemented
+#TEST $CLI volume set $V0 performance.write-behind off
+
+TEST $CLI volume set $V0 performance.strict-o-direct on
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume start $V0;
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/shard-append-test.c -lgfapi -lpthread
+
+TEST ./$(dirname $0)/shard-append-test ${H0} $V0 $logdir/shard-append-test.log
+
+cleanup_tester $(dirname $0)/shard-append-test
+
+cleanup;
diff --git a/tests/bugs/shard/shard-fallocate.c b/tests/bugs/shard/shard-fallocate.c
new file mode 100644
index 00000000000..cb0714e8564
--- /dev/null
+++ b/tests/bugs/shard/shard-fallocate.c
@@ -0,0 +1,113 @@
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+enum fallocate_flag {
+ TEST_FALLOCATE_NONE,
+ TEST_FALLOCATE_KEEP_SIZE,
+ TEST_FALLOCATE_ZERO_RANGE,
+ TEST_FALLOCATE_PUNCH_HOLE,
+ TEST_FALLOCATE_MAX,
+};
+
+int
+get_fallocate_flag(int opcode)
+{
+ int ret = 0;
+
+ switch (opcode) {
+ case TEST_FALLOCATE_NONE:
+ ret = 0;
+ break;
+ case TEST_FALLOCATE_KEEP_SIZE:
+ ret = FALLOC_FL_KEEP_SIZE;
+ break;
+ case TEST_FALLOCATE_ZERO_RANGE:
+ ret = FALLOC_FL_ZERO_RANGE;
+ break;
+ case TEST_FALLOCATE_PUNCH_HOLE:
+ ret = FALLOC_FL_PUNCH_HOLE;
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ return ret;
+}
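+
+/* Opcode 0 (TEST_FALLOCATE_NONE) selects a plain fallocate with no
+ * FALLOC_FL_* flag; zero-flag.t passes "0" for all three of its
+ * file1/file2/file3 cases. */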
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 1;
+ int opcode = -1;
+ off_t offset = 0;
+ size_t len = 0;
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd = NULL;
+
+ if (argc != 8) {
+ fprintf(stderr,
+ "Syntax: %s <host> <volname> <opcode> <offset> <len> "
+ "<file-path> <log-file>\n",
+ argv[0]);
+ return 1;
+ }
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", argv[1], 24007);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_volfile_server: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, argv[7], 7);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_logging: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+ goto out;
+ }
+
+ opcode = atoi(argv[3]);
+ opcode = get_fallocate_flag(opcode);
+ if (opcode < 0) {
+ fprintf(stderr, "get_fallocate_flag: invalid flag \n");
+ goto out;
+ }
+
+ /* Note that off_t is signed but size_t isn't. */
+ offset = strtol(argv[4], NULL, 10);
+ len = strtoul(argv[5], NULL, 10);
+
+ fd = glfs_open(fs, argv[6], O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_open: returned NULL\n");
+ goto out;
+ }
+
+ ret = glfs_fallocate(fd, opcode, offset, len);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_fallocate: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (fd)
+ glfs_close(fd);
+ glfs_fini(fs);
+ return ret;
+}
diff --git a/tests/bugs/shard/shard-inode-refcount-test.t b/tests/bugs/shard/shard-inode-refcount-test.t
new file mode 100644
index 00000000000..3fd181be690
--- /dev/null
+++ b/tests/bugs/shard/shard-inode-refcount-test.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+SHARD_COUNT_TIME=5
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST dd if=/dev/zero conv=fsync of=$M0/one-plus-five-shards bs=1M count=23
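+# 23MB at a 4MB shard-block-size = the base file plus 5 shards, hence the
+# file's name.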
+
+ACTIVE_INODES_BEFORE=$(get_mount_active_size_value $V0)
+TEST rm -f $M0/one-plus-five-shards
+# Expect 5 fewer inodes than before, plus one extra because .remove_me gets created.
+EXPECT_WITHIN $SHARD_COUNT_TIME `expr $ACTIVE_INODES_BEFORE - 5 + 1` get_mount_active_size_value $V0 $M0
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/unlinks-and-renames.t b/tests/bugs/shard/unlinks-and-renames.t
new file mode 100644
index 00000000000..990ca69a8b1
--- /dev/null
+++ b/tests/bugs/shard/unlinks-and-renames.t
@@ -0,0 +1,333 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+# The aim of this test script is to exercise the various codepaths of unlink
+# and rename fops in sharding and make sure they work fine.
+#
+
+FILE_COUNT_TIME=5
+
+function get_file_count {
+ ls $1* | wc -l
+}
+
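+# Shards of deleted files are cleaned up in the background; each such file
+# is tracked by an entry under .shard/.remove_me on the bricks, which
+# several checks below count.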
+#################################################
+################### UNLINK ######################
+#################################################
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/foo
+TEST touch $M0/dir/new
+
+##########################################
+##### 01. Unlink with /.shard absent #####
+##########################################
+
+TEST truncate -s 5M $M0/dir/foo
+TEST ! stat $B0/${V0}0/.shard
+TEST ! stat $B0/${V0}1/.shard
+# Test to ensure that unlink doesn't fail due to absence of /.shard
+gfid_foo=$(get_gfid_string $M0/dir/foo)
+TEST unlink $M0/dir/foo
+TEST stat $B0/${V0}0/.shard/.remove_me
+TEST stat $B0/${V0}1/.shard/.remove_me
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_foo
+
+######################################################
+##### 02. Unlink of a sharded file without holes #####
+######################################################
+
+# Create a 9M sharded file
+TEST dd if=/dev/zero of=$M0/dir/new bs=1024 count=9216
+gfid_new=$(get_gfid_string $M0/dir/new)
+# Ensure its shards are created.
+TEST stat $B0/${V0}0/.shard/$gfid_new.1
+TEST stat $B0/${V0}1/.shard/$gfid_new.1
+TEST stat $B0/${V0}0/.shard/$gfid_new.2
+TEST stat $B0/${V0}1/.shard/$gfid_new.2
+TEST unlink $M0/dir/new
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_new
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/$gfid_new
+TEST ! stat $M0/dir/new
+TEST ! stat $B0/${V0}0/dir/new
+TEST ! stat $B0/${V0}1/dir/new
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_new
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_new
+
+###########################################
+##### 03. Unlink with /.shard present #####
+###########################################
+
+TEST truncate -s 5M $M0/dir/foo
+gfid_foo=$(get_gfid_string $M0/dir/foo)
+# Ensure its shards are absent.
+TEST ! stat $B0/${V0}0/.shard/$gfid_foo.1
+TEST ! stat $B0/${V0}1/.shard/$gfid_foo.1
+# Test to ensure that unlink of a sparse file works fine.
+TEST unlink $M0/dir/foo
+TEST ! stat $B0/${V0}0/dir/foo
+TEST ! stat $B0/${V0}1/dir/foo
+TEST ! stat $M0/dir/foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_foo
+
+#################################################################
+##### 04. Unlink of a file with only one block (the zeroth) #####
+#################################################################
+
+TEST touch $M0/dir/foo
+gfid_foo=$(get_gfid_string $M0/dir/foo)
+TEST dd if=/dev/zero of=$M0/dir/foo bs=1024 count=1024
+# Test to ensure that unlink of a file with only base shard works fine.
+TEST unlink $M0/dir/foo
+TEST ! stat $B0/${V0}0/dir/foo
+TEST ! stat $B0/${V0}1/dir/foo
+TEST ! stat $M0/dir/foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_foo
+
+########################################################
+##### 05. Unlink of a sharded file with hard-links #####
+########################################################
+
+# Create a 9M sharded file
+TEST dd if=/dev/zero of=$M0/dir/original bs=1024 count=9216
+gfid_original=$(get_gfid_string $M0/dir/original)
+# Ensure its shards are created.
+TEST stat $B0/${V0}0/.shard/$gfid_original.1
+TEST stat $B0/${V0}1/.shard/$gfid_original.1
+TEST stat $B0/${V0}0/.shard/$gfid_original.2
+TEST stat $B0/${V0}1/.shard/$gfid_original.2
+# Create a hard link.
+TEST ln $M0/dir/original $M0/link
+# Now delete the original file.
+TEST unlink $M0/dir/original
+TEST ! stat $B0/${V0}0/.shard/.remove_me/$gfid_original
+TEST ! stat $B0/${V0}1/.shard/.remove_me/$gfid_original
+# Ensure the shards are still intact.
+TEST stat $B0/${V0}0/.shard/$gfid_original.1
+TEST stat $B0/${V0}1/.shard/$gfid_original.1
+TEST stat $B0/${V0}0/.shard/$gfid_original.2
+TEST stat $B0/${V0}1/.shard/$gfid_original.2
+TEST ! stat $M0/dir/original
+TEST stat $M0/link
+TEST stat $B0/${V0}0/link
+TEST stat $B0/${V0}1/link
+# Now delete the last link.
+TEST unlink $M0/link
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_original
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_original
+# Ensure that the shards are all cleaned up.
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_original
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/$gfid_original
+TEST ! stat $M0/link
+TEST ! stat $B0/${V0}0/link
+TEST ! stat $B0/${V0}1/link
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
+
+#################################################
+################### RENAME ######################
+#################################################
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/src
+TEST touch $M0/dir/dst
+
+##########################################
+##### 06. Rename with /.shard absent #####
+##########################################
+
+TEST truncate -s 5M $M0/dir/dst
+gfid_dst=$(get_gfid_string $M0/dir/dst)
+TEST ! stat $B0/${V0}0/.shard
+TEST ! stat $B0/${V0}1/.shard
+# Test to ensure that rename doesn't fail due to absence of /.shard
+TEST mv -f $M0/dir/src $M0/dir/dst
+TEST ! stat $M0/dir/src
+TEST stat $M0/dir/dst
+TEST ! stat $B0/${V0}0/dir/src
+TEST ! stat $B0/${V0}1/dir/src
+TEST stat $B0/${V0}0/dir/dst
+TEST stat $B0/${V0}1/dir/dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+######################################################
+##### 07. Rename to a sharded file without holes #####
+######################################################
+
+TEST unlink $M0/dir/dst
+TEST touch $M0/dir/src
+# Create a 9M sharded file
+TEST dd if=/dev/zero of=$M0/dir/dst bs=1024 count=9216
+gfid_dst=$(get_gfid_string $M0/dir/dst)
+# Ensure its shards are created.
+TEST stat $B0/${V0}0/.shard/$gfid_dst.1
+TEST stat $B0/${V0}1/.shard/$gfid_dst.1
+TEST stat $B0/${V0}0/.shard/$gfid_dst.2
+TEST stat $B0/${V0}1/.shard/$gfid_dst.2
+TEST mv -f $M0/dir/src $M0/dir/dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/$gfid_dst
+TEST ! stat $M0/dir/src
+TEST stat $M0/dir/dst
+TEST ! stat $B0/${V0}0/dir/src
+TEST ! stat $B0/${V0}1/dir/src
+TEST stat $B0/${V0}0/dir/dst
+TEST stat $B0/${V0}1/dir/dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+#######################################################
+##### 08. Rename of dst file with /.shard present #####
+#######################################################
+
+TEST unlink $M0/dir/dst
+TEST touch $M0/dir/src
+TEST truncate -s 5M $M0/dir/dst
+gfid_dst=$(get_gfid_string $M0/dir/dst)
+# Test to ensure that rename into a sparse file works fine.
+TEST mv -f $M0/dir/src $M0/dir/dst
+TEST ! stat $M0/dir/src
+TEST stat $M0/dir/dst
+TEST ! stat $B0/${V0}0/dir/src
+TEST ! stat $B0/${V0}1/dir/src
+TEST stat $B0/${V0}0/dir/dst
+TEST stat $B0/${V0}1/dir/dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+###################################################################
+##### 09. Rename of dst file with only one block (the zeroth) #####
+###################################################################
+
+TEST unlink $M0/dir/dst
+TEST touch $M0/dir/src
+TEST dd if=/dev/zero of=$M0/dir/dst bs=1024 count=1024
+gfid_dst=$(get_gfid_string $M0/dir/dst)
+# Test to ensure that rename into a file with only base shard works fine.
+TEST mv -f $M0/dir/src $M0/dir/dst
+TEST ! stat $M0/dir/src
+TEST stat $M0/dir/dst
+TEST ! stat $B0/${V0}0/dir/src
+TEST ! stat $B0/${V0}1/dir/src
+TEST stat $B0/${V0}0/dir/dst
+TEST stat $B0/${V0}1/dir/dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+############################################################
+##### 10. Rename to a dst sharded file with hard-links #####
+############################################################
+
+TEST unlink $M0/dir/dst
+TEST touch $M0/dir/src
+# Create a 9M sharded file
+TEST dd if=/dev/zero of=$M0/dir/dst bs=1024 count=9216
+gfid_dst=$(get_gfid_string $M0/dir/dst)
+# Ensure its shards are created.
+TEST stat $B0/${V0}0/.shard/$gfid_dst.1
+TEST stat $B0/${V0}1/.shard/$gfid_dst.1
+TEST stat $B0/${V0}0/.shard/$gfid_dst.2
+TEST stat $B0/${V0}1/.shard/$gfid_dst.2
+# Create a hard link.
+TEST ln $M0/dir/dst $M0/link
+# Now rename src to the dst.
+TEST mv -f $M0/dir/src $M0/dir/dst
+# Ensure the shards are still intact.
+TEST stat $B0/${V0}0/.shard/$gfid_dst.1
+TEST stat $B0/${V0}1/.shard/$gfid_dst.1
+TEST stat $B0/${V0}0/.shard/$gfid_dst.2
+TEST stat $B0/${V0}1/.shard/$gfid_dst.2
+TEST ! stat $M0/dir/src
+TEST ! stat $B0/${V0}0/dir/src
+TEST ! stat $B0/${V0}1/dir/src
+TEST ! stat $B0/${V0}0/.shard/.remove_me/$gfid_dst
+TEST ! stat $B0/${V0}1/.shard/.remove_me/$gfid_dst
+# Now rename another file to the last link.
+TEST touch $M0/dir/src2
+TEST mv -f $M0/dir/src2 $M0/link
+# Ensure that the shards are all cleaned up.
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/$gfid_dst
+TEST ! stat $B0/${V0}0/.shard/$gfid_dst.1
+TEST ! stat $B0/${V0}1/.shard/$gfid_dst.1
+TEST ! stat $B0/${V0}0/.shard/$gfid_dst.2
+TEST ! stat $B0/${V0}1/.shard/$gfid_dst.2
+TEST ! stat $M0/dir/src2
+TEST ! stat $B0/${V0}0/dir/src2
+TEST ! stat $B0/${V0}1/dir/src2
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+##############################################################
+##### 11. Rename with non-existent dst and a sharded src #####
+##############################################################
+
+TEST touch $M0/dir/src
+TEST dd if=/dev/zero of=$M0/dir/src bs=1024 count=9216
+gfid_src=$(get_gfid_string $M0/dir/src)
+# Ensure its shards are created.
+TEST stat $B0/${V0}0/.shard/$gfid_src.1
+TEST stat $B0/${V0}1/.shard/$gfid_src.1
+TEST stat $B0/${V0}0/.shard/$gfid_src.2
+TEST stat $B0/${V0}1/.shard/$gfid_src.2
+# Now rename src to the dst.
+TEST mv $M0/dir/src $M0/dir/dst2
+
+TEST stat $B0/${V0}0/.shard/$gfid_src.1
+TEST stat $B0/${V0}1/.shard/$gfid_src.1
+TEST stat $B0/${V0}0/.shard/$gfid_src.2
+TEST stat $B0/${V0}1/.shard/$gfid_src.2
+TEST ! stat $M0/dir/src
+TEST ! stat $B0/${V0}0/dir/src
+TEST ! stat $B0/${V0}1/dir/src
+TEST stat $M0/dir/dst2
+TEST stat $B0/${V0}0/dir/dst2
+TEST stat $B0/${V0}1/dir/dst2
+
+#############################################################################
+##### 12. Rename with non-existent dst and a sharded src with no shards #####
+#############################################################################
+
+TEST touch $M0/dir/src
+TEST dd if=/dev/zero of=$M0/dir/src bs=1024 count=1024
+gfid_src=$(get_gfid_string $M0/dir/src)
+TEST ! stat $B0/${V0}0/.shard/$gfid_src.1
+TEST ! stat $B0/${V0}1/.shard/$gfid_src.1
+# Now rename src to the dst.
+TEST mv $M0/dir/src $M0/dir/dst1
+TEST ! stat $M0/dir/src
+TEST ! stat $B0/${V0}0/dir/src
+TEST ! stat $B0/${V0}1/dir/src
+TEST stat $M0/dir/dst1
+TEST stat $B0/${V0}0/dir/dst1
+TEST stat $B0/${V0}1/dir/dst1
+
+cleanup
diff --git a/tests/bugs/shard/zero-flag.t b/tests/bugs/shard/zero-flag.t
new file mode 100644
index 00000000000..1f39787ab9f
--- /dev/null
+++ b/tests/bugs/shard/zero-flag.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fallocate.rc
+
+cleanup
+
+require_fallocate -l 1m $M0/file
+require_fallocate -p -l 512k $M0/file && rm -f $M0/file
+require_fallocate -z -l 512k $M0/file && rm -f $M0/file
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST build_tester $(dirname $0)/shard-fallocate.c -lgfapi -Wall -O2
+
+# On file1 confirm that when fallocate's offset + len > cur file size,
+# the new file size will increase.
+TEST touch $M0/tmp
+TEST `echo 'abcdefghijklmnopqrstuvwxyz' > $M0/tmp`
+TEST touch $M0/file1
+
+gfid_file1=$(get_gfid_string $M0/file1)
+
+TEST $(dirname $0)/shard-fallocate $H0 $V0 "0" "0" "6291456" /file1 `gluster --print-logdir`/glfs-$V0.log
+
+EXPECT '6291456' stat -c %s $M0/file1
+
+# This should ensure /.shard is created on the bricks.
+TEST stat $B0/${V0}0/.shard
+TEST stat $B0/${V0}1/.shard
+TEST stat $B0/${V0}2/.shard
+TEST stat $B0/${V0}3/.shard
+
+EXPECT "2097152" echo `find $B0 -name $gfid_file1.1 | xargs stat -c %s`
+EXPECT "1" file_all_zeroes $M0/file1
+
+
+# On file2 confirm that fallocate to already allocated region of the
+# file does not change the content of the file.
+TEST truncate -s 6M $M0/file2
+TEST dd if=$M0/tmp of=$M0/file2 bs=1 seek=3145728 count=26 conv=notrunc
+md5sum_file2=$(md5sum $M0/file2 | awk '{print $1}')
+
+TEST $(dirname $0)/shard-fallocate $H0 $V0 "0" "3145728" "26" /file2 `gluster --print-logdir`/glfs-$V0.log
+
+EXPECT '6291456' stat -c %s $M0/file2
+EXPECT "$md5sum_file2" echo `md5sum $M0/file2 | awk '{print $1}'`
+
+# On file3 confirm that fallocate over a region of the file consisting
+# of holes creates a new shard in its place and fallocates it, with no
+# change in the file content seen by the application.
+TEST touch $M0/file3
+
+gfid_file3=$(get_gfid_string $M0/file3)
+
+TEST dd if=$M0/tmp of=$M0/file3 bs=1 seek=9437184 count=26 conv=notrunc
+TEST ! stat $B0/$V0*/.shard/$gfid_file3.1
+TEST stat $B0/$V0*/.shard/$gfid_file3.2
+md5sum_file3=$(md5sum $M0/file3 | awk '{print $1}')
+EXPECT "1048602" echo `find $B0 -name $gfid_file3.2 | xargs stat -c %s`
+
+TEST $(dirname $0)/shard-fallocate $H0 $V0 "0" "5242880" "1048576" /file3 `gluster --print-logdir`/glfs-$V0.log
+EXPECT "$md5sum_file3" echo `md5sum $M0/file3 | awk '{print $1}'`
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+rm -f $(dirname $0)/shard-fallocate
+cleanup
diff --git a/tests/bugs/snapshot/bug-1045333.t b/tests/bugs/snapshot/bug-1045333.t
new file mode 100755
index 00000000000..6c0b995b5b0
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1045333.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+TEST verify_lvm_version;
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST setup_lvm 1
+
+TEST $CLI volume create $V0 $H0:$L1
+TEST $CLI volume start $V0
+
+
+S1="${V0}-snap1" #Create snapshot with name contains hyphen(-)
+S2="-${V0}-snap2" #Create snapshot with name starts with hyphen(-)
+#Create snapshot with a long name
+S3="${V0}_single_gluster_volume_is_accessible_by_multiple_clients_offline_snapshot_is_a_long_name"
+
+TEST $CLI snapshot create $S1 $V0 no-timestamp
+TEST snapshot_exists 0 $S1
+
+TEST $CLI snapshot create $S2 $V0 no-timestamp
+TEST snapshot_exists 0 $S2
+
+TEST $CLI snapshot create $S3 $V0 no-timestamp
+TEST snapshot_exists 0 $S3
+
+
+TEST glusterfs -s $H0 --volfile-id=/snaps/$S1/$V0 $M0
+TEST glusterfs -s $H0 --volfile-id=/snaps/$S2/$V0 $M1
+TEST glusterfs -s $H0 --volfile-id=/snaps/$S3/$V0 $M2
+
+#Clean up
+#TEST $CLI snapshot delete $S1
+#TEST $CLI snapshot delete $S2
+#TEST $CLI snapshot delete $S3
+
+TEST $CLI volume stop $V0 force
+#TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1049834.t b/tests/bugs/snapshot/bug-1049834.t
new file mode 100755
index 00000000000..29c75cc7f96
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1049834.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+TEST verify_lvm_version
+TEST launch_cluster 2
+TEST setup_lvm 2
+
+TEST $CLI_1 peer probe $H2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#Setting the snap-max-hard-limit to 4
+TEST $CLI_1 snapshot config $V0 snap-max-hard-limit 4
+
+#Creating 3 snapshots on the volume (which is the soft-limit)
+TEST create_n_snapshots $V0 3 $V0_snap
+TEST snapshot_n_exists $V0 3 $V0_snap
+
+#Creating the 4th snapshot on the volume, expecting it to be created
+#with the oldest snapshot (i.e. the 1st) deleted
+TEST $CLI_1 snapshot create ${V0}_snap4 ${V0} no-timestamp
+TEST snapshot_exists 1 ${V0}_snap4
+TEST ! snapshot_exists 1 ${V0}_snap1
+TEST $CLI_1 snapshot delete ${V0}_snap4
+TEST $CLI_1 snapshot create ${V0}_snap1 ${V0} no-timestamp
+TEST snapshot_exists 1 ${V0}_snap1
+
+#Deleting the 4 snaps
+#TEST delete_n_snapshots $V0 4 $V0_snap
+#TEST ! snapshot_n_exists $V0 4 $V0_snap
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1064768.t b/tests/bugs/snapshot/bug-1064768.t
new file mode 100644
index 00000000000..53140a0e13e
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1064768.t
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
+
+TEST $CLI volume profile $V0 start
+TEST $CLI volume profile $V0 info
+TEST $CLI volume profile $V0 stop
+
+TEST $CLI volume status
+TEST $CLI volume stop $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Stopped' volinfo_field $V0 'Status';
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1087203.t b/tests/bugs/snapshot/bug-1087203.t
new file mode 100644
index 00000000000..035be098576
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1087203.t
@@ -0,0 +1,103 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../cluster.rc
+
+function get_volume_info ()
+{
+ local var=$1
+ $CLI_1 volume info $V0 | grep "^$var" | sed 's/.*: //'
+}
+
+cleanup;
+
+TEST verify_lvm_version
+TEST launch_cluster 2
+TEST setup_lvm 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2
+EXPECT "$V0" get_volume_info 'Volume Name';
+EXPECT 'Created' get_volume_info 'Status';
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' get_volume_info 'Status';
+
+
+# Setting system limit
+TEST $CLI_1 snapshot config snap-max-hard-limit 100
+
+# The volume limit cannot exceed the system limit; since the system limit
+# is set to 100, this should fail.
+TEST ! $CLI_1 snapshot config $V0 snap-max-hard-limit 101
+
+# Following are the invalid cases
+TEST ! $CLI_1 snapshot config $V0 snap-max-hard-limit a10
+TEST ! $CLI_1 snapshot config snap-max-hard-limit 10a
+TEST ! $CLI_1 snapshot config snap-max-hard-limit 10%
+TEST ! $CLI_1 snapshot config snap-max-soft-limit 50%1
+TEST ! $CLI_1 snapshot config snap-max-soft-limit 0111
+TEST ! $CLI_1 snapshot config snap-max-hard-limit OXA
+TEST ! $CLI_1 snapshot config snap-max-hard-limit 11.11
+TEST ! $CLI_1 snapshot config snap-max-soft-limit 50%
+TEST ! $CLI_1 snapshot config snap-max-hard-limit -100
+TEST ! $CLI_1 snapshot config snap-max-soft-limit -90
+
+# Soft limit cannot be assigned to volume
+TEST ! $CLI_1 snapshot config $V0 snap-max-soft-limit 10
+
+# Valid case
+TEST $CLI_1 snapshot config snap-max-soft-limit 50
+TEST $CLI_1 snapshot config $V0 snap-max-hard-limit 10
+
+# Validating auto-delete feature
+# Make sure auto-delete is disabled by default
+EXPECT 'disable' snap_config CLI_1 'auto-delete'
+
+# Test for invalid value for auto-delete
+TEST ! $CLI_1 snapshot config auto-delete test
+
+TEST $CLI_1 snapshot config snap-max-hard-limit 6
+TEST $CLI_1 snapshot config snap-max-soft-limit 50
+
+# Create 4 snapshots
+snap_index=1
+snap_count=4
+TEST snap_create CLI_1 $V0 $snap_index $snap_count
+
+# If auto-delete is disabled then the oldest snapshot
+# should not be deleted automatically.
+EXPECT '4' get_snap_count CLI_1;
+
+TEST snap_delete CLI_1 $snap_index $snap_count;
+
+# After those 4 snaps are deleted, no snaps should be present
+EXPECT '0' get_snap_count CLI_1;
+
+TEST $CLI_1 snapshot config auto-delete enable
+
+# auto-delete is already enabled, hence expect a failure.
+TEST ! $CLI_1 snapshot config auto-delete on
+
+# Testing other boolean values with auto-delete
+TEST $CLI_1 snapshot config auto-delete off
+EXPECT 'off' snap_config CLI_1 'auto-delete'
+
+TEST $CLI_1 snapshot config auto-delete true
+EXPECT 'true' snap_config CLI_1 'auto-delete'
+
+# Try to create 4 snaps again. As auto-delete is enabled, the
+# oldest snap should be deleted and the snap count should be 3
+
+TEST snap_create CLI_1 $V0 $snap_index $snap_count;
+EXPECT '3' get_snap_count CLI_1;
+
+TEST $CLI_1 snapshot config auto-delete disable
+EXPECT 'disable' snap_config CLI_1 'auto-delete'
+
+cleanup;
+
diff --git a/tests/bugs/snapshot/bug-1090042.t b/tests/bugs/snapshot/bug-1090042.t
new file mode 100755
index 00000000000..98531a9751e
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1090042.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+TEST glusterd;
+
+TEST $CLI volume create $V0 replica 3 $H0:$L1 $H0:$L2 $H0:$L3;
+TEST $CLI volume start $V0;
+
+TEST kill_brick $V0 $H0 $L1;
+
+#Normal snap create should fail
+TEST ! $CLI snapshot create ${V0}_snap1 $V0 no-timestamp;
+TEST ! snapshot_exists 0 ${V0}_snap1;
+
+#With the changes introduced in BZ #1184344, force snap create should fail too
+TEST ! $CLI snapshot create ${V0}_snap1 $V0 no-timestamp force;
+TEST ! snapshot_exists 0 ${V0}_snap1;
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1109770.t b/tests/bugs/snapshot/bug-1109770.t
new file mode 100644
index 00000000000..22511995937
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1109770.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../fileio.rc
+. $(dirname $0)/../../nfs.rc
+
+cleanup;
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+
+TEST glusterd;
+
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
+
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+for i in {1..10} ; do echo "file" > $M0/file$i ; done
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp;
+
+for i in {11..20} ; do echo "file" > $M0/file$i ; done
+
+TEST $CLI snapshot create snap2 $V0 no-timestamp;
+
+mkdir $M0/dir1;
+mkdir $M0/dir2;
+
+for i in {1..10} ; do echo "foo" > $M0/dir1/foo$i ; done
+for i in {1..10} ; do echo "foo" > $M0/dir2/foo$i ; done
+
+TEST $CLI snapshot create snap3 $V0 no-timestamp;
+
+for i in {11..20} ; do echo "foo" > $M0/dir1/foo$i ; done
+for i in {11..20} ; do echo "foo" > $M0/dir2/foo$i ; done
+
+TEST $CLI snapshot create snap4 $V0 no-timestamp;
+
+TEST $CLI volume set $V0 features.uss enable;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+TEST $CLI volume set $V0 features.uss disable;
+
+SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}');
+
+TEST ! [ $SNAPD_PID -gt 0 ];
+
+TEST $CLI volume set $V0 features.uss enable;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+TEST $CLI volume stop $V0;
+
+SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}');
+
+TEST ! [ $SNAPD_PID -gt 0 ];
+
+cleanup ;
diff --git a/tests/bugs/snapshot/bug-1109889.t b/tests/bugs/snapshot/bug-1109889.t
new file mode 100644
index 00000000000..5fdc7dc9506
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1109889.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../fileio.rc
+. $(dirname $0)/../../nfs.rc
+
+cleanup;
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+
+TEST glusterd;
+
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
+
+TEST $CLI volume start $V0;
+
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+MOUNT_PID=$(get_mount_process_pid $V0 $M0)
+
+for i in {1..10} ; do echo "file" > $M0/file$i ; done
+
+TEST $CLI snapshot config activate-on-create enable
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp;
+
+for i in {11..20} ; do echo "file" > $M0/file$i ; done
+
+TEST $CLI snapshot create snap2 $V0 no-timestamp;
+
+mkdir $M0/dir1;
+mkdir $M0/dir2;
+
+for i in {1..10} ; do echo "foo" > $M0/dir1/foo$i ; done
+for i in {1..10} ; do echo "foo" > $M0/dir2/foo$i ; done
+
+TEST $CLI snapshot create snap3 $V0 no-timestamp;
+
+for i in {11..20} ; do echo "foo" > $M0/dir1/foo$i ; done
+for i in {11..20} ; do echo "foo" > $M0/dir2/foo$i ; done
+
+TEST $CLI snapshot create snap4 $V0 no-timestamp;
+
+TEST $CLI volume set $V0 features.uss enable;
+
+# let snapd start up properly and the client connect to it
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" snap_client_connected_status $V0
+
+SNAPD_PID=$(ps auxww | grep snapd | grep -v grep | awk '{print $2}');
+
+TEST [ $SNAPD_PID -gt 0 ];
+
+TEST stat $M0/.snaps;
+
+kill -KILL $SNAPD_PID;
+
+# let snapd die properly
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "0" snap_client_connected_status $V0
+
+TEST ! stat $M0/.snaps;
+
+TEST $CLI volume start $V0 force;
+
+# let client get the snapd port from glusterd and connect
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" snap_client_connected_status $V0
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/.snaps
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1111041.t b/tests/bugs/snapshot/bug-1111041.t
new file mode 100755
index 00000000000..efda9688d8b
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1111041.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+. $(dirname $0)/../../nfs.rc
+
+cleanup;
+
+function is_snapd_running {
+ $CLI volume status $1 | grep "Snapshot Daemon" | wc -l;
+}
+
+function snapd_pid {
+ $CLI volume status $V0 | grep "Snapshot Daemon" | awk '{print $8}'
+}
+
+TEST glusterd;
+
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+
+TEST $CLI volume start $V0;
+
+EXPECT "0" is_snapd_running $v0
+
+TEST $CLI volume set $V0 features.uss enable;
+
+EXPECT "1" is_snapd_running $V0
+
+SNAPD_PID=$(snapd_pid);
+
+TEST [ $SNAPD_PID -gt 0 ]
+
+kill -9 $SNAPD_PID
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^N/A$" snapd_pid
+
+cleanup ;
diff --git a/tests/bugs/snapshot/bug-1112613.t b/tests/bugs/snapshot/bug-1112613.t
new file mode 100644
index 00000000000..e566de056bc
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1112613.t
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+V1="patchy2"
+
+TEST verify_lvm_version;
+TEST launch_cluster 2
+TEST setup_lvm 2
+
+TEST $CLI_1 peer probe $H2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$L1
+TEST $CLI_1 volume start $V0
+TEST $CLI_1 volume create $V1 $H2:$L2
+TEST $CLI_1 volume start $V1
+
+# Create 3 snapshots for volume $V0
+snap_count=3
+snap_index=1
+TEST snap_create CLI_1 $V0 $snap_index $snap_count;
+
+# Create 4 snapshots for volume $V1
+snap_count=4
+snap_index=11
+TEST snap_create CLI_1 $V1 $snap_index $snap_count;
+
+EXPECT '3' get_snap_count CLI_1 $V0;
+EXPECT '4' get_snap_count CLI_1 $V1;
+EXPECT '7' get_snap_count CLI_1
+
+TEST $CLI_1 snapshot delete volume $V0
+EXPECT '0' get_snap_count CLI_1 $V0;
+EXPECT '4' get_snap_count CLI_1 $V1;
+EXPECT '4' get_snap_count CLI_1
+
+TEST $CLI_1 snapshot delete all
+EXPECT '0' get_snap_count CLI_1 $V0;
+EXPECT '0' get_snap_count CLI_1 $V1;
+EXPECT '0' get_snap_count CLI_1
+
+cleanup;
+
diff --git a/tests/bugs/snapshot/bug-1113975.t b/tests/bugs/snapshot/bug-1113975.t
new file mode 100644
index 00000000000..86c1739fb46
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1113975.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+
+TEST glusterd;
+
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
+
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+for i in {1..10} ; do echo "file" > $M0/file$i ; done
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp;
+
+for i in {11..20} ; do echo "file" > $M0/file$i ; done
+
+TEST $CLI snapshot create snap2 $V0 no-timestamp;
+
+TEST $CLI volume stop $V0
+
+TEST $CLI snapshot restore snap1;
+
+TEST $CLI snapshot restore snap2;
+
+TEST $CLI volume start $V0
+
+cleanup ;
diff --git a/tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t b/tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t
new file mode 100644
index 00000000000..c5a285eb775
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1155042-dont-display-deactivated-snapshots.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST init_n_bricks 2
+TEST setup_lvm 2
+TEST glusterd;
+
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2
+TEST $CLI volume start $V0
+
+# enable uss and mount the volume
+TEST $CLI volume set $V0 features.uss enable
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+# create 10 snapshots and check if all are being reflected
+# in the USS world
+$CLI snapshot config activate-on-create enable
+for i in {1..10}; do $CLI snapshot create snap$i $V0 no-timestamp; done
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 10 uss_count_snap_displayed $M0
+
+# snapshots should not be displayed after deactivation
+for i in {1..10}; do $CLI snapshot deactivate snap$i --mode=script; done
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 uss_count_snap_displayed $M0
+
+# activate all the snapshots and check if all the activated snapshots
+# are displayed again
+for i in {1..10}; do $CLI snapshot activate snap$i --mode=script; done
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 10 uss_count_snap_displayed $M0
+
+cleanup;
+
diff --git a/tests/bugs/snapshot/bug-1157991.t b/tests/bugs/snapshot/bug-1157991.t
new file mode 100755
index 00000000000..f626ef2b705
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1157991.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+TEST verify_lvm_version;
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST setup_lvm 1
+
+TEST $CLI volume create $V0 $H0:$L1
+TEST $CLI volume start $V0
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp
+EXPECT 'Stopped' snapshot_status snap1;
+
+TEST $CLI snapshot config activate-on-create enable
+TEST $CLI snapshot create snap2 $V0 no-timestamp
+EXPECT 'Started' snapshot_status snap2;
+
+#Clean up
+TEST $CLI snapshot delete snap1
+TEST $CLI snapshot delete snap2
+
+TEST $CLI volume stop $V0 force
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1162462.t b/tests/bugs/snapshot/bug-1162462.t
new file mode 100755
index 00000000000..5c2e4fe37f0
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1162462.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
+TEST $CLI volume start $V0;
+TEST $CLI volume set $V0 features.uss enable;
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+mkdir $M0/test
+echo "file1" > $M0/file1
+ln -s $M0/file1 $M0/test/file_symlink
+ls -l $M0/ > /dev/null
+ls -l $M0/test/ > /dev/null
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp;
+$CLI snapshot activate snap1;
+EXPECT 'Started' snapshot_status snap1;
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" snap_client_connected_status $V0
+ls $M0/.snaps/snap1/test/ > /dev/null
+ls -l $M0/.snaps/snap1/test/ > /dev/null
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" snap_client_connected_status $V0
+
+TEST $CLI snapshot delete snap1;
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1162498.t b/tests/bugs/snapshot/bug-1162498.t
new file mode 100755
index 00000000000..a97e4429ee7
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1162498.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+TEST verify_lvm_version;
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST setup_lvm 1
+
+TEST $CLI volume create $V0 $H0:$L1
+TEST $CLI volume start $V0
+
+TEST $CLI snapshot config activate-on-create enable
+TEST $CLI volume set $V0 features.uss enable
+
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+
+TEST mkdir $M0/xyz
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp
+TEST $CLI snapshot create snap2 $V0 no-timestamp
+
+TEST rmdir $M0/xyz
+
+TEST $CLI snapshot create snap3 $V0 no-timestamp
+TEST $CLI snapshot create snap4 $V0 no-timestamp
+
+TEST mkdir $M0/xyz
+TEST ls $M0/xyz/.snaps/
+
+TEST $CLI volume stop $V0
+TEST $CLI snapshot restore snap2
+TEST $CLI volume start $V0
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+
+#Dir xyz exists in the restored snap2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/xyz
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" count_snaps $M0/xyz
+TEST mkdir $M0/abc
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" count_snaps $M0/abc
+
+#Clean up
+TEST $CLI snapshot delete snap1
+TEST $CLI snapshot delete snap3
+TEST $CLI snapshot delete snap4
+TEST $CLI volume stop $V0 force
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1166197.t b/tests/bugs/snapshot/bug-1166197.t
new file mode 100755
index 00000000000..b070ae271ba
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1166197.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+CURDIR=`pwd`
+
+TEST verify_lvm_version;
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST setup_lvm 1
+
+TEST $CLI volume create $V0 $H0:$L1
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+TEST $CLI snapshot config activate-on-create enable
+TEST $CLI volume set $V0 features.uss enable
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0 nolock
+TEST mkdir $N0/testdir
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp
+TEST $CLI snapshot create snap2 $V0 no-timestamp
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $N0/testdir/.snaps
+
+TEST cd $N0/testdir
+TEST cd .snaps
+TEST ls
+
+TEST $CLI snapshot deactivate snap2
+TEST ls
+
+TEST cd $CURDIR
+
+#Clean up
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+TEST $CLI snapshot delete snap1
+TEST $CLI snapshot delete snap2
+TEST $CLI volume stop $V0 force
+TEST $CLI volume delete $V0
+
+cleanup;
+
diff --git a/tests/bugs/snapshot/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t b/tests/bugs/snapshot/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t
new file mode 100644
index 00000000000..52a7a790b97
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1167580-set-proper-uid-and-gid-during-nfs-access.t
@@ -0,0 +1,205 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+# This function returns "Y" if the given user can execute
+# the given command on the path; otherwise it returns "N".
+# @arg-1 : Name of the user
+# @arg-2 : Path of the file
+# @arg-3 : command to be executed
+function check_if_permitted () {
+ local usr=$1
+ local path=$2
+ local cmd=$3
+ local var
+ local ret
+ var=$(su - $usr -c "$cmd $path")
+ ret=$?
+
+ if [ "$cmd" == "cat" ]
+ then
+ if [ "$var" == "Test" ]
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+ else
+ if [ "$ret" == "0" ]
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+ fi
+}
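+
+# Usage sketch (illustrative values only; "alice" is a made-up user):
+#
+#   EXPECT "Y" check_if_permitted alice $M0/README cat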
+
+# Create a directory in /tmp to serve as the base for the
+# test users' home directories
+home_dir=$(mktemp -d)
+chmod 777 $home_dir
+
+function get_new_user() {
+ local temp=$(uuidgen | tr -dc 'a-zA-Z' | head -c 8)
+ id $temp
+ if [ "$?" == "0" ]
+ then
+ get_new_user
+ else
+ echo $temp
+ fi
+}
+
+function create_user() {
+ local user=$1
+ local group=$2
+
+ if [ "$group" == "" ]
+ then
+ /usr/sbin/useradd -d $home_dir/$user $user
+ else
+ /usr/sbin/useradd -d $home_dir/$user -G $group $user
+ fi
+
+ return $?
+}
+
+cleanup;
+
+TEST setup_lvm 1
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$L1
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume start $V0
+
+# Mount the volume as both fuse and nfs mount
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST mount_nfs $H0:/$V0 $N0 nolock
+
+# Create 2 users
+user1=$(get_new_user)
+create_user $user1
+user2=$(get_new_user)
+create_user $user2
+
+# create a file for which only user1 has access
+echo "Test" > $M0/README
+chown $user1 $M0/README
+chmod 700 $M0/README
+
+# enable uss and take a snapshot
+TEST $CLI volume set $V0 uss enable
+TEST $CLI snapshot config activate-on-create on
+TEST $CLI snapshot create snap1 $V0 no-timestamp
+
+# try to access the file using the user1 account.
+# It should succeed with both the normal mount and the snapshot world.
+# There is a time delay in which snapd might not yet have received the
+# notification from glusterd about snapshot create, hence "EXPECT_WITHIN"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user1 $M0/README cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user1 $N0/README cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user1 $M0/.snaps/snap1/README cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user1 $N0/.snaps/snap1/README cat
+
+
+# try to access the file using the user2 account
+# It should fail from both the normal mount and the snapshot world
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user2 $M0/README cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user2 $N0/README cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user2 $M0/.snaps/snap1/README cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user2 $N0/.snaps/snap1/README cat
+
+# We also need to test the scenario where a user belonging to one group
+# tries to access files owned by a user belonging to another group.
+# Instead of reusing the already created users and making the test case
+# look complex, two fresh users are created for this.
+
+# The test case written below does the following things
+# 1) Create 2 users (user{3,4}), belonging to 2 different groups (group{3,4})
+# 2) Take a snapshot "snap2"
+# 3) Create a file for which only users belonging to group3 have
+# permission to read
+# 4) Test various combinations of Read-Write, Fuse-NFS mount, User{3,4,5}
+# from both normal mount, and USS world.
+
+echo "Test" > $M0/file3
+
+chmod 740 $M0/file3
+
+group3=$(get_new_user)
+groupadd $group3
+
+group4=$(get_new_user)
+groupadd $group4
+
+user3=$(get_new_user)
+create_user $user3 $group3
+
+user4=$(get_new_user)
+create_user $user4 $group4
+
+user5=$(get_new_user)
+create_user $user5
+
+chgrp $group3 $M0/file3
+
+TEST $CLI snapshot create snap2 $V0 no-timestamp
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user3 $M0/file3 cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user3 $M0/.snaps/snap2/file3 cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user3 $M0/file3 "echo Hello >"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user3 $M0/.snaps/snap2/file3 "echo Hello >"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user3 $N0/file3 cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" check_if_permitted $user3 $N0/.snaps/snap2/file3 cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user3 $N0/file3 "echo Hello >"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user3 $N0/.snaps/snap2/file3 "echo Hello >"
+
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $M0/file3 cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $M0/.snaps/snap2/file3 cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $M0/file3 "echo Hello >"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $M0/.snaps/snap2/file3 "echo Hello >"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $N0/file3 cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $N0/.snaps/snap2/file3 cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $N0/file3 "echo Hello >"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user4 $N0/.snaps/snap2/file3 "echo Hello >"
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $M0/file3 cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $M0/.snaps/snap2/file3 cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $M0/file3 "echo Hello >"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $M0/.snaps/snap2/file3 "echo Hello >"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $N0/file3 cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $N0/.snaps/snap2/file3 cat
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $N0/file3 "echo Hello >"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" check_if_permitted $user5 $N0/.snaps/snap2/file3 "echo Hello >"
+
+# cleanup
+/usr/sbin/userdel -f -r $user1
+/usr/sbin/userdel -f -r $user2
+/usr/sbin/userdel -f -r $user3
+/usr/sbin/userdel -f -r $user4
+/usr/sbin/userdel -f -r $user5
+
+#cleanup all the home directory which is created as part of this test case
+if [ -d "$home_dir" ]
+then
+ rm -rf $home_dir
+fi
+
+
+groupdel $group3
+groupdel $group4
+
+TEST $CLI snapshot delete all
+
+cleanup;
+
+
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/bugs/snapshot/bug-1168875.t b/tests/bugs/snapshot/bug-1168875.t
new file mode 100644
index 00000000000..9737784fd84
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1168875.t
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../fileio.rc
+. $(dirname $0)/../../nfs.rc
+
+cleanup;
+
+function check_entry_point_exists ()
+{
+ local entry_point=$1;
+ local _path=$2;
+
+ ls -a $_path | grep $entry_point;
+
+ if [ $? -eq 0 ]; then
+ echo 'Y';
+ else
+ echo 'N';
+ fi
+}
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+
+TEST glusterd;
+
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
+
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 --xlator-option *-snapview-client.snapdir-entry-path=/dir $M0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $N0;
+for i in {1..10} ; do echo "file" > $M0/file$i ; done
+
+
+for i in {11..20} ; do echo "file" > $M0/file$i ; done
+
+mkdir $M0/dir;
+
+for i in {1..10} ; do echo "file" > $M0/dir/file$i ; done
+
+mkdir $M0/dir1;
+mkdir $M0/dir2;
+
+for i in {1..10} ; do echo "foo" > $M0/dir1/foo$i ; done
+for i in {1..10} ; do echo "foo" > $M0/dir2/foo$i ; done
+
+for i in {11..20} ; do echo "foo" > $M0/dir1/foo$i ; done
+for i in {11..20} ; do echo "foo" > $M0/dir2/foo$i ; done
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp;
+TEST $CLI snapshot activate snap1;
+
+TEST $CLI volume set $V0 features.uss enable;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists .snaps $M0/dir
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists .snaps $N0/dir
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists .snaps $M0/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists .snaps $N0/dir1
+
+TEST $CLI volume set $V0 features.show-snapshot-directory enable;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/dir
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $N0/dir
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $N0/dir1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_entry_point_exists ".snaps" $M0/dir
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $N0/dir
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $M0/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $N0/dir1
+
+TEST $CLI volume set $V0 features.show-snapshot-directory disable;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/dir
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $N0/dir
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $M0/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" STAT $N0/dir1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $M0/dir
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $N0/dir
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $M0/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'N' check_entry_point_exists ".snaps" $N0/dir1
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1178079.t b/tests/bugs/snapshot/bug-1178079.t
new file mode 100644
index 00000000000..a1a6b0b9d49
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1178079.t
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1..2};
+TEST $CLI volume start $V0;
+
+TEST $CLI volume set $V0 features.uss on;
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0;
+
+TEST touch $M0/file;
+
+TEST getfattr -d -m . -e hex $M0/file;
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1202436-calculate-quota-cksum-during-snap-restore.t b/tests/bugs/snapshot/bug-1202436-calculate-quota-cksum-during-snap-restore.t
new file mode 100644
index 00000000000..addc05917d8
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1202436-calculate-quota-cksum-during-snap-restore.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST verify_lvm_version
+TEST launch_cluster 2
+TEST setup_lvm 1
+
+TEST $CLI_1 volume create $V0 $H1:$L1
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+# Quota is not working with the cluster test framework.
+# Need to check why; until then this is commented out:
+#TEST $CLI_1 volume quota $V0 enable
+#EXPECT 'on' volinfo_field $V0 'features.quota'
+
+TEST $CLI_1 snapshot create ${V0}_snap $V0
+EXPECT '1' get_snap_count CLI_1 $V0
+
+TEST $CLI_1 volume stop $V0
+EXPECT 'Stopped' volinfo_field $V0 'Status'
+
+TEST $CLI_1 snapshot restore $($CLI_1 snapshot list)
+EXPECT '0' get_snap_count CLI_1 $V0
+
+TEST $CLI_1 peer probe $H2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1205592.t b/tests/bugs/snapshot/bug-1205592.t
new file mode 100644
index 00000000000..f7d99345c8d
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1205592.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+TEST verify_lvm_version
+TEST launch_cluster 3
+TEST setup_lvm 3
+
+TEST $CLI_1 peer probe $H2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2 $H3:$L3
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+
+kill_glusterd 3
+# If glusterd-quorum is not met then snapshot-create should fail
+TEST ! $CLI_1 snapshot create ${V0}_snap1 ${V0} no-timestamp
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1227646.t b/tests/bugs/snapshot/bug-1227646.t
new file mode 100644
index 00000000000..9b73dfdb32f
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1227646.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+#TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
+TEST $CLI volume create $V0 $H0:$L2 $H0:$L3;
+TEST $CLI volume start $V0;
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp;
+TEST $CLI volume stop $V0
+TEST $CLI snapshot restore snap1;
+TEST $CLI volume start $V0
+
+TEST pkill gluster
+TEST glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'
+
+TEST $CLI volume stop $V0
+
+cleanup;
+
diff --git a/tests/bugs/snapshot/bug-1232430.t b/tests/bugs/snapshot/bug-1232430.t
new file mode 100755
index 00000000000..50411b1dbfc
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1232430.t
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+TEST verify_lvm_version;
+TEST glusterd -LDEBUG;
+TEST pidof glusterd;
+
+TEST setup_lvm 1
+
+TEST $CLI volume create $V0 $H0:$L1/brick_dir
+TEST $CLI volume start $V0
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp
+
+TEST $CLI snapshot delete snap1
+
+TEST $CLI volume stop $V0 force
+TEST $CLI volume delete $V0
+cleanup
diff --git a/tests/bugs/snapshot/bug-1250387.t b/tests/bugs/snapshot/bug-1250387.t
new file mode 100755
index 00000000000..c9039e37f73
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1250387.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST init_n_bricks 1;
+TEST setup_lvm 1;
+
+TEST glusterd;
+
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$L1;
+
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp;
+
+EXPECT "description" get-cmd-field-xml "snapshot info snap1" "description"
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1260848.t b/tests/bugs/snapshot/bug-1260848.t
new file mode 100644
index 00000000000..6455d8297b2
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1260848.t
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume set $V0 nfs.disable false
+TEST $CLI volume set $V0 uss on
+TEST $CLI volume start $V0
+
+## Wait for volume to register with rpc.mountd
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+## Mount NFS
+TEST mount_nfs $H0:/$V0 $N0 nolock;
+
+TEST df -h $N0
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1275616.t b/tests/bugs/snapshot/bug-1275616.t
new file mode 100755
index 00000000000..dcaeae30f90
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1275616.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+TEST verify_lvm_version;
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST setup_lvm 1
+
+TEST $CLI volume create $V0 $H0:$L1
+TEST $CLI volume start $V0
+TEST $CLI snapshot config activate-on-create enable
+
+TEST $CLI snapshot config $V0 snap-max-hard-limit 100
+TEST $CLI snapshot create snap1 $V0 no-timestamp
+
+TEST $CLI snapshot config $V0 snap-max-hard-limit 150
+TEST $CLI snapshot create snap2 $V0 no-timestamp
+
+TEST $CLI snapshot config $V0 snap-max-hard-limit 200
+TEST $CLI snapshot create snap3 $V0 no-timestamp
+EXPECT '197' snap_info_volume CLI "Snaps Available" $V0;
+
+TEST $CLI volume stop $V0
+
+# Restore the snapshots and verify the snap-max-hard-limit
+# and the Snaps Available
+TEST $CLI snapshot restore snap1
+EXPECT '98' snap_info_volume CLI "Snaps Available" $V0;
+EXPECT '100' snap_config_volume CLI 'snap-max-hard-limit' $V0
+
+TEST $CLI snapshot restore snap2
+EXPECT '149' snap_info_volume CLI "Snaps Available" $V0;
+EXPECT '150' snap_config_volume CLI 'snap-max-hard-limit' $V0
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Yes" get_snap_brick_status snap3
+
+#Take a clone and verify it inherits snapshot's snap-max-hard-limit
+TEST $CLI snapshot clone clone1 snap3
+
+EXPECT '149' snap_info_volume CLI "Snaps Available" $V0;
+EXPECT '150' snap_config_volume CLI 'snap-max-hard-limit' $V0
+
+EXPECT '200' snap_info_volume CLI "Snaps Available" clone1
+EXPECT '200' snap_config_volume CLI 'snap-max-hard-limit' clone1
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1279327.t b/tests/bugs/snapshot/bug-1279327.t
new file mode 100644
index 00000000000..4e4be6eeea6
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1279327.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST verify_lvm_version;
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST init_n_bricks 3
+TEST setup_lvm 3
+
+TEST $CLI volume create $V0 $H0:$L1
+TEST $CLI volume start $V0
+TEST $CLI volume quota $V0 enable
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp
+TEST $CLI snapshot activate snap1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Yes" get_snap_brick_status snap1
+
+#Take a clone of the snapshot and verify that the clone volume starts
+TEST $CLI snapshot clone clone1 snap1
+TEST $CLI volume start clone1
+EXPECT 'Started' volinfo_field clone1 'Status';
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1316437.t b/tests/bugs/snapshot/bug-1316437.t
new file mode 100644
index 00000000000..300c03c97f5
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1316437.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST glusterd
+
+# Intentionally not carving LVs for this, as we will not be taking
+# snapshots in this testcase
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+TEST $CLI volume start $V0;
+
+TEST $CLI volume set $V0 features.uss enable;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+killall glusterd glusterfsd glusterfs
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 'N' check_if_snapd_exist
+
+glusterd
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1322772-real-path-fix-for-snapshot.t b/tests/bugs/snapshot/bug-1322772-real-path-fix-for-snapshot.t
new file mode 100644
index 00000000000..488bd462a01
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1322772-real-path-fix-for-snapshot.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../include.rc
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=2
+
+TEST verify_lvm_version
+TEST init_n_bricks 2
+TEST setup_lvm 2
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$L1
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI volume create $V1 $H0:$L2
+EXPECT 'Created' volinfo_field $V1 'Status'
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST $CLI volume start $V1
+EXPECT 'Started' volinfo_field $V1 'Status'
+
+TEST $CLI snapshot config activate-on-create enable
+TEST $CLI snapshot create ${V0}_snap $V0 no-timestamp
+TEST $CLI snapshot create ${V1}_snap $V1 no-timestamp
+
+# Simulate a node reboot by unmounting the snap bricks and then deleting
+# them. Once glusterd restarts, it should be able to reconstruct and
+# remount the snap bricks
+snap_bricks=`$CLI snap status | grep "Brick Path" | awk -F ":" '{print $3}'`
+
+TEST $CLI volume stop $V1
+TEST $CLI snapshot restore ${V1}_snap;
+
+pkill gluster
+for snap_brick in $snap_bricks
+do
+ echo "Unmounting snap brick" $snap_brick
+ EXPECT_WITHIN_TEST_IN_LOOP $UMOUNT_TIMEOUT "Y" force_umount $snap_brick
+done
+
+rm -rf $snap_bricks
+
+TEST glusterd
+TEST pidof glusterd
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $L1
+
+cleanup
+
diff --git a/tests/bugs/snapshot/bug-1399598-uss-with-ssl.t b/tests/bugs/snapshot/bug-1399598-uss-with-ssl.t
new file mode 100755
index 00000000000..f4e4e6ec4d2
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1399598-uss-with-ssl.t
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../traps.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../ssl.rc
+
+function file_exists
+{
+ if [ -f $1 ]; then echo "Y"; else echo "N"; fi
+}
+
+function volume_online_brick_count
+{
+ $CLI volume status $V0 | awk '$1 == "Brick" && $6 != "N/A" { print $6}' | wc -l;
+}
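+
+# Illustrative only: the awk filter above assumes "volume status" brick
+# lines shaped roughly like the following, with the brick PID in the 6th
+# column ("N/A" when the brick is down; values made up):
+#
+#   Brick host1:/d/backends/patchy/b1    49152    0    Y    1234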
+
+function total_online_bricks
+{
+ # This will count snapd, which isn't really a brick, but callers can
+ # account for that so it's OK.
+ find $GLUSTERD_PIDFILEDIR -name '*.pid' | wc -l
+}
+
+cleanup;
+
+# Initialize the test setup
+TEST setup_lvm 1;
+
+TEST create_self_signed_certs
+
+# Start glusterd
+TEST glusterd
+TEST pidof glusterd;
+#TEST $CLI volume set all cluster.brick-multiplex on
+
+# Create and start the volume
+TEST $CLI volume create $V0 $H0:$L1/b1;
+
+TEST $CLI volume start $V0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" volume_online_brick_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" total_online_bricks
+
+# Mount the volume and create some files
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+TEST touch $M0/file;
+
+# Enable activate-on-create
+TEST $CLI snapshot config activate-on-create enable;
+
+# Create a snapshot
+TEST $CLI snapshot create snap1 $V0 no-timestamp;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" total_online_bricks
+
+TEST $CLI volume set $V0 features.uss enable;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" total_online_bricks
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/file
+# Volume set can trigger a graph switch, so chances are this request is
+# sent to the old graph, which will not have .snaps. Therefore we
+# wait for some time.
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/.snaps/snap1/file
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+# Enable management encryption
+touch $GLUSTERD_WORKDIR/secure-access
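+# (glusterd and clients switch the management connection to TLS when this
+# marker file exists, reusing the certs created earlier by
+# create_self_signed_certs)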
+killall_gluster
+
+TEST glusterd
+TEST pidof glusterd;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" volume_online_brick_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" total_online_bricks
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+# Mount the volume
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/file
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/.snaps/snap1/file
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+# Enable I/O encryption
+TEST $CLI volume set $V0 client.ssl on
+TEST $CLI volume set $V0 server.ssl on
+
+killall_gluster
+
+TEST glusterd
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" volume_online_brick_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" total_online_bricks
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+
+# Mount the volume
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/file
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/.snaps/snap1/file
+
+TEST $CLI snapshot delete all
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t b/tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t
new file mode 100644
index 00000000000..04a85db0c1a
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t
@@ -0,0 +1,133 @@
+#!/bin/bash
+
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function create_snapshots() {
+ $CLI_1 snapshot create ${V0}_snap ${V0} no-timestamp &
+ PID_1=$!
+
+ $CLI_1 snapshot create ${V1}_snap ${V1} no-timestamp &
+ PID_2=$!
+
+ wait $PID_1 $PID_2
+}
+
+function activate_snapshots() {
+ $CLI_1 snapshot activate ${V0}_snap &
+ PID_1=$!
+
+ $CLI_1 snapshot activate ${V1}_snap &
+ PID_2=$!
+
+ wait $PID_1 $PID_2
+}
+
+function deactivate_snapshots() {
+ $CLI_1 snapshot deactivate ${V0}_snap &
+ PID_1=$!
+
+ $CLI_1 snapshot deactivate ${V1}_snap &
+ PID_2=$!
+
+ wait $PID_1 $PID_2
+}
+cleanup;
+
+TEST verify_lvm_version;
+# Create cluster with 3 nodes
+TEST launch_cluster 3;
+TEST setup_lvm 3
+
+TEST $CLI_1 peer probe $H2;
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count;
+
+# Create volumes
+TEST $CLI_1 volume create $V0 $H1:$L1
+TEST $CLI_2 volume create $V1 $H2:$L2 $H3:$L3
+
+# Start volumes
+TEST $CLI_1 volume start $V0
+TEST $CLI_2 volume start $V1
+
+TEST $CLI_1 snapshot config activate-on-create enable
+
+# Snapshot Operations
+create_snapshots
+
+EXPECT 'Started' snapshot_status ${V0}_snap;
+EXPECT 'Started' snapshot_status ${V1}_snap;
+
+deactivate_snapshots
+
+EXPECT 'Stopped' snapshot_status ${V0}_snap;
+EXPECT 'Stopped' snapshot_status ${V1}_snap;
+
+activate_snapshots
+
+EXPECT 'Started' snapshot_status ${V0}_snap;
+EXPECT 'Started' snapshot_status ${V1}_snap;
+
+# This function gets the snap id from the snap info command and
+# checks the system mount points against that snap id.
+function mounted_snaps
+{
+ snap_id=`$CLI_1 snap info $1_snap | grep "Snap Volume Name" |
+ awk -F ":" '{print $2}'`
+ echo `mount | grep $snap_id | wc -l`
+}
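+
+# Illustrative only: "snap info" is assumed to print a line shaped roughly
+# like the following (value made up), whose right-hand side is the snap
+# volume name that the snap bricks are mounted under:
+#
+#   Snap Volume Name          : 0f1a2b3c4d5e6f708192a3b4c5d6e7f8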
+
+EXPECT "1" mounted_snaps ${V0}
+EXPECT "2" mounted_snaps ${V1}
+
+deactivate_snapshots
+
+EXPECT "0" mounted_snaps ${V0}
+EXPECT "0" mounted_snaps ${V1}
+
+# This part of the test validates that updates are properly
+# handled during handshake.
+
+activate_snapshots
+
+EXPECT 'Started' snapshot_status ${V0}_snap;
+EXPECT 'Started' snapshot_status ${V1}_snap;
+
+kill_glusterd 2
+
+deactivate_snapshots
+EXPECT 'Stopped' snapshot_status ${V0}_snap;
+EXPECT 'Stopped' snapshot_status ${V1}_snap;
+
+TEST start_glusterd 2
+
+# Updates from the peer should be reflected: the snap was deactivated while
+# the glusterd process was down, so the mount point should no longer exist.
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" mounted_snaps ${V0}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" mounted_snaps ${V1}
+
+# The snap import synctask might still be updating the data, so allow
+# some buffer time to be on the safer side
+sleep 2
+
+kill_glusterd 2
+activate_snapshots
+EXPECT 'Started' snapshot_status ${V0}_snap;
+EXPECT 'Started' snapshot_status ${V1}_snap;
+TEST start_glusterd 2
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count;
+
+# Updates from the peer should be reflected: the snap was activated while
+# the glusterd process was down, so the mount point should exist.
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" mounted_snaps ${V0}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" mounted_snaps ${V1}
+
+cleanup;
+# run first!
+#G_TESTDEF_TEST_STATUS_CENTOS6=BRICK_MUX_BAD_TEST,BUG=1743069
diff --git a/tests/bugs/snapshot/bug-1512451-snapshot-creation-failed-after-brick-reset.t b/tests/bugs/snapshot/bug-1512451-snapshot-creation-failed-after-brick-reset.t
new file mode 100644
index 00000000000..53b274e8819
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1512451-snapshot-creation-failed-after-brick-reset.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+TEST verify_lvm_version
+TEST launch_cluster 2
+TEST setup_lvm 2
+
+TEST $CLI_1 peer probe $H2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$L1/B1 $H2:$L2/B1
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST $CLI_1 snapshot create ${V0}_snap1 ${V0} no-timestamp
+TEST snapshot_exists 1 ${V0}_snap1
+
+TEST $CLI_1 snapshot delete ${V0}_snap1
+TEST ! snapshot_exists 1 ${V0}_snap1
+
+TEST $CLI_1 volume reset-brick $V0 $H1:$L1/B1 start
+TEST $CLI_1 volume reset-brick $V0 $H1:$L1/B1 $H1:$L1/B1 commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" cluster_brick_up_status 1 $V0 $H1 $L1/B1
+
+TEST $CLI_1 snapshot create ${V0}_snap1 ${V0} no-timestamp
+TEST snapshot_exists 1 ${V0}_snap1
+
+TEST $CLI_1 snapshot delete ${V0}_snap1
+TEST ! snapshot_exists 1 ${V0}_snap1
+
+cleanup;
diff --git a/tests/bugs/snapshot/bug-1597662.t b/tests/bugs/snapshot/bug-1597662.t
new file mode 100644
index 00000000000..f582930476a
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1597662.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST init_n_bricks 3;
+TEST setup_lvm 3;
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
+TEST $CLI volume start $V0;
+
+snap_path=/var/run/gluster/snaps
+
+TEST $CLI snapshot create snap1 $V0 no-timestamp;
+
+$CLI snapshot activate snap1;
+
+EXPECT 'Started' snapshot_status snap1;
+
+# This function checks for the entry /var/run/gluster/snaps/<snap-name>
+# for the given snap name
+
+function is_snap_path
+{
+ echo `ls $snap_path | grep snap1 | wc -l`
+}
+
+# snap is active so snap_path should exist
+EXPECT "1" is_snap_path
+
+$CLI snapshot deactivate snap1;
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} 'Stopped' snapshot_status snap1
+# snap is deactivated so snap_path should not exist
+EXPECT "0" is_snap_path
+
+# activate snap again
+$CLI snapshot activate snap1;
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} 'Started' snapshot_status snap1
+
+# snap is active so snap_path should exist
+EXPECT "1" is_snap_path
+
+# delete snap now
+TEST $CLI snapshot delete snap1;
+
+# snap is deleted so snap_path should not exist
+EXPECT "0" is_snap_path
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
+
diff --git a/tests/bugs/snapshot/bug-1618004-fix-memory-corruption-in-snap-import.t b/tests/bugs/snapshot/bug-1618004-fix-memory-corruption-in-snap-import.t
new file mode 100644
index 00000000000..a2c004e435e
--- /dev/null
+++ b/tests/bugs/snapshot/bug-1618004-fix-memory-corruption-in-snap-import.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+. $(dirname $0)/../../cluster.rc
+
+function get_volume_info ()
+{
+ local var=$1
+ $CLI_1 volume info $V0 | grep "^$var" | sed 's/.*: //'
+}
+
+cleanup;
+
+TEST verify_lvm_version
+TEST launch_cluster 2
+TEST setup_lvm 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2
+EXPECT "$V0" get_volume_info 'Volume Name';
+EXPECT 'Created' get_volume_info 'Status';
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' get_volume_info 'Status';
+
+
+# Setting system limit
+TEST $CLI_1 snapshot config activate-on-create enable
+
+TEST $CLI_1 snapshot create snap1 $V0 no-timestamp description "test"
+TEST kill_glusterd 1
+#Deactivate the snapshot to change the snap version, so that a handshake
+#happens when glusterd is restarted
+TEST $CLI_2 snapshot deactivate snap1
+TEST start_glusterd 1
+
+#Wait till the handshake completes
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} 'Stopped' snapshot_status snap1
+
+#Delete the snapshot; without the fix, the delete leads to an assertion failure
+$CLI_1 snapshot delete all
+EXPECT '0' get_snap_count CLI_1;
+cleanup;
+
diff --git a/tests/bugs/stripe/bug-1002207.t b/tests/bugs/stripe/bug-1002207.t
new file mode 100644
index 00000000000..c58a6e20ab8
--- /dev/null
+++ b/tests/bugs/stripe/bug-1002207.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 stripe 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs --attribute-timeout=0 --entry-timeout=0 -s $H0 --volfile-id=$V0 $M0;
+TEST dd if=/dev/zero of=$M0/file.data bs=1024 count=1024;
+
+function xattr_query_check()
+{
+ local path=$1
+ local xa_name=$2
+
+ local ret=$(getfattr -n $xa_name $path 2>&1 | grep -o "$xa_name: No such attribute" | wc -l)
+ echo $ret
+}
+
+function set_xattr()
+{
+ local path=$1
+ local xa_name=$2
+ local xa_val=$3
+
+ setfattr -n $xa_name -v $xa_val $path
+ echo $?
+}
+
+EXPECT 0 set_xattr $M0/file.data "trusted.name" "testofafairlylongxattrstringthatbutnotlongenoughtofailmemoryallocation"
+EXPECT 0 xattr_query_check $M0/file.data "trusted.name"
+
+## Finish up
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/bugs/stripe/bug-1111454.t b/tests/bugs/stripe/bug-1111454.t
new file mode 100644
index 00000000000..1509dd7b1a2
--- /dev/null
+++ b/tests/bugs/stripe/bug-1111454.t
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#symlink resolution should succeed
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 stripe 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file
+TEST ln -s file $M0/dir/symlinkfile
+TEST ls -lR $M0
+cleanup
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/bugs/trace/bug-797171.t b/tests/bugs/trace/bug-797171.t
new file mode 100755
index 00000000000..b823e477229
--- /dev/null
+++ b/tests/bugs/trace/bug-797171.t
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+TEST $CLI volume set $V0 debug.trace marker;
+TEST $CLI volume set $V0 debug.log-history on
+
+TEST $CLI volume start $V0;
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 \
+$M0;
+
+touch $M0/{1..22};
+rm -f $M0/*;
+
+pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/);
+brick_pid=$(cat $GLUSTERD_PIDFILEDIR/vols/$V0/$pid_file);
+
+mkdir $statedumpdir/statedump_tmp/;
+echo "path=$statedumpdir/statedump_tmp" > $statedumpdir/glusterdump.options;
+echo "all=yes" >> $statedumpdir/glusterdump.options;
+
+TEST $CLI volume statedump $V0 history;
+
+file_name=$(ls $statedumpdir/statedump_tmp);
+TEST grep "xlator.debug.trace.history" $statedumpdir/statedump_tmp/$file_name;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+rm -rf $statedumpdir/statedump_tmp;
+rm -f $statedumpdir/glusterdump.options;
+
+cleanup;
diff --git a/tests/bugs/transport/bug-873367.t b/tests/bugs/transport/bug-873367.t
new file mode 100755
index 00000000000..8070bc1b83c
--- /dev/null
+++ b/tests/bugs/transport/bug-873367.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+SSL_BASE=/etc/ssl
+SSL_KEY=$SSL_BASE/glusterfs.key
+SSL_CERT=$SSL_BASE/glusterfs.pem
+SSL_CA=$SSL_BASE/glusterfs.ca
+
+cleanup;
+rm -f $SSL_BASE/glusterfs.*
+mkdir -p $B0/1
+mkdir -p $M0
+
+TEST openssl genrsa -out $SSL_KEY 2048
+TEST openssl req -new -x509 -key $SSL_KEY -subj /CN=Anyone -out $SSL_CERT
+ln $SSL_CERT $SSL_CA
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/1
+TEST $CLI volume set $V0 server.ssl on
+TEST $CLI volume set $V0 client.ssl on
+TEST $CLI volume set $V0 ssl.certificate-depth 6
+TEST $CLI volume set $V0 ssl.cipher-list HIGH
+TEST $CLI volume set $V0 auth.ssl-allow Anyone
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+echo some_data > $M0/data_file
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+# If the bug is not fixed, the next mount will fail.
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
+EXPECT some_data cat $M0/data_file
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/unclassified/bug-1034085.t b/tests/bugs/unclassified/bug-1034085.t
new file mode 100644
index 00000000000..aacaa24d642
--- /dev/null
+++ b/tests/bugs/unclassified/bug-1034085.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+#Test case: Check the creation of indices/xattrop dir as soon as brick comes up.
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+#Create a volume
+TEST glusterd;
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1};
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST mkdir -p $B0/${V0}-0/.glusterfs/indices/
+TEST touch $B0/${V0}-0/.glusterfs/indices/xattrop
+
+#Volume start should fail since the xattrop dir cannot be created (a file is in its place)
+TEST ! $CLI volume start $V0;
+
+TEST rm $B0/${V0}-0/.glusterfs/indices/xattrop
+
+#Volume start should work now
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+#Check for the existence of indices/xattrop dir
+TEST [ -d $B0/${V0}-0/.glusterfs/indices/xattrop/ ];
+
+cleanup;
diff --git a/tests/bugs/unclassified/bug-1357397.t b/tests/bugs/unclassified/bug-1357397.t
new file mode 100644
index 00000000000..e2ec6f4d253
--- /dev/null
+++ b/tests/bugs/unclassified/bug-1357397.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2}
+
+TEST $CLI volume start $V0
+
+TEST $CLI volume set $V0 features.trash on
+TEST $CLI volume set $V0 features.trash-internal-op on
+
+TEST [ -e $B0/${V0}1/.trashcan ]
+
+TEST [ -e $B0/${V0}1/.trashcan/internal_op ]
+
+TEST $CLI volume stop $V0
+
+rm -rf $B0/${V0}1/.trashcan/internal_op
+
+TEST [ ! -e $B0/${V0}1/.trashcan/internal_op ]
+
+TEST $CLI volume start $V0 force
+
+TEST [ -e $B0/${V0}1/.trashcan/internal_op ]
+
+cleanup
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=1385758
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1385758
diff --git a/tests/bugs/unclassified/bug-874498.t b/tests/bugs/unclassified/bug-874498.t
new file mode 100644
index 00000000000..2aa9b168a8a
--- /dev/null
+++ b/tests/bugs/unclassified/bug-874498.t
@@ -0,0 +1,64 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2;
+TEST $CLI volume start $V0;
+
+
+TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+B0_hiphenated=`echo $B0 | tr '/' '-'`
+kill_brick $V0 $H0 $B0/brick1
+
+echo "GLUSTER FILE SYSTEM" > $M0/FILE1
+echo "GLUSTER FILE SYSTEM" > $M0/FILE2
+
+FILEN=$B0"/brick2"
+XATTROP=$FILEN/.glusterfs/indices/xattrop
+
+function get_gfid()
+{
+path_of_file=$1
+
+gfid_value=`getfattr -d -m . $path_of_file -e hex 2>/dev/null | grep trusted.gfid | grep -v gfid2path | cut --complement -c -15 | sed 's/\([a-f0-9]\{8\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)/\1-\2-\3-\4-/'`
+
+echo $gfid_value
+}
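+
+# Illustrative only (made-up gfid): the pipeline above turns a raw hex
+# attribute such as
+#   trusted.gfid=0x6aed16d8b51b46d1bfa04c4f6d4fc0a4
+# into the dashed uuid form used for the xattrop index entries:
+#   6aed16d8-b51b-46d1-bfa0-4c4f6d4fc0a4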
+
+GFID_ROOT=`get_gfid $B0/brick2`
+GFID_FILE1=`get_gfid $B0/brick2/FILE1`
+GFID_FILE2=`get_gfid $B0/brick2/FILE2`
+
+
+count=0
+for i in `ls $XATTROP`
+do
+ if [ "$i" == "$GFID_ROOT" ] || [ "$i" == "$GFID_FILE1" ] || [ "$i" == "$GFID_FILE2" ]
+ then
+ count=$(( count + 1 ))
+ fi
+done
+
+EXPECT "3" echo $count
+
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+
+##Expected number of entries are 0 in the .glusterfs/indices/xattrop directory
+EXPECT_WITHIN $HEAL_TIMEOUT '0' count_sh_entries $FILEN;
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/unclassified/bug-991622.t b/tests/bugs/unclassified/bug-991622.t
new file mode 100644
index 00000000000..17b37a7767d
--- /dev/null
+++ b/tests/bugs/unclassified/bug-991622.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+#This tests that no fd leaks are observed in unlink/rename in open-behind
+function leaked_fds {
+ ls -l /proc/$(get_brick_pid $V0 $H0 $B0/$V0)/fd | grep deleted
+}
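+
+# An fd held open on an unlinked file shows up in /proc/<pid>/fd as a
+# symlink suffixed with "(deleted)", which is what the grep above keys on.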
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume set $V0 open-behind on
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' "$M0/testfile1"
+TEST fd_write $fd1 "content"
+
+TEST fd2=`fd_available`
+TEST fd_open $fd2 'w' "$M0/testfile2"
+TEST fd_write $fd2 "content"
+
+TEST touch $M0/a
+TEST rm $M0/testfile1
+TEST mv $M0/a $M0/testfile2
+TEST fd_close $fd1
+TEST fd_close $fd2
+TEST ! leaked_fds
+cleanup;
diff --git a/tests/bugs/upcall/bug-1227204.t b/tests/bugs/upcall/bug-1227204.t
new file mode 100755
index 00000000000..fc393b1837f
--- /dev/null
+++ b/tests/bugs/upcall/bug-1227204.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# This regression test tries to ensure that setting quota limit-usage works
+# with features.cache-invalidation on.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6};
+TEST $CLI volume start $V0;
+
+TEST $CLI volume set $V0 features.cache-invalidation on;
+TEST $CLI volume quota $V0 enable;
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+TEST mkdir -p $M0/1/2;
+TEST $CLI volume quota $V0 limit-usage /1/2 100MB 70%;
+
+TEST $CLI volume status $V0
+TEST $CLI volume stop $V0
+
+cleanup;
diff --git a/tests/bugs/upcall/bug-1369430.t b/tests/bugs/upcall/bug-1369430.t
new file mode 100755
index 00000000000..f53c17a1495
--- /dev/null
+++ b/tests/bugs/upcall/bug-1369430.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## 1. Start glusterd
+TEST glusterd;
+
+## 2. Let's create the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3};
+
+## 3. Start the volume
+TEST $CLI volume start $V0
+
+## 4. Enable the upcall xlator, and increase the md-cache timeout to max
+TEST $CLI volume set $V0 features.cache-invalidation on
+TEST $CLI volume set $V0 features.cache-invalidation-timeout 600
+TEST $CLI volume set $V0 performance.cache-invalidation on
+TEST $CLI volume set $V0 performance.md-cache-timeout 600
+TEST $CLI volume set $V0 performance.cache-samba-metadata on
+
+## 8. Create two gluster mounts
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M1
+
+## 10. Create directory and files from the M0
+TEST mkdir $M0/dir1
+TEST touch $M0/dir1/file{1..5}
+
+## 12. Access the files via readdirp, from M1
+TEST ls -l $M1/dir1/
+
+# Change the stat of one of the files from M0 and wait for it to
+# invalidate the md-cache of the other mount M1
+echo "hello" > $M0/dir1/file2
+sleep 2;
+
+## 13. Expect a non-zero size when stat'd from M1
+EXPECT_NOT "0" stat -c %s $M1/dir1/file2
+
+cleanup;
diff --git a/tests/bugs/upcall/bug-1394131.t b/tests/bugs/upcall/bug-1394131.t
new file mode 100755
index 00000000000..b371ce4e682
--- /dev/null
+++ b/tests/bugs/upcall/bug-1394131.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## 1. Start glusterd
+TEST glusterd;
+
+## 2. Let's create the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3};
+
+## 3. Enable the upcall xlator, and increase the md-cache timeout to max
+TEST $CLI volume set $V0 features.cache-invalidation on
+TEST $CLI volume set $V0 features.cache-invalidation-timeout 600
+TEST $CLI volume set $V0 indexing on
+
+## 6. Start the volume
+TEST $CLI volume start $V0
+
+## 7. Create two gluster mounts
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+## 8. Create directory and files from the M0
+TEST touch $M0/file1
+TEST mv $M0/file1 $M0/file2
+
+cleanup;
diff --git a/tests/bugs/upcall/bug-1422776.t b/tests/bugs/upcall/bug-1422776.t
new file mode 100755
index 00000000000..cb249ce1cd2
--- /dev/null
+++ b/tests/bugs/upcall/bug-1422776.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+
+## Let's create the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3};
+
+## Enable the upcall xlator, and increase the md-cache timeout to max
+TEST $CLI volume set $V0 features.cache-invalidation on
+TEST $CLI volume set $V0 features.cache-invalidation-timeout 600
+TEST $CLI volume set $V0 indexing on
+
+## Start the volume
+TEST $CLI volume start $V0
+TEST $CLI volume quota $V0 enable
+
+## Create two gluster mounts
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+## Create directory and files from the M0
+TEST touch $M0/file1
+TEST mv $M0/file1 $M0/file2
+
+cleanup;
diff --git a/tests/bugs/upcall/bug-1458127.t b/tests/bugs/upcall/bug-1458127.t
new file mode 100755
index 00000000000..e844f37f1d3
--- /dev/null
+++ b/tests/bugs/upcall/bug-1458127.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0}
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI volume set $V0 performance.nl-cache on
+TEST $CLI volume set $V0 nl-cache-positive-entry on
+TEST $CLI volume set $V0 nl-cache-timeout 2
+TEST $CLI volume set $V0 features.cache-invalidation on
+TEST $CLI volume set $V0 features.cache-invalidation-timeout 2
+TEST $CLI volume set $V0 md-cache-timeout 20
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M1
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/xyz
+#Wait until upcall clears the fact that M0 had accessed dir
+sleep 4
+TEST mv $M0/dir/xyz $M0/dir/xyz1
+TEST ! ls $M0/dir/file1
+TEST touch $M1/dir/file1
+TEST ls $M0/dir/file1
+TEST ls $M0/dir/file1
+
+cleanup;
diff --git a/tests/bugs/upcall/bug-upcall-stat.t b/tests/bugs/upcall/bug-upcall-stat.t
new file mode 100755
index 00000000000..0ba944ec441
--- /dev/null
+++ b/tests/bugs/upcall/bug-upcall-stat.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1};
+
+TEST $CLI volume set $V0 features.cache-invalidation on
+TEST $CLI volume set $V0 features.cache-invalidation-timeout 600
+TEST $CLI volume set $V0 performance.cache-invalidation on
+TEST $CLI volume set $V0 performance.md-cache-timeout 600
+TEST $CLI volume set $V0 performance.cache-samba-metadata on
+
+#TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+
+## 5. Create two gluster mounts
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M1
+
+TEST $CLI volume profile $V0 start
+
+## 8. Create a file
+TEST touch $M0/file1
+
+TEST "setfattr -n user.DOSATTRIB -v "abc" $M0/file1"
+TEST "getfattr -n user.DOSATTRIB $M1/file1 | grep -q abc"
+TEST "setfattr -n user.DOSATTRIB -v "xyz" $M0/file1"
+sleep 2; #There is a very small window in which the next getxattr can reach
+         #md-cache before the cache-invalidation caused by the previous
+         #setxattr does; hence the 2-sec sleep. It should also not
+         #exceed the 600-sec timeout set above.
+TEST "getfattr -n user.DOSATTRIB $M1/file1 | grep -q xyz"
+
+$CLI volume profile $V0 info | grep -q CI_XATTR
+EXPECT '0' echo $?
diff --git a/tests/bugs/write-behind/bug-1058663.c b/tests/bugs/write-behind/bug-1058663.c
new file mode 100644
index 00000000000..aedf97d7487
--- /dev/null
+++ b/tests/bugs/write-behind/bug-1058663.c
@@ -0,0 +1,123 @@
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <signal.h>
+#include <unistd.h>
+
+#define FILE_SIZE 1048576
+
+/* number of tests to run */
+#define RUN_LOOP 1000
+
+/* number of SIGBUS before exiting */
+#define MAX_SIGBUS 1
+static int expect_sigbus;
+static int sigbus_received;
+
+/* test for truncate()/seek()/write()/mmap()
+ * There should be no SIGBUS triggered.
+ */
+void
+seek_write(char *filename)
+{
+ int fd;
+ uint8_t *map;
+ int i;
+
+ fd = open(filename, O_RDWR | O_CREAT | O_TRUNC, 0600);
+ lseek(fd, FILE_SIZE - 1, SEEK_SET);
+ write(fd, "\xff", 1);
+
+ map = mmap(NULL, FILE_SIZE, PROT_READ, MAP_PRIVATE, fd, 0);
+ for (i = 0; i < (FILE_SIZE - 1); i++) {
+ if (map[i] != 0) /* should never be true */
+ abort();
+ }
+ munmap(map, FILE_SIZE);
+
+ close(fd);
+}
+
+int
+read_after_eof(char *filename)
+{
+ int ret = 0;
+ int fd;
+ char *data;
+ uint8_t *map;
+
+ fd = open(filename, O_RDWR | O_CREAT | O_TRUNC, 0600);
+ lseek(fd, FILE_SIZE - 1, SEEK_SET);
+ write(fd, "\xff", 1);
+
+    /* verify that reading after EOF returns 0 */
+    ret = read(fd, data, sizeof(data));
+ if (ret != 0)
+ return 1;
+
+ /* map an area of 1 byte after FILE_SIZE */
+ map = mmap(NULL, 1, PROT_READ, MAP_PRIVATE, fd, FILE_SIZE);
+ /* map[0] is an access after EOF, it should trigger SIGBUS */
+ if (map[0] != 0)
+ /* it is expected that we exit before we get here */
+ if (!sigbus_received)
+ return 1;
+    munmap(map, 1); /* only 1 byte was mapped */
+
+ close(fd);
+
+ return ret;
+}
+
+/* signal handler for SIGBUS */
+void
+catch_sigbus(int signum)
+{
+ switch (signum) {
+#ifdef __NetBSD__
+ /* Depending on architecture, we can get SIGSEGV */
+ case SIGSEGV: /* FALLTHROUGH */
+#endif
+ case SIGBUS:
+ sigbus_received++;
+ if (!expect_sigbus)
+ exit(EXIT_FAILURE);
+ if (sigbus_received >= MAX_SIGBUS)
+ exit(EXIT_SUCCESS);
+ break;
+ default:
+ printf("Unexpected signal received: %d\n", signum);
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ int i = 0;
+
+ if (argc == 1) {
+ printf("Usage: %s <filename>\n", argv[0]);
+ return EXIT_FAILURE;
+ }
+
+#ifdef __NetBSD__
+ /* Depending on architecture, we can get SIGSEGV */
+ signal(SIGSEGV, catch_sigbus);
+#endif
+ signal(SIGBUS, catch_sigbus);
+
+ /* the next test should not trigger SIGBUS */
+ expect_sigbus = 0;
+ for (i = 0; i < RUN_LOOP; i++) {
+ seek_write(argv[1]);
+ }
+
+ /* the next test should trigger SIGBUS */
+ expect_sigbus = 1;
+ if (read_after_eof(argv[1]))
+ return EXIT_FAILURE;
+
+ return EXIT_SUCCESS;
+}
diff --git a/tests/bugs/write-behind/bug-1058663.t b/tests/bugs/write-behind/bug-1058663.t
new file mode 100644
index 00000000000..a900a6d7afa
--- /dev/null
+++ b/tests/bugs/write-behind/bug-1058663.t
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/$V0;
+TEST $CLI volume start $V0;
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+
+# compile the test program and run it
+TEST $CC $(dirname $0)/bug-1058663.c -o $(dirname $0)/bug-1058663;
+TEST $(dirname $0)/bug-1058663 $M0/bug-1058663.bin;
+TEST rm -f $M0/bug-1058663.bin;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/bugs/write-behind/bug-1279730.c b/tests/bugs/write-behind/bug-1279730.c
new file mode 100644
index 00000000000..706ae67b102
--- /dev/null
+++ b/tests/bugs/write-behind/bug-1279730.c
@@ -0,0 +1,149 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <assert.h>
+
+int
+main(int argc, char *argv[])
+{
+    int fd = -1, ret = -1, len = 0;
+    char *path = NULL, *cmd = NULL;
+    char buf[128] = {0};
+    struct stat stbuf = {0};
+    int write_to_child[2] = {0};
+    int write_to_parent[2] = {0};
+
+    assert(argc == 3);
+
+    path = argv[1];
+    cmd = argv[2];
+
+ ret = pipe(write_to_child);
+ if (ret < 0) {
+ fprintf(stderr,
+ "creation of write-to-child pipe failed "
+ "(%s)\n",
+ strerror(errno));
+ goto out;
+ }
+
+ ret = pipe(write_to_parent);
+ if (ret < 0) {
+ fprintf(stderr,
+ "creation of write-to-parent pipe failed "
+ "(%s)\n",
+ strerror(errno));
+ goto out;
+ }
+
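+    /* The child blocks on a pipe, runs the given command (in this test,
+     * one that raises the quota limit) and then signals the parent; the
+     * parent meanwhile writes while the quota is still exceeded. */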
+ ret = fork();
+ switch (ret) {
+ case 0:
+ close(write_to_child[1]);
+ close(write_to_parent[0]);
+
+ /* child, wait for instructions to execute command */
+ ret = read(write_to_child[0], buf, 128);
+ if (ret < 0) {
+ fprintf(stderr, "child: read on pipe failed (%s)\n",
+ strerror(errno));
+ goto out;
+ }
+
+ system(cmd);
+
+ ret = write(write_to_parent[1], "1", 2);
+ if (ret < 0) {
+ fprintf(stderr, "child: write to pipe failed (%s)\n",
+ strerror(errno));
+ goto out;
+ }
+ break;
+
+ case -1:
+ fprintf(stderr, "fork failed (%s)\n", strerror(errno));
+ goto out;
+
+ default:
+ close(write_to_parent[1]);
+ close(write_to_child[0]);
+
+ fd = open(path, O_CREAT | O_RDWR | O_APPEND, S_IRWXU);
+ if (fd < 0) {
+ fprintf(stderr, "open failed (%s)\n", strerror(errno));
+ goto out;
+ }
+
+ len = strlen("test-content") + 1;
+ ret = write(fd, "test-content", len);
+
+ if (ret < len) {
+ fprintf(stderr, "write failed %d (%s)\n", ret, strerror(errno));
+ }
+
+        ret = pread(fd, buf, 128, 0);
+        if ((ret == len) && (strcmp(buf, "test-content") == 0)) {
+            fprintf(stderr,
+                    "read should have failed, since the previous "
+                    "write must have failed with EDQUOT, but it "
+                    "succeeded\n");
+            ret = -1;
+            goto out;
+        }
+
+ ret = write(write_to_child[1], "1", 2);
+ if (ret < 0) {
+ fprintf(stderr, "parent: write to pipe failed (%s)\n",
+ strerror(errno));
+ goto out;
+ }
+
+ ret = read(write_to_parent[0], buf, 128);
+ if (ret < 0) {
+ fprintf(stderr, "parent: read from pipe failed (%s)\n",
+ strerror(errno));
+ goto out;
+ }
+
+        /* This fstat forces a sync of the cached write; now that the
+         * quota limit has been raised, the sync will succeed. Ignore
+         * the return value, as fstat itself may fail with the EDQUOT
+         * error picked up from the cached write's earlier sync
+         * failure. */
+ fstat(fd, &stbuf);
+
+ ret = pread(fd, buf, 128, 0);
+ if (ret != len) {
+ fprintf(stderr,
+ "post cmd read failed %d (data:%s) "
+ "(error:%s)\n",
+ ret, buf, strerror(errno));
+ goto out;
+ }
+
+ if (strcmp(buf, "test-content")) {
+ fprintf(stderr, "wrong data (%s)\n", buf);
+ goto out;
+ }
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
diff --git a/tests/bugs/write-behind/bug-1279730.t b/tests/bugs/write-behind/bug-1279730.t
new file mode 100755
index 00000000000..20447c349d5
--- /dev/null
+++ b/tests/bugs/write-behind/bug-1279730.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/$V0;
+TEST $CLI volume start $V0;
+TEST $CLI volume quota $V0 enable
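+# Set a tiny quota so that the cached write fails with EDQUOT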
+TEST $CLI volume quota $V0 limit-usage / 4
+TEST $CLI volume quota $V0 hard-timeout 0
+TEST $CLI volume quota $V0 soft-timeout 0
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+
+# compile the test program and run it
+TEST $CC -O0 -g3 $(dirname $0)/bug-1279730.c -o $(dirname $0)/bug-1279730
+
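+# The helper writes to $M0/file while over quota, then has its child run
+# the quoted command to raise the limit before re-reading the data.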
+TEST $(dirname $0)/bug-1279730 $M0/file "\"$CLI volume quota $V0 limit-usage / 1024\""
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $CLI volume stop $V0;
+TEST $CLI volume delete $V0;
+
+cleanup;
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=1279730
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1279730
+
diff --git a/tests/bugs/write-behind/issue-884.c b/tests/bugs/write-behind/issue-884.c
new file mode 100644
index 00000000000..e9c33b351ad
--- /dev/null
+++ b/tests/bugs/write-behind/issue-884.c
@@ -0,0 +1,275 @@
+#define _GNU_SOURCE
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <pthread.h>
+
+#include <glusterfs/api/glfs.h>
+
+/* Based on a reproducer by Stefan Ring. It seems to be quite sensitive to
+ * any timing modification, so the code has been kept as is, with only minor
+ * changes. */
+
+struct glfs *glfs;
+
+pthread_mutex_t the_mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t the_cond = PTHREAD_COND_INITIALIZER;
+
+typedef struct _my_aiocb {
+ int64_t size;
+ volatile int64_t seq;
+ int which;
+} my_aiocb;
+
+typedef struct _worker_data {
+ my_aiocb cb;
+ struct iovec iov;
+ int64_t offset;
+} worker_data;
+
+typedef struct {
+ worker_data wdata[2];
+
+ volatile unsigned busy;
+} all_data_t;
+
+all_data_t all_data;
+
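+/* Two worker slots issue asynchronous writes; the bitmask all_data.busy
+ * tracks which slots have a write in flight, and completion_fnc clears
+ * the slot's bit and wakes up any waiters. */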
+static void
+completion_fnc(struct glfs_fd *fd, ssize_t ret, struct glfs_stat *pre,
+ struct glfs_stat *post, void *arg)
+{
+ void *the_thread;
+ my_aiocb *cb = (my_aiocb *)arg;
+ long seq = cb->seq;
+
+ assert(ret == cb->size);
+
+ pthread_mutex_lock(&the_mutex);
+ pthread_cond_broadcast(&the_cond);
+
+ all_data.busy &= ~(1 << cb->which);
+ cb->seq = -1;
+
+ the_thread = (void *)pthread_self();
+ printf("worker %d is done from thread %p, seq %ld!\n", cb->which,
+ the_thread, seq);
+
+ pthread_mutex_unlock(&the_mutex);
+}
+
+static void
+init_wdata(worker_data *data, int which)
+{
+ data->cb.which = which;
+ data->cb.seq = -1;
+
+ data->iov.iov_base = malloc(1024 * 1024);
+ memset(data->iov.iov_base, 6,
+ 1024 * 1024); /* tail part never overwritten */
+}
+
+static void
+init()
+{
+ all_data.busy = 0;
+
+ init_wdata(&all_data.wdata[0], 0);
+ init_wdata(&all_data.wdata[1], 1);
+}
+
+static void
+do_write(struct glfs_fd *fd, int content, int size, int64_t seq,
+ worker_data *wdata, const char *name)
+{
+ int ret;
+
+ wdata->cb.size = size;
+ wdata->cb.seq = seq;
+
+ if (content >= 0)
+ memset(wdata->iov.iov_base, content, size);
+ wdata->iov.iov_len = size;
+
+ pthread_mutex_lock(&the_mutex);
+ printf("(%d) dispatching write \"%s\", offset %lx, len %x, seq %ld\n",
+ wdata->cb.which, name, (long)wdata->offset, size, (long)seq);
+ pthread_mutex_unlock(&the_mutex);
+ ret = glfs_pwritev_async(fd, &wdata->iov, 1, wdata->offset, 0,
+ completion_fnc, &wdata->cb);
+ assert(ret >= 0);
+}
+
+#define IDLE 0 // both workers must be idle
+#define ANY 1 // use any worker, other one may be busy
+
+int
+get_worker(int waitfor, int64_t excl_seq)
+{
+ int which;
+
+ pthread_mutex_lock(&the_mutex);
+
+    while ((waitfor == IDLE && (all_data.busy & 3) != 0) ||
+           (waitfor == ANY &&
+            ((all_data.busy & 3) == 3 ||
+             (excl_seq >= 0 && (all_data.wdata[0].cb.seq == excl_seq ||
+                                all_data.wdata[1].cb.seq == excl_seq)))))
+        pthread_cond_wait(&the_cond, &the_mutex);
+
+ if (!(all_data.busy & 1))
+ which = 0;
+ else
+ which = 1;
+
+ all_data.busy |= (1 << which);
+
+ pthread_mutex_unlock(&the_mutex);
+
+ return which;
+}
+
+static int
+doit(struct glfs_fd *fd)
+{
+ int ret;
+ int64_t seq = 0;
+ int64_t offset = 0; // position in file, in blocks
+ int64_t base = 0x1000; // where to place the data, in blocks
+
+ int async_mode = ANY;
+
+ init();
+
+ for (;;) {
+ int which;
+ worker_data *wdata;
+
+        // grow the file up to the first interesting offset
+ for (;;) {
+ int gap = base + 0x42 - offset;
+ if (!gap)
+ break;
+ if (gap > 80)
+ gap = 80;
+
+ which = get_worker(IDLE, -1);
+ wdata = &all_data.wdata[which];
+
+ wdata->offset = offset << 9;
+ do_write(fd, 0, gap << 9, seq++, wdata, "gap-filling");
+
+ offset += gap;
+ }
+
+ // 8700
+ which = get_worker(IDLE, -1);
+ wdata = &all_data.wdata[which];
+
+ wdata->offset = (base + 0x42) << 9;
+ do_write(fd, 1, 62 << 9, seq++, wdata, "!8700");
+
+ // 8701
+ which = get_worker(IDLE, -1);
+ wdata = &all_data.wdata[which];
+
+ wdata->offset = (base + 0x42) << 9;
+ do_write(fd, 2, 55 << 9, seq++, wdata, "!8701");
+
+ // 8702
+ which = get_worker(async_mode, -1);
+ wdata = &all_data.wdata[which];
+
+ wdata->offset = (base + 0x79) << 9;
+ do_write(fd, 3, 54 << 9, seq++, wdata, "!8702");
+
+ // 8703
+ which = get_worker(async_mode, -1);
+ wdata = &all_data.wdata[which];
+
+ wdata->offset = (base + 0xaf) << 9;
+ do_write(fd, 4, 81 << 9, seq++, wdata, "!8703");
+
+ // 8704
+ // this writes both 5s and 6s
+ // the range of 5s is the one that overwrites 8703
+
+ which = get_worker(async_mode, seq - 1);
+ wdata = &all_data.wdata[which];
+
+ memset(wdata->iov.iov_base, 5, 81 << 9);
+ wdata->offset = (base + 0xaf) << 9;
+ do_write(fd, -1, 1623 << 9, seq++, wdata, "!8704");
+
+ offset = base + 0x706;
+ base += 0x1000;
+ if (base >= 0x100000)
+ break;
+ }
+
+ printf("done!\n");
+ fflush(stdout);
+
+ pthread_mutex_lock(&the_mutex);
+
+ while ((all_data.busy & 3) != 0)
+ pthread_cond_wait(&the_cond, &the_mutex);
+
+ pthread_mutex_unlock(&the_mutex);
+
+ ret = glfs_close(fd);
+ assert(ret >= 0);
+ /*
+ ret = glfs_fini(glfs);
+ assert(ret >= 0);
+ */
+ return 0;
+}
+
+int
+main(int argc, char *argv[])
+{
+    int ret;
+    int open_flags = O_RDWR | O_DIRECT | O_TRUNC;
+    struct glfs_fd *fd;
+
+    if (argc != 3) {
+        printf("Usage: %s <volume> <file>\n", argv[0]);
+        return 1;
+    }
+
+    glfs = glfs_new(argv[1]);
+ if (!glfs) {
+ printf("glfs_new!\n");
+ goto out;
+ }
+ ret = glfs_set_volfile_server(glfs, "tcp", "localhost", 24007);
+ if (ret < 0) {
+ printf("set_volfile!\n");
+ goto out;
+ }
+ ret = glfs_init(glfs);
+ if (ret) {
+ printf("init!\n");
+ goto out;
+ }
+ fd = glfs_open(glfs, argv[2], open_flags);
+ if (!fd) {
+ printf("open!\n");
+ goto out;
+ }
+ srand(time(NULL));
+ return doit(fd);
+out:
+ return 1;
+}
diff --git a/tests/bugs/write-behind/issue-884.t b/tests/bugs/write-behind/issue-884.t
new file mode 100755
index 00000000000..2bcf7d15265
--- /dev/null
+++ b/tests/bugs/write-behind/issue-884.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This test tries to detect a race condition in write-behind. It is based on
+# a reproducer written by Stefan Ring that hits the bug only sometimes (on
+# the original author's system, around 10% of the runs), so if the bug ever
+# reappears this test will fail only occasionally, and the failure will most
+# probably be hidden by the automatic retry of the testing framework.
+#
+# Please, if this test fails, analyze it in detail.
+
+function run() {
+ "${@}" >/dev/null
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+# This makes it easier to hit the issue
+TEST $CLI volume set $V0 client-log-level TRACE
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+TEST build_tester $(dirname $0)/issue-884.c -lgfapi
+
+TEST touch $M0/testfile
+
+# This program generates a file of 535694336 bytes with a fixed pattern
+TEST run $(dirname $0)/issue-884 $V0 testfile
+
+# This is the md5sum of the expected pattern without corruption
+EXPECT "ad105f9349345a70fc697632cbb5eec8" echo "$(md5sum $B0/$V0/testfile | awk '{ print $1; }')"
+
+cleanup