author     Milind Changire <mchangir@redhat.com>    2016-04-22 16:56:47 +0530
committer  Aravinda VK <avishwan@redhat.com>        2016-06-28 23:41:54 -0700
commit     70fd68d94f768c098b3178c151fa92c5079a8cfd (patch)
tree       84b94505ceadc9aa99754a5e08ffd99908b1cc08 /cli/src
parent     1e60f9746cf7cb8ce34e2b1572410c39b11d7664 (diff)
georep: add reset-sync-time option for session delete
Set the stime xattr at all the brick roots to (0,0) if the argument
reset-sync-time has been provided on the command-line.

To avoid testing against directory specific stime, the remote stime is
assumed to be minus_infinity, if the root directory stime is set to (0,0),
before the directory scan begins. This triggers a full volume resync to
slave in the case of a geo-rep session recreation with the same
master-slave volume pair.

Command synopsis:
gluster volume geo-replication <MASTERVOL> <SLAVE>::<SLAVEVOL> delete \
    [reset-sync-time]

Update gluster cli man page to include new sub-command reset-sync-time.

Change-Id: Ie4ce03b9425ed9bb81eda8681058c0fc6f990948
BUG: 1311926
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: http://review.gluster.org/14051
Reviewed-by: Kotresh HR <khiremat@redhat.com>
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Aravinda VK <avishwan@redhat.com>
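For illustration only (the patch below touches only the CLI and the man page): "setting the stime xattr at a brick root to (0,0)" amounts to writing two zeroed 32-bit big-endian integers, the (sec, nsec) pair that geo-rep's gsyncd packs into the stime key. A minimal sketch, assuming a placeholder key name -- the real key embeds the master and slave volume UUIDs:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/xattr.h>

/* Hypothetical helper, not part of this change: reset a brick root's
 * stime marker to (0,0) so the next crawl treats it as never synced. */
static int
reset_stime (const char *brick_root, const char *stime_key)
{
        uint32_t val[2] = { htonl (0), htonl (0) };  /* (sec, nsec) = (0, 0) */

        if (lsetxattr (brick_root, stime_key, val, sizeof (val), 0) == -1) {
                perror ("lsetxattr");
                return -1;
        }
        return 0;
}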
Diffstat (limited to 'cli/src')
-rw-r--r--  cli/src/cli-cmd-parser.c  18
-rw-r--r--  cli/src/cli-cmd-volume.c   2
2 files changed, 18 insertions, 2 deletions
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index ae74d441c33..a570c343133 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -2574,7 +2574,7 @@ cli_cmd_gsync_set_parse (const char **words, int wordcount, dict_t **options)
* volume geo-replication [$m [$s]] status [detail]
* volume geo-replication [$m] $s config [[!]$opt [$val]]
* volume geo-replication $m $s start|stop [force]
- * volume geo-replication $m $s delete
+ * volume geo-replication $m $s delete [reset-sync-time]
* volume geo-replication $m $s pause [force]
* volume geo-replication $m $s resume [force]
*/
@@ -2702,6 +2702,22 @@ cli_cmd_gsync_set_parse (const char **words, int wordcount, dict_t **options)
cmdi++;
}
+        if (type == GF_GSYNC_OPTION_TYPE_DELETE &&
+            !strcmp ((char *)words[wordcount-1], "reset-sync-time")) {
+                if (strcmp ((char *)words[wordcount-2], "delete")) {
+                        ret = -1;
+                        goto out;
+                }
+                if (!slavei || !masteri) {
+                        ret = -1;
+                        goto out;
+                }
+                ret = dict_set_uint32 (dict, "reset-sync-time", _gf_true);
+                if (ret)
+                        goto out;
+                cmdi++;
+        }
+
if (type != GF_GSYNC_OPTION_TYPE_CONFIG &&
(cmdi < wordcount - 1 || glob))
goto out;
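The hunk above only records the user's request in the CLI dictionary; the flag is consumed further down the management path. A rough sketch of how a consumer might read it back (illustrative only; the handler name is hypothetical, while dict_get_uint32() is the libglusterfs accessor matching the dict_set_uint32() call above):

/* Hypothetical reader for the flag set by the CLI parser above. */
static gf_boolean_t
reset_sync_time_requested (dict_t *dict)
{
        uint32_t reset_sync_time = _gf_false;
        int      ret = 0;

        ret = dict_get_uint32 (dict, "reset-sync-time", &reset_sync_time);
        if (ret)
                return _gf_false;   /* key absent: keep default behaviour */

        return reset_sync_time ? _gf_true : _gf_false;
}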
diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
index b09a6ccbe3d..2ae4aa2dbf4 100644
--- a/cli/src/cli-cmd-volume.c
+++ b/cli/src/cli-cmd-volume.c
@@ -2636,7 +2636,7 @@ struct cli_cmd volume_cmds[] = {
#if (SYNCDAEMON_COMPILE)
{"volume "GEOREP" [<VOLNAME>] [<SLAVE-URL>] {create [[ssh-port n] [[no-verify]|[push-pem]]] [force]"
- "|start [force]|stop [force]|pause [force]|resume [force]|config|status [detail]|delete} [options...]",
+ "|start [force]|stop [force]|pause [force]|resume [force]|config|status [detail]|delete [reset-sync-time]} [options...]",
cli_cmd_volume_gsync_set_cbk,
"Geo-sync operations",
cli_cmd_check_gsync_exists_cbk},
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot-scrub-status.h50
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot-scrub.c2070
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot-scrub.h46
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot-ssm.c124
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot-ssm.h38
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot.c2232
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot.h302
-rw-r--r--xlators/features/bit-rot/src/stub/Makefile.am20
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-common.h178
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-object-version.h30
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-stub-helpers.c796
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-stub-mem-types.h36
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-stub-messages.h117
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-stub.c3590
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-stub.h515
-rw-r--r--xlators/features/changelog/Makefile.am3
-rw-r--r--xlators/features/changelog/lib/Makefile.am (renamed from xlators/features/filter/Makefile.am)2
-rw-r--r--xlators/features/changelog/lib/examples/c/get-changes-multi.c90
-rw-r--r--xlators/features/changelog/lib/examples/c/get-changes.c93
-rw-r--r--xlators/features/changelog/lib/examples/c/get-history.c116
-rwxr-xr-xxlators/features/changelog/lib/examples/python/changes.py34
-rw-r--r--xlators/features/changelog/lib/examples/python/libgfchangelog.py71
-rw-r--r--xlators/features/changelog/lib/src/Makefile.am35
-rw-r--r--xlators/features/changelog/lib/src/changelog-lib-messages.h74
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog-api.c224
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog-helpers.c170
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog-helpers.h255
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog-journal-handler.c1029
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog-journal.h116
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog-reborp.c413
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog-rpc.c98
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog-rpc.h28
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog.c652
-rw-r--r--xlators/features/changelog/lib/src/gf-history-changelog.c1020
-rw-r--r--xlators/features/changelog/src/Makefile.am29
-rw-r--r--xlators/features/changelog/src/changelog-barrier.c131
-rw-r--r--xlators/features/changelog/src/changelog-encoders.c232
-rw-r--r--xlators/features/changelog/src/changelog-encoders.h50
-rw-r--r--xlators/features/changelog/src/changelog-ev-handle.c412
-rw-r--r--xlators/features/changelog/src/changelog-ev-handle.h136
-rw-r--r--xlators/features/changelog/src/changelog-helpers.c1977
-rw-r--r--xlators/features/changelog/src/changelog-helpers.h716
-rw-r--r--xlators/features/changelog/src/changelog-mem-types.h34
-rw-r--r--xlators/features/changelog/src/changelog-messages.h172
-rw-r--r--xlators/features/changelog/src/changelog-misc.h131
-rw-r--r--xlators/features/changelog/src/changelog-rpc-common.c359
-rw-r--r--xlators/features/changelog/src/changelog-rpc-common.h85
-rw-r--r--xlators/features/changelog/src/changelog-rpc.c440
-rw-r--r--xlators/features/changelog/src/changelog-rpc.h31
-rw-r--r--xlators/features/changelog/src/changelog-rt.c66
-rw-r--r--xlators/features/changelog/src/changelog-rt.h33
-rw-r--r--xlators/features/changelog/src/changelog.c2989
-rw-r--r--xlators/features/cloudsync/Makefile.am (renamed from xlators/features/mac-compat/Makefile.am)2
-rw-r--r--xlators/features/cloudsync/src/Makefile.am46
-rw-r--r--xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.c30
-rw-r--r--xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.h24
-rw-r--r--xlators/features/cloudsync/src/cloudsync-common.c60
-rw-r--r--xlators/features/cloudsync/src/cloudsync-common.h134
-rwxr-xr-xxlators/features/cloudsync/src/cloudsync-fops-c.py324
-rwxr-xr-xxlators/features/cloudsync/src/cloudsync-fops-h.py31
-rw-r--r--xlators/features/cloudsync/src/cloudsync-mem-types.h22
-rw-r--r--xlators/features/cloudsync/src/cloudsync-messages.h16
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/Makefile.am3
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/Makefile.am11
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/Makefile.am3
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/Makefile.am12
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3-mem-types.h19
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.c584
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.h50
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.sym1
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/Makefile.am3
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/Makefile.am12
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/archivestore.h203
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/cvlt-messages.h30
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcloudsynccvlt.sym1
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt-mem-types.h19
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.c842
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.h84
-rw-r--r--xlators/features/cloudsync/src/cloudsync.c2076
-rw-r--r--xlators/features/cloudsync/src/cloudsync.h123
-rw-r--r--xlators/features/compress/Makefile.am3
-rw-r--r--xlators/features/compress/src/Makefile.am19
-rw-r--r--xlators/features/compress/src/cdc-helper.c527
-rw-r--r--xlators/features/compress/src/cdc-mem-types.h23
-rw-r--r--xlators/features/compress/src/cdc.c348
-rw-r--r--xlators/features/compress/src/cdc.h99
-rw-r--r--xlators/features/filter/src/Makefile.am15
-rw-r--r--xlators/features/filter/src/filter-mem-types.h30
-rw-r--r--xlators/features/filter/src/filter.c1744
-rw-r--r--xlators/features/gfid-access/Makefile.am1
-rw-r--r--xlators/features/gfid-access/src/Makefile.am16
-rw-r--r--xlators/features/gfid-access/src/gfid-access-mem-types.h22
-rw-r--r--xlators/features/gfid-access/src/gfid-access.c1420
-rw-r--r--xlators/features/gfid-access/src/gfid-access.h107
-rw-r--r--xlators/features/index/Makefile.am3
-rw-r--r--xlators/features/index/src/Makefile.am19
-rw-r--r--xlators/features/index/src/index-mem-types.h23
-rw-r--r--xlators/features/index/src/index-messages.h33
-rw-r--r--xlators/features/index/src/index.c2682
-rw-r--r--xlators/features/index/src/index.h86
-rw-r--r--xlators/features/leases/Makefile.am3
-rw-r--r--xlators/features/leases/src/Makefile.am20
-rw-r--r--xlators/features/leases/src/leases-internal.c1412
-rw-r--r--xlators/features/leases/src/leases-mem-types.h27
-rw-r--r--xlators/features/leases/src/leases-messages.h33
-rw-r--r--xlators/features/leases/src/leases.c1168
-rw-r--r--xlators/features/leases/src/leases.h259
-rw-r--r--xlators/features/locks/src/Makefile.am23
-rw-r--r--xlators/features/locks/src/clear.c460
-rw-r--r--xlators/features/locks/src/clear.h73
-rw-r--r--xlators/features/locks/src/common.c1946
-rw-r--r--xlators/features/locks/src/common.h265
-rw-r--r--xlators/features/locks/src/entrylk.c1484
-rw-r--r--xlators/features/locks/src/inodelk.c1482
-rw-r--r--xlators/features/locks/src/locks-mem-types.h44
-rw-r--r--xlators/features/locks/src/locks.h336
-rw-r--r--xlators/features/locks/src/pl-messages.h29
-rw-r--r--xlators/features/locks/src/posix.c5801
-rw-r--r--xlators/features/locks/src/reservelk.c606
-rw-r--r--xlators/features/locks/tests/unit-test.c118
-rw-r--r--xlators/features/mac-compat/src/Makefile.am13
-rw-r--r--xlators/features/mac-compat/src/mac-compat.c247
-rw-r--r--xlators/features/marker/Makefile.am2
-rw-r--r--xlators/features/marker/src/Makefile.am19
-rw-r--r--xlators/features/marker/src/marker-common.c111
-rw-r--r--xlators/features/marker/src/marker-common.h37
-rw-r--r--xlators/features/marker/src/marker-mem-types.h43
-rw-r--r--xlators/features/marker/src/marker-quota-helper.c563
-rw-r--r--xlators/features/marker/src/marker-quota-helper.h106
-rw-r--r--xlators/features/marker/src/marker-quota.c3555
-rw-r--r--xlators/features/marker/src/marker-quota.h244
-rw-r--r--xlators/features/marker/src/marker.c4118
-rw-r--r--xlators/features/marker/src/marker.h193
-rw-r--r--xlators/features/marker/utils/Makefile.am7
-rwxr-xr-xxlators/features/marker/utils/gsyncd.in55
-rw-r--r--xlators/features/marker/utils/syncdaemon/Makefile.am5
-rw-r--r--xlators/features/marker/utils/syncdaemon/README.md81
-rw-r--r--xlators/features/marker/utils/syncdaemon/__init__.py0
-rw-r--r--xlators/features/marker/utils/syncdaemon/configinterface.py185
-rw-r--r--xlators/features/marker/utils/syncdaemon/gconf.py15
-rw-r--r--xlators/features/marker/utils/syncdaemon/gsyncd.py302
-rw-r--r--xlators/features/marker/utils/syncdaemon/libcxattr.py62
-rw-r--r--xlators/features/marker/utils/syncdaemon/master.py366
-rw-r--r--xlators/features/marker/utils/syncdaemon/monitor.py80
-rw-r--r--xlators/features/marker/utils/syncdaemon/repce.py162
-rw-r--r--xlators/features/marker/utils/syncdaemon/resource.py467
-rw-r--r--xlators/features/marker/utils/syncdaemon/syncdutils.py160
-rw-r--r--xlators/features/metadisp/Makefile.am3
-rw-r--r--xlators/features/metadisp/src/Makefile.am38
-rw-r--r--xlators/features/metadisp/src/backend.c45
-rw-r--r--xlators/features/metadisp/src/fops-tmpl.c10
-rw-r--r--xlators/features/metadisp/src/gen-fops.py160
-rw-r--r--xlators/features/metadisp/src/metadisp-create.c101
-rw-r--r--xlators/features/metadisp/src/metadisp-fops.h51
-rw-r--r--xlators/features/metadisp/src/metadisp-fsync.c54
-rw-r--r--xlators/features/metadisp/src/metadisp-lookup.c90
-rw-r--r--xlators/features/metadisp/src/metadisp-open.c70
-rw-r--r--xlators/features/metadisp/src/metadisp-readdir.c65
-rw-r--r--xlators/features/metadisp/src/metadisp-setattr.c90
-rw-r--r--xlators/features/metadisp/src/metadisp-stat.c124
-rw-r--r--xlators/features/metadisp/src/metadisp-unlink.c160
-rw-r--r--xlators/features/metadisp/src/metadisp.c46
-rw-r--r--xlators/features/metadisp/src/metadisp.h45
-rw-r--r--xlators/features/namespace/Makefile.am3
-rw-r--r--xlators/features/namespace/src/Makefile.am17
-rw-r--r--xlators/features/namespace/src/namespace.c1344
-rw-r--r--xlators/features/namespace/src/namespace.h23
-rw-r--r--xlators/features/path-convertor/src/Makefile.am14
-rw-r--r--xlators/features/path-convertor/src/path-mem-types.h32
-rw-r--r--xlators/features/path-convertor/src/path.c1239
-rw-r--r--xlators/features/quiesce/src/Makefile.am10
-rw-r--r--xlators/features/quiesce/src/quiesce-mem-types.h27
-rw-r--r--xlators/features/quiesce/src/quiesce-messages.h28
-rw-r--r--xlators/features/quiesce/src/quiesce.c3532
-rw-r--r--xlators/features/quiesce/src/quiesce.h89
-rw-r--r--xlators/features/quota/src/Makefile.am29
-rw-r--r--xlators/features/quota/src/quota-enforcer-client.c503
-rw-r--r--xlators/features/quota/src/quota-mem-types.h46
-rw-r--r--xlators/features/quota/src/quota-messages.h39
-rw-r--r--xlators/features/quota/src/quota.c6615
-rw-r--r--xlators/features/quota/src/quota.h393
-rw-r--r--xlators/features/quota/src/quotad-aggregator.c494
-rw-r--r--xlators/features/quota/src/quotad-aggregator.h38
-rw-r--r--xlators/features/quota/src/quotad-helpers.c107
-rw-r--r--xlators/features/quota/src/quotad-helpers.h24
-rw-r--r--xlators/features/quota/src/quotad.c245
-rw-r--r--xlators/features/read-only/src/Makefile.am20
-rw-r--r--xlators/features/read-only/src/read-only-common.c406
-rw-r--r--xlators/features/read-only/src/read-only-common.h121
-rw-r--r--xlators/features/read-only/src/read-only-mem-types.h20
-rw-r--r--xlators/features/read-only/src/read-only.c374
-rw-r--r--xlators/features/read-only/src/read-only.h37
-rw-r--r--xlators/features/read-only/src/worm-helper.c395
-rw-r--r--xlators/features/read-only/src/worm-helper.h44
-rw-r--r--xlators/features/read-only/src/worm.c722
-rw-r--r--xlators/features/sdfs/Makefile.am3
-rw-r--r--xlators/features/sdfs/src/Makefile.am19
-rw-r--r--xlators/features/sdfs/src/sdfs-messages.h67
-rw-r--r--xlators/features/sdfs/src/sdfs.c1479
-rw-r--r--xlators/features/sdfs/src/sdfs.h49
-rw-r--r--xlators/features/selinux/Makefile.am3
-rw-r--r--xlators/features/selinux/src/Makefile.am20
-rw-r--r--xlators/features/selinux/src/selinux-mem-types.h19
-rw-r--r--xlators/features/selinux/src/selinux-messages.h30
-rw-r--r--xlators/features/selinux/src/selinux.c323
-rw-r--r--xlators/features/selinux/src/selinux.h24
-rw-r--r--xlators/features/shard/Makefile.am3
-rw-r--r--xlators/features/shard/src/Makefile.am17
-rw-r--r--xlators/features/shard/src/shard-mem-types.h24
-rw-r--r--xlators/features/shard/src/shard-messages.h39
-rw-r--r--xlators/features/shard/src/shard.c7382
-rw-r--r--xlators/features/shard/src/shard.h348
-rw-r--r--xlators/features/snapview-client/Makefile.am1
-rw-r--r--xlators/features/snapview-client/src/Makefile.am16
-rw-r--r--xlators/features/snapview-client/src/snapview-client-mem-types.h24
-rw-r--r--xlators/features/snapview-client/src/snapview-client-messages.h71
-rw-r--r--xlators/features/snapview-client/src/snapview-client.c2791
-rw-r--r--xlators/features/snapview-client/src/snapview-client.h101
-rw-r--r--xlators/features/snapview-server/Makefile.am1
-rw-r--r--xlators/features/snapview-server/src/Makefile.am25
-rw-r--r--xlators/features/snapview-server/src/snapview-server-helpers.c715
-rw-r--r--xlators/features/snapview-server/src/snapview-server-mem-types.h25
-rw-r--r--xlators/features/snapview-server/src/snapview-server-messages.h54
-rw-r--r--xlators/features/snapview-server/src/snapview-server-mgmt.c524
-rw-r--r--xlators/features/snapview-server/src/snapview-server.c2720
-rw-r--r--xlators/features/snapview-server/src/snapview-server.h255
-rw-r--r--xlators/features/thin-arbiter/Makefile.am3
-rw-r--r--xlators/features/thin-arbiter/src/Makefile.am22
-rw-r--r--xlators/features/thin-arbiter/src/thin-arbiter-mem-types.h19
-rw-r--r--xlators/features/thin-arbiter/src/thin-arbiter-messages.h28
-rw-r--r--xlators/features/thin-arbiter/src/thin-arbiter.c661
-rw-r--r--xlators/features/thin-arbiter/src/thin-arbiter.h59
-rw-r--r--xlators/features/trash/src/Makefile.am12
-rw-r--r--xlators/features/trash/src/trash-mem-types.h33
-rw-r--r--xlators/features/trash/src/trash.c3761
-rw-r--r--xlators/features/trash/src/trash.h128
-rw-r--r--xlators/features/upcall/Makefile.am3
-rw-r--r--xlators/features/upcall/src/Makefile.am23
-rw-r--r--xlators/features/upcall/src/upcall-cache-invalidation.h18
-rw-r--r--xlators/features/upcall/src/upcall-internal.c689
-rw-r--r--xlators/features/upcall/src/upcall-mem-types.h23
-rw-r--r--xlators/features/upcall/src/upcall-messages.h29
-rw-r--r--xlators/features/upcall/src/upcall.c2505
-rw-r--r--xlators/features/upcall/src/upcall.h131
-rw-r--r--xlators/features/utime/Makefile.am3
-rw-r--r--xlators/features/utime/src/Makefile.am41
-rw-r--r--xlators/features/utime/src/utime-autogen-fops-tmpl.c28
-rw-r--r--xlators/features/utime/src/utime-autogen-fops-tmpl.h22
-rwxr-xr-xxlators/features/utime/src/utime-gen-fops-c.py147
-rwxr-xr-xxlators/features/utime/src/utime-gen-fops-h.py35
-rw-r--r--xlators/features/utime/src/utime-helpers.c110
-rw-r--r--xlators/features/utime/src/utime-helpers.h25
-rw-r--r--xlators/features/utime/src/utime-mem-types.h21
-rw-r--r--xlators/features/utime/src/utime-messages.h29
-rw-r--r--xlators/features/utime/src/utime.c392
-rw-r--r--xlators/features/utime/src/utime.h23
273 files changed, 89087 insertions, 18357 deletions
diff --git a/xlators/features/Makefile.am b/xlators/features/Makefile.am
index 809bbe51075..c57897f11ea 100644
--- a/xlators/features/Makefile.am
+++ b/xlators/features/Makefile.am
@@ -1,3 +1,14 @@
-SUBDIRS = locks trash quota read-only mac-compat quiesce marker#path-converter # filter
+if BUILD_CLOUDSYNC
+ CLOUDSYNC_DIR = cloudsync
+endif
+
+if BUILD_METADISP
+ METADISP_DIR = metadisp
+endif
+
+SUBDIRS = locks quota read-only quiesce marker index barrier arbiter upcall \
+ compress changelog gfid-access snapview-client snapview-server trash \
+ shard bit-rot leases selinux sdfs namespace $(CLOUDSYNC_DIR) thin-arbiter \
+ utime $(METADISP_DIR)
CLEANFILES =
diff --git a/xlators/features/access-control/src/Makefile.am b/xlators/features/access-control/src/Makefile.am
deleted file mode 100644
index 6ab8cc4ec4e..00000000000
--- a/xlators/features/access-control/src/Makefile.am
+++ /dev/null
@@ -1,13 +0,0 @@
-xlator_LTLIBRARIES = access-control.la
-xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
-access_control_la_LDFLAGS = -module -avoidversion
-access_control_la_SOURCES = access-control.c
-access_control_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
-
-noinst_HEADERS = access-control.h
-
-AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS)\
- -I$(top_srcdir)/libglusterfs/src -shared -nostartfiles $(GF_CFLAGS)\
- -L$(xlatordir)/
-
-CLEANFILES =
diff --git a/xlators/features/access-control/Makefile.am b/xlators/features/arbiter/Makefile.am
index a985f42a877..a985f42a877 100644
--- a/xlators/features/access-control/Makefile.am
+++ b/xlators/features/arbiter/Makefile.am
diff --git a/xlators/features/arbiter/src/Makefile.am b/xlators/features/arbiter/src/Makefile.am
new file mode 100644
index 00000000000..badc42f37be
--- /dev/null
+++ b/xlators/features/arbiter/src/Makefile.am
@@ -0,0 +1,19 @@
+if WITH_SERVER
+xlator_LTLIBRARIES = arbiter.la
+endif
+
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
+
+arbiter_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+
+arbiter_la_SOURCES = arbiter.c
+arbiter_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+
+noinst_HEADERS = arbiter.h arbiter-mem-types.h
+
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src
+
+AM_CFLAGS = -Wall $(GF_CFLAGS)
+
+CLEANFILES =
diff --git a/xlators/features/arbiter/src/arbiter-mem-types.h b/xlators/features/arbiter/src/arbiter-mem-types.h
new file mode 100644
index 00000000000..05d18374c46
--- /dev/null
+++ b/xlators/features/arbiter/src/arbiter-mem-types.h
@@ -0,0 +1,18 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __ARBITER_MEM_TYPES_H__
+#define __ARBITER_MEM_TYPES_H__
+#include <glusterfs/mem-types.h>
+
+typedef enum gf_arbiter_mem_types_ {
+ gf_arbiter_mt_inode_ctx_t = gf_common_mt_end + 1,
+ gf_arbiter_mt_end
+} gf_arbiter_mem_types_t;
+#endif
diff --git a/xlators/features/arbiter/src/arbiter.c b/xlators/features/arbiter/src/arbiter.c
new file mode 100644
index 00000000000..83a97e3354b
--- /dev/null
+++ b/xlators/features/arbiter/src/arbiter.c
@@ -0,0 +1,380 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "arbiter.h"
+#include "arbiter-mem-types.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+
+static arbiter_inode_ctx_t *
+__arbiter_inode_ctx_get(inode_t *inode, xlator_t *this)
+{
+ arbiter_inode_ctx_t *ctx = NULL;
+ int ret = 0;
+ uint64_t ctx_addr = 0;
+
+ ret = __inode_ctx_get(inode, this, &ctx_addr);
+ if (ret == 0) {
+ ctx = (arbiter_inode_ctx_t *)(long)ctx_addr;
+ goto out;
+ }
+
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_arbiter_mt_inode_ctx_t);
+ if (!ctx)
+ goto out;
+
+ ret = __inode_ctx_put(inode, this, (uint64_t)(uintptr_t)ctx);
+ if (ret) {
+ GF_FREE(ctx);
+ ctx = NULL;
+ gf_log_callingfn(this->name, GF_LOG_ERROR,
+ "failed to "
+ "set the inode ctx (%s)",
+ uuid_utoa(inode->gfid));
+ }
+out:
+ return ctx;
+}
+
+static arbiter_inode_ctx_t *
+arbiter_inode_ctx_get(inode_t *inode, xlator_t *this)
+{
+ arbiter_inode_ctx_t *ctx = NULL;
+
+ LOCK(&inode->lock);
+ {
+ ctx = __arbiter_inode_ctx_get(inode, this);
+ }
+ UNLOCK(&inode->lock);
+ return ctx;
+}
+
+int32_t
+arbiter_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, dict_t *xdata, struct iatt *postparent)
+{
+ arbiter_inode_ctx_t *ctx = NULL;
+
+ if (op_ret != 0)
+ goto unwind;
+ ctx = arbiter_inode_ctx_get(inode, this);
+ if (!ctx) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ memcpy(&ctx->iattbuf, buf, sizeof(ctx->iattbuf));
+
+unwind:
+ STACK_UNWIND_STRICT(lookup, frame, op_ret, op_errno, inode, buf, xdata,
+ postparent);
+ return 0;
+}
+
+int32_t
+arbiter_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ STACK_WIND(frame, arbiter_lookup_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lookup, loc, xdata);
+ return 0;
+}
+
+int32_t
+arbiter_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset,
+ dict_t *xdata)
+{
+ arbiter_inode_ctx_t *ctx = NULL;
+ struct iatt *buf = NULL;
+ int32_t op_ret = 0;
+ int32_t op_errno = 0;
+
+ ctx = arbiter_inode_ctx_get(loc->inode, this);
+ if (!ctx) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ buf = &ctx->iattbuf;
+unwind:
+ STACK_UNWIND_STRICT(truncate, frame, op_ret, op_errno, buf, buf, NULL);
+ return 0;
+}
+
+int32_t
+arbiter_ftruncate(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ dict_t *xdata)
+
+{
+ arbiter_inode_ctx_t *ctx = NULL;
+ struct iatt *buf = NULL;
+ int32_t op_ret = 0;
+ int32_t op_errno = 0;
+
+ ctx = arbiter_inode_ctx_get(fd->inode, this);
+ if (!ctx) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ buf = &ctx->iattbuf;
+unwind:
+ STACK_UNWIND_STRICT(ftruncate, frame, op_ret, op_errno, buf, buf, NULL);
+ return 0;
+}
+
+dict_t *
+arbiter_fill_writev_xdata(fd_t *fd, dict_t *xdata, xlator_t *this)
+{
+ dict_t *rsp_xdata = NULL;
+ int32_t ret = 0;
+ int is_append = 1;
+
+ if (!fd || !fd->inode || gf_uuid_is_null(fd->inode->gfid)) {
+ goto out;
+ }
+
+ if (!xdata)
+ goto out;
+
+ rsp_xdata = dict_new();
+ if (!rsp_xdata)
+ goto out;
+
+ if (dict_get(xdata, GLUSTERFS_OPEN_FD_COUNT)) {
+ ret = dict_set_uint32(rsp_xdata, GLUSTERFS_OPEN_FD_COUNT,
+ fd->inode->fd_count);
+ if (ret < 0) {
+ gf_msg_debug(this->name, 0,
+ "Failed to set dict value"
+ " for GLUSTERFS_OPEN_FD_COUNT");
+ }
+ }
+ if (dict_get(xdata, GLUSTERFS_WRITE_IS_APPEND)) {
+ ret = dict_set_uint32(rsp_xdata, GLUSTERFS_WRITE_IS_APPEND, is_append);
+ if (ret < 0) {
+ gf_msg_debug(this->name, 0,
+ "Failed to set dict value"
+ " for GLUSTERFS_WRITE_IS_APPEND");
+ }
+ }
+out:
+ return rsp_xdata;
+}
+
+int32_t
+arbiter_writev(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iovec *vector, int32_t count, off_t off, uint32_t flags,
+ struct iobref *iobref, dict_t *xdata)
+{
+ arbiter_inode_ctx_t *ctx = NULL;
+ struct iatt *buf = NULL;
+ dict_t *rsp_xdata = NULL;
+ int op_ret = 0;
+ int op_errno = 0;
+
+ ctx = arbiter_inode_ctx_get(fd->inode, this);
+ if (!ctx) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ buf = &ctx->iattbuf;
+ op_ret = iov_length(vector, count);
+ rsp_xdata = arbiter_fill_writev_xdata(fd, xdata, this);
+unwind:
+ STACK_UNWIND_STRICT(writev, frame, op_ret, op_errno, buf, buf, rsp_xdata);
+ if (rsp_xdata)
+ dict_unref(rsp_xdata);
+ return 0;
+}
+
+int32_t
+arbiter_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ int32_t keep_size, off_t offset, size_t len, dict_t *xdata)
+{
+ arbiter_inode_ctx_t *ctx = NULL;
+ struct iatt *buf = NULL;
+ int op_ret = 0;
+ int op_errno = 0;
+
+ ctx = arbiter_inode_ctx_get(fd->inode, this);
+ if (!ctx) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ buf = &ctx->iattbuf;
+unwind:
+ STACK_UNWIND_STRICT(fallocate, frame, op_ret, op_errno, buf, buf, NULL);
+ return 0;
+}
+
+int32_t
+arbiter_discard(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ size_t len, dict_t *xdata)
+{
+ arbiter_inode_ctx_t *ctx = NULL;
+ struct iatt *buf = NULL;
+ int op_ret = 0;
+ int op_errno = 0;
+
+ ctx = arbiter_inode_ctx_get(fd->inode, this);
+ if (!ctx) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ buf = &ctx->iattbuf;
+unwind:
+ STACK_UNWIND_STRICT(discard, frame, op_ret, op_errno, buf, buf, NULL);
+ return 0;
+}
+
+int32_t
+arbiter_zerofill(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ off_t len, dict_t *xdata)
+{
+ arbiter_inode_ctx_t *ctx = NULL;
+ struct iatt *buf = NULL;
+ int op_ret = 0;
+ int op_errno = 0;
+
+ ctx = arbiter_inode_ctx_get(fd->inode, this);
+ if (!ctx) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ buf = &ctx->iattbuf;
+unwind:
+ STACK_UNWIND_STRICT(zerofill, frame, op_ret, op_errno, buf, buf, NULL);
+ return 0;
+}
+
+static int32_t
+arbiter_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(readv, frame, -1, ENOSYS, NULL, 0, NULL, NULL, NULL);
+ return 0;
+}
+
+static int32_t
+arbiter_seek(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ gf_seek_what_t what, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(seek, frame, -1, ENOSYS, 0, xdata);
+ return 0;
+}
+
+int32_t
+mem_acct_init(xlator_t *this)
+{
+ int ret = -1;
+
+ ret = xlator_mem_acct_init(this, gf_arbiter_mt_end + 1);
+ if (ret)
+ gf_log(this->name, GF_LOG_ERROR,
+ "Memory accounting "
+ "initialization failed.");
+ return ret;
+}
+
+int
+reconfigure(xlator_t *this, dict_t *options)
+{
+ return 0;
+}
+
+int
+arbiter_forget(xlator_t *this, inode_t *inode)
+{
+ arbiter_inode_ctx_t *ctx = NULL;
+ uint64_t ctx_addr = 0;
+
+ inode_ctx_del(inode, this, &ctx_addr);
+ if (!ctx_addr)
+ return 0;
+ ctx = (arbiter_inode_ctx_t *)(long)ctx_addr;
+ GF_FREE(ctx);
+ return 0;
+}
+
+int32_t
+init(xlator_t *this)
+{
+ if (!this->children || this->children->next) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "'arbiter' not configured with exactly one child");
+ return -1;
+ }
+
+ if (!this->parents)
+ gf_log(this->name, GF_LOG_ERROR, "dangling volume. check volfile ");
+
+ return 0;
+}
+
+void
+fini(xlator_t *this)
+{
+ return;
+}
+
+struct xlator_fops fops = {
+ .lookup = arbiter_lookup,
+
+ /* Return success for these inode write FOPS without winding it down to
+ * posix; this is needed for AFR write transaction logic to work.*/
+ .truncate = arbiter_truncate,
+ .writev = arbiter_writev,
+ .ftruncate = arbiter_ftruncate,
+ .fallocate = arbiter_fallocate,
+ .discard = arbiter_discard,
+ .zerofill = arbiter_zerofill,
+
+ /* AFR is not expected to wind these inode read FOPS initiated by the
+ * application to the arbiter brick. But in case a bug causes them
+ * to be called, we return ENOSYS. */
+ .readv = arbiter_readv,
+ .seek = arbiter_seek,
+
+ /* The following inode read FOPS initiated by the application are not
+ * wound by AFR either but internal logic like shd, glfsheal and
+ * client side healing in AFR will send them for selfheal/ inode refresh
+ * operations etc.,so we need to wind them down to posix:
+ *
+ * (f)stat, readdir(p), readlink, (f)getxattr.*/
+
+ /* All other FOPs not listed here are safe to be wound down to posix.*/
+};
+
+struct xlator_cbks cbks = {
+ .forget = arbiter_forget,
+};
+
+struct volume_options options[] = {
+ {.key = {NULL}},
+};
+
+xlator_api_t xlator_api = {
+ .init = init,
+ .fini = fini,
+ .reconfigure = reconfigure,
+ .mem_acct_init = mem_acct_init,
+ .op_version = {1}, /* Present from the initial version */
+ .fops = &fops,
+ .cbks = &cbks,
+ .options = options,
+ .identifier = "arbiter",
+ .category = GF_MAINTAINED,
+};
diff --git a/xlators/features/arbiter/src/arbiter.h b/xlators/features/arbiter/src/arbiter.h
new file mode 100644
index 00000000000..546db7b751a
--- /dev/null
+++ b/xlators/features/arbiter/src/arbiter.h
@@ -0,0 +1,21 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _ARBITER_H
+#define _ARBITER_H
+
+#include <glusterfs/locking.h>
+#include <glusterfs/common-utils.h>
+
+typedef struct arbiter_inode_ctx_ {
+ struct iatt iattbuf;
+} arbiter_inode_ctx_t;
+
+#endif /* _ARBITER_H */
diff --git a/xlators/features/path-convertor/Makefile.am b/xlators/features/barrier/Makefile.am
index d471a3f9243..a985f42a877 100644
--- a/xlators/features/path-convertor/Makefile.am
+++ b/xlators/features/barrier/Makefile.am
@@ -1,3 +1,3 @@
SUBDIRS = src
-CLEANFILES =
+CLEANFILES =
diff --git a/xlators/features/barrier/src/Makefile.am b/xlators/features/barrier/src/Makefile.am
new file mode 100644
index 00000000000..25099bc56e5
--- /dev/null
+++ b/xlators/features/barrier/src/Makefile.am
@@ -0,0 +1,17 @@
+xlator_LTLIBRARIES = barrier.la
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
+
+barrier_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+
+barrier_la_SOURCES = barrier.c
+
+barrier_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+
+noinst_HEADERS = barrier.h barrier-mem-types.h
+
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src
+
+AM_CFLAGS = -Wall $(GF_CFLAGS)
+
+CLEANFILES =
diff --git a/xlators/features/barrier/src/barrier-mem-types.h b/xlators/features/barrier/src/barrier-mem-types.h
new file mode 100644
index 00000000000..71ed7898d9c
--- /dev/null
+++ b/xlators/features/barrier/src/barrier-mem-types.h
@@ -0,0 +1,20 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __BARRIER_MEM_TYPES_H__
+#define __BARRIER_MEM_TYPES_H__
+
+#include <glusterfs/mem-types.h>
+
+enum gf_barrier_mem_types_ {
+ gf_barrier_mt_priv_t = gf_common_mt_end + 1,
+ gf_barrier_mt_end
+};
+#endif
diff --git a/xlators/features/barrier/src/barrier.c b/xlators/features/barrier/src/barrier.c
new file mode 100644
index 00000000000..852bbacb99d
--- /dev/null
+++ b/xlators/features/barrier/src/barrier.c
@@ -0,0 +1,809 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "barrier.h"
+#include <glusterfs/defaults.h>
+#include <glusterfs/call-stub.h>
+
+#include <glusterfs/statedump.h>
+
+void
+barrier_local_set_gfid(call_frame_t *frame, uuid_t gfid, xlator_t *this)
+{
+ if (gfid) {
+ uuid_t *id = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!id) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "Could not set gfid"
+ ". gfid will not be dumped in statedump file.");
+ return;
+ }
+ gf_uuid_copy(*id, gfid);
+ frame->local = id;
+ }
+}
+
+void
+barrier_local_free_gfid(call_frame_t *frame)
+{
+ if (frame->local) {
+ GF_FREE(frame->local);
+ frame->local = NULL;
+ }
+}
+
+int32_t
+barrier_truncate_cbk_resume(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *prebuf, struct iatt *postbuf,
+ dict_t *xdata)
+{
+ barrier_local_free_gfid(frame);
+ STACK_UNWIND_STRICT(truncate, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+ return 0;
+}
+
+int32_t
+barrier_ftruncate_cbk_resume(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *prebuf, struct iatt *postbuf,
+ dict_t *xdata)
+{
+ barrier_local_free_gfid(frame);
+ STACK_UNWIND_STRICT(ftruncate, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+ return 0;
+}
+
+int32_t
+barrier_unlink_cbk_resume(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *preparent, struct iatt *postparent,
+ dict_t *xdata)
+{
+ barrier_local_free_gfid(frame);
+ STACK_UNWIND_STRICT(unlink, frame, op_ret, op_errno, preparent, postparent,
+ xdata);
+ return 0;
+}
+
+int32_t
+barrier_rmdir_cbk_resume(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *preparent, struct iatt *postparent,
+ dict_t *xdata)
+{
+ barrier_local_free_gfid(frame);
+ STACK_UNWIND_STRICT(rmdir, frame, op_ret, op_errno, preparent, postparent,
+ xdata);
+ return 0;
+}
+
+int32_t
+barrier_rename_cbk_resume(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf,
+ struct iatt *preoldparent, struct iatt *postoldparent,
+ struct iatt *prenewparent, struct iatt *postnewparent,
+ dict_t *xdata)
+{
+ barrier_local_free_gfid(frame);
+ STACK_UNWIND_STRICT(rename, frame, op_ret, op_errno, buf, preoldparent,
+ postoldparent, prenewparent, postnewparent, xdata);
+ return 0;
+}
+
+int32_t
+barrier_writev_cbk_resume(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ barrier_local_free_gfid(frame);
+ STACK_UNWIND_STRICT(writev, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+ return 0;
+}
+
+int32_t
+barrier_fsync_cbk_resume(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ barrier_local_free_gfid(frame);
+ STACK_UNWIND_STRICT(fsync, frame, op_ret, op_errno, prebuf, postbuf, xdata);
+ return 0;
+}
+
+int32_t
+barrier_removexattr_cbk_resume(call_frame_t *frame, void *cookie,
+ xlator_t *this, int32_t op_ret, int32_t op_errno,
+ dict_t *xdata)
+{
+ barrier_local_free_gfid(frame);
+ STACK_UNWIND_STRICT(removexattr, frame, op_ret, op_errno, xdata);
+ return 0;
+}
+
+int32_t
+barrier_fremovexattr_cbk_resume(call_frame_t *frame, void *cookie,
+ xlator_t *this, int32_t op_ret,
+ int32_t op_errno, dict_t *xdata)
+{
+ barrier_local_free_gfid(frame);
+ STACK_UNWIND_STRICT(fremovexattr, frame, op_ret, op_errno, xdata);
+ return 0;
+}
+
+int32_t
+barrier_writev_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ BARRIER_FOP_CBK(writev, out, frame, this, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+out:
+ return 0;
+}
+
+int32_t
+barrier_fremovexattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ BARRIER_FOP_CBK(fremovexattr, out, frame, this, op_ret, op_errno, xdata);
+out:
+ return 0;
+}
+
+int32_t
+barrier_removexattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ BARRIER_FOP_CBK(removexattr, out, frame, this, op_ret, op_errno, xdata);
+out:
+ return 0;
+}
+
+int32_t
+barrier_truncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ BARRIER_FOP_CBK(truncate, out, frame, this, op_ret, op_errno, prebuf,
+ postbuf, xdata);
+out:
+ return 0;
+}
+
+int32_t
+barrier_ftruncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ BARRIER_FOP_CBK(ftruncate, out, frame, this, op_ret, op_errno, prebuf,
+ postbuf, xdata);
+out:
+ return 0;
+}
+
+int32_t
+barrier_rename_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf,
+ struct iatt *preoldparent, struct iatt *postoldparent,
+ struct iatt *prenewparent, struct iatt *postnewparent,
+ dict_t *xdata)
+{
+ BARRIER_FOP_CBK(rename, out, frame, this, op_ret, op_errno, buf,
+ preoldparent, postoldparent, prenewparent, postnewparent,
+ xdata);
+out:
+ return 0;
+}
+
+int32_t
+barrier_rmdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ BARRIER_FOP_CBK(rmdir, out, frame, this, op_ret, op_errno, preparent,
+ postparent, xdata);
+out:
+ return 0;
+}
+
+int32_t
+barrier_unlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ BARRIER_FOP_CBK(unlink, out, frame, this, op_ret, op_errno, preparent,
+ postparent, xdata);
+out:
+ return 0;
+}
+
+int32_t
+barrier_fsync_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ BARRIER_FOP_CBK(fsync, out, frame, this, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+out:
+ return 0;
+}
+
+int32_t
+barrier_writev(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iovec *vector, int32_t count, off_t off, uint32_t flags,
+ struct iobref *iobref, dict_t *xdata)
+{
+ if (!((flags | fd->flags) & (O_SYNC | O_DSYNC))) {
+ STACK_WIND_TAIL(frame, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->writev, fd, vector, count, off,
+ flags, iobref, xdata);
+
+ return 0;
+ }
+
+ barrier_local_set_gfid(frame, fd->inode->gfid, this);
+ STACK_WIND(frame, barrier_writev_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->writev, fd, vector, count, off, flags,
+ iobref, xdata);
+ return 0;
+}
+
+int32_t
+barrier_fremovexattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ const char *name, dict_t *xdata)
+{
+ barrier_local_set_gfid(frame, fd->inode->gfid, this);
+ STACK_WIND(frame, barrier_fremovexattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fremovexattr, fd, name, xdata);
+ return 0;
+}
+
+int32_t
+barrier_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, dict_t *xdata)
+{
+ barrier_local_set_gfid(frame, loc->inode->gfid, this);
+ STACK_WIND(frame, barrier_removexattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->removexattr, loc, name, xdata);
+ return 0;
+}
+
+int32_t
+barrier_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset,
+ dict_t *xdata)
+{
+ barrier_local_set_gfid(frame, loc->inode->gfid, this);
+ STACK_WIND(frame, barrier_truncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->truncate, loc, offset, xdata);
+ return 0;
+}
+
+int32_t
+barrier_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc,
+ loc_t *newloc, dict_t *xdata)
+{
+ barrier_local_set_gfid(frame, oldloc->inode->gfid, this);
+ STACK_WIND(frame, barrier_rename_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rename, oldloc, newloc, xdata);
+ return 0;
+}
+
+int
+barrier_rmdir(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+ dict_t *xdata)
+{
+ barrier_local_set_gfid(frame, loc->inode->gfid, this);
+ STACK_WIND(frame, barrier_rmdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rmdir, loc, flags, xdata);
+ return 0;
+}
+
+int32_t
+barrier_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflag,
+ dict_t *xdata)
+{
+ barrier_local_set_gfid(frame, loc->inode->gfid, this);
+ STACK_WIND(frame, barrier_unlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->unlink, loc, xflag, xdata);
+ return 0;
+}
+
+int32_t
+barrier_ftruncate(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ dict_t *xdata)
+{
+ barrier_local_set_gfid(frame, fd->inode->gfid, this);
+ STACK_WIND(frame, barrier_ftruncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->ftruncate, fd, offset, xdata);
+ return 0;
+}
+
+int32_t
+barrier_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags,
+ dict_t *xdata)
+{
+ barrier_local_set_gfid(frame, fd->inode->gfid, this);
+ STACK_WIND(frame, barrier_fsync_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsync, fd, flags, xdata);
+ return 0;
+}
+
+call_stub_t *
+__barrier_dequeue(xlator_t *this, struct list_head *queue)
+{
+ call_stub_t *stub = NULL;
+ barrier_priv_t *priv = NULL;
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ if (list_empty(queue))
+ goto out;
+
+ stub = list_entry(queue->next, call_stub_t, list);
+ list_del_init(&stub->list);
+
+out:
+ return stub;
+}
+
+void
+barrier_dequeue_all(xlator_t *this, struct list_head *queue)
+{
+ call_stub_t *stub = NULL;
+
+ gf_log(this->name, GF_LOG_INFO, "Dequeuing all the barriered fops");
+
+ /* TODO: Start the below task in a new thread */
+ while ((stub = __barrier_dequeue(this, queue)))
+ call_resume(stub);
+
+ gf_log(this->name, GF_LOG_INFO,
+ "Dequeuing the barriered fops is "
+ "finished");
+ return;
+}
+
+void
+barrier_timeout(void *data)
+{
+ xlator_t *this = NULL;
+ barrier_priv_t *priv = NULL;
+ struct list_head queue = {
+ 0,
+ };
+
+ this = data;
+ THIS = this;
+ priv = this->private;
+
+ INIT_LIST_HEAD(&queue);
+
+ gf_log(this->name, GF_LOG_CRITICAL,
+ "Disabling barrier because of "
+ "the barrier timeout.");
+
+ LOCK(&priv->lock);
+ {
+ __barrier_disable(this, &queue);
+ }
+ UNLOCK(&priv->lock);
+
+ barrier_dequeue_all(this, &queue);
+
+ return;
+}
+
+void
+__barrier_enqueue(xlator_t *this, call_stub_t *stub)
+{
+ barrier_priv_t *priv = NULL;
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ list_add_tail(&stub->list, &priv->queue);
+ priv->queue_size++;
+
+ return;
+}
+
+void
+__barrier_disable(xlator_t *this, struct list_head *queue)
+{
+ GF_UNUSED int ret = 0;
+ barrier_priv_t *priv = NULL;
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ if (priv->timer) {
+ ret = gf_timer_call_cancel(this->ctx, priv->timer);
+ priv->timer = NULL;
+ }
+
+ list_splice_init(&priv->queue, queue);
+ priv->queue_size = 0;
+ priv->barrier_enabled = _gf_false;
+}
+
+int
+__barrier_enable(xlator_t *this, barrier_priv_t *priv)
+{
+ int ret = -1;
+
+ priv->timer = gf_timer_call_after(this->ctx, priv->timeout, barrier_timeout,
+ (void *)this);
+ if (!priv->timer) {
+ gf_log(this->name, GF_LOG_CRITICAL,
+ "Couldn't add barrier "
+ "timeout event.");
+ goto out;
+ }
+
+ priv->barrier_enabled = _gf_true;
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+notify(xlator_t *this, int event, void *data, ...)
+{
+ barrier_priv_t *priv = this->private;
+ dict_t *dict = NULL;
+ int ret = -1;
+ int barrier_enabled = _gf_false;
+ struct list_head queue = {
+ 0,
+ };
+
+ GF_ASSERT(priv);
+ INIT_LIST_HEAD(&queue);
+
+ switch (event) {
+ case GF_EVENT_TRANSLATOR_OP: {
+ dict = data;
+ barrier_enabled = dict_get_str_boolean(dict, "barrier", -1);
+
+ if (barrier_enabled == -1) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "Could not fetch "
+ " barrier key from the dictionary.");
+ goto out;
+ }
+
+ LOCK(&priv->lock);
+ {
+ if (!priv->barrier_enabled) {
+ if (barrier_enabled) {
+ ret = __barrier_enable(this, priv);
+ } else {
+ UNLOCK(&priv->lock);
+ gf_log(this->name, GF_LOG_ERROR, "Already disabled.");
+ goto post_unlock;
+ }
+ } else {
+ if (!barrier_enabled) {
+ __barrier_disable(this, &queue);
+ ret = 0;
+ } else {
+ UNLOCK(&priv->lock);
+ gf_log(this->name, GF_LOG_ERROR, "Already enabled");
+ goto post_unlock;
+ }
+ }
+ }
+ UNLOCK(&priv->lock);
+ post_unlock:
+ if (!list_empty(&queue))
+ barrier_dequeue_all(this, &queue);
+
+ break;
+ }
+ default: {
+ default_notify(this, event, data);
+ ret = 0;
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+int
+reconfigure(xlator_t *this, dict_t *options)
+{
+ barrier_priv_t *priv = NULL;
+ int ret = -1;
+ gf_boolean_t barrier_enabled = _gf_false;
+ uint32_t timeout = {
+ 0,
+ };
+ struct list_head queue = {
+ 0,
+ };
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ GF_OPTION_RECONF("barrier", barrier_enabled, options, bool, out);
+ GF_OPTION_RECONF("barrier-timeout", timeout, options, time, out);
+
+ INIT_LIST_HEAD(&queue);
+
+ LOCK(&priv->lock);
+ {
+ if (!priv->barrier_enabled) {
+ if (barrier_enabled) {
+ ret = __barrier_enable(this, priv);
+ if (ret) {
+ goto unlock;
+ }
+ }
+ } else {
+ if (!barrier_enabled) {
+ __barrier_disable(this, &queue);
+ }
+ }
+ priv->timeout.tv_sec = timeout;
+ ret = 0;
+ }
+unlock:
+ UNLOCK(&priv->lock);
+
+ if (!list_empty(&queue))
+ barrier_dequeue_all(this, &queue);
+
+out:
+ return ret;
+}
+
+int32_t
+mem_acct_init(xlator_t *this)
+{
+ int ret = -1;
+
+ ret = xlator_mem_acct_init(this, gf_barrier_mt_end + 1);
+ if (ret)
+ gf_log(this->name, GF_LOG_ERROR,
+ "Memory accounting "
+ "initialization failed.");
+
+ return ret;
+}
+
+int
+init(xlator_t *this)
+{
+ int ret = -1;
+ barrier_priv_t *priv = NULL;
+ uint32_t timeout = {
+ 0,
+ };
+
+ if (!this->children || this->children->next) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "'barrier' not configured with exactly one child");
+ goto out;
+ }
+
+ if (!this->parents)
+ gf_log(this->name, GF_LOG_WARNING, "dangling volume. check volfile ");
+
+ priv = GF_CALLOC(1, sizeof(*priv), gf_barrier_mt_priv_t);
+ if (!priv)
+ goto out;
+
+ LOCK_INIT(&priv->lock);
+
+ GF_OPTION_INIT("barrier", priv->barrier_enabled, bool, out);
+ GF_OPTION_INIT("barrier-timeout", timeout, time, out);
+ priv->timeout.tv_sec = timeout;
+
+ INIT_LIST_HEAD(&priv->queue);
+
+ if (priv->barrier_enabled) {
+ ret = __barrier_enable(this, priv);
+ if (ret == -1)
+ goto out;
+ }
+
+ this->private = priv;
+ ret = 0;
+out:
+ if (ret && priv)
+ GF_FREE(priv);
+
+ return ret;
+}
+
+void
+fini(xlator_t *this)
+{
+ barrier_priv_t *priv = NULL;
+ struct list_head queue = {
+ 0,
+ };
+
+ priv = this->private;
+ if (!priv)
+ goto out;
+
+ INIT_LIST_HEAD(&queue);
+
+ gf_log(this->name, GF_LOG_INFO,
+ "Disabling barriering and dequeuing "
+ "all the queued fops");
+ LOCK(&priv->lock);
+ {
+ __barrier_disable(this, &queue);
+ }
+ UNLOCK(&priv->lock);
+
+ if (!list_empty(&queue))
+ barrier_dequeue_all(this, &queue);
+
+ this->private = NULL;
+
+ LOCK_DESTROY(&priv->lock);
+ GF_FREE(priv);
+out:
+ return;
+}
+
+static void
+barrier_dump_stub(call_stub_t *stub, char *prefix)
+{
+ char key[GF_DUMP_MAX_BUF_LEN] = {
+ 0,
+ };
+
+ gf_proc_dump_build_key(key, prefix, "fop");
+ gf_proc_dump_write(key, "%s", gf_fop_list[stub->fop]);
+
+ if (stub->frame->local) {
+ gf_proc_dump_build_key(key, prefix, "gfid");
+ gf_proc_dump_write(key, "%s",
+ uuid_utoa(*(uuid_t *)(stub->frame->local)));
+ }
+ if (stub->args.loc.path) {
+ gf_proc_dump_build_key(key, prefix, "path");
+ gf_proc_dump_write(key, "%s", stub->args.loc.path);
+ }
+ if (stub->args.loc.name) {
+ gf_proc_dump_build_key(key, prefix, "name");
+ gf_proc_dump_write(key, "%s", stub->args.loc.name);
+ }
+
+ return;
+}
+
+static void
+__barrier_dump_queue(barrier_priv_t *priv)
+{
+ call_stub_t *stub = NULL;
+ char key[GF_DUMP_MAX_BUF_LEN] = {
+ 0,
+ };
+ int i = 0;
+
+ GF_VALIDATE_OR_GOTO("barrier", priv, out);
+
+ list_for_each_entry(stub, &priv->queue, list)
+ {
+ snprintf(key, sizeof(key), "stub.%d", i++);
+ gf_proc_dump_add_section("%s", key);
+ barrier_dump_stub(stub, key);
+ }
+
+out:
+ return;
+}
+
+int
+barrier_dump_priv(xlator_t *this)
+{
+ int ret = -1;
+ char key[GF_DUMP_MAX_BUF_LEN] = {
+ 0,
+ };
+ barrier_priv_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO("barrier", this, out);
+
+ priv = this->private;
+ if (!priv)
+ return 0;
+
+ gf_proc_dump_build_key(key, "xlator.features.barrier", "priv");
+ gf_proc_dump_add_section("%s", key);
+ gf_proc_dump_build_key(key, "barrier", "enabled");
+
+ LOCK(&priv->lock);
+ {
+ gf_proc_dump_write(key, "%d", priv->barrier_enabled);
+ gf_proc_dump_build_key(key, "barrier", "timeout");
+ gf_proc_dump_write(key, "%ld", priv->timeout.tv_sec);
+ if (priv->barrier_enabled) {
+ gf_proc_dump_build_key(key, "barrier", "queue_size");
+ gf_proc_dump_write(key, "%d", priv->queue_size);
+ __barrier_dump_queue(priv);
+ }
+ }
+ UNLOCK(&priv->lock);
+
+out:
+ return ret;
+}
+
+struct xlator_fops fops = {
+
+ /* Barrier Class fops */
+ .rmdir = barrier_rmdir,
+ .unlink = barrier_unlink,
+ .rename = barrier_rename,
+ .removexattr = barrier_removexattr,
+ .fremovexattr = barrier_fremovexattr,
+ .truncate = barrier_truncate,
+ .ftruncate = barrier_ftruncate,
+ .fsync = barrier_fsync,
+
+ /* Writes with only O_SYNC flag */
+ .writev = barrier_writev,
+};
+
+struct xlator_dumpops dumpops = {
+ .priv = barrier_dump_priv,
+};
+
+struct xlator_cbks cbks;
+
+struct volume_options options[] = {
+ {.key = {"barrier"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "disable",
+ .op_version = {GD_OP_VERSION_3_6_0},
+ .flags = OPT_FLAG_SETTABLE,
+ .description = "When \"enabled\", blocks acknowledgements to application "
+ "for file operations such as rmdir, rename, unlink, "
+ "removexattr, fremovexattr, truncate, ftruncate, "
+ "write (with O_SYNC), fsync. It is turned \"off\" by "
+ "default."},
+ {.key = {"barrier-timeout"},
+ .type = GF_OPTION_TYPE_TIME,
+ .default_value = BARRIER_TIMEOUT,
+ .op_version = {GD_OP_VERSION_3_6_0},
+ .flags = OPT_FLAG_SETTABLE,
+ .description = "After 'timeout' seconds since the time 'barrier' "
+ "option was set to \"on\", acknowledgements to file "
+ "operations are no longer blocked and previously "
+ "blocked acknowledgements are sent to the application"},
+ {.key = {NULL}},
+};
+
+xlator_api_t xlator_api = {
+ .init = init,
+ .fini = fini,
+ .notify = notify,
+ .reconfigure = reconfigure,
+ .mem_acct_init = mem_acct_init,
+ .op_version = {1}, /* Present from the initial version */
+ .dumpops = &dumpops,
+ .fops = &fops,
+ .cbks = &cbks,
+ .options = options,
+ .identifier = "barrier",
+ .category = GF_MAINTAINED,
+};
diff --git a/xlators/features/barrier/src/barrier.h b/xlators/features/barrier/src/barrier.h
new file mode 100644
index 00000000000..1337f311f7d
--- /dev/null
+++ b/xlators/features/barrier/src/barrier.h
@@ -0,0 +1,89 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __BARRIER_H__
+#define __BARRIER_H__
+
+#include "barrier-mem-types.h"
+#include <glusterfs/xlator.h>
+#include <glusterfs/timer.h>
+#include <glusterfs/call-stub.h>
+
+#define BARRIER_FOP_CBK(fop_name, label, frame, this, params...) \
+ do { \
+ barrier_priv_t *_priv = NULL; \
+ call_stub_t *_stub = NULL; \
+ gf_boolean_t _barrier_enabled = _gf_false; \
+ struct list_head queue = { \
+ 0, \
+ }; \
+ \
+ INIT_LIST_HEAD(&queue); \
+ \
+ _priv = this->private; \
+ GF_ASSERT(_priv); \
+ \
+ LOCK(&_priv->lock); \
+ { \
+ if (_priv->barrier_enabled) { \
+ _barrier_enabled = _priv->barrier_enabled; \
+ \
+ _stub = fop_##fop_name##_cbk_stub( \
+ frame, barrier_##fop_name##_cbk_resume, params); \
+ if (!_stub) { \
+ __barrier_disable(this, &queue); \
+ goto unlock; \
+ } \
+ \
+ __barrier_enqueue(this, _stub); \
+ } \
+ } \
+ unlock: \
+ UNLOCK(&_priv->lock); \
+ \
+ if (_stub) \
+ goto label; \
+ \
+ if (_barrier_enabled && !_stub) { \
+ gf_log(this->name, GF_LOG_CRITICAL, \
+ "Failed to barrier FOPs, disabling " \
+ "barrier. FOP: %s, ERROR: %s", \
+ #fop_name, strerror(ENOMEM)); \
+ barrier_dequeue_all(this, &queue); \
+ } \
+ barrier_local_free_gfid(frame); \
+ STACK_UNWIND_STRICT(fop_name, frame, params); \
+ goto label; \
+ } while (0)
+
+typedef struct {
+ gf_timer_t *timer;
+ gf_lock_t lock;
+ struct list_head queue;
+ struct timespec timeout;
+ uint32_t queue_size;
+ gf_boolean_t barrier_enabled;
+ char _pad[3]; /* manual padding */
+} barrier_priv_t;
+
+int
+__barrier_enable(xlator_t *this, barrier_priv_t *priv);
+void
+__barrier_enqueue(xlator_t *this, call_stub_t *stub);
+void
+__barrier_disable(xlator_t *this, struct list_head *queue);
+void
+barrier_timeout(void *data);
+void
+barrier_dequeue_all(xlator_t *this, struct list_head *queue);
+call_stub_t *
+__barrier_dequeue(xlator_t *this, struct list_head *queue);
+
+#endif
diff --git a/xlators/features/bit-rot/Makefile.am b/xlators/features/bit-rot/Makefile.am
new file mode 100644
index 00000000000..f963effea22
--- /dev/null
+++ b/xlators/features/bit-rot/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = src
\ No newline at end of file
diff --git a/xlators/features/bit-rot/src/Makefile.am b/xlators/features/bit-rot/src/Makefile.am
new file mode 100644
index 00000000000..b5e4a7d62a0
--- /dev/null
+++ b/xlators/features/bit-rot/src/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = stub bitd
diff --git a/xlators/features/bit-rot/src/bitd/Makefile.am b/xlators/features/bit-rot/src/bitd/Makefile.am
new file mode 100644
index 00000000000..6db800e6565
--- /dev/null
+++ b/xlators/features/bit-rot/src/bitd/Makefile.am
@@ -0,0 +1,23 @@
+if WITH_SERVER
+xlator_LTLIBRARIES = bit-rot.la
+endif
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
+
+bit_rot_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src/ -I$(top_builddir)/rpc/xdr/src/ \
+ -I$(top_srcdir)/rpc/rpc-lib/src -I$(CONTRIBDIR)/timer-wheel \
+ -I$(top_srcdir)/xlators/features/bit-rot/src/stub
+
+bit_rot_la_SOURCES = bit-rot.c bit-rot-scrub.c bit-rot-ssm.c \
+ bit-rot-scrub-status.c
+bit_rot_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
+ $(top_builddir)/xlators/features/changelog/lib/src/libgfchangelog.la
+
+noinst_HEADERS = bit-rot.h bit-rot-scrub.h bit-rot-bitd-messages.h bit-rot-ssm.h \
+ bit-rot-scrub-status.h
+
+AM_CFLAGS = -Wall -DBR_RATE_LIMIT_SIGNER $(GF_CFLAGS)
+
+CLEANFILES =
diff --git a/xlators/features/bit-rot/src/bitd/bit-rot-bitd-messages.h b/xlators/features/bit-rot/src/bitd/bit-rot-bitd-messages.h
new file mode 100644
index 00000000000..5bc5103a27c
--- /dev/null
+++ b/xlators/features/bit-rot/src/bitd/bit-rot-bitd-messages.h
@@ -0,0 +1,101 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+ */
+
+#ifndef _BITROT_BITD_MESSAGES_H_
+#define _BITROT_BITD_MESSAGES_H_
+
+#include <glusterfs/glfs-message-id.h>
+
+/* To add new message IDs, append new identifiers at the end of the list.
+ *
+ * Never remove a message ID. If it's not used anymore, you can rename it or
+ * leave it as it is, but not delete it. This is to prevent reutilization of
+ * IDs by other messages.
+ *
+ * The component name must match one of the entries defined in
+ * glfs-message-id.h.
+ */
+
+GLFS_MSGID(BITROT_BITD, BRB_MSG_FD_CREATE_FAILED, BRB_MSG_READV_FAILED,
+ BRB_MSG_BLOCK_READ_FAILED, BRB_MSG_CALC_CHECKSUM_FAILED,
+ BRB_MSG_NO_MEMORY, BRB_MSG_GET_SIGN_FAILED, BRB_MSG_SET_SIGN_FAILED,
+ BRB_MSG_OP_FAILED, BRB_MSG_READ_AND_SIGN_FAILED, BRB_MSG_SIGN_FAILED,
+ BRB_MSG_GET_SUBVOL_FAILED, BRB_MSG_SET_TIMER_FAILED,
+ BRB_MSG_GET_INFO_FAILED, BRB_MSG_PATH_FAILED, BRB_MSG_MARK_BAD_FILE,
+ BRB_MSG_TRIGGER_SIGN, BRB_MSG_REGISTER_FAILED,
+ BRB_MSG_CRAWLING_START, BRB_MSG_SPAWN_FAILED,
+ BRB_MSG_INVALID_SUBVOL_CHILD, BRB_MSG_SKIP_OBJECT, BRB_MSG_NO_CHILD,
+ BRB_MSG_CHECKSUM_MISMATCH, BRB_MSG_MARK_CORRUPTED,
+ BRB_MSG_CRAWLING_FINISH, BRB_MSG_CALC_ERROR, BRB_MSG_LOOKUP_FAILED,
+ BRB_MSG_PARTIAL_VERSION_PRESENCE, BRB_MSG_MEM_ACNT_FAILED,
+ BRB_MSG_TIMER_WHEEL_UNAVAILABLE, BRB_MSG_BITROT_LOADED,
+ BRB_MSG_SCALE_DOWN_FAILED, BRB_MSG_SCALE_UP_FAILED,
+ BRB_MSG_SCALE_DOWN_SCRUBBER, BRB_MSG_SCALING_UP_SCRUBBER,
+ BRB_MSG_UNKNOWN_THROTTLE, BRB_MSG_RATE_LIMIT_INFO,
+ BRB_MSG_SCRUB_INFO, BRB_MSG_CONNECTED_TO_BRICK, BRB_MSG_BRICK_INFO,
+ BRB_MSG_SUBVOL_CONNECT_FAILED, BRB_MSG_INVALID_SUBVOL,
+ BRB_MSG_RESCHEDULE_SCRUBBER_FAILED, BRB_MSG_SCRUB_START,
+ BRB_MSG_SCRUB_FINISH, BRB_MSG_SCRUB_RUNNING,
+ BRB_MSG_SCRUB_RESCHEDULED, BRB_MSG_SCRUB_TUNABLE,
+ BRB_MSG_SCRUB_THREAD_CLEANUP, BRB_MSG_SCRUBBER_CLEANED,
+ BRB_MSG_GENERIC_SSM_INFO, BRB_MSG_ZERO_TIMEOUT_BUG,
+ BRB_MSG_BAD_OBJ_READDIR_FAIL, BRB_MSG_SSM_FAILED,
+ BRB_MSG_SCRUB_WAIT_FAILED, BRB_MSG_TRIGGER_SIGN_FAILED,
+ BRB_MSG_EVENT_UNHANDLED, BRB_MSG_COULD_NOT_SCHEDULE_SCRUB,
+ BRB_MSG_THREAD_CREATION_FAILED, BRB_MSG_MEM_POOL_ALLOC,
+ BRB_MSG_SAVING_HASH_FAILED);
+
+#define BRB_MSG_FD_CREATE_FAILED_STR "failed to create fd for the inode"
+#define BRB_MSG_READV_FAILED_STR "readv failed"
+#define BRB_MSG_BLOCK_READ_FAILED_STR "reading block failed"
+#define BRB_MSG_NO_MEMORY_STR "failed to allocate memory"
+#define BRB_MSG_CALC_CHECKSUM_FAILED_STR "calculating checksum failed"
+#define BRB_MSG_GET_SIGN_FAILED_STR "failed to get the signature"
+#define BRB_MSG_SET_SIGN_FAILED_STR "signing failed"
+#define BRB_MSG_OP_FAILED_STR "failed on object"
+#define BRB_MSG_TRIGGER_SIGN_FAILED_STR "Could not trigger signing"
+#define BRB_MSG_READ_AND_SIGN_FAILED_STR "reading and signing of object failed"
+#define BRB_MSG_SET_TIMER_FAILED_STR "Failed to allocate object expiry timer"
+#define BRB_MSG_GET_SUBVOL_FAILED_STR \
+ "failed to get the subvolume for the brick"
+#define BRB_MSG_PATH_FAILED_STR "path failed"
+#define BRB_MSG_SKIP_OBJECT_STR "Entry is marked corrupted. skipping"
+#define BRB_MSG_PARTIAL_VERSION_PRESENCE_STR \
+ "PArtial version xattr presence detected, ignoring"
+#define BRB_MSG_TRIGGER_SIGN_STR "Triggering signing"
+#define BRB_MSG_CRAWLING_START_STR \
+ "Crawling brick, scanning for unsigned objects"
+#define BRB_MSG_CRAWLING_FINISH_STR "Completed crawling brick"
+#define BRB_MSG_REGISTER_FAILED_STR "Register to changelog failed"
+#define BRB_MSG_SPAWN_FAILED_STR "failed to spawn"
+#define BRB_MSG_CONNECTED_TO_BRICK_STR "Connected to brick"
+#define BRB_MSG_LOOKUP_FAILED_STR "lookup on root failed"
+#define BRB_MSG_GET_INFO_FAILED_STR "failed to get stub info"
+#define BRB_MSG_SCRUB_THREAD_CLEANUP_STR "Error cleaning up scanner thread"
+#define BRB_MSG_SCRUBBER_CLEANED_STR "cleaned up scrubber for brick"
+#define BRB_MSG_SUBVOL_CONNECT_FAILED_STR \
+ "callback handler for subvolume failed"
+#define BRB_MSG_MEM_ACNT_FAILED_STR "Memory accounting init failed"
+#define BRB_MSG_EVENT_UNHANDLED_STR "Event unhandled for child"
+#define BRB_MSG_INVALID_SUBVOL_STR "Got event from invalid subvolume"
+#define BRB_MSG_RESCHEDULE_SCRUBBER_FAILED_STR \
+ "on demand scrub schedule failed. Scrubber is not in pending state."
+#define BRB_MSG_COULD_NOT_SCHEDULE_SCRUB_STR \
+ "Could not schedule ondemand scrubbing. Scrubbing will continue " \
+ "according to old frequency."
+#define BRB_MSG_THREAD_CREATION_FAILED_STR "thread creation failed"
+#define BRB_MSG_RATE_LIMIT_INFO_STR "Rate Limit Info"
+#define BRB_MSG_MEM_POOL_ALLOC_STR "failed to allocate mem-pool for timer"
+#define BRB_MSG_NO_CHILD_STR "FATAL: no children"
+#define BRB_MSG_TIMER_WHEEL_UNAVAILABLE_STR "global timer wheel unavailable"
+#define BRB_MSG_BITROT_LOADED_STR "bit-rot xlator loaded"
+#define BRB_MSG_SAVING_HASH_FAILED_STR \
+ "failed to allocate memory for saving hash of the object"
+#endif /* !_BITROT_BITD_MESSAGES_H_ */
diff --git a/xlators/features/bit-rot/src/bitd/bit-rot-scrub-status.c b/xlators/features/bit-rot/src/bitd/bit-rot-scrub-status.c
new file mode 100644
index 00000000000..5cef2ffa5e5
--- /dev/null
+++ b/xlators/features/bit-rot/src/bitd/bit-rot-scrub-status.c
@@ -0,0 +1,78 @@
+/*
+ Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <string.h>
+#include <stdio.h>
+
+#include "bit-rot-scrub-status.h"
+
+void
+br_inc_unsigned_file_count(br_scrub_stats_t *scrub_stat)
+{
+ if (!scrub_stat)
+ return;
+
+ pthread_mutex_lock(&scrub_stat->lock);
+ {
+ scrub_stat->unsigned_files++;
+ }
+ pthread_mutex_unlock(&scrub_stat->lock);
+}
+
+void
+br_inc_scrubbed_file(br_scrub_stats_t *scrub_stat)
+{
+ if (!scrub_stat)
+ return;
+
+ pthread_mutex_lock(&scrub_stat->lock);
+ {
+ scrub_stat->scrubbed_files++;
+ }
+ pthread_mutex_unlock(&scrub_stat->lock);
+}
+
+void
+br_update_scrub_start_time(br_scrub_stats_t *scrub_stat, time_t time)
+{
+ if (!scrub_stat)
+ return;
+
+ pthread_mutex_lock(&scrub_stat->lock);
+ {
+ scrub_stat->scrub_start_time = time;
+ }
+ pthread_mutex_unlock(&scrub_stat->lock);
+}
+
+void
+br_update_scrub_finish_time(br_scrub_stats_t *scrub_stat, char *timestr,
+ time_t time)
+{
+ int lst_size = 0;
+
+ if (!scrub_stat)
+ return;
+
+ lst_size = sizeof(scrub_stat->last_scrub_time);
+ if (strlen(timestr) >= lst_size)
+ return;
+
+ pthread_mutex_lock(&scrub_stat->lock);
+ {
+ scrub_stat->scrub_end_time = time;
+
+ scrub_stat->scrub_duration = scrub_stat->scrub_end_time -
+ scrub_stat->scrub_start_time;
+
+ snprintf(scrub_stat->last_scrub_time, lst_size, "%s", timestr);
+ }
+ pthread_mutex_unlock(&scrub_stat->lock);
+}
diff --git a/xlators/features/bit-rot/src/bitd/bit-rot-scrub-status.h b/xlators/features/bit-rot/src/bitd/bit-rot-scrub-status.h
new file mode 100644
index 00000000000..f022aa831eb
--- /dev/null
+++ b/xlators/features/bit-rot/src/bitd/bit-rot-scrub-status.h
@@ -0,0 +1,50 @@
+/*
+ Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __BIT_ROT_SCRUB_STATUS_H__
+#define __BIT_ROT_SCRUB_STATUS_H__
+
+#include <stdint.h>
+#include <sys/time.h>
+#include <pthread.h>
+
+#include <glusterfs/common-utils.h>
+
+struct br_scrub_stats {
+ uint64_t scrubbed_files; /* Total number of scrubbed files. */
+
+ uint64_t unsigned_files; /* Total number of unsigned files. */
+
+ uint64_t scrub_duration; /* Duration of last scrub. */
+
+ char last_scrub_time[GF_TIMESTR_SIZE]; /* Last scrub completion time. */
+
+ time_t scrub_start_time; /* Scrubbing starting time. */
+
+ time_t scrub_end_time; /* Scrubbing finishing time. */
+
+ int8_t scrub_running; /* Whether scrub running or not. */
+
+ pthread_mutex_t lock;
+};
+
+typedef struct br_scrub_stats br_scrub_stats_t;
+
+void
+br_inc_unsigned_file_count(br_scrub_stats_t *scrub_stat);
+void
+br_inc_scrubbed_file(br_scrub_stats_t *scrub_stat);
+void
+br_update_scrub_start_time(br_scrub_stats_t *scrub_stat, time_t time);
+void
+br_update_scrub_finish_time(br_scrub_stats_t *scrub_stat, char *timestr,
+ time_t time);
+
+#endif /* __BIT_ROT_SCRUB_STATUS_H__ */
diff --git a/xlators/features/bit-rot/src/bitd/bit-rot-scrub.c b/xlators/features/bit-rot/src/bitd/bit-rot-scrub.c
new file mode 100644
index 00000000000..289dd53f610
--- /dev/null
+++ b/xlators/features/bit-rot/src/bitd/bit-rot-scrub.c
@@ -0,0 +1,2070 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <math.h>
+#include <ctype.h>
+#include <sys/uio.h>
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/common-utils.h>
+
+#include "bit-rot-scrub.h"
+#include <pthread.h>
+#include "bit-rot-bitd-messages.h"
+#include "bit-rot-scrub-status.h"
+#include <glusterfs/events.h>
+
+struct br_scrubbers {
+ pthread_t scrubthread;
+
+ struct list_head list;
+};
+
+struct br_fsscan_entry {
+ void *data;
+
+ loc_t parent;
+
+ gf_dirent_t *entry;
+
+ struct br_scanfs *fsscan; /* backpointer to subvolume scanner */
+
+ struct list_head list;
+};
+
+/**
+ * fetch signature extended attribute from an object's fd.
+ * NOTE: On success @xattr is not unref'd as @sign points
+ * to the dictionary value.
+ */
+static int32_t
+bitd_fetch_signature(xlator_t *this, br_child_t *child, fd_t *fd,
+ dict_t **xattr, br_isignature_out_t **sign)
+{
+ int32_t ret = -1;
+
+ ret = syncop_fgetxattr(child->xl, fd, xattr, GLUSTERFS_GET_OBJECT_SIGNATURE,
+ NULL, NULL);
+ if (ret < 0) {
+ br_log_object(this, "fgetxattr", fd->inode->gfid, -ret);
+ goto out;
+ }
+
+ ret = dict_get_ptr(*xattr, GLUSTERFS_GET_OBJECT_SIGNATURE, (void **)sign);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, BRB_MSG_GET_SIGN_FAILED,
+ "failed to extract signature info [GFID: %s]",
+ uuid_utoa(fd->inode->gfid));
+ goto unref_dict;
+ }
+
+ return 0;
+
+unref_dict:
+ dict_unref(*xattr);
+out:
+ return -1;
+}
+
+/**
+ * POST COMPUTE CHECK
+ *
+ * Checks to be performed before verifying calculated signature
+ * Object is skipped if:
+ *  - it has a stale signature
+ *  - its version does not match the version cached in the pre-compute check
+ */
+
+int32_t
+bitd_scrub_post_compute_check(xlator_t *this, br_child_t *child, fd_t *fd,
+ unsigned long version,
+ br_isignature_out_t **signature,
+ br_scrub_stats_t *scrub_stat,
+ gf_boolean_t skip_stat)
+{
+ int32_t ret = 0;
+ size_t signlen = 0;
+ dict_t *xattr = NULL;
+ br_isignature_out_t *signptr = NULL;
+
+ ret = bitd_fetch_signature(this, child, fd, &xattr, &signptr);
+ if (ret < 0) {
+ if (!skip_stat)
+ br_inc_unsigned_file_count(scrub_stat);
+ goto out;
+ }
+
+ /**
+ * Either the object got dirtied during the time the signature was
+ * calculated OR the version we saved during pre-compute check does
+ * not match now, implying that the object got dirtied and signed in
+ * between scrubs pre & post compute checks (checksum window).
+ *
+ * The log entry looks pretty ugly, but helps in debugging..
+ */
+ if (signptr->stale || (signptr->version != version)) {
+ if (!skip_stat)
+ br_inc_unsigned_file_count(scrub_stat);
+ gf_msg_debug(this->name, 0,
+ "<STAGE: POST> Object [GFID: %s] "
+ "either has a stale signature OR underwent "
+ "signing during checksumming {Stale: %d | "
+ "Version: %lu,%lu}",
+ uuid_utoa(fd->inode->gfid), (signptr->stale) ? 1 : 0,
+ version, signptr->version);
+ ret = -1;
+ goto unref_dict;
+ }
+
+ signlen = signptr->signaturelen;
+ *signature = GF_MALLOC(sizeof(br_isignature_out_t) + signlen,
+ gf_common_mt_char);
+
+ (void)memcpy(*signature, signptr, sizeof(br_isignature_out_t) + signlen);
+
+ (*signature)->signaturelen = signlen;
+
+unref_dict:
+ dict_unref(xattr);
+out:
+ return ret;
+}
+
+static int32_t
+bitd_signature_staleness(xlator_t *this, br_child_t *child, fd_t *fd,
+ int *stale, unsigned long *version,
+ br_scrub_stats_t *scrub_stat, gf_boolean_t skip_stat)
+{
+ int32_t ret = -1;
+ dict_t *xattr = NULL;
+ br_isignature_out_t *signptr = NULL;
+
+ ret = bitd_fetch_signature(this, child, fd, &xattr, &signptr);
+ if (ret < 0) {
+ if (!skip_stat)
+ br_inc_unsigned_file_count(scrub_stat);
+ goto out;
+ }
+
+ /**
+ * save version for validation in post compute stage
+ * c.f. bitd_scrub_post_compute_check()
+ */
+ *stale = signptr->stale ? 1 : 0;
+ *version = signptr->version;
+
+ dict_unref(xattr);
+
+out:
+ return ret;
+}
+
+/**
+ * PRE COMPUTE CHECK
+ *
+ * Checks to be performed before initiating object signature calculation.
+ * An object is skipped if:
+ * - it's already marked corrupted
+ * - has stale signature
+ */
+int32_t
+bitd_scrub_pre_compute_check(xlator_t *this, br_child_t *child, fd_t *fd,
+ unsigned long *version,
+ br_scrub_stats_t *scrub_stat,
+ gf_boolean_t skip_stat)
+{
+ int stale = 0;
+ int32_t ret = -1;
+
+ if (bitd_is_bad_file(this, child, NULL, fd)) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, BRB_MSG_SKIP_OBJECT,
+ "Object [GFID: %s] is marked corrupted, skipping..",
+ uuid_utoa(fd->inode->gfid));
+ goto out;
+ }
+
+ ret = bitd_signature_staleness(this, child, fd, &stale, version, scrub_stat,
+ skip_stat);
+ if (!ret && stale) {
+ if (!skip_stat)
+ br_inc_unsigned_file_count(scrub_stat);
+ gf_msg_debug(this->name, 0,
+ "<STAGE: PRE> Object [GFID: %s] "
+ "has stale signature",
+ uuid_utoa(fd->inode->gfid));
+ ret = -1;
+ }
+
+out:
+ return ret;
+}
+
+/* static int */
+int
+bitd_compare_ckum(xlator_t *this, br_isignature_out_t *sign, unsigned char *md,
+ inode_t *linked_inode, gf_dirent_t *entry, fd_t *fd,
+ br_child_t *child, loc_t *loc)
+{
+ int ret = -1;
+ dict_t *xattr = NULL;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, sign, out);
+ GF_VALIDATE_OR_GOTO(this->name, fd, out);
+ GF_VALIDATE_OR_GOTO(this->name, child, out);
+ GF_VALIDATE_OR_GOTO(this->name, linked_inode, out);
+ GF_VALIDATE_OR_GOTO(this->name, md, out);
+ GF_VALIDATE_OR_GOTO(this->name, entry, out);
+
+ if (strncmp(sign->signature, (char *)md, sign->signaturelen) == 0) {
+ gf_msg_debug(this->name, 0,
+ "%s [GFID: %s | Brick: %s] "
+ "matches calculated checksum",
+ loc->path, uuid_utoa(linked_inode->gfid),
+ child->brick_path);
+ return 0;
+ }
+
+ gf_msg(this->name, GF_LOG_DEBUG, 0, BRB_MSG_CHECKSUM_MISMATCH,
+ "Object checksum mismatch: %s [GFID: %s | Brick: %s]", loc->path,
+ uuid_utoa(linked_inode->gfid), child->brick_path);
+ gf_msg(this->name, GF_LOG_ALERT, 0, BRB_MSG_CHECKSUM_MISMATCH,
+ "CORRUPTION DETECTED: Object %s {Brick: %s | GFID: %s}", loc->path,
+ child->brick_path, uuid_utoa(linked_inode->gfid));
+
+ /* Perform bad-file marking */
+ xattr = dict_new();
+ if (!xattr) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_int32(xattr, BITROT_OBJECT_BAD_KEY, _gf_true);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, BRB_MSG_MARK_BAD_FILE,
+ "Error setting bad-file marker for %s [GFID: %s | "
+ "Brick: %s]",
+ loc->path, uuid_utoa(linked_inode->gfid), child->brick_path);
+ goto dictfree;
+ }
+
+ gf_msg(this->name, GF_LOG_ALERT, 0, BRB_MSG_MARK_CORRUPTED,
+ "Marking"
+ " %s [GFID: %s | Brick: %s] as corrupted..",
+ loc->path, uuid_utoa(linked_inode->gfid), child->brick_path);
+ gf_event(EVENT_BITROT_BAD_FILE, "gfid=%s;path=%s;brick=%s",
+ uuid_utoa(linked_inode->gfid), loc->path, child->brick_path);
+ ret = syncop_fsetxattr(child->xl, fd, xattr, 0, NULL, NULL);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, 0, BRB_MSG_MARK_BAD_FILE,
+ "Error marking object %s [GFID: %s] as corrupted", loc->path,
+ uuid_utoa(linked_inode->gfid));
+
+dictfree:
+ dict_unref(xattr);
+out:
+ return ret;
+}
+
+/**
+ * "The Scrubber"
+ *
+ * Perform signature validation for a given object with the assumption
+ * that the signature is SHA256 (because signer as of now _always_
+ * signs with SHA256).
+ */
+int
+br_scrubber_scrub_begin(xlator_t *this, struct br_fsscan_entry *fsentry)
+{
+ int32_t ret = -1;
+ fd_t *fd = NULL;
+ loc_t loc = {
+ 0,
+ };
+ struct iatt iatt = {
+ 0,
+ };
+ struct iatt parent_buf = {
+ 0,
+ };
+ pid_t pid = 0;
+ br_child_t *child = NULL;
+ unsigned char *md = NULL;
+ inode_t *linked_inode = NULL;
+ br_isignature_out_t *sign = NULL;
+ unsigned long signedversion = 0;
+ gf_dirent_t *entry = NULL;
+ br_private_t *priv = NULL;
+ loc_t *parent = NULL;
+ gf_boolean_t skip_stat = _gf_false;
+ uuid_t shard_root_gfid = {
+ 0,
+ };
+
+ GF_VALIDATE_OR_GOTO("bit-rot", fsentry, out);
+
+ entry = fsentry->entry;
+ parent = &fsentry->parent;
+ child = fsentry->data;
+
+ priv = this->private;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", entry, out);
+ GF_VALIDATE_OR_GOTO("bit-rot", parent, out);
+ GF_VALIDATE_OR_GOTO("bit-rot", child, out);
+ GF_VALIDATE_OR_GOTO("bit-rot", priv, out);
+
+ pid = GF_CLIENT_PID_SCRUB;
+
+ ret = br_prepare_loc(this, child, parent, entry, &loc);
+ if (!ret)
+ goto out;
+
+ syncopctx_setfspid(&pid);
+
+ ret = syncop_lookup(child->xl, &loc, &iatt, &parent_buf, NULL, NULL);
+ if (ret) {
+ br_log_object_path(this, "lookup", loc.path, -ret);
+ goto out;
+ }
+
+ linked_inode = inode_link(loc.inode, parent->inode, loc.name, &iatt);
+ if (linked_inode)
+ inode_lookup(linked_inode);
+
+ gf_msg_debug(this->name, 0, "Scrubbing object %s [GFID: %s]", entry->d_name,
+ uuid_utoa(linked_inode->gfid));
+
+ if (iatt.ia_type != IA_IFREG) {
+ gf_msg_debug(this->name, 0, "%s is not a regular file", entry->d_name);
+ ret = 0;
+ goto unref_inode;
+ }
+
+ if (IS_DHT_LINKFILE_MODE((&iatt))) {
+ gf_msg_debug(this->name, 0, "%s is a dht sticky bit file",
+ entry->d_name);
+ ret = 0;
+ goto unref_inode;
+ }
+
+ /* skip updating scrub statistics for shard entries */
+ gf_uuid_parse(SHARD_ROOT_GFID, shard_root_gfid);
+ if (gf_uuid_compare(loc.pargfid, shard_root_gfid) == 0)
+ skip_stat = _gf_true;
+
+ /**
+ * open() an fd for subsequent operations
+ */
+ fd = fd_create(linked_inode, 0);
+ if (!fd) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, BRB_MSG_FD_CREATE_FAILED,
+ "failed to create fd for inode %s",
+ uuid_utoa(linked_inode->gfid));
+ goto unref_inode;
+ }
+
+ ret = syncop_open(child->xl, &loc, O_RDWR, fd, NULL, NULL);
+ if (ret) {
+ br_log_object(this, "open", linked_inode->gfid, -ret);
+ ret = -1;
+ goto unrefd;
+ }
+
+ fd_bind(fd);
+
+ /**
+ * perform pre compute checks before initiating checksum
+ * computation
+ * - presence of bad object
+ * - signature staleness
+ */
+ ret = bitd_scrub_pre_compute_check(this, child, fd, &signedversion,
+ &priv->scrub_stat, skip_stat);
+ if (ret)
+ goto unrefd; /* skip this object */
+
+ /* if all's good, proceed to calculate the hash */
+ md = GF_MALLOC(SHA256_DIGEST_LENGTH, gf_common_mt_char);
+ if (!md)
+ goto unrefd;
+
+ ret = br_calculate_obj_checksum(md, child, fd, &iatt);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, BRB_MSG_CALC_ERROR,
+ "error calculating hash for object [GFID: %s]",
+ uuid_utoa(fd->inode->gfid));
+ ret = -1;
+ goto free_md;
+ }
+
+ /**
+ * perform post compute checks as an object's signature may have
+ * become stale while scrubber calculated checksum.
+ */
+ ret = bitd_scrub_post_compute_check(this, child, fd, signedversion, &sign,
+ &priv->scrub_stat, skip_stat);
+ if (ret)
+ goto free_md;
+
+ ret = bitd_compare_ckum(this, sign, md, linked_inode, entry, fd, child,
+ &loc);
+
+ if (!skip_stat)
+ br_inc_scrubbed_file(&priv->scrub_stat);
+
+ GF_FREE(sign); /* allocated on post-compute */
+
+ /** fd_unref() takes care of closing fd.. like syncop_close() */
+
+free_md:
+ GF_FREE(md);
+unrefd:
+ fd_unref(fd);
+unref_inode:
+ inode_unref(linked_inode);
+out:
+ loc_wipe(&loc);
+ return ret;
+}
+
+static void
+_br_lock_cleaner(void *arg)
+{
+ pthread_mutex_t *mutex = arg;
+
+ pthread_mutex_unlock(mutex);
+}
+
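+/*
+ * Hand over collected entries to the scrubbers: the scanner's "queued" list
+ * becomes the "ready" list, the scrubber threads are woken up, and the
+ * caller blocks until every handed-over entry has been scrubbed, i.e. until
+ * fsscan->entries drops back to zero.
+ */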
+static void
+wait_for_scrubbing(xlator_t *this, struct br_scanfs *fsscan)
+{
+ br_private_t *priv = NULL;
+ struct br_scrubber *fsscrub = NULL;
+
+ priv = this->private;
+ fsscrub = &priv->fsscrub;
+
+ pthread_cleanup_push(_br_lock_cleaner, &fsscan->waitlock);
+ pthread_mutex_lock(&fsscan->waitlock);
+ {
+ pthread_cleanup_push(_br_lock_cleaner, &fsscrub->mutex);
+ pthread_mutex_lock(&fsscrub->mutex);
+ {
+ list_replace_init(&fsscan->queued, &fsscan->ready);
+
+ /* wake up scrubbers */
+ pthread_cond_broadcast(&fsscrub->cond);
+ }
+ pthread_mutex_unlock(&fsscrub->mutex);
+ pthread_cleanup_pop(0);
+
+ while (fsscan->entries != 0)
+ pthread_cond_wait(&fsscan->waitcond, &fsscan->waitlock);
+ }
+ pthread_mutex_unlock(&fsscan->waitlock);
+ pthread_cleanup_pop(0);
+}
+
+static void
+_br_fsscan_inc_entry_count(struct br_scanfs *fsscan)
+{
+ fsscan->entries++;
+}
+
+static void
+_br_fsscan_dec_entry_count(struct br_scanfs *fsscan)
+{
+ if (--fsscan->entries == 0) {
+ pthread_mutex_lock(&fsscan->waitlock);
+ {
+ pthread_cond_signal(&fsscan->waitcond);
+ }
+ pthread_mutex_unlock(&fsscan->waitlock);
+ }
+}
+
+static void
+_br_fsscan_collect_entry(struct br_scanfs *fsscan,
+ struct br_fsscan_entry *fsentry)
+{
+ list_add_tail(&fsentry->list, &fsscan->queued);
+ _br_fsscan_inc_entry_count(fsscan);
+}
+
+#define NR_ENTRIES (1 << 7) /* ..bulk scrubbing */
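+/*
+ * With NR_ENTRIES at 128 (1 << 7), the scanner below collects up to roughly
+ * 128 directory entries before blocking in wait_for_scrubbing() and handing
+ * the batch over to the scrubber threads.
+ */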
+
+int
+br_fsscanner_handle_entry(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
+ void *data)
+{
+ int32_t ret = -1;
+ int scrub = 0;
+ br_child_t *child = NULL;
+ xlator_t *this = NULL;
+ struct br_scanfs *fsscan = NULL;
+ struct br_fsscan_entry *fsentry = NULL;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", subvol, error_return);
+ GF_VALIDATE_OR_GOTO("bit-rot", data, error_return);
+
+ child = data;
+ this = child->this;
+ fsscan = &child->fsscan;
+
+ _mask_cancellation();
+
+ fsentry = GF_CALLOC(1, sizeof(*fsentry), gf_br_mt_br_fsscan_entry_t);
+ if (!fsentry)
+ goto error_return;
+
+ {
+ fsentry->data = data;
+ fsentry->fsscan = &child->fsscan;
+
+ /* copy parent loc */
+ ret = loc_copy(&fsentry->parent, parent);
+ if (ret)
+ goto dealloc;
+
+ /* copy child entry */
+ fsentry->entry = entry_copy(entry);
+ if (!fsentry->entry)
+ goto locwipe;
+
+ INIT_LIST_HEAD(&fsentry->list);
+ }
+
+ LOCK(&fsscan->entrylock);
+ {
+ _br_fsscan_collect_entry(fsscan, fsentry);
+
+ /**
+         * need not be an equality check as entries may be pushed
+ * back onto the scanned queue when thread(s) are cleaned.
+ */
+ if (fsscan->entries >= NR_ENTRIES)
+ scrub = 1;
+ }
+ UNLOCK(&fsscan->entrylock);
+
+ _unmask_cancellation();
+
+ if (scrub)
+ wait_for_scrubbing(this, fsscan);
+
+ return 0;
+
+locwipe:
+ loc_wipe(&fsentry->parent);
+dealloc:
+ GF_FREE(fsentry);
+error_return:
+ return -1;
+}
+
+int32_t
+br_fsscan_deactivate(xlator_t *this)
+{
+ int ret = 0;
+ br_private_t *priv = NULL;
+ br_scrub_state_t nstate = 0;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ scrub_monitor = &priv->scrub_monitor;
+
+ ret = gf_tw_del_timer(priv->timer_wheel, scrub_monitor->timer);
+ if (ret == 0) {
+ nstate = BR_SCRUB_STATE_STALLED;
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCRUB_INFO,
+ "Volume is under active scrubbing. Pausing scrub..");
+ } else {
+ nstate = BR_SCRUB_STATE_PAUSED;
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCRUB_INFO,
+ "Scrubber paused");
+ }
+
+ _br_monitor_set_scrub_state(scrub_monitor, nstate);
+
+ return 0;
+}
+
+static void
+br_scrubber_log_time(xlator_t *this, const char *sfx)
+{
+ char timestr[GF_TIMESTR_SIZE] = {
+ 0,
+ };
+ br_private_t *priv = NULL;
+ time_t now = 0;
+
+ now = gf_time();
+ priv = this->private;
+
+ gf_time_fmt(timestr, sizeof(timestr), now, gf_timefmt_FT);
+
+ if (strcasecmp(sfx, "started") == 0) {
+ br_update_scrub_start_time(&priv->scrub_stat, now);
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCRUB_START,
+ "Scrubbing %s at %s", sfx, timestr);
+ } else {
+ br_update_scrub_finish_time(&priv->scrub_stat, timestr, now);
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCRUB_FINISH,
+ "Scrubbing %s at %s", sfx, timestr);
+ }
+}
+
+static void
+br_fsscanner_log_time(xlator_t *this, br_child_t *child, const char *sfx)
+{
+ char timestr[GF_TIMESTR_SIZE] = {
+ 0,
+ };
+ time_t now = 0;
+
+ now = gf_time();
+ gf_time_fmt(timestr, sizeof(timestr), now, gf_timefmt_FT);
+
+ if (strcasecmp(sfx, "started") == 0) {
+ gf_msg_debug(this->name, 0, "Scrubbing \"%s\" %s at %s",
+ child->brick_path, sfx, timestr);
+ } else {
+ gf_msg_debug(this->name, 0, "Scrubbing \"%s\" %s at %s",
+ child->brick_path, sfx, timestr);
+ }
+}
+
+void
+br_child_set_scrub_state(br_child_t *child, gf_boolean_t state)
+{
+ child->active_scrubbing = state;
+}
+
+static void
+br_fsscanner_wait_until_kicked(xlator_t *this, br_child_t *child)
+{
+ br_private_t *priv = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ scrub_monitor = &priv->scrub_monitor;
+
+ pthread_cleanup_push(_br_lock_cleaner, &scrub_monitor->wakelock);
+ pthread_mutex_lock(&scrub_monitor->wakelock);
+ {
+ while (!scrub_monitor->kick)
+ pthread_cond_wait(&scrub_monitor->wakecond,
+ &scrub_monitor->wakelock);
+
+ /* Child lock is to synchronize with disconnect events */
+ pthread_cleanup_push(_br_lock_cleaner, &child->lock);
+ pthread_mutex_lock(&child->lock);
+ {
+ scrub_monitor->active_child_count++;
+ br_child_set_scrub_state(child, _gf_true);
+ }
+ pthread_mutex_unlock(&child->lock);
+ pthread_cleanup_pop(0);
+ }
+ pthread_mutex_unlock(&scrub_monitor->wakelock);
+ pthread_cleanup_pop(0);
+}
+
+static void
+br_scrubber_entry_control(xlator_t *this)
+{
+ br_private_t *priv = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ scrub_monitor = &priv->scrub_monitor;
+
+ LOCK(&scrub_monitor->lock);
+ {
+ /* Move the state to BR_SCRUB_STATE_ACTIVE */
+ if (scrub_monitor->state == BR_SCRUB_STATE_PENDING)
+ scrub_monitor->state = BR_SCRUB_STATE_ACTIVE;
+ br_scrubber_log_time(this, "started");
+ priv->scrub_stat.scrub_running = 1;
+ }
+ UNLOCK(&scrub_monitor->lock);
+}
+
+static void
+br_scrubber_exit_control(xlator_t *this)
+{
+ br_private_t *priv = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ scrub_monitor = &priv->scrub_monitor;
+
+ LOCK(&scrub_monitor->lock);
+ {
+ br_scrubber_log_time(this, "finished");
+ priv->scrub_stat.scrub_running = 0;
+
+ if (scrub_monitor->state == BR_SCRUB_STATE_ACTIVE) {
+ (void)br_fsscan_activate(this);
+ } else {
+ UNLOCK(&scrub_monitor->lock);
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCRUB_INFO,
+ "Volume waiting to get rescheduled..");
+ return;
+ }
+ }
+ UNLOCK(&scrub_monitor->lock);
+}
+
+static void
+br_fsscanner_entry_control(xlator_t *this, br_child_t *child)
+{
+ br_fsscanner_log_time(this, child, "started");
+}
+
+static void
+br_fsscanner_exit_control(xlator_t *this, br_child_t *child)
+{
+ br_private_t *priv = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ scrub_monitor = &priv->scrub_monitor;
+
+ if (!_br_is_child_connected(child)) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, BRB_MSG_SCRUB_INFO,
+ "Brick [%s] disconnected while scrubbing. Scrubbing "
+ "might be incomplete",
+ child->brick_path);
+ }
+
+ br_fsscanner_log_time(this, child, "finished");
+
+ pthread_cleanup_push(_br_lock_cleaner, &scrub_monitor->wakelock);
+ pthread_mutex_lock(&scrub_monitor->wakelock);
+ {
+ scrub_monitor->active_child_count--;
+ pthread_cleanup_push(_br_lock_cleaner, &child->lock);
+ pthread_mutex_lock(&child->lock);
+ {
+ br_child_set_scrub_state(child, _gf_false);
+ }
+ pthread_mutex_unlock(&child->lock);
+ pthread_cleanup_pop(0);
+
+ if (scrub_monitor->active_child_count == 0) {
+ /* The last child has finished scrubbing.
+ * Set the kick to false and wake up other
+ * children who are waiting for the last
+ * child to complete scrubbing.
+ */
+ scrub_monitor->kick = _gf_false;
+ pthread_cond_broadcast(&scrub_monitor->wakecond);
+
+ /* Signal monitor thread waiting for the all
+ * the children to finish scrubbing.
+ */
+ pthread_cleanup_push(_br_lock_cleaner, &scrub_monitor->donelock);
+ pthread_mutex_lock(&scrub_monitor->donelock);
+ {
+ scrub_monitor->done = _gf_true;
+ pthread_cond_signal(&scrub_monitor->donecond);
+ }
+ pthread_mutex_unlock(&scrub_monitor->donelock);
+ pthread_cleanup_pop(0);
+ } else {
+ while (scrub_monitor->active_child_count)
+ pthread_cond_wait(&scrub_monitor->wakecond,
+ &scrub_monitor->wakelock);
+ }
+ }
+ pthread_mutex_unlock(&scrub_monitor->wakelock);
+ pthread_cleanup_pop(0);
+}
+
+void *
+br_fsscanner(void *arg)
+{
+ loc_t loc = {
+ 0,
+ };
+ br_child_t *child = NULL;
+ xlator_t *this = NULL;
+ struct br_scanfs *fsscan = NULL;
+
+ child = arg;
+ this = child->this;
+ fsscan = &child->fsscan;
+
+ THIS = this;
+ loc.inode = child->table->root;
+
+ while (1) {
+ br_fsscanner_wait_until_kicked(this, child);
+ {
+ /* precursor for scrub */
+ br_fsscanner_entry_control(this, child);
+
+ /* scrub */
+ (void)syncop_ftw(child->xl, &loc, GF_CLIENT_PID_SCRUB, child,
+ br_fsscanner_handle_entry);
+ if (!list_empty(&fsscan->queued))
+ wait_for_scrubbing(this, fsscan);
+
+ /* scrub exit criteria */
+ br_fsscanner_exit_control(this, child);
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * Keep this routine extremely simple and do not ever try to acquire
+ * child->lock here: it may lead to deadlock. Scrubber state is
+ * modified in br_fsscanner(). An intermediate state change to pause
+ * changes the scrub state to the _correct_ state by identifying a
+ * non-pending timer.
+ */
+void
+br_kickstart_scanner(struct gf_tw_timer_list *timer, void *data,
+ unsigned long calltime)
+{
+ xlator_t *this = NULL;
+ struct br_monitor *scrub_monitor = data;
+ br_private_t *priv = NULL;
+
+ THIS = this = scrub_monitor->this;
+ priv = this->private;
+
+ /* Reset scrub statistics */
+ priv->scrub_stat.scrubbed_files = 0;
+ priv->scrub_stat.unsigned_files = 0;
+
+ /* Moves state from PENDING to ACTIVE */
+ (void)br_scrubber_entry_control(this);
+
+ /* kickstart scanning.. */
+ pthread_mutex_lock(&scrub_monitor->wakelock);
+ {
+ scrub_monitor->kick = _gf_true;
+ GF_ASSERT(scrub_monitor->active_child_count == 0);
+ pthread_cond_broadcast(&scrub_monitor->wakecond);
+ }
+ pthread_mutex_unlock(&scrub_monitor->wakelock);
+
+ return;
+}
+
+static uint32_t
+br_fsscan_calculate_delta(uint32_t times)
+{
+ return times;
+}
+
+#define BR_SCRUB_ONDEMAND (1)
+#define BR_SCRUB_MINUTE (60)
+#define BR_SCRUB_HOURLY (60 * 60)
+#define BR_SCRUB_DAILY (1 * 24 * 60 * 60)
+#define BR_SCRUB_WEEKLY (7 * 24 * 60 * 60)
+#define BR_SCRUB_BIWEEKLY (14 * 24 * 60 * 60)
+#define BR_SCRUB_MONTHLY (30 * 24 * 60 * 60)
+
+static unsigned int
+br_fsscan_calculate_timeout(scrub_freq_t freq)
+{
+ uint32_t timo = 0;
+
+ switch (freq) {
+ case BR_FSSCRUB_FREQ_MINUTE:
+ timo = br_fsscan_calculate_delta(BR_SCRUB_MINUTE);
+ break;
+ case BR_FSSCRUB_FREQ_HOURLY:
+ timo = br_fsscan_calculate_delta(BR_SCRUB_HOURLY);
+ break;
+ case BR_FSSCRUB_FREQ_DAILY:
+ timo = br_fsscan_calculate_delta(BR_SCRUB_DAILY);
+ break;
+ case BR_FSSCRUB_FREQ_WEEKLY:
+ timo = br_fsscan_calculate_delta(BR_SCRUB_WEEKLY);
+ break;
+ case BR_FSSCRUB_FREQ_BIWEEKLY:
+ timo = br_fsscan_calculate_delta(BR_SCRUB_BIWEEKLY);
+ break;
+ case BR_FSSCRUB_FREQ_MONTHLY:
+ timo = br_fsscan_calculate_delta(BR_SCRUB_MONTHLY);
+ break;
+ default:
+ timo = 0;
+ }
+
+ return timo;
+}
+
+int32_t
+br_fsscan_schedule(xlator_t *this)
+{
+ uint32_t timo = 0;
+ br_private_t *priv = NULL;
+ char timestr[GF_TIMESTR_SIZE] = {
+ 0,
+ };
+ struct br_scrubber *fsscrub = NULL;
+ struct gf_tw_timer_list *timer = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ fsscrub = &priv->fsscrub;
+ scrub_monitor = &priv->scrub_monitor;
+
+ scrub_monitor->boot = gf_time();
+
+ timo = br_fsscan_calculate_timeout(fsscrub->frequency);
+ if (timo == 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, BRB_MSG_ZERO_TIMEOUT_BUG,
+ "BUG: Zero schedule timeout");
+ goto error_return;
+ }
+
+ scrub_monitor->timer = GF_CALLOC(1, sizeof(*scrub_monitor->timer),
+ gf_br_stub_mt_br_scanner_freq_t);
+ if (!scrub_monitor->timer)
+ goto error_return;
+
+ timer = scrub_monitor->timer;
+ INIT_LIST_HEAD(&timer->entry);
+
+ timer->data = scrub_monitor;
+ timer->expires = timo;
+ timer->function = br_kickstart_scanner;
+
+ gf_tw_add_timer(priv->timer_wheel, timer);
+ _br_monitor_set_scrub_state(scrub_monitor, BR_SCRUB_STATE_PENDING);
+
+ gf_time_fmt(timestr, sizeof(timestr), (scrub_monitor->boot + timo),
+ gf_timefmt_FT);
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCRUB_INFO,
+ "Scrubbing is "
+ "scheduled to run at %s",
+ timestr);
+
+ return 0;
+
+error_return:
+ return -1;
+}
+
+int32_t
+br_fsscan_activate(xlator_t *this)
+{
+ uint32_t timo = 0;
+ char timestr[GF_TIMESTR_SIZE] = {
+ 0,
+ };
+ time_t now = 0;
+ br_private_t *priv = NULL;
+ struct br_scrubber *fsscrub = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ fsscrub = &priv->fsscrub;
+ scrub_monitor = &priv->scrub_monitor;
+
+ now = gf_time();
+ timo = br_fsscan_calculate_timeout(fsscrub->frequency);
+ if (timo == 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, BRB_MSG_ZERO_TIMEOUT_BUG,
+ "BUG: Zero schedule timeout");
+ return -1;
+ }
+
+ pthread_mutex_lock(&scrub_monitor->donelock);
+ {
+ scrub_monitor->done = _gf_false;
+ }
+ pthread_mutex_unlock(&scrub_monitor->donelock);
+
+ gf_time_fmt(timestr, sizeof(timestr), now + timo, gf_timefmt_FT);
+ (void)gf_tw_mod_timer(priv->timer_wheel, scrub_monitor->timer, timo);
+
+ _br_monitor_set_scrub_state(scrub_monitor, BR_SCRUB_STATE_PENDING);
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCRUB_INFO,
+ "Scrubbing is "
+ "rescheduled to run at %s",
+ timestr);
+
+ return 0;
+}
+
+int32_t
+br_fsscan_reschedule(xlator_t *this)
+{
+ int32_t ret = 0;
+ uint32_t timo = 0;
+ char timestr[GF_TIMESTR_SIZE] = {
+ 0,
+ };
+ time_t now = 0;
+ br_private_t *priv = NULL;
+ struct br_scrubber *fsscrub = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ fsscrub = &priv->fsscrub;
+ scrub_monitor = &priv->scrub_monitor;
+
+ if (!fsscrub->frequency_reconf)
+ return 0;
+
+ now = gf_time();
+ timo = br_fsscan_calculate_timeout(fsscrub->frequency);
+ if (timo == 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, BRB_MSG_ZERO_TIMEOUT_BUG,
+ "BUG: Zero schedule timeout");
+ return -1;
+ }
+
+ gf_time_fmt(timestr, sizeof(timestr), now + timo, gf_timefmt_FT);
+
+ pthread_mutex_lock(&scrub_monitor->donelock);
+ {
+ scrub_monitor->done = _gf_false;
+ }
+ pthread_mutex_unlock(&scrub_monitor->donelock);
+
+ ret = gf_tw_mod_timer_pending(priv->timer_wheel, scrub_monitor->timer,
+ timo);
+ if (ret == 0)
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCRUB_INFO,
+ "Scrubber is currently running and would be "
+ "rescheduled after completion");
+ else {
+ _br_monitor_set_scrub_state(scrub_monitor, BR_SCRUB_STATE_PENDING);
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCRUB_INFO,
+ "Scrubbing rescheduled to run at %s", timestr);
+ }
+
+ return 0;
+}
+
+int32_t
+br_fsscan_ondemand(xlator_t *this)
+{
+ int32_t ret = 0;
+ uint32_t timo = 0;
+ char timestr[GF_TIMESTR_SIZE] = {
+ 0,
+ };
+ time_t now = 0;
+ br_private_t *priv = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ scrub_monitor = &priv->scrub_monitor;
+
+ now = gf_time();
+ timo = BR_SCRUB_ONDEMAND;
+ gf_time_fmt(timestr, sizeof(timestr), now + timo, gf_timefmt_FT);
+
+ pthread_mutex_lock(&scrub_monitor->donelock);
+ {
+ scrub_monitor->done = _gf_false;
+ }
+ pthread_mutex_unlock(&scrub_monitor->donelock);
+
+ ret = gf_tw_mod_timer_pending(priv->timer_wheel, scrub_monitor->timer,
+ timo);
+ if (ret == 0)
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCRUB_INFO,
+ "Scrubber is currently running and would be "
+ "rescheduled after completion");
+ else {
+ _br_monitor_set_scrub_state(scrub_monitor, BR_SCRUB_STATE_PENDING);
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCRUB_INFO,
+ "Ondemand Scrubbing scheduled to run at %s", timestr);
+ }
+
+ return 0;
+}
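+
+/*
+ * Scheduling summary (descriptive): br_fsscan_schedule() arms the scrub
+ * timer for the first run, br_fsscan_activate() re-arms it once a scrub
+ * completes, br_fsscan_reschedule() re-arms it when the scrub frequency is
+ * reconfigured, and br_fsscan_ondemand() fires it after ~1 second for an
+ * on-demand scrub.
+ */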
+
+#define BR_SCRUB_THREAD_SCALE_LAZY 0
+#define BR_SCRUB_THREAD_SCALE_NORMAL 0.4
+#define BR_SCRUB_THREAD_SCALE_AGGRESSIVE 1.0
+
+#ifndef M_E
+#define M_E 2.718
+#endif
+
+/**
+ * This is just a simple exponential scale to a fixed value selected
+ * per throttle config. We probably need to be more smart and select
+ * the scale based on the number of processor cores too.
+ */
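+/*
+ * Worked example (illustrative): with priv->child_count == 4, the scale
+ * comes to roughly 4 threads for "lazy" (4 * e^0), 5 for "normal"
+ * (4 * e^0.4 ~= 5.96, truncated) and 10 for "aggressive" (4 * e^1.0
+ * ~= 10.87, truncated).
+ */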
+static unsigned int
+br_scrubber_calc_scale(xlator_t *this, br_private_t *priv,
+ scrub_throttle_t throttle)
+{
+ unsigned int scale = 0;
+
+ switch (throttle) {
+ case BR_SCRUB_THROTTLE_VOID:
+ case BR_SCRUB_THROTTLE_STALLED:
+ scale = 0;
+ break;
+ case BR_SCRUB_THROTTLE_LAZY:
+ scale = priv->child_count * pow(M_E, BR_SCRUB_THREAD_SCALE_LAZY);
+ break;
+ case BR_SCRUB_THROTTLE_NORMAL:
+ scale = priv->child_count * pow(M_E, BR_SCRUB_THREAD_SCALE_NORMAL);
+ break;
+ case BR_SCRUB_THROTTLE_AGGRESSIVE:
+ scale = priv->child_count *
+ pow(M_E, BR_SCRUB_THREAD_SCALE_AGGRESSIVE);
+ break;
+ default:
+ gf_msg(this->name, GF_LOG_ERROR, 0, BRB_MSG_UNKNOWN_THROTTLE,
+ "Unknown throttle %d", throttle);
+ }
+
+ return scale;
+}
+
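+/*
+ * Bricks are serviced round-robin: return the first child on the scrublist
+ * and rotate the list so that the next call picks the next brick.
+ */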
+static br_child_t *
+_br_scrubber_get_next_child(struct br_scrubber *fsscrub)
+{
+ br_child_t *child = NULL;
+
+ child = list_first_entry(&fsscrub->scrublist, br_child_t, list);
+ list_rotate_left(&fsscrub->scrublist);
+
+ return child;
+}
+
+static void
+_br_scrubber_get_entry(br_child_t *child, struct br_fsscan_entry **fsentry)
+{
+ struct br_scanfs *fsscan = &child->fsscan;
+
+ if (list_empty(&fsscan->ready))
+ return;
+ *fsentry = list_first_entry(&fsscan->ready, struct br_fsscan_entry, list);
+ list_del_init(&(*fsentry)->list);
+}
+
+static void
+_br_scrubber_find_scrubbable_entry(struct br_scrubber *fsscrub,
+ struct br_fsscan_entry **fsentry)
+{
+ br_child_t *child = NULL;
+ br_child_t *firstchild = NULL;
+
+ while (1) {
+ while (list_empty(&fsscrub->scrublist))
+ pthread_cond_wait(&fsscrub->cond, &fsscrub->mutex);
+
+ firstchild = NULL;
+ for (child = _br_scrubber_get_next_child(fsscrub); child != firstchild;
+ child = _br_scrubber_get_next_child(fsscrub)) {
+ if (!firstchild)
+ firstchild = child;
+
+ _br_scrubber_get_entry(child, fsentry);
+ if (*fsentry)
+ break;
+ }
+
+ if (*fsentry)
+ break;
+
+ /* nothing to work on.. wait till available */
+ pthread_cond_wait(&fsscrub->cond, &fsscrub->mutex);
+ }
+}
+
+static void
+br_scrubber_pick_entry(struct br_scrubber *fsscrub,
+ struct br_fsscan_entry **fsentry)
+{
+ pthread_cleanup_push(_br_lock_cleaner, &fsscrub->mutex);
+
+ pthread_mutex_lock(&fsscrub->mutex);
+ {
+ *fsentry = NULL;
+ _br_scrubber_find_scrubbable_entry(fsscrub, fsentry);
+ }
+ pthread_mutex_unlock(&fsscrub->mutex);
+
+ pthread_cleanup_pop(0);
+}
+
+struct br_scrub_entry {
+ gf_boolean_t scrubbed;
+ struct br_fsscan_entry *fsentry;
+};
+
+/**
+ * We need to be a bit careful here. These thread(s) are prone to cancellations
+ * when threads are scaled down (depending on the throttling value configured)
+ * and pausing scrub. A thread can get cancelled while it's waiting for entries
+ * in the ->pending queue or when an object is undergoing scrubbing.
+ */
+static void
+br_scrubber_entry_handle(void *arg)
+{
+ struct br_scanfs *fsscan = NULL;
+ struct br_scrub_entry *sentry = NULL;
+ struct br_fsscan_entry *fsentry = NULL;
+
+ sentry = arg;
+
+ fsentry = sentry->fsentry;
+ fsscan = fsentry->fsscan;
+
+ LOCK(&fsscan->entrylock);
+ {
+ if (sentry->scrubbed) {
+ _br_fsscan_dec_entry_count(fsscan);
+
+ /* cleanup ->entry */
+ fsentry->data = NULL;
+ fsentry->fsscan = NULL;
+ loc_wipe(&fsentry->parent);
+ gf_dirent_entry_free(fsentry->entry);
+
+ GF_FREE(sentry->fsentry);
+ } else {
+ /* (re)queue the entry again for scrub */
+ _br_fsscan_collect_entry(fsscan, sentry->fsentry);
+ }
+ }
+ UNLOCK(&fsscan->entrylock);
+}
+
+static void
+br_scrubber_scrub_entry(xlator_t *this, struct br_fsscan_entry *fsentry)
+{
+ struct br_scrub_entry sentry = {
+ 0,
+ };
+
+ sentry.scrubbed = 0;
+ sentry.fsentry = fsentry;
+
+ pthread_cleanup_push(br_scrubber_entry_handle, &sentry);
+ {
+ (void)br_scrubber_scrub_begin(this, fsentry);
+ sentry.scrubbed = 1;
+ }
+ pthread_cleanup_pop(1);
+}
+
+void *
+br_scrubber_proc(void *arg)
+{
+ xlator_t *this = NULL;
+ struct br_scrubber *fsscrub = NULL;
+ struct br_fsscan_entry *fsentry = NULL;
+
+ fsscrub = arg;
+ THIS = this = fsscrub->this;
+
+ while (1) {
+ br_scrubber_pick_entry(fsscrub, &fsentry);
+ br_scrubber_scrub_entry(this, fsentry);
+ sleep(1);
+ }
+
+ return NULL;
+}
+
+static int32_t
+br_scrubber_scale_up(xlator_t *this, struct br_scrubber *fsscrub,
+ unsigned int v1, unsigned int v2)
+{
+ int i = 0;
+ int32_t ret = -1;
+ int diff = 0;
+ struct br_scrubbers *scrub = NULL;
+
+ diff = (int)(v2 - v1);
+
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCALING_UP_SCRUBBER,
+ "Scaling up scrubbers [%d => %d]", v1, v2);
+
+ for (i = 0; i < diff; i++) {
+ scrub = GF_CALLOC(diff, sizeof(*scrub), gf_br_mt_br_scrubber_t);
+ if (!scrub)
+ break;
+
+ INIT_LIST_HEAD(&scrub->list);
+ ret = gf_thread_create(&scrub->scrubthread, NULL, br_scrubber_proc,
+ fsscrub, "brsproc");
+ if (ret)
+ break;
+
+ fsscrub->nr_scrubbers++;
+ list_add_tail(&scrub->list, &fsscrub->scrubbers);
+ }
+
+ if ((i != diff) && !scrub)
+ goto error_return;
+
+ if (i != diff) /* degraded scaling.. */
+ gf_msg(this->name, GF_LOG_WARNING, 0, BRB_MSG_SCALE_UP_FAILED,
+ "Could not fully scale up to %d scrubber(s). Spawned "
+ "%d/%d [total scrubber(s): %d]",
+ v2, i, diff, (v1 + i));
+
+ return 0;
+
+error_return:
+ return -1;
+}
+
+static int32_t
+br_scrubber_scale_down(xlator_t *this, struct br_scrubber *fsscrub,
+ unsigned int v1, unsigned int v2)
+{
+ int i = 0;
+ int diff = 0;
+ int32_t ret = -1;
+ struct br_scrubbers *scrub = NULL;
+
+ diff = (int)(v1 - v2);
+
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCALE_DOWN_SCRUBBER,
+ "Scaling down scrubbers [%d => %d]", v1, v2);
+
+ for (i = 0; i < diff; i++) {
+ scrub = list_first_entry(&fsscrub->scrubbers, struct br_scrubbers,
+ list);
+
+ list_del_init(&scrub->list);
+ ret = gf_thread_cleanup_xint(scrub->scrubthread);
+ if (ret)
+ break;
+ GF_FREE(scrub);
+
+ fsscrub->nr_scrubbers--;
+ }
+
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, BRB_MSG_SCALE_DOWN_FAILED,
+ "Could not fully scale down "
+ "to %d scrubber(s). Terminated %d/%d [total "
+ "scrubber(s): %d]",
+ v1, i, diff, (v2 - i));
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int32_t
+br_scrubber_configure(xlator_t *this, br_private_t *priv,
+ struct br_scrubber *fsscrub, scrub_throttle_t nthrottle)
+{
+ int32_t ret = 0;
+ unsigned int v1 = 0;
+ unsigned int v2 = 0;
+
+ v1 = fsscrub->nr_scrubbers;
+ v2 = br_scrubber_calc_scale(this, priv, nthrottle);
+
+ if (v1 == v2)
+ return 0;
+
+ if (v1 > v2)
+ ret = br_scrubber_scale_down(this, fsscrub, v1, v2);
+ else
+ ret = br_scrubber_scale_up(this, fsscrub, v1, v2);
+
+ return ret;
+}
+
+static int32_t
+br_scrubber_fetch_option(xlator_t *this, char *opt, dict_t *options,
+ char **value)
+{
+ if (options)
+ GF_OPTION_RECONF(opt, *value, options, str, error_return);
+ else
+ GF_OPTION_INIT(opt, *value, str, error_return);
+
+ return 0;
+
+error_return:
+ return -1;
+}
+
+/* internal "throttle" override */
+#define BR_SCRUB_STALLED "STALLED"
+
+/* TODO: token bucket spec */
+static int32_t
+br_scrubber_handle_throttle(xlator_t *this, br_private_t *priv, dict_t *options,
+ gf_boolean_t scrubstall)
+{
+ int32_t ret = 0;
+ char *tmp = NULL;
+ struct br_scrubber *fsscrub = NULL;
+ scrub_throttle_t nthrottle = BR_SCRUB_THROTTLE_VOID;
+
+ fsscrub = &priv->fsscrub;
+ fsscrub->throttle_reconf = _gf_false;
+
+ ret = br_scrubber_fetch_option(this, "scrub-throttle", options, &tmp);
+ if (ret)
+ goto error_return;
+
+ if (scrubstall)
+ tmp = BR_SCRUB_STALLED;
+
+ if (strcasecmp(tmp, "lazy") == 0)
+ nthrottle = BR_SCRUB_THROTTLE_LAZY;
+ else if (strcasecmp(tmp, "normal") == 0)
+ nthrottle = BR_SCRUB_THROTTLE_NORMAL;
+ else if (strcasecmp(tmp, "aggressive") == 0)
+ nthrottle = BR_SCRUB_THROTTLE_AGGRESSIVE;
+ else if (strcasecmp(tmp, BR_SCRUB_STALLED) == 0)
+ nthrottle = BR_SCRUB_THROTTLE_STALLED;
+ else
+ goto error_return;
+
+ /* on failure old throttling value is preserved */
+ ret = br_scrubber_configure(this, priv, fsscrub, nthrottle);
+ if (ret)
+ goto error_return;
+
+ if (fsscrub->throttle != nthrottle)
+ fsscrub->throttle_reconf = _gf_true;
+
+ fsscrub->throttle = nthrottle;
+ return 0;
+
+error_return:
+ return -1;
+}
+
+static int32_t
+br_scrubber_handle_stall(xlator_t *this, br_private_t *priv, dict_t *options,
+ gf_boolean_t *scrubstall)
+{
+ int32_t ret = 0;
+ char *tmp = NULL;
+
+ ret = br_scrubber_fetch_option(this, "scrub-state", options, &tmp);
+ if (ret)
+ goto error_return;
+
+ if (strcasecmp(tmp, "pause") == 0) /* anything else is active */
+ *scrubstall = _gf_true;
+
+ return 0;
+
+error_return:
+ return -1;
+}
+
+static int32_t
+br_scrubber_handle_freq(xlator_t *this, br_private_t *priv, dict_t *options,
+ gf_boolean_t scrubstall)
+{
+ int32_t ret = -1;
+ char *tmp = NULL;
+ scrub_freq_t frequency = BR_FSSCRUB_FREQ_HOURLY;
+ struct br_scrubber *fsscrub = NULL;
+
+ fsscrub = &priv->fsscrub;
+ fsscrub->frequency_reconf = _gf_true;
+
+ ret = br_scrubber_fetch_option(this, "scrub-freq", options, &tmp);
+ if (ret)
+ goto error_return;
+
+ if (scrubstall)
+ tmp = BR_SCRUB_STALLED;
+
+ if (strcasecmp(tmp, "hourly") == 0) {
+ frequency = BR_FSSCRUB_FREQ_HOURLY;
+ } else if (strcasecmp(tmp, "daily") == 0) {
+ frequency = BR_FSSCRUB_FREQ_DAILY;
+ } else if (strcasecmp(tmp, "weekly") == 0) {
+ frequency = BR_FSSCRUB_FREQ_WEEKLY;
+ } else if (strcasecmp(tmp, "biweekly") == 0) {
+ frequency = BR_FSSCRUB_FREQ_BIWEEKLY;
+ } else if (strcasecmp(tmp, "monthly") == 0) {
+ frequency = BR_FSSCRUB_FREQ_MONTHLY;
+ } else if (strcasecmp(tmp, "minute") == 0) {
+ frequency = BR_FSSCRUB_FREQ_MINUTE;
+ } else if (strcasecmp(tmp, BR_SCRUB_STALLED) == 0) {
+ frequency = BR_FSSCRUB_FREQ_STALLED;
+ } else
+ goto error_return;
+
+ if (fsscrub->frequency == frequency)
+ fsscrub->frequency_reconf = _gf_false;
+ else
+ fsscrub->frequency = frequency;
+
+ return 0;
+
+error_return:
+ return -1;
+}
+
+static void
+br_scrubber_log_option(xlator_t *this, br_private_t *priv,
+ gf_boolean_t scrubstall)
+{
+ struct br_scrubber *fsscrub = &priv->fsscrub;
+ char *scrub_throttle_str[] = {
+ [BR_SCRUB_THROTTLE_LAZY] = "lazy",
+ [BR_SCRUB_THROTTLE_NORMAL] = "normal",
+ [BR_SCRUB_THROTTLE_AGGRESSIVE] = "aggressive",
+ [BR_SCRUB_THROTTLE_STALLED] = "stalled",
+ };
+
+ char *scrub_freq_str[] = {
+ [0] = "",
+ [BR_FSSCRUB_FREQ_HOURLY] = "hourly",
+ [BR_FSSCRUB_FREQ_DAILY] = "daily",
+ [BR_FSSCRUB_FREQ_WEEKLY] = "weekly",
+ [BR_FSSCRUB_FREQ_BIWEEKLY] = "biweekly",
+ [BR_FSSCRUB_FREQ_MONTHLY] = "monthly (30 days)",
+ [BR_FSSCRUB_FREQ_MINUTE] = "every minute",
+ };
+
+ if (scrubstall)
+ return; /* logged as pause */
+
+ if (fsscrub->frequency_reconf || fsscrub->throttle_reconf) {
+ if (fsscrub->throttle == BR_SCRUB_THROTTLE_VOID)
+ return;
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCRUB_TUNABLE,
+ "SCRUB TUNABLES:: [Frequency: %s, Throttle: %s]",
+ scrub_freq_str[fsscrub->frequency],
+ scrub_throttle_str[fsscrub->throttle]);
+ }
+}
+
+int32_t
+br_scrubber_handle_options(xlator_t *this, br_private_t *priv, dict_t *options)
+{
+ int32_t ret = 0;
+ gf_boolean_t scrubstall = _gf_false; /* not as dangerous as it sounds */
+
+ ret = br_scrubber_handle_stall(this, priv, options, &scrubstall);
+ if (ret)
+ goto error_return;
+
+ ret = br_scrubber_handle_throttle(this, priv, options, scrubstall);
+ if (ret)
+ goto error_return;
+
+ ret = br_scrubber_handle_freq(this, priv, options, scrubstall);
+ if (ret)
+ goto error_return;
+
+ br_scrubber_log_option(this, priv, scrubstall);
+
+ return 0;
+
+error_return:
+ return -1;
+}
+
+inode_t *
+br_lookup_bad_obj_dir(xlator_t *this, br_child_t *child, uuid_t gfid)
+{
+ struct iatt statbuf = {
+ 0,
+ };
+ inode_table_t *table = NULL;
+ int32_t ret = -1;
+ loc_t loc = {
+ 0,
+ };
+ inode_t *linked_inode = NULL;
+ int32_t op_errno = 0;
+
+ GF_VALIDATE_OR_GOTO("bit-rot-scrubber", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+ GF_VALIDATE_OR_GOTO(this->name, child, out);
+
+ table = child->table;
+
+ loc.inode = inode_new(table);
+ if (!loc.inode) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, BRB_MSG_NO_MEMORY,
+ "failed to allocate a new inode for"
+ "bad object directory");
+ goto out;
+ }
+
+ gf_uuid_copy(loc.gfid, gfid);
+
+ ret = syncop_lookup(child->xl, &loc, &statbuf, NULL, NULL, NULL);
+ if (ret < 0) {
+ op_errno = -ret;
+ gf_msg(this->name, GF_LOG_ERROR, 0, BRB_MSG_LOOKUP_FAILED,
+ "failed to lookup the bad "
+ "objects directory (gfid: %s (%s))",
+ uuid_utoa(gfid), strerror(op_errno));
+ goto out;
+ }
+
+ linked_inode = inode_link(loc.inode, NULL, NULL, &statbuf);
+ if (linked_inode)
+ inode_lookup(linked_inode);
+
+out:
+ loc_wipe(&loc);
+ return linked_inode;
+}
+
+int32_t
+br_read_bad_object_dir(xlator_t *this, br_child_t *child, fd_t *fd,
+ dict_t *dict)
+{
+ gf_dirent_t entries;
+ gf_dirent_t *entry = NULL;
+ int32_t ret = -1;
+ off_t offset = 0;
+ int32_t count = 0;
+ char key[32] = {
+ 0,
+ };
+ dict_t *out_dict = NULL;
+
+ INIT_LIST_HEAD(&entries.list);
+
+ while ((ret = syncop_readdir(child->xl, fd, 131072, offset, &entries, NULL,
+ &out_dict))) {
+ if (ret < 0)
+ goto out;
+
+ list_for_each_entry(entry, &entries.list, list)
+ {
+ offset = entry->d_off;
+
+ snprintf(key, sizeof(key), "quarantine-%d", count);
+
+ /*
+ * ignore the dict_set errors for now. The intention is
+ * to get as many bad objects as possible instead of
+ * erroring out at the first failure.
+ */
+ ret = dict_set_dynstr_with_alloc(dict, key, entry->d_name);
+ if (!ret)
+ count++;
+
+ if (out_dict) {
+ dict_copy(out_dict, dict);
+ dict_unref(out_dict);
+ out_dict = NULL;
+ }
+ }
+
+ gf_dirent_free(&entries);
+ }
+
+ ret = count;
+ ret = dict_set_int32_sizen(dict, "count", count);
+
+out:
+ return ret;
+}
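+
+/*
+ * Sketch of the per-brick dictionary built above (illustrative): for two bad
+ * objects the dict would roughly contain
+ *   "quarantine-0" -> "<name of first bad object entry>"
+ *   "quarantine-1" -> "<name of second bad object entry>"
+ *   "count"        -> 2
+ * which br_collect_bad_objects_of_child() later rewrites into per-brick,
+ * path-annotated entries on the final dict.
+ */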
+
+int32_t
+br_get_bad_objects_from_child(xlator_t *this, dict_t *dict, br_child_t *child)
+{
+ inode_t *inode = NULL;
+ inode_table_t *table = NULL;
+ fd_t *fd = NULL;
+ int32_t ret = -1;
+ loc_t loc = {
+ 0,
+ };
+ int32_t op_errno = 0;
+
+ GF_VALIDATE_OR_GOTO("bit-rot-scrubber", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+ GF_VALIDATE_OR_GOTO(this->name, child, out);
+ GF_VALIDATE_OR_GOTO(this->name, dict, out);
+
+ table = child->table;
+
+ inode = inode_find(table, BR_BAD_OBJ_CONTAINER);
+ if (!inode) {
+ inode = br_lookup_bad_obj_dir(this, child, BR_BAD_OBJ_CONTAINER);
+ if (!inode)
+ goto out;
+ }
+
+ fd = fd_create(inode, 0);
+ if (!fd) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, BRB_MSG_FD_CREATE_FAILED,
+ "fd creation for the bad "
+ "objects directory failed (gfid: %s)",
+ uuid_utoa(BR_BAD_OBJ_CONTAINER));
+ goto out;
+ }
+
+ loc.inode = inode;
+ gf_uuid_copy(loc.gfid, inode->gfid);
+
+ ret = syncop_opendir(child->xl, &loc, fd, NULL, NULL);
+ if (ret < 0) {
+ op_errno = -ret;
+ fd_unref(fd);
+ fd = NULL;
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, BRB_MSG_FD_CREATE_FAILED,
+ "failed to open the bad "
+ "objects directory %s",
+ uuid_utoa(BR_BAD_OBJ_CONTAINER));
+ goto out;
+ }
+
+ fd_bind(fd);
+
+ ret = br_read_bad_object_dir(this, child, fd, dict);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, BRB_MSG_BAD_OBJ_READDIR_FAIL,
+ "readdir of the bad "
+ "objects directory (%s) failed ",
+ uuid_utoa(BR_BAD_OBJ_CONTAINER));
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ loc_wipe(&loc);
+ if (fd)
+ fd_unref(fd);
+ return ret;
+}
+
+int32_t
+br_collect_bad_objects_of_child(xlator_t *this, br_child_t *child, dict_t *dict,
+ dict_t *child_dict, int32_t total_count)
+{
+ int32_t ret = -1;
+ int32_t count = 0;
+ char key[32] = {
+ 0,
+ };
+ char main_key[32] = {
+ 0,
+ };
+ int32_t j = 0;
+ int32_t tmp_count = 0;
+ char *entry = NULL;
+ char tmp[PATH_MAX] = {
+ 0,
+ };
+ char *path = NULL;
+ int32_t len = 0;
+
+ ret = dict_get_int32_sizen(child_dict, "count", &count);
+ if (ret)
+ goto out;
+
+ tmp_count = total_count;
+
+ for (j = 0; j < count; j++) {
+ len = snprintf(key, sizeof(key), "quarantine-%d", j);
+ ret = dict_get_strn(child_dict, key, len, &entry);
+ if (ret)
+ continue;
+
+ ret = dict_get_str(child_dict, entry, &path);
+ len = snprintf(tmp, PATH_MAX, "%s ==> BRICK: %s\n path: %s", entry,
+ child->brick_path, path);
+ if ((len < 0) || (len >= PATH_MAX)) {
+ continue;
+ }
+ snprintf(main_key, sizeof(main_key), "quarantine-%d", tmp_count);
+
+ ret = dict_set_dynstr_with_alloc(dict, main_key, tmp);
+ if (!ret)
+ tmp_count++;
+ path = NULL;
+ }
+
+ ret = tmp_count;
+
+out:
+ return ret;
+}
+
+int32_t
+br_collect_bad_objects_from_children(xlator_t *this, dict_t *dict)
+{
+ int32_t ret = -1;
+ dict_t *child_dict = NULL;
+ int32_t i = 0;
+ int32_t total_count = 0;
+ br_child_t *child = NULL;
+ br_private_t *priv = NULL;
+ dict_t *tmp_dict = NULL;
+
+ priv = this->private;
+ tmp_dict = dict;
+
+ for (i = 0; i < priv->child_count; i++) {
+ child = &priv->children[i];
+ GF_ASSERT(child);
+ if (!_br_is_child_connected(child))
+ continue;
+
+ child_dict = dict_new();
+ if (!child_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, BRB_MSG_NO_MEMORY,
+ "failed to allocate dict");
+ continue;
+ }
+ ret = br_get_bad_objects_from_child(this, child_dict, child);
+ /*
+ * Continue asking the remaining children for the list of
+         * bad objects even if getting the list from one of them
+ * fails.
+ */
+ if (ret) {
+ dict_unref(child_dict);
+ continue;
+ }
+
+ ret = br_collect_bad_objects_of_child(this, child, tmp_dict, child_dict,
+ total_count);
+ if (ret < 0) {
+ dict_unref(child_dict);
+ continue;
+ }
+
+ total_count = ret;
+ dict_unref(child_dict);
+ child_dict = NULL;
+ }
+
+ ret = dict_set_int32(tmp_dict, "total-count", total_count);
+
+ return ret;
+}
+
+int32_t
+br_get_bad_objects_list(xlator_t *this, dict_t **dict)
+{
+ int32_t ret = -1;
+ dict_t *tmp_dict = NULL;
+
+ GF_VALIDATE_OR_GOTO("bir-rot-scrubber", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, dict, out);
+
+ tmp_dict = *dict;
+ if (!tmp_dict) {
+ tmp_dict = dict_new();
+ if (!tmp_dict) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, BRB_MSG_NO_MEMORY,
+ "failed to allocate dict");
+ goto out;
+ }
+ *dict = tmp_dict;
+ }
+
+ ret = br_collect_bad_objects_from_children(this, tmp_dict);
+
+out:
+ return ret;
+}
+
+static int
+wait_for_scrub_to_finish(xlator_t *this)
+{
+ int ret = -1;
+ br_private_t *priv = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ scrub_monitor = &priv->scrub_monitor;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", scrub_monitor, out);
+ GF_VALIDATE_OR_GOTO("bit-rot", this, out);
+
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCRUB_INFO,
+ "Waiting for all children to start and finish scrub");
+
+ pthread_mutex_lock(&scrub_monitor->donelock);
+ {
+ while (!scrub_monitor->done)
+ pthread_cond_wait(&scrub_monitor->donecond,
+ &scrub_monitor->donelock);
+ }
+ pthread_mutex_unlock(&scrub_monitor->donelock);
+ ret = 0;
+out:
+ return ret;
+}
+
+/**
+ * This function is executed in a separate thread. This is the scrubber
+ * monitor thread that takes care of the state machine.
+ */
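+/*
+ * Flow sketch (descriptive): the monitor waits until initialization is
+ * signalled, kicks the scrub state machine once (serialized with
+ * reconfigure()), and then loops forever: wait for all children to finish a
+ * scrub run, then move the volume back to the PENDING state via
+ * br_scrubber_exit_control().
+ */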
+void *
+br_monitor_thread(void *arg)
+{
+ int32_t ret = 0;
+ xlator_t *this = NULL;
+ br_private_t *priv = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ this = arg;
+ priv = this->private;
+
+ /*
+ * Since this is the topmost xlator, THIS has to be set by the bit-rot
+ * xlator itself (STACK_WIND won't help in this case). Also it has
+ * to be done for each thread that gets spawned. Otherwise, a new
+ * thread will get global_xlator's pointer when it does "THIS".
+ */
+ THIS = this;
+
+ scrub_monitor = &priv->scrub_monitor;
+
+ pthread_mutex_lock(&scrub_monitor->mutex);
+ {
+ while (!scrub_monitor->inited)
+ pthread_cond_wait(&scrub_monitor->cond, &scrub_monitor->mutex);
+ }
+ pthread_mutex_unlock(&scrub_monitor->mutex);
+
+ /* this needs to be serialized with reconfigure() */
+ pthread_mutex_lock(&priv->lock);
+ {
+ ret = br_scrub_state_machine(this, _gf_false);
+ }
+ pthread_mutex_unlock(&priv->lock);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, BRB_MSG_SSM_FAILED,
+ "Scrub state machine failed");
+ goto out;
+ }
+
+ while (1) {
+ /* Wait for all children to finish scrubbing */
+ ret = wait_for_scrub_to_finish(this);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, BRB_MSG_SCRUB_WAIT_FAILED,
+ "Scrub wait failed");
+ goto out;
+ }
+
+ /* scrub exit criteria: Move the state to PENDING */
+ br_scrubber_exit_control(this);
+ }
+
+out:
+ return NULL;
+}
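
For reference, the donelock/donecond hand-off used by wait_for_scrub_to_finish() and the monitor thread above reduces to the classic condition-variable pattern sketched below; every name in the sketch is illustrative and not part of the patch. A scrubbing pass sets a shared flag under the mutex and signals, while the waiter loops on the predicate to guard against spurious wakeups.

    /* sketch: done-flag hand-off between a worker and a waiting monitor */
    #include <pthread.h>
    #include <stdio.h>

    struct monitor {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
    };

    static void *worker(void *arg)
    {
        struct monitor *m = arg;

        /* ... the scrub pass would run here ... */
        pthread_mutex_lock(&m->lock);
        m->done = 1;                   /* mark the pass complete */
        pthread_cond_signal(&m->cond); /* wake the waiting monitor */
        pthread_mutex_unlock(&m->lock);
        return NULL;
    }

    int main(void)
    {
        struct monitor m = {PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0};
        pthread_t t;

        pthread_create(&t, NULL, worker, &m);

        pthread_mutex_lock(&m.lock);
        while (!m.done) /* loop: condvars may wake spuriously */
            pthread_cond_wait(&m.cond, &m.lock);
        pthread_mutex_unlock(&m.lock);

        pthread_join(t, NULL);
        printf("scrub pass finished\n");
        return 0;
    }
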
+
+static void
+br_set_scrub_state(struct br_monitor *scrub_monitor, br_scrub_state_t state)
+{
+ LOCK(&scrub_monitor->lock);
+ {
+ _br_monitor_set_scrub_state(scrub_monitor, state);
+ }
+ UNLOCK(&scrub_monitor->lock);
+}
+
+int32_t
+br_scrubber_monitor_init(xlator_t *this, br_private_t *priv)
+{
+ struct br_monitor *scrub_monitor = NULL;
+ int ret = 0;
+
+ scrub_monitor = &priv->scrub_monitor;
+
+ LOCK_INIT(&scrub_monitor->lock);
+ scrub_monitor->this = this;
+
+ scrub_monitor->inited = _gf_false;
+ pthread_mutex_init(&scrub_monitor->mutex, NULL);
+ pthread_cond_init(&scrub_monitor->cond, NULL);
+
+ scrub_monitor->kick = _gf_false;
+ scrub_monitor->active_child_count = 0;
+ pthread_mutex_init(&scrub_monitor->wakelock, NULL);
+ pthread_cond_init(&scrub_monitor->wakecond, NULL);
+
+ scrub_monitor->done = _gf_false;
+ pthread_mutex_init(&scrub_monitor->donelock, NULL);
+ pthread_cond_init(&scrub_monitor->donecond, NULL);
+
+ /* Set the state to INACTIVE */
+ br_set_scrub_state(&priv->scrub_monitor, BR_SCRUB_STATE_INACTIVE);
+
+ /* Start the monitor thread */
+ ret = gf_thread_create(&scrub_monitor->thread, NULL, br_monitor_thread,
+ this, "brmon");
+ if (ret != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, -ret, BRB_MSG_SPAWN_FAILED,
+ "monitor thread creation failed");
+ ret = -1;
+ goto err;
+ }
+
+ return 0;
+err:
+ pthread_mutex_destroy(&scrub_monitor->mutex);
+ pthread_cond_destroy(&scrub_monitor->cond);
+
+ pthread_mutex_destroy(&scrub_monitor->wakelock);
+ pthread_cond_destroy(&scrub_monitor->wakecond);
+
+ pthread_mutex_destroy(&scrub_monitor->donelock);
+ pthread_cond_destroy(&scrub_monitor->donecond);
+
+ LOCK_DESTROY(&scrub_monitor->lock);
+
+ return ret;
+}
+
+int32_t
+br_scrubber_init(xlator_t *this, br_private_t *priv)
+{
+ struct br_scrubber *fsscrub = NULL;
+ int ret = 0;
+
+ priv->tbf = tbf_init(NULL, 0);
+ if (!priv->tbf)
+ return -1;
+
+ ret = br_scrubber_monitor_init(this, priv);
+ if (ret)
+ return -1;
+
+ fsscrub = &priv->fsscrub;
+
+ fsscrub->this = this;
+ fsscrub->throttle = BR_SCRUB_THROTTLE_VOID;
+
+ pthread_mutex_init(&fsscrub->mutex, NULL);
+ pthread_cond_init(&fsscrub->cond, NULL);
+
+ fsscrub->nr_scrubbers = 0;
+ INIT_LIST_HEAD(&fsscrub->scrubbers);
+ INIT_LIST_HEAD(&fsscrub->scrublist);
+
+ return 0;
+}
diff --git a/xlators/features/bit-rot/src/bitd/bit-rot-scrub.h b/xlators/features/bit-rot/src/bitd/bit-rot-scrub.h
new file mode 100644
index 00000000000..4e5f67bc021
--- /dev/null
+++ b/xlators/features/bit-rot/src/bitd/bit-rot-scrub.h
@@ -0,0 +1,46 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __BIT_ROT_SCRUB_H__
+#define __BIT_ROT_SCRUB_H__
+
+#include <glusterfs/xlator.h>
+#include "bit-rot.h"
+
+void *
+br_fsscanner(void *);
+
+int32_t
+br_fsscan_schedule(xlator_t *);
+int32_t
+br_fsscan_reschedule(xlator_t *);
+int32_t
+br_fsscan_activate(xlator_t *);
+int32_t
+br_fsscan_deactivate(xlator_t *);
+int32_t
+br_fsscan_ondemand(xlator_t *);
+
+int32_t
+br_scrubber_handle_options(xlator_t *, br_private_t *, dict_t *);
+
+int32_t
+br_scrubber_monitor_init(xlator_t *, br_private_t *);
+
+int32_t
+br_scrubber_init(xlator_t *, br_private_t *);
+
+int32_t
+br_collect_bad_objects_from_children(xlator_t *this, dict_t *dict);
+
+void
+br_child_set_scrub_state(br_child_t *, gf_boolean_t);
+
+#endif /* __BIT_ROT_SCRUB_H__ */
diff --git a/xlators/features/bit-rot/src/bitd/bit-rot-ssm.c b/xlators/features/bit-rot/src/bitd/bit-rot-ssm.c
new file mode 100644
index 00000000000..753e31a3b23
--- /dev/null
+++ b/xlators/features/bit-rot/src/bitd/bit-rot-ssm.c
@@ -0,0 +1,124 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "bit-rot-ssm.h"
+#include "bit-rot-scrub.h"
+#include "bit-rot-bitd-messages.h"
+
+int
+br_scrub_ssm_noop(xlator_t *this)
+{
+ return 0;
+}
+
+int
+br_scrub_ssm_state_pause(xlator_t *this)
+{
+ br_private_t *priv = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ scrub_monitor = &priv->scrub_monitor;
+
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_GENERIC_SSM_INFO,
+ "Scrubber paused");
+ _br_monitor_set_scrub_state(scrub_monitor, BR_SCRUB_STATE_PAUSED);
+ return 0;
+}
+
+int
+br_scrub_ssm_state_ipause(xlator_t *this)
+{
+ br_private_t *priv = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ scrub_monitor = &priv->scrub_monitor;
+
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_GENERIC_SSM_INFO,
+ "Scrubber paused");
+ _br_monitor_set_scrub_state(scrub_monitor, BR_SCRUB_STATE_IPAUSED);
+ return 0;
+}
+
+int
+br_scrub_ssm_state_active(xlator_t *this)
+{
+ br_private_t *priv = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ scrub_monitor = &priv->scrub_monitor;
+
+ if (scrub_monitor->done) {
+ (void)br_fsscan_activate(this);
+ } else {
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_GENERIC_SSM_INFO,
+ "Scrubbing resumed");
+ _br_monitor_set_scrub_state(scrub_monitor, BR_SCRUB_STATE_ACTIVE);
+ }
+
+ return 0;
+}
+
+int
+br_scrub_ssm_state_stall(xlator_t *this)
+{
+ br_private_t *priv = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ scrub_monitor = &priv->scrub_monitor;
+
+ gf_msg(this->name, GF_LOG_INFO, 0, BRB_MSG_GENERIC_SSM_INFO,
+ "Volume is under active scrubbing. Pausing scrub..");
+ _br_monitor_set_scrub_state(scrub_monitor, BR_SCRUB_STATE_STALLED);
+ return 0;
+}
+
+static br_scrub_ssm_call *br_scrub_ssm[BR_SCRUB_MAXSTATES][BR_SCRUB_MAXEVENTS] =
+ {
+ /* INACTIVE */
+ {br_fsscan_schedule, br_scrub_ssm_state_ipause, br_scrub_ssm_noop},
+ /* PENDING */
+ {br_fsscan_reschedule, br_fsscan_deactivate, br_fsscan_ondemand},
+ /* ACTIVE */
+ {br_scrub_ssm_noop, br_scrub_ssm_state_stall, br_scrub_ssm_noop},
+ /* PAUSED */
+ {br_fsscan_activate, br_scrub_ssm_noop, br_scrub_ssm_noop},
+ /* IPAUSED */
+ {br_fsscan_schedule, br_scrub_ssm_noop, br_scrub_ssm_noop},
+ /* STALLED */
+ {br_scrub_ssm_state_active, br_scrub_ssm_noop, br_scrub_ssm_noop},
+};
+
+int32_t
+br_scrub_state_machine(xlator_t *this, gf_boolean_t scrub_ondemand)
+{
+ br_private_t *priv = NULL;
+ br_scrub_ssm_call *call = NULL;
+ struct br_scrubber *fsscrub = NULL;
+ br_scrub_state_t currstate = 0;
+ br_scrub_event_t event = 0;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ fsscrub = &priv->fsscrub;
+ scrub_monitor = &priv->scrub_monitor;
+
+ currstate = scrub_monitor->state;
+ if (scrub_ondemand)
+ event = BR_SCRUB_EVENT_ONDEMAND;
+ else
+ event = _br_child_get_scrub_event(fsscrub);
+
+ call = br_scrub_ssm[currstate][event];
+ return call(this);
+}
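
For reference, br_scrub_ssm above is a plain state x event dispatch table: the current scrub state and the incoming event index a two-dimensional array of handler pointers, and the chosen handler performs the transition. A minimal standalone sketch of the same pattern (all names illustrative, not taken from the patch):

    #include <stdio.h>

    typedef enum { ST_INACTIVE, ST_ACTIVE, ST_MAX } state_t;
    typedef enum { EV_SCHEDULE, EV_PAUSE, EV_MAX } event_t;

    typedef int(handler_t)(void);

    static int do_schedule(void) { printf("scheduling scrub\n"); return 0; }
    static int do_pause(void)    { printf("pausing scrub\n");    return 0; }
    static int do_noop(void)     { return 0; }

    /* rows are states, columns are events, cells are handlers */
    static handler_t *ssm[ST_MAX][EV_MAX] = {
        /* INACTIVE */ {do_schedule, do_noop},
        /* ACTIVE   */ {do_noop, do_pause},
    };

    int main(void)
    {
        state_t state = ST_INACTIVE;
        event_t event = EV_SCHEDULE;

        /* dispatch exactly like br_scrub_ssm[currstate][event] */
        return ssm[state][event]();
    }
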
diff --git a/xlators/features/bit-rot/src/bitd/bit-rot-ssm.h b/xlators/features/bit-rot/src/bitd/bit-rot-ssm.h
new file mode 100644
index 00000000000..37b45a42eac
--- /dev/null
+++ b/xlators/features/bit-rot/src/bitd/bit-rot-ssm.h
@@ -0,0 +1,38 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __BIT_ROT_SSM_H__
+#define __BIT_ROT_SSM_H__
+
+#include <glusterfs/xlator.h>
+
+typedef enum br_scrub_state {
+ BR_SCRUB_STATE_INACTIVE = 0,
+ BR_SCRUB_STATE_PENDING,
+ BR_SCRUB_STATE_ACTIVE,
+ BR_SCRUB_STATE_PAUSED,
+ BR_SCRUB_STATE_IPAUSED,
+ BR_SCRUB_STATE_STALLED,
+ BR_SCRUB_MAXSTATES,
+} br_scrub_state_t;
+
+typedef enum br_scrub_event {
+ BR_SCRUB_EVENT_SCHEDULE = 0,
+ BR_SCRUB_EVENT_PAUSE,
+ BR_SCRUB_EVENT_ONDEMAND,
+ BR_SCRUB_MAXEVENTS,
+} br_scrub_event_t;
+
+struct br_monitor;
+
+int32_t
+br_scrub_state_machine(xlator_t *, gf_boolean_t);
+
+#endif /* __BIT_ROT_SSM_H__ */
diff --git a/xlators/features/bit-rot/src/bitd/bit-rot.c b/xlators/features/bit-rot/src/bitd/bit-rot.c
new file mode 100644
index 00000000000..a2f1c343a1d
--- /dev/null
+++ b/xlators/features/bit-rot/src/bitd/bit-rot.c
@@ -0,0 +1,2232 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <ctype.h>
+
+#include <glusterfs/logging.h>
+#include <glusterfs/compat-errno.h>
+
+#include "bit-rot.h"
+#include "bit-rot-scrub.h"
+#include <pthread.h>
+#include "bit-rot-bitd-messages.h"
+
+#define BR_HASH_CALC_READ_SIZE (128 * 1024)
+
+typedef int32_t(br_child_handler)(xlator_t *, br_child_t *);
+
+struct br_child_event {
+ xlator_t *this;
+
+ br_child_t *child;
+
+ br_child_handler *call;
+
+ struct list_head list;
+};
+
+static int
+br_find_child_index(xlator_t *this, xlator_t *child)
+{
+ br_private_t *priv = NULL;
+ int i = -1;
+ int index = -1;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+ GF_VALIDATE_OR_GOTO(this->name, child, out);
+
+ priv = this->private;
+
+ for (i = 0; i < priv->child_count; i++) {
+ if (child == priv->children[i].xl) {
+ index = i;
+ break;
+ }
+ }
+
+out:
+ return index;
+}
+
+br_child_t *
+br_get_child_from_brick_path(xlator_t *this, char *brick_path)
+{
+ br_private_t *priv = NULL;
+ br_child_t *child = NULL;
+ br_child_t *tmp = NULL;
+ int i = 0;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+ GF_VALIDATE_OR_GOTO(this->name, brick_path, out);
+
+ priv = this->private;
+
+ pthread_mutex_lock(&priv->lock);
+ {
+ for (i = 0; i < priv->child_count; i++) {
+ tmp = &priv->children[i];
+ if (!strcmp(tmp->brick_path, brick_path)) {
+ child = tmp;
+ break;
+ }
+ }
+ }
+ pthread_mutex_unlock(&priv->lock);
+
+out:
+ return child;
+}
+
+/**
+ * probably we'll encapsulate brick inside our own structure when
+ * needed -- later.
+ */
+void *
+br_brick_init(void *xl, struct gf_brick_spec *brick)
+{
+ return brick;
+}
+
+/**
+ * and clean up here whatever was allocated in br_brick_init().
+ */
+void
+br_brick_fini(void *xl, char *brick, void *data)
+{
+ return;
+}
+
+/**
+ * TODO: Signature can contain null terminators which causes bitrot
+ * stub to store truncated hash as it depends on string length of
+ * the hash.
+ *
+ * FIX: Send the string length as part of the signature struct and
+ * change stub to handle this change.
+ */
+static br_isignature_t *
+br_prepare_signature(const unsigned char *sign, unsigned long hashlen,
+ int8_t hashtype, br_object_t *object)
+{
+ br_isignature_t *signature = NULL;
+
+ /* TODO: use mem-pool */
+ signature = GF_CALLOC(1, signature_size(hashlen + 1),
+ gf_br_stub_mt_signature_t);
+ if (!signature)
+ return NULL;
+
+ /* object version */
+ signature->signedversion = object->signedversion;
+
+ /* signature length & type */
+ signature->signaturelen = hashlen;
+ signature->signaturetype = hashtype;
+
+ /* signature itself */
+ memcpy(signature->signature, (char *)sign, hashlen);
+ signature->signature[hashlen + 1] = '\0';
+
+ return signature;
+}
+
+gf_boolean_t
+bitd_is_bad_file(xlator_t *this, br_child_t *child, loc_t *loc, fd_t *fd)
+{
+ int32_t ret = -1;
+ dict_t *xattr = NULL;
+ inode_t *inode = NULL;
+ gf_boolean_t bad_file = _gf_false;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", this, out);
+
+ inode = (loc) ? loc->inode : fd->inode;
+
+ if (fd)
+ ret = syncop_fgetxattr(child->xl, fd, &xattr, BITROT_OBJECT_BAD_KEY,
+ NULL, NULL);
+ else if (loc)
+ ret = syncop_getxattr(child->xl, loc, &xattr, BITROT_OBJECT_BAD_KEY,
+ NULL, NULL);
+
+ if (!ret) {
+ gf_msg_debug(this->name, 0, "[GFID: %s] is marked corrupted",
+ uuid_utoa(inode->gfid));
+ bad_file = _gf_true;
+ }
+
+ if (xattr)
+ dict_unref(xattr);
+
+out:
+ return bad_file;
+}
+
+/**
+ * Do a lookup on the gfid present within the object.
+ */
+static int32_t
+br_object_lookup(xlator_t *this, br_object_t *object, struct iatt *iatt,
+ inode_t **linked_inode)
+{
+ int ret = -EINVAL;
+ loc_t loc = {
+ 0,
+ };
+ inode_t *inode = NULL;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, object, out);
+
+ inode = inode_find(object->child->table, object->gfid);
+
+ if (inode)
+ loc.inode = inode;
+ else
+ loc.inode = inode_new(object->child->table);
+
+ if (!loc.inode) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ gf_uuid_copy(loc.gfid, object->gfid);
+
+ ret = syncop_lookup(object->child->xl, &loc, iatt, NULL, NULL, NULL);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * The file might have been deleted by the application
+ * after getting the event, but before doing a lookup.
+ * So use linked_inode after inode_link is done.
+ */
+ *linked_inode = inode_link(loc.inode, NULL, NULL, iatt);
+ if (*linked_inode)
+ inode_lookup(*linked_inode);
+
+out:
+ loc_wipe(&loc);
+ return ret;
+}
+
+/**
+ * open the object with O_RDONLY flags and return the fd. TODO: figure out
+ * how to let the brick know that the open is being done by bitd, since the
+ * syncop framework does not allow passing xdata -- maybe use
+ * frame->root->pid itself.
+ */
+static int32_t
+br_object_open(xlator_t *this, br_object_t *object, inode_t *inode,
+ fd_t **openfd)
+{
+ int32_t ret = -1;
+ fd_t *fd = NULL;
+ loc_t loc = {
+ 0,
+ };
+
+ GF_VALIDATE_OR_GOTO("bit-rot", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, object, out);
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
+
+ ret = -EINVAL;
+ fd = fd_create(inode, 0);
+ if (!fd) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_FD_CREATE_FAILED,
+ "gfid=%s", uuid_utoa(inode->gfid), NULL);
+ goto out;
+ }
+
+ loc.inode = inode_ref(inode);
+ gf_uuid_copy(loc.gfid, inode->gfid);
+
+ ret = syncop_open(object->child->xl, &loc, O_RDONLY, fd, NULL, NULL);
+ if (ret) {
+ br_log_object(this, "open", inode->gfid, -ret);
+ fd_unref(fd);
+ fd = NULL;
+ } else {
+ fd_bind(fd);
+ *openfd = fd;
+ }
+
+ loc_wipe(&loc);
+
+out:
+ return ret;
+}
+
+/**
+ * read a 128KB block of the object from offset @offset and feed it to
+ * the running SHA256 context.
+ */
+static int32_t
+br_object_read_block_and_sign(xlator_t *this, fd_t *fd, br_child_t *child,
+ off_t offset, size_t size, SHA256_CTX *sha256)
+{
+ int32_t ret = -1;
+ tbf_t *tbf = NULL;
+ struct iovec *iovec = NULL;
+ struct iobref *iobref = NULL;
+ br_private_t *priv = NULL;
+ int count = 0;
+ int i = 0;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, fd, out);
+ GF_VALIDATE_OR_GOTO(this->name, fd->inode, out);
+ GF_VALIDATE_OR_GOTO(this->name, child, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+
+ priv = this->private;
+
+ GF_VALIDATE_OR_GOTO(this->name, priv->tbf, out);
+ tbf = priv->tbf;
+
+ ret = syncop_readv(child->xl, fd, size, offset, 0, &iovec, &count, &iobref,
+ NULL, NULL, NULL);
+
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, BRB_MSG_READV_FAILED,
+ "gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ ret = -1;
+ goto out;
+ }
+
+ if (ret == 0)
+ goto out;
+
+ for (i = 0; i < count; i++) {
+ TBF_THROTTLE_BEGIN(tbf, TBF_OP_HASH, iovec[i].iov_len);
+ {
+ SHA256_Update(sha256, (const unsigned char *)(iovec[i].iov_base),
+ iovec[i].iov_len);
+ }
+        TBF_THROTTLE_END(tbf, TBF_OP_HASH, iovec[i].iov_len);
+ }
+
+out:
+ if (iovec)
+ GF_FREE(iovec);
+
+ if (iobref)
+ iobref_unref(iobref);
+
+ return ret;
+}
+
+int32_t
+br_calculate_obj_checksum(unsigned char *md, br_child_t *child, fd_t *fd,
+ struct iatt *iatt)
+{
+ int32_t ret = -1;
+ off_t offset = 0;
+ size_t block = BR_HASH_CALC_READ_SIZE;
+ xlator_t *this = NULL;
+
+ SHA256_CTX sha256;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", child, out);
+ GF_VALIDATE_OR_GOTO("bit-rot", iatt, out);
+ GF_VALIDATE_OR_GOTO("bit-rot", fd, out);
+
+ this = child->this;
+
+ SHA256_Init(&sha256);
+
+ while (1) {
+ ret = br_object_read_block_and_sign(this, fd, child, offset, block,
+ &sha256);
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_BLOCK_READ_FAILED,
+ "offset=%" PRIu64, offset, "object-gfid=%s",
+ uuid_utoa(fd->inode->gfid), NULL);
+ break;
+ }
+
+ if (ret == 0)
+ break;
+
+ offset += ret;
+ }
+
+ if (ret == 0)
+ SHA256_Final(md, &sha256);
+
+out:
+ return ret;
+}
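
For reference, the hashing loop above follows the standard incremental-digest pattern: read fixed 128 KiB blocks and feed each one to a SHA256 context, finalizing only after the last read. The standalone sketch below shows the same pattern with ordinary stdio standing in for syncop_readv(); it uses the classic OpenSSL SHA256_* interface that the patch itself calls (build with -lcrypto):

    #include <openssl/sha.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define READ_SIZE (128 * 1024) /* same block size as BR_HASH_CALC_READ_SIZE */

    int main(int argc, char **argv)
    {
        unsigned char md[SHA256_DIGEST_LENGTH];
        unsigned char *buf = malloc(READ_SIZE);
        SHA256_CTX ctx;
        size_t nread;
        FILE *fp;
        int i;

        if (argc != 2 || !buf)
            return 1;
        fp = fopen(argv[1], "rb");
        if (!fp)
            return 1;

        SHA256_Init(&ctx);
        while ((nread = fread(buf, 1, READ_SIZE, fp)) > 0)
            SHA256_Update(&ctx, buf, nread); /* one block at a time */
        SHA256_Final(md, &ctx);

        for (i = 0; i < SHA256_DIGEST_LENGTH; i++)
            printf("%02x", md[i]);
        printf("\n");

        fclose(fp);
        free(buf);
        return 0;
    }
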
+
+static int32_t
+br_object_checksum(unsigned char *md, br_object_t *object, fd_t *fd,
+ struct iatt *iatt)
+{
+ return br_calculate_obj_checksum(md, object->child, fd, iatt);
+}
+
+static int32_t
+br_object_read_sign(inode_t *linked_inode, fd_t *fd, br_object_t *object,
+ struct iatt *iatt)
+{
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ dict_t *xattr = NULL;
+ unsigned char *md = NULL;
+ br_isignature_t *sign = NULL;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", object, out);
+ GF_VALIDATE_OR_GOTO("bit-rot", linked_inode, out);
+ GF_VALIDATE_OR_GOTO("bit-rot", fd, out);
+
+ this = object->this;
+
+ md = GF_MALLOC(SHA256_DIGEST_LENGTH, gf_common_mt_char);
+ if (!md) {
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM, BRB_MSG_SAVING_HASH_FAILED,
+ "object-gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ goto out;
+ }
+
+ ret = br_object_checksum(md, object, fd, iatt);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_CALC_CHECKSUM_FAILED,
+ "object-gfid=%s", uuid_utoa(linked_inode->gfid), NULL);
+ goto free_signature;
+ }
+
+ sign = br_prepare_signature(md, SHA256_DIGEST_LENGTH,
+ BR_SIGNATURE_TYPE_SHA256, object);
+ if (!sign) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_GET_SIGN_FAILED,
+ "object-gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ goto free_signature;
+ }
+
+ xattr = dict_for_key_value(GLUSTERFS_SET_OBJECT_SIGNATURE, (void *)sign,
+ signature_size(SHA256_DIGEST_LENGTH), _gf_true);
+
+ if (!xattr) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_SET_SIGN_FAILED,
+ "dict-allocation object-gfid=%s", uuid_utoa(fd->inode->gfid),
+ NULL);
+ goto free_isign;
+ }
+
+ ret = syncop_fsetxattr(object->child->xl, fd, xattr, 0, NULL, NULL);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_SET_SIGN_FAILED,
+ "fsetxattr object-gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ goto unref_dict;
+ }
+
+ ret = 0;
+
+unref_dict:
+ dict_unref(xattr);
+free_isign:
+ GF_FREE(sign);
+free_signature:
+ GF_FREE(md);
+out:
+ return ret;
+}
+
+static int
+br_object_sign_softerror(int32_t op_errno)
+{
+ return ((op_errno == ENOENT) || (op_errno == ESTALE) ||
+ (op_errno == ENODATA));
+}
+
+void
+br_log_object(xlator_t *this, char *op, uuid_t gfid, int32_t op_errno)
+{
+ int softerror = br_object_sign_softerror(op_errno);
+ if (softerror) {
+ gf_msg_debug(this->name, 0,
+ "%s() failed on object %s "
+ "[reason: %s]",
+ op, uuid_utoa(gfid), strerror(op_errno));
+ } else {
+ gf_smsg(this->name, GF_LOG_ERROR, op_errno, BRB_MSG_OP_FAILED, "op=%s",
+ op, "gfid=%s", uuid_utoa(gfid), NULL);
+ }
+}
+
+void
+br_log_object_path(xlator_t *this, char *op, const char *path, int32_t op_errno)
+{
+ int softerror = br_object_sign_softerror(op_errno);
+ if (softerror) {
+ gf_msg_debug(this->name, 0,
+ "%s() failed on object %s "
+ "[reason: %s]",
+ op, path, strerror(op_errno));
+ } else {
+ gf_smsg(this->name, GF_LOG_ERROR, op_errno, BRB_MSG_OP_FAILED, "op=%s",
+ op, "path=%s", path, NULL);
+ }
+}
+
+static void
+br_trigger_sign(xlator_t *this, br_child_t *child, inode_t *linked_inode,
+ loc_t *loc, gf_boolean_t need_reopen)
+{
+ fd_t *fd = NULL;
+ int32_t ret = -1;
+ uint32_t val = 0;
+ dict_t *dict = NULL;
+ pid_t pid = GF_CLIENT_PID_BITD;
+
+ syncopctx_setfspid(&pid);
+
+ val = (need_reopen == _gf_true) ? BR_OBJECT_REOPEN : BR_OBJECT_RESIGN;
+
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
+ ret = dict_set_uint32(dict, BR_REOPEN_SIGN_HINT_KEY, val);
+ if (ret)
+ goto cleanup_dict;
+
+ ret = -1;
+ fd = fd_create(linked_inode, 0);
+ if (!fd) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_FD_CREATE_FAILED,
+ "gfid=%s", uuid_utoa(linked_inode->gfid), NULL);
+ goto cleanup_dict;
+ }
+
+ ret = syncop_open(child->xl, loc, O_RDWR, fd, NULL, NULL);
+ if (ret) {
+ br_log_object(this, "open", linked_inode->gfid, -ret);
+ goto unref_fd;
+ }
+
+ fd_bind(fd);
+
+ ret = syncop_fsetxattr(child->xl, fd, dict, 0, NULL, NULL);
+ if (ret)
+ br_log_object(this, "fsetxattr", linked_inode->gfid, -ret);
+
+    /* passthrough: fd_unref() */
+
+unref_fd:
+ fd_unref(fd);
+cleanup_dict:
+ dict_unref(dict);
+out:
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRB_MSG_TRIGGER_SIGN_FAILED,
+ "gfid=%s", uuid_utoa(linked_inode->gfid), "reopen-hint-val=%d",
+ val, NULL);
+ }
+}
+
+static void
+br_object_resign(xlator_t *this, br_object_t *object, inode_t *linked_inode)
+{
+ loc_t loc = {
+ 0,
+ };
+
+ loc.inode = inode_ref(linked_inode);
+ gf_uuid_copy(loc.gfid, linked_inode->gfid);
+
+ br_trigger_sign(this, object->child, linked_inode, &loc, _gf_false);
+
+ loc_wipe(&loc);
+}
+
+/**
+ * Sign a given object. This routine runs full throttle. There needs to be
+ * some form of priority scheduling and/or read burstiness to avoid starving
+ * (or kicking) client I/Os.
+ */
+static int32_t
+br_sign_object(br_object_t *object)
+{
+ int32_t ret = -1;
+ inode_t *linked_inode = NULL;
+ xlator_t *this = NULL;
+ fd_t *fd = NULL;
+ struct iatt iatt = {
+ 0,
+ };
+ pid_t pid = GF_CLIENT_PID_BITD;
+ br_sign_state_t sign_info = BR_SIGN_NORMAL;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", object, out);
+
+ this = object->this;
+
+ /**
+ * FIXME: This is required as signing an object is restricted to
+ * clients with special frame->root->pid. Change the way client
+ * pid is set.
+ */
+ syncopctx_setfspid(&pid);
+
+ ret = br_object_lookup(this, object, &iatt, &linked_inode);
+ if (ret) {
+ br_log_object(this, "lookup", object->gfid, -ret);
+ goto out;
+ }
+
+ /**
+ * For fd's that have notified for reopening, we send an explicit
+ * open() followed by a dummy write() call. This triggers the
+ * actual signing of the object.
+ */
+ sign_info = ntohl(object->sign_info);
+ if (sign_info == BR_SIGN_REOPEN_WAIT) {
+ br_object_resign(this, object, linked_inode);
+ goto unref_inode;
+ }
+
+ ret = br_object_open(this, object, linked_inode, &fd);
+ if (!fd) {
+ br_log_object(this, "open", object->gfid, -ret);
+ goto unref_inode;
+ }
+
+ /**
+ * we have an open file descriptor on the object. from here on,
+ * do not be generous to file operation errors.
+ */
+ gf_msg_debug(this->name, 0, "Signing object [%s]",
+ uuid_utoa(linked_inode->gfid));
+
+ ret = br_object_read_sign(linked_inode, fd, object, &iatt);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_READ_AND_SIGN_FAILED,
+ "gfid=%s", uuid_utoa(linked_inode->gfid), NULL);
+ goto unref_fd;
+ }
+
+ ret = 0;
+
+unref_fd:
+ fd_unref(fd);
+unref_inode:
+ inode_unref(linked_inode);
+out:
+ return ret;
+}
+
+static br_object_t *
+__br_pick_object(br_private_t *priv)
+{
+ br_object_t *object = NULL;
+
+ while (list_empty(&priv->obj_queue->objects)) {
+ pthread_cond_wait(&priv->object_cond, &priv->lock);
+ }
+
+ object = list_first_entry(&priv->obj_queue->objects, br_object_t, list);
+ list_del_init(&object->list);
+
+ return object;
+}
+
+/**
+ * This is the place where the signing of the objects is triggered.
+ */
+void *
+br_process_object(void *arg)
+{
+ xlator_t *this = NULL;
+ br_object_t *object = NULL;
+ br_private_t *priv = NULL;
+ int32_t ret = -1;
+
+ this = arg;
+ priv = this->private;
+
+ THIS = this;
+
+ for (;;) {
+ pthread_mutex_lock(&priv->lock);
+ {
+ object = __br_pick_object(priv);
+ }
+ pthread_mutex_unlock(&priv->lock);
+
+ ret = br_sign_object(object);
+ if (ret && !br_object_sign_softerror(-ret))
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_SET_SIGN_FAILED,
+ "gfid=%s", uuid_utoa(object->gfid), NULL);
+ GF_FREE(object);
+ }
+
+ return NULL;
+}
+
+/**
+ * This function gets kicked in once the object is expired from the
+ * timer wheel. This actually adds the object received via notification
+ * from the changelog to the queue from where the objects gets picked
+ * up for signing.
+ *
+ * This routine can be made lightweight by introducing an alternate
+ * timer-wheel API that dispatches _all_ expired objects in one-shot
+ * rather than an object at-a-time. This routine can then just simply
+ * be a call to list_splice_tail().
+ *
+ * NOTE: use call_time to instrument signing time in br_sign_object().
+ */
+void
+br_add_object_to_queue(struct gf_tw_timer_list *timer, void *data,
+ unsigned long call_time)
+{
+ br_object_t *object = NULL;
+ xlator_t *this = NULL;
+ br_private_t *priv = NULL;
+
+ object = data;
+ this = object->this;
+ priv = this->private;
+
+ THIS = this;
+
+ pthread_mutex_lock(&priv->lock);
+ {
+ list_add_tail(&object->list, &priv->obj_queue->objects);
+ pthread_cond_broadcast(&priv->object_cond);
+ }
+ pthread_mutex_unlock(&priv->lock);
+
+ if (timer)
+ mem_put(timer);
+ return;
+}
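
For reference, br_add_object_to_queue() and __br_pick_object() form a textbook producer/consumer pair: the producer appends to the object list under priv->lock and broadcasts, while consumer (signer) threads sleep on the condvar as long as the list is empty. A minimal standalone sketch of the same hand-off, with a plain linked list standing in for the kernel-style list used by the patch (names are illustrative):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int id;
        struct obj *next;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static struct obj *head;

    static void enqueue(int id) /* producer, cf. br_add_object_to_queue() */
    {
        struct obj *o = calloc(1, sizeof(*o));
        if (!o)
            return;
        o->id = id;
        pthread_mutex_lock(&lock);
        o->next = head; /* LIFO for brevity; the patch appends at the tail */
        head = o;
        pthread_cond_broadcast(&cond); /* wake any sleeping signer thread */
        pthread_mutex_unlock(&lock);
    }

    static struct obj *pick(void) /* consumer, cf. __br_pick_object() */
    {
        struct obj *o;
        pthread_mutex_lock(&lock);
        while (!head) /* same wait loop as the patch */
            pthread_cond_wait(&cond, &lock);
        o = head;
        head = o->next;
        pthread_mutex_unlock(&lock);
        return o;
    }

    static void *signer(void *arg)
    {
        struct obj *o = pick();
        printf("signing object %d\n", o->id);
        free(o);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, signer, NULL);
        enqueue(42);
        pthread_join(t, NULL);
        return 0;
    }
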
+
+static br_object_t *
+br_initialize_object(xlator_t *this, br_child_t *child, changelog_event_t *ev)
+{
+ br_object_t *object = NULL;
+
+ object = GF_CALLOC(1, sizeof(*object), gf_br_mt_br_object_t);
+ if (!object)
+ goto out;
+ INIT_LIST_HEAD(&object->list);
+
+ object->this = this;
+ object->child = child;
+ gf_uuid_copy(object->gfid, ev->u.releasebr.gfid);
+
+ /* NOTE: it's BE, but no worry */
+ object->signedversion = ev->u.releasebr.version;
+ object->sign_info = ev->u.releasebr.sign_info;
+
+out:
+ return object;
+}
+
+static struct gf_tw_timer_list *
+br_initialize_timer(xlator_t *this, br_object_t *object, br_child_t *child,
+ changelog_event_t *ev)
+{
+ br_private_t *priv = NULL;
+ struct gf_tw_timer_list *timer = NULL;
+
+ priv = this->private;
+
+ timer = mem_get0(child->timer_pool);
+ if (!timer)
+ goto out;
+ INIT_LIST_HEAD(&timer->entry);
+
+ timer->expires = priv->expiry_time;
+ if (!timer->expires)
+ timer->expires = 1;
+
+ timer->data = object;
+ timer->function = br_add_object_to_queue;
+ gf_tw_add_timer(priv->timer_wheel, timer);
+
+out:
+ return timer;
+}
+
+static int32_t
+br_schedule_object_reopen(xlator_t *this, br_object_t *object,
+ br_child_t *child, changelog_event_t *ev)
+{
+ struct gf_tw_timer_list *timer = NULL;
+
+ timer = br_initialize_timer(this, object, child, ev);
+ if (!timer)
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_SET_TIMER_FAILED,
+ "gfid=%s", uuid_utoa(object->gfid), NULL);
+ return timer ? 0 : -1;
+}
+
+static int32_t
+br_object_quicksign(xlator_t *this, br_object_t *object)
+{
+ br_add_object_to_queue(NULL, object, 0ULL);
+ return 0;
+}
+
+/**
+ * This callback function registered with the changelog is executed
+ * whenever a notification from the changelog is received. This should
+ * add the object (or the gfid) on which the notification has come to
+ * the timer-wheel with some expiry time.
+ *
+ * TODO: use mem-pool for allocations and maybe allocate timer and
+ * object as a single alloc and bifurcate their respective pointers.
+ */
+void
+br_brick_callback(void *xl, char *brick, void *data, changelog_event_t *ev)
+{
+ int32_t ret = 0;
+ uuid_t gfid = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ br_object_t *object = NULL;
+ br_child_t *child = NULL;
+ br_sign_state_t sign_info = BR_SIGN_INVALID;
+
+ this = xl;
+
+ GF_VALIDATE_OR_GOTO(this->name, ev, out);
+ GF_VALIDATE_OR_GOTO("bit-rot", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+
+ GF_ASSERT(ev->ev_type == CHANGELOG_OP_TYPE_BR_RELEASE);
+ GF_ASSERT(!gf_uuid_is_null(ev->u.releasebr.gfid));
+
+ gf_uuid_copy(gfid, ev->u.releasebr.gfid);
+
+ gf_msg_debug(this->name, 0, "RELEASE EVENT [GFID %s]", uuid_utoa(gfid));
+
+ child = br_get_child_from_brick_path(this, brick);
+ if (!child) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_GET_SUBVOL_FAILED,
+ "brick=%s", brick, NULL);
+ goto out;
+ }
+
+ object = br_initialize_object(this, child, ev);
+ if (!object) {
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM, BRB_MSG_NO_MEMORY,
+ "object-gfid=%s", uuid_utoa(gfid), NULL);
+ goto out;
+ }
+
+ /* sanity check */
+ sign_info = ntohl(object->sign_info);
+ GF_ASSERT(sign_info != BR_SIGN_NORMAL);
+
+ if (sign_info == BR_SIGN_REOPEN_WAIT)
+ ret = br_schedule_object_reopen(this, object, child, ev);
+ else
+ ret = br_object_quicksign(this, object);
+
+ if (ret)
+ goto free_object;
+
+ gf_msg_debug(this->name, 0, "->callback: brick [%s], type [%d]\n", brick,
+ ev->ev_type);
+ return;
+
+free_object:
+ GF_FREE(object);
+out:
+ return;
+}
+
+void
+br_fill_brick_spec(struct gf_brick_spec *brick, char *path)
+{
+ brick->brick_path = gf_strdup(path);
+ brick->filter = CHANGELOG_OP_TYPE_BR_RELEASE;
+
+ brick->init = br_brick_init;
+ brick->fini = br_brick_fini;
+ brick->callback = br_brick_callback;
+ brick->connected = NULL;
+ brick->disconnected = NULL;
+}
+
+static gf_boolean_t
+br_check_object_need_sign(xlator_t *this, dict_t *xattr, br_child_t *child)
+{
+ int32_t ret = -1;
+ gf_boolean_t need_sign = _gf_false;
+ br_isignature_out_t *sign = NULL;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, xattr, out);
+ GF_VALIDATE_OR_GOTO(this->name, child, out);
+
+ ret = dict_get_ptr(xattr, GLUSTERFS_GET_OBJECT_SIGNATURE, (void **)&sign);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_GET_SIGN_FAILED,
+ "object-info", NULL);
+ goto out;
+ }
+
+    /* The signature is stale (the object was modified after it was last
+     * signed), so it needs to be signed again. */
+ if (sign->stale)
+ need_sign = _gf_true;
+
+out:
+ return need_sign;
+}
+
+int32_t
+br_prepare_loc(xlator_t *this, br_child_t *child, loc_t *parent,
+ gf_dirent_t *entry, loc_t *loc)
+{
+ int32_t ret = -1;
+ inode_t *inode = NULL;
+
+ inode = inode_grep(child->table, parent->inode, entry->d_name);
+ if (!inode)
+ loc->inode = inode_new(child->table);
+ else {
+ loc->inode = inode;
+ if (loc->inode->ia_type != IA_IFREG) {
+ gf_msg_debug(this->name, 0,
+ "%s is not a regular "
+ "file",
+ entry->d_name);
+ ret = 0;
+ goto out;
+ }
+ }
+
+ loc->parent = inode_ref(parent->inode);
+ gf_uuid_copy(loc->pargfid, parent->inode->gfid);
+
+ ret = inode_path(parent->inode, entry->d_name, (char **)&loc->path);
+ if (ret < 0 || !loc->path) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_PATH_FAILED,
+ "inode_path=%s", entry->d_name, "parent-gfid=%s",
+ uuid_utoa(parent->inode->gfid), NULL);
+ goto out;
+ }
+
+ loc->name = strrchr(loc->path, '/');
+ if (loc->name)
+ loc->name++;
+
+ ret = 1;
+
+out:
+ return ret;
+}
+
+/**
+ * Oneshot crawler
+ * ---------------
+ * This is a catchup mechanism. Objects that remained unsigned from the
+ * last run for whatever reason (node crashes, reboots, etc..) become
+ * candidates for signing. This allows the signature to "catch up" with
+ * the current state of the object. Triggering signing is easy: perform
+ * an open() followed by a close() thereby resulting in call boomerang.
+ * (though not back to itself :))
+ */
+int
+bitd_oneshot_crawl(xlator_t *subvol, gf_dirent_t *entry, loc_t *parent,
+ void *data)
+{
+ int op_errno = 0;
+ br_child_t *child = NULL;
+ xlator_t *this = NULL;
+ loc_t loc = {
+ 0,
+ };
+ struct iatt iatt = {
+ 0,
+ };
+ struct iatt parent_buf = {
+ 0,
+ };
+ dict_t *xattr = NULL;
+ int32_t ret = -1;
+ inode_t *linked_inode = NULL;
+ gf_boolean_t need_signing = _gf_false;
+ gf_boolean_t need_reopen = _gf_true;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", subvol, out);
+ GF_VALIDATE_OR_GOTO("bit-rot", data, out);
+
+ child = data;
+ this = child->this;
+
+ ret = br_prepare_loc(this, child, parent, entry, &loc);
+ if (!ret)
+ goto out;
+
+ ret = syncop_lookup(child->xl, &loc, &iatt, &parent_buf, NULL, NULL);
+ if (ret) {
+ br_log_object_path(this, "lookup", loc.path, -ret);
+ goto out;
+ }
+
+ linked_inode = inode_link(loc.inode, parent->inode, loc.name, &iatt);
+ if (linked_inode)
+ inode_lookup(linked_inode);
+
+ if (iatt.ia_type != IA_IFREG) {
+ gf_msg_debug(this->name, 0,
+ "%s is not a regular file, "
+ "skipping..",
+ entry->d_name);
+ ret = 0;
+ goto unref_inode;
+ }
+
+ /**
+     * As of now, 2 cases are possible and handled.
+     * 1) GlusterFS is upgraded from a previous version which has no notion
+     *    of bit-rot and already has data in the filesystem.
+     *    In this case syncop_getxattr fails with ENODATA and the object
+     *    is signed. (In reality, when the crawler sends a lookup,
+     *    bit-rot-stub creates the xattrs before returning the lookup reply.)
+     * 2) Bit-rot was not enabled, or BitD was down for some reason, during
+     *    which time some files were created; since BitD was down they were
+     *    not signed.
+     * If a file had just been created and was still being written to when
+     * the previously-down BitD came up, then bit-rot-stub should be
+     * intelligent enough to identify this case (by comparing the ongoing
+     * version or by checking whether any fds are present for that inode)
+     * and handle it properly.
+ */
+
+ if (bitd_is_bad_file(this, child, &loc, NULL)) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRB_MSG_SKIP_OBJECT, "path=%s",
+ loc.path, NULL);
+ goto unref_inode;
+ }
+
+ ret = syncop_getxattr(child->xl, &loc, &xattr,
+ GLUSTERFS_GET_OBJECT_SIGNATURE, NULL, NULL);
+ if (ret < 0) {
+ op_errno = -ret;
+ br_log_object(this, "getxattr", linked_inode->gfid, op_errno);
+
+ /**
+ * No need to sign the zero byte objects as the signing
+ * happens upon first modification of the object.
+ */
+ if (op_errno == ENODATA && (iatt.ia_size != 0))
+ need_signing = _gf_true;
+ if (op_errno == EINVAL)
+ gf_smsg(this->name, GF_LOG_WARNING, 0,
+ BRB_MSG_PARTIAL_VERSION_PRESENCE, "gfid=%s",
+ uuid_utoa(linked_inode->gfid), NULL);
+ } else {
+ need_signing = br_check_object_need_sign(this, xattr, child);
+
+ /*
+         * If we are here, the bitrot daemon has started. Whether this is
+         * just a simple restart of the daemon, or it was started because
+         * the feature was enabled, is hard to determine. Hence, even if
+         * need_signing is false (because bit-rot version and signature
+         * are present), still go ahead and sign the object.
+ */
+ if (!need_signing) {
+ need_signing = _gf_true;
+ need_reopen = _gf_true;
+ }
+ }
+
+ if (!need_signing)
+ goto unref_dict;
+
+ gf_smsg(this->name, GF_LOG_INFO, 0, BRB_MSG_TRIGGER_SIGN, "path=%s",
+ loc.path, "gfid=%s", uuid_utoa(linked_inode->gfid), "Brick-path=%s",
+ child->brick_path, NULL);
+ br_trigger_sign(this, child, linked_inode, &loc, need_reopen);
+
+ ret = 0;
+
+unref_dict:
+ if (xattr)
+ dict_unref(xattr);
+unref_inode:
+ inode_unref(linked_inode);
+out:
+ loc_wipe(&loc);
+
+ return ret;
+}
+
+#define BR_CRAWL_THROTTLE_COUNT 50
+#define BR_CRAWL_THROTTLE_ZZZ 5
+
+void *
+br_oneshot_signer(void *arg)
+{
+ loc_t loc = {
+ 0,
+ };
+ xlator_t *this = NULL;
+ br_child_t *child = NULL;
+
+ child = arg;
+ this = child->this;
+
+ THIS = this;
+
+ gf_smsg(this->name, GF_LOG_INFO, 0, BRB_MSG_CRAWLING_START, "brick-path=%s",
+ child->brick_path, NULL);
+
+ loc.inode = child->table->root;
+ (void)syncop_ftw_throttle(child->xl, &loc, GF_CLIENT_PID_BITD, child,
+ bitd_oneshot_crawl, BR_CRAWL_THROTTLE_COUNT,
+ BR_CRAWL_THROTTLE_ZZZ);
+
+ gf_smsg(this->name, GF_LOG_INFO, 0, BRB_MSG_CRAWLING_FINISH,
+ "brick-path=%s", child->brick_path, NULL);
+
+ return NULL;
+}
+
+static void
+br_set_child_state(br_child_t *child, br_child_state_t state)
+{
+ pthread_mutex_lock(&child->lock);
+ {
+ _br_set_child_state(child, state);
+ }
+ pthread_mutex_unlock(&child->lock);
+}
+
+/**
+ * At this point a thread is spawned to crawl the filesystem (at a
+ * tortoise pace) to sign objects that were not signed in previous run(s).
+ * Such objects are identified by examining their dirtiness and timestamp.
+ *
+ * pick object:
+ * signature_is_stale() && (object_timestamp() <= stub_init_time())
+ *
+ * Also, we register to the changelog library to subscribe for event
+ * notifications.
+ */
+static int32_t
+br_enact_signer(xlator_t *this, br_child_t *child, br_stub_init_t *stub)
+{
+ int32_t ret = 0;
+ br_private_t *priv = NULL;
+ struct gf_brick_spec *brick = NULL;
+
+ priv = this->private;
+
+ brick = GF_CALLOC(1, sizeof(struct gf_brick_spec),
+ gf_common_mt_gf_brick_spec_t);
+ if (!brick)
+ goto error_return;
+
+ br_fill_brick_spec(brick, stub->export);
+ ret = gf_changelog_register_generic(brick, 1, 1,
+ this->ctx->cmd_args.log_file, -1, this);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, BRB_MSG_REGISTER_FAILED, NULL);
+ goto dealloc;
+ }
+
+ child->threadrunning = 0;
+ ret = gf_thread_create(&child->thread, NULL, br_oneshot_signer, child,
+ "brosign");
+ if (ret)
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRB_MSG_SPAWN_FAILED,
+ "FS-crawler-thread", NULL);
+ else
+ child->threadrunning = 1;
+
+ /* it's OK to continue, "old" objects would be signed when modified */
+ list_add_tail(&child->list, &priv->signing);
+ return 0;
+
+dealloc:
+ GF_FREE(brick);
+error_return:
+ return -1;
+}
+
+static int32_t
+br_launch_scrubber(xlator_t *this, br_child_t *child, struct br_scanfs *fsscan,
+ struct br_scrubber *fsscrub)
+{
+ int32_t ret = -1;
+ br_private_t *priv = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+
+ scrub_monitor = &priv->scrub_monitor;
+ ret = gf_thread_create(&child->thread, NULL, br_fsscanner, child,
+ "brfsscan");
+ if (ret != 0) {
+ gf_smsg(this->name, GF_LOG_ALERT, 0, BRB_MSG_SPAWN_FAILED,
+ "bitrot-scrubber-daemon Brick-path=%s", child->brick_path,
+ NULL);
+ goto error_return;
+ }
+
+ /* Signal monitor to kick off state machine*/
+ pthread_mutex_lock(&scrub_monitor->mutex);
+ {
+ if (!scrub_monitor->inited)
+ pthread_cond_signal(&scrub_monitor->cond);
+ scrub_monitor->inited = _gf_true;
+ }
+ pthread_mutex_unlock(&scrub_monitor->mutex);
+
+ /**
+     * Everything has been set up.. add this subvolume to the scrubbers
+ * list.
+ */
+ pthread_mutex_lock(&fsscrub->mutex);
+ {
+ list_add_tail(&child->list, &fsscrub->scrublist);
+ pthread_cond_broadcast(&fsscrub->cond);
+ }
+ pthread_mutex_unlock(&fsscrub->mutex);
+
+ return 0;
+
+error_return:
+ return -1;
+}
+
+static int32_t
+br_enact_scrubber(xlator_t *this, br_child_t *child)
+{
+ int32_t ret = 0;
+ br_private_t *priv = NULL;
+ struct br_scanfs *fsscan = NULL;
+ struct br_scrubber *fsscrub = NULL;
+
+ priv = this->private;
+
+ fsscan = &child->fsscan;
+ fsscrub = &priv->fsscrub;
+
+ /**
+     * if this child already witnessed a successful connection earlier,
+     * there's no need to initialize mutexes, condvars, etc..
+ */
+ if (_br_child_witnessed_connection(child))
+ return br_launch_scrubber(this, child, fsscan, fsscrub);
+
+ LOCK_INIT(&fsscan->entrylock);
+ pthread_mutex_init(&fsscan->waitlock, NULL);
+ pthread_cond_init(&fsscan->waitcond, NULL);
+
+ fsscan->entries = 0;
+ INIT_LIST_HEAD(&fsscan->queued);
+ INIT_LIST_HEAD(&fsscan->ready);
+
+ ret = br_launch_scrubber(this, child, fsscan, fsscrub);
+ if (ret)
+ goto error_return;
+
+ return 0;
+
+error_return:
+ LOCK_DESTROY(&fsscan->entrylock);
+ pthread_mutex_destroy(&fsscan->waitlock);
+ pthread_cond_destroy(&fsscan->waitcond);
+
+ return -1;
+}
+
+static int32_t
+br_child_enaction(xlator_t *this, br_child_t *child, br_stub_init_t *stub)
+{
+ int32_t ret = -1;
+ br_private_t *priv = this->private;
+
+ pthread_mutex_lock(&child->lock);
+ {
+ if (priv->iamscrubber)
+ ret = br_enact_scrubber(this, child);
+ else
+ ret = br_enact_signer(this, child, stub);
+
+ if (!ret) {
+ child->witnessed = 1;
+ _br_set_child_state(child, BR_CHILD_STATE_CONNECTED);
+ gf_smsg(this->name, GF_LOG_INFO, 0, BRB_MSG_CONNECTED_TO_BRICK,
+ "brick-path=%s", child->brick_path, NULL);
+ }
+ }
+ pthread_mutex_unlock(&child->lock);
+
+ return ret;
+}
+
+/**
+ * This routine fetches various attributes associated with a child which
+ * is basically a subvolume. Attributes include brick path and the stub
+ * birth time. This is done by performing a lookup on the root followed
+ * by getxattr() on a virtual key. Depending on the configuration, the
+ * process either acts as a signer or a scrubber.
+ */
+int32_t
+br_brick_connect(xlator_t *this, br_child_t *child)
+{
+ int32_t ret = -1;
+ loc_t loc = {
+ 0,
+ };
+ struct iatt buf = {
+ 0,
+ };
+ struct iatt parent = {
+ 0,
+ };
+ br_stub_init_t *stub = NULL;
+ dict_t *xattr = NULL;
+ int op_errno = 0;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, child, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+
+ br_child_set_scrub_state(child, _gf_false);
+ br_set_child_state(child, BR_CHILD_STATE_INITIALIZING);
+
+ loc.inode = inode_ref(child->table->root);
+ gf_uuid_copy(loc.gfid, loc.inode->gfid);
+ loc.path = gf_strdup("/");
+
+ ret = syncop_lookup(child->xl, &loc, &buf, &parent, NULL, NULL);
+ if (ret) {
+ op_errno = -ret;
+ ret = -1;
+ gf_smsg(this->name, GF_LOG_ERROR, op_errno, BRB_MSG_LOOKUP_FAILED,
+ NULL);
+ goto wipeloc;
+ }
+
+ ret = syncop_getxattr(child->xl, &loc, &xattr,
+ GLUSTERFS_GET_BR_STUB_INIT_TIME, NULL, NULL);
+ if (ret) {
+ op_errno = -ret;
+ ret = -1;
+ gf_smsg(this->name, GF_LOG_ERROR, op_errno, BRB_MSG_GET_INFO_FAILED,
+ NULL);
+ goto wipeloc;
+ }
+
+ ret = dict_get_ptr(xattr, GLUSTERFS_GET_BR_STUB_INIT_TIME, (void **)&stub);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_GET_INFO_FAILED, NULL);
+ goto free_dict;
+ }
+
+ memcpy(child->brick_path, stub->export, strlen(stub->export) + 1);
+ child->tv.tv_sec = ntohl(stub->timebuf[0]);
+ child->tv.tv_usec = ntohl(stub->timebuf[1]);
+
+ ret = br_child_enaction(this, child, stub);
+
+free_dict:
+ dict_unref(xattr);
+wipeloc:
+ loc_wipe(&loc);
+out:
+ if (ret)
+ br_set_child_state(child, BR_CHILD_STATE_CONNFAILED);
+ return ret;
+}
+
+/* TODO: cleanup signer */
+static int32_t
+br_cleanup_signer(xlator_t *this, br_child_t *child)
+{
+ return 0;
+}
+
+static int32_t
+br_cleanup_scrubber(xlator_t *this, br_child_t *child)
+{
+ int32_t ret = 0;
+ br_private_t *priv = NULL;
+ struct br_scrubber *fsscrub = NULL;
+ struct br_monitor *scrub_monitor = NULL;
+
+ priv = this->private;
+ fsscrub = &priv->fsscrub;
+ scrub_monitor = &priv->scrub_monitor;
+
+ if (_br_is_child_scrub_active(child)) {
+ scrub_monitor->active_child_count--;
+ br_child_set_scrub_state(child, _gf_false);
+ }
+
+ /**
+ * 0x0: child (brick) goes out of rotation
+ *
+ * This is fully safe w.r.t. entries for this child being actively
+ * scrubbed. Each of the scrubber thread(s) would finish scrubbing
+ * the entry (probably failing due to disconnection) and either
+ * putting the entry back into the queue or continuing further.
+ * Either way, pending entries for this child's queue need not be
+ * drained; entries just sit there in the queued/ready list to be
+ * consumed later upon re-connection.
+ */
+ pthread_mutex_lock(&fsscrub->mutex);
+ {
+ list_del_init(&child->list);
+ }
+ pthread_mutex_unlock(&fsscrub->mutex);
+
+ /**
+ * 0x1: cleanup scanner thread
+ *
+ * The pending timer needs to be removed _after_ cleaning up the
+ * filesystem scanner (scheduling the next scrub time is not a
+ * cancellation point).
+ */
+ ret = gf_thread_cleanup_xint(child->thread);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCRUB_THREAD_CLEANUP, NULL);
+
+ gf_smsg(this->name, GF_LOG_INFO, 0, BRB_MSG_SCRUBBER_CLEANED,
+ "brick-path=%s", child->brick_path, NULL);
+
+ return 0;
+}
+
+/**
+ * OK.. this child has made up its mind to go down the drain. So,
+ * let's clean up what it touched. (NOTE: there's no need to clean
+ * the inode table, it's just reused taking care of stale inodes)
+ */
+int32_t
+br_brick_disconnect(xlator_t *this, br_child_t *child)
+{
+ int32_t ret = 0;
+ struct br_monitor *scrub_monitor = NULL;
+ br_private_t *priv = this->private;
+
+ scrub_monitor = &priv->scrub_monitor;
+
+    /* Lock order should be wakelock and then child lock, in order to
+     * avoid deadlocks.
+ */
+ pthread_mutex_lock(&scrub_monitor->wakelock);
+ {
+ pthread_mutex_lock(&child->lock);
+ {
+ if (!_br_is_child_connected(child))
+ goto unblock;
+
+ /* child is on death row.. */
+ _br_set_child_state(child, BR_CHILD_STATE_DISCONNECTED);
+
+ if (priv->iamscrubber)
+ ret = br_cleanup_scrubber(this, child);
+ else
+ ret = br_cleanup_signer(this, child);
+ }
+ unblock:
+ pthread_mutex_unlock(&child->lock);
+ }
+ pthread_mutex_unlock(&scrub_monitor->wakelock);
+
+ return ret;
+}
+
+/**
+ * This function is executed in a separate thread. The thread gets the
+ * brick from where CHILD_UP has received from the queue and gets the
+ * information regarding that brick (such as brick path).
+ */
+void *
+br_handle_events(void *arg)
+{
+ int32_t ret = 0;
+ xlator_t *this = NULL;
+ br_private_t *priv = NULL;
+ br_child_t *child = NULL;
+ struct br_child_event *childev = NULL;
+
+ this = arg;
+ priv = this->private;
+
+ /*
+ * Since, this is the topmost xlator, THIS has to be set by bit-rot
+ * xlator itself (STACK_WIND won't help in this case). Also it has
+ * to be done for each thread that gets spawned. Otherwise, a new
+ * thread will get global_xlator's pointer when it does "THIS".
+ */
+ THIS = this;
+
+ while (1) {
+ pthread_mutex_lock(&priv->lock);
+ {
+ while (list_empty(&priv->bricks))
+ pthread_cond_wait(&priv->cond, &priv->lock);
+
+ childev = list_first_entry(&priv->bricks, struct br_child_event,
+ list);
+ list_del_init(&childev->list);
+ }
+ pthread_mutex_unlock(&priv->lock);
+
+ child = childev->child;
+ ret = childev->call(this, child);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_SUBVOL_CONNECT_FAILED,
+ "name=%s", child->xl->name, NULL);
+ GF_FREE(childev);
+ }
+
+ return NULL;
+}
+
+int32_t
+mem_acct_init(xlator_t *this)
+{
+ int32_t ret = -1;
+
+ if (!this)
+ return ret;
+
+ ret = xlator_mem_acct_init(this, gf_br_stub_mt_end + 1);
+
+ if (ret != 0) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRB_MSG_MEM_ACNT_FAILED, NULL);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void
+_br_qchild_event(xlator_t *this, br_child_t *child, br_child_handler *call)
+{
+ br_private_t *priv = NULL;
+ struct br_child_event *childev = NULL;
+
+ priv = this->private;
+
+ childev = GF_CALLOC(1, sizeof(*childev), gf_br_mt_br_child_event_t);
+ if (!childev) {
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM, BRB_MSG_EVENT_UNHANDLED,
+ "Brick-name=%s", child->xl->name, NULL);
+ return;
+ }
+
+ INIT_LIST_HEAD(&childev->list);
+ childev->this = this;
+ childev->child = child;
+ childev->call = call;
+
+ list_add_tail(&childev->list, &priv->bricks);
+}
+
+int
+br_scrubber_status_get(xlator_t *this, dict_t **dict)
+{
+ int ret = -1;
+ br_private_t *priv = NULL;
+ struct br_scrub_stats *scrub_stats = NULL;
+
+ priv = this->private;
+
+ GF_VALIDATE_OR_GOTO("bit-rot", priv, out);
+
+ scrub_stats = &priv->scrub_stat;
+
+ ret = br_get_bad_objects_list(this, dict);
+ if (ret) {
+ gf_msg_debug(this->name, 0,
+ "Failed to collect corrupt "
+ "files");
+ }
+
+ ret = dict_set_int8(*dict, "scrub-running", scrub_stats->scrub_running);
+ if (ret) {
+ gf_msg_debug(this->name, 0,
+ "Failed setting scrub_running "
+ "entry to the dictionary");
+ }
+
+ ret = dict_set_uint64(*dict, "scrubbed-files", scrub_stats->scrubbed_files);
+ if (ret) {
+        gf_msg_debug(this->name, 0,
+                     "Failed to set scrubbed file "
+                     "entry to the dictionary");
+ }
+
+ ret = dict_set_uint64(*dict, "unsigned-files", scrub_stats->unsigned_files);
+ if (ret) {
+ gf_msg_debug(this->name, 0,
+ "Failed to set unsigned file count"
+ " entry to the dictionary");
+ }
+
+ ret = dict_set_uint64(*dict, "scrub-duration", scrub_stats->scrub_duration);
+ if (ret) {
+ gf_msg_debug(this->name, 0,
+ "Failed to set scrub duration"
+ " entry to the dictionary");
+ }
+
+ ret = dict_set_dynstr_with_alloc(*dict, "last-scrub-time",
+ scrub_stats->last_scrub_time);
+ if (ret) {
+ gf_msg_debug(this->name, 0,
+ "Failed to set "
+ "last scrub time value");
+ }
+
+out:
+ return ret;
+}
+
+int
+notify(xlator_t *this, int32_t event, void *data, ...)
+{
+ int idx = -1;
+ int ret = -1;
+ xlator_t *subvol = NULL;
+ br_child_t *child = NULL;
+ br_private_t *priv = NULL;
+ dict_t *output = NULL;
+ va_list ap;
+ struct br_monitor *scrub_monitor = NULL;
+
+ subvol = (xlator_t *)data;
+ priv = this->private;
+ scrub_monitor = &priv->scrub_monitor;
+
+ gf_msg_trace(this->name, 0, "Notification received: %d", event);
+
+ idx = br_find_child_index(this, subvol);
+
+ switch (event) {
+ case GF_EVENT_CHILD_UP:
+ if (idx < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_INVALID_SUBVOL,
+ "event=%d", event, NULL);
+ goto out;
+ }
+
+ pthread_mutex_lock(&priv->lock);
+ {
+ child = &priv->children[idx];
+ if (child->child_up == 1)
+ goto unblock_0;
+ priv->up_children++;
+
+ child->child_up = 1;
+ child->xl = subvol;
+ if (!child->table)
+ child->table = inode_table_new(4096, subvol);
+
+ _br_qchild_event(this, child, br_brick_connect);
+ pthread_cond_signal(&priv->cond);
+ }
+ unblock_0:
+ pthread_mutex_unlock(&priv->lock);
+
+ if (priv->up_children == priv->child_count)
+ default_notify(this, event, data);
+ break;
+
+ case GF_EVENT_CHILD_DOWN:
+ if (idx < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_INVALID_SUBVOL,
+ "event=%d", event, NULL);
+ goto out;
+ }
+
+ pthread_mutex_lock(&priv->lock);
+ {
+ child = &priv->children[idx];
+ if (child->child_up == 0)
+ goto unblock_1;
+
+ child->child_up = 0;
+ priv->up_children--;
+
+ _br_qchild_event(this, child, br_brick_disconnect);
+ pthread_cond_signal(&priv->cond);
+ }
+ unblock_1:
+ pthread_mutex_unlock(&priv->lock);
+
+ if (priv->up_children == 0)
+ default_notify(this, event, data);
+ break;
+
+ case GF_EVENT_SCRUB_STATUS:
+ gf_msg_debug(this->name, GF_LOG_INFO,
+ "BitRot scrub status "
+ "called");
+ va_start(ap, data);
+ output = va_arg(ap, dict_t *);
+ va_end(ap);
+
+ ret = br_scrubber_status_get(this, &output);
+ gf_msg_debug(this->name, 0, "returning %d", ret);
+ break;
+
+ case GF_EVENT_SCRUB_ONDEMAND:
+ gf_log(this->name, GF_LOG_INFO,
+ "BitRot scrub ondemand "
+ "called");
+
+ if (scrub_monitor->state != BR_SCRUB_STATE_PENDING) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ BRB_MSG_RESCHEDULE_SCRUBBER_FAILED, "Current-state=%d",
+ scrub_monitor->state, NULL);
+ return -2;
+ }
+
+ /* Needs synchronization with reconfigure thread */
+ pthread_mutex_lock(&priv->lock);
+ {
+ ret = br_scrub_state_machine(this, _gf_true);
+ }
+ pthread_mutex_unlock(&priv->lock);
+
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ BRB_MSG_COULD_NOT_SCHEDULE_SCRUB, NULL);
+ }
+ gf_msg_debug(this->name, 0, "returning %d", ret);
+ break;
+ default:
+ default_notify(this, event, data);
+ }
+
+out:
+ return 0;
+}
+
+static void
+br_fini_signer(xlator_t *this, br_private_t *priv)
+{
+ int i = 0;
+
+ if (priv == NULL)
+ return;
+
+ for (; i < priv->signer_th_count; i++) {
+ (void)gf_thread_cleanup_xint(priv->obj_queue->workers[i]);
+ }
+ GF_FREE(priv->obj_queue->workers);
+
+ pthread_cond_destroy(&priv->object_cond);
+}
+
+/**
+ * Initialize signer specific structures, spawn worker threads.
+ */
+
+static int32_t
+br_init_signer(xlator_t *this, br_private_t *priv)
+{
+ int i = 0;
+ int32_t ret = -1;
+
+ /* initialize gfchangelog xlator context */
+ ret = gf_changelog_init(this);
+ if (ret)
+ goto out;
+
+ pthread_cond_init(&priv->object_cond, NULL);
+
+ priv->obj_queue = GF_CALLOC(1, sizeof(*priv->obj_queue),
+ gf_br_mt_br_ob_n_wk_t);
+ if (!priv->obj_queue)
+ goto cleanup_cond;
+ INIT_LIST_HEAD(&priv->obj_queue->objects);
+
+ priv->obj_queue->workers = GF_CALLOC(
+ priv->signer_th_count, sizeof(pthread_t), gf_br_mt_br_worker_t);
+ if (!priv->obj_queue->workers)
+ goto cleanup_obj_queue;
+
+ for (i = 0; i < priv->signer_th_count; i++) {
+ ret = gf_thread_create(&priv->obj_queue->workers[i], NULL,
+ br_process_object, this, "brpobj");
+ if (ret != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, -ret,
+ BRB_MSG_THREAD_CREATION_FAILED, NULL);
+ ret = -1;
+ goto cleanup_threads;
+ }
+ }
+
+ return 0;
+
+cleanup_threads:
+ for (i--; i >= 0; i--) {
+ (void)gf_thread_cleanup_xint(priv->obj_queue->workers[i]);
+ }
+ GF_FREE(priv->obj_queue->workers);
+
+cleanup_obj_queue:
+ GF_FREE(priv->obj_queue);
+
+cleanup_cond:
+ /* that's explicit */
+ pthread_cond_destroy(&priv->object_cond);
+out:
+ return -1;
+}
+
+/**
+ * For signer, only rate limit CPU usage (during hash calculation) when
+ * compiled with -DBR_RATE_LIMIT_SIGNER cflags, else let it run full
+ * throttle.
+ */
+static int32_t
+br_rate_limit_signer(xlator_t *this, int child_count, int numbricks)
+{
+ br_private_t *priv = NULL;
+ tbf_opspec_t spec = {
+ 0,
+ };
+
+ priv = this->private;
+
+ spec.op = TBF_OP_HASH;
+ spec.rate = 0;
+ spec.maxlimit = 0;
+
+ /**
+ * OK. Most implementations of TBF I've come across generate tokens
+ * every second (UML, etc..) and some chose sub-second granularity
+ * (blk-iothrottle cgroups). TBF algorithm itself does not enforce
+ * any logic for choosing generation interval and it seems pretty
+ * logical as one could jack up token count per interval w.r.t.
+ * generation rate.
+ *
+ * Value used here is chosen based on a series of test(s) performed
+ * to balance object signing time and not maxing out on all available
+ * CPU cores. It's obvious to have seconds granularity and jack up
+ * token count per interval, thereby achieving close to similar
+ * results. Let's stick to this as it seems to be working fine for
+ * the set of ops that are throttled.
+ **/
+ spec.token_gen_interval = 600000; /* In usec */
+
+#ifdef BR_RATE_LIMIT_SIGNER
+
+ double contribution = 0;
+ contribution = ((double)1 - ((double)child_count / (double)numbricks));
+ if (contribution == 0)
+ contribution = 1;
+ spec.rate = BR_HASH_CALC_READ_SIZE * contribution;
+ spec.maxlimit = priv->signer_th_count * BR_HASH_CALC_READ_SIZE;
+
+#endif
+
+ if (!spec.rate)
+ gf_smsg(this->name, GF_LOG_INFO, 0, BRB_MSG_RATE_LIMIT_INFO,
+ "FULL THROTTLE", NULL);
+ else
+ gf_smsg(this->name, GF_LOG_INFO, 0, BRB_MSG_RATE_LIMIT_INFO,
+ "tokens/sec-rate=%lu", spec.rate, "maxlimit=%lu", spec.maxlimit,
+ NULL);
+
+ priv->tbf = tbf_init(&spec, 1);
+ return priv->tbf ? 0 : -1;
+}
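
For reference, under BR_RATE_LIMIT_SIGNER the token rate above scales with the share of bricks that are not local: rate = BR_HASH_CALC_READ_SIZE * (1 - child_count/numbricks), and maxlimit = signer_th_count * BR_HASH_CALC_READ_SIZE. A small worked example with assumed values (4 bricks in the volume, 1 brick on this node, 4 signer threads; the numbers are purely illustrative):

    #include <stdio.h>

    #define BR_HASH_CALC_READ_SIZE (128 * 1024)

    int main(void)
    {
        int numbricks = 4;       /* bricks in the volume (assumed) */
        int child_count = 1;     /* bricks served by this daemon (assumed) */
        int signer_th_count = 4; /* signer threads (assumed) */

        double contribution = 1.0 - ((double)child_count / (double)numbricks);
        if (contribution == 0)
            contribution = 1;

        unsigned long rate = BR_HASH_CALC_READ_SIZE * contribution;
        unsigned long maxlimit = signer_th_count * BR_HASH_CALC_READ_SIZE;

        /* prints: rate=98304 tokens/sec, maxlimit=524288 */
        printf("rate=%lu tokens/sec, maxlimit=%lu\n", rate, maxlimit);
        return 0;
    }
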
+
+static int32_t
+br_signer_handle_options(xlator_t *this, br_private_t *priv, dict_t *options)
+{
+ if (options) {
+ GF_OPTION_RECONF("expiry-time", priv->expiry_time, options, uint32,
+ error_return);
+ GF_OPTION_RECONF("signer-threads", priv->signer_th_count, options,
+ uint32, error_return);
+ } else {
+ GF_OPTION_INIT("expiry-time", priv->expiry_time, uint32, error_return);
+ GF_OPTION_INIT("signer-threads", priv->signer_th_count, uint32,
+ error_return);
+ }
+
+ return 0;
+
+error_return:
+ return -1;
+}
+
+static int32_t
+br_signer_init(xlator_t *this, br_private_t *priv)
+{
+ int32_t ret = 0;
+ int numbricks = 0;
+
+ GF_OPTION_INIT("expiry-time", priv->expiry_time, uint32, error_return);
+ GF_OPTION_INIT("brick-count", numbricks, int32, error_return);
+ GF_OPTION_INIT("signer-threads", priv->signer_th_count, uint32,
+ error_return);
+
+ ret = br_rate_limit_signer(this, priv->child_count, numbricks);
+ if (ret)
+ goto error_return;
+
+ ret = br_init_signer(this, priv);
+ if (ret)
+ goto cleanup_tbf;
+
+ return 0;
+
+cleanup_tbf:
+ /* cleanup TBF */
+error_return:
+ return -1;
+}
+
+static void
+br_free_scrubber_monitor(xlator_t *this, br_private_t *priv)
+{
+ struct br_monitor *scrub_monitor = &priv->scrub_monitor;
+
+ if (scrub_monitor->timer) {
+ (void)gf_tw_del_timer(priv->timer_wheel, scrub_monitor->timer);
+
+ GF_FREE(scrub_monitor->timer);
+ scrub_monitor->timer = NULL;
+ }
+
+ (void)gf_thread_cleanup_xint(scrub_monitor->thread);
+
+ /* Clean up cond and mutex variables */
+ pthread_mutex_destroy(&scrub_monitor->mutex);
+ pthread_cond_destroy(&scrub_monitor->cond);
+
+ pthread_mutex_destroy(&scrub_monitor->wakelock);
+ pthread_cond_destroy(&scrub_monitor->wakecond);
+
+ pthread_mutex_destroy(&scrub_monitor->donelock);
+ pthread_cond_destroy(&scrub_monitor->donecond);
+
+ LOCK_DESTROY(&scrub_monitor->lock);
+}
+
+static void
+br_free_children(xlator_t *this, br_private_t *priv, int count)
+{
+ br_child_t *child = NULL;
+
+ for (--count; count >= 0; count--) {
+ child = &priv->children[count];
+ mem_pool_destroy(child->timer_pool);
+ pthread_mutex_destroy(&child->lock);
+ }
+
+ GF_FREE(priv->children);
+ priv->children = NULL;
+}
+
+static int
+br_init_children(xlator_t *this, br_private_t *priv)
+{
+ int i = 0;
+ br_child_t *child = NULL;
+ xlator_list_t *trav = NULL;
+
+ priv->child_count = xlator_subvolume_count(this);
+ priv->children = GF_CALLOC(priv->child_count, sizeof(*priv->children),
+ gf_br_mt_br_child_t);
+ if (!priv->children)
+ goto err;
+
+ trav = this->children;
+ while (trav) {
+ child = &priv->children[i];
+
+ pthread_mutex_init(&child->lock, NULL);
+ child->witnessed = 0;
+
+ br_set_child_state(child, BR_CHILD_STATE_DISCONNECTED);
+
+ child->this = this;
+ child->xl = trav->xlator;
+
+ child->timer_pool = mem_pool_new(struct gf_tw_timer_list, 4096);
+ if (!child->timer_pool) {
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM, BRB_MSG_MEM_POOL_ALLOC,
+ NULL);
+ errno = ENOMEM;
+ goto freechild;
+ }
+
+ INIT_LIST_HEAD(&child->list);
+
+ i++;
+ trav = trav->next;
+ }
+
+ return 0;
+
+freechild:
+ br_free_children(this, priv, i);
+err:
+ return -1;
+}
+
+int32_t
+init(xlator_t *this)
+{
+ int32_t ret = -1;
+ br_private_t *priv = NULL;
+
+ if (!this->children) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_NO_CHILD, NULL);
+ goto out;
+ }
+
+ priv = GF_CALLOC(1, sizeof(*priv), gf_br_mt_br_private_t);
+ if (!priv) {
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM, BRB_MSG_NO_MEMORY, NULL);
+ goto out;
+ }
+
+ GF_OPTION_INIT("scrubber", priv->iamscrubber, bool, free_priv);
+
+ ret = br_init_children(this, priv);
+ if (ret)
+ goto free_priv;
+
+ pthread_mutex_init(&priv->lock, NULL);
+ pthread_cond_init(&priv->cond, NULL);
+
+ INIT_LIST_HEAD(&priv->bricks);
+ INIT_LIST_HEAD(&priv->signing);
+
+ priv->timer_wheel = glusterfs_ctx_tw_get(this->ctx);
+ if (!priv->timer_wheel) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_TIMER_WHEEL_UNAVAILABLE,
+ NULL);
+ goto cleanup;
+ }
+
+ this->private = priv;
+
+ if (!priv->iamscrubber) {
+ ret = br_signer_init(this, priv);
+ if (!ret)
+ ret = br_signer_handle_options(this, priv, NULL);
+ } else {
+ ret = br_scrubber_init(this, priv);
+ if (!ret)
+ ret = br_scrubber_handle_options(this, priv, NULL);
+ }
+
+ if (ret)
+ goto cleanup;
+
+ ret = gf_thread_create(&priv->thread, NULL, br_handle_events, this,
+ "brhevent");
+ if (ret != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, -ret, BRB_MSG_THREAD_CREATION_FAILED,
+ NULL);
+ ret = -1;
+ }
+
+ if (!ret) {
+ gf_smsg(this->name, GF_LOG_INFO, 0, BRB_MSG_BITROT_LOADED, "mode=%s",
+ (priv->iamscrubber) ? "SCRUBBER" : "SIGNER", NULL);
+ return 0;
+ }
+
+cleanup:
+ (void)pthread_cond_destroy(&priv->cond);
+ (void)pthread_mutex_destroy(&priv->lock);
+
+ br_free_children(this, priv, priv->child_count);
+
+free_priv:
+ GF_FREE(priv);
+out:
+ this->private = NULL;
+ return -1;
+}
+
+void
+fini(xlator_t *this)
+{
+ br_private_t *priv = this->private;
+
+ if (!priv)
+ return;
+
+ if (!priv->iamscrubber)
+ br_fini_signer(this, priv);
+ else
+ (void)br_free_scrubber_monitor(this, priv);
+
+ br_free_children(this, priv, priv->child_count);
+
+ this->private = NULL;
+ GF_FREE(priv);
+
+ glusterfs_ctx_tw_put(this->ctx);
+
+ return;
+}
+
+static void
+br_reconfigure_monitor(xlator_t *this)
+{
+ int32_t ret = 0;
+
+ ret = br_scrub_state_machine(this, _gf_false);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRB_MSG_COULD_NOT_SCHEDULE_SCRUB,
+ NULL);
+ }
+}
+
+static int
+br_reconfigure_scrubber(xlator_t *this, dict_t *options)
+{
+ int32_t ret = -1;
+ br_private_t *priv = NULL;
+
+ priv = this->private;
+
+ pthread_mutex_lock(&priv->lock);
+ {
+ ret = br_scrubber_handle_options(this, priv, options);
+ }
+ pthread_mutex_unlock(&priv->lock);
+
+ if (ret)
+ goto err;
+
+ /* change state for all _up_ subvolume(s) */
+ pthread_mutex_lock(&priv->lock);
+ {
+ br_reconfigure_monitor(this);
+ }
+ pthread_mutex_unlock(&priv->lock);
+
+err:
+ return ret;
+}
+
+static int
+br_reconfigure_signer(xlator_t *this, dict_t *options)
+{
+ br_private_t *priv = this->private;
+
+ return br_signer_handle_options(this, priv, options);
+}
+
+int
+reconfigure(xlator_t *this, dict_t *options)
+{
+ int ret = 0;
+ br_private_t *priv = NULL;
+
+ priv = this->private;
+
+ if (priv->iamscrubber)
+ ret = br_reconfigure_scrubber(this, options);
+ else
+ ret = br_reconfigure_signer(this, options);
+
+ return ret;
+}
+
+struct xlator_fops fops;
+
+struct xlator_cbks cbks;
+
+struct volume_options options[] = {
+ {
+ .key = {"expiry-time"},
+ .type = GF_OPTION_TYPE_INT,
+ .default_value = SIGNING_TIMEOUT,
+ .op_version = {GD_OP_VERSION_3_7_0},
+ .flags = OPT_FLAG_SETTABLE,
+ .description = "Time for which an object waits before it is signed",
+ },
+ {
+ .key = {"brick-count"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "Total number of bricks for the current node for "
+ "all volumes in the trusted storage pool.",
+ },
+ {
+ .key = {"scrubber", "scrub"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "false",
+ .op_version = {GD_OP_VERSION_3_7_0},
+ .flags = OPT_FLAG_SETTABLE | OPT_FLAG_FORCE,
+ .description = "option to run as a scrubber",
+ },
+ {
+ .key = {"scrub-throttle"},
+ .type = GF_OPTION_TYPE_STR,
+ .default_value = "lazy",
+ .op_version = {GD_OP_VERSION_3_7_0},
+ .flags = OPT_FLAG_SETTABLE,
+ .description = "Scrub-throttle value is a measure of how fast "
+ "or slow the scrubber scrubs the filesystem for "
+ "volume <VOLNAME>",
+ },
+ {
+ .key = {"scrub-freq"},
+ .type = GF_OPTION_TYPE_STR,
+ .default_value = "biweekly",
+ .op_version = {GD_OP_VERSION_3_7_0},
+ .flags = OPT_FLAG_SETTABLE,
+ .description = "Scrub frequency for volume <VOLNAME>",
+ },
+ {
+ .key = {"scrub-state"},
+ .type = GF_OPTION_TYPE_STR,
+ .default_value = "active",
+ .op_version = {GD_OP_VERSION_4_0_0},
+ .flags = OPT_FLAG_SETTABLE,
+ .description = "Pause/Resume scrub. Upon resume, scrubber "
+ "continues from where it left off.",
+ },
+ {
+ .key = {"signer-threads"},
+ .type = GF_OPTION_TYPE_INT,
+ .default_value = BR_WORKERS,
+ .op_version = {GD_OP_VERSION_8_0},
+ .flags = OPT_FLAG_SETTABLE,
+ .description = "Number of signing process threads. As a best "
+ "practice, set this to the number of processor cores",
+ },
+ {.key = {NULL}},
+};
+
+xlator_api_t xlator_api = {
+ .init = init,
+ .fini = fini,
+ .notify = notify,
+ .reconfigure = reconfigure,
+ .mem_acct_init = mem_acct_init,
+ .op_version = {1}, /* Present from the initial version */
+ .fops = &fops,
+ .cbks = &cbks,
+ .options = options,
+ .identifier = "bit-rot",
+ .category = GF_MAINTAINED,
+};
diff --git a/xlators/features/bit-rot/src/bitd/bit-rot.h b/xlators/features/bit-rot/src/bitd/bit-rot.h
new file mode 100644
index 00000000000..8ac7dcdac3d
--- /dev/null
+++ b/xlators/features/bit-rot/src/bitd/bit-rot.h
@@ -0,0 +1,302 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __BIT_ROT_H__
+#define __BIT_ROT_H__
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/dict.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/syncop.h>
+#include <glusterfs/syncop-utils.h>
+#include "changelog.h"
+#include "timer-wheel.h"
+
+#include <glusterfs/throttle-tbf.h>
+#include "bit-rot-ssm.h"
+
+#include "bit-rot-common.h"
+#include "bit-rot-stub-mem-types.h"
+#include "bit-rot-scrub-status.h"
+
+#include <openssl/sha.h>
+
+typedef enum scrub_throttle {
+ BR_SCRUB_THROTTLE_VOID = -1,
+ BR_SCRUB_THROTTLE_LAZY = 0,
+ BR_SCRUB_THROTTLE_NORMAL = 1,
+ BR_SCRUB_THROTTLE_AGGRESSIVE = 2,
+ BR_SCRUB_THROTTLE_STALLED = 3,
+} scrub_throttle_t;
+
+typedef enum scrub_freq {
+ BR_FSSCRUB_FREQ_HOURLY = 1,
+ BR_FSSCRUB_FREQ_DAILY,
+ BR_FSSCRUB_FREQ_WEEKLY,
+ BR_FSSCRUB_FREQ_BIWEEKLY,
+ BR_FSSCRUB_FREQ_MONTHLY,
+ BR_FSSCRUB_FREQ_MINUTE,
+ BR_FSSCRUB_FREQ_STALLED,
+} scrub_freq_t;
+
+#define signature_size(hl) (sizeof(br_isignature_t) + hl + 1)
+
+struct br_scanfs {
+ gf_lock_t entrylock;
+
+ pthread_mutex_t waitlock;
+ pthread_cond_t waitcond;
+
+ unsigned int entries;
+ struct list_head queued;
+ struct list_head ready;
+};
+
+/* just need three states to track child status */
+typedef enum br_child_state {
+ BR_CHILD_STATE_CONNECTED = 1,
+ BR_CHILD_STATE_INITIALIZING,
+ BR_CHILD_STATE_CONNFAILED,
+ BR_CHILD_STATE_DISCONNECTED,
+} br_child_state_t;
+
+struct br_child {
+ pthread_mutex_t lock; /* protects child state */
+ char witnessed; /* witnessed at least one successful
+ connection */
+ br_child_state_t c_state; /* current state of this child */
+
+ char child_up; /* Indicates whether this child is
+ up or not */
+ xlator_t *xl; /* client xlator corresponding to
+ this child */
+ inode_table_t *table; /* inode table for this child */
+ char brick_path[PATH_MAX]; /* brick export directory of this
+ child */
+ struct list_head list; /* hook to attach to the list of
+ UP children */
+ xlator_t *this; /* Bit rot xlator */
+
+ pthread_t thread; /* initial crawler for unsigned
+ object(s) or scrub crawler */
+ int threadrunning; /* active thread */
+
+ struct mem_pool *timer_pool; /* timer-wheel's timer mem-pool */
+
+ struct timeval tv;
+
+ struct br_scanfs fsscan; /* per subvolume FS scanner */
+
+ gf_boolean_t active_scrubbing; /* Actively scrubbing or not */
+};
+
+typedef struct br_child br_child_t;
+
+struct br_obj_n_workers {
+ struct list_head objects; /* queue of objects expired from the
+ timer wheel and ready to be picked
+ up for signing */
+ pthread_t *workers; /* Threads which pick up the objects
+ from the above queue and start
+ signing each object */
+};
+
+struct br_scrubber {
+ xlator_t *this;
+
+ scrub_throttle_t throttle;
+
+ /**
+ * frequency of scanning for this subvolume. this should
+ * normally be per-child, but since all children follow the
+ * same frequency for a volume, this option ends up here
+ * instead of br_child_t.
+ */
+ scrub_freq_t frequency;
+
+ gf_boolean_t frequency_reconf;
+ gf_boolean_t throttle_reconf;
+
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+
+ unsigned int nr_scrubbers;
+ struct list_head scrubbers;
+
+ /**
+ * list of "rotatable" subvolume(s) undergoing scrubbing
+ */
+ struct list_head scrublist;
+};
+
+struct br_monitor {
+ gf_lock_t lock;
+ pthread_t thread; /* Monitor thread */
+
+ gf_boolean_t inited;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond; /* Thread starts and will be waiting on cond.
+ First child which is up wakes this up */
+
+ xlator_t *this;
+ /* scheduler */
+ uint32_t boot;
+
+ int32_t active_child_count; /* Number of children currently scrubbing */
+ gf_boolean_t kick; /* This variable tracks the scrubber is
+ * kicked or not. Both 'kick' and
+ * 'active_child_count' uses the same pair
+ * of mutex-cond variable, i.e, wakelock and
+ * wakecond. */
+
+ pthread_mutex_t wakelock;
+ pthread_cond_t wakecond;
+
+ gf_boolean_t done;
+ pthread_mutex_t donelock;
+ pthread_cond_t donecond;
+
+ struct gf_tw_timer_list *timer;
+ br_scrub_state_t state; /* current scrub state */
+};
+
+typedef struct br_obj_n_workers br_obj_n_workers_t;
+
+typedef struct br_private br_private_t;
+
+typedef void (*br_scrubbed_file_update)(br_private_t *priv);
+
+struct br_private {
+ pthread_mutex_t lock;
+
+ struct list_head bricks; /* list of bricks from which events
+ have been received */
+
+ struct list_head signing;
+
+ pthread_cond_t object_cond; /* handling signing of objects */
+ int child_count;
+ br_child_t *children; /* list of subvolumes */
+ int up_children;
+
+ pthread_cond_t cond; /* handling CHILD_UP notifications */
+ pthread_t thread; /* thread for connecting each UP
+ child with changelog */
+
+ struct tvec_base *timer_wheel; /* timer wheel where the objects which
+ changelog has sent sit and wait
+ for expiry */
+ br_obj_n_workers_t *obj_queue; /* place holder for all the objects
+ that are expired from timer wheel
+ and ready to be picked up for
+ signing and the workers which sign
+ the objects */
+
+ uint32_t expiry_time; /* objects "wait" time */
+
+ uint32_t signer_th_count; /* Number of signing process threads */
+
+ tbf_t *tbf; /* token bucket filter */
+
+ gf_boolean_t iamscrubber; /* function as a fs scrubber */
+
+ struct br_scrub_stats scrub_stat; /* statistics of scrub*/
+
+ struct br_scrubber fsscrub; /* scrubbers for this subvolume */
+
+ struct br_monitor scrub_monitor; /* scrubber monitor */
+};
+
+struct br_object {
+ xlator_t *this;
+
+ uuid_t gfid;
+
+ unsigned long signedversion; /* version against which this object will
+ be signed */
+ br_child_t *child; /* object's subvolume */
+
+ int sign_info;
+
+ struct list_head list; /* hook to add to the queue once the
+ object is expired from timer wheel */
+ void *data;
+};
+
+typedef struct br_object br_object_t;
+typedef int32_t(br_scrub_ssm_call)(xlator_t *);
+
+void
+br_log_object(xlator_t *, char *, uuid_t, int32_t);
+
+void
+br_log_object_path(xlator_t *, char *, const char *, int32_t);
+
+int32_t
+br_calculate_obj_checksum(unsigned char *, br_child_t *, fd_t *, struct iatt *);
+
+int32_t
+br_prepare_loc(xlator_t *, br_child_t *, loc_t *, gf_dirent_t *, loc_t *);
+
+gf_boolean_t
+bitd_is_bad_file(xlator_t *, br_child_t *, loc_t *, fd_t *);
+
+static inline void
+_br_set_child_state(br_child_t *child, br_child_state_t state)
+{
+ child->c_state = state;
+}
+
+static inline int
+_br_is_child_connected(br_child_t *child)
+{
+ return (child->c_state == BR_CHILD_STATE_CONNECTED);
+}
+
+static inline int
+_br_is_child_scrub_active(br_child_t *child)
+{
+ return child->active_scrubbing;
+}
+
+static inline int
+_br_child_failed_conn(br_child_t *child)
+{
+ return (child->c_state == BR_CHILD_STATE_CONNFAILED);
+}
+
+static inline int
+_br_child_witnessed_connection(br_child_t *child)
+{
+ return (child->witnessed == 1);
+}
+
+/* scrub state */
+static inline void
+_br_monitor_set_scrub_state(struct br_monitor *scrub_monitor,
+ br_scrub_state_t state)
+{
+ scrub_monitor->state = state;
+}
+
+static inline br_scrub_event_t
+_br_child_get_scrub_event(struct br_scrubber *fsscrub)
+{
+ return (fsscrub->frequency == BR_FSSCRUB_FREQ_STALLED)
+ ? BR_SCRUB_EVENT_PAUSE
+ : BR_SCRUB_EVENT_SCHEDULE;
+}
+
+int32_t
+br_get_bad_objects_list(xlator_t *this, dict_t **dict);
+
+#endif /* __BIT_ROT_H__ */
diff --git a/xlators/features/bit-rot/src/stub/Makefile.am b/xlators/features/bit-rot/src/stub/Makefile.am
new file mode 100644
index 00000000000..f13de7145fc
--- /dev/null
+++ b/xlators/features/bit-rot/src/stub/Makefile.am
@@ -0,0 +1,20 @@
+if WITH_SERVER
+xlator_LTLIBRARIES = bitrot-stub.la
+endif
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
+
+bitrot_stub_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+
+bitrot_stub_la_SOURCES = bit-rot-stub-helpers.c bit-rot-stub.c
+bitrot_stub_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+
+noinst_HEADERS = bit-rot-stub.h bit-rot-common.h bit-rot-stub-mem-types.h \
+ bit-rot-object-version.h bit-rot-stub-messages.h
+
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \
+ -I$(top_srcdir)/rpc/rpc-lib/src
+
+AM_CFLAGS = -Wall $(GF_CFLAGS)
+
+CLEANFILES =
diff --git a/xlators/features/bit-rot/src/stub/bit-rot-common.h b/xlators/features/bit-rot/src/stub/bit-rot-common.h
new file mode 100644
index 00000000000..20561aa7764
--- /dev/null
+++ b/xlators/features/bit-rot/src/stub/bit-rot-common.h
@@ -0,0 +1,178 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __BIT_ROT_COMMON_H__
+#define __BIT_ROT_COMMON_H__
+
+#include <glusterfs/glusterfs.h>
+#include "bit-rot-object-version.h"
+
+#define BR_VXATTR_VERSION (1 << 0)
+#define BR_VXATTR_SIGNATURE (1 << 1)
+
+#define BR_VXATTR_SIGN_MISSING (BR_VXATTR_SIGNATURE)
+#define BR_VXATTR_ALL_MISSING (BR_VXATTR_VERSION | BR_VXATTR_SIGNATURE)
+
+#define BR_BAD_OBJ_CONTAINER \
+ (uuid_t) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8 }
+
+typedef enum br_vxattr_state {
+ BR_VXATTR_STATUS_FULL = 0,
+ BR_VXATTR_STATUS_MISSING = 1,
+ BR_VXATTR_STATUS_UNSIGNED = 2,
+ BR_VXATTR_STATUS_INVALID = 3,
+} br_vxattr_status_t;
+
+typedef enum br_sign_state {
+ BR_SIGN_INVALID = -1,
+ BR_SIGN_NORMAL = 0,
+ BR_SIGN_REOPEN_WAIT = 1,
+ BR_SIGN_QUICK = 2,
+} br_sign_state_t;
+
+static inline br_vxattr_status_t
+br_version_xattr_state(dict_t *xattr, br_version_t **obuf,
+ br_signature_t **sbuf, gf_boolean_t *objbad)
+{
+ int32_t ret = 0;
+ int32_t vxattr = 0;
+ br_vxattr_status_t status;
+ void *data = NULL;
+
+ /**
+ * The key being present in the dict indicates the xattr was set on
+ * disk. The presence of the xattr itself, as of now, is sufficient to
+ * say that the object is bad.
+ */
+ *objbad = _gf_false;
+ ret = dict_get_bin(xattr, BITROT_OBJECT_BAD_KEY, (void **)&data);
+ if (!ret)
+ *objbad = _gf_true;
+
+ ret = dict_get_bin(xattr, BITROT_CURRENT_VERSION_KEY, (void **)obuf);
+ if (ret)
+ vxattr |= BR_VXATTR_VERSION;
+
+ ret = dict_get_bin(xattr, BITROT_SIGNING_VERSION_KEY, (void **)sbuf);
+ if (ret)
+ vxattr |= BR_VXATTR_SIGNATURE;
+
+ switch (vxattr) {
+ case 0:
+ status = BR_VXATTR_STATUS_FULL;
+ break;
+ case BR_VXATTR_SIGN_MISSING:
+ status = BR_VXATTR_STATUS_UNSIGNED;
+ break;
+ case BR_VXATTR_ALL_MISSING:
+ status = BR_VXATTR_STATUS_MISSING;
+ break;
+ default:
+ status = BR_VXATTR_STATUS_INVALID;
+ }
+
+ return status;
+}
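+
+/*
+ * For reference, the mapping implemented above works out to (illustrative
+ * summary, derived from the switch on @vxattr):
+ *
+ *   version xattr | signature xattr | returned status
+ *   --------------+-----------------+----------------------------
+ *   present       | present         | BR_VXATTR_STATUS_FULL
+ *   present       | missing         | BR_VXATTR_STATUS_UNSIGNED
+ *   missing       | missing         | BR_VXATTR_STATUS_MISSING
+ *   missing       | present         | BR_VXATTR_STATUS_INVALID
+ *
+ * The bad-object xattr is independent of this and is reported via @objbad.
+ */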
+
+/**
+ * in-memory representation of signature used by signer for object
+ * signing.
+ */
+typedef struct br_isignature_in {
+ int8_t signaturetype; /* signature type */
+
+ unsigned long signedversion; /* version against which the
+ object was signed */
+
+ size_t signaturelen; /* signature length */
+ char signature[0]; /* object signature */
+} br_isignature_t;
+
+/**
+ * in-memory representation of signature used by scrubber for object
+ * verification.
+ */
+typedef struct br_isignature_out {
+ char stale; /* stale signature? */
+
+ unsigned long version; /* current signed version */
+
+ uint32_t time[2]; /* time when the object
+ got dirtied */
+
+ int8_t signaturetype; /* hash type */
+ size_t signaturelen; /* signature length */
+ char signature[0]; /* signature (hash) */
+} br_isignature_out_t;
+
+typedef struct br_stub_init {
+ uint32_t timebuf[2];
+ char export[PATH_MAX];
+} br_stub_init_t;
+
+typedef enum {
+ BR_SIGNATURE_TYPE_VOID = -1, /* object is not signed */
+ BR_SIGNATURE_TYPE_ZERO = 0, /* min boundary */
+ BR_SIGNATURE_TYPE_SHA256 = 1, /* signed with SHA256 */
+ BR_SIGNATURE_TYPE_MAX = 2, /* max boundary */
+} br_signature_type;
+
+/* BitRot stub start time (virtual xattr) */
+#define GLUSTERFS_GET_BR_STUB_INIT_TIME "trusted.glusterfs.bit-rot.stub-init"
+
+/* signing/reopen hint */
+#define BR_OBJECT_RESIGN 0
+#define BR_OBJECT_REOPEN 1
+#define BR_REOPEN_SIGN_HINT_KEY "trusted.glusterfs.bit-rot.reopen-hint"
+
+static inline int
+br_is_signature_type_valid(int8_t signaturetype)
+{
+ return ((signaturetype > BR_SIGNATURE_TYPE_ZERO) &&
+ (signaturetype < BR_SIGNATURE_TYPE_MAX));
+}
+
+static inline void
+br_set_default_ongoingversion(br_version_t *buf, uint32_t *tv)
+{
+ buf->ongoingversion = BITROT_DEFAULT_CURRENT_VERSION;
+ buf->timebuf[0] = tv[0];
+ buf->timebuf[1] = tv[1];
+}
+
+static inline void
+br_set_default_signature(br_signature_t *buf, size_t *size)
+{
+ buf->signaturetype = (int8_t)BR_SIGNATURE_TYPE_VOID;
+ buf->signedversion = BITROT_DEFAULT_SIGNING_VERSION;
+
+ *size = sizeof(br_signature_t); /* no signature */
+}
+
+static inline void
+br_set_ongoingversion(br_version_t *buf, unsigned long version, uint32_t *tv)
+{
+ buf->ongoingversion = version;
+ buf->timebuf[0] = tv[0];
+ buf->timebuf[1] = tv[1];
+}
+
+static inline void
+br_set_signature(br_signature_t *buf, br_isignature_t *sign,
+ size_t signaturelen, size_t *size)
+{
+ buf->signaturetype = sign->signaturetype;
+ buf->signedversion = ntohl(sign->signedversion);
+
+ memcpy(buf->signature, sign->signature, signaturelen);
+ *size = sizeof(br_signature_t) + signaturelen;
+}
+
+#endif /* __BIT_ROT_COMMON_H__ */
diff --git a/xlators/features/bit-rot/src/stub/bit-rot-object-version.h b/xlators/features/bit-rot/src/stub/bit-rot-object-version.h
new file mode 100644
index 00000000000..7ae6a5200df
--- /dev/null
+++ b/xlators/features/bit-rot/src/stub/bit-rot-object-version.h
@@ -0,0 +1,30 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __BIT_ROT_OBJECT_VERSION_H
+#define __BIT_ROT_OBJECT_VERSION_H
+
+/**
+ * on-disk formats for ongoing version and object signature.
+ */
+typedef struct br_version {
+ unsigned long ongoingversion;
+ uint32_t timebuf[2];
+} br_version_t;
+
+typedef struct __attribute__((__packed__)) br_signature {
+ int8_t signaturetype;
+
+ unsigned long signedversion;
+
+ char signature[0];
+} br_signature_t;
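+
+/*
+ * Rough sketch (illustrative, not part of the change): for a signed object,
+ * the two on-disk xattrs decode into these structures roughly as
+ *
+ *   BITROT_CURRENT_VERSION_KEY -> br_version_t
+ *       { .ongoingversion = N, .timebuf = { <sec>, <usec> } }
+ *   BITROT_SIGNING_VERSION_KEY -> br_signature_t
+ *       { .signaturetype = BR_SIGNATURE_TYPE_SHA256, .signedversion = N,
+ *         .signature = <sha256 checksum bytes> }
+ *
+ * where the signed version can be compared against the ongoing version to
+ * decide whether the signature is stale.
+ */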
+
+#endif
diff --git a/xlators/features/bit-rot/src/stub/bit-rot-stub-helpers.c b/xlators/features/bit-rot/src/stub/bit-rot-stub-helpers.c
new file mode 100644
index 00000000000..8ac13a09941
--- /dev/null
+++ b/xlators/features/bit-rot/src/stub/bit-rot-stub-helpers.c
@@ -0,0 +1,796 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "bit-rot-stub.h"
+
+br_stub_fd_t *
+br_stub_fd_new(void)
+{
+ br_stub_fd_t *br_stub_fd = NULL;
+
+ br_stub_fd = GF_CALLOC(1, sizeof(*br_stub_fd), gf_br_stub_mt_br_stub_fd_t);
+
+ return br_stub_fd;
+}
+
+int
+__br_stub_fd_ctx_set(xlator_t *this, fd_t *fd, br_stub_fd_t *br_stub_fd)
+{
+ uint64_t value = 0;
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO("bit-rot-stub", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, fd, out);
+ GF_VALIDATE_OR_GOTO(this->name, br_stub_fd, out);
+
+ value = (uint64_t)(long)br_stub_fd;
+
+ ret = __fd_ctx_set(fd, this, value);
+
+out:
+ return ret;
+}
+
+br_stub_fd_t *
+__br_stub_fd_ctx_get(xlator_t *this, fd_t *fd)
+{
+ br_stub_fd_t *br_stub_fd = NULL;
+ uint64_t value = 0;
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO("bit-rot-stub", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, fd, out);
+
+ ret = __fd_ctx_get(fd, this, &value);
+ if (ret)
+ return NULL;
+
+ br_stub_fd = (br_stub_fd_t *)((long)value);
+
+out:
+ return br_stub_fd;
+}
+
+br_stub_fd_t *
+br_stub_fd_ctx_get(xlator_t *this, fd_t *fd)
+{
+ br_stub_fd_t *br_stub_fd = NULL;
+
+ GF_VALIDATE_OR_GOTO("bit-rot-stub", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, fd, out);
+
+ LOCK(&fd->lock);
+ {
+ br_stub_fd = __br_stub_fd_ctx_get(this, fd);
+ }
+ UNLOCK(&fd->lock);
+
+out:
+ return br_stub_fd;
+}
+
+int32_t
+br_stub_fd_ctx_set(xlator_t *this, fd_t *fd, br_stub_fd_t *br_stub_fd)
+{
+ int32_t ret = -1;
+
+ GF_VALIDATE_OR_GOTO("bit-rot-stub", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, fd, out);
+ GF_VALIDATE_OR_GOTO(this->name, br_stub_fd, out);
+
+ LOCK(&fd->lock);
+ {
+ ret = __br_stub_fd_ctx_set(this, fd, br_stub_fd);
+ }
+ UNLOCK(&fd->lock);
+
+out:
+ return ret;
+}
+
+/**
+ * Adds an entry to the bad objects directory.
+ * @gfid: gfid of the bad object being added to the bad objects directory
+ */
+int
+br_stub_add(xlator_t *this, uuid_t gfid)
+{
+ char gfid_path[BR_PATH_MAX_PLUS] = {0};
+ char bad_gfid_path[BR_PATH_MAX_PLUS] = {0};
+ int ret = 0;
+ br_stub_private_t *priv = NULL;
+ struct stat st = {0};
+
+ priv = this->private;
+ GF_ASSERT_AND_GOTO_WITH_ERROR(this->name, !gf_uuid_is_null(gfid), out,
+ errno, EINVAL);
+
+ snprintf(gfid_path, sizeof(gfid_path), "%s/%s", priv->stub_basepath,
+ uuid_utoa(gfid));
+
+ ret = sys_stat(gfid_path, &st);
+ if (!ret)
+ goto out;
+ snprintf(bad_gfid_path, sizeof(bad_gfid_path), "%s/stub-%s",
+ priv->stub_basepath, uuid_utoa(priv->bad_object_dir_gfid));
+
+ ret = sys_link(bad_gfid_path, gfid_path);
+ if (ret) {
+ if ((errno != ENOENT) && (errno != EMLINK) && (errno != EEXIST))
+ goto out;
+
+ /*
+ * Continue with success. At least we'll have half of the
+ * functionality, in the sense that the object is marked bad and
+ * would be inaccessible. It's only scrub status that would
+ * report a smaller number of objects. That's fine, as the log
+ * files will carry the missing information.
+ */
+ gf_smsg(this->name, GF_LOG_WARNING, errno, BRS_MSG_LINK_FAIL, "gfid=%s",
+ uuid_utoa(gfid), NULL);
+ }
+
+ return 0;
+out:
+ return -1;
+}
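+
+/*
+ * Illustrative on-disk layout (a sketch; the brick path is hypothetical):
+ * with a brick exported at /bricks/b1, marking two objects bad leaves the
+ * quarantine directory looking like
+ *
+ *   /bricks/b1/.glusterfs/quarantine/stub-00000000-0000-0000-0000-000000000008
+ *   /bricks/b1/.glusterfs/quarantine/<gfid-of-bad-object-1>
+ *   /bricks/b1/.glusterfs/quarantine/<gfid-of-bad-object-2>
+ *
+ * where each gfid entry is a hard link to the zero-byte "stub-" file, so
+ * simply listing the directory enumerates the corrupted objects.
+ */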
+
+int
+br_stub_del(xlator_t *this, uuid_t gfid)
+{
+ int32_t op_errno __attribute__((unused)) = 0;
+ br_stub_private_t *priv = NULL;
+ int ret = 0;
+ char gfid_path[BR_PATH_MAX_PLUS] = {0};
+
+ priv = this->private;
+ GF_ASSERT_AND_GOTO_WITH_ERROR(this->name, !gf_uuid_is_null(gfid), out,
+ op_errno, EINVAL);
+ snprintf(gfid_path, sizeof(gfid_path), "%s/%s", priv->stub_basepath,
+ uuid_utoa(gfid));
+ ret = sys_unlink(gfid_path);
+ if (ret && (errno != ENOENT)) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, BRS_MSG_BAD_OBJ_UNLINK_FAIL,
+ "path=%s", gfid_path, NULL);
+ ret = -errno;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static int
+br_stub_check_stub_directory(xlator_t *this, char *fullpath)
+{
+ int ret = 0;
+ struct stat st = {
+ 0,
+ };
+ char oldpath[BR_PATH_MAX_PLUS] = {0};
+ br_stub_private_t *priv = NULL;
+
+ priv = this->private;
+
+ snprintf(oldpath, sizeof(oldpath), "%s/%s", priv->export,
+ OLD_BR_STUB_QUARANTINE_DIR);
+
+ ret = sys_stat(fullpath, &st);
+ if (!ret && !S_ISDIR(st.st_mode))
+ goto error_return;
+ if (ret) {
+ if (errno != ENOENT)
+ goto error_return;
+ ret = sys_stat(oldpath, &st);
+ if (ret)
+ ret = mkdir_p(fullpath, 0600, _gf_true);
+ else
+ ret = sys_rename(oldpath, fullpath);
+ }
+
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, errno, BRS_MSG_BAD_OBJECT_DIR_FAIL,
+ "create-path=%s", fullpath, NULL);
+ return ret;
+
+error_return:
+ gf_smsg(this->name, GF_LOG_ERROR, errno, BRS_MSG_BAD_OBJECT_DIR_FAIL,
+ "verify-path=%s", fullpath, NULL);
+ return -1;
+}
+
+/**
+ * Function to create the container for the bad objects within the bad objects
+ * directory.
+ */
+static int
+br_stub_check_stub_file(xlator_t *this, char *path)
+{
+ int ret = 0;
+ int fd = -1;
+ struct stat st = {
+ 0,
+ };
+
+ ret = sys_stat(path, &st);
+ if (!ret && !S_ISREG(st.st_mode))
+ goto error_return;
+ if (ret) {
+ if (errno != ENOENT)
+ goto error_return;
+ fd = sys_creat(path, 0);
+ if (fd < 0)
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ BRS_MSG_BAD_OBJECT_DIR_FAIL, "create-path=%s", path, NULL);
+ }
+
+ if (fd >= 0) {
+ sys_close(fd);
+ ret = 0;
+ }
+
+ return ret;
+
+error_return:
+ gf_smsg(this->name, GF_LOG_ERROR, errno, BRS_MSG_BAD_OBJECT_DIR_FAIL,
+ "verify-path=%s", path, NULL);
+ return -1;
+}
+
+int
+br_stub_dir_create(xlator_t *this, br_stub_private_t *priv)
+{
+ int ret = -1;
+ char fullpath[BR_PATH_MAX_PLUS] = {
+ 0,
+ };
+ char stub_gfid_path[BR_PATH_MAX_PLUS] = {
+ 0,
+ };
+
+ gf_uuid_copy(priv->bad_object_dir_gfid, BR_BAD_OBJ_CONTAINER);
+
+ if (snprintf(fullpath, sizeof(fullpath), "%s", priv->stub_basepath) >=
+ sizeof(fullpath))
+ goto out;
+
+ if (snprintf(stub_gfid_path, sizeof(stub_gfid_path), "%s/stub-%s",
+ priv->stub_basepath, uuid_utoa(priv->bad_object_dir_gfid)) >=
+ sizeof(stub_gfid_path))
+ goto out;
+
+ ret = br_stub_check_stub_directory(this, fullpath);
+ if (ret)
+ goto out;
+ ret = br_stub_check_stub_file(this, stub_gfid_path);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ return -1;
+}
+
+call_stub_t *
+__br_stub_dequeue(struct list_head *callstubs)
+{
+ call_stub_t *stub = NULL;
+
+ if (!list_empty(callstubs)) {
+ stub = list_entry(callstubs->next, call_stub_t, list);
+ list_del_init(&stub->list);
+ }
+
+ return stub;
+}
+
+void
+__br_stub_enqueue(struct list_head *callstubs, call_stub_t *stub)
+{
+ list_add_tail(&stub->list, callstubs);
+}
+
+void
+br_stub_worker_enqueue(xlator_t *this, call_stub_t *stub)
+{
+ br_stub_private_t *priv = NULL;
+
+ priv = this->private;
+ pthread_mutex_lock(&priv->container.bad_lock);
+ {
+ __br_stub_enqueue(&priv->container.bad_queue, stub);
+ pthread_cond_signal(&priv->container.bad_cond);
+ }
+ pthread_mutex_unlock(&priv->container.bad_lock);
+}
+
+void *
+br_stub_worker(void *data)
+{
+ br_stub_private_t *priv = NULL;
+ xlator_t *this = NULL;
+ call_stub_t *stub = NULL;
+
+ THIS = data;
+ this = data;
+ priv = this->private;
+
+ for (;;) {
+ pthread_mutex_lock(&priv->container.bad_lock);
+ {
+ while (list_empty(&priv->container.bad_queue)) {
+ (void)pthread_cond_wait(&priv->container.bad_cond,
+ &priv->container.bad_lock);
+ }
+
+ stub = __br_stub_dequeue(&priv->container.bad_queue);
+ }
+ pthread_mutex_unlock(&priv->container.bad_lock);
+
+ if (stub) /* guard against spurious wakeups */
+ call_resume(stub);
+ }
+
+ return NULL;
+}
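+
+/*
+ * Flow sketch (illustrative): a fop that needs to touch the quarantine
+ * directory wraps its continuation in a call stub and hands it to
+ * br_stub_worker_enqueue(), which signals bad_cond. br_stub_worker()
+ * wakes up, dequeues the stub and runs it via call_resume(), so the work
+ * of recording bad objects happens in this dedicated thread instead of
+ * blocking the fop path.
+ */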
+
+int32_t
+br_stub_lookup_wrapper(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ dict_t *xattr_req)
+{
+ br_stub_private_t *priv = NULL;
+ struct stat lstatbuf = {0};
+ int ret = 0;
+ int32_t op_errno = EINVAL;
+ int32_t op_ret = -1;
+ struct iatt stbuf = {
+ 0,
+ };
+ struct iatt postparent = {
+ 0,
+ };
+ dict_t *xattr = NULL;
+ gf_boolean_t ver_enabled = _gf_false;
+
+ BR_STUB_VER_ENABLED_IN_CALLPATH(frame, ver_enabled);
+ priv = this->private;
+ BR_STUB_VER_COND_GOTO(priv, (!ver_enabled), done);
+
+ VALIDATE_OR_GOTO(loc, done);
+ if (gf_uuid_compare(loc->gfid, priv->bad_object_dir_gfid))
+ goto done;
+
+ ret = sys_lstat(priv->stub_basepath, &lstatbuf);
+ if (ret) {
+ gf_msg_debug(this->name, errno,
+ "Stat failed on stub bad "
+ "object dir");
+ op_errno = errno;
+ goto done;
+ } else if (!S_ISDIR(lstatbuf.st_mode)) {
+ gf_msg_debug(this->name, errno,
+ "bad object container is not "
+ "a directory");
+ op_errno = ENOTDIR;
+ goto done;
+ }
+
+ iatt_from_stat(&stbuf, &lstatbuf);
+ gf_uuid_copy(stbuf.ia_gfid, priv->bad_object_dir_gfid);
+
+ op_ret = op_errno = 0;
+ xattr = dict_new();
+ if (!xattr) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ }
+
+done:
+ STACK_UNWIND_STRICT(lookup, frame, op_ret, op_errno, loc->inode, &stbuf,
+ xattr, &postparent);
+ if (xattr)
+ dict_unref(xattr);
+ return 0;
+}
+
+static int
+is_bad_gfid_file_current(char *filename, uuid_t gfid)
+{
+ char current_stub_gfid[GF_UUID_BUF_SIZE + 16] = {
+ 0,
+ };
+
+ snprintf(current_stub_gfid, sizeof current_stub_gfid, "stub-%s",
+ uuid_utoa(gfid));
+ return (!strcmp(filename, current_stub_gfid));
+}
+
+static void
+check_delete_stale_bad_file(xlator_t *this, char *filename)
+{
+ int ret = 0;
+ struct stat st = {0};
+ char filepath[BR_PATH_MAX_PLUS] = {0};
+ br_stub_private_t *priv = NULL;
+
+ priv = this->private;
+
+ if (is_bad_gfid_file_current(filename, priv->bad_object_dir_gfid))
+ return;
+
+ snprintf(filepath, sizeof(filepath), "%s/%s", priv->stub_basepath,
+ filename);
+
+ ret = sys_stat(filepath, &st);
+ if (!ret && st.st_nlink == 1)
+ sys_unlink(filepath);
+}
+
+static int
+br_stub_fill_readdir(fd_t *fd, br_stub_fd_t *fctx, DIR *dir, off_t off,
+ size_t size, gf_dirent_t *entries)
+{
+ off_t in_case = -1;
+ off_t last_off = 0;
+ size_t filled = 0;
+ int count = 0;
+ int32_t this_size = -1;
+ gf_dirent_t *this_entry = NULL;
+ xlator_t *this = NULL;
+ struct dirent *entry = NULL;
+ struct dirent scratch[2] = {
+ {
+ 0,
+ },
+ };
+
+ this = THIS;
+ if (!off) {
+ rewinddir(dir);
+ } else {
+ seekdir(dir, off);
+#ifndef GF_LINUX_HOST_OS
+ if ((u_long)telldir(dir) != off && off != fctx->bad_object.dir_eof) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0,
+ BRS_MSG_BAD_OBJECT_DIR_SEEK_FAIL, "off=(0x%llx)", off,
+ "dir=%p", dir, NULL);
+ errno = EINVAL;
+ count = -1;
+ goto out;
+ }
+#endif /* GF_LINUX_HOST_OS */
+ }
+
+ while (filled <= size) {
+ in_case = (u_long)telldir(dir);
+
+ if (in_case == -1) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0,
+ BRS_MSG_BAD_OBJECT_DIR_TELL_FAIL, "dir=%p", dir, "err=%s",
+ strerror(errno), NULL);
+ goto out;
+ }
+
+ errno = 0;
+ entry = sys_readdir(dir, scratch);
+ if (!entry || errno != 0) {
+ if (errno == EBADF) {
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0,
+ BRS_MSG_BAD_OBJECT_DIR_READ_FAIL, "dir=%p", dir,
+ "err=%s", strerror(errno), NULL);
+ goto out;
+ }
+ break;
+ }
+
+ if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, ".."))
+ continue;
+
+ if (!strncmp(entry->d_name, "stub-", strlen("stub-"))) {
+ check_delete_stale_bad_file(this, entry->d_name);
+ continue;
+ }
+
+ this_size = max(sizeof(gf_dirent_t), sizeof(gfs3_dirplist)) +
+ strlen(entry->d_name) + 1;
+
+ if (this_size + filled > size) {
+ seekdir(dir, in_case);
+#ifndef GF_LINUX_HOST_OS
+ if ((u_long)telldir(dir) != in_case &&
+ in_case != fctx->bad_object.dir_eof) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0,
+ BRS_MSG_BAD_OBJECT_DIR_SEEK_FAIL, "in_case=(0x%llx)",
+ in_case, "dir=%p", dir, NULL);
+ errno = EINVAL;
+ count = -1;
+ goto out;
+ }
+#endif /* GF_LINUX_HOST_OS */
+ break;
+ }
+
+ this_entry = gf_dirent_for_name(entry->d_name);
+
+ if (!this_entry) {
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0,
+ BRS_MSG_CREATE_GF_DIRENT_FAILED, "entry-name=%s",
+ entry->d_name, "err=%s", strerror(errno), NULL);
+ goto out;
+ }
+ /*
+ * we store the offset of the next entry here, which is
+ * probably not intended, but code using syncop_readdir()
+ * (glfs-heal.c, afr-self-heald.c, pump.c) relies on it
+ * for directory read resumption.
+ */
+ last_off = (u_long)telldir(dir);
+ this_entry->d_off = last_off;
+ this_entry->d_ino = entry->d_ino;
+
+ list_add_tail(&this_entry->list, &entries->list);
+
+ filled += this_size;
+ count++;
+ }
+
+ if ((!sys_readdir(dir, scratch) && (errno == 0))) {
+ /* Indicate EOF */
+ errno = ENOENT;
+ /* Remember EOF offset for later detection */
+ fctx->bad_object.dir_eof = last_off;
+ }
+out:
+ return count;
+}
+
+int32_t
+br_stub_readdir_wrapper(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ size_t size, off_t off, dict_t *xdata)
+{
+ br_stub_fd_t *fctx = NULL;
+ DIR *dir = NULL;
+ int ret = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = 0;
+ int count = 0;
+ gf_dirent_t entries;
+ gf_boolean_t xdata_unref = _gf_false;
+ dict_t *dict = NULL;
+
+ INIT_LIST_HEAD(&entries.list);
+
+ fctx = br_stub_fd_ctx_get(this, fd);
+ if (!fctx) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_GET_FD_CONTEXT_FAILED,
+ "fd=%p", fd, NULL);
+ op_errno = -ret;
+ goto done;
+ }
+
+ dir = fctx->bad_object.dir;
+
+ if (!dir) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_BAD_HANDLE_DIR_NULL,
+ "fd=%p", fd, NULL);
+ op_errno = EINVAL;
+ goto done;
+ }
+
+ count = br_stub_fill_readdir(fd, fctx, dir, off, size, &entries);
+
+ /* pick ENOENT to indicate EOF */
+ op_errno = errno;
+ op_ret = count;
+
+ dict = xdata;
+ (void)br_stub_bad_objects_path(this, fd, &entries, &dict);
+ if (!xdata && dict) {
+ xdata = dict;
+ xdata_unref = _gf_true;
+ }
+
+done:
+ STACK_UNWIND_STRICT(readdir, frame, op_ret, op_errno, &entries, xdata);
+ gf_dirent_free(&entries);
+ if (xdata_unref)
+ dict_unref(xdata);
+ return 0;
+}
+
+/**
+ * This function is called mainly to obtain the paths of the corrupt
+ * objects (files as of now). Currently scrub status prints only the
+ * gfid of the corrupted files. The reason is that bitrot-stub maintains
+ * the list of corrupted objects as entries inside the quarantine
+ * directory (<brick export>/.glusterfs/quarantine).
+ *
+ * The name of each entry in the quarantine directory is the gfid
+ * of the corrupted object, so scrub status can only show that much.
+ * But it helps the users a lot if the actual path to the object is
+ * also reported. Hence the function below to get that information.
+ * The function allocates a new dict to be returned (if it does not
+ * get one from the caller of readdir i.e. scrubber as of now), and
+ * stores the paths of each corrupted gfid there. The gfid is used as
+ * the key and path is used as the value.
+ *
+ * NOTE: The path will be there in following situations
+ * 1) gfid2path option has been enabled (posix xlator option)
+ * and the corrupted file contains the path as an extended
+ * attribute.
+ * 2) If the gfid2path option is not enabled, OR if the xattr
+ * is absent, then the inode table should have it.
+ * The path will be there if a name based lookup has happened
+ * on the file which has been corrupted. With the lookup, an inode and
+ * dentry would be created in the inode table, and the path is
+ * constructed using the in-memory inode and dentry. If a lookup
+ * has not happened, OR the inode corresponding to the corrupted
+ * file does not exist in the inode table (because it got purged
+ * when the lru limit of inodes was exceeded), OR a nameless lookup
+ * happened to populate the inode in the inode table, then the
+ * path will not be printed by scrub and only the gfid will be there.
+ **/
+int
+br_stub_bad_objects_path(xlator_t *this, fd_t *fd, gf_dirent_t *entries,
+ dict_t **dict)
+{
+ gf_dirent_t *entry = NULL;
+ inode_t *inode = NULL;
+ char *hpath = NULL;
+ uuid_t gfid = {0};
+ int ret = -1;
+ dict_t *tmp_dict = NULL;
+ char str_gfid[64] = {0};
+
+ if (list_empty(&entries->list))
+ return 0;
+
+ tmp_dict = *dict;
+
+ if (!tmp_dict) {
+ tmp_dict = dict_new();
+ /*
+ * If the allocation of the dict fails then there is no need to
+ * treat it as an error. This path (or function) is executed when
+ * "gluster volume bitrot <volume name> scrub status" is
+ * executed, to get the list of the corrupted objects.
+ * And the motive of this function is to get the paths of
+ * the corrupted objects. If the dict allocation fails, then
+ * the scrub status will only show the gfids of those corrupted
+ * objects (which is the behavior as of the time of this patch
+ * being worked upon). So just return and only the gfids will
+ * be shown.
+ */
+ if (!tmp_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_ALLOC_FAILED, NULL);
+ goto out;
+ }
+ }
+
+ list_for_each_entry(entry, &entries->list, list)
+ {
+ gf_uuid_clear(gfid);
+ gf_uuid_parse(entry->d_name, gfid);
+
+ inode = inode_find(fd->inode->table, gfid);
+
+ /* No need to check the return value here,
+ * because @hpath is examined below.
+ */
+ (void)br_stub_get_path_of_gfid(this, fd->inode, inode, gfid, &hpath);
+
+ if (hpath) {
+ gf_msg_debug(this->name, 0,
+ "path of the corrupted "
+ "object (gfid: %s) is %s",
+ uuid_utoa(gfid), hpath);
+ br_stub_entry_xattr_fill(this, hpath, entry, tmp_dict);
+ } else
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_PATH_GET_FAILED,
+ "gfid=%s", uuid_utoa_r(gfid, str_gfid), NULL);
+
+ inode = NULL;
+ hpath = NULL;
+ }
+
+ ret = 0;
+ *dict = tmp_dict;
+
+out:
+ return ret;
+}
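+
+/*
+ * Illustrative result (a sketch; the path is hypothetical): for a corrupted
+ * file that had been looked up by name as /dir1/file1, the dict handed back
+ * to the scrubber would carry
+ *
+ *   key   : "<gfid-of-file1>"   (the quarantine entry name, entry->d_name)
+ *   value : "/dir1/file1"       (path resolved via br_stub_get_path_of_gfid)
+ *
+ * Entries whose path could not be resolved get no key in the dict, so scrub
+ * status falls back to printing only the gfid for them.
+ */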
+
+int
+br_stub_get_path_of_gfid(xlator_t *this, inode_t *parent, inode_t *inode,
+ uuid_t gfid, char **path)
+{
+ int32_t ret = -1;
+ char gfid_str[64] = {0};
+
+ GF_VALIDATE_OR_GOTO("bitrot-stub", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, parent, out);
+ GF_VALIDATE_OR_GOTO(this->name, path, out);
+
+ /* There is no need to validate @inode for the hard resolution above,
+ * because the inode can be NULL; in that case syncop_gfid_to_path_hard
+ * will allocate a new inode and proceed. We need @inode only to send a
+ * syncop_getxattr call from inside syncop_gfid_to_path_hard, and
+ * getxattr fetches the path from the backend.
+ */
+
+ ret = syncop_gfid_to_path_hard(parent->table, FIRST_CHILD(this), gfid,
+ inode, path, _gf_true);
+ if (ret < 0)
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_PATH_GET_FAILED,
+ "gfid=%s", uuid_utoa_r(gfid, gfid_str), NULL);
+
+ /*
+ * Try soft resolution of the path if hard resolution fails, because
+ * checking the on-disk xattr to get the path of an inode (or gfid)
+ * depends on whether that option is enabled in the posix xlator.
+ * If it is not enabled, then hard resolution by checking the on-disk
+ * xattr fails.
+ *
+ * Thus, in such situations, fall back to soft resolution, which
+ * mainly depends on the inode_path() function. For inode_path() to be
+ * usable, @inode has to be linked, i.e. a successful lookup should
+ * have happened on the gfid (or the path) to link the inode into the
+ * inode table. If @inode is NULL, the inode has not been found in the
+ * inode table and it is better not to call inode_path() on an inode
+ * which has not been linked.
+ */
+ if (ret < 0 && inode) {
+ ret = syncop_gfid_to_path_hard(parent->table, FIRST_CHILD(this), gfid,
+ inode, path, _gf_false);
+ if (ret < 0)
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_PATH_GET_FAILED,
+ "from-memory gfid=%s", uuid_utoa_r(gfid, gfid_str), NULL);
+ }
+
+out:
+ return ret;
+}
+
+/**
+ * NOTE: If the file has multiple hardlinks (in gluster volume
+ * namespace), the path would be one of the hardlinks. It's up to
+ * the user to find the remaining hardlinks (using find -samefile)
+ * and remove them.
+ **/
+void
+br_stub_entry_xattr_fill(xlator_t *this, char *hpath, gf_dirent_t *entry,
+ dict_t *dict)
+{
+ int32_t ret = -1;
+
+ GF_VALIDATE_OR_GOTO("bit-rot-stub", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, hpath, out);
+
+ /*
+ * Use the entry->d_name (which is nothing but the gfid of the
+ * corrupted object) as the key. And the value will be the actual
+ * path of that object (or file).
+ *
+ * Also ignore the dict_set errors. The scrubber will get the gfid of
+ * the corrupted object for sure. So, for now, let's just log the
+ * dict_set_dynstr failure and move on.
+ */
+
+ ret = dict_set_dynstr(dict, entry->d_name, hpath);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_DICT_SET_FAILED,
+ "path=%s", hpath, "object-name=%s", entry->d_name, NULL);
+out:
+ return;
+}
diff --git a/xlators/features/bit-rot/src/stub/bit-rot-stub-mem-types.h b/xlators/features/bit-rot/src/stub/bit-rot-stub-mem-types.h
new file mode 100644
index 00000000000..9d93caf069f
--- /dev/null
+++ b/xlators/features/bit-rot/src/stub/bit-rot-stub-mem-types.h
@@ -0,0 +1,36 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _BR_MEM_TYPES_H
+#define _BR_MEM_TYPES_H
+
+#include <glusterfs/mem-types.h>
+
+enum br_mem_types {
+ gf_br_stub_mt_private_t = gf_common_mt_end + 1,
+ gf_br_stub_mt_version_t,
+ gf_br_stub_mt_inode_ctx_t,
+ gf_br_stub_mt_signature_t,
+ gf_br_mt_br_private_t,
+ gf_br_mt_br_child_t,
+ gf_br_mt_br_object_t,
+ gf_br_mt_br_ob_n_wk_t,
+ gf_br_mt_br_scrubber_t,
+ gf_br_mt_br_fsscan_entry_t,
+ gf_br_stub_mt_br_stub_fd_t,
+ gf_br_stub_mt_br_scanner_freq_t,
+ gf_br_stub_mt_sigstub_t,
+ gf_br_mt_br_child_event_t,
+ gf_br_stub_mt_misc,
+ gf_br_mt_br_worker_t,
+ gf_br_stub_mt_end,
+};
+
+#endif
diff --git a/xlators/features/bit-rot/src/stub/bit-rot-stub-messages.h b/xlators/features/bit-rot/src/stub/bit-rot-stub-messages.h
new file mode 100644
index 00000000000..6c15a166f18
--- /dev/null
+++ b/xlators/features/bit-rot/src/stub/bit-rot-stub-messages.h
@@ -0,0 +1,117 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+ */
+
+#ifndef _BITROT_STUB_MESSAGES_H_
+#define _BITROT_STUB_MESSAGES_H_
+
+#include <glusterfs/glfs-message-id.h>
+
+/* To add new message IDs, append new identifiers at the end of the list.
+ *
+ * Never remove a message ID. If it's not used anymore, you can rename it or
+ * leave it as it is, but not delete it. This is to prevent reutilization of
+ * IDs by other messages.
+ *
+ * The component name must match one of the entries defined in
+ * glfs-message-id.h.
+ */
+
+GLFS_MSGID(BITROT_STUB, BRS_MSG_NO_MEMORY, BRS_MSG_SET_EVENT_FAILED,
+ BRS_MSG_MEM_ACNT_FAILED, BRS_MSG_CREATE_FRAME_FAILED,
+ BRS_MSG_SET_CONTEXT_FAILED, BRS_MSG_CHANGE_VERSION_FAILED,
+ BRS_MSG_ADD_FD_TO_LIST_FAILED, BRS_MSG_SET_FD_CONTEXT_FAILED,
+ BRS_MSG_CREATE_ANONYMOUS_FD_FAILED, BRS_MSG_NO_CHILD,
+ BRS_MSG_STUB_ALLOC_FAILED, BRS_MSG_GET_INODE_CONTEXT_FAILED,
+ BRS_MSG_CANCEL_SIGN_THREAD_FAILED, BRS_MSG_ADD_FD_TO_INODE,
+ BRS_MSG_SIGN_VERSION_ERROR, BRS_MSG_BAD_OBJ_MARK_FAIL,
+ BRS_MSG_NON_SCRUB_BAD_OBJ_MARK, BRS_MSG_REMOVE_INTERNAL_XATTR,
+ BRS_MSG_SET_INTERNAL_XATTR, BRS_MSG_BAD_OBJECT_ACCESS,
+ BRS_MSG_BAD_CONTAINER_FAIL, BRS_MSG_BAD_OBJECT_DIR_FAIL,
+ BRS_MSG_BAD_OBJECT_DIR_SEEK_FAIL, BRS_MSG_BAD_OBJECT_DIR_TELL_FAIL,
+ BRS_MSG_BAD_OBJECT_DIR_READ_FAIL, BRS_MSG_GET_FD_CONTEXT_FAILED,
+ BRS_MSG_BAD_HANDLE_DIR_NULL, BRS_MSG_BAD_OBJ_THREAD_FAIL,
+ BRS_MSG_BAD_OBJ_DIR_CLOSE_FAIL, BRS_MSG_LINK_FAIL,
+ BRS_MSG_BAD_OBJ_UNLINK_FAIL, BRS_MSG_DICT_SET_FAILED,
+ BRS_MSG_PATH_GET_FAILED, BRS_MSG_NULL_LOCAL,
+ BRS_MSG_SPAWN_SIGN_THRD_FAILED, BRS_MSG_KILL_SIGN_THREAD,
+ BRS_MSG_NON_BITD_PID, BRS_MSG_SIGN_PREPARE_FAIL,
+ BRS_MSG_USING_DEFAULT_THREAD_SIZE, BRS_MSG_ALLOC_MEM_FAILED,
+ BRS_MSG_DICT_ALLOC_FAILED, BRS_MSG_CREATE_GF_DIRENT_FAILED,
+ BRS_MSG_ALLOC_FAILED, BRS_MSG_PATH_XATTR_GET_FAILED,
+ BRS_MSG_VERSION_PREPARE_FAIL);
+
+#define BRS_MSG_MEM_ACNT_FAILED_STR "Memory accounting init failed"
+#define BRS_MSG_BAD_OBJ_THREAD_FAIL_STR "pthread_init failed"
+#define BRS_MSG_USING_DEFAULT_THREAD_SIZE_STR "Using default thread stack size"
+#define BRS_MSG_NO_CHILD_STR "FATAL: no children"
+#define BRS_MSG_SPAWN_SIGN_THRD_FAILED_STR \
+ "failed to create the new thread for signer"
+#define BRS_MSG_BAD_CONTAINER_FAIL_STR \
+ "failed to launch the thread for storing bad gfids"
+#define BRS_MSG_CANCEL_SIGN_THREAD_FAILED_STR \
+ "Could not cancel sign serializer thread"
+#define BRS_MSG_KILL_SIGN_THREAD_STR "killed the signer thread"
+#define BRS_MSG_GET_INODE_CONTEXT_FAILED_STR \
+ "failed to init the inode context for the inode"
+#define BRS_MSG_ADD_FD_TO_INODE_STR "failed to add fd to the inode"
+#define BRS_MSG_NO_MEMORY_STR "local allocation failed"
+#define BRS_MSG_BAD_OBJECT_ACCESS_STR "bad object accessed. Returning"
+#define BRS_MSG_SIGN_VERSION_ERROR_STR "Signing version exceeds current version"
+#define BRS_MSG_NON_BITD_PID_STR \
+ "PID from where the signature request came does not belong to the " \
+ "bit-rot daemon. Unwinding the fop"
+#define BRS_MSG_SIGN_PREPARE_FAIL_STR \
+ "failed to prepare the signature. Unwinding the fop"
+#define BRS_MSG_VERSION_PREPARE_FAIL_STR \
+ "failed to prepare the version. Unwinding the fop"
+#define BRS_MSG_STUB_ALLOC_FAILED_STR "failed to allocate stub fop, Unwinding"
+#define BRS_MSG_BAD_OBJ_MARK_FAIL_STR "failed to mark object as bad"
+#define BRS_MSG_NON_SCRUB_BAD_OBJ_MARK_STR \
+ "bad object marking is not from the scrubber"
+#define BRS_MSG_ALLOC_MEM_FAILED_STR "failed to allocate memory"
+#define BRS_MSG_SET_INTERNAL_XATTR_STR "called on the internal xattr"
+#define BRS_MSG_REMOVE_INTERNAL_XATTR_STR "removexattr called on internal xattr"
+#define BRS_MSG_CREATE_ANONYMOUS_FD_FAILED_STR \
+ "failed to create anonymous fd for the inode"
+#define BRS_MSG_ADD_FD_TO_LIST_FAILED_STR "failed to add fd to the list"
+#define BRS_MSG_SET_FD_CONTEXT_FAILED_STR \
+ "failed to set the fd context for the file"
+#define BRS_MSG_NULL_LOCAL_STR "local is NULL"
+#define BRS_MSG_DICT_ALLOC_FAILED_STR \
+ "dict allocation failed: cannot send IPC FOP to changelog"
+#define BRS_MSG_SET_EVENT_FAILED_STR "cannot set release event in dict"
+#define BRS_MSG_CREATE_FRAME_FAILED_STR "create_frame() failure"
+#define BRS_MSG_BAD_OBJ_DIR_CLOSE_FAIL_STR "closedir error"
+#define BRS_MSG_LINK_FAIL_STR "failed to record gfid"
+#define BRS_MSG_BAD_OBJ_UNLINK_FAIL_STR \
+ "failed to delete bad object link from quarantine directory"
+#define BRS_MSG_BAD_OBJECT_DIR_FAIL_STR "failed stub directory"
+#define BRS_MSG_BAD_OBJECT_DIR_SEEK_FAIL_STR \
+ "seekdir failed. Invalid argument (offset reused from another DIR * " \
+ "structure)"
+#define BRS_MSG_BAD_OBJECT_DIR_TELL_FAIL_STR "telldir failed on dir"
+#define BRS_MSG_BAD_OBJECT_DIR_READ_FAIL_STR "readdir failed on dir"
+#define BRS_MSG_CREATE_GF_DIRENT_FAILED_STR "could not create gf_dirent"
+#define BRS_MSG_GET_FD_CONTEXT_FAILED_STR "pfd is NULL"
+#define BRS_MSG_BAD_HANDLE_DIR_NULL_STR "dir is NULL"
+#define BRS_MSG_ALLOC_FAILED_STR \
+ "failed to allocate new dict for saving the paths of the corrupted " \
+ "objects. Scrub status will only display the gfid"
+#define BRS_MSG_PATH_GET_FAILED_STR "failed to get the path"
+#define BRS_MSG_PATH_XATTR_GET_FAILED_STR \
+ "failed to get the path xattr from disk for the gfid. Trying to get path " \
+ "from the memory"
+#define BRS_MSG_DICT_SET_FAILED_STR \
+ "failed to set the actual path as the value in the dict for the " \
+ "corrupted object"
+#define BRS_MSG_SET_CONTEXT_FAILED_STR \
+ "could not set fd context for release callback"
+#define BRS_MSG_CHANGE_VERSION_FAILED_STR "change version failed"
+#endif /* !_BITROT_STUB_MESSAGES_H_ */
diff --git a/xlators/features/bit-rot/src/stub/bit-rot-stub.c b/xlators/features/bit-rot/src/stub/bit-rot-stub.c
new file mode 100644
index 00000000000..447dd47ff41
--- /dev/null
+++ b/xlators/features/bit-rot/src/stub/bit-rot-stub.c
@@ -0,0 +1,3590 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <ctype.h>
+#include <sys/uio.h>
+#include <signal.h>
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/logging.h>
+#include "changelog.h"
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/call-stub.h>
+
+#include "bit-rot-stub.h"
+#include "bit-rot-stub-mem-types.h"
+#include "bit-rot-stub-messages.h"
+#include "bit-rot-common.h"
+
+#define BR_STUB_REQUEST_COOKIE 0x1
+
+void
+br_stub_lock_cleaner(void *arg)
+{
+ pthread_mutex_t *clean_mutex = arg;
+
+ pthread_mutex_unlock(clean_mutex);
+ return;
+}
+
+void *
+br_stub_signth(void *);
+
+struct br_stub_signentry {
+ unsigned long v;
+
+ call_stub_t *stub;
+
+ struct list_head list;
+};
+
+int32_t
+mem_acct_init(xlator_t *this)
+{
+ int32_t ret = -1;
+
+ if (!this)
+ return ret;
+
+ ret = xlator_mem_acct_init(this, gf_br_stub_mt_end + 1);
+
+ if (ret != 0) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_MEM_ACNT_FAILED, NULL);
+ return ret;
+ }
+
+ return ret;
+}
+
+int
+br_stub_bad_object_container_init(xlator_t *this, br_stub_private_t *priv)
+{
+ pthread_attr_t w_attr;
+ int ret = -1;
+
+ ret = pthread_cond_init(&priv->container.bad_cond, NULL);
+ if (ret != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_BAD_OBJ_THREAD_FAIL,
+ "cond_init ret=%d", ret, NULL);
+ goto out;
+ }
+
+ ret = pthread_mutex_init(&priv->container.bad_lock, NULL);
+ if (ret != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_BAD_OBJ_THREAD_FAIL,
+ "mutex_init ret=%d", ret, NULL);
+ goto cleanup_cond;
+ }
+
+ ret = pthread_attr_init(&w_attr);
+ if (ret != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_BAD_OBJ_THREAD_FAIL,
+ "attr_init ret=%d", ret, NULL);
+ goto cleanup_lock;
+ }
+
+ ret = pthread_attr_setstacksize(&w_attr, BAD_OBJECT_THREAD_STACK_SIZE);
+ if (ret == EINVAL) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0,
+ BRS_MSG_USING_DEFAULT_THREAD_SIZE, NULL);
+ }
+
+ INIT_LIST_HEAD(&priv->container.bad_queue);
+ ret = br_stub_dir_create(this, priv);
+ if (ret < 0)
+ goto cleanup_lock;
+
+ ret = gf_thread_create(&priv->container.thread, &w_attr, br_stub_worker,
+ this, "brswrker");
+ if (ret)
+ goto cleanup_attr;
+
+ return 0;
+
+cleanup_attr:
+ pthread_attr_destroy(&w_attr);
+cleanup_lock:
+ pthread_mutex_destroy(&priv->container.bad_lock);
+cleanup_cond:
+ pthread_cond_destroy(&priv->container.bad_cond);
+out:
+ return -1;
+}
+
+int32_t
+init(xlator_t *this)
+{
+ int ret = 0;
+ char *tmp = NULL;
+ struct timeval tv = {
+ 0,
+ };
+ br_stub_private_t *priv = NULL;
+
+ if (!this->children) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_NO_CHILD, NULL);
+ goto error_return;
+ }
+
+ priv = GF_CALLOC(1, sizeof(*priv), gf_br_stub_mt_private_t);
+ if (!priv)
+ goto error_return;
+
+ priv->local_pool = mem_pool_new(br_stub_local_t, 512);
+ if (!priv->local_pool)
+ goto free_priv;
+
+ GF_OPTION_INIT("bitrot", priv->do_versioning, bool, free_mempool);
+
+ GF_OPTION_INIT("export", tmp, str, free_mempool);
+
+ if (snprintf(priv->export, PATH_MAX, "%s", tmp) >= PATH_MAX)
+ goto free_mempool;
+
+ if (snprintf(priv->stub_basepath, sizeof(priv->stub_basepath), "%s/%s",
+ priv->export,
+ BR_STUB_QUARANTINE_DIR) >= sizeof(priv->stub_basepath))
+ goto free_mempool;
+
+ (void)gettimeofday(&tv, NULL);
+
+ /* boot time is in network endian format */
+ priv->boot[0] = htonl(tv.tv_sec);
+ priv->boot[1] = htonl(tv.tv_usec);
+
+ pthread_mutex_init(&priv->lock, NULL);
+ pthread_cond_init(&priv->cond, NULL);
+ INIT_LIST_HEAD(&priv->squeue);
+
+ /* Thread creations need 'this' to be passed so that THIS can be
+ * assigned inside the thread. So setting this->private here.
+ */
+ this->private = priv;
+ if (!priv->do_versioning)
+ return 0;
+
+ ret = gf_thread_create(&priv->signth, NULL, br_stub_signth, this,
+ "brssign");
+ if (ret != 0) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_SPAWN_SIGN_THRD_FAILED,
+ NULL);
+ goto cleanup_lock;
+ }
+
+ ret = br_stub_bad_object_container_init(this, priv);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_BAD_CONTAINER_FAIL, NULL);
+ goto cleanup_lock;
+ }
+
+ gf_msg_debug(this->name, 0, "bit-rot stub loaded");
+
+ return 0;
+
+cleanup_lock:
+ pthread_cond_destroy(&priv->cond);
+ pthread_mutex_destroy(&priv->lock);
+free_mempool:
+ mem_pool_destroy(priv->local_pool);
+ priv->local_pool = NULL;
+free_priv:
+ GF_FREE(priv);
+ this->private = NULL;
+error_return:
+ return -1;
+}
+
+/* TODO:
+ * As of now enabling bitrot option does 2 things.
+ * 1) Start the Bitrot Daemon which signs the objects (currently files only)
+ * upon getting notified by the stub.
+ * 2) Enable versioning of the objects. Object versions (again files only) are
+ * incremented upon modification.
+ * So object versioning is tied to the bitrot daemon's signing. In the future,
+ * object versioning might be necessary for other things as well apart from
+ * bit-rot detection (well, that's the objective of bringing in
+ * object-versioning :)). In that case, it would be better to make versioning
+ * a separate option and let it be enabled even when bit-rot detection is not
+ * needed.
+ * Ex: ICAP.
+ */
+int32_t
+reconfigure(xlator_t *this, dict_t *options)
+{
+ int32_t ret = -1;
+ br_stub_private_t *priv = NULL;
+
+ priv = this->private;
+
+ GF_OPTION_RECONF("bitrot", priv->do_versioning, options, bool, err);
+ if (priv->do_versioning && !priv->signth) {
+ ret = gf_thread_create(&priv->signth, NULL, br_stub_signth, this,
+ "brssign");
+ if (ret != 0) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0,
+ BRS_MSG_SPAWN_SIGN_THRD_FAILED, NULL);
+ goto err;
+ }
+
+ ret = br_stub_bad_object_container_init(this, priv);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_BAD_CONTAINER_FAIL,
+ NULL);
+ goto err;
+ }
+ } else {
+ if (priv->signth) {
+ if (gf_thread_cleanup_xint(priv->signth)) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ BRS_MSG_CANCEL_SIGN_THREAD_FAILED, NULL);
+ } else {
+ gf_smsg(this->name, GF_LOG_INFO, 0, BRS_MSG_KILL_SIGN_THREAD,
+ NULL);
+ priv->signth = 0;
+ }
+ }
+
+ if (priv->container.thread) {
+ if (gf_thread_cleanup_xint(priv->container.thread)) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ BRS_MSG_CANCEL_SIGN_THREAD_FAILED, NULL);
+ }
+ priv->container.thread = 0;
+ }
+ }
+
+ ret = 0;
+ return ret;
+err:
+ if (priv->signth) {
+ if (gf_thread_cleanup_xint(priv->signth)) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ BRS_MSG_CANCEL_SIGN_THREAD_FAILED, NULL);
+ }
+ priv->signth = 0;
+ }
+
+ if (priv->container.thread) {
+ if (gf_thread_cleanup_xint(priv->container.thread)) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ BRS_MSG_CANCEL_SIGN_THREAD_FAILED, NULL);
+ }
+ priv->container.thread = 0;
+ }
+ ret = -1;
+ return ret;
+}
+
+int
+notify(xlator_t *this, int event, void *data, ...)
+{
+ br_stub_private_t *priv = NULL;
+
+ if (!this)
+ return 0;
+
+ priv = this->private;
+ if (!priv)
+ return 0;
+
+ default_notify(this, event, data);
+ return 0;
+}
+
+void
+fini(xlator_t *this)
+{
+ int32_t ret = 0;
+ br_stub_private_t *priv = this->private;
+ struct br_stub_signentry *sigstub = NULL;
+ call_stub_t *stub = NULL;
+
+ if (!priv)
+ return;
+
+ if (!priv->do_versioning)
+ goto cleanup;
+
+ ret = gf_thread_cleanup_xint(priv->signth);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_CANCEL_SIGN_THREAD_FAILED,
+ NULL);
+ goto out;
+ }
+ priv->signth = 0;
+
+ while (!list_empty(&priv->squeue)) {
+ sigstub = list_first_entry(&priv->squeue, struct br_stub_signentry,
+ list);
+ list_del_init(&sigstub->list);
+
+ call_stub_destroy(sigstub->stub);
+ GF_FREE(sigstub);
+ }
+
+ ret = gf_thread_cleanup_xint(priv->container.thread);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_CANCEL_SIGN_THREAD_FAILED,
+ NULL);
+ goto out;
+ }
+
+ priv->container.thread = 0;
+
+ while (!list_empty(&priv->container.bad_queue)) {
+ stub = list_first_entry(&priv->container.bad_queue, call_stub_t, list);
+ list_del_init(&stub->list);
+ call_stub_destroy(stub);
+ }
+
+ pthread_mutex_destroy(&priv->container.bad_lock);
+ pthread_cond_destroy(&priv->container.bad_cond);
+
+cleanup:
+ pthread_mutex_destroy(&priv->lock);
+ pthread_cond_destroy(&priv->cond);
+
+ if (priv->local_pool) {
+ mem_pool_destroy(priv->local_pool);
+ priv->local_pool = NULL;
+ }
+
+ this->private = NULL;
+ GF_FREE(priv);
+
+out:
+ return;
+}
+
+static int
+br_stub_alloc_versions(br_version_t **obuf, br_signature_t **sbuf,
+ size_t signaturelen)
+{
+ void *mem = NULL;
+ size_t size = 0;
+
+ if (obuf)
+ size += sizeof(br_version_t);
+ if (sbuf)
+ size += sizeof(br_signature_t) + signaturelen;
+
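+    /* A single allocation backs both the version header and (optionally) the
+     * signature buffer laid out back to back, so br_stub_dealloc_versions()
+     * releases the whole chunk with one GF_FREE(). */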
+ mem = GF_CALLOC(1, size, gf_br_stub_mt_version_t);
+ if (!mem)
+ goto error_return;
+
+ if (obuf) {
+ *obuf = (br_version_t *)mem;
+ mem = ((char *)mem + sizeof(br_version_t));
+ }
+ if (sbuf) {
+ *sbuf = (br_signature_t *)mem;
+ }
+
+ return 0;
+
+error_return:
+ return -1;
+}
+
+static void
+br_stub_dealloc_versions(void *mem)
+{
+ GF_FREE(mem);
+}
+
+static br_stub_local_t *
+br_stub_alloc_local(xlator_t *this)
+{
+ br_stub_private_t *priv = this->private;
+
+ return mem_get0(priv->local_pool);
+}
+
+static void
+br_stub_dealloc_local(br_stub_local_t *ptr)
+{
+ if (!ptr)
+ return;
+
+ mem_put(ptr);
+}
+
+static int
+br_stub_prepare_version_request(xlator_t *this, dict_t *dict,
+ br_version_t *obuf, unsigned long oversion)
+{
+ br_stub_private_t *priv = NULL;
+
+ priv = this->private;
+ br_set_ongoingversion(obuf, oversion, priv->boot);
+
+ return dict_set_bin(dict, BITROT_CURRENT_VERSION_KEY, (void *)obuf,
+ sizeof(br_version_t));
+}
+
+static int
+br_stub_prepare_signing_request(dict_t *dict, br_signature_t *sbuf,
+ br_isignature_t *sign, size_t signaturelen)
+{
+ size_t size = 0;
+
+ br_set_signature(sbuf, sign, signaturelen, &size);
+
+ return dict_set_bin(dict, BITROT_SIGNING_VERSION_KEY, (void *)sbuf, size);
+}
+
+/**
+ * initialize an inode context starting with a given ongoing version.
+ * a fresh lookup() or a first creat() call initializes the inode
+ * context, hence the inode is marked dirty. this routine also
+ * initializes the transient inode version.
+ */
+static int
+br_stub_init_inode_versions(xlator_t *this, fd_t *fd, inode_t *inode,
+ unsigned long version, gf_boolean_t markdirty,
+ gf_boolean_t bad_object, uint64_t *ctx_addr)
+{
+ int32_t ret = 0;
+ br_stub_inode_ctx_t *ctx = NULL;
+
+ ctx = GF_CALLOC(1, sizeof(br_stub_inode_ctx_t), gf_br_stub_mt_inode_ctx_t);
+ if (!ctx)
+ goto error_return;
+
+ INIT_LIST_HEAD(&ctx->fd_list);
+ (markdirty) ? __br_stub_mark_inode_dirty(ctx)
+ : __br_stub_mark_inode_synced(ctx);
+ __br_stub_set_ongoing_version(ctx, version);
+
+ if (bad_object)
+ __br_stub_mark_object_bad(ctx);
+
+ if (fd) {
+ ret = br_stub_add_fd_to_inode(this, fd, ctx);
+ if (ret)
+ goto free_ctx;
+ }
+
+ ret = br_stub_set_inode_ctx(this, inode, ctx);
+ if (ret)
+ goto free_ctx;
+
+ if (ctx_addr)
+ *ctx_addr = (uint64_t)(uintptr_t)ctx;
+ return 0;
+
+free_ctx:
+ GF_FREE(ctx);
+error_return:
+ return -1;
+}
+
+/**
+ * modify the ongoing version of an inode.
+ */
+static int
+br_stub_mod_inode_versions(xlator_t *this, fd_t *fd, inode_t *inode,
+ unsigned long version)
+{
+ int32_t ret = -1;
+ br_stub_inode_ctx_t *ctx = 0;
+
+ LOCK(&inode->lock);
+ {
+ ctx = __br_stub_get_ongoing_version_ctx(this, inode, NULL);
+ if (ctx == NULL)
+ goto unblock;
+ if (__br_stub_is_inode_dirty(ctx)) {
+ __br_stub_set_ongoing_version(ctx, version);
+ __br_stub_mark_inode_synced(ctx);
+ }
+
+ ret = 0;
+ }
+unblock:
+ UNLOCK(&inode->lock);
+
+ return ret;
+}
+
+static void
+br_stub_fill_local(br_stub_local_t *local, call_stub_t *stub, fd_t *fd,
+ inode_t *inode, uuid_t gfid, int versioningtype,
+ unsigned long memversion)
+{
+ local->fopstub = stub;
+ local->versioningtype = versioningtype;
+ local->u.context.version = memversion;
+ if (fd)
+ local->u.context.fd = fd_ref(fd);
+ if (inode)
+ local->u.context.inode = inode_ref(inode);
+ gf_uuid_copy(local->u.context.gfid, gfid);
+}
+
+static void
+br_stub_cleanup_local(br_stub_local_t *local)
+{
+ if (!local)
+ return;
+
+ local->fopstub = NULL;
+ local->versioningtype = 0;
+ local->u.context.version = 0;
+ if (local->u.context.fd) {
+ fd_unref(local->u.context.fd);
+ local->u.context.fd = NULL;
+ }
+ if (local->u.context.inode) {
+ inode_unref(local->u.context.inode);
+ local->u.context.inode = NULL;
+ }
+ memset(local->u.context.gfid, '\0', sizeof(uuid_t));
+}
+
+static int
+br_stub_need_versioning(xlator_t *this, fd_t *fd, gf_boolean_t *versioning,
+ gf_boolean_t *modified, br_stub_inode_ctx_t **ctx)
+{
+ int32_t ret = -1;
+ uint64_t ctx_addr = 0;
+ br_stub_inode_ctx_t *c = NULL;
+ unsigned long version = BITROT_DEFAULT_CURRENT_VERSION;
+
+ *versioning = _gf_false;
+ *modified = _gf_false;
+
+    /* The bitrot stub inode context used to be initialized only in the
+     * lookup, create and mknod callback paths. Object versioning was enabled
+     * by default irrespective of whether bitrot was enabled or not, but it is
+     * optional now. As a consequence there can be cases where getting the
+     * inode ctx fails because it has not been set yet.
+     * e.g., if versioning (with bitrot enabled) is turned on while I/O is in
+     * progress, fops like writev can arrive without a preceding lookup, and
+     * getting the inode ctx would fail. Hence initialize the inode ctx on
+     * failure to get it. This is done in all places where applicable.
+     */
+ ret = br_stub_get_inode_ctx(this, fd->inode, &ctx_addr);
+ if (ret < 0) {
+ ret = br_stub_init_inode_versions(this, fd, fd->inode, version,
+ _gf_true, _gf_false, &ctx_addr);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ BRS_MSG_GET_INODE_CONTEXT_FAILED, "gfid=%s",
+ uuid_utoa(fd->inode->gfid), NULL);
+ goto error_return;
+ }
+ }
+
+ c = (br_stub_inode_ctx_t *)(long)ctx_addr;
+
+ LOCK(&fd->inode->lock);
+ {
+ if (__br_stub_is_inode_dirty(c))
+ *versioning = _gf_true;
+ if (__br_stub_is_inode_modified(c))
+ *modified = _gf_true;
+ }
+ UNLOCK(&fd->inode->lock);
+
+ if (ctx)
+ *ctx = c;
+ return 0;
+
+error_return:
+ return -1;
+}
+
+static int32_t
+br_stub_anon_fd_ctx(xlator_t *this, fd_t *fd, br_stub_inode_ctx_t *ctx)
+{
+ int32_t ret = -1;
+ br_stub_fd_t *br_stub_fd = NULL;
+
+ br_stub_fd = br_stub_fd_ctx_get(this, fd);
+ if (!br_stub_fd) {
+ ret = br_stub_add_fd_to_inode(this, fd, ctx);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_ADD_FD_TO_INODE,
+ "gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ goto out;
+ }
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static int
+br_stub_versioning_prep(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ br_stub_inode_ctx_t *ctx)
+{
+ int32_t ret = -1;
+ br_stub_local_t *local = NULL;
+
+ local = br_stub_alloc_local(this);
+ if (!local) {
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM, BRS_MSG_NO_MEMORY, "gfid=%s",
+ uuid_utoa(fd->inode->gfid), NULL);
+ goto error_return;
+ }
+
+ if (fd_is_anonymous(fd)) {
+ ret = br_stub_anon_fd_ctx(this, fd, ctx);
+ if (ret)
+ goto free_local;
+ }
+
+ frame->local = local;
+
+ return 0;
+
+free_local:
+ br_stub_dealloc_local(local);
+error_return:
+ return -1;
+}
+
+static int
+br_stub_mark_inode_modified(xlator_t *this, br_stub_local_t *local)
+{
+ fd_t *fd = NULL;
+ int32_t ret = 0;
+ uint64_t ctx_addr = 0;
+ br_stub_inode_ctx_t *ctx = NULL;
+ unsigned long version = BITROT_DEFAULT_CURRENT_VERSION;
+
+ fd = local->u.context.fd;
+
+ ret = br_stub_get_inode_ctx(this, fd->inode, &ctx_addr);
+ if (ret < 0) {
+ ret = br_stub_init_inode_versions(this, fd, fd->inode, version,
+ _gf_true, _gf_false, &ctx_addr);
+ if (ret)
+ goto error_return;
+ }
+
+ ctx = (br_stub_inode_ctx_t *)(long)ctx_addr;
+
+ LOCK(&fd->inode->lock);
+ {
+ __br_stub_set_inode_modified(ctx);
+ }
+ UNLOCK(&fd->inode->lock);
+
+ return 0;
+
+error_return:
+ return -1;
+}
+
+/**
+ * The possible return values from br_stub_is_bad_object () are:
+ * 1) 0  => as per the inode context, the object is not bad
+ * 2) -1 => failed to get the inode context itself
+ * 3) -2 => as per the inode context, the object is bad
+ * Both -ve values mean the fop which called this function fails and the
+ * error is returned upwards.
+ */
+static int
+br_stub_check_bad_object(xlator_t *this, inode_t *inode, int32_t *op_ret,
+ int32_t *op_errno)
+{
+ int ret = -1;
+ unsigned long version = BITROT_DEFAULT_CURRENT_VERSION;
+
+ ret = br_stub_is_bad_object(this, inode);
+ if (ret == -2) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_BAD_OBJECT_ACCESS,
+ "gfid=%s", uuid_utoa(inode->gfid), NULL);
+ *op_ret = -1;
+ *op_errno = EIO;
+ }
+
+ if (ret == -1) {
+ ret = br_stub_init_inode_versions(this, NULL, inode, version, _gf_true,
+ _gf_false, NULL);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ BRS_MSG_GET_INODE_CONTEXT_FAILED, "gfid=%s",
+ uuid_utoa(inode->gfid), NULL);
+ *op_ret = -1;
+ *op_errno = EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * callback for inode/fd versioning
+ */
+int
+br_stub_fd_incversioning_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, dict_t *xdata)
+{
+ fd_t *fd = NULL;
+ inode_t *inode = NULL;
+ unsigned long version = 0;
+ br_stub_local_t *local = NULL;
+
+ local = (br_stub_local_t *)frame->local;
+ if (op_ret < 0)
+ goto done;
+ fd = local->u.context.fd;
+ inode = local->u.context.inode;
+ version = local->u.context.version;
+
+ op_ret = br_stub_mod_inode_versions(this, fd, inode, version);
+ if (op_ret < 0)
+ op_errno = EINVAL;
+
+done:
+ if (op_ret < 0) {
+ frame->local = NULL;
+ call_unwind_error(local->fopstub, -1, op_errno);
+ br_stub_cleanup_local(local);
+ br_stub_dealloc_local(local);
+ } else {
+ call_resume(local->fopstub);
+ }
+ return 0;
+}
+
+/**
+ * Initial object versioning
+ *
+ * Versioning persists two (2) extended attributes as explained below:
+ *   1. Current (ongoing) version: this is incremented on a writev ()
+ *      or truncate () and is the running version for an object.
+ *   2. Signing version: this is the version against which an object
+ *      was signed (checksummed).
+ *
+ * During initial versioning, the ongoing and signing versions are
+ * set to one and zero respectively. A write() call increments the
+ * ongoing version as an indication of modification to the object.
+ * Additionally this needs to be persisted on disk and needs to be
+ * durable: fsync().. :-/
+ * As an optimization only the first write() synchronizes the ongoing
+ * version to disk; subsequent write()s before the *last* release()
+ * are no-ops.
+ *
+ * create(), just like lookup(), initializes the object versions to
+ * the default. As an optimization this is not a durable operation:
+ * in case of a crash, hard reboot etc. the absence of versioning xattrs
+ * is ignored by the scrubber, and the one-shot crawler explicitly
+ * triggers signing for such objects.
+ *
+ * c.f. br_stub_writev() / br_stub_truncate()
+ */
+
+/**
+ * perform full or incremental versioning on an inode pointed to by an
+ * fd. incremental versioning is done when an inode is dirty and a
+ * writeback is triggered.
+ */
+
+int
+br_stub_fd_versioning(xlator_t *this, call_frame_t *frame, call_stub_t *stub,
+ dict_t *dict, fd_t *fd, br_stub_version_cbk *callback,
+ unsigned long memversion, int versioningtype, int durable)
+{
+ int32_t ret = -1;
+ int flags = 0;
+ dict_t *xdata = NULL;
+ br_stub_local_t *local = NULL;
+
+ xdata = dict_new();
+ if (!xdata)
+ goto done;
+
+ ret = dict_set_int32(xdata, GLUSTERFS_INTERNAL_FOP_KEY, 1);
+ if (ret)
+ goto dealloc_xdata;
+
+ if (durable) {
+ ret = dict_set_int32(xdata, GLUSTERFS_DURABLE_OP, 0);
+ if (ret)
+ goto dealloc_xdata;
+ }
+
+ local = frame->local;
+
+ br_stub_fill_local(local, stub, fd, fd->inode, fd->inode->gfid,
+ versioningtype, memversion);
+
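+    /* The version xattr is written via an internal fsetxattr on the fd; the
+     * supplied callback resumes (or unwinds) the original fop once the
+     * on-disk version has been updated. */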
+ STACK_WIND(frame, callback, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags, xdata);
+
+ ret = 0;
+
+dealloc_xdata:
+ dict_unref(xdata);
+done:
+ return ret;
+}
+
+static int
+br_stub_perform_incversioning(xlator_t *this, call_frame_t *frame,
+ call_stub_t *stub, fd_t *fd,
+ br_stub_inode_ctx_t *ctx)
+{
+ int32_t ret = -1;
+ dict_t *dict = NULL;
+ br_version_t *obuf = NULL;
+ unsigned long writeback_version = 0;
+ int op_errno = 0;
+ br_stub_local_t *local = NULL;
+
+ op_errno = EINVAL;
+ local = frame->local;
+
+ writeback_version = __br_stub_writeback_version(ctx);
+
+ op_errno = ENOMEM;
+ dict = dict_new();
+ if (!dict)
+ goto out;
+ ret = br_stub_alloc_versions(&obuf, NULL, 0);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_ALLOC_MEM_FAILED,
+ "gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ goto out;
+ }
+ ret = br_stub_prepare_version_request(this, dict, obuf, writeback_version);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_VERSION_PREPARE_FAIL,
+ "gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ br_stub_dealloc_versions(obuf);
+ goto out;
+ }
+
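+    /* Hand the held-back fop and the writeback version to the fsetxattr
+     * based versioning path; the original fop is resumed from
+     * br_stub_fd_incversioning_cbk() once the version is written back. */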
+ ret = br_stub_fd_versioning(
+ this, frame, stub, dict, fd, br_stub_fd_incversioning_cbk,
+ writeback_version, BR_STUB_INCREMENTAL_VERSIONING, !WRITEBACK_DURABLE);
+out:
+ if (dict)
+ dict_unref(dict);
+ if (ret) {
+ if (local)
+ frame->local = NULL;
+ call_unwind_error(stub, -1, op_errno);
+ if (local) {
+ br_stub_cleanup_local(local);
+ br_stub_dealloc_local(local);
+ }
+ }
+
+ return ret;
+}
+
+/** {{{ */
+
+/* fsetxattr() */
+
+int32_t
+br_stub_perform_objsign(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ dict_t *dict, int flags, dict_t *xdata)
+{
+ STACK_WIND(frame, default_fsetxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags, xdata);
+
+ dict_unref(xdata);
+ return 0;
+}
+
+void *
+br_stub_signth(void *arg)
+{
+ xlator_t *this = arg;
+ br_stub_private_t *priv = this->private;
+ struct br_stub_signentry *sigstub = NULL;
+
+ THIS = this;
+ while (1) {
+        /*
+         * Disabling the bit-rot feature leads to this particular thread
+         * getting cleaned up by reconfigure via a call to the function
+         * gf_thread_cleanup_xint (which in turn calls pthread_cancel
+         * and pthread_join). But, if this thread had held the mutex
+         * &priv->lock at the time of cancellation, then it leads to a
+         * deadlock later when the bit-rot feature is enabled again (which
+         * re-spawns this thread, and the new instance cannot take the lock
+         * as the mutex is still held by the previous instance of the thread
+         * which got killed). Also, the br_stub_handle_object_signature
+         * function, which is called whenever a file has to be signed,
+         * gets blocked as it too attempts to acquire &priv->lock.
+         *
+         * So, arrange for the lock to be unlocked as part of the
+         * cleanup of this thread using pthread_cleanup_push and
+         * pthread_cleanup_pop.
+         */
+ pthread_cleanup_push(br_stub_lock_cleaner, &priv->lock);
+ pthread_mutex_lock(&priv->lock);
+ {
+ while (list_empty(&priv->squeue))
+ pthread_cond_wait(&priv->cond, &priv->lock);
+
+ sigstub = list_first_entry(&priv->squeue, struct br_stub_signentry,
+ list);
+ list_del_init(&sigstub->list);
+ }
+ pthread_mutex_unlock(&priv->lock);
+ pthread_cleanup_pop(0);
+
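+        /* resume the queued signing fsetxattr outside the mutex so that new
+         * sign requests are not blocked while this one is dispatched */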
+ call_resume(sigstub->stub);
+
+ GF_FREE(sigstub);
+ }
+
+ return NULL;
+}
+
+static gf_boolean_t
+br_stub_internal_xattr(dict_t *dict)
+{
+ if (dict_get(dict, GLUSTERFS_SET_OBJECT_SIGNATURE) ||
+ dict_get(dict, GLUSTERFS_GET_OBJECT_SIGNATURE) ||
+ dict_get(dict, BR_REOPEN_SIGN_HINT_KEY) ||
+ dict_get(dict, BITROT_OBJECT_BAD_KEY) ||
+ dict_get(dict, BITROT_SIGNING_VERSION_KEY) ||
+ dict_get(dict, BITROT_CURRENT_VERSION_KEY))
+ return _gf_true;
+
+ return _gf_false;
+}
+
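+/* comparator used by list_add_order() to keep the sign queue ordered by the
+ * signed version of each queued request */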
+int
+orderq(struct list_head *elem1, struct list_head *elem2)
+{
+ struct br_stub_signentry *s1 = NULL;
+ struct br_stub_signentry *s2 = NULL;
+
+ s1 = list_entry(elem1, struct br_stub_signentry, list);
+ s2 = list_entry(elem2, struct br_stub_signentry, list);
+
+ return (s1->v > s2->v);
+}
+
+static int
+br_stub_compare_sign_version(xlator_t *this, inode_t *inode,
+ br_signature_t *sbuf, dict_t *dict,
+ int *fakesuccess)
+{
+ int32_t ret = -1;
+ uint64_t tmp_ctx = 0;
+ gf_boolean_t invalid = _gf_false;
+ br_stub_inode_ctx_t *ctx = NULL;
+
+ GF_VALIDATE_OR_GOTO("bit-rot-stub", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, inode, out);
+ GF_VALIDATE_OR_GOTO(this->name, sbuf, out);
+ GF_VALIDATE_OR_GOTO(this->name, dict, out);
+
+ ret = br_stub_get_inode_ctx(this, inode, &tmp_ctx);
+ if (ret) {
+ dict_del(dict, BITROT_SIGNING_VERSION_KEY);
+ goto out;
+ }
+
+ ctx = (br_stub_inode_ctx_t *)(long)tmp_ctx;
+
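+    /* A sign request for a version higher than the current ongoing version is
+     * invalid; a request for a lower version means the object has been
+     * modified since that notification, so the stale signing is acknowledged
+     * with a fake success and dropped. */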
+ LOCK(&inode->lock);
+ {
+ if (ctx->currentversion < sbuf->signedversion) {
+ invalid = _gf_true;
+ } else if (ctx->currentversion > sbuf->signedversion) {
+ gf_msg_debug(this->name, 0,
+ "\"Signing version\" "
+ "(%lu) lower than \"Current version \" "
+ "(%lu)",
+ ctx->currentversion, sbuf->signedversion);
+ *fakesuccess = 1;
+ }
+ }
+ UNLOCK(&inode->lock);
+
+ if (invalid) {
+ ret = -1;
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_SIGN_VERSION_ERROR,
+ "Signing-ver=%lu", sbuf->signedversion, "current-ver=%lu",
+ ctx->currentversion, NULL);
+ }
+
+out:
+ return ret;
+}
+
+static int
+br_stub_prepare_signature(xlator_t *this, dict_t *dict, inode_t *inode,
+ br_isignature_t *sign, int *fakesuccess)
+{
+ int32_t ret = -1;
+ size_t signaturelen = 0;
+ br_signature_t *sbuf = NULL;
+
+ if (!br_is_signature_type_valid(sign->signaturetype))
+ goto out;
+
+ signaturelen = sign->signaturelen;
+ ret = br_stub_alloc_versions(NULL, &sbuf, signaturelen);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_ALLOC_MEM_FAILED,
+ "gfid=%s", uuid_utoa(inode->gfid), NULL);
+ ret = -1;
+ goto out;
+ }
+ ret = br_stub_prepare_signing_request(dict, sbuf, sign, signaturelen);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_SIGN_PREPARE_FAIL,
+ "gfid=%s", uuid_utoa(inode->gfid), NULL);
+ ret = -1;
+ br_stub_dealloc_versions(sbuf);
+ goto out;
+ }
+
+ /* At this point sbuf has been added to dict, so the memory will be freed
+ * when the data from the dict is destroyed
+ */
+ ret = br_stub_compare_sign_version(this, inode, sbuf, dict, fakesuccess);
+out:
+ return ret;
+}
+
+static void
+br_stub_handle_object_signature(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ dict_t *dict, br_isignature_t *sign,
+ dict_t *xdata)
+{
+ int32_t ret = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+ int fakesuccess = 0;
+ br_stub_private_t *priv = NULL;
+ struct br_stub_signentry *sigstub = NULL;
+
+ priv = this->private;
+
+ if (frame->root->pid != GF_CLIENT_PID_BITD) {
+ gf_smsg(this->name, GF_LOG_WARNING, op_errno, BRS_MSG_NON_BITD_PID,
+ "PID=%d", frame->root->pid, NULL);
+ goto dofop;
+ }
+
+ ret = br_stub_prepare_signature(this, dict, fd->inode, sign, &fakesuccess);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_SIGN_PREPARE_FAIL,
+ "gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ goto dofop;
+ }
+ if (fakesuccess) {
+ op_ret = op_errno = 0;
+ goto dofop;
+ }
+
+ dict_del(dict, GLUSTERFS_SET_OBJECT_SIGNATURE);
+
+ ret = -1;
+ if (!xdata) {
+ xdata = dict_new();
+ if (!xdata)
+ goto dofop;
+ } else {
+ dict_ref(xdata);
+ }
+
+ ret = dict_set_int32(xdata, GLUSTERFS_DURABLE_OP, 0);
+ if (ret)
+ goto unref_dict;
+
+ /* prepare dispatch stub to order object signing */
+ sigstub = GF_CALLOC(1, sizeof(*sigstub), gf_br_stub_mt_sigstub_t);
+ if (!sigstub)
+ goto unref_dict;
+
+ INIT_LIST_HEAD(&sigstub->list);
+ sigstub->v = ntohl(sign->signedversion);
+ sigstub->stub = fop_fsetxattr_stub(frame, br_stub_perform_objsign, fd, dict,
+ 0, xdata);
+ if (!sigstub->stub)
+ goto cleanup_stub;
+
+ pthread_mutex_lock(&priv->lock);
+ {
+ list_add_order(&sigstub->list, &priv->squeue, orderq);
+ pthread_cond_signal(&priv->cond);
+ }
+ pthread_mutex_unlock(&priv->lock);
+
+ return;
+
+cleanup_stub:
+ GF_FREE(sigstub);
+unref_dict:
+ dict_unref(xdata);
+dofop:
+ STACK_UNWIND_STRICT(fsetxattr, frame, op_ret, op_errno, NULL);
+}
+
+int32_t
+br_stub_fsetxattr_resume(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ int32_t ret = -1;
+ br_stub_local_t *local = NULL;
+
+ local = frame->local;
+ frame->local = NULL;
+
+ ret = br_stub_mark_inode_modified(this, local);
+ if (ret) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ }
+
+ STACK_UNWIND_STRICT(fsetxattr, frame, op_ret, op_errno, xdata);
+
+ br_stub_cleanup_local(local);
+ br_stub_dealloc_local(local);
+
+ return 0;
+}
+
+/**
+ * Handles object reopens. Object reopens can be of 3 types: 2 are from the
+ * oneshot crawler and 1 from the regular signer.
+ * ONESHOT CRAWLER:
+ * For those objects which were created before bitrot was enabled, the oneshot
+ * crawler crawls the namespace and signs all the objects. It has to do
+ * the versioning before making bit-rot-stub send a sign notification.
+ * So it sends fsetxattr with BR_OBJECT_REOPEN as the value, and bit-rot-stub,
+ * upon getting the BR_OBJECT_REOPEN value, checks whether the version has to
+ * be increased or not. By default the version will be increased. But if the
+ * object is modified before BR_OBJECT_REOPEN from the oneshot crawler, then
+ * versioning need not be done. In that case simply a success is returned.
+ * SIGNER:
+ * The signer waits for 2 minutes upon getting the notification from
+ * bit-rot-stub and then sends a dummy write (in reality a fsetxattr) call to
+ * change the state of the inode from REOPEN_WAIT to SIGN_QUICK. The funny
+ * part here is that though the inode's state is REOPEN_WAIT, the call sent by
+ * the signer is BR_OBJECT_RESIGN. Once the state is changed to SIGN_QUICK,
+ * yet another notification is sent upon release (RESIGN would have happened
+ * via fsetxattr, so a fd is needed) and the object is truly signed this time.
+ * There is a challenge in the above RESIGN method by the signer. After
+ * sending the 1st notification, the inode could be forgotten before the
+ * RESIGN request is received. In that case, the inode's context (the newly
+ * looked up inode) would not indicate the inode as being modified (it would
+ * be in the default state) and because of this, a SIGN_QUICK notification to
+ * truly sign the object would not be sent. So, this is how it's handled:
+ * if (request == RESIGN) {
+ *     if (inode->sign_info == NORMAL) {
+ *          mark_inode_non_dirty;
+ *          mark_inode_modified;
+ *     }
+ *     GOBACK (means unwind without doing versioning)
+ * }
+ */
+static void
+br_stub_handle_object_reopen(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ uint32_t val)
+{
+ int32_t ret = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+ call_stub_t *stub = NULL;
+ gf_boolean_t inc_version = _gf_false;
+ gf_boolean_t modified = _gf_false;
+ br_stub_inode_ctx_t *ctx = NULL;
+ br_stub_local_t *local = NULL;
+ gf_boolean_t goback = _gf_true;
+
+ ret = br_stub_need_versioning(this, fd, &inc_version, &modified, &ctx);
+ if (ret)
+ goto unwind;
+
+ LOCK(&fd->inode->lock);
+ {
+ if ((val == BR_OBJECT_REOPEN) && inc_version)
+ goback = _gf_false;
+ if (val == BR_OBJECT_RESIGN && ctx->info_sign == BR_SIGN_NORMAL) {
+ __br_stub_mark_inode_synced(ctx);
+ __br_stub_set_inode_modified(ctx);
+ }
+ (void)__br_stub_inode_sign_state(ctx, GF_FOP_FSETXATTR, fd);
+ }
+ UNLOCK(&fd->inode->lock);
+
+ if (goback) {
+ op_ret = op_errno = 0;
+ goto unwind;
+ }
+
+ ret = br_stub_versioning_prep(frame, this, fd, ctx);
+ if (ret)
+ goto unwind;
+ local = frame->local;
+
+ stub = fop_fsetxattr_cbk_stub(frame, br_stub_fsetxattr_resume, 0, 0, NULL);
+ if (!stub) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_STUB_ALLOC_FAILED,
+ "fsetxattr gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ goto cleanup_local;
+ }
+
+ (void)br_stub_perform_incversioning(this, frame, stub, fd, ctx);
+ return;
+
+cleanup_local:
+ br_stub_cleanup_local(local);
+ br_stub_dealloc_local(local);
+
+unwind:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT(fsetxattr, frame, op_ret, op_errno, NULL);
+}
+
+/**
+ * This function only handles bad file identification. Instead of checking in
+ * fops like open, readv, writev whether the object is bad or not by doing
+ * getxattr calls, it is better to catch it when the scrubber marks it as bad.
+ * So this callback is called only when the fsetxattr is sent by the scrubber
+ * to mark the object as bad.
+ */
+int
+br_stub_fsetxattr_bad_object_cbk(call_frame_t *frame, void *cookie,
+ xlator_t *this, int32_t op_ret,
+ int32_t op_errno, dict_t *xdata)
+{
+ br_stub_local_t *local = NULL;
+ int32_t ret = -1;
+
+ local = frame->local;
+ frame->local = NULL;
+
+ if (op_ret < 0)
+ goto unwind;
+
+    /*
+     * What to do if marking the object as bad fails? (i.e. the in-memory
+     * marking within the inode context. If we are here it means the fsetxattr
+     * fop has succeeded on disk and the bad object xattr has been set).
+     * We can return failure to the scrubber, but there is nothing the
+     * scrubber can do with it (it might assume that the on-disk setxattr
+     * itself has failed). The main purpose of this operation is to help
+     * identify the bad object by checking the inode context itself (thus
+     * avoiding the necessity of doing a getxattr fop on the disk).
+     *
+     * So as of now, success is returned even if the inode context set
+     * operation fails.
+     * In future, if there is any change in the policy which can handle this,
+     * then an appropriate response should be sent (i.e. success or error).
+     */
+ ret = br_stub_mark_object_bad(this, local->u.context.inode);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_BAD_OBJ_MARK_FAIL,
+ "gfid=%s", uuid_utoa(local->u.context.inode->gfid), NULL);
+
+ ret = br_stub_add(this, local->u.context.inode->gfid);
+
+unwind:
+ STACK_UNWIND_STRICT(fsetxattr, frame, op_ret, op_errno, xdata);
+ br_stub_cleanup_local(local);
+ br_stub_dealloc_local(local);
+ return 0;
+}
+
+static int32_t
+br_stub_handle_bad_object_key(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ dict_t *dict, int flags, dict_t *xdata)
+{
+ br_stub_local_t *local = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+
+ if (frame->root->pid != GF_CLIENT_PID_SCRUB) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_NON_SCRUB_BAD_OBJ_MARK,
+ "gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ goto unwind;
+ }
+
+ local = br_stub_alloc_local(this);
+ if (!local) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_ALLOC_MEM_FAILED,
+ "fsetxattr gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ br_stub_fill_local(local, NULL, fd, fd->inode, fd->inode->gfid,
+ BR_STUB_NO_VERSIONING, 0);
+ frame->local = local;
+
+ STACK_WIND(frame, br_stub_fsetxattr_bad_object_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags, xdata);
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT(fsetxattr, frame, op_ret, op_errno, NULL);
+ return 0;
+}
+
+/**
+ * As of now, versioning is done by the stub (though as a setxattr
+ * operation) as part of inode modification operations such as writev,
+ * truncate and ftruncate, and signing is done by BitD via a fsetxattr call.
+ * So any kind of setxattr coming on the versioning and the signing xattrs is
+ * not allowed (i.e. BITROT_CURRENT_VERSION_KEY and BITROT_SIGNING_VERSION_KEY).
+ * In future, if BitD/scrubber are allowed to change the versioning
+ * xattrs (though I cannot see a reason for it as of now), then the below
+ * function can be modified to block setxattr on the version keys only for
+ * applications.
+ *
+ * NOTE: BitD sends the sign request on the GLUSTERFS_SET_OBJECT_SIGNATURE key.
+ * BITROT_SIGNING_VERSION_KEY is the xattr used to save the signature.
+ *
+ */
+static int32_t
+br_stub_handle_internal_xattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ char *key)
+{
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_SET_INTERNAL_XATTR,
+ "setxattr key=%s", key, "inode-gfid=%s", uuid_utoa(fd->inode->gfid),
+ NULL);
+
+ STACK_UNWIND_STRICT(fsetxattr, frame, op_ret, op_errno, NULL);
+ return 0;
+}
+
+static void
+br_stub_dump_xattr(xlator_t *this, dict_t *dict, int *op_errno)
+{
+ char *format = "(%s:%s)";
+ char *dump = NULL;
+
+ dump = GF_CALLOC(1, BR_STUB_DUMP_STR_SIZE, gf_br_stub_mt_misc);
+ if (!dump) {
+ *op_errno = ENOMEM;
+ goto out;
+ }
+ dict_dump_to_str(dict, dump, BR_STUB_DUMP_STR_SIZE, format);
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_SET_INTERNAL_XATTR,
+ "fsetxattr dump=%s", dump, NULL);
+out:
+ if (dump) {
+ GF_FREE(dump);
+ }
+ return;
+}
+
+int
+br_stub_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *dict,
+ int flags, dict_t *xdata)
+{
+ int32_t ret = 0;
+ uint32_t val = 0;
+ br_isignature_t *sign = NULL;
+ br_stub_private_t *priv = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+
+ priv = this->private;
+
+ if ((frame->root->pid != GF_CLIENT_PID_BITD &&
+ frame->root->pid != GF_CLIENT_PID_SCRUB) &&
+ br_stub_internal_xattr(dict)) {
+ br_stub_dump_xattr(this, dict, &op_errno);
+ goto unwind;
+ }
+
+ if (!priv->do_versioning)
+ goto wind;
+
+ if (!IA_ISREG(fd->inode->ia_type))
+ goto wind;
+
+ /* object signature request */
+ ret = dict_get_bin(dict, GLUSTERFS_SET_OBJECT_SIGNATURE, (void **)&sign);
+ if (!ret) {
+ gf_msg_debug(this->name, 0, "got SIGNATURE request on %s",
+ uuid_utoa(fd->inode->gfid));
+ br_stub_handle_object_signature(frame, this, fd, dict, sign, xdata);
+ goto done;
+ }
+
+ /* signing xattr */
+ if (dict_get(dict, BITROT_SIGNING_VERSION_KEY)) {
+ br_stub_handle_internal_xattr(frame, this, fd,
+ BITROT_SIGNING_VERSION_KEY);
+ goto done;
+ }
+
+ /* version xattr */
+ if (dict_get(dict, BITROT_CURRENT_VERSION_KEY)) {
+ br_stub_handle_internal_xattr(frame, this, fd,
+ BITROT_CURRENT_VERSION_KEY);
+ goto done;
+ }
+
+ if (dict_get(dict, GLUSTERFS_GET_OBJECT_SIGNATURE)) {
+ br_stub_handle_internal_xattr(frame, this, fd,
+ GLUSTERFS_GET_OBJECT_SIGNATURE);
+ goto done;
+ }
+
+ /* object reopen request */
+ ret = dict_get_uint32(dict, BR_REOPEN_SIGN_HINT_KEY, &val);
+ if (!ret) {
+ br_stub_handle_object_reopen(frame, this, fd, val);
+ goto done;
+ }
+
+ /* handle bad object */
+ if (dict_get(dict, BITROT_OBJECT_BAD_KEY)) {
+ br_stub_handle_bad_object_key(frame, this, fd, dict, flags, xdata);
+ goto done;
+ }
+
+wind:
+ STACK_WIND(frame, default_fsetxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags, xdata);
+ return 0;
+
+unwind:
+ STACK_UNWIND_STRICT(fsetxattr, frame, op_ret, op_errno, NULL);
+
+done:
+ return 0;
+}
+
+/**
+ * Currently BitD and the scrubber use fsetxattr to either sign the object
+ * or to mark it as bad. Hence setxattr on any of those keys is denied directly
+ * without checking where the fop is coming from.
+ * Later, if BitD or the scrubber does a setxattr of those keys, then an
+ * appropriate check has to be added below.
+ */
+int
+br_stub_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
+ int flags, dict_t *xdata)
+{
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+
+ if (br_stub_internal_xattr(dict)) {
+ br_stub_dump_xattr(this, dict, &op_errno);
+ goto unwind;
+ }
+
+ STACK_WIND_TAIL(frame, FIRST_CHILD(this), FIRST_CHILD(this)->fops->setxattr,
+ loc, dict, flags, xdata);
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT(setxattr, frame, op_ret, op_errno, NULL);
+ return 0;
+}
+
+/** }}} */
+
+/** {{{ */
+
+/* {f}removexattr() */
+
+int32_t
+br_stub_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, dict_t *xdata)
+{
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+
+ if (!strcmp(BITROT_OBJECT_BAD_KEY, name) ||
+ !strcmp(BITROT_SIGNING_VERSION_KEY, name) ||
+ !strcmp(BITROT_CURRENT_VERSION_KEY, name)) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_REMOVE_INTERNAL_XATTR,
+ "name=%s", name, "file-path=%s", loc->path, NULL);
+ goto unwind;
+ }
+
+ STACK_WIND_TAIL(frame, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->removexattr, loc, name, xdata);
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT(removexattr, frame, op_ret, op_errno, NULL);
+ return 0;
+}
+
+int32_t
+br_stub_fremovexattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ const char *name, dict_t *xdata)
+{
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+
+ if (!strcmp(BITROT_OBJECT_BAD_KEY, name) ||
+ !strcmp(BITROT_SIGNING_VERSION_KEY, name) ||
+ !strcmp(BITROT_CURRENT_VERSION_KEY, name)) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_REMOVE_INTERNAL_XATTR,
+ "name=%s", name, "inode-gfid=%s", uuid_utoa(fd->inode->gfid),
+ NULL);
+ goto unwind;
+ }
+
+ STACK_WIND_TAIL(frame, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fremovexattr, fd, name, xdata);
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT(fremovexattr, frame, op_ret, op_errno, NULL);
+ return 0;
+}
+
+/** }}} */
+
+/** {{{ */
+
+/* {f}getxattr() */
+
+int
+br_stub_listxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, dict_t *xattr, dict_t *xdata)
+{
+ if (op_ret < 0)
+ goto unwind;
+
+ br_stub_remove_vxattrs(xattr, _gf_true);
+
+unwind:
+ STACK_UNWIND_STRICT(getxattr, frame, op_ret, op_errno, xattr, xdata);
+ return 0;
+}
+
+/**
+ * The ONE SHOT CRAWLER from BitD signs the objects that it encounters while
+ * crawling, if the object is identified as stale by the stub. The stub
+ * follows the below logic to decide whether an object is stale or not.
+ * If the ongoing version and the signed version match, then the object is not
+ * stale. Just return. Otherwise, if they do not match, it means one
+ * of the below things.
+ * 1) If the inode does not need writeback of the version and the sign state
+ *    is NORMAL, then some active i/o is going on the object. So skip it.
+ *    A notification will be sent to trigger the sign once the release is
+ *    received on the object.
+ * 2) If the inode does not need writeback of the version and the sign state
+ *    is either reopen wait or quick sign, then it means:
+ *    A) BitD restarted and it is not sure whether the object it encountered
+ *       while crawling is in its timer wheel or not. Since there is no way to
+ *       scan the timer wheel as of now, the ONE SHOT CRAWLER just goes ahead
+ *       and signs the object. Since the inode does not need writeback, the
+ *       version will not be incremented and the object will be signed
+ *       directly.
+ * 3) If the inode needs writeback, then it means the inode was forgotten after
+ *    the versioning and it has to be signed now.
+ *
+ * This is the algorithm followed:
+ * if (ongoing_version == signed_version); then
+ *     object_is_not_stale;
+ *     return;
+ * else; then
+ *     if (!inode_needs_writeback && inode_sign_state != NORMAL); then
+ *          object_is_stale;
+ *     if (inode_needs_writeback); then
+ *          object_is_stale;
+ *
+ * For the SCRUBBER, there is no need to check the sign state and inode
+ * writeback. If the on-disk ongoing version and the on-disk signed version
+ * do not match, then treat the object as stale.
+ */
+char
+br_stub_is_object_stale(xlator_t *this, call_frame_t *frame, inode_t *inode,
+ br_version_t *obuf, br_signature_t *sbuf)
+{
+ uint64_t ctx_addr = 0;
+ br_stub_inode_ctx_t *ctx = NULL;
+ int32_t ret = -1;
+ char stale = 0;
+
+ if (obuf->ongoingversion == sbuf->signedversion)
+ goto out;
+
+ if (frame->root->pid == GF_CLIENT_PID_SCRUB) {
+ stale = 1;
+ goto out;
+ }
+
+ ret = br_stub_get_inode_ctx(this, inode, &ctx_addr);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_GET_INODE_CONTEXT_FAILED,
+ "gfid=%s", uuid_utoa(inode->gfid), NULL);
+ goto out;
+ }
+
+ ctx = (br_stub_inode_ctx_t *)(long)ctx_addr;
+
+ LOCK(&inode->lock);
+ {
+ if ((!__br_stub_is_inode_dirty(ctx) &&
+ ctx->info_sign != BR_SIGN_NORMAL) ||
+ __br_stub_is_inode_dirty(ctx))
+ stale = 1;
+ }
+ UNLOCK(&inode->lock);
+
+out:
+ return stale;
+}
+
+int
+br_stub_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, dict_t *xattr, dict_t *xdata)
+{
+ int32_t ret = 0;
+ size_t totallen = 0;
+ size_t signaturelen = 0;
+ br_stub_private_t *priv = NULL;
+ br_version_t *obuf = NULL;
+ br_signature_t *sbuf = NULL;
+ br_isignature_out_t *sign = NULL;
+ br_vxattr_status_t status;
+ br_stub_local_t *local = NULL;
+ inode_t *inode = NULL;
+ gf_boolean_t bad_object = _gf_false;
+ gf_boolean_t ver_enabled = _gf_false;
+
+ BR_STUB_VER_ENABLED_IN_CALLPATH(frame, ver_enabled);
+ priv = this->private;
+
+ if (op_ret < 0)
+ goto unwind;
+ BR_STUB_VER_COND_GOTO(priv, (!ver_enabled), delkeys);
+
+ if (cookie != (void *)BR_STUB_REQUEST_COOKIE)
+ goto unwind;
+
+ local = frame->local;
+ frame->local = NULL;
+ if (!local) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto unwind;
+ }
+ inode = local->u.context.inode;
+
+ op_ret = -1;
+ status = br_version_xattr_state(xattr, &obuf, &sbuf, &bad_object);
+
+ op_errno = EIO;
+ if (bad_object)
+ goto delkeys;
+
+ op_errno = EINVAL;
+ if (status == BR_VXATTR_STATUS_INVALID)
+ goto delkeys;
+
+ op_errno = ENODATA;
+ if ((status == BR_VXATTR_STATUS_MISSING) ||
+ (status == BR_VXATTR_STATUS_UNSIGNED))
+ goto delkeys;
+
+ /**
+ * okay.. we have enough information to satisfy the request,
+ * namely: version and signing extended attribute. what's
+ * pending is the signature length -- that's figured out
+ * indirectly via the size of the _whole_ xattr and the
+ * on-disk signing xattr header size.
+ */
+ op_errno = EINVAL;
+ ret = dict_get_uint32(xattr, BITROT_SIGNING_XATTR_SIZE_KEY,
+ (uint32_t *)&signaturelen);
+ if (ret)
+ goto delkeys;
+
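+    /* the on-disk xattr size includes the br_signature_t header; strip it to
+     * get the length of the raw signature */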
+ signaturelen -= sizeof(br_signature_t);
+ totallen = sizeof(br_isignature_out_t) + signaturelen;
+
+ op_errno = ENOMEM;
+ sign = GF_CALLOC(1, totallen, gf_br_stub_mt_signature_t);
+ if (!sign)
+ goto delkeys;
+
+ sign->time[0] = obuf->timebuf[0];
+ sign->time[1] = obuf->timebuf[1];
+
+ /* Object's dirty state & current signed version */
+ sign->version = sbuf->signedversion;
+ sign->stale = br_stub_is_object_stale(this, frame, inode, obuf, sbuf);
+
+ /* Object's signature */
+ sign->signaturelen = signaturelen;
+ sign->signaturetype = sbuf->signaturetype;
+ (void)memcpy(sign->signature, sbuf->signature, signaturelen);
+
+ op_errno = EINVAL;
+ ret = dict_set_bin(xattr, GLUSTERFS_GET_OBJECT_SIGNATURE, (void *)sign,
+ totallen);
+ if (ret < 0) {
+ GF_FREE(sign);
+ goto delkeys;
+ }
+ op_errno = 0;
+ op_ret = totallen;
+
+delkeys:
+ br_stub_remove_vxattrs(xattr, _gf_true);
+
+unwind:
+ STACK_UNWIND_STRICT(getxattr, frame, op_ret, op_errno, xattr, xdata);
+ br_stub_cleanup_local(local);
+ br_stub_dealloc_local(local);
+ return 0;
+}
+
+static void
+br_stub_send_stub_init_time(call_frame_t *frame, xlator_t *this)
+{
+ int op_ret = 0;
+ int op_errno = 0;
+ dict_t *xattr = NULL;
+ br_stub_init_t stub = {
+ {
+ 0,
+ },
+ };
+ br_stub_private_t *priv = NULL;
+
+ priv = this->private;
+
+ xattr = dict_new();
+ if (!xattr) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ stub.timebuf[0] = priv->boot[0];
+ stub.timebuf[1] = priv->boot[1];
+ memcpy(stub.export, priv->export, strlen(priv->export) + 1);
+
+ op_ret = dict_set_static_bin(xattr, GLUSTERFS_GET_BR_STUB_INIT_TIME,
+ (void *)&stub, sizeof(br_stub_init_t));
+ if (op_ret < 0) {
+ op_errno = EINVAL;
+ goto unwind;
+ }
+
+ op_ret = sizeof(br_stub_init_t);
+
+unwind:
+ STACK_UNWIND_STRICT(getxattr, frame, op_ret, op_errno, xattr, NULL);
+
+ if (xattr)
+ dict_unref(xattr);
+}
+
+int
+br_stub_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, dict_t *xdata)
+{
+ void *cookie = NULL;
+ static uuid_t rootgfid = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1};
+ fop_getxattr_cbk_t cbk = br_stub_getxattr_cbk;
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+ br_stub_local_t *local = NULL;
+ br_stub_private_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO("bit-rot-stub", this, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, loc, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, loc->inode, unwind);
+
+ if (!name) {
+ cbk = br_stub_listxattr_cbk;
+ goto wind;
+ }
+
+ if (br_stub_is_internal_xattr(name))
+ goto unwind;
+
+ priv = this->private;
+ BR_STUB_VER_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+    /**
+     * If the xattr is node-uuid and the inode is marked bad, return EIO.
+     * Returning EIO results in AFR choosing the correct node-uuid
+     * corresponding to the subvolume where the good copy of the
+     * file resides.
+     */
+ if (IA_ISREG(loc->inode->ia_type) && XATTR_IS_NODE_UUID(name) &&
+ br_stub_check_bad_object(this, loc->inode, &op_ret, &op_errno)) {
+ goto unwind;
+ }
+
+ /**
+ * this special extended attribute is allowed only on root
+ */
+ if (name &&
+ (strncmp(name, GLUSTERFS_GET_BR_STUB_INIT_TIME,
+ sizeof(GLUSTERFS_GET_BR_STUB_INIT_TIME) - 1) == 0) &&
+ ((gf_uuid_compare(loc->gfid, rootgfid) == 0) ||
+ (gf_uuid_compare(loc->inode->gfid, rootgfid) == 0))) {
+ BR_STUB_RESET_LOCAL_NULL(frame);
+ br_stub_send_stub_init_time(frame, this);
+ return 0;
+ }
+
+ if (!IA_ISREG(loc->inode->ia_type))
+ goto wind;
+
+ if (name && (strncmp(name, GLUSTERFS_GET_OBJECT_SIGNATURE,
+ sizeof(GLUSTERFS_GET_OBJECT_SIGNATURE) - 1) == 0)) {
+ cookie = (void *)BR_STUB_REQUEST_COOKIE;
+
+ local = br_stub_alloc_local(this);
+ if (!local) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ br_stub_fill_local(local, NULL, NULL, loc->inode, loc->inode->gfid,
+ BR_STUB_NO_VERSIONING, 0);
+ frame->local = local;
+ }
+
+wind:
+ STACK_WIND_COOKIE(frame, cbk, cookie, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->getxattr, loc, name, xdata);
+ return 0;
+unwind:
+ BR_STUB_RESET_LOCAL_NULL(frame);
+ STACK_UNWIND_STRICT(getxattr, frame, op_ret, op_errno, NULL, NULL);
+ return 0;
+}
+
+int
+br_stub_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ const char *name, dict_t *xdata)
+{
+ void *cookie = NULL;
+ static uuid_t rootgfid = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1};
+ fop_fgetxattr_cbk_t cbk = br_stub_getxattr_cbk;
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+ br_stub_local_t *local = NULL;
+ br_stub_private_t *priv = NULL;
+
+ priv = this->private;
+
+ if (!name) {
+ cbk = br_stub_listxattr_cbk;
+ goto wind;
+ }
+
+ if (br_stub_is_internal_xattr(name))
+ goto unwind;
+
+ BR_STUB_VER_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+    /**
+     * If the xattr is node-uuid and the inode is marked bad, return EIO.
+     * Returning EIO results in AFR choosing the correct node-uuid
+     * corresponding to the subvolume where the good copy of the
+     * file resides.
+     */
+ if (IA_ISREG(fd->inode->ia_type) && XATTR_IS_NODE_UUID(name) &&
+ br_stub_check_bad_object(this, fd->inode, &op_ret, &op_errno)) {
+ goto unwind;
+ }
+
+ /**
+ * this special extended attribute is allowed only on root
+ */
+ if (name &&
+ (strncmp(name, GLUSTERFS_GET_BR_STUB_INIT_TIME,
+ sizeof(GLUSTERFS_GET_BR_STUB_INIT_TIME) - 1) == 0) &&
+ (gf_uuid_compare(fd->inode->gfid, rootgfid) == 0)) {
+ BR_STUB_RESET_LOCAL_NULL(frame);
+ br_stub_send_stub_init_time(frame, this);
+ return 0;
+ }
+
+ if (!IA_ISREG(fd->inode->ia_type))
+ goto wind;
+
+ if (name && (strncmp(name, GLUSTERFS_GET_OBJECT_SIGNATURE,
+ sizeof(GLUSTERFS_GET_OBJECT_SIGNATURE) - 1) == 0)) {
+ cookie = (void *)BR_STUB_REQUEST_COOKIE;
+
+ local = br_stub_alloc_local(this);
+ if (!local) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ br_stub_fill_local(local, NULL, fd, fd->inode, fd->inode->gfid,
+ BR_STUB_NO_VERSIONING, 0);
+ frame->local = local;
+ }
+
+wind:
+ STACK_WIND_COOKIE(frame, cbk, cookie, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fgetxattr, fd, name, xdata);
+ return 0;
+unwind:
+ BR_STUB_RESET_LOCAL_NULL(frame);
+ STACK_UNWIND_STRICT(fgetxattr, frame, op_ret, op_errno, NULL, NULL);
+ return 0;
+}
+
+int32_t
+br_stub_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata)
+{
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+ int32_t ret = -1;
+ br_stub_private_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO("bit-rot-stub", this, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, frame, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, fd, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, fd->inode, unwind);
+
+ priv = this->private;
+ if (!priv->do_versioning)
+ goto wind;
+
+ ret = br_stub_check_bad_object(this, fd->inode, &op_ret, &op_errno);
+ if (ret)
+ goto unwind;
+
+wind:
+ STACK_WIND_TAIL(frame, FIRST_CHILD(this), FIRST_CHILD(this)->fops->readv,
+ fd, size, offset, flags, xdata);
+ return 0;
+
+unwind:
+ STACK_UNWIND_STRICT(readv, frame, op_ret, op_errno, NULL, 0, NULL, NULL,
+ NULL);
+ return 0;
+}
+
+/**
+ * The first write response on the first fd in the list of fds will set
+ * the flag to indicate that the inode is modified. Subsequent write
+ * responses coming on either the first fd or some other fd will not change
+ * the flag. The inode-modified flag is unset only upon release of all the
+ * fds.
+ */
+int32_t
+br_stub_writev_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ int32_t ret = 0;
+ br_stub_local_t *local = NULL;
+
+ local = frame->local;
+ frame->local = NULL;
+
+ if (op_ret < 0)
+ goto unwind;
+
+ ret = br_stub_mark_inode_modified(this, local);
+ if (ret) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ }
+
+unwind:
+ STACK_UNWIND_STRICT(writev, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+
+ br_stub_cleanup_local(local);
+ br_stub_dealloc_local(local);
+
+ return 0;
+}
+
+int32_t
+br_stub_writev_resume(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iovec *vector, int32_t count, off_t offset,
+ uint32_t flags, struct iobref *iobref, dict_t *xdata)
+{
+ STACK_WIND(frame, br_stub_writev_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->writev, fd, vector, count, offset,
+ flags, iobref, xdata);
+ return 0;
+}
+
+/**
+ * This is probably the most crucial part of the whole versioning scheme.
+ * There's absolutely no differentiation as such between an anonymous fd
+ * and a regular fd except the fd context initialization. Object versioning
+ * is performed when the inode is dirty. Parallel write operations are
+ * nothing special: each write performs object versioning followed by marking
+ * the inode as non-dirty (synced). This is followed by the actual operation
+ * (writev() in this case) which, on success, marks the inode as modified.
+ * This prevents signing of objects that have not been modified.
+ */
+int32_t
+br_stub_writev(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iovec *vector, int32_t count, off_t offset,
+ uint32_t flags, struct iobref *iobref, dict_t *xdata)
+{
+ call_stub_t *stub = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+ gf_boolean_t inc_version = _gf_false;
+ gf_boolean_t modified = _gf_false;
+ br_stub_inode_ctx_t *ctx = NULL;
+ int32_t ret = -1;
+ fop_writev_cbk_t cbk = default_writev_cbk;
+ br_stub_local_t *local = NULL;
+ br_stub_private_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO("bit-rot-stub", this, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, frame, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, fd, unwind);
+
+ priv = this->private;
+ if (!priv->do_versioning)
+ goto wind;
+
+ ret = br_stub_need_versioning(this, fd, &inc_version, &modified, &ctx);
+ if (ret)
+ goto unwind;
+
+ ret = br_stub_check_bad_object(this, fd->inode, &op_ret, &op_errno);
+ if (ret)
+ goto unwind;
+
+ /**
+ * The inode is not dirty and also witnessed at least one successful
+ * modification operation. Therefore, subsequent operations need not
+ * perform any special tracking.
+ */
+ if (!inc_version && modified)
+ goto wind;
+
+ /**
+ * okay.. so, either the inode needs versioning or the modification
+ * needs to be tracked. ->cbk is set to the appropriate callback
+ * routine for this.
+ * NOTE: ->local needs to be deallocated on failures from here on.
+ */
+ ret = br_stub_versioning_prep(frame, this, fd, ctx);
+ if (ret)
+ goto unwind;
+
+ local = frame->local;
+ if (!inc_version) {
+ br_stub_fill_local(local, NULL, fd, fd->inode, fd->inode->gfid,
+ BR_STUB_NO_VERSIONING, 0);
+ cbk = br_stub_writev_cbk;
+ goto wind;
+ }
+
+ stub = fop_writev_stub(frame, br_stub_writev_resume, fd, vector, count,
+ offset, flags, iobref, xdata);
+
+ if (!stub) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_STUB_ALLOC_FAILED,
+ "write gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ goto cleanup_local;
+ }
+
+ /* Perform Versioning */
+ return br_stub_perform_incversioning(this, frame, stub, fd, ctx);
+
+wind:
+ STACK_WIND(frame, cbk, FIRST_CHILD(this), FIRST_CHILD(this)->fops->writev,
+ fd, vector, count, offset, flags, iobref, xdata);
+ return 0;
+
+cleanup_local:
+ br_stub_cleanup_local(local);
+ br_stub_dealloc_local(local);
+
+unwind:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT(writev, frame, op_ret, op_errno, NULL, NULL, NULL);
+
+ return 0;
+}
+
+int32_t
+br_stub_ftruncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ int32_t ret = -1;
+ br_stub_local_t *local = NULL;
+
+ local = frame->local;
+ frame->local = NULL;
+
+ if (op_ret < 0)
+ goto unwind;
+
+ ret = br_stub_mark_inode_modified(this, local);
+ if (ret) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ }
+
+unwind:
+ STACK_UNWIND_STRICT(ftruncate, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+
+ br_stub_cleanup_local(local);
+ br_stub_dealloc_local(local);
+
+ return 0;
+}
+
+int32_t
+br_stub_ftruncate_resume(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ off_t offset, dict_t *xdata)
+{
+ STACK_WIND(frame, br_stub_ftruncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->ftruncate, fd, offset, xdata);
+ return 0;
+}
+
+/* c.f. br_stub_writev() for explanation */
+int32_t
+br_stub_ftruncate(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ dict_t *xdata)
+{
+ br_stub_local_t *local = NULL;
+ call_stub_t *stub = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+ gf_boolean_t inc_version = _gf_false;
+ gf_boolean_t modified = _gf_false;
+ br_stub_inode_ctx_t *ctx = NULL;
+ int32_t ret = -1;
+ fop_ftruncate_cbk_t cbk = default_ftruncate_cbk;
+ br_stub_private_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO("bit-rot-stub", this, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, frame, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, fd, unwind);
+
+ priv = this->private;
+ if (!priv->do_versioning)
+ goto wind;
+
+ ret = br_stub_need_versioning(this, fd, &inc_version, &modified, &ctx);
+ if (ret)
+ goto unwind;
+
+ ret = br_stub_check_bad_object(this, fd->inode, &op_ret, &op_errno);
+ if (ret)
+ goto unwind;
+
+ if (!inc_version && modified)
+ goto wind;
+
+ ret = br_stub_versioning_prep(frame, this, fd, ctx);
+ if (ret)
+ goto unwind;
+
+ local = frame->local;
+ if (!inc_version) {
+ br_stub_fill_local(local, NULL, fd, fd->inode, fd->inode->gfid,
+ BR_STUB_NO_VERSIONING, 0);
+ cbk = br_stub_ftruncate_cbk;
+ goto wind;
+ }
+
+ stub = fop_ftruncate_stub(frame, br_stub_ftruncate_resume, fd, offset,
+ xdata);
+ if (!stub) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_STUB_ALLOC_FAILED,
+ "ftruncate gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ goto cleanup_local;
+ }
+
+ return br_stub_perform_incversioning(this, frame, stub, fd, ctx);
+
+wind:
+ STACK_WIND(frame, cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->ftruncate, fd, offset, xdata);
+ return 0;
+
+cleanup_local:
+ br_stub_cleanup_local(local);
+ br_stub_dealloc_local(local);
+
+unwind:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT(ftruncate, frame, op_ret, op_errno, NULL, NULL, NULL);
+
+ return 0;
+}
+
+int32_t
+br_stub_truncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ int32_t ret = 0;
+ br_stub_local_t *local = NULL;
+
+ local = frame->local;
+ frame->local = NULL;
+
+ if (op_ret < 0)
+ goto unwind;
+
+ ret = br_stub_mark_inode_modified(this, local);
+ if (ret) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ }
+
+unwind:
+ STACK_UNWIND_STRICT(truncate, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+ br_stub_cleanup_local(local);
+ br_stub_dealloc_local(local);
+ return 0;
+}
+
+int32_t
+br_stub_truncate_resume(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ off_t offset, dict_t *xdata)
+{
+ br_stub_local_t *local = frame->local;
+
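+    /* versioning is complete; drop a reference on the anonymous fd taken in
+     * br_stub_truncate() before winding the actual truncate */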
+ fd_unref(local->u.context.fd);
+ STACK_WIND(frame, br_stub_ftruncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->truncate, loc, offset, xdata);
+ return 0;
+}
+
+/**
+ * Bit-rot-stub depends heavily on fd based operations for doing
+ * versioning and sending notifications. It starts tracking the operation
+ * upon getting the first fd based modify operation by doing versioning, and
+ * sends a notification when the last fd through which the inode was modified
+ * is released.
+ * But for truncate there is no fd and hence it becomes difficult to do
+ * the versioning and send the notification. This is handled by doing
+ * versioning on an anonymous fd. The fd will be valid till the completion of
+ * the truncate call. This guarantees that the release on this anonymous fd
+ * happens after the truncate call and the notification is sent after the
+ * truncate call.
+ *
+ * c.f. br_writev_cbk() for explanation
+ */
+int32_t
+br_stub_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset,
+ dict_t *xdata)
+{
+ br_stub_local_t *local = NULL;
+ call_stub_t *stub = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+ gf_boolean_t inc_version = _gf_false;
+ gf_boolean_t modified = _gf_false;
+ br_stub_inode_ctx_t *ctx = NULL;
+ int32_t ret = -1;
+ fd_t *fd = NULL;
+ fop_truncate_cbk_t cbk = default_truncate_cbk;
+ br_stub_private_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO("bit-rot-stub", this, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, frame, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, loc, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, loc->inode, unwind);
+
+ priv = this->private;
+ if (!priv->do_versioning)
+ goto wind;
+
+ fd = fd_anonymous(loc->inode);
+ if (!fd) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_CREATE_ANONYMOUS_FD_FAILED,
+ "inode-gfid=%s", uuid_utoa(loc->inode->gfid), NULL);
+ goto unwind;
+ }
+
+ ret = br_stub_need_versioning(this, fd, &inc_version, &modified, &ctx);
+ if (ret)
+ goto cleanup_fd;
+
+ ret = br_stub_check_bad_object(this, fd->inode, &op_ret, &op_errno);
+ if (ret)
+ goto unwind;
+
+ if (!inc_version && modified)
+ goto wind;
+
+ ret = br_stub_versioning_prep(frame, this, fd, ctx);
+ if (ret)
+ goto cleanup_fd;
+
+ local = frame->local;
+ if (!inc_version) {
+ br_stub_fill_local(local, NULL, fd, fd->inode, fd->inode->gfid,
+ BR_STUB_NO_VERSIONING, 0);
+ cbk = br_stub_truncate_cbk;
+ goto wind;
+ }
+
+ stub = fop_truncate_stub(frame, br_stub_truncate_resume, loc, offset,
+ xdata);
+ if (!stub) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_STUB_ALLOC_FAILED,
+ "truncate gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ goto cleanup_local;
+ }
+
+ return br_stub_perform_incversioning(this, frame, stub, fd, ctx);
+
+wind:
+ STACK_WIND(frame, cbk, FIRST_CHILD(this), FIRST_CHILD(this)->fops->truncate,
+ loc, offset, xdata);
+ if (fd)
+ fd_unref(fd);
+ return 0;
+
+cleanup_local:
+ br_stub_cleanup_local(local);
+ br_stub_dealloc_local(local);
+cleanup_fd:
+ fd_unref(fd);
+unwind:
+ frame->local = NULL;
+ STACK_UNWIND_STRICT(truncate, frame, op_ret, op_errno, NULL, NULL, NULL);
+
+ return 0;
+}
+
+/** }}} */
+
+/** {{{ */
+
+/* open() */
+
+/**
+ * It's probably worth mentioning a bit about why some of the housekeeping
+ * work is done in the open() call path, rather than the callback path.
+ * Two (or more) open()'s in parallel can race and lead to a situation
+ * where a release() gets triggered (possibly after a series of write()
+ * calls) when *other* open()'s have still not reached the callback path,
+ * thereby leaving an active fd on an inode that is in the process of getting
+ * signed with the current version.
+ *
+ * Maintaining the fd list in the call path ensures that a release() would
+ * not be triggered if an open() call races ahead (followed by a close()),
+ * thereby finding a non-empty fd list.
+ */
+
+int
+br_stub_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ fd_t *fd, dict_t *xdata)
+{
+ int32_t ret = -1;
+ br_stub_inode_ctx_t *ctx = NULL;
+ uint64_t ctx_addr = 0;
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+ br_stub_private_t *priv = NULL;
+ unsigned long version = BITROT_DEFAULT_CURRENT_VERSION;
+
+ GF_VALIDATE_OR_GOTO("bit-rot-stub", this, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, loc, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, fd, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, fd->inode, unwind);
+
+ priv = this->private;
+
+ if (!priv->do_versioning)
+ goto wind;
+
+ ret = br_stub_get_inode_ctx(this, fd->inode, &ctx_addr);
+ if (ret) {
+ ret = br_stub_init_inode_versions(this, fd, fd->inode, version,
+ _gf_true, _gf_false, &ctx_addr);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ BRS_MSG_GET_INODE_CONTEXT_FAILED, "path=%s", loc->path,
+ "gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ goto unwind;
+ }
+ }
+
+ ctx = (br_stub_inode_ctx_t *)(long)ctx_addr;
+
+ ret = br_stub_check_bad_object(this, fd->inode, &op_ret, &op_errno);
+ if (ret)
+ goto unwind;
+
+ if (frame->root->pid == GF_CLIENT_PID_SCRUB)
+ goto wind;
+
+ if (flags == O_RDONLY)
+ goto wind;
+
+ ret = br_stub_add_fd_to_inode(this, fd, ctx);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_ADD_FD_TO_LIST_FAILED,
+ "gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ goto unwind;
+ }
+
+wind:
+ STACK_WIND(frame, default_open_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->open, loc, flags, fd, xdata);
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT(open, frame, op_ret, op_errno, NULL, NULL);
+ return 0;
+}
+
+/** }}} */
+
+/** {{{ */
+
+/* creat() */
+
+/**
+ * This routine registers a release callback for the given fd and adds the
+ * fd to the inode context fd tracking list.
+ */
+int32_t
+br_stub_add_fd_to_inode(xlator_t *this, fd_t *fd, br_stub_inode_ctx_t *ctx)
+{
+ int32_t ret = -1;
+ br_stub_fd_t *br_stub_fd = NULL;
+
+ ret = br_stub_require_release_call(this, fd, &br_stub_fd);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_SET_FD_CONTEXT_FAILED,
+ "gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
+ goto out;
+ }
+
+ LOCK(&fd->inode->lock);
+ {
+ list_add_tail(&ctx->fd_list, &br_stub_fd->list);
+ }
+ UNLOCK(&fd->inode->lock);
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int
+br_stub_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, fd_t *fd, inode_t *inode,
+ struct iatt *stbuf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ int32_t ret = 0;
+ uint64_t ctx_addr = 0;
+ br_stub_inode_ctx_t *ctx = NULL;
+ unsigned long version = BITROT_DEFAULT_CURRENT_VERSION;
+ br_stub_private_t *priv = NULL;
+
+ priv = this->private;
+
+ if (op_ret < 0)
+ goto unwind;
+
+ if (!priv->do_versioning)
+ goto unwind;
+
+ ret = br_stub_get_inode_ctx(this, fd->inode, &ctx_addr);
+ if (ret < 0) {
+ ret = br_stub_init_inode_versions(this, fd, inode, version, _gf_true,
+ _gf_false, &ctx_addr);
+ if (ret) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ }
+ } else {
+ ctx = (br_stub_inode_ctx_t *)(long)ctx_addr;
+ ret = br_stub_add_fd_to_inode(this, fd, ctx);
+ }
+
+unwind:
+ STACK_UNWIND_STRICT(create, frame, op_ret, op_errno, fd, inode, stbuf,
+ preparent, postparent, xdata);
+ return 0;
+}
+
+int
+br_stub_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ mode_t mode, mode_t umask, fd_t *fd, dict_t *xdata)
+{
+ GF_VALIDATE_OR_GOTO("bit-rot-stub", this, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, loc, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, loc->inode, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, fd, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, fd->inode, unwind);
+
+ STACK_WIND(frame, br_stub_create_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->create, loc, flags, mode, umask, fd,
+ xdata);
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT(create, frame, -1, EINVAL, NULL, NULL, NULL, NULL, NULL,
+ NULL);
+ return 0;
+}
+
+int
+br_stub_mknod_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, inode_t *inode, struct iatt *stbuf,
+ struct iatt *preparent, struct iatt *postparent,
+ dict_t *xdata)
+{
+ int32_t ret = -1;
+ unsigned long version = BITROT_DEFAULT_CURRENT_VERSION;
+ br_stub_private_t *priv = NULL;
+
+ priv = this->private;
+
+ if (op_ret < 0)
+ goto unwind;
+
+ if (!priv->do_versioning)
+ goto unwind;
+
+ ret = br_stub_init_inode_versions(this, NULL, inode, version, _gf_true,
+ _gf_false, NULL);
+    /**
+     * Like lookup, if init_inode_versions fails, return EINVAL.
+     */
+ if (ret) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ }
+
+unwind:
+ STACK_UNWIND_STRICT(mknod, frame, op_ret, op_errno, inode, stbuf, preparent,
+ postparent, xdata);
+ return 0;
+}
+
+int
+br_stub_mknod(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ dev_t dev, mode_t umask, dict_t *xdata)
+{
+ GF_VALIDATE_OR_GOTO("bit-rot-stub", this, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, loc, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, loc->inode, unwind);
+
+ STACK_WIND(frame, br_stub_mknod_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mknod, loc, mode, dev, umask, xdata);
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT(mknod, frame, -1, EINVAL, NULL, NULL, NULL, NULL, NULL);
+ return 0;
+}
+
+/** }}} */
+
+/**
+ * As of now, only lookup searches for the bad object xattr and marks the
+ * object as bad in its inode context if the xattr is present. But there
+ * is a possibility that, at the time of the lookup, the object was not
+ * marked bad (i.e. the bad object xattr was not set) and it was marked
+ * bad later. In that case, when a fop such as open, readv or writev comes
+ * on the object, the fop is sent downwards instead of being failed with
+ * an error.
+ * One solution is to do a getxattr for the following fops:
+ * lookup, readdirp, open, readv, writev.
+ * But doing a getxattr for each of those fops might be costly.
+ * So the method followed instead is to catch the bad-file marking done by
+ * the scrubber and record that information in the object's inode context.
+ * This way getxattr calls can be avoided and bad objects are caught
+ * instantly. Fetching the xattr is needed only in lookup, when there is a
+ * brick restart or an inode forget.
+ *
+ * If the dict (@xattr) is NULL, then how should that be handled? Fail the
+ * lookup operation? Or let it continue with the version being initialized
+ * to BITROT_DEFAULT_CURRENT_VERSION. But what if the version was different
+ * on disk (and a valid signature was there too), yet posix failed to
+ * successfully allocate the dict? Posix does not treat callback xdata
+ * creation failure as a lookup failure.
+ */
+static int32_t
+br_stub_lookup_version(xlator_t *this, uuid_t gfid, inode_t *inode,
+ dict_t *xattr)
+{
+ unsigned long version = 0;
+ br_version_t *obuf = NULL;
+ br_signature_t *sbuf = NULL;
+ br_vxattr_status_t status;
+ gf_boolean_t bad_object = _gf_false;
+
+ /**
+ * versioning xattrs were requested from POSIX. if available, figure
+ * out the correct version to use in the inode context (start with
+ * the default version if unavailable). As of now versions are not
+ * persisted on-disk. The inode is marked dirty, so that the first
+ * operation (such as write(), etc..) triggers synchronization to
+ * disk.
+ */
+ status = br_version_xattr_state(xattr, &obuf, &sbuf, &bad_object);
+ version = ((status == BR_VXATTR_STATUS_FULL) ||
+ (status == BR_VXATTR_STATUS_UNSIGNED))
+ ? obuf->ongoingversion
+ : BITROT_DEFAULT_CURRENT_VERSION;
+
+    /**
+     * If the signature is present but the version is not, the status is
+     * treated as INVALID. In that case, the inode context should not be
+     * initialized with a wrong version etc.
+     */
+ if (status == BR_VXATTR_STATUS_INVALID)
+ return -1;
+
+ return br_stub_init_inode_versions(this, NULL, inode, version, _gf_true,
+ bad_object, NULL);
+}
+
+/** {{{ */
+
+int32_t
+br_stub_opendir(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd,
+ dict_t *xdata)
+{
+ br_stub_private_t *priv = NULL;
+ br_stub_fd_t *fd_ctx = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+
+ priv = this->private;
+ if (gf_uuid_compare(fd->inode->gfid, priv->bad_object_dir_gfid))
+ goto normal;
+
+ fd_ctx = br_stub_fd_new();
+ if (!fd_ctx) {
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ fd_ctx->bad_object.dir_eof = -1;
+ fd_ctx->bad_object.dir = sys_opendir(priv->stub_basepath);
+ if (!fd_ctx->bad_object.dir) {
+ op_errno = errno;
+ goto err_freectx;
+ }
+
+ op_ret = br_stub_fd_ctx_set(this, fd, fd_ctx);
+ if (!op_ret)
+ goto unwind;
+
+ sys_closedir(fd_ctx->bad_object.dir);
+
+err_freectx:
+ GF_FREE(fd_ctx);
+unwind:
+ STACK_UNWIND_STRICT(opendir, frame, op_ret, op_errno, fd, NULL);
+ return 0;
+
+normal:
+ STACK_WIND(frame, default_opendir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->opendir, loc, fd, xdata);
+ return 0;
+}
+
+int32_t
+br_stub_readdir(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t off, dict_t *xdata)
+{
+ call_stub_t *stub = NULL;
+ br_stub_private_t *priv = NULL;
+
+ priv = this->private;
+ if (!priv->do_versioning)
+ goto out;
+
+ if (gf_uuid_compare(fd->inode->gfid, priv->bad_object_dir_gfid))
+ goto out;
+ stub = fop_readdir_stub(frame, br_stub_readdir_wrapper, fd, size, off,
+ xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(readdir, frame, -1, ENOMEM, NULL, NULL);
+ return 0;
+ }
+ br_stub_worker_enqueue(this, stub);
+ return 0;
+out:
+ STACK_WIND(frame, default_readdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readdir, fd, size, off, xdata);
+ return 0;
+}
+
+int
+br_stub_readdirp_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, gf_dirent_t *entries,
+ dict_t *dict)
+{
+ int32_t ret = 0;
+ uint64_t ctxaddr = 0;
+ gf_dirent_t *entry = NULL;
+ br_stub_private_t *priv = NULL;
+ gf_boolean_t ver_enabled = _gf_false;
+
+ BR_STUB_VER_ENABLED_IN_CALLPATH(frame, ver_enabled);
+ priv = this->private;
+ BR_STUB_VER_COND_GOTO(priv, (!ver_enabled), unwind);
+
+ if (op_ret < 0)
+ goto unwind;
+
+ list_for_each_entry(entry, &entries->list, list)
+ {
+ if ((strcmp(entry->d_name, ".") == 0) ||
+ (strcmp(entry->d_name, "..") == 0))
+ continue;
+
+ if (!IA_ISREG(entry->d_stat.ia_type))
+ continue;
+
+        /*
+         * Readdirp is, for the most part, a bulk lookup of all the
+         * entries in the directory being read. Ideally, each entry
+         * should be handled the same way as in the lookup callback.
+         * For now the behaviour is kept as it has always been.
+         * (This comment was added as part of a change that introduced
+         * a true/false flag to br_stub_remove_vxattrs() indicating
+         * whether the bad-object xattr should be removed from
+         * entry->dict or not. Until that change, br_stub_remove_vxattrs()
+         * simply removed all the xattrs associated with bit-rot-stub,
+         * such as version, bad-object and signature. But there are
+         * scenarios where only the bad-object xattr should be sent and
+         * not the others; hence the flag.)
+         * Passing _gf_true here keeps the behaviour functionally the
+         * same as before: all the stub related xattrs are removed.
+         */
+ ret = br_stub_get_inode_ctx(this, entry->inode, &ctxaddr);
+ if (ret < 0)
+ ctxaddr = 0;
+ if (ctxaddr) { /* already has the context */
+ br_stub_remove_vxattrs(entry->dict, _gf_true);
+ continue;
+ }
+
+ ret = br_stub_lookup_version(this, entry->inode->gfid, entry->inode,
+ entry->dict);
+ br_stub_remove_vxattrs(entry->dict, _gf_true);
+ if (ret) {
+ /**
+ * there's no per-file granularity support in case of
+ * failure. let's fail the entire request for now..
+ */
+ break;
+ }
+ }
+
+ if (ret) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ }
+
+unwind:
+ STACK_UNWIND_STRICT(readdirp, frame, op_ret, op_errno, entries, dict);
+
+ return 0;
+}
+
+int
+br_stub_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, dict_t *dict)
+{
+ int32_t ret = -1;
+ int op_errno = 0;
+ gf_boolean_t xref = _gf_false;
+ br_stub_private_t *priv = NULL;
+
+ priv = this->private;
+ BR_STUB_VER_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ op_errno = ENOMEM;
+ if (!dict) {
+ dict = dict_new();
+ if (!dict)
+ goto unwind;
+ } else {
+ dict = dict_ref(dict);
+ }
+
+ xref = _gf_true;
+
+ op_errno = EINVAL;
+ ret = dict_set_uint32(dict, BITROT_CURRENT_VERSION_KEY, 0);
+ if (ret)
+ goto unwind;
+ ret = dict_set_uint32(dict, BITROT_SIGNING_VERSION_KEY, 0);
+ if (ret)
+ goto unwind;
+ ret = dict_set_uint32(dict, BITROT_OBJECT_BAD_KEY, 0);
+ if (ret)
+ goto unwind;
+
+wind:
+ STACK_WIND(frame, br_stub_readdirp_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readdirp, fd, size, offset, dict);
+ goto unref_dict;
+
+unwind:
+ if (frame->local == (void *)0x1)
+ frame->local = NULL;
+ STACK_UNWIND_STRICT(readdirp, frame, -1, op_errno, NULL, NULL);
+ return 0;
+
+unref_dict:
+ if (xref)
+ dict_unref(dict);
+ return 0;
+}
+
+/** }}} */
+
+/** {{{ */
+
+/* lookup() */
+
+/**
+ * This function mainly handles the ENOENT error for bad objects. Though
+ * br_stub_forget() handles removal of the bad object's link from the
+ * quarantine directory, it is better to handle it in lookup as well: a
+ * failed lookup on a bad object with ENOENT triggers deletion of the bad
+ * object's link from the quarantine directory. So whichever comes first,
+ * forget() or lookup(), takes care of removing the link.
+ */
+void
+br_stub_handle_lookup_error(xlator_t *this, inode_t *inode, int32_t op_errno)
+{
+ int32_t ret = -1;
+ uint64_t ctx_addr = 0;
+ br_stub_inode_ctx_t *ctx = NULL;
+
+ if (op_errno != ENOENT)
+ goto out;
+
+ if (!inode_is_linked(inode))
+ goto out;
+
+ ret = br_stub_get_inode_ctx(this, inode, &ctx_addr);
+ if (ret)
+ goto out;
+
+ ctx = (br_stub_inode_ctx_t *)(long)ctx_addr;
+
+ LOCK(&inode->lock);
+ {
+ if (__br_stub_is_bad_object(ctx))
+ (void)br_stub_del(this, inode->gfid);
+ }
+ UNLOCK(&inode->lock);
+
+ if (__br_stub_is_bad_object(ctx)) {
+        /* The file is not present; it might have been deleted for
+         * recovery. Delete the bitrot inode context.
+         */
+ ctx_addr = 0;
+ inode_ctx_del(inode, this, &ctx_addr);
+ if (ctx_addr) {
+ ctx = (br_stub_inode_ctx_t *)(long)ctx_addr;
+ GF_FREE(ctx);
+ }
+ }
+
+out:
+ return;
+}
+
+int
+br_stub_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, inode_t *inode, struct iatt *stbuf,
+ dict_t *xattr, struct iatt *postparent)
+{
+ int32_t ret = 0;
+ br_stub_private_t *priv = NULL;
+ gf_boolean_t ver_enabled = _gf_false;
+ gf_boolean_t remove_bad_file_marker = _gf_true;
+
+ BR_STUB_VER_ENABLED_IN_CALLPATH(frame, ver_enabled);
+ priv = this->private;
+
+ if (op_ret < 0) {
+ (void)br_stub_handle_lookup_error(this, inode, op_errno);
+
+ /*
+ * If the lookup error is not ENOENT, then it is better
+ * to send the bad file marker to the higher layer (if
+ * it has been set)
+ */
+ if (op_errno != ENOENT)
+ remove_bad_file_marker = _gf_false;
+ goto delkey;
+ }
+
+ BR_STUB_VER_COND_GOTO(priv, (!ver_enabled), delkey);
+
+ if (!IA_ISREG(stbuf->ia_type))
+ goto unwind;
+
+    /**
+     * If the object is bad, then the "bad inode" marker has to be sent back
+     * in the response, for revalidated lookups as well. Some xlators such
+     * as quick-read might cache the data on a revalidated lookup, as a
+     * fresh lookup would anyway have sent the "bad inode" marker.
+     * In general, send the bad inode marker for every lookup operation on
+     * a bad object.
+     */
+ if (cookie != (void *)BR_STUB_REQUEST_COOKIE) {
+ ret = br_stub_mark_xdata_bad_object(this, inode, xattr);
+ if (ret) {
+ op_ret = -1;
+ op_errno = EIO;
+            /*
+             * This flag ensures that at the label @delkey below, the
+             * bad file marker is not removed from the dictionary,
+             * while the other virtual xattrs (such as version and
+             * signature) are removed.
+             */
+ remove_bad_file_marker = _gf_false;
+ }
+ goto delkey;
+ }
+
+ ret = br_stub_lookup_version(this, stbuf->ia_gfid, inode, xattr);
+ if (ret < 0) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto delkey;
+ }
+
+ /**
+ * If the object is bad, send "bad inode" marker back in response
+ * for xlator(s) to act accordingly (such as quick-read, etc..)
+ */
+ ret = br_stub_mark_xdata_bad_object(this, inode, xattr);
+ if (ret) {
+ /**
+ * aaha! bad object, but sorry we would not
+ * satisfy the request on allocation failures.
+ */
+ op_ret = -1;
+ op_errno = EIO;
+ goto delkey;
+ }
+
+delkey:
+ br_stub_remove_vxattrs(xattr, remove_bad_file_marker);
+unwind:
+ STACK_UNWIND_STRICT(lookup, frame, op_ret, op_errno, inode, stbuf, xattr,
+ postparent);
+
+ return 0;
+}
+
+int
+br_stub_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ int32_t ret = 0;
+ int op_errno = 0;
+ void *cookie = NULL;
+ uint64_t ctx_addr = 0;
+ gf_boolean_t xref = _gf_false;
+ br_stub_private_t *priv = NULL;
+ call_stub_t *stub = NULL;
+
+ GF_VALIDATE_OR_GOTO("bit-rot-stub", this, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, loc, unwind);
+ GF_VALIDATE_OR_GOTO(this->name, loc->inode, unwind);
+
+ priv = this->private;
+
+ BR_STUB_VER_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ if (!gf_uuid_compare(loc->gfid, priv->bad_object_dir_gfid) ||
+ !gf_uuid_compare(loc->pargfid, priv->bad_object_dir_gfid)) {
+ stub = fop_lookup_stub(frame, br_stub_lookup_wrapper, loc, xdata);
+ if (!stub) {
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ br_stub_worker_enqueue(this, stub);
+ return 0;
+ }
+
+ ret = br_stub_get_inode_ctx(this, loc->inode, &ctx_addr);
+ if (ret < 0)
+ ctx_addr = 0;
+ if (ctx_addr != 0)
+ goto wind;
+
+ /**
+ * fresh lookup: request version keys from POSIX
+ */
+ op_errno = ENOMEM;
+ if (!xdata) {
+ xdata = dict_new();
+ if (!xdata)
+ goto unwind;
+ } else {
+ xdata = dict_ref(xdata);
+ }
+
+ xref = _gf_true;
+
+ /**
+ * Requesting both xattrs provides a way of sanity checking the
+ * object. Anomaly checking is done in cbk by examining absence
+ * of either or both xattrs.
+ */
+ op_errno = EINVAL;
+ ret = dict_set_uint32(xdata, BITROT_CURRENT_VERSION_KEY, 0);
+ if (ret)
+ goto unwind;
+ ret = dict_set_uint32(xdata, BITROT_SIGNING_VERSION_KEY, 0);
+ if (ret)
+ goto unwind;
+ ret = dict_set_uint32(xdata, BITROT_OBJECT_BAD_KEY, 0);
+ if (ret)
+ goto unwind;
+ cookie = (void *)BR_STUB_REQUEST_COOKIE;
+
+wind:
+ STACK_WIND_COOKIE(frame, br_stub_lookup_cbk, cookie, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lookup, loc, xdata);
+ goto dealloc_dict;
+
+unwind:
+ if (frame->local == (void *)0x1)
+ frame->local = NULL;
+ STACK_UNWIND_STRICT(lookup, frame, -1, op_errno, NULL, NULL, NULL, NULL);
+dealloc_dict:
+ if (xref)
+ dict_unref(xdata);
+ return 0;
+}
+
+/** }}} */
+
+/** {{{ */
+
+/* stat */
+int
+br_stub_stat(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ int32_t ret = 0;
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+ br_stub_private_t *priv = NULL;
+
+ priv = this->private;
+
+ if (!priv->do_versioning)
+ goto wind;
+
+ if (!IA_ISREG(loc->inode->ia_type))
+ goto wind;
+
+ ret = br_stub_check_bad_object(this, loc->inode, &op_ret, &op_errno);
+ if (ret)
+ goto unwind;
+
+wind:
+ STACK_WIND_TAIL(frame, FIRST_CHILD(this), FIRST_CHILD(this)->fops->stat,
+ loc, xdata);
+ return 0;
+
+unwind:
+ STACK_UNWIND_STRICT(stat, frame, op_ret, op_errno, NULL, NULL);
+ return 0;
+}
+
+/* fstat */
+int
+br_stub_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+{
+ int32_t ret = 0;
+ int32_t op_ret = -1;
+ int32_t op_errno = EINVAL;
+ br_stub_private_t *priv = NULL;
+
+ priv = this->private;
+
+ if (!priv->do_versioning)
+ goto wind;
+
+ if (!IA_ISREG(fd->inode->ia_type))
+ goto wind;
+
+ ret = br_stub_check_bad_object(this, fd->inode, &op_ret, &op_errno);
+ if (ret)
+ goto unwind;
+
+wind:
+ STACK_WIND_TAIL(frame, FIRST_CHILD(this), FIRST_CHILD(this)->fops->fstat,
+ fd, xdata);
+ return 0;
+
+unwind:
+ STACK_UNWIND_STRICT(fstat, frame, op_ret, op_errno, NULL, NULL);
+ return 0;
+}
+
+/** }}} */
+
+/** {{{ */
+
+/* unlink() */
+
+int
+br_stub_unlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ br_stub_local_t *local = NULL;
+ inode_t *inode = NULL;
+ uint64_t ctx_addr = 0;
+ br_stub_inode_ctx_t *ctx = NULL;
+ int32_t ret = -1;
+ br_stub_private_t *priv = NULL;
+ gf_boolean_t ver_enabled = _gf_false;
+
+ BR_STUB_VER_ENABLED_IN_CALLPATH(frame, ver_enabled);
+ priv = this->private;
+ BR_STUB_VER_COND_GOTO(priv, (!ver_enabled), unwind);
+
+ local = frame->local;
+ frame->local = NULL;
+
+ if (op_ret < 0)
+ goto unwind;
+
+ if (!local) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_NULL_LOCAL, NULL);
+ goto unwind;
+ }
+ inode = local->u.context.inode;
+ if (!IA_ISREG(inode->ia_type))
+ goto unwind;
+
+ ret = br_stub_get_inode_ctx(this, inode, &ctx_addr);
+ if (ret) {
+        /**
+         * If the inode is bad AND the context is not there, then there
+         * is a possibility that the gfid of the object is listed in the
+         * quarantine directory and will show up in the bad objects list.
+         * So continue with the fop, with a warning log. The entry in the
+         * quarantine directory has to be removed manually. It is not a
+         * good idea to fail the fop, as the object has already been
+         * deleted.
+         */
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_GET_INODE_CONTEXT_FAILED,
+ "inode-gfid=%s", uuid_utoa(inode->gfid), NULL);
+ goto unwind;
+ }
+
+ ctx = (br_stub_inode_ctx_t *)(long)ctx_addr;
+
+ LOCK(&inode->lock);
+ {
+ /**
+ * Ignoring the return value of br_stub_del ().
+ * There is not much that can be done if unlinking
+ * of the entry in the quarantine directory fails.
+ * The failure is logged.
+ */
+ if (__br_stub_is_bad_object(ctx))
+ (void)br_stub_del(this, inode->gfid);
+ }
+ UNLOCK(&inode->lock);
+
+unwind:
+ STACK_UNWIND_STRICT(unlink, frame, op_ret, op_errno, preparent, postparent,
+ xdata);
+ br_stub_cleanup_local(local);
+ br_stub_dealloc_local(local);
+ return 0;
+}
+
+int
+br_stub_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int flag,
+ dict_t *xdata)
+{
+ br_stub_local_t *local = NULL;
+ int32_t op_ret = -1;
+ int32_t op_errno = 0;
+ br_stub_private_t *priv = NULL;
+
+ priv = this->private;
+ BR_STUB_VER_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ local = br_stub_alloc_local(this);
+ if (!local) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM, BRS_MSG_ALLOC_MEM_FAILED,
+ "local path=%s", loc->path, "gfid=%s",
+ uuid_utoa(loc->inode->gfid), NULL);
+ goto unwind;
+ }
+
+ br_stub_fill_local(local, NULL, NULL, loc->inode, loc->inode->gfid,
+ BR_STUB_NO_VERSIONING, 0);
+
+ frame->local = local;
+
+wind:
+ STACK_WIND(frame, br_stub_unlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->unlink, loc, flag, xdata);
+ return 0;
+
+unwind:
+ if (frame->local == (void *)0x1)
+ frame->local = NULL;
+ STACK_UNWIND_STRICT(unlink, frame, op_ret, op_errno, NULL, NULL, NULL);
+ return 0;
+}
+
+/** }}} */
+
+/** {{{ */
+
+/* forget() */
+
+int
+br_stub_forget(xlator_t *this, inode_t *inode)
+{
+ uint64_t ctx_addr = 0;
+ br_stub_inode_ctx_t *ctx = NULL;
+
+ inode_ctx_del(inode, this, &ctx_addr);
+ if (!ctx_addr)
+ return 0;
+
+ ctx = (br_stub_inode_ctx_t *)(long)ctx_addr;
+
+ GF_FREE(ctx);
+
+ return 0;
+}
+
+/** }}} */
+
+/** {{{ */
+
+int32_t
+br_stub_noop(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, dict_t *xdata)
+{
+ STACK_DESTROY(frame->root);
+ return 0;
+}
+
+static void
+br_stub_send_ipc_fop(xlator_t *this, fd_t *fd, unsigned long releaseversion,
+ int sign_info)
+{
+ int32_t op = 0;
+ int32_t ret = 0;
+ dict_t *xdata = NULL;
+ call_frame_t *frame = NULL;
+ changelog_event_t ev = {
+ 0,
+ };
+
+ ev.ev_type = CHANGELOG_OP_TYPE_BR_RELEASE;
+ ev.u.releasebr.version = releaseversion;
+ ev.u.releasebr.sign_info = sign_info;
+ gf_uuid_copy(ev.u.releasebr.gfid, fd->inode->gfid);
+
+ xdata = dict_new();
+ if (!xdata) {
+ gf_smsg(this->name, GF_LOG_WARNING, ENOMEM, BRS_MSG_DICT_ALLOC_FAILED,
+ NULL);
+ goto out;
+ }
+
+ ret = dict_set_static_bin(xdata, "RELEASE-EVENT", &ev, CHANGELOG_EV_SIZE);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_SET_EVENT_FAILED, NULL);
+ goto dealloc_dict;
+ }
+
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame) {
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_CREATE_FRAME_FAILED,
+ NULL);
+ goto dealloc_dict;
+ }
+
+ op = GF_IPC_TARGET_CHANGELOG;
+ STACK_WIND(frame, br_stub_noop, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->ipc, op, xdata);
+
+dealloc_dict:
+ dict_unref(xdata);
+out:
+ return;
+}
+
+/**
+ * This is how the state machine of sign info works:
+ * 3 states:
+ * 1) BR_SIGN_NORMAL => The default State of the inode
+ * 2) BR_SIGN_REOPEN_WAIT => A release has been sent and is waiting for reopen
+ * 3) BR_SIGN_QUICK => reopen has happened and this release should trigger sign
+ * 2 events:
+ * 1) GF_FOP_RELEASE
+ * 2) GF_FOP_WRITE (actually a dummy write for BitD)
+ *
+ * This is how states are changed based on events:
+ * EVENT: GF_FOP_RELEASE:
+ * if (state == BR_SIGN_NORMAL) ; then
+ * set state = BR_SIGN_REOPEN_WAIT;
+ * if (state == BR_SIGN_QUICK); then
+ * set state = BR_SIGN_NORMAL;
+ * EVENT: GF_FOP_WRITE:
+ * if (state == BR_SIGN_REOPEN_WAIT); then
+ * set state = BR_SIGN_QUICK;
+ */
+br_sign_state_t
+__br_stub_inode_sign_state(br_stub_inode_ctx_t *ctx, glusterfs_fop_t fop,
+ fd_t *fd)
+{
+ br_sign_state_t sign_info = BR_SIGN_INVALID;
+
+ switch (fop) {
+ case GF_FOP_FSETXATTR:
+ sign_info = ctx->info_sign = BR_SIGN_QUICK;
+ break;
+
+ case GF_FOP_RELEASE:
+ GF_ASSERT(ctx->info_sign != BR_SIGN_REOPEN_WAIT);
+
+ if (ctx->info_sign == BR_SIGN_NORMAL) {
+ sign_info = ctx->info_sign = BR_SIGN_REOPEN_WAIT;
+ } else {
+ sign_info = ctx->info_sign;
+ ctx->info_sign = BR_SIGN_NORMAL;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return sign_info;
+}
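+
+/*
+ * An illustrative trace of the transitions above (a sketch, assuming BitD
+ * reopens the object and signs it via a dummy fsetxattr, as hinted at in
+ * the state machine comment):
+ *
+ *   last application fd released -> RELEASE: NORMAL -> REOPEN_WAIT, and a
+ *                                   release notification is sent to BitD
+ *   BitD reopens and signs       -> FSETXATTR: state set to QUICK
+ *   BitD's fd released           -> RELEASE: QUICK -> NORMAL, BR_SIGN_QUICK
+ *                                   is returned so this release triggers
+ *                                   the signing
+ */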
+
+int32_t
+br_stub_release(xlator_t *this, fd_t *fd)
+{
+ int32_t ret = 0;
+ int32_t flags = 0;
+ inode_t *inode = NULL;
+ unsigned long releaseversion = 0;
+ br_stub_inode_ctx_t *ctx = NULL;
+ uint64_t tmp = 0;
+ br_stub_fd_t *br_stub_fd = NULL;
+ int32_t signinfo = 0;
+
+ inode = fd->inode;
+
+ LOCK(&inode->lock);
+ {
+ ctx = __br_stub_get_ongoing_version_ctx(this, inode, NULL);
+ if (ctx == NULL)
+ goto unblock;
+ br_stub_fd = br_stub_fd_ctx_get(this, fd);
+ if (br_stub_fd) {
+ list_del_init(&br_stub_fd->list);
+ }
+
+ ret = __br_stub_can_trigger_release(inode, ctx, &releaseversion);
+ if (!ret)
+ goto unblock;
+
+ signinfo = __br_stub_inode_sign_state(ctx, GF_FOP_RELEASE, fd);
+ signinfo = htonl(signinfo);
+
+        /* inode back to initial state: mark dirty */
+ if (ctx->info_sign == BR_SIGN_NORMAL) {
+ __br_stub_mark_inode_dirty(ctx);
+ __br_stub_unset_inode_modified(ctx);
+ }
+ }
+unblock:
+ UNLOCK(&inode->lock);
+
+ if (ret) {
+ gf_msg_debug(this->name, 0,
+ "releaseversion: %lu | flags: %d "
+ "| signinfo: %d",
+ (unsigned long)ntohl(releaseversion), flags,
+ ntohl(signinfo));
+ br_stub_send_ipc_fop(this, fd, releaseversion, signinfo);
+ }
+
+ ret = fd_ctx_del(fd, this, &tmp);
+ br_stub_fd = (br_stub_fd_t *)(long)tmp;
+
+ GF_FREE(br_stub_fd);
+
+ return 0;
+}
+
+int32_t
+br_stub_releasedir(xlator_t *this, fd_t *fd)
+{
+ br_stub_fd_t *fctx = NULL;
+ uint64_t ctx = 0;
+ int ret = 0;
+
+ ret = fd_ctx_del(fd, this, &ctx);
+ if (ret < 0)
+ goto out;
+
+ fctx = (br_stub_fd_t *)(long)ctx;
+ if (fctx->bad_object.dir) {
+ ret = sys_closedir(fctx->bad_object.dir);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_BAD_OBJ_DIR_CLOSE_FAIL,
+ "error=%s", strerror(errno), NULL);
+ }
+
+ GF_FREE(fctx);
+out:
+ return 0;
+}
+
+/** }}} */
+
+/** {{{ */
+
+/* ictxmerge */
+
+void
+br_stub_ictxmerge(xlator_t *this, fd_t *fd, inode_t *inode,
+ inode_t *linked_inode)
+{
+ int32_t ret = 0;
+ uint64_t ctxaddr = 0;
+ uint64_t lctxaddr = 0;
+ br_stub_inode_ctx_t *ctx = NULL;
+ br_stub_inode_ctx_t *lctx = NULL;
+ br_stub_fd_t *br_stub_fd = NULL;
+
+ ret = br_stub_get_inode_ctx(this, inode, &ctxaddr);
+ if (ret < 0)
+ goto done;
+ ctx = (br_stub_inode_ctx_t *)(uintptr_t)ctxaddr;
+
+ LOCK(&linked_inode->lock);
+ {
+ ret = __br_stub_get_inode_ctx(this, linked_inode, &lctxaddr);
+ if (ret < 0)
+ goto unblock;
+ lctx = (br_stub_inode_ctx_t *)(uintptr_t)lctxaddr;
+
+ GF_ASSERT(list_is_singular(&ctx->fd_list));
+ br_stub_fd = list_first_entry(&ctx->fd_list, br_stub_fd_t, list);
+ if (br_stub_fd) {
+ GF_ASSERT(br_stub_fd->fd == fd);
+ list_move_tail(&br_stub_fd->list, &lctx->fd_list);
+ }
+ }
+unblock:
+ UNLOCK(&linked_inode->lock);
+
+done:
+ return;
+}
+
+/** }}} */
+
+struct xlator_fops fops = {
+ .lookup = br_stub_lookup,
+ .stat = br_stub_stat,
+ .fstat = br_stub_fstat,
+ .open = br_stub_open,
+ .create = br_stub_create,
+ .readdirp = br_stub_readdirp,
+ .getxattr = br_stub_getxattr,
+ .fgetxattr = br_stub_fgetxattr,
+ .fsetxattr = br_stub_fsetxattr,
+ .writev = br_stub_writev,
+ .truncate = br_stub_truncate,
+ .ftruncate = br_stub_ftruncate,
+ .mknod = br_stub_mknod,
+ .readv = br_stub_readv,
+ .removexattr = br_stub_removexattr,
+ .fremovexattr = br_stub_fremovexattr,
+ .setxattr = br_stub_setxattr,
+ .opendir = br_stub_opendir,
+ .readdir = br_stub_readdir,
+ .unlink = br_stub_unlink,
+};
+
+struct xlator_cbks cbks = {
+ .forget = br_stub_forget,
+ .release = br_stub_release,
+ .ictxmerge = br_stub_ictxmerge,
+};
+
+struct volume_options options[] = {
+ {.key = {"bitrot"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .op_version = {GD_OP_VERSION_3_7_0},
+ .flags = OPT_FLAG_SETTABLE | OPT_FLAG_FORCE,
+ .tags = {"bitrot"},
+ .description = "enable/disable bitrot stub"},
+ {.key = {"export"},
+ .type = GF_OPTION_TYPE_PATH,
+ .op_version = {GD_OP_VERSION_3_7_0},
+ .tags = {"bitrot"},
+ .description = "brick path for versioning",
+ .default_value = "{{ brick.path }}"},
+ {.key = {NULL}},
+};
+
+xlator_api_t xlator_api = {
+ .init = init,
+ .fini = fini,
+ .notify = notify,
+ .reconfigure = reconfigure,
+ .mem_acct_init = mem_acct_init,
+ .op_version = {1}, /* Present from the initial version */
+ .fops = &fops,
+ .cbks = &cbks,
+ .options = options,
+ .identifier = "bitrot-stub",
+ .category = GF_MAINTAINED,
+};
diff --git a/xlators/features/bit-rot/src/stub/bit-rot-stub.h b/xlators/features/bit-rot/src/stub/bit-rot-stub.h
new file mode 100644
index 00000000000..edd79a77e4f
--- /dev/null
+++ b/xlators/features/bit-rot/src/stub/bit-rot-stub.h
@@ -0,0 +1,515 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef __BIT_ROT_STUB_H__
+#define __BIT_ROT_STUB_H__
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/dict.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/call-stub.h>
+#include "bit-rot-stub-mem-types.h"
+#include <glusterfs/syscall.h>
+#include <glusterfs/common-utils.h>
+#include "bit-rot-common.h"
+#include "bit-rot-stub-messages.h"
+#include "glusterfs3-xdr.h"
+#include <glusterfs/syncop.h>
+#include <glusterfs/syncop-utils.h>
+
+#define BAD_OBJECT_THREAD_STACK_SIZE ((size_t)(1024 * 1024))
+#define BR_STUB_DUMP_STR_SIZE 65536
+
+#define BR_PATH_MAX_EXTRA (PATH_MAX + 1024)
+#define BR_PATH_MAX_PLUS (PATH_MAX + 2048)
+
+/*
+ * Oops. Spelling mistake. Correcting it
+ */
+#define OLD_BR_STUB_QUARANTINE_DIR GF_HIDDEN_PATH "/quanrantine"
+#define BR_STUB_QUARANTINE_DIR GF_HIDDEN_PATH "/quarantine"
+
+/* Do not reference frame->local in the cbk unless it is initialized.
+ * Assigning 0x1 marks the versioning flag between the call path and
+ * the cbk path.
+ */
+#define BR_STUB_VER_NOT_ACTIVE_THEN_GOTO(frame, priv, label) \
+ do { \
+ if (priv->do_versioning) \
+ frame->local = (void *)0x1; \
+ else \
+ goto label; \
+ } while (0)
+
+#define BR_STUB_VER_COND_GOTO(priv, cond, label) \
+ do { \
+ if (!priv->do_versioning || cond) \
+ goto label; \
+ } while (0)
+
+#define BR_STUB_VER_ENABLED_IN_CALLPATH(frame, flag) \
+ do { \
+ if (frame->local) \
+ flag = _gf_true; \
+ if (frame->local == (void *)0x1) \
+ frame->local = NULL; \
+ } while (0)
+
+#define BR_STUB_RESET_LOCAL_NULL(frame) \
+ do { \
+ if (frame->local == (void *)0x1) \
+ frame->local = NULL; \
+ } while (0)
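+
+/*
+ * A minimal usage sketch for the macros above (hypothetical fop names;
+ * this mirrors what fops such as br_stub_unlink()/br_stub_unlink_cbk()
+ * actually do):
+ *
+ *   br_stub_somefop (frame, this, ...)
+ *   {
+ *           priv = this->private;
+ *           BR_STUB_VER_NOT_ACTIVE_THEN_GOTO (frame, priv, wind);
+ *           ...versioning specific work...
+ *   wind:
+ *           STACK_WIND (frame, br_stub_somefop_cbk, ...);
+ *   }
+ *
+ *   br_stub_somefop_cbk (frame, ...)
+ *   {
+ *           BR_STUB_VER_ENABLED_IN_CALLPATH (frame, ver_enabled);
+ *           BR_STUB_VER_COND_GOTO (priv, (!ver_enabled), unwind);
+ *           ...versioning specific work...
+ *   unwind:
+ *           STACK_UNWIND_STRICT (somefop, frame, ...);
+ *   }
+ */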
+
+typedef int(br_stub_version_cbk)(call_frame_t *, void *, xlator_t *, int32_t,
+ int32_t, dict_t *);
+
+typedef struct br_stub_inode_ctx {
+ int need_writeback; /* does the inode need
+ a writeback to disk? */
+ unsigned long currentversion; /* ongoing version */
+
+ int info_sign;
+ struct list_head fd_list; /* list of open fds or fds participating in
+ write operations */
+ gf_boolean_t bad_object;
+} br_stub_inode_ctx_t;
+
+typedef struct br_stub_fd {
+ fd_t *fd;
+ struct list_head list;
+ struct bad_object_dir {
+ DIR *dir;
+ off_t dir_eof;
+ } bad_object;
+} br_stub_fd_t;
+
+#define I_DIRTY (1 << 0) /* inode needs writeback */
+#define I_MODIFIED (1 << 1)
+#define WRITEBACK_DURABLE 1 /* writeback is durable */
+
+/**
+ * This could just have been a plain struct without unions and all,
+ * but we may need additional things in the future.
+ */
+typedef struct br_stub_local {
+ call_stub_t *fopstub; /* stub for original fop */
+
+ int versioningtype; /* not much used atm */
+
+ union {
+ struct br_stub_ctx {
+ fd_t *fd;
+ uuid_t gfid;
+ inode_t *inode;
+ unsigned long version;
+ } context;
+ } u;
+} br_stub_local_t;
+
+#define BR_STUB_NO_VERSIONING (1 << 0)
+#define BR_STUB_INCREMENTAL_VERSIONING (1 << 1)
+
+typedef struct br_stub_private {
+ gf_boolean_t do_versioning;
+
+ uint32_t boot[2];
+ char export[PATH_MAX];
+
+ pthread_mutex_t lock;
+ pthread_cond_t cond;
+
+ struct list_head squeue; /* ordered signing queue */
+ pthread_t signth;
+ struct bad_objects_container {
+ pthread_t thread;
+ pthread_mutex_t bad_lock;
+ pthread_cond_t bad_cond;
+ struct list_head bad_queue;
+ } container;
+ struct mem_pool *local_pool;
+
+ char stub_basepath[BR_PATH_MAX_EXTRA];
+
+ uuid_t bad_object_dir_gfid;
+} br_stub_private_t;
+
+br_stub_fd_t *
+br_stub_fd_new(void);
+
+int
+__br_stub_fd_ctx_set(xlator_t *this, fd_t *fd, br_stub_fd_t *br_stub_fd);
+
+br_stub_fd_t *
+__br_stub_fd_ctx_get(xlator_t *this, fd_t *fd);
+
+br_stub_fd_t *
+br_stub_fd_ctx_get(xlator_t *this, fd_t *fd);
+
+int32_t
+br_stub_fd_ctx_set(xlator_t *this, fd_t *fd, br_stub_fd_t *br_stub_fd);
+
+static inline gf_boolean_t
+__br_stub_is_bad_object(br_stub_inode_ctx_t *ctx)
+{
+ return ctx->bad_object;
+}
+
+static inline void
+__br_stub_mark_object_bad(br_stub_inode_ctx_t *ctx)
+{
+ ctx->bad_object = _gf_true;
+}
+
+/* inode writeback helpers */
+static inline void
+__br_stub_mark_inode_dirty(br_stub_inode_ctx_t *ctx)
+{
+ ctx->need_writeback |= I_DIRTY;
+}
+
+static inline void
+__br_stub_mark_inode_synced(br_stub_inode_ctx_t *ctx)
+{
+ ctx->need_writeback &= ~I_DIRTY;
+}
+
+static inline int
+__br_stub_is_inode_dirty(br_stub_inode_ctx_t *ctx)
+{
+ return (ctx->need_writeback & I_DIRTY);
+}
+
+/* inode modification markers */
+static inline void
+__br_stub_set_inode_modified(br_stub_inode_ctx_t *ctx)
+{
+ ctx->need_writeback |= I_MODIFIED;
+}
+
+static inline void
+__br_stub_unset_inode_modified(br_stub_inode_ctx_t *ctx)
+{
+ ctx->need_writeback &= ~I_MODIFIED;
+}
+
+static inline int
+__br_stub_is_inode_modified(br_stub_inode_ctx_t *ctx)
+{
+ return (ctx->need_writeback & I_MODIFIED);
+}
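+
+/*
+ * An informal sketch of how these bits travel (pieced together from the
+ * fop and release paths in this translator):
+ *
+ *   modifying fop succeeds            -> __br_stub_set_inode_modified()
+ *   release, no open fds left         -> __br_stub_can_trigger_release()
+ *                                        requires I_MODIFIED and asserts
+ *                                        that I_DIRTY is clear
+ *   sign state back to BR_SIGN_NORMAL -> __br_stub_mark_inode_dirty() and
+ *                                        __br_stub_unset_inode_modified()
+ *                                        (see br_stub_release())
+ */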
+
+static inline int
+br_stub_require_release_call(xlator_t *this, fd_t *fd, br_stub_fd_t **fd_ctx)
+{
+ int32_t ret = 0;
+ br_stub_fd_t *br_stub_fd = NULL;
+
+ br_stub_fd = br_stub_fd_new();
+ if (!br_stub_fd)
+ return -1;
+
+ br_stub_fd->fd = fd;
+ INIT_LIST_HEAD(&br_stub_fd->list);
+
+ ret = br_stub_fd_ctx_set(this, fd, br_stub_fd);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_WARNING, 0, BRS_MSG_SET_CONTEXT_FAILED,
+ NULL);
+ else
+ *fd_ctx = br_stub_fd;
+
+ return ret;
+}
+
+/* get/set inode context helpers */
+
+static inline int
+__br_stub_get_inode_ctx(xlator_t *this, inode_t *inode, uint64_t *ctx)
+{
+ return __inode_ctx_get(inode, this, ctx);
+}
+
+static inline int
+br_stub_get_inode_ctx(xlator_t *this, inode_t *inode, uint64_t *ctx)
+{
+ int ret = -1;
+
+ LOCK(&inode->lock);
+ {
+ ret = __br_stub_get_inode_ctx(this, inode, ctx);
+ }
+ UNLOCK(&inode->lock);
+
+ return ret;
+}
+
+static inline int
+br_stub_set_inode_ctx(xlator_t *this, inode_t *inode, br_stub_inode_ctx_t *ctx)
+{
+ uint64_t ctx_addr = (uint64_t)(uintptr_t)ctx;
+ return inode_ctx_set(inode, this, &ctx_addr);
+}
+
+/* version get/set helpers */
+
+static inline unsigned long
+__br_stub_writeback_version(br_stub_inode_ctx_t *ctx)
+{
+ return (ctx->currentversion + 1);
+}
+
+static inline void
+__br_stub_set_ongoing_version(br_stub_inode_ctx_t *ctx, unsigned long version)
+{
+ if (ctx->currentversion < version)
+ ctx->currentversion = version;
+ else
+ gf_smsg("bit-rot-stub", GF_LOG_WARNING, 0,
+ BRS_MSG_CHANGE_VERSION_FAILED, "current version=%lu",
+ ctx->currentversion, "new version=%lu", version, NULL);
+}
+
+static inline int
+__br_stub_can_trigger_release(inode_t *inode, br_stub_inode_ctx_t *ctx,
+ unsigned long *version)
+{
+    /**
+     * If the inode is modified, then it has to be dirty. An inode is
+     * marked dirty once the version is increased. It is marked as
+     * modified when the modification call (write/truncate) which
+     * triggered the versioning is successful.
+     */
+ if (__br_stub_is_inode_modified(ctx) && list_empty(&ctx->fd_list) &&
+ (ctx->info_sign != BR_SIGN_REOPEN_WAIT)) {
+ GF_ASSERT(__br_stub_is_inode_dirty(ctx) == 0);
+
+ if (version)
+ *version = htonl(ctx->currentversion);
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline int32_t
+br_stub_get_ongoing_version(xlator_t *this, inode_t *inode,
+ unsigned long *version)
+{
+ int32_t ret = 0;
+ uint64_t ctx_addr = 0;
+ br_stub_inode_ctx_t *ctx = NULL;
+
+ LOCK(&inode->lock);
+ {
+ ret = __inode_ctx_get(inode, this, &ctx_addr);
+ if (ret < 0)
+ goto unblock;
+ ctx = (br_stub_inode_ctx_t *)(long)ctx_addr;
+ *version = ctx->currentversion;
+ }
+unblock:
+ UNLOCK(&inode->lock);
+
+ return ret;
+}
+
+/**
+ * fetch the current version from inode and return the context.
+ * inode->lock should be held before invoking this as context
+ * *needs* to be valid in the caller.
+ */
+static inline br_stub_inode_ctx_t *
+__br_stub_get_ongoing_version_ctx(xlator_t *this, inode_t *inode,
+ unsigned long *version)
+{
+ int32_t ret = 0;
+ uint64_t ctx_addr = 0;
+ br_stub_inode_ctx_t *ctx = NULL;
+
+ ret = __inode_ctx_get(inode, this, &ctx_addr);
+ if (ret < 0)
+ return NULL;
+ ctx = (br_stub_inode_ctx_t *)(long)ctx_addr;
+ if (version)
+ *version = ctx->currentversion;
+
+ return ctx;
+}
+
+/* filter for xattr fetch */
+static inline int
+br_stub_is_internal_xattr(const char *name)
+{
+ if (name && ((strncmp(name, BITROT_CURRENT_VERSION_KEY,
+ SLEN(BITROT_CURRENT_VERSION_KEY)) == 0) ||
+ (strncmp(name, BITROT_SIGNING_VERSION_KEY,
+ SLEN(BITROT_SIGNING_VERSION_KEY)) == 0)))
+ return 1;
+ return 0;
+}
+
+static inline void
+br_stub_remove_vxattrs(dict_t *xattr, gf_boolean_t remove_bad_marker)
+{
+ if (xattr) {
+        /*
+         * When a file is corrupted, bad-object should be set in the
+         * dict, but other info such as the version and signature
+         * should not be. Hence the flag remove_bad_marker: the
+         * consumer decides whether to send the bad-object info in
+         * the dict or not.
+         */
+ if (remove_bad_marker)
+ dict_del(xattr, BITROT_OBJECT_BAD_KEY);
+ dict_del(xattr, BITROT_CURRENT_VERSION_KEY);
+ dict_del(xattr, BITROT_SIGNING_VERSION_KEY);
+ dict_del(xattr, BITROT_SIGNING_XATTR_SIZE_KEY);
+ }
+}
+
+/**
+ * This function returns the below values for different situations:
+ *   0  => as per the inode context, the object is not bad
+ *  -1  => failed to get the inode context itself
+ *  -2  => as per the inode context, the object is bad
+ * Both negative values mean that the fop which called this function fails
+ * and the error is returned upwards.
+ * In future, if more errors have to be handled, they can be made into
+ * enums.
+ */
+static inline int
+br_stub_is_bad_object(xlator_t *this, inode_t *inode)
+{
+ int bad_object = 0;
+ gf_boolean_t tmp = _gf_false;
+ uint64_t ctx_addr = 0;
+ br_stub_inode_ctx_t *ctx = NULL;
+ int32_t ret = -1;
+
+ ret = br_stub_get_inode_ctx(this, inode, &ctx_addr);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_GET_INODE_CONTEXT_FAILED,
+ "inode-gfid=%s", uuid_utoa(inode->gfid), NULL);
+ bad_object = -1;
+ goto out;
+ }
+
+ ctx = (br_stub_inode_ctx_t *)(long)ctx_addr;
+
+ LOCK(&inode->lock);
+ {
+ tmp = __br_stub_is_bad_object(ctx);
+ if (tmp)
+ bad_object = -2;
+ }
+ UNLOCK(&inode->lock);
+
+out:
+ return bad_object;
+}
+
+static inline int32_t
+br_stub_mark_object_bad(xlator_t *this, inode_t *inode)
+{
+ int32_t ret = -1;
+ uint64_t ctx_addr = 0;
+ br_stub_inode_ctx_t *ctx = NULL;
+
+ ret = br_stub_get_inode_ctx(this, inode, &ctx_addr);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, BRS_MSG_GET_INODE_CONTEXT_FAILED,
+ "inode-gfid=%s", uuid_utoa(inode->gfid), NULL);
+ goto out;
+ }
+
+ ctx = (br_stub_inode_ctx_t *)(long)ctx_addr;
+
+ LOCK(&inode->lock);
+ {
+ __br_stub_mark_object_bad(ctx);
+ }
+ UNLOCK(&inode->lock);
+
+out:
+ return ret;
+}
+
+/**
+ * There is a possibility that dict_set might fail. Its return value is
+ * handed back to the caller, which has to decide what to do.
+ */
+static inline int32_t
+br_stub_mark_xdata_bad_object(xlator_t *this, inode_t *inode, dict_t *xdata)
+{
+ int32_t ret = 0;
+
+ if (br_stub_is_bad_object(this, inode) == -2)
+ ret = dict_set_int32(xdata, GLUSTERFS_BAD_INODE, 1);
+
+ return ret;
+}
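+
+/*
+ * Typical caller handling (a sketch; this is what br_stub_lookup_cbk()
+ * does): a non-zero return means the bad-object marker could not be set
+ * in xdata, so the fop is failed rather than letting a bad object slip
+ * through, e.g.
+ *
+ *   ret = br_stub_mark_xdata_bad_object (this, inode, xattr);
+ *   if (ret) {
+ *           op_ret = -1;
+ *           op_errno = EIO;
+ *           goto delkey;
+ *   }
+ */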
+
+int32_t
+br_stub_add_fd_to_inode(xlator_t *this, fd_t *fd, br_stub_inode_ctx_t *ctx);
+
+br_sign_state_t
+__br_stub_inode_sign_state(br_stub_inode_ctx_t *ctx, glusterfs_fop_t fop,
+ fd_t *fd);
+
+int
+br_stub_dir_create(xlator_t *this, br_stub_private_t *priv);
+
+int
+br_stub_add(xlator_t *this, uuid_t gfid);
+
+int32_t
+br_stub_create_stub_gfid(xlator_t *this, char *stub_gfid_path, uuid_t gfid);
+
+int
+br_stub_dir_create(xlator_t *this, br_stub_private_t *priv);
+
+call_stub_t *
+__br_stub_dequeue(struct list_head *callstubs);
+
+void
+__br_stub_enqueue(struct list_head *callstubs, call_stub_t *stub);
+
+void
+br_stub_worker_enqueue(xlator_t *this, call_stub_t *stub);
+
+void *
+br_stub_worker(void *data);
+
+int32_t
+br_stub_lookup_wrapper(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ dict_t *xattr_req);
+
+int32_t
+br_stub_readdir_wrapper(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ size_t size, off_t off, dict_t *xdata);
+
+int
+br_stub_del(xlator_t *this, uuid_t gfid);
+
+int
+br_stub_bad_objects_path(xlator_t *this, fd_t *fd, gf_dirent_t *entries,
+ dict_t **dict);
+
+void
+br_stub_entry_xattr_fill(xlator_t *this, char *hpath, gf_dirent_t *entry,
+ dict_t *dict);
+
+int
+br_stub_get_path_of_gfid(xlator_t *this, inode_t *parent, inode_t *inode,
+ uuid_t gfid, char **path);
+
+#endif /* __BIT_ROT_STUB_H__ */
diff --git a/xlators/features/changelog/Makefile.am b/xlators/features/changelog/Makefile.am
new file mode 100644
index 00000000000..153bb685076
--- /dev/null
+++ b/xlators/features/changelog/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src lib
+
+CLEANFILES =
diff --git a/xlators/features/filter/Makefile.am b/xlators/features/changelog/lib/Makefile.am
index d471a3f9243..a985f42a877 100644
--- a/xlators/features/filter/Makefile.am
+++ b/xlators/features/changelog/lib/Makefile.am
@@ -1,3 +1,3 @@
SUBDIRS = src
-CLEANFILES =
+CLEANFILES =
diff --git a/xlators/features/changelog/lib/examples/c/get-changes-multi.c b/xlators/features/changelog/lib/examples/c/get-changes-multi.c
new file mode 100644
index 00000000000..5ea5bbb6630
--- /dev/null
+++ b/xlators/features/changelog/lib/examples/c/get-changes-multi.c
@@ -0,0 +1,90 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+/**
+ * Compile it using:
+ * gcc -o getchanges-multi `pkg-config --cflags libgfchangelog` \
+ * get-changes-multi.c `pkg-config --libs libgfchangelog`
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/un.h>
+#include <limits.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <string.h>
+
+#include "changelog.h"
+
+void *
+brick_init(void *xl, struct gf_brick_spec *brick)
+{
+ return brick;
+}
+
+void
+brick_fini(void *xl, char *brick, void *data)
+{
+ return;
+}
+
+void
+brick_callback(void *xl, char *brick, void *data, changelog_event_t *ev)
+{
+ printf("->callback: (brick,type) [%s:%d]\n", brick, ev->ev_type);
+}
+
+void
+fill_brick_spec(struct gf_brick_spec *brick, char *path)
+{
+ brick->brick_path = strdup(path);
+ brick->filter = CHANGELOG_OP_TYPE_BR_RELEASE;
+
+ brick->init = brick_init;
+ brick->fini = brick_fini;
+ brick->callback = brick_callback;
+ brick->connected = NULL;
+ brick->disconnected = NULL;
+}
+
+int
+main(int argc, char **argv)
+{
+ int ret = 0;
+ void *bricks = NULL;
+ struct gf_brick_spec *brick = NULL;
+
+ bricks = calloc(2, sizeof(struct gf_brick_spec));
+ if (!bricks)
+ goto error_return;
+
+ brick = (struct gf_brick_spec *)bricks;
+ fill_brick_spec(brick, "/export/z1/zwoop");
+
+ brick++;
+ fill_brick_spec(brick, "/export/z2/zwoop");
+
+ ret = gf_changelog_init(NULL);
+ if (ret)
+ goto error_return;
+
+ ret = gf_changelog_register_generic((struct gf_brick_spec *)bricks, 2, 0,
+ "/tmp/multi-changes.log", 9, NULL);
+ if (ret)
+ goto error_return;
+
+ /* let callbacks do the job */
+ select(0, NULL, NULL, NULL, NULL);
+
+error_return:
+ return -1;
+}
diff --git a/xlators/features/changelog/lib/examples/c/get-changes.c b/xlators/features/changelog/lib/examples/c/get-changes.c
new file mode 100644
index 00000000000..8bc651c24a4
--- /dev/null
+++ b/xlators/features/changelog/lib/examples/c/get-changes.c
@@ -0,0 +1,93 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+/**
+ * get set of new changes every 10 seconds (just print the file names)
+ *
+ * Compile it using:
+ * gcc -o getchanges `pkg-config --cflags libgfchangelog` get-changes.c \
+ * `pkg-config --libs libgfchangelog`
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/un.h>
+#include <limits.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <string.h>
+
+#include "changelog.h"
+
+#define handle_error(fn) printf("%s (reason: %s)\n", fn, strerror(errno))
+
+int
+main(int argc, char **argv)
+{
+ int i = 0;
+ int ret = 0;
+ ssize_t nr_changes = 0;
+ ssize_t changes = 0;
+ char fbuf[PATH_MAX] = {
+ 0,
+ };
+
+ ret = gf_changelog_init(NULL);
+ if (ret) {
+ handle_error("Init failed");
+ goto out;
+ }
+
+ /* get changes for brick "/home/vshankar/export/yow/yow-1" */
+ ret = gf_changelog_register("/export/z1/zwoop", "/tmp/scratch",
+ "/tmp/change.log", 9, 5);
+ if (ret) {
+ handle_error("register failed");
+ goto out;
+ }
+
+ while (1) {
+ i = 0;
+ nr_changes = gf_changelog_scan();
+ if (nr_changes < 0) {
+ handle_error("scan(): ");
+ break;
+ }
+
+ if (nr_changes == 0)
+ goto next;
+
+ printf("Got %ld changelog files\n", nr_changes);
+
+ while ((changes = gf_changelog_next_change(fbuf, PATH_MAX)) > 0) {
+ printf("changelog file [%d]: %s\n", ++i, fbuf);
+
+ /* process changelog */
+ /* ... */
+ /* ... */
+ /* ... */
+ /* done processing */
+
+ ret = gf_changelog_done(fbuf);
+ if (ret)
+ handle_error("gf_changelog_done");
+ }
+
+ if (changes == -1)
+ handle_error("gf_changelog_next_change");
+
+ next:
+ sleep(10);
+ }
+
+out:
+ return ret;
+}
diff --git a/xlators/features/changelog/lib/examples/c/get-history.c b/xlators/features/changelog/lib/examples/c/get-history.c
new file mode 100644
index 00000000000..3e888d75ca6
--- /dev/null
+++ b/xlators/features/changelog/lib/examples/c/get-history.c
@@ -0,0 +1,116 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+/**
+ * get set of new changes every 10 seconds (just print the file names)
+ *
+ * Compile it using:
+ * gcc -o gethistory `pkg-config --cflags libgfchangelog` get-history.c \
+ * `pkg-config --libs libgfchangelog`
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/un.h>
+#include <limits.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <string.h>
+
+#include "changelog.h"
+
+#define handle_error(fn) printf("%s (reason: %s)\n", fn, strerror(errno))
+
+int
+main(int argc, char **argv)
+{
+ int i = 0;
+ int ret = 0;
+ ssize_t nr_changes = 0;
+ ssize_t changes = 0;
+ char fbuf[PATH_MAX] = {
+ 0,
+ };
+ unsigned long end_ts = 0;
+
+ ret = gf_changelog_init(NULL);
+ if (ret) {
+ handle_error("init failed");
+ goto out;
+ }
+
+ ret = gf_changelog_register("/export/z1/zwoop", "/tmp/scratch_v1",
+ "/tmp/changes.log", 9, 5);
+ if (ret) {
+ handle_error("register failed");
+ goto out;
+ }
+
+ int a, b;
+ printf("give the two numbers start and end\t");
+ scanf("%d%d", &a, &b);
+ ret = gf_history_changelog("/export/z1/zwoop/.glusterfs/changelogs", a, b,
+ 3, &end_ts);
+ if (ret == -1) {
+ printf("history failed");
+ goto out;
+ }
+
+ printf("end time till when changelog available : %d , ret(%d) \t", end_ts,
+ ret);
+ fflush(stdout);
+
+ while (1) {
+ nr_changes = gf_history_changelog_scan();
+ printf("scanned, nr_changes : %d\n", nr_changes);
+ if (nr_changes < 0) {
+ handle_error("scan(): ");
+ break;
+ }
+
+ if (nr_changes == 0) {
+ printf("done scanning \n");
+ goto out;
+ }
+
+ printf("Got %ld changelog files\n", nr_changes);
+
+ while ((changes = gf_history_changelog_next_change(fbuf, PATH_MAX)) >
+ 0) {
+ printf("changelog file [%d]: %s\n", ++i, fbuf);
+
+ /* process changelog */
+ /* ... */
+ /* ... */
+ /* ... */
+ /* done processing */
+
+ ret = gf_history_changelog_done(fbuf);
+ if (ret)
+ handle_error("gf_changelog_done");
+ }
+ /*
+ if (changes == -1)
+ handle_error ("gf_changelog_next_change");
+ if (nr_changes ==1){
+ printf("continue scanning\n");
+ }
+
+ if(nr_changes == 0){
+ printf("done scanning \n");
+ goto out;
+ }
+ */
+ }
+
+out:
+ return ret;
+}
diff --git a/xlators/features/changelog/lib/examples/python/changes.py b/xlators/features/changelog/lib/examples/python/changes.py
new file mode 100755
index 00000000000..c410d3b000d
--- /dev/null
+++ b/xlators/features/changelog/lib/examples/python/changes.py
@@ -0,0 +1,34 @@
+#!/usr/bin/python3
+
+from __future__ import print_function
+import os
+import sys
+import time
+import libgfchangelog
+
+cl = libgfchangelog.Changes()
+
+def get_changes(brick, scratch_dir, log_file, log_level, interval):
+ change_list = []
+ try:
+ cl.cl_init()
+ cl.cl_register(brick, scratch_dir, log_file, log_level)
+ while True:
+ cl.cl_scan()
+ change_list = cl.cl_getchanges()
+ if change_list:
+ print(change_list)
+ for change in change_list:
+ print(('done with %s' % (change)))
+ cl.cl_done(change)
+ time.sleep(interval)
+ except OSError:
+ ex = sys.exc_info()[1]
+ print(ex)
+
+if __name__ == '__main__':
+    if len(sys.argv) != 5:
+ print(("usage: %s <brick> <scratch-dir> <log-file> <fetch-interval>"
+ % (sys.argv[0])))
+ sys.exit(1)
+ get_changes(sys.argv[1], sys.argv[2], sys.argv[3], 9, int(sys.argv[4]))
diff --git a/xlators/features/changelog/lib/examples/python/libgfchangelog.py b/xlators/features/changelog/lib/examples/python/libgfchangelog.py
new file mode 100644
index 00000000000..2da9f2d2a8c
--- /dev/null
+++ b/xlators/features/changelog/lib/examples/python/libgfchangelog.py
@@ -0,0 +1,71 @@
+import os
+from ctypes import *
+from ctypes.util import find_library
+
+class Changes(object):
+ libgfc = CDLL(find_library("gfchangelog"), mode=RTLD_GLOBAL,
+ use_errno=True)
+
+ @classmethod
+ def geterrno(cls):
+ return get_errno()
+
+ @classmethod
+ def raise_oserr(cls):
+ errn = cls.geterrno()
+ raise OSError(errn, os.strerror(errn))
+
+ @classmethod
+ def _get_api(cls, call):
+ return getattr(cls.libgfc, call)
+
+ @classmethod
+ def cl_init(cls):
+ ret = cls._get_api('gf_changelog_init')(None)
+ if ret == -1:
+            cls.raise_oserr()
+
+ @classmethod
+ def cl_register(cls, brick, path, log_file, log_level, retries = 0):
+ ret = cls._get_api('gf_changelog_register')(brick, path,
+ log_file, log_level, retries)
+ if ret == -1:
+ cls.raise_oserr()
+
+ @classmethod
+ def cl_scan(cls):
+ ret = cls._get_api('gf_changelog_scan')()
+ if ret == -1:
+ cls.raise_oserr()
+
+ @classmethod
+ def cl_startfresh(cls):
+ ret = cls._get_api('gf_changelog_start_fresh')()
+ if ret == -1:
+ cls.raise_oserr()
+
+ @classmethod
+ def cl_getchanges(cls):
+ """ remove hardcoding for path name length """
+ def clsort(f):
+ return f.split('.')[-1]
+ changes = []
+        buf = create_string_buffer(b'\0', 4096)
+ call = cls._get_api('gf_changelog_next_change')
+
+ while True:
+ ret = call(buf, 4096)
+ if ret in (0, -1):
+ break;
+ changes.append(buf.raw[:ret-1])
+ if ret == -1:
+ cls.raise_oserr()
+ # cleanup tracker
+ cls.cl_startfresh()
+ return sorted(changes, key=clsort)
+
+ @classmethod
+ def cl_done(cls, clfile):
+ ret = cls._get_api('gf_changelog_done')(clfile)
+ if ret == -1:
+ cls.raise_oserr()
diff --git a/xlators/features/changelog/lib/src/Makefile.am b/xlators/features/changelog/lib/src/Makefile.am
new file mode 100644
index 00000000000..c933ec53ed2
--- /dev/null
+++ b/xlators/features/changelog/lib/src/Makefile.am
@@ -0,0 +1,35 @@
+libgfchangelog_la_CFLAGS = -Wall $(GF_CFLAGS) $(GF_DARWIN_LIBGLUSTERFS_CFLAGS) \
+ -DDATADIR=\"$(localstatedir)\"
+
+libgfchangelog_la_CPPFLAGS = $(GF_CPPFLAGS) -D__USE_FILE_OFFSET64 -D__USE_LARGEFILE64 -fpic \
+ -I../../../src/ -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/xlators/features/changelog/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \
+ -I$(top_srcdir)/rpc/rpc-lib/src \
+ -I$(top_srcdir)/rpc/rpc-transport/socket/src \
+ -DDATADIR=\"$(localstatedir)\"
+
+libgfchangelog_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
+ $(top_builddir)/rpc/xdr/src/libgfxdr.la \
+ $(top_builddir)/rpc/rpc-lib/src/libgfrpc.la
+
+libgfchangelog_la_LDFLAGS = $(GF_LDFLAGS) \
+ -version-info $(LIBGFCHANGELOG_LT_VERSION) \
+ $(GF_NO_UNDEFINED)
+
+lib_LTLIBRARIES = libgfchangelog.la
+
+CONTRIB_BUILDDIR = $(top_builddir)/contrib
+
+libgfchangelog_la_SOURCES = gf-changelog.c gf-changelog-journal-handler.c \
+ gf-changelog-helpers.c gf-changelog-api.c gf-history-changelog.c \
+ gf-changelog-rpc.c gf-changelog-reborp.c \
+ $(top_srcdir)/xlators/features/changelog/src/changelog-rpc-common.c
+
+noinst_HEADERS = gf-changelog-helpers.h gf-changelog-rpc.h \
+ gf-changelog-journal.h changelog-lib-messages.h
+
+CLEANFILES =
+
+$(top_builddir)/libglusterfs/src/libglusterfs.la:
+ $(MAKE) -C $(top_builddir)/libglusterfs/src/ all
diff --git a/xlators/features/changelog/lib/src/changelog-lib-messages.h b/xlators/features/changelog/lib/src/changelog-lib-messages.h
new file mode 100644
index 00000000000..d7fe7274353
--- /dev/null
+++ b/xlators/features/changelog/lib/src/changelog-lib-messages.h
@@ -0,0 +1,74 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+ */
+
+#ifndef _CHANGELOG_LIB_MESSAGES_H_
+#define _CHANGELOG_LIB_MESSAGES_H_
+
+#include <glusterfs/glfs-message-id.h>
+
+/* To add new message IDs, append new identifiers at the end of the list.
+ *
+ * Never remove a message ID. If it's not used anymore, you can rename it or
+ * leave it as it is, but not delete it. This is to prevent reutilization of
+ * IDs by other messages.
+ *
+ * The component name must match one of the entries defined in
+ * glfs-message-id.h.
+ */
+
+GLFS_MSGID(
+ CHANGELOG_LIB, CHANGELOG_LIB_MSG_OPEN_FAILED,
+ CHANGELOG_LIB_MSG_FAILED_TO_RMDIR,
+ CHANGELOG_LIB_MSG_SCRATCH_DIR_ENTRIES_CREATION_ERROR,
+ CHANGELOG_LIB_MSG_THREAD_CREATION_FAILED, CHANGELOG_LIB_MSG_OPENDIR_ERROR,
+ CHANGELOG_LIB_MSG_RENAME_FAILED, CHANGELOG_LIB_MSG_READ_ERROR,
+ CHANGELOG_LIB_MSG_HTIME_ERROR, CHANGELOG_LIB_MSG_GET_TIME_ERROR,
+ CHANGELOG_LIB_MSG_WRITE_FAILED, CHANGELOG_LIB_MSG_PTHREAD_ERROR,
+ CHANGELOG_LIB_MSG_MMAP_FAILED, CHANGELOG_LIB_MSG_MUNMAP_FAILED,
+ CHANGELOG_LIB_MSG_ASCII_ERROR, CHANGELOG_LIB_MSG_STAT_FAILED,
+ CHANGELOG_LIB_MSG_GET_XATTR_FAILED, CHANGELOG_LIB_MSG_PUBLISH_ERROR,
+ CHANGELOG_LIB_MSG_PARSE_ERROR, CHANGELOG_LIB_MSG_MIN_MAX_INFO,
+ CHANGELOG_LIB_MSG_CLEANUP_ERROR, CHANGELOG_LIB_MSG_UNLINK_FAILED,
+ CHANGELOG_LIB_MSG_NOTIFY_REGISTER_FAILED,
+ CHANGELOG_LIB_MSG_INVOKE_RPC_FAILED, CHANGELOG_LIB_MSG_DRAINING_EVENT_INFO,
+ CHANGELOG_LIB_MSG_CLEANING_BRICK_ENTRY_INFO,
+ CHANGELOG_LIB_MSG_FREEING_ENTRY_INFO, CHANGELOG_LIB_MSG_XDR_DECODING_FAILED,
+ CHANGELOG_LIB_MSG_NOTIFY_REGISTER_INFO,
+ CHANGELOG_LIB_MSG_THREAD_CLEANUP_WARNING,
+ CHANGELOG_LIB_MSG_COPY_FROM_BUFFER_FAILED,
+ CHANGELOG_LIB_MSG_PTHREAD_JOIN_FAILED, CHANGELOG_LIB_MSG_HIST_FAILED,
+ CHANGELOG_LIB_MSG_DRAINED_EVENT_INFO, CHANGELOG_LIB_MSG_PARSE_ERROR_CEASED,
+ CHANGELOG_LIB_MSG_REQUESTING_INFO, CHANGELOG_LIB_MSG_FINAL_INFO);
+
+#define CHANGELOG_LIB_MSG_NOTIFY_REGISTER_INFO_STR "Registering brick"
+#define CHANGELOG_LIB_MSG_RENAME_FAILED_STR "error moving changelog file"
+#define CHANGELOG_LIB_MSG_OPEN_FAILED_STR "cannot open changelog file"
+#define CHANGELOG_LIB_MSG_UNLINK_FAILED_STR "failed to unlink"
+#define CHANGELOG_LIB_MSG_FAILED_TO_RMDIR_STR "failed to rmdir"
+#define CHANGELOG_LIB_MSG_STAT_FAILED_STR "stat failed on changelog file"
+#define CHANGELOG_LIB_MSG_PARSE_ERROR_STR "could not parse changelog"
+#define CHANGELOG_LIB_MSG_PARSE_ERROR_CEASED_STR \
+ "parsing error, ceased publishing..."
+#define CHANGELOG_LIB_MSG_HTIME_ERROR_STR "fop failed on htime file"
+#define CHANGELOG_LIB_MSG_GET_XATTR_FAILED_STR \
+    "error extracting max timestamp from htime file"
+#define CHANGELOG_LIB_MSG_MIN_MAX_INFO_STR "changelogs min max"
+#define CHANGELOG_LIB_MSG_REQUESTING_INFO_STR "Requesting historical changelogs"
+#define CHANGELOG_LIB_MSG_FINAL_INFO_STR "FINAL"
+#define CHANGELOG_LIB_MSG_HIST_FAILED_STR \
+ "Requested changelog range is not available"
+#define CHANGELOG_LIB_MSG_GET_TIME_ERROR_STR "wrong result"
+#define CHANGELOG_LIB_MSG_CLEANING_BRICK_ENTRY_INFO_STR \
+ "Cleaning brick entry for brick"
+#define CHANGELOG_LIB_MSG_DRAINING_EVENT_INFO_STR "Draining event"
+#define CHANGELOG_LIB_MSG_DRAINED_EVENT_INFO_STR "Drained event"
+#define CHANGELOG_LIB_MSG_FREEING_ENTRY_INFO_STR "freeing entry"
+
+#endif /* !_CHANGELOG_LIB_MESSAGES_H_ */
diff --git a/xlators/features/changelog/lib/src/gf-changelog-api.c b/xlators/features/changelog/lib/src/gf-changelog-api.c
new file mode 100644
index 00000000000..81a5cbfec10
--- /dev/null
+++ b/xlators/features/changelog/lib/src/gf-changelog-api.c
@@ -0,0 +1,224 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <glusterfs/compat-uuid.h>
+#include <glusterfs/globals.h>
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/syscall.h>
+
+#include "gf-changelog-helpers.h"
+#include "gf-changelog-journal.h"
+#include "changelog-mem-types.h"
+#include "changelog-lib-messages.h"
+
+int
+gf_changelog_done(char *file)
+{
+ int ret = -1;
+ char *buffer = NULL;
+ xlator_t *this = NULL;
+ gf_changelog_journal_t *jnl = NULL;
+ char to_path[PATH_MAX] = {
+ 0,
+ };
+
+ errno = EINVAL;
+
+ this = THIS;
+ if (!this)
+ goto out;
+
+ jnl = (gf_changelog_journal_t *)GF_CHANGELOG_GET_API_PTR(this);
+ if (!jnl)
+ goto out;
+
+ if (!file || !strlen(file))
+ goto out;
+
+ /* make sure 'file' is inside ->jnl_working_dir */
+ buffer = realpath(file, NULL);
+ if (!buffer)
+ goto out;
+
+ if (strncmp(jnl->jnl_working_dir, buffer, strlen(jnl->jnl_working_dir)))
+ goto out;
+
+ (void)snprintf(to_path, PATH_MAX, "%s%s", jnl->jnl_processed_dir,
+ basename(buffer));
+ gf_msg_debug(this->name, 0, "moving %s to processed directory", file);
+ ret = sys_rename(buffer, to_path);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_LIB_MSG_RENAME_FAILED, "from=%s", file, "to=%s",
+ to_path, NULL);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (buffer)
+ free(buffer); /* allocated by realpath() */
+ return ret;
+}
+
+/**
+ * @API
+ * for a set of changelogs, start from the beginning
+ */
+int
+gf_changelog_start_fresh()
+{
+ xlator_t *this = NULL;
+ gf_changelog_journal_t *jnl = NULL;
+
+ this = THIS;
+ if (!this)
+ goto out;
+
+ errno = EINVAL;
+
+ jnl = (gf_changelog_journal_t *)GF_CHANGELOG_GET_API_PTR(this);
+ if (!jnl)
+ goto out;
+
+ if (gf_ftruncate(jnl->jnl_fd, 0))
+ goto out;
+
+ return 0;
+
+out:
+ return -1;
+}
+
+/**
+ * @API
+ * return the next changelog file entry. zero means all changelogs
+ * consumed.
+ */
+ssize_t
+gf_changelog_next_change(char *bufptr, size_t maxlen)
+{
+ ssize_t size = -1;
+ int tracker_fd = 0;
+ xlator_t *this = NULL;
+ gf_changelog_journal_t *jnl = NULL;
+ char buffer[PATH_MAX] = {
+ 0,
+ };
+
+ errno = EINVAL;
+
+ this = THIS;
+ if (!this)
+ goto out;
+
+ jnl = (gf_changelog_journal_t *)GF_CHANGELOG_GET_API_PTR(this);
+ if (!jnl)
+ goto out;
+
+ tracker_fd = jnl->jnl_fd;
+
+ size = gf_readline(tracker_fd, buffer, maxlen);
+ if (size < 0) {
+ size = -1;
+ goto out;
+ }
+
+ if (size == 0)
+ goto out;
+
+ memcpy(bufptr, buffer, size - 1);
+ bufptr[size - 1] = '\0';
+
+out:
+ return size;
+}
+
+/**
+ * @API
+ * gf_changelog_scan() - scan and generate a list of change entries
+ *
+ * calling this api multiple times (without calling gf_changelog_done())
+ * would result in new changelog(s) being refreshed in the tracker file.
+ * This call also acts as a cancellation point for the consumer.
+ */
+ssize_t
+gf_changelog_scan()
+{
+ int tracker_fd = 0;
+ size_t off = 0;
+ xlator_t *this = NULL;
+ size_t nr_entries = 0;
+ gf_changelog_journal_t *jnl = NULL;
+ struct dirent *entry = NULL;
+ struct dirent scratch[2] = {
+ {
+ 0,
+ },
+ };
+ char buffer[PATH_MAX] = {
+ 0,
+ };
+
+ this = THIS;
+ if (!this)
+ goto out;
+
+ jnl = (gf_changelog_journal_t *)GF_CHANGELOG_GET_API_PTR(this);
+ if (!jnl)
+ goto out;
+ if (JNL_IS_API_DISCONNECTED(jnl)) {
+ errno = ENOTCONN;
+ goto out;
+ }
+
+ errno = EINVAL;
+
+ tracker_fd = jnl->jnl_fd;
+ if (gf_ftruncate(tracker_fd, 0))
+ goto out;
+
+ rewinddir(jnl->jnl_dir);
+
+ for (;;) {
+ errno = 0;
+ entry = sys_readdir(jnl->jnl_dir, scratch);
+ if (!entry || errno != 0)
+ break;
+
+ if (!strcmp(basename(entry->d_name), ".") ||
+ !strcmp(basename(entry->d_name), ".."))
+ continue;
+
+ nr_entries++;
+
+ GF_CHANGELOG_FILL_BUFFER(jnl->jnl_processing_dir, buffer, off,
+ strlen(jnl->jnl_processing_dir));
+ GF_CHANGELOG_FILL_BUFFER(entry->d_name, buffer, off,
+ strlen(entry->d_name));
+ GF_CHANGELOG_FILL_BUFFER("\n", buffer, off, 1);
+
+ if (gf_changelog_write(tracker_fd, buffer, off) != off) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, CHANGELOG_LIB_MSG_WRITE_FAILED,
+ "error writing changelog filename"
+ " to tracker file");
+ break;
+ }
+ off = 0;
+ }
+
+ if (!entry) {
+ if (gf_lseek(tracker_fd, 0, SEEK_SET) != -1)
+ return nr_entries;
+ }
+out:
+ return -1;
+}
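+
+/*
+ * Illustrative consumer loop (a sketch, not part of this patch), assuming
+ * the brick has already been registered with the library (e.g. via
+ * gf_changelog_register(), defined elsewhere in libgfchangelog);
+ * consume_changelog() below is a hypothetical consumer hook:
+ *
+ *   char fname[PATH_MAX] = {0,};
+ *
+ *   if (gf_changelog_scan () > 0) {
+ *       while (gf_changelog_next_change (fname, PATH_MAX) > 0) {
+ *           consume_changelog (fname);         // parse the decoded journal
+ *           (void) gf_changelog_done (fname);  // move it to .processed
+ *       }
+ *       (void) gf_changelog_start_fresh ();    // rewind/clear the tracker
+ *   }
+ */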
diff --git a/xlators/features/changelog/lib/src/gf-changelog-helpers.c b/xlators/features/changelog/lib/src/gf-changelog-helpers.c
new file mode 100644
index 00000000000..75f8a6dfc08
--- /dev/null
+++ b/xlators/features/changelog/lib/src/gf-changelog-helpers.c
@@ -0,0 +1,170 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "changelog-mem-types.h"
+#include "gf-changelog-helpers.h"
+#include "changelog-lib-messages.h"
+#include <glusterfs/syscall.h>
+
+size_t
+gf_changelog_write(int fd, char *buffer, size_t len)
+{
+ ssize_t size = 0;
+ size_t written = 0;
+
+ while (written < len) {
+ size = sys_write(fd, buffer + written, len - written);
+ if (size <= 0)
+ break;
+
+ written += size;
+ }
+
+ return written;
+}
+
+void
+gf_rfc3986_encode_space_newline(unsigned char *s, char *enc, char *estr)
+{
+ for (; *s; s++) {
+ if (estr[*s])
+ sprintf(enc, "%c", estr[*s]);
+ else
+ sprintf(enc, "%%%02X", *s);
+ while (*++enc)
+ ;
+ }
+}
+
+/**
+ * thread safe version of readline with buffering
+ * (taken from Unix Network Programming Volume I, W.R. Stevens)
+ *
+ * This is favoured over fgets() as we'd need to ftruncate()
+ * (see gf_changelog_scan() API) to record new changelog files.
+ * stream open functions do not have a truncate-like API (it can
+ * be emulated via @fflush(fp), @ftruncate(fd) and @fseek(fp), but
+ * this involves mixing POSIX file descriptors and stream FILE *).
+ *
+ * NOTE: This implementation does not work with more than one fd
+ *       used to perform gf_readline(). For this very reason it's not
+ * made a part of libglusterfs.
+ */
+
+static __thread read_line_t thread_tsd = {};
+
+static ssize_t
+my_read(read_line_t *tsd, int fd, char *ptr)
+{
+ if (tsd->rl_cnt <= 0) {
+ tsd->rl_cnt = sys_read(fd, tsd->rl_buf, MAXLINE);
+
+ if (tsd->rl_cnt < 0)
+ return -1;
+ else if (tsd->rl_cnt == 0)
+ return 0;
+ tsd->rl_bufptr = tsd->rl_buf;
+ }
+
+ tsd->rl_cnt--;
+ *ptr = *tsd->rl_bufptr++;
+ return 1;
+}
+
+ssize_t
+gf_readline(int fd, void *vptr, size_t maxlen)
+{
+ size_t n = 0;
+ size_t rc = 0;
+ char c = ' ';
+ char *ptr = NULL;
+ read_line_t *tsd = &thread_tsd;
+
+ ptr = vptr;
+ for (n = 1; n < maxlen; n++) {
+ if ((rc = my_read(tsd, fd, &c)) == 1) {
+ *ptr++ = c;
+ if (c == '\n')
+ break;
+ } else if (rc == 0) {
+ *ptr = '\0';
+ return (n - 1);
+ } else
+ return -1;
+ }
+
+ *ptr = '\0';
+ return n;
+}
+
+off_t
+gf_lseek(int fd, off_t offset, int whence)
+{
+ off_t off = 0;
+ read_line_t *tsd = &thread_tsd;
+
+ off = sys_lseek(fd, offset, whence);
+ if (off == -1)
+ return -1;
+
+ tsd->rl_cnt = 0;
+ tsd->rl_bufptr = tsd->rl_buf;
+
+ return off;
+}
+
+int
+gf_ftruncate(int fd, off_t length)
+{
+ read_line_t *tsd = &thread_tsd;
+
+ if (sys_ftruncate(fd, 0))
+ return -1;
+
+ tsd->rl_cnt = 0;
+ tsd->rl_bufptr = tsd->rl_buf;
+
+ return 0;
+}
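+
+/*
+ * Why the wrappers above exist (informational note): the readline buffer is
+ * thread-local, so any operation that truncates or repositions the fd must
+ * also discard buffered data.  A sketch of the tracker-file cycle from this
+ * patch:
+ *
+ *   gf_ftruncate (tracker_fd, 0);             // also resets rl_cnt/rl_bufptr
+ *   ... gf_changelog_scan() rewrites the tracker ...
+ *   gf_lseek (tracker_fd, 0, SEEK_SET);       // ditto, before re-reading
+ *   gf_readline (tracker_fd, buf, PATH_MAX);  // sees only the fresh contents
+ */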
+
+int
+gf_thread_cleanup(xlator_t *this, pthread_t thread)
+{
+ int ret = 0;
+ void *res = NULL;
+
+ ret = pthread_cancel(thread);
+ if (ret != 0) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ CHANGELOG_LIB_MSG_THREAD_CLEANUP_WARNING,
+ "Failed to send cancellation to thread");
+ goto error_return;
+ }
+
+ ret = pthread_join(thread, &res);
+ if (ret != 0) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ CHANGELOG_LIB_MSG_THREAD_CLEANUP_WARNING,
+ "failed to join thread");
+ goto error_return;
+ }
+
+ if (res != PTHREAD_CANCELED) {
+ gf_msg(this->name, GF_LOG_WARNING, 0,
+ CHANGELOG_LIB_MSG_THREAD_CLEANUP_WARNING,
+ "Thread could not be cleaned up");
+ goto error_return;
+ }
+
+ return 0;
+
+error_return:
+ return -1;
+}
diff --git a/xlators/features/changelog/lib/src/gf-changelog-helpers.h b/xlators/features/changelog/lib/src/gf-changelog-helpers.h
new file mode 100644
index 00000000000..9c609d33172
--- /dev/null
+++ b/xlators/features/changelog/lib/src/gf-changelog-helpers.h
@@ -0,0 +1,255 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GF_CHANGELOG_HELPERS_H
+#define _GF_CHANGELOG_HELPERS_H
+
+#include <unistd.h>
+#include <dirent.h>
+#include <limits.h>
+#include <glusterfs/locking.h>
+
+#include <glusterfs/xlator.h>
+
+#include "changelog.h"
+
+#include "changelog-rpc-common.h"
+#include "gf-changelog-journal.h"
+
+#define GF_CHANGELOG_TRACKER "tracker"
+
+#define GF_CHANGELOG_CURRENT_DIR ".current"
+#define GF_CHANGELOG_PROCESSED_DIR ".processed"
+#define GF_CHANGELOG_PROCESSING_DIR ".processing"
+#define GF_CHANGELOG_HISTORY_DIR ".history"
+#define TIMESTAMP_LENGTH 10
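+
+/*
+ * Scratch-directory layout (informational sketch, derived from the journal
+ * code in this patch): relative to the consumer's scratch directory,
+ *
+ *   tracker       - file listing the changelogs handed out by
+ *                   gf_changelog_scan()
+ *   .current/     - changelogs decoded to ASCII, moved out once published
+ *   .processing/  - published changelogs awaiting consumption
+ *   .processed/   - changelogs acknowledged via gf_changelog_done()
+ *   .history/     - scratch area used by the history API
+ */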
+
+#ifndef MAXLINE
+#define MAXLINE 4096
+#endif
+
+#define GF_CHANGELOG_FILL_BUFFER(ptr, ascii, off, len) \
+ do { \
+ memcpy(ascii + off, ptr, len); \
+ off += len; \
+ } while (0)
+
+typedef struct read_line {
+ int rl_cnt;
+ char *rl_bufptr;
+ char rl_buf[MAXLINE];
+} read_line_t;
+
+struct gf_changelog;
+struct gf_event;
+
+/**
+ * Event list for ordered event notification
+ *
+ * ->next_seq holds the next _expected_ sequence number.
+ */
+struct gf_event_list {
+ pthread_mutex_t lock; /* protects this structure */
+ pthread_cond_t cond;
+
+ pthread_t invoker;
+
+ unsigned long next_seq; /* next sequence number expected:
+ zero during bootstrap */
+
+    struct gf_changelog *entry; /* backpointer to its brick
+ encapsulator (entry) */
+ struct list_head events; /* list of events */
+};
+
+/**
+ * include a refcount if it's of use by additional layers
+ */
+struct gf_event {
+ int count;
+
+ unsigned long seq;
+
+ struct list_head list;
+
+ struct iovec iov[0];
+};
+#define GF_EVENT_CALLOC_SIZE(cnt, len) \
+ (sizeof(struct gf_event) + (cnt * sizeof(struct iovec)) + len)
+
+/**
+ * assign the base address of the IO vector to the correct memory
+ * area and set its addressable length.
+ */
+#define GF_EVENT_ASSIGN_IOVEC(vec, event, len, pos) \
+ do { \
+ vec->iov_base = ((char *)event) + sizeof(struct gf_event) + \
+ (event->count * sizeof(struct iovec)) + pos; \
+ vec->iov_len = len; \
+ pos += len; \
+ } while (0)
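+
+/*
+ * Memory layout (illustrative): an event allocated with
+ * GF_EVENT_CALLOC_SIZE(cnt, len) is a single contiguous block,
+ *
+ *   [ struct gf_event | cnt x struct iovec | len bytes of payload ]
+ *
+ * and GF_EVENT_ASSIGN_IOVEC points each iov_base into the payload area,
+ * advancing @pos by iov_len for every vector that is filled in.
+ */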
+
+typedef enum gf_changelog_conn_state {
+ GF_CHANGELOG_CONN_STATE_PENDING = 0,
+ GF_CHANGELOG_CONN_STATE_ACCEPTED,
+ GF_CHANGELOG_CONN_STATE_DISCONNECTED,
+} gf_changelog_conn_state_t;
+
+/**
+ * An instance of this structure is allocated for each brick for which
+ * notifications are streamed.
+ */
+typedef struct gf_changelog {
+ gf_lock_t statelock;
+ gf_changelog_conn_state_t connstate;
+
+ xlator_t *this;
+
+ struct list_head list; /* list of instances */
+
+ char brick[PATH_MAX]; /* brick path for this end-point */
+
+ changelog_rpc_t grpc; /* rpc{-clnt,svc} for this brick */
+#define RPC_PROBER(ent) ent->grpc.rpc
+#define RPC_REBORP(ent) ent->grpc.svc
+#define RPC_SOCK(ent) ent->grpc.sock
+
+ unsigned int notify; /* notification flag(s) */
+
+ FINI *fini; /* destructor callback */
+ CALLBACK *callback; /* event callback dispatcher */
+ CONNECT *connected; /* connect callback */
+ DISCONNECT *disconnected; /* disconnection callback */
+
+ void *ptr; /* owner specific private data */
+ xlator_t *invokerxl; /* consumers _this_, if valid,
+ assigned to THIS before cbk is
+ invoked */
+
+ gf_boolean_t ordered;
+
+ void (*queueevent)(struct gf_event_list *, struct gf_event *);
+ void (*pickevent)(struct gf_event_list *, struct gf_event **);
+
+ struct gf_event_list event;
+} gf_changelog_t;
+
+static inline int
+gf_changelog_filter_check(gf_changelog_t *entry, changelog_event_t *event)
+{
+ if (event->ev_type & entry->notify)
+ return 1;
+ return 0;
+}
+
+#define GF_NEED_ORDERED_EVENTS(ent) (ent->ordered == _gf_true)
+
+/** private structure */
+typedef struct gf_private {
+ pthread_mutex_t lock; /* protects ->connections, cleanups */
+ pthread_cond_t cond;
+
+ void *api; /* pointer for API access */
+
+ pthread_t poller; /* event poller thread */
+ pthread_t connectionjanitor; /* connection cleaner */
+
+ struct list_head connections; /* list of connections */
+ struct list_head cleanups; /* list of connection to be
+ cleaned up */
+} gf_private_t;
+
+#define GF_CHANGELOG_GET_API_PTR(this) (((gf_private_t *)this->private)->api)
+
+/**
+ * upcall: invoke callback with _correct_ THIS
+ */
+#define GF_CHANGELOG_INVOKE_CBK(this, cbk, brick, args...) \
+ do { \
+ xlator_t *old_this = NULL; \
+ xlator_t *invokerxl = NULL; \
+ \
+ invokerxl = entry->invokerxl; \
+ old_this = this; \
+ \
+ if (invokerxl) { \
+ THIS = invokerxl; \
+ } \
+ \
+ cbk(invokerxl, brick, args); \
+ THIS = old_this; \
+ \
+ } while (0)
+
+#define SAVE_THIS(xl) \
+ do { \
+ old_this = xl; \
+ THIS = master; \
+ } while (0)
+
+#define RESTORE_THIS() \
+ do { \
+ if (old_this) \
+ THIS = old_this; \
+ } while (0)
+
+/** APIs and the rest */
+
+void *
+gf_changelog_process(void *data);
+
+void
+gf_rfc3986_encode_space_newline(unsigned char *s, char *enc, char *estr);
+
+size_t
+gf_changelog_write(int fd, char *buffer, size_t len);
+
+ssize_t
+gf_readline(int fd, void *vptr, size_t maxlen);
+
+int
+gf_ftruncate(int fd, off_t length);
+
+off_t
+gf_lseek(int fd, off_t offset, int whence);
+
+int
+gf_changelog_consume(xlator_t *this, gf_changelog_journal_t *jnl,
+ char *from_path, gf_boolean_t no_publish);
+int
+gf_changelog_publish(xlator_t *this, gf_changelog_journal_t *jnl,
+ char *from_path);
+int
+gf_thread_cleanup(xlator_t *this, pthread_t thread);
+void *
+gf_changelog_callback_invoker(void *arg);
+
+int
+gf_cleanup_event(xlator_t *, struct gf_event_list *);
+
+/* (un)ordered event queueing */
+void
+queue_ordered_event(struct gf_event_list *, struct gf_event *);
+
+void
+queue_unordered_event(struct gf_event_list *, struct gf_event *);
+
+/* (un)ordered event picking */
+void
+pick_event_ordered(struct gf_event_list *, struct gf_event **);
+
+void
+pick_event_unordered(struct gf_event_list *, struct gf_event **);
+
+/* connection janitor thread */
+void *
+gf_changelog_connection_janitor(void *);
+
+#endif
diff --git a/xlators/features/changelog/lib/src/gf-changelog-journal-handler.c b/xlators/features/changelog/lib/src/gf-changelog-journal-handler.c
new file mode 100644
index 00000000000..7f6e2329e71
--- /dev/null
+++ b/xlators/features/changelog/lib/src/gf-changelog-journal-handler.c
@@ -0,0 +1,1029 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <glusterfs/compat-uuid.h>
+#include <glusterfs/globals.h>
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/syscall.h>
+#include <glusterfs/compat-errno.h>
+
+#include "gf-changelog-helpers.h"
+
+/* from the changelog translator */
+#include "changelog-misc.h"
+#include "changelog-mem-types.h"
+
+#include "gf-changelog-journal.h"
+#include "changelog-lib-messages.h"
+
+extern int byebye;
+
+enum changelog_versions { VERSION_1_1 = 0, VERSION_1_2 = 1 };
+
+/**
+ * number of gfid records after fop number
+ */
+int nr_gfids[2][GF_FOP_MAXVALUE] = {{
+ [GF_FOP_MKNOD] = 1,
+ [GF_FOP_MKDIR] = 1,
+ [GF_FOP_UNLINK] = 1,
+ [GF_FOP_RMDIR] = 1,
+ [GF_FOP_SYMLINK] = 1,
+ [GF_FOP_RENAME] = 2,
+ [GF_FOP_LINK] = 1,
+ [GF_FOP_CREATE] = 1,
+ },
+ {
+ [GF_FOP_MKNOD] = 1,
+ [GF_FOP_MKDIR] = 1,
+ [GF_FOP_UNLINK] = 2,
+ [GF_FOP_RMDIR] = 2,
+ [GF_FOP_SYMLINK] = 1,
+ [GF_FOP_RENAME] = 2,
+ [GF_FOP_LINK] = 1,
+ [GF_FOP_CREATE] = 1,
+ }};
+
+int nr_extra_recs[2][GF_FOP_MAXVALUE] = {{
+ [GF_FOP_MKNOD] = 3,
+ [GF_FOP_MKDIR] = 3,
+ [GF_FOP_UNLINK] = 0,
+ [GF_FOP_RMDIR] = 0,
+ [GF_FOP_SYMLINK] = 0,
+ [GF_FOP_RENAME] = 0,
+ [GF_FOP_LINK] = 0,
+ [GF_FOP_CREATE] = 3,
+ },
+ {
+ [GF_FOP_MKNOD] = 3,
+ [GF_FOP_MKDIR] = 3,
+ [GF_FOP_UNLINK] = 0,
+ [GF_FOP_RMDIR] = 0,
+ [GF_FOP_SYMLINK] = 0,
+ [GF_FOP_RENAME] = 0,
+ [GF_FOP_LINK] = 0,
+ [GF_FOP_CREATE] = 3,
+ }};
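+
+/*
+ * Reading the tables above (informational note): index 0 corresponds to
+ * changelog format 1.1 and index 1 to 1.2.  The only difference captured
+ * here is that UNLINK and RMDIR carry two gfid records in 1.2 against one
+ * in 1.1; the extra-record counts are identical for both versions.
+ */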
+
+static char *
+binary_to_ascii(uuid_t uuid)
+{
+ return uuid_utoa(uuid);
+}
+
+static char *
+conv_noop(char *ptr)
+{
+ return ptr;
+}
+
+#define VERIFY_SEPARATOR(ptr, plen, perr) \
+ { \
+ if (*(ptr + plen) != '\0') { \
+ perr = 1; \
+ break; \
+ } \
+ }
+
+#define MOVER_MOVE(mover, nleft, bytes) \
+ { \
+ mover += bytes; \
+ nleft -= bytes; \
+ }
+
+#define PARSE_GFID(mov, ptr, le, fn, perr) \
+ { \
+ VERIFY_SEPARATOR(mov, le, perr); \
+ ptr = fn(mov); \
+ if (!ptr) { \
+ perr = 1; \
+ break; \
+ } \
+ }
+
+#define FILL_AND_MOVE(pt, buf, of, mo, nl, le) \
+ { \
+ GF_CHANGELOG_FILL_BUFFER(pt, buf, of, strlen(pt)); \
+ MOVER_MOVE(mo, nl, le); \
+ }
+
+#define PARSE_GFID_MOVE(ptr, uuid, mover, nleft, perr) \
+ { \
+ memcpy(uuid, mover, sizeof(uuid_t)); \
+ ptr = binary_to_ascii(uuid); \
+ if (!ptr) { \
+ perr = 1; \
+ break; \
+ } \
+ MOVER_MOVE(mover, nleft, sizeof(uuid_t)); \
+ }
+
+#define LINE_BUFSIZE (3 * PATH_MAX) /* enough buffer for extra chars too */
+
+/**
+ * using mmap() makes parsing easy. fgets() cannot be used here as
+ * the binary gfid could contain a line-feed (0x0A), in that case fgets()
+ * would read an incomplete line and parsing would fail. using POSIX fds
+ * would result in additional code to maintain state in case of partial
+ * reads of data (where multiple entries do not fit entirely in the buffer).
+ *
+ * mmap() gives the flexibility of pointing to an offset in the file
+ * without us worrying about reading it in memory (VM does that for us for
+ * free).
+ */
+
+static int
+gf_changelog_parse_binary(xlator_t *this, gf_changelog_journal_t *jnl,
+ int from_fd, int to_fd, size_t start_offset,
+ struct stat *stbuf, int version_idx)
+
+{
+ int ret = -1;
+ off_t off = 0;
+ off_t nleft = 0;
+ uuid_t uuid = {
+ 0,
+ };
+ char *ptr = NULL;
+ char *bname_start = NULL;
+ char *bname_end = NULL;
+ char *mover = NULL;
+ void *start = NULL;
+ char current_mover = ' ';
+ size_t blen = 0;
+ int parse_err = 0;
+ char *ascii = NULL;
+
+ ascii = GF_CALLOC(LINE_BUFSIZE, sizeof(char), gf_common_mt_char);
+
+ nleft = stbuf->st_size;
+
+ start = mmap(NULL, nleft, PROT_READ, MAP_PRIVATE, from_fd, 0);
+ if (start == MAP_FAILED) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, CHANGELOG_LIB_MSG_MMAP_FAILED,
+ "mmap() error");
+ goto out;
+ }
+
+ mover = start;
+
+ MOVER_MOVE(mover, nleft, start_offset);
+
+ while (nleft > 0) {
+ off = blen = 0;
+ ptr = bname_start = bname_end = NULL;
+
+ current_mover = *mover;
+
+ switch (current_mover) {
+ case 'D':
+ case 'M':
+ MOVER_MOVE(mover, nleft, 1);
+ PARSE_GFID_MOVE(ptr, uuid, mover, nleft, parse_err);
+
+ break;
+
+ case 'E':
+ MOVER_MOVE(mover, nleft, 1);
+ PARSE_GFID_MOVE(ptr, uuid, mover, nleft, parse_err);
+
+ bname_start = mover;
+ bname_end = strchr(mover, '\n');
+ if (bname_end == NULL) {
+ parse_err = 1;
+ break;
+ }
+
+ blen = bname_end - bname_start;
+ MOVER_MOVE(mover, nleft, blen);
+
+ break;
+
+ default:
+ parse_err = 1;
+ }
+
+ if (parse_err)
+ break;
+
+ GF_CHANGELOG_FILL_BUFFER(&current_mover, ascii, off, 1);
+ GF_CHANGELOG_FILL_BUFFER(" ", ascii, off, 1);
+ GF_CHANGELOG_FILL_BUFFER(ptr, ascii, off, strlen(ptr));
+ if (blen)
+ GF_CHANGELOG_FILL_BUFFER(bname_start, ascii, off, blen);
+ GF_CHANGELOG_FILL_BUFFER("\n", ascii, off, 1);
+
+ if (gf_changelog_write(to_fd, ascii, off) != off) {
+ gf_msg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_LIB_MSG_ASCII_ERROR,
+ "processing binary changelog failed due to "
+ " error in writing ascii change");
+ break;
+ }
+
+ MOVER_MOVE(mover, nleft, 1);
+ }
+
+ if ((nleft == 0) && (!parse_err))
+ ret = 0;
+
+ if (munmap(start, stbuf->st_size))
+ gf_msg(this->name, GF_LOG_ERROR, errno, CHANGELOG_LIB_MSG_MUNMAP_FAILED,
+ "munmap() error");
+out:
+ if (ascii)
+ GF_FREE(ascii);
+ return ret;
+}
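+
+/*
+ * Output sketch (inferred from the buffer-fill sequence above): every binary
+ * record is rewritten as one ASCII line of the form
+ *
+ *   <type> <canonical-gfid>[<bname>]\n
+ *
+ * i.e. "D <gfid>" / "M <gfid>" for data and metadata records, and
+ * "E <gfid><bname>" for entry records.
+ */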
+
+/**
+ * ascii decoder:
+ * - separate out one entry from another
+ * - use fop name rather than fop number
+ */
+static int
+gf_changelog_parse_ascii(xlator_t *this, gf_changelog_journal_t *jnl,
+ int from_fd, int to_fd, size_t start_offset,
+ struct stat *stbuf, int version_idx)
+{
+ int ng = 0;
+ int ret = -1;
+ int fop = 0;
+ int len = 0;
+ off_t off = 0;
+ off_t nleft = 0;
+ char *ptr = NULL;
+ char *eptr = NULL;
+ void *start = NULL;
+ char *mover = NULL;
+ int parse_err = 0;
+ char current_mover = ' ';
+ char *ascii = NULL;
+ const char *fopname = NULL;
+
+ ascii = GF_CALLOC(LINE_BUFSIZE, sizeof(char), gf_common_mt_char);
+
+ nleft = stbuf->st_size;
+
+ start = mmap(NULL, nleft, PROT_READ, MAP_PRIVATE, from_fd, 0);
+ if (start == MAP_FAILED) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, CHANGELOG_LIB_MSG_MMAP_FAILED,
+ "mmap() error");
+ goto out;
+ }
+
+ mover = start;
+
+ MOVER_MOVE(mover, nleft, start_offset);
+
+ while (nleft > 0) {
+ off = 0;
+ current_mover = *mover;
+
+ GF_CHANGELOG_FILL_BUFFER(&current_mover, ascii, off, 1);
+ GF_CHANGELOG_FILL_BUFFER(" ", ascii, off, 1);
+
+ switch (current_mover) {
+ case 'D':
+ MOVER_MOVE(mover, nleft, 1);
+
+ /* target gfid */
+ PARSE_GFID(mover, ptr, UUID_CANONICAL_FORM_LEN, conv_noop,
+ parse_err);
+ FILL_AND_MOVE(ptr, ascii, off, mover, nleft,
+ UUID_CANONICAL_FORM_LEN);
+ break;
+ case 'M':
+ MOVER_MOVE(mover, nleft, 1);
+
+ /* target gfid */
+ PARSE_GFID(mover, ptr, UUID_CANONICAL_FORM_LEN, conv_noop,
+ parse_err);
+ FILL_AND_MOVE(ptr, ascii, off, mover, nleft,
+ UUID_CANONICAL_FORM_LEN);
+ FILL_AND_MOVE(" ", ascii, off, mover, nleft, 1);
+
+ /* fop */
+ len = strlen(mover);
+ VERIFY_SEPARATOR(mover, len, parse_err);
+
+ fop = atoi(mover);
+ fopname = gf_fop_list[fop];
+ if (fopname == NULL) {
+ parse_err = 1;
+ break;
+ }
+
+ MOVER_MOVE(mover, nleft, len);
+
+ len = strlen(fopname);
+ GF_CHANGELOG_FILL_BUFFER(fopname, ascii, off, len);
+
+ break;
+
+ case 'E':
+ MOVER_MOVE(mover, nleft, 1);
+
+ /* target gfid */
+ PARSE_GFID(mover, ptr, UUID_CANONICAL_FORM_LEN, conv_noop,
+ parse_err);
+ FILL_AND_MOVE(ptr, ascii, off, mover, nleft,
+ UUID_CANONICAL_FORM_LEN);
+ FILL_AND_MOVE(" ", ascii, off, mover, nleft, 1);
+
+ /* fop */
+ len = strlen(mover);
+ VERIFY_SEPARATOR(mover, len, parse_err);
+
+ fop = atoi(mover);
+ fopname = gf_fop_list[fop];
+ if (fopname == NULL) {
+ parse_err = 1;
+ break;
+ }
+
+ MOVER_MOVE(mover, nleft, len);
+
+ len = strlen(fopname);
+ GF_CHANGELOG_FILL_BUFFER(fopname, ascii, off, len);
+
+ ng = nr_extra_recs[version_idx][fop];
+ for (; ng > 0; ng--) {
+ MOVER_MOVE(mover, nleft, 1);
+ len = strlen(mover);
+ VERIFY_SEPARATOR(mover, len, parse_err);
+
+ GF_CHANGELOG_FILL_BUFFER(" ", ascii, off, 1);
+ FILL_AND_MOVE(mover, ascii, off, mover, nleft, len);
+ }
+
+ /* pargfid + bname */
+ ng = nr_gfids[version_idx][fop];
+ while (ng-- > 0) {
+ MOVER_MOVE(mover, nleft, 1);
+ len = strlen(mover);
+ if (!len) {
+ MOVER_MOVE(mover, nleft, 1);
+ continue;
+ }
+
+ GF_CHANGELOG_FILL_BUFFER(" ", ascii, off, 1);
+
+ PARSE_GFID(mover, ptr, len, conv_noop, parse_err);
+ eptr = calloc(3, strlen(ptr));
+ if (!eptr) {
+ parse_err = 1;
+ break;
+ }
+
+ gf_rfc3986_encode_space_newline((unsigned char *)ptr, eptr,
+ jnl->rfc3986_space_newline);
+ FILL_AND_MOVE(eptr, ascii, off, mover, nleft, len);
+ free(eptr);
+ }
+
+ break;
+ default:
+ parse_err = 1;
+ }
+
+ if (parse_err)
+ break;
+
+ GF_CHANGELOG_FILL_BUFFER("\n", ascii, off, 1);
+
+ if (gf_changelog_write(to_fd, ascii, off) != off) {
+ gf_msg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_LIB_MSG_ASCII_ERROR,
+ "processing ascii changelog failed due to "
+ " error in writing change");
+ break;
+ }
+
+ MOVER_MOVE(mover, nleft, 1);
+ }
+
+ if ((nleft == 0) && (!parse_err))
+ ret = 0;
+
+ if (munmap(start, stbuf->st_size))
+ gf_msg(this->name, GF_LOG_ERROR, errno, CHANGELOG_LIB_MSG_MUNMAP_FAILED,
+ "munmap() error");
+
+out:
+ if (ascii)
+ GF_FREE(ascii);
+
+ return ret;
+}
+
+static int
+gf_changelog_decode(xlator_t *this, gf_changelog_journal_t *jnl, int from_fd,
+ int to_fd, struct stat *stbuf, int *zerob)
+{
+ int ret = -1;
+ int encoding = -1;
+ int major_version = -1;
+ int minor_version = -1;
+ int version_idx = -1;
+ size_t elen = 0;
+ char buffer[1024] = {
+ 0,
+ };
+
+ CHANGELOG_GET_HEADER_INFO(from_fd, buffer, sizeof(buffer), encoding,
+ major_version, minor_version, elen);
+ if (encoding == -1) /* unknown encoding */
+ goto out;
+
+ if (major_version == -1) /* unknown major version */
+ goto out;
+
+ if (minor_version == -1) /* unknown minor version */
+ goto out;
+
+ if (!CHANGELOG_VALID_ENCODING(encoding))
+ goto out;
+
+ if (elen == stbuf->st_size) {
+ *zerob = 1;
+ goto out;
+ }
+
+ if (major_version == 1 && minor_version == 1) {
+ version_idx = VERSION_1_1;
+ } else if (major_version == 1 && minor_version == 2) {
+ version_idx = VERSION_1_2;
+ }
+
+ if (version_idx == -1) /* unknown version number */
+ goto out;
+
+ /**
+ * start processing after the header
+ */
+ if (sys_lseek(from_fd, elen, SEEK_SET) < 0) {
+ goto out;
+ }
+ switch (encoding) {
+ case CHANGELOG_ENCODE_BINARY:
+ /**
+ * this ideally should have been a part of changelog-encoders.c
+ * (ie. part of the changelog translator).
+ */
+ ret = gf_changelog_parse_binary(this, jnl, from_fd, to_fd, elen,
+ stbuf, version_idx);
+ break;
+
+ case CHANGELOG_ENCODE_ASCII:
+ ret = gf_changelog_parse_ascii(this, jnl, from_fd, to_fd, elen,
+ stbuf, version_idx);
+ break;
+ }
+
+out:
+ return ret;
+}
+
+int
+gf_changelog_publish(xlator_t *this, gf_changelog_journal_t *jnl,
+ char *from_path)
+{
+ int ret = 0;
+ char dest[PATH_MAX] = {
+ 0,
+ };
+ char to_path[PATH_MAX] = {
+ 0,
+ };
+ struct stat stbuf = {
+ 0,
+ };
+
+ if (snprintf(to_path, PATH_MAX, "%s%s", jnl->jnl_current_dir,
+ basename(from_path)) >= PATH_MAX)
+ return -1;
+
+ /* handle zerob file that won't exist in current */
+ ret = sys_stat(to_path, &stbuf);
+ if (ret) {
+ if (errno == ENOENT)
+ ret = 0;
+ goto out;
+ }
+
+ if (snprintf(dest, PATH_MAX, "%s%s", jnl->jnl_processing_dir,
+ basename(from_path)) >= PATH_MAX)
+ return -1;
+
+ ret = sys_rename(to_path, dest);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_LIB_MSG_RENAME_FAILED, "from=%s", to_path, "to=%s",
+ dest, NULL);
+ }
+
+out:
+ return ret;
+}
+
+int
+gf_changelog_consume(xlator_t *this, gf_changelog_journal_t *jnl,
+ char *from_path, gf_boolean_t no_publish)
+{
+ int ret = -1;
+ int fd1 = 0;
+ int fd2 = 0;
+ int zerob = 0;
+ struct stat stbuf = {
+ 0,
+ };
+ char dest[PATH_MAX] = {
+ 0,
+ };
+ char to_path[PATH_MAX] = {
+ 0,
+ };
+
+ if (snprintf(to_path, PATH_MAX, "%s%s", jnl->jnl_current_dir,
+ basename(from_path)) >= PATH_MAX)
+ goto out;
+ if (snprintf(dest, PATH_MAX, "%s%s", jnl->jnl_processing_dir,
+ basename(from_path)) >= PATH_MAX)
+ goto out;
+
+ ret = sys_stat(from_path, &stbuf);
+ if (ret || !S_ISREG(stbuf.st_mode)) {
+ ret = -1;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_LIB_MSG_STAT_FAILED,
+ "path=%s", from_path, NULL);
+ goto out;
+ }
+
+ fd1 = open(from_path, O_RDONLY);
+ if (fd1 < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_LIB_MSG_OPEN_FAILED,
+ "path=%s", from_path, NULL);
+ goto out;
+ }
+
+ fd2 = open(to_path, O_CREAT | O_TRUNC | O_RDWR,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if (fd2 < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_LIB_MSG_OPEN_FAILED,
+ "path=%s", to_path, NULL);
+ goto close_fd;
+ } else {
+ ret = gf_changelog_decode(this, jnl, fd1, fd2, &stbuf, &zerob);
+
+ sys_close(fd2);
+
+ if (!ret) {
+ /* move it to processing on a successful
+ decode */
+ if (no_publish == _gf_true)
+ goto close_fd;
+ ret = sys_rename(to_path, dest);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_LIB_MSG_RENAME_FAILED, "from=%s", to_path,
+ "to=%s", dest, NULL);
+ }
+
+ /* remove it from .current if it's an empty file */
+ if (zerob) {
+ /* zerob changelogs must be unlinked */
+ ret = sys_unlink(to_path);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_LIB_MSG_UNLINK_FAILED, "name=empty changelog",
+ "path=%s", to_path, NULL);
+ }
+ }
+
+close_fd:
+ sys_close(fd1);
+
+out:
+ return ret;
+}
+
+void *
+gf_changelog_process(void *data)
+{
+ xlator_t *this = NULL;
+ gf_changelog_journal_t *jnl = NULL;
+ gf_changelog_entry_t *entry = NULL;
+ gf_changelog_processor_t *jnl_proc = NULL;
+
+ jnl = data;
+ jnl_proc = jnl->jnl_proc;
+ THIS = jnl->this;
+ this = jnl->this;
+
+ while (1) {
+ pthread_mutex_lock(&jnl_proc->lock);
+ {
+ while (list_empty(&jnl_proc->entries)) {
+ jnl_proc->waiting = _gf_true;
+ pthread_cond_wait(&jnl_proc->cond, &jnl_proc->lock);
+ }
+
+ entry = list_first_entry(&jnl_proc->entries, gf_changelog_entry_t,
+ list);
+ if (entry)
+ list_del(&entry->list);
+
+ jnl_proc->waiting = _gf_false;
+ }
+ pthread_mutex_unlock(&jnl_proc->lock);
+
+ if (entry) {
+ (void)gf_changelog_consume(this, jnl, entry->path, _gf_false);
+ GF_FREE(entry);
+ }
+ }
+
+ return NULL;
+}
+
+void
+gf_changelog_queue_journal(gf_changelog_processor_t *jnl_proc,
+ changelog_event_t *event)
+{
+ size_t len = 0;
+ gf_changelog_entry_t *entry = NULL;
+
+ entry = GF_CALLOC(1, sizeof(gf_changelog_entry_t),
+ gf_changelog_mt_libgfchangelog_entry_t);
+ if (!entry)
+ return;
+ INIT_LIST_HEAD(&entry->list);
+
+ len = strlen(event->u.journal.path);
+ (void)memcpy(entry->path, event->u.journal.path, len + 1);
+ entry->path[len] = '\0';
+
+ pthread_mutex_lock(&jnl_proc->lock);
+ {
+ list_add_tail(&entry->list, &jnl_proc->entries);
+ if (jnl_proc->waiting)
+ pthread_cond_signal(&jnl_proc->cond);
+ }
+ pthread_mutex_unlock(&jnl_proc->lock);
+
+ return;
+}
+
+void
+gf_changelog_handle_journal(void *xl, char *brick, void *cbkdata,
+ changelog_event_t *event)
+{
+ gf_changelog_journal_t *jnl = NULL;
+ gf_changelog_processor_t *jnl_proc = NULL;
+
+ jnl = cbkdata;
+ jnl_proc = jnl->jnl_proc;
+
+ gf_changelog_queue_journal(jnl_proc, event);
+}
+
+void
+gf_changelog_journal_disconnect(void *xl, char *brick, void *data)
+{
+ gf_changelog_journal_t *jnl = NULL;
+
+ jnl = data;
+
+ pthread_spin_lock(&jnl->lock);
+ {
+ JNL_SET_API_STATE(jnl, JNL_API_DISCONNECTED);
+ };
+ pthread_spin_unlock(&jnl->lock);
+}
+
+void
+gf_changelog_journal_connect(void *xl, char *brick, void *data)
+{
+ gf_changelog_journal_t *jnl = NULL;
+
+ jnl = data;
+
+ pthread_spin_lock(&jnl->lock);
+ {
+ JNL_SET_API_STATE(jnl, JNL_API_CONNECTED);
+ };
+ pthread_spin_unlock(&jnl->lock);
+
+ return;
+}
+
+void
+gf_changelog_cleanup_processor(gf_changelog_journal_t *jnl)
+{
+ int ret = 0;
+ xlator_t *this = NULL;
+ gf_changelog_processor_t *jnl_proc = NULL;
+
+ this = THIS;
+ if (!this || !jnl || !jnl->jnl_proc)
+ goto error_return;
+
+ jnl_proc = jnl->jnl_proc;
+
+ ret = gf_thread_cleanup(this, jnl_proc->processor);
+ if (ret != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, CHANGELOG_LIB_MSG_CLEANUP_ERROR,
+ "failed to cleanup processor thread");
+ goto error_return;
+ }
+
+ (void)pthread_mutex_destroy(&jnl_proc->lock);
+ (void)pthread_cond_destroy(&jnl_proc->cond);
+
+ GF_FREE(jnl_proc);
+
+error_return:
+ return;
+}
+
+int
+gf_changelog_init_processor(gf_changelog_journal_t *jnl)
+{
+ int ret = -1;
+ gf_changelog_processor_t *jnl_proc = NULL;
+
+ jnl_proc = GF_CALLOC(1, sizeof(gf_changelog_processor_t),
+ gf_changelog_mt_libgfchangelog_t);
+ if (!jnl_proc)
+ goto error_return;
+
+ ret = pthread_mutex_init(&jnl_proc->lock, NULL);
+ if (ret != 0)
+ goto free_jnl_proc;
+ ret = pthread_cond_init(&jnl_proc->cond, NULL);
+ if (ret != 0)
+ goto cleanup_mutex;
+
+ INIT_LIST_HEAD(&jnl_proc->entries);
+ jnl_proc->waiting = _gf_false;
+ jnl->jnl_proc = jnl_proc;
+
+ ret = gf_thread_create(&jnl_proc->processor, NULL, gf_changelog_process,
+ jnl, "clogproc");
+ if (ret != 0) {
+ jnl->jnl_proc = NULL;
+ goto cleanup_cond;
+ }
+
+ return 0;
+
+cleanup_cond:
+ (void)pthread_cond_destroy(&jnl_proc->cond);
+cleanup_mutex:
+ (void)pthread_mutex_destroy(&jnl_proc->lock);
+free_jnl_proc:
+ GF_FREE(jnl_proc);
+error_return:
+ return -1;
+}
+
+static void
+gf_changelog_cleanup_fds(gf_changelog_journal_t *jnl)
+{
+ /* tracker fd */
+ if (jnl->jnl_fd != -1)
+ sys_close(jnl->jnl_fd);
+ /* processing dir */
+ if (jnl->jnl_dir)
+ sys_closedir(jnl->jnl_dir);
+
+ if (jnl->jnl_working_dir)
+ free(jnl->jnl_working_dir); /* allocated by realpath */
+}
+
+static int
+gf_changelog_open_dirs(xlator_t *this, gf_changelog_journal_t *jnl)
+{
+ int ret = -1;
+ DIR *dir = NULL;
+ int tracker_fd = 0;
+ char tracker_path[PATH_MAX] = {
+ 0,
+ };
+
+ /* .current */
+ (void)snprintf(jnl->jnl_current_dir, PATH_MAX,
+ "%s/" GF_CHANGELOG_CURRENT_DIR "/", jnl->jnl_working_dir);
+ ret = recursive_rmdir(jnl->jnl_current_dir);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_LIB_MSG_FAILED_TO_RMDIR, "path=%s",
+ jnl->jnl_current_dir, NULL);
+ goto out;
+ }
+ ret = mkdir_p(jnl->jnl_current_dir, 0600, _gf_false);
+ if (ret)
+ goto out;
+
+ /* .processed */
+ (void)snprintf(jnl->jnl_processed_dir, PATH_MAX,
+ "%s/" GF_CHANGELOG_PROCESSED_DIR "/", jnl->jnl_working_dir);
+ ret = mkdir_p(jnl->jnl_processed_dir, 0600, _gf_false);
+ if (ret)
+ goto out;
+
+ /* .processing */
+ (void)snprintf(jnl->jnl_processing_dir, PATH_MAX,
+ "%s/" GF_CHANGELOG_PROCESSING_DIR "/", jnl->jnl_working_dir);
+ ret = recursive_rmdir(jnl->jnl_processing_dir);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_LIB_MSG_FAILED_TO_RMDIR, "path=%s",
+ jnl->jnl_processing_dir, NULL);
+ goto out;
+ }
+
+ ret = mkdir_p(jnl->jnl_processing_dir, 0600, _gf_false);
+ if (ret)
+ goto out;
+
+ dir = sys_opendir(jnl->jnl_processing_dir);
+ if (!dir) {
+ gf_msg("", GF_LOG_ERROR, errno, CHANGELOG_LIB_MSG_OPENDIR_ERROR,
+ "opendir() error");
+ goto out;
+ }
+
+ jnl->jnl_dir = dir;
+
+ (void)snprintf(tracker_path, PATH_MAX, "%s/" GF_CHANGELOG_TRACKER,
+ jnl->jnl_working_dir);
+
+ tracker_fd = open(tracker_path, O_CREAT | O_APPEND | O_RDWR,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if (tracker_fd < 0) {
+ sys_closedir(jnl->jnl_dir);
+ ret = -1;
+ goto out;
+ }
+
+ jnl->jnl_fd = tracker_fd;
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+gf_changelog_init_history(xlator_t *this, gf_changelog_journal_t *jnl,
+ char *brick_path)
+{
+ int i = 0;
+ int ret = 0;
+ char hist_scratch_dir[PATH_MAX] = {
+ 0,
+ };
+
+ jnl->hist_jnl = GF_CALLOC(1, sizeof(*jnl),
+ gf_changelog_mt_libgfchangelog_t);
+ if (!jnl->hist_jnl)
+ goto error_return;
+
+ jnl->hist_jnl->jnl_dir = NULL;
+ jnl->hist_jnl->jnl_fd = -1;
+
+ (void)snprintf(hist_scratch_dir, PATH_MAX,
+ "%s/" GF_CHANGELOG_HISTORY_DIR "/", jnl->jnl_working_dir);
+
+ ret = mkdir_p(hist_scratch_dir, 0600, _gf_false);
+ if (ret)
+ goto dealloc_hist;
+
+ jnl->hist_jnl->jnl_working_dir = realpath(hist_scratch_dir, NULL);
+ if (!jnl->hist_jnl->jnl_working_dir)
+ goto dealloc_hist;
+
+ ret = gf_changelog_open_dirs(this, jnl->hist_jnl);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, CHANGELOG_LIB_MSG_OPENDIR_ERROR,
+ "could not create entries in history scratch dir");
+ goto dealloc_hist;
+ }
+
+ if (snprintf(jnl->hist_jnl->jnl_brickpath, PATH_MAX, "%s", brick_path) >=
+ PATH_MAX)
+ goto dealloc_hist;
+
+ for (i = 0; i < 256; i++) {
+ jnl->hist_jnl->rfc3986_space_newline[i] = (i == ' ' || i == '\n' ||
+ i == '%')
+ ? 0
+ : i;
+ }
+
+ return 0;
+
+dealloc_hist:
+ GF_FREE(jnl->hist_jnl);
+ jnl->hist_jnl = NULL;
+error_return:
+ return -1;
+}
+
+void
+gf_changelog_journal_fini(void *xl, char *brick, void *data)
+{
+ gf_changelog_journal_t *jnl = NULL;
+
+ jnl = data;
+
+ gf_changelog_cleanup_processor(jnl);
+
+ gf_changelog_cleanup_fds(jnl);
+ if (jnl->hist_jnl)
+ gf_changelog_cleanup_fds(jnl->hist_jnl);
+
+ GF_FREE(jnl);
+}
+
+void *
+gf_changelog_journal_init(void *xl, struct gf_brick_spec *brick)
+{
+ int i = 0;
+ int ret = 0;
+ xlator_t *this = NULL;
+ struct stat buf = {
+ 0,
+ };
+ char *scratch_dir = NULL;
+ gf_changelog_journal_t *jnl = NULL;
+
+ this = xl;
+ scratch_dir = (char *)brick->ptr;
+
+ jnl = GF_CALLOC(1, sizeof(gf_changelog_journal_t),
+ gf_changelog_mt_libgfchangelog_t);
+ if (!jnl)
+ goto error_return;
+
+ if (snprintf(jnl->jnl_brickpath, PATH_MAX, "%s", brick->brick_path) >=
+ PATH_MAX)
+ goto dealloc_private;
+
+ if (sys_stat(scratch_dir, &buf) && errno == ENOENT) {
+ ret = mkdir_p(scratch_dir, 0600, _gf_true);
+ if (ret)
+ goto dealloc_private;
+ }
+
+ jnl->jnl_working_dir = realpath(scratch_dir, NULL);
+ if (!jnl->jnl_working_dir)
+ goto dealloc_private;
+
+ ret = gf_changelog_open_dirs(this, jnl);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, CHANGELOG_LIB_MSG_OPENDIR_ERROR,
+ "could not create entries in scratch dir");
+ goto dealloc_private;
+ }
+
+ /* RFC 3986 {de,en}coding */
+ for (i = 0; i < 256; i++) {
+ jnl->rfc3986_space_newline[i] = (i == ' ' || i == '\n' || i == '%') ? 0
+ : i;
+ }
+
+ ret = gf_changelog_init_history(this, jnl, brick->brick_path);
+ if (ret)
+ goto cleanup_fds;
+
+ /* initialize journal processor */
+ jnl->this = this;
+ ret = gf_changelog_init_processor(jnl);
+ if (ret)
+ goto cleanup_fds;
+
+ JNL_SET_API_STATE(jnl, JNL_API_CONN_INPROGESS);
+ ret = pthread_spin_init(&jnl->lock, 0);
+ if (ret != 0)
+ goto cleanup_processor;
+ return jnl;
+
+cleanup_processor:
+ gf_changelog_cleanup_processor(jnl);
+cleanup_fds:
+ gf_changelog_cleanup_fds(jnl);
+ if (jnl->hist_jnl)
+ gf_changelog_cleanup_fds(jnl->hist_jnl);
+dealloc_private:
+ GF_FREE(jnl);
+error_return:
+ return NULL;
+}
diff --git a/xlators/features/changelog/lib/src/gf-changelog-journal.h b/xlators/features/changelog/lib/src/gf-changelog-journal.h
new file mode 100644
index 00000000000..ba5b9bf827e
--- /dev/null
+++ b/xlators/features/changelog/lib/src/gf-changelog-journal.h
@@ -0,0 +1,116 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __GF_CHANGELOG_JOURNAL_H
+#define __GF_CHANGELOG_JOURNAL_H
+
+#include <unistd.h>
+#include <pthread.h>
+
+#include "changelog.h"
+
+enum api_conn {
+ JNL_API_CONNECTED,
+ JNL_API_CONN_INPROGESS,
+ JNL_API_DISCONNECTED,
+};
+
+typedef struct gf_changelog_entry {
+ char path[PATH_MAX];
+
+ struct list_head list;
+} gf_changelog_entry_t;
+
+typedef struct gf_changelog_processor {
+ pthread_mutex_t lock; /* protects ->entries */
+ pthread_cond_t cond; /* waiter during empty list */
+ gf_boolean_t waiting;
+
+ pthread_t processor; /* thread-id of journal processing thread */
+
+ struct list_head entries;
+} gf_changelog_processor_t;
+
+typedef struct gf_changelog_journal {
+ DIR *jnl_dir; /* 'processing' directory stream */
+
+ int jnl_fd; /* fd to the tracker file */
+
+ char jnl_brickpath[PATH_MAX]; /* brick path for this end-point */
+
+ gf_changelog_processor_t *jnl_proc;
+
+ char *jnl_working_dir; /* scratch directory */
+
+ char jnl_current_dir[PATH_MAX];
+ char jnl_processed_dir[PATH_MAX];
+ char jnl_processing_dir[PATH_MAX];
+
+ char rfc3986_space_newline[256]; /* RFC 3986 string encoding */
+
+ struct gf_changelog_journal *hist_jnl;
+    int hist_done; /* 0: done scanning, 1: keep scanning,
+                      -1: error */
+
+ pthread_spinlock_t lock;
+ int connected;
+ xlator_t *this;
+} gf_changelog_journal_t;
+
+#define JNL_SET_API_STATE(jnl, state) (jnl->connected = state)
+#define JNL_IS_API_DISCONNECTED(jnl) (jnl->connected == JNL_API_DISCONNECTED)
+
+/* History API */
+typedef struct gf_changelog_history_data {
+ int len;
+
+ int htime_fd;
+
+ /* parallelism count */
+ int n_parallel;
+
+ /* history from, to indexes */
+ unsigned long from;
+ unsigned long to;
+ xlator_t *this;
+} gf_changelog_history_data_t;
+
+typedef struct gf_changelog_consume_data {
+ /** set of inputs */
+
+ /* fd to read from */
+ int fd;
+
+ /* from @offset */
+ off_t offset;
+
+ xlator_t *this;
+
+ gf_changelog_journal_t *jnl;
+
+ /** set of outputs */
+
+ /* return value */
+ int retval;
+
+ /* journal processed */
+ char changelog[PATH_MAX];
+} gf_changelog_consume_data_t;
+
+/* event handler */
+CALLBACK gf_changelog_handle_journal;
+
+/* init, connect & disconnect handler */
+INIT gf_changelog_journal_init;
+FINI gf_changelog_journal_fini;
+CONNECT gf_changelog_journal_connect;
+DISCONNECT gf_changelog_journal_disconnect;
+
+#endif
diff --git a/xlators/features/changelog/lib/src/gf-changelog-reborp.c b/xlators/features/changelog/lib/src/gf-changelog-reborp.c
new file mode 100644
index 00000000000..56b11cbb705
--- /dev/null
+++ b/xlators/features/changelog/lib/src/gf-changelog-reborp.c
@@ -0,0 +1,413 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "changelog-misc.h"
+#include "changelog-mem-types.h"
+
+#include "gf-changelog-helpers.h"
+#include "changelog-rpc-common.h"
+#include "changelog-lib-messages.h"
+
+#include <glusterfs/syscall.h>
+
+/**
+ * Reverse socket: actual data transfer handler. Connection
+ * initiator is PROBER, data transfer is REBORP.
+ */
+
+static struct rpcsvc_program *gf_changelog_reborp_programs[];
+
+void *
+gf_changelog_connection_janitor(void *arg)
+{
+ int32_t ret = 0;
+ xlator_t *this = NULL;
+ gf_private_t *priv = NULL;
+ gf_changelog_t *entry = NULL;
+ struct gf_event *event = NULL;
+ struct gf_event_list *ev = NULL;
+ unsigned long drained = 0;
+
+ this = arg;
+ THIS = this;
+
+ priv = this->private;
+
+ while (1) {
+ pthread_mutex_lock(&priv->lock);
+ {
+ while (list_empty(&priv->cleanups))
+ pthread_cond_wait(&priv->cond, &priv->lock);
+
+ entry = list_first_entry(&priv->cleanups, gf_changelog_t, list);
+ list_del_init(&entry->list);
+ }
+ pthread_mutex_unlock(&priv->lock);
+
+ drained = 0;
+ ev = &entry->event;
+
+ gf_smsg(this->name, GF_LOG_INFO, 0,
+ CHANGELOG_LIB_MSG_CLEANING_BRICK_ENTRY_INFO, "brick=%s",
+ entry->brick, NULL);
+
+ /* 0x0: disable rpc-clnt */
+ rpc_clnt_disable(RPC_PROBER(entry));
+
+ /* 0x1: cleanup callback invoker thread */
+ ret = gf_cleanup_event(this, ev);
+ if (ret)
+ continue;
+
+ /* 0x2: drain pending events */
+ while (!list_empty(&ev->events)) {
+ event = list_first_entry(&ev->events, struct gf_event, list);
+ gf_smsg(this->name, GF_LOG_INFO, 0,
+ CHANGELOG_LIB_MSG_DRAINING_EVENT_INFO, "seq=%lu",
+ event->seq, "payload=%d", event->count, NULL);
+
+ GF_FREE(event);
+ drained++;
+ }
+
+ gf_smsg(this->name, GF_LOG_INFO, 0,
+ CHANGELOG_LIB_MSG_DRAINED_EVENT_INFO, "num=%lu", drained, NULL);
+
+ /* 0x3: freeup brick entry */
+ gf_smsg(this->name, GF_LOG_INFO, 0,
+ CHANGELOG_LIB_MSG_FREEING_ENTRY_INFO, "entry=%p", entry, NULL);
+ LOCK_DESTROY(&entry->statelock);
+ GF_FREE(entry);
+ }
+
+ return NULL;
+}
+
+int
+gf_changelog_reborp_rpcsvc_notify(rpcsvc_t *rpc, void *mydata,
+ rpcsvc_event_t event, void *data)
+{
+ int ret = 0;
+ xlator_t *this = NULL;
+ gf_changelog_t *entry = NULL;
+
+ if (!(event == RPCSVC_EVENT_ACCEPT || event == RPCSVC_EVENT_DISCONNECT))
+ return 0;
+
+ entry = mydata;
+ this = entry->this;
+
+ switch (event) {
+ case RPCSVC_EVENT_ACCEPT:
+ ret = sys_unlink(RPC_SOCK(entry));
+ if (ret != 0)
+ gf_smsg(this->name, GF_LOG_WARNING, errno,
+ CHANGELOG_LIB_MSG_UNLINK_FAILED, "name=reverse socket",
+ "path=%s", RPC_SOCK(entry), NULL);
+ if (entry->connected)
+ GF_CHANGELOG_INVOKE_CBK(this, entry->connected, entry->brick,
+ entry->ptr);
+ break;
+ case RPCSVC_EVENT_DISCONNECT:
+ if (entry->disconnected)
+ GF_CHANGELOG_INVOKE_CBK(this, entry->disconnected, entry->brick,
+ entry->ptr);
+ /* passthrough */
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+rpcsvc_t *
+gf_changelog_reborp_init_rpc_listner(xlator_t *this, char *path, char *sock,
+ void *cbkdata)
+{
+ CHANGELOG_MAKE_TMP_SOCKET_PATH(path, sock, UNIX_PATH_MAX);
+ return changelog_rpc_server_init(this, sock, cbkdata,
+ gf_changelog_reborp_rpcsvc_notify,
+ gf_changelog_reborp_programs);
+}
+
+/**
+ * This is dirty and painful as of now until there is event filtering in the
+ * server. The entire event buffer is scanned and interested events are picked,
+ * whereas we _should_ be notified with the events we were interested in
+ * (selected at the time of probe). As of now this is wasteful and needs
+ * fixing ASAP. I just made it work; it needs to be better.
+ *
+ * @FIXME: cleanup this bugger once server filters events.
+ */
+void
+gf_changelog_invoke_callback(gf_changelog_t *entry, struct iovec **vec,
+ int payloadcnt)
+{
+ int i = 0;
+ int evsize = 0;
+ xlator_t *this = NULL;
+ changelog_event_t *event = NULL;
+
+ this = entry->this;
+
+ for (; i < payloadcnt; i++) {
+ event = (changelog_event_t *)vec[i]->iov_base;
+ evsize = vec[i]->iov_len / CHANGELOG_EV_SIZE;
+
+ for (; evsize > 0; evsize--, event++) {
+ if (gf_changelog_filter_check(entry, event)) {
+ GF_CHANGELOG_INVOKE_CBK(this, entry->callback, entry->brick,
+ entry->ptr, event);
+ }
+ }
+ }
+}
+
+/**
+ * Ordered event handler is self-adaptive.. if the event sequence number
+ * is what's expected (->next_seq) there is no ordering list that's
+ * maintained. On out-of-order event notifications, event buffers are
+ * dynamically allocated and ordered.
+ */
+
+int
+__is_expected_sequence(struct gf_event_list *ev, struct gf_event *event)
+{
+ return (ev->next_seq == event->seq);
+}
+
+int
+__can_process_event(struct gf_event_list *ev, struct gf_event **event)
+{
+ *event = list_first_entry(&ev->events, struct gf_event, list);
+
+ if (__is_expected_sequence(ev, *event)) {
+ list_del(&(*event)->list);
+ ev->next_seq++;
+ return 1;
+ }
+
+ return 0;
+}
+
+void
+pick_event_ordered(struct gf_event_list *ev, struct gf_event **event)
+{
+ pthread_mutex_lock(&ev->lock);
+ {
+ while (list_empty(&ev->events) || !__can_process_event(ev, event))
+ pthread_cond_wait(&ev->cond, &ev->lock);
+ }
+ pthread_mutex_unlock(&ev->lock);
+}
+
+void
+pick_event_unordered(struct gf_event_list *ev, struct gf_event **event)
+{
+ pthread_mutex_lock(&ev->lock);
+ {
+ while (list_empty(&ev->events))
+ pthread_cond_wait(&ev->cond, &ev->lock);
+ *event = list_first_entry(&ev->events, struct gf_event, list);
+ list_del(&(*event)->list);
+ }
+ pthread_mutex_unlock(&ev->lock);
+}
+
+void *
+gf_changelog_callback_invoker(void *arg)
+{
+ xlator_t *this = NULL;
+ gf_changelog_t *entry = NULL;
+ struct iovec *vec = NULL;
+ struct gf_event *event = NULL;
+ struct gf_event_list *ev = NULL;
+
+ ev = arg;
+ entry = ev->entry;
+ THIS = this = entry->this;
+
+ while (1) {
+ entry->pickevent(ev, &event);
+
+ vec = (struct iovec *)&event->iov;
+ gf_changelog_invoke_callback(entry, &vec, event->count);
+
+ GF_FREE(event);
+ }
+
+ return NULL;
+}
+
+static int
+orderfn(struct list_head *pos1, struct list_head *pos2)
+{
+ struct gf_event *event1 = NULL;
+ struct gf_event *event2 = NULL;
+
+ event1 = list_entry(pos1, struct gf_event, list);
+ event2 = list_entry(pos2, struct gf_event, list);
+
+ if (event1->seq > event2->seq)
+ return 1;
+ return -1;
+}
+
+void
+queue_ordered_event(struct gf_event_list *ev, struct gf_event *event)
+{
+ /* add event to the ordered event list and wake up listener(s) */
+ pthread_mutex_lock(&ev->lock);
+ {
+ list_add_order(&event->list, &ev->events, orderfn);
+ if (!ev->next_seq)
+ ev->next_seq = event->seq;
+ if (ev->next_seq == event->seq)
+ pthread_cond_signal(&ev->cond);
+ }
+ pthread_mutex_unlock(&ev->lock);
+}
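+
+/*
+ * Worked example (illustrative): with ->next_seq at 1, events arriving in
+ * the order 1, 3, 2 behave as follows: 1 matches next_seq, so the invoker
+ * is signalled and next_seq moves to 2; 3 is queued silently (3 != 2); when
+ * 2 arrives, list_add_order() places it ahead of 3 and the invoker is
+ * signalled again, after which it drains 2 and then 3 in sequence.
+ */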
+
+void
+queue_unordered_event(struct gf_event_list *ev, struct gf_event *event)
+{
+ /* add event to the tail of the queue and wake up listener(s) */
+ pthread_mutex_lock(&ev->lock);
+ {
+ list_add_tail(&event->list, &ev->events);
+ pthread_cond_signal(&ev->cond);
+ }
+ pthread_mutex_unlock(&ev->lock);
+}
+
+int
+gf_changelog_event_handler(rpcsvc_request_t *req, xlator_t *this,
+ gf_changelog_t *entry)
+{
+ int i = 0;
+ size_t payloadlen = 0;
+ ssize_t len = 0;
+ int payloadcnt = 0;
+ changelog_event_req rpc_req = {
+ 0,
+ };
+ changelog_event_rsp rpc_rsp = {
+ 0,
+ };
+ struct iovec *vec = NULL;
+ struct gf_event *event = NULL;
+ struct gf_event_list *ev = NULL;
+
+ ev = &entry->event;
+
+ len = xdr_to_generic(req->msg[0], &rpc_req,
+ (xdrproc_t)xdr_changelog_event_req);
+ if (len < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_LIB_MSG_XDR_DECODING_FAILED, "xdr decoding failed");
+ req->rpc_err = GARBAGE_ARGS;
+ goto handle_xdr_error;
+ }
+
+ if (len < req->msg[0].iov_len) {
+ payloadcnt = 1;
+ payloadlen = (req->msg[0].iov_len - len);
+ }
+ for (i = 1; i < req->count; i++) {
+ payloadcnt++;
+ payloadlen += req->msg[i].iov_len;
+ }
+
+ event = GF_CALLOC(1, GF_EVENT_CALLOC_SIZE(payloadcnt, payloadlen),
+ gf_changelog_mt_libgfchangelog_event_t);
+ if (!event)
+ goto handle_xdr_error;
+ INIT_LIST_HEAD(&event->list);
+
+ payloadlen = 0;
+ event->seq = rpc_req.seq;
+ event->count = payloadcnt;
+
+ /* deep copy IO vectors */
+ vec = &event->iov[0];
+ GF_EVENT_ASSIGN_IOVEC(vec, event, (req->msg[0].iov_len - len), payloadlen);
+ (void)memcpy(vec->iov_base, req->msg[0].iov_base + len, vec->iov_len);
+
+ for (i = 1; i < req->count; i++) {
+ vec = &event->iov[i];
+ GF_EVENT_ASSIGN_IOVEC(vec, event, req->msg[i].iov_len, payloadlen);
+ (void)memcpy(event->iov[i].iov_base, req->msg[i].iov_base,
+ req->msg[i].iov_len);
+ }
+
+ gf_msg_debug(this->name, 0,
+ "seq: %" PRIu64 " [%s] (time: %" PRIu64 ".%" PRIu64
+ "), "
+ "(vec: %d, len: %zd)",
+ rpc_req.seq, entry->brick, rpc_req.tv_sec, rpc_req.tv_usec,
+ payloadcnt, payloadlen);
+
+ /* dispatch event */
+ entry->queueevent(ev, event);
+
+ /* ack sequence number */
+ rpc_rsp.op_ret = 0;
+ rpc_rsp.seq = rpc_req.seq;
+
+ goto submit_rpc;
+
+handle_xdr_error:
+ rpc_rsp.op_ret = -1;
+ rpc_rsp.seq = 0; /* invalid */
+submit_rpc:
+ return changelog_rpc_sumbit_reply(req, &rpc_rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_changelog_event_rsp);
+}
+
+int
+gf_changelog_reborp_handle_event(rpcsvc_request_t *req)
+{
+ xlator_t *this = NULL;
+ rpcsvc_t *svc = NULL;
+ gf_changelog_t *entry = NULL;
+
+ svc = rpcsvc_request_service(req);
+ entry = svc->mydata;
+
+ this = THIS = entry->this;
+
+ return gf_changelog_event_handler(req, this, entry);
+}
+
+static rpcsvc_actor_t gf_changelog_reborp_actors[CHANGELOG_REV_PROC_MAX] = {
+ [CHANGELOG_REV_PROC_EVENT] = {"CHANGELOG EVENT HANDLER",
+ gf_changelog_reborp_handle_event, NULL,
+ CHANGELOG_REV_PROC_EVENT, DRC_NA, 0},
+};
+
+/**
+ * Do not use synctask as the RPC layer dereferences ->mydata as THIS.
+ * In gf_changelog_setup_rpc(), @cbkdata is of type @gf_changelog_t,
+ * and that's required to invoke the callback with the appropriate
+ * brick path and its private data.
+ */
+static struct rpcsvc_program gf_changelog_reborp_prog = {
+ .progname = "LIBGFCHANGELOG REBORP",
+ .prognum = CHANGELOG_REV_RPC_PROCNUM,
+ .progver = CHANGELOG_REV_RPC_PROCVER,
+ .numactors = CHANGELOG_REV_PROC_MAX,
+ .actors = gf_changelog_reborp_actors,
+ .synctask = _gf_false,
+};
+
+static struct rpcsvc_program *gf_changelog_reborp_programs[] = {
+ &gf_changelog_reborp_prog,
+ NULL,
+};
diff --git a/xlators/features/changelog/lib/src/gf-changelog-rpc.c b/xlators/features/changelog/lib/src/gf-changelog-rpc.c
new file mode 100644
index 00000000000..8ec6ffbcebc
--- /dev/null
+++ b/xlators/features/changelog/lib/src/gf-changelog-rpc.c
@@ -0,0 +1,98 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "gf-changelog-rpc.h"
+#include "changelog-misc.h"
+#include "changelog-mem-types.h"
+
+struct rpc_clnt_program gf_changelog_clnt;
+
+/* TODO: piggyback reconnect to caller (upcall) */
+int
+gf_changelog_rpc_notify(struct rpc_clnt *rpc, void *mydata,
+ rpc_clnt_event_t event, void *data)
+{
+ switch (event) {
+ case RPC_CLNT_CONNECT:
+ break;
+ case RPC_CLNT_DISCONNECT:
+ case RPC_CLNT_MSG:
+ case RPC_CLNT_DESTROY:
+ case RPC_CLNT_PING:
+ break;
+ }
+
+ return 0;
+}
+
+struct rpc_clnt *
+gf_changelog_rpc_init(xlator_t *this, gf_changelog_t *entry)
+{
+ char sockfile[UNIX_PATH_MAX] = {
+ 0,
+ };
+
+ CHANGELOG_MAKE_SOCKET_PATH(entry->brick, sockfile, UNIX_PATH_MAX);
+ return changelog_rpc_client_init(this, entry, sockfile,
+ gf_changelog_rpc_notify);
+}
+
+/**
+ * remote procedure calls declarations.
+ */
+
+int
+gf_probe_changelog_cbk(struct rpc_req *req, struct iovec *iovec, int count,
+ void *myframe)
+{
+ return 0;
+}
+
+int
+gf_probe_changelog_filter(call_frame_t *frame, xlator_t *this, void *data)
+{
+ char *sock = NULL;
+ gf_changelog_t *entry = NULL;
+ changelog_probe_req req = {
+ 0,
+ };
+
+ entry = data;
+ sock = RPC_SOCK(entry);
+
+ (void)memcpy(&req.sock, sock, strlen(sock));
+ req.filter = entry->notify;
+
+ /* invoke RPC */
+ return changelog_rpc_sumbit_req(
+ RPC_PROBER(entry), (void *)&req, frame, &gf_changelog_clnt,
+ CHANGELOG_RPC_PROBE_FILTER, NULL, 0, NULL, this, gf_probe_changelog_cbk,
+ (xdrproc_t)xdr_changelog_probe_req);
+}
+
+int
+gf_changelog_invoke_rpc(xlator_t *this, gf_changelog_t *entry, int procidx)
+{
+ return changelog_invoke_rpc(this, RPC_PROBER(entry), &gf_changelog_clnt,
+ procidx, entry);
+}
+
+struct rpc_clnt_procedure gf_changelog_procs[CHANGELOG_RPC_PROC_MAX] = {
+ [CHANGELOG_RPC_PROC_NULL] = {"NULL", NULL},
+ [CHANGELOG_RPC_PROBE_FILTER] = {"PROBE FILTER", gf_probe_changelog_filter},
+};
+
+struct rpc_clnt_program gf_changelog_clnt = {
+ .progname = "LIBGFCHANGELOG",
+ .prognum = CHANGELOG_RPC_PROGNUM,
+ .progver = CHANGELOG_RPC_PROGVER,
+ .numproc = CHANGELOG_RPC_PROC_MAX,
+ .proctable = gf_changelog_procs,
+};
diff --git a/xlators/features/changelog/lib/src/gf-changelog-rpc.h b/xlators/features/changelog/lib/src/gf-changelog-rpc.h
new file mode 100644
index 00000000000..5c82d6f1c08
--- /dev/null
+++ b/xlators/features/changelog/lib/src/gf-changelog-rpc.h
@@ -0,0 +1,28 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __GF_CHANGELOG_RPC_H
+#define __GF_CHANGELOG_RPC_H
+
+#include <glusterfs/xlator.h>
+
+#include "gf-changelog-helpers.h"
+#include "changelog-rpc-common.h"
+
+struct rpc_clnt *
+gf_changelog_rpc_init(xlator_t *, gf_changelog_t *);
+
+int
+gf_changelog_invoke_rpc(xlator_t *, gf_changelog_t *, int);
+
+rpcsvc_t *
+gf_changelog_reborp_init_rpc_listner(xlator_t *, char *, char *, void *);
+
+#endif
diff --git a/xlators/features/changelog/lib/src/gf-changelog.c b/xlators/features/changelog/lib/src/gf-changelog.c
new file mode 100644
index 00000000000..57c3d39ef76
--- /dev/null
+++ b/xlators/features/changelog/lib/src/gf-changelog.c
@@ -0,0 +1,652 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <errno.h>
+#include <dirent.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <string.h>
+
+#include <glusterfs/globals.h>
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/syncop.h>
+
+#include "gf-changelog-rpc.h"
+#include "gf-changelog-helpers.h"
+
+/* from the changelog translator */
+#include "changelog-misc.h"
+#include "changelog-mem-types.h"
+#include "changelog-lib-messages.h"
+
+/**
+ * Global singleton xlator pointer for the library, initialized
+ * during library load. This should probably be hidden inside
+ * an initialized object which is a handle for the consumer.
+ *
+ * TODO: do away with the global..
+ */
+xlator_t *master = NULL;
+
+static inline gf_private_t *
+gf_changelog_alloc_priv()
+{
+ int ret = 0;
+ gf_private_t *priv = NULL;
+
+ priv = GF_CALLOC(1, sizeof(*priv), gf_changelog_mt_priv_t);
+ if (!priv)
+ goto error_return;
+ INIT_LIST_HEAD(&priv->connections);
+ INIT_LIST_HEAD(&priv->cleanups);
+
+ ret = pthread_mutex_init(&priv->lock, NULL);
+ if (ret != 0)
+ goto free_priv;
+ ret = pthread_cond_init(&priv->cond, NULL);
+ if (ret != 0)
+ goto cleanup_mutex;
+
+ priv->api = NULL;
+ return priv;
+
+cleanup_mutex:
+ (void)pthread_mutex_destroy(&priv->lock);
+free_priv:
+ GF_FREE(priv);
+error_return:
+ return NULL;
+}
+
+#define GF_CHANGELOG_EVENT_POOL_SIZE 16384
+#define GF_CHANGELOG_EVENT_THREAD_COUNT 4
+
+static int
+gf_changelog_ctx_defaults_init(glusterfs_ctx_t *ctx)
+{
+ cmd_args_t *cmd_args = NULL;
+ struct rlimit lim = {
+ 0,
+ };
+ call_pool_t *pool = NULL;
+ int ret = -1;
+
+ ret = xlator_mem_acct_init(THIS, gf_changelog_mt_end);
+ if (ret != 0)
+ return -1;
+
+ ctx->process_uuid = generate_glusterfs_ctx_id();
+ if (!ctx->process_uuid)
+ return -1;
+
+ ctx->page_size = 128 * GF_UNIT_KB;
+
+ ctx->iobuf_pool = iobuf_pool_new();
+ if (!ctx->iobuf_pool)
+ goto free_pool;
+
+ ctx->event_pool = gf_event_pool_new(GF_CHANGELOG_EVENT_POOL_SIZE,
+ GF_CHANGELOG_EVENT_THREAD_COUNT);
+ if (!ctx->event_pool)
+ goto free_pool;
+
+ pool = GF_CALLOC(1, sizeof(call_pool_t),
+ gf_changelog_mt_libgfchangelog_call_pool_t);
+ if (!pool)
+ goto free_pool;
+
+ /* frame_mem_pool size 112 * 64 */
+ pool->frame_mem_pool = mem_pool_new(call_frame_t, 32);
+ if (!pool->frame_mem_pool)
+ goto free_pool;
+
+ /* stack_mem_pool size 256 * 128 */
+ pool->stack_mem_pool = mem_pool_new(call_stack_t, 16);
+
+ if (!pool->stack_mem_pool)
+ goto free_pool;
+
+ ctx->stub_mem_pool = mem_pool_new(call_stub_t, 16);
+ if (!ctx->stub_mem_pool)
+ goto free_pool;
+
+ ctx->dict_pool = mem_pool_new(dict_t, 32);
+ if (!ctx->dict_pool)
+ goto free_pool;
+
+ ctx->dict_pair_pool = mem_pool_new(data_pair_t, 512);
+ if (!ctx->dict_pair_pool)
+ goto free_pool;
+
+ ctx->dict_data_pool = mem_pool_new(data_t, 512);
+ if (!ctx->dict_data_pool)
+ goto free_pool;
+
+ ctx->logbuf_pool = mem_pool_new(log_buf_t, 256);
+ if (!ctx->logbuf_pool)
+ goto free_pool;
+
+ INIT_LIST_HEAD(&pool->all_frames);
+ LOCK_INIT(&pool->lock);
+ ctx->pool = pool;
+
+ LOCK_INIT(&ctx->lock);
+
+ cmd_args = &ctx->cmd_args;
+
+ INIT_LIST_HEAD(&cmd_args->xlator_options);
+
+ lim.rlim_cur = RLIM_INFINITY;
+ lim.rlim_max = RLIM_INFINITY;
+ setrlimit(RLIMIT_CORE, &lim);
+
+ return 0;
+
+free_pool:
+ if (pool) {
+ GF_FREE(pool->frame_mem_pool);
+
+ GF_FREE(pool->stack_mem_pool);
+
+ GF_FREE(pool);
+ }
+
+ GF_FREE(ctx->stub_mem_pool);
+
+ GF_FREE(ctx->dict_pool);
+
+ GF_FREE(ctx->dict_pair_pool);
+
+ GF_FREE(ctx->dict_data_pool);
+
+ GF_FREE(ctx->logbuf_pool);
+
+ GF_FREE(ctx->iobuf_pool);
+
+ GF_FREE(ctx->event_pool);
+
+ return -1;
+}
+
+/* TODO: cleanup ctx defaults */
+void
+gf_changelog_cleanup_this(xlator_t *this)
+{
+ glusterfs_ctx_t *ctx = NULL;
+
+ if (!this)
+ return;
+
+ ctx = this->ctx;
+ syncenv_destroy(ctx->env);
+ free(ctx);
+
+ this->private = NULL;
+ this->ctx = NULL;
+
+ mem_pools_fini();
+}
+
+static int
+gf_changelog_init_context()
+{
+ glusterfs_ctx_t *ctx = NULL;
+
+ ctx = glusterfs_ctx_new();
+ if (!ctx)
+ goto error_return;
+
+ if (glusterfs_globals_init(ctx))
+ goto free_ctx;
+
+ THIS->ctx = ctx;
+ if (gf_changelog_ctx_defaults_init(ctx))
+ goto free_ctx;
+
+ ctx->env = syncenv_new(0, 0, 0);
+ if (!ctx->env)
+ goto free_ctx;
+ return 0;
+
+free_ctx:
+ free(ctx);
+ THIS->ctx = NULL;
+error_return:
+ return -1;
+}
+
+static int
+gf_changelog_init_master()
+{
+ int ret = 0;
+
+ ret = gf_changelog_init_context();
+ mem_pools_init();
+
+ return ret;
+}
+
+/* TODO: cleanup clnt/svc on failure */
+int
+gf_changelog_setup_rpc(xlator_t *this, gf_changelog_t *entry, int proc)
+{
+ int ret = 0;
+ rpcsvc_t *svc = NULL;
+ struct rpc_clnt *rpc = NULL;
+
+ /**
+ * Initialize a connect back socket. A probe() RPC call to the server
+ * triggers a reverse connect.
+ */
+ svc = gf_changelog_reborp_init_rpc_listner(this, entry->brick,
+ RPC_SOCK(entry), entry);
+ if (!svc)
+ goto error_return;
+ RPC_REBORP(entry) = svc;
+
+ /* Initialize an RPC client */
+ rpc = gf_changelog_rpc_init(this, entry);
+ if (!rpc)
+ goto error_return;
+ RPC_PROBER(entry) = rpc;
+
+ /**
+ * @FIXME
+ * till we have a connection state machine, let's delay the RPC call
+ * for now..
+ */
+ sleep(2);
+
+ /**
+ * Probe changelog translator for reverse connection. After a successful
+ * call, the client is of little further use and could be disconnected, but
+ * let's leave the connection active for any future RPC calls.
+ */
+ ret = gf_changelog_invoke_rpc(this, entry, proc);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, CHANGELOG_LIB_MSG_INVOKE_RPC_FAILED,
+ "Could not initiate probe RPC, bailing out!!!");
+ goto error_return;
+ }
+
+ return 0;
+
+error_return:
+ return -1;
+}
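+
+/*
+ * A rough sketch of the handshake set up above, in terms of the calls
+ * visible in this library (socket paths are built elsewhere):
+ *
+ *   1. gf_changelog_reborp_init_rpc_listner() listens on RPC_SOCK(entry)
+ *      for the reverse connection.
+ *   2. gf_changelog_rpc_init() connects to the brick's changelog socket.
+ *   3. gf_changelog_invoke_rpc(.., CHANGELOG_RPC_PROBE_FILTER) asks the
+ *      changelog translator to connect back to RPC_SOCK(entry).
+ *   4. Dispatched events then arrive via gf_changelog_reborp_handle_event().
+ */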
+
+int
+gf_cleanup_event(xlator_t *this, struct gf_event_list *ev)
+{
+ int ret = 0;
+
+ ret = gf_thread_cleanup(this, ev->invoker);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, -ret,
+ CHANGELOG_LIB_MSG_CLEANUP_ERROR,
+ "cannot cleanup callback invoker thread."
+ " Not freeing resources");
+ return -1;
+ }
+
+ ev->entry = NULL;
+
+ return 0;
+}
+
+static int
+gf_init_event(gf_changelog_t *entry)
+{
+ int ret = 0;
+ struct gf_event_list *ev = NULL;
+
+ ev = &entry->event;
+ ev->entry = entry;
+
+ ret = pthread_mutex_init(&ev->lock, NULL);
+ if (ret != 0)
+ goto error_return;
+ ret = pthread_cond_init(&ev->cond, NULL);
+ if (ret != 0)
+ goto cleanup_mutex;
+ INIT_LIST_HEAD(&ev->events);
+
+ ev->next_seq = 0; /* bootstrap sequencing */
+
+ if (GF_NEED_ORDERED_EVENTS(entry)) {
+ entry->pickevent = pick_event_ordered;
+ entry->queueevent = queue_ordered_event;
+ } else {
+ entry->pickevent = pick_event_unordered;
+ entry->queueevent = queue_unordered_event;
+ }
+
+ ret = gf_thread_create(&ev->invoker, NULL, gf_changelog_callback_invoker,
+ ev, "clogcbki");
+ if (ret != 0) {
+ entry->pickevent = NULL;
+ entry->queueevent = NULL;
+ goto cleanup_cond;
+ }
+
+ return 0;
+
+cleanup_cond:
+ (void)pthread_cond_destroy(&ev->cond);
+cleanup_mutex:
+ (void)pthread_mutex_destroy(&ev->lock);
+error_return:
+ return -1;
+}
+
+/**
+ * TODO:
+ * - cleanup invoker thread
+ * - cleanup event list
+ * - destroy rpc{-clnt, svc}
+ */
+int
+gf_cleanup_brick_connection(xlator_t *this, gf_changelog_t *entry)
+{
+ return 0;
+}
+
+int
+gf_cleanup_connections(xlator_t *this)
+{
+ return 0;
+}
+
+static int
+gf_setup_brick_connection(xlator_t *this, struct gf_brick_spec *brick,
+ gf_boolean_t ordered, void *xl)
+{
+ int ret = 0;
+ gf_private_t *priv = NULL;
+ gf_changelog_t *entry = NULL;
+
+ priv = this->private;
+
+ if (!brick->callback || !brick->init || !brick->fini)
+ goto error_return;
+
+ entry = GF_CALLOC(1, sizeof(*entry), gf_changelog_mt_libgfchangelog_t);
+ if (!entry)
+ goto error_return;
+ INIT_LIST_HEAD(&entry->list);
+
+ LOCK_INIT(&entry->statelock);
+ entry->connstate = GF_CHANGELOG_CONN_STATE_PENDING;
+
+ entry->notify = brick->filter;
+ if (snprintf(entry->brick, PATH_MAX, "%s", brick->brick_path) >= PATH_MAX)
+ goto free_entry;
+
+ entry->this = this;
+ entry->invokerxl = xl;
+
+ entry->ordered = ordered;
+ ret = gf_init_event(entry);
+ if (ret)
+ goto free_entry;
+
+ entry->fini = brick->fini;
+ entry->callback = brick->callback;
+ entry->connected = brick->connected;
+ entry->disconnected = brick->disconnected;
+
+ entry->ptr = brick->init(this, brick);
+ if (!entry->ptr)
+ goto cleanup_event;
+ priv->api = entry->ptr; /* pointer to API, if required */
+
+ pthread_mutex_lock(&priv->lock);
+ {
+ list_add_tail(&entry->list, &priv->connections);
+ }
+ pthread_mutex_unlock(&priv->lock);
+
+ ret = gf_changelog_setup_rpc(this, entry, CHANGELOG_RPC_PROBE_FILTER);
+ if (ret)
+ goto cleanup_event;
+ return 0;
+
+cleanup_event:
+ (void)gf_cleanup_event(this, &entry->event);
+free_entry:
+ gf_msg_debug(this->name, 0, "freeing entry %p", entry);
+ list_del(&entry->list); /* FIXME: kludge for now */
+ GF_FREE(entry);
+error_return:
+ return -1;
+}
+
+int
+gf_changelog_register_brick(xlator_t *this, struct gf_brick_spec *brick,
+ gf_boolean_t ordered, void *xl)
+{
+ return gf_setup_brick_connection(this, brick, ordered, xl);
+}
+
+static int
+gf_changelog_setup_logging(xlator_t *this, char *logfile, int loglevel)
+{
+ /* passing ident as NULL means to use default ident for syslog */
+ if (gf_log_init(this->ctx, logfile, NULL))
+ return -1;
+
+ gf_log_set_loglevel(this->ctx, (loglevel == -1) ? GF_LOG_INFO : loglevel);
+ return 0;
+}
+
+static int
+gf_changelog_set_master(xlator_t *master, void *xl)
+{
+ int32_t ret = 0;
+ xlator_t *this = NULL;
+ xlator_t *old_this = NULL;
+ gf_private_t *priv = NULL;
+
+ this = xl;
+ if (!this || !this->ctx) {
+ ret = gf_changelog_init_master();
+ if (ret)
+ return -1;
+ this = THIS;
+ }
+
+ master->ctx = this->ctx;
+
+ INIT_LIST_HEAD(&master->volume_options);
+ SAVE_THIS(THIS);
+
+ ret = xlator_mem_acct_init(THIS, gf_changelog_mt_end);
+ if (ret != 0)
+ goto restore_this;
+
+ priv = gf_changelog_alloc_priv();
+ if (!priv) {
+ ret = -1;
+ goto restore_this;
+ }
+
+ if (!xl) {
+ /* poller thread */
+ ret = gf_thread_create(&priv->poller, NULL, changelog_rpc_poller, THIS,
+ "clogpoll");
+ if (ret != 0) {
+ GF_FREE(priv);
+ gf_msg(master->name, GF_LOG_ERROR, 0,
+ CHANGELOG_LIB_MSG_THREAD_CREATION_FAILED,
+ "failed to spawn poller thread");
+ goto restore_this;
+ }
+ }
+
+ master->private = priv;
+
+restore_this:
+ RESTORE_THIS();
+
+ return ret;
+}
+
+int
+gf_changelog_init(void *xl)
+{
+ int ret = 0;
+ gf_private_t *priv = NULL;
+
+ if (master)
+ return 0;
+
+ master = calloc(1, sizeof(*master));
+ if (!master)
+ goto error_return;
+
+ master->name = strdup("gfchangelog");
+ if (!master->name)
+ goto dealloc_master;
+
+ ret = gf_changelog_set_master(master, xl);
+ if (ret)
+ goto dealloc_name;
+
+ priv = master->private;
+ ret = gf_thread_create(&priv->connectionjanitor, NULL,
+ gf_changelog_connection_janitor, master, "clogjan");
+ if (ret != 0) {
+ /* TODO: cleanup priv, mutex (poller thread for !xl) */
+ goto dealloc_name;
+ }
+
+ return 0;
+
+dealloc_name:
+ free(master->name);
+dealloc_master:
+ free(master);
+ master = NULL;
+error_return:
+ return -1;
+}
+
+int
+gf_changelog_register_generic(struct gf_brick_spec *bricks, int count,
+ int ordered, char *logfile, int lvl, void *xl)
+{
+ int ret = 0;
+ xlator_t *this = NULL;
+ xlator_t *old_this = NULL;
+ struct gf_brick_spec *brick = NULL;
+ gf_boolean_t need_order = _gf_false;
+
+ SAVE_THIS(xl);
+
+ this = THIS;
+ if (!this)
+ goto error_return;
+
+ ret = gf_changelog_setup_logging(this, logfile, lvl);
+ if (ret)
+ goto error_return;
+
+ need_order = (ordered) ? _gf_true : _gf_false;
+
+ brick = bricks;
+ while (count--) {
+ gf_smsg(this->name, GF_LOG_INFO, 0,
+ CHANGELOG_LIB_MSG_NOTIFY_REGISTER_INFO, "brick=%s",
+ brick->brick_path, "notify_filter=%d", brick->filter, NULL);
+
+ ret = gf_changelog_register_brick(this, brick, need_order, xl);
+ if (ret != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_LIB_MSG_NOTIFY_REGISTER_FAILED,
+ "Error registering with changelog xlator");
+ break;
+ }
+
+ brick++;
+ }
+
+ if (ret != 0)
+ goto cleanup_inited_bricks;
+
+ RESTORE_THIS();
+ return 0;
+
+cleanup_inited_bricks:
+ gf_cleanup_connections(this);
+error_return:
+ RESTORE_THIS();
+ return -1;
+}
+
+/**
+ * @API
+ * gf_changelog_register()
+ *
+ * This is _NOT_ a generic register API. It's a special API to handle
+ * updates at a journal granularity. This is used by consumers, such as
+ * geo-replication, wanting to process the persistent journal via a set
+ * of APIs. All of this is required to maintain backward compatibility.
+ * Owner specific private data is stored in ->api (in gf_private_t),
+ * which is used by the APIs to access its private data. This limits
+ * the library access to a single brick, but that's how it used to
+ * be anyway. Furthermore, this API solely _owns_ "this", therefore
+ * callers already having a notion of "this" are expected to use the
+ * newer API.
+ *
+ * Newer applications wanting to use this library need not face this
+ * limitation and can rely on the much more feature-rich generic register
+ * API, which is purely callback based.
+ *
+ * NOTE: @max_reconnects is not used but required for backward compat.
+ *
+ * For generic API, refer gf_changelog_register_generic().
+ */
+int
+gf_changelog_register(char *brick_path, char *scratch_dir, char *log_file,
+ int log_level, int max_reconnects)
+{
+ struct gf_brick_spec brick = {
+ 0,
+ };
+
+ if (master)
+ THIS = master;
+ else
+ return -1;
+
+ brick.brick_path = brick_path;
+ brick.filter = CHANGELOG_OP_TYPE_JOURNAL;
+
+ brick.init = gf_changelog_journal_init;
+ brick.fini = gf_changelog_journal_fini;
+ brick.callback = gf_changelog_handle_journal;
+ brick.connected = gf_changelog_journal_connect;
+ brick.disconnected = gf_changelog_journal_disconnect;
+
+ brick.ptr = scratch_dir;
+
+ return gf_changelog_register_generic(&brick, 1, 1, log_file, log_level,
+ NULL);
+}
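+
+/*
+ * A minimal usage sketch for the legacy API above; the brick path,
+ * scratch directory and log file below are illustrative values only:
+ *
+ *   int ret = gf_changelog_register("/bricks/b1",
+ *                                   "/var/run/gluster/changelog-scratch",
+ *                                   "/var/log/consumer.log",
+ *                                   GF_LOG_INFO, 5);
+ *   if (ret)
+ *       return -1;   (registration failed)
+ *
+ * On success, the journal-granular and history APIs of this library can
+ * be used against the registered brick.
+ */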
diff --git a/xlators/features/changelog/lib/src/gf-history-changelog.c b/xlators/features/changelog/lib/src/gf-history-changelog.c
new file mode 100644
index 00000000000..a16219f3664
--- /dev/null
+++ b/xlators/features/changelog/lib/src/gf-history-changelog.c
@@ -0,0 +1,1020 @@
+#include <errno.h>
+#include <dirent.h>
+#include <stddef.h>
+#include <sys/types.h>
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <string.h>
+
+#include <glusterfs/globals.h>
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/syscall.h>
+
+#include "gf-changelog-helpers.h"
+#include "gf-changelog-journal.h"
+
+/* from the changelog translator */
+#include "changelog-misc.h"
+#include "changelog-lib-messages.h"
+#include "changelog-mem-types.h"
+
+/**
+ * @API
+ * gf_history_changelog_done:
+ * Move processed history changelog file from .processing
+ * to .processed
+ *
+ * ARGUMENTS:
+ * file(IN): path to processed history changelog file in
+ * .processing directory.
+ *
+ * RETURN VALUE:
+ * 0: On success.
+ * -1: On error.
+ */
+int
+gf_history_changelog_done(char *file)
+{
+ int ret = -1;
+ char *buffer = NULL;
+ xlator_t *this = NULL;
+ gf_changelog_journal_t *jnl = NULL;
+ gf_changelog_journal_t *hist_jnl = NULL;
+ char to_path[PATH_MAX] = {
+ 0,
+ };
+
+ errno = EINVAL;
+
+ this = THIS;
+ if (!this)
+ goto out;
+
+ jnl = (gf_changelog_journal_t *)GF_CHANGELOG_GET_API_PTR(this);
+ if (!jnl)
+ goto out;
+
+ hist_jnl = jnl->hist_jnl;
+ if (!hist_jnl)
+ goto out;
+
+ if (!file || !strlen(file))
+ goto out;
+
+ /* make sure 'file' is inside ->jnl_working_dir */
+ buffer = realpath(file, NULL);
+ if (!buffer)
+ goto out;
+
+ if (strncmp(hist_jnl->jnl_working_dir, buffer,
+ strlen(hist_jnl->jnl_working_dir)))
+ goto out;
+
+ (void)snprintf(to_path, PATH_MAX, "%s%s", hist_jnl->jnl_processed_dir,
+ basename(buffer));
+ gf_msg_debug(this->name, 0, "moving %s to processed directory", file);
+ ret = sys_rename(buffer, to_path);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_LIB_MSG_RENAME_FAILED, "from=%s", file, "to=%s",
+ to_path, NULL);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (buffer)
+ free(buffer); /* allocated by realpath() */
+ return ret;
+}
+
+/**
+ * @API
+ * gf_history_changelog_start_fresh:
+ * For a set of changelogs, start from the beginning.
+ * It truncates the history tracker fd.
+ *
+ * RETURN VALUES:
+ * 0: On success.
+ * -1: On error.
+ */
+int
+gf_history_changelog_start_fresh()
+{
+ xlator_t *this = NULL;
+ gf_changelog_journal_t *jnl = NULL;
+ gf_changelog_journal_t *hist_jnl = NULL;
+
+ this = THIS;
+ if (!this)
+ goto out;
+
+ errno = EINVAL;
+
+ jnl = (gf_changelog_journal_t *)GF_CHANGELOG_GET_API_PTR(this);
+ if (!jnl)
+ goto out;
+
+ hist_jnl = jnl->hist_jnl;
+ if (!hist_jnl)
+ goto out;
+
+ if (gf_ftruncate(hist_jnl->jnl_fd, 0))
+ goto out;
+
+ return 0;
+
+out:
+ return -1;
+}
+
+/**
+ * @API
+ * gf_history_changelog_next_change:
+ * Return the next history changelog file entry. Zero means all
+ * history changelogs are consumed.
+ *
+ * ARGUMENTS:
+ * bufptr(OUT): Path to unprocessed history changelog file
+ * from tracker file.
+ * maxlen(IN): Usually PATH_MAX.
+ *
+ * RETURN VALUES:
+ * size: On success.
+ * -1 : On error.
+ */
+ssize_t
+gf_history_changelog_next_change(char *bufptr, size_t maxlen)
+{
+ ssize_t size = -1;
+ int tracker_fd = 0;
+ xlator_t *this = NULL;
+ gf_changelog_journal_t *jnl = NULL;
+ gf_changelog_journal_t *hist_jnl = NULL;
+ char buffer[PATH_MAX] = {
+ 0,
+ };
+
+ if (maxlen > PATH_MAX) {
+ errno = ENAMETOOLONG;
+ goto out;
+ }
+
+ errno = EINVAL;
+
+ this = THIS;
+ if (!this)
+ goto out;
+
+ jnl = (gf_changelog_journal_t *)GF_CHANGELOG_GET_API_PTR(this);
+ if (!jnl)
+ goto out;
+
+ hist_jnl = jnl->hist_jnl;
+ if (!hist_jnl)
+ goto out;
+
+ tracker_fd = hist_jnl->jnl_fd;
+
+ size = gf_readline(tracker_fd, buffer, maxlen);
+ if (size < 0) {
+ size = -1;
+ goto out;
+ }
+
+ if (size == 0)
+ goto out;
+
+ memcpy(bufptr, buffer, size - 1);
+ bufptr[size - 1] = '\0';
+
+out:
+ return size;
+}
+
+/**
+ * @API
+ * gf_history_changelog_scan:
+ * Scan and generate a list of change entries.
+ * Calling this API multiple times (without calling gf_changelog_done())
+ * would result in new changelog(s) being refreshed in the tracker file.
+ * This call also acts as a cancellation point for the consumer.
+ *
+ * RETURN VALUES:
+ * +ve integer : success and keep scanning (count of changelogs).
+ * 0 : success and done scanning.
+ * -1 : error.
+ *
+ * NOTE: After the first 0 return, call gf_history_changelog_next_change()
+ * once more to empty the tracker.
+ *
+ */
+ssize_t
+gf_history_changelog_scan()
+{
+ int tracker_fd = 0;
+ size_t off = 0;
+ xlator_t *this = NULL;
+ size_t nr_entries = 0;
+ gf_changelog_journal_t *jnl = NULL;
+ gf_changelog_journal_t *hist_jnl = NULL;
+ struct dirent *entry = NULL;
+ struct dirent scratch[2] = {
+ {
+ 0,
+ },
+ };
+ char buffer[PATH_MAX] = {
+ 0,
+ };
+ static int is_last_scan;
+
+ this = THIS;
+ if (!this)
+ goto out;
+
+ jnl = (gf_changelog_journal_t *)GF_CHANGELOG_GET_API_PTR(this);
+ if (!jnl)
+ goto out;
+ if (JNL_IS_API_DISCONNECTED(jnl)) {
+ errno = ENOTCONN;
+ goto out;
+ }
+
+ hist_jnl = jnl->hist_jnl;
+ if (!hist_jnl)
+ goto out;
+
+retry:
+ if (is_last_scan == 1)
+ return 0;
+ if (hist_jnl->hist_done == 0)
+ is_last_scan = 1;
+
+ errno = EINVAL;
+ if (hist_jnl->hist_done == -1)
+ goto out;
+
+ tracker_fd = hist_jnl->jnl_fd;
+
+ if (gf_ftruncate(tracker_fd, 0))
+ goto out;
+
+ rewinddir(hist_jnl->jnl_dir);
+
+ for (;;) {
+ errno = 0;
+ entry = sys_readdir(hist_jnl->jnl_dir, scratch);
+ if (!entry || errno != 0)
+ break;
+
+ if (strcmp(basename(entry->d_name), ".") == 0 ||
+ strcmp(basename(entry->d_name), "..") == 0)
+ continue;
+
+ nr_entries++;
+
+ GF_CHANGELOG_FILL_BUFFER(hist_jnl->jnl_processing_dir, buffer, off,
+ strlen(hist_jnl->jnl_processing_dir));
+ GF_CHANGELOG_FILL_BUFFER(entry->d_name, buffer, off,
+ strlen(entry->d_name));
+ GF_CHANGELOG_FILL_BUFFER("\n", buffer, off, 1);
+
+ if (gf_changelog_write(tracker_fd, buffer, off) != off) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, CHANGELOG_LIB_MSG_WRITE_FAILED,
+ "error writing changelog filename"
+ " to tracker file");
+ break;
+ }
+ off = 0;
+ }
+
+ gf_msg_debug(this->name, 0, "hist_done %d, is_last_scan: %d",
+ hist_jnl->hist_done, is_last_scan);
+
+ if (!entry) {
+ if (gf_lseek(tracker_fd, 0, SEEK_SET) != -1) {
+ if (nr_entries > 0)
+ return nr_entries;
+ else {
+ sleep(1);
+ goto retry;
+ }
+ }
+ }
+out:
+ return -1;
+}
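+
+/*
+ * A sketch of how the history APIs in this file are typically chained
+ * together by a consumer (error handling is abbreviated):
+ *
+ *   char path[PATH_MAX];
+ *   ssize_t nr;
+ *
+ *   while ((nr = gf_history_changelog_scan()) > 0) {
+ *       while (gf_history_changelog_next_change(path, PATH_MAX) > 0) {
+ *           ... process the changelog file at "path" ...
+ *           gf_history_changelog_done(path);
+ *       }
+ *   }
+ *   nr == 0 means all history changelogs were consumed, -1 is an error.
+ */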
+
+/*
+ * Gets the timestamp value of the changelog path at the given index.
+ * Returns 0 on success (updates the given timestamp), -1 on failure.
+ */
+int
+gf_history_get_timestamp(int fd, int index, int len, unsigned long *ts)
+{
+ xlator_t *this = NULL;
+ int n_read = -1;
+ char path_buf[PATH_MAX] = {
+ 0,
+ };
+ char *iter = path_buf;
+ size_t offset = index * (len + 1);
+ unsigned long value = 0;
+ int ret = 0;
+
+ this = THIS;
+ if (!this) {
+ return -1;
+ }
+
+ n_read = sys_pread(fd, path_buf, len, offset);
+ if (n_read < 0) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, errno, CHANGELOG_LIB_MSG_READ_ERROR,
+ "could not read from htime file");
+ goto out;
+ }
+ iter += len - TIMESTAMP_LENGTH;
+ sscanf(iter, "%lu", &value);
+out:
+ if (ret == 0)
+ *ts = value;
+ return ret;
+}
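+
+/*
+ * Layout assumed by the offset arithmetic above: the htime file is an
+ * array of fixed-length, NUL-terminated changelog paths, each ending in
+ * a TIMESTAMP_LENGTH-digit timestamp. For example, with len = 64 the
+ * record at index 3 starts at byte 3 * (64 + 1) = 195 and its timestamp
+ * is read from the last TIMESTAMP_LENGTH bytes of that record (len = 64
+ * is an illustrative value).
+ */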
+
+/*
+ * Function to ensure correctness of search
+ * Checks whether @value is there next to @target_index or not
+ */
+int
+gf_history_check(int fd, int target_index, unsigned long value, int len)
+{
+ int ret = 0;
+ unsigned long ts1 = 0;
+ unsigned long ts2 = 0;
+
+ if (target_index == 0) {
+ ret = gf_history_get_timestamp(fd, target_index, len, &ts1);
+ if (ret == -1)
+ goto out;
+ if (value <= ts1)
+ goto out;
+ else {
+ ret = -1;
+ goto out;
+ }
+ }
+
+ ret = gf_history_get_timestamp(fd, target_index, len, &ts1);
+ if (ret == -1)
+ goto out;
+ ret = gf_history_get_timestamp(fd, target_index - 1, len, &ts2);
+ if (ret == -1)
+ goto out;
+
+ if ((value <= ts1) && (value > ts2)) {
+ goto out;
+ } else
+ ret = -1;
+out:
+ return ret;
+}
+
+/*
+ * This is a "binary search" based search function which checks neighbours
+ * for in-range availability of the value to be searched and provides the
+ * index at which the changelog file nearest to the requested timestamp
+ * (value) can be read from.
+ *
+ * Actual offset can be calculated as (index * (len + 1)).
+ * "1" is because the changelog paths are null terminated.
+ *
+ * @fd : htime file (fd) to search in
+ * @value : time stamp to search
+ * @from : start index to search
+ * @to : end index to search
+ * @len : length of the fixed-length strings separated by null
+ */
+
+int
+gf_history_b_search(int fd, unsigned long value, unsigned long from,
+ unsigned long to, int len)
+{
+ int m_index = -1;
+ unsigned long cur_value = 0;
+ unsigned long ts1 = 0;
+ int ret = 0;
+
+ m_index = (from + to) / 2;
+
+ if ((to - from) <= 1) {
+ /* either one or 2 changelogs left */
+ if (to != from) {
+ /* check if value is less or greater than to
+ * return accordingly
+ */
+ ret = gf_history_get_timestamp(fd, from, len, &ts1);
+ if (ret == -1)
+ goto out;
+ if (ts1 >= value) {
+ /* actually the comparison should be
+ * exactly ==, but considering the
+ * case of only 2 changelogs in the htime file
+ */
+ return from;
+ } else
+ return to;
+ } else
+ return to;
+ }
+
+ ret = gf_history_get_timestamp(fd, m_index, len, &cur_value);
+ if (ret == -1)
+ goto out;
+ if (cur_value == value) {
+ return m_index;
+ } else if (value > cur_value) {
+ ret = gf_history_get_timestamp(fd, m_index + 1, len, &cur_value);
+ if (ret == -1)
+ goto out;
+ if (value < cur_value)
+ return m_index + 1;
+ else
+ return gf_history_b_search(fd, value, m_index + 1, to, len);
+ } else {
+ if (m_index == 0) {
+ /* we are sure that values exists
+ * in this htime file
+ */
+ return 0;
+ } else {
+ ret = gf_history_get_timestamp(fd, m_index - 1, len, &cur_value);
+ if (ret == -1)
+ goto out;
+ if (value > cur_value) {
+ return m_index;
+ } else
+ return gf_history_b_search(fd, value, from, m_index - 1, len);
+ }
+ }
+out:
+ return -1;
+}
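+
+/*
+ * Worked example for the search above (hypothetical htime contents):
+ * with per-record timestamps [100, 200, 300, 400] and value = 250,
+ * gf_history_b_search() returns index 2, since 250 <= ts[2] (300) and
+ * 250 > ts[1] (200) -- exactly the condition gf_history_check() verifies.
+ */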
+
+/*
+ * Description: Checks if the changelog path is usable or not. Usable
+ * changelog files carry "CHANGELOG" in their basename; files
+ * with a lowercase "changelog" are skipped.
+ *
+ * Returns:
+ * 1 : Yes, usable (basename does not contain "changelog")
+ * 0 : No, not usable (basename contains "changelog")
+ */
+int
+gf_is_changelog_usable(char *cl_path)
+{
+ int ret = -1;
+ const char low_c[] = "changelog";
+ char *str_ret = NULL;
+ char *bname = NULL;
+
+ bname = basename(cl_path);
+
+ str_ret = strstr(bname, low_c);
+
+ if (str_ret != NULL)
+ ret = 0;
+ else
+ ret = 1;
+
+ return ret;
+}
+
+void *
+gf_changelog_consume_wrap(void *data)
+{
+ int ret = -1;
+ ssize_t nread = 0;
+ xlator_t *this = NULL;
+ gf_changelog_consume_data_t *ccd = NULL;
+
+ ccd = (gf_changelog_consume_data_t *)data;
+ this = ccd->this;
+
+ ccd->retval = -1;
+
+ nread = sys_pread(ccd->fd, ccd->changelog, PATH_MAX - 1, ccd->offset);
+ if (nread < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, CHANGELOG_LIB_MSG_READ_ERROR,
+ "cannot read from history metadata file");
+ goto out;
+ }
+
+ /* TODO: handle short reads and EOF. */
+ if (gf_is_changelog_usable(ccd->changelog) == 1) {
+ ret = gf_changelog_consume(ccd->this, ccd->jnl, ccd->changelog,
+ _gf_true);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_LIB_MSG_PARSE_ERROR,
+ "name=%s", ccd->changelog, NULL);
+ goto out;
+ }
+ }
+ ccd->retval = 0;
+
+out:
+ return NULL;
+}
+
+/**
+ * "gf_history_consume" is a worker function for history.
+ * It parses and moves changelog files from index "from"
+ * to index "to" in the open htime file whose fd is "fd".
+ */
+
+#define MAX_PARALLELS 10
+
+void *
+gf_history_consume(void *data)
+{
+ xlator_t *this = NULL;
+ gf_changelog_journal_t *jnl = NULL;
+ gf_changelog_journal_t *hist_jnl = NULL;
+ int ret = 0;
+ int iter = 0;
+ int fd = -1;
+ int from = -1;
+ int to = -1;
+ int len = -1;
+ int n_parallel = 0;
+ int n_envoked = 0;
+ gf_boolean_t publish = _gf_true;
+ pthread_t th_id[MAX_PARALLELS] = {
+ 0,
+ };
+ gf_changelog_history_data_t *hist_data = NULL;
+ gf_changelog_consume_data_t ccd[MAX_PARALLELS] = {
+ {0},
+ };
+ gf_changelog_consume_data_t *curr = NULL;
+
+ hist_data = (gf_changelog_history_data_t *)data;
+ if (hist_data == NULL) {
+ ret = -1;
+ goto out;
+ }
+
+ fd = hist_data->htime_fd;
+ from = hist_data->from;
+ to = hist_data->to;
+ len = hist_data->len;
+ n_parallel = hist_data->n_parallel;
+
+ THIS = hist_data->this;
+ this = hist_data->this;
+ if (!this) {
+ ret = -1;
+ goto out;
+ }
+
+ jnl = (gf_changelog_journal_t *)GF_CHANGELOG_GET_API_PTR(this);
+ if (!jnl) {
+ ret = -1;
+ goto out;
+ }
+
+ hist_jnl = jnl->hist_jnl;
+ if (!hist_jnl) {
+ ret = -1;
+ goto out;
+ }
+
+ while (from <= to) {
+ n_envoked = 0;
+
+ for (iter = 0; (iter < n_parallel) && (from <= to); iter++) {
+ curr = &ccd[iter];
+
+ curr->this = this;
+ curr->jnl = hist_jnl;
+ curr->fd = fd;
+ curr->offset = from * (len + 1);
+
+ curr->retval = 0;
+ memset(curr->changelog, '\0', PATH_MAX);
+
+ ret = gf_thread_create(&th_id[iter], NULL,
+ gf_changelog_consume_wrap, curr,
+ "clogc%03hx", (iter + 1) & 0x3ff);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, ret,
+ CHANGELOG_LIB_MSG_THREAD_CREATION_FAILED,
+ "could not create consume-thread");
+ goto sync;
+ } else
+ n_envoked++;
+
+ from++;
+ }
+
+ sync:
+ for (iter = 0; iter < n_envoked; iter++) {
+ ret = pthread_join(th_id[iter], NULL);
+ if (ret) {
+ publish = _gf_false;
+ gf_msg(this->name, GF_LOG_ERROR, ret,
+ CHANGELOG_LIB_MSG_PTHREAD_JOIN_FAILED,
+ "pthread_join() error");
+ /* try to join the rest */
+ continue;
+ }
+
+ if (publish == _gf_false)
+ continue;
+
+ curr = &ccd[iter];
+ if (curr->retval) {
+ publish = _gf_false;
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_LIB_MSG_PARSE_ERROR_CEASED, NULL);
+ continue;
+ }
+
+ ret = gf_changelog_publish(curr->this, curr->jnl, curr->changelog);
+ if (ret) {
+ publish = _gf_false;
+ gf_msg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_LIB_MSG_PUBLISH_ERROR,
+ "publish error, ceased publishing...");
+ }
+ }
+ }
+
+ /* informing "parsing done". */
+ hist_jnl->hist_done = (publish == _gf_true) ? 0 : -1;
+
+out:
+ if (fd != -1)
+ (void)sys_close(fd);
+ GF_FREE(hist_data);
+ return NULL;
+}
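+
+/*
+ * Batching sketch for the worker above: with n_parallel = 4, from = 0 and
+ * to = 9 (10 changelogs), the outer loop runs three rounds of consume
+ * threads (4, 4 and 2), joining each round before publishing its results
+ * in order via gf_changelog_publish().
+ */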
+
+/**
+ * @API
+ * gf_history_changelog() : Get/parse historical changelogs and get them ready
+ * for consumption.
+ *
+ * Arguments:
+ * @changelog_dir : Directory location from where history changelogs are
+ * supposed to be consumed.
+ * @start: Unix timestamp FROM where changelogs should be consumed.
+ * @end: Unix timestamp TO where changelogs should be consumed.
+ * @n_parallel : degree of parallelism for changelog parsing.
+ * @actual_end : the end time till where changelogs are available.
+ *
+ * Return:
+ * Returns <timestamp> on success, the last time till where changelogs are
+ * available.
+ * Returns -1 on failure(error).
+ */
+
+/**
+ * Extract timestamp range from a historical metadata file
+ * Returns:
+ * 0 : Success ({min,max}_ts with the appropriate values)
+ * -1 : Failure
+ * -2 : Ignore this metadata file and process next
+ */
+int
+gf_changelog_extract_min_max(const char *dname, const char *htime_dir, int *fd,
+ unsigned long *total, unsigned long *min_ts,
+ unsigned long *max_ts)
+{
+ int ret = -1;
+ xlator_t *this = NULL;
+ char htime_file[PATH_MAX] = {
+ 0,
+ };
+ struct stat stbuf = {
+ 0,
+ };
+ char *iter = NULL;
+ char x_value[30] = {
+ 0,
+ };
+
+ this = THIS;
+
+ snprintf(htime_file, PATH_MAX, "%s/%s", htime_dir, dname);
+
+ iter = (htime_file + strlen(htime_file) - TIMESTAMP_LENGTH);
+ sscanf(iter, "%lu", min_ts);
+
+ ret = sys_stat(htime_file, &stbuf);
+ if (ret) {
+ ret = -1;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_LIB_MSG_HTIME_ERROR,
+ "op=stat", "path=%s", htime_file, NULL);
+ goto out;
+ }
+
+ /* ignore everything except regular files */
+ if (!S_ISREG(stbuf.st_mode)) {
+ ret = -2;
+ goto out;
+ }
+
+ *fd = open(htime_file, O_RDONLY);
+ if (*fd < 0) {
+ ret = -1;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_LIB_MSG_HTIME_ERROR,
+ "op=open", "path=%s", htime_file, NULL);
+ goto out;
+ }
+
+ /* Looks good, extract max timestamp */
+ ret = sys_fgetxattr(*fd, HTIME_KEY, x_value, sizeof(x_value));
+ if (ret < 0) {
+ ret = -1;
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_LIB_MSG_GET_XATTR_FAILED, "path=%s", htime_file,
+ NULL);
+ goto out;
+ }
+
+ sscanf(x_value, "%lu:%lu", max_ts, total);
+ gf_smsg(this->name, GF_LOG_INFO, 0, CHANGELOG_LIB_MSG_MIN_MAX_INFO,
+ "min=%lu", *min_ts, "max=%lu", *max_ts, "total_changelogs=%lu",
+ *total, NULL);
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/* gf_history_changelog returns actual_end and spawns threads to
+ * parse historical changelogs. The return values are as follows.
+ * 0 : On success
+ * 1 : Successful, but partial historical changelogs available,
+ * end time falls into different htime file or future time
+ * -2 : Error, requested historical changelog not available, not
+ * even partial
+ * -1 : On any error
+ */
+int
+gf_history_changelog(char *changelog_dir, unsigned long start,
+ unsigned long end, int n_parallel,
+ unsigned long *actual_end)
+{
+ int ret = 0;
+ int len = -1;
+ int fd = -1;
+ int n_read = -1;
+ unsigned long min_ts = 0;
+ unsigned long max_ts = 0;
+ unsigned long end2 = 0;
+ unsigned long ts1 = 0;
+ unsigned long ts2 = 0;
+ unsigned long to = 0;
+ unsigned long from = 0;
+ unsigned long total_changelog = 0;
+ xlator_t *this = NULL;
+ gf_changelog_journal_t *jnl = NULL;
+ gf_changelog_journal_t *hist_jnl = NULL;
+ gf_changelog_history_data_t *hist_data = NULL;
+ DIR *dirp = NULL;
+ struct dirent *entry = NULL;
+ struct dirent scratch[2] = {
+ {
+ 0,
+ },
+ };
+ pthread_t consume_th = 0;
+ char htime_dir[PATH_MAX] = {
+ 0,
+ };
+ char buffer[PATH_MAX] = {
+ 0,
+ };
+ gf_boolean_t partial_history = _gf_false;
+
+ pthread_attr_t attr;
+
+ this = THIS;
+ if (!this) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = pthread_attr_init(&attr);
+ if (ret != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, CHANGELOG_LIB_MSG_PTHREAD_ERROR,
+ "Pthread init failed");
+ return -1;
+ }
+
+ jnl = (gf_changelog_journal_t *)GF_CHANGELOG_GET_API_PTR(this);
+ if (!jnl) {
+ ret = -1;
+ goto out;
+ }
+
+ hist_jnl = (gf_changelog_journal_t *)jnl->hist_jnl;
+ if (!hist_jnl) {
+ ret = -1;
+ goto out;
+ }
+
+ gf_smsg(this->name, GF_LOG_INFO, 0, CHANGELOG_LIB_MSG_REQUESTING_INFO,
+ "start=%lu", start, "end=%lu", end, NULL);
+
+ /* basic sanity check */
+ if (start > end || n_parallel <= 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_LIB_MSG_HIST_FAILED,
+ "start=%lu", start, "end=%lu", end, "thread_count=%d",
+ n_parallel, NULL);
+ ret = -1;
+ goto out;
+ }
+
+ /* cap parallelism count */
+ if (n_parallel > MAX_PARALLELS)
+ n_parallel = MAX_PARALLELS;
+
+ CHANGELOG_FILL_HTIME_DIR(changelog_dir, htime_dir);
+
+ dirp = sys_opendir(htime_dir);
+ if (dirp == NULL) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_LIB_MSG_HTIME_ERROR,
+ "op=opendir", "path=%s", htime_dir, NULL);
+ ret = -1;
+ goto out;
+ }
+
+ for (;;) {
+ errno = 0;
+
+ entry = sys_readdir(dirp, scratch);
+
+ if (!entry || errno != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_LIB_MSG_HIST_FAILED, "start=%lu", start,
+ "end=%lu", end, NULL);
+ ret = -2;
+ break;
+ }
+
+ ret = gf_changelog_extract_min_max(entry->d_name, htime_dir, &fd,
+ &total_changelog, &min_ts, &max_ts);
+ if (ret) {
+ if (-2 == ret)
+ continue;
+ goto out;
+ }
+
+ if (start >= min_ts && start < max_ts) {
+ /**
+ * TODO: handle short reads later...
+ */
+ n_read = sys_read(fd, buffer, PATH_MAX);
+ if (n_read < 0) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_LIB_MSG_READ_ERROR,
+ "unable to read htime file");
+ goto out;
+ }
+
+ len = strlen(buffer);
+
+ /**
+ * search @start in the htime file returning its index
+ * (@from)
+ */
+ from = gf_history_b_search(fd, start, 0, total_changelog - 1, len);
+
+ /* ensuring correctness of gf_history_b_search */
+ if (gf_history_check(fd, from, start, len) != 0) {
+ ret = -1;
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_LIB_MSG_GET_TIME_ERROR, "for=start",
+ "start=%lu", start, "idx=%lu", from, NULL);
+ goto out;
+ }
+
+ end2 = (end <= max_ts) ? end : max_ts;
+
+ /* Check if end falls out of the same HTIME file. The end
+ * falling into a different htime file or a changelog
+ * disable-enable is detected only after 20 seconds.
+ * This is required because applications generally
+ * ask for historical changelogs till the current time and
+ * it is possible the changelog has not rolled over yet.
+ * So a buffer of the default rollover time plus 5
+ * seconds is subtracted. If the application requests
+ * an end time within half a minute of a changelog
+ * disable, it is not detected as a changelog disable and
+ * it is the application's responsibility to retry after
+ * 20 seconds before confirming it as partial history.
+ */
+ if ((end - 20) > max_ts) {
+ partial_history = _gf_true;
+ }
+
+ /**
+ * search @end2 in the htime file returning its index (@to)
+ */
+ to = gf_history_b_search(fd, end2, 0, total_changelog - 1, len);
+
+ if (gf_history_check(fd, to, end2, len) != 0) {
+ ret = -1;
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_LIB_MSG_GET_TIME_ERROR, "for=end",
+ "start=%lu", end2, "idx=%lu", to, NULL);
+ goto out;
+ }
+
+ ret = gf_history_get_timestamp(fd, from, len, &ts1);
+ if (ret == -1)
+ goto out;
+
+ ret = gf_history_get_timestamp(fd, to, len, &ts2);
+ if (ret == -1)
+ goto out;
+
+ gf_smsg(this->name, GF_LOG_INFO, 0, CHANGELOG_LIB_MSG_FINAL_INFO,
+ "from=%lu", ts1, "to=%lu", ts2, "changes=%lu",
+ (to - from + 1), NULL);
+
+ hist_data = GF_CALLOC(1, sizeof(gf_changelog_history_data_t),
+ gf_changelog_mt_history_data_t);
+
+ hist_data->htime_fd = fd;
+ hist_data->from = from;
+ hist_data->to = to;
+ hist_data->len = len;
+ hist_data->n_parallel = n_parallel;
+ hist_data->this = this;
+
+ ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
+ if (ret != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, ret,
+ CHANGELOG_LIB_MSG_PTHREAD_ERROR,
+ "unable to sets the detach"
+ " state attribute");
+ ret = -1;
+ goto out;
+ }
+
+ /* spawn a thread for background parsing & publishing */
+ ret = gf_thread_create(&consume_th, &attr, gf_history_consume,
+ hist_data, "cloghcon");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, ret,
+ CHANGELOG_LIB_MSG_THREAD_CREATION_FAILED,
+ "creation of consume parent-thread"
+ " failed.");
+ ret = -1;
+ goto out;
+ }
+
+ goto out;
+
+ } else { /* end of range check */
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_LIB_MSG_HIST_FAILED, "start=%lu", start,
+ "end=%lu", end, "chlog_min=%lu", min_ts, "chlog_max=%lu",
+ max_ts, NULL);
+ }
+ } /* end of readdir() */
+
+out:
+ if (dirp != NULL)
+ (void)sys_closedir(dirp);
+
+ if (ret < 0) {
+ if (fd != -1)
+ (void)sys_close(fd);
+ GF_FREE(hist_data);
+ (void)pthread_attr_destroy(&attr);
+
+ return ret;
+ }
+
+ hist_jnl->hist_done = 1;
+ *actual_end = ts2;
+
+ if (partial_history) {
+ ret = 1;
+ }
+
+ return ret;
+}
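+
+/*
+ * A minimal invocation sketch for gf_history_changelog(); the changelog
+ * directory and the timestamps are illustrative values only:
+ *
+ *   unsigned long actual_end = 0;
+ *   int ret = gf_history_changelog("/bricks/b1/.glusterfs/changelogs",
+ *                                  1424261500, 1424261600, 4, &actual_end);
+ *
+ *   ret == 0  : requested range will be parsed (till actual_end)
+ *   ret == 1  : only partial history is available
+ *   ret == -2 : requested history not available, not even partially
+ *   ret == -1 : error
+ *
+ * Parsing happens in a detached background thread; the consumer then
+ * polls gf_history_changelog_scan() to pick up the generated changelogs.
+ */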
diff --git a/xlators/features/changelog/src/Makefile.am b/xlators/features/changelog/src/Makefile.am
new file mode 100644
index 00000000000..eee7dfa238d
--- /dev/null
+++ b/xlators/features/changelog/src/Makefile.am
@@ -0,0 +1,29 @@
+xlator_LTLIBRARIES = changelog.la
+
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
+
+noinst_HEADERS = changelog-helpers.h changelog-mem-types.h changelog-rt.h \
+ changelog-rpc-common.h changelog-misc.h changelog-encoders.h \
+ changelog-rpc-common.h changelog-rpc.h changelog-ev-handle.h \
+ changelog-messages.h
+
+changelog_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+
+changelog_la_SOURCES = changelog.c changelog-rt.c changelog-helpers.c \
+ changelog-encoders.c changelog-rpc.c changelog-barrier.c \
+ changelog-rpc-common.c changelog-ev-handle.c
+changelog_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
+ $(top_builddir)/rpc/xdr/src/libgfxdr.la \
+ $(top_builddir)/rpc/rpc-lib/src/libgfrpc.la
+
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \
+ -I$(top_srcdir)/rpc/rpc-lib/src \
+ -I$(top_srcdir)/rpc/rpc-transport/socket/src \
+ -I$(top_srcdir)/xlators/features/changelog/lib/src/ \
+ -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -D$(GF_HOST_OS) \
+ -DDATADIR=\"$(localstatedir)\"
+
+AM_CFLAGS = -Wall $(GF_CFLAGS)
+
+CLEANFILES =
diff --git a/xlators/features/changelog/src/changelog-barrier.c b/xlators/features/changelog/src/changelog-barrier.c
new file mode 100644
index 00000000000..0fb89ddb127
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-barrier.c
@@ -0,0 +1,131 @@
+/*
+ Copyright (c) 2014 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "changelog-helpers.h"
+#include "changelog-messages.h"
+#include <glusterfs/call-stub.h>
+
+/* Enqueue a stub */
+void
+__chlog_barrier_enqueue(xlator_t *this, call_stub_t *stub)
+{
+ changelog_priv_t *priv = NULL;
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ list_add_tail(&stub->list, &priv->queue);
+ priv->queue_size++;
+
+ return;
+}
+
+/* Dequeue a stub */
+call_stub_t *
+__chlog_barrier_dequeue(xlator_t *this, struct list_head *queue)
+{
+ call_stub_t *stub = NULL;
+ changelog_priv_t *priv = NULL;
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ if (list_empty(queue))
+ goto out;
+
+ stub = list_entry(queue->next, call_stub_t, list);
+ list_del_init(&stub->list);
+
+out:
+ return stub;
+}
+
+/* Dequeue all the stubs and call corresponding resume functions */
+void
+chlog_barrier_dequeue_all(xlator_t *this, struct list_head *queue)
+{
+ call_stub_t *stub = NULL;
+
+ gf_smsg(this->name, GF_LOG_INFO, 0, CHANGELOG_MSG_DEQUEUING_BARRIER_FOPS,
+ NULL);
+
+ while ((stub = __chlog_barrier_dequeue(this, queue)))
+ call_resume(stub);
+
+ gf_smsg(this->name, GF_LOG_INFO, 0,
+ CHANGELOG_MSG_DEQUEUING_BARRIER_FOPS_FINISHED, NULL);
+ return;
+}
+
+/* Function called on changelog barrier timeout */
+void
+chlog_barrier_timeout(void *data)
+{
+ xlator_t *this = NULL;
+ changelog_priv_t *priv = NULL;
+ struct list_head queue = {
+ 0,
+ };
+
+ this = data;
+ THIS = this;
+ priv = this->private;
+
+ INIT_LIST_HEAD(&queue);
+
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_BARRIER_TIMEOUT, NULL);
+
+ LOCK(&priv->lock);
+ {
+ __chlog_barrier_disable(this, &queue);
+ }
+ UNLOCK(&priv->lock);
+
+ chlog_barrier_dequeue_all(this, &queue);
+
+ return;
+}
+
+/* Disable changelog barrier enable flag */
+void
+__chlog_barrier_disable(xlator_t *this, struct list_head *queue)
+{
+ changelog_priv_t *priv = this->private;
+ GF_ASSERT(priv);
+
+ if (priv->timer) {
+ gf_timer_call_cancel(this->ctx, priv->timer);
+ priv->timer = NULL;
+ }
+
+ list_splice_init(&priv->queue, queue);
+ priv->queue_size = 0;
+ priv->barrier_enabled = _gf_false;
+}
+
+/* Enable the changelog barrier with a timer */
+int
+__chlog_barrier_enable(xlator_t *this, changelog_priv_t *priv)
+{
+ int ret = -1;
+
+ priv->timer = gf_timer_call_after(this->ctx, priv->timeout,
+ chlog_barrier_timeout, (void *)this);
+ if (!priv->timer) {
+ gf_smsg(this->name, GF_LOG_CRITICAL, 0,
+ CHANGELOG_MSG_TIMEOUT_ADD_FAILED, NULL);
+ goto out;
+ }
+
+ priv->barrier_enabled = _gf_true;
+ ret = 0;
+out:
+ return ret;
+}
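+
+/*
+ * Barrier lifecycle sketch (a caller-side view; priv->lock is assumed to
+ * be held wherever the double-underscore helpers above require it):
+ *
+ *   LOCK(&priv->lock);
+ *   ret = __chlog_barrier_enable(this, priv);    (arm timer, set flag)
+ *   UNLOCK(&priv->lock);
+ *
+ * Fops arriving while the barrier is on are stubbed and queued via
+ * __chlog_barrier_enqueue(); an explicit disable or chlog_barrier_timeout()
+ * then splices the queue out under the lock and resumes everything with
+ * chlog_barrier_dequeue_all().
+ */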
diff --git a/xlators/features/changelog/src/changelog-encoders.c b/xlators/features/changelog/src/changelog-encoders.c
new file mode 100644
index 00000000000..63754516c2e
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-encoders.c
@@ -0,0 +1,232 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "changelog-encoders.h"
+
+size_t
+entry_fn(void *data, char *buffer, gf_boolean_t encode)
+{
+ char *tmpbuf = NULL;
+ size_t bufsz = 0;
+ struct changelog_entry_fields *ce = NULL;
+
+ ce = (struct changelog_entry_fields *)data;
+
+ if (encode) {
+ tmpbuf = uuid_utoa(ce->cef_uuid);
+ CHANGELOG_FILL_BUFFER(buffer, bufsz, tmpbuf, strlen(tmpbuf));
+ } else {
+ CHANGELOG_FILL_BUFFER(buffer, bufsz, ce->cef_uuid, sizeof(uuid_t));
+ }
+
+ CHANGELOG_FILL_BUFFER(buffer, bufsz, "/", 1);
+ CHANGELOG_FILL_BUFFER(buffer, bufsz, ce->cef_bname, strlen(ce->cef_bname));
+ return bufsz;
+}
+
+size_t
+del_entry_fn(void *data, char *buffer, gf_boolean_t encode)
+{
+ char *tmpbuf = NULL;
+ size_t bufsz = 0;
+ struct changelog_entry_fields *ce = NULL;
+
+ ce = (struct changelog_entry_fields *)data;
+
+ if (encode) {
+ tmpbuf = uuid_utoa(ce->cef_uuid);
+ CHANGELOG_FILL_BUFFER(buffer, bufsz, tmpbuf, strlen(tmpbuf));
+ } else {
+ CHANGELOG_FILL_BUFFER(buffer, bufsz, ce->cef_uuid, sizeof(uuid_t));
+ }
+
+ CHANGELOG_FILL_BUFFER(buffer, bufsz, "/", 1);
+ CHANGELOG_FILL_BUFFER(buffer, bufsz, ce->cef_bname, strlen(ce->cef_bname));
+ CHANGELOG_FILL_BUFFER(buffer, bufsz, "\0", 1);
+
+ if (ce->cef_path[0] == '\0') {
+ CHANGELOG_FILL_BUFFER(buffer, bufsz, "\0", 1);
+ } else {
+ CHANGELOG_FILL_BUFFER(buffer, bufsz, ce->cef_path,
+ strlen(ce->cef_path));
+ }
+
+ return bufsz;
+}
+
+size_t
+fop_fn(void *data, char *buffer, gf_boolean_t encode)
+{
+ char buf[10] = {
+ 0,
+ };
+ size_t bufsz = 0;
+ glusterfs_fop_t fop = 0;
+
+ fop = *(glusterfs_fop_t *)data;
+
+ if (encode) {
+ (void)snprintf(buf, sizeof(buf), "%d", fop);
+ CHANGELOG_FILL_BUFFER(buffer, bufsz, buf, strlen(buf));
+ } else
+ CHANGELOG_FILL_BUFFER(buffer, bufsz, &fop, sizeof(fop));
+
+ return bufsz;
+}
+
+size_t
+number_fn(void *data, char *buffer, gf_boolean_t encode)
+{
+ size_t bufsz = 0;
+ unsigned int nr = 0;
+ char buf[20] = {
+ 0,
+ };
+
+ nr = *(unsigned int *)data;
+
+ if (encode) {
+ (void)snprintf(buf, sizeof(buf), "%u", nr);
+ CHANGELOG_FILL_BUFFER(buffer, bufsz, buf, strlen(buf));
+ } else
+ CHANGELOG_FILL_BUFFER(buffer, bufsz, &nr, sizeof(unsigned int));
+
+ return bufsz;
+}
+
+void
+entry_free_fn(void *data)
+{
+ changelog_opt_t *co = data;
+
+ if (!co)
+ return;
+
+ GF_FREE(co->co_entry.cef_bname);
+}
+
+void
+del_entry_free_fn(void *data)
+{
+ changelog_opt_t *co = data;
+
+ if (!co)
+ return;
+
+ GF_FREE(co->co_entry.cef_bname);
+ GF_FREE(co->co_entry.cef_path);
+}
+
+/**
+ * try to write all data in one shot
+ */
+
+static void
+changelog_encode_write_xtra(changelog_log_data_t *cld, char *buffer,
+ size_t *off, gf_boolean_t encode)
+{
+ int i = 0;
+ size_t offset = 0;
+ void *data = NULL;
+ changelog_opt_t *co = NULL;
+
+ offset = *off;
+
+ co = (changelog_opt_t *)cld->cld_ptr;
+
+ for (; i < cld->cld_xtra_records; i++, co++) {
+ CHANGELOG_FILL_BUFFER(buffer, offset, "\0", 1);
+
+ switch (co->co_type) {
+ case CHANGELOG_OPT_REC_FOP:
+ data = &co->co_fop;
+ break;
+ case CHANGELOG_OPT_REC_ENTRY:
+ data = &co->co_entry;
+ break;
+ case CHANGELOG_OPT_REC_UINT32:
+ data = &co->co_uint32;
+ break;
+ }
+
+ if (co->co_convert)
+ offset += co->co_convert(data, buffer + offset, encode);
+ else /* no conversion: write it out as it is */
+ CHANGELOG_FILL_BUFFER(buffer, offset, data, co->co_len);
+ }
+
+ *off = offset;
+}
+
+int
+changelog_encode_ascii(xlator_t *this, changelog_log_data_t *cld)
+{
+ size_t off = 0;
+ size_t gfid_len = 0;
+ char *gfid_str = NULL;
+ char *buffer = NULL;
+ changelog_priv_t *priv = NULL;
+
+ priv = this->private;
+
+ gfid_str = uuid_utoa(cld->cld_gfid);
+ gfid_len = strlen(gfid_str);
+
+ /* extra bytes for decorations */
+ buffer = alloca(gfid_len + cld->cld_ptr_len + 10);
+ CHANGELOG_STORE_ASCII(priv, buffer, off, gfid_str, gfid_len, cld);
+
+ if (cld->cld_xtra_records)
+ changelog_encode_write_xtra(cld, buffer, &off, _gf_true);
+
+ CHANGELOG_FILL_BUFFER(buffer, off, "\0", 1);
+
+ return changelog_write_change(priv, buffer, off);
+}
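+
+/*
+ * Rough shape of an ASCII-encoded record as assembled above (the leading
+ * type character comes from priv->maps[cld->cld_type]; field contents are
+ * illustrative):
+ *
+ *   <type><gfid-string>\0<field>\0<field>...\0
+ *
+ * e.g. an entry operation would carry the fop number from fop_fn() and
+ * the "<gfid>/<basename>" string produced by entry_fn().
+ */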
+
+int
+changelog_encode_binary(xlator_t *this, changelog_log_data_t *cld)
+{
+ size_t off = 0;
+ char *buffer = NULL;
+ changelog_priv_t *priv = NULL;
+
+ priv = this->private;
+
+ /* extra bytes for decorations */
+ buffer = alloca(sizeof(uuid_t) + cld->cld_ptr_len + 10);
+ CHANGELOG_STORE_BINARY(priv, buffer, off, cld->cld_gfid, cld);
+
+ if (cld->cld_xtra_records)
+ changelog_encode_write_xtra(cld, buffer, &off, _gf_false);
+
+ CHANGELOG_FILL_BUFFER(buffer, off, "\0", 1);
+
+ return changelog_write_change(priv, buffer, off);
+}
+
+static struct changelog_encoder cb_encoder[] = {
+ [CHANGELOG_ENCODE_BINARY] =
+ {
+ .encoder = CHANGELOG_ENCODE_BINARY,
+ .encode = changelog_encode_binary,
+ },
+ [CHANGELOG_ENCODE_ASCII] =
+ {
+ .encoder = CHANGELOG_ENCODE_ASCII,
+ .encode = changelog_encode_ascii,
+ },
+};
+
+void
+changelog_encode_change(changelog_priv_t *priv)
+{
+ priv->ce = &cb_encoder[priv->encode_mode];
+}
diff --git a/xlators/features/changelog/src/changelog-encoders.h b/xlators/features/changelog/src/changelog-encoders.h
new file mode 100644
index 00000000000..26252696d56
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-encoders.h
@@ -0,0 +1,50 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _CHANGELOG_ENCODERS_H
+#define _CHANGELOG_ENCODERS_H
+
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+
+#include "changelog-helpers.h"
+
+#define CHANGELOG_STORE_ASCII(priv, buf, off, gfid, gfid_len, cld) \
+ do { \
+ CHANGELOG_FILL_BUFFER(buffer, off, priv->maps[cld->cld_type], 1); \
+ CHANGELOG_FILL_BUFFER(buffer, off, gfid, gfid_len); \
+ } while (0)
+
+#define CHANGELOG_STORE_BINARY(priv, buf, off, gfid, cld) \
+ do { \
+ CHANGELOG_FILL_BUFFER(buffer, off, priv->maps[cld->cld_type], 1); \
+ CHANGELOG_FILL_BUFFER(buffer, off, gfid, sizeof(uuid_t)); \
+ } while (0)
+
+size_t
+entry_fn(void *data, char *buffer, gf_boolean_t encode);
+size_t
+del_entry_fn(void *data, char *buffer, gf_boolean_t encode);
+size_t
+fop_fn(void *data, char *buffer, gf_boolean_t encode);
+size_t
+number_fn(void *data, char *buffer, gf_boolean_t encode);
+void
+entry_free_fn(void *data);
+void
+del_entry_free_fn(void *data);
+int
+changelog_encode_binary(xlator_t *, changelog_log_data_t *);
+int
+changelog_encode_ascii(xlator_t *, changelog_log_data_t *);
+void
+changelog_encode_change(changelog_priv_t *);
+
+#endif /* _CHANGELOG_ENCODERS_H */
diff --git a/xlators/features/changelog/src/changelog-ev-handle.c b/xlators/features/changelog/src/changelog-ev-handle.c
new file mode 100644
index 00000000000..aa94459de5a
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-ev-handle.c
@@ -0,0 +1,412 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "changelog-ev-handle.h"
+#include "changelog-rpc-common.h"
+#include "changelog-helpers.h"
+
+struct rpc_clnt_program changelog_ev_program;
+
+#define NR_IOVEC (MAX_IOVEC - 3)
+struct ev_rpc_vec {
+ int count;
+ struct iovec vector[NR_IOVEC];
+
+ /* sequence number */
+ unsigned long seq;
+};
+
+struct ev_rpc {
+ rbuf_list_t *rlist;
+ struct rpc_clnt *rpc;
+ struct ev_rpc_vec vec;
+};
+
+/**
+ * As of now this just does the minimal (retval logging). Going further,
+ * unacknowledged sequence numbers can be retransmitted and other
+ * intelligence can be built into the server.
+ */
+int
+changelog_event_dispatch_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ return 0;
+}
+
+/* dispatcher RPC */
+int
+changelog_dispatch_vec(call_frame_t *frame, xlator_t *this,
+ struct rpc_clnt *rpc, struct ev_rpc_vec *vec)
+{
+ struct timeval tv = {
+ 0,
+ };
+ changelog_event_req req = {
+ 0,
+ };
+
+ (void)gettimeofday(&tv, NULL);
+
+ /**
+ * Event dispatch RPC header contains a sequence number for each
+ * dispatch. This allows the receiver to order the request before
+ * processing.
+ */
+ req.seq = vec->seq;
+ req.tv_sec = tv.tv_sec;
+ req.tv_usec = tv.tv_usec;
+
+ return changelog_rpc_sumbit_req(
+ rpc, (void *)&req, frame, &changelog_ev_program,
+ CHANGELOG_REV_PROC_EVENT, vec->vector, vec->count, NULL, this,
+ changelog_event_dispatch_cbk, (xdrproc_t)xdr_changelog_event_req);
+}
+
+int
+changelog_event_dispatch_rpc(call_frame_t *frame, xlator_t *this, void *data)
+{
+ int idx = 0;
+ int count = 0;
+ int ret = 0;
+ unsigned long sequence = 0;
+ rbuf_iovec_t *rvec = NULL;
+ struct ev_rpc *erpc = NULL;
+ struct rlist_iter riter = {
+ {
+ 0,
+ },
+ };
+
+ /* dispatch NR_IOVEC IO vectors at a time. */
+
+ erpc = data;
+ sequence = erpc->rlist->seq[0];
+
+ rlist_iter_init(&riter, erpc->rlist);
+
+ rvec_for_each_entry(rvec, &riter)
+ {
+ idx = count % NR_IOVEC;
+ if (++count == NR_IOVEC) {
+ erpc->vec.vector[idx] = rvec->iov;
+ erpc->vec.seq = sequence++;
+ erpc->vec.count = NR_IOVEC;
+
+ ret = changelog_dispatch_vec(frame, this, erpc->rpc, &erpc->vec);
+ if (ret)
+ break;
+ count = 0;
+ continue;
+ }
+
+ erpc->vec.vector[idx] = rvec->iov;
+ }
+
+ if (ret)
+ goto error_return;
+
+ idx = count % NR_IOVEC;
+ if (idx) {
+ erpc->vec.seq = sequence;
+ erpc->vec.count = idx;
+
+ ret = changelog_dispatch_vec(frame, this, erpc->rpc, &erpc->vec);
+ }
+
+error_return:
+ return ret;
+}
+
+int
+changelog_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
+ void *data)
+{
+ xlator_t *this = NULL;
+ changelog_rpc_clnt_t *crpc = NULL;
+ changelog_clnt_t *c_clnt = NULL;
+ changelog_priv_t *priv = NULL;
+ changelog_ev_selector_t *selection = NULL;
+ uint64_t clntcnt = 0;
+ uint64_t xprtcnt = 0;
+
+ crpc = mydata;
+ this = crpc->this;
+ c_clnt = crpc->c_clnt;
+
+ priv = this->private;
+
+ switch (event) {
+ case RPC_CLNT_CONNECT:
+ selection = &priv->ev_selection;
+ GF_ATOMIC_INC(priv->clntcnt);
+
+ LOCK(&c_clnt->wait_lock);
+ {
+ LOCK(&c_clnt->active_lock);
+ {
+ changelog_select_event(this, selection, crpc->filter);
+ list_move_tail(&crpc->list, &c_clnt->active);
+ }
+ UNLOCK(&c_clnt->active_lock);
+ }
+ UNLOCK(&c_clnt->wait_lock);
+
+ break;
+ case RPC_CLNT_DISCONNECT:
+ rpc_clnt_disable(crpc->rpc);
+
+ /* rpc_clnt_disable doesn't unref the rpc. It just marks
+ * the rpc as disabled and cancels reconnection timer.
+ * Hence unref the rpc object to free it.
+ */
+ rpc_clnt_unref(crpc->rpc);
+
+ if (priv)
+ selection = &priv->ev_selection;
+
+ LOCK(&crpc->lock);
+ {
+ if (selection)
+ changelog_deselect_event(this, selection, crpc->filter);
+ changelog_set_disconnect_flag(crpc, _gf_true);
+ }
+ UNLOCK(&crpc->lock);
+ LOCK(&c_clnt->active_lock);
+ {
+ list_del_init(&crpc->list);
+ }
+ UNLOCK(&c_clnt->active_lock);
+
+ break;
+ case RPC_CLNT_MSG:
+ case RPC_CLNT_DESTROY:
+ /* Free up mydata */
+ changelog_rpc_clnt_unref(crpc);
+ clntcnt = GF_ATOMIC_DEC(priv->clntcnt);
+ xprtcnt = GF_ATOMIC_GET(priv->xprtcnt);
+ if (this->cleanup_starting) {
+ if (!clntcnt && !xprtcnt)
+ changelog_process_cleanup_event(this);
+ }
+ break;
+ case RPC_CLNT_PING:
+ break;
+ }
+
+ return 0;
+}
+
+void *
+changelog_ev_connector(void *data)
+{
+ xlator_t *this = NULL;
+ changelog_clnt_t *c_clnt = NULL;
+ changelog_rpc_clnt_t *crpc = NULL;
+
+ c_clnt = data;
+ this = c_clnt->this;
+
+ while (1) {
+ pthread_mutex_lock(&c_clnt->pending_lock);
+ {
+ while (list_empty(&c_clnt->pending))
+ pthread_cond_wait(&c_clnt->pending_cond, &c_clnt->pending_lock);
+ crpc = list_first_entry(&c_clnt->pending, changelog_rpc_clnt_t,
+ list);
+ crpc->rpc = changelog_rpc_client_init(this, crpc, crpc->sock,
+ changelog_rpc_notify);
+ if (!crpc->rpc) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_MSG_RPC_CONNECT_ERROR, "path=%s", crpc->sock,
+ NULL);
+ crpc->cleanup(crpc);
+ goto mutex_unlock;
+ }
+
+ LOCK(&c_clnt->wait_lock);
+ {
+ list_move_tail(&crpc->list, &c_clnt->waitq);
+ }
+ UNLOCK(&c_clnt->wait_lock);
+ }
+ mutex_unlock:
+ pthread_mutex_unlock(&c_clnt->pending_lock);
+ }
+
+ return NULL;
+}
+
+void
+changelog_ev_cleanup_connections(xlator_t *this, changelog_clnt_t *c_clnt)
+{
+ changelog_rpc_clnt_t *crpc = NULL;
+
+ /* cleanup active connections */
+ LOCK(&c_clnt->active_lock);
+ {
+ list_for_each_entry(crpc, &c_clnt->active, list)
+ {
+ rpc_clnt_disable(crpc->rpc);
+ }
+ }
+ UNLOCK(&c_clnt->active_lock);
+}
+
+/**
+ * TODO: granularize lock
+ *
+ * If we have multiple threads dispatching events, doing it this way is
+ * a performance bottleneck.
+ */
+
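+/* Return the next client on the ->active list with both the crpc and its
+ * rpc referenced (put_client() drops the references); returns NULL once the
+ * end of the list is reached. */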
+static changelog_rpc_clnt_t *
+get_client(changelog_clnt_t *c_clnt, struct list_head **next)
+{
+ changelog_rpc_clnt_t *crpc = NULL;
+
+ LOCK(&c_clnt->active_lock);
+ {
+ if (*next == &c_clnt->active)
+ goto unblock;
+ crpc = list_entry(*next, changelog_rpc_clnt_t, list);
+ /* ref rpc as DISCONNECT might unref the rpc asynchronously */
+ changelog_rpc_clnt_ref(crpc);
+ rpc_clnt_ref(crpc->rpc);
+ *next = (*next)->next;
+ }
+unblock:
+ UNLOCK(&c_clnt->active_lock);
+
+ return crpc;
+}
+
+static void
+put_client(changelog_clnt_t *c_clnt, changelog_rpc_clnt_t *crpc)
+{
+ LOCK(&c_clnt->active_lock);
+ {
+ rpc_clnt_unref(crpc->rpc);
+ changelog_rpc_clnt_unref(crpc);
+ }
+ UNLOCK(&c_clnt->active_lock);
+}
+
+void
+_dispatcher(rbuf_list_t *rlist, void *arg)
+{
+ xlator_t *this = NULL;
+ changelog_clnt_t *c_clnt = NULL;
+ changelog_rpc_clnt_t *crpc = NULL;
+ struct ev_rpc erpc = {
+ 0,
+ };
+ struct list_head *next = NULL;
+
+ c_clnt = arg;
+ this = c_clnt->this;
+
+ erpc.rlist = rlist;
+ next = c_clnt->active.next;
+
+ while (1) {
+ crpc = get_client(c_clnt, &next);
+ if (!crpc)
+ break;
+ erpc.rpc = crpc->rpc;
+ (void)changelog_invoke_rpc(this, crpc->rpc, &changelog_ev_program,
+ CHANGELOG_REV_PROC_EVENT, &erpc);
+ put_client(c_clnt, crpc);
+ }
+}
+
+/** this is called under rotbuff's lock */
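+/* It reserves ceil(entry count / NR_IOVEC) sequence numbers for this list,
+ * one per dispatched iovec batch. */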
+void
+sequencer(rbuf_list_t *rlist, void *mydata)
+{
+ unsigned long range = 0;
+ changelog_clnt_t *c_clnt = 0;
+
+ c_clnt = mydata;
+
+ range = (RLIST_ENTRY_COUNT(rlist)) / NR_IOVEC;
+ if ((RLIST_ENTRY_COUNT(rlist)) % NR_IOVEC)
+ range++;
+ RLIST_STORE_SEQ(rlist, c_clnt->sequence, range);
+
+ c_clnt->sequence += range;
+}
+
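+/* Dispatcher thread: wakes up every second, pulls a consumable buffer list
+ * from the rot-buff and dispatches its records to all active clients via
+ * _dispatcher(). */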
+void *
+changelog_ev_dispatch(void *data)
+{
+ int ret = 0;
+ void *opaque = NULL;
+ xlator_t *this = NULL;
+ changelog_clnt_t *c_clnt = NULL;
+ struct timeval tv = {
+ 0,
+ };
+
+ c_clnt = data;
+ this = c_clnt->this;
+
+ while (1) {
+ /* TODO: change this to be pthread cond based.. later */
+
+ tv.tv_sec = 1;
+ tv.tv_usec = 0;
+ select(0, NULL, NULL, NULL, &tv);
+
+ ret = rbuf_get_buffer(c_clnt->rbuf, &opaque, sequencer, c_clnt);
+ if (ret != RBUF_CONSUMABLE) {
+ if (ret != RBUF_EMPTY)
+ gf_smsg(this->name, GF_LOG_WARNING, 0,
+ CHANGELOG_MSG_BUFFER_STARVATION_ERROR,
+ "Failed to get buffer for RPC dispatch",
+ "rbuf_retval=%d", ret, NULL);
+ continue;
+ }
+
+ ret = rbuf_wait_for_completion(c_clnt->rbuf, opaque, _dispatcher,
+ c_clnt);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_WARNING, 0,
+ CHANGELOG_MSG_PUT_BUFFER_FAILED, NULL);
+ }
+
+ return NULL;
+}
+
+void
+changelog_ev_queue_connection(changelog_clnt_t *c_clnt,
+ changelog_rpc_clnt_t *crpc)
+{
+ pthread_mutex_lock(&c_clnt->pending_lock);
+ {
+ list_add_tail(&crpc->list, &c_clnt->pending);
+ pthread_cond_signal(&c_clnt->pending_cond);
+ }
+ pthread_mutex_unlock(&c_clnt->pending_lock);
+}
+
+struct rpc_clnt_procedure changelog_ev_procs[CHANGELOG_REV_PROC_MAX] = {
+ [CHANGELOG_REV_PROC_NULL] = {"NULL", NULL},
+ [CHANGELOG_REV_PROC_EVENT] = {"EVENT DISPATCH",
+ changelog_event_dispatch_rpc},
+};
+
+struct rpc_clnt_program changelog_ev_program = {
+ .progname = "CHANGELOG EVENT DISPATCHER",
+ .prognum = CHANGELOG_REV_RPC_PROCNUM,
+ .progver = CHANGELOG_REV_RPC_PROCVER,
+ .numproc = CHANGELOG_REV_PROC_MAX,
+ .proctable = changelog_ev_procs,
+};
diff --git a/xlators/features/changelog/src/changelog-ev-handle.h b/xlators/features/changelog/src/changelog-ev-handle.h
new file mode 100644
index 00000000000..cc1af58a276
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-ev-handle.h
@@ -0,0 +1,136 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __CHANGELOG_EV_HANDLE_H
+#define __CHANGELOG_EV_HANDLE_H
+
+#include <glusterfs/list.h>
+#include <glusterfs/xlator.h>
+#include "rpc-clnt.h"
+
+#include <glusterfs/rot-buffs.h>
+
+struct changelog_clnt;
+
+typedef struct changelog_rpc_clnt {
+ xlator_t *this;
+
+ gf_lock_t lock;
+
+ gf_atomic_t ref;
+ gf_boolean_t disconnected;
+
+ unsigned int filter;
+ char sock[UNIX_PATH_MAX];
+
+ struct changelog_clnt *c_clnt; /* back pointer to list holder */
+
+ struct rpc_clnt *rpc; /* RPC client endpoint */
+
+ struct list_head list; /* ->pending, ->waitq, ->active */
+
+ void (*cleanup)(struct changelog_rpc_clnt *); /* cleanup handler */
+} changelog_rpc_clnt_t;
+
+static inline void
+changelog_rpc_clnt_ref(changelog_rpc_clnt_t *crpc)
+{
+ GF_ATOMIC_INC(crpc->ref);
+}
+
+static inline void
+changelog_set_disconnect_flag(changelog_rpc_clnt_t *crpc, gf_boolean_t flag)
+{
+ crpc->disconnected = flag;
+}
+
+static inline int
+changelog_rpc_clnt_is_disconnected(changelog_rpc_clnt_t *crpc)
+{
+ return (crpc->disconnected == _gf_true);
+}
+
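+/* Drop a reference; when the last reference is gone and the connection has
+ * been marked disconnected, the object is unlinked from its list and the
+ * cleanup handler is invoked. */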
+static inline void
+changelog_rpc_clnt_unref(changelog_rpc_clnt_t *crpc)
+{
+ gf_boolean_t gone = _gf_false;
+ uint64_t ref = 0;
+
+ ref = GF_ATOMIC_DEC(crpc->ref);
+
+ if (!ref && changelog_rpc_clnt_is_disconnected(crpc)) {
+ list_del(&crpc->list);
+ gone = _gf_true;
+ }
+
+ if (gone)
+ crpc->cleanup(crpc);
+}
+
+/**
+ * This structure holds pending and active clients. On a probe RPC, an
+ * instance of the above structure (@changelog_rpc_clnt) is placed in
+ * ->pending and gets moved to ->active on a successful connect.
+ *
+ * locking rules:
+ *
+ * Manipulating ->pending
+ * ->pending_lock
+ * ->pending
+ *
+ * Manipulating ->active
+ * ->active_lock
+ * ->active
+ *
+ * Moving object from ->pending to ->active
+ * ->pending_lock
+ * ->active_lock
+ *
+ * Objects are _never_ moved from ->active to ->pending, i.e., during
+ * disconnection, the object is destroyed. Well, we could have tried
+ * to reconnect, but that's pure waste; let the other end reconnect.
+ */
+
+typedef struct changelog_clnt {
+ xlator_t *this;
+
+ /* pending connections */
+ pthread_mutex_t pending_lock;
+ pthread_cond_t pending_cond;
+ struct list_head pending;
+
+ /* current active connections */
+ gf_lock_t active_lock;
+ struct list_head active;
+
+ gf_lock_t wait_lock;
+ struct list_head waitq;
+
+ /* consumer part of rot-buffs */
+ rbuf_t *rbuf;
+ unsigned long sequence;
+} changelog_clnt_t;
+
+void *
+changelog_ev_connector(void *);
+
+void *
+changelog_ev_dispatch(void *);
+
+/* APIs */
+void
+changelog_ev_queue_connection(changelog_clnt_t *, changelog_rpc_clnt_t *);
+
+void
+changelog_ev_cleanup_connections(xlator_t *, changelog_clnt_t *);
+
+void
+changelog_process_cleanup_event(xlator_t *);
+#endif
diff --git a/xlators/features/changelog/src/changelog-helpers.c b/xlators/features/changelog/src/changelog-helpers.c
new file mode 100644
index 00000000000..e561997d858
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-helpers.c
@@ -0,0 +1,1977 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/iobuf.h>
+#include <glusterfs/syscall.h>
+
+#include "changelog-helpers.h"
+#include "changelog-encoders.h"
+#include "changelog-mem-types.h"
+#include "changelog-messages.h"
+
+#include "changelog-encoders.h"
+#include "changelog-rpc-common.h"
+#include <pthread.h>
+#include <time.h>
+
+static void
+changelog_cleanup_free_mutex(void *arg_mutex)
+{
+ pthread_mutex_t *p_mutex = (pthread_mutex_t *)arg_mutex;
+
+ if (p_mutex)
+ pthread_mutex_unlock(p_mutex);
+}
+
+int
+changelog_thread_cleanup(xlator_t *this, pthread_t thr_id)
+{
+ int ret = 0;
+ void *retval = NULL;
+
+ /* send a cancel request to the thread */
+ ret = pthread_cancel(thr_id);
+ if (ret != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_PTHREAD_CANCEL_FAILED, NULL);
+ goto out;
+ }
+
+ ret = pthread_join(thr_id, &retval);
+ if ((ret != 0) || (retval != PTHREAD_CANCELED)) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_PTHREAD_CANCEL_FAILED, NULL);
+ }
+
+out:
+ return ret;
+}
+
+void *
+changelog_get_usable_buffer(changelog_local_t *local)
+{
+ changelog_log_data_t *cld = NULL;
+
+ if (!local)
+ return NULL;
+
+ cld = &local->cld;
+ if (!cld->cld_iobuf)
+ return NULL;
+
+ return cld->cld_iobuf->ptr;
+}
+
+static int
+changelog_selector_index(unsigned int selector)
+{
+ return (ffs(selector) - 1);
+}
+
+int
+changelog_ev_selected(xlator_t *this, changelog_ev_selector_t *selection,
+ unsigned int selector)
+{
+ int idx = 0;
+
+ idx = changelog_selector_index(selector);
+ gf_msg_debug(this->name, 0, "selector ref count for %d (idx: %d): %d",
+ selector, idx, selection->ref[idx]);
+ /* this can be lockless */
+ return (idx < CHANGELOG_EV_SELECTION_RANGE && (selection->ref[idx] > 0));
+}
+
+void
+changelog_select_event(xlator_t *this, changelog_ev_selector_t *selection,
+ unsigned int selector)
+{
+ int idx = 0;
+
+ LOCK(&selection->reflock);
+ {
+ while (selector) {
+ idx = changelog_selector_index(selector);
+ if (idx < CHANGELOG_EV_SELECTION_RANGE) {
+ selection->ref[idx]++;
+ gf_msg_debug(this->name, 0, "selecting event %d", idx);
+ }
+ selector &= ~(1 << idx);
+ }
+ }
+ UNLOCK(&selection->reflock);
+}
+
+void
+changelog_deselect_event(xlator_t *this, changelog_ev_selector_t *selection,
+ unsigned int selector)
+{
+ int idx = 0;
+
+ LOCK(&selection->reflock);
+ {
+ while (selector) {
+ idx = changelog_selector_index(selector);
+ if (idx < CHANGELOG_EV_SELECTION_RANGE) {
+ selection->ref[idx]--;
+ gf_msg_debug(this->name, 0, "de-selecting event %d", idx);
+ }
+ selector &= ~(1 << idx);
+ }
+ }
+ UNLOCK(&selection->reflock);
+}
+
+int
+changelog_init_event_selection(xlator_t *this,
+ changelog_ev_selector_t *selection)
+{
+ int ret = 0;
+ int j = CHANGELOG_EV_SELECTION_RANGE;
+
+ ret = LOCK_INIT(&selection->reflock);
+ if (ret != 0)
+ return -1;
+
+ LOCK(&selection->reflock);
+ {
+ while (j--) {
+ selection->ref[j] = 0;
+ }
+ }
+ UNLOCK(&selection->reflock);
+
+ return 0;
+}
+
+static void
+changelog_perform_dispatch(xlator_t *this, changelog_priv_t *priv, void *mem,
+ size_t size)
+{
+ char *buf = NULL;
+ void *opaque = NULL;
+
+ buf = rbuf_reserve_write_area(priv->rbuf, size, &opaque);
+ if (!buf) {
+ gf_msg_callingfn(this->name, GF_LOG_WARNING, 0,
+ CHANGELOG_MSG_DISPATCH_EVENT_FAILED,
+ "failed to dispatch event");
+ return;
+ }
+
+ memcpy(buf, mem, size);
+ rbuf_write_complete(opaque);
+}
+
+void
+changelog_dispatch_event(xlator_t *this, changelog_priv_t *priv,
+ changelog_event_t *ev)
+{
+ changelog_ev_selector_t *selection = NULL;
+
+ selection = &priv->ev_selection;
+ if (changelog_ev_selected(this, selection, ev->ev_type)) {
+ changelog_perform_dispatch(this, priv, ev, CHANGELOG_EV_SIZE);
+ }
+}
+
+void
+changelog_set_usable_record_and_length(changelog_local_t *local, size_t len,
+ int xr)
+{
+ changelog_log_data_t *cld = NULL;
+
+ cld = &local->cld;
+
+ cld->cld_ptr_len = len;
+ cld->cld_xtra_records = xr;
+}
+
+void
+changelog_local_cleanup(xlator_t *xl, changelog_local_t *local)
+{
+ int i = 0;
+ changelog_opt_t *co = NULL;
+ changelog_log_data_t *cld = NULL;
+
+ if (!local)
+ return;
+
+ cld = &local->cld;
+
+ /* cleanup dynamic allocation for extra records */
+ if (cld->cld_xtra_records) {
+ co = (changelog_opt_t *)cld->cld_ptr;
+ for (; i < cld->cld_xtra_records; i++, co++)
+ if (co->co_free)
+ co->co_free(co);
+ }
+
+ CHANGELOG_IOBUF_UNREF(cld->cld_iobuf);
+
+ if (local->inode)
+ inode_unref(local->inode);
+
+ mem_put(local);
+}
+
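+/* Write 'len' bytes of 'buffer' to 'fd', retrying on short writes; returns
+ * 0 if the full buffer was written, non-zero otherwise. */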
+int
+changelog_write(int fd, char *buffer, size_t len)
+{
+ ssize_t size = 0;
+ size_t written = 0;
+
+ while (written < len) {
+ size = sys_write(fd, buffer + written, len - written);
+ if (size <= 0)
+ break;
+
+ written += size;
+ }
+
+ return (written != len);
+}
+
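+/* Append the changelog's path to the open htime file and update its
+ * HTIME_KEY xattr with "<timestamp>:<rollover count>"; bumps
+ * priv->rollover_count on success. */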
+int
+htime_update(xlator_t *this, changelog_priv_t *priv, time_t ts, char *buffer)
+{
+ char changelog_path[PATH_MAX + 1] = {
+ 0,
+ };
+ int len = -1;
+ char x_value[25] = {
+ 0,
+ };
+    /* timestamp (10) + ':' (1) + rollover count (12) + buffer (2) */
+ int ret = 0;
+
+ if (priv->htime_fd == -1) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_HTIME_ERROR,
+ "reason=fd not available", NULL);
+ ret = -1;
+ goto out;
+ }
+ len = snprintf(changelog_path, PATH_MAX, "%s", buffer);
+ if (len >= PATH_MAX) {
+ ret = -1;
+ goto out;
+ }
+ if (changelog_write(priv->htime_fd, (void *)changelog_path, len + 1) < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_HTIME_ERROR,
+ "reason=write failed", NULL);
+ ret = -1;
+ goto out;
+ }
+
+ len = snprintf(x_value, sizeof(x_value), "%ld:%d", ts,
+ priv->rollover_count);
+ if (len >= sizeof(x_value)) {
+ ret = -1;
+ goto out;
+ }
+
+ if (sys_fsetxattr(priv->htime_fd, HTIME_KEY, x_value, len, XATTR_REPLACE)) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_HTIME_ERROR,
+ "reason=xattr updation failed", "XATTR_REPLACE=true",
+ "changelog=%s", changelog_path, NULL);
+
+ if (sys_fsetxattr(priv->htime_fd, HTIME_KEY, x_value, len, 0)) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_HTIME_ERROR,
+ "reason=xattr updation failed", "changelog=%s",
+ changelog_path, NULL);
+ ret = -1;
+ goto out;
+ }
+ }
+
+ priv->rollover_count += 1;
+
+out:
+ return ret;
+}
+
+/*
+ * Description: Check if the changelog to rollover is empty or not.
+ * It is assumed that fd passed is already verified.
+ *
+ * Returns:
+ * 1 : If found empty, changed path from "CHANGELOG.<TS>" to "changelog.<TS>"
+ * 0 : If NOT empty, proceed usual.
+ */
+int
+cl_is_empty(xlator_t *this, int fd)
+{
+ int ret = -1;
+ size_t elen = 0;
+ int encoding = -1;
+ char buffer[1024] = {
+ 0,
+ };
+ struct stat stbuf = {
+ 0,
+ };
+ int major_version = -1;
+ int minor_version = -1;
+
+ ret = sys_fstat(fd, &stbuf);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_FSTAT_OP_FAILED,
+ NULL);
+ goto out;
+ }
+
+ ret = sys_lseek(fd, 0, SEEK_SET);
+ if (ret == -1) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_LSEEK_OP_FAILED,
+ NULL);
+ goto out;
+ }
+
+ CHANGELOG_GET_HEADER_INFO(fd, buffer, sizeof(buffer), encoding,
+ major_version, minor_version, elen);
+
+ if (elen == stbuf.st_size) {
+ ret = 1;
+ } else {
+ ret = 0;
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * Description: Updates "CHANGELOG" to "changelog" for writing changelog path
+ * to htime file.
+ *
+ * Returns:
+ * 0 : Success
+ * -1 : Error
+ */
+int
+update_path(xlator_t *this, char *cl_path)
+{
+ const char low_cl[] = "changelog";
+ const char up_cl[] = "CHANGELOG";
+ char *found = NULL;
+ int ret = -1;
+
+ found = strstr(cl_path, up_cl);
+
+ if (found == NULL) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_PATH_NOT_FOUND,
+ NULL);
+ goto out;
+ } else {
+ memcpy(found, low_cl, sizeof(low_cl) - 1);
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
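+/* Roll over the current changelog: fsync and close it, rename it to
+ * CHANGELOG.<TS> under a YYYY/MM/DD directory (or unlink it if empty),
+ * record the new name in the htime file and dispatch a journal event.
+ * For snapshot-triggered (explicit) rollovers the waiting reconfigure
+ * thread is signalled via bnotify, even on failure. */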
+static int
+changelog_rollover_changelog(xlator_t *this, changelog_priv_t *priv, time_t ts)
+{
+ int ret = -1;
+ int notify = 0;
+ int cl_empty_flag = 0;
+ struct tm *gmt;
+ char yyyymmdd[40];
+ char ofile[PATH_MAX] = {
+ 0,
+ };
+ char nfile[PATH_MAX] = {
+ 0,
+ };
+ char nfile_dir[PATH_MAX] = {
+ 0,
+ };
+ changelog_event_t ev = {
+ 0,
+ };
+
+ if (priv->changelog_fd != -1) {
+ ret = sys_fsync(priv->changelog_fd);
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_FSYNC_OP_FAILED, NULL);
+ }
+ ret = cl_is_empty(this, priv->changelog_fd);
+ if (ret == 1) {
+ cl_empty_flag = 1;
+ } else if (ret == -1) {
+ /* Log error but proceed as usual */
+ gf_smsg(this->name, GF_LOG_WARNING, 0,
+ CHANGELOG_MSG_DETECT_EMPTY_CHANGELOG_FAILED, NULL);
+ }
+ sys_close(priv->changelog_fd);
+ priv->changelog_fd = -1;
+ }
+
+ /* Get GMT time. */
+ gmt = gmtime(&ts);
+
+ strftime(yyyymmdd, sizeof(yyyymmdd), "%Y/%m/%d", gmt);
+
+ (void)snprintf(ofile, PATH_MAX, "%s/" CHANGELOG_FILE_NAME,
+ priv->changelog_dir);
+ (void)snprintf(nfile, PATH_MAX, "%s/%s/" CHANGELOG_FILE_NAME ".%ld",
+ priv->changelog_dir, yyyymmdd, ts);
+ (void)snprintf(nfile_dir, PATH_MAX, "%s/%s", priv->changelog_dir, yyyymmdd);
+
+ if (cl_empty_flag == 1) {
+ ret = sys_unlink(ofile);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_UNLINK_OP_FAILED, "path=%s", ofile, NULL);
+ ret = 0; /* Error in unlinking empty changelog should
+ not break further changelog operation, so
+ reset return value to 0*/
+ }
+ } else {
+ ret = sys_rename(ofile, nfile);
+
+ /* Changelog file rename gets ENOENT when parent dir doesn't exist */
+ if (errno == ENOENT) {
+ ret = mkdir_p(nfile_dir, 0600, _gf_true);
+
+ if ((ret == -1) && (EEXIST != errno)) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_MKDIR_ERROR, "%s", nfile_dir, NULL);
+ goto out;
+ }
+
+ ret = sys_rename(ofile, nfile);
+ }
+
+ if (ret && (errno == ENOENT)) {
+ ret = 0;
+ goto out;
+ }
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_RENAME_ERROR,
+ "from=%s", ofile, "to=%s", nfile, NULL);
+ }
+ }
+
+ if (!ret && (cl_empty_flag == 0)) {
+ notify = 1;
+ }
+
+ if (!ret) {
+ if (cl_empty_flag) {
+ update_path(this, nfile);
+ }
+ ret = htime_update(this, priv, ts, nfile);
+ if (ret == -1) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_HTIME_ERROR,
+ NULL);
+ goto out;
+ }
+ }
+
+ if (notify) {
+ ev.ev_type = CHANGELOG_OP_TYPE_JOURNAL;
+ memcpy(ev.u.journal.path, nfile, strlen(nfile) + 1);
+ changelog_dispatch_event(this, priv, &ev);
+ }
+out:
+ /* If this is explicit rollover initiated by snapshot,
+ * wakeup reconfigure thread waiting for changelog to
+ * rollover. This should happen even in failure cases as
+ * well otherwise snapshot will timeout and fail. Hence
+ * moved under out.
+ */
+ if (priv->explicit_rollover) {
+ priv->explicit_rollover = _gf_false;
+
+ pthread_mutex_lock(&priv->bn.bnotify_mutex);
+ {
+ if (ret) {
+ priv->bn.bnotify_error = _gf_true;
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_MSG_EXPLICIT_ROLLOVER_FAILED, NULL);
+ } else {
+ gf_smsg(this->name, GF_LOG_INFO, 0, CHANGELOG_MSG_BNOTIFY_INFO,
+ "changelog=%s", nfile, NULL);
+ }
+ priv->bn.bnotify = _gf_false;
+ pthread_cond_signal(&priv->bn.bnotify_cond);
+ }
+ pthread_mutex_unlock(&priv->bn.bnotify_mutex);
+ }
+ return ret;
+}
+
+int
+filter_cur_par_dirs(const struct dirent *entry)
+{
+ if (entry == NULL)
+ return 0;
+
+ if ((strcmp(entry->d_name, ".") == 0) || (strcmp(entry->d_name, "..") == 0))
+ return 0;
+ else
+ return 1;
+}
+
+/*
+ * find_current_htime:
+ * Finds the latest htime file and sets the HTIME_CURRENT
+ * xattr on the htime directory.
+ * RETURN VALUE:
+ *    -1 : Error
+ *  >= 0 : Number of directory entries found
+ */
+
+int
+find_current_htime(int ht_dir_fd, const char *ht_dir_path, char *ht_file_bname)
+{
+ struct dirent **namelist = NULL;
+ int ret = 0;
+ int cnt = 0;
+ int i = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(ht_dir_path);
+
+ cnt = scandir(ht_dir_path, &namelist, filter_cur_par_dirs, alphasort);
+ if (cnt < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_SCAN_DIR_FAILED,
+ NULL);
+ } else if (cnt > 0) {
+ if (snprintf(ht_file_bname, NAME_MAX, "%s",
+ namelist[cnt - 1]->d_name) >= NAME_MAX) {
+ ret = -1;
+ goto out;
+ }
+ if (sys_fsetxattr(ht_dir_fd, HTIME_CURRENT, ht_file_bname,
+ strlen(ht_file_bname), 0)) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_FSETXATTR_FAILED, "HTIME_CURRENT", NULL);
+ ret = -1;
+ goto out;
+ }
+
+ if (sys_fsync(ht_dir_fd) < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_FSYNC_OP_FAILED, NULL);
+ ret = -1;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < cnt; i++)
+ free(namelist[i]);
+ free(namelist);
+
+ if (ret)
+ cnt = ret;
+
+ return cnt;
+}
+
+/* Returns 0 on successful open of htime file
+ * returns -1 on failure or error
+ */
+int
+htime_open(xlator_t *this, changelog_priv_t *priv, time_t ts)
+{
+ int ht_file_fd = -1;
+ int ht_dir_fd = -1;
+ int ret = 0;
+ int cnt = 0;
+ char ht_dir_path[PATH_MAX] = {
+ 0,
+ };
+ char ht_file_path[PATH_MAX] = {
+ 0,
+ };
+ char ht_file_bname[NAME_MAX] = {
+ 0,
+ };
+ char x_value[NAME_MAX] = {
+ 0,
+ };
+ int flags = 0;
+ unsigned long min_ts = 0;
+ unsigned long max_ts = 0;
+ unsigned long total = 0;
+ unsigned long total1 = 0;
+ ssize_t size = 0;
+ struct stat stat_buf = {
+ 0,
+ };
+ unsigned long record_len = 0;
+ int32_t len = 0;
+
+ CHANGELOG_FILL_HTIME_DIR(priv->changelog_dir, ht_dir_path);
+
+ /* Open htime directory to get HTIME_CURRENT */
+ ht_dir_fd = open(ht_dir_path, O_RDONLY);
+ if (ht_dir_fd == -1) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_OPEN_FAILED,
+ "path=%s", ht_dir_path, NULL);
+ ret = -1;
+ goto out;
+ }
+
+ size = sys_fgetxattr(ht_dir_fd, HTIME_CURRENT, ht_file_bname,
+ sizeof(ht_file_bname));
+ if (size < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_FGETXATTR_FAILED,
+ "name=HTIME_CURRENT", NULL);
+
+        /* In an upgrade scenario, find the latest HTIME.TSTAMP file
+         * and use it. On error, create a new HTIME.TSTAMP
+         * file.
+ */
+ cnt = find_current_htime(ht_dir_fd, ht_dir_path, ht_file_bname);
+ if (cnt <= 0) {
+ gf_smsg(this->name, GF_LOG_INFO, errno,
+ CHANGELOG_MSG_NO_HTIME_CURRENT, NULL);
+ sys_close(ht_dir_fd);
+ return htime_create(this, priv, ts);
+ }
+
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_HTIME_CURRENT_ERROR, NULL);
+ }
+
+ gf_smsg(this->name, GF_LOG_INFO, 0, CHANGELOG_MSG_HTIME_CURRENT, "path=%s",
+ ht_file_bname, NULL);
+ len = snprintf(ht_file_path, PATH_MAX, "%s/%s", ht_dir_path, ht_file_bname);
+ if ((len < 0) || (len >= PATH_MAX)) {
+ ret = -1;
+ goto out;
+ }
+
+ /* Open in append mode as existing htime file is used */
+ flags |= (O_RDWR | O_SYNC | O_APPEND);
+ ht_file_fd = open(ht_file_path, flags,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if (ht_file_fd < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_OPEN_FAILED,
+ "path=%s", ht_file_path, NULL);
+ ret = -1;
+ goto out;
+ }
+
+ /* save this htime_fd in priv->htime_fd */
+ priv->htime_fd = ht_file_fd;
+
+ ret = sys_fstat(ht_file_fd, &stat_buf);
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_HTIME_STAT_ERROR,
+ "path=%s", ht_file_path, NULL);
+ ret = -1;
+ goto out;
+ }
+
+ /* Initialize rollover-number in priv to current number */
+ size = sys_fgetxattr(ht_file_fd, HTIME_KEY, x_value, sizeof(x_value));
+ if (size < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_FGETXATTR_FAILED,
+ "name=%s", HTIME_KEY, "path=%s", ht_file_path, NULL);
+ ret = -1;
+ goto out;
+ }
+
+ sscanf(x_value, "%lu:%lu", &max_ts, &total);
+
+ /* 22 = 1(/) + 20(CHANGELOG.TIMESTAMP) + 1(\x00) */
+ record_len = strlen(priv->changelog_dir) + 22;
+ total1 = stat_buf.st_size / record_len;
+ if (total != total1) {
+ gf_smsg(this->name, GF_LOG_INFO, 0, CHANGELOG_MSG_TOTAL_LOG_INFO,
+ "xattr_total=%lu", total, "size_total=%lu", total1, NULL);
+ }
+
+ gf_smsg(this->name, GF_LOG_INFO, 0, CHANGELOG_MSG_TOTAL_LOG_INFO, "min=%lu",
+ min_ts, "max=%lu", max_ts, "total_changelogs=%lu", total, NULL);
+
+ if (total < total1)
+ priv->rollover_count = total1 + 1;
+ else
+ priv->rollover_count = total + 1;
+
+out:
+ if (ht_dir_fd != -1)
+ sys_close(ht_dir_fd);
+ return ret;
+}
+
+/* Returns 0 on successful creation of htime file
+ * returns -1 on failure or error
+ */
+int
+htime_create(xlator_t *this, changelog_priv_t *priv, time_t ts)
+{
+ int ht_file_fd = -1;
+ int ht_dir_fd = -1;
+ int ret = 0;
+ char ht_dir_path[PATH_MAX] = {
+ 0,
+ };
+ char ht_file_path[PATH_MAX] = {
+ 0,
+ };
+ char ht_file_bname[NAME_MAX + 1] = {
+ 0,
+ };
+ int flags = 0;
+ int32_t len = 0;
+
+ gf_smsg(this->name, GF_LOG_INFO, 0, CHANGELOG_MSG_NEW_HTIME_FILE,
+ "name=%ld", ts, NULL);
+
+ CHANGELOG_FILL_HTIME_DIR(priv->changelog_dir, ht_dir_path);
+
+ /* get the htime file name in ht_file_path */
+ len = snprintf(ht_file_path, PATH_MAX, "%s/%s.%ld", ht_dir_path,
+ HTIME_FILE_NAME, ts);
+ if ((len < 0) || (len >= PATH_MAX)) {
+ ret = -1;
+ goto out;
+ }
+
+ flags |= (O_CREAT | O_RDWR | O_SYNC);
+ ht_file_fd = open(ht_file_path, flags,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if (ht_file_fd < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_OPEN_FAILED,
+ "path=%s", ht_file_path, NULL);
+ ret = -1;
+ goto out;
+ }
+
+ if (sys_fsetxattr(ht_file_fd, HTIME_KEY, HTIME_INITIAL_VALUE,
+ sizeof(HTIME_INITIAL_VALUE) - 1, 0)) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_XATTR_INIT_FAILED, NULL);
+ ret = -1;
+ goto out;
+ }
+
+ ret = sys_fsync(ht_file_fd);
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_FSYNC_OP_FAILED,
+ NULL);
+ goto out;
+ }
+
+ /* save this htime_fd in priv->htime_fd */
+ priv->htime_fd = ht_file_fd;
+
+ ht_file_fd = -1;
+
+ /* Set xattr HTIME_CURRENT on htime directory to htime filename */
+ ht_dir_fd = open(ht_dir_path, O_RDONLY);
+ if (ht_dir_fd == -1) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_OPEN_FAILED,
+ "path=%s", ht_dir_path, NULL);
+ ret = -1;
+ goto out;
+ }
+
+ (void)snprintf(ht_file_bname, sizeof(ht_file_bname), "%s.%ld",
+ HTIME_FILE_NAME, ts);
+ if (sys_fsetxattr(ht_dir_fd, HTIME_CURRENT, ht_file_bname,
+ strlen(ht_file_bname), 0)) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_FSETXATTR_FAILED,
+ " HTIME_CURRENT", NULL);
+ ret = -1;
+ goto out;
+ }
+
+ ret = sys_fsync(ht_dir_fd);
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_FSYNC_OP_FAILED,
+ NULL);
+ goto out;
+ }
+
+ /* initialize rollover-number in priv to 1 */
+ priv->rollover_count = 1;
+
+out:
+ if (ht_dir_fd != -1)
+ sys_close(ht_dir_fd);
+ if (ht_file_fd != -1)
+ sys_close(ht_file_fd);
+ return ret;
+}
+
+/* Description:
+ * Opens the snap changelog to log call path fops in it.
+ *      This changelog's name is "CHANGELOG.SNAP", stored in
+ * path ".glusterfs/changelogs/csnap".
+ * Returns:
+ * 0 : On success.
+ * -1 : On failure.
+ */
+int
+changelog_snap_open(xlator_t *this, changelog_priv_t *priv)
+{
+ int fd = -1;
+ int ret = 0;
+ int flags = 0;
+ char buffer[1024] = {
+ 0,
+ };
+ char c_snap_path[PATH_MAX] = {
+ 0,
+ };
+ char csnap_dir_path[PATH_MAX] = {
+ 0,
+ };
+ int32_t len = 0;
+
+ CHANGELOG_FILL_CSNAP_DIR(priv->changelog_dir, csnap_dir_path);
+
+ len = snprintf(c_snap_path, PATH_MAX, "%s/" CSNAP_FILE_NAME,
+ csnap_dir_path);
+ if ((len < 0) || (len >= PATH_MAX)) {
+ ret = -1;
+ goto out;
+ }
+
+ flags |= (O_CREAT | O_RDWR | O_TRUNC);
+
+ fd = open(c_snap_path, flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if (fd < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_OPEN_FAILED,
+ "path=%s", c_snap_path, NULL);
+ ret = -1;
+ goto out;
+ }
+ priv->c_snap_fd = fd;
+
+ (void)snprintf(buffer, 1024, CHANGELOG_HEADER, CHANGELOG_VERSION_MAJOR,
+ CHANGELOG_VERSION_MINOR, priv->ce->encoder);
+ ret = changelog_snap_write_change(priv, buffer, strlen(buffer));
+ if (ret < 0) {
+ sys_close(priv->c_snap_fd);
+ priv->c_snap_fd = -1;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+/*
+ * Description:
+ * Starts logging fop details in CSNAP journal.
+ * Returns:
+ * 0 : On success.
+ * -1 : On Failure.
+ */
+int
+changelog_snap_logging_start(xlator_t *this, changelog_priv_t *priv)
+{
+ int ret = 0;
+
+ ret = changelog_snap_open(this, priv);
+ gf_smsg(this->name, GF_LOG_INFO, 0, CHANGELOG_MSG_SNAP_INFO, "starting",
+ NULL);
+
+ return ret;
+}
+
+/*
+ * Description:
+ * Stops logging fop details in CSNAP journal.
+ * Returns:
+ * 0 : On success.
+ * -1 : On Failure.
+ */
+int
+changelog_snap_logging_stop(xlator_t *this, changelog_priv_t *priv)
+{
+ int ret = 0;
+
+ sys_close(priv->c_snap_fd);
+ priv->c_snap_fd = -1;
+
+ gf_smsg(this->name, GF_LOG_INFO, 0, CHANGELOG_MSG_SNAP_INFO, "Stopped",
+ NULL);
+
+ return ret;
+}
+
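+/* Open (creating it if necessary) the active CHANGELOG journal and write
+ * the version header; O_SYNC is used when fsync-interval is 0. */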
+int
+changelog_open_journal(xlator_t *this, changelog_priv_t *priv)
+{
+ int fd = 0;
+ int ret = -1;
+ int flags = 0;
+ char buffer[1024] = {
+ 0,
+ };
+ char changelog_path[PATH_MAX] = {
+ 0,
+ };
+
+ (void)snprintf(changelog_path, PATH_MAX, "%s/" CHANGELOG_FILE_NAME,
+ priv->changelog_dir);
+
+ flags |= (O_CREAT | O_RDWR);
+ if (priv->fsync_interval == 0)
+ flags |= O_SYNC;
+
+ fd = open(changelog_path, flags, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if (fd < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_OPEN_FAILED,
+ "path=%s", changelog_path, NULL);
+ goto out;
+ }
+
+ priv->changelog_fd = fd;
+
+ (void)snprintf(buffer, 1024, CHANGELOG_HEADER, CHANGELOG_VERSION_MAJOR,
+ CHANGELOG_VERSION_MINOR, priv->ce->encoder);
+ ret = changelog_write_change(priv, buffer, strlen(buffer));
+ if (ret) {
+ sys_close(priv->changelog_fd);
+ priv->changelog_fd = -1;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int
+changelog_start_next_change(xlator_t *this, changelog_priv_t *priv, time_t ts,
+ gf_boolean_t finale)
+{
+ int ret = -1;
+
+ ret = changelog_rollover_changelog(this, priv, ts);
+
+ if (!ret && !finale)
+ ret = changelog_open_journal(this, priv);
+
+ return ret;
+}
+
+/**
+ * return the length of an entry
+ */
+size_t
+changelog_entry_length()
+{
+ return sizeof(changelog_log_data_t);
+}
+
+void
+changelog_fill_rollover_data(changelog_log_data_t *cld, gf_boolean_t is_last)
+{
+ cld->cld_type = CHANGELOG_TYPE_ROLLOVER;
+ cld->cld_roll_time = gf_time();
+ cld->cld_finale = is_last;
+}
+
+int
+changelog_snap_write_change(changelog_priv_t *priv, char *buffer, size_t len)
+{
+ return changelog_write(priv->c_snap_fd, buffer, len);
+}
+
+int
+changelog_write_change(changelog_priv_t *priv, char *buffer, size_t len)
+{
+ return changelog_write(priv->changelog_fd, buffer, len);
+}
+
+/*
+ * Description:
+ *      Writes fop details in ASCII format to CSNAP.
+ * Issues:
+ *      Not encoding agnostic.
+ * Returns:
+ * 0 : On Success.
+ * -1 : On Failure.
+ */
+int
+changelog_snap_handle_ascii_change(xlator_t *this, changelog_log_data_t *cld)
+{
+ size_t off = 0;
+ size_t gfid_len = 0;
+ char *gfid_str = NULL;
+ char *buffer = NULL;
+ changelog_priv_t *priv = NULL;
+ int ret = 0;
+
+ if (this == NULL) {
+ ret = -1;
+ goto out;
+ }
+
+ priv = this->private;
+
+ if (priv == NULL) {
+ ret = -1;
+ goto out;
+ }
+
+ gfid_str = uuid_utoa(cld->cld_gfid);
+ gfid_len = strlen(gfid_str);
+
+ /* extra bytes for decorations */
+ buffer = alloca(gfid_len + cld->cld_ptr_len + 10);
+ CHANGELOG_STORE_ASCII(priv, buffer, off, gfid_str, gfid_len, cld);
+
+ CHANGELOG_FILL_BUFFER(buffer, off, "\0", 1);
+
+ ret = changelog_snap_write_change(priv, buffer, off);
+
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_WRITE_FAILED,
+ "csnap", NULL);
+ }
+ gf_smsg(this->name, GF_LOG_INFO, 0, CHANGELOG_MSG_WROTE_TO_CSNAP, NULL);
+ ret = 0;
+out:
+ return ret;
+}
+
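+/* Consumer-side handler for queued log records: rollover records reopen the
+ * journal, fsync records fsync it, everything else is encoded and written
+ * to the current changelog. */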
+int
+changelog_handle_change(xlator_t *this, changelog_priv_t *priv,
+ changelog_log_data_t *cld)
+{
+ int ret = 0;
+
+ if (CHANGELOG_TYPE_IS_ROLLOVER(cld->cld_type)) {
+ changelog_encode_change(priv);
+ ret = changelog_start_next_change(this, priv, cld->cld_roll_time,
+ cld->cld_finale);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_MSG_GET_TIME_OP_FAILED, NULL);
+ goto out;
+ }
+
+ /**
+     * Case when a reconfigure has been done (disabling changelog) and there
+     * are still fops that have updates in progress.
+ */
+ if (priv->changelog_fd == -1)
+ return 0;
+
+ if (CHANGELOG_TYPE_IS_FSYNC(cld->cld_type)) {
+ ret = sys_fsync(priv->changelog_fd);
+ if (ret < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_FSYNC_OP_FAILED, NULL);
+ }
+ goto out;
+ }
+
+ ret = priv->ce->encode(this, cld);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_WRITE_FAILED,
+ "changelog", NULL);
+ }
+
+out:
+ return ret;
+}
+
+changelog_local_t *
+changelog_local_init(xlator_t *this, inode_t *inode, uuid_t gfid,
+ int xtra_records, gf_boolean_t update_flag)
+{
+ changelog_local_t *local = NULL;
+ struct iobuf *iobuf = NULL;
+
+ /**
+ * We relax the presence of inode if @update_flag is true.
+ * The caller (implementation of the fop) needs to be careful to
+ * not blindly use local->inode.
+ */
+ if (!update_flag && !inode) {
+ gf_msg_callingfn(this->name, GF_LOG_WARNING, 0,
+ CHANGELOG_MSG_INODE_NOT_FOUND,
+ "inode needed for version checking !!!");
+
+ goto out;
+ }
+
+ if (xtra_records) {
+ iobuf = iobuf_get2(this->ctx->iobuf_pool,
+ xtra_records * CHANGELOG_OPT_RECORD_LEN);
+ if (!iobuf)
+ goto out;
+ }
+
+ local = mem_get0(this->local_pool);
+ if (!local) {
+ CHANGELOG_IOBUF_UNREF(iobuf);
+ goto out;
+ }
+
+ local->update_no_check = update_flag;
+
+ gf_uuid_copy(local->cld.cld_gfid, gfid);
+
+ local->cld.cld_iobuf = iobuf;
+ local->cld.cld_xtra_records = 0; /* set by the caller */
+
+ if (inode)
+ local->inode = inode_ref(inode);
+
+out:
+ return local;
+}
+
+int
+changelog_forget(xlator_t *this, inode_t *inode)
+{
+ uint64_t ctx_addr = 0;
+ changelog_inode_ctx_t *ctx = NULL;
+
+ inode_ctx_del(inode, this, &ctx_addr);
+ if (!ctx_addr)
+ return 0;
+
+ ctx = (changelog_inode_ctx_t *)(long)ctx_addr;
+ GF_FREE(ctx);
+
+ return 0;
+}
+
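+/* Hand a single log record directly to the configured dispatcher (used for
+ * internally generated rollover and fsync records). */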
+int
+changelog_inject_single_event(xlator_t *this, changelog_priv_t *priv,
+ changelog_log_data_t *cld)
+{
+ return priv->cd.dispatchfn(this, priv, priv->cd.cd_data, cld, NULL);
+}
+
+/* Wait till all the black fops are drained */
+void
+changelog_drain_black_fops(xlator_t *this, changelog_priv_t *priv)
+{
+ int ret = 0;
+
+    /* The pthread cleanup framework is required for the mutex here, as
+ * 'reconfigure' terminates the changelog_rollover thread
+ * on graph change.
+ */
+ pthread_cleanup_push(changelog_cleanup_free_mutex,
+ &priv->dm.drain_black_mutex);
+ ret = pthread_mutex_lock(&priv->dm.drain_black_mutex);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_PTHREAD_ERROR,
+ "error=%d", ret, NULL);
+ while (priv->dm.black_fop_cnt > 0) {
+ gf_msg_debug(this->name, 0, "Conditional wait on black fops: %ld",
+ priv->dm.black_fop_cnt);
+ priv->dm.drain_wait_black = _gf_true;
+ ret = pthread_cond_wait(&priv->dm.drain_black_cond,
+ &priv->dm.drain_black_mutex);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_PTHREAD_COND_WAIT_FAILED, "error=%d", ret,
+ NULL);
+ }
+ priv->dm.drain_wait_black = _gf_false;
+ ret = pthread_mutex_unlock(&priv->dm.drain_black_mutex);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_PTHREAD_ERROR,
+ "error=%d", ret, NULL);
+ pthread_cleanup_pop(0);
+ gf_msg_debug(this->name, 0, "Woke up: Conditional wait on black fops");
+}
+
+/* Wait till all the white fops are drained */
+void
+changelog_drain_white_fops(xlator_t *this, changelog_priv_t *priv)
+{
+ int ret = 0;
+
+    /* The pthread cleanup framework is required for the mutex here, as
+ * 'reconfigure' terminates the changelog_rollover thread
+ * on graph change.
+ */
+ pthread_cleanup_push(changelog_cleanup_free_mutex,
+ &priv->dm.drain_white_mutex);
+ ret = pthread_mutex_lock(&priv->dm.drain_white_mutex);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_PTHREAD_ERROR,
+ "error=%d", ret, NULL);
+ while (priv->dm.white_fop_cnt > 0) {
+ gf_msg_debug(this->name, 0, "Conditional wait on white fops : %ld",
+ priv->dm.white_fop_cnt);
+ priv->dm.drain_wait_white = _gf_true;
+ ret = pthread_cond_wait(&priv->dm.drain_white_cond,
+ &priv->dm.drain_white_mutex);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_PTHREAD_COND_WAIT_FAILED, "error=%d", ret,
+ NULL);
+ }
+ priv->dm.drain_wait_white = _gf_false;
+ ret = pthread_mutex_unlock(&priv->dm.drain_white_mutex);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_PTHREAD_ERROR,
+ "error=%d", ret, NULL);
+ pthread_cleanup_pop(0);
+ gf_msg_debug(this->name, 0, "Woke up: Conditional wait on white fops");
+}
+
+/**
+ * TODO: these threads have many things in common (wake up after
+ * a certain time, etc.). Move them into a separate routine.
+ */
+void *
+changelog_rollover(void *data)
+{
+ int ret = 0;
+ xlator_t *this = NULL;
+ struct timespec tv = {
+ 0,
+ };
+ changelog_log_data_t cld = {
+ 0,
+ };
+ changelog_time_slice_t *slice = NULL;
+ changelog_priv_t *priv = data;
+
+ this = priv->cr.this;
+ slice = &priv->slice;
+
+ while (1) {
+ (void)pthread_testcancel();
+
+ tv.tv_sec = gf_time() + priv->rollover_time;
+ tv.tv_nsec = 0;
+ ret = 0; /* Reset ret to zero */
+
+ /* The race between actual rollover and explicit rollover is
+ * handled. If actual rollover is being done and the
+ * explicit rollover event comes, the event is not missed.
+ * Since explicit rollover sets 'cr.notify' to true, this
+ * thread doesn't wait on 'pthread_cond_timedwait'.
+ */
+ pthread_cleanup_push(changelog_cleanup_free_mutex, &priv->cr.lock);
+ pthread_mutex_lock(&priv->cr.lock);
+ {
+ while (ret == 0 && !priv->cr.notify)
+ ret = pthread_cond_timedwait(&priv->cr.cond, &priv->cr.lock,
+ &tv);
+ if (ret == 0)
+ priv->cr.notify = _gf_false;
+ }
+ pthread_mutex_unlock(&priv->cr.lock);
+ pthread_cleanup_pop(0);
+
+ if (ret == 0) {
+ gf_smsg(this->name, GF_LOG_INFO, 0, CHANGELOG_MSG_BARRIER_INFO,
+ NULL);
+ priv->explicit_rollover = _gf_true;
+ } else if (ret && ret != ETIMEDOUT) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_SELECT_FAILED, NULL);
+ continue;
+ } else if (ret && ret == ETIMEDOUT) {
+            gf_msg_debug(this->name, 0, "Woke up on timeout");
+ }
+
+        /* Reading current_color without the lock is fine here,
+         * as it is only modified just below this read.
+ */
+ if (priv->current_color == FOP_COLOR_BLACK) {
+ LOCK(&priv->lock);
+ priv->current_color = FOP_COLOR_WHITE;
+ UNLOCK(&priv->lock);
+ gf_msg_debug(this->name, 0,
+ "Black fops"
+ " to be drained:%ld",
+ priv->dm.black_fop_cnt);
+ changelog_drain_black_fops(this, priv);
+ } else {
+ LOCK(&priv->lock);
+ priv->current_color = FOP_COLOR_BLACK;
+ UNLOCK(&priv->lock);
+ gf_msg_debug(this->name, 0,
+ "White fops"
+ " to be drained:%ld",
+ priv->dm.white_fop_cnt);
+ changelog_drain_white_fops(this, priv);
+ }
+
+ /* Adding delay of 1 second only during explicit rollover:
+ *
+ * Changelog rollover can happen either due to actual
+ * or the explicit rollover during snapshot. Actual
+ * rollover is controlled by tuneable called 'rollover-time'.
+ * The minimum granularity for rollover-time is 1 second.
+ * Explicit rollover is asynchronous in nature and happens
+ * during snapshot.
+ *
+ * Basically, rollover renames the current CHANGELOG file
+ * to CHANGELOG.TIMESTAMP. Let's assume, at time 't1',
+ * actual and explicit rollover raced against each
+         * other and actual rollover won the race, renaming the
+         * CHANGELOG file to CHANGELOG.t1 and opening a new
+         * CHANGELOG file. There is a high chance that an immediate
+         * explicit rollover can happen within the same second,
+         * renaming the CHANGELOG file to CHANGELOG.t1 again and
+         * thereby purging the earlier CHANGELOG.t1 created by the
+         * actual rollover. So adding a delay of 1 second guarantees
+         * a unique CHANGELOG.TIMESTAMP during explicit rollover.
+ */
+ if (priv->explicit_rollover == _gf_true)
+ sleep(1);
+
+ changelog_fill_rollover_data(&cld, _gf_false);
+
+ _mask_cancellation();
+
+ LOCK(&priv->lock);
+ {
+ ret = changelog_inject_single_event(this, priv, &cld);
+ if (!ret)
+ SLICE_VERSION_UPDATE(slice);
+ }
+ UNLOCK(&priv->lock);
+
+ _unmask_cancellation();
+ }
+
+ return NULL;
+}
+
+void *
+changelog_fsync_thread(void *data)
+{
+ int ret = 0;
+ xlator_t *this = NULL;
+ struct timeval tv = {
+ 0,
+ };
+ changelog_log_data_t cld = {
+ 0,
+ };
+ changelog_priv_t *priv = data;
+
+ this = priv->cf.this;
+ cld.cld_type = CHANGELOG_TYPE_FSYNC;
+
+ while (1) {
+ (void)pthread_testcancel();
+
+ tv.tv_sec = priv->fsync_interval;
+ tv.tv_usec = 0;
+
+ ret = select(0, NULL, NULL, NULL, &tv);
+ if (ret)
+ continue;
+
+ _mask_cancellation();
+
+ ret = changelog_inject_single_event(this, priv, &cld);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_MSG_INJECT_FSYNC_FAILED, NULL);
+
+ _unmask_cancellation();
+ }
+
+ return NULL;
+}
+
+/* macros for inode/changelog version checks */
+
+#define INODE_VERSION_UPDATE(priv, inode, iver, slice, type) \
+ do { \
+ LOCK(&inode->lock); \
+ { \
+ LOCK(&priv->lock); \
+ { \
+ *iver = slice->changelog_version[type]; \
+ } \
+ UNLOCK(&priv->lock); \
+ } \
+ UNLOCK(&inode->lock); \
+ } while (0)
+
+#define INODE_VERSION_EQUALS_SLICE(priv, ver, slice, type, upd) \
+ do { \
+ LOCK(&priv->lock); \
+ { \
+ upd = (ver == slice->changelog_version[type]) ? _gf_false \
+ : _gf_true; \
+ } \
+ UNLOCK(&priv->lock); \
+ } while (0)
+
+static int
+__changelog_inode_ctx_set(xlator_t *this, inode_t *inode,
+ changelog_inode_ctx_t *ctx)
+{
+ uint64_t ctx_addr = (uint64_t)(uintptr_t)ctx;
+ return __inode_ctx_set(inode, this, &ctx_addr);
+}
+
+/**
+ * one-shot routine to get the address and the value of an inode version
+ * for a particular type.
+ */
+changelog_inode_ctx_t *
+__changelog_inode_ctx_get(xlator_t *this, inode_t *inode, unsigned long **iver,
+ unsigned long *version, changelog_log_type type)
+{
+ int ret = 0;
+ uint64_t ctx_addr = 0;
+ changelog_inode_ctx_t *ctx = NULL;
+
+ ret = __inode_ctx_get(inode, this, &ctx_addr);
+ if (ret < 0)
+ ctx_addr = 0;
+ if (ctx_addr != 0) {
+ ctx = (changelog_inode_ctx_t *)(long)ctx_addr;
+ goto out;
+ }
+
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_changelog_mt_inode_ctx_t);
+ if (!ctx)
+ goto out;
+
+ ret = __changelog_inode_ctx_set(this, inode, ctx);
+ if (ret) {
+ GF_FREE(ctx);
+ ctx = NULL;
+ }
+
+out:
+ if (ctx && iver && version) {
+ *iver = CHANGELOG_INODE_VERSION_TYPE(ctx, type);
+ *version = **iver;
+ }
+
+ return ctx;
+}
+
+static changelog_inode_ctx_t *
+changelog_inode_ctx_get(xlator_t *this, inode_t *inode, unsigned long **iver,
+ unsigned long *version, changelog_log_type type)
+{
+ changelog_inode_ctx_t *ctx = NULL;
+
+ LOCK(&inode->lock);
+ {
+ ctx = __changelog_inode_ctx_get(this, inode, iver, version, type);
+ }
+ UNLOCK(&inode->lock);
+
+ return ctx;
+}
+
+/**
+ * This is the main update routine. Locking has been made granular so as to
+ * maximize parallelism of fops - I'll try to explain it below using execution
+ * timelines.
+ *
+ * Basically, the contention is between multiple execution threads of this
+ * routine and the roll-over thread. So, instead of having a big lock, we hold
+ * granular locks: inode->lock and priv->lock. Now I'll explain what happens
+ * when there is an update and a roll-over at just about the same time.
+ * NOTE:
+ *  - the dispatcher itself synchronizes updates via its own lock
+ *  - the slice version is incremented by the roll-over thread
+ *
+ * Case 1: When the rollover thread wins before the inode version can be
+ * compared with the slice version.
+ *
+ * [updater] | [rollover]
+ * |
+ * | <SLICE: 1, 1, 1>
+ * <changelog_update> |
+ * <changelog_inode_ctx_get> |
+ * <CTX: 1, 1, 1> |
+ * | <dispatch-rollover-event>
+ * | LOCK (&priv->lock)
+ * | <SLICE_VERSION_UPDATE>
+ * | <SLICE: 2, 2, 2>
+ * | UNLOCK (&priv->lock)
+ * |
+ * LOCK (&priv->lock) |
+ * <INODE_VERSION_EQUALS_SLICE> |
+ * I: 1 <-> S: 2 |
+ * update: true |
+ * UNLOCK (&priv->lock) |
+ * |
+ * <if update == true> |
+ * <dispath-update-event> |
+ * <INODE_VERSION_UPDATE> |
+ * LOCK (&inode->lock) |
+ * LOCK (&priv->lock) |
+ * <CTX: 2, 1, 1> |
+ * UNLOCK (&priv->lock) |
+ * UNLOCK (&inode->lock) |
+ *
+ * Therefore, the change gets recorded in the next change (no lost change). If
+ * the slice version was ahead of the inode version (say I:1, S: 2), then
+ * anyway the comparison would result in an update (I: 1, S: 3).
+ *
+ * If the rollover time is too small, then there is another contention when the
+ * updater tries to bring the inode version up to the slice version (this is
+ * also the case when the roll-over thread wakes up during
+ * INODE_VERSION_UPDATE).
+ *
+ * <CTX: 1, 1, 1> | <SLICE: 2, 2, 2>
+ * |
+ * |
+ * <dispath-update-event> |
+ * <INODE_VERSION_UPDATE> |
+ * LOCK (&inode->lock) |
+ * LOCK (&priv->lock) |
+ * <CTX: 2, 1, 1> |
+ * UNLOCK (&priv->lock) |
+ * UNLOCK (&inode->lock) |
+ * | <dispatch-rollover-event>
+ * | LOCK (&priv->lock)
+ * | <SLICE_VERSION_UPDATE>
+ * | <SLICE: 3, 3, 3>
+ * | UNLOCK (&priv->lock)
+ *
+ *
+ * Case 2: When the fop thread wins
+ *
+ * [updater] | [rollover]
+ * |
+ * | <SLICE: 1, 1, 1>
+ * <changelog_update> |
+ * <changelog_inode_ctx_get> |
+ * <CTX: 0, 0, 0> |
+ * |
+ * LOCK (&priv->lock) |
+ * <INODE_VERSION_EQUALS_SLICE> |
+ * I: 0 <-> S: 1 |
+ * update: true |
+ * UNLOCK (&priv->lock) |
+ * | <dispatch-rollover-event>
+ * | LOCK (&priv->lock)
+ * | <SLICE_VERSION_UPDATE>
+ * | <SLICE: 2, 2, 2>
+ * | UNLOCK (&priv->lock)
+ * <if update == true> |
+ * <dispath-update-event> |
+ * <INODE_VERSION_UPDATE> |
+ * LOCK (&inode->lock) |
+ * LOCK (&priv->lock) |
+ * <CTX: 2, 0, 0> |
+ * UNLOCK (&priv->lock) |
+ * UNLOCK (&inode->lock) |
+ *
+ * Here again, if the inode version was equal to the slice version (I: 1, S: 1)
+ * then there is no need to record an update (as the equality of the two versions
+ * signifies an update was recorded in the current time slice).
+ */
+void
+changelog_update(xlator_t *this, changelog_priv_t *priv,
+ changelog_local_t *local, changelog_log_type type)
+{
+ int ret = 0;
+ unsigned long *iver = NULL;
+ unsigned long version = 0;
+ inode_t *inode = NULL;
+ changelog_time_slice_t *slice = NULL;
+ changelog_inode_ctx_t *ctx = NULL;
+ changelog_log_data_t *cld_0 = NULL;
+ changelog_log_data_t *cld_1 = NULL;
+ changelog_local_t *next_local = NULL;
+ gf_boolean_t need_upd = _gf_true;
+
+ slice = &priv->slice;
+
+ /**
+ * for fops that do not require inode version checking
+ */
+ if (local->update_no_check)
+ goto update;
+
+ inode = local->inode;
+
+ ctx = changelog_inode_ctx_get(this, inode, &iver, &version, type);
+ if (!ctx)
+ goto update;
+
+ INODE_VERSION_EQUALS_SLICE(priv, version, slice, type, need_upd);
+
+update:
+ if (need_upd) {
+ cld_0 = &local->cld;
+ cld_0->cld_type = type;
+
+ if ((next_local = local->prev_entry) != NULL) {
+ cld_1 = &next_local->cld;
+ cld_1->cld_type = type;
+ }
+
+ ret = priv->cd.dispatchfn(this, priv, priv->cd.cd_data, cld_0, cld_1);
+
+ /**
+ * update after the dispatcher has successfully done
+         * its job.
+ */
+ if (!local->update_no_check && iver && !ret)
+ INODE_VERSION_UPDATE(priv, inode, iver, slice, type);
+ }
+
+ return;
+}
+
+/* Begin: Geo-rep snapshot dependency changes */
+
+/* changelog_color_fop_and_inc_cnt: Assign color and inc fop cnt.
+ *
+ * Assigning the color and incrementing the corresponding fop count must
+ * happen under a lock (i.e., there should be no window between them).
+ * Otherwise we might miss draining fops that have been colored but whose
+ * count has not yet been incremented. Let's assume black fops are draining:
+ * if the black fop count reaches zero we declare draining complete, missing
+ * black fops that were colored black but whose count was not yet incremented.
+ */
+
+void
+changelog_color_fop_and_inc_cnt(xlator_t *this, changelog_priv_t *priv,
+ changelog_local_t *local)
+{
+ if (!priv || !local)
+ return;
+
+ LOCK(&priv->lock);
+ {
+ local->color = priv->current_color;
+ changelog_inc_fop_cnt(this, priv, local);
+ }
+ UNLOCK(&priv->lock);
+}
+
+/* Increments the respective fop counter based on the fop color */
+void
+changelog_inc_fop_cnt(xlator_t *this, changelog_priv_t *priv,
+ changelog_local_t *local)
+{
+ int ret = 0;
+
+ if (local) {
+ if (local->color == FOP_COLOR_BLACK) {
+ ret = pthread_mutex_lock(&priv->dm.drain_black_mutex);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_0(ret, out);
+ {
+ priv->dm.black_fop_cnt++;
+ }
+ ret = pthread_mutex_unlock(&priv->dm.drain_black_mutex);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_0(ret, out);
+ } else {
+ ret = pthread_mutex_lock(&priv->dm.drain_white_mutex);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_0(ret, out);
+ {
+ priv->dm.white_fop_cnt++;
+ }
+ ret = pthread_mutex_unlock(&priv->dm.drain_white_mutex);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_0(ret, out);
+ }
+ }
+out:
+ return;
+}
+
+/* Decrements the respective fop counter based on the fop color */
+void
+changelog_dec_fop_cnt(xlator_t *this, changelog_priv_t *priv,
+ changelog_local_t *local)
+{
+ int ret = 0;
+
+ if (local) {
+ if (local->color == FOP_COLOR_BLACK) {
+ ret = pthread_mutex_lock(&priv->dm.drain_black_mutex);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_0(ret, out);
+ {
+ priv->dm.black_fop_cnt--;
+ if (priv->dm.black_fop_cnt == 0 &&
+ priv->dm.drain_wait_black == _gf_true) {
+ ret = pthread_cond_signal(&priv->dm.drain_black_cond);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_2(
+ ret, out, priv->dm.drain_black_mutex);
+ gf_msg_debug(this->name, 0,
+ "Signalled "
+ "draining of black");
+ }
+ }
+ ret = pthread_mutex_unlock(&priv->dm.drain_black_mutex);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_0(ret, out);
+ } else {
+ ret = pthread_mutex_lock(&priv->dm.drain_white_mutex);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_0(ret, out);
+ {
+ priv->dm.white_fop_cnt--;
+ if (priv->dm.white_fop_cnt == 0 &&
+ priv->dm.drain_wait_white == _gf_true) {
+ ret = pthread_cond_signal(&priv->dm.drain_white_cond);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_2(
+ ret, out, priv->dm.drain_white_mutex);
+ gf_msg_debug(this->name, 0,
+ "Signalled "
+ "draining of white");
+ }
+ }
+ ret = pthread_mutex_unlock(&priv->dm.drain_white_mutex);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_0(ret, out);
+ }
+ }
+out:
+ return;
+}
+
+/* Notify the changelog rollover thread (via its condition variable) to
+ * initiate an explicit rollover of the changelog journal.
+ */
+int
+changelog_barrier_notify(changelog_priv_t *priv, char *buf)
+{
+ int ret = 0;
+
+ pthread_mutex_lock(&priv->cr.lock);
+ {
+ ret = pthread_cond_signal(&priv->cr.cond);
+ priv->cr.notify = _gf_true;
+ }
+ pthread_mutex_unlock(&priv->cr.lock);
+ return ret;
+}
+
+/* Clean up flags set on barrier notification */
+void
+changelog_barrier_cleanup(xlator_t *this, changelog_priv_t *priv,
+ struct list_head *queue)
+{
+ int ret = 0;
+
+ LOCK(&priv->bflags.lock);
+ priv->bflags.barrier_ext = _gf_false;
+ UNLOCK(&priv->bflags.lock);
+
+ ret = pthread_mutex_lock(&priv->bn.bnotify_mutex);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_0(ret, out);
+ {
+ priv->bn.bnotify = _gf_false;
+ }
+ ret = pthread_mutex_unlock(&priv->bn.bnotify_mutex);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_0(ret, out);
+
+ /* Disable changelog barrier and dequeue fops */
+ LOCK(&priv->lock);
+ {
+ if (priv->barrier_enabled == _gf_true)
+ __chlog_barrier_disable(this, queue);
+ else
+ ret = -1;
+ }
+ UNLOCK(&priv->lock);
+ if (ret == 0)
+ chlog_barrier_dequeue_all(this, queue);
+
+out:
+ return;
+}
+/* End: Geo-Rep snapshot dependency changes */
+
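+/* Build a five-record entry (fop number, mode, uid, gid, pargfid/basename)
+ * for the given loc into a freshly initialized changelog local. */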
+int32_t
+changelog_fill_entry_buf(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ changelog_local_t **local)
+{
+ changelog_opt_t *co = NULL;
+ size_t xtra_len = 0;
+ char *dup_path = NULL;
+ char *bname = NULL;
+ inode_t *parent = NULL;
+
+ GF_ASSERT(this);
+
+ parent = inode_parent(loc->inode, 0, 0);
+ if (!parent) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, CHANGELOG_MSG_INODE_NOT_FOUND,
+ "type=parent", "gfid=%s", uuid_utoa(loc->inode->gfid), NULL);
+ goto err;
+ }
+
+ CHANGELOG_INIT_NOCHECK(this, *local, loc->inode, loc->inode->gfid, 5);
+ if (!(*local)) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_LOCAL_INIT_FAILED,
+ NULL);
+ goto err;
+ }
+
+ co = changelog_get_usable_buffer(*local);
+ if (!co) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_GET_BUFFER_FAILED,
+ NULL);
+ goto err;
+ }
+
+ if (loc->inode->ia_type == IA_IFDIR) {
+ CHANGLOG_FILL_FOP_NUMBER(co, GF_FOP_MKDIR, fop_fn, xtra_len);
+ co++;
+ CHANGELOG_FILL_UINT32(co, S_IFDIR | 0755, number_fn, xtra_len);
+ co++;
+ } else {
+ CHANGLOG_FILL_FOP_NUMBER(co, GF_FOP_CREATE, fop_fn, xtra_len);
+ co++;
+ CHANGELOG_FILL_UINT32(co, S_IFREG | 0644, number_fn, xtra_len);
+ co++;
+ }
+
+ CHANGELOG_FILL_UINT32(co, frame->root->uid, number_fn, xtra_len);
+ co++;
+
+ CHANGELOG_FILL_UINT32(co, frame->root->gid, number_fn, xtra_len);
+ co++;
+
+ dup_path = gf_strdup(loc->path);
+ bname = basename(dup_path);
+
+ CHANGELOG_FILL_ENTRY(co, parent->gfid, bname, entry_fn, entry_free_fn,
+ xtra_len, err);
+ changelog_set_usable_record_and_length(*local, xtra_len, 5);
+
+ if (dup_path)
+ GF_FREE(dup_path);
+ if (parent)
+ inode_unref(parent);
+ return 0;
+
+err:
+ if (dup_path)
+ GF_FREE(dup_path);
+ if (parent)
+ inode_unref(parent);
+ return -1;
+}
+
+/*
+ * resolve_pargfid_to_path:
+ *      Converts the given pargfid to a path by doing recursive readlinks at
+ *      the backend. If bname is given, it is appended to form the complete
+ *      path; otherwise it is not. Memory is allocated for the path and it is
+ *      the caller's responsibility to free it. If bname is NULL and pargfid
+ *      is ROOT, "." is returned.
+ */
+
+int
+resolve_pargfid_to_path(xlator_t *this, const uuid_t pgfid, char **path,
+ char *bname)
+{
+ char *linkname = NULL;
+ char *dir_handle = NULL;
+ char *pgfidstr = NULL;
+ char *saveptr = NULL;
+ ssize_t len = 0;
+ int ret = 0;
+ uuid_t tmp_gfid = {
+ 0,
+ };
+ uuid_t pargfid = {
+ 0,
+ };
+ changelog_priv_t *priv = NULL;
+ char gpath[PATH_MAX] = {
+ 0,
+ };
+ char result[PATH_MAX] = {
+ 0,
+ };
+ char *dir_name = NULL;
+ char pre_dir_name[PATH_MAX] = {
+ 0,
+ };
+
+ GF_ASSERT(this);
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ gf_uuid_copy(pargfid, pgfid);
+ if (!path || gf_uuid_is_null(pargfid)) {
+ ret = -1;
+ goto out;
+ }
+
+ if (__is_root_gfid(pargfid)) {
+ if (bname)
+ *path = gf_strdup(bname);
+ else
+ *path = gf_strdup(".");
+ return ret;
+ }
+
+ dir_handle = alloca(PATH_MAX);
+ linkname = alloca(PATH_MAX);
+ (void)snprintf(gpath, PATH_MAX, "%s/.glusterfs/", priv->changelog_brick);
+
+ while (!(__is_root_gfid(pargfid))) {
+ len = snprintf(dir_handle, PATH_MAX, "%s/%02x/%02x/%s", gpath,
+ pargfid[0], pargfid[1], uuid_utoa(pargfid));
+ if ((len < 0) || (len >= PATH_MAX)) {
+ ret = -1;
+ goto out;
+ }
+
+ len = sys_readlink(dir_handle, linkname, PATH_MAX);
+ if (len < 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_READLINK_OP_FAILED,
+ "could not read the "
+ "link from the gfid handle",
+ "handle=%s", dir_handle, NULL);
+ ret = -1;
+ goto out;
+ }
+
+ linkname[len] = '\0';
+
+ pgfidstr = strtok_r(linkname + strlen("../../00/00/"), "/", &saveptr);
+ dir_name = strtok_r(NULL, "/", &saveptr);
+
+ len = snprintf(result, PATH_MAX, "%s/%s", dir_name, pre_dir_name);
+ if ((len < 0) || (len >= PATH_MAX)) {
+ ret = -1;
+ goto out;
+ }
+ if (snprintf(pre_dir_name, len + 1, "%s", result) >= len + 1) {
+ ret = -1;
+ goto out;
+ }
+
+ gf_uuid_parse(pgfidstr, tmp_gfid);
+ gf_uuid_copy(pargfid, tmp_gfid);
+ }
+
+ if (bname)
+ strncat(result, bname, strlen(bname) + 1);
+
+ *path = gf_strdup(result);
+
+out:
+ return ret;
+}
diff --git a/xlators/features/changelog/src/changelog-helpers.h b/xlators/features/changelog/src/changelog-helpers.h
new file mode 100644
index 00000000000..38fa7590c32
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-helpers.h
@@ -0,0 +1,716 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _CHANGELOG_HELPERS_H
+#define _CHANGELOG_HELPERS_H
+
+#include <glusterfs/locking.h>
+#include <glusterfs/timer.h>
+#include "pthread.h"
+#include <glusterfs/iobuf.h>
+#include <glusterfs/rot-buffs.h>
+
+#include "changelog-misc.h"
+#include <glusterfs/call-stub.h>
+
+#include "rpcsvc.h"
+#include "changelog-ev-handle.h"
+
+#include "changelog.h"
+#include "changelog-messages.h"
+
+/**
+ * the changelog entry
+ */
+typedef struct changelog_log_data {
+ /* rollover related */
+ time_t cld_roll_time;
+
+ /* reopen changelog? */
+ gf_boolean_t cld_finale;
+
+ changelog_log_type cld_type;
+
+ /**
+ * sincd gfid is _always_ a necessity, it's not a part
+ * of the iobuf. by doing this we do not add any overhead
+ * for data and metadata related fops.
+ */
+ uuid_t cld_gfid;
+
+ /**
+     * iobufs are used for optional records: pargfid, path,
+     * write offsets, etc. It's the fop implementer's job
+     * to allocate them (iobuf_get() in the fop) and get them unref'ed
+ * in the callback (CHANGELOG_STACK_UNWIND).
+ */
+ struct iobuf *cld_iobuf;
+
+#define cld_ptr cld_iobuf->ptr
+
+ /**
+ * after allocation you can point this to the length of
+     * usable data, but make sure it does not exceed
+     * the size of the requested iobuf.
+ */
+ size_t cld_iobuf_len;
+
+#define cld_ptr_len cld_iobuf_len
+
+ /**
+ * number of optional records
+ */
+ int cld_xtra_records;
+} changelog_log_data_t;
+
+/**
+ * holder for dispatch function and private data
+ */
+
+typedef struct changelog_priv changelog_priv_t;
+
+typedef struct changelog_dispatcher {
+ void *cd_data;
+ int (*dispatchfn)(xlator_t *, changelog_priv_t *, void *,
+ changelog_log_data_t *, changelog_log_data_t *);
+} changelog_dispatcher_t;
+
+struct changelog_bootstrap {
+ changelog_mode_t mode;
+ int (*ctor)(xlator_t *, changelog_dispatcher_t *);
+ int (*dtor)(xlator_t *, changelog_dispatcher_t *);
+};
+
+struct changelog_encoder {
+ changelog_encoder_t encoder;
+ int (*encode)(xlator_t *, changelog_log_data_t *);
+};
+
+/* xlator private */
+
+typedef struct changelog_time_slice {
+ /**
+     * version of the changelog file, incremented each time the
+     * changelog rolls over.
+ */
+ unsigned long changelog_version[CHANGELOG_MAX_TYPE];
+} changelog_time_slice_t;
+
+typedef struct changelog_rollover {
+ /* rollover thread */
+ pthread_t rollover_th;
+
+ xlator_t *this;
+
+ pthread_mutex_t lock;
+ pthread_cond_t cond;
+ gf_boolean_t notify;
+} changelog_rollover_t;
+
+typedef struct changelog_fsync {
+ /* fsync() thread */
+ pthread_t fsync_th;
+
+ xlator_t *this;
+} changelog_fsync_t;
+
+/* Draining during changelog rollover (for geo-rep snapshot dependency):
+ * --------------------------------------------------------------------
+ * The introduction of draining of in-transit fops during changelog rollover
+ * (both explicit/timeout triggered) requires coloring of fops. Basically the
+ * implementation requires two counters: one keeps the count of in-transit
+ * fops that should end up in the current changelog, and the other keeps
+ * track of incoming fops that should be drained as part of the next
+ * changelog rollover event. The fops are colored w.r.t. these counters.
+ * The fops that are to be drained as part of the current changelog rollover
+ * are given one color, and the fops that keep arriving during this window,
+ * which need not end up in the current changelog and should be drained as
+ * part of the next changelog rollover, are given the other color. The colors
+ * switch with each changelog rollover. Two colors (black and white) are used,
+ * with black chosen as the initial default.
+ */
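+
+/*
+ * A minimal sketch of the mechanism (the real logic lives in the coloring
+ * and drain helpers declared below): an incoming fop is stamped with
+ * priv->current_color and the matching in-transit counter is incremented;
+ * on rollover the active color is flipped and the rollover path waits for
+ * the old color's counter to drain to zero before switching the changelog.
+ */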
+
+typedef enum chlog_fop_color {
+ FOP_COLOR_BLACK,
+ FOP_COLOR_WHITE
+} chlog_fop_color_t;
+
+/* Barrier notify variable */
+typedef struct barrier_notify {
+ pthread_mutex_t bnotify_mutex;
+ pthread_cond_t bnotify_cond;
+ gf_boolean_t bnotify;
+ gf_boolean_t bnotify_error;
+} barrier_notify_t;
+
+/* Two separate mutex and condition variable pairs are used
+ * to drain white and black fops. */
+
+typedef struct drain_mgmt {
+ pthread_mutex_t drain_black_mutex;
+ pthread_cond_t drain_black_cond;
+ pthread_mutex_t drain_white_mutex;
+ pthread_cond_t drain_white_cond;
+ /* Represents black fops count in-transit */
+ unsigned long black_fop_cnt;
+ /* Represents white fops count in-transit */
+ unsigned long white_fop_cnt;
+ gf_boolean_t drain_wait_black;
+ gf_boolean_t drain_wait_white;
+} drain_mgmt_t;
+
+/* Flag indicating an external barrier as a result of snap on/off */
+typedef struct barrier_flags {
+ gf_lock_t lock;
+ gf_boolean_t barrier_ext;
+} barrier_flags_t;
+
+/* Event selection */
+typedef struct changelog_ev_selector {
+ gf_lock_t reflock;
+
+ /**
+ * Array of references for each selection bit.
+ */
+ unsigned int ref[CHANGELOG_EV_SELECTION_RANGE];
+} changelog_ev_selector_t;
+
+/* changelog's private structure */
+struct changelog_priv {
+ /* changelog journalling */
+ gf_boolean_t active;
+
+ /* changelog live notifications */
+ gf_boolean_t rpc_active;
+
+ /* to generate unique socket file per brick */
+ char *changelog_brick;
+
+ /* logging directory */
+ char *changelog_dir;
+
+ /* htime directory */
+ char *htime_dir;
+
+ /* one file for all changelog types */
+ int changelog_fd;
+
+ /* htime fd for current changelog session */
+ int htime_fd;
+
+ /* c_snap_fd is fd for call-path changelog */
+ int c_snap_fd;
+
+ /* rollover_count used by htime */
+ int rollover_count;
+
+ gf_lock_t lock;
+
+ /* lock to synchronize CSNAP updation */
+ gf_lock_t c_snap_lock;
+
+ /* written end of the pipe */
+ int wfd;
+
+ /* rollover time */
+ int32_t rollover_time;
+
+ /* fsync() interval */
+ int32_t fsync_interval;
+
+ /* changelog type maps */
+ const char *maps[CHANGELOG_MAX_TYPE];
+
+ /* time slicer */
+ changelog_time_slice_t slice;
+
+ /* context of the updater */
+ changelog_dispatcher_t cd;
+
+ /* context of the rollover thread */
+ changelog_rollover_t cr;
+
+ /* context of fsync thread */
+ changelog_fsync_t cf;
+
+ /* operation mode */
+ changelog_mode_t op_mode;
+
+ /* bootstrap routine for 'current' logger */
+ struct changelog_bootstrap *cb;
+
+ /* encoder mode */
+ changelog_encoder_t encode_mode;
+
+ /* encoder */
+ struct changelog_encoder *ce;
+
+ /**
+ * snapshot dependency changes
+ */
+
+ /* Draining of fops*/
+ drain_mgmt_t dm;
+
+ /* Represents the active color. Initially by default black */
+ chlog_fop_color_t current_color;
+
+ /* flag to determine explicit rollover is triggered */
+ gf_boolean_t explicit_rollover;
+
+ /* barrier notification variable protected by mutex */
+ barrier_notify_t bn;
+
+ /* barrier on/off indicating flags */
+ barrier_flags_t bflags;
+
+ /* changelog barrier on/off indicating flag */
+ gf_boolean_t barrier_enabled;
+ struct list_head queue;
+ uint32_t queue_size;
+ gf_timer_t *timer;
+ struct timespec timeout;
+
+ /**
+ * buffers, RPC, event selection, notifications and other
+ * beasts.
+ */
+
+ /* epoll pthread */
+ pthread_t poller;
+
+ /* rotational buffer */
+ rbuf_t *rbuf;
+
+ /* changelog RPC server */
+ rpcsvc_t *rpc;
+
+ /* event selection */
+ changelog_ev_selector_t ev_selection;
+
+ /* client handling (reverse connection) */
+ pthread_t connector;
+
+ int nr_dispatchers;
+ pthread_t *ev_dispatcher;
+
+ changelog_clnt_t connections;
+
+ /* glusterfind dependency to capture paths on deleted entries*/
+ gf_boolean_t capture_del_path;
+
+    /* Save total no. of listeners */
+ gf_atomic_t listnercnt;
+
+    /* Save total no. of xprts associated with the listener */
+ gf_atomic_t xprtcnt;
+
+ /* Save xprt list */
+ struct list_head xprt_list;
+
+    /* Save total no. of client connections */
+ gf_atomic_t clntcnt;
+
+    /* Brick being cleaned up (victim) */
+ xlator_t *victim;
+
+    /* Flag recording whether the cleanup notify has been sent */
+ gf_boolean_t notify_down;
+};
+
+struct changelog_local {
+ inode_t *inode;
+ gf_boolean_t update_no_check;
+
+ changelog_log_data_t cld;
+
+ /**
+     * ->prev_entry is used in cases when there needs to be an
+     * additional changelog entry for the parent (e.g. rename).
+     * It's analogous to ->next in the singly linked list world,
+     * but we call it ->prev_entry... ha ha ha
+ */
+ struct changelog_local *prev_entry;
+
+ /* snap dependency changes */
+ chlog_fop_color_t color;
+};
+
+typedef struct changelog_local changelog_local_t;
+
+/* inode version is stored in inode ctx */
+typedef struct changelog_inode_ctx {
+ unsigned long iversion[CHANGELOG_MAX_TYPE];
+} changelog_inode_ctx_t;
+
+#define CHANGELOG_INODE_VERSION_TYPE(ctx, type) &(ctx->iversion[type])
+
+/**
+ * Optional Records:
+ * fops that need to save additional information request an array of
+ * @changelog_opt_t struct. The array is allocated via @iobufs.
+ */
+typedef enum {
+ CHANGELOG_OPT_REC_FOP,
+ CHANGELOG_OPT_REC_ENTRY,
+ CHANGELOG_OPT_REC_UINT32,
+} changelog_optional_rec_type_t;
+
+struct changelog_entry_fields {
+ uuid_t cef_uuid;
+ char *cef_bname;
+ char *cef_path;
+};
+
+typedef struct {
+ /**
+     * @co_convert can be used to do post-processing of the record before
+     * it's persisted to the CHANGELOG. If this is NULL, then the record
+     * is persisted as per its in-memory format.
+ */
+ size_t (*co_convert)(void *data, char *buffer, gf_boolean_t encode);
+
+ /* release routines */
+ void (*co_free)(void *data);
+
+ /* type of the field */
+ changelog_optional_rec_type_t co_type;
+
+ /**
+     * size of the 'valid' field in the union. This field is not used if
+ * @co_convert is specified.
+ */
+ size_t co_len;
+
+ union {
+ unsigned int co_uint32;
+ glusterfs_fop_t co_fop;
+ struct changelog_entry_fields co_entry;
+ };
+} changelog_opt_t;
+
+#define CHANGELOG_OPT_RECORD_LEN sizeof(changelog_opt_t)
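+
+/*
+ * Illustrative sizing sketch (assumed caller-side behaviour, not a helper
+ * defined here): a fop that needs N optional records asks
+ * changelog_local_init() for N extra records, the backing iobuf is sized
+ * roughly as N * CHANGELOG_OPT_RECORD_LEN plus the encoded payload, and
+ * each changelog_opt_t slot is then filled via the CHANGELOG_FILL_*
+ * macros further below.
+ */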
+
+/**
+ * helpers routines
+ */
+
+int
+changelog_thread_cleanup(xlator_t *this, pthread_t thr_id);
+
+void *
+changelog_get_usable_buffer(changelog_local_t *local);
+
+void
+changelog_set_usable_record_and_length(changelog_local_t *local, size_t len,
+ int xr);
+void
+changelog_local_cleanup(xlator_t *xl, changelog_local_t *local);
+changelog_local_t *
+changelog_local_init(xlator_t *this, inode_t *inode, uuid_t gfid,
+ int xtra_records, gf_boolean_t update_flag);
+int
+changelog_start_next_change(xlator_t *this, changelog_priv_t *priv, time_t ts,
+ gf_boolean_t finale);
+int
+changelog_open_journal(xlator_t *this, changelog_priv_t *priv);
+void
+changelog_fill_rollover_data(changelog_log_data_t *cld, gf_boolean_t is_last);
+int
+changelog_inject_single_event(xlator_t *this, changelog_priv_t *priv,
+ changelog_log_data_t *cld);
+size_t
+changelog_entry_length();
+int
+changelog_write(int fd, char *buffer, size_t len);
+int
+changelog_write_change(changelog_priv_t *priv, char *buffer, size_t len);
+int
+changelog_handle_change(xlator_t *this, changelog_priv_t *priv,
+ changelog_log_data_t *cld);
+void
+changelog_update(xlator_t *this, changelog_priv_t *priv,
+ changelog_local_t *local, changelog_log_type type);
+void *
+changelog_rollover(void *data);
+void *
+changelog_fsync_thread(void *data);
+int
+changelog_forget(xlator_t *this, inode_t *inode);
+int
+htime_update(xlator_t *this, changelog_priv_t *priv, time_t ts, char *buffer);
+int
+htime_open(xlator_t *this, changelog_priv_t *priv, time_t ts);
+int
+htime_create(xlator_t *this, changelog_priv_t *priv, time_t ts);
+
+/* Geo-Rep snapshot dependency changes */
+void
+changelog_color_fop_and_inc_cnt(xlator_t *this, changelog_priv_t *priv,
+ changelog_local_t *local);
+void
+changelog_inc_fop_cnt(xlator_t *this, changelog_priv_t *priv,
+ changelog_local_t *local);
+void
+changelog_dec_fop_cnt(xlator_t *this, changelog_priv_t *priv,
+ changelog_local_t *local);
+int
+changelog_barrier_notify(changelog_priv_t *priv, char *buf);
+void
+changelog_barrier_cleanup(xlator_t *this, changelog_priv_t *priv,
+ struct list_head *queue);
+void
+changelog_drain_white_fops(xlator_t *this, changelog_priv_t *priv);
+void
+changelog_drain_black_fops(xlator_t *this, changelog_priv_t *priv);
+
+/* Crash consistency of changelog wrt snapshot */
+int
+changelog_snap_logging_stop(xlator_t *this, changelog_priv_t *priv);
+int
+changelog_snap_logging_start(xlator_t *this, changelog_priv_t *priv);
+int
+changelog_snap_open(xlator_t *this, changelog_priv_t *priv);
+int
+changelog_snap_handle_ascii_change(xlator_t *this, changelog_log_data_t *cld);
+int
+changelog_snap_write_change(changelog_priv_t *priv, char *buffer, size_t len);
+
+/* Changelog barrier routines */
+void
+__chlog_barrier_enqueue(xlator_t *this, call_stub_t *stub);
+void
+__chlog_barrier_disable(xlator_t *this, struct list_head *queue);
+void
+chlog_barrier_dequeue_all(xlator_t *this, struct list_head *queue);
+call_stub_t *
+__chlog_barrier_dequeue(xlator_t *this, struct list_head *queue);
+int
+__chlog_barrier_enable(xlator_t *this, changelog_priv_t *priv);
+
+int32_t
+changelog_fill_entry_buf(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ changelog_local_t **local);
+
+/* event selection routines */
+void
+changelog_select_event(xlator_t *, changelog_ev_selector_t *, unsigned int);
+void
+changelog_deselect_event(xlator_t *, changelog_ev_selector_t *, unsigned int);
+int
+changelog_init_event_selection(xlator_t *, changelog_ev_selector_t *);
+int
+changelog_ev_selected(xlator_t *, changelog_ev_selector_t *, unsigned int);
+void
+changelog_dispatch_event(xlator_t *, changelog_priv_t *, changelog_event_t *);
+
+changelog_inode_ctx_t *
+__changelog_inode_ctx_get(xlator_t *, inode_t *, unsigned long **,
+ unsigned long *, changelog_log_type);
+int
+resolve_pargfid_to_path(xlator_t *this, const uuid_t gfid, char **path,
+ char *bname);
+
+/* macros */
+
+#define CHANGELOG_STACK_UNWIND(fop, frame, params...) \
+ do { \
+ changelog_local_t *__local = NULL; \
+ xlator_t *__xl = NULL; \
+ if (frame) { \
+ __local = frame->local; \
+ __xl = frame->this; \
+ frame->local = NULL; \
+ } \
+ STACK_UNWIND_STRICT(fop, frame, params); \
+ if (__local && __local->prev_entry) \
+ changelog_local_cleanup(__xl, __local->prev_entry); \
+ changelog_local_cleanup(__xl, __local); \
+ } while (0)
+
+#define CHANGELOG_IOBUF_REF(iobuf) \
+ do { \
+ if (iobuf) \
+ iobuf_ref(iobuf); \
+ } while (0)
+
+#define CHANGELOG_IOBUF_UNREF(iobuf) \
+ do { \
+ if (iobuf) \
+ iobuf_unref(iobuf); \
+ } while (0)
+
+#define CHANGELOG_FILL_BUFFER(buffer, off, val, len) \
+ do { \
+ memcpy(buffer + off, val, len); \
+ off += len; \
+ } while (0)
+
+#define SLICE_VERSION_UPDATE(slice) \
+ do { \
+ int i = 0; \
+ for (; i < CHANGELOG_MAX_TYPE; i++) { \
+ slice->changelog_version[i]++; \
+ } \
+ } while (0)
+
+#define CHANGELOG_FILL_UINT32(co, number, converter, xlen) \
+ do { \
+ co->co_convert = converter; \
+ co->co_free = NULL; \
+ co->co_type = CHANGELOG_OPT_REC_UINT32; \
+ co->co_uint32 = number; \
+ xlen += sizeof(unsigned int); \
+ } while (0)
+
+#define CHANGLOG_FILL_FOP_NUMBER(co, fop, converter, xlen) \
+ do { \
+ co->co_convert = converter; \
+ co->co_free = NULL; \
+ co->co_type = CHANGELOG_OPT_REC_FOP; \
+ co->co_fop = fop; \
+ xlen += sizeof(fop); \
+ } while (0)
+
+#define CHANGELOG_FILL_ENTRY(co, pargfid, bname, converter, freefn, xlen, \
+ label) \
+ do { \
+ co->co_convert = converter; \
+ co->co_free = freefn; \
+ co->co_type = CHANGELOG_OPT_REC_ENTRY; \
+ gf_uuid_copy(co->co_entry.cef_uuid, pargfid); \
+ co->co_entry.cef_bname = gf_strdup(bname); \
+ if (!co->co_entry.cef_bname) \
+ goto label; \
+ xlen += (UUID_CANONICAL_FORM_LEN + strlen(bname)); \
+ } while (0)
+
+#define CHANGELOG_FILL_ENTRY_DIR_PATH(co, pargfid, bname, converter, \
+ del_freefn, xlen, label, capture_del) \
+ do { \
+ co->co_convert = converter; \
+ co->co_free = del_freefn; \
+ co->co_type = CHANGELOG_OPT_REC_ENTRY; \
+ gf_uuid_copy(co->co_entry.cef_uuid, pargfid); \
+ co->co_entry.cef_bname = gf_strdup(bname); \
+ if (!co->co_entry.cef_bname) \
+ goto label; \
+ xlen += (UUID_CANONICAL_FORM_LEN + strlen(bname)); \
+ if (!capture_del || \
+ resolve_pargfid_to_path(this, pargfid, &(co->co_entry.cef_path), \
+ co->co_entry.cef_bname)) { \
+ co->co_entry.cef_path = gf_strdup("\0"); \
+ xlen += 1; \
+ } else { \
+ xlen += (strlen(co->co_entry.cef_path)); \
+ } \
+ } while (0)
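+
+/*
+ * Example usage (illustrative only; fop_fn, entry_fn and entry_free_fn are
+ * assumed names for the encoder's converter/free callbacks, which are not
+ * defined in this header):
+ *
+ *   co = changelog_get_usable_buffer(local);
+ *   CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+ *   co++;
+ *   CHANGELOG_FILL_ENTRY(co, loc->pargfid, loc->name,
+ *                        entry_fn, entry_free_fn, xtra_len, out);
+ *   changelog_set_usable_record_and_length(local, xtra_len, 2);
+ */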
+
+#define CHANGELOG_INIT(this, local, inode, gfid, xrec) \
+ local = changelog_local_init(this, inode, gfid, xrec, _gf_false)
+
+#define CHANGELOG_INIT_NOCHECK(this, local, inode, gfid, xrec) \
+ local = changelog_local_init(this, inode, gfid, xrec, _gf_true)
+
+#define CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, label) \
+ do { \
+ if (!priv->active) \
+ goto label; \
+ /* ignore rebalance process's activity. */ \
+ if ((frame->root->pid == GF_CLIENT_PID_DEFRAG) || \
+ (frame->root->pid == GF_CLIENT_PID_TIER_DEFRAG)) \
+ goto label; \
+ } while (0)
+
+/* If it is a METADATA entry and the fop number is GF_FOP_NULL, don't
+ * log it in the changelog as it is of no use. Also, if it were
+ * logged, then since slice-version checking is done for metadata
+ * entries, subsequent entries with a valid fop number that fall into
+ * the same changelog would be missed. Hence check for the boundary
+ * condition.
+ */
+#define CHANGELOG_OP_BOUNDARY_CHECK(frame, label) \
+ do { \
+ if (frame->root->op <= GF_FOP_NULL || \
+ frame->root->op >= GF_FOP_MAXVALUE) \
+ goto label; \
+ } while (0)
+
+/**
+ * ignore internal fops for all clients except AFR self-heal daemon
+ */
+#define CHANGELOG_IF_INTERNAL_FOP_THEN_GOTO(frame, dict, label) \
+ do { \
+ if ((frame->root->pid != GF_CLIENT_PID_SELF_HEALD) && dict && \
+ dict_get(dict, GLUSTERFS_INTERNAL_FOP_KEY)) \
+ goto label; \
+ } while (0)
+
+#define CHANGELOG_COND_GOTO(priv, cond, label) \
+ do { \
+ if (!priv->active || cond) \
+ goto label; \
+ } while (0)
+
+/* Begin: Geo-Rep snapshot dependency changes */
+
+#define DICT_ERROR -1
+#define BARRIER_OFF 0
+#define BARRIER_ON 1
+#define DICT_DEFAULT 2
+
+#define CHANGELOG_NOT_ON_THEN_GOTO(priv, ret, label) \
+ do { \
+ if (!priv->active) { \
+ gf_smsg(this->name, GF_LOG_WARNING, 0, \
+ CHANGELOG_MSG_CHANGELOG_NOT_ACTIVE, NULL); \
+ ret = 0; \
+ goto label; \
+ } \
+ } while (0)
+
+/* Log pthread error and goto label */
+#define CHANGELOG_PTHREAD_ERROR_HANDLE_0(ret, label) \
+ do { \
+ if (ret) { \
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_PTHREAD_ERROR, \
+ "error=%d", ret, NULL); \
+ ret = -1; \
+ goto label; \
+ } \
+    } while (0)
+
+/* Log pthread error, set flag and goto label */
+#define CHANGELOG_PTHREAD_ERROR_HANDLE_1(ret, label, flag) \
+ do { \
+ if (ret) { \
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_PTHREAD_ERROR, \
+ "error=%d", ret, NULL); \
+ ret = -1; \
+ flag = _gf_true; \
+ goto label; \
+ } \
+ } while (0)
+
+/* Log pthread error, unlock mutex and goto label */
+#define CHANGELOG_PTHREAD_ERROR_HANDLE_2(ret, label, mutex) \
+ do { \
+ if (ret) { \
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_PTHREAD_ERROR, \
+ "error=%d", ret, NULL); \
+ ret = -1; \
+ pthread_mutex_unlock(&mutex); \
+ goto label; \
+ } \
+ } while (0)
+
+/* End: Geo-Rep snapshot dependency changes */
+
+#endif /* _CHANGELOG_HELPERS_H */
diff --git a/xlators/features/changelog/src/changelog-mem-types.h b/xlators/features/changelog/src/changelog-mem-types.h
new file mode 100644
index 00000000000..a2d8a9cbe93
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-mem-types.h
@@ -0,0 +1,34 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _CHANGELOG_MEM_TYPES_H
+#define _CHANGELOG_MEM_TYPES_H
+
+#include <glusterfs/mem-types.h>
+
+enum gf_changelog_mem_types {
+ gf_changelog_mt_priv_t = gf_common_mt_end + 1,
+ gf_changelog_mt_str_t = gf_common_mt_end + 2,
+ gf_changelog_mt_batch_t = gf_common_mt_end + 3,
+ gf_changelog_mt_rt_t = gf_common_mt_end + 4,
+ gf_changelog_mt_inode_ctx_t = gf_common_mt_end + 5,
+ gf_changelog_mt_rpc_clnt_t = gf_common_mt_end + 6,
+ gf_changelog_mt_libgfchangelog_t = gf_common_mt_end + 7,
+ gf_changelog_mt_libgfchangelog_entry_t = gf_common_mt_end + 8,
+ gf_changelog_mt_libgfchangelog_rl_t = gf_common_mt_end + 9,
+ gf_changelog_mt_changelog_buffer_t = gf_common_mt_end + 10,
+ gf_changelog_mt_history_data_t = gf_common_mt_end + 11,
+ gf_changelog_mt_libgfchangelog_call_pool_t = gf_common_mt_end + 12,
+ gf_changelog_mt_libgfchangelog_event_t = gf_common_mt_end + 13,
+ gf_changelog_mt_ev_dispatcher_t = gf_common_mt_end + 14,
+ gf_changelog_mt_end
+};
+
+#endif
diff --git a/xlators/features/changelog/src/changelog-messages.h b/xlators/features/changelog/src/changelog-messages.h
new file mode 100644
index 00000000000..cb0e16c85d8
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-messages.h
@@ -0,0 +1,172 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+ */
+
+#ifndef _CHANGELOG_MESSAGES_H_
+#define _CHANGELOG_MESSAGES_H_
+
+#include <glusterfs/glfs-message-id.h>
+
+/* To add new message IDs, append new identifiers at the end of the list.
+ *
+ * Never remove a message ID. If it's not used anymore, you can rename it or
+ * leave it as it is, but not delete it. This is to prevent reutilization of
+ * IDs by other messages.
+ *
+ * The component name must match one of the entries defined in
+ * glfs-message-id.h.
+ */
+
+GLFS_MSGID(
+ CHANGELOG, CHANGELOG_MSG_OPEN_FAILED, CHANGELOG_MSG_BARRIER_FOP_FAILED,
+ CHANGELOG_MSG_VOL_MISCONFIGURED, CHANGELOG_MSG_RENAME_ERROR,
+ CHANGELOG_MSG_READ_ERROR, CHANGELOG_MSG_HTIME_ERROR,
+ CHANGELOG_MSG_PTHREAD_MUTEX_INIT_FAILED,
+ CHANGELOG_MSG_PTHREAD_COND_INIT_FAILED, CHANGELOG_MSG_CHILD_MISCONFIGURED,
+ CHANGELOG_MSG_DIR_OPTIONS_NOT_SET, CHANGELOG_MSG_CLOSE_ERROR,
+ CHANGELOG_MSG_PIPE_CREATION_ERROR, CHANGELOG_MSG_DICT_GET_FAILED,
+ CHANGELOG_MSG_BARRIER_INFO, CHANGELOG_MSG_BARRIER_ERROR,
+ CHANGELOG_MSG_GET_TIME_OP_FAILED, CHANGELOG_MSG_WRITE_FAILED,
+ CHANGELOG_MSG_PTHREAD_ERROR, CHANGELOG_MSG_INODE_NOT_FOUND,
+ CHANGELOG_MSG_FSYNC_OP_FAILED, CHANGELOG_MSG_TOTAL_LOG_INFO,
+ CHANGELOG_MSG_SNAP_INFO, CHANGELOG_MSG_SELECT_FAILED,
+ CHANGELOG_MSG_FCNTL_FAILED, CHANGELOG_MSG_BNOTIFY_INFO,
+ CHANGELOG_MSG_ENTRY_BUF_INFO, CHANGELOG_MSG_CHANGELOG_NOT_ACTIVE,
+ CHANGELOG_MSG_LOCAL_INIT_FAILED, CHANGELOG_MSG_NOTIFY_REGISTER_FAILED,
+ CHANGELOG_MSG_PROGRAM_NAME_REG_FAILED, CHANGELOG_MSG_HANDLE_PROBE_ERROR,
+ CHANGELOG_MSG_SET_FD_CONTEXT, CHANGELOG_MSG_FREEUP_FAILED,
+ CHANGELOG_MSG_RECONFIGURE, CHANGELOG_MSG_RPC_SUBMIT_REPLY_FAILED,
+ CHANGELOG_MSG_RPC_BUILD_ERROR, CHANGELOG_MSG_RPC_CONNECT_ERROR,
+ CHANGELOG_MSG_RPC_START_ERROR, CHANGELOG_MSG_BUFFER_STARVATION_ERROR,
+ CHANGELOG_MSG_SCAN_DIR_FAILED, CHANGELOG_MSG_FSETXATTR_FAILED,
+ CHANGELOG_MSG_FGETXATTR_FAILED, CHANGELOG_MSG_CLEANUP_ON_ACTIVE_REF,
+ CHANGELOG_MSG_DISPATCH_EVENT_FAILED, CHANGELOG_MSG_PUT_BUFFER_FAILED,
+ CHANGELOG_MSG_PTHREAD_COND_WAIT_FAILED, CHANGELOG_MSG_PTHREAD_CANCEL_FAILED,
+ CHANGELOG_MSG_INJECT_FSYNC_FAILED, CHANGELOG_MSG_CREATE_FRAME_FAILED,
+ CHANGELOG_MSG_FSTAT_OP_FAILED, CHANGELOG_MSG_LSEEK_OP_FAILED,
+ CHANGELOG_MSG_STRSTR_OP_FAILED, CHANGELOG_MSG_UNLINK_OP_FAILED,
+ CHANGELOG_MSG_DETECT_EMPTY_CHANGELOG_FAILED,
+ CHANGELOG_MSG_READLINK_OP_FAILED, CHANGELOG_MSG_EXPLICIT_ROLLOVER_FAILED,
+ CHANGELOG_MSG_RPCSVC_NOTIFY_FAILED, CHANGELOG_MSG_MEMORY_INIT_FAILED,
+ CHANGELOG_MSG_NO_MEMORY, CHANGELOG_MSG_HTIME_STAT_ERROR,
+ CHANGELOG_MSG_HTIME_CURRENT_ERROR, CHANGELOG_MSG_BNOTIFY_COND_INFO,
+ CHANGELOG_MSG_NO_HTIME_CURRENT, CHANGELOG_MSG_HTIME_CURRENT,
+ CHANGELOG_MSG_NEW_HTIME_FILE, CHANGELOG_MSG_MKDIR_ERROR,
+ CHANGELOG_MSG_PATH_NOT_FOUND, CHANGELOG_MSG_XATTR_INIT_FAILED,
+ CHANGELOG_MSG_WROTE_TO_CSNAP, CHANGELOG_MSG_UNUSED_0,
+ CHANGELOG_MSG_GET_BUFFER_FAILED, CHANGELOG_MSG_BARRIER_STATE_NOTIFY,
+ CHANGELOG_MSG_BARRIER_DISABLED, CHANGELOG_MSG_BARRIER_ALREADY_DISABLED,
+ CHANGELOG_MSG_BARRIER_ON_ERROR, CHANGELOG_MSG_BARRIER_ENABLE,
+ CHANGELOG_MSG_BARRIER_KEY_NOT_FOUND, CHANGELOG_MSG_ERROR_IN_DICT_GET,
+ CHANGELOG_MSG_UNUSED_1, CHANGELOG_MSG_UNUSED_2,
+ CHANGELOG_MSG_DEQUEUING_BARRIER_FOPS,
+ CHANGELOG_MSG_DEQUEUING_BARRIER_FOPS_FINISHED,
+ CHANGELOG_MSG_BARRIER_TIMEOUT, CHANGELOG_MSG_TIMEOUT_ADD_FAILED,
+ CHANGELOG_MSG_CLEANUP_ALREADY_SET);
+
+#define CHANGELOG_MSG_BARRIER_FOP_FAILED_STR \
+ "failed to barrier FOPs, disabling changelog barrier"
+#define CHANGELOG_MSG_MEMORY_INIT_FAILED_STR "memory accounting init failed"
+#define CHANGELOG_MSG_NO_MEMORY_STR "failed to create local memory pool"
+#define CHANGELOG_MSG_ENTRY_BUF_INFO_STR \
+ "Entry cannot be captured for gfid, Capturing DATA entry."
+#define CHANGELOG_MSG_PTHREAD_ERROR_STR "pthread error"
+#define CHANGELOG_MSG_PTHREAD_MUTEX_INIT_FAILED_STR "pthread_mutex_init failed"
+#define CHANGELOG_MSG_PTHREAD_COND_INIT_FAILED_STR "pthread_cond_init failed"
+#define CHANGELOG_MSG_HTIME_ERROR_STR "failed to update HTIME file"
+#define CHANGELOG_MSG_HTIME_STAT_ERROR_STR "unable to stat htime file"
+#define CHANGELOG_MSG_HTIME_CURRENT_ERROR_STR "Error extracting HTIME_CURRENT."
+#define CHANGELOG_MSG_UNLINK_OP_FAILED_STR "error unlinking empty changelog"
+#define CHANGELOG_MSG_RENAME_ERROR_STR "error renaming"
+#define CHANGELOG_MSG_MKDIR_ERROR_STR "unable to create directory"
+#define CHANGELOG_MSG_BNOTIFY_INFO_STR \
+ "Explicit rollover changelog signaling bnotify"
+#define CHANGELOG_MSG_BNOTIFY_COND_INFO_STR "Woke up: bnotify conditional wait"
+#define CHANGELOG_MSG_RECONFIGURE_STR "Reconfigure: Changelog Enable"
+#define CHANGELOG_MSG_NO_HTIME_CURRENT_STR \
+ "HTIME_CURRENT not found. Changelog enabled before init"
+#define CHANGELOG_MSG_HTIME_CURRENT_STR "HTIME_CURRENT"
+#define CHANGELOG_MSG_NEW_HTIME_FILE_STR \
+ "Changelog enable: Creating new HTIME file"
+#define CHANGELOG_MSG_FGETXATTR_FAILED_STR "fgetxattr failed"
+#define CHANGELOG_MSG_TOTAL_LOG_INFO_STR "changelog info"
+#define CHANGELOG_MSG_PTHREAD_COND_WAIT_FAILED_STR "pthread cond wait failed"
+#define CHANGELOG_MSG_INODE_NOT_FOUND_STR "inode not found"
+#define CHANGELOG_MSG_READLINK_OP_FAILED_STR \
+ "could not read the link from the gfid handle"
+#define CHANGELOG_MSG_OPEN_FAILED_STR "unable to open file"
+#define CHANGELOG_MSG_RPC_CONNECT_ERROR_STR "failed to connect back"
+#define CHANGELOG_MSG_BUFFER_STARVATION_ERROR_STR \
+ "Failed to get buffer for RPC dispatch"
+#define CHANGELOG_MSG_PTHREAD_CANCEL_FAILED_STR "could not cancel thread"
+#define CHANGELOG_MSG_FSTAT_OP_FAILED_STR "Could not stat (CHANGELOG)"
+#define CHANGELOG_MSG_LSEEK_OP_FAILED_STR "Could not lseek (changelog)"
+#define CHANGELOG_MSG_PATH_NOT_FOUND_STR \
+ "Could not find CHANGELOG in changelog path"
+#define CHANGELOG_MSG_FSYNC_OP_FAILED_STR "fsync failed"
+#define CHANGELOG_MSG_DETECT_EMPTY_CHANGELOG_FAILED_STR \
+ "Error detecting empty changelog"
+#define CHANGELOG_MSG_EXPLICIT_ROLLOVER_FAILED_STR \
+ "Fail snapshot because of previous errors"
+#define CHANGELOG_MSG_SCAN_DIR_FAILED_STR "scandir failed"
+#define CHANGELOG_MSG_FSETXATTR_FAILED_STR "fsetxattr failed"
+#define CHANGELOG_MSG_XATTR_INIT_FAILED_STR "Htime xattr initialization failed"
+#define CHANGELOG_MSG_SNAP_INFO_STR "log in call path"
+#define CHANGELOG_MSG_WRITE_FAILED_STR "error writing to disk"
+#define CHANGELOG_MSG_WROTE_TO_CSNAP_STR "Successfully wrote to csnap"
+#define CHANGELOG_MSG_GET_TIME_OP_FAILED_STR "Problem rolling over changelog(s)"
+#define CHANGELOG_MSG_BARRIER_INFO_STR "Explicit wakeup on barrier notify"
+#define CHANGELOG_MSG_SELECT_FAILED_STR "pthread_cond_timedwait failed"
+#define CHANGELOG_MSG_INJECT_FSYNC_FAILED_STR "failed to inject fsync event"
+#define CHANGELOG_MSG_LOCAL_INIT_FAILED_STR \
+ "changelog local initialization failed"
+#define CHANGELOG_MSG_GET_BUFFER_FAILED_STR "Failed to get buffer"
+#define CHANGELOG_MSG_SET_FD_CONTEXT_STR \
+ "could not set fd context(for release cbk)"
+#define CHANGELOG_MSG_DICT_GET_FAILED_STR "Barrier failed"
+#define CHANGELOG_MSG_BARRIER_STATE_NOTIFY_STR "Barrier notification"
+#define CHANGELOG_MSG_BARRIER_ERROR_STR \
+ "Received another barrier off notification while already off"
+#define CHANGELOG_MSG_BARRIER_DISABLED_STR "disabled changelog barrier"
+#define CHANGELOG_MSG_BARRIER_ALREADY_DISABLED_STR \
+ "Changelog barrier already disabled"
+#define CHANGELOG_MSG_BARRIER_ON_ERROR_STR \
+ "Received another barrier on notification when last one is not served yet"
+#define CHANGELOG_MSG_BARRIER_ENABLE_STR "Enabled changelog barrier"
+#define CHANGELOG_MSG_BARRIER_KEY_NOT_FOUND_STR "barrier key not found"
+#define CHANGELOG_MSG_ERROR_IN_DICT_GET_STR \
+ "Something went wrong in dict_get_str_boolean"
+#define CHANGELOG_MSG_DIR_OPTIONS_NOT_SET_STR "changelog-dir option is not set"
+#define CHANGELOG_MSG_FREEUP_FAILED_STR "could not cleanup bootstrapper"
+#define CHANGELOG_MSG_CHILD_MISCONFIGURED_STR \
+ "translator needs a single subvolume"
+#define CHANGELOG_MSG_VOL_MISCONFIGURED_STR \
+ "dangling volume. please check volfile"
+#define CHANGELOG_MSG_DEQUEUING_BARRIER_FOPS_STR \
+ "Dequeuing all the changelog barriered fops"
+#define CHANGELOG_MSG_DEQUEUING_BARRIER_FOPS_FINISHED_STR \
+ "Dequeuing changelog barriered fops is finished"
+#define CHANGELOG_MSG_BARRIER_TIMEOUT_STR \
+ "Disabling changelog barrier because of the timeout"
+#define CHANGELOG_MSG_TIMEOUT_ADD_FAILED_STR \
+ "Couldn't add changelog barrier timeout event"
+#define CHANGELOG_MSG_RPC_BUILD_ERROR_STR "failed to build rpc options"
+#define CHANGELOG_MSG_NOTIFY_REGISTER_FAILED_STR "failed to register notify"
+#define CHANGELOG_MSG_RPC_START_ERROR_STR "failed to start rpc"
+#define CHANGELOG_MSG_CREATE_FRAME_FAILED_STR "failed to create frame"
+#define CHANGELOG_MSG_RPC_SUBMIT_REPLY_FAILED_STR "failed to serialize reply"
+#define CHANGELOG_MSG_PROGRAM_NAME_REG_FAILED_STR "cannot register program"
+#define CHANGELOG_MSG_CHANGELOG_NOT_ACTIVE_STR \
+ "Changelog is not active, return success"
+#define CHANGELOG_MSG_PUT_BUFFER_FAILED_STR \
+ "failed to put buffer after consumption"
+#define CHANGELOG_MSG_CLEANUP_ALREADY_SET_STR \
+ "cleanup_starting flag is already set for xl"
+#define CHANGELOG_MSG_HANDLE_PROBE_ERROR_STR "xdr decoding error"
+#endif /* !_CHANGELOG_MESSAGES_H_ */
diff --git a/xlators/features/changelog/src/changelog-misc.h b/xlators/features/changelog/src/changelog-misc.h
new file mode 100644
index 00000000000..e2addc09414
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-misc.h
@@ -0,0 +1,131 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _CHANGELOG_MISC_H
+#define _CHANGELOG_MISC_H
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/common-utils.h>
+
+#define CHANGELOG_MAX_TYPE 4
+#define CHANGELOG_FILE_NAME "CHANGELOG"
+#define HTIME_FILE_NAME "HTIME"
+#define CSNAP_FILE_NAME "CHANGELOG.SNAP"
+#define HTIME_KEY "trusted.glusterfs.htime"
+#define HTIME_CURRENT "trusted.glusterfs.current_htime"
+#define HTIME_INITIAL_VALUE "0:0"
+
+#define CHANGELOG_VERSION_MAJOR 1
+#define CHANGELOG_VERSION_MINOR 2
+
+#define CHANGELOG_UNIX_SOCK DEFAULT_VAR_RUN_DIRECTORY "/changelog-%s.sock"
+#define CHANGELOG_TMP_UNIX_SOCK DEFAULT_VAR_RUN_DIRECTORY "/.%s%lu.sock"
+
+/**
+ * header starts with the version and the format of the changelog.
+ * 'version' is not of much use now.
+ */
+#define CHANGELOG_HEADER \
+ "GlusterFS Changelog | version: v%d.%d | encoding : %d\n"
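+
+/*
+ * With CHANGELOG_VERSION_MAJOR/MINOR above and ASCII encoding (value 2 in
+ * changelog_encoder_t below), the rendered header line reads:
+ *
+ *   GlusterFS Changelog | version: v1.2 | encoding : 2
+ */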
+
+#define CHANGELOG_MAKE_SOCKET_PATH(brick_path, sockpath, len) \
+ do { \
+ char xxh64[GF_XXH64_DIGEST_LENGTH * 2 + 1] = { \
+ 0, \
+ }; \
+ gf_xxh64_wrapper((unsigned char *)brick_path, strlen(brick_path), \
+ GF_XXHSUM64_DEFAULT_SEED, xxh64); \
+ (void)snprintf(sockpath, len, CHANGELOG_UNIX_SOCK, xxh64); \
+ } while (0)
+
+#define CHANGELOG_MAKE_TMP_SOCKET_PATH(brick_path, sockpath, len) \
+ do { \
+ unsigned long pid = 0; \
+ char xxh64[GF_XXH64_DIGEST_LENGTH * 2 + 1] = { \
+ 0, \
+ }; \
+ pid = (unsigned long)getpid(); \
+ gf_xxh64_wrapper((unsigned char *)brick_path, strlen(brick_path), \
+ GF_XXHSUM64_DEFAULT_SEED, xxh64); \
+ (void)snprintf(sockpath, len, CHANGELOG_TMP_UNIX_SOCK, xxh64, pid); \
+ } while (0)
+
+/**
+ * ... used by libgfchangelog.
+ */
+#define CHANGELOG_GET_HEADER_INFO(fd, buffer, len, enc, maj, min, elen) \
+ do { \
+ FILE *fp; \
+ int fd_dup; \
+ \
+ enc = -1; \
+ maj = -1; \
+ min = -1; \
+ fd_dup = dup(fd); \
+ \
+ if (fd_dup != -1) { \
+ fp = fdopen(fd_dup, "r"); \
+ if (fp) { \
+ if (fgets(buffer, len, fp)) { \
+ elen = strlen(buffer); \
+ sscanf(buffer, CHANGELOG_HEADER, &maj, &min, &enc); \
+ } \
+ fclose(fp); \
+ } else { \
+ sys_close(fd_dup); \
+ } \
+ } \
+ } while (0)
+
+#define CHANGELOG_FILL_HTIME_DIR(changelog_dir, path) \
+ do { \
+ snprintf(path, sizeof(path), "%s/htime", changelog_dir); \
+ } while (0)
+
+#define CHANGELOG_FILL_CSNAP_DIR(changelog_dir, path) \
+ do { \
+ snprintf(path, sizeof(path), "%s/csnap", changelog_dir); \
+ } while (0)
+/**
+ * everything after 'CHANGELOG_TYPE_METADATA_XATTR' is an internal type
+ * (i.e. none of the fops trigger these types of events), hence
+ * CHANGELOG_MAX_TYPE = 4
+ */
+typedef enum {
+ CHANGELOG_TYPE_DATA = 0,
+ CHANGELOG_TYPE_METADATA,
+ CHANGELOG_TYPE_ENTRY,
+ CHANGELOG_TYPE_METADATA_XATTR,
+ CHANGELOG_TYPE_ROLLOVER,
+ CHANGELOG_TYPE_FSYNC,
+} changelog_log_type;
+
+/* operation modes - RT for now */
+typedef enum {
+ CHANGELOG_MODE_RT = 0,
+} changelog_mode_t;
+
+/* encoder types */
+
+typedef enum {
+ CHANGELOG_ENCODE_MIN = 0,
+ CHANGELOG_ENCODE_BINARY,
+ CHANGELOG_ENCODE_ASCII,
+ CHANGELOG_ENCODE_MAX,
+} changelog_encoder_t;
+
+#define CHANGELOG_VALID_ENCODING(enc) \
+ (enc > CHANGELOG_ENCODE_MIN && enc < CHANGELOG_ENCODE_MAX)
+
+#define CHANGELOG_TYPE_IS_ENTRY(type) (type == CHANGELOG_TYPE_ENTRY)
+#define CHANGELOG_TYPE_IS_ROLLOVER(type) (type == CHANGELOG_TYPE_ROLLOVER)
+#define CHANGELOG_TYPE_IS_FSYNC(type) (type == CHANGELOG_TYPE_FSYNC)
+
+#endif /* _CHANGELOG_MISC_H */
diff --git a/xlators/features/changelog/src/changelog-rpc-common.c b/xlators/features/changelog/src/changelog-rpc-common.c
new file mode 100644
index 00000000000..125246a17e1
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-rpc-common.c
@@ -0,0 +1,359 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "changelog-rpc-common.h"
+#include "changelog-messages.h"
+
+#include <glusterfs/syscall.h>
+/**
+*****************************************************
+ Client Interface
+*****************************************************
+*/
+
+/**
+ * Initialize and return an RPC client object for a given unix
+ * domain socket.
+ */
+
+void *
+changelog_rpc_poller(void *arg)
+{
+ xlator_t *this = arg;
+
+ (void)gf_event_dispatch(this->ctx->event_pool);
+ return NULL;
+}
+
+struct rpc_clnt *
+changelog_rpc_client_init(xlator_t *this, void *cbkdata, char *sockfile,
+ rpc_clnt_notify_t fn)
+{
+ int ret = 0;
+ struct rpc_clnt *rpc = NULL;
+ dict_t *options = NULL;
+
+ if (!cbkdata)
+ cbkdata = this;
+
+ options = dict_new();
+ if (!options)
+ goto error_return;
+
+ ret = rpc_transport_unix_options_build(options, sockfile, 0);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_RPC_BUILD_ERROR,
+ NULL);
+ goto dealloc_dict;
+ }
+
+ rpc = rpc_clnt_new(options, this, this->name, 16);
+ if (!rpc)
+ goto dealloc_dict;
+
+ ret = rpc_clnt_register_notify(rpc, fn, cbkdata);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_MSG_NOTIFY_REGISTER_FAILED, NULL);
+ goto dealloc_rpc_clnt;
+ }
+
+ ret = rpc_clnt_start(rpc);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_RPC_START_ERROR,
+ NULL);
+ goto dealloc_rpc_clnt;
+ }
+
+ dict_unref(options);
+ return rpc;
+
+dealloc_rpc_clnt:
+ rpc_clnt_unref(rpc);
+dealloc_dict:
+ dict_unref(options);
+error_return:
+ return NULL;
+}
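+
+/*
+ * Illustrative caller-side sketch; notify_cbk, some_prog, procidx and req
+ * are placeholder names for the caller's rpc_clnt_notify_t callback, RPC
+ * program table, procedure index and request, none of which are defined
+ * in this file:
+ *
+ *   rpc = changelog_rpc_client_init(this, crpc, sockfile, notify_cbk);
+ *   if (!rpc)
+ *       return -1;
+ *   ret = changelog_invoke_rpc(this, rpc, &some_prog, procidx, &req);
+ */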
+
+/**
+ * Generic RPC client routine to dispatch a request to an
+ * RPC server.
+ */
+int
+changelog_rpc_sumbit_req(struct rpc_clnt *rpc, void *req, call_frame_t *frame,
+ rpc_clnt_prog_t *prog, int procnum,
+ struct iovec *payload, int payloadcnt,
+ struct iobref *iobref, xlator_t *this,
+ fop_cbk_fn_t cbkfn, xdrproc_t xdrproc)
+{
+ int ret = 0;
+ int count = 0;
+ struct iovec iov = {
+ 0,
+ };
+ struct iobuf *iobuf = NULL;
+ char new_iobref = 0;
+ ssize_t xdr_size = 0;
+
+ GF_ASSERT(this);
+
+ if (req) {
+ xdr_size = xdr_sizeof(xdrproc, req);
+
+ iobuf = iobuf_get2(this->ctx->iobuf_pool, xdr_size);
+ if (!iobuf) {
+ goto out;
+ };
+
+ if (!iobref) {
+ iobref = iobref_new();
+ if (!iobref) {
+ goto out;
+ }
+
+ new_iobref = 1;
+ }
+
+ iobref_add(iobref, iobuf);
+
+ iov.iov_base = iobuf->ptr;
+ iov.iov_len = iobuf_size(iobuf);
+
+ /* Create the xdr payload */
+ ret = xdr_serialize_generic(iov, req, xdrproc);
+ if (ret == -1) {
+ goto out;
+ }
+
+ iov.iov_len = ret;
+ count = 1;
+ }
+
+ ret = rpc_clnt_submit(rpc, prog, procnum, cbkfn, &iov, count, payload,
+ payloadcnt, iobref, frame, NULL, 0, NULL, 0, NULL);
+
+out:
+ if (new_iobref)
+ iobref_unref(iobref);
+ if (iobuf)
+ iobuf_unref(iobuf);
+ return ret;
+}
+
+/**
+ * Entry point to perform a remote procedure call
+ */
+int
+changelog_invoke_rpc(xlator_t *this, struct rpc_clnt *rpc,
+ rpc_clnt_prog_t *prog, int procidx, void *arg)
+{
+ int ret = 0;
+ call_frame_t *frame = NULL;
+ rpc_clnt_procedure_t *proc = NULL;
+
+ if (!this || !prog)
+ goto error_return;
+
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_CREATE_FRAME_FAILED,
+ NULL);
+ goto error_return;
+ }
+
+ proc = &prog->proctable[procidx];
+ if (proc->fn)
+ ret = proc->fn(frame, this, arg);
+
+ STACK_DESTROY(frame->root);
+ return ret;
+
+error_return:
+ return -1;
+}
+
+/**
+*****************************************************
+ Server Interface
+*****************************************************
+*/
+
+struct iobuf *
+__changelog_rpc_serialize_reply(rpcsvc_request_t *req, void *arg,
+ struct iovec *outmsg, xdrproc_t xdrproc)
+{
+ struct iobuf *iob = NULL;
+ ssize_t retlen = 0;
+ ssize_t rsp_size = 0;
+
+ rsp_size = xdr_sizeof(xdrproc, arg);
+ iob = iobuf_get2(req->svc->ctx->iobuf_pool, rsp_size);
+ if (!iob)
+ goto error_return;
+
+ iobuf_to_iovec(iob, outmsg);
+
+ retlen = xdr_serialize_generic(*outmsg, arg, xdrproc);
+ if (retlen == -1)
+ goto unref_iob;
+
+ outmsg->iov_len = retlen;
+ return iob;
+
+unref_iob:
+ iobuf_unref(iob);
+error_return:
+ return NULL;
+}
+
+int
+changelog_rpc_sumbit_reply(rpcsvc_request_t *req, void *arg,
+ struct iovec *payload, int payloadcount,
+ struct iobref *iobref, xdrproc_t xdrproc)
+{
+ int ret = -1;
+ struct iobuf *iob = NULL;
+ struct iovec iov = {
+ 0,
+ };
+ char new_iobref = 0;
+
+ if (!req)
+ goto return_ret;
+
+ if (!iobref) {
+ iobref = iobref_new();
+ if (!iobref)
+ goto return_ret;
+ new_iobref = 1;
+ }
+
+ iob = __changelog_rpc_serialize_reply(req, arg, &iov, xdrproc);
+ if (!iob)
+ gf_smsg("", GF_LOG_ERROR, 0, CHANGELOG_MSG_RPC_SUBMIT_REPLY_FAILED,
+ NULL);
+ else
+ iobref_add(iobref, iob);
+
+ ret = rpcsvc_submit_generic(req, &iov, 1, payload, payloadcount, iobref);
+
+ if (new_iobref)
+ iobref_unref(iobref);
+ if (iob)
+ iobuf_unref(iob);
+return_ret:
+ return ret;
+}
+
+void
+changelog_rpc_server_destroy(xlator_t *this, rpcsvc_t *rpc, char *sockfile,
+ rpcsvc_notify_t fn, struct rpcsvc_program **progs)
+{
+ rpcsvc_listener_t *listener = NULL;
+ rpcsvc_listener_t *next = NULL;
+ struct rpcsvc_program *prog = NULL;
+ rpc_transport_t *trans = NULL;
+
+ if (!rpc)
+ return;
+
+ while (*progs) {
+ prog = *progs;
+ (void)rpcsvc_program_unregister(rpc, prog);
+ progs++;
+ }
+
+ list_for_each_entry_safe(listener, next, &rpc->listeners, list)
+ {
+ if (listener->trans) {
+ trans = listener->trans;
+ rpc_transport_disconnect(trans, _gf_false);
+ }
+ }
+
+ (void)rpcsvc_unregister_notify(rpc, fn, this);
+
+    /* TODO: Avoid freeing the rpc object in the brick-multiplex case;
+       after the rpc object is freed, svc->rpclock gets corrupted and it
+       takes more time to detach a brick
+ */
+ if (!this->cleanup_starting) {
+ if (rpc->rxpool) {
+ mem_pool_destroy(rpc->rxpool);
+ rpc->rxpool = NULL;
+ }
+ GF_FREE(rpc);
+ }
+}
+
+rpcsvc_t *
+changelog_rpc_server_init(xlator_t *this, char *sockfile, void *cbkdata,
+ rpcsvc_notify_t fn, struct rpcsvc_program **progs)
+{
+ int ret = 0;
+ rpcsvc_t *rpc = NULL;
+ dict_t *options = NULL;
+ struct rpcsvc_program *prog = NULL;
+
+ if (!cbkdata)
+ cbkdata = this;
+
+ options = dict_new();
+ if (!options)
+ return NULL;
+
+ ret = rpcsvc_transport_unix_options_build(options, sockfile);
+ if (ret)
+ goto dealloc_dict;
+
+ rpc = rpcsvc_init(this, this->ctx, options, 8);
+ if (rpc == NULL) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_RPC_START_ERROR,
+ NULL);
+ goto dealloc_dict;
+ }
+
+ ret = rpcsvc_register_notify(rpc, fn, cbkdata);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_MSG_NOTIFY_REGISTER_FAILED, NULL);
+ goto dealloc_rpc;
+ }
+
+ ret = rpcsvc_create_listeners(rpc, options, this->name);
+ if (ret != 1) {
+ gf_msg_debug(this->name, 0, "failed to create listeners");
+ goto dealloc_rpc;
+ }
+
+ while (*progs) {
+ prog = *progs;
+ ret = rpcsvc_program_register(rpc, prog, _gf_false);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+                    CHANGELOG_MSG_PROGRAM_NAME_REG_FAILED, "name=%s",
+                    prog->progname, "prognum=%d", prog->prognum, "progver=%d",
+ prog->progver, NULL);
+ goto dealloc_rpc;
+ }
+
+ progs++;
+ }
+
+ dict_unref(options);
+ return rpc;
+
+dealloc_rpc:
+ GF_FREE(rpc);
+dealloc_dict:
+ dict_unref(options);
+ return NULL;
+}
diff --git a/xlators/features/changelog/src/changelog-rpc-common.h b/xlators/features/changelog/src/changelog-rpc-common.h
new file mode 100644
index 00000000000..4d9aa2c694b
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-rpc-common.h
@@ -0,0 +1,85 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __CHANGELOG_RPC_COMMON_H
+#define __CHANGELOG_RPC_COMMON_H
+
+#include "rpcsvc.h"
+#include "rpc-clnt.h"
+#include <glusterfs/gf-event.h>
+#include <glusterfs/call-stub.h>
+
+#include "changelog-xdr.h"
+#include "xdr-generic.h"
+
+#include "changelog.h"
+
+/**
+ * Let's keep this non-configurable for now.
+ */
+#define NR_ROTT_BUFFS 4
+#define NR_DISPATCHERS (NR_ROTT_BUFFS - 1)
+
+enum changelog_rpc_procnum {
+ CHANGELOG_RPC_PROC_NULL = 0,
+ CHANGELOG_RPC_PROBE_FILTER = 1,
+ CHANGELOG_RPC_PROC_MAX = 2,
+};
+
+#define CHANGELOG_RPC_PROGNUM 1885957735
+#define CHANGELOG_RPC_PROGVER 1
+
+/**
+ * reverse connection: data xfer path
+ */
+enum changelog_reverse_rpc_procnum {
+ CHANGELOG_REV_PROC_NULL = 0,
+ CHANGELOG_REV_PROC_EVENT = 1,
+ CHANGELOG_REV_PROC_MAX = 2,
+};
+
+#define CHANGELOG_REV_RPC_PROCNUM 1886350951
+#define CHANGELOG_REV_RPC_PROCVER 1
+
+typedef struct changelog_rpc {
+ rpcsvc_t *svc;
+ struct rpc_clnt *rpc;
+ char sock[UNIX_PATH_MAX]; /* tied to server */
+} changelog_rpc_t;
+
+/* event poller */
+void *
+changelog_rpc_poller(void *);
+
+/* CLIENT API */
+struct rpc_clnt *
+changelog_rpc_client_init(xlator_t *, void *, char *, rpc_clnt_notify_t);
+
+int
+changelog_rpc_sumbit_req(struct rpc_clnt *, void *, call_frame_t *,
+ rpc_clnt_prog_t *, int, struct iovec *, int,
+ struct iobref *, xlator_t *, fop_cbk_fn_t, xdrproc_t);
+
+int
+changelog_invoke_rpc(xlator_t *, struct rpc_clnt *, rpc_clnt_prog_t *, int,
+ void *);
+
+/* SERVER API */
+int
+changelog_rpc_sumbit_reply(rpcsvc_request_t *, void *, struct iovec *, int,
+ struct iobref *, xdrproc_t);
+rpcsvc_t *
+changelog_rpc_server_init(xlator_t *, char *, void *, rpcsvc_notify_t,
+ struct rpcsvc_program **);
+void
+changelog_rpc_server_destroy(xlator_t *, rpcsvc_t *, char *, rpcsvc_notify_t,
+ struct rpcsvc_program **);
+
+#endif
diff --git a/xlators/features/changelog/src/changelog-rpc.c b/xlators/features/changelog/src/changelog-rpc.c
new file mode 100644
index 00000000000..440b88091a6
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-rpc.c
@@ -0,0 +1,440 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <glusterfs/syscall.h>
+#include "changelog-rpc.h"
+#include "changelog-mem-types.h"
+#include "changelog-ev-handle.h"
+
+static struct rpcsvc_program *changelog_programs[];
+
+static void
+changelog_cleanup_dispatchers(xlator_t *this, changelog_priv_t *priv, int count)
+{
+ for (count--; count >= 0; count--) {
+ (void)changelog_thread_cleanup(this, priv->ev_dispatcher[count]);
+ priv->ev_dispatcher[count] = 0;
+ }
+}
+
+int
+changelog_cleanup_rpc_threads(xlator_t *this, changelog_priv_t *priv)
+{
+ int ret = 0;
+ changelog_clnt_t *conn = NULL;
+
+ conn = &priv->connections;
+ if (!conn)
+ return 0;
+
+ /** terminate RPC thread(s) */
+ ret = changelog_thread_cleanup(this, priv->connector);
+ if (ret != 0)
+ goto error_return;
+ priv->connector = 0;
+
+ /** terminate dispatcher thread(s) */
+ changelog_cleanup_dispatchers(this, priv, priv->nr_dispatchers);
+
+ /* destroy locks */
+ ret = pthread_mutex_destroy(&conn->pending_lock);
+ if (ret != 0)
+ goto error_return;
+ ret = pthread_cond_destroy(&conn->pending_cond);
+ if (ret != 0)
+ goto error_return;
+ ret = LOCK_DESTROY(&conn->active_lock);
+ if (ret != 0)
+ goto error_return;
+ ret = LOCK_DESTROY(&conn->wait_lock);
+ if (ret != 0)
+ goto error_return;
+ return 0;
+
+error_return:
+ return -1;
+}
+
+static int
+changelog_init_rpc_threads(xlator_t *this, changelog_priv_t *priv, rbuf_t *rbuf,
+ int nr_dispatchers)
+{
+ int j = 0;
+ int ret = 0;
+ changelog_clnt_t *conn = NULL;
+
+ conn = &priv->connections;
+
+ conn->this = this;
+ conn->rbuf = rbuf;
+ conn->sequence = 1; /* start with sequence number one */
+
+ INIT_LIST_HEAD(&conn->pending);
+ INIT_LIST_HEAD(&conn->active);
+ INIT_LIST_HEAD(&conn->waitq);
+
+ ret = pthread_mutex_init(&conn->pending_lock, NULL);
+ if (ret)
+ goto error_return;
+ ret = pthread_cond_init(&conn->pending_cond, NULL);
+ if (ret)
+ goto cleanup_pending_lock;
+
+ ret = LOCK_INIT(&conn->active_lock);
+ if (ret)
+ goto cleanup_pending_cond;
+ ret = LOCK_INIT(&conn->wait_lock);
+ if (ret)
+ goto cleanup_active_lock;
+
+ /* spawn reverse connection thread */
+ ret = gf_thread_create(&priv->connector, NULL, changelog_ev_connector, conn,
+ "clogecon");
+ if (ret != 0)
+ goto cleanup_wait_lock;
+
+ /* spawn dispatcher thread(s) */
+ priv->ev_dispatcher = GF_CALLOC(nr_dispatchers, sizeof(pthread_t),
+ gf_changelog_mt_ev_dispatcher_t);
+ if (!priv->ev_dispatcher)
+ goto cleanup_connector;
+
+ /* spawn dispatcher threads */
+ for (; j < nr_dispatchers; j++) {
+ ret = gf_thread_create(&priv->ev_dispatcher[j], NULL,
+ changelog_ev_dispatch, conn, "clogd%03hx",
+ j & 0x3ff);
+ if (ret != 0) {
+ changelog_cleanup_dispatchers(this, priv, j);
+ break;
+ }
+ }
+
+ if (ret != 0)
+ goto cleanup_connector;
+
+ priv->nr_dispatchers = nr_dispatchers;
+ return 0;
+
+cleanup_connector:
+ (void)pthread_cancel(priv->connector);
+cleanup_wait_lock:
+ LOCK_DESTROY(&conn->wait_lock);
+cleanup_active_lock:
+ LOCK_DESTROY(&conn->active_lock);
+cleanup_pending_cond:
+ (void)pthread_cond_destroy(&conn->pending_cond);
+cleanup_pending_lock:
+ (void)pthread_mutex_destroy(&conn->pending_lock);
+error_return:
+ return -1;
+}
+
+int
+changelog_rpcsvc_notify(rpcsvc_t *rpc, void *xl, rpcsvc_event_t event,
+ void *data)
+{
+ xlator_t *this = NULL;
+ rpc_transport_t *trans = NULL;
+ rpc_transport_t *xprt = NULL;
+ rpc_transport_t *xp_next = NULL;
+ changelog_priv_t *priv = NULL;
+ uint64_t listnercnt = 0;
+ uint64_t xprtcnt = 0;
+ uint64_t clntcnt = 0;
+ rpcsvc_listener_t *listener = NULL;
+ rpcsvc_listener_t *next = NULL;
+ gf_boolean_t listner_found = _gf_false;
+ socket_private_t *sockpriv = NULL;
+
+ if (!xl || !data || !rpc) {
+ gf_msg_callingfn("changelog", GF_LOG_WARNING, 0,
+ CHANGELOG_MSG_RPCSVC_NOTIFY_FAILED,
+ "Calling rpc_notify without initializing");
+ goto out;
+ }
+
+ this = xl;
+ trans = data;
+ priv = this->private;
+
+ if (!priv) {
+ gf_msg_callingfn("changelog", GF_LOG_WARNING, 0,
+ CHANGELOG_MSG_RPCSVC_NOTIFY_FAILED,
+ "Calling rpc_notify without priv initializing");
+ goto out;
+ }
+
+ if (event == RPCSVC_EVENT_ACCEPT) {
+ GF_ATOMIC_INC(priv->xprtcnt);
+ LOCK(&priv->lock);
+ {
+ list_add_tail(&trans->list, &priv->xprt_list);
+ }
+ UNLOCK(&priv->lock);
+ goto out;
+ }
+
+ if (event == RPCSVC_EVENT_DISCONNECT) {
+ list_for_each_entry_safe(listener, next, &rpc->listeners, list)
+ {
+ if (listener && listener->trans) {
+ if (listener->trans == trans) {
+ listnercnt = GF_ATOMIC_DEC(priv->listnercnt);
+ listner_found = _gf_true;
+ rpcsvc_listener_destroy(listener);
+ }
+ }
+ }
+
+ if (listnercnt > 0) {
+ goto out;
+ }
+ if (listner_found) {
+ LOCK(&priv->lock);
+ list_for_each_entry_safe(xprt, xp_next, &priv->xprt_list, list)
+ {
+ sockpriv = (socket_private_t *)(xprt->private);
+ gf_log("changelog", GF_LOG_INFO,
+ "Send disconnect"
+ " on socket %d",
+ sockpriv->sock);
+ rpc_transport_disconnect(xprt, _gf_false);
+ }
+ UNLOCK(&priv->lock);
+ goto out;
+ }
+ LOCK(&priv->lock);
+ {
+ list_del_init(&trans->list);
+ }
+ UNLOCK(&priv->lock);
+
+ xprtcnt = GF_ATOMIC_DEC(priv->xprtcnt);
+ clntcnt = GF_ATOMIC_GET(priv->clntcnt);
+ if (!xprtcnt && !clntcnt) {
+ changelog_process_cleanup_event(this);
+ }
+ }
+
+out:
+ return 0;
+}
+
+void
+changelog_process_cleanup_event(xlator_t *this)
+{
+ gf_boolean_t cleanup_notify = _gf_false;
+ changelog_priv_t *priv = NULL;
+ char sockfile[UNIX_PATH_MAX] = {
+ 0,
+ };
+
+ if (!this)
+ return;
+ priv = this->private;
+ if (!priv)
+ return;
+
+ LOCK(&priv->lock);
+ {
+ cleanup_notify = priv->notify_down;
+ priv->notify_down = _gf_true;
+ }
+ UNLOCK(&priv->lock);
+
+ if (priv->victim && !cleanup_notify) {
+ default_notify(this, GF_EVENT_PARENT_DOWN, priv->victim);
+
+ if (priv->rpc) {
+ /* sockfile path could have been saved to avoid this */
+ CHANGELOG_MAKE_SOCKET_PATH(priv->changelog_brick, sockfile,
+ UNIX_PATH_MAX);
+ sys_unlink(sockfile);
+ (void)rpcsvc_unregister_notify(priv->rpc, changelog_rpcsvc_notify,
+ this);
+ if (priv->rpc->rxpool) {
+ mem_pool_destroy(priv->rpc->rxpool);
+ priv->rpc->rxpool = NULL;
+ }
+ GF_FREE(priv->rpc);
+ priv->rpc = NULL;
+ }
+ }
+}
+
+void
+changelog_destroy_rpc_listner(xlator_t *this, changelog_priv_t *priv)
+{
+ char sockfile[UNIX_PATH_MAX] = {
+ 0,
+ };
+
+ /* sockfile path could have been saved to avoid this */
+ CHANGELOG_MAKE_SOCKET_PATH(priv->changelog_brick, sockfile, UNIX_PATH_MAX);
+ changelog_rpc_server_destroy(this, priv->rpc, sockfile,
+ changelog_rpcsvc_notify, changelog_programs);
+}
+
+rpcsvc_t *
+changelog_init_rpc_listener(xlator_t *this, changelog_priv_t *priv,
+ rbuf_t *rbuf, int nr_dispatchers)
+{
+ int ret = 0;
+ char sockfile[UNIX_PATH_MAX] = {
+ 0,
+ };
+ rpcsvc_t *svcp;
+
+ ret = changelog_init_rpc_threads(this, priv, rbuf, nr_dispatchers);
+ if (ret)
+ return NULL;
+
+ CHANGELOG_MAKE_SOCKET_PATH(priv->changelog_brick, sockfile, UNIX_PATH_MAX);
+ (void)sys_unlink(sockfile);
+ svcp = changelog_rpc_server_init(
+ this, sockfile, NULL, changelog_rpcsvc_notify, changelog_programs);
+ return svcp;
+}
+
+void
+changelog_rpc_clnt_cleanup(changelog_rpc_clnt_t *crpc)
+{
+ if (!crpc)
+ return;
+ crpc->c_clnt = NULL;
+ LOCK_DESTROY(&crpc->lock);
+ GF_FREE(crpc);
+}
+
+static changelog_rpc_clnt_t *
+changelog_rpc_clnt_init(xlator_t *this, changelog_probe_req *rpc_req,
+ changelog_clnt_t *c_clnt)
+{
+ int ret = 0;
+ changelog_rpc_clnt_t *crpc = NULL;
+
+ crpc = GF_CALLOC(1, sizeof(*crpc), gf_changelog_mt_rpc_clnt_t);
+ if (!crpc)
+ goto error_return;
+ INIT_LIST_HEAD(&crpc->list);
+
+ /* Take a ref, the last unref will be on RPC_CLNT_DESTROY
+ * which comes as a result of last rpc_clnt_unref.
+ */
+ GF_ATOMIC_INIT(crpc->ref, 1);
+ changelog_set_disconnect_flag(crpc, _gf_false);
+
+ crpc->filter = rpc_req->filter;
+ (void)memcpy(crpc->sock, rpc_req->sock, strlen(rpc_req->sock));
+
+ crpc->this = this;
+ crpc->c_clnt = c_clnt;
+ crpc->cleanup = changelog_rpc_clnt_cleanup;
+
+ ret = LOCK_INIT(&crpc->lock);
+ if (ret != 0)
+ goto dealloc_crpc;
+ return crpc;
+
+dealloc_crpc:
+ GF_FREE(crpc);
+error_return:
+ return NULL;
+}
+
+/**
+ * Actor declarations
+ */
+
+/**
+ * @probe_handler
+ * A probe RPC call spawns a connection back to the caller. The caller also
+ * passes a hint which acts as a filter for selecting updates.
+ */
+
+int
+changelog_handle_probe(rpcsvc_request_t *req)
+{
+ int ret = 0;
+ xlator_t *this = NULL;
+ rpcsvc_t *svc = NULL;
+ changelog_priv_t *priv = NULL;
+ changelog_clnt_t *c_clnt = NULL;
+ changelog_rpc_clnt_t *crpc = NULL;
+
+ changelog_probe_req rpc_req = {
+ 0,
+ };
+ changelog_probe_rsp rpc_rsp = {
+ 0,
+ };
+
+ this = req->trans->xl;
+ if (this->cleanup_starting) {
+ gf_smsg(this->name, GF_LOG_DEBUG, 0, CHANGELOG_MSG_CLEANUP_ALREADY_SET,
+ NULL);
+ return 0;
+ }
+
+ ret = xdr_to_generic(req->msg[0], &rpc_req,
+ (xdrproc_t)xdr_changelog_probe_req);
+ if (ret < 0) {
+ gf_smsg("", GF_LOG_ERROR, 0, CHANGELOG_MSG_HANDLE_PROBE_ERROR, NULL);
+ req->rpc_err = GARBAGE_ARGS;
+ goto handle_xdr_error;
+ }
+
+ /* ->xl hidden in rpcsvc */
+ svc = rpcsvc_request_service(req);
+ this = svc->xl;
+ priv = this->private;
+ c_clnt = &priv->connections;
+
+ crpc = changelog_rpc_clnt_init(this, &rpc_req, c_clnt);
+ if (!crpc)
+ goto handle_xdr_error;
+
+ changelog_ev_queue_connection(c_clnt, crpc);
+ rpc_rsp.op_ret = 0;
+
+ goto submit_rpc;
+
+handle_xdr_error:
+ rpc_rsp.op_ret = -1;
+submit_rpc:
+ (void)changelog_rpc_sumbit_reply(req, &rpc_rsp, NULL, 0, NULL,
+ (xdrproc_t)xdr_changelog_probe_rsp);
+ return 0;
+}
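+
+/*
+ * Summary of the probe flow as coded above: the consumer (e.g.
+ * libgfchangelog) sends a probe carrying a filter and the path of its own
+ * listener socket; both are recorded in a changelog_rpc_clnt_t which is
+ * queued on the connection list, after which the connector thread dials
+ * back to the consumer and the dispatcher threads stream selected events
+ * over that reverse connection.
+ */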
+
+/**
+ * RPC declarations
+ */
+
+static rpcsvc_actor_t changelog_svc_actors[CHANGELOG_RPC_PROC_MAX] = {
+ [CHANGELOG_RPC_PROBE_FILTER] = {"CHANGELOG PROBE FILTER",
+ changelog_handle_probe, NULL,
+ CHANGELOG_RPC_PROBE_FILTER, DRC_NA, 0},
+};
+
+static struct rpcsvc_program changelog_svc_prog = {
+ .progname = CHANGELOG_RPC_PROGNAME,
+ .prognum = CHANGELOG_RPC_PROGNUM,
+ .progver = CHANGELOG_RPC_PROGVER,
+ .numactors = CHANGELOG_RPC_PROC_MAX,
+ .actors = changelog_svc_actors,
+ .synctask = _gf_true,
+};
+
+static struct rpcsvc_program *changelog_programs[] = {
+ &changelog_svc_prog,
+ NULL,
+};
diff --git a/xlators/features/changelog/src/changelog-rpc.h b/xlators/features/changelog/src/changelog-rpc.h
new file mode 100644
index 00000000000..b1707565249
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-rpc.h
@@ -0,0 +1,31 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __CHANGELOG_RPC_H
+#define __CHANGELOG_RPC_H
+
+#include <glusterfs/xlator.h>
+#include "changelog-helpers.h"
+
+/* one time */
+#include "socket.h"
+#include "changelog-rpc-common.h"
+
+#define CHANGELOG_RPC_PROGNAME "GlusterFS Changelog"
+
+rpcsvc_t *
+changelog_init_rpc_listener(xlator_t *, changelog_priv_t *, rbuf_t *, int);
+
+void
+changelog_destroy_rpc_listner(xlator_t *, changelog_priv_t *);
+
+int
+changelog_cleanup_rpc_threads(xlator_t *this, changelog_priv_t *priv);
+#endif
diff --git a/xlators/features/changelog/src/changelog-rt.c b/xlators/features/changelog/src/changelog-rt.c
new file mode 100644
index 00000000000..841545ae359
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-rt.c
@@ -0,0 +1,66 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/logging.h>
+
+#include "changelog-rt.h"
+#include "changelog-mem-types.h"
+
+int
+changelog_rt_init(xlator_t *this, changelog_dispatcher_t *cd)
+{
+ changelog_rt_t *crt = NULL;
+
+ crt = GF_CALLOC(1, sizeof(*crt), gf_changelog_mt_rt_t);
+ if (!crt)
+ return -1;
+
+ LOCK_INIT(&crt->lock);
+
+ cd->cd_data = crt;
+ cd->dispatchfn = &changelog_rt_enqueue;
+
+ return 0;
+}
+
+int
+changelog_rt_fini(xlator_t *this, changelog_dispatcher_t *cd)
+{
+ changelog_rt_t *crt = NULL;
+
+ crt = cd->cd_data;
+
+ LOCK_DESTROY(&crt->lock);
+ GF_FREE(crt);
+
+ return 0;
+}
+
+int
+changelog_rt_enqueue(xlator_t *this, changelog_priv_t *priv, void *cbatch,
+ changelog_log_data_t *cld_0, changelog_log_data_t *cld_1)
+{
+ int ret = 0;
+ changelog_rt_t *crt = NULL;
+
+ crt = (changelog_rt_t *)cbatch;
+
+ LOCK(&crt->lock);
+ {
+ ret = changelog_handle_change(this, priv, cld_0);
+ if (!ret && cld_1)
+ ret = changelog_handle_change(this, priv, cld_1);
+ }
+ UNLOCK(&crt->lock);
+
+ return ret;
+}
diff --git a/xlators/features/changelog/src/changelog-rt.h b/xlators/features/changelog/src/changelog-rt.h
new file mode 100644
index 00000000000..28b9827d85b
--- /dev/null
+++ b/xlators/features/changelog/src/changelog-rt.h
@@ -0,0 +1,33 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _CHANGELOG_RT_H
+#define _CHANGELOG_RT_H
+
+#include <glusterfs/locking.h>
+#include <glusterfs/timer.h>
+#include "pthread.h"
+
+#include "changelog-helpers.h"
+
+/* unused as of now - maybe it will be needed later */
+typedef struct changelog_rt {
+ gf_lock_t lock;
+} changelog_rt_t;
+
+int
+changelog_rt_init(xlator_t *this, changelog_dispatcher_t *cd);
+int
+changelog_rt_fini(xlator_t *this, changelog_dispatcher_t *cd);
+int
+changelog_rt_enqueue(xlator_t *this, changelog_priv_t *priv, void *cbatch,
+ changelog_log_data_t *cld_0, changelog_log_data_t *cld_1);
+
+#endif /* _CHANGELOG_RT_H */
diff --git a/xlators/features/changelog/src/changelog.c b/xlators/features/changelog/src/changelog.c
new file mode 100644
index 00000000000..6a6e5af859e
--- /dev/null
+++ b/xlators/features/changelog/src/changelog.c
@@ -0,0 +1,2989 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/syscall.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/iobuf.h>
+
+#include "changelog-rt.h"
+
+#include "changelog-encoders.h"
+#include "changelog-mem-types.h"
+#include "changelog-messages.h"
+
+#include <pthread.h>
+#include <signal.h>
+
+#include "changelog-rpc.h"
+#include "errno.h"
+
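+/* Operation-mode table mapping each mode to its init/fini routines;
+ * only the real-time mode exists at present. */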
+static struct changelog_bootstrap cb_bootstrap[] = {
+ {
+ .mode = CHANGELOG_MODE_RT,
+ .ctor = changelog_rt_init,
+ .dtor = changelog_rt_fini,
+ },
+};
+
+static int
+changelog_init_rpc(xlator_t *this, changelog_priv_t *priv);
+
+static int
+changelog_init(xlator_t *this, changelog_priv_t *priv);
+
+/* Entry operations - TYPE III */
+
+/**
+ * entry operations do not undergo inode version checking.
+ */
+
+/* {{{ */
+
+/* rmdir */
+
+int32_t
+changelog_rmdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_ENTRY);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(rmdir, frame, op_ret, op_errno, preparent,
+ postparent, xdata);
+ return 0;
+}
+
+int32_t
+changelog_rmdir_resume(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ int xflags, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+
+ priv = this->private;
+
+ gf_msg_debug(this->name, 0, "Dequeue rmdir");
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_rmdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rmdir, loc, xflags, xdata);
+ return 0;
+}
+
+int32_t
+changelog_rmdir(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflags,
+ dict_t *xdata)
+{
+ size_t xtra_len = 0;
+ changelog_priv_t *priv = NULL;
+ changelog_opt_t *co = NULL;
+ call_stub_t *stub = NULL;
+ struct list_head queue = {
+ 0,
+ };
+ gf_boolean_t barrier_enabled = _gf_false;
+
+ INIT_LIST_HEAD(&queue);
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ CHANGELOG_INIT_NOCHECK(this, frame->local, NULL, loc->inode->gfid, 2);
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+
+ co++;
+ if (priv->capture_del_path) {
+ CHANGELOG_FILL_ENTRY_DIR_PATH(co, loc->pargfid, loc->name, del_entry_fn,
+ del_entry_free_fn, xtra_len, wind,
+ _gf_true);
+ } else {
+ CHANGELOG_FILL_ENTRY_DIR_PATH(co, loc->pargfid, loc->name, del_entry_fn,
+ del_entry_free_fn, xtra_len, wind,
+ _gf_false);
+ }
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 2);
+
+ /* changelog barrier */
+    /* Color assignment and increment of fop_cnt for rmdir/unlink/rename
+     * should be done within the priv lock if the changelog barrier is not
+     * enabled. Otherwise, if the counter has not yet been incremented, the
+     * draining thread may wake up and publish the changelog while these fops
+     * still hit the disk later and end up in the snapped volume, whereas the
+     * intention is that these fops should not appear in the snapped volume.
+     */
+ LOCK(&priv->lock);
+ {
+ if ((barrier_enabled = priv->barrier_enabled)) {
+ stub = fop_rmdir_stub(frame, changelog_rmdir_resume, loc, xflags,
+ xdata);
+ if (!stub)
+ __chlog_barrier_disable(this, &queue);
+ else
+ __chlog_barrier_enqueue(this, stub);
+ } else {
+ ((changelog_local_t *)frame->local)->color = priv->current_color;
+ changelog_inc_fop_cnt(this, priv, frame->local);
+ }
+ }
+ UNLOCK(&priv->lock);
+
+ if (barrier_enabled && stub) {
+ gf_msg_debug(this->name, 0, "Enqueue rmdir");
+ goto out;
+ }
+ if (barrier_enabled && !stub) {
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM,
+ CHANGELOG_MSG_BARRIER_FOP_FAILED, "fop=rmdir", NULL);
+ chlog_barrier_dequeue_all(this, &queue);
+ }
+
+ /* changelog barrier */
+
+wind:
+ STACK_WIND(frame, changelog_rmdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rmdir, loc, xflags, xdata);
+out:
+ return 0;
+}
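+
+/* The barrier handling above (enqueue a stub under priv->lock while the
+ * changelog barrier is enabled, otherwise color the fop and bump its
+ * count) is repeated for the remaining entry fops below: unlink, rename,
+ * link, mkdir, symlink, mknod and create. */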
+
+/* unlink */
+
+int32_t
+changelog_unlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_ENTRY);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(unlink, frame, op_ret, op_errno, preparent,
+ postparent, xdata);
+ return 0;
+}
+
+int32_t
+changelog_unlink_resume(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ int xflags, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+
+ priv = this->private;
+
+ gf_msg_debug(this->name, 0, "Dequeue unlink");
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_unlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->unlink, loc, xflags, xdata);
+ return 0;
+}
+
+int32_t
+changelog_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflags,
+ dict_t *xdata)
+{
+ size_t xtra_len = 0;
+ changelog_priv_t *priv = NULL;
+ changelog_opt_t *co = NULL;
+ call_stub_t *stub = NULL;
+ struct list_head queue = {
+ 0,
+ };
+ gf_boolean_t barrier_enabled = _gf_false;
+ dht_changelog_rename_info_t *info = NULL;
+ int ret = 0;
+ char *old_name = NULL;
+ char *new_name = NULL;
+ char *nname = NULL;
+
+ INIT_LIST_HEAD(&queue);
+ priv = this->private;
+
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ ret = dict_get_bin(xdata, DHT_CHANGELOG_RENAME_OP_KEY, (void **)&info);
+ if (!ret) { /* special case: unlink considered as rename */
+ /* 3 == fop + oldloc + newloc */
+ old_name = alloca(info->oldname_len);
+ new_name = alloca(info->newname_len);
+ CHANGELOG_INIT_NOCHECK(this, frame->local, NULL, loc->inode->gfid, 3);
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, GF_FOP_RENAME, fop_fn, xtra_len);
+
+ co++;
+ strncpy(old_name, info->buffer, info->oldname_len);
+ CHANGELOG_FILL_ENTRY(co, info->old_pargfid, old_name, entry_fn,
+ entry_free_fn, xtra_len, wind);
+
+ co++;
+ /* new name resides just after old name */
+ nname = info->buffer + info->oldname_len;
+ strncpy(new_name, nname, info->newname_len);
+ CHANGELOG_FILL_ENTRY(co, info->new_pargfid, new_name, entry_fn,
+ entry_free_fn, xtra_len, wind);
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 3);
+ } else { /* default unlink */
+ CHANGELOG_IF_INTERNAL_FOP_THEN_GOTO(frame, xdata, wind);
+ CHANGELOG_INIT_NOCHECK(this, frame->local, NULL, loc->inode->gfid, 2);
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+
+ co++;
+ if (priv->capture_del_path) {
+ CHANGELOG_FILL_ENTRY_DIR_PATH(co, loc->pargfid, loc->name,
+ del_entry_fn, del_entry_free_fn,
+ xtra_len, wind, _gf_true);
+ } else {
+ CHANGELOG_FILL_ENTRY_DIR_PATH(co, loc->pargfid, loc->name,
+ del_entry_fn, del_entry_free_fn,
+ xtra_len, wind, _gf_false);
+ }
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 2);
+ }
+
+ /* changelog barrier */
+ LOCK(&priv->lock);
+ {
+ if ((barrier_enabled = priv->barrier_enabled)) {
+ stub = fop_unlink_stub(frame, changelog_unlink_resume, loc, xflags,
+ xdata);
+ if (!stub)
+ __chlog_barrier_disable(this, &queue);
+ else
+ __chlog_barrier_enqueue(this, stub);
+ } else {
+ ((changelog_local_t *)frame->local)->color = priv->current_color;
+ changelog_inc_fop_cnt(this, priv, frame->local);
+ }
+ }
+ UNLOCK(&priv->lock);
+
+ if (barrier_enabled && stub) {
+ gf_msg_debug(this->name, 0, "Enqueue unlink");
+ goto out;
+ }
+ if (barrier_enabled && !stub) {
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM,
+ CHANGELOG_MSG_BARRIER_FOP_FAILED, "fop=unlink", NULL);
+ chlog_barrier_dequeue_all(this, &queue);
+ }
+
+ /* changelog barrier */
+
+wind:
+ STACK_WIND(frame, changelog_unlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->unlink, loc, xflags, xdata);
+out:
+ return 0;
+}
+
+/* rename */
+
+int32_t
+changelog_rename_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf,
+ struct iatt *preoldparent, struct iatt *postoldparent,
+ struct iatt *prenewparent, struct iatt *postnewparent,
+ dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+ changelog_update(this, priv, local, CHANGELOG_TYPE_ENTRY);
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(rename, frame, op_ret, op_errno, buf, preoldparent,
+ postoldparent, prenewparent, postnewparent, xdata);
+ return 0;
+}
+
+int32_t
+changelog_rename_resume(call_frame_t *frame, xlator_t *this, loc_t *oldloc,
+ loc_t *newloc, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+
+ priv = this->private;
+
+ gf_msg_debug(this->name, 0, "Dequeue rename");
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_rename_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rename, oldloc, newloc, xdata);
+ return 0;
+}
+
+int32_t
+changelog_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc,
+ loc_t *newloc, dict_t *xdata)
+{
+ size_t xtra_len = 0;
+ changelog_priv_t *priv = NULL;
+ changelog_opt_t *co = NULL;
+ call_stub_t *stub = NULL;
+ struct list_head queue = {
+ 0,
+ };
+ gf_boolean_t barrier_enabled = _gf_false;
+ dht_changelog_rename_info_t *info = NULL;
+ int ret = 0;
+
+ INIT_LIST_HEAD(&queue);
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ ret = dict_get_bin(xdata, DHT_CHANGELOG_RENAME_OP_KEY, (void **)&info);
+ if (ret && oldloc->inode->ia_type != IA_IFDIR) {
+        /* xdata is NOT set and this is not a directory:
+         * special rename => avoid logging */
+ goto wind;
+ }
+
+ /* 3 == fop + oldloc + newloc */
+ CHANGELOG_INIT_NOCHECK(this, frame->local, NULL, oldloc->inode->gfid, 3);
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+
+ co++;
+ CHANGELOG_FILL_ENTRY(co, oldloc->pargfid, oldloc->name, entry_fn,
+ entry_free_fn, xtra_len, wind);
+
+ co++;
+ CHANGELOG_FILL_ENTRY(co, newloc->pargfid, newloc->name, entry_fn,
+ entry_free_fn, xtra_len, wind);
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 3);
+ /* changelog barrier */
+ LOCK(&priv->lock);
+ {
+ if ((barrier_enabled = priv->barrier_enabled)) {
+ stub = fop_rename_stub(frame, changelog_rename_resume, oldloc,
+ newloc, xdata);
+ if (!stub)
+ __chlog_barrier_disable(this, &queue);
+ else
+ __chlog_barrier_enqueue(this, stub);
+ } else {
+ ((changelog_local_t *)frame->local)->color = priv->current_color;
+ changelog_inc_fop_cnt(this, priv, frame->local);
+ }
+ }
+ UNLOCK(&priv->lock);
+
+ if (barrier_enabled && stub) {
+ gf_msg_debug(this->name, 0, "Enqueue rename");
+ goto out;
+ }
+ if (barrier_enabled && !stub) {
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM,
+ CHANGELOG_MSG_BARRIER_FOP_FAILED, "fop=rename", NULL);
+ chlog_barrier_dequeue_all(this, &queue);
+ }
+ /* changelog barrier */
+
+wind:
+ STACK_WIND(frame, changelog_rename_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rename, oldloc, newloc, xdata);
+out:
+ return 0;
+}
+
+/* link */
+
+int32_t
+changelog_link_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_ENTRY);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(link, frame, op_ret, op_errno, inode, buf, preparent,
+ postparent, xdata);
+ return 0;
+}
+
+int32_t
+changelog_link_resume(call_frame_t *frame, xlator_t *this, loc_t *oldloc,
+ loc_t *newloc, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO("changelog", this, out);
+ GF_VALIDATE_OR_GOTO("changelog", this->fops, out);
+ GF_VALIDATE_OR_GOTO("changelog", frame, out);
+
+ priv = this->private;
+
+ gf_msg_debug(this->name, 0, "Dequeuing link");
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_link_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->link, oldloc, newloc, xdata);
+ return 0;
+out:
+ return -1;
+}
+int32_t
+changelog_link(call_frame_t *frame, xlator_t *this, loc_t *oldloc,
+ loc_t *newloc, dict_t *xdata)
+{
+ size_t xtra_len = 0;
+ changelog_priv_t *priv = NULL;
+ changelog_opt_t *co = NULL;
+ call_stub_t *stub = NULL;
+ struct list_head queue = {
+ 0,
+ };
+ gf_boolean_t barrier_enabled = _gf_false;
+
+ priv = this->private;
+
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+ CHANGELOG_IF_INTERNAL_FOP_THEN_GOTO(frame, xdata, wind);
+
+ CHANGELOG_INIT_NOCHECK(this, frame->local, NULL, oldloc->gfid, 2);
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+
+ co++;
+ CHANGELOG_FILL_ENTRY(co, newloc->pargfid, newloc->name, entry_fn,
+ entry_free_fn, xtra_len, wind);
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 2);
+
+ LOCK(&priv->lock);
+ {
+ if ((barrier_enabled = priv->barrier_enabled)) {
+ stub = fop_link_stub(frame, changelog_link_resume, oldloc, newloc,
+ xdata);
+ if (!stub)
+ __chlog_barrier_disable(this, &queue);
+ else
+ __chlog_barrier_enqueue(this, stub);
+ } else {
+ ((changelog_local_t *)frame->local)->color = priv->current_color;
+ changelog_inc_fop_cnt(this, priv, frame->local);
+ }
+ }
+ UNLOCK(&priv->lock);
+
+ if (barrier_enabled && stub) {
+ gf_msg_debug(this->name, 0, "Enqueued link");
+ goto out;
+ }
+
+ if (barrier_enabled && !stub) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_BARRIER_FOP_FAILED,
+ "fop=link", NULL);
+ chlog_barrier_dequeue_all(this, &queue);
+ }
+wind:
+ STACK_WIND(frame, changelog_link_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->link, oldloc, newloc, xdata);
+out:
+ return 0;
+}
+
+/* mkdir */
+
+int32_t
+changelog_mkdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_ENTRY);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(mkdir, frame, op_ret, op_errno, inode, buf,
+ preparent, postparent, xdata);
+ return 0;
+}
+
+int32_t
+changelog_mkdir_resume(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ mode_t mode, mode_t umask, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO("changelog", this, out);
+ GF_VALIDATE_OR_GOTO("changelog", this->fops, out);
+ GF_VALIDATE_OR_GOTO("changelog", frame, out);
+
+ priv = this->private;
+
+ gf_msg_debug(this->name, 0, "Dequeuing mkdir");
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_mkdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mkdir, loc, mode, umask, xdata);
+ return 0;
+out:
+ return -1;
+}
+
+int32_t
+changelog_mkdir(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ mode_t umask, dict_t *xdata)
+{
+ int ret = -1;
+ uuid_t gfid = {
+ 0,
+ };
+ size_t xtra_len = 0;
+ changelog_priv_t *priv = NULL;
+ changelog_opt_t *co = NULL;
+ call_stub_t *stub = NULL;
+ struct list_head queue = {
+ 0,
+ };
+ gf_boolean_t barrier_enabled = _gf_false;
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ ret = dict_get_gfuuid(xdata, "gfid-req", &gfid);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "failed to get gfid from dict");
+ goto wind;
+ }
+
+ CHANGELOG_INIT_NOCHECK(this, frame->local, NULL, gfid, 5);
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+ co++;
+
+ CHANGELOG_FILL_UINT32(co, S_IFDIR | mode, number_fn, xtra_len);
+ co++;
+
+ CHANGELOG_FILL_UINT32(co, frame->root->uid, number_fn, xtra_len);
+ co++;
+
+ CHANGELOG_FILL_UINT32(co, frame->root->gid, number_fn, xtra_len);
+ co++;
+
+ CHANGELOG_FILL_ENTRY(co, loc->pargfid, loc->name, entry_fn, entry_free_fn,
+ xtra_len, wind);
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 5);
+
+ LOCK(&priv->lock);
+ {
+ if ((barrier_enabled = priv->barrier_enabled)) {
+ stub = fop_mkdir_stub(frame, changelog_mkdir_resume, loc, mode,
+ umask, xdata);
+ if (!stub)
+ __chlog_barrier_disable(this, &queue);
+ else
+ __chlog_barrier_enqueue(this, stub);
+ } else {
+ ((changelog_local_t *)frame->local)->color = priv->current_color;
+ changelog_inc_fop_cnt(this, priv, frame->local);
+ }
+ }
+ UNLOCK(&priv->lock);
+
+ if (barrier_enabled && stub) {
+ gf_msg_debug(this->name, 0, "Enqueued mkdir");
+ goto out;
+ }
+
+ if (barrier_enabled && !stub) {
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM,
+ CHANGELOG_MSG_BARRIER_FOP_FAILED, "fop=mkdir", NULL);
+ chlog_barrier_dequeue_all(this, &queue);
+ }
+
+wind:
+ STACK_WIND(frame, changelog_mkdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mkdir, loc, mode, umask, xdata);
+out:
+ return 0;
+}
+
+/* symlink */
+
+int32_t
+changelog_symlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_ENTRY);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(symlink, frame, op_ret, op_errno, inode, buf,
+ preparent, postparent, xdata);
+ return 0;
+}
+
+int32_t
+changelog_symlink_resume(call_frame_t *frame, xlator_t *this,
+ const char *linkname, loc_t *loc, mode_t umask,
+ dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO("changelog", this, out);
+ GF_VALIDATE_OR_GOTO("changelog", this->fops, out);
+ GF_VALIDATE_OR_GOTO("changelog", frame, out);
+
+ priv = this->private;
+
+ gf_msg_debug(this->name, 0, "Dequeuing symlink");
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_symlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->symlink, linkname, loc, umask, xdata);
+ return 0;
+out:
+ return -1;
+}
+
+int32_t
+changelog_symlink(call_frame_t *frame, xlator_t *this, const char *linkname,
+ loc_t *loc, mode_t umask, dict_t *xdata)
+{
+ int ret = -1;
+ size_t xtra_len = 0;
+ uuid_t gfid = {
+ 0,
+ };
+ changelog_priv_t *priv = NULL;
+ changelog_opt_t *co = NULL;
+ call_stub_t *stub = NULL;
+ struct list_head queue = {
+ 0,
+ };
+ gf_boolean_t barrier_enabled = _gf_false;
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ ret = dict_get_gfuuid(xdata, "gfid-req", &gfid);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "failed to get gfid from dict");
+ goto wind;
+ }
+
+ CHANGELOG_INIT_NOCHECK(this, frame->local, NULL, gfid, 2);
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+ co++;
+
+ CHANGELOG_FILL_ENTRY(co, loc->pargfid, loc->name, entry_fn, entry_free_fn,
+ xtra_len, wind);
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 2);
+
+ LOCK(&priv->lock);
+ {
+ if ((barrier_enabled = priv->barrier_enabled)) {
+ stub = fop_symlink_stub(frame, changelog_symlink_resume, linkname,
+ loc, umask, xdata);
+ if (!stub)
+ __chlog_barrier_disable(this, &queue);
+ else
+ __chlog_barrier_enqueue(this, stub);
+ } else {
+ ((changelog_local_t *)frame->local)->color = priv->current_color;
+ changelog_inc_fop_cnt(this, priv, frame->local);
+ }
+ }
+ UNLOCK(&priv->lock);
+
+ if (barrier_enabled && stub) {
+ gf_msg_debug(this->name, 0, "Enqueued symlink");
+ goto out;
+ }
+
+ if (barrier_enabled && !stub) {
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM,
+ CHANGELOG_MSG_BARRIER_FOP_FAILED, "fop=symlink", NULL);
+ chlog_barrier_dequeue_all(this, &queue);
+ }
+
+wind:
+ STACK_WIND(frame, changelog_symlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->symlink, linkname, loc, umask, xdata);
+out:
+ return 0;
+}
+
+/* mknod */
+
+int32_t
+changelog_mknod_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_ENTRY);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(mknod, frame, op_ret, op_errno, inode, buf,
+ preparent, postparent, xdata);
+ return 0;
+}
+
+int32_t
+changelog_mknod_resume(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ mode_t mode, dev_t rdev, mode_t umask, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO("changelog", this, out);
+ GF_VALIDATE_OR_GOTO("changelog", this->fops, out);
+ GF_VALIDATE_OR_GOTO("changelog", frame, out);
+
+ priv = this->private;
+
+ gf_msg_debug(this->name, 0, "Dequeuing mknod");
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_mknod_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mknod, loc, mode, rdev, umask, xdata);
+ return 0;
+out:
+ return -1;
+}
+
+int32_t
+changelog_mknod(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ dev_t dev, mode_t umask, dict_t *xdata)
+{
+ int ret = -1;
+ uuid_t gfid = {
+ 0,
+ };
+ size_t xtra_len = 0;
+ changelog_priv_t *priv = NULL;
+ changelog_opt_t *co = NULL;
+ call_stub_t *stub = NULL;
+ struct list_head queue = {
+ 0,
+ };
+ gf_boolean_t barrier_enabled = _gf_false;
+
+ priv = this->private;
+
+    /* Skip if changelog is not active */
+ if (!(priv->active))
+ goto wind;
+
+    /* Skip rebalance (defrag) activity */
+ if (frame->root->pid == GF_CLIENT_PID_DEFRAG)
+ goto wind;
+
+    /* If the tier-dht linkto is SET, skip verifying:
+     * 1. whether this is an internal fop, AND
+     * 2. whether this is tier rebalance process activity (this helps in
+     *    recording the mknod if the tier rebalance process issues it) */
+ if (!(dict_get(xdata, "trusted.tier.tier-dht.linkto"))) {
+ CHANGELOG_IF_INTERNAL_FOP_THEN_GOTO(frame, xdata, wind);
+ if (frame->root->pid == GF_CLIENT_PID_TIER_DEFRAG)
+ goto wind;
+ }
+
+ ret = dict_get_gfuuid(xdata, "gfid-req", &gfid);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "failed to get gfid from dict");
+ goto wind;
+ }
+
+ CHANGELOG_INIT_NOCHECK(this, frame->local, NULL, gfid, 5);
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+ co++;
+
+ CHANGELOG_FILL_UINT32(co, mode, number_fn, xtra_len);
+ co++;
+
+ CHANGELOG_FILL_UINT32(co, frame->root->uid, number_fn, xtra_len);
+ co++;
+
+ CHANGELOG_FILL_UINT32(co, frame->root->gid, number_fn, xtra_len);
+ co++;
+
+ CHANGELOG_FILL_ENTRY(co, loc->pargfid, loc->name, entry_fn, entry_free_fn,
+ xtra_len, wind);
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 5);
+
+ LOCK(&priv->lock);
+ {
+ if ((barrier_enabled = priv->barrier_enabled)) {
+ stub = fop_mknod_stub(frame, changelog_mknod_resume, loc, mode, dev,
+ umask, xdata);
+ if (!stub)
+ __chlog_barrier_disable(this, &queue);
+ else
+ __chlog_barrier_enqueue(this, stub);
+ } else {
+ ((changelog_local_t *)frame->local)->color = priv->current_color;
+ changelog_inc_fop_cnt(this, priv, frame->local);
+ }
+ }
+ UNLOCK(&priv->lock);
+
+ if (barrier_enabled && stub) {
+ gf_msg_debug(this->name, 0, "Enqueued mknod");
+ goto out;
+ }
+
+ if (barrier_enabled && !stub) {
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM,
+ CHANGELOG_MSG_BARRIER_FOP_FAILED, "fop=mknod", NULL);
+ chlog_barrier_dequeue_all(this, &queue);
+ }
+
+wind:
+ STACK_WIND(frame, changelog_mknod_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mknod, loc, mode, dev, umask, xdata);
+out:
+ return 0;
+}
+
+/* create */
+
+int32_t
+changelog_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ int32_t ret = 0;
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+ changelog_event_t ev = {
+ 0,
+ };
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+    /* fill the event structure, similar to open() */
+ ev.ev_type = CHANGELOG_OP_TYPE_CREATE;
+ gf_uuid_copy(ev.u.create.gfid, buf->ia_gfid);
+ ev.u.create.flags = fd->flags;
+ changelog_dispatch_event(this, priv, &ev);
+
+ if (changelog_ev_selected(this, &priv->ev_selection,
+ CHANGELOG_OP_TYPE_RELEASE)) {
+ ret = fd_ctx_set(fd, this, (uint64_t)(long)0x1);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_WARNING, 0, CHANGELOG_MSG_SET_FD_CONTEXT,
+ NULL);
+ }
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_ENTRY);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(create, frame, op_ret, op_errno, fd, inode, buf,
+ preparent, postparent, xdata);
+ return 0;
+}
+
+int32_t
+changelog_create_resume(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ int32_t flags, mode_t mode, mode_t umask, fd_t *fd,
+ dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO("changelog", this, out);
+ GF_VALIDATE_OR_GOTO("changelog", this->fops, out);
+ GF_VALIDATE_OR_GOTO("changelog", frame, out);
+
+ priv = this->private;
+
+ gf_msg_debug(this->name, 0, "Dequeuing create");
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_create_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->create, loc, flags, mode, umask, fd,
+ xdata);
+ return 0;
+
+out:
+ return -1;
+}
+
+int32_t
+changelog_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ mode_t mode, mode_t umask, fd_t *fd, dict_t *xdata)
+{
+ int ret = -1;
+ uuid_t gfid = {
+ 0,
+ };
+ changelog_opt_t *co = NULL;
+ changelog_priv_t *priv = NULL;
+ size_t xtra_len = 0;
+ call_stub_t *stub = NULL;
+ struct list_head queue = {
+ 0,
+ };
+ gf_boolean_t barrier_enabled = _gf_false;
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ ret = dict_get_gfuuid(xdata, "gfid-req", &gfid);
+ if (ret) {
+ gf_msg_debug(this->name, 0, "failed to get gfid from dict");
+ goto wind;
+ }
+
+    /* init with five records: fop + mode + uid + gid + entry */
+ CHANGELOG_INIT_NOCHECK(this, frame->local, NULL, gfid, 5);
+ if (!frame->local)
+ goto wind;
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+ co++;
+
+ CHANGELOG_FILL_UINT32(co, mode, number_fn, xtra_len);
+ co++;
+
+ CHANGELOG_FILL_UINT32(co, frame->root->uid, number_fn, xtra_len);
+ co++;
+
+ CHANGELOG_FILL_UINT32(co, frame->root->gid, number_fn, xtra_len);
+ co++;
+
+ CHANGELOG_FILL_ENTRY(co, loc->pargfid, loc->name, entry_fn, entry_free_fn,
+ xtra_len, wind);
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 5);
+
+ LOCK(&priv->lock);
+ {
+ if ((barrier_enabled = priv->barrier_enabled)) {
+ stub = fop_create_stub(frame, changelog_create_resume, loc, flags,
+ mode, umask, fd, xdata);
+ if (!stub)
+ __chlog_barrier_disable(this, &queue);
+ else
+ __chlog_barrier_enqueue(this, stub);
+ } else {
+ ((changelog_local_t *)frame->local)->color = priv->current_color;
+ changelog_inc_fop_cnt(this, priv, frame->local);
+ }
+ }
+ UNLOCK(&priv->lock);
+
+ if (barrier_enabled && stub) {
+ gf_msg_debug(this->name, 0, "Enqueued create");
+ goto out;
+ }
+
+ if (barrier_enabled && !stub) {
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM,
+ CHANGELOG_MSG_BARRIER_FOP_FAILED, "fop=create", NULL);
+ chlog_barrier_dequeue_all(this, &queue);
+ }
+
+wind:
+ STACK_WIND(frame, changelog_create_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->create, loc, flags, mode, umask, fd,
+ xdata);
+out:
+ return 0;
+}
+
+/* }}} */
+
+/* Metadata modification fops - TYPE II */
+
+/* {{{ */
+
+/* {f}setattr */
+
+int32_t
+changelog_fsetattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *preop_stbuf, struct iatt *postop_stbuf,
+ dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_METADATA);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(fsetattr, frame, op_ret, op_errno, preop_stbuf,
+ postop_stbuf, xdata);
+
+ return 0;
+}
+
+int32_t
+changelog_fsetattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iatt *stbuf, int32_t valid, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_opt_t *co = NULL;
+ size_t xtra_len = 0;
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ CHANGELOG_OP_BOUNDARY_CHECK(frame, wind);
+
+ CHANGELOG_INIT(this, frame->local, fd->inode, fd->inode->gfid, 1);
+ if (!frame->local)
+ goto wind;
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 1);
+
+wind:
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_fsetattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetattr, fd, stbuf, valid, xdata);
+ return 0;
+}
+
+int32_t
+changelog_setattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *preop_stbuf, struct iatt *postop_stbuf,
+ dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_METADATA);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(setattr, frame, op_ret, op_errno, preop_stbuf,
+ postop_stbuf, xdata);
+
+ return 0;
+}
+
+int32_t
+changelog_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ struct iatt *stbuf, int32_t valid, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_opt_t *co = NULL;
+ size_t xtra_len = 0;
+ uuid_t shard_root_gfid = {
+ 0,
+ };
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ CHANGELOG_IF_INTERNAL_FOP_THEN_GOTO(frame, xdata, wind);
+
+ /* Do not record META on .shard */
+ gf_uuid_parse(SHARD_ROOT_GFID, shard_root_gfid);
+ if (gf_uuid_compare(loc->gfid, shard_root_gfid) == 0) {
+ goto wind;
+ }
+
+ CHANGELOG_OP_BOUNDARY_CHECK(frame, wind);
+
+ CHANGELOG_INIT(this, frame->local, loc->inode, loc->inode->gfid, 1);
+ if (!frame->local)
+ goto wind;
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 1);
+
+wind:
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_setattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setattr, loc, stbuf, valid, xdata);
+ return 0;
+}
+
+/* {f}removexattr */
+
+int32_t
+changelog_fremovexattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_METADATA_XATTR);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(fremovexattr, frame, op_ret, op_errno, xdata);
+
+ return 0;
+}
+
+int32_t
+changelog_fremovexattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ const char *name, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_opt_t *co = NULL;
+ size_t xtra_len = 0;
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ CHANGELOG_OP_BOUNDARY_CHECK(frame, wind);
+
+ CHANGELOG_INIT(this, frame->local, fd->inode, fd->inode->gfid, 1);
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 1);
+
+wind:
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_fremovexattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fremovexattr, fd, name, xdata);
+ return 0;
+}
+
+int32_t
+changelog_removexattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_METADATA_XATTR);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(removexattr, frame, op_ret, op_errno, xdata);
+
+ return 0;
+}
+
+int32_t
+changelog_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_opt_t *co = NULL;
+ size_t xtra_len = 0;
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ CHANGELOG_OP_BOUNDARY_CHECK(frame, wind);
+
+ CHANGELOG_INIT(this, frame->local, loc->inode, loc->inode->gfid, 1);
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 1);
+
+wind:
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_removexattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->removexattr, loc, name, xdata);
+ return 0;
+}
+
+/* {f}setxattr */
+
+int32_t
+changelog_setxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_METADATA_XATTR);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(setxattr, frame, op_ret, op_errno, xdata);
+
+ return 0;
+}
+
+/* changelog_handle_virtual_xattr:
+ * Handles virtual setxattr 'glusterfs.geo-rep.trigger-sync' on files.
+ * The behaviour depends on the value of the xattr:
+ * 1: Captures only a DATA entry in the changelog.
+ * 2: Tries to capture both ENTRY and DATA entries in the
+ *    changelog. If the pargfid cannot be obtained, only the
+ *    DATA entry is captured.
+ * Any other value: ENOTSUP is returned.
+ */
+static void
+changelog_handle_virtual_xattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ dict_t *dict)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+ int32_t value = 0;
+ int ret = 0;
+ int dict_ret = 0;
+ gf_boolean_t valid = _gf_false;
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ dict_ret = dict_get_int32(dict, GF_XATTR_TRIGGER_SYNC, &value);
+
+ if ((dict_ret == 0 && value == 1) && ((loc->inode->ia_type == IA_IFDIR) ||
+ (loc->inode->ia_type == IA_IFREG)))
+ valid = _gf_true;
+
+ if (valid) {
+ ret = changelog_fill_entry_buf(frame, this, loc, &local);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_INFO, 0, CHANGELOG_MSG_ENTRY_BUF_INFO,
+ "gfid=%s", uuid_utoa(loc->inode->gfid), NULL);
+ goto unwind;
+ }
+ changelog_update(this, priv, local, CHANGELOG_TYPE_ENTRY);
+
+ unwind:
+ /* Capture DATA only if it's a file. */
+ if (loc->inode->ia_type != IA_IFDIR)
+ changelog_update(this, priv, frame->local, CHANGELOG_TYPE_DATA);
+ /* Assign local to prev_entry, so unwind will take
+ * care of cleanup. */
+ ((changelog_local_t *)(frame->local))->prev_entry = local;
+ CHANGELOG_STACK_UNWIND(setxattr, frame, 0, 0, NULL);
+ return;
+ } else {
+ CHANGELOG_STACK_UNWIND(setxattr, frame, -1, ENOTSUP, NULL);
+ return;
+ }
+}
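+
+/* Illustrative only (not part of this change): assuming the key behind
+ * GF_XATTR_TRIGGER_SYNC is "glusterfs.geo-rep.trigger-sync", a sync could
+ * hypothetically be triggered from a mount with something like:
+ *
+ *   setfattr -n glusterfs.geo-rep.trigger-sync -v "1" <file-on-mount>
+ *
+ * where value 1 records only a DATA entry and 2 attempts ENTRY + DATA. */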
+
+int32_t
+changelog_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ dict_t *dict, int32_t flags, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_opt_t *co = NULL;
+ size_t xtra_len = 0;
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ CHANGELOG_OP_BOUNDARY_CHECK(frame, wind);
+
+ CHANGELOG_INIT(this, frame->local, loc->inode, loc->inode->gfid, 1);
+
+    /* Setting this virtual xattr on a file triggers an explicit data
+     * sync from geo-rep, since a CREATE|DATA entry is recorded in the
+     * changelog based on the xattr value.
+     */
+ if (dict_get(dict, GF_XATTR_TRIGGER_SYNC)) {
+ changelog_handle_virtual_xattr(frame, this, loc, dict);
+ return 0;
+ }
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 1);
+
+wind:
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_setxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setxattr, loc, dict, flags, xdata);
+ return 0;
+}
+
+int32_t
+changelog_fsetxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_METADATA_XATTR);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(fsetxattr, frame, op_ret, op_errno, xdata);
+
+ return 0;
+}
+
+int32_t
+changelog_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *dict,
+ int32_t flags, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_opt_t *co = NULL;
+ size_t xtra_len = 0;
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+ CHANGELOG_IF_INTERNAL_FOP_THEN_GOTO(frame, xdata, wind);
+
+ CHANGELOG_OP_BOUNDARY_CHECK(frame, wind);
+
+ CHANGELOG_INIT(this, frame->local, fd->inode, fd->inode->gfid, 1);
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 1);
+
+wind:
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_fsetxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags, xdata);
+ return 0;
+}
+
+int32_t
+changelog_xattrop_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xattr,
+ dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_METADATA);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(xattrop, frame, op_ret, op_errno, xattr, xdata);
+
+ return 0;
+}
+
+int32_t
+changelog_xattrop(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_opt_t *co = NULL;
+ size_t xtra_len = 0;
+ int ret = 0;
+ void *size_attr = NULL;
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+ ret = dict_get_ptr(xattr, GF_XATTR_SHARD_FILE_SIZE, &size_attr);
+ if (ret)
+ goto wind;
+
+ CHANGELOG_OP_BOUNDARY_CHECK(frame, wind);
+
+ CHANGELOG_INIT(this, frame->local, loc->inode, loc->inode->gfid, 1);
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 1);
+
+wind:
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_xattrop_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->xattrop, loc, optype, xattr, xdata);
+ return 0;
+}
+
+int32_t
+changelog_fxattrop_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xattr,
+ dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_METADATA_XATTR);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(fxattrop, frame, op_ret, op_errno, xattr, xdata);
+
+ return 0;
+}
+
+int32_t
+changelog_fxattrop(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_opt_t *co = NULL;
+ size_t xtra_len = 0;
+ void *size_attr = NULL;
+ int ret = 0;
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+ ret = dict_get_ptr(xattr, GF_XATTR_SHARD_FILE_SIZE, &size_attr);
+ if (ret)
+ goto wind;
+
+ CHANGELOG_OP_BOUNDARY_CHECK(frame, wind);
+
+ CHANGELOG_INIT(this, frame->local, fd->inode, fd->inode->gfid, 1);
+
+ co = changelog_get_usable_buffer(frame->local);
+ if (!co)
+ goto wind;
+
+ CHANGLOG_FILL_FOP_NUMBER(co, frame->root->op, fop_fn, xtra_len);
+
+ changelog_set_usable_record_and_length(frame->local, xtra_len, 1);
+
+wind:
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_fxattrop_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fxattrop, fd, optype, xattr, xdata);
+ return 0;
+}
+/* }}} */
+
+/* Data modification fops - TYPE I */
+
+/* {{{ */
+
+/* {f}truncate() */
+
+int32_t
+changelog_truncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_DATA);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(truncate, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+ return 0;
+}
+
+int32_t
+changelog_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ off_t offset, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ CHANGELOG_INIT(this, frame->local, loc->inode, loc->inode->gfid, 0);
+ LOCK(&priv->c_snap_lock);
+ {
+ if (priv->c_snap_fd != -1 && priv->barrier_enabled == _gf_true) {
+ changelog_snap_handle_ascii_change(
+ this, &(((changelog_local_t *)(frame->local))->cld));
+ }
+ }
+ UNLOCK(&priv->c_snap_lock);
+
+wind:
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_truncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->truncate, loc, offset, xdata);
+ return 0;
+}
+
+int32_t
+changelog_ftruncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_DATA);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(ftruncate, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+ return 0;
+}
+
+int32_t
+changelog_ftruncate(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ CHANGELOG_INIT(this, frame->local, fd->inode, fd->inode->gfid, 0);
+ LOCK(&priv->c_snap_lock);
+ {
+ if (priv->c_snap_fd != -1 && priv->barrier_enabled == _gf_true) {
+ changelog_snap_handle_ascii_change(
+ this, &(((changelog_local_t *)(frame->local))->cld));
+ }
+ }
+ UNLOCK(&priv->c_snap_lock);
+
+wind:
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_ftruncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->ftruncate, fd, offset, xdata);
+ return 0;
+}
+
+/* writev() */
+
+int32_t
+changelog_writev_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+ changelog_local_t *local = NULL;
+
+ priv = this->private;
+ local = frame->local;
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret <= 0) || !local), unwind);
+
+ changelog_update(this, priv, local, CHANGELOG_TYPE_DATA);
+
+unwind:
+ changelog_dec_fop_cnt(this, priv, local);
+ CHANGELOG_STACK_UNWIND(writev, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+ return 0;
+}
+
+int32_t
+changelog_writev(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iovec *vector, int32_t count, off_t offset,
+ uint32_t flags, struct iobref *iobref, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ CHANGELOG_INIT(this, frame->local, fd->inode, fd->inode->gfid, 0);
+ LOCK(&priv->c_snap_lock);
+ {
+ if (priv->c_snap_fd != -1 && priv->barrier_enabled == _gf_true) {
+ changelog_snap_handle_ascii_change(
+ this, &(((changelog_local_t *)(frame->local))->cld));
+ }
+ }
+ UNLOCK(&priv->c_snap_lock);
+
+wind:
+ changelog_color_fop_and_inc_cnt(this, priv, frame->local);
+ STACK_WIND(frame, changelog_writev_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->writev, fd, vector, count, offset,
+ flags, iobref, xdata);
+ return 0;
+}
+
+/* }}} */
+
+/* open, release and other beasts */
+
+/* {{{ */
+
+int
+changelog_open_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, fd_t *fd, dict_t *xdata)
+{
+ int ret = 0;
+ changelog_priv_t *priv = NULL;
+ changelog_event_t ev = {
+ 0,
+ };
+ gf_boolean_t logopen = _gf_false;
+
+ priv = this->private;
+ if (frame->local) {
+ frame->local = NULL;
+ logopen = _gf_true;
+ }
+
+ CHANGELOG_COND_GOTO(priv, ((op_ret < 0) || !logopen), unwind);
+
+ /* fill the event structure */
+ ev.ev_type = CHANGELOG_OP_TYPE_OPEN;
+ gf_uuid_copy(ev.u.open.gfid, fd->inode->gfid);
+ ev.u.open.flags = fd->flags;
+ changelog_dispatch_event(this, priv, &ev);
+
+ if (changelog_ev_selected(this, &priv->ev_selection,
+ CHANGELOG_OP_TYPE_RELEASE)) {
+ ret = fd_ctx_set(fd, this, (uint64_t)(long)0x1);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_WARNING, 0, CHANGELOG_MSG_SET_FD_CONTEXT,
+ NULL);
+ }
+
+unwind:
+ CHANGELOG_STACK_UNWIND(open, frame, op_ret, op_errno, fd, xdata);
+ return 0;
+}
+
+int
+changelog_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+ fd_t *fd, dict_t *xdata)
+{
+ changelog_priv_t *priv = NULL;
+
+ priv = this->private;
+ CHANGELOG_NOT_ACTIVE_THEN_GOTO(frame, priv, wind);
+
+ frame->local = (void *)0x1; /* do not dereference in ->cbk */
+
+wind:
+ STACK_WIND(frame, changelog_open_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->open, loc, flags, fd, xdata);
+ return 0;
+}
+
+/* }}} */
+
+/* {{{ */
+
+/* }}} */
+
+int32_t
+_changelog_generic_dispatcher(dict_t *dict, char *key, data_t *value,
+ void *data)
+{
+ xlator_t *this = NULL;
+ changelog_priv_t *priv = NULL;
+
+ this = data;
+ priv = this->private;
+
+ changelog_dispatch_event(this, priv, (changelog_event_t *)value->data);
+ return 0;
+}
+
+/**
+ * changelog ipc dispatches events whose pointers are passed in
+ * @xdata. Dispatching is unordered (it follows whatever order
+ * dict_foreach() traverses the dictionary in).
+ */
+int32_t
+changelog_ipc(call_frame_t *frame, xlator_t *this, int32_t op, dict_t *xdata)
+{
+ if (op != GF_IPC_TARGET_CHANGELOG)
+ goto wind;
+
+ /* it's for us, do the job */
+ if (xdata)
+ (void)dict_foreach(xdata, _changelog_generic_dispatcher, this);
+
+ STACK_UNWIND_STRICT(ipc, frame, 0, 0, NULL);
+ return 0;
+
+wind:
+ STACK_WIND(frame, default_ipc_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->ipc, op, xdata);
+ return 0;
+}
+
+/* {{{ */
+
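+/* Emit a RELEASE event for the fd (whose context was set in the open()
+ * and create() callbacks) and drop that fd context. */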
+int32_t
+changelog_release(xlator_t *this, fd_t *fd)
+{
+ changelog_event_t ev = {
+ 0,
+ };
+ changelog_priv_t *priv = NULL;
+
+ priv = this->private;
+
+ ev.ev_type = CHANGELOG_OP_TYPE_RELEASE;
+ gf_uuid_copy(ev.u.release.gfid, fd->inode->gfid);
+ changelog_dispatch_event(this, priv, &ev);
+
+ (void)fd_ctx_del(fd, this, NULL);
+
+ return 0;
+}
+
+/* }}} */
+
+/**
+ * The xlator entry points:
+ * - @init ()
+ * - @fini ()
+ * - @reconfigure ()
+ * ... and their helper routines
+ */
+
+/**
+ * Needed only if more operation modes are added in the future.
+ */
+static void
+changelog_assign_opmode(changelog_priv_t *priv, char *mode)
+{
+ if (strncmp(mode, "realtime", 8) == 0) {
+ priv->op_mode = CHANGELOG_MODE_RT;
+ }
+}
+
+static void
+changelog_assign_encoding(changelog_priv_t *priv, char *enc)
+{
+ if (strncmp(enc, "binary", 6) == 0) {
+ priv->encode_mode = CHANGELOG_ENCODE_BINARY;
+ } else if (strncmp(enc, "ascii", 5) == 0) {
+ priv->encode_mode = CHANGELOG_ENCODE_ASCII;
+ }
+}
+
+static void
+changelog_assign_barrier_timeout(changelog_priv_t *priv, uint32_t timeout)
+{
+ LOCK(&priv->lock);
+ {
+ priv->timeout.tv_sec = timeout;
+ }
+ UNLOCK(&priv->lock);
+}
+
+/* cleanup any helper threads that are running */
+static void
+changelog_cleanup_helper_threads(xlator_t *this, changelog_priv_t *priv)
+{
+ if (priv->cr.rollover_th) {
+ (void)changelog_thread_cleanup(this, priv->cr.rollover_th);
+ priv->cr.rollover_th = 0;
+ }
+
+ if (priv->cf.fsync_th) {
+ (void)changelog_thread_cleanup(this, priv->cf.fsync_th);
+ priv->cf.fsync_th = 0;
+ }
+}
+
+/* spawn helper threads; clean up in case of errors */
+static int
+changelog_spawn_helper_threads(xlator_t *this, changelog_priv_t *priv)
+{
+ int ret = 0;
+
+ /* Geo-Rep snapshot dependency:
+ *
+     * To implement explicit rollover of the changelog journal on barrier
+ * notification, a pipe is created to communicate between
+ * 'changelog_rollover' thread and changelog main thread. The select
+ * call used to wait till roll-over time in changelog_rollover thread
+ * is modified to wait on read end of the pipe. When barrier
+ * notification comes (i.e, in 'reconfigure'), select in
+ * changelog_rollover thread is woken up explicitly by writing into
+ * the write end of the pipe in 'reconfigure'.
+ */
+
+ priv->cr.notify = _gf_false;
+ priv->cr.this = this;
+ ret = gf_thread_create(&priv->cr.rollover_th, NULL, changelog_rollover,
+ priv, "clogro");
+ if (ret)
+ goto out;
+
+ if (priv->fsync_interval) {
+ priv->cf.this = this;
+ ret = gf_thread_create(&priv->cf.fsync_th, NULL, changelog_fsync_thread,
+ priv, "clogfsyn");
+ }
+
+ if (ret)
+ changelog_cleanup_helper_threads(this, priv);
+
+out:
+ return ret;
+}
+
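+/* Handles parent-down cleanup of the changelog RPC resources and the
+ * barrier on/off requests delivered via GF_EVENT_TRANSLATOR_OP. */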
+int
+notify(xlator_t *this, int event, void *data, ...)
+{
+ changelog_priv_t *priv = NULL;
+ dict_t *dict = NULL;
+ char buf[1] = {1};
+ int barrier = DICT_DEFAULT;
+ gf_boolean_t bclean_req = _gf_false;
+ int ret = 0;
+ int ret1 = 0;
+ struct list_head queue = {
+ 0,
+ };
+ uint64_t xprtcnt = 0;
+ uint64_t clntcnt = 0;
+ changelog_clnt_t *conn = NULL;
+ gf_boolean_t cleanup_notify = _gf_false;
+ char sockfile[UNIX_PATH_MAX] = {
+ 0,
+ };
+ rpcsvc_listener_t *listener = NULL;
+ rpcsvc_listener_t *next = NULL;
+
+ INIT_LIST_HEAD(&queue);
+
+ priv = this->private;
+ if (!priv)
+ goto out;
+
+ if (event == GF_EVENT_PARENT_DOWN) {
+ priv->victim = data;
+ gf_log(this->name, GF_LOG_INFO,
+ "cleanup changelog rpc connection of brick %s",
+ priv->victim->name);
+
+ if (priv->rpc_active) {
+ this->cleanup_starting = 1;
+ changelog_destroy_rpc_listner(this, priv);
+ conn = &priv->connections;
+ if (conn)
+ changelog_ev_cleanup_connections(this, conn);
+ xprtcnt = GF_ATOMIC_GET(priv->xprtcnt);
+ clntcnt = GF_ATOMIC_GET(priv->clntcnt);
+ if (!xprtcnt && !clntcnt) {
+ LOCK(&priv->lock);
+ {
+ cleanup_notify = priv->notify_down;
+ priv->notify_down = _gf_true;
+ }
+ UNLOCK(&priv->lock);
+ if (priv->rpc) {
+ list_for_each_entry_safe(listener, next,
+ &priv->rpc->listeners, list)
+ {
+ if (listener->trans) {
+ rpc_transport_unref(listener->trans);
+ }
+ }
+ rpcsvc_destroy(priv->rpc);
+ priv->rpc = NULL;
+ }
+ CHANGELOG_MAKE_SOCKET_PATH(priv->changelog_brick, sockfile,
+ UNIX_PATH_MAX);
+ sys_unlink(sockfile);
+ if (!cleanup_notify)
+ default_notify(this, GF_EVENT_PARENT_DOWN, data);
+ }
+ } else {
+ default_notify(this, GF_EVENT_PARENT_DOWN, data);
+ }
+ goto out;
+ }
+
+ if (event == GF_EVENT_TRANSLATOR_OP) {
+ dict = data;
+
+ barrier = dict_get_str_boolean(dict, "barrier", DICT_DEFAULT);
+
+ switch (barrier) {
+ case DICT_ERROR:
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_MSG_DICT_GET_FAILED, "dict_get_str_boolean",
+ NULL);
+ ret = -1;
+ goto out;
+
+ case BARRIER_OFF:
+ gf_smsg(this->name, GF_LOG_INFO, 0,
+ CHANGELOG_MSG_BARRIER_STATE_NOTIFY, "off", NULL);
+
+ CHANGELOG_NOT_ON_THEN_GOTO(priv, ret, out);
+ LOCK(&priv->c_snap_lock);
+ {
+ changelog_snap_logging_stop(this, priv);
+ }
+ UNLOCK(&priv->c_snap_lock);
+
+ LOCK(&priv->bflags.lock);
+ {
+ if (priv->bflags.barrier_ext == _gf_false)
+ ret = -1;
+ }
+ UNLOCK(&priv->bflags.lock);
+
+ if (ret == -1) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_MSG_BARRIER_ERROR, NULL);
+ goto out;
+ }
+
+ /* Stop changelog barrier and dequeue all fops */
+ LOCK(&priv->lock);
+ {
+ if (priv->barrier_enabled == _gf_true)
+ __chlog_barrier_disable(this, &queue);
+ else
+ ret = -1;
+ }
+ UNLOCK(&priv->lock);
+ /* If ret = -1, then changelog barrier is already
+ * disabled because of error or timeout.
+ */
+ if (ret == 0) {
+ chlog_barrier_dequeue_all(this, &queue);
+ gf_smsg(this->name, GF_LOG_INFO, 0,
+ CHANGELOG_MSG_BARRIER_DISABLED, NULL);
+ } else {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_MSG_BARRIER_ALREADY_DISABLED, NULL);
+ }
+
+ LOCK(&priv->bflags.lock);
+ {
+ priv->bflags.barrier_ext = _gf_false;
+ }
+ UNLOCK(&priv->bflags.lock);
+
+ goto out;
+
+ case BARRIER_ON:
+ gf_smsg(this->name, GF_LOG_INFO, 0,
+ CHANGELOG_MSG_BARRIER_STATE_NOTIFY, "on", NULL);
+
+ CHANGELOG_NOT_ON_THEN_GOTO(priv, ret, out);
+ LOCK(&priv->c_snap_lock);
+ {
+ changelog_snap_logging_start(this, priv);
+ }
+ UNLOCK(&priv->c_snap_lock);
+
+ LOCK(&priv->bflags.lock);
+ {
+ if (priv->bflags.barrier_ext == _gf_true)
+ ret = -1;
+ else
+ priv->bflags.barrier_ext = _gf_true;
+ }
+ UNLOCK(&priv->bflags.lock);
+
+ if (ret == -1) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_MSG_BARRIER_ON_ERROR, NULL);
+ goto out;
+ }
+
+ ret = pthread_mutex_lock(&priv->bn.bnotify_mutex);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_1(ret, out, bclean_req);
+ {
+ priv->bn.bnotify = _gf_true;
+ }
+ ret = pthread_mutex_unlock(&priv->bn.bnotify_mutex);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_1(ret, out, bclean_req);
+
+ /* Start changelog barrier */
+ LOCK(&priv->lock);
+ {
+ ret = __chlog_barrier_enable(this, priv);
+ }
+ UNLOCK(&priv->lock);
+ if (ret == -1) {
+ changelog_barrier_cleanup(this, priv, &queue);
+ goto out;
+ }
+
+ gf_smsg(this->name, GF_LOG_INFO, 0,
+ CHANGELOG_MSG_BARRIER_ENABLE, NULL);
+
+ ret = changelog_barrier_notify(priv, buf);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_MSG_WRITE_FAILED, "Explicit roll over",
+ NULL);
+ changelog_barrier_cleanup(this, priv, &queue);
+ ret = -1;
+ goto out;
+ }
+
+ ret = pthread_mutex_lock(&priv->bn.bnotify_mutex);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_1(ret, out, bclean_req);
+ {
+ /* The while condition check is required here to
+ * handle spurious wakeup of cond wait that can
+ * happen with pthreads. See man page */
+ while (priv->bn.bnotify == _gf_true) {
+ ret = pthread_cond_wait(&priv->bn.bnotify_cond,
+ &priv->bn.bnotify_mutex);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_1(ret, out, bclean_req);
+ }
+ if (priv->bn.bnotify_error == _gf_true) {
+ ret = -1;
+ priv->bn.bnotify_error = _gf_false;
+ }
+ }
+ ret1 = pthread_mutex_unlock(&priv->bn.bnotify_mutex);
+ CHANGELOG_PTHREAD_ERROR_HANDLE_1(ret1, out, bclean_req);
+ gf_smsg(this->name, GF_LOG_INFO, 0,
+ CHANGELOG_MSG_BNOTIFY_COND_INFO, NULL);
+
+ goto out;
+
+ case DICT_DEFAULT:
+ gf_smsg(this->name, GF_LOG_ERROR, 0,
+ CHANGELOG_MSG_BARRIER_KEY_NOT_FOUND, NULL);
+ ret = -1;
+ goto out;
+
+ default:
+ gf_smsg(this->name, GF_LOG_ERROR, EINVAL,
+ CHANGELOG_MSG_ERROR_IN_DICT_GET, NULL);
+ ret = -1;
+ goto out;
+ }
+ } else {
+ ret = default_notify(this, event, data);
+ }
+
+out:
+ if (bclean_req)
+ changelog_barrier_cleanup(this, priv, &queue);
+
+ return ret;
+}
+
+int32_t
+mem_acct_init(xlator_t *this)
+{
+ int ret = -1;
+
+ if (!this)
+ return ret;
+
+ ret = xlator_mem_acct_init(this, gf_changelog_mt_end + 1);
+
+ if (ret != 0) {
+ gf_smsg(this->name, GF_LOG_WARNING, ENOMEM,
+ CHANGELOG_MSG_MEMORY_INIT_FAILED, NULL);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+changelog_init(xlator_t *this, changelog_priv_t *priv)
+{
+ int i = 0;
+ int ret = 0;
+ changelog_log_data_t cld = {
+ 0,
+ };
+
+ priv->maps[CHANGELOG_TYPE_DATA] = "D ";
+ priv->maps[CHANGELOG_TYPE_METADATA] = "M ";
+ priv->maps[CHANGELOG_TYPE_METADATA_XATTR] = "M ";
+ priv->maps[CHANGELOG_TYPE_ENTRY] = "E ";
+
+ for (; i < CHANGELOG_MAX_TYPE; i++) {
+ /* start with version 1 */
+ priv->slice.changelog_version[i] = 1;
+ }
+
+ if (!priv->active)
+ return ret;
+
+ /**
+ * start with a fresh changelog file every time. this is done
+ * in case there was an encoding change, which keeps things
+ * simple here.
+ */
+ changelog_fill_rollover_data(&cld, _gf_false);
+
+ ret = htime_open(this, priv, cld.cld_roll_time);
+ /* call htime open with cld's rollover_time */
+ if (ret)
+ goto out;
+
+ LOCK(&priv->lock);
+ {
+ ret = changelog_inject_single_event(this, priv, &cld);
+ }
+ UNLOCK(&priv->lock);
+
+ /* ... and finally spawn the helper threads */
+ ret = changelog_spawn_helper_threads(this, priv);
+
+out:
+ return ret;
+}
+
+/**
+ * Init barrier related condition variables and locks
+ */
+static int
+changelog_barrier_pthread_init(xlator_t *this, changelog_priv_t *priv)
+{
+ gf_boolean_t bn_mutex_init = _gf_false;
+ gf_boolean_t bn_cond_init = _gf_false;
+ gf_boolean_t dm_mutex_black_init = _gf_false;
+ gf_boolean_t dm_cond_black_init = _gf_false;
+ gf_boolean_t dm_mutex_white_init = _gf_false;
+ gf_boolean_t dm_cond_white_init = _gf_false;
+ gf_boolean_t cr_mutex_init = _gf_false;
+ gf_boolean_t cr_cond_init = _gf_false;
+ int ret = 0;
+
+ if ((ret = pthread_mutex_init(&priv->bn.bnotify_mutex, NULL)) != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_PTHREAD_MUTEX_INIT_FAILED, "name=bnotify",
+ "ret=%d", ret, NULL);
+ ret = -1;
+ goto out;
+ }
+ bn_mutex_init = _gf_true;
+
+ if ((ret = pthread_cond_init(&priv->bn.bnotify_cond, NULL)) != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_PTHREAD_COND_INIT_FAILED, "name=bnotify",
+ "ret=%d", ret, NULL);
+ ret = -1;
+ goto out;
+ }
+ bn_cond_init = _gf_true;
+
+ if ((ret = pthread_mutex_init(&priv->dm.drain_black_mutex, NULL)) != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_PTHREAD_MUTEX_INIT_FAILED, "name=drain_black",
+ "ret=%d", ret, NULL);
+ ret = -1;
+ goto out;
+ }
+ dm_mutex_black_init = _gf_true;
+
+ if ((ret = pthread_cond_init(&priv->dm.drain_black_cond, NULL)) != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_PTHREAD_COND_INIT_FAILED, "name=drain_black",
+ "ret=%d", ret, NULL);
+ ret = -1;
+ goto out;
+ }
+ dm_cond_black_init = _gf_true;
+
+ if ((ret = pthread_mutex_init(&priv->dm.drain_white_mutex, NULL)) != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_PTHREAD_MUTEX_INIT_FAILED, "name=drain_white",
+ "ret=%d", ret, NULL);
+ ret = -1;
+ goto out;
+ }
+ dm_mutex_white_init = _gf_true;
+
+ if ((ret = pthread_cond_init(&priv->dm.drain_white_cond, NULL)) != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_PTHREAD_COND_INIT_FAILED, "name=drain_white",
+ "ret=%d", ret, NULL);
+ ret = -1;
+ goto out;
+ }
+ dm_cond_white_init = _gf_true;
+
+ if ((ret = pthread_mutex_init(&priv->cr.lock, NULL)) != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_PTHREAD_MUTEX_INIT_FAILED,
+ "name=changelog_rollover", "ret=%d", ret, NULL);
+ ret = -1;
+ goto out;
+ }
+ cr_mutex_init = _gf_true;
+
+ if ((ret = pthread_cond_init(&priv->cr.cond, NULL)) != 0) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ CHANGELOG_MSG_PTHREAD_COND_INIT_FAILED,
+ "name=changelog_rollover", "ret=%d", ret, NULL);
+ ret = -1;
+ goto out;
+ }
+ cr_cond_init = _gf_true;
+out:
+ if (ret) {
+ if (bn_mutex_init)
+ pthread_mutex_destroy(&priv->bn.bnotify_mutex);
+ if (bn_cond_init)
+ pthread_cond_destroy(&priv->bn.bnotify_cond);
+ if (dm_mutex_black_init)
+ pthread_mutex_destroy(&priv->dm.drain_black_mutex);
+ if (dm_cond_black_init)
+ pthread_cond_destroy(&priv->dm.drain_black_cond);
+ if (dm_mutex_white_init)
+ pthread_mutex_destroy(&priv->dm.drain_white_mutex);
+ if (dm_cond_white_init)
+ pthread_cond_destroy(&priv->dm.drain_white_cond);
+ if (cr_mutex_init)
+ pthread_mutex_destroy(&priv->cr.lock);
+ if (cr_cond_init)
+ pthread_cond_destroy(&priv->cr.cond);
+ }
+ return ret;
+}
+
+/* Destroy barrier related condition variables and locks */
+static void
+changelog_barrier_pthread_destroy(changelog_priv_t *priv)
+{
+ pthread_mutex_destroy(&priv->bn.bnotify_mutex);
+ pthread_cond_destroy(&priv->bn.bnotify_cond);
+ pthread_mutex_destroy(&priv->dm.drain_black_mutex);
+ pthread_cond_destroy(&priv->dm.drain_black_cond);
+ pthread_mutex_destroy(&priv->dm.drain_white_mutex);
+ pthread_cond_destroy(&priv->dm.drain_white_cond);
+ pthread_mutex_destroy(&priv->cr.lock);
+ pthread_cond_destroy(&priv->cr.cond);
+ LOCK_DESTROY(&priv->bflags.lock);
+}
+
+static void
+changelog_cleanup_rpc(xlator_t *this, changelog_priv_t *priv)
+{
+ /* terminate rpc server */
+ if (!this->cleanup_starting)
+ changelog_destroy_rpc_listner(this, priv);
+
+ (void)changelog_cleanup_rpc_threads(this, priv);
+ /* cleanup rot buffs */
+ rbuf_dtor(priv->rbuf);
+
+ /* cleanup poller thread */
+ if (priv->poller)
+ (void)changelog_thread_cleanup(this, priv->poller);
+}
+
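+/*
+ * reconfigure(): applies volume option changes at runtime. The rollover and
+ * fsync helper threads are stopped first, the (possibly new) changelog, htime
+ * and csnap directories are created, RPC notification is brought up if it was
+ * just enabled, and, if changelogging is or was active, a rollover event is
+ * injected before the helper threads are respawned.
+ */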
+int
+reconfigure(xlator_t *this, dict_t *options)
+{
+ int ret = 0;
+ char *tmp = NULL;
+ changelog_priv_t *priv = NULL;
+ gf_boolean_t active_earlier = _gf_true;
+ gf_boolean_t active_now = _gf_true;
+ gf_boolean_t rpc_active_earlier = _gf_true;
+ gf_boolean_t rpc_active_now = _gf_true;
+ gf_boolean_t iniate_rpc = _gf_false;
+ changelog_time_slice_t *slice = NULL;
+ changelog_log_data_t cld = {
+ 0,
+ };
+ char htime_dir[PATH_MAX] = {
+ 0,
+ };
+ char csnap_dir[PATH_MAX] = {
+ 0,
+ };
+ uint32_t timeout = 0;
+
+ priv = this->private;
+ if (!priv)
+ goto out;
+
+ ret = -1;
+ active_earlier = priv->active;
+ rpc_active_earlier = priv->rpc_active;
+
+ /* first stop the rollover and the fsync thread */
+ changelog_cleanup_helper_threads(this, priv);
+
+ GF_OPTION_RECONF("changelog-dir", tmp, options, str, out);
+ if (!tmp) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_DIR_OPTIONS_NOT_SET,
+ NULL);
+ goto out;
+ }
+
+ GF_FREE(priv->changelog_dir);
+ priv->changelog_dir = gf_strdup(tmp);
+ if (!priv->changelog_dir)
+ goto out;
+
+ ret = mkdir_p(priv->changelog_dir, 0600, _gf_true);
+
+ if (ret)
+ goto out;
+ CHANGELOG_FILL_HTIME_DIR(priv->changelog_dir, htime_dir);
+ ret = mkdir_p(htime_dir, 0600, _gf_true);
+
+ if (ret)
+ goto out;
+
+ CHANGELOG_FILL_CSNAP_DIR(priv->changelog_dir, csnap_dir);
+ ret = mkdir_p(csnap_dir, 0600, _gf_true);
+
+ if (ret)
+ goto out;
+
+ GF_OPTION_RECONF("changelog", active_now, options, bool, out);
+ GF_OPTION_RECONF("changelog-notification", rpc_active_now, options, bool,
+ out);
+
+ /* If journalling is enabled, enable rpc notifications */
+ if (active_now && !active_earlier) {
+ if (!rpc_active_earlier)
+ iniate_rpc = _gf_true;
+ }
+
+ if (rpc_active_now && !rpc_active_earlier) {
+ iniate_rpc = _gf_true;
+ }
+
+ /* TODO: Disable of changelog-notifications is not supported for now
+ * as there is no clean way of cleaning up of rpc resources
+ */
+
+ if (iniate_rpc) {
+ ret = changelog_init_rpc(this, priv);
+ if (ret)
+ goto out;
+ priv->rpc_active = _gf_true;
+ }
+
+ /**
+ * changelog_handle_change() handles changes that could possibly
+ * have been submitted before changelog deactivation.
+ */
+ if (!active_now)
+ priv->active = _gf_false;
+
+ GF_OPTION_RECONF("op-mode", tmp, options, str, out);
+ changelog_assign_opmode(priv, tmp);
+
+ tmp = NULL;
+
+ GF_OPTION_RECONF("encoding", tmp, options, str, out);
+ changelog_assign_encoding(priv, tmp);
+
+ GF_OPTION_RECONF("rollover-time", priv->rollover_time, options, int32, out);
+ GF_OPTION_RECONF("fsync-interval", priv->fsync_interval, options, int32,
+ out);
+ GF_OPTION_RECONF("changelog-barrier-timeout", timeout, options, time, out);
+ changelog_assign_barrier_timeout(priv, timeout);
+
+ GF_OPTION_RECONF("capture-del-path", priv->capture_del_path, options, bool,
+ out);
+
+ if (active_now || active_earlier) {
+ changelog_fill_rollover_data(&cld, !active_now);
+
+ slice = &priv->slice;
+
+ LOCK(&priv->lock);
+ {
+ ret = changelog_inject_single_event(this, priv, &cld);
+ if (!ret && active_now)
+ SLICE_VERSION_UPDATE(slice);
+ }
+ UNLOCK(&priv->lock);
+
+ if (ret)
+ goto out;
+
+ if (active_now) {
+ if (!active_earlier) {
+ gf_smsg(this->name, GF_LOG_INFO, 0, CHANGELOG_MSG_RECONFIGURE,
+ NULL);
+ htime_create(this, priv, gf_time());
+ }
+ ret = changelog_spawn_helper_threads(this, priv);
+ }
+ }
+
+out:
+ if (ret) {
+ /* TODO */
+ } else {
+ gf_msg_debug(this->name, 0, "changelog reconfigured");
+ if (active_now && priv)
+ priv->active = _gf_true;
+ }
+
+ return ret;
+}
+
+static void
+changelog_freeup_options(xlator_t *this, changelog_priv_t *priv)
+{
+ int ret = 0;
+
+ ret = priv->cb->dtor(this, &priv->cd);
+ if (ret)
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_FREEUP_FAILED, NULL);
+ GF_FREE(priv->changelog_brick);
+ GF_FREE(priv->changelog_dir);
+}
+
+static int
+changelog_init_options(xlator_t *this, changelog_priv_t *priv)
+{
+ int ret = 0;
+ char *tmp = NULL;
+ uint32_t timeout = 0;
+ char htime_dir[PATH_MAX] = {
+ 0,
+ };
+ char csnap_dir[PATH_MAX] = {
+ 0,
+ };
+
+ GF_OPTION_INIT("changelog-brick", tmp, str, error_return);
+ priv->changelog_brick = gf_strdup(tmp);
+ if (!priv->changelog_brick)
+ goto error_return;
+
+ tmp = NULL;
+
+ GF_OPTION_INIT("changelog-dir", tmp, str, dealloc_1);
+ priv->changelog_dir = gf_strdup(tmp);
+ if (!priv->changelog_dir)
+ goto dealloc_1;
+
+ tmp = NULL;
+
+ /**
+ * create the directory even if change-logging would be inactive
+ * so that consumers can _look_ into it (finding nothing...)
+ */
+ ret = mkdir_p(priv->changelog_dir, 0600, _gf_true);
+
+ if (ret)
+ goto dealloc_2;
+
+ CHANGELOG_FILL_HTIME_DIR(priv->changelog_dir, htime_dir);
+ ret = mkdir_p(htime_dir, 0600, _gf_true);
+ if (ret)
+ goto dealloc_2;
+
+ CHANGELOG_FILL_CSNAP_DIR(priv->changelog_dir, csnap_dir);
+ ret = mkdir_p(csnap_dir, 0600, _gf_true);
+ if (ret)
+ goto dealloc_2;
+
+ GF_OPTION_INIT("changelog", priv->active, bool, dealloc_2);
+ GF_OPTION_INIT("changelog-notification", priv->rpc_active, bool, dealloc_2);
+ GF_OPTION_INIT("capture-del-path", priv->capture_del_path, bool, dealloc_2);
+
+ GF_OPTION_INIT("op-mode", tmp, str, dealloc_2);
+ changelog_assign_opmode(priv, tmp);
+
+ tmp = NULL;
+
+ GF_OPTION_INIT("encoding", tmp, str, dealloc_2);
+ changelog_assign_encoding(priv, tmp);
+ changelog_encode_change(priv);
+
+ GF_OPTION_INIT("rollover-time", priv->rollover_time, int32, dealloc_2);
+
+ GF_OPTION_INIT("fsync-interval", priv->fsync_interval, int32, dealloc_2);
+
+ GF_OPTION_INIT("changelog-barrier-timeout", timeout, time, dealloc_2);
+ changelog_assign_barrier_timeout(priv, timeout);
+
+ GF_ASSERT(cb_bootstrap[priv->op_mode].mode == priv->op_mode);
+ priv->cb = &cb_bootstrap[priv->op_mode];
+
+ /* ... now bootstrap the logger */
+ ret = priv->cb->ctor(this, &priv->cd);
+ if (ret)
+ goto dealloc_2;
+
+ priv->changelog_fd = -1;
+
+ return 0;
+
+dealloc_2:
+ GF_FREE(priv->changelog_dir);
+dealloc_1:
+ GF_FREE(priv->changelog_brick);
+error_return:
+ return -1;
+}
+
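+/*
+ * changelog_init_rpc(): sets up what live notification needs: the event
+ * selection state, the rot-buff ring used to hand events to dispatchers, and
+ * the RPC listener that consumers such as geo-replication and bitrot connect
+ * to over the brick's changelog socket.
+ */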
+static int
+changelog_init_rpc(xlator_t *this, changelog_priv_t *priv)
+{
+ rpcsvc_t *rpc = NULL;
+ changelog_ev_selector_t *selection = NULL;
+
+ selection = &priv->ev_selection;
+
+ /* initialize event selection */
+ changelog_init_event_selection(this, selection);
+
+ priv->rbuf = rbuf_init(NR_ROTT_BUFFS);
+ if (!priv->rbuf)
+ goto cleanup_thread;
+
+ rpc = changelog_init_rpc_listener(this, priv, priv->rbuf, NR_DISPATCHERS);
+ if (!rpc)
+ goto cleanup_rbuf;
+ priv->rpc = rpc;
+
+ return 0;
+
+cleanup_rbuf:
+ rbuf_dtor(priv->rbuf);
+cleanup_thread:
+ if (priv->poller)
+ (void)changelog_thread_cleanup(this, priv->poller);
+
+ return -1;
+}
+
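+/*
+ * init(): allocates the per-translator private structure, parses the options,
+ * initializes the barrier/drain pthread primitives, optionally brings up the
+ * RPC listener (when notification or journalling is enabled) and finally
+ * seeds the first changelog via changelog_init().
+ */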
+int32_t
+init(xlator_t *this)
+{
+ int ret = -1;
+ changelog_priv_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO("changelog", this, error_return);
+
+ if (!this->children || this->children->next) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_CHILD_MISCONFIGURED,
+ NULL);
+ goto error_return;
+ }
+
+ if (!this->parents) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, CHANGELOG_MSG_VOL_MISCONFIGURED,
+ NULL);
+ goto error_return;
+ }
+
+ priv = GF_CALLOC(1, sizeof(*priv), gf_changelog_mt_priv_t);
+ if (!priv)
+ goto error_return;
+
+ this->local_pool = mem_pool_new(changelog_local_t, 64);
+ if (!this->local_pool) {
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM, CHANGELOG_MSG_NO_MEMORY,
+ NULL);
+ goto cleanup_priv;
+ }
+
+ LOCK_INIT(&priv->lock);
+ LOCK_INIT(&priv->c_snap_lock);
+ GF_ATOMIC_INIT(priv->listnercnt, 0);
+ GF_ATOMIC_INIT(priv->clntcnt, 0);
+ GF_ATOMIC_INIT(priv->xprtcnt, 0);
+ INIT_LIST_HEAD(&priv->xprt_list);
+ priv->htime_fd = -1;
+
+ ret = changelog_init_options(this, priv);
+ if (ret)
+ goto cleanup_mempool;
+
+ /* snap dependency changes */
+ priv->dm.black_fop_cnt = 0;
+ priv->dm.white_fop_cnt = 0;
+ priv->dm.drain_wait_black = _gf_false;
+ priv->dm.drain_wait_white = _gf_false;
+ priv->current_color = FOP_COLOR_BLACK;
+ priv->explicit_rollover = _gf_false;
+
+ priv->cr.notify = _gf_false;
+ /* Mutex is not needed as threads are not spawned yet */
+ priv->bn.bnotify = _gf_false;
+ priv->bn.bnotify_error = _gf_false;
+ ret = changelog_barrier_pthread_init(this, priv);
+ if (ret)
+ goto cleanup_options;
+ LOCK_INIT(&priv->bflags.lock);
+ priv->bflags.barrier_ext = _gf_false;
+
+ /* Changelog barrier init */
+ INIT_LIST_HEAD(&priv->queue);
+ priv->barrier_enabled = _gf_false;
+
+ if (priv->rpc_active || priv->active) {
+ /* RPC ball rolling.. */
+ ret = changelog_init_rpc(this, priv);
+ if (ret)
+ goto cleanup_barrier;
+ priv->rpc_active = _gf_true;
+ }
+
+ ret = changelog_init(this, priv);
+ if (ret)
+ goto cleanup_rpc;
+
+ gf_msg_debug(this->name, 0, "changelog translator loaded");
+
+ this->private = priv;
+ return 0;
+
+cleanup_rpc:
+ if (priv->rpc_active) {
+ changelog_cleanup_rpc(this, priv);
+ }
+cleanup_barrier:
+ changelog_barrier_pthread_destroy(priv);
+cleanup_options:
+ changelog_freeup_options(this, priv);
+cleanup_mempool:
+ mem_pool_destroy(this->local_pool);
+ this->local_pool = NULL;
+cleanup_priv:
+ GF_FREE(priv);
+error_return:
+ this->private = NULL;
+ return -1;
+}
+
+void
+fini(xlator_t *this)
+{
+ changelog_priv_t *priv = NULL;
+ struct list_head queue = {
+ 0,
+ };
+
+ priv = this->private;
+
+ if (priv) {
+ if (priv->active || priv->rpc_active) {
+ /* terminate RPC server/threads */
+ changelog_cleanup_rpc(this, priv);
+ GF_FREE(priv->ev_dispatcher);
+ }
+ /* call barrier_disable to cancel timer */
+ if (priv->barrier_enabled)
+ __chlog_barrier_disable(this, &queue);
+
+ /* cleanup barrier related objects */
+ changelog_barrier_pthread_destroy(priv);
+
+ /* cleanup helper threads */
+ changelog_cleanup_helper_threads(this, priv);
+
+ /* cleanup allocated options */
+ changelog_freeup_options(this, priv);
+
+ /* deallocate mempool */
+ mem_pool_destroy(this->local_pool);
+
+ if (priv->htime_fd != -1) {
+ sys_close(priv->htime_fd);
+ }
+
+ /* finally, deallocate the private structure */
+ GF_FREE(priv);
+ }
+
+ this->private = NULL;
+ this->local_pool = NULL;
+
+ return;
+}
+
+struct xlator_fops fops = {
+ .open = changelog_open,
+ .mknod = changelog_mknod,
+ .mkdir = changelog_mkdir,
+ .create = changelog_create,
+ .symlink = changelog_symlink,
+ .writev = changelog_writev,
+ .truncate = changelog_truncate,
+ .ftruncate = changelog_ftruncate,
+ .link = changelog_link,
+ .rename = changelog_rename,
+ .unlink = changelog_unlink,
+ .rmdir = changelog_rmdir,
+ .setattr = changelog_setattr,
+ .fsetattr = changelog_fsetattr,
+ .setxattr = changelog_setxattr,
+ .fsetxattr = changelog_fsetxattr,
+ .removexattr = changelog_removexattr,
+ .fremovexattr = changelog_fremovexattr,
+ .ipc = changelog_ipc,
+ .xattrop = changelog_xattrop,
+ .fxattrop = changelog_fxattrop,
+};
+
+struct xlator_cbks cbks = {
+ .forget = changelog_forget,
+ .release = changelog_release,
+};
+
+struct volume_options options[] = {
+ {.key = {"changelog"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .description = "enable/disable change-logging",
+ .op_version = {3},
+ .flags = OPT_FLAG_SETTABLE,
+ .level = OPT_STATUS_BASIC,
+ .tags = {"journal", "georep", "glusterfind"}},
+ {.key = {"changelog-notification"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .description = "enable/disable changelog live notification",
+ .op_version = {3},
+ .level = OPT_STATUS_BASIC,
+ .tags = {"bitrot", "georep"}},
+ {.key = {"changelog-brick"},
+ .type = GF_OPTION_TYPE_PATH,
+ .description = "brick path to generate unique socket file name."
+ " should be the export directory of the volume strictly.",
+ .default_value = "{{ brick.path }}",
+ .op_version = {3},
+ .tags = {"journal"}},
+ {.key = {"changelog-dir"},
+ .type = GF_OPTION_TYPE_PATH,
+ .description = "directory for the changelog files",
+ .default_value = "{{ brick.path }}/.glusterfs/changelogs",
+ .op_version = {3},
+ .flags = OPT_FLAG_SETTABLE,
+ .level = OPT_STATUS_ADVANCED,
+ .tags = {"journal", "georep", "glusterfind"}},
+ {.key = {"op-mode"},
+ .type = GF_OPTION_TYPE_STR,
+ .default_value = "realtime",
+ .value = {"realtime"},
+ .description = "operation mode - futuristic operation modes",
+ .op_version = {3},
+ .tags = {"journal"}},
+ {.key = {"encoding"},
+ .type = GF_OPTION_TYPE_STR,
+ .default_value = "ascii",
+ .value = {"binary", "ascii"},
+ .description = "encoding type for changelogs",
+ .op_version = {3},
+ .flags = OPT_FLAG_SETTABLE,
+ .level = OPT_STATUS_ADVANCED,
+ .tags = {"journal"}},
+ {.key = {"rollover-time"},
+ .default_value = "15",
+ .type = GF_OPTION_TYPE_TIME,
+ .description = "time to switch to a new changelog file (in seconds)",
+ .op_version = {3},
+ .flags = OPT_FLAG_SETTABLE,
+ .level = OPT_STATUS_ADVANCED,
+ .tags = {"journal", "georep", "glusterfind"}},
+ {.key = {"fsync-interval"},
+ .type = GF_OPTION_TYPE_TIME,
+ .default_value = "5",
+ .description = "do not open CHANGELOG file with O_SYNC mode."
+ " instead perform fsync() at specified intervals",
+ .op_version = {3},
+ .flags = OPT_FLAG_SETTABLE,
+ .level = OPT_STATUS_ADVANCED,
+ .tags = {"journal"}},
+ {.key = {"changelog-barrier-timeout"},
+ .type = GF_OPTION_TYPE_TIME,
+ .default_value = BARRIER_TIMEOUT,
+ .description = "After 'timeout' seconds since the time 'barrier' "
+ "option was set to \"on\", unlink/rmdir/rename "
+ "operations are no longer blocked and previously "
+ "blocked fops are allowed to go through",
+ .op_version = {3},
+ .flags = OPT_FLAG_SETTABLE,
+ .level = OPT_STATUS_ADVANCED,
+ .tags = {"journal"}},
+ {.key = {"capture-del-path"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .description = "enable/disable capturing paths of deleted entries",
+ .op_version = {3},
+ .flags = OPT_FLAG_SETTABLE,
+ .level = OPT_STATUS_BASIC,
+ .tags = {"journal", "glusterfind"}},
+ {.key = {NULL}},
+};
+
+xlator_api_t xlator_api = {
+ .init = init,
+ .fini = fini,
+ .notify = notify,
+ .reconfigure = reconfigure,
+ .mem_acct_init = mem_acct_init,
+ .op_version = {1}, /* Present from the initial version */
+ .fops = &fops,
+ .cbks = &cbks,
+ .options = options,
+ .identifier = "changelog",
+ .category = GF_MAINTAINED,
+};
diff --git a/xlators/features/mac-compat/Makefile.am b/xlators/features/cloudsync/Makefile.am
index d471a3f9243..a985f42a877 100644
--- a/xlators/features/mac-compat/Makefile.am
+++ b/xlators/features/cloudsync/Makefile.am
@@ -1,3 +1,3 @@
SUBDIRS = src
-CLEANFILES =
+CLEANFILES =
diff --git a/xlators/features/cloudsync/src/Makefile.am b/xlators/features/cloudsync/src/Makefile.am
new file mode 100644
index 00000000000..e2a277e372b
--- /dev/null
+++ b/xlators/features/cloudsync/src/Makefile.am
@@ -0,0 +1,46 @@
+SUBDIRS = cloudsync-plugins
+
+xlator_LTLIBRARIES = cloudsync.la
+
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
+
+cloudsync_sources = cloudsync.c
+
+CLOUDSYNC_SRC = $(top_srcdir)/xlators/features/cloudsync/src
+CLOUDSYNC_BLD = $(top_builddir)/xlators/features/cloudsync/src
+
+cloudsynccommon_sources = $(CLOUDSYNC_SRC)/cloudsync-common.c
+
+noinst_HEADERS = $(CLOUDSYNC_BLD)/cloudsync.h \
+ $(CLOUDSYNC_BLD)/cloudsync-mem-types.h \
+ $(CLOUDSYNC_BLD)/cloudsync-messages.h \
+ $(CLOUDSYNC_BLD)/cloudsync-common.h
+
+cloudsync_la_SOURCES = $(cloudsync_sources) $(cloudsynccommon_sources)
+
+nodist_cloudsync_la_SOURCES = cloudsync-autogen-fops.c cloudsync-autogen-fops.h
+BUILT_SOURCES = cloudsync-autogen-fops.h
+
+cloudsync_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+
+cloudsync_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la $(LIB_DL)
+
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \
+ -DCS_PLUGINDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/cloudsync-plugins\"
+AM_CFLAGS = -Wall -fno-strict-aliasing $(GF_CFLAGS)
+
+noinst_PYTHON = cloudsync-fops-c.py cloudsync-fops-h.py
+EXTRA_DIST = cloudsync-autogen-fops-tmpl.c cloudsync-autogen-fops-tmpl.h
+
+cloudsync-autogen-fops.c: cloudsync-fops-c.py cloudsync-autogen-fops-tmpl.c
+ $(PYTHON) $(CLOUDSYNC_SRC)/cloudsync-fops-c.py \
+ $(CLOUDSYNC_SRC)/cloudsync-autogen-fops-tmpl.c > $@
+
+cloudsync-autogen-fops.h: cloudsync-fops-h.py cloudsync-autogen-fops-tmpl.h
+ $(PYTHON) $(CLOUDSYNC_SRC)/cloudsync-fops-h.py \
+ $(CLOUDSYNC_SRC)/cloudsync-autogen-fops-tmpl.h > $@
+
+CLEANFILES = $(nodist_cloudsync_la_SOURCES)
+
+uninstall-local:
+ rm -f $(DESTDIR)$(xlatordir)/cloudsync.so
diff --git a/xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.c b/xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.c
new file mode 100644
index 00000000000..ee63f983980
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.c
@@ -0,0 +1,30 @@
+/*
+ Copyright (c) 2008-2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+/* File: cloudsync-autogen-fops-tmpl.c
+ * This file is the template for the cloudsync autogenerated FOPs. It is run
+ * through the code generator (cloudsync-fops-c.py, built on generator.py) to
+ * produce the required FOPs.
+ */
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <dlfcn.h>
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+#include "cloudsync.h"
+#include "cloudsync-common.h"
+#include <glusterfs/call-stub.h>
+
+#pragma generate
diff --git a/xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.h b/xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.h
new file mode 100644
index 00000000000..d922c77d8aa
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.h
@@ -0,0 +1,24 @@
+/*
+ Copyright (c) 2008-2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+/* File: cloudsync-autogen-fops-tmpl.h
+ * This file contains the cloudsync autogenerated FOP declarations.
+ */
+
+#ifndef _CLOUDSYNC_AUTOGEN_FOPS_H
+#define _CLOUDSYNC_AUTOGEN_FOPS_H
+
+#include <glusterfs/xlator.h>
+#include "cloudsync.h"
+#include "cloudsync-common.h"
+
+#pragma generate
+
+#endif /* _CLOUDSYNC_AUTOGEN_FOPS_H */
diff --git a/xlators/features/cloudsync/src/cloudsync-common.c b/xlators/features/cloudsync/src/cloudsync-common.c
new file mode 100644
index 00000000000..445a31b90e7
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-common.c
@@ -0,0 +1,60 @@
+/*
+ Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "cloudsync-common.h"
+
+void
+cs_xattrinfo_wipe(cs_local_t *local)
+{
+ if (local->xattrinfo.lxattr) {
+ if (local->xattrinfo.lxattr->file_path)
+ GF_FREE(local->xattrinfo.lxattr->file_path);
+
+ if (local->xattrinfo.lxattr->volname)
+ GF_FREE(local->xattrinfo.lxattr->volname);
+
+ GF_FREE(local->xattrinfo.lxattr);
+ }
+}
+
+void
+cs_local_wipe(xlator_t *this, cs_local_t *local)
+{
+ if (!local)
+ return;
+
+ loc_wipe(&local->loc);
+
+ if (local->fd) {
+ fd_unref(local->fd);
+ local->fd = NULL;
+ }
+
+ if (local->stub) {
+ call_stub_destroy(local->stub);
+ local->stub = NULL;
+ }
+
+ if (local->xattr_req)
+ dict_unref(local->xattr_req);
+
+ if (local->xattr_rsp)
+ dict_unref(local->xattr_rsp);
+
+ if (local->dlfd)
+ fd_unref(local->dlfd);
+
+ if (local->remotepath)
+ GF_FREE(local->remotepath);
+
+ cs_xattrinfo_wipe(local);
+
+ mem_put(local);
+}
diff --git a/xlators/features/cloudsync/src/cloudsync-common.h b/xlators/features/cloudsync/src/cloudsync-common.h
new file mode 100644
index 00000000000..11d233460a4
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-common.h
@@ -0,0 +1,134 @@
+/*
+ Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _CLOUDSYNC_COMMON_H
+#define _CLOUDSYNC_COMMON_H
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/syncop.h>
+#include <glusterfs/compat-errno.h>
+#include "cloudsync-mem-types.h"
+#include "cloudsync-messages.h"
+
+typedef struct cs_loc_xattr {
+ char *file_path;
+ uuid_t uuid;
+ uuid_t gfid;
+ char *volname;
+} cs_loc_xattr_t;
+
+typedef struct cs_size_xattr {
+ uint64_t size;
+ uint64_t blksize;
+ uint64_t blocks;
+} cs_size_xattr_t;
+
+typedef struct cs_local {
+ loc_t loc;
+ fd_t *fd;
+ call_stub_t *stub;
+ call_frame_t *main_frame;
+ int op_errno;
+ int op_ret;
+ fd_t *dlfd;
+ off_t dloffset;
+ struct iatt stbuf;
+ dict_t *xattr_rsp;
+ dict_t *xattr_req;
+ glusterfs_fop_t fop;
+ gf_boolean_t locked;
+ int call_cnt;
+ inode_t *inode;
+ char *remotepath;
+
+ struct {
+ /* offset, flags and size are the information needed
+ * by read fop for remote read operation. These will be
+ * populated in cloudsync read fop, before being passed
+ * on to the plugin performing remote read.
+ */
+ off_t offset;
+ uint32_t flags;
+ size_t size;
+ cs_loc_xattr_t *lxattr;
+ } xattrinfo;
+
+} cs_local_t;
+
+typedef int (*fop_download_t)(call_frame_t *frame, void *config);
+
+typedef int (*fop_remote_read_t)(call_frame_t *, void *);
+
+typedef void *(*store_init)(xlator_t *this);
+
+typedef int (*store_reconfigure)(xlator_t *this, dict_t *options);
+
+typedef void (*store_fini)(void *config);
+
+struct cs_remote_stores {
+ char *name; /* store name */
+ void *config; /* store related information */
+ fop_download_t dlfop; /* store specific download function */
+ fop_remote_read_t rdfop; /* store specific read function */
+ store_init init; /* store init to initialize store config */
+ store_reconfigure reconfigure; /* reconfigure store config */
+ store_fini fini;
+ void *handle; /* shared library handle*/
+};
+
+typedef struct cs_private {
+ xlator_t *this;
+ struct cs_remote_stores *stores;
+ gf_boolean_t abortdl;
+ pthread_spinlock_t lock;
+ gf_boolean_t remote_read;
+} cs_private_t;
+
+void
+cs_local_wipe(xlator_t *this, cs_local_t *local);
+
+void
+cs_xattrinfo_wipe(cs_local_t *local);
+
+#define CS_STACK_UNWIND(fop, frame, params...) \
+ do { \
+ cs_local_t *__local = NULL; \
+ xlator_t *__xl = NULL; \
+ if (frame) { \
+ __xl = frame->this; \
+ __local = frame->local; \
+ frame->local = NULL; \
+ } \
+ STACK_UNWIND_STRICT(fop, frame, params); \
+ cs_local_wipe(__xl, __local); \
+ } while (0)
+
+#define CS_STACK_DESTROY(frame) \
+ do { \
+ cs_local_t *__local = NULL; \
+ xlator_t *__xl = NULL; \
+ __xl = frame->this; \
+ __local = frame->local; \
+ frame->local = NULL; \
+ STACK_DESTROY(frame->root); \
+ cs_local_wipe(__xl, __local); \
+ } while (0)
+
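+/* Interface a remote-store plugin must provide. A plugin (for instance the
+ * S3 plugin in this patch) fills a store_methods_t with its download, remote
+ * read, init, reconfigure and fini callbacks; fop_init returns an opaque
+ * store config that is handed back to the other callbacks. The symbol name
+ * the cloudsync xlator resolves from the plugin is assumed here to be
+ * "store_ops", as exported by the S3 plugin below. */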
+typedef struct store_methods {
+ int (*fop_download)(call_frame_t *frame, void *config);
+ int (*fop_remote_read)(call_frame_t *, void *);
+ /* return type should be the store config */
+ void *(*fop_init)(xlator_t *this);
+ int (*fop_reconfigure)(xlator_t *this, dict_t *options);
+ void (*fop_fini)(void *config);
+} store_methods_t;
+
+#endif /* _CLOUDSYNC_COMMON_H */
diff --git a/xlators/features/cloudsync/src/cloudsync-fops-c.py b/xlators/features/cloudsync/src/cloudsync-fops-c.py
new file mode 100755
index 00000000000..c27df97ae58
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-fops-c.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python3
+
+from __future__ import print_function
+import os
+import sys
+
+curdir = os.path.dirname(sys.argv[0])
+gendir = os.path.join(curdir, '../../../../libglusterfs/src')
+sys.path.append(gendir)
+from generator import ops, fop_subs, cbk_subs, generate
+
+FD_DATA_MODIFYING_OP_FOP_TEMPLATE = """
+int32_t
+cs_@NAME@ (call_frame_t *frame, xlator_t *this,
+ @LONG_ARGS@)
+{
+ int op_errno = EINVAL;
+ cs_local_t *local = NULL;
+ int ret = 0;
+ cs_inode_ctx_t *ctx = NULL;
+ gf_cs_obj_state state = -1;
+
+ VALIDATE_OR_GOTO (frame, err);
+ VALIDATE_OR_GOTO (this, err);
+ VALIDATE_OR_GOTO (fd, err);
+
+ local = cs_local_init (this, frame, NULL, fd, GF_FOP_@UPNAME@);
+ if (!local) {
+
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "local init failed");
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ __cs_inode_ctx_get (this, fd->inode, &ctx);
+
+ if (ctx)
+ state = __cs_get_file_state (fd->inode, ctx);
+ else
+ state = GF_CS_LOCAL;
+
+ xdata = xdata ? dict_ref (xdata) : dict_new ();
+
+ if (!xdata) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "insufficient memory");
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ local->xattr_req = xdata;
+
+ ret = dict_set_uint32 (local->xattr_req, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "dict_set failed key:"
+ " %s", GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+
+ local->stub = fop_@NAME@_stub (frame, cs_resume_@NAME@,
+ @SHORT_ARGS@);
+ if (!local->stub) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "insufficient memory");
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+
+ if (state == GF_CS_LOCAL) {
+ STACK_WIND (frame, cs_@NAME@_cbk,
+ FIRST_CHILD(this), FIRST_CHILD(this)->fops->@NAME@,
+ @SHORT_ARGS@);
+ } else {
+ local->call_cnt++;
+ ret = locate_and_execute (frame);
+ if (ret) {
+ op_errno = ENOMEM;
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ CS_STACK_UNWIND (@NAME@, frame, -1, op_errno, @CBK_ERROR_ARGS@);
+
+ return 0;
+}
+"""
+
+FD_DATA_MODIFYING_RESUME_OP_FOP_TEMPLATE = """
+int32_t
+cs_resume_@NAME@ (call_frame_t *frame, xlator_t *this,
+ @LONG_ARGS@)
+{
+ int ret = 0;
+
+ ret = cs_resume_postprocess (this, frame, fd->inode);
+ if (ret) {
+ goto unwind;
+ }
+
+ cs_inodelk_unlock (frame);
+
+ STACK_WIND (frame, cs_@NAME@_cbk,
+ FIRST_CHILD(this), FIRST_CHILD(this)->fops->@NAME@,
+ @SHORT_ARGS@);
+
+ return 0;
+
+unwind:
+
+ cs_inodelk_unlock (frame);
+
+ cs_common_cbk (frame);
+
+ return 0;
+}
+"""
+FD_DATA_MODIFYING_OP_FOP_CBK_TEMPLATE = """
+int32_t
+cs_@NAME@_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ @LONG_ARGS@)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+ uint64_t val = 0;
+ fd_t *fd = NULL;
+
+ local = frame->local;
+ fd = local->fd;
+
+ /* Do we need lock here? */
+ local->call_cnt++;
+
+ if (op_ret == -1) {
+ ret = dict_get_uint64 (xdata, GF_CS_OBJECT_STATUS, &val);
+ if (ret == 0) {
+ if (val == GF_CS_ERROR) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0,
+ "could not get file state, unwinding");
+ op_ret = -1;
+ op_errno = EIO;
+ goto unwind;
+ } else {
+ __cs_inode_ctx_update (this, fd->inode, val);
+ gf_msg (this->name, GF_LOG_INFO, 0, 0,
+ " state = %" PRIu64, val);
+
+ if (local->call_cnt == 1 &&
+ (val == GF_CS_REMOTE ||
+ val == GF_CS_DOWNLOADING)) {
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ 0, " will repair and download "
+ "the file, current state : %"
+ PRIu64, val);
+ goto repair;
+ } else {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0,
+ "second @NAME@, Unwinding");
+ goto unwind;
+ }
+ }
+ } else {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "file state "
+ "could not be figured, unwinding");
+ goto unwind;
+ }
+ } else {
+ /* successful @NAME@ => file is local */
+ __cs_inode_ctx_update (this, fd->inode, GF_CS_LOCAL);
+ gf_msg (this->name, GF_LOG_INFO, 0, 0, "state : GF_CS_LOCAL"
+ ", @NAME@ successful");
+
+ goto unwind;
+ }
+
+repair:
+ ret = locate_and_execute (frame);
+ if (ret) {
+ goto unwind;
+ }
+
+ return 0;
+
+unwind:
+ CS_STACK_UNWIND (@NAME@, frame, op_ret, op_errno, @SHORT_ARGS@);
+
+ return 0;
+}
+"""
+
+LOC_STAT_OP_FOP_TEMPLATE = """
+int32_t
+cs_@NAME@ (call_frame_t *frame, xlator_t *this,
+ @LONG_ARGS@)
+{
+ int op_errno = EINVAL;
+ cs_local_t *local = NULL;
+ int ret = 0;
+
+ local = cs_local_init (this, frame, loc, NULL, GF_FOP_@UPNAME@);
+ if (!local) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "local is NULL");
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ if (loc->inode->ia_type == IA_IFDIR)
+ goto wind;
+
+ xdata = xdata ? dict_ref (xdata) : dict_new ();
+
+ if (!xdata) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "insufficient memory");
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ local->xattr_req = xdata;
+
+ ret = dict_set_uint32 (local->xattr_req, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "dict_set failed key:"
+ " %s", GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+
+wind:
+ STACK_WIND (frame, cs_@NAME@_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->@NAME@,
+ @SHORT_ARGS@);
+
+ return 0;
+err:
+ CS_STACK_UNWIND (@NAME@, frame, -1, op_errno, @CBK_ERROR_ARGS@);
+
+ return 0;
+}
+"""
+
+LOC_STAT_OP_FOP_CBK_TEMPLATE = """
+int32_t
+cs_@NAME@_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ @LONG_ARGS@)
+{
+ int ret = 0;
+ uint64_t val = 0;
+ loc_t *loc = NULL;
+ cs_local_t *local = NULL;
+
+ local = frame->local;
+
+ loc = &local->loc;
+
+ if (op_ret == 0) {
+ ret = dict_get_uint64 (xdata, GF_CS_OBJECT_STATUS, &val);
+ if (!ret) {
+ ret = __cs_inode_ctx_update (this, loc->inode, val);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0,
+ "ctx update failed");
+ }
+ }
+ } else {
+ cs_inode_ctx_reset (this, loc->inode);
+ }
+
+ CS_STACK_UNWIND (@NAME@, frame, op_ret, op_errno, @SHORT_ARGS@);
+
+ return 0;
+}
+"""
+
+# All xlator FOPs are covered in the following section purely for clarity.
+# The lists themselves are not used.
+entry_ops = ['mknod', 'mkdir', 'unlink', 'rmdir', 'symlink', 'rename', 'link',
+ 'create']
+special_ops = ['statfs', 'lookup', 'ipc', 'compound', 'icreate', 'namelink']
+ignored_ops = ['getspec']
+inode_ops = ['stat', 'readlink', 'truncate', 'open', 'setxattr', 'getxattr',
+ 'removexattr', 'opendir', 'access', 'inodelk', 'entrylk',
+ 'xattrop', 'setattr', 'lease', 'getactivelk', 'setactivelk',
+ 'discover']
+fd_ops = ['readv', 'writev', 'flush', 'fsync', 'fsyncdir', 'ftruncate',
+ 'fstat', 'lk', 'readdir', 'finodelk', 'fentrylk', 'fxattrop',
+ 'fsetxattr', 'fgetxattr', 'rchecksum', 'fsetattr', 'readdirp',
+ 'fremovexattr', 'fallocate', 'discard', 'zerofill', 'seek']
+
+
+# These are the current actual lists used to generate the code
+
+# The following list contains fops which are fd based and modify data
+fd_data_modify_op_fop_template = ['writev', 'flush', 'fsync',
+ 'ftruncate', 'rchecksum', 'fallocate',
+ 'discard', 'zerofill', 'seek']
+
+# The following list contains fops which are entry based and do not change
+# data
+loc_stat_op_fop_template = ['lookup', 'stat', 'discover', 'access', 'setattr',
+ 'getattr']
+
+# These fops need a separate implementation
+special_fops = ['statfs', 'setxattr', 'unlink', 'getxattr',
+ 'truncate', 'fstat', 'readv', 'readdirp']
+
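+# Illustrative sketch (not executed by this script): for a fop such as
+# 'writev' the code below effectively calls
+#   generate(FD_DATA_MODIFYING_OP_FOP_TEMPLATE, 'writev', fop_subs)
+# which substitutes @NAME@ -> writev, @UPNAME@ -> WRITEV and expands the
+# @LONG_ARGS@/@SHORT_ARGS@ placeholders from the fop signature tables in
+# libglusterfs' generator module.
+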
+def gen_defaults():
+ for name in ops:
+ if name in fd_data_modify_op_fop_template:
+ print(generate(FD_DATA_MODIFYING_OP_FOP_CBK_TEMPLATE, name, cbk_subs))
+ print(generate(FD_DATA_MODIFYING_RESUME_OP_FOP_TEMPLATE, name, fop_subs))
+ print(generate(FD_DATA_MODIFYING_OP_FOP_TEMPLATE, name, fop_subs))
+ elif name in loc_stat_op_fop_template:
+ print(generate(LOC_STAT_OP_FOP_CBK_TEMPLATE, name, cbk_subs))
+ print(generate(LOC_STAT_OP_FOP_TEMPLATE, name, fop_subs))
+
+for l in open(sys.argv[1], 'r').readlines():
+ if l.find('#pragma generate') != -1:
+ print("/* BEGIN GENERATED CODE - DO NOT MODIFY */")
+ gen_defaults()
+ print("/* END GENERATED CODE */")
+ else:
+ print(l[:-1])
diff --git a/xlators/features/cloudsync/src/cloudsync-fops-h.py b/xlators/features/cloudsync/src/cloudsync-fops-h.py
new file mode 100755
index 00000000000..faa2de651a7
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-fops-h.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python3
+
+from __future__ import print_function
+import os
+import sys
+
+curdir = os.path.dirname(sys.argv[0])
+gendir = os.path.join(curdir, '../../../../libglusterfs/src')
+sys.path.append(gendir)
+from generator import ops, fop_subs, cbk_subs, generate
+
+OP_FOP_TEMPLATE = """
+int32_t
+cs_@NAME@ (call_frame_t *frame, xlator_t *this,
+ @LONG_ARGS@);
+"""
+
+def gen_defaults():
+ for name, value in ops.items():
+ if name == 'getspec':
+ continue
+ print(generate(OP_FOP_TEMPLATE, name, fop_subs))
+
+
+for l in open(sys.argv[1], 'r').readlines():
+ if l.find('#pragma generate') != -1:
+ print("/* BEGIN GENERATED CODE - DO NOT MODIFY */")
+ gen_defaults()
+ print("/* END GENERATED CODE */")
+ else:
+ print(l[:-1])
diff --git a/xlators/features/cloudsync/src/cloudsync-mem-types.h b/xlators/features/cloudsync/src/cloudsync-mem-types.h
new file mode 100644
index 00000000000..220346405d0
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-mem-types.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#ifndef __CLOUDSYNC_MEM_TYPES_H__
+#define __CLOUDSYNC_MEM_TYPES_H__
+
+#include <glusterfs/mem-types.h>
+enum cs_mem_types_ {
+ gf_cs_mt_cs_private_t = gf_common_mt_end + 1,
+ gf_cs_mt_cs_remote_stores_t,
+ gf_cs_mt_cs_inode_ctx_t,
+ gf_cs_mt_cs_lxattr_t,
+ gf_cs_mt_end
+};
+#endif /* __CLOUDSYNC_MEM_TYPES_H__ */
diff --git a/xlators/features/cloudsync/src/cloudsync-messages.h b/xlators/features/cloudsync/src/cloudsync-messages.h
new file mode 100644
index 00000000000..fb08f72de7f
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-messages.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#ifndef __CLOUDSYNC_MESSAGES_H__
+#define __CLOUDSYNC_MESSAGES_H__
+
+/*TODO: define relevant message ids */
+
+#endif /* __CLOUDSYNC_MESSAGES_H__ */
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/Makefile.am b/xlators/features/cloudsync/src/cloudsync-plugins/Makefile.am
new file mode 100644
index 00000000000..a985f42a877
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/Makefile.am b/xlators/features/cloudsync/src/cloudsync-plugins/src/Makefile.am
new file mode 100644
index 00000000000..fb6b0580c6d
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/Makefile.am
@@ -0,0 +1,11 @@
+if BUILD_AMAZONS3_PLUGIN
+ AMAZONS3_DIR = cloudsyncs3
+endif
+
+if BUILD_CVLT_PLUGIN
+ CVLT_DIR = cvlt
+endif
+
+SUBDIRS = ${AMAZONS3_DIR} ${CVLT_DIR}
+
+CLEANFILES =
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/Makefile.am b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/Makefile.am
new file mode 100644
index 00000000000..a985f42a877
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/Makefile.am b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/Makefile.am
new file mode 100644
index 00000000000..6509426ef87
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/Makefile.am
@@ -0,0 +1,12 @@
+csp_LTLIBRARIES = cloudsyncs3.la
+cspdir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/cloudsync-plugins
+
+cloudsyncs3_la_SOURCES = libcloudsyncs3.c $(top_srcdir)/xlators/features/cloudsync/src/cloudsync-common.c
+cloudsyncs3_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+cloudsyncs3_la_LDFLAGS = -module -export-symbols $(top_srcdir)/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.sym $(GF_XLATOR_LDFLAGS)
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src -lcurlpp -lcryptopp
+noinst_HEADERS = libcloudsyncs3.h libcloudsyncs3-mem-types.h
+AM_CFLAGS = -Wall -fno-strict-aliasing $(GF_CFLAGS) -lcurl -lcrypto -I$(top_srcdir)/xlators/features/cloudsync/src
+CLEANFILES =
+
+EXTRA_DIST = libcloudsyncs3.sym
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3-mem-types.h b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3-mem-types.h
new file mode 100644
index 00000000000..7ccfcc9f4b6
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3-mem-types.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#ifndef __LIBAWS_MEM_TYPES_H__
+#define __LIBAWS_MEM_TYPES_H__
+
+#include <glusterfs/mem-types.h>
+enum libaws_mem_types_ {
+ gf_libaws_mt_aws_private_t = gf_common_mt_end + 1,
+ gf_libaws_mt_end
+};
+#endif /* __LIBAWS_MEM_TYPES_H__ */
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.c b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.c
new file mode 100644
index 00000000000..23c3599825a
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.c
@@ -0,0 +1,584 @@
+/*
+ Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <stdlib.h>
+#include <openssl/hmac.h>
+#include <openssl/evp.h>
+#include <openssl/bio.h>
+#include <openssl/buffer.h>
+#include <openssl/crypto.h>
+#include <curl/curl.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/glusterfs.h>
+#include "libcloudsyncs3.h"
+#include "cloudsync-common.h"
+
+#define RESOURCE_SIZE 4096
+
+store_methods_t store_ops = {
+ .fop_download = aws_download_s3,
+ .fop_init = aws_init,
+ .fop_reconfigure = aws_reconfigure,
+ .fop_fini = aws_fini,
+};
+
+typedef struct aws_private {
+ char *hostname;
+ char *bucketid;
+ char *awssekey;
+ char *awskeyid;
+ gf_boolean_t abortdl;
+ pthread_spinlock_t lock;
+} aws_private_t;
+
+void *
+aws_init(xlator_t *this)
+{
+ aws_private_t *priv = NULL;
+ char *temp_str = NULL;
+ int ret = 0;
+
+ priv = GF_CALLOC(1, sizeof(aws_private_t), gf_libaws_mt_aws_private_t);
+ if (!priv) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "insufficient memory");
+ return NULL;
+ }
+
+ priv->abortdl = _gf_false;
+
+ pthread_spin_init(&priv->lock, PTHREAD_PROCESS_PRIVATE);
+
+ pthread_spin_lock(&(priv->lock));
+ {
+ if (dict_get_str(this->options, "s3plugin-seckey", &temp_str) == 0) {
+ priv->awssekey = gf_strdup(temp_str);
+ if (!priv->awssekey) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws secret key failed");
+ ret = -1;
+ goto unlock;
+ }
+ }
+
+ if (dict_get_str(this->options, "s3plugin-keyid", &temp_str) == 0) {
+ priv->awskeyid = gf_strdup(temp_str);
+ if (!priv->awskeyid) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws key ID failed");
+ ret = -1;
+ goto unlock;
+ }
+ }
+
+ if (dict_get_str(this->options, "s3plugin-bucketid", &temp_str) == 0) {
+ priv->bucketid = gf_strdup(temp_str);
+ if (!priv->bucketid) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws bucketid failed");
+
+ ret = -1;
+ goto unlock;
+ }
+ }
+
+ if (dict_get_str(this->options, "s3plugin-hostname", &temp_str) == 0) {
+ priv->hostname = gf_strdup(temp_str);
+ if (!priv->hostname) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws hostname failed");
+
+ ret = -1;
+ goto unlock;
+ }
+ }
+
+ gf_msg_debug(this->name, 0,
+ "stored key: %s id: %s "
+ "bucketid %s hostname: %s",
+ priv->awssekey, priv->awskeyid, priv->bucketid,
+ priv->hostname);
+ }
+unlock:
+ pthread_spin_unlock(&(priv->lock));
+
+ if (ret == -1) {
+ GF_FREE(priv->awskeyid);
+ GF_FREE(priv->awssekey);
+ GF_FREE(priv->bucketid);
+ GF_FREE(priv->hostname);
+ GF_FREE(priv);
+ priv = NULL;
+ }
+
+ return (void *)priv;
+}
+
+int
+aws_reconfigure(xlator_t *this, dict_t *options)
+{
+ aws_private_t *priv = NULL;
+ char *temp_str = NULL;
+ int ret = 0;
+ cs_private_t *cspriv = NULL;
+
+ cspriv = this->private;
+
+ priv = cspriv->stores->config;
+
+ if (!priv) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "null priv");
+ return -1;
+ }
+
+ pthread_spin_lock(&(priv->lock));
+ {
+ if (dict_get_str(options, "s3plugin-seckey", &temp_str) == 0) {
+ priv->awssekey = gf_strdup(temp_str);
+ if (!priv->awssekey) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws secret key failed");
+ ret = -1;
+ goto out;
+ }
+ }
+
+ if (dict_get_str(options, "s3plugin-keyid", &temp_str) == 0) {
+ priv->awskeyid = gf_strdup(temp_str);
+ if (!priv->awskeyid) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws key ID failed");
+ ret = -1;
+ goto out;
+ }
+ }
+
+ if (dict_get_str(options, "s3plugin-bucketid", &temp_str) == 0) {
+ priv->bucketid = gf_strdup(temp_str);
+ if (!priv->bucketid) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws bucketid failed");
+ ret = -1;
+ goto out;
+ }
+ }
+
+ if (dict_get_str(options, "s3plugin-hostname", &temp_str) == 0) {
+ priv->hostname = gf_strdup(temp_str);
+ if (!priv->hostname) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws hostname failed");
+ ret = -1;
+ goto out;
+ }
+ }
+ }
+out:
+ pthread_spin_unlock(&(priv->lock));
+
+ gf_msg_debug(this->name, 0,
+ "stored key: %s id: %s "
+ "bucketid %s hostname: %s",
+ priv->awssekey, priv->awskeyid, priv->bucketid,
+ priv->hostname);
+
+ return ret;
+}
+
+void
+aws_fini(void *config)
+{
+ aws_private_t *priv = NULL;
+
+ priv = (aws_private_t *)config;
+
+ if (priv) {
+ GF_FREE(priv->hostname);
+ GF_FREE(priv->bucketid);
+ GF_FREE(priv->awssekey);
+ GF_FREE(priv->awskeyid);
+
+ pthread_spin_destroy(&priv->lock);
+ GF_FREE(priv);
+ }
+}
+
+int32_t
+mem_acct_init(xlator_t *this)
+{
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO("dht", this, out);
+
+ ret = xlator_mem_acct_init(this, gf_libaws_mt_end + 1);
+
+ if (ret != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "Memory accounting init failed");
+ return ret;
+ }
+out:
+ return ret;
+}
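+
+/*
+ * Builds the string-to-sign for an AWS Signature Version 2 request:
+ * "<VERB>\n<Content-MD5>\n<Content-Type>\n<Date>\n/<bucket>/<path>", with
+ * the Content-MD5 and Content-Type fields left empty, and returns the HTTP
+ * Date header value through *date. Both returned buffers must be freed by
+ * the caller with GF_FREE().
+ */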
+char *
+aws_form_request(char *resource, char **date, char *reqtype, char *bucketid,
+ char *filepath)
+{
+ char httpdate[256];
+ time_t ctime;
+ struct tm *gtime = NULL;
+ char *sign_req = NULL;
+ int signreq_len = -1;
+ int date_len = -1;
+ int res_len = -1;
+
+ ctime = gf_time();
+ gtime = gmtime(&ctime);
+
+ date_len = strftime(httpdate, sizeof(httpdate),
+ "%a, %d %b %Y %H:%M:%S +0000", gtime);
+
+ *date = gf_strndup(httpdate, date_len);
+ if (*date == NULL) {
+ gf_msg("CS", GF_LOG_ERROR, ENOMEM, 0,
+ "memory allocation "
+ "failure for date");
+ goto out;
+ }
+
+ res_len = snprintf(resource, RESOURCE_SIZE, "%s/%s", bucketid, filepath);
+
+ gf_msg_debug("CS", 0, "resource %s", resource);
+
+ /* 6 accounts for the 4 new line chars, one forward slash and
+ * one null char */
+ signreq_len = res_len + date_len + strlen(reqtype) + 6;
+
+ sign_req = GF_MALLOC(signreq_len, gf_common_mt_char);
+ if (sign_req == NULL) {
+ gf_msg("CS", GF_LOG_ERROR, ENOMEM, 0,
+ "memory allocation "
+ "failure for sign_req");
+ goto out;
+ }
+
+ snprintf(sign_req, signreq_len, "%s\n\n%s\n%s\n/%s", reqtype, "", *date,
+ resource);
+
+out:
+ return sign_req;
+}
+
+char *
+aws_b64_encode(const unsigned char *input, int length)
+{
+ BIO *bio, *b64;
+ BUF_MEM *bptr;
+ char *buff = NULL;
+
+ b64 = BIO_new(BIO_f_base64());
+ bio = BIO_new(BIO_s_mem());
+ b64 = BIO_push(b64, bio);
+ BIO_write(b64, input, length);
+ BIO_flush(b64);
+ BIO_get_mem_ptr(b64, &bptr);
+
+ buff = GF_MALLOC(bptr->length, gf_common_mt_char);
+ memcpy(buff, bptr->data, bptr->length - 1);
+ buff[bptr->length - 1] = 0;
+
+ BIO_free_all(b64);
+
+ return buff;
+}
+
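+/*
+ * Signs the request string with HMAC-SHA1 keyed by the AWS secret key and
+ * returns the base64 encoding of the digest, i.e. the AWS v2 request
+ * signature. The returned buffer is allocated with GF_MALLOC and must be
+ * freed by the caller.
+ */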
+char *
+aws_sign_request(char *const str, char *awssekey)
+{
+#if (OPENSSL_VERSION_NUMBER < 0x1010002f)
+ HMAC_CTX ctx;
+#endif
+ HMAC_CTX *pctx = NULL;
+
+ unsigned char md[256];
+ unsigned len;
+ char *base64 = NULL;
+
+#if (OPENSSL_VERSION_NUMBER < 0x1010002f)
+ HMAC_CTX_init(&ctx);
+ pctx = &ctx;
+#else
+ pctx = HMAC_CTX_new();
+#endif
+ HMAC_Init_ex(pctx, awssekey, strlen(awssekey), EVP_sha1(), NULL);
+ HMAC_Update(pctx, (unsigned char *)str, strlen(str));
+ HMAC_Final(pctx, (unsigned char *)md, &len);
+
+#if (OPENSSL_VERSION_NUMBER < 0x1010002f)
+ HMAC_CTX_cleanup(pctx);
+#else
+ HMAC_CTX_free(pctx);
+#endif
+ base64 = aws_b64_encode(md, len);
+
+ return base64;
+}
+
+int
+aws_dlwritev_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, struct iatt *prebuf, struct iatt *postbuf,
+ dict_t *xdata)
+{
+ aws_private_t *priv = NULL;
+
+ if (op_ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, op_errno,
+ "write failed "
+ ". Aborting Download");
+
+ priv = this->private;
+ pthread_spin_lock(&(priv->lock));
+ {
+ priv->abortdl = _gf_true;
+ }
+ pthread_spin_unlock(&(priv->lock));
+ }
+
+ CS_STACK_DESTROY(frame);
+
+ return op_ret;
+}
+
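+/*
+ * curl CURLOPT_WRITEFUNCTION callback: every chunk received from the remote
+ * store is copied into an iobuf and written to the local brick through a
+ * writev on FIRST_CHILD at the running download offset. Returning a value
+ * different from size * nitems makes curl abort the transfer, which is how a
+ * failed local write (or the abortdl flag) stops the download.
+ */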
+size_t
+aws_write_callback(void *dlbuf, size_t size, size_t nitems, void *mainframe)
+{
+ call_frame_t *frame = NULL;
+ fd_t *dlfd = NULL;
+ int ret = 0;
+ cs_local_t *local = NULL;
+ struct iovec iov = {
+ 0,
+ };
+ struct iobref *iobref = NULL;
+ struct iobuf *iobuf = NULL;
+ struct iovec dliov = {
+ 0,
+ };
+ size_t tsize = 0;
+ xlator_t *this = NULL;
+ cs_private_t *xl_priv = NULL;
+ aws_private_t *priv = NULL;
+ call_frame_t *dlframe = NULL;
+
+ frame = (call_frame_t *)mainframe;
+ this = frame->this;
+ xl_priv = this->private;
+ priv = xl_priv->stores->config;
+
+ pthread_spin_lock(&(priv->lock));
+ {
+ /* returning size other than the size passed from curl will
+ * abort further download*/
+ if (priv->abortdl) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "aborting download");
+ pthread_spin_unlock(&(priv->lock));
+ return 0;
+ }
+ }
+ pthread_spin_unlock(&(priv->lock));
+
+ local = frame->local;
+ dlfd = local->dlfd;
+ tsize = size * nitems;
+
+ dliov.iov_base = (void *)dlbuf;
+ dliov.iov_len = tsize;
+
+ ret = iobuf_copy(this->ctx->iobuf_pool, &dliov, 1, &iobref, &iobuf, &iov);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "iobuf_copy failed");
+ goto out;
+ }
+
+ /* copy frame */
+ dlframe = copy_frame(frame);
+ if (!dlframe) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "copy_frame failed");
+ tsize = 0;
+ goto out;
+ }
+
+ STACK_WIND(dlframe, aws_dlwritev_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->writev, dlfd, &iov, 1, local->dloffset,
+ 0, iobref, NULL);
+
+ local->dloffset += tsize;
+
+out:
+ if (iobuf)
+ iobuf_unref(iobuf);
+ if (iobref)
+ iobref_unref(iobref);
+
+ return tsize;
+}
+
+int
+aws_download_s3(call_frame_t *frame, void *config)
+{
+ char *buf;
+ int bufsize = -1;
+ CURL *handle = NULL;
+ struct curl_slist *slist = NULL;
+ struct curl_slist *tmp = NULL;
+ xlator_t *this = NULL;
+ int ret = 0;
+ int debug = 1;
+ CURLcode res;
+ char errbuf[CURL_ERROR_SIZE];
+ size_t len = 0;
+ long responsecode;
+ char *sign_req = NULL;
+ char *date = NULL;
+ char *const reqtype = "GET";
+ char *signature = NULL;
+ cs_local_t *local = NULL;
+ char resource[RESOURCE_SIZE] = {
+ 0,
+ };
+ aws_private_t *priv = NULL;
+
+ this = frame->this;
+
+ local = frame->local;
+
+ priv = (aws_private_t *)config;
+
+ if (!priv->bucketid || !priv->hostname || !priv->awssekey ||
+ !priv->awskeyid) {
+ ret = -1;
+ goto out;
+ }
+
+ sign_req = aws_form_request(resource, &date, reqtype, priv->bucketid,
+ local->remotepath);
+ if (!sign_req) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "null sign_req, "
+ "aborting download");
+ ret = -1;
+ goto out;
+ }
+
+ gf_msg_debug("CS", 0, "sign_req %s date %s", sign_req, date);
+
+ signature = aws_sign_request(sign_req, priv->awssekey);
+ if (!signature) {
+ gf_msg("CS", GF_LOG_ERROR, 0, 0,
+ "null signature, "
+ "aborting download");
+ ret = -1;
+ goto out;
+ }
+
+ handle = curl_easy_init();
+ this = frame->this;
+
+    /* the constants 6, 20 and 10 account for the static characters in the
+     * snprintf format strings below */
+ bufsize = strlen(date) + 6 + strlen(priv->awskeyid) + strlen(signature) +
+ 20 + strlen(priv->hostname) + 10;
+
+ buf = (char *)alloca(bufsize);
+ if (!buf) {
+ gf_msg("CS", GF_LOG_ERROR, ENOMEM, 0,
+ "mem allocation "
+ "failed for buf");
+ ret = -1;
+ goto out;
+ }
+
+ snprintf(buf, bufsize, "Date: %s", date);
+ slist = curl_slist_append(slist, buf);
+ snprintf(buf, bufsize, "Authorization: AWS %s:%s", priv->awskeyid,
+ signature);
+ slist = curl_slist_append(slist, buf);
+ snprintf(buf, bufsize, "https://%s/%s", priv->hostname, resource);
+
+ if (gf_log_get_loglevel() >= GF_LOG_DEBUG) {
+ tmp = slist;
+ while (tmp) {
+ gf_msg_debug(this->name, 0, "slist for curl - %s", tmp->data);
+ tmp = tmp->next;
+ }
+ }
+
+ curl_easy_setopt(handle, CURLOPT_HTTPHEADER, slist);
+ curl_easy_setopt(handle, CURLOPT_URL, buf);
+ curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, aws_write_callback);
+ curl_easy_setopt(handle, CURLOPT_WRITEDATA, frame);
+ curl_easy_setopt(handle, CURLOPT_VERBOSE, debug);
+ curl_easy_setopt(handle, CURLOPT_ERRORBUFFER, errbuf);
+
+ res = curl_easy_perform(handle);
+ if (res != CURLE_OK) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "download failed. err: %s\n",
+ curl_easy_strerror(res));
+ ret = -1;
+ len = strlen(errbuf);
+ if (len) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "curl failure %s", errbuf);
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "curl error "
+ "%s\n",
+ curl_easy_strerror(res));
+ }
+ }
+
+ if (res == CURLE_OK) {
+ curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &responsecode);
+ gf_msg_debug(this->name, 0, "response code %ld", responsecode);
+ if (responsecode != 200) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "curl download failed");
+ }
+ }
+
+ curl_slist_free_all(slist);
+ curl_easy_cleanup(handle);
+
+out:
+ if (sign_req)
+ GF_FREE(sign_req);
+ if (date)
+ GF_FREE(date);
+ if (signature)
+ GF_FREE(signature);
+
+ return ret;
+}
+
+struct volume_options cs_options[] = {
+ {.key = {"s3plugin-seckey"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "aws secret key"},
+ {.key = {"s3plugin-keyid"},
+ .type = GF_OPTION_TYPE_STR,
+     .description = "aws key ID"},
+ {.key = {"s3plugin-bucketid"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "aws bucketid"},
+ {.key = {"s3plugin-hostname"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "aws hostname e.g. s3.amazonaws.com"},
+ {.key = {NULL}},
+};
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.h b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.h
new file mode 100644
index 00000000000..85ae669486b
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.h
@@ -0,0 +1,50 @@
+/*
+ Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _LIBAWS_H
+#define _LIBAWS_H
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/syncop.h>
+#include <curl/curl.h>
+#include "cloudsync-common.h"
+#include "libcloudsyncs3-mem-types.h"
+
+char *
+aws_b64_encode(const unsigned char *input, int length);
+
+size_t
+aws_write_callback(void *dlbuf, size_t size, size_t nitems, void *mainframe);
+
+int
+aws_download_s3(call_frame_t *frame, void *config);
+
+int
+aws_dlwritev_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, struct iatt *prebuf, struct iatt *postbuf,
+ dict_t *xdata);
+
+void *
+aws_init(xlator_t *this);
+
+int
+aws_reconfigure(xlator_t *this, dict_t *options);
+
+char *
+aws_form_request(char *resource, char **date, char *reqtype, char *bucketid,
+ char *filepath);
+char *
+aws_sign_request(char *const str, char *awssekey);
+
+void
+aws_fini(void *config);
+
+#endif
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.sym b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.sym
new file mode 100644
index 00000000000..0bc273670d5
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.sym
@@ -0,0 +1 @@
+store_ops
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/Makefile.am b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/Makefile.am
new file mode 100644
index 00000000000..a985f42a877
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/Makefile.am b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/Makefile.am
new file mode 100644
index 00000000000..b512464f157
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/Makefile.am
@@ -0,0 +1,12 @@
+csp_LTLIBRARIES = cloudsynccvlt.la
+cspdir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/cloudsync-plugins
+
+cloudsynccvlt_la_SOURCES = libcvlt.c $(top_srcdir)/xlators/features/cloudsync/src/cloudsync-common.c
+cloudsynccvlt_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+cloudsynccvlt_la_LDFLAGS = -module -avoid-version -export-symbols $(top_srcdir)/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcloudsynccvlt.sym
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src
+noinst_HEADERS = archivestore.h libcvlt.h libcvlt-mem-types.h cvlt-messages.h
+AM_CFLAGS = -Wall -fno-strict-aliasing $(GF_CFLAGS) -I$(top_srcdir)/xlators/features/cloudsync/src
+CLEANFILES =
+
+EXTRA_DIST = libcloudsynccvlt.sym
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/archivestore.h b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/archivestore.h
new file mode 100644
index 00000000000..7230ef77337
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/archivestore.h
@@ -0,0 +1,203 @@
+/*
+ Copyright (c) 2018 Commvault Systems, Inc. <http://www.commvault.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __ARCHIVESTORE_H__
+#define __ARCHIVESTORE_H__
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <dlfcn.h>
+#include <uuid/uuid.h>
+
+#define CS_XATTR_ARCHIVE_UUID "trusted.cloudsync.uuid"
+#define CS_XATTR_PRODUCT_ID "trusted.cloudsync.product-id"
+#define CS_XATTR_STORE_ID "trusted.cloudsync.store-id"
+
+struct _archstore_methods;
+typedef struct _archstore_methods archstore_methods_t;
+
+struct _archstore_desc {
+ void *priv; /* Private field for store mgmt. */
+ /* To be used only by archive store*/
+};
+typedef struct _archstore_desc archstore_desc_t;
+
+struct _archstore_info {
+ char *id; /* Identifier for the archivestore */
+ uint32_t idlen; /* Length of identifier string */
+ char *prod; /* Name of the data mgmt. product */
+ uint32_t prodlen; /* Length of the product string */
+};
+typedef struct _archstore_info archstore_info_t;
+
+struct _archstore_fileinfo {
+ uuid_t uuid; /* uuid of the file */
+ char *path; /* file path */
+ uint32_t pathlength; /* length of file path */
+};
+typedef struct _archstore_fileinfo archstore_fileinfo_t;
+
+struct _app_callback_info {
+ archstore_info_t *src_archstore;
+ archstore_fileinfo_t *src_archfile;
+ archstore_info_t *dest_archstore;
+ archstore_fileinfo_t *dest_archfile;
+};
+typedef struct _app_callback_info app_callback_info_t;
+
+typedef void (*app_callback_t)(archstore_desc_t *, app_callback_info_t *,
+ void *, int64_t, int32_t);
+
+enum _archstore_scan_type { FULL = 1, INCREMENTAL = 2 };
+typedef enum _archstore_scan_type archstore_scan_type_t;
+
+typedef int32_t archstore_errno_t;
+
+/*
+ * Initialize archive store.
+ * arg1 pointer to structure containing archive store information
+ * arg2 error number if any generated during the initialization
+ * arg3 name of the log file
+ */
+typedef int32_t (*init_archstore_t)(archstore_desc_t *, archstore_errno_t *,
+ const char *);
+
+/*
+ * Clean up archive store.
+ * arg1 pointer to structure containing archive store information
+ * arg2 error number if any generated during the cleanup
+ */
+typedef int32_t (*term_archstore_t)(archstore_desc_t *, archstore_errno_t *);
+
+/*
+ * Read the contents of the file from archive store
+ * arg1 pointer to structure containing archive store description
+ * arg2 pointer to structure containing archive store information
+ * arg3 pointer to structure containing information about file to be read
+ * arg4 offset in the file from which data should be read
+ * arg5 buffer where the data should be read
+ * arg6 number of bytes of data to be read
+ * arg7 error number if any generated during the read from file
+ * arg8 callback handler to be invoked after the data is read
+ * arg9 cookie to be passed when callback is invoked
+ */
+typedef int32_t (*read_archstore_t)(archstore_desc_t *, archstore_info_t *,
+ archstore_fileinfo_t *, off_t, char *,
+ size_t, archstore_errno_t *, app_callback_t,
+ void *);
+
+/*
+ * Restore the contents of the file from archive store
+ * This is basically in-place restore
+ * arg1 pointer to structure containing archive store description
+ * arg2 pointer to structure containing archive store information
+ * arg3 pointer to structure containing information about file to be restored
+ * arg4 error number if any generated during the file restore
+ * arg5 callback to be invoked after the file is restored
+ * arg6 cookie to be passed when callback is invoked
+ */
+typedef int32_t (*recall_archstore_t)(archstore_desc_t *, archstore_info_t *,
+ archstore_fileinfo_t *,
+ archstore_errno_t *, app_callback_t,
+ void *);
+
+/*
+ * Restore the contents of the file from archive store to a different store
+ * This is basically out-of-place restore
+ * arg1 pointer to structure containing archive store description
+ * arg2 pointer to structure containing source archive store information
+ * arg3 pointer to structure containing information about file to be restored
+ * arg4 pointer to structure containing destination archive store information
+ * arg5 pointer to structure containing information about the location to
+ *      which the file will be restored
+ * arg6 error number if any generated during the file restore
+ * arg7 callback to be invoked after the file is restored
+ * arg8 cookie to be passed when callback is invoked
+ */
+typedef int32_t (*restore_archstore_t)(archstore_desc_t *, archstore_info_t *,
+ archstore_fileinfo_t *,
+ archstore_info_t *,
+ archstore_fileinfo_t *,
+ archstore_errno_t *, app_callback_t,
+ void *);
+
+/*
+ * Archive the contents of the file to archive store
+ * arg1 pointer to structure containing archive store description
+ * arg2 pointer to structure containing source archive store information
+ * arg3 pointer to structure containing information about files to be archived
+ * arg4 pointer to structure containing destination archive store information
+ * arg5 pointer to structure containing information about files that failed
+ * to be archived
+ * arg6 error number if any generated during the file archival
+ * arg7 callback to be invoked after the file is archived
+ * arg8 cookie to be passed when callback is invoked
+ */
+typedef int32_t (*archive_archstore_t)(archstore_desc_t *, archstore_info_t *,
+ archstore_fileinfo_t *,
+ archstore_info_t *,
+ archstore_fileinfo_t *,
+ archstore_errno_t *, app_callback_t,
+ void *);
+
+/*
+ * Backup list of files provided in the input file
+ * arg1 pointer to structure containing archive store description
+ * arg2 pointer to structure containing source archive store information
+ * arg3 pointer to structure containing information about files to be backed up
+ * arg4 pointer to structure containing destination archive store information
+ * arg5 pointer to structure containing information about files that failed
+ * to be backed up
+ * arg6 error number if any generated during the file backup
+ * arg7 callback to be invoked after the file is backed up
+ * arg8 cookie to be passed when callback is invoked
+ */
+typedef int32_t (*backup_archstore_t)(archstore_desc_t *, archstore_info_t *,
+ archstore_fileinfo_t *,
+ archstore_info_t *,
+ archstore_fileinfo_t *,
+ archstore_errno_t *, app_callback_t,
+ void *);
+
+/*
+ * Scan the contents of a store and determine the files which need to be
+ * backed up.
+ * arg1 pointer to structure containing archive store description
+ * arg2 pointer to structure containing archive store information
+ * arg3 type of scan whether full or incremental
+ * arg4 path to file that contains list of files to be backed up
+ * arg5 error number if any generated during scan operation
+ */
+typedef int32_t (*scan_archstore_t)(archstore_desc_t *, archstore_info_t *,
+ archstore_scan_type_t, char *,
+ archstore_errno_t *);
+
+struct _archstore_methods {
+ init_archstore_t init;
+ term_archstore_t fini;
+ backup_archstore_t backup;
+ archive_archstore_t archive;
+ scan_archstore_t scan;
+ restore_archstore_t restore;
+ recall_archstore_t recall;
+ read_archstore_t read;
+};
+
+typedef int (*get_archstore_methods_t)(archstore_methods_t *);
+
+/*
+ * Single function that will be invoked by applications for extracting
+ * the function pointers to all data management functions.
+ */
+int32_t
+get_archstore_methods(archstore_methods_t *);
+
+#endif /* End of __ARCHIVESTORE_H__ */
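
The typedefs above define the contract that a data-management library must
export through get_archstore_methods(). A minimal consumer sketch, assuming
this header and a library that exports that symbol (path and error handling
are illustrative; the cvlt plugin below does the same with more checks):

    #include <dlfcn.h>

    static int32_t
    load_archstore(const char *libpath, archstore_methods_t *m,
                   archstore_desc_t *desc)
    {
        void *handle = dlopen(libpath, RTLD_NOW);
        get_archstore_methods_t get_methods;
        archstore_errno_t err = 0;

        if (!handle)
            return -1;

        get_methods = (get_archstore_methods_t)dlsym(handle,
                                                     "get_archstore_methods");
        if (!get_methods || get_methods(m) != 0) {
            dlclose(handle);
            return -1;
        }

        /* the third argument is the log file name expected by init_archstore_t */
        return m->init(desc, &err, "/var/log/archstore.log");
    }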
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/cvlt-messages.h b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/cvlt-messages.h
new file mode 100644
index 00000000000..57c9aa77da0
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/cvlt-messages.h
@@ -0,0 +1,30 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+ */
+
+#ifndef _CVLT_MESSAGES_H_
+#define _CVLT_MESSAGES_H_
+
+#include <glusterfs/glfs-message-id.h>
+
+/* To add new message IDs, append new identifiers at the end of the list.
+ *
+ * Never remove a message ID. If it's not used anymore, you can rename it or
+ * leave it as it is, but not delete it. This is to prevent reutilization of
+ * IDs by other messages.
+ *
+ * The component name must match one of the entries defined in
+ * glfs-message-id.h.
+ */
+
+GLFS_MSGID(CVLT, CVLT_EXTRACTION_FAILED, CVLT_FREE,
+ CVLT_RESOURCE_ALLOCATION_FAILED, CVLT_RESTORE_FAILED,
+ CVLT_READ_FAILED, CVLT_NO_MEMORY, CVLT_DLOPEN_FAILED);
+
+#endif /* !_CVLT_MESSAGES_H_ */
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcloudsynccvlt.sym b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcloudsynccvlt.sym
new file mode 100644
index 00000000000..0bc273670d5
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcloudsynccvlt.sym
@@ -0,0 +1 @@
+store_ops
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt-mem-types.h b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt-mem-types.h
new file mode 100644
index 00000000000..c24fab8bfe7
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt-mem-types.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018 Commvault Systems, Inc. <http://www.commvault.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#ifndef __LIBCVLT_MEM_TYPES_H__
+#define __LIBCVLT_MEM_TYPES_H__
+
+#include <glusterfs/mem-types.h>
+enum libcvlt_mem_types_ {
+ gf_libcvlt_mt_cvlt_private_t = gf_common_mt_end + 1,
+ gf_libcvlt_mt_end
+};
+#endif /* __LIBCVLT_MEM_TYPES_H__ */
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.c b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.c
new file mode 100644
index 00000000000..5b7272bb448
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.c
@@ -0,0 +1,842 @@
+#include <stdlib.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/glusterfs.h>
+#include "libcvlt.h"
+#include "cloudsync-common.h"
+#include "cvlt-messages.h"
+
+#define LIBARCHIVE_SO "libopenarchive.so"
+#define ALIGN_SIZE 4096
+#define CVLT_TRAILER "cvltv1"
+
+store_methods_t store_ops = {
+ .fop_download = cvlt_download,
+ .fop_init = cvlt_init,
+ .fop_reconfigure = cvlt_reconfigure,
+ .fop_fini = cvlt_fini,
+ .fop_remote_read = cvlt_read,
+};
+
+static const int32_t num_req = 32;
+static const int32_t num_iatt = 32;
+static char *plugin = "cvlt_cloudSync";
+
+int32_t
+mem_acct_init(xlator_t *this)
+{
+ int ret = -1;
+
+ if (!this)
+ return ret;
+
+ ret = xlator_mem_acct_init(this, gf_libcvlt_mt_end + 1);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ return ret;
+}
+
+static void
+cvlt_free_resources(archive_t *arch)
+{
+ /*
+ * We will release all the resources that were allocated by the xlator.
+ * Check whether there are any buffers which have not been released
+ * back to a mempool.
+ */
+
+ if (arch->handle) {
+ dlclose(arch->handle);
+ }
+
+ if (arch->iobuf_pool) {
+ iobuf_pool_destroy(arch->iobuf_pool);
+ }
+
+ if (arch->req_pool) {
+ mem_pool_destroy(arch->req_pool);
+ arch->req_pool = NULL;
+ }
+
+ return;
+}
+
+static int32_t
+cvlt_extract_store_fops(xlator_t *this, archive_t *arch)
+{
+ int32_t op_ret = -1;
+ get_archstore_methods_t get_archstore_methods;
+
+ /*
+ * libopenarchive.so defines methods for performing data management
+     * operations. We will extract the methods from the library and these
+     * methods will be invoked for moving data between the glusterfs volume
+ * and the data management product.
+ */
+
+ VALIDATE_OR_GOTO(arch, err);
+
+ arch->handle = dlopen(LIBARCHIVE_SO, RTLD_NOW);
+ if (!arch->handle) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_DLOPEN_FAILED,
+ " failed to open %s ", LIBARCHIVE_SO);
+ return op_ret;
+ }
+
+ dlerror(); /* Clear any existing error */
+
+ get_archstore_methods = dlsym(arch->handle, "get_archstore_methods");
+ if (!get_archstore_methods) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " Error extracting get_archstore_methods()");
+ dlclose(arch->handle);
+ arch->handle = NULL;
+ return op_ret;
+ }
+
+ op_ret = get_archstore_methods(&(arch->fops));
+ if (op_ret) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " Failed to extract methods in get_archstore_methods");
+ dlclose(arch->handle);
+ arch->handle = NULL;
+ return op_ret;
+ }
+
+err:
+ return op_ret;
+}
+
+static int32_t
+cvlt_alloc_resources(xlator_t *this, archive_t *arch, int num_req, int num_iatt)
+{
+ /*
+ * Initialize information about all the memory pools that will be
+ * used by this xlator.
+ */
+ arch->nreqs = 0;
+
+ arch->req_pool = NULL;
+
+ arch->handle = NULL;
+ arch->xl = this;
+
+ arch->req_pool = mem_pool_new(cvlt_request_t, num_req);
+ if (!arch->req_pool) {
+ goto err;
+ }
+
+ arch->iobuf_pool = iobuf_pool_new();
+ if (!arch->iobuf_pool) {
+ goto err;
+ }
+
+ if (cvlt_extract_store_fops(this, arch)) {
+ goto err;
+ }
+
+ return 0;
+
+err:
+
+ return -1;
+}
+
+static void
+cvlt_req_init(cvlt_request_t *req)
+{
+ sem_init(&(req->sem), 0, 0);
+
+ return;
+}
+
+static void
+cvlt_req_destroy(cvlt_request_t *req)
+{
+ if (req->iobuf) {
+ iobuf_unref(req->iobuf);
+ }
+
+ if (req->iobref) {
+ iobref_unref(req->iobref);
+ }
+
+ sem_destroy(&(req->sem));
+
+ return;
+}
+
+static cvlt_request_t *
+cvlt_alloc_req(archive_t *arch)
+{
+ cvlt_request_t *reqptr = NULL;
+
+ if (!arch) {
+ goto err;
+ }
+
+ if (arch->req_pool) {
+ reqptr = mem_get0(arch->req_pool);
+ if (reqptr) {
+ cvlt_req_init(reqptr);
+ }
+ }
+
+ if (reqptr) {
+ LOCK(&(arch->lock));
+ arch->nreqs++;
+ UNLOCK(&(arch->lock));
+ }
+
+err:
+ return reqptr;
+}
+
+static int32_t
+cvlt_free_req(archive_t *arch, cvlt_request_t *reqptr)
+{
+ if (!reqptr) {
+ goto err;
+ }
+
+ if (!arch) {
+ goto err;
+ }
+
+ if (arch->req_pool) {
+ /*
+ * Free the request resources if they exist.
+ */
+
+ cvlt_req_destroy(reqptr);
+ mem_put(reqptr);
+
+ LOCK(&(arch->lock));
+ arch->nreqs--;
+ UNLOCK(&(arch->lock));
+ }
+
+ return 0;
+
+err:
+ return -1;
+}
+
+static int32_t
+cvlt_init_xlator(xlator_t *this, archive_t *arch, int num_req, int num_iatt)
+{
+ int32_t ret = -1;
+ int32_t errnum = -1;
+ int32_t locked = 0;
+
+ /*
+     * Perform all the initializations needed for bringing up the xlator.
+ */
+ if (!arch) {
+ goto err;
+ }
+
+ LOCK_INIT(&(arch->lock));
+ LOCK(&(arch->lock));
+
+ locked = 1;
+
+ ret = cvlt_alloc_resources(this, arch, num_req, num_iatt);
+
+ if (ret) {
+ goto err;
+ }
+
+ /*
+     * Now that the fops have been extracted, initialize the store
+ */
+ ret = arch->fops.init(&(arch->descinfo), &errnum, plugin);
+ if (ret) {
+ goto err;
+ }
+
+ UNLOCK(&(arch->lock));
+ locked = 0;
+ ret = 0;
+
+ return ret;
+
+err:
+ if (arch) {
+ cvlt_free_resources(arch);
+
+ if (locked) {
+ UNLOCK(&(arch->lock));
+ }
+ }
+
+ return ret;
+}
+
+static int32_t
+cvlt_term_xlator(archive_t *arch)
+{
+ int32_t errnum = -1;
+
+ if (!arch) {
+ goto err;
+ }
+
+ LOCK(&(arch->lock));
+
+ /*
+ * Release the resources that have been allocated inside store
+ */
+ arch->fops.fini(&(arch->descinfo), &errnum);
+
+ cvlt_free_resources(arch);
+
+ UNLOCK(&(arch->lock));
+
+ GF_FREE(arch);
+
+ return 0;
+
+err:
+ return -1;
+}
+
+static int32_t
+cvlt_init_store_info(archive_t *priv, archstore_info_t *store_info)
+{
+ if (!store_info) {
+ return -1;
+ }
+
+ store_info->prod = priv->product_id;
+ store_info->prodlen = strlen(priv->product_id);
+
+ store_info->id = priv->store_id;
+ store_info->idlen = strlen(priv->store_id);
+
+ return 0;
+}
+
+static int32_t
+cvlt_init_file_info(cs_loc_xattr_t *xattr, archstore_fileinfo_t *file_info)
+{
+ if (!xattr || !file_info) {
+ return -1;
+ }
+
+ gf_uuid_copy(file_info->uuid, xattr->uuid);
+ file_info->path = xattr->file_path;
+ file_info->pathlength = strlen(xattr->file_path);
+
+ return 0;
+}
+
+static int32_t
+cvlt_init_gluster_store_info(cs_loc_xattr_t *xattr,
+ archstore_info_t *store_info)
+{
+ static char *product = "glusterfs";
+
+ if (!xattr || !store_info) {
+ return -1;
+ }
+
+ store_info->prod = product;
+ store_info->prodlen = strlen(product);
+
+ store_info->id = xattr->volname;
+ store_info->idlen = strlen(xattr->volname);
+
+ return 0;
+}
+
+static int32_t
+cvlt_init_gluster_file_info(cs_loc_xattr_t *xattr,
+ archstore_fileinfo_t *file_info)
+{
+ if (!xattr || !file_info) {
+ return -1;
+ }
+
+ gf_uuid_copy(file_info->uuid, xattr->gfid);
+ file_info->path = xattr->file_path;
+ file_info->pathlength = strlen(xattr->file_path);
+
+ return 0;
+}
+
+static void
+cvlt_copy_stat_info(struct iatt *buf, cs_size_xattr_t *xattrs)
+{
+ /*
+     * If the file was archived then the reported size will not be
+     * correct. We need to fix this.
+ */
+ if (buf && xattrs) {
+ buf->ia_size = xattrs->size;
+ buf->ia_blksize = xattrs->blksize;
+ buf->ia_blocks = xattrs->blocks;
+ }
+
+ return;
+}
+
+static void
+cvlt_readv_complete(archstore_desc_t *desc, app_callback_info_t *cbkinfo,
+ void *cookie, int64_t op_ret, int32_t op_errno)
+{
+ struct iovec iov;
+ xlator_t *this = NULL;
+ struct iatt postbuf = {
+ 0,
+ };
+ call_frame_t *frame = NULL;
+ cvlt_request_t *req = (cvlt_request_t *)cookie;
+ cs_local_t *local = NULL;
+ cs_private_t *cspriv = NULL;
+ archive_t *priv = NULL;
+
+ frame = req->frame;
+ this = frame->this;
+ local = frame->local;
+
+ cspriv = this->private;
+ priv = (archive_t *)cspriv->stores->config;
+
+ if (strcmp(priv->trailer, CVLT_TRAILER)) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ gf_msg_debug(plugin, 0,
+                 " Read callback invoked offset:%" PRIu64 " bytes: %" PRIu64
+ " op : %d ret : %" PRId64 " errno : %d",
+ req->offset, req->bytes, req->op_type, op_ret, op_errno);
+
+ if (op_ret < 0) {
+ goto out;
+ }
+
+ req->iobref = iobref_new();
+ if (!req->iobref) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ iobref_add(req->iobref, req->iobuf);
+ iov.iov_base = iobuf_ptr(req->iobuf);
+ iov.iov_len = op_ret;
+
+ cvlt_copy_stat_info(&postbuf, &(req->szxattr));
+
+ /*
+ * Hack to notify higher layers of EOF.
+ */
+ if (!postbuf.ia_size || (req->offset + iov.iov_len >= postbuf.ia_size)) {
+ gf_msg_debug(plugin, 0, " signalling end-of-file for uuid=%s",
+ uuid_utoa(req->file_info.uuid));
+ op_errno = ENOENT;
+ }
+
+out:
+
+ STACK_UNWIND_STRICT(readv, frame, op_ret, op_errno, &iov, 1, &postbuf,
+ req->iobref, local->xattr_rsp);
+
+ cvlt_free_req(priv, req);
+
+ return;
+}
+
+static void
+cvlt_download_complete(archstore_desc_t *store, app_callback_info_t *cbk_info,
+ void *cookie, int64_t ret, int errcode)
+{
+ cvlt_request_t *req = (cvlt_request_t *)cookie;
+
+ gf_msg_debug(plugin, 0,
+ " Download callback invoked ret : %" PRId64 " errno : %d",
+ ret, errcode);
+
+ req->op_ret = ret;
+ req->op_errno = errcode;
+ sem_post(&(req->sem));
+
+ return;
+}
+
+void *
+cvlt_init(xlator_t *this)
+{
+ int ret = 0;
+ archive_t *priv = NULL;
+
+ if (!this->children || this->children->next) {
+ gf_msg(plugin, GF_LOG_ERROR, ENOMEM, 0,
+ "should have exactly one child");
+ ret = -1;
+ goto out;
+ }
+
+ if (!this->parents) {
+ gf_msg(plugin, GF_LOG_ERROR, ENOMEM, 0,
+ "dangling volume. check volfile");
+ ret = -1;
+ goto out;
+ }
+
+ priv = GF_CALLOC(1, sizeof(archive_t), gf_libcvlt_mt_cvlt_private_t);
+ if (!priv) {
+ ret = -1;
+ goto out;
+ }
+
+ priv->trailer = CVLT_TRAILER;
+ if (cvlt_init_xlator(this, priv, num_req, num_iatt)) {
+ gf_msg(plugin, GF_LOG_ERROR, ENOMEM, 0, "xlator init failed");
+ ret = -1;
+ goto out;
+ }
+
+ GF_OPTION_INIT("cloudsync-store-id", priv->store_id, str, out);
+ GF_OPTION_INIT("cloudsync-product-id", priv->product_id, str, out);
+
+ gf_msg(plugin, GF_LOG_INFO, 0, 0,
+ "store id is : %s "
+ "product id is : %s.",
+ priv->store_id, priv->product_id);
+out:
+ if (ret == -1) {
+ cvlt_term_xlator(priv);
+ return (NULL);
+ }
+ return priv;
+}
+
+int
+cvlt_reconfigure(xlator_t *this, dict_t *options)
+{
+ cs_private_t *cspriv = NULL;
+ archive_t *priv = NULL;
+
+ cspriv = this->private;
+ priv = (archive_t *)cspriv->stores->config;
+
+ if (strcmp(priv->trailer, CVLT_TRAILER))
+ goto out;
+
+ GF_OPTION_RECONF("cloudsync-store-id", priv->store_id, options, str, out);
+
+ GF_OPTION_RECONF("cloudsync-product-id", priv->product_id, options, str,
+ out);
+ gf_msg_debug(plugin, 0,
+ "store id is : %s "
+ "product id is : %s.",
+ priv->store_id, priv->product_id);
+ return 0;
+out:
+ return -1;
+}
+
+void
+cvlt_fini(void *config)
+{
+ archive_t *priv = NULL;
+
+ priv = (archive_t *)config;
+
+ if (strcmp(priv->trailer, CVLT_TRAILER))
+ return;
+
+ cvlt_term_xlator(priv);
+ gf_msg(plugin, GF_LOG_INFO, 0, CVLT_FREE, " released xlator resources");
+ return;
+}
+
+int
+cvlt_download(call_frame_t *frame, void *config)
+{
+ archive_t *parch = NULL;
+ cs_local_t *local = frame->local;
+ cs_loc_xattr_t *locxattr = local->xattrinfo.lxattr;
+ cvlt_request_t *req = NULL;
+ archstore_info_t dest_storeinfo;
+ archstore_fileinfo_t dest_fileinfo;
+ int32_t op_ret, op_errno;
+
+ parch = (archive_t *)config;
+
+ if (strcmp(parch->trailer, CVLT_TRAILER)) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto err;
+ }
+
+ gf_msg_debug(plugin, 0, " download invoked for uuid = %s gfid=%s ",
+ locxattr->uuid, uuid_utoa(locxattr->gfid));
+
+ if (!(parch->fops.restore)) {
+ op_errno = ELIBBAD;
+ goto err;
+ }
+
+ /*
+ * Download needs to be processed. Allocate a request.
+ */
+ req = cvlt_alloc_req(parch);
+
+ if (!req) {
+ gf_msg(plugin, GF_LOG_ERROR, ENOMEM, CVLT_RESOURCE_ALLOCATION_FAILED,
+               " failed to allocate request for gfid=%s",
+ uuid_utoa(locxattr->gfid));
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ /*
+ * Initialize the request object.
+ */
+ req->op_type = CVLT_RESTORE_OP;
+ req->frame = frame;
+
+ /*
+ * The file is currently residing inside a data management store.
+ * To restore the file contents we need to provide the information
+ * about data management store.
+ */
+ op_ret = cvlt_init_store_info(parch, &(req->store_info));
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " failed to extract store info for gfid=%s",
+ uuid_utoa(locxattr->gfid));
+ goto err;
+ }
+
+ op_ret = cvlt_init_file_info(locxattr, &(req->file_info));
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " failed to extract file info for gfid=%s",
+ uuid_utoa(locxattr->gfid));
+ goto err;
+ }
+
+ /*
+ * We need to perform in-place restore of the file from data management
+     * store to the glusterfs volume.
+ */
+ op_ret = cvlt_init_gluster_store_info(locxattr, &dest_storeinfo);
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " failed to extract destination store info for gfid=%s",
+ uuid_utoa(locxattr->gfid));
+ goto err;
+ }
+
+ op_ret = cvlt_init_gluster_file_info(locxattr, &dest_fileinfo);
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " failed to extract file info for gfid=%s",
+ uuid_utoa(locxattr->gfid));
+ goto err;
+ }
+
+ /*
+ * Submit the restore request.
+ */
+ op_ret = parch->fops.restore(&(parch->descinfo), &(req->store_info),
+ &(req->file_info), &dest_storeinfo,
+ &dest_fileinfo, &op_errno,
+ cvlt_download_complete, req);
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_RESTORE_FAILED,
+ " failed to restore file gfid=%s from data management store",
+ uuid_utoa(locxattr->gfid));
+ goto err;
+ }
+
+ /*
+ * Wait for the restore to complete.
+ */
+ sem_wait(&(req->sem));
+
+ if (req->op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_RESTORE_FAILED,
+               " restore failed for gfid=%s", uuid_utoa(locxattr->gfid));
+ goto err;
+ }
+
+ if (req) {
+ cvlt_free_req(parch, req);
+ }
+
+ return 0;
+
+err:
+
+ if (req) {
+ cvlt_free_req(parch, req);
+ }
+
+ return -1;
+}
+
+int
+cvlt_read(call_frame_t *frame, void *config)
+{
+ int32_t op_ret = -1;
+ int32_t op_errno = 0;
+ archive_t *parch = NULL;
+ cvlt_request_t *req = NULL;
+ struct iovec iov = {
+ 0,
+ };
+ struct iobref *iobref;
+ size_t size = 0;
+ off_t off = 0;
+
+ cs_local_t *local = frame->local;
+ cs_loc_xattr_t *locxattr = local->xattrinfo.lxattr;
+
+ size = local->xattrinfo.size;
+ off = local->xattrinfo.offset;
+
+ parch = (archive_t *)config;
+
+ if (strcmp(parch->trailer, CVLT_TRAILER)) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto err;
+ }
+
+ gf_msg_debug(plugin, 0,
+ " read invoked for gfid = %s offset = %" PRIu64
+ " file_size = %" PRIu64,
+ uuid_utoa(locxattr->gfid), off, local->stbuf.ia_size);
+
+ if (off >= local->stbuf.ia_size) {
+ /*
+ * Hack to notify higher layers of EOF.
+ */
+
+ op_errno = ENOENT;
+ op_ret = 0;
+
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_READ_FAILED,
+ " reporting end-of-file for gfid=%s", uuid_utoa(locxattr->gfid));
+
+ goto err;
+ }
+
+ if (!size) {
+ op_errno = EINVAL;
+
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_READ_FAILED,
+ " zero size read attempted on gfid=%s",
+ uuid_utoa(locxattr->gfid));
+ goto err;
+ }
+
+ if (!(parch->fops.read)) {
+ op_errno = ELIBBAD;
+ goto err;
+ }
+
+ /*
+     * The read request needs to be processed. Allocate a request.
+ */
+ req = cvlt_alloc_req(parch);
+
+ if (!req) {
+ gf_msg(plugin, GF_LOG_ERROR, ENOMEM, CVLT_NO_MEMORY,
+               " failed to allocate request for gfid=%s",
+ uuid_utoa(locxattr->gfid));
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ req->iobuf = iobuf_get_page_aligned(parch->iobuf_pool, size, ALIGN_SIZE);
+ if (!req->iobuf) {
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ /*
+ * Initialize the request object.
+ */
+ req->op_type = CVLT_READ_OP;
+ req->offset = off;
+ req->bytes = size;
+ req->frame = frame;
+ req->szxattr.size = local->stbuf.ia_size;
+ req->szxattr.blocks = local->stbuf.ia_blocks;
+ req->szxattr.blksize = local->stbuf.ia_blksize;
+
+ /*
+ * The file is currently residing inside a data management store.
+ * To read the file contents we need to provide the information
+ * about data management store.
+ */
+ op_ret = cvlt_init_store_info(parch, &(req->store_info));
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " failed to extract store info for gfid=%s"
+ " offset=%" PRIu64 " size=%" GF_PRI_SIZET
+ ", "
+ " buf=%p",
+ uuid_utoa(locxattr->gfid), off, size, req->iobuf->ptr);
+ goto err;
+ }
+
+ op_ret = cvlt_init_file_info(locxattr, &(req->file_info));
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " failed to extract file info for gfid=%s"
+ " offset=%" PRIu64 " size=%" GF_PRI_SIZET
+ ", "
+ " buf=%p",
+ uuid_utoa(locxattr->gfid), off, size, req->iobuf->ptr);
+ goto err;
+ }
+
+ /*
+ * Submit the read request.
+ */
+ op_ret = parch->fops.read(&(parch->descinfo), &(req->store_info),
+ &(req->file_info), off, req->iobuf->ptr, size,
+ &op_errno, cvlt_readv_complete, req);
+
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " read failed on gfid=%s"
+ " offset=%" PRIu64 " size=%" GF_PRI_SIZET
+ ", "
+ " buf=%p",
+ uuid_utoa(locxattr->gfid), off, size, req->iobuf->ptr);
+ goto err;
+ }
+
+ return 0;
+
+err:
+
+ iobref = iobref_new();
+ gf_msg_debug(plugin, 0, " read unwinding stack op_ret = %d, op_errno = %d",
+ op_ret, op_errno);
+
+ STACK_UNWIND_STRICT(readv, frame, op_ret, op_errno, &iov, 1,
+ &(local->stbuf), iobref, local->xattr_rsp);
+
+ if (iobref) {
+ iobref_unref(iobref);
+ }
+
+ if (req) {
+ cvlt_free_req(parch, req);
+ }
+
+ return 0;
+}
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.h b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.h
new file mode 100644
index 00000000000..c45ac948f6c
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.h
@@ -0,0 +1,84 @@
+/*
+ Copyright (c) 2018 Commvault Systems, Inc. <http://www.commvault.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _LIBCVLT_H
+#define _LIBCVLT_H
+
+#include <semaphore.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/syncop.h>
+#include <glusterfs/compat-errno.h>
+#include "cloudsync-common.h"
+#include "libcvlt-mem-types.h"
+#include "archivestore.h"
+
+enum _cvlt_op {
+ CVLT_READ_OP = 1,
+ CVLT_WRITE_OP = 2,
+ CVLT_RESTORE_OP = 3,
+ CVLT_ARCHIVE_OP = 4,
+ CVLT_LOOKUP_OP = 5,
+ CVLT_XATTR_OP = 6,
+ CVLT_STAT_OP = 7,
+ CVLT_FSTAT_op = 8,
+ CVLT_UNDEF_OP = 127
+};
+typedef enum _cvlt_op cvlt_op_t;
+
+struct _archive;
+struct _cvlt_request {
+ uint64_t offset;
+ uint64_t bytes;
+ struct iobuf *iobuf;
+ struct iobref *iobref;
+ call_frame_t *frame;
+ cvlt_op_t op_type;
+ int32_t op_ret;
+ int32_t op_errno;
+ xlator_t *this;
+ sem_t sem;
+ archstore_info_t store_info;
+ archstore_fileinfo_t file_info;
+ cs_size_xattr_t szxattr;
+};
+typedef struct _cvlt_request cvlt_request_t;
+
+struct _archive {
+ gf_lock_t lock; /* lock for controlling access */
+ xlator_t *xl; /* xlator */
+ void *handle; /* handle returned from dlopen */
+ int32_t nreqs; /* num requests active */
+ struct mem_pool *req_pool; /* pool for requests */
+    struct iobuf_pool *iobuf_pool; /* iobuf pool */
+ archstore_desc_t descinfo; /* Archive store descriptor info */
+ archstore_methods_t fops; /* function pointers */
+ char *product_id;
+ char *store_id;
+ char *trailer;
+};
+typedef struct _archive archive_t;
+
+void *
+cvlt_init(xlator_t *);
+
+int
+cvlt_reconfigure(xlator_t *, dict_t *);
+
+void
+cvlt_fini(void *);
+
+int
+cvlt_download(call_frame_t *, void *);
+
+int
+cvlt_read(call_frame_t *, void *);
+
+#endif
diff --git a/xlators/features/cloudsync/src/cloudsync.c b/xlators/features/cloudsync/src/cloudsync.c
new file mode 100644
index 00000000000..7f0b9e563b8
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync.c
@@ -0,0 +1,2076 @@
+/*
+ * Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+#include "cloudsync.h"
+#include "cloudsync-common.h"
+#include <glusterfs/call-stub.h>
+#include "cloudsync-autogen-fops.h"
+
+#include <string.h>
+#include <dlfcn.h>
+
+static void
+cs_cleanup_private(cs_private_t *priv)
+{
+ if (priv) {
+ if (priv->stores) {
+ priv->stores->fini(priv->stores->config);
+ GF_FREE(priv->stores);
+ }
+
+ pthread_spin_destroy(&priv->lock);
+ GF_FREE(priv);
+ }
+
+ return;
+}
+
+static struct cs_plugin plugins[] = {
+ {.name = "cloudsyncs3",
+ .library = "cloudsyncs3.so",
+ .description = "cloudsync s3 store."},
+#if defined(__linux__)
+ {.name = "cvlt",
+ .library = "cloudsynccvlt.so",
+ .description = "Commvault content store."},
+#endif
+ {.name = NULL},
+};
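
Each entry above names a shared object that is expected to export a
store_methods_t symbol called store_ops (see the .sym files earlier in this
patch). A hypothetical third plugin would add one more entry to this table and
provide stubs along these lines (sketch only; callback names are invented and
the cloudsync plugin headers are assumed):

    static void *
    noop_init(xlator_t *this)
    {
        return this; /* any non-NULL token serves as the config handle */
    }

    static int
    noop_reconfigure(xlator_t *this, dict_t *options)
    {
        (void)this;
        (void)options;
        return 0;
    }

    static void
    noop_fini(void *config)
    {
        (void)config;
    }

    static int
    noop_download(call_frame_t *frame, void *config)
    {
        (void)frame;
        (void)config;
        return -1; /* nothing to download in this stub */
    }

    store_methods_t store_ops = {
        .fop_init = noop_init,
        .fop_reconfigure = noop_reconfigure,
        .fop_fini = noop_fini,
        .fop_download = noop_download,
    };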
+
+int
+cs_init(xlator_t *this)
+{
+ cs_private_t *priv = NULL;
+ gf_boolean_t per_vol = _gf_false;
+ int ret = 0;
+ char *libpath = NULL;
+ store_methods_t *store_methods = NULL;
+ void *handle = NULL;
+ char *temp_str = NULL;
+ int index = 0;
+ char *libname = NULL;
+
+ priv = GF_CALLOC(1, sizeof(*priv), gf_cs_mt_cs_private_t);
+ if (!priv) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "insufficient memory");
+ goto out;
+ }
+
+ priv->this = this;
+
+ this->local_pool = mem_pool_new(cs_local_t, 512);
+ if (!this->local_pool) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, ENOMEM, "initialisation failed.");
+ ret = -1;
+ goto out;
+ }
+
+ this->private = priv;
+
+ GF_OPTION_INIT("cloudsync-remote-read", priv->remote_read, bool, out);
+
+ /* temp workaround. Should be configurable through glusterd*/
+ per_vol = _gf_true;
+
+ if (per_vol) {
+ if (dict_get_str_sizen(this->options, "cloudsync-storetype",
+ &temp_str) == 0) {
+ for (index = 0; plugins[index].name; index++) {
+ if (!strcmp(temp_str, plugins[index].name)) {
+ libname = plugins[index].library;
+ break;
+ }
+ }
+ } else {
+ ret = 0;
+ }
+
+ if (!libname) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, 0, "no plugin enabled");
+ ret = 0;
+ goto out;
+ }
+
+ ret = gf_asprintf(&libpath, "%s/%s", CS_PLUGINDIR, libname);
+ if (ret == -1) {
+ goto out;
+ }
+
+ handle = dlopen(libpath, RTLD_NOW);
+ if (!handle) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, 0,
+ "could not "
+ "load the required library. %s",
+ dlerror());
+ ret = 0;
+ goto out;
+ } else {
+ gf_msg(this->name, GF_LOG_INFO, 0, 0,
+ "loading library:%s successful", libname);
+ }
+
+ priv->stores = GF_CALLOC(1, sizeof(struct cs_remote_stores),
+ gf_cs_mt_cs_remote_stores_t);
+ if (!priv->stores) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "Could not "
+ "allocate memory for priv->stores");
+ ret = -1;
+ goto out;
+ }
+
+ (void)dlerror(); /* clear out previous error string */
+
+ /* load library methods */
+ store_methods = (store_methods_t *)dlsym(handle, "store_ops");
+ if (!store_methods) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "null store_methods %s",
+ dlerror());
+ ret = -1;
+ goto out;
+ }
+
+ (void)dlerror();
+
+ if (priv->remote_read) {
+ priv->stores->rdfop = store_methods->fop_remote_read;
+ if (!priv->stores->rdfop) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "failed to get"
+ " read fop %s",
+ dlerror());
+ ret = -1;
+ goto out;
+ }
+ }
+
+ priv->stores->dlfop = store_methods->fop_download;
+ if (!priv->stores->dlfop) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "failed to get"
+ " download fop %s",
+ dlerror());
+ ret = -1;
+ goto out;
+ }
+
+ (void)dlerror();
+ priv->stores->init = store_methods->fop_init;
+ if (!priv->stores->init) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "failed to get"
+ " init fop %s",
+ dlerror());
+ ret = -1;
+ goto out;
+ }
+
+ (void)dlerror();
+ priv->stores->reconfigure = store_methods->fop_reconfigure;
+ if (!priv->stores->reconfigure) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "failed to get"
+ " reconfigure fop %s",
+ dlerror());
+ ret = -1;
+ goto out;
+ }
+
+ priv->stores->handle = handle;
+
+ priv->stores->config = (void *)((priv->stores->init)(this));
+ if (!priv->stores->config) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "null config");
+ ret = -1;
+ goto out;
+ }
+ }
+
+ ret = 0;
+
+out:
+ if (ret == -1) {
+ if (this->local_pool) {
+ mem_pool_destroy(this->local_pool);
+ this->local_pool = NULL;
+ }
+
+ cs_cleanup_private(priv);
+
+ if (handle) {
+ dlclose(handle);
+ }
+ }
+
+ GF_FREE(libpath);
+
+ return ret;
+}
+
+int
+cs_forget(xlator_t *this, inode_t *inode)
+{
+ uint64_t ctx_int = 0;
+ cs_inode_ctx_t *ctx = NULL;
+
+ inode_ctx_del(inode, this, &ctx_int);
+ if (!ctx_int)
+ return 0;
+
+ ctx = (cs_inode_ctx_t *)(uintptr_t)ctx_int;
+
+ GF_FREE(ctx);
+ return 0;
+}
+
+void
+cs_fini(xlator_t *this)
+{
+ cs_private_t *priv = NULL;
+ priv = this->private;
+
+ cs_cleanup_private(priv);
+}
+
+int
+cs_reconfigure(xlator_t *this, dict_t *options)
+{
+ cs_private_t *priv = NULL;
+ int ret = 0;
+
+ priv = this->private;
+ if (!priv) {
+ ret = -1;
+ goto out;
+ }
+
+ GF_OPTION_RECONF("cloudsync-remote-read", priv->remote_read, options, bool,
+ out);
+
+ /* needed only for per volume configuration*/
+ ret = priv->stores->reconfigure(this, options);
+
+out:
+ return ret;
+}
+
+int32_t
+cs_mem_acct_init(xlator_t *this)
+{
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO("cloudsync", this, out);
+
+ ret = xlator_mem_acct_init(this, gf_cs_mt_end + 1);
+
+ if (ret != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "Memory accounting init failed");
+ return ret;
+ }
+out:
+ return ret;
+}
+
+int32_t
+cs_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t off, dict_t *xdata)
+{
+ int ret = 0;
+ int op_errno = ENOMEM;
+
+ if (!xdata) {
+ xdata = dict_new();
+ if (!xdata) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, ENOMEM,
+ "failed to create "
+ "dict");
+ goto err;
+ }
+ }
+
+ ret = dict_set_uint32(xdata, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "dict_set failed key:"
+ " %s",
+ GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+
+ STACK_WIND(frame, default_readdirp_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readdirp, fd, size, off, xdata);
+ return 0;
+err:
+ STACK_UNWIND_STRICT(readdirp, frame, -1, op_errno, NULL, NULL);
+ return 0;
+}
+
+int32_t
+cs_truncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+ uint64_t val = 0;
+
+ local = frame->local;
+
+ local->call_cnt++;
+
+ if (op_ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "truncate failed");
+ ret = dict_get_uint64(xdata, GF_CS_OBJECT_STATUS, &val);
+ if (ret == 0) {
+ if (val == GF_CS_ERROR) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "could not get file state, unwinding");
+ op_ret = -1;
+ op_errno = EIO;
+ goto unwind;
+ } else {
+ __cs_inode_ctx_update(this, local->loc.inode, val);
+ gf_msg(this->name, GF_LOG_INFO, 0, 0, " state = %" PRIu64, val);
+
+ if (local->call_cnt == 1 &&
+ (val == GF_CS_REMOTE || val == GF_CS_DOWNLOADING)) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, 0,
+ "will repair and download "
+ "the file, current state : %" PRIu64,
+ val);
+ goto repair;
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "second truncate, Unwinding");
+ goto unwind;
+ }
+ }
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+                   "file state could not be determined, unwinding");
+ goto unwind;
+ }
+ } else {
+ /* successful write => file is local */
+ __cs_inode_ctx_update(this, local->loc.inode, GF_CS_LOCAL);
+ gf_msg(this->name, GF_LOG_INFO, 0, 0,
+ "state : GF_CS_LOCAL"
+ ", truncate successful");
+
+ goto unwind;
+ }
+
+repair:
+ ret = locate_and_execute(frame);
+ if (ret) {
+ goto unwind;
+ }
+
+ return 0;
+
+unwind:
+ CS_STACK_UNWIND(truncate, frame, op_ret, op_errno, prebuf, postbuf, xdata);
+ return 0;
+}
+
+int32_t
+cs_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset,
+ dict_t *xdata)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+ cs_inode_ctx_t *ctx = NULL;
+ gf_cs_obj_state state = -1;
+
+ VALIDATE_OR_GOTO(frame, err);
+ VALIDATE_OR_GOTO(this, err);
+ VALIDATE_OR_GOTO(loc, err);
+
+ local = cs_local_init(this, frame, loc, NULL, GF_FOP_TRUNCATE);
+ if (!local) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "local init failed");
+ goto err;
+ }
+
+ __cs_inode_ctx_get(this, loc->inode, &ctx);
+
+ if (ctx)
+ state = __cs_get_file_state(loc->inode, ctx);
+ else
+ state = GF_CS_LOCAL;
+
+ local->xattr_req = xdata ? dict_ref(xdata) : (xdata = dict_new());
+
+ ret = dict_set_uint32(local->xattr_req, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "dict_set failed key:"
+ " %s",
+ GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+
+ local->stub = fop_truncate_stub(frame, cs_resume_truncate, loc, offset,
+ xdata);
+ if (!local->stub) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "insufficient memory");
+ goto err;
+ }
+
+ if (state == GF_CS_LOCAL) {
+ STACK_WIND(frame, cs_truncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->truncate, loc, offset, xdata);
+
+ } else {
+ local->call_cnt++;
+ ret = locate_and_execute(frame);
+ if (ret) {
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ CS_STACK_UNWIND(truncate, frame, -1, ENOMEM, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+cs_statfs_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct statvfs *buf, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(statfs, frame, op_ret, op_errno, buf, xdata);
+ return 0;
+}
+
+int32_t
+cs_statfs(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ STACK_WIND(frame, cs_statfs_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->statfs, loc, xdata);
+ return 0;
+}
+
+int32_t
+cs_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(getxattr, frame, op_ret, op_errno, dict, xdata);
+ return 0;
+}
+
+int32_t
+cs_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, const char *name,
+ dict_t *xattr_req)
+{
+ STACK_WIND(frame, cs_getxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->getxattr, loc, name, xattr_req);
+ return 0;
+}
+
+int32_t
+cs_setxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ cs_local_t *local = NULL;
+
+ local = frame->local;
+
+ if (local->locked)
+ cs_inodelk_unlock(frame);
+
+ CS_STACK_UNWIND(setxattr, frame, op_ret, op_errno, xdata);
+
+ return 0;
+}
+
+int32_t
+cs_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
+ int32_t flags, dict_t *xdata)
+{
+ data_t *tmp = NULL;
+ cs_local_t *local = NULL;
+ int ret = 0;
+
+ VALIDATE_OR_GOTO(frame, err);
+ VALIDATE_OR_GOTO(this, err);
+
+ local = cs_local_init(this, frame, loc, NULL, GF_FOP_SETXATTR);
+ if (!local) {
+ ret = -1;
+ goto err;
+ }
+
+ local->xattr_req = xdata ? dict_ref(xdata) : (xdata = dict_new());
+
+ tmp = dict_get_sizen(dict, GF_CS_OBJECT_UPLOAD_COMPLETE);
+ if (tmp) {
+ /* Value of key should be the atime */
+ local->stub = fop_setxattr_stub(frame, cs_resume_setxattr, loc, dict,
+ flags, xdata);
+
+ if (!local->stub)
+ goto err;
+
+ ret = locate_and_execute(frame);
+ if (ret) {
+ goto err;
+ }
+
+ return 0;
+ }
+
+ STACK_WIND(frame, cs_setxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setxattr, loc, dict, flags, xdata);
+ return 0;
+err:
+ CS_STACK_UNWIND(setxattr, frame, -1, errno, NULL);
+ return 0;
+}
+
+int32_t
+cs_fgetxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(fgetxattr, frame, op_ret, op_errno, dict, xdata);
+ return 0;
+}
+
+int32_t
+cs_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
+ dict_t *xdata)
+{
+ STACK_WIND(frame, cs_fgetxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fgetxattr, fd, name, xdata);
+ return 0;
+}
+
+int32_t
+cs_fsetxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(fsetxattr, frame, op_ret, op_errno, xdata);
+ return 0;
+}
+
+int32_t
+cs_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *dict,
+ int32_t flags, dict_t *xdata)
+{
+ STACK_WIND(frame, cs_fsetxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags, xdata);
+ return 0;
+}
+
+int32_t
+cs_unlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iatt *preparent, struct iatt *postparent,
+ dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(unlink, frame, op_ret, op_errno, preparent, postparent,
+ xdata);
+ return 0;
+}
+
+int32_t
+cs_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ dict_t *xattr_req)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+
+ local = cs_local_init(this, frame, loc, NULL, GF_FOP_UNLINK);
+ if (!local)
+ goto err;
+
+ local->xattr_req = xattr_req ? dict_ref(xattr_req) : dict_new();
+
+ ret = dict_set_uint32(local->xattr_req, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "dict_set failed key:"
+ " %s",
+ GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+ STACK_WIND(frame, cs_unlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->unlink, loc, flags, local->xattr_req);
+ return 0;
+err:
+ CS_STACK_UNWIND(unlink, frame, -1, errno, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+cs_open_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, fd_t *fd, dict_t *xdata)
+{
+ int ret = 0;
+ uint64_t val = 0;
+
+ if (op_ret == 0) {
+ ret = dict_get_uint64(xdata, GF_CS_OBJECT_STATUS, &val);
+ if (!ret) {
+ ret = __cs_inode_ctx_update(this, fd->inode, val);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "ctx update failed");
+ }
+ }
+ } else {
+ cs_inode_ctx_reset(this, fd->inode);
+ }
+
+ CS_STACK_UNWIND(open, frame, op_ret, op_errno, fd, xdata);
+ return 0;
+}
+
+int32_t
+cs_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ fd_t *fd, dict_t *xattr_req)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+
+ local = cs_local_init(this, frame, NULL, fd, GF_FOP_OPEN);
+ if (!local)
+ goto err;
+
+ local->xattr_req = xattr_req ? dict_ref(xattr_req) : dict_new();
+
+ ret = dict_set_uint32(local->xattr_req, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "dict_set failed key:"
+ " %s",
+ GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+
+ STACK_WIND(frame, cs_open_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->open, loc, flags, fd, local->xattr_req);
+ return 0;
+err:
+ CS_STACK_UNWIND(open, frame, -1, errno, NULL, NULL);
+ return 0;
+}
+
+int32_t
+cs_fstat_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iatt *buf, dict_t *xdata)
+{
+ int ret = 0;
+ uint64_t val = 0;
+ fd_t *fd = NULL;
+ cs_local_t *local = NULL;
+
+ local = frame->local;
+
+ fd = local->fd;
+
+ if (op_ret == 0) {
+ ret = dict_get_uint64(xdata, GF_CS_OBJECT_STATUS, &val);
+ if (!ret) {
+ gf_msg_debug(this->name, 0, "state %" PRIu64, val);
+ ret = __cs_inode_ctx_update(this, fd->inode, val);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "ctx update failed");
+ }
+ }
+ } else {
+ cs_inode_ctx_reset(this, fd->inode);
+ }
+
+ CS_STACK_UNWIND(fstat, frame, op_ret, op_errno, buf, xdata);
+
+ return 0;
+}
+
+int32_t
+cs_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xattr_req)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+
+ local = cs_local_init(this, frame, NULL, fd, GF_FOP_FSTAT);
+ if (!local)
+ goto err;
+
+ if (fd->inode->ia_type == IA_IFDIR)
+ goto wind;
+
+ local->xattr_req = xattr_req ? dict_ref(xattr_req) : dict_new();
+
+ ret = dict_set_uint32(local->xattr_req, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "dict_set failed key:"
+ " %s",
+ GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+
+wind:
+ STACK_WIND(frame, cs_fstat_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fstat, fd, local->xattr_req);
+ return 0;
+err:
+ CS_STACK_UNWIND(fstat, frame, -1, errno, NULL, NULL);
+ return 0;
+}
+
+cs_local_t *
+cs_local_init(xlator_t *this, call_frame_t *frame, loc_t *loc, fd_t *fd,
+ glusterfs_fop_t fop)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+
+ local = mem_get0(this->local_pool);
+ if (!local)
+ goto out;
+
+ if (loc) {
+ ret = loc_copy(&local->loc, loc);
+ if (ret)
+ goto out;
+ }
+
+ if (fd) {
+ local->fd = fd_ref(fd);
+ }
+
+ local->op_ret = -1;
+ local->op_errno = EUCLEAN;
+ local->fop = fop;
+ local->dloffset = 0;
+ frame->local = local;
+ local->locked = _gf_false;
+ local->call_cnt = 0;
+out:
+ if (ret) {
+ if (local)
+ mem_put(local);
+ local = NULL;
+ }
+
+ return local;
+}
+
+call_frame_t *
+cs_lock_frame(call_frame_t *parent_frame)
+{
+ call_frame_t *lock_frame = NULL;
+
+ lock_frame = copy_frame(parent_frame);
+
+ if (lock_frame == NULL)
+ goto out;
+
+ set_lk_owner_from_ptr(&lock_frame->root->lk_owner, parent_frame->root);
+
+out:
+ return lock_frame;
+}
+
+void
+cs_lock_wipe(call_frame_t *lock_frame)
+{
+ CS_STACK_DESTROY(lock_frame);
+}
+
+int32_t
+cs_inodelk_unlock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ cs_lock_wipe(frame);
+
+ return 0;
+}
+
+int
+cs_inodelk_unlock(call_frame_t *main_frame)
+{
+ xlator_t *this = NULL;
+ struct gf_flock flock = {
+ 0,
+ };
+ call_frame_t *lock_frame = NULL;
+ cs_local_t *lock_local = NULL;
+ cs_local_t *main_local = NULL;
+ int ret = 0;
+
+ this = main_frame->this;
+ main_local = main_frame->local;
+
+ lock_frame = cs_lock_frame(main_frame);
+ if (!lock_frame)
+ goto out;
+
+ lock_local = cs_local_init(this, lock_frame, NULL, NULL, 0);
+ if (!lock_local)
+ goto out;
+
+ ret = cs_build_loc(&lock_local->loc, main_frame);
+ if (ret) {
+ goto out;
+ }
+
+ flock.l_type = F_UNLCK;
+
+ main_local->locked = _gf_false;
+
+ STACK_WIND(lock_frame, cs_inodelk_unlock_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->inodelk, CS_LOCK_DOMAIN,
+ &lock_local->loc, F_SETLKW, &flock, NULL);
+
+ return 0;
+
+out:
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+           "Stale lock may be left on the server");
+
+ if (lock_frame)
+ cs_lock_wipe(lock_frame);
+
+ return 0;
+}
+
+int
+cs_download_task(void *arg)
+{
+ call_frame_t *frame = NULL;
+ xlator_t *this = NULL;
+ cs_private_t *priv = NULL;
+ int ret = -1;
+ char *sign_req = NULL;
+ fd_t *fd = NULL;
+ cs_local_t *local = NULL;
+ dict_t *dict = NULL;
+
+ frame = (call_frame_t *)arg;
+
+ this = frame->this;
+
+ priv = this->private;
+
+ if (!priv->stores) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "No remote store "
+ "plugins found");
+ ret = -1;
+ goto out;
+ }
+
+ local = frame->local;
+
+ if (local->fd)
+ fd = fd_anonymous(local->fd->inode);
+ else
+ fd = fd_anonymous(local->loc.inode);
+
+ if (!fd) {
+ gf_msg("CS", GF_LOG_ERROR, 0, 0, "fd creation failed");
+ ret = -1;
+ goto out;
+ }
+
+ local->dlfd = fd;
+ local->dloffset = 0;
+
+ dict = dict_new();
+ if (!dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, ENOMEM,
+ "failed to create "
+ "dict");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_uint32(dict, GF_CS_OBJECT_DOWNLOADING, 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "dict_set failed");
+ ret = -1;
+ goto out;
+ }
+
+ ret = syncop_fsetxattr(this, local->fd, dict, 0, NULL, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "fsetxattr failed "
+ "key %s",
+ GF_CS_OBJECT_DOWNLOADING);
+ ret = -1;
+ goto out;
+ }
+    /* this calling method is for per-volume setting */
+ ret = priv->stores->dlfop(frame, priv->stores->config);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "download failed"
+ ", remotepath: %s",
+ local->remotepath);
+
+        /* using dlfd as it is anonymous and has the RDWR flag */
+ ret = syncop_ftruncate(FIRST_CHILD(this), local->dlfd, 0, NULL, NULL,
+ NULL, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, -ret, "ftruncate failed");
+ } else {
+ gf_msg_debug(this->name, 0, "ftruncate succeed");
+ }
+
+ ret = -1;
+ goto out;
+ } else {
+ gf_msg(this->name, GF_LOG_INFO, 0, 0,
+ "download success, path"
+ " : %s",
+ local->remotepath);
+
+ ret = syncop_fremovexattr(this, local->fd, GF_CS_OBJECT_REMOTE, NULL,
+ NULL);
+ if (ret) {
+            gf_msg(this->name, GF_LOG_ERROR, 0, -ret,
+                   "fremovexattr failed for the remote xattr");
+ ret = -1;
+ goto out;
+ } else {
+ gf_msg_debug(this->name, 0,
+ "fremovexattr success, "
+ "path : %s",
+ local->remotepath);
+ }
+
+ ret = syncop_fremovexattr(this, local->fd, GF_CS_OBJECT_DOWNLOADING,
+ NULL, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, -ret,
+ "removexattr failed, downloading xattr, path %s",
+ local->remotepath);
+ ret = -1;
+ goto out;
+ } else {
+ gf_msg_debug(this->name, 0,
+ "fremovexattr success"
+ " path %s",
+ local->remotepath);
+ }
+ }
+
+out:
+ GF_FREE(sign_req);
+
+ if (dict)
+ dict_unref(dict);
+
+ if (fd) {
+ fd_unref(fd);
+ local->dlfd = NULL;
+ }
+
+ return ret;
+}
+
+int
+cs_download(call_frame_t *frame)
+{
+ int ret = 0;
+ cs_local_t *local = NULL;
+ xlator_t *this = NULL;
+
+ local = frame->local;
+ this = frame->this;
+
+ if (!local->remotepath) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "remote path not"
+ " available. Check posix logs to resolve");
+ goto out;
+ }
+
+ ret = cs_download_task((void *)frame);
+out:
+ return ret;
+}
+
+int
+cs_set_xattr_req(call_frame_t *frame)
+{
+ cs_local_t *local = NULL;
+ GF_UNUSED int ret = 0;
+
+ local = frame->local;
+
+    /* When remote reads are performed (i.e. reads on the remote store),
+     * there needs to be a way to associate a file on the gluster volume
+     * with its corresponding file on the remote store. In order to do
+     * that, a unique key can be maintained as an xattr
+     * (GF_CS_XATTR_ARCHIVE_UUID) on the stub file on the gluster bricks.
+     * This xattr should be provided to the plugin so that it can
+     * perform the read fop on the correct file. This assumes that the file
+     * hierarchy and name need not be the same on the remote store as on
+     * the gluster volume.
+     */
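+    /* Illustrative note (a sketch, not part of the original change): on a
+     * brick, the stub file's xattrs -- including whatever name
+     * GF_CS_XATTR_ARCHIVE_UUID expands to -- could typically be inspected
+     * with something like:
+     *   getfattr -d -m . -e hex <brick-path>/<stub-file>
+     * The uuid fetched through this key is what the plugin uses to locate
+     * the corresponding object on the remote store.
+     */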
+ ret = dict_set_sizen_str_sizen(local->xattr_req, GF_CS_XATTR_ARCHIVE_UUID,
+ "1");
+
+ return 0;
+}
+
+int
+cs_update_xattrs(call_frame_t *frame, dict_t *xdata)
+{
+ cs_local_t *local = NULL;
+ xlator_t *this = NULL;
+ int size = -1;
+ GF_UNUSED int ret = 0;
+
+ local = frame->local;
+ this = frame->this;
+
+ local->xattrinfo.lxattr = GF_CALLOC(1, sizeof(cs_loc_xattr_t),
+ gf_cs_mt_cs_lxattr_t);
+ if (!local->xattrinfo.lxattr) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto err;
+ }
+
+ gf_uuid_copy(local->xattrinfo.lxattr->gfid, local->loc.gfid);
+
+ if (local->remotepath) {
+ local->xattrinfo.lxattr->file_path = gf_strdup(local->remotepath);
+ if (!local->xattrinfo.lxattr->file_path) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto err;
+ }
+ }
+
+ ret = dict_get_gfuuid(xdata, GF_CS_XATTR_ARCHIVE_UUID,
+ &(local->xattrinfo.lxattr->uuid));
+
+ if (ret) {
+ gf_uuid_clear(local->xattrinfo.lxattr->uuid);
+ }
+ size = strlen(this->name) - strlen("-cloudsync") + 1;
+ local->xattrinfo.lxattr->volname = GF_CALLOC(1, size, gf_common_mt_char);
+ if (!local->xattrinfo.lxattr->volname) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto err;
+ }
+ strncpy(local->xattrinfo.lxattr->volname, this->name, size - 1);
+ local->xattrinfo.lxattr->volname[size - 1] = '\0';
+
+ return 0;
+err:
+ cs_xattrinfo_wipe(local);
+ return -1;
+}
+
+int
+cs_serve_readv(call_frame_t *frame, off_t offset, size_t size, uint32_t flags)
+{
+ xlator_t *this = NULL;
+ cs_private_t *priv = NULL;
+ int ret = -1;
+ fd_t *fd = NULL;
+ cs_local_t *local = NULL;
+
+ local = frame->local;
+ this = frame->this;
+ priv = this->private;
+
+ if (!local->remotepath) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "remote path not"
+ " available. Check posix logs to resolve");
+ goto out;
+ }
+
+ if (!priv->stores) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "No remote store "
+ "plugins found");
+ ret = -1;
+ goto out;
+ }
+
+ if (local->fd) {
+ fd = fd_anonymous(local->fd->inode);
+ } else {
+ fd = fd_anonymous(local->loc.inode);
+ }
+
+ local->xattrinfo.size = size;
+ local->xattrinfo.offset = offset;
+ local->xattrinfo.flags = flags;
+
+ if (!fd) {
+ gf_msg("CS", GF_LOG_ERROR, 0, 0, "fd creation failed");
+ ret = -1;
+ goto out;
+ }
+
+ local->dlfd = fd;
+ local->dloffset = offset;
+
+    /* this calling method is for per-volume setting */
+ ret = priv->stores->rdfop(frame, priv->stores->config);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "read failed"
+ ", remotepath: %s",
+ local->remotepath);
+ ret = -1;
+ goto out;
+ } else {
+ gf_msg(this->name, GF_LOG_INFO, 0, 0,
+ "read success, path"
+ " : %s",
+ local->remotepath);
+ }
+
+out:
+ if (fd) {
+ fd_unref(fd);
+ local->dlfd = NULL;
+ }
+ return ret;
+}
+
+int32_t
+cs_readv_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iovec *vector, int32_t count,
+ struct iatt *stbuf, struct iobref *iobref, dict_t *xdata)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+ uint64_t val = 0;
+ fd_t *fd = NULL;
+
+ local = frame->local;
+ fd = local->fd;
+
+ local->call_cnt++;
+
+ if (op_ret == -1) {
+ ret = dict_get_uint64(xdata, GF_CS_OBJECT_STATUS, &val);
+ if (ret == 0) {
+ if (val == GF_CS_ERROR) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "could not get file state, unwinding");
+ op_ret = -1;
+ op_errno = EIO;
+ goto unwind;
+ } else {
+ __cs_inode_ctx_update(this, fd->inode, val);
+ gf_msg(this->name, GF_LOG_INFO, 0, 0, " state = %" PRIu64, val);
+
+ if (local->call_cnt == 1 &&
+ (val == GF_CS_REMOTE || val == GF_CS_DOWNLOADING)) {
+ gf_msg(this->name, GF_LOG_INFO, 0, 0,
+ " will read from remote : %" PRIu64, val);
+ goto repair;
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "second readv, Unwinding");
+ goto unwind;
+ }
+ }
+ } else {
+            gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+                   "file state could not be determined, unwinding");
+ goto unwind;
+ }
+ } else {
+ /* successful readv => file is local */
+ __cs_inode_ctx_update(this, fd->inode, GF_CS_LOCAL);
+ gf_msg(this->name, GF_LOG_INFO, 0, 0,
+ "state : GF_CS_LOCAL"
+ ", readv successful");
+
+ goto unwind;
+ }
+
+repair:
+ ret = locate_and_execute(frame);
+ if (ret) {
+ goto unwind;
+ }
+
+ return 0;
+
+unwind:
+ CS_STACK_UNWIND(readv, frame, op_ret, op_errno, vector, count, stbuf,
+ iobref, xdata);
+
+ return 0;
+}
+
+int32_t
+cs_resume_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata)
+{
+ int ret = 0;
+
+ ret = cs_resume_postprocess(this, frame, fd->inode);
+ if (ret) {
+ goto unwind;
+ }
+
+ cs_inodelk_unlock(frame);
+
+ STACK_WIND(frame, cs_readv_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readv, fd, size, offset, flags, xdata);
+
+ return 0;
+
+unwind:
+ cs_inodelk_unlock(frame);
+
+ cs_common_cbk(frame);
+
+ return 0;
+}
+
+int32_t
+cs_resume_remote_readv(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ size_t size, off_t offset, uint32_t flags, dict_t *xdata)
+{
+ int ret = 0;
+ cs_local_t *local = NULL;
+ gf_cs_obj_state state = -1;
+ cs_inode_ctx_t *ctx = NULL;
+
+ cs_inodelk_unlock(frame);
+
+ local = frame->local;
+ if (!local) {
+ ret = -1;
+ goto unwind;
+ }
+
+ __cs_inode_ctx_get(this, fd->inode, &ctx);
+
+ state = __cs_get_file_state(fd->inode, ctx);
+ if (state == GF_CS_ERROR) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "status is GF_CS_ERROR."
+ " Aborting readv");
+ local->op_ret = -1;
+ local->op_errno = EREMOTE;
+ ret = -1;
+ goto unwind;
+ }
+
+ /* Serve readv from remote store only if it is remote. */
+ gf_msg_debug(this->name, 0, "status of file %s is %d",
+ local->remotepath ? local->remotepath : "", state);
+
+ /* We will reach this condition if local inode ctx had REMOTE
+ * state when the control was in cs_readv but after stat
+ * we got an updated state saying that the file is LOCAL.
+ */
+ if (state == GF_CS_LOCAL) {
+ STACK_WIND(frame, cs_readv_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readv, fd, size, offset, flags,
+ xdata);
+ } else if (state == GF_CS_REMOTE) {
+ ret = cs_resume_remote_readv_postprocess(this, frame, fd->inode, offset,
+ size, flags);
+ /* Failed to submit the remote readv fop to plugin */
+ if (ret) {
+ local->op_ret = -1;
+ local->op_errno = EREMOTE;
+ goto unwind;
+ }
+ /* When the file is in any other intermediate state,
+ * we should not perform remote reads.
+ */
+ } else {
+ local->op_ret = -1;
+ local->op_errno = EINVAL;
+ goto unwind;
+ }
+
+ return 0;
+
+unwind:
+ cs_common_cbk(frame);
+
+ return 0;
+}
+
+int32_t
+cs_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata)
+{
+ int op_errno = ENOMEM;
+ cs_local_t *local = NULL;
+ int ret = 0;
+ cs_inode_ctx_t *ctx = NULL;
+ gf_cs_obj_state state = -1;
+ cs_private_t *priv = NULL;
+
+ VALIDATE_OR_GOTO(frame, err);
+ VALIDATE_OR_GOTO(this, err);
+ VALIDATE_OR_GOTO(fd, err);
+
+ priv = this->private;
+
+ local = cs_local_init(this, frame, NULL, fd, GF_FOP_READ);
+ if (!local) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "local init failed");
+ goto err;
+ }
+
+ __cs_inode_ctx_get(this, fd->inode, &ctx);
+
+ if (ctx)
+ state = __cs_get_file_state(fd->inode, ctx);
+ else
+ state = GF_CS_LOCAL;
+
+ local->xattr_req = xdata ? dict_ref(xdata) : (xdata = dict_new());
+
+ ret = dict_set_uint32(local->xattr_req, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "dict_set failed key:"
+ " %s",
+ GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+
+ if (priv->remote_read) {
+ local->stub = fop_readv_stub(frame, cs_resume_remote_readv, fd, size,
+ offset, flags, xdata);
+ } else {
+ local->stub = fop_readv_stub(frame, cs_resume_readv, fd, size, offset,
+ flags, xdata);
+ }
+ if (!local->stub) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "insufficient memory");
+ goto err;
+ }
+
+ if (state == GF_CS_LOCAL) {
+ STACK_WIND(frame, cs_readv_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readv, fd, size, offset, flags,
+ xdata);
+ } else {
+ local->call_cnt++;
+ ret = locate_and_execute(frame);
+ if (ret) {
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ CS_STACK_UNWIND(readv, frame, -1, op_errno, NULL, -1, NULL, NULL, NULL);
+
+ return 0;
+}
+
+int
+cs_resume_remote_readv_postprocess(xlator_t *this, call_frame_t *frame,
+ inode_t *inode, off_t offset, size_t size,
+ uint32_t flags)
+{
+ int ret = 0;
+
+ ret = cs_serve_readv(frame, offset, size, flags);
+
+ return ret;
+}
+
+int
+cs_stat_check_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, struct iatt *stbuf, dict_t *xdata)
+{
+ cs_local_t *local = NULL;
+ call_stub_t *stub = NULL;
+ char *filepath = NULL;
+ int ret = 0;
+ inode_t *inode = NULL;
+ uint64_t val = 0;
+
+ local = frame->local;
+
+ if (op_ret == -1) {
+ local->op_ret = op_ret;
+ local->op_errno = op_errno;
+ gf_msg(this->name, GF_LOG_ERROR, 0, op_errno, "stat check failed");
+ goto err;
+ } else {
+ if (local->fd)
+ inode = local->fd->inode;
+ else
+ inode = local->loc.inode;
+
+ if (!inode) {
+ local->op_ret = -1;
+ local->op_errno = EINVAL;
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "null inode "
+ "returned");
+ goto err;
+ }
+
+ ret = dict_get_uint64(xdata, GF_CS_OBJECT_STATUS, &val);
+ if (ret == 0) {
+ if (val == GF_CS_ERROR) {
+ cs_inode_ctx_reset(this, inode);
+ local->op_ret = -1;
+ local->op_errno = EIO;
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "status = GF_CS_ERROR. failed to get "
+ " file state");
+ goto err;
+ } else {
+ ret = __cs_inode_ctx_update(this, inode, val);
+ gf_msg_debug(this->name, 0, "status : %" PRIu64, val);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "ctx update failed");
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto err;
+ }
+ }
+ } else {
+ gf_msg_debug(this->name, 0, "status not found in dict");
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto err;
+ }
+
+ ret = dict_get_str_sizen(xdata, GF_CS_OBJECT_REMOTE, &filepath);
+ if (filepath) {
+ gf_msg_debug(this->name, 0, "filepath returned %s", filepath);
+ local->remotepath = gf_strdup(filepath);
+ if (!local->remotepath) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto err;
+ }
+ } else {
+ gf_msg_debug(this->name, 0, "NULL filepath");
+ }
+
+ ret = cs_update_xattrs(frame, xdata);
+ if (ret)
+ goto err;
+
+ local->op_ret = 0;
+ local->xattr_rsp = dict_ref(xdata);
+ memcpy(&local->stbuf, stbuf, sizeof(struct iatt));
+ }
+
+ stub = local->stub;
+ local->stub = NULL;
+ call_resume(stub);
+
+ return 0;
+err:
+ cs_inodelk_unlock(frame);
+
+ cs_common_cbk(frame);
+
+ return 0;
+}
+
+int
+cs_do_stat_check(call_frame_t *main_frame)
+{
+ cs_local_t *local = NULL;
+ xlator_t *this = NULL;
+ int ret = 0;
+
+ local = main_frame->local;
+ this = main_frame->this;
+
+ ret = dict_set_uint32(local->xattr_req, GF_CS_OBJECT_REPAIR, 256);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "dict_set failed");
+ goto err;
+ }
+
+ cs_set_xattr_req(main_frame);
+
+ if (local->fd) {
+ STACK_WIND(main_frame, cs_stat_check_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fstat, local->fd, local->xattr_req);
+ } else {
+ STACK_WIND(main_frame, cs_stat_check_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->stat, &local->loc,
+ local->xattr_req);
+ }
+
+ return 0;
+
+err:
+ cs_inodelk_unlock(main_frame);
+
+ cs_common_cbk(main_frame);
+
+ return 0;
+}
+
+void
+cs_common_cbk(call_frame_t *frame)
+{
+ glusterfs_fop_t fop = -1;
+ cs_local_t *local = NULL;
+
+ local = frame->local;
+
+ fop = local->fop;
+
+    /* Note: only the failure case needs to be handled here, since for a
+     * successful stat check the fop will resume anyway. The unwind can
+     * happen from the fop_cbk, and each cbk can unlock the inodelk in case
+     * a lock was taken before. The lock status can be stored in the frame. */
+
+ /* for failure case */
+
+ /*TODO: add other fops*/
+ switch (fop) {
+ case GF_FOP_WRITE:
+ CS_STACK_UNWIND(writev, frame, local->op_ret, local->op_errno, NULL,
+ NULL, NULL);
+ break;
+
+ case GF_FOP_SETXATTR:
+ CS_STACK_UNWIND(setxattr, frame, local->op_ret, local->op_errno,
+ NULL);
+ break;
+ case GF_FOP_READ:
+ CS_STACK_UNWIND(readv, frame, local->op_ret, local->op_errno, NULL,
+ 0, NULL, NULL, NULL);
+ break;
+ case GF_FOP_FTRUNCATE:
+ CS_STACK_UNWIND(ftruncate, frame, local->op_ret, local->op_errno,
+ NULL, NULL, NULL);
+ break;
+
+ case GF_FOP_TRUNCATE:
+ CS_STACK_UNWIND(truncate, frame, local->op_ret, local->op_errno,
+ NULL, NULL, NULL);
+ break;
+ default:
+ break;
+ }
+
+ return;
+}
+
+int
+cs_blocking_inodelk_cbk(call_frame_t *lock_frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ cs_local_t *main_local = NULL;
+ call_frame_t *main_frame = NULL;
+ cs_local_t *lock_local = NULL;
+
+ lock_local = lock_frame->local;
+
+ main_frame = lock_local->main_frame;
+ main_local = main_frame->local;
+
+ if (op_ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "inodelk failed");
+ main_local->op_errno = op_errno;
+ main_local->op_ret = op_ret;
+ goto err;
+ }
+
+ main_local->locked = _gf_true;
+
+ cs_lock_wipe(lock_frame);
+
+ cs_do_stat_check(main_frame);
+
+ return 0;
+err:
+ cs_common_cbk(main_frame);
+
+ cs_lock_wipe(lock_frame);
+
+ return 0;
+}
+
+int
+cs_build_loc(loc_t *loc, call_frame_t *frame)
+{
+ cs_local_t *local = NULL;
+ int ret = -1;
+
+ local = frame->local;
+
+ if (local->fd) {
+ loc->inode = inode_ref(local->fd->inode);
+ if (loc->inode) {
+ gf_uuid_copy(loc->gfid, loc->inode->gfid);
+ ret = 0;
+ goto out;
+ } else {
+ ret = -1;
+ goto out;
+ }
+ } else {
+ loc->inode = inode_ref(local->loc.inode);
+ if (loc->inode) {
+ gf_uuid_copy(loc->gfid, loc->inode->gfid);
+ ret = 0;
+ goto out;
+ } else {
+ ret = -1;
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+int
+cs_blocking_inodelk(call_frame_t *parent_frame)
+{
+ call_frame_t *lock_frame = NULL;
+ cs_local_t *lock_local = NULL;
+ xlator_t *this = NULL;
+ struct gf_flock flock = {
+ 0,
+ };
+ int ret = 0;
+
+ this = parent_frame->this;
+
+ lock_frame = cs_lock_frame(parent_frame);
+ if (!lock_frame) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "insuffcient memory");
+ goto err;
+ }
+
+ lock_local = cs_local_init(this, lock_frame, NULL, NULL, 0);
+ if (!lock_local) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "local init failed");
+ goto err;
+ }
+
+ lock_local->main_frame = parent_frame;
+
+ flock.l_type = F_WRLCK;
+
+ ret = cs_build_loc(&lock_local->loc, parent_frame);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "build_loc failed");
+ goto err;
+ }
+
+ STACK_WIND(lock_frame, cs_blocking_inodelk_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->inodelk, CS_LOCK_DOMAIN,
+ &lock_local->loc, F_SETLKW, &flock, NULL);
+
+ return 0;
+err:
+ if (lock_frame)
+ cs_lock_wipe(lock_frame);
+
+ return -1;
+}
+
+int
+locate_and_execute(call_frame_t *frame)
+{
+ int ret = 0;
+
+ ret = cs_blocking_inodelk(frame);
+
+ if (ret)
+ return -1;
+ else
+ return 0;
+}
+
+int32_t
+cs_resume_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ off_t offset, dict_t *xattr_req)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+
+ local = frame->local;
+
+ ret = cs_resume_postprocess(this, frame, loc->inode);
+ if (ret) {
+ goto unwind;
+ }
+
+ cs_inodelk_unlock(frame);
+
+ STACK_WIND(frame, cs_truncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->truncate, loc, offset,
+ local->xattr_req);
+
+ return 0;
+
+unwind:
+ cs_inodelk_unlock(frame);
+
+ cs_common_cbk(frame);
+
+ return 0;
+}
+
+int32_t
+cs_resume_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ dict_t *dict, int32_t flags, dict_t *xdata)
+{
+ cs_local_t *local = NULL;
+ cs_inode_ctx_t *ctx = NULL;
+ gf_cs_obj_state state = GF_CS_ERROR;
+
+ local = frame->local;
+
+ __cs_inode_ctx_get(this, loc->inode, &ctx);
+
+ state = __cs_get_file_state(loc->inode, ctx);
+
+ if (state == GF_CS_ERROR) {
+        /* could not determine the file state */
+        local->op_ret = -1;
+        local->op_errno = EINVAL;
+        gf_msg(this->name, GF_LOG_WARNING, 0, 0,
+               "file %s: could not determine file state", loc->path);
+ goto unwind;
+ }
+
+ if (state == GF_CS_REMOTE) {
+ /* file is already remote */
+ local->op_ret = -1;
+ local->op_errno = EINVAL;
+ gf_msg(this->name, GF_LOG_WARNING, 0, EINVAL,
+ "file %s is already remote", loc->path);
+ goto unwind;
+ }
+
+ if (state == GF_CS_DOWNLOADING) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, 0,
+ " file is in downloading state.");
+ local->op_ret = -1;
+ local->op_errno = EINVAL;
+ goto unwind;
+ }
+
+ STACK_WIND(frame, cs_setxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setxattr, loc, dict, flags,
+ local->xattr_req);
+
+ return 0;
+unwind:
+ cs_inodelk_unlock(frame);
+
+ cs_common_cbk(frame);
+
+ return 0;
+}
+
+gf_cs_obj_state
+__cs_get_file_state(inode_t *inode, cs_inode_ctx_t *ctx)
+{
+ gf_cs_obj_state state = -1;
+
+ if (!ctx)
+ return GF_CS_ERROR;
+
+ LOCK(&inode->lock);
+ {
+ state = ctx->state;
+ }
+ UNLOCK(&inode->lock);
+
+ return state;
+}
+
+void
+__cs_inode_ctx_get(xlator_t *this, inode_t *inode, cs_inode_ctx_t **ctx)
+{
+ uint64_t ctxint = 0;
+ int ret = 0;
+
+ LOCK(&inode->lock);
+ {
+ ret = __inode_ctx_get(inode, this, &ctxint);
+ }
+ UNLOCK(&inode->lock);
+
+ if (ret)
+ *ctx = NULL;
+ else
+ *ctx = (cs_inode_ctx_t *)(uintptr_t)ctxint;
+
+ return;
+}
+
+int
+__cs_inode_ctx_update(xlator_t *this, inode_t *inode, uint64_t val)
+{
+ cs_inode_ctx_t *ctx = NULL;
+ uint64_t ctxint = 0;
+ int ret = 0;
+
+ LOCK(&inode->lock);
+ {
+ ret = __inode_ctx_get(inode, this, &ctxint);
+ if (ret) {
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_cs_mt_cs_inode_ctx_t);
+ if (!ctx) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "ctx allocation failed");
+ ret = -1;
+ goto out;
+ }
+
+ ctx->state = val;
+
+ ctxint = (uint64_t)(uintptr_t)ctx;
+
+ ret = __inode_ctx_set(inode, this, &ctxint);
+ if (ret) {
+ GF_FREE(ctx);
+ goto out;
+ }
+ } else {
+ ctx = (cs_inode_ctx_t *)(uintptr_t)ctxint;
+
+ ctx->state = val;
+ }
+ }
+
+out:
+ UNLOCK(&inode->lock);
+
+ return ret;
+}
+
+int
+cs_inode_ctx_reset(xlator_t *this, inode_t *inode)
+{
+ cs_inode_ctx_t *ctx = NULL;
+ uint64_t ctxint = 0;
+
+ inode_ctx_del(inode, this, &ctxint);
+ if (!ctxint) {
+ return 0;
+ }
+
+ ctx = (cs_inode_ctx_t *)(uintptr_t)ctxint;
+
+ GF_FREE(ctx);
+ return 0;
+}
+
+int
+cs_resume_postprocess(xlator_t *this, call_frame_t *frame, inode_t *inode)
+{
+ cs_local_t *local = NULL;
+ gf_cs_obj_state state = -1;
+ cs_inode_ctx_t *ctx = NULL;
+ int ret = 0;
+
+ local = frame->local;
+ if (!local) {
+ ret = -1;
+ goto out;
+ }
+
+ __cs_inode_ctx_get(this, inode, &ctx);
+
+ state = __cs_get_file_state(inode, ctx);
+ if (state == GF_CS_ERROR) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "status is GF_CS_ERROR."
+ " Aborting write");
+ local->op_ret = -1;
+ local->op_errno = EREMOTE;
+ ret = -1;
+ goto out;
+ }
+
+ if (state == GF_CS_REMOTE || state == GF_CS_DOWNLOADING) {
+ gf_msg_debug(this->name, 0, "status is %d", state);
+ ret = cs_download(frame);
+ if (ret == 0) {
+ gf_msg_debug(this->name, 0, "Winding for Final Write");
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ " download failed, unwinding writev");
+ local->op_ret = -1;
+ local->op_errno = EREMOTE;
+ ret = -1;
+ }
+ }
+out:
+ return ret;
+}
+
+int32_t
+cs_fdctx_to_dict(xlator_t *this, fd_t *fd, dict_t *dict)
+{
+ return 0;
+}
+
+int32_t
+cs_inode(xlator_t *this)
+{
+ return 0;
+}
+
+int32_t
+cs_inode_to_dict(xlator_t *this, dict_t *dict)
+{
+ return 0;
+}
+
+int32_t
+cs_history(xlator_t *this)
+{
+ return 0;
+}
+
+int32_t
+cs_fd(xlator_t *this)
+{
+ return 0;
+}
+
+int32_t
+cs_fd_to_dict(xlator_t *this, dict_t *dict)
+{
+ return 0;
+}
+
+int32_t
+cs_fdctx(xlator_t *this, fd_t *fd)
+{
+ return 0;
+}
+
+int32_t
+cs_inodectx(xlator_t *this, inode_t *ino)
+{
+ return 0;
+}
+
+int32_t
+cs_inodectx_to_dict(xlator_t *this, inode_t *ino, dict_t *dict)
+{
+ return 0;
+}
+
+int32_t
+cs_priv_to_dict(xlator_t *this, dict_t *dict, char *brickname)
+{
+ return 0;
+}
+
+int32_t
+cs_priv(xlator_t *this)
+{
+ return 0;
+}
+
+int
+cs_notify(xlator_t *this, int event, void *data, ...)
+{
+ return default_notify(this, event, data);
+}
+
+struct xlator_fops cs_fops = {
+ .stat = cs_stat,
+ .readdirp = cs_readdirp,
+ .truncate = cs_truncate,
+ .seek = cs_seek,
+ .statfs = cs_statfs,
+ .fallocate = cs_fallocate,
+ .discard = cs_discard,
+ .getxattr = cs_getxattr,
+ .writev = cs_writev,
+ .setxattr = cs_setxattr,
+ .fgetxattr = cs_fgetxattr,
+ .lookup = cs_lookup,
+ .fsetxattr = cs_fsetxattr,
+ .readv = cs_readv,
+ .ftruncate = cs_ftruncate,
+ .rchecksum = cs_rchecksum,
+ .unlink = cs_unlink,
+ .open = cs_open,
+ .fstat = cs_fstat,
+ .zerofill = cs_zerofill,
+};
+
+struct xlator_cbks cs_cbks = {
+ .forget = cs_forget,
+};
+
+struct xlator_dumpops cs_dumpops = {
+ .fdctx_to_dict = cs_fdctx_to_dict,
+ .inode = cs_inode,
+ .inode_to_dict = cs_inode_to_dict,
+ .history = cs_history,
+ .fd = cs_fd,
+ .fd_to_dict = cs_fd_to_dict,
+ .fdctx = cs_fdctx,
+ .inodectx = cs_inodectx,
+ .inodectx_to_dict = cs_inodectx_to_dict,
+ .priv_to_dict = cs_priv_to_dict,
+ .priv = cs_priv,
+};
+
+struct volume_options cs_options[] = {
+ {.key = {"cloudsync-storetype"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "Defines which remote store is enabled"},
+ {.key = {"cloudsync-remote-read"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .description = "Defines a remote read fop when on"},
+ {.key = {"cloudsync-store-id"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "Defines a volume wide store id"},
+ {.key = {"cloudsync-product-id"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "Defines a volume wide product id"},
+ {.key = {NULL}},
+};
+
+xlator_api_t xlator_api = {
+ .init = cs_init,
+ .fini = cs_fini,
+ .notify = cs_notify,
+ .reconfigure = cs_reconfigure,
+ .mem_acct_init = cs_mem_acct_init,
+ .dumpops = &cs_dumpops,
+ .fops = &cs_fops,
+ .cbks = &cs_cbks,
+ .options = cs_options,
+ .identifier = "cloudsync",
+ .category = GF_TECH_PREVIEW,
+};
diff --git a/xlators/features/cloudsync/src/cloudsync.h b/xlators/features/cloudsync/src/cloudsync.h
new file mode 100644
index 00000000000..d24141978d6
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#ifndef __CLOUDSYNC_H__
+#define __CLOUDSYNC_H__
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/syncop.h>
+#include <glusterfs/call-stub.h>
+#include "cloudsync-common.h"
+#include "cloudsync-autogen-fops.h"
+
+#define ALIGN_SIZE 4096
+#define CS_LOCK_DOMAIN "cs.protect.file.stat"
+typedef struct cs_dlstore {
+ off_t off;
+ struct iovec *vector;
+ int32_t count;
+ struct iobref *iobref;
+ uint32_t flags;
+} cs_dlstore;
+
+typedef struct cs_inode_ctx {
+ cs_loc_xattr_t locxattr;
+ gf_cs_obj_state state;
+} cs_inode_ctx_t;
+
+struct cs_plugin {
+ char *name; /* store name */
+ char *library; /* library to load for the given store */
+ char *description; /* description about the store */
+};
+
+cs_local_t *
+cs_local_init(xlator_t *this, call_frame_t *frame, loc_t *loc, fd_t *fd,
+ glusterfs_fop_t fop);
+
+int
+locate_and_execute(call_frame_t *frame);
+
+int32_t
+cs_resume_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ dict_t *dict, int32_t flags, dict_t *xdata);
+
+int32_t
+cs_inodelk_unlock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata);
+
+size_t
+cs_write_callback(void *lcurlbuf, size_t size, size_t nitems, void *frame);
+
+void
+cs_common_cbk(call_frame_t *frame);
+
+gf_boolean_t
+cs_is_file_remote(struct iatt *stbuf, dict_t *xattr);
+
+int32_t
+cs_setxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata);
+int
+cs_build_loc(loc_t *loc, call_frame_t *frame);
+
+int
+cs_blocking_inodelk_cbk(call_frame_t *lock_frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata);
+
+int
+cs_read_authinfo(xlator_t *this);
+
+int
+__cs_inode_ctx_update(xlator_t *this, inode_t *inode, uint64_t val);
+
+int
+cs_inode_ctx_reset(xlator_t *this, inode_t *inode);
+
+void
+__cs_inode_ctx_get(xlator_t *this, inode_t *inode, cs_inode_ctx_t **ctx);
+
+gf_cs_obj_state
+__cs_get_file_state(inode_t *inode, cs_inode_ctx_t *ctx);
+
+int
+cs_inodelk_unlock(call_frame_t *main_frame);
+
+int
+cs_resume_postprocess(xlator_t *this, call_frame_t *frame, inode_t *inode);
+
+int32_t
+cs_truncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata);
+int32_t
+cs_resume_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ off_t offset, dict_t *xattr_req);
+
+int32_t
+cs_readv_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iovec *vector, int32_t count,
+ struct iatt *stbuf, struct iobref *iobref, dict_t *xdata);
+int32_t
+cs_resume_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata);
+int32_t
+cs_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata);
+
+int
+cs_resume_remote_readv_postprocess(xlator_t *this, call_frame_t *frame,
+ inode_t *inode, off_t offset, size_t size,
+ uint32_t flags);
+int
+cs_serve_readv(call_frame_t *frame, off_t offset, size_t size, uint32_t flags);
+#endif /* __CLOUDSYNC_H__ */
diff --git a/xlators/features/compress/Makefile.am b/xlators/features/compress/Makefile.am
new file mode 100644
index 00000000000..a985f42a877
--- /dev/null
+++ b/xlators/features/compress/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/xlators/features/compress/src/Makefile.am b/xlators/features/compress/src/Makefile.am
new file mode 100644
index 00000000000..98271a9f3fc
--- /dev/null
+++ b/xlators/features/compress/src/Makefile.am
@@ -0,0 +1,19 @@
+xlator_LTLIBRARIES = cdc.la
+
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
+
+noinst_HEADERS = cdc.h cdc-mem-types.h
+
+cdc_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+
+cdc_la_SOURCES = cdc.c cdc-helper.c
+cdc_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la $(ZLIB_LIBS)
+
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \
+ -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -D$(GF_HOST_OS) \
+ $(LIBZ_CFLAGS)
+
+AM_CFLAGS = -Wall $(GF_CFLAGS)
+
+CLEANFILES =
diff --git a/xlators/features/compress/src/cdc-helper.c b/xlators/features/compress/src/cdc-helper.c
new file mode 100644
index 00000000000..f973ff56cf5
--- /dev/null
+++ b/xlators/features/compress/src/cdc-helper.c
@@ -0,0 +1,527 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/syscall.h>
+
+#include "cdc.h"
+#include "cdc-mem-types.h"
+
+#ifdef HAVE_LIB_Z
+#include "zlib.h"
+#endif
+
+#ifdef HAVE_LIB_Z
+/* gzip header looks something like this
+ * (RFC 1952)
+ *
+ * +---+---+---+---+---+---+---+---+---+---+
+ * |ID1|ID2|CM |FLG| MTIME |XFL|OS |
+ * +---+---+---+---+---+---+---+---+---+---+
+ *
+ * Data is usually sent without this header, i.e.
+ * data sent = <compressed-data> + trailer(8).
+ * The trailer contains the checksum and the uncompressed length.
+ *
+ * gzip_header is added only during debugging.
+ * Refer to the function cdc_dump_iovec_to_disk
+ */
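+/* Reading aid (derived from cdc_init_gzip_trailer/cdc_extract_* below):
+ * the 8-byte trailer appended to the compressed data is laid out,
+ * little-endian, as
+ *
+ * +---+---+---+---+---+---+---+---+
+ * |     CRC32     |     ISIZE     |
+ * +---+---+---+---+---+---+---+---+
+ *   bytes 0..3      bytes 4..7
+ *
+ * where CRC32 covers the uncompressed data and ISIZE is its length
+ * (stream.total_in).
+ */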
+static const char gzip_header[10] = {'\037', '\213', Z_DEFLATED, 0, 0, 0, 0,
+ 0, 0, GF_CDC_OS_ID};
+
+static int32_t
+cdc_next_iovec(xlator_t *this, cdc_info_t *ci)
+{
+ int ret = -1;
+
+ ci->ncount++;
+ /* check for iovec overflow -- should not happen */
+ if (ci->ncount == MAX_IOVEC) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "Zlib output buffer overflow"
+ " ->ncount (%d) | ->MAX_IOVEC (%d)",
+ ci->ncount, MAX_IOVEC);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static void
+cdc_put_long(unsigned char *string, unsigned long x)
+{
+ string[0] = (unsigned char)(x & 0xff);
+ string[1] = (unsigned char)((x & 0xff00) >> 8);
+ string[2] = (unsigned char)((x & 0xff0000) >> 16);
+ string[3] = (unsigned char)((x & 0xff000000) >> 24);
+}
+
+static unsigned long
+cdc_get_long(unsigned char *buf)
+{
+ return ((unsigned long)buf[0]) | (((unsigned long)buf[1]) << 8) |
+ (((unsigned long)buf[2]) << 16) | (((unsigned long)buf[3]) << 24);
+}
+
+static int32_t
+cdc_init_gzip_trailer(xlator_t *this, cdc_priv_t *priv, cdc_info_t *ci)
+{
+ int ret = -1;
+ char *buf = NULL;
+
+ ret = cdc_next_iovec(this, ci);
+ if (ret)
+ goto out;
+
+ buf = CURR_VEC(ci).iov_base = (char *)GF_CALLOC(1, GF_CDC_VALIDATION_SIZE,
+ gf_cdc_mt_gzip_trailer_t);
+
+ if (!CURR_VEC(ci).iov_base)
+ goto out;
+
+ CURR_VEC(ci).iov_len = GF_CDC_VALIDATION_SIZE;
+
+ cdc_put_long((unsigned char *)&buf[0], ci->crc);
+ cdc_put_long((unsigned char *)&buf[4], ci->stream.total_in);
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static int32_t
+cdc_alloc_iobuf_and_init_vec(xlator_t *this, cdc_priv_t *priv, cdc_info_t *ci,
+ int size)
+{
+ int ret = -1;
+ int alloc_len = 0;
+ struct iobuf *iobuf = NULL;
+
+ ret = cdc_next_iovec(this, ci);
+ if (ret)
+ goto out;
+
+ alloc_len = size ? size : ci->buffer_size;
+
+ iobuf = iobuf_get2(this->ctx->iobuf_pool, alloc_len);
+ if (!iobuf)
+ goto out;
+
+ ret = iobref_add(ci->iobref, iobuf);
+ if (ret)
+ goto out;
+
+ /* Initialize this iovec */
+ CURR_VEC(ci).iov_base = iobuf->ptr;
+ CURR_VEC(ci).iov_len = alloc_len;
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+static void
+cdc_init_zlib_output_stream(cdc_priv_t *priv, cdc_info_t *ci, int size)
+{
+ ci->stream.next_out = (unsigned char *)CURR_VEC(ci).iov_base;
+ ci->stream.avail_out = size ? size : ci->buffer_size;
+}
+
+/* This routine is for testing and debugging only.
+ * Data written = header(10) + <compressed-data> + trailer(8)
+ * So each gzip dump file is at least 18 bytes in size.
+ */
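+/* Sketch (an assumption, debugging aid only): since the dump is
+ * gzip header + deflate data + CRC32/ISIZE trailer, a command such as
+ *   gzip -t /tmp/cdcdump.gz
+ * would typically be able to verify it, provided the complete stream
+ * was captured.
+ */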
+void
+cdc_dump_iovec_to_disk(xlator_t *this, cdc_info_t *ci, const char *file)
+{
+ int i = 0;
+ int fd = 0;
+ size_t written = 0;
+ size_t total_written = 0;
+
+ fd = open(file, O_WRONLY | O_CREAT | O_TRUNC, 0777);
+ if (fd < 0) {
+ gf_log(this->name, GF_LOG_ERROR, "Cannot open file: %s", file);
+ return;
+ }
+
+ written = sys_write(fd, (char *)gzip_header, 10);
+ total_written += written;
+ for (i = 0; i < ci->ncount; i++) {
+ written = sys_write(fd, (char *)ci->vec[i].iov_base,
+ ci->vec[i].iov_len);
+ total_written += written;
+ }
+
+ gf_log(this->name, GF_LOG_DEBUG, "dump'd %zu bytes to %s", total_written,
+ GF_CDC_DEBUG_DUMP_FILE);
+
+ sys_close(fd);
+}
+
+static int32_t
+cdc_flush_libz_buffer(cdc_priv_t *priv, xlator_t *this, cdc_info_t *ci,
+ int (*libz_func)(z_streamp, int), int flush)
+{
+ int32_t ret = Z_OK;
+ int done = 0;
+ unsigned int deflate_len = 0;
+
+ for (;;) {
+ deflate_len = ci->buffer_size - ci->stream.avail_out;
+
+ if (deflate_len != 0) {
+ CURR_VEC(ci).iov_len = deflate_len;
+
+ ret = cdc_alloc_iobuf_and_init_vec(this, priv, ci, 0);
+ if (ret) {
+ ret = Z_MEM_ERROR;
+ break;
+ }
+
+ /* Re-position Zlib output buffer */
+ cdc_init_zlib_output_stream(priv, ci, 0);
+ }
+
+ if (done) {
+ ci->ncount--;
+ break;
+ }
+
+ ret = libz_func(&ci->stream, flush);
+
+ if (ret == Z_BUF_ERROR) {
+ ret = Z_OK;
+ ci->ncount--;
+ break;
+ }
+
+ done = (ci->stream.avail_out != 0 || ret == Z_STREAM_END);
+
+ if (ret != Z_OK && ret != Z_STREAM_END)
+ break;
+ }
+
+ return ret;
+}
+
+static int32_t
+do_cdc_compress(struct iovec *vec, xlator_t *this, cdc_priv_t *priv,
+ cdc_info_t *ci)
+{
+ int ret = -1;
+
+    /* Initialize deflate */
+ ret = deflateInit2(&ci->stream, priv->cdc_level, Z_DEFLATED,
+ priv->window_size, priv->mem_level, Z_DEFAULT_STRATEGY);
+
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR, "unable to init Zlib (retval: %d)",
+ ret);
+ goto out;
+ }
+
+ ret = cdc_alloc_iobuf_and_init_vec(this, priv, ci, 0);
+ if (ret)
+ goto out;
+
+ /* setup output buffer */
+ cdc_init_zlib_output_stream(priv, ci, 0);
+
+ /* setup input buffer */
+ ci->stream.next_in = (unsigned char *)vec->iov_base;
+ ci->stream.avail_in = vec->iov_len;
+
+ ci->crc = crc32(ci->crc, (const Bytef *)vec->iov_base, vec->iov_len);
+
+ gf_log(this->name, GF_LOG_DEBUG, "crc=%lu len=%d buffer_size=%d", ci->crc,
+ ci->stream.avail_in, ci->buffer_size);
+
+ /* compress !! */
+ while (ci->stream.avail_in != 0) {
+ if (ci->stream.avail_out == 0) {
+ CURR_VEC(ci).iov_len = ci->buffer_size;
+
+ ret = cdc_alloc_iobuf_and_init_vec(this, priv, ci, 0);
+ if (ret)
+ break;
+
+ /* Re-position Zlib output buffer */
+ cdc_init_zlib_output_stream(priv, ci, 0);
+ }
+
+ ret = deflate(&ci->stream, Z_NO_FLUSH);
+ if (ret != Z_OK)
+ break;
+ }
+
+out:
+ return ret;
+}
+
+int32_t
+cdc_compress(xlator_t *this, cdc_priv_t *priv, cdc_info_t *ci, dict_t **xdata)
+{
+ int ret = -1;
+ int i = 0;
+
+ ci->iobref = iobref_new();
+ if (!ci->iobref)
+ goto out;
+
+ if (!*xdata) {
+ *xdata = dict_new();
+ if (!*xdata) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "Cannot allocate xdata"
+ " dict");
+ goto out;
+ }
+ }
+
+ /* data */
+ for (i = 0; i < ci->count; i++) {
+ ret = do_cdc_compress(&ci->vector[i], this, priv, ci);
+ if (ret != Z_OK)
+ goto deflate_cleanup_out;
+ }
+
+ /* flush zlib buffer */
+ ret = cdc_flush_libz_buffer(priv, this, ci, deflate, Z_FINISH);
+ if (!(ret == Z_OK || ret == Z_STREAM_END)) {
+ gf_log(this->name, GF_LOG_ERROR, "Compression Error: ret (%d)", ret);
+ ret = -1;
+ goto deflate_cleanup_out;
+ }
+
+ /* trailer */
+ ret = cdc_init_gzip_trailer(this, priv, ci);
+ if (ret)
+ goto deflate_cleanup_out;
+
+ gf_log(this->name, GF_LOG_DEBUG, "Compressed %ld to %ld bytes",
+ ci->stream.total_in, ci->stream.total_out);
+
+ ci->nbytes = ci->stream.total_out + GF_CDC_VALIDATION_SIZE;
+
+ /* set deflated canary value for identification */
+ ret = dict_set_int32(*xdata, GF_CDC_DEFLATE_CANARY_VAL, 1);
+ if (ret) {
+ /* Send uncompressed data if we can't _tell_ the client
+         * that deflated data is on its way. So, we just log
+ * the failure and continue as usual.
+ */
+ gf_log(this->name, GF_LOG_ERROR,
+ "Data deflated, but could not set canary"
+ " value in dict for identification");
+ }
+
+ /* This is to be used in testing */
+ if (priv->debug) {
+ cdc_dump_iovec_to_disk(this, ci, GF_CDC_DEBUG_DUMP_FILE);
+ }
+
+deflate_cleanup_out:
+ (void)deflateEnd(&ci->stream);
+
+out:
+ return ret;
+}
+
+/* deflated content is detected by the presence of the
+ * canary key in the dict
+ */
+static int32_t
+cdc_check_content_for_deflate(dict_t *xdata)
+{
+ return dict_get(xdata, GF_CDC_DEFLATE_CANARY_VAL) ? -1 : 0;
+}
+
+static unsigned long
+cdc_extract_crc(char *trailer)
+{
+ return cdc_get_long((unsigned char *)&trailer[0]);
+}
+
+static unsigned long
+cdc_extract_size(char *trailer)
+{
+ return cdc_get_long((unsigned char *)&trailer[4]);
+}
+
+static int32_t
+cdc_validate_inflate(cdc_info_t *ci, unsigned long crc, unsigned long len)
+{
+ return !((crc == ci->crc)
+ /* inflated length is hidden inside
+ * Zlib stream struct */
+ && (len == ci->stream.total_out));
+}
+
+static int32_t
+do_cdc_decompress(xlator_t *this, cdc_priv_t *priv, cdc_info_t *ci)
+{
+ int ret = -1;
+ int i = 0;
+ int len = 0;
+ char *inflte = NULL;
+ char *trailer = NULL;
+ struct iovec vec = {
+ 0,
+ };
+ unsigned long computed_crc = 0;
+ unsigned long computed_len = 0;
+
+ ret = inflateInit2(&ci->stream, priv->window_size);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR, "Zlib: Unable to initialize inflate");
+ goto out;
+ }
+
+ vec = THIS_VEC(ci, 0);
+
+ trailer = (char *)(((char *)vec.iov_base) + vec.iov_len -
+ GF_CDC_VALIDATION_SIZE);
+
+ /* CRC of uncompressed data */
+ computed_crc = cdc_extract_crc(trailer);
+
+    /* size of uncompressed data */
+ computed_len = cdc_extract_size(trailer);
+
+ gf_log(this->name, GF_LOG_DEBUG, "crc=%lu len=%lu buffer_size=%d",
+ computed_crc, computed_len, ci->buffer_size);
+
+ inflte = vec.iov_base;
+ len = vec.iov_len - GF_CDC_VALIDATION_SIZE;
+
+ /* allocate buffer of the original length of the data */
+ ret = cdc_alloc_iobuf_and_init_vec(this, priv, ci, 0);
+ if (ret)
+ goto out;
+
+ /* setup output buffer */
+ cdc_init_zlib_output_stream(priv, ci, 0);
+
+ /* setup input buffer */
+ ci->stream.next_in = (unsigned char *)inflte;
+ ci->stream.avail_in = len;
+
+ while (ci->stream.avail_in != 0) {
+ if (ci->stream.avail_out == 0) {
+ CURR_VEC(ci).iov_len = ci->buffer_size;
+
+ ret = cdc_alloc_iobuf_and_init_vec(this, priv, ci, 0);
+ if (ret)
+ break;
+
+ /* Re-position Zlib output buffer */
+ cdc_init_zlib_output_stream(priv, ci, 0);
+ }
+
+ ret = inflate(&ci->stream, Z_NO_FLUSH);
+ if (ret == Z_STREAM_ERROR)
+ break;
+ }
+
+ /* flush zlib buffer */
+ ret = cdc_flush_libz_buffer(priv, this, ci, inflate, Z_SYNC_FLUSH);
+ if (!(ret == Z_OK || ret == Z_STREAM_END)) {
+ gf_log(this->name, GF_LOG_ERROR, "Decompression Error: ret (%d)", ret);
+ ret = -1;
+ goto out;
+ }
+
+    /* compute CRC of the uncompressed data to check for
+     * correctness */
+
+ for (i = 0; i < ci->ncount; i++) {
+ ci->crc = crc32(ci->crc, (const Bytef *)ci->vec[i].iov_base,
+ ci->vec[i].iov_len);
+ }
+
+ /* validate inflated data */
+ ret = cdc_validate_inflate(ci, computed_crc, computed_len);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "Checksum or length mismatched in inflated data");
+ }
+
+out:
+ return ret;
+}
+
+int32_t
+cdc_decompress(xlator_t *this, cdc_priv_t *priv, cdc_info_t *ci, dict_t *xdata)
+{
+ int32_t ret = -1;
+
+ /* check for deflate content */
+ if (!cdc_check_content_for_deflate(xdata)) {
+ gf_log(this->name, GF_LOG_DEBUG,
+ "Content not deflated, passing through ...");
+ goto passthrough_out;
+ }
+
+ ci->iobref = iobref_new();
+ if (!ci->iobref)
+ goto passthrough_out;
+
+    /* Do we need to do this? Can we assume that one iovec
+     * will hold the per-request data every time?
+     *
+     * The server/client protocol seems to deal with a single
+     * iovec even if op_ret > 1M, so it looks OK to assume that
+     * a single iovec will contain all the data. (This saves us
+     * the work of locating the trailer and the data, which could
+     * otherwise have been split across two adjacent iovecs.)
+     *
+     * But, in case this translator is loaded above quick-read
+     * for some reason, it is entirely possible that we get
+     * multiple iovecs...
+     *
+     * That case (handled below) has not been tested (i.e. by
+     * loading the xlator below quick-read).
+     */
+
+ /* @@ I_HOPE_THIS_IS_NEVER_HIT */
+ if (ci->count > 1) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "unable to handle"
+ " multiple iovecs (%d in number)",
+ ci->count);
+ goto inflate_cleanup_out;
+        /* TODO: coalesce all iovecs into one */
+ }
+
+ ret = do_cdc_decompress(this, priv, ci);
+ if (ret)
+ goto inflate_cleanup_out;
+
+ ci->nbytes = ci->stream.total_out;
+
+ gf_log(this->name, GF_LOG_DEBUG, "Inflated %ld to %ld bytes",
+ ci->stream.total_in, ci->stream.total_out);
+
+inflate_cleanup_out:
+ (void)inflateEnd(&ci->stream);
+
+passthrough_out:
+ return ret;
+}
+
+#endif
diff --git a/xlators/features/compress/src/cdc-mem-types.h b/xlators/features/compress/src/cdc-mem-types.h
new file mode 100644
index 00000000000..928afdd2efe
--- /dev/null
+++ b/xlators/features/compress/src/cdc-mem-types.h
@@ -0,0 +1,23 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __CDC_MEM_TYPES_H
+#define __CDC_MEM_TYPES_H
+
+#include <glusterfs/mem-types.h>
+
+enum gf_cdc_mem_types {
+ gf_cdc_mt_priv_t = gf_common_mt_end + 1,
+ gf_cdc_mt_vec_t = gf_common_mt_end + 2,
+ gf_cdc_mt_gzip_trailer_t = gf_common_mt_end + 3,
+ gf_cdc_mt_end = gf_common_mt_end + 4,
+};
+
+#endif
diff --git a/xlators/features/compress/src/cdc.c b/xlators/features/compress/src/cdc.c
new file mode 100644
index 00000000000..b0b51e914ed
--- /dev/null
+++ b/xlators/features/compress/src/cdc.c
@@ -0,0 +1,348 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <sys/uio.h>
+
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/logging.h>
+
+#include "cdc.h"
+#include "cdc-mem-types.h"
+
+static void
+cdc_cleanup_iobref(cdc_info_t *ci)
+{
+ assert(ci->iobref != NULL);
+ iobref_clear(ci->iobref);
+}
+
+int32_t
+cdc_readv_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iovec *vector, int32_t count,
+ struct iatt *stbuf, struct iobref *iobref, dict_t *xdata)
+{
+ int ret = -1;
+ cdc_priv_t *priv = NULL;
+ cdc_info_t ci = {
+ 0,
+ };
+
+ GF_VALIDATE_OR_GOTO("cdc", this, default_out);
+ GF_VALIDATE_OR_GOTO(this->name, frame, default_out);
+
+ priv = this->private;
+
+ if (op_ret <= 0)
+ goto default_out;
+
+ if ((priv->min_file_size != 0) && (op_ret < priv->min_file_size))
+ goto default_out;
+
+ ci.count = count;
+ ci.ibytes = op_ret;
+ ci.vector = vector;
+ ci.buf = NULL;
+ ci.iobref = NULL;
+ ci.ncount = 0;
+ ci.crc = 0;
+ ci.buffer_size = GF_CDC_DEF_BUFFERSIZE;
+
+ /* A readv compresses on the server side and decompresses on the client side
+ */
+ if (priv->op_mode == GF_CDC_MODE_SERVER) {
+ ret = cdc_compress(this, priv, &ci, &xdata);
+ } else if (priv->op_mode == GF_CDC_MODE_CLIENT) {
+ ret = cdc_decompress(this, priv, &ci, xdata);
+ } else {
+ gf_log(this->name, GF_LOG_ERROR, "Invalid operation mode (%d)",
+ priv->op_mode);
+ }
+
+ if (ret)
+ goto default_out;
+
+ STACK_UNWIND_STRICT(readv, frame, ci.nbytes, op_errno, ci.vec, ci.ncount,
+ stbuf, iobref, xdata);
+ cdc_cleanup_iobref(&ci);
+ return 0;
+
+default_out:
+ STACK_UNWIND_STRICT(readv, frame, op_ret, op_errno, vector, count, stbuf,
+ iobref, xdata);
+ return 0;
+}
+
+int32_t
+cdc_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata)
+{
+ fop_readv_cbk_t cbk = NULL;
+
+#ifdef HAVE_LIB_Z
+ cbk = cdc_readv_cbk;
+#else
+ cbk = default_readv_cbk;
+#endif
+ STACK_WIND(frame, cbk, FIRST_CHILD(this), FIRST_CHILD(this)->fops->readv,
+ fd, size, offset, flags, xdata);
+ return 0;
+}
+
+int32_t
+cdc_writev_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(writev, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+ return 0;
+}
+
+int32_t
+cdc_writev(call_frame_t *frame, xlator_t *this, fd_t *fd, struct iovec *vector,
+ int32_t count, off_t offset, uint32_t flags, struct iobref *iobref,
+ dict_t *xdata)
+{
+ int ret = -1;
+ cdc_priv_t *priv = NULL;
+ cdc_info_t ci = {
+ 0,
+ };
+ size_t isize = 0;
+
+ GF_VALIDATE_OR_GOTO("cdc", this, err);
+ GF_VALIDATE_OR_GOTO(this->name, frame, err);
+
+ priv = this->private;
+
+ isize = iov_length(vector, count);
+
+ if (isize <= 0)
+ goto default_out;
+
+ if ((priv->min_file_size != 0) && (isize < priv->min_file_size))
+ goto default_out;
+
+ ci.count = count;
+ ci.ibytes = isize;
+ ci.vector = vector;
+ ci.buf = NULL;
+ ci.iobref = NULL;
+ ci.ncount = 0;
+ ci.crc = 0;
+ ci.buffer_size = GF_CDC_DEF_BUFFERSIZE;
+
+ /* A writev compresses on the client side and decompresses on the server
+ * side
+ */
+ if (priv->op_mode == GF_CDC_MODE_CLIENT) {
+ ret = cdc_compress(this, priv, &ci, &xdata);
+ } else if (priv->op_mode == GF_CDC_MODE_SERVER) {
+ ret = cdc_decompress(this, priv, &ci, xdata);
+ } else {
+ gf_log(this->name, GF_LOG_ERROR, "Invalid operation mode (%d) ",
+ priv->op_mode);
+ }
+
+ if (ret)
+ goto default_out;
+
+ STACK_WIND(frame, cdc_writev_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->writev, fd, ci.vec, ci.ncount, offset,
+ flags, iobref, xdata);
+
+ cdc_cleanup_iobref(&ci);
+ return 0;
+
+default_out:
+ STACK_WIND(frame, cdc_writev_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->writev, fd, vector, count, offset,
+ flags, iobref, xdata);
+ return 0;
+err:
+ STACK_UNWIND_STRICT(writev, frame, -1, EINVAL, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+mem_acct_init(xlator_t *this)
+{
+ int ret = -1;
+
+ if (!this)
+ return ret;
+
+ ret = xlator_mem_acct_init(this, gf_cdc_mt_end);
+
+ if (ret != 0) {
+        gf_log(this->name, GF_LOG_ERROR,
+               "Memory accounting init failed");
+ return ret;
+ }
+
+ return ret;
+}
+
+int32_t
+init(xlator_t *this)
+{
+ int ret = -1;
+ char *temp_str = NULL;
+ cdc_priv_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO("cdc", this, err);
+
+ if (!this->children || this->children->next) {
+ gf_log(this->name, GF_LOG_ERROR, "Need subvolume == 1");
+ goto err;
+ }
+
+ if (!this->parents) {
+ gf_log(this->name, GF_LOG_WARNING, "Dangling volume. Check volfile");
+ }
+
+ priv = GF_CALLOC(1, sizeof(*priv), gf_cdc_mt_priv_t);
+ if (!priv) {
+ goto err;
+ }
+
+ /* Check if debug mode is turned on */
+ GF_OPTION_INIT("debug", priv->debug, bool, err);
+ if (priv->debug) {
+ gf_log(this->name, GF_LOG_DEBUG, "CDC debug option turned on");
+ }
+
+ /* Set Gzip Window Size */
+ GF_OPTION_INIT("window-size", priv->window_size, int32, err);
+ if ((priv->window_size > GF_CDC_MAX_WINDOWSIZE) ||
+ (priv->window_size < GF_CDC_DEF_WINDOWSIZE)) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "Invalid gzip window size (%d), using default",
+ priv->window_size);
+ priv->window_size = GF_CDC_DEF_WINDOWSIZE;
+ }
+
+ /* Set Gzip (De)Compression Level */
+ GF_OPTION_INIT("compression-level", priv->cdc_level, int32, err);
+ if (((priv->cdc_level < 1) || (priv->cdc_level > 9)) &&
+ (priv->cdc_level != GF_CDC_DEF_COMPRESSION)) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "Invalid gzip (de)compression level (%d),"
+ " using default",
+ priv->cdc_level);
+ priv->cdc_level = GF_CDC_DEF_COMPRESSION;
+ }
+
+ /* Set Gzip Memory Level */
+ GF_OPTION_INIT("mem-level", priv->mem_level, int32, err);
+ if ((priv->mem_level < 1) || (priv->mem_level > 9)) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "Invalid gzip memory level, using the default");
+ priv->mem_level = GF_CDC_DEF_MEMLEVEL;
+ }
+
+ /* Set min file size to enable compression */
+ GF_OPTION_INIT("min-size", priv->min_file_size, int32, err);
+
+ /* Mode of operation - Server/Client */
+ ret = dict_get_str(this->options, "mode", &temp_str);
+ if (ret) {
+ gf_log(this->name, GF_LOG_CRITICAL, "Operation mode not specified !!");
+ goto err;
+ }
+
+ if (GF_CDC_MODE_IS_CLIENT(temp_str)) {
+ priv->op_mode = GF_CDC_MODE_CLIENT;
+ } else if (GF_CDC_MODE_IS_SERVER(temp_str)) {
+ priv->op_mode = GF_CDC_MODE_SERVER;
+ } else {
+ gf_log(this->name, GF_LOG_CRITICAL,
+ "Bogus operation mode (%s) specified", temp_str);
+ goto err;
+ }
+
+ this->private = priv;
+ gf_log(this->name, GF_LOG_DEBUG, "CDC xlator loaded in (%s) mode",
+ temp_str);
+ return 0;
+
+err:
+ if (priv)
+ GF_FREE(priv);
+
+ return -1;
+}
+
+void
+fini(xlator_t *this)
+{
+ cdc_priv_t *priv = this->private;
+
+ if (priv)
+ GF_FREE(priv);
+ this->private = NULL;
+ return;
+}
+
+struct xlator_fops fops = {
+ .readv = cdc_readv,
+ .writev = cdc_writev,
+};
+
+struct xlator_cbks cbks = {};
+
+struct volume_options options[] = {
+ {.key = {"window-size"},
+ .default_value = "-15",
+ .type = GF_OPTION_TYPE_INT,
+ .description = "Size of the zlib history buffer."},
+ {.key = {"mem-level"},
+ .default_value = "8",
+ .type = GF_OPTION_TYPE_INT,
+ .description = "Memory allocated for internal compression state. "
+ "1 uses minimum memory but is slow and reduces "
+ "compression ratio; memLevel=9 uses maximum memory "
+ "for optimal speed. The default value is 8."},
+ {.key = {"compression-level"},
+ .default_value = "-1",
+ .type = GF_OPTION_TYPE_INT,
+ .description = "Compression levels \n"
+ "0 : no compression, 1 : best speed, \n"
+ "9 : best compression, -1 : default compression "},
+ {.key = {"min-size"},
+ .default_value = "0",
+ .type = GF_OPTION_TYPE_INT,
+ .description = "Data is compressed only when its size exceeds this."},
+ {.key = {"mode"},
+ .value = {"server", "client"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "Set on the basis of where the xlator is loaded. "
+ "This option should NOT be configured by user."},
+ {.key = {"debug"},
+ .default_value = "false",
+ .type = GF_OPTION_TYPE_BOOL,
+ .description = "This is used in testing. Will dump compressed data "
+ "to disk as a gzip file."},
+ {.key = {NULL}},
+};
+
+xlator_api_t xlator_api = {
+ .init = init,
+ .fini = fini,
+ .mem_acct_init = mem_acct_init,
+ .op_version = {GD_OP_VERSION_3_9_0},
+ .fops = &fops,
+ .cbks = &cbks,
+ .options = options,
+ .identifier = "cdc",
+ .category = GF_TECH_PREVIEW,
+};
diff --git a/xlators/features/compress/src/cdc.h b/xlators/features/compress/src/cdc.h
new file mode 100644
index 00000000000..cb87b06a989
--- /dev/null
+++ b/xlators/features/compress/src/cdc.h
@@ -0,0 +1,99 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __CDC_H
+#define __CDC_H
+
+#ifdef HAVE_LIB_Z
+#include "zlib.h"
+#endif
+
+#include <glusterfs/xlator.h>
+
+#ifndef MAX_IOVEC
+#define MAX_IOVEC 16
+#endif
+
+typedef struct cdc_priv {
+ int window_size;
+ int mem_level;
+ int cdc_level;
+ int min_file_size;
+ int op_mode;
+ gf_boolean_t debug;
+ gf_lock_t lock;
+} cdc_priv_t;
+
+typedef struct cdc_info {
+ /* input bits */
+ int count;
+ int32_t ibytes;
+ struct iovec *vector;
+ struct iatt *buf;
+
+ /* output bits */
+ int ncount;
+ int nbytes;
+ int buffer_size;
+ struct iovec vec[MAX_IOVEC];
+ struct iobref *iobref;
+
+ /* zlib bits */
+#ifdef HAVE_LIB_Z
+ z_stream stream;
+#endif
+ unsigned long crc;
+} cdc_info_t;
+
+#define NVEC(ci) (ci->ncount - 1)
+#define CURR_VEC(ci) ci->vec[ci->ncount - 1]
+#define THIS_VEC(ci, i) ci->vector[i]
+
+/* Gzip defaults */
+#define GF_CDC_DEF_WINDOWSIZE -15 /* default value */
+#define GF_CDC_MAX_WINDOWSIZE -8 /* max value */
+
+#ifdef HAVE_LIB_Z
+#define GF_CDC_DEF_COMPRESSION Z_DEFAULT_COMPRESSION
+#else
+#define GF_CDC_DEF_COMPRESSION -1
+#endif
+
+#define GF_CDC_DEF_MEMLEVEL 8
+#define GF_CDC_DEF_BUFFERSIZE 262144 // 256K - default compression buffer size
+
+/* Operation mode
+ * If xlator is loaded on client, readv decompresses and writev compresses
+ * If xlator is loaded on server, readv compresses and writev decompresses
+ */
+#define GF_CDC_MODE_CLIENT 0
+#define GF_CDC_MODE_SERVER 1
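+/* Illustrative volfile fragment (an assumption, for orientation only;
+ * glusterd normally generates this and the "mode" option is not meant
+ * to be set by hand):
+ *
+ *   volume <volname>-compress
+ *       type features/cdc
+ *       option mode server        # "client" in the client-side graph
+ *       subvolumes <child-subvolume>
+ *   end-volume
+ */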
+
+/* minimum size of data to compress;
+ * 0 == compress even a single byte
+ */
+#define GF_CDC_MIN_CHUNK_SIZE 0
+
+#define GF_CDC_VALIDATION_SIZE 8
+
+#define GF_CDC_OS_ID 0xFF
+#define GF_CDC_DEFLATE_CANARY_VAL "deflate"
+#define GF_CDC_DEBUG_DUMP_FILE "/tmp/cdcdump.gz"
+
+#define GF_CDC_MODE_IS_CLIENT(m) (strcmp(m, "client") == 0)
+
+#define GF_CDC_MODE_IS_SERVER(m) (strcmp(m, "server") == 0)
+
+int32_t
+cdc_compress(xlator_t *this, cdc_priv_t *priv, cdc_info_t *ci, dict_t **xdata);
+int32_t
+cdc_decompress(xlator_t *this, cdc_priv_t *priv, cdc_info_t *ci, dict_t *xdata);
+
+#endif
diff --git a/xlators/features/filter/src/Makefile.am b/xlators/features/filter/src/Makefile.am
deleted file mode 100644
index d473b9ea16d..00000000000
--- a/xlators/features/filter/src/Makefile.am
+++ /dev/null
@@ -1,15 +0,0 @@
-xlator_LTLIBRARIES = filter.la
-xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/testing/features
-
-filter_la_LDFLAGS = -module -avoidversion
-
-filter_la_SOURCES = filter.c
-filter_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
-
-noinst_HEADERS = filter-mem-types.h
-
-AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS) \
- -I$(top_srcdir)/libglusterfs/src -shared -nostartfiles $(GF_CFLAGS)
-
-CLEANFILES =
-
diff --git a/xlators/features/filter/src/filter-mem-types.h b/xlators/features/filter/src/filter-mem-types.h
deleted file mode 100644
index 3078266b115..00000000000
--- a/xlators/features/filter/src/filter-mem-types.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- Copyright (c) 2008-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
-
-#ifndef __FILTER_MEM_TYPES_H__
-#define __FILTER_MEM_TYPES_H__
-
-#include "mem-types.h"
-
-enum gf_filter_mem_types_ {
- gf_filter_mt_gf_filter = gf_common_mt_end + 1,
- gf_filter_mt_end
-};
-#endif
-
diff --git a/xlators/features/filter/src/filter.c b/xlators/features/filter/src/filter.c
deleted file mode 100644
index 13a3e7cf22d..00000000000
--- a/xlators/features/filter/src/filter.c
+++ /dev/null
@@ -1,1744 +0,0 @@
-/*
- Copyright (c) 2008-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
-
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#include "glusterfs.h"
-#include "logging.h"
-#include "dict.h"
-#include "xlator.h"
-#include "filter-mem-types.h"
-
-#define GF_FILTER_NOBODY_UID 65534
-#define GF_FILTER_NOBODY_GID 65534
-#define GF_FILTER_ROOT_UID 0
-#define GF_FILTER_ROOT_GID 0
-
-#define GF_MAXIMUM_FILTERING_ALLOWED 32
-
-/*
- option root-filtering on (off by default)
- option translate-uid <uid-range=newuid,uid=newuid>
- option translate-gid <gid-range=newgid,gid=newgid>
- option read-only <yes|true>
- option fixed-uid <uid>
- option fixed-gid <gid>
- option filter-uid <uid-range,uid>
- option filter-gid <gid-range,gid> // not supported yet
-
-*/
-
-struct gf_filter {
- /* Flags */
- gf_boolean_t complete_read_only;
- char fixed_uid_set;
- char fixed_gid_set;
- char partial_filter;
-
- /* Options */
- /* Mapping/Filtering/Translate whatever you want to call */
- int translate_num_uid_entries;
- int translate_num_gid_entries;
- int translate_input_uid[GF_MAXIMUM_FILTERING_ALLOWED][2];
- int translate_output_uid[GF_MAXIMUM_FILTERING_ALLOWED];
- int translate_input_gid[GF_MAXIMUM_FILTERING_ALLOWED][2];
- int translate_output_gid[GF_MAXIMUM_FILTERING_ALLOWED];
-
- /* Fixed uid/gid */
- int fixed_uid;
- int fixed_gid;
-
- /* Filter */
- int filter_num_uid_entries;
- int filter_num_gid_entries;
- int filter_input_uid[GF_MAXIMUM_FILTERING_ALLOWED][2];
- int filter_input_gid[GF_MAXIMUM_FILTERING_ALLOWED][2];
-
-};
-
-/* update_frame: The main logic of the whole translator.
- Return values:
- 0: no change
- // TRANSLATE
- 1: only uid changed
- 2: only gid changed
- 3: both uid/gid changed
- // FILTER
- 4: uid in filter range
- 5: gid in filter range // not supported yet
- 6: complete fs is readonly
-*/
-
-#define GF_FILTER_NO_CHANGE 0
-#define GF_FILTER_MAP_UID 1
-#define GF_FILTER_MAP_GID 2
-#define GF_FILTER_MAP_BOTH 3
-#define GF_FILTER_FILTER_UID 4
-#define GF_FILTER_FILTER_GID 5
-#define GF_FILTER_RO_FS 6
-
-static int32_t
-update_frame (call_frame_t *frame,
- inode_t *inode,
- struct gf_filter *filter)
-{
- uid_t uid = 0;
- int32_t idx = 0;
- int32_t ret = 0;
- int32_t dictret = 0;
- uint64_t tmp_uid = 0;
-
- for (idx = 0; idx < filter->translate_num_uid_entries; idx++) {
- if ((frame->root->uid >=filter->translate_input_uid[idx][0]) &&
- (frame->root->uid <=filter->translate_input_uid[idx][1])) {
- dictret = inode_ctx_get (inode, frame->this, &tmp_uid);
- uid = (uid_t)tmp_uid;
- if (dictret == 0) {
- if (frame->root->uid != uid)
- ret = GF_FILTER_MAP_UID;
- } else {
- ret = GF_FILTER_MAP_UID;
- }
- break;
- }
- }
-
- for (idx = 0; idx < filter->translate_num_gid_entries; idx++) {
- if ((frame->root->gid >=filter->translate_input_gid[idx][0]) &&
- (frame->root->gid <=filter->translate_input_gid[idx][1])) {
- if (ret == GF_FILTER_NO_CHANGE)
- ret = GF_FILTER_MAP_GID;
- else
- ret = GF_FILTER_MAP_BOTH;
- break;
- }
- }
-
-
- if (filter->complete_read_only)
- return GF_FILTER_RO_FS;
-
- if (filter->partial_filter) {
- dictret = inode_ctx_get (inode, frame->this, &tmp_uid);
- uid = (uid_t)tmp_uid;
- if (dictret != -1) {
- for (idx = 0; idx < filter->filter_num_uid_entries;
- idx++) {
- if ((uid >=filter->filter_input_uid[idx][0]) &&
- (uid <=filter->filter_input_uid[idx][1])) {
- return GF_FILTER_FILTER_UID;
- }
- }
- }
- }
-
- return ret;
-}
-
-/* if 'root' don't change the uid/gid */
-static int32_t
-update_stat (struct iatt *stbuf,
- struct gf_filter *filter)
-{
- int32_t idx = 0;
- for (idx = 0; idx < filter->translate_num_uid_entries; idx++) {
- if (stbuf->ia_uid == GF_FILTER_ROOT_UID)
- continue;
- if ((stbuf->ia_uid >= filter->translate_input_uid[idx][0]) &&
- (stbuf->ia_uid <= filter->translate_input_uid[idx][1])) {
- stbuf->ia_uid = filter->translate_output_uid[idx];
- break;
- }
- }
-
- for (idx = 0; idx < filter->translate_num_gid_entries; idx++) {
- if (stbuf->ia_gid == GF_FILTER_ROOT_GID)
- continue;
- if ((stbuf->ia_gid >= filter->translate_input_gid[idx][0]) &&
- (stbuf->ia_gid <= filter->translate_input_gid[idx][1])) {
- stbuf->ia_gid = filter->translate_output_gid[idx];
- break;
- }
- }
-
- if (filter->fixed_uid_set) {
- stbuf->ia_uid = filter->fixed_uid;
- }
-
- if (filter->fixed_gid_set) {
- stbuf->ia_gid = filter->fixed_gid;
- }
-
- return 0;
-}
-
-static int32_t
-filter_lookup_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct iatt *buf,
- dict_t *dict,
- struct iatt *postparent)
-{
- int ret = 0;
- if (op_ret >= 0) {
- update_stat (buf, this->private);
- ret = inode_ctx_put (inode, this, (uint64_t)(long)buf->ia_uid);
- if (ret == -1) {
- gf_log (this->name, GF_LOG_ERROR,
- "couldn't set context");
- }
-
- update_stat (postparent, this->private);
- }
- STACK_UNWIND (frame, op_ret, op_errno, inode, buf, dict, postparent);
- return 0;
-}
-
-int32_t
-filter_lookup (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- dict_t *xattr_req)
-{
- STACK_WIND (frame,
- filter_lookup_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->lookup,
- loc,
- xattr_req);
- return 0;
-}
-
-
-static int32_t
-filter_stat_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- struct iatt *buf)
-{
- if (op_ret >= 0) {
- update_stat (buf, this->private);
- }
- STACK_UNWIND (frame, op_ret, op_errno, buf);
- return 0;
-}
-
-int32_t
-filter_stat (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc)
-{
- STACK_WIND (frame,
- filter_stat_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->stat,
- loc);
- return 0;
-}
-
-static int32_t
-filter_setattr_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- struct iatt *preop,
- struct iatt *postop)
-{
- if (op_ret >= 0) {
- update_stat (preop, this->private);
- update_stat (postop, this->private);
- }
- STACK_UNWIND (frame, op_ret, op_errno, preop, postop);
- return 0;
-}
-
-int32_t
-filter_setattr (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- struct iatt *stbuf,
- int32_t valid)
-{
- int32_t ret = 0;
- ret = update_frame (frame, loc->inode, this->private);
- switch (ret) {
- case GF_FILTER_MAP_UID:
- if (loc->inode->st_mode & S_IWGRP)
- break;
- case GF_FILTER_MAP_BOTH:
- if (loc->inode->st_mode & S_IWOTH)
- break;
- gf_log (this->name, GF_LOG_DEBUG,
- "%s: returning permission denied", loc->path);
- STACK_UNWIND (frame, -1, EPERM, NULL, NULL, NULL);
- return 0;
-
- case GF_FILTER_FILTER_UID:
- case GF_FILTER_FILTER_GID:
- case GF_FILTER_RO_FS:
- STACK_UNWIND (frame, -1, EROFS, NULL, NULL);
- return 0;
- default:
- break;
- }
-
- STACK_WIND (frame,
- filter_setattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->setattr,
- loc,
- stbuf, valid);
- return 0;
-}
-
-static int32_t
-filter_fsetattr_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- struct iatt *preop,
- struct iatt *postop)
-{
- if (op_ret >= 0) {
- update_stat (preop, this->private);
- update_stat (postop, this->private);
- }
- STACK_UNWIND (frame,
- op_ret,
- op_errno,
- preop, postop);
- return 0;
-}
-
-int32_t
-filter_fsetattr (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- struct iatt *stbuf,
- int32_t valid)
-{
- STACK_WIND (frame,
- filter_fsetattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fsetattr,
- fd,
- stbuf, valid);
- return 0;
-}
-
-
-static int32_t
-filter_truncate_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- struct iatt *prebuf,
- struct iatt *postbuf)
-{
- if (op_ret >= 0) {
- update_stat (prebuf, this->private);
- update_stat (postbuf, this->private);
- }
- STACK_UNWIND (frame, op_ret, op_errno, prebuf, postbuf);
- return 0;
-}
-
-int32_t
-filter_truncate (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- off_t offset)
-{
- int32_t ret = 0;
- ret = update_frame (frame, loc->inode, this->private);
- switch (ret) {
- case GF_FILTER_MAP_UID:
- if (loc->inode->st_mode & S_IWGRP)
- break;
- case GF_FILTER_MAP_BOTH:
- if (loc->inode->st_mode & S_IWOTH)
- break;
- gf_log (this->name, GF_LOG_DEBUG, "%s: returning permission denied", loc->path);
- STACK_UNWIND (frame, -1, EPERM, NULL, NULL);
- return 0;
-
- case GF_FILTER_FILTER_UID:
- case GF_FILTER_FILTER_GID:
- case GF_FILTER_RO_FS:
- STACK_UNWIND (frame, -1, EROFS, NULL, NULL);
- return 0;
- }
-
- STACK_WIND (frame,
- filter_truncate_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->truncate,
- loc,
- offset);
- return 0;
-}
-
-static int32_t
-filter_ftruncate_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- struct iatt *prebuf,
- struct iatt *postbuf)
-{
- if (op_ret >= 0) {
- update_stat (prebuf, this->private);
- update_stat (postbuf, this->private);
- }
- STACK_UNWIND (frame, op_ret, op_errno, prebuf, postbuf);
- return 0;
-}
-
-int32_t
-filter_ftruncate (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- off_t offset)
-{
- STACK_WIND (frame,
- filter_ftruncate_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->ftruncate,
- fd,
- offset);
- return 0;
-}
-
-
-static int32_t
-filter_readlink_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- const char *path,
- struct iatt *sbuf)
-{
- if (op_ret >= 0)
- update_stat (sbuf, this->private);
-
- STACK_UNWIND (frame, op_ret, op_errno, path, sbuf);
- return 0;
-}
-
-int32_t
-filter_readlink (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- size_t size)
-{
- int32_t ret = 0;
- ret = update_frame (frame, loc->inode, this->private);
- switch (ret) {
- case GF_FILTER_MAP_UID:
- if (loc->inode->st_mode & S_IRGRP)
- break;
- case GF_FILTER_MAP_BOTH:
- if (loc->inode->st_mode & S_IROTH)
- break;
- gf_log (this->name, GF_LOG_DEBUG, "%s: returning permission denied", loc->path);
- STACK_UNWIND (frame, -1, EPERM, NULL);
- return 0;
- }
- STACK_WIND (frame,
- filter_readlink_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->readlink,
- loc,
- size);
- return 0;
-}
-
-
-static int32_t
-filter_mknod_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct iatt *buf,
- struct iatt *preparent,
- struct iatt *postparent)
-{
- int ret = 0;
-
- if (op_ret >= 0) {
- update_stat (buf, this->private);
- ret = inode_ctx_put (inode, this, (uint64_t)(long)buf->ia_uid);
- if (ret == -1) {
- gf_log (this->name, GF_LOG_ERROR,
- "couldn't set context");
- }
-
- update_stat (preparent, this->private);
- update_stat (postparent, this->private);
- }
- STACK_UNWIND (frame, op_ret, op_errno, inode, buf,
- preparent, postparent);
- return 0;
-}
-
-int32_t
-filter_mknod (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- mode_t mode,
- dev_t rdev)
-{
- int ret = 0;
- inode_t *parent = loc->parent;
- ret = update_frame (frame, loc->inode, this->private);
- switch (ret) {
- case GF_FILTER_MAP_UID:
- if (parent->st_mode & S_IWGRP)
- break;
- case GF_FILTER_MAP_BOTH:
- if (parent->st_mode & S_IWOTH)
- break;
- gf_log (this->name, GF_LOG_DEBUG, "%s: returning permission denied", loc->path);
- STACK_UNWIND (frame, -1, EPERM, NULL, NULL,
- NULL, NULL);
- return 0;
-
- case GF_FILTER_FILTER_UID:
- case GF_FILTER_FILTER_GID:
- case GF_FILTER_RO_FS:
- STACK_UNWIND (frame, -1, EROFS, NULL, NULL,
- NULL, NULL);
- return 0;
- }
- STACK_WIND (frame,
- filter_mknod_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->mknod,
- loc, mode, rdev);
- return 0;
-}
-
-static int32_t
-filter_mkdir_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct iatt *buf,
- struct iatt *preparent,
- struct iatt *postparent)
-{
- int ret = 0;
- if (op_ret >= 0) {
- update_stat (buf, this->private);
- ret = inode_ctx_put (inode, this, (uint64_t)(long)buf->ia_uid);
- if (ret == -1) {
- gf_log (this->name, GF_LOG_ERROR,
- "couldn't set context");
- }
-
- update_stat (preparent, this->private);
- update_stat (postparent, this->private);
- }
- STACK_UNWIND (frame, op_ret, op_errno, inode, buf,
- preparent, postparent);
- return 0;
-}
-
-int32_t
-filter_mkdir (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- mode_t mode)
-{
- int ret = 0;
- inode_t *parent = loc->parent;
- ret = update_frame (frame, loc->inode, this->private);
- switch (ret) {
- case GF_FILTER_MAP_UID:
- if (parent->st_mode & S_IWGRP)
- break;
- case GF_FILTER_MAP_BOTH:
- if (parent->st_mode & S_IWOTH)
- break;
- gf_log (this->name, GF_LOG_DEBUG, "%s: returning permission denied", loc->path);
- STACK_UNWIND (frame, -1, EPERM, NULL, NULL,
- NULL, NULL);
- return 0;
-
- case GF_FILTER_FILTER_UID:
- case GF_FILTER_FILTER_GID:
- case GF_FILTER_RO_FS:
- STACK_UNWIND (frame, -1, EROFS, NULL, NULL,
- NULL, NULL);
- return 0;
- }
- STACK_WIND (frame,
- filter_mkdir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->mkdir,
- loc, mode);
- return 0;
-}
-
-static int32_t
-filter_unlink_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- struct iatt *preparent,
- struct iatt *postparent)
-{
- if (op_ret >= 0) {
- update_stat (preparent, this->private);
- update_stat (postparent, this->private);
- }
-
- STACK_UNWIND (frame, op_ret, op_errno, preparent, postparent);
- return 0;
-}
-
-int32_t
-filter_unlink (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc)
-{
- int32_t ret = 0;
- inode_t *parent = loc->parent;
- if (!parent)
- parent = inode_parent (loc->inode, 0, NULL);
- ret = update_frame (frame, loc->inode, this->private);
- switch (ret) {
- case GF_FILTER_MAP_UID:
- if (parent->st_mode & S_IWGRP)
- break;
- if (loc->inode->st_mode & S_IWGRP)
- break;
- case GF_FILTER_MAP_BOTH:
- if (parent->st_mode & S_IWOTH)
- break;
- if (loc->inode->st_mode & S_IWOTH)
- break;
- gf_log (this->name, GF_LOG_DEBUG, "%s: returning permission denied", loc->path);
- STACK_UNWIND (frame, -1, EPERM, NULL, NULL);
- return 0;
- case GF_FILTER_FILTER_UID:
- case GF_FILTER_FILTER_GID:
- case GF_FILTER_RO_FS:
- STACK_UNWIND (frame, -1, EROFS, NULL, NULL);
- return 0;
- }
- STACK_WIND (frame,
- filter_unlink_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->unlink,
- loc);
- return 0;
-}
-
-static int32_t
-filter_rmdir_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- struct iatt *preparent,
- struct iatt *postparent)
-{
- if (op_ret >= 0) {
- update_stat (preparent, this->private);
- update_stat (postparent, this->private);
- }
-
- STACK_UNWIND (frame, op_ret, op_errno, preparent, postparent);
- return 0;
-}
-
-int32_t
-filter_rmdir (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc)
-{
- int32_t ret = 0;
- inode_t *parent = loc->parent;
- if (!parent)
- parent = inode_parent (loc->inode, 0, NULL);
- ret = update_frame (frame, loc->inode, this->private);
- switch (ret) {
- case GF_FILTER_MAP_UID:
- if (parent->st_mode & S_IWGRP)
- break;
- if (loc->inode->st_mode & S_IWGRP)
- break;
- case GF_FILTER_MAP_BOTH:
- if (parent->st_mode & S_IWOTH)
- break;
- if (loc->inode->st_mode & S_IWOTH)
- break;
- gf_log (this->name, GF_LOG_DEBUG, "%s: returning permission denied", loc->path);
- STACK_UNWIND (frame, -1, EPERM, NULL, NULL);
- return 0;
- case GF_FILTER_FILTER_UID:
- case GF_FILTER_FILTER_GID:
- case GF_FILTER_RO_FS:
- STACK_UNWIND (frame, -1, EROFS, NULL, NULL);
- return 0;
- }
- STACK_WIND (frame,
- filter_rmdir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->rmdir,
- loc);
- return 0;
-}
-
-static int32_t
-filter_symlink_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct iatt *buf,
- struct iatt *preparent,
- struct iatt *postparent)
-{
- int ret = 0;
- if (op_ret >= 0) {
- update_stat (buf, this->private);
- ret = inode_ctx_put (inode, this, (uint64_t)(long)buf->ia_uid);
- if (ret == -1) {
- gf_log (this->name, GF_LOG_ERROR,
- "couldn't set context");
- }
-
- update_stat (preparent, this->private);
- update_stat (postparent, this->private);
- }
- STACK_UNWIND (frame, op_ret, op_errno, inode, buf,
- preparent, postparent);
- return 0;
-}
-
-int32_t
-filter_symlink (call_frame_t *frame,
- xlator_t *this,
- const char *linkpath,
- loc_t *loc)
-{
- int ret = 0;
- inode_t *parent = loc->parent;
- ret = update_frame (frame, loc->inode, this->private);
- switch (ret) {
- case GF_FILTER_MAP_UID:
- if (parent->st_mode & S_IWGRP)
- break;
- case GF_FILTER_MAP_BOTH:
- if (parent->st_mode & S_IWOTH)
- break;
- gf_log (this->name, GF_LOG_DEBUG, "%s: returning permission denied", loc->path);
- STACK_UNWIND (frame, -1, EPERM, NULL, NULL,
- NULL, NULL);
- return 0;
-
- case GF_FILTER_FILTER_UID:
- case GF_FILTER_FILTER_GID:
- case GF_FILTER_RO_FS:
- STACK_UNWIND (frame, -1, EROFS, NULL, NULL,
- NULL, NULL);
- return 0;
- }
- STACK_WIND (frame,
- filter_symlink_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->symlink,
- linkpath, loc);
- return 0;
-}
-
-
-static int32_t
-filter_rename_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- struct iatt *buf,
- struct iatt *preoldparent,
- struct iatt *postoldparent,
- struct iatt *prenewparent,
- struct iatt *postnewparent)
-{
- if (op_ret >= 0) {
- update_stat (buf, this->private);
-
- update_stat (preoldparent, this->private);
- update_stat (postoldparent, this->private);
-
- update_stat (prenewparent, this->private);
- update_stat (postnewparent, this->private);
- }
-
- STACK_UNWIND (frame, op_ret, op_errno, buf,
- preoldparent, postoldparent,
- prenewparent, postnewparent);
- return 0;
-}
-
-int32_t
-filter_rename (call_frame_t *frame,
- xlator_t *this,
- loc_t *oldloc,
- loc_t *newloc)
-{
- int32_t ret = 0;
- inode_t *parent = oldloc->parent;
- if (!parent)
- parent = inode_parent (oldloc->inode, 0, NULL);
- ret = update_frame (frame, oldloc->inode, this->private);
- switch (ret) {
- case GF_FILTER_MAP_UID:
- if (parent->st_mode & S_IWGRP)
- break;
- if (oldloc->inode->st_mode & S_IWGRP)
- break;
- case GF_FILTER_MAP_BOTH:
- if (parent->st_mode & S_IWOTH)
- break;
- if (oldloc->inode->st_mode & S_IWOTH)
- break;
- gf_log (this->name, GF_LOG_DEBUG,
- "%s -> %s: returning permission denied", oldloc->path, newloc->path);
- STACK_UNWIND (frame, -1, EPERM, NULL,
- NULL, NULL,
- NULL, NULL);
- return 0;
-
- case GF_FILTER_FILTER_UID:
- case GF_FILTER_FILTER_GID:
- case GF_FILTER_RO_FS:
- STACK_UNWIND (frame, -1, EROFS, NULL,
- NULL, NULL,
- NULL, NULL);
- return 0;
- }
- STACK_WIND (frame,
- filter_rename_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->rename,
- oldloc, newloc);
- return 0;
-}
-
-
-static int32_t
-filter_link_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct iatt *buf,
- struct iatt *preparent,
- struct iatt *postparent)
-{
- int ret = 0;
- if (op_ret >= 0) {
- update_stat (buf, this->private);
- ret = inode_ctx_put (inode, this, (uint64_t)(long)buf->ia_uid);
- if (ret == -1) {
- gf_log (this->name, GF_LOG_ERROR,
- "couldn't set context");
- }
-
- update_stat (preparent, this->private);
- update_stat (postparent, this->private);
- }
- STACK_UNWIND (frame, op_ret, op_errno, inode, buf,
- preparent, postparent);
- return 0;
-}
-
-int32_t
-filter_link (call_frame_t *frame,
- xlator_t *this,
- loc_t *oldloc,
- loc_t *newloc)
-{
- int ret = 0;
- ret = update_frame (frame, oldloc->inode, this->private);
- switch (ret) {
- case GF_FILTER_FILTER_UID:
- case GF_FILTER_FILTER_GID:
- case GF_FILTER_RO_FS:
- STACK_UNWIND (frame, -1, EROFS, NULL, NULL,
- NULL, NULL);
- return 0;
- }
- STACK_WIND (frame,
- filter_link_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->link,
- oldloc, newloc);
- return 0;
-}
-
-
-static int32_t
-filter_create_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- fd_t *fd,
- inode_t *inode,
- struct iatt *buf,
- struct iatt *preparent,
- struct iatt *postparent)
-{
- int ret = 0;
- if (op_ret >= 0) {
- update_stat (buf, this->private);
- ret = inode_ctx_put (inode, this, (uint64_t)(long)buf->ia_uid);
- if (ret == -1) {
- gf_log (this->name, GF_LOG_ERROR,
- "couldn't set context");
- }
- update_stat (preparent, this->private);
- update_stat (postparent, this->private);
- }
- STACK_UNWIND (frame, op_ret, op_errno, fd, inode, buf,
- preparent, postparent);
- return 0;
-}
-
-int32_t
-filter_create (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- int32_t flags,
- mode_t mode, fd_t *fd)
-{
- int ret = 0;
- inode_t *parent = loc->parent;
- ret = update_frame (frame, loc->inode, this->private);
- switch (ret) {
- case GF_FILTER_MAP_UID:
- if (parent->st_mode & S_IWGRP)
- break;
- case GF_FILTER_MAP_BOTH:
- if (parent->st_mode & S_IWOTH)
- break;
- gf_log (this->name, GF_LOG_DEBUG, "%s: returning permission denied", loc->path);
- STACK_UNWIND (frame, -1, EPERM, NULL, NULL, NULL,
- NULL, NULL);
- return 0;
-
- case GF_FILTER_FILTER_UID:
- case GF_FILTER_FILTER_GID:
- case GF_FILTER_RO_FS:
- STACK_UNWIND (frame, -1, EROFS, NULL, NULL, NULL,
- NULL, NULL);
- return 0;
- }
- STACK_WIND (frame, filter_create_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->create,
- loc, flags, mode, fd);
- return 0;
-}
-
-static int32_t
-filter_open_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- fd_t *fd)
-{
- STACK_UNWIND (frame, op_ret, op_errno, fd);
- return 0;
-}
-
-int32_t
-filter_open (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- int32_t flags,
- fd_t *fd,
- int32_t wbflags)
-{
- int32_t ret = 0;
- ret = update_frame (frame, loc->inode, this->private);
- switch (ret) {
- case GF_FILTER_MAP_UID:
- if (loc->inode->st_mode & S_IWGRP)
- break;
- if (!(((flags & O_ACCMODE) == O_WRONLY)
- || ((flags & O_ACCMODE) == O_RDWR))
- && (loc->inode->st_mode & S_IRGRP))
- break;
- case GF_FILTER_MAP_BOTH:
- if (loc->inode->st_mode & S_IWOTH)
- break;
- if (!(((flags & O_ACCMODE) == O_WRONLY)
- || ((flags & O_ACCMODE) == O_RDWR))
- && (loc->inode->st_mode & S_IROTH))
- break;
- gf_log (this->name, GF_LOG_DEBUG,
- "%s: returning permission denied (mode: 0%o, flag=0%o)",
- loc->path, loc->inode->st_mode, flags);
- STACK_UNWIND (frame, -1, EPERM, fd);
- return 0;
- case GF_FILTER_FILTER_UID:
- case GF_FILTER_FILTER_GID:
- case GF_FILTER_RO_FS:
- if (!(((flags & O_ACCMODE) == O_WRONLY)
- || ((flags & O_ACCMODE) == O_RDWR)))
- break;
- STACK_UNWIND (frame, -1, EROFS, NULL);
- return 0;
-
- }
- STACK_WIND (frame,
- filter_open_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->open,
- loc, flags, fd, wbflags);
- return 0;
-}
-
-static int32_t
-filter_readv_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- struct iovec *vector,
- int32_t count,
- struct iatt *stbuf,
- struct iobref *iobref)
-{
- if (op_ret >= 0) {
- update_stat (stbuf, this->private);
- }
- STACK_UNWIND (frame,
- op_ret,
- op_errno,
- vector,
- count,
- stbuf,
- iobref);
- return 0;
-}
-
-int32_t
-filter_readv (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- size_t size,
- off_t offset)
-{
- STACK_WIND (frame,
- filter_readv_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->readv,
- fd,
- size,
- offset);
- return 0;
-}
-
-
-static int32_t
-filter_writev_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- struct iatt *prebuf,
- struct iatt *postbuf)
-{
- if (op_ret >= 0) {
- update_stat (prebuf, this->private);
- update_stat (postbuf, this->private);
- }
- STACK_UNWIND (frame,
- op_ret,
- op_errno,
- prebuf,
- postbuf);
- return 0;
-}
-
-int32_t
-filter_writev (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- struct iovec *vector,
- int32_t count,
- off_t off,
- struct iobref *iobref)
-{
- int32_t ret = 0;
- ret = update_frame (frame, fd->inode, this->private);
- switch (ret) {
- case GF_FILTER_FILTER_UID:
- case GF_FILTER_FILTER_GID:
- case GF_FILTER_RO_FS:
- STACK_UNWIND (frame, -1, EROFS, NULL, NULL);
- return 0;
- }
-
- STACK_WIND (frame,
- filter_writev_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->writev,
- fd,
- vector,
- count,
- off,
- iobref);
- return 0;
-}
-
-static int32_t
-filter_fstat_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- struct iatt *buf)
-{
- if (op_ret >= 0) {
- update_stat (buf, this->private);
- }
- STACK_UNWIND (frame,
- op_ret,
- op_errno,
- buf);
- return 0;
-}
-
-int32_t
-filter_fstat (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd)
-{
- STACK_WIND (frame,
- filter_fstat_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fstat,
- fd);
- return 0;
-}
-
-static int32_t
-filter_opendir_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- fd_t *fd)
-{
- STACK_UNWIND (frame,
- op_ret,
- op_errno,
- fd);
- return 0;
-}
-
-int32_t
-filter_opendir (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc, fd_t *fd)
-{
- int32_t ret = 0;
- ret = update_frame (frame, loc->inode, this->private);
- switch (ret) {
- case GF_FILTER_MAP_UID:
- if (loc->inode->st_mode & S_IWGRP)
- break;
- if (loc->inode->st_mode & S_IRGRP)
- break;
- case GF_FILTER_MAP_BOTH:
- if (loc->inode->st_mode & S_IWOTH)
- break;
- if (loc->inode->st_mode & S_IROTH)
- break;
- gf_log (this->name, GF_LOG_DEBUG, "%s: returning permission denied", loc->path);
- STACK_UNWIND (frame, -1, EPERM, fd);
- return 0;
- }
- STACK_WIND (frame,
- filter_opendir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->opendir,
- loc, fd);
- return 0;
-}
-
-
-static int32_t
-filter_setxattr_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno)
-{
- STACK_UNWIND (frame,
- op_ret,
- op_errno);
- return 0;
-}
-
-int32_t
-filter_setxattr (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- dict_t *dict,
- int32_t flags)
-{
-
- int32_t ret = 0;
- ret = update_frame (frame, loc->inode, this->private);
- switch (ret) {
- case GF_FILTER_MAP_UID:
- if (loc->inode->st_mode & S_IWGRP)
- break;
- case GF_FILTER_MAP_BOTH:
- if (loc->inode->st_mode & S_IWOTH)
- break;
- gf_log (this->name, GF_LOG_DEBUG, "%s: returning permission denied", loc->path);
- STACK_UNWIND (frame, -1, EPERM);
- return 0;
- case GF_FILTER_FILTER_UID:
- case GF_FILTER_FILTER_GID:
- case GF_FILTER_RO_FS:
- STACK_UNWIND (frame, -1, EROFS);
- return 0;
- }
-
- STACK_WIND (frame,
- filter_setxattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->setxattr,
- loc,
- dict,
- flags);
- return 0;
-}
-
-static int32_t
-filter_getxattr_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- dict_t *dict)
-{
- STACK_UNWIND (frame,
- op_ret,
- op_errno,
- dict);
- return 0;
-}
-
-int32_t
-filter_getxattr (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- const char *name)
-{
- int32_t ret = 0;
- ret = update_frame (frame, loc->inode, this->private);
- switch (ret) {
- case GF_FILTER_MAP_UID:
- if (loc->inode->st_mode & S_IRGRP)
- break;
- case GF_FILTER_MAP_BOTH:
- if (loc->inode->st_mode & S_IROTH)
- break;
- gf_log (this->name, GF_LOG_DEBUG, "%s: returning permission denied", loc->path);
- STACK_UNWIND (frame, -1, EPERM, NULL);
- return 0;
- }
-
- STACK_WIND (frame,
- filter_getxattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->getxattr,
- loc,
- name);
- return 0;
-}
-
-static int32_t
-filter_removexattr_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno)
-{
- STACK_UNWIND (frame, op_ret, op_errno);
- return 0;
-}
-
-int32_t
-filter_removexattr (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- const char *name)
-{
- int32_t ret = 0;
- ret = update_frame (frame, loc->inode, this->private);
- switch (ret) {
- case GF_FILTER_MAP_UID:
- if (loc->inode->st_mode & S_IWGRP)
- break;
- case GF_FILTER_MAP_BOTH:
- if (loc->inode->st_mode & S_IWOTH)
- break;
- gf_log (this->name, GF_LOG_DEBUG, "%s: returning permission denied", loc->path);
- STACK_UNWIND (frame, -1, EPERM);
- return 0;
- case GF_FILTER_FILTER_UID:
- case GF_FILTER_FILTER_GID:
- case GF_FILTER_RO_FS:
- STACK_UNWIND (frame, -1, EROFS);
- return 0;
- }
-
- STACK_WIND (frame,
- filter_removexattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->removexattr,
- loc,
- name);
- return 0;
-}
-
-int32_t
-mem_acct_init (xlator_t *this)
-{
- int ret = -1;
-
- if (!this)
- return ret;
-
- ret = xlator_mem_acct_init (this, gf_filter_mt_end + 1);
-
- if (ret != 0) {
- gf_log (this->name, GF_LOG_ERROR, "Memory accounting init"
- "failed");
- return ret;
- }
-
- return ret;
-}
-
-int32_t
-init (xlator_t *this)
-{
- char *value = NULL;
- char *tmp_str = NULL;
- char *tmp_str1 = NULL;
- char *tmp_str2 = NULL;
- char *dup_str = NULL;
- char *input_value_str1 = NULL;
- char *input_value_str2 = NULL;
- char *output_value_str = NULL;
- int32_t input_value = 0;
- int32_t output_value = 0;
- data_t *option_data = NULL;
- struct gf_filter *filter = NULL;
- gf_boolean_t tmp_bool = 0;
-
- if (!this->children || this->children->next) {
- gf_log (this->name,
- GF_LOG_ERROR,
- "translator not configured with exactly one child");
- return -1;
- }
-
- if (!this->parents) {
- gf_log (this->name, GF_LOG_WARNING,
- "dangling volume. check volfile ");
- }
-
- filter = GF_CALLOC (sizeof (*filter), 1, gf_filter_mt_gf_filter);
- ERR_ABORT (filter);
-
- if (dict_get (this->options, "read-only")) {
- value = data_to_str (dict_get (this->options, "read-only"));
- if (gf_string2boolean (value, &filter->complete_read_only) == -1) {
- gf_log (this->name, GF_LOG_ERROR,
- "wrong value provided for 'read-only'");
- return -1;
- }
- }
-
- if (dict_get (this->options, "root-squashing")) {
- value = data_to_str (dict_get (this->options, "root-squashing"));
- if (gf_string2boolean (value, &tmp_bool) == -1) {
- gf_log (this->name, GF_LOG_ERROR,
- "wrong value provided for 'root-squashing'");
- return -1;
- }
- if (tmp_bool) {
- filter->translate_num_uid_entries = 1;
- filter->translate_num_gid_entries = 1;
- filter->translate_input_uid[0][0] = GF_FILTER_ROOT_UID; /* root */
- filter->translate_input_uid[0][1] = GF_FILTER_ROOT_UID; /* root */
- filter->translate_input_gid[0][0] = GF_FILTER_ROOT_GID; /* root */
- filter->translate_input_gid[0][1] = GF_FILTER_ROOT_GID; /* root */
- filter->translate_output_uid[0] = GF_FILTER_NOBODY_UID;
- filter->translate_output_gid[0] = GF_FILTER_NOBODY_GID;
- }
- }
-
- if (dict_get (this->options, "translate-uid")) {
- option_data = dict_get (this->options, "translate-uid");
- value = strtok_r (option_data->data, ",", &tmp_str);
- while (value) {
- dup_str = gf_strdup (value);
- input_value_str1 = strtok_r (dup_str, "=", &tmp_str1);
- if (input_value_str1) {
- /* Check for n-m */
- char *temp_string = gf_strdup (input_value_str1);
- input_value_str2 = strtok_r (temp_string, "-", &tmp_str2);
- if (gf_string2int (input_value_str2, &input_value) != 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "invalid number format \"%s\"",
- input_value_str2);
- return -1;
- }
- filter->translate_input_uid[filter->translate_num_uid_entries][0] = input_value;
- input_value_str2 = strtok_r (NULL, "-", &tmp_str2);
- if (input_value_str2) {
- if (gf_string2int (input_value_str2, &input_value) != 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "invalid number format \"%s\"",
- input_value_str2);
- return -1;
- }
- }
- filter->translate_input_uid[filter->translate_num_uid_entries][1] = input_value;
- GF_FREE (temp_string);
- output_value_str = strtok_r (NULL, "=", &tmp_str1);
- if (output_value_str) {
- if (gf_string2int (output_value_str, &output_value) != 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "invalid number format \"%s\"",
- output_value_str);
- return -1;
- }
- } else {
- gf_log (this->name, GF_LOG_ERROR,
- "mapping string not valid");
- return -1;
- }
- } else {
- gf_log (this->name, GF_LOG_ERROR,
- "mapping string not valid");
- return -1;
- }
- filter->translate_output_uid[filter->translate_num_uid_entries] = output_value;
- gf_log (this->name,
- GF_LOG_DEBUG,
- "pair %d: input uid '%d' will be changed to uid '%d'",
- filter->translate_num_uid_entries, input_value, output_value);
-
- filter->translate_num_uid_entries++;
- if (filter->translate_num_uid_entries == GF_MAXIMUM_FILTERING_ALLOWED)
- break;
- value = strtok_r (NULL, ",", &tmp_str);
- GF_FREE (dup_str);
- }
- }
-
- tmp_str1 = NULL;
- tmp_str2 = NULL;
- tmp_str = NULL;
-
- if (dict_get (this->options, "translate-gid")) {
- option_data = dict_get (this->options, "translate-gid");
- value = strtok_r (option_data->data, ",", &tmp_str);
- while (value) {
- dup_str = gf_strdup (value);
- input_value_str1 = strtok_r (dup_str, "=", &tmp_str1);
- if (input_value_str1) {
- /* Check for n-m */
- char *temp_string = gf_strdup (input_value_str1);
- input_value_str2 = strtok_r (temp_string, "-", &tmp_str2);
- if (gf_string2int (input_value_str2, &input_value) != 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "invalid number format \"%s\"",
- input_value_str2);
- return -1;
- }
- filter->translate_input_gid[filter->translate_num_gid_entries][0] = input_value;
- input_value_str2 = strtok_r (NULL, "-", &tmp_str2);
- if (input_value_str2) {
- if (gf_string2int (input_value_str2, &input_value) != 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "invalid number format \"%s\"",
- input_value_str2);
- return -1;
- }
- }
- filter->translate_input_gid[filter->translate_num_gid_entries][1] = input_value;
- GF_FREE (temp_string);
- output_value_str = strtok_r (NULL, "=", &tmp_str1);
- if (output_value_str) {
- if (gf_string2int (output_value_str, &output_value) != 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "invalid number format \"%s\"",
- output_value_str);
- return -1;
- }
- } else {
- gf_log (this->name, GF_LOG_ERROR,
- "translate-gid value not valid");
- return -1;
- }
- } else {
- gf_log (this->name, GF_LOG_ERROR,
- "translate-gid value not valid");
- return -1;
- }
-
- filter->translate_output_gid[filter->translate_num_gid_entries] = output_value;
-
- gf_log (this->name, GF_LOG_DEBUG,
- "pair %d: input gid '%d' will be changed to gid '%d'",
- filter->translate_num_gid_entries, input_value, output_value);
-
- filter->translate_num_gid_entries++;
- if (filter->translate_num_gid_entries == GF_MAXIMUM_FILTERING_ALLOWED)
- break;
- value = strtok_r (NULL, ",", &tmp_str);
- GF_FREE (dup_str);
- }
- }
-
- tmp_str = NULL;
- tmp_str1 = NULL;
-
- if (dict_get (this->options, "filter-uid")) {
- option_data = dict_get (this->options, "filter-uid");
- value = strtok_r (option_data->data, ",", &tmp_str);
- while (value) {
- dup_str = gf_strdup (value);
- /* Check for n-m */
- input_value_str1 = strtok_r (dup_str, "-", &tmp_str1);
- if (gf_string2int (input_value_str1, &input_value) != 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "invalid number format \"%s\"",
- input_value_str1);
- return -1;
- }
- filter->filter_input_uid[filter->filter_num_uid_entries][0] = input_value;
- input_value_str1 = strtok_r (NULL, "-", &tmp_str1);
- if (input_value_str1) {
- if (gf_string2int (input_value_str1, &input_value) != 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "invalid number format \"%s\"",
- input_value_str1);
- return -1;
- }
- }
- filter->filter_input_uid[filter->filter_num_uid_entries][1] = input_value;
-
- gf_log (this->name,
- GF_LOG_DEBUG,
- "filter [%d]: input uid(s) '%s' will be filtered",
- filter->filter_num_uid_entries, dup_str);
-
- filter->filter_num_uid_entries++;
- if (filter->filter_num_uid_entries == GF_MAXIMUM_FILTERING_ALLOWED)
- break;
- value = strtok_r (NULL, ",", &tmp_str);
- GF_FREE (dup_str);
- }
- filter->partial_filter = 1;
- }
-
- tmp_str = NULL;
- tmp_str1 = NULL;
-
- if (dict_get (this->options, "filter-gid")) {
- option_data = dict_get (this->options, "filter-gid");
- value = strtok_r (option_data->data, ",", &tmp_str);
- while (value) {
- dup_str = gf_strdup (value);
- /* Check for n-m */
- input_value_str1 = strtok_r (dup_str, "-", &tmp_str1);
- if (gf_string2int (input_value_str1, &input_value) != 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "invalid number format \"%s\"",
- input_value_str1);
- return -1;
- }
- filter->filter_input_gid[filter->filter_num_gid_entries][0] = input_value;
- input_value_str1 = strtok_r (NULL, "-", &tmp_str1);
- if (input_value_str1) {
- if (gf_string2int (input_value_str1, &input_value) != 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "invalid number format \"%s\"",
- input_value_str1);
- return -1;
- }
- }
- filter->filter_input_gid[filter->filter_num_gid_entries][1] = input_value;
-
- gf_log (this->name,
- GF_LOG_DEBUG,
- "filter [%d]: input gid(s) '%s' will be filtered",
- filter->filter_num_gid_entries, dup_str);
-
- filter->filter_num_gid_entries++;
- if (filter->filter_num_gid_entries == GF_MAXIMUM_FILTERING_ALLOWED)
- break;
- value = strtok_r (NULL, ",", &tmp_str);
- GF_FREE (dup_str);
- }
- gf_log (this->name, GF_LOG_ERROR, "this option is not supported currently.. exiting");
- return -1;
- filter->partial_filter = 1;
- }
-
- if (dict_get (this->options, "fixed-uid")) {
- option_data = dict_get (this->options, "fixed-uid");
- if (gf_string2int (option_data->data, &input_value) != 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "invalid number format \"%s\"",
- option_data->data);
- return -1;
- }
- filter->fixed_uid = input_value;
- filter->fixed_uid_set = 1;
- }
-
- if (dict_get (this->options, "fixed-gid")) {
- option_data = dict_get (this->options, "fixed-gid");
- if (gf_string2int (option_data->data, &input_value) != 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "invalid number format \"%s\"",
- option_data->data);
- return -1;
- }
- filter->fixed_gid = input_value;
- filter->fixed_gid_set = 1;
- }
-
- this->private = filter;
- return 0;
-}
-
-
-void
-fini (xlator_t *this)
-{
- struct gf_filter *filter = this->private;
-
- GF_FREE (filter);
-
- return;
-}
-
-
-struct xlator_fops fops = {
- .lookup = filter_lookup,
- .stat = filter_stat,
- .fstat = filter_fstat,
- .readlink = filter_readlink,
- .mknod = filter_mknod,
- .mkdir = filter_mkdir,
- .unlink = filter_unlink,
- .rmdir = filter_rmdir,
- .symlink = filter_symlink,
- .rename = filter_rename,
- .link = filter_link,
- .truncate = filter_truncate,
- .ftruncate = filter_ftruncate,
- .create = filter_create,
- .open = filter_open,
- .readv = filter_readv,
- .writev = filter_writev,
- .setxattr = filter_setxattr,
- .getxattr = filter_getxattr,
- .removexattr = filter_removexattr,
- .opendir = filter_opendir,
- .setattr = filter_setattr,
- .fsetattr = filter_fsetattr,
-};
-
-struct xlator_cbks cbks = {
-};
-
-struct volume_options options[] = {
- { .key = { "root-squashing" },
- .type = GF_OPTION_TYPE_BOOL
- },
- { .key = { "read-only" },
- .type = GF_OPTION_TYPE_BOOL
- },
- { .key = { "fixed-uid" },
- .type = GF_OPTION_TYPE_INT
- },
- { .key = { "fixed-gid" },
- .type = GF_OPTION_TYPE_INT
- },
- { .key = { "translate-uid" },
- .type = GF_OPTION_TYPE_ANY
- },
- { .key = { "translate-gid" },
- .type = GF_OPTION_TYPE_ANY
- },
- { .key = { "filter-uid" },
- .type = GF_OPTION_TYPE_ANY
- },
- { .key = { "filter-gid" },
- .type = GF_OPTION_TYPE_ANY
- },
- { .key = {NULL} },
-};
diff --git a/xlators/features/gfid-access/Makefile.am b/xlators/features/gfid-access/Makefile.am
new file mode 100644
index 00000000000..af437a64d6d
--- /dev/null
+++ b/xlators/features/gfid-access/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = src
diff --git a/xlators/features/gfid-access/src/Makefile.am b/xlators/features/gfid-access/src/Makefile.am
new file mode 100644
index 00000000000..ff95604c4de
--- /dev/null
+++ b/xlators/features/gfid-access/src/Makefile.am
@@ -0,0 +1,16 @@
+xlator_LTLIBRARIES = gfid-access.la
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
+
+gfid_access_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+
+gfid_access_la_SOURCES = gfid-access.c
+gfid_access_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+
+noinst_HEADERS = gfid-access.h gfid-access-mem-types.h
+
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src
+
+AM_CFLAGS = -Wall $(GF_CFLAGS)
+
+CLEANFILES =
diff --git a/xlators/features/gfid-access/src/gfid-access-mem-types.h b/xlators/features/gfid-access/src/gfid-access-mem-types.h
new file mode 100644
index 00000000000..1c4d0b93de2
--- /dev/null
+++ b/xlators/features/gfid-access/src/gfid-access-mem-types.h
@@ -0,0 +1,22 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GFID_ACCESS_MEM_TYPES_H
+#define _GFID_ACCESS_MEM_TYPES_H
+
+#include <glusterfs/mem-types.h>
+
+enum gf_changelog_mem_types {
+ gf_gfid_access_mt_priv_t = gf_common_mt_end + 1,
+ gf_gfid_access_mt_gfid_t,
+ gf_gfid_access_mt_end
+};
+
+#endif
diff --git a/xlators/features/gfid-access/src/gfid-access.c b/xlators/features/gfid-access/src/gfid-access.c
new file mode 100644
index 00000000000..3fea5672a21
--- /dev/null
+++ b/xlators/features/gfid-access/src/gfid-access.c
@@ -0,0 +1,1420 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#include "gfid-access.h"
+#include <glusterfs/inode.h>
+#include <glusterfs/byte-order.h>
+#include <glusterfs/statedump.h>
+
+int
+ga_valid_inode_loc_copy(loc_t *dst, loc_t *src, xlator_t *this)
+{
+ int ret = 0;
+ uint64_t value = 0;
+
+    /* if it's an entry operation with the virtual */
+    /* directory inode as parent, we need to handle */
+    /* it properly */
+ ret = loc_copy(dst, src);
+ if (ret < 0)
+ goto out;
+
+ /*
+     * Replace ALL virtual inodes in the loc with their real inodes
+ */
+ if (dst->parent) {
+ ret = inode_ctx_get(dst->parent, this, &value);
+ if (ret < 0) {
+ ret = 0; // real-inode
+ goto out;
+ }
+ inode_unref(dst->parent);
+ dst->parent = inode_ref((inode_t *)(uintptr_t)value);
+ gf_uuid_copy(dst->pargfid, dst->parent->gfid);
+ }
+
+ if (dst->inode) {
+ ret = inode_ctx_get(dst->inode, this, &value);
+ if (ret < 0) {
+ ret = 0; // real-inode
+ goto out;
+ }
+ inode_unref(dst->inode);
+ dst->inode = inode_ref((inode_t *)(uintptr_t)value);
+ gf_uuid_copy(dst->gfid, dst->inode->gfid);
+ }
+out:
+
+ return ret;
+}
+
+void
+ga_newfile_args_free(ga_newfile_args_t *args)
+{
+ if (!args)
+ goto out;
+
+ GF_FREE(args->bname);
+
+ if (S_ISLNK(args->st_mode) && args->args.symlink.linkpath) {
+ GF_FREE(args->args.symlink.linkpath);
+ args->args.symlink.linkpath = NULL;
+ }
+
+ mem_put(args);
+out:
+ return;
+}
+
+void
+ga_heal_args_free(ga_heal_args_t *args)
+{
+ if (!args)
+ goto out;
+
+ GF_FREE(args->bname);
+
+ mem_put(args);
+out:
+ return;
+}
+
+ga_newfile_args_t *
+ga_newfile_parse_args(xlator_t *this, data_t *data)
+{
+ ga_newfile_args_t *args = NULL;
+ ga_private_t *priv = NULL;
+ int len = 0;
+ int blob_len = 0;
+ int min_len = 0;
+ void *blob = NULL;
+
+ priv = this->private;
+
+ blob = data->data;
+ blob_len = data->len;
+
+ min_len = sizeof(args->uid) + sizeof(args->gid) + sizeof(args->gfid) +
+ sizeof(args->st_mode) + 2 + 2;
+ if (blob_len < min_len) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "Invalid length: Total length is less "
+ "than minimum length.");
+ goto err;
+ }
+
+ args = mem_get0(priv->newfile_args_pool);
+ if (args == NULL)
+ goto err;
+
+ args->uid = ntoh32(*(uint32_t *)blob);
+ blob += sizeof(uint32_t);
+ blob_len -= sizeof(uint32_t);
+
+ args->gid = ntoh32(*(uint32_t *)blob);
+ blob += sizeof(uint32_t);
+ blob_len -= sizeof(uint32_t);
+
+ memcpy(args->gfid, blob, sizeof(args->gfid));
+ blob += sizeof(args->gfid);
+ blob_len -= sizeof(args->gfid);
+
+ args->st_mode = ntoh32(*(uint32_t *)blob);
+ blob += sizeof(uint32_t);
+ blob_len -= sizeof(uint32_t);
+
+ len = strnlen(blob, blob_len);
+ if (len == blob_len) {
+ gf_log(this->name, GF_LOG_ERROR, "gfid: %s. No null byte present.",
+ args->gfid);
+ goto err;
+ }
+
+ args->bname = GF_MALLOC(len + 1, gf_common_mt_char);
+ if (args->bname == NULL)
+ goto err;
+
+ memcpy(args->bname, blob, (len + 1));
+ blob += (len + 1);
+ blob_len -= (len + 1);
+
+ if (S_ISDIR(args->st_mode)) {
+ if (blob_len < sizeof(uint32_t)) {
+ gf_log(this->name, GF_LOG_ERROR, "gfid: %s. Invalid length",
+ args->gfid);
+ goto err;
+ }
+ args->args.mkdir.mode = ntoh32(*(uint32_t *)blob);
+ blob += sizeof(uint32_t);
+ blob_len -= sizeof(uint32_t);
+
+ if (blob_len < sizeof(uint32_t)) {
+ gf_log(this->name, GF_LOG_ERROR, "gfid: %s. Invalid length",
+ args->gfid);
+ goto err;
+ }
+ args->args.mkdir.umask = ntoh32(*(uint32_t *)blob);
+ blob_len -= sizeof(uint32_t);
+ if (blob_len < 0) {
+ gf_log(this->name, GF_LOG_ERROR, "gfid: %s. Invalid length",
+ args->gfid);
+ goto err;
+ }
+ } else if (S_ISLNK(args->st_mode)) {
+ len = strnlen(blob, blob_len);
+ if (len == blob_len) {
+ gf_log(this->name, GF_LOG_ERROR, "gfid: %s. Invalid length",
+ args->gfid);
+ goto err;
+ }
+ args->args.symlink.linkpath = GF_MALLOC(len + 1, gf_common_mt_char);
+ if (args->args.symlink.linkpath == NULL)
+ goto err;
+
+ memcpy(args->args.symlink.linkpath, blob, (len + 1));
+ blob_len -= (len + 1);
+ } else {
+ if (blob_len < sizeof(uint32_t)) {
+ gf_log(this->name, GF_LOG_ERROR, "gfid: %s. Invalid length",
+ args->gfid);
+ goto err;
+ }
+ args->args.mknod.mode = ntoh32(*(uint32_t *)blob);
+ blob += sizeof(uint32_t);
+ blob_len -= sizeof(uint32_t);
+
+ if (blob_len < sizeof(uint32_t)) {
+ gf_log(this->name, GF_LOG_ERROR, "gfid: %s. Invalid length",
+ args->gfid);
+ goto err;
+ }
+ args->args.mknod.rdev = ntoh32(*(uint32_t *)blob);
+ blob += sizeof(uint32_t);
+ blob_len -= sizeof(uint32_t);
+
+ if (blob_len < sizeof(uint32_t)) {
+ gf_log(this->name, GF_LOG_ERROR, "gfid: %s. Invalid length",
+ args->gfid);
+ goto err;
+ }
+ args->args.mknod.umask = ntoh32(*(uint32_t *)blob);
+ blob_len -= sizeof(uint32_t);
+ }
+
+ if (blob_len) {
+ gf_log(this->name, GF_LOG_ERROR, "gfid: %s. Invalid length",
+ args->gfid);
+ goto err;
+ }
+
+ return args;
+
+err:
+ if (args)
+ ga_newfile_args_free(args);
+
+ return NULL;
+}
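
ga_newfile_parse_args() above consumes a packed blob: uid and gid as network-order 32-bit integers, the gfid (in canonical string form, as implied by the gf_uuid_parse() call in ga_new_entry() below), st_mode, a NUL-terminated basename, and then type-specific fields (mode and umask for directories, a NUL-terminated link target for symlinks, mode/rdev/umask otherwise). A standalone sketch of how a caller might pack such a blob; the 37-byte canonical-UUID field width and the helper names are assumptions derived from the parser, not taken from the patch:

/* Standalone sketch of packing the "new file" blob consumed by
 * ga_newfile_parse_args(). Field order mirrors the parser above; the
 * 37-byte canonical UUID string and the buffer size are assumptions. */
#include <arpa/inet.h> /* htonl */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t
pack_u32(unsigned char *buf, uint32_t v)
{
    uint32_t n = htonl(v); /* the parser applies ntoh32() to each 32-bit field */
    memcpy(buf, &n, sizeof(n));
    return sizeof(n);
}

/* Packs a mknod-style entry: uid, gid, gfid string, st_mode, bname,
 * then mode, rdev, umask. Returns the blob length. */
static size_t
pack_newfile_mknod(unsigned char *buf, uint32_t uid, uint32_t gid,
                   const char *gfid_str, uint32_t st_mode, const char *bname,
                   uint32_t mode, uint32_t rdev, uint32_t umask_bits)
{
    size_t off = 0;

    off += pack_u32(buf + off, uid);
    off += pack_u32(buf + off, gid);
    memcpy(buf + off, gfid_str, 37); /* canonical UUID + NUL, assumed width */
    off += 37;
    off += pack_u32(buf + off, st_mode);
    memcpy(buf + off, bname, strlen(bname) + 1); /* NUL-terminated basename */
    off += strlen(bname) + 1;
    off += pack_u32(buf + off, mode);
    off += pack_u32(buf + off, rdev);
    off += pack_u32(buf + off, umask_bits);
    return off;
}

int
main(void)
{
    unsigned char blob[4096];
    size_t len = pack_newfile_mknod(blob, 0, 0,
                                    "11111111-2222-3333-4444-555555555555",
                                    0100644, "newfile", 0644, 0, 022);
    printf("blob length: %zu bytes\n", len);
    return 0;
}

Such a blob then travels as the value of a setxattr keyed by GF_FUSE_AUX_GFID_NEWFILE, which ga_setxattr() below routes to ga_new_entry().
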
+
+ga_heal_args_t *
+ga_heal_parse_args(xlator_t *this, data_t *data)
+{
+ ga_heal_args_t *args = NULL;
+ ga_private_t *priv = NULL;
+ void *blob = NULL;
+ int len = 0;
+ int blob_len = 0;
+
+ blob = data->data;
+ blob_len = data->len;
+
+ priv = this->private;
+
+    /* bname should contain at least one character */
+ if (blob_len < (sizeof(args->gfid) + 2))
+ goto err;
+
+ args = mem_get0(priv->heal_args_pool);
+ if (!args)
+ goto err;
+
+ memcpy(args->gfid, blob, sizeof(args->gfid));
+ blob += sizeof(args->gfid);
+ blob_len -= sizeof(args->gfid);
+
+ len = strnlen(blob, blob_len);
+ if (len == blob_len)
+ goto err;
+
+ args->bname = GF_MALLOC(len + 1, gf_common_mt_char);
+ if (!args->bname)
+ goto err;
+
+ memcpy(args->bname, blob, len);
+ args->bname[len] = '\0';
+ blob_len -= (len + 1);
+
+ if (blob_len)
+ goto err;
+
+ return args;
+
+err:
+ if (args)
+ ga_heal_args_free(args);
+
+ return NULL;
+}
+
+static int32_t
+ga_fill_tmp_loc(loc_t *loc, xlator_t *this, uuid_t gfid, char *bname,
+ dict_t *xdata, loc_t *new_loc)
+{
+ int ret = -1;
+ uint64_t value = 0;
+ inode_t *parent = NULL;
+ unsigned char *gfid_ptr = NULL;
+
+ parent = loc->inode;
+ ret = inode_ctx_get(loc->inode, this, &value);
+ if (!ret) {
+ parent = (void *)(uintptr_t)value;
+ if (gf_uuid_is_null(parent->gfid))
+ parent = loc->inode;
+ }
+
+ /* parent itself should be looked up */
+ gf_uuid_copy(new_loc->pargfid, parent->gfid);
+ new_loc->parent = inode_ref(parent);
+
+ new_loc->inode = inode_grep(parent->table, parent, bname);
+ if (!new_loc->inode) {
+ new_loc->inode = inode_new(parent->table);
+ gf_uuid_copy(new_loc->inode->gfid, gfid);
+ }
+
+ loc_path(new_loc, bname);
+ if (new_loc->path) {
+ new_loc->name = strrchr(new_loc->path, '/');
+ if (new_loc->name)
+ new_loc->name++;
+ }
+
+ gfid_ptr = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!gfid_ptr) {
+ ret = -1;
+ goto out;
+ }
+ gf_uuid_copy(gfid_ptr, gfid);
+ ret = dict_set_gfuuid(xdata, "gfid-req", gfid_ptr, false);
+ if (ret < 0)
+ goto out;
+
+ ret = 0;
+
+out:
+ if (ret && gfid_ptr)
+ GF_FREE(gfid_ptr);
+ return ret;
+}
+
+static gf_boolean_t
+__is_gfid_access_dir(uuid_t gfid)
+{
+ static uuid_t aux_gfid = {0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, GF_AUX_GFID};
+
+ if (gf_uuid_compare(gfid, aux_gfid) == 0)
+ return _gf_true;
+
+ return _gf_false;
+}
+
+int32_t
+ga_forget(xlator_t *this, inode_t *inode)
+{
+ int ret = -1;
+ uint64_t value = 0;
+ inode_t *tmp_inode = NULL;
+
+ ret = inode_ctx_del(inode, this, &value);
+ if (ret)
+ goto out;
+
+ tmp_inode = (void *)(uintptr_t)value;
+ inode_unref(tmp_inode);
+
+out:
+ return 0;
+}
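
Throughout this file the inode context slot holds a pointer to the referenced real inode, squeezed into a uint64_t (see ga_valid_inode_loc_copy() above and ga_virtual_lookup_cbk() below); ga_forget() fetches it back and drops the reference. A standalone sketch of that pointer-through-uint64_t round trip, with an illustrative fake_inode type standing in for the real inode structure:

/* Standalone sketch of the (uint64_t)(uintptr_t) round trip used when a
 * pointer is parked in an inode-context slot that only stores a uint64_t. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct fake_inode {
    int refcount;
};

int
main(void)
{
    struct fake_inode real = {.refcount = 1};
    uint64_t ctx_value = 0;

    /* store: cast the pointer through uintptr_t into the 64-bit slot */
    ctx_value = (uint64_t)(uintptr_t)&real;

    /* retrieve: cast back before using it as an inode again */
    struct fake_inode *back = (struct fake_inode *)(uintptr_t)ctx_value;

    assert(back == &real);
    printf("round trip ok, refcount=%d\n", back->refcount);
    return 0;
}
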
+
+static int
+ga_heal_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, inode_t *inode, struct iatt *stat, dict_t *dict,
+ struct iatt *postparent)
+{
+ call_frame_t *orig_frame = NULL;
+
+ orig_frame = frame->local;
+ frame->local = NULL;
+
+ /* don't worry about inode linking and other stuff. They'll happen on
+ * the next lookup.
+ */
+ STACK_DESTROY(frame->root);
+
+ STACK_UNWIND_STRICT(setxattr, orig_frame, op_ret, op_errno, dict);
+
+ return 0;
+}
+
+static int
+ga_newentry_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ ga_local_t *local = NULL;
+
+ local = frame->local;
+
+ /* don't worry about inode linking and other stuff. They'll happen on
+ * the next lookup.
+ */
+ frame->local = NULL;
+ STACK_DESTROY(frame->root);
+
+ STACK_UNWIND_STRICT(setxattr, local->orig_frame, op_ret, op_errno, xdata);
+
+ if (local->xdata)
+ dict_unref(local->xdata);
+ loc_wipe(&local->loc);
+ mem_put(local);
+
+ return 0;
+}
+
+static int
+ga_newentry_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *stat, dict_t *xdata,
+ struct iatt *postparent)
+
+{
+ ga_local_t *local = NULL;
+
+ local = frame->local;
+
+ if ((op_ret < 0) && ((op_errno != ENOENT) && (op_errno != ESTALE)))
+ goto err;
+
+ STACK_WIND(frame, ga_newentry_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mknod, &local->loc, local->mode,
+ local->rdev, local->umask, local->xdata);
+ return 0;
+
+err:
+ frame->local = NULL;
+ STACK_DESTROY(frame->root);
+ STACK_UNWIND_STRICT(setxattr, local->orig_frame, op_ret, op_errno, xdata);
+ if (local->xdata)
+ dict_unref(local->xdata);
+ loc_wipe(&local->loc);
+ mem_put(local);
+
+ return 0;
+}
+
+int32_t
+ga_new_entry(call_frame_t *frame, xlator_t *this, loc_t *loc, data_t *data,
+ dict_t *xdata)
+{
+ int ret = -1;
+ ga_newfile_args_t *args = NULL;
+ loc_t tmp_loc = {
+ 0,
+ };
+ call_frame_t *new_frame = NULL;
+ ga_local_t *local = NULL;
+ uuid_t gfid = {
+ 0,
+ };
+
+ if (!xdata) {
+ xdata = dict_new();
+ } else {
+ xdata = dict_ref(xdata);
+ }
+
+ if (!xdata) {
+ ret = -1;
+ goto out;
+ }
+
+ args = ga_newfile_parse_args(this, data);
+ if (!args)
+ goto out;
+
+ ret = gf_uuid_parse(args->gfid, gfid);
+ if (ret)
+ goto out;
+
+ ret = ga_fill_tmp_loc(loc, this, gfid, args->bname, xdata, &tmp_loc);
+ if (ret)
+ goto out;
+
+ new_frame = copy_frame(frame);
+ if (!new_frame)
+ goto out;
+
+ local = mem_get0(this->local_pool);
+ local->orig_frame = frame;
+
+ loc_copy(&local->loc, &tmp_loc);
+
+ new_frame->local = local;
+ new_frame->root->uid = args->uid;
+ new_frame->root->gid = args->gid;
+
+ if (S_ISDIR(args->st_mode)) {
+ STACK_WIND(new_frame, ga_newentry_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mkdir, &tmp_loc,
+ args->args.mkdir.mode, args->args.mkdir.umask, xdata);
+ } else if (S_ISLNK(args->st_mode)) {
+ STACK_WIND(new_frame, ga_newentry_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->symlink,
+ args->args.symlink.linkpath, &tmp_loc, 0, xdata);
+ } else {
+        /* use 07777 (four 7s) so that setuid/setgid/sticky bits are preserved */
+ ((ga_local_t *)new_frame->local)->mode = (S_IFMT & args->st_mode) |
+ (07777 &
+ args->args.mknod.mode);
+
+ ((ga_local_t *)new_frame->local)->umask = args->args.mknod.umask;
+ ((ga_local_t *)new_frame->local)->rdev = args->args.mknod.rdev;
+ ((ga_local_t *)new_frame->local)->xdata = dict_ref(xdata);
+
+        /* send a named lookup, so that dht can clean up stale linkto
+ * files etc.
+ */
+ STACK_WIND(new_frame, ga_newentry_lookup_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lookup, &tmp_loc, NULL);
+ }
+
+ ret = 0;
+out:
+ ga_newfile_args_free(args);
+
+ if (xdata)
+ dict_unref(xdata);
+
+ loc_wipe(&tmp_loc);
+
+ return ret;
+}
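
For the mknod case above, the mode stored in the local is rebuilt as (S_IFMT & st_mode) | (07777 & mknod.mode): the file-type bits come from st_mode while the permission plus setuid/setgid/sticky bits come from the requested mode. A small standalone check of that composition:

/* Standalone check of the mode composition used in ga_new_entry() for
 * the mknod case: type bits from st_mode, permission and setuid/setgid/
 * sticky bits from the requested mode. */
#include <stdio.h>
#include <sys/stat.h>

static mode_t
compose_mode(mode_t st_mode, mode_t req_mode)
{
    return (S_IFMT & st_mode) | (07777 & req_mode);
}

int
main(void)
{
    /* regular file, with setuid + rwxr-xr-x requested */
    mode_t m = compose_mode(S_IFREG | 0644, 04755);
    printf("0%o\n", (unsigned)m); /* prints 0104755 on Linux */
    return 0;
}
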
+
+int32_t
+ga_heal_entry(call_frame_t *frame, xlator_t *this, loc_t *loc, data_t *data,
+ dict_t *xdata)
+{
+ int ret = -1;
+ ga_heal_args_t *args = NULL;
+ loc_t tmp_loc = {
+ 0,
+ };
+ call_frame_t *new_frame = NULL;
+ uuid_t gfid = {
+ 0,
+ };
+
+ args = ga_heal_parse_args(this, data);
+ if (!args)
+ goto out;
+
+ ret = gf_uuid_parse(args->gfid, gfid);
+ if (ret)
+ goto out;
+
+ if (!xdata)
+ xdata = dict_new();
+ else
+ xdata = dict_ref(xdata);
+
+ if (!xdata) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = ga_fill_tmp_loc(loc, this, gfid, args->bname, xdata, &tmp_loc);
+ if (ret)
+ goto out;
+
+ new_frame = copy_frame(frame);
+ if (!new_frame)
+ goto out;
+
+ new_frame->local = (void *)frame;
+
+ STACK_WIND(new_frame, ga_heal_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lookup, &tmp_loc, xdata);
+
+ ret = 0;
+out:
+ if (args)
+ ga_heal_args_free(args);
+
+ loc_wipe(&tmp_loc);
+
+ if (xdata)
+ dict_unref(xdata);
+
+ return ret;
+}
+
+int32_t
+ga_setxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(setxattr, frame, op_ret, op_errno, xdata);
+ return 0;
+}
+
+int32_t
+ga_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
+ int32_t flags, dict_t *xdata)
+{
+ data_t *data = NULL;
+ int op_errno = ENOMEM;
+ int ret = 0;
+ loc_t ga_loc = {
+ 0,
+ };
+
+ GFID_ACCESS_INODE_OP_CHECK(loc, op_errno, err);
+
+ data = dict_get(dict, GF_FUSE_AUX_GFID_NEWFILE);
+ if (data) {
+ ret = ga_new_entry(frame, this, loc, data, xdata);
+ if (ret)
+ goto err;
+ return 0;
+ }
+
+ data = dict_get(dict, GF_FUSE_AUX_GFID_HEAL);
+ if (data) {
+ ret = ga_heal_entry(frame, this, loc, data, xdata);
+ if (ret)
+ goto err;
+ return 0;
+ }
+
+    // If the inode is a virtual inode, replace it with the real inode;
+    // otherwise perform the operation on the same inode
+ ret = ga_valid_inode_loc_copy(&ga_loc, loc, this);
+ if (ret < 0)
+ goto err;
+
+ STACK_WIND(frame, ga_setxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setxattr, &ga_loc, dict, flags, xdata);
+
+ loc_wipe(&ga_loc);
+ return 0;
+err:
+ STACK_UNWIND_STRICT(setxattr, frame, -1, op_errno, xdata);
+ return 0;
+}
+
+int32_t
+ga_virtual_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, dict_t *xdata, struct iatt *postparent)
+{
+ int ret = 0;
+ inode_t *cbk_inode = NULL;
+ inode_t *true_inode = NULL;
+ uuid_t random_gfid = {
+ 0,
+ };
+ inode_t *linked_inode = NULL;
+
+ if (frame->local)
+ cbk_inode = frame->local;
+ else
+ cbk_inode = inode_ref(inode);
+
+ frame->local = NULL;
+ if (op_ret)
+ goto unwind;
+
+ if (!IA_ISDIR(buf->ia_type))
+ goto unwind;
+
+ /* need to send back a different inode for linking in itable */
+ if (cbk_inode == inode) {
+ /* check if the inode is in the 'itable' or
+           if it's just a previously discover()'d inode */
+ true_inode = inode_find(inode->table, buf->ia_gfid);
+ if (!true_inode) {
+            /* This unref is for the inode_ref() done at the beginning.
+               It is needed because cbk_inode is reassigned to a newly
+               allocated inode, whose unref is taken at the end */
+ inode_unref(cbk_inode);
+ cbk_inode = inode_new(inode->table);
+
+ if (!cbk_inode) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+            /* the inode is not present in the itable, i.e., the actual
+ path is not yet looked up. Use the current inode
+ itself for now */
+
+ linked_inode = inode_link(inode, NULL, NULL, buf);
+ inode = linked_inode;
+ } else {
+ /* 'inode_ref()' has been done in inode_find() */
+ inode = true_inode;
+ }
+
+ ret = inode_ctx_put(cbk_inode, this, (uint64_t)(uintptr_t)inode);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "failed to set the inode ctx with"
+ "the actual inode");
+ if (inode)
+ inode_unref(inode);
+ }
+ inode = NULL;
+ }
+
+ if (!gf_uuid_is_null(cbk_inode->gfid)) {
+ /* if the previous linked inode is used, use the
+ same gfid */
+ gf_uuid_copy(random_gfid, cbk_inode->gfid);
+ } else {
+ /* replace the buf->ia_gfid to a random gfid
+ for directory, for files, what we received is fine */
+ gf_uuid_generate(random_gfid);
+ }
+
+ gf_uuid_copy(buf->ia_gfid, random_gfid);
+
+ buf->ia_ino = gfid_to_ino(buf->ia_gfid);
+
+unwind:
+    /* Lookup on a non-existing gfid returns ESTALE.
+       Convert it into ENOENT for the virtual lookup */
+ if (op_errno == ESTALE)
+ op_errno = ENOENT;
+
+ STACK_UNWIND_STRICT(lookup, frame, op_ret, op_errno, cbk_inode, buf, xdata,
+ postparent);
+
+    /* this unref also releases the ref taken on frame->local in ga_lookup */
+ if (cbk_inode)
+ inode_unref(cbk_inode);
+
+ return 0;
+}
+
+int32_t
+ga_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, inode_t *inode, struct iatt *buf, dict_t *xdata,
+ struct iatt *postparent)
+{
+ ga_private_t *priv = NULL;
+
+ /* if the entry in question is not 'root',
+ then follow the normal path */
+ if (op_ret || !__is_root_gfid(buf->ia_gfid))
+ goto unwind;
+
+ priv = this->private;
+
+ /* do we need to copy root stbuf every time? */
+    /* mostly yes, as we want the 'stat' info to show the latest
+       state in every _cbk() */
+
+ /* keep the reference for root stat buf */
+ priv->root_stbuf = *buf;
+ priv->gfiddir_stbuf = priv->root_stbuf;
+ priv->gfiddir_stbuf.ia_gfid[15] = GF_AUX_GFID;
+ priv->gfiddir_stbuf.ia_ino = GF_AUX_GFID;
+
+unwind:
+ STACK_UNWIND_STRICT(lookup, frame, op_ret, op_errno, inode, buf, xdata,
+ postparent);
+ return 0;
+}
+
+int32_t
+ga_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ ga_private_t *priv = NULL;
+ int ret = -1;
+ uuid_t tmp_gfid = {
+ 0,
+ };
+ loc_t tmp_loc = {
+ 0,
+ };
+ uint64_t value = 0;
+ inode_t *inode = NULL;
+ inode_t *true_inode = NULL;
+ int32_t op_errno = ENOENT;
+
+ priv = this->private;
+
+ /* Handle nameless lookup on ".gfid" */
+ if (!loc->parent && __is_gfid_access_dir(loc->gfid)) {
+ STACK_UNWIND_STRICT(lookup, frame, 0, 0, loc->inode,
+ &priv->gfiddir_stbuf, xdata, &priv->root_stbuf);
+ return 0;
+ }
+
+    /* if it's discover(), no need for any action here */
+ if (!loc->name)
+ goto wind;
+
+    /* if it's a revalidate and the inode is not a directory,
+       proceed with 'wind' */
+ if (loc->inode && loc->inode->ia_type && !IA_ISDIR(loc->inode->ia_type)) {
+ /* a revalidate on ".gfid/<dentry>" is possible, check for it */
+ if (((loc->parent && __is_gfid_access_dir(loc->parent->gfid)) ||
+ __is_gfid_access_dir(loc->pargfid))) {
+ /* here, just send 'loc->gfid' and 'loc->inode' */
+ tmp_loc.inode = inode_ref(loc->inode);
+ gf_uuid_copy(tmp_loc.gfid, loc->inode->gfid);
+
+ STACK_WIND(frame, default_lookup_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lookup, &tmp_loc, xdata);
+
+ inode_unref(tmp_loc.inode);
+
+ return 0;
+ }
+
+ /* not something to bother, continue the flow */
+ goto wind;
+ }
+
+ /* need to check if the lookup is on virtual dir */
+ if ((loc->name && !strcmp(GF_GFID_DIR, loc->name)) &&
+ ((loc->parent && __is_root_gfid(loc->parent->gfid)) ||
+ __is_root_gfid(loc->pargfid))) {
+ /* this means, the query is on '/.gfid', return the fake stat,
+ and say success */
+
+ STACK_UNWIND_STRICT(lookup, frame, 0, 0, loc->inode,
+ &priv->gfiddir_stbuf, xdata, &priv->root_stbuf);
+ return 0;
+ }
+
+ /* now, check if the lookup() is on an existing entry,
+ but on gfid-path */
+ if (!((loc->parent && __is_gfid_access_dir(loc->parent->gfid)) ||
+ __is_gfid_access_dir(loc->pargfid))) {
+ if (!loc->parent)
+ goto wind;
+
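+        /* the parent itself may be a virtual (gfid-access) inode; if its
+           inode ctx holds the real inode, redirect the lookup there */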
+ ret = inode_ctx_get(loc->parent, this, &value);
+ if (ret)
+ goto wind;
+
+ inode = (inode_t *)(uintptr_t)value;
+
+ ret = loc_copy_overload_parent(&tmp_loc, loc, inode);
+ if (ret)
+ goto err;
+
+ STACK_WIND(frame, ga_lookup_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lookup, &tmp_loc, xdata);
+
+ loc_wipe(&tmp_loc);
+ return 0;
+ }
+
+ /* make sure the 'basename' is actually a 'canonical-gfid',
+ otherwise, return error */
+ ret = gf_uuid_parse(loc->name, tmp_gfid);
+ if (ret)
+ goto err;
+
+    /* if it's a fresh lookup, go ahead and send it down; if not,
+       for a directory, we need indirection to the actual dir inode */
+ if (!(loc->inode && loc->inode->ia_type))
+ goto discover;
+
+ /* revalidate on directory */
+ ret = inode_ctx_get(loc->inode, this, &value);
+ if (ret)
+ goto err;
+
+ inode = (void *)(uintptr_t)value;
+
+ /* valid inode, already looked up, work on that */
+ if (inode->ia_type)
+ goto discover;
+
+ /* check if the inode is in the 'itable' or
+       if it's just a previously discover()'d inode */
+ true_inode = inode_find(loc->inode->table, tmp_gfid);
+ if (true_inode) {
+        /* time to do another lookup and update the context
+           with the proper inode */
+ op_errno = ESTALE;
+ /* 'inode_ref()' done in inode_find */
+ inode_unref(true_inode);
+ goto err;
+ }
+
+discover:
+ /* for the virtual entries, we don't need to send 'gfid-req' key, as
+ for these entries, we don't want to 'set' a new gfid */
+ if (xdata)
+ dict_del(xdata, "gfid-req");
+
+ gf_uuid_copy(tmp_loc.gfid, tmp_gfid);
+
+ /* if revalidate, then we need to have the proper reference */
+ if (inode) {
+ tmp_loc.inode = inode_ref(inode);
+ frame->local = inode_ref(loc->inode);
+ } else {
+ tmp_loc.inode = inode_ref(loc->inode);
+ }
+
+ STACK_WIND(frame, ga_virtual_lookup_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lookup, &tmp_loc, xdata);
+
+ inode_unref(tmp_loc.inode);
+
+ return 0;
+
+wind:
+ /* used for all the normal lookup path */
+ STACK_WIND(frame, ga_lookup_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lookup, loc, xdata);
+
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(lookup, frame, -1, op_errno, loc->inode,
+ &priv->gfiddir_stbuf, xdata, &priv->root_stbuf);
+ return 0;
+}
+
+int
+ga_mkdir(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ mode_t umask, dict_t *xdata)
+{
+ int op_errno = ENOMEM;
+
+ GFID_ACCESS_ENTRY_OP_CHECK(loc, op_errno, err);
+
+ STACK_WIND(frame, default_mkdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mkdir, loc, mode, umask, xdata);
+
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(mkdir, frame, -1, op_errno, loc->inode, NULL, NULL,
+ NULL, xdata);
+ return 0;
+}
+
+int
+ga_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+ mode_t mode, mode_t umask, fd_t *fd, dict_t *xdata)
+{
+ int op_errno = ENOMEM;
+
+ GFID_ACCESS_ENTRY_OP_CHECK(loc, op_errno, err);
+
+ STACK_WIND(frame, default_create_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->create, loc, flags, mode, umask, fd,
+ xdata);
+ return 0;
+err:
+ STACK_UNWIND_STRICT(create, frame, -1, op_errno, NULL, NULL, NULL, NULL,
+ NULL, xdata);
+
+ return 0;
+}
+
+int
+ga_symlink(call_frame_t *frame, xlator_t *this, const char *linkname,
+ loc_t *loc, mode_t umask, dict_t *xdata)
+{
+ int op_errno = ENOMEM;
+
+ GFID_ACCESS_ENTRY_OP_CHECK(loc, op_errno, err);
+
+ STACK_WIND(frame, default_symlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->symlink, linkname, loc, umask, xdata);
+ return 0;
+err:
+ STACK_UNWIND_STRICT(symlink, frame, -1, op_errno, NULL, NULL, NULL, NULL,
+ xdata);
+
+ return 0;
+}
+
+int
+ga_mknod(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ dev_t rdev, mode_t umask, dict_t *xdata)
+{
+ int op_errno = ENOMEM;
+
+ GFID_ACCESS_ENTRY_OP_CHECK(loc, op_errno, err);
+
+ STACK_WIND(frame, default_mknod_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mknod, loc, mode, rdev, umask, xdata);
+
+ return 0;
+err:
+ STACK_UNWIND_STRICT(mknod, frame, -1, op_errno, NULL, NULL, NULL, NULL,
+ xdata);
+
+ return 0;
+}
+
+int
+ga_rmdir(call_frame_t *frame, xlator_t *this, loc_t *loc, int flag,
+ dict_t *xdata)
+{
+ int op_errno = ENOMEM;
+ int ret = -1;
+ loc_t ga_loc = {
+ 0,
+ };
+
+ GFID_ACCESS_ENTRY_OP_CHECK(loc, op_errno, err);
+
+ ret = ga_valid_inode_loc_copy(&ga_loc, loc, this);
+ if (ret < 0)
+ goto err;
+
+ STACK_WIND(frame, default_rmdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rmdir, &ga_loc, flag, xdata);
+
+ loc_wipe(&ga_loc);
+ return 0;
+err:
+ STACK_UNWIND_STRICT(rmdir, frame, -1, op_errno, NULL, NULL, xdata);
+
+ return 0;
+}
+
+int
+ga_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t xflag,
+ dict_t *xdata)
+{
+ int op_errno = ENOMEM;
+ int ret = -1;
+ loc_t ga_loc = {
+ 0,
+ };
+
+ GFID_ACCESS_ENTRY_OP_CHECK(loc, op_errno, err);
+
+ ret = ga_valid_inode_loc_copy(&ga_loc, loc, this);
+ if (ret < 0)
+ goto err;
+
+ STACK_WIND(frame, default_unlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->unlink, &ga_loc, xflag, xdata);
+
+ loc_wipe(&ga_loc);
+ return 0;
+err:
+ STACK_UNWIND_STRICT(unlink, frame, -1, op_errno, NULL, NULL, xdata);
+
+ return 0;
+}
+
+int
+ga_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ dict_t *xdata)
+{
+ int op_errno = ENOMEM;
+ int ret = 0;
+ loc_t ga_oldloc = {
+ 0,
+ };
+ loc_t ga_newloc = {
+ 0,
+ };
+
+ GFID_ACCESS_ENTRY_OP_CHECK(oldloc, op_errno, err);
+ GFID_ACCESS_ENTRY_OP_CHECK(newloc, op_errno, err);
+
+ ret = ga_valid_inode_loc_copy(&ga_oldloc, oldloc, this);
+ if (ret < 0)
+ goto err;
+
+ ret = ga_valid_inode_loc_copy(&ga_newloc, newloc, this);
+ if (ret < 0) {
+ loc_wipe(&ga_oldloc);
+ goto err;
+ }
+
+ STACK_WIND(frame, default_rename_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rename, &ga_oldloc, &ga_newloc, xdata);
+
+ loc_wipe(&ga_newloc);
+ loc_wipe(&ga_oldloc);
+ return 0;
+err:
+ STACK_UNWIND_STRICT(rename, frame, -1, op_errno, NULL, NULL, NULL, NULL,
+ NULL, xdata);
+
+ return 0;
+}
+
+int
+ga_link(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ dict_t *xdata)
+{
+ int op_errno = ENOMEM;
+ int ret = 0;
+ loc_t ga_oldloc = {
+ 0,
+ };
+ loc_t ga_newloc = {
+ 0,
+ };
+
+ GFID_ACCESS_ENTRY_OP_CHECK(oldloc, op_errno, err);
+ GFID_ACCESS_ENTRY_OP_CHECK(newloc, op_errno, err);
+
+ ret = ga_valid_inode_loc_copy(&ga_oldloc, oldloc, this);
+ if (ret < 0)
+ goto err;
+
+ ret = ga_valid_inode_loc_copy(&ga_newloc, newloc, this);
+ if (ret < 0) {
+ loc_wipe(&ga_oldloc);
+ goto err;
+ }
+
+ STACK_WIND(frame, default_link_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->link, &ga_oldloc, &ga_newloc, xdata);
+
+ loc_wipe(&ga_newloc);
+ loc_wipe(&ga_oldloc);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(link, frame, -1, op_errno, NULL, NULL, NULL, NULL,
+ xdata);
+
+ return 0;
+}
+
+int32_t
+ga_opendir(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd,
+ dict_t *xdata)
+{
+ int op_errno = ENOMEM;
+
+ GFID_ACCESS_INODE_OP_CHECK(loc, op_errno, err);
+
+    /* also check if loc->inode itself is a virtual
+       inode; if yes, return failure, mainly because we
+       can't handle readdirp and other operations on it. */
+ if (inode_ctx_get(loc->inode, this, NULL) == 0) {
+ op_errno = ENOTSUP;
+ goto err;
+ }
+
+ STACK_WIND(frame, default_opendir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->opendir, loc, fd, xdata);
+ return 0;
+err:
+ STACK_UNWIND_STRICT(opendir, frame, -1, op_errno, NULL, xdata);
+
+ return 0;
+}
+
+int32_t
+ga_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, const char *name,
+ dict_t *xdata)
+{
+ int op_errno = ENOMEM;
+ int ret = -1;
+ loc_t ga_loc = {
+ 0,
+ };
+
+ GFID_ACCESS_INODE_OP_CHECK(loc, op_errno, err);
+ ret = ga_valid_inode_loc_copy(&ga_loc, loc, this);
+ if (ret < 0)
+ goto err;
+
+ STACK_WIND(frame, default_getxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->getxattr, &ga_loc, name, xdata);
+
+ loc_wipe(&ga_loc);
+
+ return 0;
+err:
+ STACK_UNWIND_STRICT(getxattr, frame, -1, op_errno, NULL, xdata);
+
+ return 0;
+}
+
+int32_t
+ga_stat(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ int op_errno = ENOMEM;
+ int ret = -1;
+ loc_t ga_loc = {
+ 0,
+ };
+ ga_private_t *priv = NULL;
+
+ priv = this->private;
+ /* If stat is on ".gfid" itself, do not wind further,
+ * return fake stat and return success.
+ */
+ if (__is_gfid_access_dir(loc->gfid))
+ goto out;
+
+ ret = ga_valid_inode_loc_copy(&ga_loc, loc, this);
+ if (ret < 0)
+ goto err;
+
+ STACK_WIND(frame, default_stat_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->stat, &ga_loc, xdata);
+
+ loc_wipe(&ga_loc);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(stat, frame, -1, op_errno, NULL, xdata);
+
+ return 0;
+
+out:
+ STACK_UNWIND_STRICT(stat, frame, 0, 0, &priv->gfiddir_stbuf, xdata);
+ return 0;
+}
+
+int32_t
+ga_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc, struct iatt *stbuf,
+ int32_t valid, dict_t *xdata)
+{
+ int op_errno = ENOMEM;
+ int ret = -1;
+ loc_t ga_loc = {
+ 0,
+ };
+
+ GFID_ACCESS_INODE_OP_CHECK(loc, op_errno, err);
+ ret = ga_valid_inode_loc_copy(&ga_loc, loc, this);
+ if (ret < 0)
+ goto err;
+
+ STACK_WIND(frame, default_setattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setattr, &ga_loc, stbuf, valid, xdata);
+
+ loc_wipe(&ga_loc);
+ return 0;
+err:
+ STACK_UNWIND_STRICT(setattr, frame, -1, op_errno, NULL, NULL, xdata);
+
+ return 0;
+}
+
+int32_t
+ga_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, dict_t *xdata)
+{
+ int op_errno = ENOMEM;
+ int ret = -1;
+ loc_t ga_loc = {
+ 0,
+ };
+
+ GFID_ACCESS_INODE_OP_CHECK(loc, op_errno, err);
+ ret = ga_valid_inode_loc_copy(&ga_loc, loc, this);
+ if (ret < 0)
+ goto err;
+
+ STACK_WIND(frame, default_removexattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->removexattr, &ga_loc, name, xdata);
+
+ loc_wipe(&ga_loc);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(removexattr, frame, -1, op_errno, xdata);
+
+ return 0;
+}
+
+int32_t
+mem_acct_init(xlator_t *this)
+{
+ int ret = -1;
+
+ if (!this)
+ return ret;
+
+ ret = xlator_mem_acct_init(this, gf_gfid_access_mt_end + 1);
+
+ if (ret != 0) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "Memory accounting"
+ " init failed");
+ return ret;
+ }
+
+ return ret;
+}
+
+int32_t
+init(xlator_t *this)
+{
+ ga_private_t *priv = NULL;
+ int ret = -1;
+
+ if (!this->children || this->children->next) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "not configured with exactly one child. exiting");
+ goto out;
+ }
+
+ /* This can be the top of graph in certain cases */
+ if (!this->parents) {
+ gf_log(this->name, GF_LOG_DEBUG, "dangling volume. check volfile ");
+ }
+
+ /* TODO: define a mem-type structure */
+ priv = GF_CALLOC(1, sizeof(*priv), gf_gfid_access_mt_priv_t);
+ if (!priv)
+ goto out;
+
+ priv->newfile_args_pool = mem_pool_new(ga_newfile_args_t, 512);
+ if (!priv->newfile_args_pool)
+ goto out;
+
+ priv->heal_args_pool = mem_pool_new(ga_heal_args_t, 512);
+ if (!priv->heal_args_pool)
+ goto out;
+
+ this->local_pool = mem_pool_new(ga_local_t, 16);
+ if (!this->local_pool) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "failed to create local_t's memory pool");
+ goto out;
+ }
+
+ this->private = priv;
+
+ ret = 0;
+out:
+    if (ret && priv) {
+        if (priv->newfile_args_pool)
+            mem_pool_destroy(priv->newfile_args_pool);
+        if (priv->heal_args_pool)
+            mem_pool_destroy(priv->heal_args_pool);
+        GF_FREE(priv);
+    }
+
+ return ret;
+}
+
+void
+fini(xlator_t *this)
+{
+ ga_private_t *priv = NULL;
+ priv = this->private;
+ this->private = NULL;
+
+ if (priv) {
+ if (priv->newfile_args_pool)
+ mem_pool_destroy(priv->newfile_args_pool);
+ if (priv->heal_args_pool)
+ mem_pool_destroy(priv->heal_args_pool);
+ GF_FREE(priv);
+ }
+
+ return;
+}
+
+int32_t
+ga_dump_inodectx(xlator_t *this, inode_t *inode)
+{
+ int ret = -1;
+ uint64_t value = 0;
+ inode_t *tmp_inode = NULL;
+ char key_prefix[GF_DUMP_MAX_BUF_LEN] = {
+ 0,
+ };
+
+ ret = inode_ctx_get(inode, this, &value);
+ if (ret == 0) {
+ tmp_inode = (void *)(uintptr_t)value;
+ gf_proc_dump_build_key(key_prefix, this->name, "inode");
+ gf_proc_dump_add_section("%s", key_prefix);
+ gf_proc_dump_write("real-gfid", "%s", uuid_utoa(tmp_inode->gfid));
+ }
+
+ return 0;
+}
+
+struct xlator_fops fops = {
+ .lookup = ga_lookup,
+
+ /* entry fops */
+ .mkdir = ga_mkdir,
+ .mknod = ga_mknod,
+ .create = ga_create,
+ .symlink = ga_symlink,
+ .link = ga_link,
+ .unlink = ga_unlink,
+ .rmdir = ga_rmdir,
+ .rename = ga_rename,
+
+ /* handle any other directory operations here */
+ .opendir = ga_opendir,
+ .stat = ga_stat,
+ .setattr = ga_setattr,
+ .getxattr = ga_getxattr,
+ .removexattr = ga_removexattr,
+
+ /* special fop to handle more entry creations */
+ .setxattr = ga_setxattr,
+};
+
+struct xlator_cbks cbks = {
+ .forget = ga_forget,
+};
+
+struct xlator_dumpops dumpops = {
+ .inodectx = ga_dump_inodectx,
+};
+
+struct volume_options options[] = {
+ /* This translator doesn't take any options, or provide any options */
+ {.key = {NULL}},
+};
+
+xlator_api_t xlator_api = {
+ .init = init,
+ .fini = fini,
+ .mem_acct_init = mem_acct_init,
+ .op_version = {1},
+ .fops = &fops,
+ .cbks = &cbks,
+ .options = options,
+ .identifier = "gfid-access",
+ .category = GF_MAINTAINED,
+};
diff --git a/xlators/features/gfid-access/src/gfid-access.h b/xlators/features/gfid-access/src/gfid-access.h
new file mode 100644
index 00000000000..b1e255e56c0
--- /dev/null
+++ b/xlators/features/gfid-access/src/gfid-access.h
@@ -0,0 +1,107 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef __GFID_ACCESS_H__
+#define __GFID_ACCESS_H__
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/dict.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+#include "gfid-access-mem-types.h"
+
+#define UUID_CANONICAL_FORM_LEN 36
+
+#define GF_FUSE_AUX_GFID_NEWFILE "glusterfs.gfid.newfile"
+#define GF_FUSE_AUX_GFID_HEAL "glusterfs.gfid.heal"
+
+#define GF_GFID_KEY "GLUSTERFS_GFID"
+#define GF_GFID_DIR ".gfid"
+#define GF_AUX_GFID 0xd
+
+#define GFID_ACCESS_ENTRY_OP_CHECK(loc, err, lbl) \
+ do { \
+ /* need to check if the lookup is on virtual dir */ \
+ if ((loc->name && !strcmp(GF_GFID_DIR, loc->name)) && \
+ ((loc->parent && __is_root_gfid(loc->parent->gfid)) || \
+ __is_root_gfid(loc->pargfid))) { \
+ err = ENOTSUP; \
+ goto lbl; \
+ } \
+ \
+ /* now, check if the lookup() is on an existing */ \
+ /* entry, but on gfid-path */ \
+ if ((loc->parent && __is_gfid_access_dir(loc->parent->gfid)) || \
+ __is_gfid_access_dir(loc->pargfid)) { \
+ err = EPERM; \
+ goto lbl; \
+ } \
+ } while (0)
+
+#define GFID_ACCESS_INODE_OP_CHECK(loc, err, lbl) \
+ do { \
+ /*Check if it is on .gfid*/ \
+ if (__is_gfid_access_dir(loc->gfid)) { \
+ err = ENOTSUP; \
+ goto lbl; \
+ } \
+ } while (0)
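+
+/* layout of the packed argument blob carried as the value of the
+   "glusterfs.gfid.newfile" setxattr */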
+typedef struct {
+ unsigned int uid;
+ unsigned int gid;
+ char gfid[UUID_CANONICAL_FORM_LEN + 1];
+ unsigned int st_mode;
+ char *bname;
+
+ union {
+ struct _symlink_in {
+ char *linkpath;
+ } __attribute__((__packed__)) symlink;
+
+ struct _mknod_in {
+ unsigned int mode;
+ unsigned int rdev;
+ unsigned int umask;
+ } __attribute__((__packed__)) mknod;
+
+ struct _mkdir_in {
+ unsigned int mode;
+ unsigned int umask;
+ } __attribute__((__packed__)) mkdir;
+ } __attribute__((__packed__)) args;
+} __attribute__((__packed__)) ga_newfile_args_t;
+
+typedef struct {
+ char gfid[UUID_CANONICAL_FORM_LEN + 1];
+ char *bname; /* a null terminated basename */
+} __attribute__((__packed__)) ga_heal_args_t;
+
+struct ga_private {
+ /* root inode's stbuf */
+ struct iatt root_stbuf;
+ struct iatt gfiddir_stbuf;
+ struct mem_pool *newfile_args_pool;
+ struct mem_pool *heal_args_pool;
+};
+typedef struct ga_private ga_private_t;
+
+struct __ga_local {
+ call_frame_t *orig_frame;
+ unsigned int uid;
+ unsigned int gid;
+ loc_t loc;
+ mode_t mode;
+ dev_t rdev;
+ mode_t umask;
+ dict_t *xdata;
+};
+typedef struct __ga_local ga_local_t;
+
+#endif /* __GFID_ACCESS_H__ */
diff --git a/xlators/features/index/Makefile.am b/xlators/features/index/Makefile.am
new file mode 100644
index 00000000000..a985f42a877
--- /dev/null
+++ b/xlators/features/index/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/xlators/features/index/src/Makefile.am b/xlators/features/index/src/Makefile.am
new file mode 100644
index 00000000000..c71c238c163
--- /dev/null
+++ b/xlators/features/index/src/Makefile.am
@@ -0,0 +1,19 @@
+if WITH_SERVER
+xlator_LTLIBRARIES = index.la
+endif
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
+
+index_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+
+index_la_SOURCES = index.c
+index_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+
+noinst_HEADERS = index.h index-mem-types.h index-messages.h
+
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \
+ -I$(top_srcdir)/rpc/rpc-lib/src
+
+AM_CFLAGS = -Wall $(GF_CFLAGS)
+
+CLEANFILES =
diff --git a/xlators/features/index/src/index-mem-types.h b/xlators/features/index/src/index-mem-types.h
new file mode 100644
index 00000000000..58833d0ec9b
--- /dev/null
+++ b/xlators/features/index/src/index-mem-types.h
@@ -0,0 +1,23 @@
+/*
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __INDEX_MEM_TYPES_H__
+#define __INDEX_MEM_TYPES_H__
+
+#include <glusterfs/mem-types.h>
+
+enum gf_index_mem_types_ {
+ gf_index_mt_priv_t = gf_common_mt_end + 1,
+ gf_index_inode_ctx_t,
+ gf_index_fd_ctx_t,
+ gf_index_mt_local_t,
+ gf_index_mt_end
+};
+#endif
diff --git a/xlators/features/index/src/index-messages.h b/xlators/features/index/src/index-messages.h
new file mode 100644
index 00000000000..364f17cd34e
--- /dev/null
+++ b/xlators/features/index/src/index-messages.h
@@ -0,0 +1,33 @@
+/*
+ Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+ */
+
+#ifndef _INDEX_MESSAGES_H_
+#define _INDEX_MESSAGES_H_
+
+#include <glusterfs/glfs-message-id.h>
+
+/* To add new message IDs, append new identifiers at the end of the list.
+ *
+ * Never remove a message ID. If it's not used anymore, you can rename it or
+ * leave it as it is, but not delete it. This is to prevent reutilization of
+ * IDs by other messages.
+ *
+ * The component name must match one of the entries defined in
+ * glfs-message-id.h.
+ */
+
+GLFS_MSGID(INDEX, INDEX_MSG_INDEX_DIR_CREATE_FAILED,
+ INDEX_MSG_INDEX_READDIR_FAILED, INDEX_MSG_INDEX_ADD_FAILED,
+ INDEX_MSG_INDEX_DEL_FAILED, INDEX_MSG_DICT_SET_FAILED,
+ INDEX_MSG_INODE_CTX_GET_SET_FAILED, INDEX_MSG_INVALID_ARGS,
+ INDEX_MSG_FD_OP_FAILED, INDEX_MSG_WORKER_THREAD_CREATE_FAILED,
+ INDEX_MSG_INVALID_GRAPH);
+
+#endif /* !_INDEX_MESSAGES_H_ */
diff --git a/xlators/features/index/src/index.c b/xlators/features/index/src/index.c
new file mode 100644
index 00000000000..4abb2c73ce5
--- /dev/null
+++ b/xlators/features/index/src/index.c
@@ -0,0 +1,2682 @@
+/*
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#include "index.h"
+#include <glusterfs/options.h>
+#include "glusterfs3-xdr.h"
+#include <glusterfs/syscall.h>
+#include <glusterfs/syncop.h>
+#include <glusterfs/common-utils.h>
+#include "index-messages.h"
+#include <ftw.h>
+#include <libgen.h> /* for dirname() */
+#include <signal.h>
+
+#define XATTROP_SUBDIR "xattrop"
+#define DIRTY_SUBDIR "dirty"
+#define ENTRY_CHANGES_SUBDIR "entry-changes"
+
+struct index_syncop_args {
+ inode_t *parent;
+ gf_dirent_t *entries;
+ char *path;
+};
+
+static char *index_vgfid_xattrs[XATTROP_TYPE_END] = {
+ [XATTROP] = GF_XATTROP_INDEX_GFID,
+ [DIRTY] = GF_XATTROP_DIRTY_GFID,
+ [ENTRY_CHANGES] = GF_XATTROP_ENTRY_CHANGES_GFID};
+
+static char *index_subdirs[XATTROP_TYPE_END] = {
+ [XATTROP] = XATTROP_SUBDIR,
+ [DIRTY] = DIRTY_SUBDIR,
+ [ENTRY_CHANGES] = ENTRY_CHANGES_SUBDIR};
+
+int
+index_get_type_from_vgfid(index_priv_t *priv, uuid_t vgfid)
+{
+ int i = 0;
+
+ for (i = 0; i < XATTROP_TYPE_END; i++) {
+ if (gf_uuid_compare(priv->internal_vgfid[i], vgfid) == 0)
+ return i;
+ }
+ return -1;
+}
+
+gf_boolean_t
+index_is_virtual_gfid(index_priv_t *priv, uuid_t vgfid)
+{
+ if (index_get_type_from_vgfid(priv, vgfid) < 0)
+ return _gf_false;
+ return _gf_true;
+}
+
+static int
+__index_inode_ctx_get(inode_t *inode, xlator_t *this, index_inode_ctx_t **ctx)
+{
+ int ret = 0;
+ index_inode_ctx_t *ictx = NULL;
+ uint64_t tmpctx = 0;
+
+ ret = __inode_ctx_get(inode, this, &tmpctx);
+ if (!ret) {
+ ictx = (index_inode_ctx_t *)(long)tmpctx;
+ goto out;
+ }
+ ictx = GF_CALLOC(1, sizeof(*ictx), gf_index_inode_ctx_t);
+ if (!ictx) {
+ ret = -1;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&ictx->callstubs);
+ ret = __inode_ctx_put(inode, this, (uint64_t)(uintptr_t)ictx);
+ if (ret) {
+ GF_FREE(ictx);
+ ictx = NULL;
+ goto out;
+ }
+out:
+ if (ictx)
+ *ctx = ictx;
+ return ret;
+}
+
+static int
+index_inode_ctx_get(inode_t *inode, xlator_t *this, index_inode_ctx_t **ctx)
+{
+ int ret = 0;
+
+ LOCK(&inode->lock);
+ {
+ ret = __index_inode_ctx_get(inode, this, ctx);
+ }
+ UNLOCK(&inode->lock);
+
+ return ret;
+}
+
+static gf_boolean_t
+index_is_subdir_of_entry_changes(xlator_t *this, inode_t *inode)
+{
+ index_inode_ctx_t *ctx = NULL;
+ int ret = 0;
+
+ if (!inode)
+ return _gf_false;
+
+ ret = index_inode_ctx_get(inode, this, &ctx);
+ if ((ret == 0) && !gf_uuid_is_null(ctx->virtual_pargfid))
+ return _gf_true;
+ return _gf_false;
+}
+
+static int
+index_get_type_from_vgfid_xattr(const char *name)
+{
+ int i = 0;
+
+ for (i = 0; i < XATTROP_TYPE_END; i++) {
+ if (strcmp(name, index_vgfid_xattrs[i]) == 0)
+ return i;
+ }
+ return -1;
+}
+
+gf_boolean_t
+index_is_fop_on_internal_inode(xlator_t *this, inode_t *inode, uuid_t gfid)
+{
+ index_priv_t *priv = this->private;
+ uuid_t vgfid = {0};
+
+ if (!inode)
+ return _gf_false;
+
+ if (gfid && !gf_uuid_is_null(gfid))
+ gf_uuid_copy(vgfid, gfid);
+ else
+ gf_uuid_copy(vgfid, inode->gfid);
+
+ if (index_is_virtual_gfid(priv, vgfid))
+ return _gf_true;
+ if (index_is_subdir_of_entry_changes(this, inode))
+ return _gf_true;
+ return _gf_false;
+}
+
+static gf_boolean_t
+index_is_vgfid_xattr(const char *name)
+{
+ if (index_get_type_from_vgfid_xattr(name) < 0)
+ return _gf_false;
+ return _gf_true;
+}
+
+call_stub_t *
+__index_dequeue(struct list_head *callstubs)
+{
+ call_stub_t *stub = NULL;
+
+ if (!list_empty(callstubs)) {
+ stub = list_entry(callstubs->next, call_stub_t, list);
+ list_del_init(&stub->list);
+ }
+
+ return stub;
+}
+
+static void
+__index_enqueue(struct list_head *callstubs, call_stub_t *stub)
+{
+ list_add_tail(&stub->list, callstubs);
+}
+
+static void
+worker_enqueue(xlator_t *this, call_stub_t *stub)
+{
+ index_priv_t *priv = NULL;
+
+ priv = this->private;
+ pthread_mutex_lock(&priv->mutex);
+ {
+ __index_enqueue(&priv->callstubs, stub);
+ GF_ATOMIC_INC(priv->stub_cnt);
+ pthread_cond_signal(&priv->cond);
+ }
+ pthread_mutex_unlock(&priv->mutex);
+}
+
+void *
+index_worker(void *data)
+{
+ index_priv_t *priv = NULL;
+ xlator_t *this = NULL;
+ call_stub_t *stub = NULL;
+ gf_boolean_t bye = _gf_false;
+
+ THIS = data;
+ this = data;
+ priv = this->private;
+
+ for (;;) {
+ pthread_mutex_lock(&priv->mutex);
+ {
+ while (list_empty(&priv->callstubs)) {
+ if (priv->down) {
+ bye = _gf_true; /*Avoid wait*/
+ break;
+ }
+ (void)pthread_cond_wait(&priv->cond, &priv->mutex);
+ if (priv->down) {
+ bye = _gf_true;
+ break;
+ }
+ }
+ if (!bye)
+ stub = __index_dequeue(&priv->callstubs);
+ if (bye) {
+ priv->curr_count--;
+ if (priv->curr_count == 0)
+ pthread_cond_broadcast(&priv->cond);
+ }
+ }
+ pthread_mutex_unlock(&priv->mutex);
+
+ if (stub) { /* guard against spurious wakeups */
+ call_resume(stub);
+ GF_ATOMIC_DEC(priv->stub_cnt);
+ }
+ stub = NULL;
+ if (bye)
+ break;
+ }
+
+ return NULL;
+}
+
+static void
+make_index_dir_path(char *base, const char *subdir, char *index_dir, size_t len)
+{
+ snprintf(index_dir, len, "%s/%s", base, subdir);
+}
+
+int
+index_dir_create(xlator_t *this, const char *subdir)
+{
+ int ret = 0;
+ struct stat st = {0};
+ char fullpath[PATH_MAX] = {0};
+ char path[PATH_MAX] = {0};
+ char *dir = NULL;
+ index_priv_t *priv = NULL;
+ size_t len = 0;
+ size_t pathlen = 0;
+
+ priv = this->private;
+ make_index_dir_path(priv->index_basepath, subdir, fullpath,
+ sizeof(fullpath));
+ ret = sys_stat(fullpath, &st);
+ if (!ret) {
+ if (!S_ISDIR(st.st_mode))
+ ret = -2;
+ goto out;
+ }
+
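+    /* the index dir does not exist yet; create each component of its path,
+       ignoring EEXIST */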
+ pathlen = strlen(fullpath);
+ if ((pathlen > 1) && fullpath[pathlen - 1] == '/')
+ fullpath[pathlen - 1] = '\0';
+ dir = strchr(fullpath, '/');
+ while (dir) {
+ dir = strchr(dir + 1, '/');
+ if (dir)
+ len = pathlen - strlen(dir);
+ else
+ len = pathlen;
+ strncpy(path, fullpath, len);
+ path[len] = '\0';
+ ret = sys_mkdir(path, 0600);
+ if (ret && (errno != EEXIST))
+ goto out;
+ }
+ ret = 0;
+out:
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, errno,
+ INDEX_MSG_INDEX_DIR_CREATE_FAILED,
+ "%s/%s: Failed to "
+ "create",
+ priv->index_basepath, subdir);
+ } else if (ret == -2) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOTDIR,
+ INDEX_MSG_INDEX_DIR_CREATE_FAILED,
+ "%s/%s: Failed to "
+ "create, path exists, not a directory ",
+ priv->index_basepath, subdir);
+ }
+ return ret;
+}
+
+void
+index_get_index(index_priv_t *priv, uuid_t index)
+{
+ LOCK(&priv->lock);
+ {
+ gf_uuid_copy(index, priv->index);
+ }
+ UNLOCK(&priv->lock);
+}
+
+void
+index_generate_index(index_priv_t *priv, uuid_t index)
+{
+ LOCK(&priv->lock);
+ {
+        // Generate a new index only if no other thread has already done so.
+        // This scheme fails if the number of contending threads is greater
+        // than the MAX_LINK count of the fs
+ if (!gf_uuid_compare(priv->index, index))
+ gf_uuid_generate(priv->index);
+ gf_uuid_copy(index, priv->index);
+ }
+ UNLOCK(&priv->lock);
+}
+
+static void
+make_index_path(char *base, const char *subdir, uuid_t index, char *index_path,
+ size_t len)
+{
+ make_index_dir_path(base, subdir, index_path, len);
+ snprintf(index_path + strlen(index_path), len - strlen(index_path),
+ "/%s-%s", subdir, uuid_utoa(index));
+}
+
+static void
+make_gfid_path(char *base, const char *subdir, uuid_t gfid, char *gfid_path,
+ size_t len)
+{
+ make_index_dir_path(base, subdir, gfid_path, len);
+ snprintf(gfid_path + strlen(gfid_path), len - strlen(gfid_path), "/%s",
+ uuid_utoa(gfid));
+}
+
+static void
+make_file_path(char *base, const char *subdir, const char *filename,
+ char *file_path, size_t len)
+{
+ make_index_dir_path(base, subdir, file_path, len);
+ snprintf(file_path + strlen(file_path), len - strlen(file_path), "/%s",
+ filename);
+}
+
+static int
+is_index_file_current(char *filename, uuid_t priv_index, char *subdir)
+{
+ char current_index[GF_UUID_BUF_SIZE + 16] = {
+ 0,
+ };
+
+ snprintf(current_index, sizeof current_index, "%s-%s", subdir,
+ uuid_utoa(priv_index));
+ return (!strcmp(filename, current_index));
+}
+
+static void
+check_delete_stale_index_file(xlator_t *this, char *filename, char *subdir)
+{
+ int ret = 0;
+ struct stat st = {0};
+ char filepath[PATH_MAX] = {0};
+ index_priv_t *priv = NULL;
+
+ priv = this->private;
+
+ if (is_index_file_current(filename, priv->index, subdir))
+ return;
+
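+    /* this is a base index file of an older generation; once its link count
+       drops to 1 there are no per-gfid links left, so remove it */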
+ make_file_path(priv->index_basepath, subdir, filename, filepath,
+ sizeof(filepath));
+ ret = sys_stat(filepath, &st);
+ if (!ret && st.st_nlink == 1)
+ sys_unlink(filepath);
+}
+
+static void
+index_set_link_count(index_priv_t *priv, int64_t count,
+ index_xattrop_type_t type)
+{
+ switch (type) {
+ case XATTROP:
+ LOCK(&priv->lock);
+ {
+ priv->pending_count = count;
+ }
+ UNLOCK(&priv->lock);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+index_get_link_count(index_priv_t *priv, int64_t *count,
+ index_xattrop_type_t type)
+{
+ switch (type) {
+ case XATTROP:
+ LOCK(&priv->lock);
+ {
+ *count = priv->pending_count;
+ }
+ UNLOCK(&priv->lock);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+index_dec_link_count(index_priv_t *priv, index_xattrop_type_t type)
+{
+ switch (type) {
+ case XATTROP:
+ LOCK(&priv->lock);
+ {
+ priv->pending_count--;
+ if (priv->pending_count == 0)
+ priv->pending_count--;
+ }
+ UNLOCK(&priv->lock);
+ break;
+ default:
+ break;
+ }
+}
+
+char *
+index_get_subdir_from_type(index_xattrop_type_t type)
+{
+ if (type < XATTROP || type >= XATTROP_TYPE_END)
+ return NULL;
+ return index_subdirs[type];
+}
+
+char *
+index_get_subdir_from_vgfid(index_priv_t *priv, uuid_t vgfid)
+{
+ return index_get_subdir_from_type(index_get_type_from_vgfid(priv, vgfid));
+}
+
+static int
+index_fill_readdir(fd_t *fd, index_fd_ctx_t *fctx, DIR *dir, off_t off,
+ size_t size, gf_dirent_t *entries)
+{
+ off_t in_case = -1;
+ off_t last_off = 0;
+ size_t filled = 0;
+ int count = 0;
+ struct dirent *entry = NULL;
+ struct dirent scratch[2] = {
+ {
+ 0,
+ },
+ };
+ int32_t this_size = -1;
+ gf_dirent_t *this_entry = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ if (!off) {
+ rewinddir(dir);
+ } else {
+ seekdir(dir, off);
+#ifndef GF_LINUX_HOST_OS
+ if ((u_long)telldir(dir) != off && off != fctx->dir_eof) {
+ gf_msg(THIS->name, GF_LOG_ERROR, EINVAL,
+ INDEX_MSG_INDEX_READDIR_FAILED,
+ "seekdir(0x%llx) failed on dir=%p: "
+ "Invalid argument (offset reused from "
+ "another DIR * structure?)",
+ off, dir);
+ errno = EINVAL;
+ count = -1;
+ goto out;
+ }
+#endif /* GF_LINUX_HOST_OS */
+ }
+
+ while (filled <= size) {
+ in_case = (u_long)telldir(dir);
+
+ if (in_case == -1) {
+ gf_msg(THIS->name, GF_LOG_ERROR, errno,
+ INDEX_MSG_INDEX_READDIR_FAILED, "telldir failed on dir=%p",
+ dir);
+ goto out;
+ }
+
+ errno = 0;
+ entry = sys_readdir(dir, scratch);
+ if (!entry || errno != 0) {
+ if (errno == EBADF) {
+ gf_msg(THIS->name, GF_LOG_WARNING, errno,
+ INDEX_MSG_INDEX_READDIR_FAILED,
+ "readdir failed on dir=%p", dir);
+ goto out;
+ }
+ break;
+ }
+
+ if (!strncmp(entry->d_name, XATTROP_SUBDIR "-",
+ strlen(XATTROP_SUBDIR "-"))) {
+ check_delete_stale_index_file(this, entry->d_name, XATTROP_SUBDIR);
+ continue;
+ } else if (!strncmp(entry->d_name, DIRTY_SUBDIR "-",
+ strlen(DIRTY_SUBDIR "-"))) {
+ check_delete_stale_index_file(this, entry->d_name, DIRTY_SUBDIR);
+ continue;
+ }
+
+ this_size = max(sizeof(gf_dirent_t), sizeof(gfs3_dirplist)) +
+ strlen(entry->d_name) + 1;
+
+ if (this_size + filled > size) {
+ seekdir(dir, in_case);
+#ifndef GF_LINUX_HOST_OS
+ if ((u_long)telldir(dir) != in_case && in_case != fctx->dir_eof) {
+ gf_msg(THIS->name, GF_LOG_ERROR, EINVAL,
+ INDEX_MSG_INDEX_READDIR_FAILED,
+ "seekdir(0x%llx) failed on dir=%p: "
+ "Invalid argument (offset reused from "
+ "another DIR * structure?)",
+ in_case, dir);
+ errno = EINVAL;
+ count = -1;
+ goto out;
+ }
+#endif /* GF_LINUX_HOST_OS */
+ break;
+ }
+
+ this_entry = gf_dirent_for_name(entry->d_name);
+
+ if (!this_entry) {
+ gf_msg(THIS->name, GF_LOG_ERROR, errno,
+ INDEX_MSG_INDEX_READDIR_FAILED,
+ "could not create gf_dirent for entry %s", entry->d_name);
+ goto out;
+ }
+ /*
+         * we store the offset of the next entry here, which is
+         * probably not intended, but code using syncop_readdir()
+         * (glfs-heal.c, afr-self-heald.c, pump.c) relies on it
+         * for directory read resumption.
+ */
+ last_off = (u_long)telldir(dir);
+ this_entry->d_off = last_off;
+ this_entry->d_ino = entry->d_ino;
+
+ list_add_tail(&this_entry->list, &entries->list);
+
+ filled += this_size;
+ count++;
+ }
+
+ errno = 0;
+
+ if ((!sys_readdir(dir, scratch) && (errno == 0))) {
+ /* Indicate EOF */
+ errno = ENOENT;
+ /* Remember EOF offset for later detection */
+ fctx->dir_eof = last_off;
+ }
+out:
+ return count;
+}
+
+int
+index_link_to_base(xlator_t *this, char *fpath, const char *subdir)
+{
+ int ret = 0;
+ int fd = 0;
+ int op_errno = 0;
+ uuid_t index = {0};
+ index_priv_t *priv = this->private;
+ char base[PATH_MAX] = {0};
+
+ index_get_index(priv, index);
+ make_index_path(priv->index_basepath, subdir, index, base, sizeof(base));
+
+ ret = sys_link(base, fpath);
+ if (!ret || (errno == EEXIST)) {
+ ret = 0;
+ goto out;
+ }
+
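+    /* the link failed: either the index dir is missing (ENOENT) or the base
+       file has hit the fs link limit (EMLINK); fix that up, (re)create the
+       base file and retry the link */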
+ op_errno = errno;
+ if (op_errno == ENOENT) {
+ ret = index_dir_create(this, subdir);
+ if (ret) {
+ op_errno = errno;
+ goto out;
+ }
+ } else if (op_errno == EMLINK) {
+ index_generate_index(priv, index);
+ make_index_path(priv->index_basepath, subdir, index, base,
+ sizeof(base));
+ } else {
+ goto out;
+ }
+
+ op_errno = 0;
+ fd = sys_creat(base, 0);
+ if ((fd < 0) && (errno != EEXIST)) {
+ op_errno = errno;
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, INDEX_MSG_INDEX_ADD_FAILED,
+ "%s: Not able to "
+ "create index",
+ fpath);
+ goto out;
+ }
+
+ if (fd >= 0)
+ sys_close(fd);
+
+ ret = sys_link(base, fpath);
+ if (ret && (errno != EEXIST)) {
+ op_errno = errno;
+ gf_msg(this->name, GF_LOG_ERROR, errno, INDEX_MSG_INDEX_ADD_FAILED,
+ "%s: Not able to "
+ "add to index",
+ fpath);
+ goto out;
+ }
+out:
+ return -op_errno;
+}
+
+int
+index_add(xlator_t *this, uuid_t gfid, const char *subdir,
+ index_xattrop_type_t type)
+{
+ char gfid_path[PATH_MAX] = {0};
+ int ret = -1;
+ index_priv_t *priv = NULL;
+ struct stat st = {0};
+
+ priv = this->private;
+
+ if (gf_uuid_is_null(gfid)) {
+ GF_ASSERT(0);
+ goto out;
+ }
+
+ make_gfid_path(priv->index_basepath, subdir, gfid, gfid_path,
+ sizeof(gfid_path));
+
+ ret = sys_stat(gfid_path, &st);
+ if (!ret)
+ goto out;
+ ret = index_link_to_base(this, gfid_path, subdir);
+out:
+ return ret;
+}
+
+int
+index_del(xlator_t *this, uuid_t gfid, const char *subdir, int type)
+{
+ int32_t op_errno __attribute__((unused)) = 0;
+ index_priv_t *priv = NULL;
+ int ret = 0;
+ char gfid_path[PATH_MAX] = {0};
+ char rename_dst[PATH_MAX] = {
+ 0,
+ };
+ uuid_t uuid;
+
+ priv = this->private;
+ GF_ASSERT_AND_GOTO_WITH_ERROR(this->name, !gf_uuid_is_null(gfid), out,
+ op_errno, EINVAL);
+ make_gfid_path(priv->index_basepath, subdir, gfid, gfid_path,
+ sizeof(gfid_path));
+
+ if ((strcmp(subdir, ENTRY_CHANGES_SUBDIR)) == 0) {
+ ret = sys_rmdir(gfid_path);
+ /* rmdir above could fail with ENOTEMPTY if the indices under
+ * it were created when granular-entry-heal was enabled, whereas
+ * the actual heal that happened was non-granular (or full) in
+ * nature, resulting in name indices getting left out. To
+ * clean up this directory without it affecting the IO path perf,
+ * the directory is renamed to a unique name under
+ * indices/entry-changes. Self-heal will pick up this entry
+ * during crawl and on lookup into the file system figure that
+ * the index is stale and subsequently wipe it out using rmdir().
+ */
+ if ((ret) && (errno == ENOTEMPTY)) {
+ gf_uuid_generate(uuid);
+ make_gfid_path(priv->index_basepath, subdir, uuid, rename_dst,
+ sizeof(rename_dst));
+ ret = sys_rename(gfid_path, rename_dst);
+ }
+ } else {
+ ret = sys_unlink(gfid_path);
+ }
+
+ if (ret && (errno != ENOENT)) {
+ gf_msg(this->name, GF_LOG_ERROR, errno, INDEX_MSG_INDEX_DEL_FAILED,
+ "%s: failed to delete"
+ " from index",
+ gfid_path);
+ ret = -errno;
+ goto out;
+ }
+
+ index_dec_link_count(priv, type);
+ ret = 0;
+out:
+ return ret;
+}
+
+static gf_boolean_t
+_is_xattr_in_watchlist(dict_t *d, char *k, data_t *v, void *tmp)
+{
+ if (!strncmp(k, tmp, strlen(k)))
+ return _gf_true;
+
+ return _gf_false;
+}
+
+static gf_boolean_t
+is_xattr_in_watchlist(dict_t *this, char *key, data_t *value, void *matchdata)
+{
+ int ret = -1;
+
+    // matchdata is a list of xattrs.
+    // key is strncmp'ed against each xattr in matchdata.
+    // ret will be 0 if the key pattern is not present in matchdata,
+    // else ret will be the number of xattrs the key pattern matches.
+ ret = dict_foreach_match(matchdata, _is_xattr_in_watchlist, key,
+ dict_null_foreach_fn, NULL);
+
+ if (ret > 0)
+ return _gf_true;
+ return _gf_false;
+}
+
+static int
+index_find_xattr_type(dict_t *d, char *k, data_t *v)
+{
+ int idx = -1;
+ index_priv_t *priv = THIS->private;
+
+ if (priv->dirty_watchlist &&
+ is_xattr_in_watchlist(d, k, v, priv->dirty_watchlist))
+ idx = DIRTY;
+ else if (priv->pending_watchlist &&
+ is_xattr_in_watchlist(d, k, v, priv->pending_watchlist))
+ idx = XATTROP;
+
+ return idx;
+}
+
+int
+index_fill_zero_array(dict_t *d, char *k, data_t *v, void *adata)
+{
+ int idx = -1;
+ int *zfilled = adata;
+    // The zfilled array holds a `state` for each xattr type.
+    // state: whether this file's gfid link exists in the
+    // corresponding index directory or not.
+
+ idx = index_find_xattr_type(d, k, v);
+ if (idx == -1)
+ return 0;
+ zfilled[idx] = 0;
+ return 0;
+}
+
+static int
+_check_key_is_zero_filled(dict_t *d, char *k, data_t *v, void *tmp)
+{
+ int *zfilled = tmp;
+ int idx = -1;
+
+ idx = index_find_xattr_type(d, k, v);
+ if (idx == -1)
+ return 0;
+
+ /* Along with checking that the value of a key is zero filled
+ * the key's corresponding index should be assigned
+ * appropriate value.
+ * zfilled[idx] will be 0(false) if value not zero.
+ * will be 1(true) if value is zero.
+ */
+ if (mem_0filled((const char *)v->data, v->len)) {
+ zfilled[idx] = 0;
+ return 0;
+ }
+
+ /* If zfilled[idx] was previously 0, it means at least
+ * one xattr of its "kind" is non-zero. Keep its value
+ * the same.
+ */
+ if (zfilled[idx])
+ zfilled[idx] = 1;
+ return 0;
+}
+
+int
+index_entry_create(xlator_t *this, inode_t *inode, char *filename)
+{
+ int ret = -1;
+ int op_errno = 0;
+ char pgfid_path[PATH_MAX] = {0};
+ char entry_path[PATH_MAX] = {0};
+ index_priv_t *priv = NULL;
+ index_inode_ctx_t *ctx = NULL;
+ int32_t len = 0;
+
+ priv = this->private;
+
+ GF_ASSERT_AND_GOTO_WITH_ERROR(this->name, !gf_uuid_is_null(inode->gfid),
+ out, op_errno, EINVAL);
+ GF_ASSERT_AND_GOTO_WITH_ERROR(this->name, filename, out, op_errno, EINVAL);
+
+ ret = index_inode_ctx_get(inode, this, &ctx);
+ if (ret) {
+ op_errno = EINVAL;
+ gf_msg(this->name, GF_LOG_ERROR, op_errno,
+ INDEX_MSG_INODE_CTX_GET_SET_FAILED,
+ "Not able to get inode ctx for %s", uuid_utoa(inode->gfid));
+ goto out;
+ }
+
+ make_gfid_path(priv->index_basepath, ENTRY_CHANGES_SUBDIR, inode->gfid,
+ pgfid_path, sizeof(pgfid_path));
+
+ if (ctx->state[ENTRY_CHANGES] != IN) {
+ ret = sys_mkdir(pgfid_path, 0600);
+ if (ret != 0 && errno != EEXIST) {
+ op_errno = errno;
+ goto out;
+ }
+ ctx->state[ENTRY_CHANGES] = IN;
+ }
+
+ if (strchr(filename, '/')) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, INDEX_MSG_INDEX_ADD_FAILED,
+ "Got invalid entry (%s) for pargfid path (%s)", filename,
+ pgfid_path);
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ len = snprintf(entry_path, sizeof(entry_path), "%s/%s", pgfid_path,
+ filename);
+ if ((len < 0) || (len >= sizeof(entry_path))) {
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ op_errno = 0;
+
+ ret = index_link_to_base(this, entry_path, ENTRY_CHANGES_SUBDIR);
+out:
+ if (op_errno)
+ ret = -op_errno;
+ return ret;
+}
+
+int
+index_entry_delete(xlator_t *this, uuid_t pgfid, char *filename)
+{
+ int ret = 0;
+ int op_errno = 0;
+ char pgfid_path[PATH_MAX] = {0};
+ char entry_path[PATH_MAX] = {0};
+ index_priv_t *priv = NULL;
+ int32_t len = 0;
+
+ priv = this->private;
+
+ GF_ASSERT_AND_GOTO_WITH_ERROR(this->name, !gf_uuid_is_null(pgfid), out,
+ op_errno, EINVAL);
+ GF_ASSERT_AND_GOTO_WITH_ERROR(this->name, filename, out, op_errno, EINVAL);
+
+ make_gfid_path(priv->index_basepath, ENTRY_CHANGES_SUBDIR, pgfid,
+ pgfid_path, sizeof(pgfid_path));
+
+ if (strchr(filename, '/')) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, INDEX_MSG_INDEX_DEL_FAILED,
+ "Got invalid entry (%s) for pargfid path (%s)", filename,
+ pgfid_path);
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ len = snprintf(entry_path, sizeof(entry_path), "%s/%s", pgfid_path,
+ filename);
+ if ((len < 0) || (len >= sizeof(entry_path))) {
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ ret = sys_unlink(entry_path);
+ if (ret && (errno != ENOENT)) {
+ op_errno = errno;
+ gf_msg(this->name, GF_LOG_ERROR, op_errno, INDEX_MSG_INDEX_DEL_FAILED,
+ "%s: failed to delete from index/entry-changes", entry_path);
+ }
+
+out:
+ return -op_errno;
+}
+
+int
+index_entry_action(xlator_t *this, inode_t *inode, dict_t *xdata, char *key)
+{
+ int ret = 0;
+ char *filename = NULL;
+
+ ret = dict_get_str(xdata, key, &filename);
+ if (ret != 0) {
+ ret = 0;
+ goto out;
+ }
+
+ if (strcmp(key, GF_XATTROP_ENTRY_IN_KEY) == 0)
+ ret = index_entry_create(this, inode, filename);
+ else if (strcmp(key, GF_XATTROP_ENTRY_OUT_KEY) == 0)
+ ret = index_entry_delete(this, inode->gfid, filename);
+
+out:
+ return ret;
+}
+
+void
+_index_action(xlator_t *this, inode_t *inode, int *zfilled)
+{
+ int ret = 0;
+ int i = 0;
+ index_inode_ctx_t *ctx = NULL;
+ char *subdir = NULL;
+
+ ret = index_inode_ctx_get(inode, this, &ctx);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL,
+ INDEX_MSG_INODE_CTX_GET_SET_FAILED,
+ "Not able to get"
+ " inode context for %s.",
+ uuid_utoa(inode->gfid));
+ goto out;
+ }
+
+ for (i = 0; i < XATTROP_TYPE_END; i++) {
+ subdir = index_get_subdir_from_type(i);
+ if (zfilled[i] == 1) {
+ if (ctx->state[i] == NOTIN)
+ continue;
+ ret = index_del(this, inode->gfid, subdir, i);
+ if (!ret)
+ ctx->state[i] = NOTIN;
+ } else if (zfilled[i] == 0) {
+ if (ctx->state[i] == IN)
+ continue;
+ ret = index_add(this, inode->gfid, subdir, i);
+ if (!ret)
+ ctx->state[i] = IN;
+ }
+ }
+out:
+ return;
+}
+
+static void
+index_init_state(xlator_t *this, inode_t *inode, index_inode_ctx_t *ctx,
+ char *subdir)
+{
+ int ret = -1;
+ char pgfid_path[PATH_MAX] = {0};
+ struct stat st = {0};
+ index_priv_t *priv = NULL;
+
+ priv = this->private;
+
+ make_gfid_path(priv->index_basepath, subdir, inode->gfid, pgfid_path,
+ sizeof(pgfid_path));
+
+ ret = sys_stat(pgfid_path, &st);
+ if (ret == 0)
+ ctx->state[ENTRY_CHANGES] = IN;
+ else if (ret != 0 && errno == ENOENT)
+ ctx->state[ENTRY_CHANGES] = NOTIN;
+
+ return;
+}
+
+void
+xattrop_index_action(xlator_t *this, index_local_t *local, dict_t *xattr,
+ dict_match_t match, void *match_data)
+{
+ int ret = 0;
+ int zfilled[XATTROP_TYPE_END] = {
+ 0,
+ };
+ int8_t value = 0;
+ char *subdir = NULL;
+ dict_t *req_xdata = NULL;
+ inode_t *inode = NULL;
+ index_inode_ctx_t *ctx = NULL;
+
+ inode = local->inode;
+ req_xdata = local->xdata;
+
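+    /* -1 marks xattr types not seen in the reply; _check_key_is_zero_filled
+       sets an entry to 0 when a value still has non-zero (pending) bytes and
+       to 1 when all values are zero-filled, which _index_action uses to add
+       or remove the corresponding index link */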
+ memset(zfilled, -1, sizeof(zfilled));
+ ret = dict_foreach_match(xattr, match, match_data,
+ _check_key_is_zero_filled, zfilled);
+ _index_action(this, inode, zfilled);
+
+ if (req_xdata) {
+ ret = index_entry_action(this, inode, req_xdata,
+ GF_XATTROP_ENTRY_OUT_KEY);
+
+ ret = dict_get_int8(req_xdata, GF_XATTROP_PURGE_INDEX, &value);
+ if ((ret) || (value == 0))
+ goto out;
+ }
+
+ if (zfilled[XATTROP] != 1)
+ goto out;
+
+ if (inode->ia_type != IA_IFDIR)
+ goto out;
+
+ subdir = index_get_subdir_from_type(ENTRY_CHANGES);
+ ret = index_inode_ctx_get(inode, this, &ctx);
+ if (ctx->state[ENTRY_CHANGES] == UNKNOWN)
+ index_init_state(this, inode, ctx, subdir);
+ if (ctx->state[ENTRY_CHANGES] == IN) {
+ ret = index_del(this, inode->gfid, subdir, ENTRY_CHANGES);
+ ctx->state[ENTRY_CHANGES] = NOTIN;
+ }
+
+out:
+ return;
+}
+
+static gf_boolean_t
+index_xattrop_track(xlator_t *this, gf_xattrop_flags_t flags, dict_t *dict)
+{
+ index_priv_t *priv = this->private;
+
+ if (flags == GF_XATTROP_ADD_ARRAY)
+ return _gf_true;
+
+ if (flags != GF_XATTROP_ADD_ARRAY64)
+ return _gf_false;
+
+ if (!priv->pending_watchlist)
+ return _gf_false;
+
+ if (dict_foreach_match(dict, is_xattr_in_watchlist, priv->pending_watchlist,
+ dict_null_foreach_fn, NULL) > 0)
+ return _gf_true;
+
+ return _gf_false;
+}
+
+int
+index_inode_path(xlator_t *this, inode_t *inode, char *dirpath, size_t len)
+{
+ char *subdir = NULL;
+ int ret = 0;
+ index_priv_t *priv = NULL;
+ index_inode_ctx_t *ictx = NULL;
+
+ priv = this->private;
+ if (!index_is_fop_on_internal_inode(this, inode, NULL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
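+    /* a top-level virtual directory maps directly to <base>/<subdir>;
+       otherwise this is a per-pargfid directory under entry-changes whose
+       name is stored in the inode ctx */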
+ subdir = index_get_subdir_from_vgfid(priv, inode->gfid);
+ if (subdir) {
+ if (len <= strlen(priv->index_basepath) + 1 /*'/'*/ + strlen(subdir)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ make_index_dir_path(priv->index_basepath, subdir, dirpath, len);
+ } else {
+ ret = index_inode_ctx_get(inode, this, &ictx);
+ if (ret)
+ goto out;
+ if (gf_uuid_is_null(ictx->virtual_pargfid)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ make_index_dir_path(priv->index_basepath, ENTRY_CHANGES_SUBDIR, dirpath,
+ len);
+ if (len <= strlen(dirpath) + 1 /*'/'*/ + SLEN(UUID0_STR)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ strcat(dirpath, "/");
+ strcat(dirpath, uuid_utoa(ictx->virtual_pargfid));
+ }
+out:
+ return ret;
+}
+
+int
+__index_fd_ctx_get(fd_t *fd, xlator_t *this, index_fd_ctx_t **ctx)
+{
+ int ret = 0;
+ index_fd_ctx_t *fctx = NULL;
+ uint64_t tmpctx = 0;
+ char dirpath[PATH_MAX] = {0};
+
+ ret = __fd_ctx_get(fd, this, &tmpctx);
+ if (!ret) {
+ fctx = (index_fd_ctx_t *)(long)tmpctx;
+ *ctx = fctx;
+ goto out;
+ }
+
+ ret = index_inode_path(this, fd->inode, dirpath, sizeof(dirpath));
+ if (ret)
+ goto out;
+
+ fctx = GF_CALLOC(1, sizeof(*fctx), gf_index_fd_ctx_t);
+ if (!fctx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ fctx->dir = sys_opendir(dirpath);
+ if (!fctx->dir) {
+ ret = -errno;
+ GF_FREE(fctx);
+ fctx = NULL;
+ goto out;
+ }
+ fctx->dir_eof = -1;
+
+ ret = __fd_ctx_set(fd, this, (uint64_t)(long)fctx);
+ if (ret) {
+ (void)sys_closedir(fctx->dir);
+ GF_FREE(fctx);
+ fctx = NULL;
+ ret = -EINVAL;
+ goto out;
+ }
+ *ctx = fctx;
+out:
+ return ret;
+}
+
+int
+index_fd_ctx_get(fd_t *fd, xlator_t *this, index_fd_ctx_t **ctx)
+{
+ int ret = 0;
+ LOCK(&fd->lock);
+ {
+ ret = __index_fd_ctx_get(fd, this, ctx);
+ }
+ UNLOCK(&fd->lock);
+ return ret;
+}
+
+// new - Not NULL means start a fop
+// new - NULL means done processing the fop
+void
+index_queue_process(xlator_t *this, inode_t *inode, call_stub_t *new)
+{
+ call_stub_t *stub = NULL;
+ index_inode_ctx_t *ctx = NULL;
+ int ret = 0;
+ call_frame_t *frame = NULL;
+
+ LOCK(&inode->lock);
+ {
+ ret = __index_inode_ctx_get(inode, this, &ctx);
+ if (ret)
+ goto unlock;
+
+ if (new) {
+ __index_enqueue(&ctx->callstubs, new);
+ new = NULL;
+ } else {
+ ctx->processing = _gf_false;
+ }
+
+ if (!ctx->processing) {
+ stub = __index_dequeue(&ctx->callstubs);
+ if (stub)
+ ctx->processing = _gf_true;
+ else
+ ctx->processing = _gf_false;
+ }
+ }
+unlock:
+ UNLOCK(&inode->lock);
+
+ if (ret && new) {
+ frame = new->frame;
+ if (new->fop == GF_FOP_XATTROP) {
+ INDEX_STACK_UNWIND(xattrop, frame, -1, ENOMEM, NULL, NULL);
+ } else if (new->fop == GF_FOP_FXATTROP) {
+ INDEX_STACK_UNWIND(fxattrop, frame, -1, ENOMEM, NULL, NULL);
+ }
+ call_stub_destroy(new);
+ } else if (stub) {
+ call_resume(stub);
+ }
+ return;
+}
+
+static int
+xattrop_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, dict_t *xattr, dict_t *xdata, dict_match_t match,
+ dict_t *matchdata)
+{
+ inode_t *inode = NULL;
+ index_local_t *local = NULL;
+
+ local = frame->local;
+ inode = inode_ref(local->inode);
+
+ if (op_ret < 0)
+ goto out;
+
+ xattrop_index_action(this, local, xattr, match, matchdata);
+out:
+ INDEX_STACK_UNWIND(xattrop, frame, op_ret, op_errno, xattr, xdata);
+ index_queue_process(this, inode, NULL);
+ inode_unref(inode);
+
+ return 0;
+}
+
+int32_t
+index_xattrop_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xattr,
+ dict_t *xdata)
+{
+ index_priv_t *priv = this->private;
+
+ xattrop_cbk(frame, cookie, this, op_ret, op_errno, xattr, xdata,
+ is_xattr_in_watchlist, priv->complete_watchlist);
+ return 0;
+}
+
+int32_t
+index_xattrop64_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xattr,
+ dict_t *xdata)
+{
+ index_priv_t *priv = this->private;
+
+ return xattrop_cbk(frame, cookie, this, op_ret, op_errno, xattr, xdata,
+ is_xattr_in_watchlist, priv->pending_watchlist);
+}
+
+void
+index_xattrop_do(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd,
+ gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
+{
+ int ret = -1;
+ int zfilled[XATTROP_TYPE_END] = {
+ 0,
+ };
+ index_local_t *local = NULL;
+ fop_xattrop_cbk_t x_cbk = NULL;
+
+ local = frame->local;
+
+ if (optype == GF_XATTROP_ADD_ARRAY)
+ x_cbk = index_xattrop_cbk;
+ else
+ x_cbk = index_xattrop64_cbk;
+
+    // In the wind phase, bring the gfid into the index. This way, if the
+    // brick crashes just after posix performs the xattrop but before the
+    // _cbk reaches the index xlator, we will still have the gfid in the index.
+ memset(zfilled, -1, sizeof(zfilled));
+
+ /* Foreach xattr, set corresponding index of zfilled to 1
+ * zfilled[index] = 1 implies the xattr's value is zero filled
+ * and should be added in its corresponding subdir.
+ *
+ * zfilled should be set to 1 only for those index that
+ * exist in xattr variable. This is to distinguish
+ * between different types of volumes.
+     * For example, if the check is not made,
+     * zfilled[DIRTY] gets set to 1 for EC volumes, and an attempt
+     * is made to create an index file in the indices/dirty dir,
+     * which doesn't exist for an EC volume.
+ */
+ ret = dict_foreach(xattr, index_fill_zero_array, zfilled);
+
+ _index_action(this, local->inode, zfilled);
+ if (xdata)
+ ret = index_entry_action(this, local->inode, xdata,
+ GF_XATTROP_ENTRY_IN_KEY);
+ if (ret < 0) {
+ x_cbk(frame, NULL, this, -1, -ret, NULL, NULL);
+ return;
+ }
+
+ if (loc)
+ STACK_WIND(frame, x_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->xattrop, loc, optype, xattr, xdata);
+ else
+ STACK_WIND(frame, x_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fxattrop, fd, optype, xattr, xdata);
+}
+
+int
+index_xattrop_wrapper(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
+{
+ index_xattrop_do(frame, this, loc, NULL, optype, xattr, xdata);
+ return 0;
+}
+
+int
+index_fxattrop_wrapper(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
+{
+ index_xattrop_do(frame, this, NULL, fd, optype, xattr, xdata);
+ return 0;
+}
+
+int32_t
+index_xattrop(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ gf_xattrop_flags_t flags, dict_t *dict, dict_t *xdata)
+{
+ call_stub_t *stub = NULL;
+ index_local_t *local = NULL;
+
+ if (!index_xattrop_track(this, flags, dict))
+ goto out;
+
+ local = mem_get0(this->local_pool);
+ if (!local)
+ goto err;
+
+ frame->local = local;
+ local->inode = inode_ref(loc->inode);
+ if (xdata)
+ local->xdata = dict_ref(xdata);
+ stub = fop_xattrop_stub(frame, index_xattrop_wrapper, loc, flags, dict,
+ xdata);
+
+err:
+ if ((!local) || (!stub)) {
+ INDEX_STACK_UNWIND(xattrop, frame, -1, ENOMEM, NULL, NULL);
+ return 0;
+ }
+
+ index_queue_process(this, loc->inode, stub);
+ return 0;
+out:
+ STACK_WIND(frame, default_xattrop_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->xattrop, loc, flags, dict, xdata);
+ return 0;
+}
+
+int32_t
+index_fxattrop(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ gf_xattrop_flags_t flags, dict_t *dict, dict_t *xdata)
+{
+ call_stub_t *stub = NULL;
+ index_local_t *local = NULL;
+
+ if (!index_xattrop_track(this, flags, dict))
+ goto out;
+
+ local = mem_get0(this->local_pool);
+ if (!local)
+ goto err;
+
+ frame->local = local;
+ local->inode = inode_ref(fd->inode);
+ if (xdata)
+ local->xdata = dict_ref(xdata);
+ stub = fop_fxattrop_stub(frame, index_fxattrop_wrapper, fd, flags, dict,
+ xdata);
+
+err:
+ if ((!local) || (!stub)) {
+ INDEX_STACK_UNWIND(fxattrop, frame, -1, ENOMEM, NULL, xdata);
+ return 0;
+ }
+
+ index_queue_process(this, fd->inode, stub);
+ return 0;
+out:
+ STACK_WIND(frame, default_fxattrop_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fxattrop, fd, flags, dict, xdata);
+ return 0;
+}
+
+uint64_t
+index_entry_count(xlator_t *this, char *subdir)
+{
+ uint64_t count = 0;
+ index_priv_t *priv = NULL;
+ DIR *dirp = NULL;
+ struct dirent *entry = NULL;
+ struct dirent scratch[2] = {
+ {
+ 0,
+ },
+ };
+ char index_dir[PATH_MAX] = {
+ 0,
+ };
+
+ priv = this->private;
+
+ make_index_dir_path(priv->index_basepath, subdir, index_dir,
+ sizeof(index_dir));
+
+ dirp = sys_opendir(index_dir);
+ if (!dirp)
+ return 0;
+
+ for (;;) {
+ errno = 0;
+ entry = sys_readdir(dirp, scratch);
+ if (!entry || errno != 0)
+ break;
+
+ if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0)
+ continue;
+
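+        /* base index files are named "<subdir>-<uuid>"; skip them, only
+           per-gfid links are counted */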
+ if (!strncmp(entry->d_name, subdir, strlen(subdir)))
+ continue;
+
+ count++;
+ }
+
+ (void)sys_closedir(dirp);
+
+ return count;
+}
+
+int32_t
+index_getxattr_wrapper(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, dict_t *xdata)
+{
+ index_priv_t *priv = NULL;
+ dict_t *xattr = NULL;
+ int ret = 0;
+ int vgfid_type = 0;
+ uint64_t count = 0;
+
+ priv = this->private;
+
+ xattr = dict_new();
+ if (!xattr) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ vgfid_type = index_get_type_from_vgfid_xattr(name);
+ if (vgfid_type >= 0) {
+ ret = dict_set_static_bin(xattr, (char *)name,
+ priv->internal_vgfid[vgfid_type],
+ sizeof(priv->internal_vgfid[vgfid_type]));
+ if (ret) {
+ ret = -EINVAL;
+ gf_msg(this->name, GF_LOG_ERROR, -ret, INDEX_MSG_DICT_SET_FAILED,
+ "xattrop index "
+ "gfid set failed");
+ goto done;
+ }
+ }
+
+    /* TODO: Check what kind of link-counts are needed for
+     * ENTRY-CHANGES before refactoring this block with an array */
+ if (strcmp(name, GF_XATTROP_INDEX_COUNT) == 0) {
+ count = index_entry_count(this, XATTROP_SUBDIR);
+
+ ret = dict_set_uint64(xattr, (char *)name, count);
+ if (ret) {
+ ret = -EINVAL;
+ gf_msg(this->name, GF_LOG_ERROR, -ret, INDEX_MSG_DICT_SET_FAILED,
+ "xattrop index "
+ "count set failed");
+ goto done;
+ }
+ } else if (strcmp(name, GF_XATTROP_DIRTY_COUNT) == 0) {
+ count = index_entry_count(this, DIRTY_SUBDIR);
+
+ ret = dict_set_uint64(xattr, (char *)name, count);
+ if (ret) {
+ ret = -EINVAL;
+ gf_msg(this->name, GF_LOG_ERROR, -ret, INDEX_MSG_DICT_SET_FAILED,
+ "dirty index "
+ "count set failed");
+ goto done;
+ }
+ }
+done:
+ if (ret)
+ STACK_UNWIND_STRICT(getxattr, frame, -1, -ret, xattr, NULL);
+ else
+ STACK_UNWIND_STRICT(getxattr, frame, 0, 0, xattr, NULL);
+
+ if (xattr)
+ dict_unref(xattr);
+
+ return 0;
+}
+
+static int
+index_save_pargfid_for_entry_changes(xlator_t *this, loc_t *loc, char *path)
+{
+ index_priv_t *priv = NULL;
+ index_inode_ctx_t *ctx = NULL;
+ int ret = 0;
+
+ priv = this->private;
+ if (!loc)
+ return -1;
+ if (gf_uuid_compare(loc->pargfid, priv->internal_vgfid[ENTRY_CHANGES]))
+ return 0;
+
+ ret = index_inode_ctx_get(loc->inode, this, &ctx);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL,
+ INDEX_MSG_INODE_CTX_GET_SET_FAILED,
+ "Unable to get inode context for %s", path);
+ return -EINVAL;
+ }
+ ret = gf_uuid_parse(loc->name, ctx->virtual_pargfid);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL,
+ INDEX_MSG_INODE_CTX_GET_SET_FAILED,
+ "Unable to store "
+ "virtual gfid in inode context for %s",
+ path);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int32_t
+index_lookup_wrapper(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ dict_t *xattr_req)
+{
+ index_priv_t *priv = NULL;
+ struct stat lstatbuf = {0};
+ int ret = 0;
+ int32_t op_errno = EINVAL;
+ int32_t op_ret = -1;
+ uint64_t val = IA_INVAL;
+ char path[PATH_MAX] = {0};
+ struct iatt stbuf = {
+ 0,
+ };
+ struct iatt postparent = {
+ 0,
+ };
+ dict_t *xattr = NULL;
+ gf_boolean_t is_dir = _gf_false;
+ char *subdir = NULL;
+ loc_t iloc = {0};
+
+ priv = this->private;
+ loc_copy(&iloc, loc);
+
+ VALIDATE_OR_GOTO(loc, done);
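+    /* Three lookup cases are handled here: an entry inside one of the
+     * internal index directories (resolved via the parent), the virtual
+     * index directory itself (resolved via its virtual gfid), and any
+     * other index inode resolved through the inode table. */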
+ if (index_is_fop_on_internal_inode(this, loc->parent, loc->pargfid)) {
+ subdir = index_get_subdir_from_vgfid(priv, loc->pargfid);
+ ret = index_inode_path(this, loc->parent, path, sizeof(path));
+ if (ret < 0) {
+ op_errno = -ret;
+ goto done;
+ }
+ ret = snprintf(path + strlen(path), PATH_MAX - strlen(path), "/%s",
+ loc->name);
+
+ if ((ret < 0) || (ret > (PATH_MAX - strlen(path)))) {
+ op_errno = EINVAL;
+ op_ret = -1;
+ goto done;
+ }
+
+ } else if (index_is_virtual_gfid(priv, loc->gfid)) {
+ subdir = index_get_subdir_from_vgfid(priv, loc->gfid);
+ make_index_dir_path(priv->index_basepath, subdir, path, sizeof(path));
+ is_dir = _gf_true;
+
+ if ((xattr_req) && (dict_get(xattr_req, GF_INDEX_IA_TYPE_GET_REQ))) {
+ if (0 == strcmp(subdir, index_get_subdir_from_type(ENTRY_CHANGES)))
+ val = IA_IFDIR;
+ else
+ val = IA_IFREG;
+ }
+ } else {
+ if (!inode_is_linked(loc->inode)) {
+ inode_unref(iloc.inode);
+ iloc.inode = inode_find(loc->inode->table, loc->gfid);
+ }
+ ret = index_inode_path(this, iloc.inode, path, sizeof(path));
+ if (ret < 0) {
+ op_errno = -ret;
+ goto done;
+ }
+ }
+ ret = sys_lstat(path, &lstatbuf);
+ if (ret) {
+ gf_msg_debug(this->name, errno, "Stat failed on %s dir ", path);
+ op_errno = errno;
+ goto done;
+ } else if (!S_ISDIR(lstatbuf.st_mode) && is_dir) {
+ op_errno = ENOTDIR;
+ gf_msg_debug(this->name, op_errno,
+ "Stat failed on %s dir, "
+ "not a directory",
+ path);
+ goto done;
+ }
+ xattr = dict_new();
+ if (!xattr) {
+ op_errno = ENOMEM;
+ goto done;
+ }
+
+ if (val != IA_INVAL) {
+ ret = dict_set_uint64(xattr, GF_INDEX_IA_TYPE_GET_RSP, val);
+ if (ret) {
+ op_ret = -1;
+ op_errno = -ret;
+ goto done;
+ }
+ }
+
+ iatt_from_stat(&stbuf, &lstatbuf);
+ if (is_dir || inode_is_linked(iloc.inode))
+ loc_gfid(&iloc, stbuf.ia_gfid);
+ else
+ gf_uuid_generate(stbuf.ia_gfid);
+
+ ret = index_save_pargfid_for_entry_changes(this, &iloc, path);
+ if (ret) {
+ op_ret = -1;
+ op_errno = -ret;
+ goto done;
+ }
+
+ stbuf.ia_ino = -1;
+ op_ret = 0;
+done:
+ STACK_UNWIND_STRICT(lookup, frame, op_ret, op_errno,
+ loc ? loc->inode : NULL, &stbuf, xattr, &postparent);
+ if (xattr)
+ dict_unref(xattr);
+ loc_wipe(&iloc);
+ return 0;
+}
+
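+/* Synctask worker: for each gfid-named entry returned by readdir on an
+ * index directory, resolve its ia_type/d_type from the inode table, or via
+ * a lookup on the first child when the inode is not cached. */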
+int
+index_get_gfid_type(void *opaque)
+{
+ gf_dirent_t *entry = NULL;
+ xlator_t *this = THIS;
+ struct index_syncop_args *args = opaque;
+ loc_t loc = {0};
+ struct iatt iatt = {0};
+ int ret = 0;
+
+ list_for_each_entry(entry, &args->entries->list, list)
+ {
+ if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0)
+ continue;
+
+ loc_wipe(&loc);
+
+ entry->d_type = gf_d_type_from_ia_type(IA_INVAL);
+ entry->d_stat.ia_type = IA_INVAL;
+ if (gf_uuid_parse(entry->d_name, loc.gfid))
+ continue;
+
+ loc.inode = inode_find(args->parent->table, loc.gfid);
+ if (loc.inode) {
+ entry->d_stat.ia_type = loc.inode->ia_type;
+ entry->d_type = gf_d_type_from_ia_type(loc.inode->ia_type);
+ continue;
+ }
+ loc.inode = inode_new(args->parent->table);
+ if (!loc.inode)
+ continue;
+ ret = syncop_lookup(FIRST_CHILD(this), &loc, &iatt, 0, 0, 0);
+ if (ret == 0) {
+ entry->d_type = gf_d_type_from_ia_type(iatt.ia_type);
+ entry->d_stat = iatt;
+ }
+ }
+ loc_wipe(&loc);
+
+ return 0;
+}
+
+int32_t
+index_readdir_wrapper(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ size_t size, off_t off, dict_t *xdata)
+{
+ index_fd_ctx_t *fctx = NULL;
+ index_priv_t *priv = NULL;
+ DIR *dir = NULL;
+ int ret = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = 0;
+ int count = 0;
+ gf_dirent_t entries;
+ struct index_syncop_args args = {0};
+
+ priv = this->private;
+ INIT_LIST_HEAD(&entries.list);
+
+ ret = index_fd_ctx_get(fd, this, &fctx);
+ if (ret < 0) {
+ op_errno = -ret;
+ gf_msg(this->name, GF_LOG_WARNING, op_errno, INDEX_MSG_FD_OP_FAILED,
+ "pfd is NULL, fd=%p", fd);
+ goto done;
+ }
+
+ dir = fctx->dir;
+ if (!dir) {
+ op_errno = EINVAL;
+ gf_msg(this->name, GF_LOG_WARNING, op_errno,
+ INDEX_MSG_INDEX_READDIR_FAILED, "dir is NULL for fd=%p", fd);
+ goto done;
+ }
+
+ count = index_fill_readdir(fd, fctx, dir, off, size, &entries);
+
+ /* pick ENOENT to indicate EOF */
+ op_errno = errno;
+ op_ret = count;
+ if (index_is_virtual_gfid(priv, fd->inode->gfid) && xdata &&
+ dict_get(xdata, "get-gfid-type")) {
+ args.parent = fd->inode;
+ args.entries = &entries;
+ ret = synctask_new(this->ctx->env, index_get_gfid_type, NULL, NULL,
+ &args);
+ }
+done:
+ STACK_UNWIND_STRICT(readdir, frame, op_ret, op_errno, &entries, NULL);
+ gf_dirent_free(&entries);
+ return 0;
+}
+
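+/* nftw() callback used by index_wipe_index_subdir(); nftw is invoked with
+ * FTW_DEPTH, so entries are visited before their parent directory and the
+ * whole subtree is removed bottom-up. */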
+int
+deletion_handler(const char *fpath, const struct stat *sb, int typeflag,
+ struct FTW *ftwbuf)
+{
+ ia_type_t type = IA_INVAL;
+
+ switch (sb->st_mode & S_IFMT) {
+ case S_IFREG:
+ sys_unlink(fpath);
+ break;
+
+ case S_IFDIR:
+ sys_rmdir(fpath);
+ break;
+ default:
+ type = ia_type_from_st_mode(sb->st_mode);
+ gf_msg(THIS->name, GF_LOG_WARNING, EINVAL, INDEX_MSG_INVALID_ARGS,
+                   "%s is neither a regular file nor a directory - type:%s", fpath,
+ gf_inode_type_to_str(type));
+ break;
+ }
+ return 0;
+}
+
+static int
+index_wipe_index_subdir(void *opaque)
+{
+ struct index_syncop_args *args = opaque;
+
+ nftw(args->path, deletion_handler, 1, FTW_DEPTH | FTW_PHYS);
+ return 0;
+}
+
+static void
+index_get_parent_iatt(struct iatt *parent, char *path, loc_t *loc,
+ int32_t *op_ret, int32_t *op_errno)
+{
+ int ret = -1;
+ struct stat lstatbuf = {
+ 0,
+ };
+
+ ret = sys_lstat(path, &lstatbuf);
+ if (ret < 0) {
+ *op_ret = -1;
+ *op_errno = errno;
+ return;
+ }
+
+ iatt_from_stat(parent, &lstatbuf);
+ gf_uuid_copy(parent->ia_gfid, loc->pargfid);
+ parent->ia_ino = -1;
+
+ return;
+}
+
+int
+index_rmdir_wrapper(call_frame_t *frame, xlator_t *this, loc_t *loc, int flag,
+ dict_t *xdata)
+{
+ int ret = 0;
+ int32_t op_ret = 0;
+ int32_t op_errno = 0;
+ char *subdir = NULL;
+ char index_dir[PATH_MAX] = {0};
+ char index_subdir[PATH_MAX] = {0};
+ uuid_t gfid = {0};
+ struct iatt preparent = {0};
+ struct iatt postparent = {0};
+ index_priv_t *priv = NULL;
+ index_xattrop_type_t type = XATTROP_TYPE_UNSET;
+ struct index_syncop_args args = {
+ 0,
+ };
+
+ priv = this->private;
+
+ type = index_get_type_from_vgfid(priv, loc->pargfid);
+ subdir = index_get_subdir_from_vgfid(priv, loc->pargfid);
+ make_index_dir_path(priv->index_basepath, subdir, index_dir,
+ sizeof(index_dir));
+
+ index_get_parent_iatt(&preparent, index_dir, loc, &op_ret, &op_errno);
+ if (op_ret < 0)
+ goto done;
+
+ gf_uuid_parse(loc->name, gfid);
+ make_gfid_path(priv->index_basepath, subdir, gfid, index_subdir,
+ sizeof(index_subdir));
+
+ if (flag == 0) {
+ ret = index_del(this, gfid, subdir, type);
+ if (ret < 0) {
+ op_ret = -1;
+ op_errno = -ret;
+ goto done;
+ }
+ } else {
+ args.path = index_subdir;
+ ret = synctask_new(this->ctx->env, index_wipe_index_subdir, NULL, NULL,
+ &args);
+ }
+
+ index_get_parent_iatt(&postparent, index_dir, loc, &op_ret, &op_errno);
+ if (op_ret < 0)
+ goto done;
+
+done:
+ INDEX_STACK_UNWIND(rmdir, frame, op_ret, op_errno, &preparent, &postparent,
+ xdata);
+ return 0;
+}
+
+int
+index_unlink_wrapper(call_frame_t *frame, xlator_t *this, loc_t *loc, int flag,
+ dict_t *xdata)
+{
+ index_priv_t *priv = NULL;
+ index_inode_ctx_t *ictx = NULL;
+ int32_t op_ret = 0;
+ int32_t op_errno = 0;
+ int ret = 0;
+ index_xattrop_type_t type = XATTROP_TYPE_UNSET;
+ struct iatt preparent = {0};
+ struct iatt postparent = {0};
+ char index_dir[PATH_MAX] = {0};
+ char filepath[PATH_MAX] = {0};
+ uuid_t gfid = {0};
+ char *subdir = NULL;
+
+ priv = this->private;
+ type = index_get_type_from_vgfid(priv, loc->pargfid);
+ ret = index_inode_path(this, loc->parent, index_dir, sizeof(index_dir));
+ if (ret < 0) {
+ op_ret = -1;
+ op_errno = -ret;
+ goto done;
+ }
+
+ index_get_parent_iatt(&preparent, index_dir, loc, &op_ret, &op_errno);
+ if (op_ret < 0)
+ goto done;
+
+ if (type <= XATTROP_TYPE_UNSET) {
+ ret = index_inode_ctx_get(loc->parent, this, &ictx);
+ if ((ret == 0) && gf_uuid_is_null(ictx->virtual_pargfid)) {
+ ret = -EINVAL;
+ }
+ if (ret == 0) {
+ ret = index_entry_delete(this, ictx->virtual_pargfid,
+ (char *)loc->name);
+ }
+ } else if (type == ENTRY_CHANGES) {
+ make_file_path(priv->index_basepath, ENTRY_CHANGES_SUBDIR,
+ (char *)loc->name, filepath, sizeof(filepath));
+ ret = sys_unlink(filepath);
+ } else {
+ subdir = index_get_subdir_from_type(type);
+ gf_uuid_parse(loc->name, gfid);
+ ret = index_del(this, gfid, subdir, type);
+ }
+ if (ret < 0) {
+ op_ret = -1;
+ op_errno = -ret;
+ goto done;
+ }
+
+ index_get_parent_iatt(&postparent, index_dir, loc, &op_ret, &op_errno);
+ if (op_ret < 0)
+ goto done;
+done:
+ INDEX_STACK_UNWIND(unlink, frame, op_ret, op_errno, &preparent, &postparent,
+ xdata);
+ return 0;
+}
+
+int32_t
+index_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, dict_t *xdata)
+{
+ call_stub_t *stub = NULL;
+
+ if (!name ||
+ (!index_is_vgfid_xattr(name) && strcmp(GF_XATTROP_INDEX_COUNT, name) &&
+ strcmp(GF_XATTROP_DIRTY_COUNT, name)))
+ goto out;
+
+ stub = fop_getxattr_stub(frame, index_getxattr_wrapper, loc, name, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(getxattr, frame, -1, ENOMEM, NULL, NULL);
+ return 0;
+ }
+ worker_enqueue(this, stub);
+ return 0;
+out:
+ STACK_WIND(frame, default_getxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->getxattr, loc, name, xdata);
+ return 0;
+}
+
+int64_t
+index_fetch_link_count(xlator_t *this, index_xattrop_type_t type)
+{
+ index_priv_t *priv = this->private;
+ char *subdir = NULL;
+ struct stat lstatbuf = {
+ 0,
+ };
+ int ret = -1;
+ int64_t count = -1;
+ DIR *dirp = NULL;
+ struct dirent *entry = NULL;
+ struct dirent scratch[2] = {
+ {
+ 0,
+ },
+ };
+ char index_dir[PATH_MAX] = {
+ 0,
+ };
+ char index_path[PATH_MAX] = {
+ 0,
+ };
+
+ subdir = index_get_subdir_from_type(type);
+ make_index_dir_path(priv->index_basepath, subdir, index_dir,
+ sizeof(index_dir));
+
+ dirp = sys_opendir(index_dir);
+ if (!dirp)
+ goto out;
+
+ for (;;) {
+ errno = 0;
+ entry = sys_readdir(dirp, scratch);
+ if (!entry || errno != 0) {
+ if (count == -1)
+ count = 0;
+ goto out;
+ }
+
+ if (strcmp(entry->d_name, ".") == 0 || strcmp(entry->d_name, "..") == 0)
+ continue;
+
+ make_file_path(priv->index_basepath, subdir, entry->d_name, index_path,
+ sizeof(index_path));
+
+ ret = sys_lstat(index_path, &lstatbuf);
+ if (ret < 0) {
+ count = -2;
+ continue;
+ } else {
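+            /* Entries in this index dir are hard links to a common base
+             * index file, so st_nlink - 1 (excluding the base file's own
+             * link) gives the number of pending entries. */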
+ count = lstatbuf.st_nlink - 1;
+ if (count == 0)
+ continue;
+ else
+ break;
+ }
+ }
+out:
+ if (dirp)
+ (void)sys_closedir(dirp);
+ return count;
+}
+
+dict_t *
+index_fill_link_count(xlator_t *this, dict_t *xdata)
+{
+ int ret = -1;
+ index_priv_t *priv = NULL;
+ int64_t count = -1;
+
+ priv = this->private;
+ xdata = (xdata) ? dict_ref(xdata) : dict_new();
+ if (!xdata)
+ goto out;
+
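+    /* Use the cached xattrop link count when it is valid; a negative cached
+     * value means it is unknown, so rescan the index directory and refresh
+     * it. */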
+ index_get_link_count(priv, &count, XATTROP);
+ if (count < 0) {
+ count = index_fetch_link_count(this, XATTROP);
+ index_set_link_count(priv, count, XATTROP);
+ }
+
+ if (count == 0) {
+ ret = dict_set_int8(xdata, "link-count", 0);
+ if (ret < 0)
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, INDEX_MSG_DICT_SET_FAILED,
+ "Unable to set link-count");
+ } else {
+ ret = dict_set_int8(xdata, "link-count", 1);
+ if (ret < 0)
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, INDEX_MSG_DICT_SET_FAILED,
+ "Unable to set link-count");
+ }
+
+out:
+ return xdata;
+}
+
+int32_t
+index_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, dict_t *xdata, struct iatt *postparent)
+{
+ xdata = index_fill_link_count(this, xdata);
+ STACK_UNWIND_STRICT(lookup, frame, op_ret, op_errno, inode, buf, xdata,
+ postparent);
+ if (xdata)
+ dict_unref(xdata);
+ return 0;
+}
+
+int32_t
+index_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xattr_req)
+{
+ inode_t *inode = NULL;
+ call_stub_t *stub = NULL;
+ char *flag = NULL;
+ int ret = -1;
+
+ if (!index_is_fop_on_internal_inode(this, loc->parent, loc->pargfid) &&
+ !index_is_fop_on_internal_inode(this, loc->inode, loc->gfid)) {
+ if (!inode_is_linked(loc->inode)) {
+ inode = inode_find(loc->inode->table, loc->gfid);
+ if (!index_is_fop_on_internal_inode(this, inode, loc->gfid)) {
+ inode_unref(inode);
+ goto normal;
+ }
+ inode_unref(inode);
+ } else {
+ goto normal;
+ }
+ }
+
+ stub = fop_lookup_stub(frame, index_lookup_wrapper, loc, xattr_req);
+ if (!stub) {
+ STACK_UNWIND_STRICT(lookup, frame, -1, ENOMEM, loc->inode, NULL, NULL,
+ NULL);
+ return 0;
+ }
+ worker_enqueue(this, stub);
+ return 0;
+normal:
+ ret = dict_get_str_sizen(xattr_req, "link-count", &flag);
+ if ((ret == 0) && (strcmp(flag, GF_XATTROP_INDEX_COUNT) == 0)) {
+ STACK_WIND(frame, index_lookup_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lookup, loc, xattr_req);
+ } else {
+ STACK_WIND(frame, default_lookup_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lookup, loc, xattr_req);
+ }
+
+ return 0;
+}
+
+int32_t
+index_fstat_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf,
+ dict_t *xdata)
+{
+ xdata = index_fill_link_count(this, xdata);
+ STACK_UNWIND_STRICT(fstat, frame, op_ret, op_errno, buf, xdata);
+ if (xdata)
+ dict_unref(xdata);
+ return 0;
+}
+
+int32_t
+index_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+{
+ int ret = -1;
+ char *flag = NULL;
+
+ ret = dict_get_str(xdata, "link-count", &flag);
+ if ((ret == 0) && (strcmp(flag, GF_XATTROP_INDEX_COUNT) == 0)) {
+ STACK_WIND(frame, index_fstat_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fstat, fd, xdata);
+ } else {
+ STACK_WIND(frame, default_fstat_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fstat, fd, xdata);
+ }
+
+ return 0;
+}
+
+int32_t
+index_opendir(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd,
+ dict_t *xdata)
+{
+ if (!index_is_fop_on_internal_inode(this, fd->inode, NULL))
+ goto normal;
+
+ frame->local = NULL;
+ STACK_UNWIND_STRICT(opendir, frame, 0, 0, fd, NULL);
+ return 0;
+
+normal:
+ STACK_WIND(frame, default_opendir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->opendir, loc, fd, xdata);
+ return 0;
+}
+
+int32_t
+index_readdir(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t off, dict_t *xdata)
+{
+ call_stub_t *stub = NULL;
+
+ if (!index_is_fop_on_internal_inode(this, fd->inode, NULL))
+ goto out;
+
+ stub = fop_readdir_stub(frame, index_readdir_wrapper, fd, size, off, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(readdir, frame, -1, ENOMEM, NULL, NULL);
+ return 0;
+ }
+ worker_enqueue(this, stub);
+ return 0;
+out:
+ STACK_WIND(frame, default_readdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readdir, fd, size, off, xdata);
+ return 0;
+}
+
+int
+index_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflag,
+ dict_t *xdata)
+{
+ call_stub_t *stub = NULL;
+
+ if (!index_is_fop_on_internal_inode(this, loc->parent, NULL))
+ goto out;
+
+ stub = fop_unlink_stub(frame, index_unlink_wrapper, loc, xflag, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(unlink, frame, -1, ENOMEM, NULL, NULL, NULL);
+ return 0;
+ }
+ worker_enqueue(this, stub);
+ return 0;
+out:
+ STACK_WIND(frame, default_unlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->unlink, loc, xflag, xdata);
+ return 0;
+}
+
+int
+index_rmdir(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ dict_t *xdata)
+{
+ call_stub_t *stub = NULL;
+
+ if (!index_is_fop_on_internal_inode(this, loc->parent, NULL))
+ goto out;
+
+ stub = fop_rmdir_stub(frame, index_rmdir_wrapper, loc, flags, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(rmdir, frame, -1, ENOMEM, NULL, NULL, NULL);
+ return 0;
+ }
+ worker_enqueue(this, stub);
+ return 0;
+out:
+ STACK_WIND_TAIL(frame, FIRST_CHILD(this), FIRST_CHILD(this)->fops->rmdir,
+ loc, flags, xdata);
+ return 0;
+}
+
+int
+index_make_xattrop_watchlist(xlator_t *this, index_priv_t *priv,
+ char *watchlist, index_xattrop_type_t type)
+{
+ char *delim = NULL;
+ char *dup_watchlist = NULL;
+ char *key = NULL;
+ char *saveptr = NULL;
+ dict_t *xattrs = NULL;
+ data_t *dummy = NULL;
+ int ret = 0;
+
+ if (!watchlist)
+ return 0;
+
+ dup_watchlist = gf_strdup(watchlist);
+ if (!dup_watchlist)
+ return -1;
+
+ xattrs = dict_new();
+ if (!xattrs) {
+ ret = -1;
+ goto out;
+ }
+
+ dummy = int_to_data(1);
+ if (!dummy) {
+ ret = -1;
+ goto out;
+ }
+
+ data_ref(dummy);
+
+ delim = ",";
+ key = strtok_r(dup_watchlist, delim, &saveptr);
+ while (key) {
+ if (strlen(key) == 0) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set(xattrs, key, dummy);
+ if (ret)
+ goto out;
+
+ key = strtok_r(NULL, delim, &saveptr);
+ }
+
+ switch (type) {
+ case DIRTY:
+ priv->dirty_watchlist = dict_copy_with_ref(xattrs,
+ priv->dirty_watchlist);
+ if (!priv->dirty_watchlist) {
+ ret = -1;
+ goto out;
+ }
+ break;
+ case XATTROP:
+ priv->pending_watchlist = dict_copy_with_ref(
+ xattrs, priv->pending_watchlist);
+ if (!priv->pending_watchlist) {
+ ret = -1;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ret = 0;
+out:
+ if (xattrs)
+ dict_unref(xattrs);
+
+ GF_FREE(dup_watchlist);
+
+ if (dummy)
+ data_unref(dummy);
+
+ return ret;
+}
+
+int32_t
+mem_acct_init(xlator_t *this)
+{
+ int ret = -1;
+
+ ret = xlator_mem_acct_init(this, gf_index_mt_end + 1);
+
+ return ret;
+}
+
+int
+init(xlator_t *this)
+{
+ int i = 0;
+ int ret = -1;
+ int64_t count = -1;
+ index_priv_t *priv = NULL;
+ pthread_attr_t w_attr;
+ gf_boolean_t mutex_inited = _gf_false;
+ gf_boolean_t cond_inited = _gf_false;
+ gf_boolean_t attr_inited = _gf_false;
+ char *watchlist = NULL;
+ char *dirtylist = NULL;
+ char *pendinglist = NULL;
+ char *index_base_parent = NULL;
+ char *tmp = NULL;
+
+ if (!this->children || this->children->next) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, INDEX_MSG_INVALID_GRAPH,
+ "'index' not configured with exactly one child");
+ goto out;
+ }
+
+ if (!this->parents) {
+ gf_msg(this->name, GF_LOG_WARNING, EINVAL, INDEX_MSG_INVALID_GRAPH,
+               "dangling volume. Check the volfile");
+ }
+
+ priv = GF_CALLOC(1, sizeof(*priv), gf_index_mt_priv_t);
+ if (!priv)
+ goto out;
+
+ LOCK_INIT(&priv->lock);
+ if ((ret = pthread_cond_init(&priv->cond, NULL)) != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, ret, INDEX_MSG_INVALID_ARGS,
+ "pthread_cond_init failed");
+ goto out;
+ }
+ cond_inited = _gf_true;
+
+ if ((ret = pthread_mutex_init(&priv->mutex, NULL)) != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, ret, INDEX_MSG_INVALID_ARGS,
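+    /* Queue the stub so that xattrops on the same inode are serialized and
+     * processed in order by the worker thread. */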
+ "pthread_mutex_init failed");
+ goto out;
+ }
+ mutex_inited = _gf_true;
+
+ if ((ret = pthread_attr_init(&w_attr)) != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, ret, INDEX_MSG_INVALID_ARGS,
+ "pthread_attr_init failed");
+ goto out;
+ }
+ attr_inited = _gf_true;
+
+ ret = pthread_attr_setstacksize(&w_attr, INDEX_THREAD_STACK_SIZE);
+ if (ret == EINVAL) {
+ gf_msg(this->name, GF_LOG_WARNING, ret, INDEX_MSG_INVALID_ARGS,
+ "Using default thread stack size");
+ }
+
+ GF_OPTION_INIT("index-base", priv->index_basepath, path, out);
+ tmp = gf_strdup(priv->index_basepath);
+ index_base_parent = dirname(tmp);
+ if (gf_lstat_dir(index_base_parent, NULL) != 0) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, errno,
+ INDEX_MSG_INDEX_DIR_CREATE_FAILED,
+ "Failed to find parent dir (%s) of index basepath %s.",
+ index_base_parent, priv->index_basepath);
+ goto out;
+ }
+
+ GF_OPTION_INIT("xattrop64-watchlist", watchlist, str, out);
+ ret = index_make_xattrop_watchlist(this, priv, watchlist, XATTROP);
+ if (ret)
+ goto out;
+
+ GF_OPTION_INIT("xattrop-dirty-watchlist", dirtylist, str, out);
+ ret = index_make_xattrop_watchlist(this, priv, dirtylist, DIRTY);
+ if (ret)
+ goto out;
+
+ GF_OPTION_INIT("xattrop-pending-watchlist", pendinglist, str, out);
+ ret = index_make_xattrop_watchlist(this, priv, pendinglist, XATTROP);
+ if (ret)
+ goto out;
+
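+    /* The complete watchlist is the union of the dirty and pending
+     * watchlists. */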
+ if (priv->dirty_watchlist)
+ priv->complete_watchlist = dict_copy_with_ref(priv->dirty_watchlist,
+ priv->complete_watchlist);
+ if (priv->pending_watchlist)
+ priv->complete_watchlist = dict_copy_with_ref(priv->pending_watchlist,
+ priv->complete_watchlist);
+
+ gf_uuid_generate(priv->index);
+ for (i = 0; i < XATTROP_TYPE_END; i++)
+ gf_uuid_generate(priv->internal_vgfid[i]);
+
+ INIT_LIST_HEAD(&priv->callstubs);
+ GF_ATOMIC_INIT(priv->stub_cnt, 0);
+
+ this->local_pool = mem_pool_new(index_local_t, 64);
+ if (!this->local_pool) {
+ ret = -1;
+ goto out;
+ }
+
+ this->private = priv;
+
+ ret = index_dir_create(this, XATTROP_SUBDIR);
+ if (ret < 0)
+ goto out;
+
+ if (priv->dirty_watchlist) {
+ ret = index_dir_create(this, DIRTY_SUBDIR);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = index_dir_create(this, ENTRY_CHANGES_SUBDIR);
+ if (ret < 0)
+ goto out;
+
+ /*init indices files counts*/
+ count = index_fetch_link_count(this, XATTROP);
+ index_set_link_count(priv, count, XATTROP);
+ priv->down = _gf_false;
+
+ priv->curr_count = 0;
+ ret = gf_thread_create(&priv->thread, &w_attr, index_worker, this,
+ "idxwrker");
+ if (ret) {
+ gf_msg(this->name, GF_LOG_WARNING, ret,
+ INDEX_MSG_WORKER_THREAD_CREATE_FAILED,
+ "Failed to create worker thread, aborting");
+ goto out;
+ }
+ priv->curr_count++;
+ ret = 0;
+out:
+ GF_FREE(tmp);
+
+ if (ret) {
+ if (cond_inited)
+ pthread_cond_destroy(&priv->cond);
+ if (mutex_inited)
+ pthread_mutex_destroy(&priv->mutex);
+ if (priv && priv->dirty_watchlist)
+ dict_unref(priv->dirty_watchlist);
+ if (priv && priv->pending_watchlist)
+ dict_unref(priv->pending_watchlist);
+ if (priv && priv->complete_watchlist)
+ dict_unref(priv->complete_watchlist);
+ if (priv)
+ GF_FREE(priv);
+ this->private = NULL;
+ mem_pool_destroy(this->local_pool);
+ this->local_pool = NULL;
+ }
+
+ if (attr_inited)
+ pthread_attr_destroy(&w_attr);
+ return ret;
+}
+
+void
+fini(xlator_t *this)
+{
+ index_priv_t *priv = NULL;
+
+ priv = this->private;
+ if (!priv)
+ goto out;
+
+ priv->down = _gf_true;
+ pthread_cond_broadcast(&priv->cond);
+ if (priv->thread) {
+ gf_thread_cleanup_xint(priv->thread);
+ priv->thread = 0;
+ }
+ this->private = NULL;
+ LOCK_DESTROY(&priv->lock);
+ pthread_cond_destroy(&priv->cond);
+ pthread_mutex_destroy(&priv->mutex);
+ if (priv->dirty_watchlist)
+ dict_unref(priv->dirty_watchlist);
+ if (priv->pending_watchlist)
+ dict_unref(priv->pending_watchlist);
+ if (priv->complete_watchlist)
+ dict_unref(priv->complete_watchlist);
+ GF_FREE(priv);
+
+ if (this->local_pool) {
+ mem_pool_destroy(this->local_pool);
+ this->local_pool = NULL;
+ }
+out:
+ return;
+}
+
+int
+index_forget(xlator_t *this, inode_t *inode)
+{
+ uint64_t tmp_cache = 0;
+ if (!inode_ctx_del(inode, this, &tmp_cache))
+ GF_FREE((index_inode_ctx_t *)(long)tmp_cache);
+
+ return 0;
+}
+
+int32_t
+index_releasedir(xlator_t *this, fd_t *fd)
+{
+ index_fd_ctx_t *fctx = NULL;
+ uint64_t ctx = 0;
+ int ret = 0;
+
+ ret = fd_ctx_del(fd, this, &ctx);
+ if (ret < 0)
+ goto out;
+
+ fctx = (index_fd_ctx_t *)(long)ctx;
+ if (fctx->dir) {
+ ret = sys_closedir(fctx->dir);
+ if (ret)
+ gf_msg(this->name, GF_LOG_ERROR, errno, INDEX_MSG_FD_OP_FAILED,
+ "closedir error");
+ }
+
+ GF_FREE(fctx);
+out:
+ return 0;
+}
+
+int32_t
+index_release(xlator_t *this, fd_t *fd)
+{
+ index_fd_ctx_t *fctx = NULL;
+ uint64_t ctx = 0;
+ int ret = 0;
+
+ ret = fd_ctx_del(fd, this, &ctx);
+ if (ret < 0)
+ goto out;
+
+ fctx = (index_fd_ctx_t *)(long)ctx;
+ GF_FREE(fctx);
+out:
+ return 0;
+}
+
+int
+notify(xlator_t *this, int event, void *data, ...)
+{
+ int ret = 0;
+ index_priv_t *priv = NULL;
+ uint64_t stub_cnt = 0;
+ xlator_t *victim = data;
+ struct timespec sleep_till = {
+ 0,
+ };
+
+ if (!this)
+ return 0;
+
+ priv = this->private;
+ if (!priv)
+ return 0;
+
+ if ((event == GF_EVENT_PARENT_DOWN) && victim->cleanup_starting) {
+ stub_cnt = GF_ATOMIC_GET(priv->stub_cnt);
+ timespec_now_realtime(&sleep_till);
+ sleep_till.tv_sec += 1;
+
+        /* Wait for stubs to drain from the queue before notifying PARENT_DOWN */
+ pthread_mutex_lock(&priv->mutex);
+ {
+ while (stub_cnt) {
+ (void)pthread_cond_timedwait(&priv->cond, &priv->mutex,
+ &sleep_till);
+ stub_cnt = GF_ATOMIC_GET(priv->stub_cnt);
+ }
+ }
+ pthread_mutex_unlock(&priv->mutex);
+ gf_log(this->name, GF_LOG_INFO,
+ "Notify GF_EVENT_PARENT_DOWN for brick %s", victim->name);
+ }
+
+ if ((event == GF_EVENT_CHILD_DOWN) && victim->cleanup_starting) {
+ pthread_mutex_lock(&priv->mutex);
+ {
+ priv->down = _gf_true;
+ pthread_cond_broadcast(&priv->cond);
+ while (priv->curr_count)
+ pthread_cond_wait(&priv->cond, &priv->mutex);
+ }
+ pthread_mutex_unlock(&priv->mutex);
+
+ gf_log(this->name, GF_LOG_INFO,
+ "Notify GF_EVENT_CHILD_DOWN for brick %s", victim->name);
+ }
+
+ ret = default_notify(this, event, data);
+ return ret;
+}
+
+struct xlator_fops fops = {
+ .xattrop = index_xattrop,
+ .fxattrop = index_fxattrop,
+
+ // interface functions follow
+ .getxattr = index_getxattr,
+ .lookup = index_lookup,
+ .opendir = index_opendir,
+ .readdir = index_readdir,
+ .unlink = index_unlink,
+ .rmdir = index_rmdir,
+ .fstat = index_fstat,
+};
+
+struct xlator_dumpops dumpops;
+
+struct xlator_cbks cbks = {.forget = index_forget,
+ .release = index_release,
+ .releasedir = index_releasedir};
+
+struct volume_options options[] = {
+ {.key = {"index-base"},
+ .type = GF_OPTION_TYPE_PATH,
+ .description = "path where the index files need to be stored",
+ .default_value = "{{ brick.path }}/.glusterfs/indices"},
+ {.key = {"xattrop64-watchlist"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "Comma separated list of xattrs that are watched",
+ .default_value = "trusted.ec.dirty"},
+ {.key = {"xattrop-dirty-watchlist"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "Comma separated list of xattrs that are watched",
+ .default_value = "trusted.afr.dirty"},
+ {.key = {"xattrop-pending-watchlist"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "Comma separated list of xattrs that are watched",
+ .default_value = "trusted.afr.{{ volume.name }}"},
+ {.key = {NULL}},
+};
+
+xlator_api_t xlator_api = {
+ .init = init,
+ .fini = fini,
+ .notify = notify,
+ .mem_acct_init = mem_acct_init,
+ .op_version = {1}, /* Present from the initial version */
+ .dumpops = &dumpops,
+ .fops = &fops,
+ .cbks = &cbks,
+ .options = options,
+ .identifier = "index",
+ .category = GF_MAINTAINED,
+};
diff --git a/xlators/features/index/src/index.h b/xlators/features/index/src/index.h
new file mode 100644
index 00000000000..a2b6e6e2570
--- /dev/null
+++ b/xlators/features/index/src/index.h
@@ -0,0 +1,86 @@
+/*
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __INDEX_H__
+#define __INDEX_H__
+
+#include <glusterfs/xlator.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/byte-order.h>
+#include <glusterfs/common-utils.h>
+#include "index-mem-types.h"
+
+#define INDEX_THREAD_STACK_SIZE ((size_t)(1024 * 1024))
+
+typedef enum { UNKNOWN, IN, NOTIN } index_state_t;
+
+typedef enum {
+ XATTROP_TYPE_UNSET = -1,
+ XATTROP,
+ DIRTY,
+ ENTRY_CHANGES,
+ XATTROP_TYPE_END
+} index_xattrop_type_t;
+
+typedef struct index_inode_ctx {
+ gf_boolean_t processing;
+ struct list_head callstubs;
+ int state[XATTROP_TYPE_END];
+ uuid_t virtual_pargfid; /* virtual gfid of dir under
+ .glusterfs/indices/entry-changes. */
+} index_inode_ctx_t;
+
+typedef struct index_fd_ctx {
+ DIR *dir;
+ off_t dir_eof;
+} index_fd_ctx_t;
+
+typedef struct index_priv {
+ char *index_basepath;
+ char *dirty_basepath;
+ uuid_t index;
+ gf_lock_t lock;
+ uuid_t internal_vgfid[XATTROP_TYPE_END];
+ struct list_head callstubs;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ dict_t *dirty_watchlist;
+ dict_t *pending_watchlist;
+ dict_t *complete_watchlist;
+ int64_t pending_count;
+ pthread_t thread;
+ gf_boolean_t down;
+ gf_atomic_t stub_cnt;
+ int32_t curr_count;
+} index_priv_t;
+
+typedef struct index_local {
+ inode_t *inode;
+ dict_t *xdata;
+} index_local_t;
+
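+/* Unwind helper: detach frame->local before unwinding so the unwind path
+ * never sees it, and release the inode and xdata references it holds only
+ * after the unwind completes. */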
+#define INDEX_STACK_UNWIND(fop, frame, params...) \
+ do { \
+ index_local_t *__local = NULL; \
+ if (frame) { \
+ __local = frame->local; \
+ frame->local = NULL; \
+ } \
+ STACK_UNWIND_STRICT(fop, frame, params); \
+ if (__local) { \
+ inode_unref(__local->inode); \
+ if (__local->xdata) \
+ dict_unref(__local->xdata); \
+ mem_put(__local); \
+ } \
+ } while (0)
+
+#endif
diff --git a/xlators/features/leases/Makefile.am b/xlators/features/leases/Makefile.am
new file mode 100644
index 00000000000..a985f42a877
--- /dev/null
+++ b/xlators/features/leases/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/xlators/features/leases/src/Makefile.am b/xlators/features/leases/src/Makefile.am
new file mode 100644
index 00000000000..a1aef10e299
--- /dev/null
+++ b/xlators/features/leases/src/Makefile.am
@@ -0,0 +1,20 @@
+if WITH_SERVER
+xlator_LTLIBRARIES = leases.la
+endif
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
+
+leases_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+
+leases_la_SOURCES = leases.c leases-internal.c
+
+leases_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+
+noinst_HEADERS = leases.h leases-mem-types.h leases-messages.h
+
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \
+ -I$(CONTRIBDIR)/timer-wheel
+
+AM_CFLAGS = -Wall $(GF_CFLAGS)
+
+CLEANFILES =
diff --git a/xlators/features/leases/src/leases-internal.c b/xlators/features/leases/src/leases-internal.c
new file mode 100644
index 00000000000..56dee244281
--- /dev/null
+++ b/xlators/features/leases/src/leases-internal.c
@@ -0,0 +1,1412 @@
+/*
+ Copyright (c) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "leases.h"
+
+/* Mutex locks used in this xlator and their order of acquisition:
+ * Check lease conflict:
+ * lease_ctx lock
+ * add_timer => internal timer locks
+ * lease_ctx unlock
+ *
+ * Add/remove lease:
+ * lease_ctx lock
+ * add_timer => internal timer locks
+ * OR
+ * priv lock => Adding/removing to/from the cleanup client list
+ * priv unlock
+ * lease_ctx unlock
+ *
+ * Timer thread:
+ * Timer internal lock
+ * priv lock => By timer handler
+ * priv unlock
+ * Timer internal unlock
+ *
+ * Expired recall cleanup thread:
+ * priv lock
+ * priv condwait
+ * priv unlock
+ * lease_ctx lock
+ * priv lock
+ * priv unlock
+ * lease_ctx unlock
+ */
+
+/*
+ * Check if lease_lk is enabled
+ * Return Value:
+ * _gf_true - lease lock option enabled
+ * _gf_false - lease lock option disabled
+ */
+gf_boolean_t
+is_leases_enabled(xlator_t *this)
+{
+ leases_private_t *priv = NULL;
+ gf_boolean_t is_enabled = _gf_false;
+
+ GF_VALIDATE_OR_GOTO("leases", this, out);
+
+ if (this->private) {
+ priv = (leases_private_t *)this->private;
+ is_enabled = priv->leases_enabled;
+ }
+out:
+ return is_enabled;
+}
+
+/*
+ * Get the recall_lease_timeout
+ * Return Value:
+ * timeout value (in seconds) set as an option to this xlator.
+ * -1 error case
+ */
+static int32_t
+get_recall_lease_timeout(xlator_t *this)
+{
+ leases_private_t *priv = NULL;
+ int32_t timeout = -1;
+
+ GF_VALIDATE_OR_GOTO("leases", this, out);
+
+ if (this->private) {
+ priv = (leases_private_t *)this->private;
+ timeout = priv->recall_lease_timeout;
+ }
+out:
+ return timeout;
+}
+
+static void
+__dump_leases_info(xlator_t *this, lease_inode_ctx_t *lease_ctx)
+{
+ lease_id_entry_t *lease_entry = NULL;
+ lease_id_entry_t *tmp = NULL;
+
+ GF_VALIDATE_OR_GOTO("leases", this, out);
+ GF_VALIDATE_OR_GOTO("leases", lease_ctx, out);
+
+ gf_msg_debug(this->name, 0,
+ "Lease held on this inode, lease_type: %d,"
+ " lease_cnt:%" PRIu64
+ ", RD lease:%d, RW lease:%d, "
+ "openfd cnt:%" PRIu64,
+ lease_ctx->lease_type, lease_ctx->lease_cnt,
+ lease_ctx->lease_type_cnt[GF_RD_LEASE],
+ lease_ctx->lease_type_cnt[GF_RW_LEASE], lease_ctx->openfd_cnt);
+
+ list_for_each_entry_safe(lease_entry, tmp, &lease_ctx->lease_id_list,
+ lease_id_list)
+ {
+ gf_msg_debug(this->name, 0,
+ "Leases held by client: %s, lease "
+ "ID:%s, RD lease:%d, RW lease:%d, lease_type: %d, "
+ "lease_cnt:%" PRIu64,
+ lease_entry->client_uid, lease_entry->lease_id,
+ lease_entry->lease_type_cnt[GF_RD_LEASE],
+ lease_entry->lease_type_cnt[GF_RW_LEASE],
+ lease_entry->lease_type, lease_entry->lease_cnt);
+ }
+out:
+ return;
+}
+
+static int
+__lease_ctx_set(inode_t *inode, xlator_t *this)
+{
+ lease_inode_ctx_t *inode_ctx = NULL;
+ int ret = -1;
+ uint64_t ctx = 0;
+
+ GF_VALIDATE_OR_GOTO("leases", inode, out);
+ GF_VALIDATE_OR_GOTO("leases", this, out);
+
+ ret = __inode_ctx_get(inode, this, &ctx);
+ if (!ret) {
+        gf_msg(this->name, GF_LOG_ERROR, 0, LEASE_MSG_INVAL_INODE_CTX,
+               "lease inode ctx is already set");
+ goto out;
+ }
+
+ inode_ctx = GF_CALLOC(1, sizeof(*inode_ctx),
+ gf_leases_mt_lease_inode_ctx_t);
+ GF_CHECK_ALLOC(inode_ctx, ret, out);
+
+ pthread_mutex_init(&inode_ctx->lock, NULL);
+ INIT_LIST_HEAD(&inode_ctx->lease_id_list);
+ INIT_LIST_HEAD(&inode_ctx->blocked_list);
+
+ inode_ctx->lease_cnt = 0;
+
+ ret = __inode_ctx_set(inode, this, (uint64_t *)inode_ctx);
+ if (ret) {
+ GF_FREE(inode_ctx);
+ gf_msg(this->name, GF_LOG_INFO, 0, LEASE_MSG_INVAL_INODE_CTX,
+ "failed to set inode ctx (%p)", inode);
+ }
+out:
+ return ret;
+}
+
+static lease_inode_ctx_t *
+__lease_ctx_get(inode_t *inode, xlator_t *this)
+{
+ lease_inode_ctx_t *inode_ctx = NULL;
+ uint64_t ctx = 0;
+ int ret = 0;
+
+ GF_VALIDATE_OR_GOTO("leases", inode, out);
+ GF_VALIDATE_OR_GOTO("leases", this, out);
+
+ ret = __inode_ctx_get(inode, this, &ctx);
+ if (ret < 0) {
+ ret = __lease_ctx_set(inode, this);
+ if (ret < 0)
+ goto out;
+
+ ret = __inode_ctx_get(inode, this, &ctx);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, LEASE_MSG_INVAL_INODE_CTX,
+ "failed to get inode ctx (%p)", inode);
+ goto out;
+ }
+ }
+
+ inode_ctx = (lease_inode_ctx_t *)(long)ctx;
+out:
+ return inode_ctx;
+}
+
+lease_inode_ctx_t *
+lease_ctx_get(inode_t *inode, xlator_t *this)
+{
+ lease_inode_ctx_t *inode_ctx = NULL;
+
+ GF_VALIDATE_OR_GOTO("leases", inode, out);
+ GF_VALIDATE_OR_GOTO("leases", this, out);
+
+ LOCK(&inode->lock);
+ {
+ inode_ctx = __lease_ctx_get(inode, this);
+ }
+ UNLOCK(&inode->lock);
+out:
+ return inode_ctx;
+}
+
+static lease_id_entry_t *
+new_lease_id_entry(call_frame_t *frame, const char *lease_id)
+{
+ lease_id_entry_t *lease_entry = NULL;
+
+ GF_VALIDATE_OR_GOTO("leases", frame, out);
+ GF_VALIDATE_OR_GOTO("leases", lease_id, out);
+
+ lease_entry = GF_CALLOC(1, sizeof(*lease_entry),
+ gf_leases_mt_lease_id_entry_t);
+ if (!lease_entry) {
+ gf_msg(frame->this->name, GF_LOG_ERROR, ENOMEM, LEASE_MSG_NO_MEM,
+ "Memory allocation for lease_entry failed");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&lease_entry->lease_id_list);
+ lease_entry->lease_type = NONE;
+ lease_entry->lease_cnt = 0;
+ lease_entry->recall_time = get_recall_lease_timeout(frame->this);
+ lease_entry->client_uid = gf_strdup(frame->root->client->client_uid);
+ if (!lease_entry->client_uid) {
+ gf_msg(frame->this->name, GF_LOG_ERROR, ENOMEM, LEASE_MSG_NO_MEM,
+ "Memory allocation for client_uid failed");
+ GF_FREE(lease_entry);
+ lease_entry = NULL;
+ goto out;
+ }
+
+ memcpy(lease_entry->lease_id, lease_id, LEASE_ID_SIZE);
+out:
+ return lease_entry;
+}
+
+static void
+__destroy_lease_id_entry(lease_id_entry_t *lease_entry)
+{
+ GF_VALIDATE_OR_GOTO("leases", lease_entry, out);
+
+ list_del_init(&lease_entry->lease_id_list);
+ GF_FREE(lease_entry->client_uid);
+ GF_FREE(lease_entry);
+out:
+ return;
+}
+
+static inline gf_boolean_t
+__is_same_lease_id(const char *k1, const char *k2)
+{
+ if (memcmp(k1, k2, strlen(k1)) == 0)
+ return _gf_true;
+
+ return _gf_false;
+}
+
+/* Checks if there are any leases, other than the leases taken
+ * by the given lease_id
+ */
+static gf_boolean_t
+__another_lease_found(lease_inode_ctx_t *lease_ctx, const char *lease_id)
+{
+ lease_id_entry_t *lease_entry = NULL;
+ lease_id_entry_t *tmp = NULL;
+ gf_boolean_t found_lease = _gf_false;
+
+ GF_VALIDATE_OR_GOTO("leases", lease_id, out);
+ GF_VALIDATE_OR_GOTO("leases", lease_ctx, out);
+
+ list_for_each_entry_safe(lease_entry, tmp, &lease_ctx->lease_id_list,
+ lease_id_list)
+ {
+ if (!__is_same_lease_id(lease_id, lease_entry->lease_id)) {
+ if (lease_entry->lease_cnt > 0) {
+ found_lease = _gf_true;
+ break;
+ }
+ }
+ }
+out:
+ return found_lease;
+}
+
+/* Returns the lease_id_entry for a given lease_id and a given inode.
+ * Return values:
+ * NULL - If no client entry found
+ * lease_id_entry_t* - a pointer to the client entry if found
+ */
+static lease_id_entry_t *
+__get_lease_id_entry(lease_inode_ctx_t *lease_ctx, const char *lease_id)
+{
+ lease_id_entry_t *lease_entry = NULL;
+ lease_id_entry_t *tmp = NULL;
+ lease_id_entry_t *found = NULL;
+
+ GF_VALIDATE_OR_GOTO("leases", lease_id, out);
+ GF_VALIDATE_OR_GOTO("leases", lease_ctx, out);
+
+ list_for_each_entry_safe(lease_entry, tmp, &lease_ctx->lease_id_list,
+ lease_id_list)
+ {
+ if (__is_same_lease_id(lease_id, lease_entry->lease_id)) {
+ found = lease_entry;
+ gf_msg_debug("leases", 0,
+ "lease ID entry found "
+ "Client UID:%s, lease id:%s",
+ lease_entry->client_uid,
+ leaseid_utoa(lease_entry->lease_id));
+ break;
+ }
+ }
+out:
+ return found;
+}
+
+/* Returns the lease_id_entry for a given lease_id and a given inode,
+ * if none found creates one.
+ * Return values:
+ * lease_id_entry_t* - a pointer to the client entry
+ */
+static lease_id_entry_t *
+__get_or_new_lease_entry(call_frame_t *frame, const char *lease_id,
+ lease_inode_ctx_t *lease_ctx)
+{
+ lease_id_entry_t *lease_entry = NULL;
+
+ GF_VALIDATE_OR_GOTO("leases", frame, out);
+ GF_VALIDATE_OR_GOTO("leases", lease_id, out);
+ GF_VALIDATE_OR_GOTO("leases", lease_ctx, out);
+
+ lease_entry = __get_lease_id_entry(lease_ctx, lease_id);
+ if (!lease_entry) { /* create one */
+ lease_entry = new_lease_id_entry(frame, lease_id);
+ if (!lease_entry)
+ goto out;
+
+ list_add_tail(&lease_entry->lease_id_list, &lease_ctx->lease_id_list);
+
+ gf_msg_debug(frame->this->name, 0,
+ "lease ID entry added,"
+ " Client UID:%s, lease id:%s",
+ lease_entry->client_uid,
+ leaseid_utoa(lease_entry->lease_id));
+ }
+out:
+ return lease_entry;
+}
+
+static lease_inode_t *
+new_lease_inode(inode_t *inode)
+{
+ lease_inode_t *l_inode = GF_MALLOC(sizeof(*l_inode),
+ gf_leases_mt_lease_inode_t);
+ if (!l_inode)
+ goto out;
+
+ INIT_LIST_HEAD(&l_inode->list);
+ l_inode->inode = inode_ref(inode);
+out:
+ return l_inode;
+}
+
+static void
+__destroy_lease_inode(lease_inode_t *l_inode)
+{
+ list_del_init(&l_inode->list);
+ inode_unref(l_inode->inode);
+ GF_FREE(l_inode);
+}
+
+static lease_client_t *
+new_lease_client(const char *client_uid)
+{
+ lease_client_t *clnt = GF_MALLOC(sizeof(*clnt),
+ gf_leases_mt_lease_client_t);
+ if (!clnt)
+ goto out;
+
+ INIT_LIST_HEAD(&clnt->client_list);
+ INIT_LIST_HEAD(&clnt->inode_list);
+ clnt->client_uid = gf_strdup(client_uid);
+out:
+ return clnt;
+}
+
+static void
+__destroy_lease_client(lease_client_t *clnt)
+{
+ list_del_init(&clnt->inode_list);
+ list_del_init(&clnt->client_list);
+ GF_FREE(clnt);
+
+ return;
+}
+
+static lease_client_t *
+__get_lease_client(xlator_t *this, leases_private_t *priv,
+ const char *client_uid)
+{
+ lease_client_t *clnt = NULL;
+ lease_client_t *tmp = NULL;
+ lease_client_t *found = NULL;
+
+ list_for_each_entry_safe(clnt, tmp, &priv->client_list, client_list)
+ {
+ if ((strcmp(clnt->client_uid, client_uid) == 0)) {
+ found = clnt;
+ gf_msg_debug(this->name, 0,
+ "Client:%s already found "
+ "in the cleanup list",
+ client_uid);
+ break;
+ }
+ }
+ return found;
+}
+
+static lease_client_t *
+__get_or_new_lease_client(xlator_t *this, leases_private_t *priv,
+ const char *client_uid)
+{
+ lease_client_t *found = NULL;
+
+ found = __get_lease_client(this, priv, client_uid);
+ if (!found) {
+ found = new_lease_client(client_uid);
+ if (!found)
+ goto out;
+ list_add_tail(&found->client_list, &priv->client_list);
+ gf_msg_debug(this->name, 0,
+ "Adding a new client:%s entry "
+ "to the cleanup list",
+ client_uid);
+ }
+out:
+ return found;
+}
+
+static int
+add_inode_to_client_list(xlator_t *this, inode_t *inode, const char *client_uid)
+{
+ leases_private_t *priv = this->private;
+ lease_client_t *clnt = NULL;
+
+ lease_inode_t *lease_inode = new_lease_inode(inode);
+ if (!lease_inode)
+ return -ENOMEM;
+
+ pthread_mutex_lock(&priv->mutex);
+ {
+ clnt = __get_or_new_lease_client(this, priv, client_uid);
+ if (!clnt) {
+ pthread_mutex_unlock(&priv->mutex);
+ __destroy_lease_inode(lease_inode);
+ return -ENOMEM;
+ }
+ list_add_tail(&clnt->inode_list, &lease_inode->list);
+ }
+ pthread_mutex_unlock(&priv->mutex);
+ gf_msg_debug(this->name, 0,
+ "Added a new inode:%p to the client(%s) "
+ "cleanup list, gfid(%s)",
+ inode, client_uid, uuid_utoa(inode->gfid));
+ return 0;
+}
+
+/* Add lease entry to the corresponding client entry.
+ * Return values:
+ * 0 Success
+ * -1 Failure
+ */
+static int
+__add_lease(call_frame_t *frame, inode_t *inode, lease_inode_ctx_t *lease_ctx,
+ const char *client_uid, struct gf_lease *lease)
+{
+ lease_id_entry_t *lease_entry = NULL;
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO("leases", frame, out);
+ GF_VALIDATE_OR_GOTO("leases", client_uid, out);
+ GF_VALIDATE_OR_GOTO("leases", lease_ctx, out);
+ GF_VALIDATE_OR_GOTO("leases", inode, out);
+ GF_VALIDATE_OR_GOTO("leases", lease, out);
+
+ gf_msg_trace(frame->this->name, 0,
+ "Granting lease lock to client %s with lease id %s"
+ " on gfid(%s)",
+ client_uid, leaseid_utoa(lease->lease_id),
+ uuid_utoa(inode->gfid));
+
+ lease_entry = __get_or_new_lease_entry(frame, lease->lease_id, lease_ctx);
+ if (!lease_entry) {
+ errno = ENOMEM;
+ goto out;
+ }
+
+ lease_entry->lease_type_cnt[lease->lease_type]++;
+ lease_entry->lease_cnt++;
+ lease_entry->lease_type |= lease->lease_type;
+ /* If this is the first lease taken by the client on the file, then
+ * add this inode/file to the client disconnect cleanup list
+ */
+ if (lease_entry->lease_cnt == 1) {
+ add_inode_to_client_list(frame->this, inode, client_uid);
+ }
+
+ lease_ctx->lease_cnt++;
+ lease_ctx->lease_type_cnt[lease->lease_type]++;
+ lease_ctx->lease_type |= lease->lease_type;
+
+    /* Take a ref for the first lease taken on this inode; the corresponding
+     * unref happens when all the leases are unlocked or during DISCONNECT.
+     * The ref is required because the inode on which a lease is acquired
+     * should not be deleted when lru cleanup kicks in. */
+ if (lease_ctx->lease_cnt == 1) {
+ lease_ctx->inode = inode_ref(inode);
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+static gf_boolean_t
+__is_clnt_lease_none(const char *client_uid, lease_inode_ctx_t *lease_ctx)
+{
+ gf_boolean_t lease_none = _gf_true;
+ lease_id_entry_t *lease_entry = NULL;
+ lease_id_entry_t *tmp = NULL;
+
+ list_for_each_entry_safe(lease_entry, tmp, &lease_ctx->lease_id_list,
+ lease_id_list)
+ {
+ if ((strcmp(client_uid, lease_entry->client_uid) == 0) &&
+ (lease_entry->lease_cnt != 0)) {
+ lease_none = _gf_false;
+ break;
+ }
+ }
+
+ return lease_none;
+}
+
+static int
+__remove_inode_from_clnt_list(xlator_t *this, lease_client_t *clnt,
+ inode_t *inode)
+{
+ int ret = -1;
+ lease_inode_t *l_inode = NULL;
+ lease_inode_t *tmp1 = NULL;
+
+ list_for_each_entry_safe(l_inode, tmp1, &clnt->inode_list, list)
+ {
+ if (l_inode->inode == inode) {
+ __destroy_lease_inode(l_inode);
+ gf_msg_debug(this->name, 0,
+ "Removed the inode from the client cleanup list");
+ ret = 0;
+ }
+ }
+ /* TODO: Remove the client entry from the cleanup list */
+
+ return ret;
+}
+
+static int
+remove_from_clnt_list(xlator_t *this, const char *client_uid, inode_t *inode)
+{
+ leases_private_t *priv = NULL;
+ int ret = -1;
+ lease_client_t *clnt = NULL;
+
+ priv = this->private;
+ if (!priv)
+ goto out;
+
+ pthread_mutex_lock(&priv->mutex);
+ {
+ clnt = __get_lease_client(this, priv, client_uid);
+ if (!clnt) {
+ pthread_mutex_unlock(&priv->mutex);
+ gf_msg(this->name, GF_LOG_ERROR, 0, LEASE_MSG_CLNT_NOTFOUND,
+ "There is no client entry found in the cleanup list");
+ goto out;
+ }
+ ret = __remove_inode_from_clnt_list(this, clnt, inode);
+ if (ret) {
+ pthread_mutex_unlock(&priv->mutex);
+ gf_msg(this->name, GF_LOG_ERROR, 0, LEASE_MSG_INODE_NOTFOUND,
+ "There is no inode entry found in the cleanup list");
+ goto out;
+ }
+ }
+ pthread_mutex_unlock(&priv->mutex);
+out:
+ return ret;
+}
+
+/* Remove lease entry in the corresponding client entry.
+ */
+static int
+__remove_lease(xlator_t *this, inode_t *inode, lease_inode_ctx_t *lease_ctx,
+ const char *client_uid, struct gf_lease *lease)
+{
+ lease_id_entry_t *lease_entry = NULL;
+ int ret = 0;
+ int32_t lease_type = 0;
+ leases_private_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO("leases", lease_ctx, out);
+ GF_VALIDATE_OR_GOTO("leases", lease, out);
+
+ priv = this->private;
+
+ gf_msg_trace(this->name, 0,
+ "Removing lease entry for client: %s, "
+ "lease type:%d, lease id:%s",
+ client_uid, lease->lease_type, leaseid_utoa(lease->lease_id));
+
+    /* There could be a race wherein the server recalled the lease and, by
+     * the time the client sends the lease_unlock request, the server has
+     * already revoked it. To handle such cases, if the lease doesn't exist,
+     * treat the request as a no-op and return success.
+     */
+ lease_entry = __get_lease_id_entry(lease_ctx, lease->lease_id);
+ if (!lease_entry) {
+ gf_msg(this->name, GF_LOG_INFO, 0, LEASE_MSG_INVAL_UNLK_LEASE,
+ "Got unlock lease request from client:%s, but has no "
+ "corresponding lock",
+ client_uid);
+ ret = 0;
+ goto out;
+ }
+
+ if (!(lease_entry->lease_type & lease->lease_type)) {
+ gf_msg(this->name, GF_LOG_INFO, 0, LEASE_MSG_INVAL_UNLK_LEASE,
+ "Got unlock lease request from client:%s for an invalid "
+ "lease_type",
+ client_uid);
+ ret = -EINVAL;
+ errno = EINVAL;
+ goto out;
+ }
+ lease_type = lease->lease_type;
+ lease_entry->lease_type_cnt[lease_type]--;
+ lease_entry->lease_cnt--;
+
+ lease_ctx->lease_type_cnt[lease_type]--;
+ lease_ctx->lease_cnt--;
+
+ if (lease_entry->lease_type_cnt[lease_type] == 0)
+ lease_entry->lease_type = lease_entry->lease_type & (~lease_type);
+
+ if (lease_ctx->lease_type_cnt[lease_type] == 0)
+ lease_ctx->lease_type = lease_ctx->lease_type & (~lease_type);
+
+ if (lease_entry->lease_cnt == 0) {
+ if (__is_clnt_lease_none(client_uid, lease_ctx)) {
+ gf_msg_trace(this->name, 0,
+ "Client(%s) has no leases"
+ " on gfid (%s), hence removing the inode"
+ " from the client cleanup list",
+ client_uid, uuid_utoa(inode->gfid));
+ remove_from_clnt_list(this, client_uid, lease_ctx->inode);
+ }
+ __destroy_lease_id_entry(lease_entry);
+ lease_ctx->blocked_fops_resuming = _gf_true;
+ }
+
+ if (lease_ctx->lease_cnt == 0 && lease_ctx->timer) {
+ ret = gf_tw_del_timer(priv->timer_wheel, lease_ctx->timer);
+ lease_ctx->recall_in_progress = _gf_false;
+ lease_ctx->timer = NULL;
+ }
+out:
+ return ret;
+}
+
+static gf_boolean_t
+__is_lease_grantable(xlator_t *this, lease_inode_ctx_t *lease_ctx,
+ struct gf_lease *lease, inode_t *inode)
+{
+ uint32_t fd_count = 0;
+ int32_t flags = 0;
+ fd_t *iter_fd = NULL;
+ gf_boolean_t grant = _gf_false;
+ int ret = 0;
+ lease_fd_ctx_t *fd_ctx = NULL;
+ uint64_t ctx = 0;
+
+ GF_VALIDATE_OR_GOTO("leases", lease_ctx, out);
+ GF_VALIDATE_OR_GOTO("leases", lease, out);
+ GF_VALIDATE_OR_GOTO("leases", inode, out);
+
+ if (lease_ctx->recall_in_progress) {
+ gf_msg_debug(this->name, 0,
+ "Recall in progress, hence "
+ "failing the lease request");
+ grant = _gf_false;
+ goto out;
+ }
+
+ if (lease_ctx->blocked_fops_resuming) {
+ gf_msg_debug(this->name, 0,
+ "Previously blocked fops resuming, hence "
+ "failing the lease request");
+ grant = _gf_false;
+ goto out;
+ }
+
+ LOCK(&inode->lock);
+ {
+ list_for_each_entry(iter_fd, &inode->fd_list, inode_list)
+ {
+ ret = fd_ctx_get(iter_fd, this, &ctx);
+ if (ret < 0) {
+ grant = _gf_false;
+ UNLOCK(&inode->lock);
+ gf_msg(this->name, GF_LOG_ERROR, 0, LEASE_MSG_INVAL_FD_CTX,
+ "Unable to get fd ctx");
+ goto out;
+ }
+ fd_ctx = (lease_fd_ctx_t *)(long)ctx;
+
+            /* Check for open fd conflicts. Note that open fds associated
+             * with the same lease id are not checked for conflict, since
+             * leases are granted per lease id.
+             */
+ if (fd_ctx->client_uid != NULL &&
+ !__is_same_lease_id(fd_ctx->lease_id, lease->lease_id)) {
+ fd_count++;
+ flags |= iter_fd->flags;
+ }
+ }
+ }
+ UNLOCK(&inode->lock);
+
+ gf_msg_debug(this->name, 0, "open fd count:%d flags:%d", fd_count, flags);
+
+ __dump_leases_info(this, lease_ctx);
+
+ switch (lease->lease_type) {
+ case GF_RD_LEASE:
+ /* check open fd conflict */
+ if ((fd_count > 0) && ((flags & O_WRONLY) || (flags & O_RDWR))) {
+ grant = _gf_false;
+ break;
+ }
+
+ /* check for conflict with existing leases */
+ if (lease_ctx->lease_type == NONE ||
+ lease_ctx->lease_type == GF_RD_LEASE ||
+ !(__another_lease_found(lease_ctx, lease->lease_id)))
+ grant = _gf_true;
+ else
+ grant = _gf_false;
+ break;
+
+ case GF_RW_LEASE:
+            /* check open fd conflict; conflict if there are any open fds
+             * other than those associated with the requesting lease id. */
+ if (fd_count > 0) {
+ grant = _gf_false;
+ break;
+ }
+
+ /* check existing lease conflict */
+ if (lease_ctx->lease_type == NONE ||
+ !(__another_lease_found(lease_ctx, lease->lease_id)))
+ grant = _gf_true;
+ else
+ grant = _gf_false;
+ break;
+
+ default:
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, LEASE_MSG_INVAL_LEASE_TYPE,
+ "Invalid lease type specified");
+ break;
+ }
+out:
+ return grant;
+}
+
+static void
+do_blocked_fops(xlator_t *this, lease_inode_ctx_t *lease_ctx)
+{
+ struct list_head wind_list;
+ fop_stub_t *blk_fop = NULL;
+ fop_stub_t *tmp = NULL;
+
+ INIT_LIST_HEAD(&wind_list);
+
+ pthread_mutex_lock(&lease_ctx->lock);
+ {
+ if (!lease_ctx->blocked_fops_resuming) {
+ /* lease_ctx->blocked_fops_resuming will be set
+ * only when the last lease is released. That
+ * is when we need to resume blocked fops and unref
+ * the inode taken in __add_lease (when lease_cnt == 1).
+ * Return otherwise.
+ */
+ pthread_mutex_unlock(&lease_ctx->lock);
+ return;
+ }
+
+ list_for_each_entry_safe(blk_fop, tmp, &lease_ctx->blocked_list, list)
+ {
+ list_del_init(&blk_fop->list);
+ list_add_tail(&blk_fop->list, &wind_list);
+ }
+ }
+ pthread_mutex_unlock(&lease_ctx->lock);
+
+ gf_msg_trace(this->name, 0, "Executing the blocked stubs on gfid(%s)",
+ uuid_utoa(lease_ctx->inode->gfid));
+ list_for_each_entry_safe(blk_fop, tmp, &wind_list, list)
+ {
+ list_del_init(&blk_fop->list);
+ gf_msg_trace(this->name, 0, "Executing fop:%d", blk_fop->stub->fop);
+ call_resume(blk_fop->stub);
+ GF_FREE(blk_fop);
+ }
+
+ pthread_mutex_lock(&lease_ctx->lock);
+ {
+ lease_ctx->lease_type = NONE;
+ /* unref the inode taken in __add_lease
+ * (when lease_cnt == 1) */
+ lease_ctx->blocked_fops_resuming = _gf_false;
+ inode_unref(lease_ctx->inode);
+ lease_ctx->inode = NULL;
+ }
+ pthread_mutex_unlock(&lease_ctx->lock);
+
+ return;
+}
+
+void
+recall_lease_timer_handler(struct gf_tw_timer_list *timer, void *data,
+ unsigned long calltime)
+{
+ inode_t *inode = NULL;
+ lease_inode_t *lease_inode = NULL;
+ leases_private_t *priv = NULL;
+ lease_timer_data_t *timer_data = NULL;
+
+ timer_data = data;
+
+ priv = timer_data->this->private;
+ inode = timer_data->inode;
+ lease_inode = new_lease_inode(inode);
+ if (!lease_inode) {
+ errno = ENOMEM;
+ goto out;
+ }
+ pthread_mutex_lock(&priv->mutex);
+ {
+ list_add_tail(&lease_inode->list, &priv->recall_list);
+ pthread_cond_broadcast(&priv->cond);
+ }
+ pthread_mutex_unlock(&priv->mutex);
+out:
+ /* unref the inode_ref taken by timer_data in __recall_lease */
+ inode_unref(timer_data->inode);
+
+ GF_FREE(timer);
+}
+
+static void
+__recall_lease(xlator_t *this, lease_inode_ctx_t *lease_ctx)
+{
+ lease_id_entry_t *lease_entry = NULL;
+ lease_id_entry_t *tmp = NULL;
+ struct gf_upcall up_req = {
+ 0,
+ };
+ struct gf_upcall_recall_lease recall_req = {
+ 0,
+ };
+ int notify_ret = -1;
+ struct gf_tw_timer_list *timer = NULL;
+ leases_private_t *priv = NULL;
+ lease_timer_data_t *timer_data = NULL;
+ time_t recall_time;
+
+ if (lease_ctx->recall_in_progress) {
+ gf_msg_debug(this->name, 0,
+ "Lease recall is already in "
+ "progress, hence not sending another recall");
+ goto out;
+ }
+
+ priv = this->private;
+ recall_time = gf_time();
+ list_for_each_entry_safe(lease_entry, tmp, &lease_ctx->lease_id_list,
+ lease_id_list)
+ {
+ gf_uuid_copy(up_req.gfid, lease_ctx->inode->gfid);
+ up_req.client_uid = lease_entry->client_uid;
+ up_req.event_type = GF_UPCALL_RECALL_LEASE;
+ up_req.data = &recall_req;
+
+ notify_ret = this->notify(this, GF_EVENT_UPCALL, &up_req);
+ if (notify_ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, LEASE_MSG_RECALL_FAIL,
+ "Recall notification to client: %s failed",
+ lease_entry->client_uid);
+            /* Do not return from here; continue and register the timer.
+             * This is required mostly to keep replicas in sync. */
+ } else {
+ gf_msg_debug(this->name, 0,
+                         "Recall lease (all) "
+                         "notification sent to client %s",
+ lease_entry->client_uid);
+ }
+
+ lease_ctx->recall_in_progress = _gf_true;
+ lease_entry->recall_time = recall_time;
+ }
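+    /* A single timer per inode tracks the recall deadline; when it fires,
+     * recall_lease_timer_handler() queues the inode on priv->recall_list
+     * for the expired-recall cleanup thread. */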
+ timer = GF_MALLOC(sizeof(*timer), gf_common_mt_tw_timer_list);
+ if (!timer) {
+ goto out;
+ }
+ timer_data = GF_MALLOC(sizeof(lease_timer_data_t),
+ gf_leases_mt_timer_data_t);
+ if (!timer_data) {
+ GF_FREE(timer);
+ goto out;
+ }
+
+ timer_data->inode = inode_ref(lease_ctx->inode);
+ timer_data->this = this;
+ timer->data = timer_data;
+
+ INIT_LIST_HEAD(&timer->entry);
+ timer->expires = get_recall_lease_timeout(this);
+ timer->function = recall_lease_timer_handler;
+ lease_ctx->timer = timer;
+ gf_tw_add_timer(priv->timer_wheel, timer);
+ gf_msg_trace(this->name, 0,
+ "Registering timer "
+ "%p, after "
+ "sending recall",
+ timer);
+out:
+ return;
+}
+
+/* ret = 0; STACK_UNWIND success
+ * ret < 0; STACK_UNWIND failure, with -ret as the errno
+ */
+int
+process_lease_req(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ struct gf_lease *lease)
+{
+ int ret = 0;
+ char *client_uid = NULL;
+ lease_inode_ctx_t *lease_ctx = NULL;
+
+ GF_VALIDATE_OR_GOTO("leases", frame, out);
+ GF_VALIDATE_OR_GOTO("leases", this, out);
+ GF_VALIDATE_OR_GOTO("leases", inode, out);
+ GF_VALIDATE_OR_GOTO("leases", lease, out);
+
+ client_uid = frame->root->client->client_uid;
+
+ if (!is_valid_lease_id(lease->lease_id)) {
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, LEASE_MSG_INVAL_LEASE_ID,
+               "Invalid lease id from "
+               "client:%s",
+ client_uid);
+ ret = -EINVAL;
+ errno = EINVAL;
+ goto out;
+ }
+
+ lease_ctx = lease_ctx_get(inode, this);
+ if (!lease_ctx) {
+ gf_msg(this->name, GF_LOG_WARNING, ENOMEM, LEASE_MSG_NO_MEM,
+ "Unable to create/get inode ctx, "
+ "inode:%p",
+ inode);
+ ret = -ENOMEM;
+ errno = ENOMEM;
+ goto out;
+ }
+
+ gf_msg_debug(this->name, 0,
+ "Lease request from client: %s, "
+ "lease type:%d, lease cmd:%d, lease ID:%s, gfid:%s",
+ client_uid, lease->lease_type, lease->cmd,
+ leaseid_utoa(lease->lease_id), uuid_utoa(inode->gfid));
+
+ pthread_mutex_lock(&lease_ctx->lock);
+ {
+ switch (lease->cmd) {
+ case GF_GET_LEASE:
+ lease->lease_type = lease_ctx->lease_type;
+ gf_msg_debug(this->name, 0,
+                             "Get lease, existing lease "
+                             "type: %d",
+ lease_ctx->lease_type);
+ /*TODO:Should it consider lease id or client_uid?*/
+ break;
+
+ case GF_SET_LEASE:
+ if (__is_lease_grantable(this, lease_ctx, lease, inode)) {
+ __add_lease(frame, inode, lease_ctx, client_uid, lease);
+ ret = 0;
+ } else {
+                    gf_msg_debug(this->name, 0,
+ "Not granting the conflicting lease"
+ " request from %s on gfid(%s)",
+ client_uid, uuid_utoa(inode->gfid));
+ __recall_lease(this, lease_ctx);
+ ret = -1;
+ }
+ break;
+ case GF_UNLK_LEASE:
+ ret = __remove_lease(this, inode, lease_ctx, client_uid, lease);
+ if ((ret >= 0) && (lease_ctx->lease_cnt == 0)) {
+ pthread_mutex_unlock(&lease_ctx->lock);
+ goto unblock;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ }
+ pthread_mutex_unlock(&lease_ctx->lock);
+
+ return ret;
+
+unblock:
+ do_blocked_fops(this, lease_ctx);
+out:
+ return ret;
+}
+
+/* Returns _gf_true if the fop conflicts with an existing lease,
+ * _gf_false otherwise.
+ */
+gf_boolean_t
+__check_lease_conflict(call_frame_t *frame, lease_inode_ctx_t *lease_ctx,
+ const char *lease_id, gf_boolean_t is_write)
+{
+ gf_lease_types_t lease_type = {
+ 0,
+ };
+ gf_boolean_t conflicts = _gf_false;
+ lease_id_entry_t *lease_entry = NULL;
+
+ GF_VALIDATE_OR_GOTO("leases", frame, out);
+ GF_VALIDATE_OR_GOTO("leases", lease_ctx, out);
+
+ lease_type = lease_ctx->lease_type;
+
+    /* Rename and unlink conflict with the lease even if they come
+     * from the same client. TODO: revisit this?
+     */
+ if ((frame->root->op == GF_FOP_RENAME) ||
+ (frame->root->op == GF_FOP_UNLINK)) {
+ conflicts = _gf_true;
+ goto recall;
+ }
+
+    /* Internal fops are used to maintain data integrity and do not
+     * modify client data, so there is no need to conflict with them.
+     *
+     * @todo: as with locks, lease state also has to be handled by the
+     * rebalance or self-heal daemon process. */
+ if (frame->root->pid < 0) {
+ conflicts = _gf_false;
+ goto recall;
+ }
+
+ /* If lease_id is not sent, set conflicts = true if there is
+ * an existing lease */
+ if (!lease_id && (lease_ctx->lease_cnt > 0)) {
+ conflicts = _gf_true;
+ goto recall;
+ }
+
+ switch (lease_type) {
+ case (GF_RW_LEASE | GF_RD_LEASE):
+ case GF_RW_LEASE:
+ lease_entry = __get_lease_id_entry(lease_ctx, lease_id);
+ if (lease_entry && (lease_entry->lease_type & GF_RW_LEASE))
+ conflicts = _gf_false;
+ else
+ conflicts = _gf_true;
+ break;
+ case GF_RD_LEASE:
+ if (is_write && __another_lease_found(lease_ctx, lease_id))
+ conflicts = _gf_true;
+ else
+ conflicts = _gf_false;
+ break;
+ default:
+ break;
+ }
+
+recall:
+    /* If a conflict is found and a recall has not already been sent to
+     * all the clients, send a recall to each client holding a lease.
+     */
+ if (conflicts)
+ __recall_lease(frame->this, lease_ctx);
+out:
+ return conflicts;
+}
+
+/* Return values:
+ * -1 : error, unwind the fop
+ * WIND_FOP: No conflict, wind the fop
+ * BLOCK_FOP: Found a conflicting lease, block the fop
+ */
+int
+check_lease_conflict(call_frame_t *frame, inode_t *inode, const char *lease_id,
+ uint32_t fop_flags)
+{
+ lease_inode_ctx_t *lease_ctx = NULL;
+ gf_boolean_t is_blocking_fop = _gf_false;
+ gf_boolean_t is_write_fop = _gf_false;
+ gf_boolean_t conflicts = _gf_false;
+ int ret = WIND_FOP;
+
+ lease_ctx = lease_ctx_get(inode, frame->this);
+ if (!lease_ctx) {
+ gf_msg(frame->this->name, GF_LOG_WARNING, ENOMEM, LEASE_MSG_NO_MEM,
+ "Unable to create/get inode ctx");
+ ret = -1;
+ errno = ENOMEM;
+ goto out;
+ }
+
+ is_blocking_fop = ((fop_flags & BLOCKING_FOP) != 0);
+ is_write_fop = ((fop_flags & DATA_MODIFY_FOP) != 0);
+
+ pthread_mutex_lock(&lease_ctx->lock);
+ {
+ if (lease_ctx->lease_type == NONE) {
+ pthread_mutex_unlock(&lease_ctx->lock);
+ gf_msg_debug(frame->this->name, 0,
+ "No leases found continuing with the"
+                         "No leases found, continuing with the"
+ gf_fop_list[frame->root->op]);
+ ret = WIND_FOP;
+ goto out;
+ }
+ conflicts = __check_lease_conflict(frame, lease_ctx, lease_id,
+ is_write_fop);
+ if (conflicts) {
+ if (is_blocking_fop) {
+ gf_msg_debug(frame->this->name, 0,
+ "Fop: %s "
+ "conflicting existing "
+                             "lease: %d, blocking the "
+ "fop",
+ gf_fop_list[frame->root->op],
+ lease_ctx->lease_type);
+ ret = BLOCK_FOP;
+ } else {
+ gf_msg_debug(frame->this->name, 0,
+ "Fop: %s "
+ "conflicting existing "
+ "lease: %d, sending "
+ "EAGAIN",
+ gf_fop_list[frame->root->op],
+ lease_ctx->lease_type);
+ errno = EAGAIN;
+ ret = -1;
+ }
+ }
+ }
+ pthread_mutex_unlock(&lease_ctx->lock);
+out:
+ return ret;
+}
+
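+/* Drop all the leases held by the given client on this inode; if that was
+ * the last lease on the inode, resume the fops blocked on it. */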
+static int
+remove_clnt_leases(const char *client_uid, inode_t *inode, xlator_t *this)
+{
+ lease_inode_ctx_t *lease_ctx = NULL;
+ lease_id_entry_t *lease_entry = NULL;
+ lease_id_entry_t *tmp = NULL;
+ int ret = 0;
+ int i = 0;
+
+ lease_ctx = lease_ctx_get(inode, this);
+ if (!lease_ctx) {
+ gf_msg(this->name, GF_LOG_WARNING, ENOMEM, LEASE_MSG_INVAL_INODE_CTX,
+ "Unable to create/get inode ctx");
+ ret = -1;
+ errno = ENOMEM;
+ goto out;
+ }
+
+ pthread_mutex_lock(&lease_ctx->lock);
+ {
+ list_for_each_entry_safe(lease_entry, tmp, &lease_ctx->lease_id_list,
+ lease_id_list)
+ {
+ if (strcmp(client_uid, lease_entry->client_uid) == 0) {
+ for (i = 0; i < GF_LEASE_MAX_TYPE; i++) {
+ lease_ctx->lease_type_cnt[i] -= lease_entry
+ ->lease_type_cnt[i];
+ }
+ lease_ctx->lease_cnt -= lease_entry->lease_cnt;
+ __destroy_lease_id_entry(lease_entry);
+ if (lease_ctx->lease_cnt == 0) {
+ lease_ctx->blocked_fops_resuming = _gf_true;
+ pthread_mutex_unlock(&lease_ctx->lock);
+ goto unblock;
+ }
+ }
+ }
+ }
+ pthread_mutex_unlock(&lease_ctx->lock);
+out:
+ return ret;
+
+unblock:
+ do_blocked_fops(this, lease_ctx);
+ return ret;
+}
+
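+/* Called on client disconnect: remove the client from priv->client_list
+ * and drop all the leases it held on every inode it had leased. */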
+int
+cleanup_client_leases(xlator_t *this, const char *client_uid)
+{
+ lease_client_t *clnt = NULL;
+ lease_client_t *tmp = NULL;
+ struct list_head cleanup_list = {
+ 0,
+ };
+ lease_inode_t *l_inode = NULL;
+ lease_inode_t *tmp1 = NULL;
+ leases_private_t *priv = NULL;
+ int ret = 0;
+
+ priv = this->private;
+ if (!priv) {
+ ret = -1;
+ errno = EINVAL;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&cleanup_list);
+ pthread_mutex_lock(&priv->mutex);
+ {
+ list_for_each_entry_safe(clnt, tmp, &priv->client_list, client_list)
+ {
+ if ((strcmp(clnt->client_uid, client_uid) == 0)) {
+ list_for_each_entry_safe(l_inode, tmp1, &clnt->inode_list, list)
+ {
+ list_del_init(&l_inode->list);
+ list_add_tail(&l_inode->list, &cleanup_list);
+ }
+ __destroy_lease_client(clnt);
+ break;
+ }
+ }
+ }
+ pthread_mutex_unlock(&priv->mutex);
+
+ l_inode = tmp1 = NULL;
+ list_for_each_entry_safe(l_inode, tmp1, &cleanup_list, list)
+ {
+ remove_clnt_leases(client_uid, l_inode->inode, this);
+ __destroy_lease_inode(l_inode);
+ }
+out:
+ return ret;
+}
+
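+/* Forcefully drop every lease on the inode and reset the lease state in
+ * the inode ctx. Called with lease_ctx->lock held. */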
+static void
+__remove_all_leases(xlator_t *this, lease_inode_ctx_t *lease_ctx)
+{
+ int i = 0;
+ lease_id_entry_t *lease_entry = NULL;
+ lease_id_entry_t *tmp = NULL;
+
+ if (lease_ctx->lease_cnt == 0) {
+ /* No leases to remove. Return */
+ return;
+ }
+ __dump_leases_info(this, lease_ctx);
+
+ list_for_each_entry_safe(lease_entry, tmp, &lease_ctx->lease_id_list,
+ lease_id_list)
+ {
+ lease_entry->lease_cnt = 0;
+ remove_from_clnt_list(this, lease_entry->client_uid, lease_ctx->inode);
+ __destroy_lease_id_entry(lease_entry);
+ }
+ INIT_LIST_HEAD(&lease_ctx->lease_id_list);
+ for (i = 0; i <= GF_LEASE_MAX_TYPE; i++)
+ lease_ctx->lease_type_cnt[i] = 0;
+ lease_ctx->lease_type = 0;
+ lease_ctx->lease_cnt = 0;
+ lease_ctx->recall_in_progress = _gf_false;
+ lease_ctx->timer = NULL;
+ lease_ctx->blocked_fops_resuming = _gf_true;
+
+ /* TODO:
+ * - Mark the corresponding fd bad. Could be done on client side
+ * as a result of recall
+ * - Free the lease_ctx
+ */
+ return;
+}
+
+static int
+remove_all_leases(xlator_t *this, inode_t *inode)
+{
+ lease_inode_ctx_t *lease_ctx = NULL;
+ int ret = 0;
+
+ GF_VALIDATE_OR_GOTO("leases", inode, out);
+
+ lease_ctx = lease_ctx_get(inode, this);
+ if (!lease_ctx) {
+ gf_msg(this->name, GF_LOG_WARNING, ENOMEM, LEASE_MSG_INVAL_INODE_CTX,
+ "Unable to create/get inode ctx");
+ ret = -1;
+ errno = ENOMEM;
+ goto out;
+ }
+
+ pthread_mutex_lock(&lease_ctx->lock);
+ {
+ __remove_all_leases(this, lease_ctx);
+ }
+ pthread_mutex_unlock(&lease_ctx->lock);
+
+ do_blocked_fops(this, lease_ctx);
+out:
+ return ret;
+}
+
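+/* Worker thread: waits on priv->recall_list for inodes whose recall timer
+ * has expired and removes all the leases on them, which also resumes any
+ * blocked fops. */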
+void *
+expired_recall_cleanup(void *data)
+{
+ struct timespec sleep_till = {
+ 0,
+ };
+ struct list_head recall_cleanup_list;
+ lease_inode_t *recall_entry = NULL;
+ lease_inode_t *tmp = NULL;
+ leases_private_t *priv = NULL;
+ xlator_t *this = NULL;
+ time_t time_now;
+
+ GF_VALIDATE_OR_GOTO("leases", data, out);
+
+ this = data;
+ priv = this->private;
+
+ gf_msg_debug(this->name, 0, "Started the expired_recall_cleanup thread");
+
+ while (1) {
+ time_now = gf_time();
+ pthread_mutex_lock(&priv->mutex);
+ {
+ if (priv->fini) {
+ pthread_mutex_unlock(&priv->mutex);
+ goto out;
+ }
+ INIT_LIST_HEAD(&recall_cleanup_list);
+ if (list_empty(&priv->recall_list)) {
+ sleep_till.tv_sec = time_now + 600;
+ pthread_cond_timedwait(&priv->cond, &priv->mutex, &sleep_till);
+ }
+ if (!list_empty(&priv->recall_list)) {
+ gf_msg_debug(this->name, 0, "Found expired recalls");
+ list_for_each_entry_safe(recall_entry, tmp, &priv->recall_list,
+ list)
+ {
+ list_del_init(&recall_entry->list);
+ list_add_tail(&recall_entry->list, &recall_cleanup_list);
+ }
+ }
+ }
+ pthread_mutex_unlock(&priv->mutex);
+
+ recall_entry = tmp = NULL;
+ list_for_each_entry_safe(recall_entry, tmp, &recall_cleanup_list, list)
+ {
+ gf_msg_debug(this->name, 0,
+ "Recall lease was sent on"
+ " inode:%p, recall timer has expired"
+ " and clients haven't unlocked the lease"
+                         " and clients haven't unlocked the lease,"
+ recall_entry->inode);
+ remove_all_leases(this, recall_entry->inode);
+ /* no need to take priv->mutex lock as this entry
+ * reference is removed from global recall list. */
+ __destroy_lease_inode(recall_entry);
+ }
+ }
+
+out:
+ return NULL;
+}
diff --git a/xlators/features/leases/src/leases-mem-types.h b/xlators/features/leases/src/leases-mem-types.h
new file mode 100644
index 00000000000..25664b44156
--- /dev/null
+++ b/xlators/features/leases/src/leases-mem-types.h
@@ -0,0 +1,27 @@
+/*
+ Copyright (c) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __LEASES_MEM_TYPES_H__
+#define __LEASES_MEM_TYPES_H__
+
+#include <glusterfs/mem-types.h>
+
+enum gf_leases_mem_types_ {
+ gf_leases_mt_private_t = gf_common_mt_end + 1,
+ gf_leases_mt_lease_client_t,
+ gf_leases_mt_lease_inode_t,
+ gf_leases_mt_fd_ctx_t,
+ gf_leases_mt_lease_inode_ctx_t,
+ gf_leases_mt_lease_id_entry_t,
+ gf_leases_mt_fop_stub_t,
+ gf_leases_mt_timer_data_t,
+ gf_leases_mt_end
+};
+#endif
diff --git a/xlators/features/leases/src/leases-messages.h b/xlators/features/leases/src/leases-messages.h
new file mode 100644
index 00000000000..da696b832de
--- /dev/null
+++ b/xlators/features/leases/src/leases-messages.h
@@ -0,0 +1,33 @@
+/*
+ Copyright (c) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+ */
+
+#ifndef _LEASES_MESSAGES_H_
+#define _LEASES_MESSAGES_H_
+
+#include <glusterfs/glfs-message-id.h>
+
+/* To add new message IDs, append new identifiers at the end of the list.
+ *
+ * Never remove a message ID. If it's not used anymore, you can rename it or
+ * leave it as it is, but not delete it. This is to prevent reutilization of
+ * IDs by other messages.
+ *
+ * The component name must match one of the entries defined in
+ * glfs-message-id.h.
+ */
+
+GLFS_MSGID(LEASES, LEASE_MSG_NO_MEM, LEASE_MSG_RECALL_FAIL,
+ LEASE_MSG_INVAL_LEASE_ID, LEASE_MSG_INVAL_UNLK_LEASE,
+ LEASE_MSG_INVAL_INODE_CTX, LEASE_MSG_NOT_ENABLED,
+ LEASE_MSG_NO_TIMER_WHEEL, LEASE_MSG_CLNT_NOTFOUND,
+ LEASE_MSG_INODE_NOTFOUND, LEASE_MSG_INVAL_FD_CTX,
+ LEASE_MSG_INVAL_LEASE_TYPE);
+
+#endif /* !_LEASES_MESSAGES_H_ */
diff --git a/xlators/features/leases/src/leases.c b/xlators/features/leases/src/leases.c
new file mode 100644
index 00000000000..04bee50ba3f
--- /dev/null
+++ b/xlators/features/leases/src/leases.c
@@ -0,0 +1,1168 @@
+/*
+ Copyright (c) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include "leases.h"
+
+int32_t
+leases_open_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(open, frame, op_ret, op_errno, fd, xdata);
+
+ return 0;
+}
+
+int32_t
+leases_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ fd_t *fd, dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ int32_t op_errno = EINVAL;
+ int ret = 0;
+ lease_fd_ctx_t *fd_ctx = NULL;
+ char *lease_id = NULL;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ fd_ctx = GF_CALLOC(1, sizeof(*fd_ctx), gf_leases_mt_fd_ctx_t);
+ if (!fd_ctx) {
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ fd_ctx->client_uid = gf_strdup(frame->root->client->client_uid);
+ if (!fd_ctx->client_uid) {
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ GET_FLAGS(frame->root->op, flags);
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ if (lease_id != NULL)
+ memcpy(fd_ctx->lease_id, lease_id, LEASE_ID_SIZE);
+ else
+ memset(fd_ctx->lease_id, 0, LEASE_ID_SIZE);
+
+ ret = fd_ctx_set(fd, this, (uint64_t)(uintptr_t)fd_ctx);
+ if (ret) {
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ ret = check_lease_conflict(frame, fd->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(fd->inode, open, frame, this, loc, flags, fd, xdata);
+ return 0;
+
+out:
+ STACK_WIND(frame, leases_open_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->open, loc, flags, fd, xdata);
+ return 0;
+
+err:
+ if (fd_ctx) {
+ GF_FREE(fd_ctx->client_uid);
+ GF_FREE(fd_ctx);
+ }
+
+ STACK_UNWIND_STRICT(open, frame, -1, op_errno, NULL, NULL);
+ return 0;
+}
+
+int32_t
+leases_writev_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, struct iatt *prebuf, struct iatt *postbuf,
+ dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(writev, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+
+ return 0;
+}
+
+int32_t
+leases_writev(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iovec *vector, int count, off_t off, uint32_t flags,
+ struct iobref *iobref, dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS(frame->root->op, fd->flags);
+
+ ret = check_lease_conflict(frame, fd->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(fd->inode, writev, frame, this, fd, vector, count, off,
+ flags, iobref, xdata);
+ return 0;
+
+out:
+ STACK_WIND(frame, leases_writev_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->writev, fd, vector, count, off, flags,
+ iobref, xdata);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(writev, frame, -1, errno, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+leases_readv_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, struct iovec *vector, int count,
+ struct iatt *stbuf, struct iobref *iobref, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(readv, frame, op_ret, op_errno, vector, count, stbuf,
+ iobref, xdata);
+
+ return 0;
+}
+
+int32_t
+leases_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS(frame->root->op, fd->flags);
+
+ ret = check_lease_conflict(frame, fd->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(fd->inode, readv, frame, this, fd, size, offset, flags,
+ xdata);
+ return 0;
+
+out:
+ STACK_WIND(frame, leases_readv_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readv, fd, size, offset, flags, xdata);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(readv, frame, -1, errno, NULL, 0, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+leases_lk_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct gf_flock *lock, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(lk, frame, op_ret, op_errno, lock, xdata);
+
+ return 0;
+}
+
+int32_t
+leases_lk(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t cmd,
+ struct gf_flock *flock, dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS_LK(cmd, flock->l_type, fd->flags);
+
+ ret = check_lease_conflict(frame, fd->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(fd->inode, lk, frame, this, fd, cmd, flock, xdata);
+ return 0;
+
+out:
+ STACK_WIND(frame, leases_lk_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lk, fd, cmd, flock, xdata);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(lk, frame, -1, errno, NULL, NULL);
+ return 0;
+}
+
+int32_t
+leases_lease(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ struct gf_lease *lease, dict_t *xdata)
+{
+ int32_t op_errno = 0;
+ int ret = 0;
+ struct gf_lease nullease = {
+ 0,
+ };
+ int32_t op_ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ ret = process_lease_req(frame, this, loc->inode, lease);
+ if (ret < 0) {
+ op_errno = -ret;
+ op_ret = -1;
+ }
+ goto unwind;
+
+out:
+ gf_msg(this->name, GF_LOG_ERROR, EINVAL, LEASE_MSG_NOT_ENABLED,
+ "\"features/leases\" translator is not enabled. "
+ "You need to enable it for proper functioning of your "
+ "application");
+ op_errno = ENOSYS;
+ op_ret = -1;
+
+unwind:
+ STACK_UNWIND_STRICT(lease, frame, op_ret, op_errno,
+ (op_errno == ENOSYS) ? &nullease : lease, xdata);
+ return 0;
+}
+
+int32_t
+leases_truncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(truncate, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+
+ return 0;
+}
+
+int32_t
+leases_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset,
+ dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS(frame->root->op, 0);
+
+ ret = check_lease_conflict(frame, loc->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(loc->inode, truncate, frame, this, loc, offset, xdata);
+ return 0;
+
+out:
+ STACK_WIND(frame, leases_truncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->truncate, loc, offset, xdata);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(truncate, frame, -1, errno, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+leases_setattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, struct iatt *statpre,
+ struct iatt *statpost, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(setattr, frame, op_ret, op_errno, statpre, statpost,
+ xdata);
+
+ return 0;
+}
+
+int32_t
+leases_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ struct iatt *stbuf, int32_t valid, dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS(frame->root->op, 0);
+
+ ret = check_lease_conflict(frame, loc->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(loc->inode, setattr, frame, this, loc, stbuf, valid, xdata);
+ return 0;
+
+out:
+ STACK_WIND(frame, leases_setattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setattr, loc, stbuf, valid, xdata);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(setattr, frame, -1, errno, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+leases_rename_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *stbuf,
+ struct iatt *preoldparent, struct iatt *postoldparent,
+ struct iatt *prenewparent, struct iatt *postnewparent,
+ dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(rename, frame, op_ret, op_errno, stbuf, preoldparent,
+ postoldparent, prenewparent, postnewparent, xdata);
+
+ return 0;
+}
+
+int32_t
+leases_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+    /* TODO: should the lease also be checked for newloc? */
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS(frame->root->op, 0);
+
+ ret = check_lease_conflict(frame, oldloc->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(oldloc->inode, rename, frame, this, oldloc, newloc, xdata);
+ return 0;
+
+out:
+ STACK_WIND(frame, leases_rename_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rename, oldloc, newloc, xdata);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(rename, frame, -1, errno, NULL, NULL, NULL, NULL, NULL,
+ NULL);
+ return 0;
+}
+
+int32_t
+leases_unlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, struct iatt *preparent, struct iatt *postparent,
+ dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(unlink, frame, op_ret, op_errno, preparent, postparent,
+ xdata);
+
+ return 0;
+}
+
+int32_t
+leases_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflag,
+ dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS(frame->root->op, 0);
+
+ ret = check_lease_conflict(frame, loc->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(loc->inode, unlink, frame, this, loc, xflag, xdata);
+ return 0;
+
+out:
+ STACK_WIND(frame, leases_unlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->unlink, loc, xflag, xdata);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(unlink, frame, -1, errno, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+leases_link_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, inode_t *inode, struct iatt *stbuf,
+ struct iatt *preparent, struct iatt *postparent, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(link, frame, op_ret, op_errno, inode, stbuf, preparent,
+ postparent, xdata);
+
+ return 0;
+}
+
+int32_t
+leases_link(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS(frame->root->op, 0);
+
+ ret = check_lease_conflict(frame, oldloc->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(oldloc->inode, link, frame, this, oldloc, newloc, xdata);
+ return 0;
+out:
+ STACK_WIND(frame, leases_link_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->link, oldloc, newloc, xdata);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(link, frame, -1, errno, NULL, NULL, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+leases_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, fd_t *fd, inode_t *inode, struct iatt *stbuf,
+ struct iatt *preparent, struct iatt *postparent,
+ dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(create, frame, op_ret, op_errno, fd, inode, stbuf,
+ preparent, postparent, xdata);
+
+ return 0;
+}
+
+int32_t
+leases_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ mode_t mode, mode_t umask, fd_t *fd, dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS(frame->root->op, flags);
+
+ ret = check_lease_conflict(frame, fd->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(fd->inode, create, frame, this, loc, flags, mode, umask, fd,
+ xdata);
+ return 0;
+
+out:
+ STACK_WIND(frame, leases_create_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->create, loc, flags, mode, umask, fd,
+ xdata);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(create, frame, -1, errno, NULL, NULL, NULL, NULL, NULL,
+ NULL);
+ return 0;
+}
+
+int32_t
+leases_fsync_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(fsync, frame, op_ret, op_errno, prebuf, postbuf, xdata);
+ return 0;
+}
+
+int32_t
+leases_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags,
+ dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS(frame->root->op, fd->flags);
+
+ ret = check_lease_conflict(frame, fd->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(fd->inode, fsync, frame, this, fd, flags, xdata);
+ return 0;
+
+out:
+ STACK_WIND(frame, leases_fsync_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsync, fd, flags, xdata);
+ return 0;
+err:
+ STACK_UNWIND_STRICT(fsync, frame, -1, errno, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+leases_ftruncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(ftruncate, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+ return 0;
+}
+
+int32_t
+leases_ftruncate(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS(frame->root->op, 0); /* TODO:fd->flags?*/
+
+ ret = check_lease_conflict(frame, fd->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(fd->inode, ftruncate, frame, this, fd, offset, xdata);
+ return 0;
+
+out:
+ STACK_WIND(frame, leases_ftruncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->ftruncate, fd, offset, xdata);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(ftruncate, frame, -1, errno, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+leases_fsetattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *statpre,
+ struct iatt *statpost, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(fsetattr, frame, op_ret, op_errno, statpre, statpost,
+ xdata);
+ return 0;
+}
+
+int32_t
+leases_fsetattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iatt *stbuf, int32_t valid, dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS(frame->root->op, fd->flags);
+
+ ret = check_lease_conflict(frame, fd->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(fd->inode, fsetattr, frame, this, fd, stbuf, valid, xdata);
+ return 0;
+
+out:
+ STACK_WIND(frame, leases_fsetattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetattr, fd, stbuf, valid, xdata);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(fsetattr, frame, -1, errno, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+leases_fallocate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *pre,
+ struct iatt *post, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(fallocate, frame, op_ret, op_errno, pre, post, xdata);
+
+ return 0;
+}
+
+int32_t
+leases_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t mode,
+ off_t offset, size_t len, dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS(frame->root->op, fd->flags);
+
+ ret = check_lease_conflict(frame, fd->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(fd->inode, fallocate, frame, this, fd, mode, offset, len,
+ xdata);
+ return 0;
+
+out:
+ STACK_WIND(frame, leases_fallocate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fallocate, fd, mode, offset, len,
+ xdata);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(fallocate, frame, -1, errno, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+leases_discard_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *pre,
+ struct iatt *post, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(discard, frame, op_ret, op_errno, pre, post, xdata);
+
+ return 0;
+}
+
+int32_t
+leases_discard(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ size_t len, dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS(frame->root->op, fd->flags);
+
+ ret = check_lease_conflict(frame, fd->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(fd->inode, discard, frame, this, fd, offset, len, xdata);
+ return 0;
+
+out:
+ STACK_WIND(frame, leases_discard_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->discard, fd, offset, len, xdata);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(discard, frame, -1, errno, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+leases_zerofill_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *pre,
+ struct iatt *post, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(zerofill, frame, op_ret, op_errno, pre, post, xdata);
+
+ return 0;
+}
+
+int
+leases_zerofill(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ off_t len, dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS(frame->root->op, fd->flags);
+
+ ret = check_lease_conflict(frame, fd->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(fd->inode, zerofill, frame, this, fd, offset, len, xdata);
+ return 0;
+
+out:
+ STACK_WIND(frame, leases_zerofill_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->zerofill, fd, offset, len, xdata);
+ return 0;
+
+err:
+ STACK_UNWIND_STRICT(zerofill, frame, -1, errno, NULL, NULL, NULL);
+ return 0;
+}
+
+int
+leases_flush_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(flush, frame, op_ret, op_errno, xdata);
+
+ return 0;
+}
+
+int
+leases_flush(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+{
+ uint32_t fop_flags = 0;
+ char *lease_id = NULL;
+ int ret = 0;
+ lease_fd_ctx_t *fd_ctx = NULL;
+ uint64_t ctx = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+ EXIT_IF_INTERNAL_FOP(frame, xdata, out);
+
+ GET_LEASE_ID(xdata, lease_id, frame->root->client->client_uid);
+ GET_FLAGS(frame->root->op, fd->flags);
+
+ ret = check_lease_conflict(frame, fd->inode, lease_id, fop_flags);
+ if (ret < 0)
+ goto err;
+ else if (ret == BLOCK_FOP)
+ goto block;
+ else if (ret == WIND_FOP)
+ goto out;
+
+block:
+ LEASE_BLOCK_FOP(fd->inode, flush, frame, this, fd, xdata);
+ return 0;
+
+out:
+    /*
+     * Currently release is not called after the close fop from the
+     * application, hence the lease fd ctx is reset here.
+     * This is not really the right way, since flush can be called
+     * from places other than the close op.
+     * TODO:
+     * - Either identify whether the flush was triggered by a close
+     *   call on the fd from the application,
+     * OR
+     * - Find out why release is not called after the last close call.
+     */
+ ret = fd_ctx_get(fd, this, &ctx);
+ if (ret == 0) {
+ fd_ctx = (lease_fd_ctx_t *)(long)ctx;
+ if (fd_ctx->client_uid) {
+ GF_FREE(fd_ctx->client_uid);
+ fd_ctx->client_uid = NULL;
+ }
+ memset(fd_ctx->lease_id, 0, LEASE_ID_SIZE);
+ }
+ STACK_WIND(frame, leases_flush_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->flush, fd, xdata);
+ return 0;
+
+err:
+    STACK_UNWIND_STRICT(flush, frame, -1, errno, NULL);
+ return 0;
+}
+
+int32_t
+mem_acct_init(xlator_t *this)
+{
+ int ret = -1;
+
+ if (!this)
+ return ret;
+
+ ret = xlator_mem_acct_init(this, gf_leases_mt_end + 1);
+
+ if (ret != 0) {
+ gf_msg(this->name, GF_LOG_WARNING, ENOMEM, LEASE_MSG_NO_MEM,
+ "mem account init failed");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+leases_init_priv(xlator_t *this)
+{
+ int ret = 0;
+ leases_private_t *priv = NULL;
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+ if (!priv->timer_wheel) {
+ priv->timer_wheel = glusterfs_ctx_tw_get(this->ctx);
+ if (!priv->timer_wheel) {
+ ret = -1;
+ goto out;
+ }
+ }
+
+ if (!priv->inited_recall_thr) {
+ ret = gf_thread_create(&priv->recall_thr, NULL, expired_recall_cleanup,
+ this, "leasercl");
+ if (!ret)
+ priv->inited_recall_thr = _gf_true;
+ }
+
+out:
+ return ret;
+}
+
+int
+reconfigure(xlator_t *this, dict_t *options)
+{
+ leases_private_t *priv = NULL;
+ int ret = -1;
+
+ priv = this->private;
+ GF_ASSERT(priv);
+
+    /* TODO: On reconfigure, enabling leases is not an issue, but
+     * disabling them involves more work: recalling all existing
+     * leases, waiting for all of them to be unlocked, etc. Hence
+     * reconfigure of this option is not supported for now.
+
+ GF_OPTION_RECONF ("leases", priv->leases_enabled,
+ options, bool, out);
+
+ if (priv->leases_enabled) {
+ ret = leases_init_priv (this);
+ if (ret)
+ goto out;
+ }
+ */
+
+ GF_OPTION_RECONF("lease-lock-recall-timeout", priv->recall_lease_timeout,
+ options, int32, out);
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+init(xlator_t *this)
+{
+ int ret = -1;
+ leases_private_t *priv = NULL;
+
+ priv = GF_CALLOC(1, sizeof(*priv), gf_leases_mt_private_t);
+ if (!priv) {
+ gf_msg(this->name, GF_LOG_WARNING, ENOMEM, LEASE_MSG_NO_MEM,
+ "Leases init failed");
+ goto out;
+ }
+
+ GF_OPTION_INIT("leases", priv->leases_enabled, bool, out);
+ GF_OPTION_INIT("lease-lock-recall-timeout", priv->recall_lease_timeout,
+ int32, out);
+ pthread_mutex_init(&priv->mutex, NULL);
+ INIT_LIST_HEAD(&priv->client_list);
+ INIT_LIST_HEAD(&priv->recall_list);
+
+ this->private = priv;
+
+ if (priv->leases_enabled) {
+ ret = leases_init_priv(this);
+ if (ret)
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (ret) {
+ GF_FREE(priv);
+ this->private = NULL;
+ }
+
+ return ret;
+}
+
+void
+fini(xlator_t *this)
+{
+ leases_private_t *priv = NULL;
+
+ priv = this->private;
+ if (!priv) {
+ return;
+ }
+ this->private = NULL;
+
+ priv->fini = _gf_true;
+ pthread_cond_broadcast(&priv->cond);
+ if (priv->recall_thr) {
+ gf_thread_cleanup_xint(priv->recall_thr);
+ priv->recall_thr = 0;
+ priv->inited_recall_thr = _gf_false;
+ }
+
+ if (priv->timer_wheel) {
+ glusterfs_ctx_tw_put(this->ctx);
+ }
+
+ GF_FREE(priv);
+ return;
+}
+
+static int
+leases_forget(xlator_t *this, inode_t *inode)
+{
+ /* TODO:leases_cleanup_inode_ctx (this, inode); */
+ return 0;
+}
+
+static int
+leases_release(xlator_t *this, fd_t *fd)
+{
+ int ret = -1;
+ uint64_t tmp = 0;
+ lease_fd_ctx_t *fd_ctx = NULL;
+
+ if (fd == NULL) {
+ goto out;
+ }
+
+ gf_log(this->name, GF_LOG_TRACE, "Releasing all leases with fd %p", fd);
+
+ ret = fd_ctx_del(fd, this, &tmp);
+ if (ret) {
+ gf_log(this->name, GF_LOG_DEBUG, "Could not get fdctx");
+ goto out;
+ }
+
+ fd_ctx = (lease_fd_ctx_t *)(long)tmp;
+ if (fd_ctx)
+ GF_FREE(fd_ctx);
+out:
+ return ret;
+}
+
+static int
+leases_clnt_disconnect_cbk(xlator_t *this, client_t *client)
+{
+ int ret = 0;
+
+ EXIT_IF_LEASES_OFF(this, out);
+
+ ret = cleanup_client_leases(this, client->client_uid);
+out:
+ return ret;
+}
+
+struct xlator_fops fops = {
+ /* Metadata modifying fops */
+ .fsetattr = leases_fsetattr,
+ .setattr = leases_setattr,
+
+ /* File Data reading fops */
+ .open = leases_open,
+ .readv = leases_readv,
+
+ /* File Data modifying fops */
+ .truncate = leases_truncate,
+ .ftruncate = leases_ftruncate,
+ .writev = leases_writev,
+ .zerofill = leases_zerofill,
+ .fallocate = leases_fallocate,
+ .discard = leases_discard,
+ .lk = leases_lk,
+ .fsync = leases_fsync,
+ .flush = leases_flush,
+ .lease = leases_lease,
+
+ /* Directory Data modifying fops */
+ .create = leases_create,
+ .rename = leases_rename,
+ .unlink = leases_unlink,
+ .link = leases_link,
+
+#ifdef NOT_SUPPORTED
+ /* internal lk fops */
+ .inodelk = leases_inodelk,
+ .finodelk = leases_finodelk,
+ .entrylk = leases_entrylk,
+ .fentrylk = leases_fentrylk,
+
+ /* Internal special fops*/
+ .xattrop = leases_xattrop,
+ .fxattrop = leases_fxattrop,
+#endif
+};
+
+struct xlator_cbks cbks = {
+ .forget = leases_forget,
+ .release = leases_release,
+ .client_disconnect = leases_clnt_disconnect_cbk,
+};
+
+struct volume_options options[] = {
+ {.key = {"leases"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .op_version = {GD_OP_VERSION_3_8_0},
+ .flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .description = "When \"on\", enables leases support"},
+ {.key = {"lease-lock-recall-timeout"},
+ .type = GF_OPTION_TYPE_INT,
+ .default_value = RECALL_LEASE_LK_TIMEOUT,
+ .op_version = {GD_OP_VERSION_3_8_0},
+ .flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .description = "After 'timeout' seconds since the recall_lease"
+ " request has been sent to the client, the lease lock"
+ " will be forcefully purged by the server."},
+ {.key = {NULL}},
+};
+
+xlator_api_t xlator_api = {
+ .init = init,
+ .fini = fini,
+ .reconfigure = reconfigure,
+ .mem_acct_init = mem_acct_init,
+ .op_version = {1}, /* Present from the initial version */
+ .fops = &fops,
+ .cbks = &cbks,
+ .options = options,
+ .identifier = "leases",
+ .category = GF_MAINTAINED,
+};
diff --git a/xlators/features/leases/src/leases.h b/xlators/features/leases/src/leases.h
new file mode 100644
index 00000000000..a6e8a6824cc
--- /dev/null
+++ b/xlators/features/leases/src/leases.h
@@ -0,0 +1,259 @@
+/*
+ Copyright (c) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _LEASES_H
+#define _LEASES_H
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <glusterfs/common-utils.h>
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/client_t.h>
+#include <glusterfs/lkowner.h>
+#include <glusterfs/locking.h>
+#include <glusterfs/upcall-utils.h>
+#include "timer-wheel.h"
+#include "leases-mem-types.h"
+#include "leases-messages.h"
+
+/* The time period for which a client lease lock will be stored after it has
+ * been recalled for the first time. */
+#define RECALL_LEASE_LK_TIMEOUT "60"
+
+#define DATA_MODIFY_FOP 0x0001
+#define BLOCKING_FOP 0x0002
+
+#define BLOCK_FOP 0x0001
+#define WIND_FOP 0x0002
+
+#define EXIT_IF_LEASES_OFF(this, label) \
+ do { \
+ if (!is_leases_enabled(this)) \
+ goto label; \
+ } while (0)
+
+#define EXIT_IF_INTERNAL_FOP(frame, xdata, label) \
+ do { \
+ if (frame->root->pid < 0) \
+ goto label; \
+ if (xdata && dict_get(xdata, GLUSTERFS_INTERNAL_FOP_KEY)) \
+ goto label; \
+ } while (0)
+
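+/* Fetch the lease-id sent by the client in xdata; lease_id is left NULL if
+ * the client did not set one. */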
+#define GET_LEASE_ID(xdata, lease_id, client_uid) \
+ do { \
+ int ret_val = -1; \
+ ret_val = dict_get_bin(xdata, "lease-id", (void **)&lease_id); \
+ if (ret_val) { \
+ ret_val = 0; \
+ gf_msg_debug("leases", 0, "Lease id is not set for client:%s", \
+ client_uid); \
+ } \
+ } while (0)
+
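+/* Classify the current fop: sets DATA_MODIFY_FOP in fop_flags if the fop
+ * can modify data/metadata, and BLOCKING_FOP if the fop may be blocked
+ * until a conflicting lease is recalled. */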
+#define GET_FLAGS(fop, fd_flags) \
+ do { \
+ if ((fd_flags & (O_WRONLY | O_RDWR)) && fop == GF_FOP_OPEN) \
+ fop_flags = DATA_MODIFY_FOP; \
+ \
+ if (fop == GF_FOP_UNLINK || fop == GF_FOP_RENAME || \
+ fop == GF_FOP_TRUNCATE || fop == GF_FOP_FTRUNCATE || \
+ fop == GF_FOP_FLUSH || fop == GF_FOP_FSYNC || \
+ fop == GF_FOP_WRITE || fop == GF_FOP_FALLOCATE || \
+ fop == GF_FOP_DISCARD || fop == GF_FOP_ZEROFILL || \
+ fop == GF_FOP_SETATTR || fop == GF_FOP_FSETATTR || \
+ fop == GF_FOP_LINK) \
+ fop_flags = DATA_MODIFY_FOP; \
+ \
+ if (!(fd_flags & (O_NONBLOCK | O_NDELAY))) \
+ fop_flags |= BLOCKING_FOP; \
+ \
+ } while (0)
+
+#define GET_FLAGS_LK(cmd, l_type, fd_flags) \
+ do { \
+ /* TODO: handle F_RESLK_LCK and other glusterfs_lk_recovery_cmds_t */ \
+ if ((cmd == F_SETLKW || cmd == F_SETLKW64 || cmd == F_SETLK || \
+ cmd == F_SETLK64) && \
+ l_type == F_WRLCK) \
+ fop_flags = DATA_MODIFY_FOP; \
+ \
+ if (fd_flags & (O_NONBLOCK | O_NDELAY) && \
+ (cmd == F_SETLKW || cmd == F_SETLKW64)) \
+ fop_flags |= BLOCKING_FOP; \
+ \
+ } while (0)
+
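+/* Create a call stub for the fop and park it on the inode's blocked_list;
+ * the stub is resumed by do_blocked_fops() once the conflicting lease is
+ * released or purged. */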
+#define LEASE_BLOCK_FOP(inode, fop_name, frame, this, params...) \
+ do { \
+ call_stub_t *__stub = NULL; \
+ fop_stub_t *blk_fop = NULL; \
+ lease_inode_ctx_t *lease_ctx = NULL; \
+ \
+ __stub = fop_##fop_name##_stub(frame, default_##fop_name##_resume, \
+ params); \
+ if (!__stub) { \
+ gf_msg(this->name, GF_LOG_WARNING, ENOMEM, LEASE_MSG_NO_MEM, \
+ "Unable to create stub"); \
+ ret = -ENOMEM; \
+ goto __out; \
+ } \
+ \
+ blk_fop = GF_CALLOC(1, sizeof(*blk_fop), gf_leases_mt_fop_stub_t); \
+ if (!blk_fop) { \
+ gf_msg(this->name, GF_LOG_WARNING, ENOMEM, LEASE_MSG_NO_MEM, \
+ "Unable to create lease fop stub"); \
+ ret = -ENOMEM; \
+ goto __out; \
+ } \
+ \
+ lease_ctx = lease_ctx_get(inode, this); \
+ if (!lease_ctx) { \
+ gf_msg(this->name, GF_LOG_WARNING, ENOMEM, LEASE_MSG_NO_MEM, \
+ "Unable to create/get inode ctx"); \
+ ret = -ENOMEM; \
+ goto __out; \
+ } \
+ \
+ blk_fop->stub = __stub; \
+ pthread_mutex_lock(&lease_ctx->lock); \
+ { \
+            /* TODO: If the lease was unlocked between the lease conflict    \
+             * check and now, this fop shouldn't be added to the blocked     \
+             * fop list; a generation number could be used for this. */      \
+ list_add_tail(&blk_fop->list, &lease_ctx->blocked_list); \
+ } \
+ pthread_mutex_unlock(&lease_ctx->lock); \
+ \
+ __out: \
+ if (ret < 0) { \
+ gf_msg(this->name, GF_LOG_WARNING, ENOMEM, LEASE_MSG_NO_MEM, \
+ "Unable to create stub for blocking the fop:%s (%s)", \
+ gf_fop_list[frame->root->op], strerror(ENOMEM)); \
+ if (__stub != NULL) { \
+ call_stub_destroy(__stub); \
+ } \
+ GF_FREE(blk_fop); \
+ goto err; \
+ } \
+ } while (0)
+
+struct _leases_private {
+ struct list_head client_list;
+ struct list_head recall_list;
+ struct tvec_base *timer_wheel; /* timer wheel where the recall request
+                                      is queued and waits for unlock/expiry */
+ pthread_t recall_thr;
+ pthread_mutex_t mutex;
+ pthread_cond_t cond;
+ int32_t recall_lease_timeout;
+ gf_boolean_t inited_recall_thr;
+ gf_boolean_t fini;
+ gf_boolean_t leases_enabled;
+
+ char _pad[1]; /* manual padding */
+};
+typedef struct _leases_private leases_private_t;
+
+struct _lease_client {
+ char *client_uid;
+ struct list_head client_list;
+ struct list_head inode_list;
+};
+typedef struct _lease_client lease_client_t;
+
+struct _lease_inode {
+ inode_t *inode;
+ struct list_head
+ list; /* This can be part of both inode_list and recall_list */
+};
+typedef struct _lease_inode lease_inode_t;
+
+struct _lease_fd_ctx {
+ char *client_uid;
+ char lease_id[LEASE_ID_SIZE];
+};
+typedef struct _lease_fd_ctx lease_fd_ctx_t;
+
+struct _lease_inode_ctx {
+ struct list_head lease_id_list; /* clients that have taken leases */
+ int lease_type_cnt[GF_LEASE_MAX_TYPE + 1];
+ uint64_t lease_cnt; /* Total number of leases on this inode */
+ uint64_t openfd_cnt; /* number of fds open */
+ struct list_head blocked_list; /* List of fops blocked until the
+ lease recall is complete */
+ inode_t *inode; /* this represents the inode on which the
+ lock was taken, required mainly during
+ disconnect cleanup */
+ struct gf_tw_timer_list *timer;
+ pthread_mutex_t lock;
+ int lease_type; /* Types of leases acquired */
+ gf_boolean_t recall_in_progress; /* if lease recall is sent on this inode */
+ gf_boolean_t blocked_fops_resuming; /* if blocked fops are being resumed */
+
+ char _pad[2]; /* manual padding */
+};
+typedef struct _lease_inode_ctx lease_inode_ctx_t;
+
+struct _lease_id_entry {
+ struct list_head lease_id_list;
+ char lease_id[LEASE_ID_SIZE];
+ char *client_uid; /* uid of the client that has
+ taken the lease */
+ int lease_type_cnt[GF_LEASE_MAX_TYPE + 1]; /* count of each lease type */
+ uint64_t lease_cnt; /* Number of leases taken under the
+ given lease id */
+ time_t recall_time; /* time @ which recall was sent */
+ int lease_type; /* Union of all the leases taken
+ under the given lease id */
+ char _pad[4]; /* manual padding */
+};
+typedef struct _lease_id_entry lease_id_entry_t;
+
+/* Required? The stub itself already has a list member. */
+struct __fop_stub {
+ struct list_head list;
+ call_stub_t *stub;
+};
+typedef struct __fop_stub fop_stub_t;
+
+struct __lease_timer_data {
+ inode_t *inode;
+ xlator_t *this;
+};
+typedef struct __lease_timer_data lease_timer_data_t;
+
+gf_boolean_t
+is_leases_enabled(xlator_t *this);
+
+lease_inode_ctx_t *
+lease_ctx_get(inode_t *inode, xlator_t *this);
+
+int
+process_lease_req(call_frame_t *frame, xlator_t *this, inode_t *inode,
+ struct gf_lease *lease);
+
+int
+check_lease_conflict(call_frame_t *frame, inode_t *inode, const char *lease_id,
+ uint32_t fop_flags);
+
+int
+cleanup_client_leases(xlator_t *this, const char *client_uid);
+
+void *
+expired_recall_cleanup(void *data);
+
+#endif /* _LEASES_H */
diff --git a/xlators/features/locks/src/Makefile.am b/xlators/features/locks/src/Makefile.am
index 53dd3aa5da5..0b174c19d2d 100644
--- a/xlators/features/locks/src/Makefile.am
+++ b/xlators/features/locks/src/Makefile.am
@@ -1,20 +1,29 @@
+if WITH_SERVER
xlator_LTLIBRARIES = locks.la
+endif
xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
-locks_la_LDFLAGS = -module -avoidversion
+locks_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
-locks_la_SOURCES = common.c posix.c entrylk.c inodelk.c reservelk.c
-locks_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+locks_la_SOURCES = common.c posix.c entrylk.c inodelk.c reservelk.c \
+ clear.c
-noinst_HEADERS = locks.h common.h locks-mem-types.h
+locks_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
-AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -fno-strict-aliasing -D$(GF_HOST_OS) \
- -I$(top_srcdir)/libglusterfs/src $(GF_CFLAGS) -shared -nostartfiles
+noinst_HEADERS = locks.h common.h locks-mem-types.h clear.h pl-messages.h
+
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src
+
+
+AM_CFLAGS = -Wall -fno-strict-aliasing $(GF_CFLAGS)
CLEANFILES =
+if WITH_SERVER
uninstall-local:
rm -f $(DESTDIR)$(xlatordir)/posix-locks.so
install-data-hook:
- ln -sf locks.so $(DESTDIR)$(xlatordir)/posix-locks.so \ No newline at end of file
+ ln -sf locks.so $(DESTDIR)$(xlatordir)/posix-locks.so
+endif
diff --git a/xlators/features/locks/src/clear.c b/xlators/features/locks/src/clear.c
new file mode 100644
index 00000000000..ab1eac68a53
--- /dev/null
+++ b/xlators/features/locks/src/clear.c
@@ -0,0 +1,460 @@
+/*
+ Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#include <unistd.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <pthread.h>
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/common-utils.h>
+
+#include "locks.h"
+#include "common.h"
+#include <glusterfs/statedump.h>
+#include "clear.h"
+
+const char *clrlk_type_names[CLRLK_TYPE_MAX] = {
+ [CLRLK_INODE] = "inode",
+ [CLRLK_ENTRY] = "entry",
+ [CLRLK_POSIX] = "posix",
+};
+
+int
+clrlk_get_kind(char *kind)
+{
+ char *clrlk_kinds[CLRLK_KIND_MAX] = {"dummy", "blocked", "granted", "all"};
+ int ret_kind = CLRLK_KIND_MAX;
+ int i = 0;
+
+ for (i = CLRLK_BLOCKED; i < CLRLK_KIND_MAX; i++) {
+ if (!strcmp(clrlk_kinds[i], kind)) {
+ ret_kind = i;
+ break;
+ }
+ }
+
+ return ret_kind;
+}
+
+int
+clrlk_get_type(char *type)
+{
+ char *clrlk_types[CLRLK_TYPE_MAX] = {"inode", "entry", "posix"};
+ int ret_type = CLRLK_TYPE_MAX;
+ int i = 0;
+
+ for (i = CLRLK_INODE; i < CLRLK_TYPE_MAX; i++) {
+ if (!strcmp(clrlk_types[i], type)) {
+ ret_type = i;
+ break;
+ }
+ }
+
+ return ret_type;
+}
+
+int
+clrlk_get_lock_range(char *range_str, struct gf_flock *ulock,
+ gf_boolean_t *chk_range)
+{
+ int ret = -1;
+
+ if (!chk_range)
+ goto out;
+
+ if (!range_str) {
+ ret = 0;
+ *chk_range = _gf_false;
+ goto out;
+ }
+
+ if (sscanf(range_str,
+ "%hd,%" PRId64 "-"
+ "%" PRId64,
+ &ulock->l_whence, &ulock->l_start, &ulock->l_len) != 3) {
+ goto out;
+ }
+
+ ret = 0;
+ *chk_range = _gf_true;
+out:
+ return ret;
+}
+
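+/* Parse a GF_XATTR_CLRLK_CMD virtual xattr key of the form
+ * <prefix>.t<type>.k<kind>[.<range|basename>] into clrlk_args. */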
+int
+clrlk_parse_args(const char *cmd, clrlk_args *args)
+{
+ char *opts = NULL;
+ char *cur = NULL;
+ char *tok = NULL;
+ char *sptr = NULL;
+ char *free_ptr = NULL;
+ char kw[KW_MAX] = {
+ [KW_TYPE] = 't',
+ [KW_KIND] = 'k',
+ };
+ int ret = -1;
+ int i = 0;
+
+ GF_ASSERT(cmd);
+ free_ptr = opts = GF_CALLOC(1, strlen(cmd), gf_common_mt_char);
+ if (!opts)
+ goto out;
+
+ if (sscanf(cmd, GF_XATTR_CLRLK_CMD ".%s", opts) < 1) {
+ ret = -1;
+ goto out;
+ }
+
+ /*clr_lk_prefix.ttype.kkind.args, args - type specific*/
+ cur = opts;
+ for (i = 0; i < KW_MAX && (tok = strtok_r(cur, ".", &sptr));
+ cur = NULL, i++) {
+ if (tok[0] != kw[i]) {
+ ret = -1;
+ goto out;
+ }
+ if (i == KW_TYPE)
+ args->type = clrlk_get_type(tok + 1);
+ if (i == KW_KIND)
+ args->kind = clrlk_get_kind(tok + 1);
+ }
+
+ if ((args->type == CLRLK_TYPE_MAX) || (args->kind == CLRLK_KIND_MAX))
+ goto out;
+
+ /*optional args, neither range nor basename can 'legally' contain
+ * "/" in them*/
+ tok = strtok_r(NULL, "/", &sptr);
+ if (tok)
+ args->opts = gf_strdup(tok);
+
+ ret = 0;
+out:
+ GF_FREE(free_ptr);
+ return ret;
+}
+
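+/* Clear the blocked and/or granted posix locks on the inode, optionally
+ * restricted to the byte range given in args->opts; blocked locks are
+ * unwound with EINTR. */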
+int
+clrlk_clear_posixlk(xlator_t *this, pl_inode_t *pl_inode, clrlk_args *args,
+ int *blkd, int *granted, int *op_errno)
+{
+ posix_lock_t *plock = NULL;
+ posix_lock_t *tmp = NULL;
+ struct gf_flock ulock = {
+ 0,
+ };
+ int ret = -1;
+ int bcount = 0;
+ int gcount = 0;
+ gf_boolean_t chk_range = _gf_false;
+
+ if (clrlk_get_lock_range(args->opts, &ulock, &chk_range)) {
+ *op_errno = EINVAL;
+ goto out;
+ }
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ list_for_each_entry_safe(plock, tmp, &pl_inode->ext_list, list)
+ {
+ if ((plock->blocked && !(args->kind & CLRLK_BLOCKED)) ||
+ (!plock->blocked && !(args->kind & CLRLK_GRANTED)))
+ continue;
+
+ if (chk_range && (plock->user_flock.l_whence != ulock.l_whence ||
+ plock->user_flock.l_start != ulock.l_start ||
+ plock->user_flock.l_len != ulock.l_len))
+ continue;
+
+ list_del_init(&plock->list);
+ if (plock->blocked) {
+ bcount++;
+ pl_trace_out(this, plock->frame, NULL, NULL, F_SETLKW,
+ &plock->user_flock, -1, EINTR, NULL);
+
+ STACK_UNWIND_STRICT(lk, plock->frame, -1, EINTR,
+ &plock->user_flock, NULL);
+
+ } else {
+ gcount++;
+ }
+ __destroy_lock(plock);
+ }
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+ grant_blocked_locks(this, pl_inode);
+ ret = 0;
+out:
+ *blkd = bcount;
+ *granted = gcount;
+ return ret;
+}
+
+/* Returns 0 on success and -1 on failure */
+int
+clrlk_clear_inodelk(xlator_t *this, pl_inode_t *pl_inode, pl_dom_list_t *dom,
+ clrlk_args *args, int *blkd, int *granted, int *op_errno)
+{
+ posix_locks_private_t *priv;
+ pl_inode_lock_t *ilock = NULL;
+ pl_inode_lock_t *tmp = NULL;
+ struct gf_flock ulock = {
+ 0,
+ };
+ int ret = -1;
+ int bcount = 0;
+ int gcount = 0;
+ gf_boolean_t chk_range = _gf_false;
+ struct list_head *pcontend = NULL;
+ struct list_head released;
+ struct list_head contend;
+ struct timespec now = {};
+
+ INIT_LIST_HEAD(&released);
+
+ priv = this->private;
+ if (priv->notify_contention) {
+ pcontend = &contend;
+ INIT_LIST_HEAD(pcontend);
+ timespec_now(&now);
+ }
+
+ if (clrlk_get_lock_range(args->opts, &ulock, &chk_range)) {
+ *op_errno = EINVAL;
+ goto out;
+ }
+
+ if (args->kind & CLRLK_BLOCKED)
+ goto blkd;
+
+ if (args->kind & CLRLK_GRANTED)
+ goto granted;
+
+blkd:
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ list_for_each_entry_safe(ilock, tmp, &dom->blocked_inodelks,
+ blocked_locks)
+ {
+ if (chk_range && (ilock->user_flock.l_whence != ulock.l_whence ||
+ ilock->user_flock.l_start != ulock.l_start ||
+ ilock->user_flock.l_len != ulock.l_len))
+ continue;
+
+ bcount++;
+ list_del_init(&ilock->client_list);
+ list_del_init(&ilock->blocked_locks);
+ list_add(&ilock->blocked_locks, &released);
+ }
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ if (!list_empty(&released)) {
+ list_for_each_entry_safe(ilock, tmp, &released, blocked_locks)
+ {
+ list_del_init(&ilock->blocked_locks);
+ pl_trace_out(this, ilock->frame, NULL, NULL, F_SETLKW,
+ &ilock->user_flock, -1, EAGAIN, ilock->volume);
+ STACK_UNWIND_STRICT(inodelk, ilock->frame, -1, EAGAIN, NULL);
+ // No need to take lock as the locks are only in one list
+ __pl_inodelk_unref(ilock);
+ }
+ }
+
+ if (!(args->kind & CLRLK_GRANTED)) {
+ ret = 0;
+ goto out;
+ }
+
+granted:
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ list_for_each_entry_safe(ilock, tmp, &dom->inodelk_list, list)
+ {
+ if (chk_range && (ilock->user_flock.l_whence != ulock.l_whence ||
+ ilock->user_flock.l_start != ulock.l_start ||
+ ilock->user_flock.l_len != ulock.l_len))
+ continue;
+
+ gcount++;
+ list_del_init(&ilock->client_list);
+ list_del_init(&ilock->list);
+ list_add(&ilock->list, &released);
+ }
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ list_for_each_entry_safe(ilock, tmp, &released, list)
+ {
+ list_del_init(&ilock->list);
+ // No need to take lock as the locks are only in one list
+ __pl_inodelk_unref(ilock);
+ }
+
+ ret = 0;
+out:
+ grant_blocked_inode_locks(this, pl_inode, dom, &now, pcontend);
+ if (pcontend != NULL) {
+ inodelk_contention_notify(this, pcontend);
+ }
+ *blkd = bcount;
+ *granted = gcount;
+ return ret;
+}
+
+/* Returns 0 on success and -1 on failure */
+int
+clrlk_clear_entrylk(xlator_t *this, pl_inode_t *pl_inode, pl_dom_list_t *dom,
+ clrlk_args *args, int *blkd, int *granted, int *op_errno)
+{
+ posix_locks_private_t *priv;
+ pl_entry_lock_t *elock = NULL;
+ pl_entry_lock_t *tmp = NULL;
+ int bcount = 0;
+ int gcount = 0;
+ int ret = -1;
+ struct list_head *pcontend = NULL;
+ struct list_head removed;
+ struct list_head released;
+ struct list_head contend;
+ struct timespec now;
+
+ INIT_LIST_HEAD(&released);
+
+ priv = this->private;
+ if (priv->notify_contention) {
+ pcontend = &contend;
+ INIT_LIST_HEAD(pcontend);
+ timespec_now(&now);
+ }
+
+ if (args->kind & CLRLK_BLOCKED)
+ goto blkd;
+
+ if (args->kind & CLRLK_GRANTED)
+ goto granted;
+
+blkd:
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ list_for_each_entry_safe(elock, tmp, &dom->blocked_entrylks,
+ blocked_locks)
+ {
+ if (args->opts) {
+ if (!elock->basename || strcmp(elock->basename, args->opts))
+ continue;
+ }
+
+ bcount++;
+
+ list_del_init(&elock->client_list);
+ list_del_init(&elock->blocked_locks);
+ list_add_tail(&elock->blocked_locks, &released);
+ }
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ if (!list_empty(&released)) {
+ list_for_each_entry_safe(elock, tmp, &released, blocked_locks)
+ {
+ list_del_init(&elock->blocked_locks);
+ entrylk_trace_out(this, elock->frame, elock->volume, NULL, NULL,
+ elock->basename, ENTRYLK_LOCK, elock->type, -1,
+ EAGAIN);
+ STACK_UNWIND_STRICT(entrylk, elock->frame, -1, EAGAIN, NULL);
+
+ __pl_entrylk_unref(elock);
+ }
+ }
+
+ if (!(args->kind & CLRLK_GRANTED)) {
+ ret = 0;
+ goto out;
+ }
+
+granted:
+ INIT_LIST_HEAD(&removed);
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ list_for_each_entry_safe(elock, tmp, &dom->entrylk_list, domain_list)
+ {
+ if (args->opts) {
+ if (!elock->basename || strcmp(elock->basename, args->opts))
+ continue;
+ }
+
+ gcount++;
+ list_del_init(&elock->client_list);
+ list_del_init(&elock->domain_list);
+ list_add_tail(&elock->domain_list, &removed);
+
+ __pl_entrylk_unref(elock);
+ }
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ grant_blocked_entry_locks(this, pl_inode, dom, &now, pcontend);
+ if (pcontend != NULL) {
+ entrylk_contention_notify(this, pcontend);
+ }
+
+ ret = 0;
+out:
+ *blkd = bcount;
+ *granted = gcount;
+ return ret;
+}
+
+int
+clrlk_clear_lks_in_all_domains(xlator_t *this, pl_inode_t *pl_inode,
+ clrlk_args *args, int *blkd, int *granted,
+ int *op_errno)
+{
+ pl_dom_list_t *dom = NULL;
+ int ret = -1;
+ int tmp_bcount = 0;
+ int tmp_gcount = 0;
+
+ if (list_empty(&pl_inode->dom_list)) {
+ ret = 0;
+ goto out;
+ }
+
+ list_for_each_entry(dom, &pl_inode->dom_list, inode_list)
+ {
+ tmp_bcount = tmp_gcount = 0;
+
+ switch (args->type) {
+ case CLRLK_INODE:
+ ret = clrlk_clear_inodelk(this, pl_inode, dom, args,
+ &tmp_bcount, &tmp_gcount, op_errno);
+ if (ret)
+ goto out;
+ break;
+ case CLRLK_ENTRY:
+ ret = clrlk_clear_entrylk(this, pl_inode, dom, args,
+ &tmp_bcount, &tmp_gcount, op_errno);
+ if (ret)
+ goto out;
+ break;
+ }
+
+ *blkd += tmp_bcount;
+ *granted += tmp_gcount;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
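
For context, clrlk_parse_args() above accepts the clear-locks request as a virtual xattr name of the form <prefix>.t<type>.k<kind>[.<range-or-basename>], as noted in the parser comment. A minimal illustrative use of the parser follows; it is a sketch, not code from this patch, and it assumes GF_XATTR_CLRLK_CMD expands to "glusterfs.clrlk" and that the type/kind tokens ("inode", "all", ...) match the names resolved by clrlk_get_type()/clrlk_get_kind():

    clrlk_args args = {0};

    if (clrlk_parse_args("glusterfs.clrlk.tinode.kall", &args) == 0) {
        /* args.type == CLRLK_INODE, args.kind == CLRLK_ALL and
         * args.opts == NULL because no range/basename was appended. */
        GF_FREE(args.opts);
    }
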
diff --git a/xlators/features/locks/src/clear.h b/xlators/features/locks/src/clear.h
new file mode 100644
index 00000000000..bc118cb1b81
--- /dev/null
+++ b/xlators/features/locks/src/clear.h
@@ -0,0 +1,73 @@
+/*
+ Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef __CLEAR_H__
+#define __CLEAR_H__
+
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/stack.h>
+#include <glusterfs/call-stub.h>
+#include "locks.h"
+
+typedef enum {
+ CLRLK_INODE,
+ CLRLK_ENTRY,
+ CLRLK_POSIX,
+ CLRLK_TYPE_MAX
+} clrlk_type;
+
+extern const char *clrlk_type_names[];
+
+typedef enum {
+ CLRLK_BLOCKED = 1,
+ CLRLK_GRANTED,
+ CLRLK_ALL,
+ CLRLK_KIND_MAX
+} clrlk_kind;
+
+typedef enum {
+ KW_TYPE,
+ KW_KIND,
+ /*add new keywords here*/
+ KW_MAX
+} clrlk_opts;
+
+struct _clrlk_args;
+typedef struct _clrlk_args clrlk_args;
+
+struct _clrlk_args {
+ int type;
+ int kind;
+ char *opts;
+};
+
+int
+clrlk_get_kind(char *kind);
+int
+clrlk_get_type(char *type);
+int
+clrlk_get_lock_range(char *range_str, struct gf_flock *ulock,
+ gf_boolean_t *chk_range);
+int
+clrlk_parse_args(const char *cmd, clrlk_args *args);
+
+int
+clrlk_clear_posixlk(xlator_t *this, pl_inode_t *pl_inode, clrlk_args *args,
+ int *blkd, int *granted, int *op_errno);
+int
+clrlk_clear_inodelk(xlator_t *this, pl_inode_t *pl_inode, pl_dom_list_t *dom,
+ clrlk_args *args, int *blkd, int *granted, int *op_errno);
+int
+clrlk_clear_entrylk(xlator_t *this, pl_inode_t *pl_inode, pl_dom_list_t *dom,
+ clrlk_args *args, int *blkd, int *granted, int *op_errno);
+int
+clrlk_clear_lks_in_all_domains(xlator_t *this, pl_inode_t *pl_inode,
+ clrlk_args *args, int *blkd, int *granted,
+ int *op_errno);
+#endif /* __CLEAR_H__ */
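
Putting the declarations above together, the intended calling sequence for a clear-locks request is roughly the following sketch (illustrative only, not code from this patch; cmd, this and pl_inode come from the caller's context, and error handling, response formatting and inode locking are elided):

    int blkd = 0, granted = 0, op_errno = 0;
    clrlk_args args = {0};

    if (clrlk_parse_args(cmd, &args) == 0) {
        if (args.type == CLRLK_POSIX)
            clrlk_clear_posixlk(this, pl_inode, &args, &blkd, &granted,
                                &op_errno);
        else
            clrlk_clear_lks_in_all_domains(this, pl_inode, &args, &blkd,
                                           &granted, &op_errno);
        GF_FREE(args.opts);
    }

blkd and granted then hold the number of blocked and granted locks that were cleared, and op_errno carries the failure reason when one of the helpers returns -1.
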
diff --git a/xlators/features/locks/src/common.c b/xlators/features/locks/src/common.c
index a2c6245d63f..a2c6be93e03 100644
--- a/xlators/features/locks/src/common.c
+++ b/xlators/features/locks/src/common.c
@@ -1,712 +1,786 @@
/*
- Copyright (c) 2006, 2007, 2008 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
+ Copyright (c) 2006-2012, 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
#include <unistd.h>
#include <fcntl.h>
#include <limits.h>
#include <pthread.h>
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
-
-#include "glusterfs.h"
-#include "compat.h"
-#include "xlator.h"
-#include "inode.h"
-#include "logging.h"
-#include "common-utils.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/syncop.h>
#include "locks.h"
#include "common.h"
-
static int
-__is_lock_grantable (pl_inode_t *pl_inode, posix_lock_t *lock);
+__is_lock_grantable(pl_inode_t *pl_inode, posix_lock_t *lock);
static void
-__insert_and_merge (pl_inode_t *pl_inode, posix_lock_t *lock);
+__insert_and_merge(pl_inode_t *pl_inode, posix_lock_t *lock);
static int
-pl_send_prelock_unlock (xlator_t *this, pl_inode_t *pl_inode,
- posix_lock_t *old_lock);
+pl_send_prelock_unlock(xlator_t *this, pl_inode_t *pl_inode,
+ posix_lock_t *old_lock);
+
static pl_dom_list_t *
-allocate_domain (const char *volume)
+__allocate_domain(const char *volume)
{
- pl_dom_list_t *dom = NULL;
+ pl_dom_list_t *dom = NULL;
- dom = GF_CALLOC (1, sizeof (*dom),
- gf_locks_mt_pl_dom_list_t);
- if (!dom)
- goto out;
+ dom = GF_CALLOC(1, sizeof(*dom), gf_locks_mt_pl_dom_list_t);
+ if (!dom)
+ goto out;
- dom->domain = gf_strdup(volume);
- if (!dom->domain)
- goto out;
+ dom->domain = gf_strdup(volume);
+ if (!dom->domain)
+ goto out;
- gf_log ("posix-locks", GF_LOG_TRACE,
- "New domain allocated: %s", dom->domain);
+ gf_log("posix-locks", GF_LOG_TRACE, "New domain allocated: %s",
+ dom->domain);
- INIT_LIST_HEAD (&dom->inode_list);
- INIT_LIST_HEAD (&dom->entrylk_list);
- INIT_LIST_HEAD (&dom->blocked_entrylks);
- INIT_LIST_HEAD (&dom->inodelk_list);
- INIT_LIST_HEAD (&dom->blocked_inodelks);
+ INIT_LIST_HEAD(&dom->inode_list);
+ INIT_LIST_HEAD(&dom->entrylk_list);
+ INIT_LIST_HEAD(&dom->blocked_entrylks);
+ INIT_LIST_HEAD(&dom->inodelk_list);
+ INIT_LIST_HEAD(&dom->blocked_inodelks);
out:
- if (dom && (NULL == dom->domain)) {
- GF_FREE (dom);
- dom = NULL;
- }
+ if (dom && (NULL == dom->domain)) {
+ GF_FREE(dom);
+ dom = NULL;
+ }
- return dom;
+ return dom;
}
/* Returns domain for the lock. If domain is not present,
* allocates a domain and returns it
*/
pl_dom_list_t *
-get_domain (pl_inode_t *pl_inode, const char *volume)
+get_domain(pl_inode_t *pl_inode, const char *volume)
{
- pl_dom_list_t *dom = NULL;
-
- GF_VALIDATE_OR_GOTO (POSIX_LOCKS, pl_inode, out);
- GF_VALIDATE_OR_GOTO (POSIX_LOCKS, volume, out);
-
- list_for_each_entry (dom, &pl_inode->dom_list, inode_list) {
- if (strcmp (dom->domain, volume) == 0)
- goto found;
+ pl_dom_list_t *dom = NULL;
+ GF_VALIDATE_OR_GOTO("posix-locks", pl_inode, out);
+ GF_VALIDATE_OR_GOTO("posix-locks", volume, out);
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ list_for_each_entry(dom, &pl_inode->dom_list, inode_list)
+ {
+ if (strcmp(dom->domain, volume) == 0)
+ goto unlock;
}
- dom = allocate_domain (volume);
+ dom = __allocate_domain(volume);
if (dom)
- list_add (&dom->inode_list, &pl_inode->dom_list);
-found:
- if (dom) {
- gf_log (POSIX_LOCKS, GF_LOG_TRACE, "Domain %s found", volume);
- } else {
- gf_log (POSIX_LOCKS, GF_LOG_TRACE, "Domain %s not found", volume);
- }
+ list_add(&dom->inode_list, &pl_inode->dom_list);
+ }
+unlock:
+ pthread_mutex_unlock(&pl_inode->mutex);
+ if (dom) {
+ gf_log("posix-locks", GF_LOG_TRACE, "Domain %s found", volume);
+ } else {
+ gf_log("posix-locks", GF_LOG_TRACE, "Domain %s not found", volume);
+ }
out:
- return dom;
+ return dom;
}
unsigned long
-fd_to_fdnum (fd_t *fd)
+fd_to_fdnum(fd_t *fd)
{
- return ((unsigned long) fd);
+ return ((unsigned long)fd);
}
fd_t *
-fd_from_fdnum (posix_lock_t *lock)
+fd_from_fdnum(posix_lock_t *lock)
{
- return ((fd_t *) lock->fd_num);
+ return ((fd_t *)lock->fd_num);
}
int
-__pl_inode_is_empty (pl_inode_t *pl_inode)
+__pl_inode_is_empty(pl_inode_t *pl_inode)
{
- pl_dom_list_t *dom = NULL;
- int is_empty = 1;
-
- if (!list_empty (&pl_inode->ext_list))
- is_empty = 0;
-
- list_for_each_entry (dom, &pl_inode->dom_list, inode_list) {
- if (!list_empty (&dom->entrylk_list))
- is_empty = 0;
-
- if (!list_empty (&dom->inodelk_list))
- is_empty = 0;
- }
-
- return is_empty;
+ return (list_empty(&pl_inode->ext_list));
}
void
-pl_print_locker (char *str, int size, xlator_t *this, call_frame_t *frame)
+pl_print_locker(char *str, int size, xlator_t *this, call_frame_t *frame)
{
- snprintf (str, size, "Pid=%llu, lk-owner=%llu, Transport=%p, Frame=%llu",
- (unsigned long long) frame->root->pid,
- (unsigned long long) frame->root->lk_owner,
- (void *)frame->root->trans,
- (unsigned long long) frame->root->unique);
+ snprintf(str, size, "Pid=%llu, lk-owner=%s, Client=%p, Frame=%llu",
+ (unsigned long long)frame->root->pid,
+ lkowner_utoa(&frame->root->lk_owner), frame->root->client,
+ (unsigned long long)frame->root->unique);
}
-
void
-pl_print_lockee (char *str, int size, fd_t *fd, loc_t *loc)
+pl_print_lockee(char *str, int size, fd_t *fd, loc_t *loc)
{
- inode_t *inode = NULL;
- char *ipath = NULL;
- int ret = 0;
+ inode_t *inode = NULL;
+ char *ipath = NULL;
+ int ret = 0;
- if (fd)
- inode = fd->inode;
- if (loc)
- inode = loc->inode;
+ if (fd)
+ inode = fd->inode;
+ if (loc)
+ inode = loc->inode;
- if (!inode) {
- snprintf (str, size, "<nul>");
- return;
- }
+ if (!inode) {
+ snprintf(str, size, "<nul>");
+ return;
+ }
- if (loc && loc->path) {
- ipath = gf_strdup (loc->path);
- } else {
- ret = inode_path (inode, NULL, &ipath);
- if (ret <= 0)
- ipath = NULL;
- }
+ if (loc && loc->path) {
+ ipath = gf_strdup(loc->path);
+ } else {
+ ret = inode_path(inode, NULL, &ipath);
+ if (ret <= 0)
+ ipath = NULL;
+ }
- snprintf (str, size, "ino=%llu, fd=%p, path=%s",
- (unsigned long long) inode->ino, fd,
- ipath ? ipath : "<nul>");
+ snprintf(str, size, "gfid=%s, fd=%p, path=%s", uuid_utoa(inode->gfid), fd,
+ ipath ? ipath : "<nul>");
- if (ipath)
- GF_FREE (ipath);
+ GF_FREE(ipath);
}
-
void
-pl_print_lock (char *str, int size, int cmd,
- struct gf_flock *flock, uint64_t owner)
+pl_print_lock(char *str, int size, int cmd, struct gf_flock *flock,
+ gf_lkowner_t *owner)
{
- char *cmd_str = NULL;
- char *type_str = NULL;
+ char *cmd_str = NULL;
+ char *type_str = NULL;
- switch (cmd) {
+ switch (cmd) {
#if F_GETLK != F_GETLK64
case F_GETLK64:
#endif
case F_GETLK:
- cmd_str = "GETLK";
- break;
+ cmd_str = "GETLK";
+ break;
#if F_SETLK != F_SETLK64
case F_SETLK64:
#endif
case F_SETLK:
- cmd_str = "SETLK";
- break;
+ cmd_str = "SETLK";
+ break;
#if F_SETLKW != F_SETLKW64
case F_SETLKW64:
#endif
case F_SETLKW:
- cmd_str = "SETLKW";
- break;
+ cmd_str = "SETLKW";
+ break;
default:
- cmd_str = "UNKNOWN";
- break;
- }
+ cmd_str = "UNKNOWN";
+ break;
+ }
- switch (flock->l_type) {
+ switch (flock->l_type) {
case F_RDLCK:
- type_str = "READ";
- break;
+ type_str = "READ";
+ break;
case F_WRLCK:
- type_str = "WRITE";
- break;
+ type_str = "WRITE";
+ break;
case F_UNLCK:
- type_str = "UNLOCK";
- break;
+ type_str = "UNLOCK";
+ break;
default:
- type_str = "UNKNOWN";
- break;
- }
-
- snprintf (str, size, "lock=FCNTL, cmd=%s, type=%s, "
- "start=%llu, len=%llu, pid=%llu, lk-owner=%llu",
- cmd_str, type_str, (unsigned long long) flock->l_start,
- (unsigned long long) flock->l_len,
- (unsigned long long) flock->l_pid,
- (unsigned long long) owner);
+ type_str = "UNKNOWN";
+ break;
+ }
+
+ snprintf(str, size,
+ "lock=FCNTL, cmd=%s, type=%s, "
+ "start=%llu, len=%llu, pid=%llu, lk-owner=%s",
+ cmd_str, type_str, (unsigned long long)flock->l_start,
+ (unsigned long long)flock->l_len, (unsigned long long)flock->l_pid,
+ lkowner_utoa(owner));
}
-
void
-pl_trace_in (xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc,
- int cmd, struct gf_flock *flock, const char *domain)
+pl_trace_in(xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc, int cmd,
+ struct gf_flock *flock, const char *domain)
{
- posix_locks_private_t *priv = NULL;
- char pl_locker[256];
- char pl_lockee[256];
- char pl_lock[256];
-
- priv = this->private;
+ posix_locks_private_t *priv = this->private;
+ char pl_locker[256];
+ char pl_lockee[256];
+ char pl_lock[256];
- if (!priv->trace)
- return;
+ if (!priv->trace)
+ return;
- pl_print_locker (pl_locker, 256, this, frame);
- pl_print_lockee (pl_lockee, 256, fd, loc);
- if (domain)
- pl_print_inodelk (pl_lock, 256, cmd, flock, domain);
- else
- pl_print_lock (pl_lock, 256, cmd, flock, frame->root->lk_owner);
+ pl_print_locker(pl_locker, 256, this, frame);
+ pl_print_lockee(pl_lockee, 256, fd, loc);
+ if (domain)
+ pl_print_inodelk(pl_lock, 256, cmd, flock, domain);
+ else
+ pl_print_lock(pl_lock, 256, cmd, flock, &frame->root->lk_owner);
- gf_log (this->name, GF_LOG_INFO,
- "[REQUEST] Locker = {%s} Lockee = {%s} Lock = {%s}",
- pl_locker, pl_lockee, pl_lock);
+ gf_log(this->name, GF_LOG_INFO,
+ "[REQUEST] Locker = {%s} Lockee = {%s} Lock = {%s}", pl_locker,
+ pl_lockee, pl_lock);
}
-
void
-pl_print_verdict (char *str, int size, int op_ret, int op_errno)
+pl_print_verdict(char *str, int size, int op_ret, int op_errno)
{
- char *verdict = NULL;
-
- if (op_ret == 0) {
- verdict = "GRANTED";
- } else {
- switch (op_errno) {
- case EAGAIN:
- verdict = "TRYAGAIN";
- break;
- default:
- verdict = strerror (op_errno);
- }
+ char *verdict = NULL;
+
+ if (op_ret == 0) {
+ verdict = "GRANTED";
+ } else {
+ switch (op_errno) {
+ case EAGAIN:
+ verdict = "TRYAGAIN";
+ break;
+ default:
+ verdict = strerror(op_errno);
}
+ }
- snprintf (str, size, "%s", verdict);
+ snprintf(str, size, "%s", verdict);
}
-
void
-pl_trace_out (xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc,
- int cmd, struct gf_flock *flock, int op_ret, int op_errno, const char *domain)
+pl_trace_out(xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc, int cmd,
+ struct gf_flock *flock, int op_ret, int op_errno,
+ const char *domain)
{
- posix_locks_private_t *priv = NULL;
- char pl_locker[256];
- char pl_lockee[256];
- char pl_lock[256];
- char verdict[32];
+ posix_locks_private_t *priv = NULL;
+ char pl_locker[256];
+ char pl_lockee[256];
+ char pl_lock[256];
+ char verdict[32];
- priv = this->private;
+ priv = this->private;
- if (!priv->trace)
- return;
+ if (!priv->trace)
+ return;
- pl_print_locker (pl_locker, 256, this, frame);
- pl_print_lockee (pl_lockee, 256, fd, loc);
- if (domain)
- pl_print_inodelk (pl_lock, 256, cmd, flock, domain);
- else
- pl_print_lock (pl_lock, 256, cmd, flock, frame->root->lk_owner);
+ pl_print_locker(pl_locker, 256, this, frame);
+ pl_print_lockee(pl_lockee, 256, fd, loc);
+ if (domain)
+ pl_print_inodelk(pl_lock, 256, cmd, flock, domain);
+ else
+ pl_print_lock(pl_lock, 256, cmd, flock, &frame->root->lk_owner);
- pl_print_verdict (verdict, 32, op_ret, op_errno);
+ pl_print_verdict(verdict, 32, op_ret, op_errno);
- gf_log (this->name, GF_LOG_INFO,
- "[%s] Locker = {%s} Lockee = {%s} Lock = {%s}",
- verdict, pl_locker, pl_lockee, pl_lock);
+ gf_log(this->name, GF_LOG_INFO,
+ "[%s] Locker = {%s} Lockee = {%s} Lock = {%s}", verdict, pl_locker,
+ pl_lockee, pl_lock);
}
-
void
-pl_trace_block (xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc,
- int cmd, struct gf_flock *flock, const char *domain)
+pl_trace_block(xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc,
+ int cmd, struct gf_flock *flock, const char *domain)
{
- posix_locks_private_t *priv = NULL;
- char pl_locker[256];
- char pl_lockee[256];
- char pl_lock[256];
-
- priv = this->private;
+ posix_locks_private_t *priv = this->private;
+ char pl_locker[256];
+ char pl_lockee[256];
+ char pl_lock[256];
- if (!priv->trace)
- return;
+ if (!priv->trace)
+ return;
- pl_print_locker (pl_locker, 256, this, frame);
- pl_print_lockee (pl_lockee, 256, fd, loc);
- if (domain)
- pl_print_inodelk (pl_lock, 256, cmd, flock, domain);
- else
- pl_print_lock (pl_lock, 256, cmd, flock, frame->root->lk_owner);
+ pl_print_locker(pl_locker, 256, this, frame);
+ pl_print_lockee(pl_lockee, 256, fd, loc);
+ if (domain)
+ pl_print_inodelk(pl_lock, 256, cmd, flock, domain);
+ else
+ pl_print_lock(pl_lock, 256, cmd, flock, &frame->root->lk_owner);
- gf_log (this->name, GF_LOG_INFO,
- "[BLOCKED] Locker = {%s} Lockee = {%s} Lock = {%s}",
- pl_locker, pl_lockee, pl_lock);
+ gf_log(this->name, GF_LOG_INFO,
+ "[BLOCKED] Locker = {%s} Lockee = {%s} Lock = {%s}", pl_locker,
+ pl_lockee, pl_lock);
}
-
void
-pl_trace_flush (xlator_t *this, call_frame_t *frame, fd_t *fd)
+pl_trace_flush(xlator_t *this, call_frame_t *frame, fd_t *fd)
{
- posix_locks_private_t *priv = NULL;
- char pl_locker[256];
- char pl_lockee[256];
- pl_inode_t *pl_inode = NULL;
+ posix_locks_private_t *priv = NULL;
+ char pl_locker[256];
+ char pl_lockee[256];
+ pl_inode_t *pl_inode = NULL;
- priv = this->private;
+ priv = this->private;
- if (!priv->trace)
- return;
+ if (!priv->trace)
+ return;
- pl_inode = pl_inode_get (this, fd->inode);
+ pl_inode = pl_inode_get(this, fd->inode, NULL);
- if (pl_inode && __pl_inode_is_empty (pl_inode))
- return;
+ if (pl_inode && __pl_inode_is_empty(pl_inode))
+ return;
- pl_print_locker (pl_locker, 256, this, frame);
- pl_print_lockee (pl_lockee, 256, fd, NULL);
+ pl_print_locker(pl_locker, 256, this, frame);
+ pl_print_lockee(pl_lockee, 256, fd, NULL);
- gf_log (this->name, GF_LOG_INFO,
- "[FLUSH] Locker = {%s} Lockee = {%s}",
- pl_locker, pl_lockee);
+ gf_log(this->name, GF_LOG_INFO, "[FLUSH] Locker = {%s} Lockee = {%s}",
+ pl_locker, pl_lockee);
}
void
-pl_trace_release (xlator_t *this, fd_t *fd)
+pl_trace_release(xlator_t *this, fd_t *fd)
{
- posix_locks_private_t *priv = NULL;
- char pl_lockee[256];
+ posix_locks_private_t *priv = NULL;
+ char pl_lockee[256];
- priv = this->private;
+ priv = this->private;
- if (!priv->trace)
- return;
+ if (!priv->trace)
+ return;
- pl_print_lockee (pl_lockee, 256, fd, NULL);
+ pl_print_lockee(pl_lockee, 256, fd, NULL);
- gf_log (this->name, GF_LOG_INFO,
- "[RELEASE] Lockee = {%s}", pl_lockee);
+ gf_log(this->name, GF_LOG_INFO, "[RELEASE] Lockee = {%s}", pl_lockee);
}
-
void
-pl_update_refkeeper (xlator_t *this, inode_t *inode)
+pl_update_refkeeper(xlator_t *this, inode_t *inode)
{
- pl_inode_t *pl_inode = NULL;
- int is_empty = 0;
- int need_unref = 0;
+ pl_inode_t *pl_inode = NULL;
+ int is_empty = 0;
+ int need_unref = 0;
+ int need_ref = 0;
- pl_inode = pl_inode_get (this, inode);
+ pl_inode = pl_inode_get(this, inode, NULL);
+ if (!pl_inode)
+ return;
- pthread_mutex_lock (&pl_inode->mutex);
- {
- is_empty = __pl_inode_is_empty (pl_inode);
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ is_empty = __pl_inode_is_empty(pl_inode);
- if (is_empty && pl_inode->refkeeper) {
- need_unref = 1;
- pl_inode->refkeeper = NULL;
- }
+ if (is_empty && pl_inode->refkeeper) {
+ need_unref = 1;
+ pl_inode->refkeeper = NULL;
+ }
- if (!is_empty && !pl_inode->refkeeper) {
- pl_inode->refkeeper = inode_ref (inode);
- }
+ if (!is_empty && !pl_inode->refkeeper) {
+ need_ref = 1;
+ pl_inode->refkeeper = inode;
}
- pthread_mutex_unlock (&pl_inode->mutex);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
- if (need_unref)
- inode_unref (inode);
+ if (need_unref)
+ inode_unref(inode);
+
+ if (need_ref)
+ inode_ref(inode);
}
+/* Get lock enforcement info from disk */
+int
+pl_fetch_mlock_info_from_disk(xlator_t *this, pl_inode_t *pl_inode,
+ pl_local_t *local)
+{
+ dict_t *xdata_rsp = NULL;
+ int ret = 0;
+ int op_ret = 0;
+
+ if (!local) {
+ return -1;
+ }
+
+ if (local->fd) {
+ op_ret = syncop_fgetxattr(this, local->fd, &xdata_rsp,
+ GF_ENFORCE_MANDATORY_LOCK, NULL, NULL);
+ } else {
+ op_ret = syncop_getxattr(this, &local->loc[0], &xdata_rsp,
+ GF_ENFORCE_MANDATORY_LOCK, NULL, NULL);
+ }
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ if (op_ret >= 0) {
+ pl_inode->mlock_enforced = _gf_true;
+ pl_inode->check_mlock_info = _gf_false;
+ } else {
+ gf_msg(this->name, GF_LOG_WARNING, -op_ret, 0,
+ "getxattr failed with %d", op_ret);
+ pl_inode->mlock_enforced = _gf_false;
+
+ if (-op_ret == ENODATA) {
+ pl_inode->check_mlock_info = _gf_false;
+ } else {
+ pl_inode->check_mlock_info = _gf_true;
+ }
+ }
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ return ret;
+}
pl_inode_t *
-pl_inode_get (xlator_t *this, inode_t *inode)
+pl_inode_get(xlator_t *this, inode_t *inode, pl_local_t *local)
{
- uint64_t tmp_pl_inode = 0;
- pl_inode_t *pl_inode = NULL;
- int ret = 0;
+ uint64_t tmp_pl_inode = 0;
+ pl_inode_t *pl_inode = NULL;
+ int ret = 0;
- ret = inode_ctx_get (inode, this,&tmp_pl_inode);
+ LOCK(&inode->lock);
+ {
+ ret = __inode_ctx_get(inode, this, &tmp_pl_inode);
if (ret == 0) {
- pl_inode = (pl_inode_t *)(long)tmp_pl_inode;
- goto out;
+ pl_inode = (pl_inode_t *)(long)tmp_pl_inode;
+ goto unlock;
}
- pl_inode = GF_CALLOC (1, sizeof (*pl_inode),
- gf_locks_mt_pl_inode_t);
+
+ pl_inode = GF_CALLOC(1, sizeof(*pl_inode), gf_locks_mt_pl_inode_t);
if (!pl_inode) {
- goto out;
+ goto unlock;
}
- gf_log (this->name, GF_LOG_TRACE,
- "Allocating new pl inode");
-
- pthread_mutex_init (&pl_inode->mutex, NULL);
-
- INIT_LIST_HEAD (&pl_inode->dom_list);
- INIT_LIST_HEAD (&pl_inode->ext_list);
- INIT_LIST_HEAD (&pl_inode->rw_list);
- INIT_LIST_HEAD (&pl_inode->reservelk_list);
- INIT_LIST_HEAD (&pl_inode->blocked_reservelks);
- INIT_LIST_HEAD (&pl_inode->blocked_calls);
-
- inode_ctx_put (inode, this, (uint64_t)(long)(pl_inode));
+ gf_log(this->name, GF_LOG_TRACE, "Allocating new pl inode");
+
+ pthread_mutex_init(&pl_inode->mutex, NULL);
+ pthread_cond_init(&pl_inode->check_fop_wind_count, 0);
+
+ INIT_LIST_HEAD(&pl_inode->dom_list);
+ INIT_LIST_HEAD(&pl_inode->ext_list);
+ INIT_LIST_HEAD(&pl_inode->rw_list);
+ INIT_LIST_HEAD(&pl_inode->reservelk_list);
+ INIT_LIST_HEAD(&pl_inode->blocked_reservelks);
+ INIT_LIST_HEAD(&pl_inode->blocked_calls);
+ INIT_LIST_HEAD(&pl_inode->metalk_list);
+ INIT_LIST_HEAD(&pl_inode->queued_locks);
+ INIT_LIST_HEAD(&pl_inode->waiting);
+ gf_uuid_copy(pl_inode->gfid, inode->gfid);
+
+ pl_inode->check_mlock_info = _gf_true;
+ pl_inode->mlock_enforced = _gf_false;
+
+ /* -2 means never looked up. -1 means something went wrong and link
+ * tracking is disabled. */
+ pl_inode->links = -2;
+
+ ret = __inode_ctx_put(inode, this, (uint64_t)(long)(pl_inode));
+ if (ret) {
+ pthread_mutex_destroy(&pl_inode->mutex);
+ GF_FREE(pl_inode);
+ pl_inode = NULL;
+ goto unlock;
+ }
+ }
+unlock:
+ UNLOCK(&inode->lock);
+
+ if ((pl_inode != NULL) && pl_is_mandatory_locking_enabled(pl_inode) &&
+ pl_inode->check_mlock_info && local) {
+        /* Note: the per-file lock-enforcement information could be stored in
+           the stat(x) attribute flags in the posix xlator; that would remove
+           the need for a getxattr after a reboot.
+        */
+ pl_fetch_mlock_info_from_disk(this, pl_inode, local);
+ }
-out:
- return pl_inode;
+ return pl_inode;
}
-
/* Create a new posix_lock_t */
posix_lock_t *
-new_posix_lock (struct gf_flock *flock, void *transport, pid_t client_pid,
- uint64_t owner, fd_t *fd)
+new_posix_lock(struct gf_flock *flock, client_t *client, pid_t client_pid,
+ gf_lkowner_t *owner, fd_t *fd, uint32_t lk_flags, int blocking,
+ int32_t *op_errno)
{
- posix_lock_t *lock = NULL;
+ posix_lock_t *lock = NULL;
- GF_VALIDATE_OR_GOTO (POSIX_LOCKS, flock, out);
- GF_VALIDATE_OR_GOTO (POSIX_LOCKS, transport, out);
- GF_VALIDATE_OR_GOTO (POSIX_LOCKS, fd, out);
+ GF_VALIDATE_OR_GOTO("posix-locks", flock, out);
+ GF_VALIDATE_OR_GOTO("posix-locks", client, out);
+ GF_VALIDATE_OR_GOTO("posix-locks", fd, out);
- lock = GF_CALLOC (1, sizeof (posix_lock_t),
- gf_locks_mt_posix_lock_t);
- if (!lock) {
- goto out;
- }
+ if (!pl_is_lk_owner_valid(owner, client)) {
+ *op_errno = EINVAL;
+ goto out;
+ }
- lock->fl_start = flock->l_start;
- lock->fl_type = flock->l_type;
+ lock = GF_CALLOC(1, sizeof(posix_lock_t), gf_locks_mt_posix_lock_t);
+ if (!lock) {
+ *op_errno = ENOMEM;
+ goto out;
+ }
- if (flock->l_len == 0)
- lock->fl_end = LLONG_MAX;
- else
- lock->fl_end = flock->l_start + flock->l_len - 1;
+ lock->fl_start = flock->l_start;
+ lock->fl_type = flock->l_type;
- lock->transport = transport;
- lock->fd_num = fd_to_fdnum (fd);
- lock->fd = fd;
- lock->client_pid = client_pid;
- lock->owner = owner;
+ if (flock->l_len == 0)
+ lock->fl_end = LLONG_MAX;
+ else
+ lock->fl_end = flock->l_start + flock->l_len - 1;
- INIT_LIST_HEAD (&lock->list);
+ lock->client = client;
+
+ lock->client_uid = gf_strdup(client->client_uid);
+ if (lock->client_uid == NULL) {
+ GF_FREE(lock);
+ lock = NULL;
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ lock->fd_num = fd_to_fdnum(fd);
+ lock->fd = fd;
+ lock->client_pid = client_pid;
+ lock->owner = *owner;
+ lock->lk_flags = lk_flags;
+
+ lock->blocking = blocking;
+ memcpy(&lock->user_flock, flock, sizeof(lock->user_flock));
+
+ INIT_LIST_HEAD(&lock->list);
out:
- return lock;
+ return lock;
}
-
/* Delete a lock from the inode's lock list */
void
-__delete_lock (pl_inode_t *pl_inode, posix_lock_t *lock)
+__delete_lock(posix_lock_t *lock)
{
- list_del_init (&lock->list);
+ list_del_init(&lock->list);
}
-
/* Destroy a posix_lock */
void
-__destroy_lock (posix_lock_t *lock)
+__destroy_lock(posix_lock_t *lock)
{
- GF_FREE (lock);
+ GF_FREE(lock->client_uid);
+ GF_FREE(lock);
}
+static posix_lock_t *
+__copy_lock(posix_lock_t *src)
+{
+ posix_lock_t *dst;
+
+ dst = GF_MALLOC(sizeof(posix_lock_t), gf_locks_mt_posix_lock_t);
+ if (dst != NULL) {
+ memcpy(dst, src, sizeof(posix_lock_t));
+ dst->client_uid = gf_strdup(src->client_uid);
+ if (dst->client_uid == NULL) {
+ GF_FREE(dst);
+ dst = NULL;
+ }
+
+ if (dst != NULL)
+ INIT_LIST_HEAD(&dst->list);
+ }
+
+ return dst;
+}
/* Convert a posix_lock to a struct gf_flock */
void
-posix_lock_to_flock (posix_lock_t *lock, struct gf_flock *flock)
+posix_lock_to_flock(posix_lock_t *lock, struct gf_flock *flock)
{
- flock->l_pid = lock->client_pid;
- flock->l_type = lock->fl_type;
- flock->l_start = lock->fl_start;
- flock->l_owner = lock->owner;
-
- if (lock->fl_end == LLONG_MAX)
- flock->l_len = 0;
- else
- flock->l_len = lock->fl_end - lock->fl_start + 1;
+ flock->l_pid = lock->user_flock.l_pid;
+ flock->l_type = lock->fl_type;
+ flock->l_start = lock->fl_start;
+ flock->l_owner = lock->owner;
+
+ if (lock->fl_end == LLONG_MAX)
+ flock->l_len = 0;
+ else
+ flock->l_len = lock->fl_end - lock->fl_start + 1;
}
/* Insert the lock into the inode's lock list */
static void
-__insert_lock (pl_inode_t *pl_inode, posix_lock_t *lock)
+__insert_lock(pl_inode_t *pl_inode, posix_lock_t *lock)
{
- list_add_tail (&lock->list, &pl_inode->ext_list);
+ if (lock->blocked)
+ lock->blkd_time = gf_time();
+ else
+ lock->granted_time = gf_time();
- return;
+ list_add_tail(&lock->list, &pl_inode->ext_list);
}
-
/* Return true if the locks overlap, false otherwise */
int
-locks_overlap (posix_lock_t *l1, posix_lock_t *l2)
+locks_overlap(posix_lock_t *l1, posix_lock_t *l2)
{
- /*
- Note:
- FUSE always gives us absolute offsets, so no need to worry
- about SEEK_CUR or SEEK_END
- */
+ /*
+ Note:
+ FUSE always gives us absolute offsets, so no need to worry
+ about SEEK_CUR or SEEK_END
+ */
- return ((l1->fl_end >= l2->fl_start) &&
- (l2->fl_end >= l1->fl_start));
+ return ((l1->fl_end >= l2->fl_start) && (l2->fl_end >= l1->fl_start));
}
-
/* Return true if the locks have the same owner */
int
-same_owner (posix_lock_t *l1, posix_lock_t *l2)
+same_owner(posix_lock_t *l1, posix_lock_t *l2)
{
-
- return ((l1->owner == l2->owner) &&
- (l1->transport == l2->transport));
-
+ return (is_same_lkowner(&l1->owner, &l2->owner) &&
+ (l1->client == l2->client));
}
-
/* Delete all F_UNLCK locks */
void
-__delete_unlck_locks (pl_inode_t *pl_inode)
+__delete_unlck_locks(pl_inode_t *pl_inode)
{
- posix_lock_t *l = NULL;
- posix_lock_t *tmp = NULL;
-
- list_for_each_entry_safe (l, tmp, &pl_inode->ext_list, list) {
- if (l->fl_type == F_UNLCK) {
- __delete_lock (pl_inode, l);
- __destroy_lock (l);
- }
+ posix_lock_t *l = NULL;
+ posix_lock_t *tmp = NULL;
+
+ list_for_each_entry_safe(l, tmp, &pl_inode->ext_list, list)
+ {
+ if (l->fl_type == F_UNLCK) {
+ __delete_lock(l);
+ __destroy_lock(l);
}
+ }
}
-
/* Add two locks */
static posix_lock_t *
-add_locks (posix_lock_t *l1, posix_lock_t *l2)
+add_locks(posix_lock_t *l1, posix_lock_t *l2, posix_lock_t *dst)
{
- posix_lock_t *sum = NULL;
+ posix_lock_t *sum = NULL;
+
+ sum = __copy_lock(dst);
+ if (!sum)
+ return NULL;
- sum = GF_CALLOC (1, sizeof (posix_lock_t),
- gf_locks_mt_posix_lock_t);
- if (!sum)
- return NULL;
+ sum->fl_start = min(l1->fl_start, l2->fl_start);
+ sum->fl_end = max(l1->fl_end, l2->fl_end);
- sum->fl_start = min (l1->fl_start, l2->fl_start);
- sum->fl_end = max (l1->fl_end, l2->fl_end);
+ posix_lock_to_flock(sum, &sum->user_flock);
- return sum;
+ return sum;
}
/* Subtract two locks */
struct _values {
- posix_lock_t *locks[3];
+ posix_lock_t *locks[3];
};
/* {big} must always be contained inside {small} */
static struct _values
-subtract_locks (posix_lock_t *big, posix_lock_t *small)
+subtract_locks(posix_lock_t *big, posix_lock_t *small)
{
+ struct _values v = {.locks = {0, 0, 0}};
- struct _values v = { .locks = {0, 0, 0} };
-
- if ((big->fl_start == small->fl_start) &&
- (big->fl_end == small->fl_end)) {
- /* both edges coincide with big */
- v.locks[0] = GF_CALLOC (1, sizeof (posix_lock_t),
- gf_locks_mt_posix_lock_t);
- if (!v.locks[0])
- goto out;
- memcpy (v.locks[0], big, sizeof (posix_lock_t));
- v.locks[0]->fl_type = small->fl_type;
- goto done;
+ if ((big->fl_start == small->fl_start) && (big->fl_end == small->fl_end)) {
+ /* both edges coincide with big */
+ v.locks[0] = __copy_lock(big);
+ if (!v.locks[0]) {
+ goto out;
}
- if ((small->fl_start > big->fl_start) &&
- (small->fl_end < big->fl_end)) {
- /* both edges lie inside big */
- v.locks[0] = GF_CALLOC (1, sizeof (posix_lock_t),
- gf_locks_mt_posix_lock_t);
- if (!v.locks[0])
- goto out;
-
- v.locks[1] = GF_CALLOC (1, sizeof (posix_lock_t),
- gf_locks_mt_posix_lock_t);
- if (!v.locks[1])
- goto out;
-
- v.locks[2] = GF_CALLOC (1, sizeof (posix_lock_t),
- gf_locks_mt_posix_lock_t);
- if (!v.locks[1])
- goto out;
-
- memcpy (v.locks[0], big, sizeof (posix_lock_t));
- v.locks[0]->fl_end = small->fl_start - 1;
-
- memcpy (v.locks[1], small, sizeof (posix_lock_t));
-
- memcpy (v.locks[2], big, sizeof (posix_lock_t));
- v.locks[2]->fl_start = small->fl_end + 1;
- goto done;
+ v.locks[0]->fl_type = small->fl_type;
+ v.locks[0]->user_flock.l_type = small->fl_type;
+ goto done;
+ }
+
+ if ((small->fl_start > big->fl_start) && (small->fl_end < big->fl_end)) {
+ /* both edges lie inside big */
+ v.locks[0] = __copy_lock(big);
+ v.locks[1] = __copy_lock(small);
+ v.locks[2] = __copy_lock(big);
+ if ((v.locks[0] == NULL) || (v.locks[1] == NULL) ||
+ (v.locks[2] == NULL)) {
+ goto out;
+ }
+ v.locks[0]->fl_end = small->fl_start - 1;
+ v.locks[2]->fl_start = small->fl_end + 1;
+ posix_lock_to_flock(v.locks[0], &v.locks[0]->user_flock);
+ posix_lock_to_flock(v.locks[2], &v.locks[2]->user_flock);
+ goto done;
+ }
+
+ /* one edge coincides with big */
+ if (small->fl_start == big->fl_start) {
+ v.locks[0] = __copy_lock(big);
+ v.locks[1] = __copy_lock(small);
+ if ((v.locks[0] == NULL) || (v.locks[1] == NULL)) {
+ goto out;
}
- /* one edge coincides with big */
- if (small->fl_start == big->fl_start) {
- v.locks[0] = GF_CALLOC (1, sizeof (posix_lock_t),
- gf_locks_mt_posix_lock_t);
- if (!v.locks[0])
- goto out;
+ v.locks[0]->fl_start = small->fl_end + 1;
+ posix_lock_to_flock(v.locks[0], &v.locks[0]->user_flock);
+ goto done;
+ }
- v.locks[1] = GF_CALLOC (1, sizeof (posix_lock_t),
- gf_locks_mt_posix_lock_t);
- if (!v.locks[1])
- goto out;
+ if (small->fl_end == big->fl_end) {
+ v.locks[0] = __copy_lock(big);
+ v.locks[1] = __copy_lock(small);
+ if ((v.locks[0] == NULL) || (v.locks[1] == NULL)) {
+ goto out;
+ }
- memcpy (v.locks[0], big, sizeof (posix_lock_t));
- v.locks[0]->fl_start = small->fl_end + 1;
+ v.locks[0]->fl_end = small->fl_start - 1;
+ posix_lock_to_flock(v.locks[0], &v.locks[0]->user_flock);
+ goto done;
+ }
- memcpy (v.locks[1], small, sizeof (posix_lock_t));
- goto done;
- }
+ GF_ASSERT(0);
+ gf_log("posix-locks", GF_LOG_ERROR, "Unexpected case in subtract_locks");
- if (small->fl_end == big->fl_end) {
- v.locks[0] = GF_CALLOC (1, sizeof (posix_lock_t),
- gf_locks_mt_posix_lock_t);
- if (!v.locks[0])
- goto out;
+out:
+ if (v.locks[0]) {
+ __destroy_lock(v.locks[0]);
+ v.locks[0] = NULL;
+ }
+ if (v.locks[1]) {
+ __destroy_lock(v.locks[1]);
+ v.locks[1] = NULL;
+ }
+ if (v.locks[2]) {
+ __destroy_lock(v.locks[2]);
+ v.locks[2] = NULL;
+ }
- v.locks[1] = GF_CALLOC (1, sizeof (posix_lock_t),
- gf_locks_mt_posix_lock_t);
- if (!v.locks[1])
- goto out;
+done:
+ return v;
+}
- memcpy (v.locks[0], big, sizeof (posix_lock_t));
- v.locks[0]->fl_end = small->fl_start - 1;
+static posix_lock_t *
+first_conflicting_overlap(pl_inode_t *pl_inode, posix_lock_t *lock)
+{
+ posix_lock_t *l = NULL;
+ posix_lock_t *conf = NULL;
- memcpy (v.locks[1], small, sizeof (posix_lock_t));
- goto done;
- }
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ list_for_each_entry(l, &pl_inode->ext_list, list)
+ {
+ if (l->blocked)
+ continue;
- GF_ASSERT (0);
- gf_log (POSIX_LOCKS, GF_LOG_ERROR, "Unexpected case in subtract_locks");
+ if (locks_overlap(l, lock)) {
+ if (same_owner(l, lock))
+ continue;
-out:
- if (v.locks[0]) {
- GF_FREE (v.locks[0]);
- v.locks[0] = NULL;
- }
- if (v.locks[1]) {
- GF_FREE (v.locks[1]);
- v.locks[1] = NULL;
- }
- if (v.locks[2]) {
- GF_FREE (v.locks[2]);
- v.locks[2] = NULL;
+ if ((l->fl_type == F_WRLCK) || (lock->fl_type == F_WRLCK)) {
+ conf = l;
+ goto unlock;
+ }
+ }
}
+ }
+unlock:
+ pthread_mutex_unlock(&pl_inode->mutex);
-done:
- return v;
+ return conf;
}
/*
@@ -715,343 +789,803 @@ done:
If {begin} is NULL, then start from the beginning of the list
*/
static posix_lock_t *
-first_overlap (pl_inode_t *pl_inode, posix_lock_t *lock)
+first_overlap(pl_inode_t *pl_inode, posix_lock_t *lock)
{
- posix_lock_t *l = NULL;
+ posix_lock_t *l = NULL;
- list_for_each_entry (l, &pl_inode->ext_list, list) {
- if (l->blocked)
- continue;
+ list_for_each_entry(l, &pl_inode->ext_list, list)
+ {
+ if (l->blocked)
+ continue;
- if (locks_overlap (l, lock))
- return l;
- }
+ if (locks_overlap(l, lock))
+ return l;
+ }
- return NULL;
+ return NULL;
}
-
-
/* Return true if lock is grantable */
static int
-__is_lock_grantable (pl_inode_t *pl_inode, posix_lock_t *lock)
+__is_lock_grantable(pl_inode_t *pl_inode, posix_lock_t *lock)
{
- posix_lock_t *l = NULL;
- int ret = 1;
-
- list_for_each_entry (l, &pl_inode->ext_list, list) {
- if (!l->blocked && locks_overlap (lock, l)) {
- if (((l->fl_type == F_WRLCK)
- || (lock->fl_type == F_WRLCK))
- && (lock->fl_type != F_UNLCK)
- && !same_owner (l, lock)) {
- ret = 0;
- break;
- }
- }
+ posix_lock_t *l = NULL;
+ int ret = 1;
+
+ list_for_each_entry(l, &pl_inode->ext_list, list)
+ {
+ if (!l->blocked && locks_overlap(lock, l)) {
+ if (((l->fl_type == F_WRLCK) || (lock->fl_type == F_WRLCK)) &&
+ (lock->fl_type != F_UNLCK) && !same_owner(l, lock)) {
+ ret = 0;
+ break;
+ }
}
- return ret;
+ }
+ return ret;
}
-
-extern void do_blocked_rw (pl_inode_t *);
-
+extern void
+do_blocked_rw(pl_inode_t *);
static void
-__insert_and_merge (pl_inode_t *pl_inode, posix_lock_t *lock)
+__insert_and_merge(pl_inode_t *pl_inode, posix_lock_t *lock)
{
- posix_lock_t *conf = NULL;
- posix_lock_t *t = NULL;
- posix_lock_t *sum = NULL;
- int i = 0;
- struct _values v = { .locks = {0, 0, 0} };
-
- list_for_each_entry_safe (conf, t, &pl_inode->ext_list, list) {
- if (!locks_overlap (conf, lock))
- continue;
+ posix_lock_t *conf = NULL;
+ posix_lock_t *t = NULL;
+ posix_lock_t *sum = NULL;
+ int i = 0;
+ struct _values v = {.locks = {0, 0, 0}};
+
+ list_for_each_entry_safe(conf, t, &pl_inode->ext_list, list)
+ {
+ if (conf->blocked)
+ continue;
+ if (!locks_overlap(conf, lock))
+ continue;
+
+ if (same_owner(conf, lock)) {
+ if (conf->fl_type == lock->fl_type &&
+ conf->lk_flags == lock->lk_flags) {
+ sum = add_locks(lock, conf, lock);
+
+ __delete_lock(conf);
+ __destroy_lock(conf);
+
+ __destroy_lock(lock);
+ INIT_LIST_HEAD(&sum->list);
+ posix_lock_to_flock(sum, &sum->user_flock);
+ __insert_and_merge(pl_inode, sum);
- if (same_owner (conf, lock)) {
- if (conf->fl_type == lock->fl_type) {
- sum = add_locks (lock, conf);
+ return;
+ } else {
+ sum = add_locks(lock, conf, conf);
- sum->fl_type = lock->fl_type;
- sum->transport = lock->transport;
- sum->fd_num = lock->fd_num;
- sum->client_pid = lock->client_pid;
- sum->owner = lock->owner;
+ v = subtract_locks(sum, lock);
- __delete_lock (pl_inode, conf);
- __destroy_lock (conf);
+ __delete_lock(conf);
+ __destroy_lock(conf);
- __destroy_lock (lock);
- __insert_and_merge (pl_inode, sum);
+ __delete_lock(lock);
+ __destroy_lock(lock);
- return;
- } else {
- sum = add_locks (lock, conf);
+ __destroy_lock(sum);
- sum->fl_type = conf->fl_type;
- sum->transport = conf->transport;
- sum->fd_num = conf->fd_num;
- sum->client_pid = conf->client_pid;
- sum->owner = conf->owner;
+ for (i = 0; i < 3; i++) {
+ if (!v.locks[i])
+ continue;
- v = subtract_locks (sum, lock);
+ __insert_and_merge(pl_inode, v.locks[i]);
+ }
- __delete_lock (pl_inode, conf);
- __destroy_lock (conf);
+ __delete_unlck_locks(pl_inode);
+ return;
+ }
+ }
- __delete_lock (pl_inode, lock);
- __destroy_lock (lock);
+ if (lock->fl_type == F_UNLCK) {
+ continue;
+ }
- __destroy_lock (sum);
+ if ((conf->fl_type == F_RDLCK) && (lock->fl_type == F_RDLCK)) {
+ __insert_lock(pl_inode, lock);
+ return;
+ }
+ }
+
+ /* no conflicts, so just insert */
+ if (lock->fl_type != F_UNLCK) {
+ __insert_lock(pl_inode, lock);
+ } else {
+ __destroy_lock(lock);
+ }
+}
- for (i = 0; i < 3; i++) {
- if (!v.locks[i])
- continue;
+void
+__grant_blocked_locks(xlator_t *this, pl_inode_t *pl_inode,
+ struct list_head *granted)
+{
+ struct list_head tmp_list;
+ posix_lock_t *l = NULL;
+ posix_lock_t *tmp = NULL;
+ posix_lock_t *conf = NULL;
+
+ INIT_LIST_HEAD(&tmp_list);
+
+ list_for_each_entry_safe(l, tmp, &pl_inode->ext_list, list)
+ {
+ if (l->blocked) {
+ conf = first_overlap(pl_inode, l);
+ if (conf)
+ continue;
+
+ l->blocked = 0;
+ list_move_tail(&l->list, &tmp_list);
+ }
+ }
- INIT_LIST_HEAD (&v.locks[i]->list);
- __insert_and_merge (pl_inode,
- v.locks[i]);
- }
+ list_for_each_entry_safe(l, tmp, &tmp_list, list)
+ {
+ list_del_init(&l->list);
- __delete_unlck_locks (pl_inode);
- return;
- }
- }
+ if (__is_lock_grantable(pl_inode, l)) {
+ conf = GF_CALLOC(1, sizeof(*conf), gf_locks_mt_posix_lock_t);
- if (lock->fl_type == F_UNLCK) {
- continue;
- }
+ if (!conf) {
+ l->blocked = 1;
+ __insert_lock(pl_inode, l);
+ continue;
+ }
- if ((conf->fl_type == F_RDLCK) && (lock->fl_type == F_RDLCK)) {
- __insert_lock (pl_inode, lock);
- return;
- }
- }
+ conf->frame = l->frame;
+ l->frame = NULL;
+
+ posix_lock_to_flock(l, &conf->user_flock);
- /* no conflicts, so just insert */
- if (lock->fl_type != F_UNLCK) {
- __insert_lock (pl_inode, lock);
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s (pid=%d) lk-owner:%s %" PRId64 " - %" PRId64
+ " => Granted",
+ l->fl_type == F_UNLCK ? "Unlock" : "Lock", l->client_pid,
+ lkowner_utoa(&l->owner), l->user_flock.l_start,
+ l->user_flock.l_len);
+
+ __insert_and_merge(pl_inode, l);
+
+ list_add(&conf->list, granted);
} else {
- __destroy_lock (lock);
+ l->blocked = 1;
+ __insert_lock(pl_inode, l);
}
+ }
}
-
void
-__grant_blocked_locks (xlator_t *this, pl_inode_t *pl_inode, struct list_head *granted)
+grant_blocked_locks(xlator_t *this, pl_inode_t *pl_inode)
{
- struct list_head tmp_list;
- posix_lock_t *l = NULL;
- posix_lock_t *tmp = NULL;
- posix_lock_t *conf = NULL;
+ struct list_head granted_list;
+ posix_lock_t *tmp = NULL;
+ posix_lock_t *lock = NULL;
+ pl_local_t *local = NULL;
+ INIT_LIST_HEAD(&granted_list);
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ __grant_blocked_locks(this, pl_inode, &granted_list);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ list_for_each_entry_safe(lock, tmp, &granted_list, list)
+ {
+ list_del_init(&lock->list);
+
+ pl_trace_out(this, lock->frame, NULL, NULL, F_SETLKW, &lock->user_flock,
+ 0, 0, NULL);
+ local = lock->frame->local;
+ PL_STACK_UNWIND_AND_FREE(local, lk, lock->frame, 0, 0,
+ &lock->user_flock, NULL);
+ __destroy_lock(lock);
+ }
+
+ return;
+}
- INIT_LIST_HEAD (&tmp_list);
+static int
+pl_send_prelock_unlock(xlator_t *this, pl_inode_t *pl_inode,
+ posix_lock_t *old_lock)
+{
+ struct gf_flock flock = {
+ 0,
+ };
+ posix_lock_t *unlock_lock = NULL;
+ int32_t op_errno = 0;
- list_for_each_entry_safe (l, tmp, &pl_inode->ext_list, list) {
- if (l->blocked) {
- conf = first_overlap (pl_inode, l);
- if (conf)
- continue;
+ struct list_head granted_list;
+ posix_lock_t *tmp = NULL;
+ posix_lock_t *lock = NULL;
+ pl_local_t *local = NULL;
- l->blocked = 0;
- list_move_tail (&l->list, &tmp_list);
- }
+ int ret = -1;
+
+ INIT_LIST_HEAD(&granted_list);
+
+ flock.l_type = F_UNLCK;
+ flock.l_whence = old_lock->user_flock.l_whence;
+ flock.l_start = old_lock->user_flock.l_start;
+ flock.l_len = old_lock->user_flock.l_len;
+ flock.l_pid = old_lock->user_flock.l_pid;
+
+ unlock_lock = new_posix_lock(&flock, old_lock->client, old_lock->client_pid,
+ &old_lock->owner, old_lock->fd,
+ old_lock->lk_flags, 0, &op_errno);
+ GF_VALIDATE_OR_GOTO(this->name, unlock_lock, out);
+ ret = 0;
+
+ __insert_and_merge(pl_inode, unlock_lock);
+
+ __grant_blocked_locks(this, pl_inode, &granted_list);
+
+ list_for_each_entry_safe(lock, tmp, &granted_list, list)
+ {
+ list_del_init(&lock->list);
+
+ pl_trace_out(this, lock->frame, NULL, NULL, F_SETLKW, &lock->user_flock,
+ 0, 0, NULL);
+ local = lock->frame->local;
+ PL_STACK_UNWIND_AND_FREE(local, lk, lock->frame, 0, 0,
+ &lock->user_flock, NULL);
+ __destroy_lock(lock);
+ }
+
+out:
+ return ret;
+}
+
+int
+pl_setlk(xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
+ int can_block)
+{
+ int ret = 0;
+
+ errno = 0;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ /* Send unlock before the actual lock to
+ prevent lock upgrade / downgrade
+ problems only if:
+ - it is a blocking call
+ - it has other conflicting locks
+ */
+
+ if (can_block && !(__is_lock_grantable(pl_inode, lock))) {
+ ret = pl_send_prelock_unlock(this, pl_inode, lock);
+ if (ret)
+ gf_log(this->name, GF_LOG_DEBUG,
+ "Could not send pre-lock "
+ "unlock");
+ }
+
+ if (__is_lock_grantable(pl_inode, lock)) {
+ if (pl_metalock_is_active(pl_inode)) {
+ __pl_queue_lock(pl_inode, lock);
+ pthread_mutex_unlock(&pl_inode->mutex);
+ ret = -2;
+ goto out;
+ }
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s (pid=%d) lk-owner:%s %" PRId64 " - %" PRId64 " => OK",
+ lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
+ lock->client_pid, lkowner_utoa(&lock->owner),
+ lock->user_flock.l_start, lock->user_flock.l_len);
+ __insert_and_merge(pl_inode, lock);
+ } else if (can_block) {
+ if (pl_metalock_is_active(pl_inode)) {
+ __pl_queue_lock(pl_inode, lock);
+ pthread_mutex_unlock(&pl_inode->mutex);
+ ret = -2;
+ goto out;
+ }
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s (pid=%d) lk-owner:%s %" PRId64 " - %" PRId64
+ " => Blocked",
+ lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
+ lock->client_pid, lkowner_utoa(&lock->owner),
+ lock->user_flock.l_start, lock->user_flock.l_len);
+
+ pl_trace_block(this, lock->frame, NULL, NULL, F_SETLKW,
+ &lock->user_flock, NULL);
+
+ lock->blocked = 1;
+ __insert_lock(pl_inode, lock);
+ ret = -1;
+ } else {
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s (pid=%d) lk-owner:%s %" PRId64 " - %" PRId64 " => NOK",
+ lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
+ lock->client_pid, lkowner_utoa(&lock->owner),
+ lock->user_flock.l_start, lock->user_flock.l_len);
+ errno = EAGAIN;
+ ret = -1;
}
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
- list_for_each_entry_safe (l, tmp, &tmp_list, list) {
- list_del_init (&l->list);
+ grant_blocked_locks(this, pl_inode);
- if (__is_lock_grantable (pl_inode, l)) {
- conf = GF_CALLOC (1, sizeof (*conf),
- gf_locks_mt_posix_lock_t);
+ do_blocked_rw(pl_inode);
- if (!conf) {
- l->blocked = 1;
- __insert_lock (pl_inode, l);
- continue;
- }
+out:
+ return ret;
+}
- conf->frame = l->frame;
- l->frame = NULL;
+posix_lock_t *
+pl_getlk(pl_inode_t *pl_inode, posix_lock_t *lock)
+{
+ posix_lock_t *conf = first_conflicting_overlap(pl_inode, lock);
+ if (conf == NULL) {
+ lock->fl_type = F_UNLCK;
+ return lock;
+ }
- posix_lock_to_flock (l, &conf->user_flock);
+ return conf;
+}
- gf_log (this->name, GF_LOG_TRACE,
- "%s (pid=%d) lk-owner:%"PRIu64" %"PRId64" - %"PRId64" => Granted",
- l->fl_type == F_UNLCK ? "Unlock" : "Lock",
- l->client_pid,
- l->owner,
- l->user_flock.l_start,
- l->user_flock.l_len);
+gf_boolean_t
+pl_does_monkey_want_stuck_lock()
+{
+ long int monkey_unlock_rand = 0;
+ long int monkey_unlock_rand_rem = 0;
+
+ /* coverity[DC.WEAK_CRYPTO] */
+ monkey_unlock_rand = random();
+ monkey_unlock_rand_rem = monkey_unlock_rand % 100;
+ if (monkey_unlock_rand_rem == 0)
+ return _gf_true;
+ return _gf_false;
+}
- __insert_and_merge (pl_inode, l);
+int
+pl_lock_preempt(pl_inode_t *pl_inode, posix_lock_t *reqlock)
+{
+ posix_lock_t *lock = NULL;
+ posix_lock_t *i = NULL;
+ pl_rw_req_t *rw = NULL;
+ pl_rw_req_t *itr = NULL;
+ struct list_head unwind_blist = {
+ 0,
+ };
+ struct list_head unwind_rw_list = {
+ 0,
+ };
+ int ret = 0;
+
+ INIT_LIST_HEAD(&unwind_blist);
+ INIT_LIST_HEAD(&unwind_rw_list);
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ /*
+ - go through the lock list
+ - remove all locks from different owners
+        - same owner locks will be added or subtracted based on
+ the new request
+ - add the new lock
+ */
+ list_for_each_entry_safe(lock, i, &pl_inode->ext_list, list)
+ {
+ if (lock->blocked) {
+ list_del_init(&lock->list);
+ list_add(&lock->list, &unwind_blist);
+ continue;
+ }
+
+ if (locks_overlap(lock, reqlock)) {
+ if (same_owner(lock, reqlock))
+ continue;
+
+ /* remove conflicting locks */
+ list_del_init(&lock->list);
+ __delete_lock(lock);
+ __destroy_lock(lock);
+ }
+ }
- list_add (&conf->list, granted);
- } else {
- l->blocked = 1;
- __insert_lock (pl_inode, l);
- }
+ __insert_and_merge(pl_inode, reqlock);
+
+ list_for_each_entry_safe(rw, itr, &pl_inode->rw_list, list)
+ {
+ list_del_init(&rw->list);
+ list_add(&rw->list, &unwind_rw_list);
}
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ /* unwind blocked locks */
+ list_for_each_entry_safe(lock, i, &unwind_blist, list)
+ {
+ PL_STACK_UNWIND_AND_FREE(((pl_local_t *)lock->frame->local), lk,
+ lock->frame, -1, EBUSY, &lock->user_flock,
+ NULL);
+ __destroy_lock(lock);
+ }
+
+ /* unwind blocked IOs */
+ list_for_each_entry_safe(rw, itr, &unwind_rw_list, list)
+ {
+ pl_clean_local(rw->stub->frame->local);
+ call_unwind_error(rw->stub, -1, EBUSY);
+ }
+
+ return ret;
}
+/* Return true in case we need to ensure mandatory-locking
+ * semantics under different modes.
+ */
+gf_boolean_t
+pl_is_mandatory_locking_enabled(pl_inode_t *pl_inode)
+{
+ posix_locks_private_t *priv = THIS->private;
+
+ if (priv->mandatory_mode == MLK_FILE_BASED && pl_inode->mandatory)
+ return _gf_true;
+ else if (priv->mandatory_mode == MLK_FORCED ||
+ priv->mandatory_mode == MLK_OPTIMAL)
+ return _gf_true;
+
+ return _gf_false;
+}
void
-grant_blocked_locks (xlator_t *this, pl_inode_t *pl_inode)
+pl_clean_local(pl_local_t *local)
{
- struct list_head granted_list;
- posix_lock_t *tmp = NULL;
- posix_lock_t *lock = NULL;
+ if (!local)
+ return;
- INIT_LIST_HEAD (&granted_list);
+ if (local->inodelk_dom_count_req)
+ data_unref(local->inodelk_dom_count_req);
+ loc_wipe(&local->loc[0]);
+ loc_wipe(&local->loc[1]);
+ if (local->fd)
+ fd_unref(local->fd);
+ if (local->inode)
+ inode_unref(local->inode);
+ mem_put(local);
+}
- pthread_mutex_lock (&pl_inode->mutex);
- {
- __grant_blocked_locks (this, pl_inode, &granted_list);
+/*
+TODO: detach local initialization from PL_LOCAL_GET_REQUESTS and add it here
+*/
+int
+pl_local_init(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd)
+{
+ pl_local_t *local = NULL;
+
+ if (!loc && !fd) {
+ return -1;
+ }
+
+ if (!frame->local) {
+ local = mem_get0(this->local_pool);
+ if (!local) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "mem allocation failed");
+ return -1;
+ }
+
+ local->inode = (loc ? inode_ref(loc->inode) : inode_ref(fd->inode));
+
+ frame->local = local;
+ }
+
+ return 0;
+}
+
+gf_boolean_t
+pl_is_lk_owner_valid(gf_lkowner_t *owner, client_t *client)
+{
+ if (client && (client->opversion < GD_OP_VERSION_7_0)) {
+ return _gf_true;
+ }
+
+ if (is_lk_owner_null(owner)) {
+ return _gf_false;
+ }
+ return _gf_true;
+}
+
+static int32_t
+pl_inode_from_loc(loc_t *loc, inode_t **pinode)
+{
+ inode_t *inode = NULL;
+ int32_t error = 0;
+
+ if (loc->inode != NULL) {
+ inode = inode_ref(loc->inode);
+ goto done;
+ }
+
+ if (loc->parent == NULL) {
+ error = EINVAL;
+ goto done;
+ }
+
+ if (!gf_uuid_is_null(loc->gfid)) {
+ inode = inode_find(loc->parent->table, loc->gfid);
+ if (inode != NULL) {
+ goto done;
}
- pthread_mutex_unlock (&pl_inode->mutex);
+ }
- list_for_each_entry_safe (lock, tmp, &granted_list, list) {
- list_del_init (&lock->list);
+ if (loc->name == NULL) {
+ error = EINVAL;
+ goto done;
+ }
- pl_trace_out (this, lock->frame, NULL, NULL, F_SETLKW,
- &lock->user_flock, 0, 0, NULL);
+ inode = inode_grep(loc->parent->table, loc->parent, loc->name);
+ if (inode == NULL) {
+ /* We haven't found any inode. This means that the file doesn't exist
+ * or that even if it exists, we don't have any knowledge about it, so
+ * we don't have locks on it either, which is fine for our purposes. */
+ goto done;
+ }
- STACK_UNWIND (lock->frame, 0, 0, &lock->user_flock);
+done:
+ *pinode = inode;
+
+ return error;
+}
- GF_FREE (lock);
+static gf_boolean_t
+pl_inode_has_owners(xlator_t *xl, client_t *client, pl_inode_t *pl_inode,
+ struct timespec *now, struct list_head *contend)
+{
+ pl_dom_list_t *dom;
+ pl_inode_lock_t *lock;
+ gf_boolean_t has_owners = _gf_false;
+
+ list_for_each_entry(dom, &pl_inode->dom_list, inode_list)
+ {
+ list_for_each_entry(lock, &dom->inodelk_list, list)
+ {
+ /* If the lock belongs to the same client, we assume it's related
+ * to the same operation, so we allow the removal to continue. */
+ if (lock->client == client) {
+ continue;
+ }
+ /* If the lock belongs to an internal process, we don't block the
+ * removal. */
+ if (lock->client_pid < 0) {
+ continue;
+ }
+ if (contend == NULL) {
+ return _gf_true;
+ }
+ has_owners = _gf_true;
+ inodelk_contention_notify_check(xl, lock, now, contend);
}
+ }
- return;
+ return has_owners;
}
-static int
-pl_send_prelock_unlock (xlator_t *this, pl_inode_t *pl_inode,
- posix_lock_t *old_lock)
+int32_t
+pl_inode_remove_prepare(xlator_t *xl, call_frame_t *frame, loc_t *loc,
+ pl_inode_t **ppl_inode, struct list_head *contend)
{
- struct gf_flock flock = {0,};
- posix_lock_t *unlock_lock = NULL;
+ struct timespec now;
+ inode_t *inode;
+ pl_inode_t *pl_inode;
+ int32_t error;
- struct list_head granted_list;
- posix_lock_t *tmp = NULL;
- posix_lock_t *lock = NULL;
+ pl_inode = NULL;
- int ret = -1;
+ error = pl_inode_from_loc(loc, &inode);
+ if ((error != 0) || (inode == NULL)) {
+ goto done;
+ }
- INIT_LIST_HEAD (&granted_list);
+ pl_inode = pl_inode_get(xl, inode, NULL);
+ if (pl_inode == NULL) {
+ inode_unref(inode);
+ error = ENOMEM;
+ goto done;
+ }
- flock.l_type = F_UNLCK;
- flock.l_whence = old_lock->user_flock.l_whence;
- flock.l_start = old_lock->user_flock.l_start;
- flock.l_len = old_lock->user_flock.l_len;
+ /* pl_inode_from_loc() already increments ref count for inode, so
+     * we only assign our reference here. */
+ pl_inode->inode = inode;
+ timespec_now(&now);
- unlock_lock = new_posix_lock (&flock, old_lock->transport,
- old_lock->client_pid, old_lock->owner,
- old_lock->fd);
- GF_VALIDATE_OR_GOTO (this->name, unlock_lock, out);
- ret = 0;
+ pthread_mutex_lock(&pl_inode->mutex);
- __insert_and_merge (pl_inode, unlock_lock);
+ if (pl_inode->removed) {
+ error = ESTALE;
+ goto unlock;
+ }
- __grant_blocked_locks (this, pl_inode, &granted_list);
+ if (pl_inode_has_owners(xl, frame->root->client, pl_inode, &now, contend)) {
+ error = -1;
+ /* We skip the unlock here because the caller must create a stub when
+         * we return -1 and then call pl_inode_remove_complete(), which
+ * assumes the lock is still acquired and will release it once
+ * everything else is prepared. */
+ goto done;
+ }
- list_for_each_entry_safe (lock, tmp, &granted_list, list) {
- list_del_init (&lock->list);
+ pl_inode->is_locked = _gf_true;
+ pl_inode->remove_running++;
- pl_trace_out (this, lock->frame, NULL, NULL, F_SETLKW,
- &lock->user_flock, 0, 0, NULL);
+unlock:
+ pthread_mutex_unlock(&pl_inode->mutex);
- STACK_UNWIND (lock->frame, 0, 0, &lock->user_flock);
+done:
+ *ppl_inode = pl_inode;
+
+ return error;
+}
- GF_FREE (lock);
+int32_t
+pl_inode_remove_complete(xlator_t *xl, pl_inode_t *pl_inode, call_stub_t *stub,
+ struct list_head *contend)
+{
+ pl_inode_lock_t *lock;
+ int32_t error = -1;
+
+ if (stub != NULL) {
+ list_add_tail(&stub->list, &pl_inode->waiting);
+ pl_inode->is_locked = _gf_true;
+ } else {
+ error = ENOMEM;
+
+ while (!list_empty(contend)) {
+ lock = list_first_entry(contend, pl_inode_lock_t, list);
+ list_del_init(&lock->list);
+ __pl_inodelk_unref(lock);
}
+ }
-out:
- return ret;
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ if (error < 0) {
+ inodelk_contention_notify(xl, contend);
+ }
+
+ inode_unref(pl_inode->inode);
+
+ return error;
}
-int
-pl_setlk (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
- int can_block)
+void
+pl_inode_remove_wake(struct list_head *list)
{
- int ret = 0;
+ call_stub_t *stub;
- errno = 0;
+ while (!list_empty(list)) {
+ stub = list_first_entry(list, call_stub_t, list);
+ list_del_init(&stub->list);
- pthread_mutex_lock (&pl_inode->mutex);
- {
- /* Send unlock before the actual lock to
- prevent lock upgrade / downgrade
- problems only if:
- - it is a blocking call
- - it has other conflicting locks
- */
-
- if (can_block &&
- !(__is_lock_grantable (pl_inode, lock))) {
- ret = pl_send_prelock_unlock (this, pl_inode,
- lock);
- if (ret)
- gf_log (this->name, GF_LOG_DEBUG,
- "Could not send pre-lock "
- "unlock");
- }
+ call_resume(stub);
+ }
+}
- if (__is_lock_grantable (pl_inode, lock)) {
- gf_log (this->name, GF_LOG_TRACE,
- "%s (pid=%d) lk-owner:%"PRIu64" %"PRId64" - %"PRId64" => OK",
- lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
- lock->client_pid,
- lock->owner,
- lock->user_flock.l_start,
- lock->user_flock.l_len);
- __insert_and_merge (pl_inode, lock);
- } else if (can_block) {
- gf_log (this->name, GF_LOG_TRACE,
- "%s (pid=%d) lk-owner:%"PRIu64" %"PRId64" - %"PRId64" => Blocked",
- lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
- lock->client_pid,
- lock->owner,
- lock->user_flock.l_start,
- lock->user_flock.l_len);
- lock->blocked = 1;
- __insert_lock (pl_inode, lock);
- ret = -1;
- } else {
- gf_log (this->name, GF_LOG_TRACE,
- "%s (pid=%d) lk-owner:%"PRIu64" %"PRId64" - %"PRId64" => NOK",
- lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
- lock->client_pid,
- lock->owner,
- lock->user_flock.l_start,
- lock->user_flock.l_len);
- errno = EAGAIN;
- ret = -1;
- }
+void
+pl_inode_remove_cbk(xlator_t *xl, pl_inode_t *pl_inode, int32_t error)
+{
+ struct list_head contend, granted;
+ struct timespec now;
+ pl_dom_list_t *dom;
+
+ if (pl_inode == NULL) {
+ return;
+ }
+
+ INIT_LIST_HEAD(&contend);
+ INIT_LIST_HEAD(&granted);
+ timespec_now(&now);
+
+ pthread_mutex_lock(&pl_inode->mutex);
+
+ if (error == 0) {
+ if (pl_inode->links >= 0) {
+ pl_inode->links--;
}
- pthread_mutex_unlock (&pl_inode->mutex);
+ if (pl_inode->links == 0) {
+ pl_inode->removed = _gf_true;
+ }
+ }
- grant_blocked_locks (this, pl_inode);
+ pl_inode->remove_running--;
- do_blocked_rw (pl_inode);
+ if ((pl_inode->remove_running == 0) && list_empty(&pl_inode->waiting)) {
+ pl_inode->is_locked = _gf_false;
- return ret;
-}
+ list_for_each_entry(dom, &pl_inode->dom_list, inode_list)
+ {
+ __grant_blocked_inode_locks(xl, pl_inode, &granted, dom, &now,
+ &contend);
+ }
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
-posix_lock_t *
-pl_getlk (pl_inode_t *pl_inode, posix_lock_t *lock)
+ unwind_granted_inodes(xl, pl_inode, &granted);
+
+ inodelk_contention_notify(xl, &contend);
+
+ inode_unref(pl_inode->inode);
+}
+
+void
+pl_inode_remove_unlocked(xlator_t *xl, pl_inode_t *pl_inode,
+ struct list_head *list)
{
- posix_lock_t *conf = NULL;
+ call_stub_t *stub, *tmp;
+
+ if (!pl_inode->is_locked) {
+ return;
+ }
- conf = first_overlap (pl_inode, lock);
+ list_for_each_entry_safe(stub, tmp, &pl_inode->waiting, list)
+ {
+ if (!pl_inode_has_owners(xl, stub->frame->root->client, pl_inode, NULL,
+ NULL)) {
+ list_move_tail(&stub->list, list);
+ }
+ }
+}
- if (conf == NULL) {
- lock->fl_type = F_UNLCK;
- return lock;
+/* This function determines if an inodelk attempt can be done now or it needs
+ * to wait.
+ *
+ * Possible return values:
+ * < 0: An error occurred. Currently only -ESTALE can be returned if the
+ * inode has been deleted previously by unlink/rmdir/rename
+ * = 0: The lock can be attempted.
+ * > 0: The lock needs to wait because a conflicting remove operation is
+ * ongoing.
+ */
+int32_t
+pl_inode_remove_inodelk(pl_inode_t *pl_inode, pl_inode_lock_t *lock)
+{
+ pl_dom_list_t *dom;
+ pl_inode_lock_t *ilock;
+
+ /* If the inode has been deleted, we won't allow any lock. */
+ if (pl_inode->removed) {
+ return -ESTALE;
+ }
+
+ /* We only synchronize with locks made for regular operations coming from
+ * the user. Locks done for internal purposes are hard to control and could
+ * lead to long delays or deadlocks quite easily. */
+ if (lock->client_pid < 0) {
+ return 0;
+ }
+ if (!pl_inode->is_locked) {
+ return 0;
+ }
+ if (pl_inode->remove_running > 0) {
+ return 1;
+ }
+
+ list_for_each_entry(dom, &pl_inode->dom_list, inode_list)
+ {
+ list_for_each_entry(ilock, &dom->inodelk_list, list)
+ {
+ /* If a lock from the same client is already granted, we allow this
+ * one to continue. This is necessary to prevent deadlocks when
+ * multiple locks are taken for the same operation.
+ *
+             * On the other hand, it's unlikely that the same client sends
+ * completely unrelated locks for the same inode.
+ */
+ if (ilock->client == lock->client) {
+ return 0;
+ }
}
+ }
- return conf;
+ return 1;
}
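
A small sketch of how the lock path might consume these return values (the actual call site lives in inodelk.c and is not shown in this hunk; the helper name is illustrative only):

static int32_t
lock_attempt_example(pl_inode_t *pl_inode, pl_inode_lock_t *lock)
{
    int32_t ret = pl_inode_remove_inodelk(pl_inode, lock);

    if (ret < 0)
        return ret;      /* -ESTALE: the inode was already removed */
    if (ret > 0)
        return -EAGAIN;  /* a remove is in progress: block/queue the lock */

    /* ret == 0: run the usual grant/conflict checks for this inodelk. */
    return 0;
}
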
diff --git a/xlators/features/locks/src/common.h b/xlators/features/locks/src/common.h
index b13f44bc278..281223bf3b8 100644
--- a/xlators/features/locks/src/common.h
+++ b/xlators/features/locks/src/common.h
@@ -1,133 +1,262 @@
/*
- Copyright (c) 2006, 2007, 2008 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
+ Copyright (c) 2006-2012, 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
#ifndef __COMMON_H__
#define __COMMON_H__
+/*dump locks format strings */
+#define RANGE_FMT "type=%s, whence=%hd, start=%llu, len=%llu"
+#define ENTRY_FMT "type=%s on basename=%s"
+#define DUMP_GEN_FMT "pid = %llu, owner=%s, client=%p"
+#define GRNTD_AT "granted at %s"
+#define BLKD_AT "blocked at %s"
+#define CONN_ID "connection-id=%s"
+#define DUMP_BLKD_FMT DUMP_GEN_FMT ", " CONN_ID ", " BLKD_AT
+#define DUMP_GRNTD_FMT DUMP_GEN_FMT ", " CONN_ID ", " GRNTD_AT
+#define DUMP_BLKD_GRNTD_FMT DUMP_GEN_FMT ", " CONN_ID ", " BLKD_AT ", " GRNTD_AT
+
+#define ENTRY_BLKD_FMT ENTRY_FMT ", " DUMP_BLKD_FMT
+#define ENTRY_GRNTD_FMT ENTRY_FMT ", " DUMP_GRNTD_FMT
+#define ENTRY_BLKD_GRNTD_FMT ENTRY_FMT ", " DUMP_BLKD_GRNTD_FMT
+
+#define RANGE_BLKD_FMT RANGE_FMT ", " DUMP_BLKD_FMT
+#define RANGE_GRNTD_FMT RANGE_FMT ", " DUMP_GRNTD_FMT
+#define RANGE_BLKD_GRNTD_FMT RANGE_FMT ", " DUMP_BLKD_GRNTD_FMT
+
+#define SET_FLOCK_PID(flock, lock) ((flock)->l_pid = lock->client_pid)
+
+#define PL_STACK_UNWIND_AND_FREE(__local, fop, frame, op_ret, params...) \
+ do { \
+ frame->local = NULL; \
+ STACK_UNWIND_STRICT(fop, frame, op_ret, params); \
+ if (__local) { \
+ if (__local->inodelk_dom_count_req) \
+ data_unref(__local->inodelk_dom_count_req); \
+ loc_wipe(&__local->loc[0]); \
+ loc_wipe(&__local->loc[1]); \
+ if (__local->fd) \
+ fd_unref(__local->fd); \
+ if (__local->inode) \
+ inode_unref(__local->inode); \
+ if (__local->xdata) { \
+ dict_unref(__local->xdata); \
+ __local->xdata = NULL; \
+ } \
+ mem_put(__local); \
+ } \
+ } while (0)
+
posix_lock_t *
-new_posix_lock (struct gf_flock *flock, void *transport, pid_t client_pid,
- uint64_t owner, fd_t *fd);
+new_posix_lock(struct gf_flock *flock, client_t *client, pid_t client_pid,
+ gf_lkowner_t *owner, fd_t *fd, uint32_t lk_flags, int blocking,
+ int32_t *op_errno);
pl_inode_t *
-pl_inode_get (xlator_t *this, inode_t *inode);
+pl_inode_get(xlator_t *this, inode_t *inode, pl_local_t *local);
posix_lock_t *
-pl_getlk (pl_inode_t *inode, posix_lock_t *lock);
+pl_getlk(pl_inode_t *inode, posix_lock_t *lock);
+
+int
+pl_setlk(xlator_t *this, pl_inode_t *inode, posix_lock_t *lock, int can_block);
int
-pl_setlk (xlator_t *this, pl_inode_t *inode, posix_lock_t *lock,
- int can_block);
+pl_lock_preempt(pl_inode_t *pl_inode, posix_lock_t *reqlock);
void
-grant_blocked_locks (xlator_t *this, pl_inode_t *inode);
+grant_blocked_locks(xlator_t *this, pl_inode_t *inode);
void
-posix_lock_to_flock (posix_lock_t *lock, struct gf_flock *flock);
+posix_lock_to_flock(posix_lock_t *lock, struct gf_flock *flock);
int
-locks_overlap (posix_lock_t *l1, posix_lock_t *l2);
+locks_overlap(posix_lock_t *l1, posix_lock_t *l2);
int
-same_owner (posix_lock_t *l1, posix_lock_t *l2);
+same_owner(posix_lock_t *l1, posix_lock_t *l2);
-void __delete_lock (pl_inode_t *, posix_lock_t *);
+void
+__delete_lock(posix_lock_t *);
-void __destroy_lock (posix_lock_t *);
+void
+__destroy_lock(posix_lock_t *);
pl_dom_list_t *
-get_domain (pl_inode_t *pl_inode, const char *volume);
+get_domain(pl_inode_t *pl_inode, const char *volume);
void
-grant_blocked_inode_locks (xlator_t *this, pl_inode_t *pl_inode, pl_dom_list_t *dom);
+grant_blocked_inode_locks(xlator_t *this, pl_inode_t *pl_inode,
+ pl_dom_list_t *dom, struct timespec *now,
+ struct list_head *contend);
void
-__delete_inode_lock (pl_inode_lock_t *lock);
+inodelk_contention_notify(xlator_t *this, struct list_head *contend);
void
-__destroy_inode_lock (pl_inode_lock_t *lock);
+__delete_inode_lock(pl_inode_lock_t *lock);
void
-grant_blocked_entry_locks (xlator_t *this, pl_inode_t *pl_inode,
- pl_entry_lock_t *unlocked, pl_dom_list_t *dom);
+__pl_inodelk_unref(pl_inode_lock_t *lock);
-void pl_update_refkeeper (xlator_t *this, inode_t *inode);
+void
+__grant_blocked_inode_locks(xlator_t *this, pl_inode_t *pl_inode,
+ struct list_head *granted, pl_dom_list_t *dom,
+ struct timespec *now, struct list_head *contend);
+void
+unwind_granted_inodes(xlator_t *this, pl_inode_t *pl_inode,
+ struct list_head *granted);
+
+void
+grant_blocked_entry_locks(xlator_t *this, pl_inode_t *pl_inode,
+ pl_dom_list_t *dom, struct timespec *now,
+ struct list_head *contend);
+
+void
+entrylk_contention_notify(xlator_t *this, struct list_head *contend);
+
+void
+pl_update_refkeeper(xlator_t *this, inode_t *inode);
+
+int32_t
+__get_inodelk_count(xlator_t *this, pl_inode_t *pl_inode, char *domname);
int32_t
-get_inodelk_count (xlator_t *this, inode_t *inode);
+get_inodelk_count(xlator_t *this, inode_t *inode, char *domname);
int32_t
-get_entrylk_count (xlator_t *this, inode_t *inode);
+__get_entrylk_count(xlator_t *this, pl_inode_t *pl_inode);
+int32_t
+get_entrylk_count(xlator_t *this, inode_t *inode);
-void pl_trace_in (xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc,
- int cmd, struct gf_flock *flock, const char *domain);
+void
+pl_trace_in(xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc, int cmd,
+ struct gf_flock *flock, const char *domain);
-void pl_trace_out (xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc,
- int cmd, struct gf_flock *flock, int op_ret, int op_errno, const char *domain);
+void
+pl_trace_out(xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc, int cmd,
+ struct gf_flock *flock, int op_ret, int op_errno,
+ const char *domain);
-void pl_trace_block (xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc,
- int cmd, struct gf_flock *flock, const char *domain);
+void
+pl_trace_block(xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc,
+ int cmd, struct gf_flock *flock, const char *domain);
-void pl_trace_flush (xlator_t *this, call_frame_t *frame, fd_t *fd);
+void
+pl_trace_flush(xlator_t *this, call_frame_t *frame, fd_t *fd);
-void entrylk_trace_in (xlator_t *this, call_frame_t *frame, const char *volume,
- fd_t *fd, loc_t *loc, const char *basename,
- entrylk_cmd cmd, entrylk_type type);
+void
+entrylk_trace_in(xlator_t *this, call_frame_t *frame, const char *volume,
+ fd_t *fd, loc_t *loc, const char *basename, entrylk_cmd cmd,
+ entrylk_type type);
-void entrylk_trace_out (xlator_t *this, call_frame_t *frame, const char *volume,
- fd_t *fd, loc_t *loc, const char *basename,
- entrylk_cmd cmd, entrylk_type type,
- int op_ret, int op_errno);
+void
+entrylk_trace_out(xlator_t *this, call_frame_t *frame, const char *volume,
+ fd_t *fd, loc_t *loc, const char *basename, entrylk_cmd cmd,
+ entrylk_type type, int op_ret, int op_errno);
-void entrylk_trace_block (xlator_t *this, call_frame_t *frame, const char *volume,
- fd_t *fd, loc_t *loc, const char *basename,
- entrylk_cmd cmd, entrylk_type type);
+void
+entrylk_trace_block(xlator_t *this, call_frame_t *frame, const char *volume,
+ fd_t *fd, loc_t *loc, const char *basename, entrylk_cmd cmd,
+ entrylk_type type);
void
-pl_print_verdict (char *str, int size, int op_ret, int op_errno);
+pl_print_verdict(char *str, int size, int op_ret, int op_errno);
void
-pl_print_lockee (char *str, int size, fd_t *fd, loc_t *loc);
+pl_print_lockee(char *str, int size, fd_t *fd, loc_t *loc);
void
-pl_print_locker (char *str, int size, xlator_t *this, call_frame_t *frame);
+pl_print_locker(char *str, int size, xlator_t *this, call_frame_t *frame);
void
-pl_print_inodelk (char *str, int size, int cmd, struct gf_flock *flock, const char *domain);
+pl_print_inodelk(char *str, int size, int cmd, struct gf_flock *flock,
+ const char *domain);
void
-pl_trace_release (xlator_t *this, fd_t *fd);
+pl_trace_release(xlator_t *this, fd_t *fd);
unsigned long
-fd_to_fdnum (fd_t *fd);
+fd_to_fdnum(fd_t *fd);
fd_t *
-fd_from_fdnum (posix_lock_t *lock);
+fd_from_fdnum(posix_lock_t *lock);
int
-pl_reserve_setlk (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
- int can_block);
+pl_reserve_setlk(xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
+ int can_block);
int
-reservelks_equal (posix_lock_t *l1, posix_lock_t *l2);
+reservelks_equal(posix_lock_t *l1, posix_lock_t *l2);
+
+int
+pl_verify_reservelk(xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
+ int can_block);
+int
+pl_reserve_unlock(xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *reqlock);
+
+int32_t
+check_entrylk_on_basename(xlator_t *this, inode_t *parent, char *basename);
+
+void
+__pl_inodelk_unref(pl_inode_lock_t *lock);
+void
+__pl_entrylk_unref(pl_entry_lock_t *lock);
int
-pl_verify_reservelk (xlator_t *this, pl_inode_t *pl_inode,
- posix_lock_t *lock, int can_block);
+pl_metalock_is_active(pl_inode_t *pl_inode);
+
+void
+__pl_queue_lock(pl_inode_t *pl_inode, posix_lock_t *reqlock);
+
+void
+inodelk_contention_notify_check(xlator_t *xl, pl_inode_lock_t *lock,
+ struct timespec *now,
+ struct list_head *contend);
+
+void
+entrylk_contention_notify_check(xlator_t *xl, pl_entry_lock_t *lock,
+ struct timespec *now,
+ struct list_head *contend);
+
+gf_boolean_t
+pl_does_monkey_want_stuck_lock();
+
+gf_boolean_t
+pl_is_mandatory_locking_enabled(pl_inode_t *pl_inode);
+
+void
+pl_clean_local(pl_local_t *local);
+
int
-pl_reserve_unlock (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *reqlock);
+pl_local_init(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd);
+
+gf_boolean_t
+pl_is_lk_owner_valid(gf_lkowner_t *owner, client_t *client);
+
+int32_t
+pl_inode_remove_prepare(xlator_t *xl, call_frame_t *frame, loc_t *loc,
+ pl_inode_t **ppl_inode, struct list_head *contend);
+
+int32_t
+pl_inode_remove_complete(xlator_t *xl, pl_inode_t *pl_inode, call_stub_t *stub,
+ struct list_head *contend);
+
+void
+pl_inode_remove_wake(struct list_head *list);
+
+void
+pl_inode_remove_cbk(xlator_t *xl, pl_inode_t *pl_inode, int32_t error);
+
+void
+pl_inode_remove_unlocked(xlator_t *xl, pl_inode_t *pl_inode,
+ struct list_head *list);
+
+int32_t
+pl_inode_remove_inodelk(pl_inode_t *pl_inode, pl_inode_lock_t *lock);
+
#endif /* __COMMON_H__ */
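
The PL_STACK_UNWIND_AND_FREE() macro declared above bundles the unwind with the cleanup of every reference a pl_local_t may hold. A hypothetical cbk (the name pl_example_flush_cbk is not from this patch) would use it roughly like this:

int32_t
pl_example_flush_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                     int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
    pl_local_t *local = frame->local;

    /* Detaches the local from the frame, unwinds the flush fop, then
     * drops the loc/fd/inode/xdata references held by the local. */
    PL_STACK_UNWIND_AND_FREE(local, flush, frame, op_ret, op_errno, xdata);

    return 0;
}
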
diff --git a/xlators/features/locks/src/entrylk.c b/xlators/features/locks/src/entrylk.c
index 9e7d4c87820..fd772c850dd 100644
--- a/xlators/features/locks/src/entrylk.c
+++ b/xlators/features/locks/src/entrylk.c
@@ -1,66 +1,83 @@
/*
- Copyright (c) 2006, 2007, 2008 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
-
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
+ Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
-#include "glusterfs.h"
-#include "compat.h"
-#include "xlator.h"
-#include "inode.h"
-#include "logging.h"
-#include "common-utils.h"
-#include "list.h"
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/common-utils.h>
+#include <glusterfs/list.h>
+#include <glusterfs/upcall-utils.h>
#include "locks.h"
+#include "clear.h"
#include "common.h"
+#include "pl-messages.h"
-static pl_entry_lock_t *
-new_entrylk_lock (pl_inode_t *pinode, const char *basename, entrylk_type type,
- void *trans, pid_t client_pid, uint64_t owner, const char *volume)
-
+void
+__pl_entrylk_unref(pl_entry_lock_t *lock)
{
- pl_entry_lock_t *newlock = NULL;
-
- newlock = GF_CALLOC (1, sizeof (pl_entry_lock_t),
- gf_locks_mt_pl_entry_lock_t);
- if (!newlock) {
- goto out;
- }
-
- newlock->basename = basename ? gf_strdup (basename) : NULL;
- newlock->type = type;
- newlock->trans = trans;
- newlock->volume = volume;
- newlock->client_pid = client_pid;
- newlock->owner = owner;
+ lock->ref--;
+ if (!lock->ref) {
+ GF_FREE((char *)lock->basename);
+ GF_FREE(lock->connection_id);
+ GF_FREE(lock);
+ }
+}
- INIT_LIST_HEAD (&newlock->domain_list);
- INIT_LIST_HEAD (&newlock->blocked_locks);
+static void
+__pl_entrylk_ref(pl_entry_lock_t *lock)
+{
+ lock->ref++;
+}
+static pl_entry_lock_t *
+new_entrylk_lock(pl_inode_t *pinode, const char *basename, entrylk_type type,
+ const char *domain, call_frame_t *frame, char *conn_id,
+ int32_t *op_errno)
+{
+ pl_entry_lock_t *newlock = NULL;
+
+ if (!pl_is_lk_owner_valid(&frame->root->lk_owner, frame->root->client)) {
+ *op_errno = EINVAL;
+ goto out;
+ }
+
+ newlock = GF_CALLOC(1, sizeof(pl_entry_lock_t),
+ gf_locks_mt_pl_entry_lock_t);
+ if (!newlock) {
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ newlock->basename = basename ? gf_strdup(basename) : NULL;
+ newlock->type = type;
+ newlock->client = frame->root->client;
+ newlock->client_pid = frame->root->pid;
+ newlock->volume = domain;
+ newlock->owner = frame->root->lk_owner;
+ newlock->frame = frame;
+ newlock->this = frame->this;
+
+ if (conn_id) {
+ newlock->connection_id = gf_strdup(conn_id);
+ }
+
+ INIT_LIST_HEAD(&newlock->domain_list);
+ INIT_LIST_HEAD(&newlock->blocked_locks);
+ INIT_LIST_HEAD(&newlock->client_list);
+
+ __pl_entrylk_ref(newlock);
out:
- return newlock;
+ return newlock;
}
-
/**
* all_names - does a basename represent all names?
* @basename: name to check
@@ -75,206 +92,411 @@ out:
*/
static int
-names_conflict (const char *n1, const char *n2)
+names_conflict(const char *n1, const char *n2)
{
- return all_names (n1) || all_names (n2) || !strcmp (n1, n2);
+ return all_names(n1) || all_names(n2) || !strcmp(n1, n2);
}
+static int
+__same_entrylk_owner(pl_entry_lock_t *l1, pl_entry_lock_t *l2)
+{
+ return (is_same_lkowner(&l1->owner, &l2->owner) &&
+ (l1->client == l2->client));
+}
+/* Just as in inodelk, allow conflicting name locks from the same (lk_owner, conn) */
static int
-__same_entrylk_owner (pl_entry_lock_t *l1, pl_entry_lock_t *l2)
+__conflicting_entrylks(pl_entry_lock_t *l1, pl_entry_lock_t *l2)
+{
+ if (names_conflict(l1->basename, l2->basename) &&
+ !__same_entrylk_owner(l1, l2))
+ return 1;
+
+ return 0;
+}
+
+/* See comments in inodelk.c for details */
+static inline gf_boolean_t
+__stale_entrylk(xlator_t *this, pl_entry_lock_t *candidate_lock,
+ pl_entry_lock_t *requested_lock, time_t *lock_age_sec)
+{
+ posix_locks_private_t *priv = NULL;
+
+ priv = this->private;
+
+ /* Question: Should we just prune them all given the
+ * chance? Or just the locks we are attempting to acquire?
+ */
+ if (names_conflict(candidate_lock->basename, requested_lock->basename)) {
+ *lock_age_sec = gf_time() - candidate_lock->granted_time;
+ if (*lock_age_sec > priv->revocation_secs)
+ return _gf_true;
+ }
+ return _gf_false;
+}
+
+/* See comments in inodelk.c for details */
+static gf_boolean_t
+__entrylk_prune_stale(xlator_t *this, pl_inode_t *pinode, pl_dom_list_t *dom,
+ pl_entry_lock_t *lock)
+{
+ posix_locks_private_t *priv = NULL;
+ pl_entry_lock_t *tmp = NULL;
+ pl_entry_lock_t *lk = NULL;
+ gf_boolean_t revoke_lock = _gf_false;
+ int bcount = 0;
+ int gcount = 0;
+ int op_errno = 0;
+ clrlk_args args;
+ args.opts = NULL;
+ time_t lk_age_sec = 0;
+ uint32_t max_blocked = 0;
+ char *reason_str = NULL;
+
+ priv = this->private;
+ args.type = CLRLK_ENTRY;
+ if (priv->revocation_clear_all == _gf_true)
+ args.kind = CLRLK_ALL;
+ else
+ args.kind = CLRLK_GRANTED;
+
+ if (list_empty(&dom->entrylk_list))
+ goto out;
+
+ pthread_mutex_lock(&pinode->mutex);
+ lock->pinode = pinode;
+ list_for_each_entry_safe(lk, tmp, &dom->entrylk_list, domain_list)
+ {
+ if (__stale_entrylk(this, lk, lock, &lk_age_sec) == _gf_true) {
+ revoke_lock = _gf_true;
+ reason_str = "age";
+ break;
+ }
+ }
+ max_blocked = priv->revocation_max_blocked;
+ if (max_blocked != 0 && revoke_lock == _gf_false) {
+ list_for_each_entry_safe(lk, tmp, &dom->blocked_entrylks, blocked_locks)
+ {
+ max_blocked--;
+ if (max_blocked == 0) {
+ revoke_lock = _gf_true;
+ reason_str = "max blocked";
+ break;
+ }
+ }
+ }
+ pthread_mutex_unlock(&pinode->mutex);
+
+out:
+ if (revoke_lock == _gf_true) {
+ clrlk_clear_entrylk(this, pinode, dom, &args, &bcount, &gcount,
+ &op_errno);
+ gf_log(this->name, GF_LOG_WARNING,
+ "Lock revocation [reason: %s; gfid: %s; domain: %s; "
+ "age: %ld sec] - Entry lock revoked: %d granted & %d "
+ "blocked locks cleared",
+ reason_str, uuid_utoa(pinode->gfid), dom->domain, lk_age_sec,
+ gcount, bcount);
+ }
+
+ return revoke_lock;
+}
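
Stripped of the CLRLK plumbing, the revocation decision above reduces to two thresholds. A standalone simplification (not code from the patch):

#include <stdbool.h>
#include <time.h>

/* revocation_secs == 0 disables age-based revocation;
 * max_blocked == 0 disables queue-length based revocation. */
static bool
should_revoke_example(time_t revocation_secs, time_t lock_age_sec,
                      unsigned max_blocked, unsigned blocked_count)
{
    if (revocation_secs != 0 && lock_age_sec > revocation_secs)
        return true;   /* a conflicting granted lock is too old */
    if (max_blocked != 0 && blocked_count >= max_blocked)
        return true;   /* too many waiters are queued behind it */
    return false;
}
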
+
+void
+entrylk_contention_notify_check(xlator_t *this, pl_entry_lock_t *lock,
+ struct timespec *now, struct list_head *contend)
{
+ posix_locks_private_t *priv;
+ int64_t elapsed;
+
+ priv = this->private;
+
+ /* If this lock is in a list, it means that we are about to send a
+ * notification for it, so no need to do anything else. */
+ if (!list_empty(&lock->contend)) {
+ return;
+ }
+
+ elapsed = now->tv_sec;
+ elapsed -= lock->contention_time.tv_sec;
+ if (now->tv_nsec < lock->contention_time.tv_nsec) {
+ elapsed--;
+ }
+ if (elapsed < priv->notify_contention_delay) {
+ return;
+ }
+
+ /* All contention notifications will be sent outside of the locked
+ * region. This means that currently granted locks might have already
+     * been unlocked by that time. To prevent the lock or the inode from
+     * being destroyed before we process them, we take an additional reference
+ * on both. */
+ inode_ref(lock->pinode->inode);
+ __pl_entrylk_ref(lock);
- return ((l1->owner == l2->owner) &&
- (l1->trans == l2->trans));
+ lock->contention_time = *now;
+
+ list_add_tail(&lock->contend, contend);
}
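
The delay check above throttles upcalls per lock to at most one every notify_contention_delay seconds. A standalone model of the elapsed-time computation, including the nanosecond borrow (illustrative only):

#include <stdint.h>
#include <time.h>

static int
contention_delay_elapsed(const struct timespec *now,
                         const struct timespec *last, int64_t delay_secs)
{
    int64_t elapsed = now->tv_sec - last->tv_sec;

    /* Borrow a second if the nanosecond part has not yet caught up. */
    if (now->tv_nsec < last->tv_nsec)
        elapsed--;

    return elapsed >= delay_secs;
}
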
+void
+entrylk_contention_notify(xlator_t *this, struct list_head *contend)
+{
+ struct gf_upcall up;
+ struct gf_upcall_entrylk_contention lc;
+ pl_entry_lock_t *lock;
+ pl_inode_t *pl_inode;
+ client_t *client;
+ gf_boolean_t notify;
+
+ while (!list_empty(contend)) {
+ lock = list_first_entry(contend, pl_entry_lock_t, contend);
+
+ pl_inode = lock->pinode;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+
+ /* If the lock has already been released, no notification is
+ * sent. We clear the notification time in this case. */
+ notify = !list_empty(&lock->domain_list);
+ if (!notify) {
+ lock->contention_time.tv_sec = 0;
+ lock->contention_time.tv_nsec = 0;
+ } else {
+ lc.type = lock->type;
+ lc.name = lock->basename;
+ lc.pid = lock->client_pid;
+ lc.domain = lock->volume;
+ lc.xdata = NULL;
+
+ gf_uuid_copy(up.gfid, lock->pinode->gfid);
+ client = (client_t *)lock->client;
+ if (client == NULL) {
+ /* A NULL client can be found if the entrylk
+ * was issued by a server side xlator. */
+ up.client_uid = NULL;
+ } else {
+ up.client_uid = client->client_uid;
+ }
+ }
+
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ if (notify) {
+ up.event_type = GF_UPCALL_ENTRYLK_CONTENTION;
+ up.data = &lc;
+
+ if (this->notify(this, GF_EVENT_UPCALL, &up) < 0) {
+ gf_msg_debug(this->name, 0,
+ "Entrylk contention notification "
+ "failed");
+ } else {
+ gf_msg_debug(this->name, 0,
+ "Entrylk contention notification "
+ "sent");
+ }
+ }
+
+ pthread_mutex_lock(&pl_inode->mutex);
+
+ list_del_init(&lock->contend);
+ __pl_entrylk_unref(lock);
+
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ inode_unref(pl_inode->inode);
+ }
+}
/**
- * lock_grantable - is this lock grantable?
+ * entrylk_grantable - is this lock grantable?
* @inode: inode in which to look
* @basename: name we're trying to lock
* @type: type of lock
*/
static pl_entry_lock_t *
-__lock_grantable (pl_dom_list_t *dom, const char *basename, entrylk_type type)
+__entrylk_grantable(xlator_t *this, pl_dom_list_t *dom, pl_entry_lock_t *lock,
+ struct timespec *now, struct list_head *contend)
{
- pl_entry_lock_t *lock = NULL;
-
- if (list_empty (&dom->entrylk_list))
- return NULL;
-
- list_for_each_entry (lock, &dom->entrylk_list, domain_list) {
- if (names_conflict (lock->basename, basename))
- return lock;
+ pl_entry_lock_t *tmp = NULL;
+ pl_entry_lock_t *ret = NULL;
+
+ list_for_each_entry(tmp, &dom->entrylk_list, domain_list)
+ {
+ if (__conflicting_entrylks(tmp, lock)) {
+ if (ret == NULL) {
+ ret = tmp;
+ if (contend == NULL) {
+ break;
+ }
+ }
+ entrylk_contention_notify_check(this, tmp, now, contend);
}
+ }
- return NULL;
+ return ret;
}
static pl_entry_lock_t *
-__blocked_lock_conflict (pl_dom_list_t *dom, const char *basename, entrylk_type type)
+__blocked_entrylk_conflict(pl_dom_list_t *dom, pl_entry_lock_t *lock)
{
- pl_entry_lock_t *lock = NULL;
+ pl_entry_lock_t *tmp = NULL;
- if (list_empty (&dom->blocked_entrylks))
- return NULL;
-
- list_for_each_entry (lock, &dom->blocked_entrylks, blocked_locks) {
- if (names_conflict (lock->basename, basename))
- return lock;
- }
+ list_for_each_entry(tmp, &dom->blocked_entrylks, blocked_locks)
+ {
+ if (names_conflict(tmp->basename, lock->basename))
+ return lock;
+ }
- return NULL;
+ return NULL;
}
static int
-__owner_has_lock (pl_dom_list_t *dom, pl_entry_lock_t *newlock)
+__owner_has_lock(pl_dom_list_t *dom, pl_entry_lock_t *newlock)
{
- pl_entry_lock_t *lock = NULL;
+ pl_entry_lock_t *lock = NULL;
- list_for_each_entry (lock, &dom->entrylk_list, domain_list) {
- if (__same_entrylk_owner (lock, newlock))
- return 1;
- }
+ list_for_each_entry(lock, &dom->entrylk_list, domain_list)
+ {
+ if (__same_entrylk_owner(lock, newlock))
+ return 1;
+ }
- list_for_each_entry (lock, &dom->blocked_entrylks, blocked_locks) {
- if (__same_entrylk_owner (lock, newlock))
- return 1;
- }
+ list_for_each_entry(lock, &dom->blocked_entrylks, blocked_locks)
+ {
+ if (__same_entrylk_owner(lock, newlock))
+ return 1;
+ }
- return 0;
+ return 0;
}
static int
-names_equal (const char *n1, const char *n2)
+names_equal(const char *n1, const char *n2)
{
- return (n1 == NULL && n2 == NULL) || (n1 && n2 && !strcmp (n1, n2));
+ return (n1 == NULL && n2 == NULL) || (n1 && n2 && !strcmp(n1, n2));
}
void
-pl_print_entrylk (char *str, int size, entrylk_cmd cmd, entrylk_type type,
- const char *basename, const char *domain)
+pl_print_entrylk(char *str, int size, entrylk_cmd cmd, entrylk_type type,
+ const char *basename, const char *domain)
{
- char *cmd_str = NULL;
- char *type_str = NULL;
+ char *cmd_str = NULL;
+ char *type_str = NULL;
- switch (cmd) {
+ switch (cmd) {
case ENTRYLK_LOCK:
- cmd_str = "LOCK";
- break;
+ cmd_str = "LOCK";
+ break;
case ENTRYLK_LOCK_NB:
- cmd_str = "LOCK_NB";
- break;
+ cmd_str = "LOCK_NB";
+ break;
case ENTRYLK_UNLOCK:
- cmd_str = "UNLOCK";
- break;
+ cmd_str = "UNLOCK";
+ break;
default:
- cmd_str = "UNKNOWN";
- break;
- }
+ cmd_str = "UNKNOWN";
+ break;
+ }
- switch (type) {
+ switch (type) {
case ENTRYLK_RDLCK:
- type_str = "READ";
- break;
+ type_str = "READ";
+ break;
case ENTRYLK_WRLCK:
- type_str = "WRITE";
- break;
+ type_str = "WRITE";
+ break;
default:
- type_str = "UNKNOWN";
- break;
- }
+ type_str = "UNKNOWN";
+ break;
+ }
- snprintf (str, size, "lock=ENTRYLK, cmd=%s, type=%s, basename=%s, domain: %s",
- cmd_str, type_str, basename, domain);
+ snprintf(str, size,
+ "lock=ENTRYLK, cmd=%s, type=%s, basename=%s, domain: %s", cmd_str,
+ type_str, basename, domain);
}
-
void
-entrylk_trace_in (xlator_t *this, call_frame_t *frame, const char *domain,
- fd_t *fd, loc_t *loc, const char *basename,
- entrylk_cmd cmd, entrylk_type type)
+entrylk_trace_in(xlator_t *this, call_frame_t *frame, const char *domain,
+ fd_t *fd, loc_t *loc, const char *basename, entrylk_cmd cmd,
+ entrylk_type type)
{
- posix_locks_private_t *priv = NULL;
- char pl_locker[256];
- char pl_lockee[256];
- char pl_entrylk[256];
+ posix_locks_private_t *priv = NULL;
+ char pl_locker[256];
+ char pl_lockee[256];
+ char pl_entrylk[256];
- priv = this->private;
+ priv = this->private;
- if (!priv->trace)
- return;
+ if (!priv->trace)
+ return;
- pl_print_locker (pl_locker, 256, this, frame);
- pl_print_lockee (pl_lockee, 256, fd, loc);
- pl_print_entrylk (pl_entrylk, 256, cmd, type, basename, domain);
+ pl_print_locker(pl_locker, 256, this, frame);
+ pl_print_lockee(pl_lockee, 256, fd, loc);
+ pl_print_entrylk(pl_entrylk, 256, cmd, type, basename, domain);
- gf_log (this->name, GF_LOG_INFO,
- "[REQUEST] Locker = {%s} Lockee = {%s} Lock = {%s}",
- pl_locker, pl_lockee, pl_entrylk);
+ gf_log(this->name, GF_LOG_INFO,
+ "[REQUEST] Locker = {%s} Lockee = {%s} Lock = {%s}", pl_locker,
+ pl_lockee, pl_entrylk);
}
-
void
-entrylk_trace_out (xlator_t *this, call_frame_t *frame, const char *domain,
- fd_t *fd, loc_t *loc, const char *basename,
- entrylk_cmd cmd, entrylk_type type, int op_ret, int op_errno)
+entrylk_trace_out(xlator_t *this, call_frame_t *frame, const char *domain,
+ fd_t *fd, loc_t *loc, const char *basename, entrylk_cmd cmd,
+ entrylk_type type, int op_ret, int op_errno)
{
- posix_locks_private_t *priv = NULL;
- char pl_locker[256];
- char pl_lockee[256];
- char pl_entrylk[256];
- char verdict[32];
+ posix_locks_private_t *priv = NULL;
+ char pl_locker[256];
+ char pl_lockee[256];
+ char pl_entrylk[256];
+ char verdict[32];
- priv = this->private;
+ priv = this->private;
- if (!priv->trace)
- return;
+ if (!priv->trace)
+ return;
- pl_print_locker (pl_locker, 256, this, frame);
- pl_print_lockee (pl_lockee, 256, fd, loc);
- pl_print_entrylk (pl_entrylk, 256, cmd, type, basename, domain);
- pl_print_verdict (verdict, 32, op_ret, op_errno);
+ pl_print_locker(pl_locker, 256, this, frame);
+ pl_print_lockee(pl_lockee, 256, fd, loc);
+ pl_print_entrylk(pl_entrylk, 256, cmd, type, basename, domain);
+ pl_print_verdict(verdict, 32, op_ret, op_errno);
- gf_log (this->name, GF_LOG_INFO,
- "[%s] Locker = {%s} Lockee = {%s} Lock = {%s}",
- verdict, pl_locker, pl_lockee, pl_entrylk);
+ gf_log(this->name, GF_LOG_INFO,
+ "[%s] Locker = {%s} Lockee = {%s} Lock = {%s}", verdict, pl_locker,
+ pl_lockee, pl_entrylk);
}
-
void
-entrylk_trace_block (xlator_t *this, call_frame_t *frame, const char *volume,
- fd_t *fd, loc_t *loc, const char *basename,
- entrylk_cmd cmd, entrylk_type type)
+entrylk_trace_block(xlator_t *this, call_frame_t *frame, const char *volume,
+ fd_t *fd, loc_t *loc, const char *basename, entrylk_cmd cmd,
+ entrylk_type type)
{
- posix_locks_private_t *priv = NULL;
- char pl_locker[256];
- char pl_lockee[256];
- char pl_entrylk[256];
+ posix_locks_private_t *priv = NULL;
+ char pl_locker[256];
+ char pl_lockee[256];
+ char pl_entrylk[256];
- priv = this->private;
+ priv = this->private;
- if (!priv->trace)
- return;
+ if (!priv->trace)
+ return;
- pl_print_locker (pl_locker, 256, this, frame);
- pl_print_lockee (pl_lockee, 256, fd, loc);
- pl_print_entrylk (pl_entrylk, 256, cmd, type, basename, volume);
+ pl_print_locker(pl_locker, 256, this, frame);
+ pl_print_lockee(pl_lockee, 256, fd, loc);
+ pl_print_entrylk(pl_entrylk, 256, cmd, type, basename, volume);
- gf_log (this->name, GF_LOG_INFO,
- "[BLOCKED] Locker = {%s} Lockee = {%s} Lock = {%s}",
- pl_locker, pl_lockee, pl_entrylk);
+ gf_log(this->name, GF_LOG_INFO,
+ "[BLOCKED] Locker = {%s} Lockee = {%s} Lock = {%s}", pl_locker,
+ pl_lockee, pl_entrylk);
}
/**
- * __find_most_matching_lock - find the lock struct which most matches in order of:
- * lock on the exact basename ||
- * an all_names lock
+ * __find_most_matching_lock - find the lock struct which most matches in order
+ * of: lock on the exact basename || an all_names lock
*
*
* @inode: inode in which to look
@@ -282,27 +504,61 @@ entrylk_trace_block (xlator_t *this, call_frame_t *frame, const char *volume,
*/
static pl_entry_lock_t *
-__find_most_matching_lock (pl_dom_list_t *dom, const char *basename)
+__find_most_matching_lock(pl_dom_list_t *dom, const char *basename)
{
- pl_entry_lock_t *lock;
- pl_entry_lock_t *all = NULL;
- pl_entry_lock_t *exact = NULL;
+ pl_entry_lock_t *lock;
+ pl_entry_lock_t *all = NULL;
+ pl_entry_lock_t *exact = NULL;
- if (list_empty (&dom->entrylk_list))
- return NULL;
+ if (list_empty(&dom->entrylk_list))
+ return NULL;
- list_for_each_entry (lock, &dom->entrylk_list, domain_list) {
- if (all_names (lock->basename))
- all = lock;
- else if (names_equal (lock->basename, basename))
- exact = lock;
- }
+ list_for_each_entry(lock, &dom->entrylk_list, domain_list)
+ {
+ if (all_names(lock->basename))
+ all = lock;
+ else if (names_equal(lock->basename, basename))
+ exact = lock;
+ }
+
+ return (exact ? exact : all);
+}
- return (exact ? exact : all);
+static pl_entry_lock_t *
+__find_matching_lock(pl_dom_list_t *dom, pl_entry_lock_t *lock)
+{
+ pl_entry_lock_t *tmp = NULL;
+
+ list_for_each_entry(tmp, &dom->entrylk_list, domain_list)
+ {
+ if (names_equal(lock->basename, tmp->basename) &&
+ __same_entrylk_owner(lock, tmp) && (lock->type == tmp->type))
+ return tmp;
+ }
+ return NULL;
+}
+
+static int
+__lock_blocked_add(xlator_t *this, pl_inode_t *pinode, pl_dom_list_t *dom,
+ pl_entry_lock_t *lock, int nonblock)
+{
+ if (nonblock)
+ goto out;
+
+ lock->blkd_time = gf_time();
+ list_add_tail(&lock->blocked_locks, &dom->blocked_entrylks);
+
+ gf_msg_trace(this->name, 0, "Blocking lock: {pinode=%p, basename=%s}",
+ pinode, lock->basename);
+
+ entrylk_trace_block(this, lock->frame, NULL, NULL, NULL, lock->basename,
+ ENTRYLK_LOCK, lock->type);
+out:
+ return -EAGAIN;
}
/**
- * __lock_name - lock a name in a directory
+ * __lock_entrylk - lock a name in a directory
* @inode: inode for the directory in which to lock
* @basename: name of the entry to lock
* if null, lock the entire directory
@@ -313,413 +569,375 @@ __find_most_matching_lock (pl_dom_list_t *dom, const char *basename)
*/
int
-__lock_name (pl_inode_t *pinode, const char *basename, entrylk_type type,
- call_frame_t *frame, pl_dom_list_t *dom, xlator_t *this, int nonblock)
+__lock_entrylk(xlator_t *this, pl_inode_t *pinode, pl_entry_lock_t *lock,
+ int nonblock, pl_dom_list_t *dom, struct timespec *now,
+ struct list_head *contend)
{
- pl_entry_lock_t *lock = NULL;
- pl_entry_lock_t *conf = NULL;
- void *trans = NULL;
- pid_t client_pid = 0;
- uint64_t owner = 0;
-
- int ret = -EINVAL;
-
- trans = frame->root->trans;
- client_pid = frame->root->pid;
- owner = frame->root->lk_owner;
-
- lock = new_entrylk_lock (pinode, basename, type, trans, client_pid, owner, dom->domain);
- if (!lock) {
- ret = -ENOMEM;
- goto out;
- }
-
- lock->frame = frame;
- lock->this = this;
- lock->trans = trans;
-
- conf = __lock_grantable (dom, basename, type);
- if (conf) {
- ret = -EAGAIN;
- if (nonblock){
- if (lock->basename)
- GF_FREE ((char *)lock->basename);
- GF_FREE (lock);
- goto out;
-
- }
-
- list_add_tail (&lock->blocked_locks, &dom->blocked_entrylks);
-
- gf_log (this->name, GF_LOG_TRACE,
- "Blocking lock: {pinode=%p, basename=%s}",
- pinode, basename);
-
- goto out;
- }
-
- if ( __blocked_lock_conflict (dom, basename, type) && !(__owner_has_lock (dom, lock))) {
- ret = -EAGAIN;
- if (nonblock) {
- if (lock->basename)
- GF_FREE ((char *) lock->basename);
- GF_FREE (lock);
- goto out;
-
- }
- lock->frame = frame;
- lock->this = this;
-
- list_add_tail (&lock->blocked_locks, &dom->blocked_entrylks);
-
- gf_log (this->name, GF_LOG_TRACE,
- "Lock is grantable, but blocking to prevent starvation");
- gf_log (this->name, GF_LOG_TRACE,
- "Blocking lock: {pinode=%p, basename=%s}",
- pinode, basename);
-
- ret = -EAGAIN;
- goto out;
+ pl_entry_lock_t *conf = NULL;
+ int ret = -EAGAIN;
+
+ conf = __entrylk_grantable(this, dom, lock, now, contend);
+ if (conf) {
+ ret = __lock_blocked_add(this, pinode, dom, lock, nonblock);
+ goto out;
+ }
+
+    /* To prevent starvation of blocked locks, check if there are any blocked
+     * locks that may conflict with this lock. If there are, then don't grant
+     * the lock. BUT grant the lock if the owner already has a lock, to allow
+     * nested locks.
+ * Example: SHD from Machine1 takes (gfid, basename=257-length-name)
+ * and is granted.
+ * SHD from machine2 takes (gfid, basename=NULL) and is blocked.
+ * When SHD from Machine1 takes (gfid, basename=NULL) it needs to be
+ * granted, without which self-heal can't progress.
+ * TODO: Find why 'owner_has_lock' is checked even for blocked locks.
+ */
+ if (__blocked_entrylk_conflict(dom, lock) &&
+ !(__owner_has_lock(dom, lock))) {
+ if (nonblock == 0) {
+ gf_log(this->name, GF_LOG_DEBUG,
+ "Lock is grantable, but blocking to prevent "
+ "starvation");
}
- switch (type) {
- case ENTRYLK_WRLCK:
- list_add_tail (&lock->domain_list, &dom->entrylk_list);
- break;
+ ret = __lock_blocked_add(this, pinode, dom, lock, nonblock);
+ goto out;
+ }
- default:
+ __pl_entrylk_ref(lock);
+ lock->granted_time = gf_time();
+ list_add(&lock->domain_list, &dom->entrylk_list);
- gf_log (this->name, GF_LOG_DEBUG,
- "Invalid type for entrylk specified: %d", type);
- ret = -EINVAL;
- goto out;
- }
-
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
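
The grant rule implemented above can be condensed into a tiny decision model (a simplification, not part of the patch): a conflicting granted lock always blocks the request, and a conflicting blocked lock also blocks it unless the same owner already holds a lock, which keeps nested locks such as the SHD example in the comment above from deadlocking.

/* Returns 1 if the entrylk can be granted now, 0 if it must wait. */
static int
entrylk_grant_model(int conflicts_with_granted, int conflicts_with_blocked,
                    int owner_already_has_lock)
{
    if (conflicts_with_granted)
        return 0;               /* conflicting lock already granted */
    if (conflicts_with_blocked && !owner_already_has_lock)
        return 0;               /* yield to older waiters (anti-starvation) */
    return 1;
}
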
/**
- * __unlock_name - unlock a name in a directory
+ * __unlock_entrylk - unlock a name in a directory
* @inode: inode for the directory to unlock in
* @basename: name of the entry to unlock
* if null, unlock the entire directory
*/
pl_entry_lock_t *
-__unlock_name (pl_dom_list_t *dom, const char *basename, entrylk_type type)
+__unlock_entrylk(pl_dom_list_t *dom, pl_entry_lock_t *lock)
{
- pl_entry_lock_t *lock = NULL;
- pl_entry_lock_t *ret_lock = NULL;
+ pl_entry_lock_t *ret_lock = NULL;
- lock = __find_most_matching_lock (dom, basename);
+ ret_lock = __find_matching_lock(dom, lock);
- if (!lock) {
- gf_log ("locks", GF_LOG_DEBUG,
- "unlock on %s (type=ENTRYLK_WRLCK) attempted but no matching lock found",
- basename);
- goto out;
- }
+ if (ret_lock) {
+ list_del_init(&ret_lock->domain_list);
+ } else {
+ gf_log("locks", GF_LOG_ERROR,
+ "unlock on %s "
+ "(type=ENTRYLK_WRLCK) attempted but no matching lock "
+ "found",
+ lock->basename);
+ }
- if (names_equal (lock->basename, basename)
- && lock->type == type) {
+ return ret_lock;
+}
- if (type == ENTRYLK_WRLCK) {
- list_del_init (&lock->domain_list);
- ret_lock = lock;
- }
- } else {
- gf_log ("locks", GF_LOG_DEBUG,
- "Unlock for a non-existing lock!");
- goto out;
+int32_t
+check_entrylk_on_basename(xlator_t *this, inode_t *parent, char *basename)
+{
+ int32_t entrylk = 0;
+ pl_dom_list_t *dom = NULL;
+ pl_entry_lock_t *conf = NULL;
+
+ pl_inode_t *pinode = pl_inode_get(this, parent, NULL);
+ if (!pinode)
+ goto out;
+ pthread_mutex_lock(&pinode->mutex);
+ {
+ list_for_each_entry(dom, &pinode->dom_list, inode_list)
+ {
+ conf = __find_most_matching_lock(dom, basename);
+ if (conf && conf->basename) {
+ entrylk = 1;
+ break;
+ }
}
+ }
+ pthread_mutex_unlock(&pinode->mutex);
out:
- return ret_lock;
+ return entrylk;
}
-
void
-__grant_blocked_entry_locks (xlator_t *this, pl_inode_t *pl_inode,
- pl_dom_list_t *dom, struct list_head *granted)
+__grant_blocked_entry_locks(xlator_t *this, pl_inode_t *pl_inode,
+ pl_dom_list_t *dom, struct list_head *granted,
+ struct timespec *now, struct list_head *contend)
{
- int bl_ret = 0;
- pl_entry_lock_t *bl = NULL;
- pl_entry_lock_t *tmp = NULL;
-
- struct list_head blocked_list;
-
- INIT_LIST_HEAD (&blocked_list);
- list_splice_init (&dom->blocked_entrylks, &blocked_list);
+ int bl_ret = 0;
+ pl_entry_lock_t *bl = NULL;
+ pl_entry_lock_t *tmp = NULL;
- list_for_each_entry_safe (bl, tmp, &blocked_list,
- blocked_locks) {
+ struct list_head blocked_list;
- list_del_init (&bl->blocked_locks);
+ INIT_LIST_HEAD(&blocked_list);
+ list_splice_init(&dom->blocked_entrylks, &blocked_list);
+ list_for_each_entry_safe(bl, tmp, &blocked_list, blocked_locks)
+ {
+ list_del_init(&bl->blocked_locks);
- gf_log ("locks", GF_LOG_TRACE,
- "Trying to unblock: {pinode=%p, basename=%s}",
- pl_inode, bl->basename);
+ bl_ret = __lock_entrylk(bl->this, pl_inode, bl, 0, dom, now, contend);
- bl_ret = __lock_name (pl_inode, bl->basename, bl->type,
- bl->frame, dom, bl->this, 0);
-
- if (bl_ret == 0) {
- list_add (&bl->blocked_locks, granted);
- } else {
- gf_log (this->name, GF_LOG_DEBUG,
- "should never happen");
- if (bl->basename)
- GF_FREE ((char *)bl->basename);
- GF_FREE (bl);
- }
+ if (bl_ret == 0) {
+ list_add_tail(&bl->blocked_locks, granted);
}
- return;
+ }
}
/* Grants locks if possible which are blocked on a lock */
void
-grant_blocked_entry_locks (xlator_t *this, pl_inode_t *pl_inode,
- pl_entry_lock_t *unlocked, pl_dom_list_t *dom)
-{
- struct list_head granted_list;
- pl_entry_lock_t *tmp = NULL;
- pl_entry_lock_t *lock = NULL;
-
- INIT_LIST_HEAD (&granted_list);
-
- pthread_mutex_lock (&pl_inode->mutex);
- {
- __grant_blocked_entry_locks (this, pl_inode, dom, &granted_list);
- }
- pthread_mutex_unlock (&pl_inode->mutex);
-
- list_for_each_entry_safe (lock, tmp, &granted_list, blocked_locks) {
- list_del_init (&lock->blocked_locks);
-
- entrylk_trace_out (this, lock->frame, NULL, NULL, NULL,
- lock->basename, ENTRYLK_LOCK, lock->type,
- 0, 0);
-
- STACK_UNWIND_STRICT (entrylk, lock->frame, 0, 0);
-
- }
-
- GF_FREE ((char *)unlocked->basename);
- GF_FREE (unlocked);
-
- return;
-}
-
-/**
- * release_entry_locks_for_transport: release all entry locks from this
- * transport for this loc_t
- */
-
-static int
-release_entry_locks_for_transport (xlator_t *this, pl_inode_t *pinode,
- pl_dom_list_t *dom, void *trans)
+grant_blocked_entry_locks(xlator_t *this, pl_inode_t *pl_inode,
+ pl_dom_list_t *dom, struct timespec *now,
+ struct list_head *contend)
{
- pl_entry_lock_t *lock = NULL;
- pl_entry_lock_t *tmp = NULL;
- struct list_head granted;
- struct list_head released;
-
- INIT_LIST_HEAD (&granted);
- INIT_LIST_HEAD (&released);
-
- pthread_mutex_lock (&pinode->mutex);
+ struct list_head granted_list;
+ pl_entry_lock_t *tmp = NULL;
+ pl_entry_lock_t *lock = NULL;
+
+ INIT_LIST_HEAD(&granted_list);
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ __grant_blocked_entry_locks(this, pl_inode, dom, &granted_list, now,
+ contend);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ list_for_each_entry_safe(lock, tmp, &granted_list, blocked_locks)
+ {
+ entrylk_trace_out(this, lock->frame, NULL, NULL, NULL, lock->basename,
+ ENTRYLK_LOCK, lock->type, 0, 0);
+
+ STACK_UNWIND_STRICT(entrylk, lock->frame, 0, 0, NULL);
+ lock->frame = NULL;
+ }
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ list_for_each_entry_safe(lock, tmp, &granted_list, blocked_locks)
{
- list_for_each_entry_safe (lock, tmp, &dom->blocked_entrylks,
- blocked_locks) {
- if (lock->trans != trans)
- continue;
-
- list_del_init (&lock->blocked_locks);
-
- gf_log (this->name, GF_LOG_TRACE,
- "releasing lock on held by "
- "{transport=%p}",trans);
-
- list_add (&lock->blocked_locks, &released);
-
- }
-
- list_for_each_entry_safe (lock, tmp, &dom->entrylk_list,
- domain_list) {
- if (lock->trans != trans)
- continue;
-
- list_del_init (&lock->domain_list);
-
- gf_log (this->name, GF_LOG_TRACE,
- "releasing lock on held by "
- "{transport=%p}",trans);
-
- GF_FREE ((char *)lock->basename);
- GF_FREE (lock);
- }
-
- __grant_blocked_entry_locks (this, pinode, dom, &granted);
-
+ list_del_init(&lock->blocked_locks);
+ __pl_entrylk_unref(lock);
}
-
- pthread_mutex_unlock (&pinode->mutex);
-
- list_for_each_entry_safe (lock, tmp, &released, blocked_locks) {
- list_del_init (&lock->blocked_locks);
-
- STACK_UNWIND_STRICT (entrylk, lock->frame, -1, EAGAIN);
-
- if (lock->basename)
- GF_FREE ((char *)lock->basename);
- GF_FREE (lock);
-
- }
-
- list_for_each_entry_safe (lock, tmp, &granted, blocked_locks) {
- list_del_init (&lock->blocked_locks);
-
- STACK_UNWIND_STRICT (entrylk, lock->frame, 0, 0);
-
- if (lock->basename)
- GF_FREE ((char *)lock->basename);
- GF_FREE (lock);
- }
-
- return 0;
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
}
/* Common entrylk code called by pl_entrylk and pl_fentrylk */
int
-pl_common_entrylk (call_frame_t *frame, xlator_t *this,
- const char *volume, inode_t *inode, const char *basename,
- entrylk_cmd cmd, entrylk_type type, loc_t *loc, fd_t *fd)
-{
- uint64_t owner = 0;
- int32_t op_ret = -1;
- int32_t op_errno = 0;
-
- void * transport = NULL;
- pid_t pid = -1;
-
- pl_inode_t * pinode = NULL;
- int ret = -1;
- pl_entry_lock_t *unlocked = NULL;
- char unwind = 1;
+pl_common_entrylk(call_frame_t *frame, xlator_t *this, const char *volume,
+ inode_t *inode, const char *basename, entrylk_cmd cmd,
+ entrylk_type type, loc_t *loc, fd_t *fd, dict_t *xdata)
- pl_dom_list_t *dom = NULL;
-
- pinode = pl_inode_get (this, inode);
- if (!pinode) {
- op_errno = ENOMEM;
- goto out;
- }
-
- dom = get_domain (pinode, volume);
- if (!dom){
- op_errno = ENOMEM;
- goto out;
+{
+ int32_t op_ret = -1;
+ int32_t op_errno = 0;
+ int ret = -1;
+ char unwind = 1;
+ GF_UNUSED int dict_ret = -1;
+ pl_inode_t *pinode = NULL;
+ pl_entry_lock_t *reqlock = NULL;
+ pl_entry_lock_t *unlocked = NULL;
+ pl_dom_list_t *dom = NULL;
+ char *conn_id = NULL;
+ pl_ctx_t *ctx = NULL;
+ int nonblock = 0;
+ gf_boolean_t need_inode_unref = _gf_false;
+ posix_locks_private_t *priv = NULL;
+ struct list_head *pcontend = NULL;
+ struct list_head contend;
+ struct timespec now = {};
+
+ priv = this->private;
+
+ if (priv->notify_contention) {
+ pcontend = &contend;
+ INIT_LIST_HEAD(pcontend);
+ timespec_now(&now);
+ }
+
+ if (xdata)
+ dict_ret = dict_get_str(xdata, "connection-id", &conn_id);
+
+ pinode = pl_inode_get(this, inode, NULL);
+ if (!pinode) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ if (frame->root->client) {
+ ctx = pl_ctx_get(frame->root->client, this);
+ if (!ctx) {
+ op_errno = ENOMEM;
+ gf_log(this->name, GF_LOG_INFO, "pl_ctx_get() failed");
+ goto unwind;
}
-
- entrylk_trace_in (this, frame, volume, fd, loc, basename, cmd, type);
-
- pid = frame->root->pid;
- owner = frame->root->lk_owner;
- transport = frame->root->trans;
-
- if (owner == 0) {
- /*
- this is a special case that means release
- all locks from this transport
- */
-
- gf_log (this->name, GF_LOG_TRACE,
- "Releasing locks for transport %p", transport);
-
- release_entry_locks_for_transport (this, pinode, dom, transport);
+ }
+
+ dom = get_domain(pinode, volume);
+ if (!dom) {
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ entrylk_trace_in(this, frame, volume, fd, loc, basename, cmd, type);
+
+ reqlock = new_entrylk_lock(pinode, basename, type, dom->domain, frame,
+ conn_id, &op_errno);
+ if (!reqlock) {
+ op_ret = -1;
+ goto unwind;
+ }
+
+ /* Ideally, AFTER a successful lock (both blocking and non-blocking) or
+ * an unsuccessful blocking lock operation, the inode needs to be ref'd.
+ *
+ * But doing so might give room to a race where the lock-requesting
+ * client could send a DISCONNECT just before this thread refs the inode
+ * after the locking is done, and the epoll thread could unref the inode
+ * in cleanup which means the inode's refcount would come down to 0, and
+ * the call to pl_forget() at this point destroys @pinode. Now when
+ * the io-thread executing this function tries to access pinode,
+ * it could crash on account of illegal memory access.
+ *
+ * To get around this problem, the inode is ref'd once even before
+ * adding the lock into client_list as a precautionary measure.
+ * This way even if there are DISCONNECTs, there will always be 1 extra
+ * ref on the inode, so @pinode is still alive until after the
+ * current stack unwinds.
+ */
+ pinode->inode = inode_ref(inode);
+ if (priv->revocation_secs != 0) {
+ if (cmd != ENTRYLK_UNLOCK) {
+ __entrylk_prune_stale(this, pinode, dom, reqlock);
+ } else if (priv->monkey_unlocking == _gf_true) {
+ if (pl_does_monkey_want_stuck_lock()) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "MONKEY LOCKING (forcing stuck lock)!");
op_ret = 0;
-
+ need_inode_unref = _gf_true;
+ pthread_mutex_lock(&pinode->mutex);
+ {
+ __pl_entrylk_unref(reqlock);
+ }
+ pthread_mutex_unlock(&pinode->mutex);
goto out;
+ }
}
+ }
- switch (cmd) {
+ switch (cmd) {
+ case ENTRYLK_LOCK_NB:
+ nonblock = 1;
+ /* fall through */
case ENTRYLK_LOCK:
- pthread_mutex_lock (&pinode->mutex);
- {
- ret = __lock_name (pinode, basename, type,
- frame, dom, this, 0);
- }
- pthread_mutex_unlock (&pinode->mutex);
-
- op_errno = -ret;
- if (ret < 0) {
- if (ret == -EAGAIN)
- unwind = 0;
- else
- unwind = 1;
- goto out;
+ if (ctx)
+ pthread_mutex_lock(&ctx->lock);
+ pthread_mutex_lock(&pinode->mutex);
+ {
+ reqlock->pinode = pinode;
+
+ ret = __lock_entrylk(this, pinode, reqlock, nonblock, dom, &now,
+ pcontend);
+ if (ret == 0) {
+ reqlock->frame = NULL;
+ op_ret = 0;
} else {
- op_ret = 0;
- op_errno = 0;
- unwind = 1;
- goto out;
+ op_errno = -ret;
}
- break;
+ if (ctx && (!ret || !nonblock))
+ list_add(&reqlock->client_list, &ctx->entrylk_lockers);
- case ENTRYLK_LOCK_NB:
- unwind = 1;
- pthread_mutex_lock (&pinode->mutex);
- {
- ret = __lock_name (pinode, basename, type,
- frame, dom, this, 1);
- }
- pthread_mutex_unlock (&pinode->mutex);
-
- if (ret < 0) {
- op_errno = -ret;
- goto out;
+ if (ret == -EAGAIN && !nonblock) {
+ /* blocked */
+ unwind = 0;
+ } else {
+ __pl_entrylk_unref(reqlock);
}
- break;
+            /* Only when a non-blocking lock attempt fails is the extra
+             * ref taken before the switch block dropped here; in every
+             * other case it is retained until unlock or client cleanup.
+             */
+ if ((ret == -EAGAIN) && (nonblock))
+ need_inode_unref = _gf_true;
+ }
+ pthread_mutex_unlock(&pinode->mutex);
+ if (ctx)
+ pthread_mutex_unlock(&ctx->lock);
+ break;
case ENTRYLK_UNLOCK:
- pthread_mutex_lock (&pinode->mutex);
- {
- unlocked = __unlock_name (dom, basename, type);
+ if (ctx)
+ pthread_mutex_lock(&ctx->lock);
+ pthread_mutex_lock(&pinode->mutex);
+ {
+ /* Irrespective of whether unlock succeeds or not,
+ * the extra inode ref that was done before the switch
+ * block must be negated. Towards this,
+ * @need_inode_unref flag is set unconditionally here.
+ */
+ need_inode_unref = _gf_true;
+ unlocked = __unlock_entrylk(dom, reqlock);
+ if (unlocked) {
+ list_del_init(&unlocked->client_list);
+ __pl_entrylk_unref(unlocked);
+ op_ret = 0;
+ } else {
+ op_errno = EINVAL;
}
- pthread_mutex_unlock (&pinode->mutex);
+ __pl_entrylk_unref(reqlock);
+ }
+ pthread_mutex_unlock(&pinode->mutex);
+ if (ctx)
+ pthread_mutex_unlock(&ctx->lock);
- if (unlocked)
- grant_blocked_entry_locks (this, pinode, unlocked, dom);
+ grant_blocked_entry_locks(this, pinode, dom, &now, pcontend);
- break;
+ break;
default:
- gf_log (this->name, GF_LOG_ERROR,
- "Unexpected case in entrylk (cmd=%d). Please file"
- "a bug report at http://bugs.gluster.com", cmd);
- goto out;
- }
+ need_inode_unref = _gf_true;
+ gf_log(this->name, GF_LOG_ERROR,
+ "Unexpected case in entrylk (cmd=%d). Please file"
+ "a bug report at http://bugs.gluster.com",
+ cmd);
+ goto out;
+ }
+ /* The following (extra) unref corresponds to the ref that
+ * was done at the time the lock was granted.
+ */
+ if ((cmd == ENTRYLK_UNLOCK) && (op_ret == 0))
+ inode_unref(pinode->inode);
- op_ret = 0;
out:
- pl_update_refkeeper (this, inode);
- if (unwind) {
- entrylk_trace_out (this, frame, volume, fd, loc, basename,
- cmd, type, op_ret, op_errno);
- STACK_UNWIND_STRICT (entrylk, frame, op_ret, op_errno);
- } else {
- entrylk_trace_block (this, frame, volume, fd, loc, basename,
- cmd, type);
- }
+ if (need_inode_unref)
+ inode_unref(pinode->inode);
+ if (unwind) {
+ entrylk_trace_out(this, frame, volume, fd, loc, basename, cmd, type,
+ op_ret, op_errno);
+ unwind:
+ STACK_UNWIND_STRICT(entrylk, frame, op_ret, op_errno, NULL);
+ }
- return 0;
+ if (pcontend != NULL) {
+ entrylk_contention_notify(this, pcontend);
+ }
+
+ return 0;
}
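
The inode reference accounting in this function is easy to lose track of. The following sketch only restates the rules from the comments above as a reading aid (it is not code from the patch): the precautionary ref taken before the switch is dropped for unlocks, for failed non-blocking lock attempts and for the error path, while granted and blocked locks keep it; a successful unlock additionally drops the ref taken when the lock was granted.

/* 1 = drop the precautionary ref taken before the switch, 0 = keep it. */
static int
entrylk_drops_precautionary_ref(int cmd_is_unlock, int nonblocking,
                                int lock_failed_eagain, int bad_cmd)
{
    if (cmd_is_unlock || bad_cmd)
        return 1;
    if (nonblocking && lock_failed_eagain)
        return 1;
    return 0;   /* granted or blocked: kept until unlock or client cleanup */
}
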
/**
@@ -729,17 +947,16 @@ out:
*/
int
-pl_entrylk (call_frame_t *frame, xlator_t *this,
- const char *volume, loc_t *loc, const char *basename,
- entrylk_cmd cmd, entrylk_type type)
+pl_entrylk(call_frame_t *frame, xlator_t *this, const char *volume, loc_t *loc,
+ const char *basename, entrylk_cmd cmd, entrylk_type type,
+ dict_t *xdata)
{
+ pl_common_entrylk(frame, this, volume, loc->inode, basename, cmd, type, loc,
+ NULL, xdata);
- pl_common_entrylk (frame, this, volume, loc->inode, basename, cmd, type, loc, NULL);
-
- return 0;
+ return 0;
}
-
/**
* pl_fentrylk:
*
@@ -747,73 +964,190 @@ pl_entrylk (call_frame_t *frame, xlator_t *this,
*/
int
-pl_fentrylk (call_frame_t *frame, xlator_t *this,
- const char *volume, fd_t *fd, const char *basename,
- entrylk_cmd cmd, entrylk_type type)
+pl_fentrylk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
+ const char *basename, entrylk_cmd cmd, entrylk_type type,
+ dict_t *xdata)
{
+ pl_common_entrylk(frame, this, volume, fd->inode, basename, cmd, type, NULL,
+ fd, xdata);
- pl_common_entrylk (frame, this, volume, fd->inode, basename, cmd, type, NULL, fd);
-
- return 0;
+ return 0;
}
-
-static int32_t
-__get_entrylk_count (xlator_t *this, pl_inode_t *pl_inode)
+static void
+pl_entrylk_log_cleanup(pl_entry_lock_t *lock)
{
- int32_t count = 0;
- pl_entry_lock_t *lock = NULL;
- pl_dom_list_t *dom = NULL;
+ pl_inode_t *pinode = NULL;
- list_for_each_entry (dom, &pl_inode->dom_list, inode_list) {
- list_for_each_entry (lock, &dom->entrylk_list, domain_list) {
+ pinode = lock->pinode;
+
+ gf_log(THIS->name, GF_LOG_WARNING,
+ "releasing lock on %s held by "
+ "{client=%p, pid=%" PRId64 " lk-owner=%s}",
+ uuid_utoa(pinode->gfid), lock->client, (uint64_t)lock->client_pid,
+ lkowner_utoa(&lock->owner));
+}
- gf_log (this->name, GF_LOG_DEBUG,
- " XATTR DEBUG"
- " domain: %s %s on %s state = Active",
- dom->domain,
- lock->type == ENTRYLK_RDLCK ? "ENTRYLK_RDLCK" :
- "ENTRYLK_WRLCK", lock->basename);
- count++;
+/* Release all entrylks from this client */
+int
+pl_entrylk_client_cleanup(xlator_t *this, pl_ctx_t *ctx)
+{
+ posix_locks_private_t *priv;
+ pl_entry_lock_t *tmp = NULL;
+ pl_entry_lock_t *l = NULL;
+ pl_dom_list_t *dom = NULL;
+ pl_inode_t *pinode = NULL;
+ struct list_head *pcontend = NULL;
+ struct list_head released;
+ struct list_head unwind;
+ struct list_head contend;
+ struct timespec now = {};
+
+ INIT_LIST_HEAD(&released);
+ INIT_LIST_HEAD(&unwind);
+
+ priv = this->private;
+ if (priv->notify_contention) {
+ pcontend = &contend;
+ INIT_LIST_HEAD(pcontend);
+ timespec_now(&now);
+ }
+
+ pthread_mutex_lock(&ctx->lock);
+ {
+ list_for_each_entry_safe(l, tmp, &ctx->entrylk_lockers, client_list)
+ {
+ pl_entrylk_log_cleanup(l);
+
+ pinode = l->pinode;
+
+ pthread_mutex_lock(&pinode->mutex);
+ {
+ /* If the entrylk object is part of granted list but not
+ * blocked list, then perform the following actions:
+ * i. delete the object from granted list;
+ * ii. grant other locks (from other clients) that may
+ * have been blocked on this entrylk; and
+ * iii. unref the object.
+ *
+ * If the entrylk object (L1) is part of both granted
+ * and blocked lists, then this means that a parallel
+ * unlock on another entrylk (L2 say) may have 'granted'
+ * L1 and added it to 'granted' list in
+ * __grant_blocked_entry_locks() (although using the
+ * 'blocked_locks' member). In that case, the cleanup
+ * codepath must try and grant other overlapping
+ * blocked entrylks from other clients, now that L1 is
+ * out of their way and then unref L1 in the end, and
+ * leave it to the other thread (the one executing
+ * unlock codepath) to unwind L1's frame, delete it from
+ * blocked_locks list, and perform the last unref on L1.
+ *
+ * If the entrylk object (L1) is part of blocked list
+ * only, the cleanup code path must:
+ * i. delete it from the blocked_locks list inside
+ * this critical section,
+ * ii. unwind its frame with EAGAIN,
+ * iii. try and grant blocked entry locks from other
+ * clients that were otherwise grantable, but were
+ * blocked to avoid leaving L1 to starve forever.
+ * iv. unref the object.
+ */
+ list_del_init(&l->client_list);
+
+ if (!list_empty(&l->domain_list)) {
+ list_del_init(&l->domain_list);
+ list_add_tail(&l->client_list, &released);
+ } else {
+ list_del_init(&l->blocked_locks);
+ list_add_tail(&l->client_list, &unwind);
}
+ }
+ pthread_mutex_unlock(&pinode->mutex);
+ }
+ }
+ pthread_mutex_unlock(&ctx->lock);
- list_for_each_entry (lock, &dom->blocked_entrylks, blocked_locks) {
+ if (!list_empty(&unwind)) {
+ list_for_each_entry_safe(l, tmp, &unwind, client_list)
+ {
+ list_del_init(&l->client_list);
- gf_log (this->name, GF_LOG_DEBUG,
- " XATTR DEBUG"
- " domain: %s %s on %s state = Blocked",
- dom->domain,
- lock->type == ENTRYLK_RDLCK ? "ENTRYLK_RDLCK" :
- "ENTRYLK_WRLCK", lock->basename);
- count++;
- }
+ if (l->frame)
+ STACK_UNWIND_STRICT(entrylk, l->frame, -1, EAGAIN, NULL);
+ list_add_tail(&l->client_list, &released);
+ }
+ }
+
+ if (!list_empty(&released)) {
+ list_for_each_entry_safe(l, tmp, &released, client_list)
+ {
+ list_del_init(&l->client_list);
+
+ pinode = l->pinode;
+ dom = get_domain(pinode, l->volume);
+
+ grant_blocked_entry_locks(this, pinode, dom, &now, pcontend);
+
+ pthread_mutex_lock(&pinode->mutex);
+ {
+ __pl_entrylk_unref(l);
+ }
+ pthread_mutex_unlock(&pinode->mutex);
+
+ inode_unref(pinode->inode);
}
+ }
+
+ if (pcontend != NULL) {
+ entrylk_contention_notify(this, pcontend);
+ }
- return count;
+ return 0;
}
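
/* A minimal sketch (hypothetical names, not part of the patch) of the
 * two-bucket classification the cleanup above performs on a client's
 * entrylks: locks still on a domain list are merely released (and waiters
 * re-granted), while locks that were only blocked must first have their
 * frames unwound with EAGAIN. */
#include <stdbool.h>

enum cleanup_action {
    CLEANUP_RELEASE_ONLY,       /* granted: drop it, then re-grant waiters  */
    CLEANUP_UNWIND_AND_RELEASE  /* blocked only: fail the caller, then drop */
};

static enum cleanup_action
classify_for_cleanup(bool on_domain_list)
{
    return on_domain_list ? CLEANUP_RELEASE_ONLY : CLEANUP_UNWIND_AND_RELEASE;
}
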
int32_t
-get_entrylk_count (xlator_t *this, inode_t *inode)
+__get_entrylk_count(xlator_t *this, pl_inode_t *pl_inode)
{
- pl_inode_t *pl_inode = NULL;
- uint64_t tmp_pl_inode = 0;
- int ret = 0;
- int32_t count = 0;
+ int32_t count = 0;
+ pl_entry_lock_t *lock = NULL;
+ pl_dom_list_t *dom = NULL;
- ret = inode_ctx_get (inode, this, &tmp_pl_inode);
- if (ret != 0) {
- goto out;
- }
+ list_for_each_entry(dom, &pl_inode->dom_list, inode_list)
+ {
+ list_for_each_entry(lock, &dom->entrylk_list, domain_list) { count++; }
- pl_inode = (pl_inode_t *)(long) tmp_pl_inode;
-
- pthread_mutex_lock (&pl_inode->mutex);
+ list_for_each_entry(lock, &dom->blocked_entrylks, blocked_locks)
{
- count = __get_entrylk_count (this, pl_inode);
+ count++;
}
- pthread_mutex_unlock (&pl_inode->mutex);
+ }
+
+ return count;
+}
+
+int32_t
+get_entrylk_count(xlator_t *this, inode_t *inode)
+{
+ pl_inode_t *pl_inode = NULL;
+ uint64_t tmp_pl_inode = 0;
+ int ret = 0;
+ int32_t count = 0;
+
+ ret = inode_ctx_get(inode, this, &tmp_pl_inode);
+ if (ret != 0) {
+ goto out;
+ }
+
+ pl_inode = (pl_inode_t *)(long)tmp_pl_inode;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ count = __get_entrylk_count(this, pl_inode);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
out:
- return count;
+ return count;
}
diff --git a/xlators/features/locks/src/inodelk.c b/xlators/features/locks/src/inodelk.c
index 81cd26730fd..d4e51d6e0a1 100644
--- a/xlators/features/locks/src/inodelk.c
+++ b/xlators/features/locks/src/inodelk.c
@@ -1,742 +1,1174 @@
/*
- Copyright (c) 2006, 2007, 2008 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
-
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
+ Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
-#include "glusterfs.h"
-#include "compat.h"
-#include "xlator.h"
-#include "inode.h"
-#include "logging.h"
-#include "common-utils.h"
-#include "list.h"
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/dict.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/list.h>
+#include <glusterfs/upcall-utils.h>
#include "locks.h"
+#include "clear.h"
#include "common.h"
void
-__delete_inode_lock (pl_inode_lock_t *lock)
+__delete_inode_lock(pl_inode_lock_t *lock)
{
- list_del (&lock->list);
+ list_del_init(&lock->list);
+}
+
+static void
+__pl_inodelk_ref(pl_inode_lock_t *lock)
+{
+ lock->ref++;
}
void
-__destroy_inode_lock (pl_inode_lock_t *lock)
+__pl_inodelk_unref(pl_inode_lock_t *lock)
{
- GF_FREE (lock);
+ lock->ref--;
+ if (!lock->ref) {
+ GF_FREE(lock->connection_id);
+ GF_FREE(lock);
+ }
}
-/* Check if 2 inodelks are conflicting on type. Only 2 shared locks don't conflict */
+/* Check if 2 inodelks are conflicting on type. Only 2 shared locks don't
+ * conflict */
static int
-inodelk_type_conflict (pl_inode_lock_t *l1, pl_inode_lock_t *l2)
+inodelk_type_conflict(pl_inode_lock_t *l1, pl_inode_lock_t *l2)
{
- if (l2->fl_type == F_WRLCK || l1->fl_type == F_WRLCK)
- return 1;
+ if (l2->fl_type == F_WRLCK || l1->fl_type == F_WRLCK)
+ return 1;
- return 0;
+ return 0;
}
void
-pl_print_inodelk (char *str, int size, int cmd, struct gf_flock *flock, const char *domain)
+pl_print_inodelk(char *str, int size, int cmd, struct gf_flock *flock,
+ const char *domain)
{
- char *cmd_str = NULL;
- char *type_str = NULL;
+ char *cmd_str = NULL;
+ char *type_str = NULL;
- switch (cmd) {
+ switch (cmd) {
#if F_GETLK != F_GETLK64
case F_GETLK64:
#endif
case F_GETLK:
- cmd_str = "GETLK";
- break;
+ cmd_str = "GETLK";
+ break;
#if F_SETLK != F_SETLK64
case F_SETLK64:
#endif
case F_SETLK:
- cmd_str = "SETLK";
- break;
+ cmd_str = "SETLK";
+ break;
#if F_SETLKW != F_SETLKW64
case F_SETLKW64:
#endif
case F_SETLKW:
- cmd_str = "SETLKW";
- break;
+ cmd_str = "SETLKW";
+ break;
default:
- cmd_str = "UNKNOWN";
- break;
- }
+ cmd_str = "UNKNOWN";
+ break;
+ }
- switch (flock->l_type) {
+ switch (flock->l_type) {
case F_RDLCK:
- type_str = "READ";
- break;
+ type_str = "READ";
+ break;
case F_WRLCK:
- type_str = "WRITE";
- break;
+ type_str = "WRITE";
+ break;
case F_UNLCK:
- type_str = "UNLOCK";
- break;
+ type_str = "UNLOCK";
+ break;
default:
- type_str = "UNKNOWN";
- break;
- }
-
- snprintf (str, size, "lock=INODELK, cmd=%s, type=%s, "
- "domain: %s, start=%llu, len=%llu, pid=%llu",
- cmd_str, type_str, domain,
- (unsigned long long) flock->l_start,
- (unsigned long long) flock->l_len,
- (unsigned long long) flock->l_pid);
+ type_str = "UNKNOWN";
+ break;
+ }
+
+ snprintf(str, size,
+ "lock=INODELK, cmd=%s, type=%s, "
+ "domain: %s, start=%llu, len=%llu, pid=%llu",
+ cmd_str, type_str, domain, (unsigned long long)flock->l_start,
+ (unsigned long long)flock->l_len,
+ (unsigned long long)flock->l_pid);
}
 /* Determine if the two inodelks overlap each other's lock regions */
static int
-inodelk_overlap (pl_inode_lock_t *l1, pl_inode_lock_t *l2)
+inodelk_overlap(pl_inode_lock_t *l1, pl_inode_lock_t *l2)
{
- return ((l1->fl_end >= l2->fl_start) &&
- (l2->fl_end >= l1->fl_start));
+ return ((l1->fl_end >= l2->fl_start) && (l2->fl_end >= l1->fl_start));
}
/* Returns true if the 2 inodelks have the same owner */
-static int same_inodelk_owner (pl_inode_lock_t *l1, pl_inode_lock_t *l2)
+static int
+same_inodelk_owner(pl_inode_lock_t *l1, pl_inode_lock_t *l2)
{
- return ((l1->owner == l2->owner) &&
- (l1->transport == l2->transport));
+ return (is_same_lkowner(&l1->owner, &l2->owner) &&
+ (l1->client == l2->client));
}
/* Returns true if the 2 inodelks conflict with each other */
static int
-inodelk_conflict (pl_inode_lock_t *l1, pl_inode_lock_t *l2)
+inodelk_conflict(pl_inode_lock_t *l1, pl_inode_lock_t *l2)
{
- return (inodelk_overlap (l1, l2) &&
- inodelk_type_conflict (l1, l2));
+ return (inodelk_overlap(l1, l2) && inodelk_type_conflict(l1, l2));
}
-/* Determine if lock is grantable or not */
-static pl_inode_lock_t *
-__inodelk_grantable (pl_dom_list_t *dom, pl_inode_lock_t *lock)
+/*
+ * Check to see if the candidate lock overlaps/conflicts with the
+ * requested lock. If so, determine how old the lock is and return
+ * true if it exceeds the configured threshold, false otherwise.
+ */
+static inline gf_boolean_t
+__stale_inodelk(xlator_t *this, pl_inode_lock_t *candidate_lock,
+ pl_inode_lock_t *requested_lock, time_t *lock_age_sec)
{
- pl_inode_lock_t *l = NULL;
- pl_inode_lock_t *ret = NULL;
- if (list_empty (&dom->inodelk_list))
- goto out;
- list_for_each_entry (l, &dom->inodelk_list, list){
- if (inodelk_conflict (lock, l)) {
- ret = l;
- goto out;
- }
- }
-out:
- return ret;
+ posix_locks_private_t *priv = NULL;
+
+ priv = this->private;
+ /* Question: Should we just prune them all given the
+ * chance? Or just the locks we are attempting to acquire?
+ */
+ if (inodelk_conflict(candidate_lock, requested_lock)) {
+ *lock_age_sec = gf_time() - candidate_lock->granted_time;
+ if (*lock_age_sec > priv->revocation_secs)
+ return _gf_true;
+ }
+ return _gf_false;
}
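
/* A minimal sketch of the staleness rule applied above: a lock is revocable
 * only if it conflicts with the incoming request and has been granted for
 * longer than the configured threshold. Names here are illustrative. */
#include <stdbool.h>
#include <time.h>

static bool
lock_is_stale(time_t granted_time, time_t now, time_t revocation_secs,
              bool conflicts_with_request)
{
    return conflicts_with_request && ((now - granted_time) > revocation_secs);
}
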
-static pl_inode_lock_t *
-__blocked_lock_conflict (pl_dom_list_t *dom, pl_inode_lock_t *lock)
+/* Examine any locks held on this inode and potentially revoke the lock
+ * if the age exceeds revocation_secs. We will clear _only_ those locks
+ * which are granted, and then grant those locks which are blocked.
+ *
+ * Depending on how this patch works in the wild, we may expand this and
+ * introduce a heuristic which clears blocked locks as well if they
+ * are beyond a threshold.
+ */
+static gf_boolean_t
+__inodelk_prune_stale(xlator_t *this, pl_inode_t *pinode, pl_dom_list_t *dom,
+ pl_inode_lock_t *lock)
{
- pl_inode_lock_t *l = NULL;
- pl_inode_lock_t *ret = NULL;
-
- if (list_empty (&dom->blocked_inodelks))
- return NULL;
+ posix_locks_private_t *priv = NULL;
+ pl_inode_lock_t *tmp = NULL;
+ pl_inode_lock_t *lk = NULL;
+ gf_boolean_t revoke_lock = _gf_false;
+ int bcount = 0;
+ int gcount = 0;
+ int op_errno = 0;
+ clrlk_args args;
+ args.opts = NULL;
+ time_t lk_age_sec = 0;
+ uint32_t max_blocked = 0;
+ char *reason_str = NULL;
+
+ priv = this->private;
+
+ args.type = CLRLK_INODE;
+ if (priv->revocation_clear_all == _gf_true)
+ args.kind = CLRLK_ALL;
+ else
+ args.kind = CLRLK_GRANTED;
+
+ if (list_empty(&dom->inodelk_list))
+ goto out;
+
+ pthread_mutex_lock(&pinode->mutex);
+ list_for_each_entry_safe(lk, tmp, &dom->inodelk_list, list)
+ {
+ if (__stale_inodelk(this, lk, lock, &lk_age_sec) == _gf_true) {
+ revoke_lock = _gf_true;
+ reason_str = "age";
+ break;
+ }
+ }
- list_for_each_entry (l, &dom->blocked_inodelks, blocked_locks) {
- if (inodelk_conflict (lock, l)) {
- ret = l;
- goto out;
- }
+ max_blocked = priv->revocation_max_blocked;
+ if (max_blocked != 0 && revoke_lock == _gf_false) {
+ list_for_each_entry_safe(lk, tmp, &dom->blocked_inodelks, blocked_locks)
+ {
+ max_blocked--;
+ if (max_blocked == 0) {
+ revoke_lock = _gf_true;
+ reason_str = "max blocked";
+ break;
+ }
}
+ }
+ pthread_mutex_unlock(&pinode->mutex);
out:
- return ret;
+ if (revoke_lock == _gf_true) {
+ clrlk_clear_inodelk(this, pinode, dom, &args, &bcount, &gcount,
+ &op_errno);
+ gf_log(this->name, GF_LOG_WARNING,
+ "Lock revocation [reason: %s; gfid: %s; domain: %s; "
+ "age: %ld sec] - Inode lock revoked: %d granted & %d "
+ "blocked locks cleared",
+ reason_str, uuid_utoa(pinode->gfid), dom->domain, lk_age_sec,
+ gcount, bcount);
+ }
+ return revoke_lock;
}
-static int
-__owner_has_lock (pl_dom_list_t *dom, pl_inode_lock_t *newlock)
+void
+inodelk_contention_notify_check(xlator_t *this, pl_inode_lock_t *lock,
+ struct timespec *now, struct list_head *contend)
{
- pl_inode_lock_t *lock = NULL;
+ posix_locks_private_t *priv;
+ int64_t elapsed;
- list_for_each_entry (lock, &dom->inodelk_list, list) {
- if (same_inodelk_owner (lock, newlock))
- return 1;
- }
+ priv = this->private;
- list_for_each_entry (lock, &dom->blocked_inodelks, blocked_locks) {
- if (same_inodelk_owner (lock, newlock))
- return 1;
- }
+ /* If this lock is in a list, it means that we are about to send a
+ * notification for it, so no need to do anything else. */
+ if (!list_empty(&lock->contend)) {
+ return;
+ }
+
+ elapsed = now->tv_sec;
+ elapsed -= lock->contention_time.tv_sec;
+ if (now->tv_nsec < lock->contention_time.tv_nsec) {
+ elapsed--;
+ }
+ if (elapsed < priv->notify_contention_delay) {
+ return;
+ }
- return 0;
-}
+ /* All contention notifications will be sent outside of the locked
+ * region. This means that currently granted locks might have already
+     * been unlocked by that time. To prevent the lock or the inode from
+     * being destroyed before we process them, we take an additional reference
+ * on both. */
+ inode_ref(lock->pl_inode->inode);
+ __pl_inodelk_ref(lock);
+ lock->contention_time = *now;
-/* Determines if lock can be granted and adds the lock. If the lock
- * is blocking, adds it to the blocked_inodelks list of the domain.
- */
-static int
-__lock_inodelk (xlator_t *this, pl_inode_t *pl_inode, pl_inode_lock_t *lock,
- int can_block, pl_dom_list_t *dom)
+ list_add_tail(&lock->contend, contend);
+}
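
/* A standalone sketch of the elapsed-time computation above: whole seconds
 * are subtracted and one second is "borrowed" when the nanosecond field of
 * 'now' has not yet caught up with the previous notification time. */
#include <stdint.h>
#include <time.h>

static int64_t
elapsed_seconds(const struct timespec *now, const struct timespec *then)
{
    int64_t elapsed = now->tv_sec - then->tv_sec;

    if (now->tv_nsec < then->tv_nsec)
        elapsed--; /* a full second has not passed yet */

    return elapsed;
}
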
+
+void
+inodelk_contention_notify(xlator_t *this, struct list_head *contend)
{
- pl_inode_lock_t *conf = NULL;
- int ret = -EINVAL;
+ struct gf_upcall up;
+ struct gf_upcall_inodelk_contention lc;
+ pl_inode_lock_t *lock;
+ pl_inode_t *pl_inode;
+ client_t *client;
+ gf_boolean_t notify;
+
+ while (!list_empty(contend)) {
+ lock = list_first_entry(contend, pl_inode_lock_t, contend);
+
+ pl_inode = lock->pl_inode;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+
+ /* If the lock has already been released, no notification is
+ * sent. We clear the notification time in this case. */
+ notify = !list_empty(&lock->list);
+ if (!notify) {
+ lock->contention_time.tv_sec = 0;
+ lock->contention_time.tv_nsec = 0;
+ } else {
+ memcpy(&lc.flock, &lock->user_flock, sizeof(lc.flock));
+ lc.pid = lock->client_pid;
+ lc.domain = lock->volume;
+ lc.xdata = NULL;
+
+ gf_uuid_copy(up.gfid, lock->pl_inode->gfid);
+ client = (client_t *)lock->client;
+ if (client == NULL) {
+ /* A NULL client can be found if the inodelk
+ * was issued by a server side xlator. */
+ up.client_uid = NULL;
+ } else {
+ up.client_uid = client->client_uid;
+ }
+ }
- conf = __inodelk_grantable (dom, lock);
- if (conf){
- ret = -EAGAIN;
- if (can_block == 0)
- goto out;
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ if (notify) {
+ up.event_type = GF_UPCALL_INODELK_CONTENTION;
+ up.data = &lc;
+
+ if (this->notify(this, GF_EVENT_UPCALL, &up) < 0) {
+ gf_msg_debug(this->name, 0,
+ "Inodelk contention notification "
+ "failed");
+ } else {
+ gf_msg_debug(this->name, 0,
+ "Inodelk contention notification "
+ "sent");
+ }
+ }
- list_add_tail (&lock->blocked_locks, &dom->blocked_inodelks);
+ pthread_mutex_lock(&pl_inode->mutex);
- gf_log (this->name, GF_LOG_TRACE,
- "%s (pid=%d) lk-owner:%"PRIu64" %"PRId64" - %"PRId64" => Blocked",
- lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
- lock->client_pid,
- lock->owner,
- lock->user_flock.l_start,
- lock->user_flock.l_len);
+ list_del_init(&lock->contend);
+ __pl_inodelk_unref(lock);
+ pthread_mutex_unlock(&pl_inode->mutex);
- goto out;
+ inode_unref(pl_inode->inode);
+ }
+}
+
+/* Determine if lock is grantable or not */
+static pl_inode_lock_t *
+__inodelk_grantable(xlator_t *this, pl_dom_list_t *dom, pl_inode_lock_t *lock,
+ struct timespec *now, struct list_head *contend)
+{
+ pl_inode_lock_t *l = NULL;
+ pl_inode_lock_t *ret = NULL;
+
+ list_for_each_entry(l, &dom->inodelk_list, list)
+ {
+ if (inodelk_conflict(lock, l) && !same_inodelk_owner(lock, l)) {
+ if (ret == NULL) {
+ ret = l;
+ if (contend == NULL) {
+ break;
+ }
+ }
+ inodelk_contention_notify_check(this, l, now, contend);
}
+ }
- if (__blocked_lock_conflict (dom, lock) && !(__owner_has_lock (dom, lock))) {
- ret = -EAGAIN;
- if (can_block == 0)
- goto out;
+ return ret;
+}
- list_add_tail (&lock->blocked_locks, &dom->blocked_inodelks);
+static pl_inode_lock_t *
+__blocked_lock_conflict(pl_dom_list_t *dom, pl_inode_lock_t *lock)
+{
+ pl_inode_lock_t *l = NULL;
- gf_log (this->name, GF_LOG_TRACE,
- "Lock is grantable, but blocking to prevent starvation");
- gf_log (this->name, GF_LOG_TRACE,
- "%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => Blocked",
- lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
- lock->client_pid,
- lock->owner,
- lock->user_flock.l_start,
- lock->user_flock.l_len);
+ list_for_each_entry(l, &dom->blocked_inodelks, blocked_locks)
+ {
+ if (inodelk_conflict(lock, l)) {
+ return l;
+ }
+ }
+ return NULL;
+}
- goto out;
- }
- list_add (&lock->list, &dom->inodelk_list);
+static int
+__owner_has_lock(pl_dom_list_t *dom, pl_inode_lock_t *newlock)
+{
+ pl_inode_lock_t *lock = NULL;
+
+ list_for_each_entry(lock, &dom->inodelk_list, list)
+ {
+ if (same_inodelk_owner(lock, newlock))
+ return 1;
+ }
- ret = 0;
+ list_for_each_entry(lock, &dom->blocked_inodelks, blocked_locks)
+ {
+ if (same_inodelk_owner(lock, newlock))
+ return 1;
+ }
+ return 0;
+}
+
+static int
+__lock_blocked_add(xlator_t *this, pl_dom_list_t *dom, pl_inode_lock_t *lock,
+ int can_block)
+{
+ if (can_block == 0) {
+ goto out;
+ }
+
+ lock->blkd_time = gf_time();
+ list_add_tail(&lock->blocked_locks, &dom->blocked_inodelks);
+
+ gf_msg_trace(this->name, 0,
+ "%s (pid=%d) (lk-owner=%s) %" PRId64
+ " - "
+ "%" PRId64 " => Blocked",
+ lock->fl_type == F_UNLCK ? "Unlock" : "Lock", lock->client_pid,
+ lkowner_utoa(&lock->owner), lock->user_flock.l_start,
+ lock->user_flock.l_len);
+
+ pl_trace_block(this, lock->frame, NULL, NULL, F_SETLKW, &lock->user_flock,
+ lock->volume);
out:
+ return -EAGAIN;
+}
+
+/* Determines if lock can be granted and adds the lock. If the lock
+ * is blocking, adds it to the blocked_inodelks list of the domain.
+ */
+static int
+__lock_inodelk(xlator_t *this, pl_inode_t *pl_inode, pl_inode_lock_t *lock,
+ int can_block, pl_dom_list_t *dom, struct timespec *now,
+ struct list_head *contend)
+{
+ pl_inode_lock_t *conf = NULL;
+ int ret;
+
+ ret = pl_inode_remove_inodelk(pl_inode, lock);
+ if (ret < 0) {
return ret;
+ }
+ if (ret == 0) {
+ conf = __inodelk_grantable(this, dom, lock, now, contend);
+ }
+ if ((ret > 0) || (conf != NULL)) {
+ return __lock_blocked_add(this, dom, lock, can_block);
+ }
+
+    /* To prevent starvation of blocked locks, check if there are any blocked
+     * locks that may conflict with this lock. If there are, then don't grant
+     * the lock. BUT grant the lock if the owner already has a lock, to allow
+ * nested locks.
+ * Example:
+ * SHD from Machine1 takes (gfid, 0-infinity) and is granted.
+ * SHD from machine2 takes (gfid, 0-infinity) and is blocked.
+ * When SHD from Machine1 takes (gfid, 0-128KB) it
+ * needs to be granted, without which the earlier lock on 0-infinity
+ * will not be unlocked by SHD from Machine1.
+ * TODO: Find why 'owner_has_lock' is checked even for blocked locks.
+ */
+ if (__blocked_lock_conflict(dom, lock) && !(__owner_has_lock(dom, lock))) {
+ if (can_block != 0) {
+ gf_log(this->name, GF_LOG_DEBUG,
+ "Lock is grantable, but blocking to prevent "
+ "starvation");
+ }
+
+ return __lock_blocked_add(this, dom, lock, can_block);
+ }
+ __pl_inodelk_ref(lock);
+ lock->granted_time = gf_time();
+ list_add(&lock->list, &dom->inodelk_list);
+
+ return 0;
}
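
/* A simplified sketch (hypothetical helper, not the real implementation) of
 * the grant/block decision order taken by __lock_inodelk(): a pending inode
 * removal or a conflict with a granted lock always blocks; a conflict with an
 * already-blocked lock blocks too, unless the same owner already holds a lock
 * (so that nested/overlapping locks, e.g. from the self-heal daemon, can make
 * progress). */
#include <stdbool.h>

enum lk_decision { LK_GRANT, LK_BLOCK };

static enum lk_decision
inodelk_decide(bool pending_remove, bool conflicts_with_granted,
               bool conflicts_with_blocked, bool owner_already_has_lock)
{
    if (pending_remove || conflicts_with_granted)
        return LK_BLOCK;
    if (conflicts_with_blocked && !owner_already_has_lock)
        return LK_BLOCK; /* avoid starving waiters already in the queue */
    return LK_GRANT;
}
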
/* Return true if the two inodelks have exactly same lock boundaries */
static int
-inodelks_equal (pl_inode_lock_t *l1, pl_inode_lock_t *l2)
+inodelks_equal(pl_inode_lock_t *l1, pl_inode_lock_t *l2)
{
- if ((l1->fl_start == l2->fl_start) &&
- (l1->fl_end == l2->fl_end))
- return 1;
+ if ((l1->fl_start == l2->fl_start) && (l1->fl_end == l2->fl_end))
+ return 1;
- return 0;
+ return 0;
}
-
static pl_inode_lock_t *
-find_matching_inodelk (pl_inode_lock_t *lock, pl_dom_list_t *dom)
+find_matching_inodelk(pl_inode_lock_t *lock, pl_dom_list_t *dom)
{
- pl_inode_lock_t *l = NULL;
- list_for_each_entry (l, &dom->inodelk_list, list) {
- if (inodelks_equal (l, lock) &&
- same_inodelk_owner (l, lock))
- return l;
- }
- return NULL;
+ pl_inode_lock_t *l = NULL;
+ list_for_each_entry(l, &dom->inodelk_list, list)
+ {
+ if (inodelks_equal(l, lock) && same_inodelk_owner(l, lock))
+ return l;
+ }
+ return NULL;
}
/* Set F_UNLCK removes a lock which has the exact same lock boundaries
* as the UNLCK lock specifies. If such a lock is not found, returns invalid
*/
static pl_inode_lock_t *
-__inode_unlock_lock (xlator_t *this, pl_inode_lock_t *lock, pl_dom_list_t *dom)
+__inode_unlock_lock(xlator_t *this, pl_inode_lock_t *lock, pl_dom_list_t *dom)
{
-
- pl_inode_lock_t *conf = NULL;
-
- conf = find_matching_inodelk (lock, dom);
- if (!conf) {
- gf_log (this->name, GF_LOG_DEBUG,
- " Matching lock not found for unlock");
- goto out;
- }
- __delete_inode_lock (conf);
- gf_log (this->name, GF_LOG_DEBUG,
- " Matching lock found for unlock");
- __destroy_inode_lock (lock);
-
+ pl_inode_lock_t *conf = NULL;
+ inode_t *inode = NULL;
+
+ inode = lock->pl_inode->inode;
+
+ conf = find_matching_inodelk(lock, dom);
+ if (!conf) {
+ gf_log(this->name, GF_LOG_ERROR,
+ " Matching lock not found for unlock %llu-%llu, by %s "
+ "on %p for gfid:%s",
+ (unsigned long long)lock->fl_start,
+ (unsigned long long)lock->fl_end, lkowner_utoa(&lock->owner),
+ lock->client, inode ? uuid_utoa(inode->gfid) : "UNKNOWN");
+ goto out;
+ }
+ __delete_inode_lock(conf);
+ gf_log(this->name, GF_LOG_DEBUG,
+ " Matching lock found for unlock %llu-%llu, by %s on %p for gfid:%s",
+ (unsigned long long)lock->fl_start, (unsigned long long)lock->fl_end,
+ lkowner_utoa(&lock->owner), lock->client,
+ inode ? uuid_utoa(inode->gfid) : "UNKNOWN");
out:
- return conf;
-
-
+ return conf;
}
-static void
-__grant_blocked_inode_locks (xlator_t *this, pl_inode_t *pl_inode,
- struct list_head *granted, pl_dom_list_t *dom)
-{
- int bl_ret = 0;
- pl_inode_lock_t *bl = NULL;
- pl_inode_lock_t *tmp = NULL;
- struct list_head blocked_list;
+void
+__grant_blocked_inode_locks(xlator_t *this, pl_inode_t *pl_inode,
+ struct list_head *granted, pl_dom_list_t *dom,
+ struct timespec *now, struct list_head *contend)
+{
+ pl_inode_lock_t *bl = NULL;
+ pl_inode_lock_t *tmp = NULL;
- INIT_LIST_HEAD (&blocked_list);
- list_splice_init (&dom->blocked_inodelks, &blocked_list);
+ struct list_head blocked_list;
- list_for_each_entry_safe (bl, tmp, &blocked_list, blocked_locks) {
+ INIT_LIST_HEAD(&blocked_list);
+ list_splice_init(&dom->blocked_inodelks, &blocked_list);
- list_del_init (&bl->blocked_locks);
+ list_for_each_entry_safe(bl, tmp, &blocked_list, blocked_locks)
+ {
+ list_del_init(&bl->blocked_locks);
- bl_ret = __lock_inodelk (this, pl_inode, bl, 1, dom);
+ bl->status = __lock_inodelk(this, pl_inode, bl, 1, dom, now, contend);
- if (bl_ret == 0) {
- list_add (&bl->blocked_locks, granted);
- }
+ if (bl->status != -EAGAIN) {
+ list_add_tail(&bl->blocked_locks, granted);
}
- return;
+ }
}
-/* Grant all inodelks blocked on a lock */
void
-grant_blocked_inode_locks (xlator_t *this, pl_inode_t *pl_inode, pl_dom_list_t *dom)
+unwind_granted_inodes(xlator_t *this, pl_inode_t *pl_inode,
+ struct list_head *granted)
{
- struct list_head granted;
- pl_inode_lock_t *lock;
- pl_inode_lock_t *tmp;
-
- INIT_LIST_HEAD (&granted);
-
- if (list_empty (&dom->blocked_inodelks)) {
- gf_log (this->name, GF_LOG_TRACE,
- "No blocked locks to be granted for domain: %s", dom->domain);
+ pl_inode_lock_t *lock;
+ pl_inode_lock_t *tmp;
+ int32_t op_ret;
+ int32_t op_errno;
+
+ list_for_each_entry_safe(lock, tmp, granted, blocked_locks)
+ {
+ if (lock->status == 0) {
+ op_ret = 0;
+ op_errno = 0;
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s (pid=%d) (lk-owner=%s) %" PRId64 " - %" PRId64
+ " => Granted",
+ lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
+ lock->client_pid, lkowner_utoa(&lock->owner),
+ lock->user_flock.l_start, lock->user_flock.l_len);
+ } else {
+ op_ret = -1;
+ op_errno = -lock->status;
}
+ pl_trace_out(this, lock->frame, NULL, NULL, F_SETLKW, &lock->user_flock,
+ op_ret, op_errno, lock->volume);
+
+ STACK_UNWIND_STRICT(inodelk, lock->frame, op_ret, op_errno, NULL);
+ lock->frame = NULL;
+ }
- pthread_mutex_lock (&pl_inode->mutex);
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ list_for_each_entry_safe(lock, tmp, granted, blocked_locks)
{
- __grant_blocked_inode_locks (this, pl_inode, &granted, dom);
+ list_del_init(&lock->blocked_locks);
+ __pl_inodelk_unref(lock);
}
- pthread_mutex_unlock (&pl_inode->mutex);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+}
- list_for_each_entry_safe (lock, tmp, &granted, blocked_locks) {
- gf_log (this->name, GF_LOG_TRACE,
- "%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => Granted",
- lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
- lock->client_pid,
- lock->owner,
- lock->user_flock.l_start,
- lock->user_flock.l_len);
+/* Grant all inodelks blocked on a lock */
+void
+grant_blocked_inode_locks(xlator_t *this, pl_inode_t *pl_inode,
+ pl_dom_list_t *dom, struct timespec *now,
+ struct list_head *contend)
+{
+ struct list_head granted;
- pl_trace_out (this, lock->frame, NULL, NULL, F_SETLKW,
- &lock->user_flock, 0, 0, lock->volume);
+ INIT_LIST_HEAD(&granted);
- STACK_UNWIND_STRICT (inodelk, lock->frame, 0, 0);
- }
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ __grant_blocked_inode_locks(this, pl_inode, &granted, dom, now,
+ contend);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+ unwind_granted_inodes(this, pl_inode, &granted);
}
-/* Release all inodelks from this transport */
-static int
-release_inode_locks_of_transport (xlator_t *this, pl_dom_list_t *dom,
- inode_t *inode, void *trans)
+static void
+pl_inodelk_log_cleanup(pl_inode_lock_t *lock)
{
- pl_inode_lock_t *tmp = NULL;
- pl_inode_lock_t *l = NULL;
-
- pl_inode_t * pinode = NULL;
+ pl_inode_t *pl_inode = NULL;
- struct list_head granted;
- struct list_head released;
+ pl_inode = lock->pl_inode;
- char *path = NULL;
-
- INIT_LIST_HEAD (&granted);
- INIT_LIST_HEAD (&released);
-
- pinode = pl_inode_get (this, inode);
+ gf_log(THIS->name, GF_LOG_WARNING,
+ "releasing lock on %s held by "
+ "{client=%p, pid=%" PRId64 " lk-owner=%s}",
+ uuid_utoa(pl_inode->gfid), lock->client, (uint64_t)lock->client_pid,
+ lkowner_utoa(&lock->owner));
+}
- pthread_mutex_lock (&pinode->mutex);
+/* Release all inodelks from this client */
+int
+pl_inodelk_client_cleanup(xlator_t *this, pl_ctx_t *ctx)
+{
+ posix_locks_private_t *priv;
+ pl_inode_lock_t *tmp = NULL;
+ pl_inode_lock_t *l = NULL;
+ pl_dom_list_t *dom = NULL;
+ pl_inode_t *pl_inode = NULL;
+ struct list_head *pcontend = NULL;
+ struct list_head released;
+ struct list_head unwind;
+ struct list_head contend;
+ struct timespec now = {};
+
+ priv = this->private;
+
+ INIT_LIST_HEAD(&released);
+ INIT_LIST_HEAD(&unwind);
+
+ if (priv->notify_contention) {
+ pcontend = &contend;
+ INIT_LIST_HEAD(pcontend);
+ timespec_now(&now);
+ }
+
+ pthread_mutex_lock(&ctx->lock);
+ {
+ list_for_each_entry_safe(l, tmp, &ctx->inodelk_lockers, client_list)
{
-
- list_for_each_entry_safe (l, tmp, &dom->blocked_inodelks, blocked_locks) {
- if (l->transport != trans)
- continue;
-
- list_del_init (&l->blocked_locks);
-
- if (inode_path (inode, NULL, &path) < 0) {
- gf_log (this->name, GF_LOG_TRACE,
- "inode_path failed");
- goto unlock;
- }
-
- gf_log (this->name, GF_LOG_TRACE,
- "releasing lock on %s held by "
- "{transport=%p, pid=%"PRId64" lk-owner=%"PRIu64"}",
- path, trans,
- (uint64_t) l->client_pid,
- l->owner);
-
- list_add (&l->blocked_locks, &released);
-
+ pl_inodelk_log_cleanup(l);
+
+ pl_inode = l->pl_inode;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ /* If the inodelk object is part of granted list but not
+ * blocked list, then perform the following actions:
+ * i. delete the object from granted list;
+ * ii. grant other locks (from other clients) that may
+ * have been blocked on this inodelk; and
+ * iii. unref the object.
+ *
+ * If the inodelk object (L1) is part of both granted
+ * and blocked lists, then this means that a parallel
+ * unlock on another inodelk (L2 say) may have 'granted'
+ * L1 and added it to 'granted' list in
+ * __grant_blocked_inode_locks() (although using the
+ * 'blocked_locks' member). In that case, the cleanup
+ * codepath must try and grant other overlapping
+ * blocked inodelks from other clients, now that L1 is
+ * out of their way and then unref L1 in the end, and
+ * leave it to the other thread (the one executing
+ * unlock codepath) to unwind L1's frame, delete it from
+ * blocked_locks list, and perform the last unref on L1.
+ *
+ * If the inodelk object (L1) is part of blocked list
+ * only, the cleanup code path must:
+ * i. delete it from the blocked_locks list inside
+ * this critical section,
+ * ii. unwind its frame with EAGAIN,
+ * iii. try and grant blocked inode locks from other
+ * clients that were otherwise grantable, but just
+ * got blocked to avoid leaving L1 to starve
+ * forever.
+ * iv. unref the object.
+ */
+ list_del_init(&l->client_list);
+
+ if (!list_empty(&l->list)) {
+ __delete_inode_lock(l);
+ list_add_tail(&l->client_list, &released);
+ } else {
+ list_del_init(&l->blocked_locks);
+ list_add_tail(&l->client_list, &unwind);
}
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
+ }
+ pthread_mutex_unlock(&ctx->lock);
- list_for_each_entry_safe (l, tmp, &dom->inodelk_list, list) {
- if (l->transport != trans)
- continue;
+ if (!list_empty(&unwind)) {
+ list_for_each_entry_safe(l, tmp, &unwind, client_list)
+ {
+ list_del_init(&l->client_list);
- __delete_inode_lock (l);
- __destroy_inode_lock (l);
+ if (l->frame)
+ STACK_UNWIND_STRICT(inodelk, l->frame, -1, EAGAIN, NULL);
+ list_add_tail(&l->client_list, &released);
+ }
+ }
+ if (!list_empty(&released)) {
+ list_for_each_entry_safe(l, tmp, &released, client_list)
+ {
+ list_del_init(&l->client_list);
- if (inode_path (inode, NULL, &path) < 0) {
- gf_log (this->name, GF_LOG_TRACE,
- "inode_path failed");
- goto unlock;
- }
+ pl_inode = l->pl_inode;
- gf_log (this->name, GF_LOG_TRACE,
- "releasing lock on %s held by "
- "{transport=%p, pid=%"PRId64" lk-owner=%"PRIu64"}",
- path, trans,
- (uint64_t) l->client_pid,
- l->owner);
+ dom = get_domain(pl_inode, l->volume);
+ grant_blocked_inode_locks(this, pl_inode, dom, &now, pcontend);
- }
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ __pl_inodelk_unref(l);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+ inode_unref(pl_inode->inode);
}
-unlock:
- if (path)
- GF_FREE (path);
+ }
- pthread_mutex_unlock (&pinode->mutex);
+ if (pcontend != NULL) {
+ inodelk_contention_notify(this, pcontend);
+ }
- list_for_each_entry_safe (l, tmp, &released, blocked_locks) {
- list_del_init (&l->blocked_locks);
-
- STACK_UNWIND_STRICT (inodelk, l->frame, -1, EAGAIN);
- GF_FREE (l);
- }
-
- grant_blocked_inode_locks (this, pinode, dom);
- return 0;
+ return 0;
}
-
static int
-pl_inode_setlk (xlator_t *this, pl_inode_t *pl_inode, pl_inode_lock_t *lock,
- int can_block, pl_dom_list_t *dom)
+pl_inode_setlk(xlator_t *this, pl_ctx_t *ctx, pl_inode_t *pl_inode,
+ pl_inode_lock_t *lock, int can_block, pl_dom_list_t *dom,
+ inode_t *inode)
{
- int ret = -EINVAL;
- pl_inode_lock_t *retlock = NULL;
-
- pthread_mutex_lock (&pl_inode->mutex);
- {
- if (lock->fl_type != F_UNLCK) {
- ret = __lock_inodelk (this, pl_inode, lock, can_block, dom);
- if (ret == 0)
- gf_log (this->name, GF_LOG_TRACE,
- "%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => OK",
- lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
- lock->client_pid,
- lock->owner,
- lock->fl_start,
- lock->fl_end);
-
- if (ret == -EAGAIN)
- gf_log (this->name, GF_LOG_TRACE,
- "%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => NOK",
- lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
- lock->client_pid,
- lock->owner,
- lock->user_flock.l_start,
- lock->user_flock.l_len);
-
- goto out;
+ posix_locks_private_t *priv = NULL;
+ int ret = -EINVAL;
+ pl_inode_lock_t *retlock = NULL;
+ gf_boolean_t unref = _gf_true;
+ gf_boolean_t need_inode_unref = _gf_false;
+ struct list_head *pcontend = NULL;
+ struct list_head contend;
+ struct list_head wake;
+ struct timespec now = {};
+ short fl_type;
+
+ lock->pl_inode = pl_inode;
+ fl_type = lock->fl_type;
+
+ priv = this->private;
+
+ /* Ideally, AFTER a successful lock (both blocking and non-blocking) or
+ * an unsuccessful blocking lock operation, the inode needs to be ref'd.
+ *
+     * But doing so could open a window for a race where the lock-requesting
+     * client sends a DISCONNECT just before this thread refs the inode
+     * after the locking is done, and the epoll thread unrefs the inode
+     * in cleanup, which means the inode's refcount would drop to 0, and
+ * the call to pl_forget() at this point destroys @pl_inode. Now when
+ * the io-thread executing this function tries to access pl_inode,
+ * it could crash on account of illegal memory access.
+ *
+ * To get around this problem, the inode is ref'd once even before
+ * adding the lock into client_list as a precautionary measure.
+ * This way even if there are DISCONNECTs, there will always be 1 extra
+ * ref on the inode, so @pl_inode is still alive until after the
+ * current stack unwinds.
+ */
+ pl_inode->inode = inode_ref(inode);
+
+ if (priv->revocation_secs != 0) {
+ if (lock->fl_type != F_UNLCK) {
+ __inodelk_prune_stale(this, pl_inode, dom, lock);
+ } else if (priv->monkey_unlocking == _gf_true) {
+ if (pl_does_monkey_want_stuck_lock()) {
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ __pl_inodelk_unref(lock);
}
-
-
- retlock = __inode_unlock_lock (this, lock, dom);
- if (!retlock) {
- gf_log (this->name, GF_LOG_DEBUG,
- "Bad Unlock issued on Inode lock");
- ret = -EINVAL;
- goto out;
+ pthread_mutex_unlock(&pl_inode->mutex);
+ inode_unref(pl_inode->inode);
+ gf_log(this->name, GF_LOG_WARNING,
+ "MONKEY LOCKING (forcing stuck lock)!");
+ return 0;
+ }
+ }
+ }
+
+ if (priv->notify_contention) {
+ pcontend = &contend;
+ INIT_LIST_HEAD(pcontend);
+ timespec_now(&now);
+ }
+
+ INIT_LIST_HEAD(&wake);
+
+ if (ctx)
+ pthread_mutex_lock(&ctx->lock);
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ if (lock->fl_type != F_UNLCK) {
+ ret = __lock_inodelk(this, pl_inode, lock, can_block, dom, &now,
+ pcontend);
+ if (ret == 0) {
+ lock->frame = NULL;
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s (pid=%d) (lk-owner=%s) %" PRId64 " - %" PRId64
+ " => OK",
+ lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
+ lock->client_pid, lkowner_utoa(&lock->owner),
+ lock->fl_start, lock->fl_end);
+ } else if (ret == -EAGAIN) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s (pid=%d) (lk-owner=%s) %" PRId64 " - %" PRId64
+ " => NOK",
+ lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
+ lock->client_pid, lkowner_utoa(&lock->owner),
+ lock->user_flock.l_start, lock->user_flock.l_len);
+ if (can_block) {
+ unref = _gf_false;
}
- __destroy_inode_lock (retlock);
-
- ret = 0;
+ }
+            /* For all cases except a successful lock or a blocking lock
+             * attempt that got queued with -EAGAIN, the extra ref taken at
+             * the start of this function must be negated. */
+ need_inode_unref = (ret != 0) && ((ret != -EAGAIN) || !can_block);
+ if (ctx && !need_inode_unref) {
+ list_add_tail(&lock->client_list, &ctx->inodelk_lockers);
+ }
+ } else {
+ /* Irrespective of whether unlock succeeds or not,
+ * the extra inode ref that was done at the start of
+             * this function must be negated. To that end, the
+             * @need_inode_unref flag is set unconditionally here.
+ */
+ need_inode_unref = _gf_true;
+ retlock = __inode_unlock_lock(this, lock, dom);
+ if (!retlock) {
+ gf_log(this->name, GF_LOG_DEBUG,
+ "Bad Unlock issued on Inode lock");
+ ret = -EINVAL;
+ goto out;
+ }
+ list_del_init(&retlock->client_list);
+ __pl_inodelk_unref(retlock);
+ pl_inode_remove_unlocked(this, pl_inode, &wake);
+ ret = 0;
}
-out:
- pthread_mutex_unlock (&pl_inode->mutex);
- grant_blocked_inode_locks (this, pl_inode, dom);
- return ret;
+ out:
+ if (unref)
+ __pl_inodelk_unref(lock);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+ if (ctx)
+ pthread_mutex_unlock(&ctx->lock);
+
+ pl_inode_remove_wake(&wake);
+
+ /* The following (extra) unref corresponds to the ref that
+ * was done at the time the lock was granted.
+ */
+ if ((fl_type == F_UNLCK) && (ret == 0)) {
+ inode_unref(pl_inode->inode);
+ grant_blocked_inode_locks(this, pl_inode, dom, &now, pcontend);
+ }
+
+ if (need_inode_unref) {
+ inode_unref(pl_inode->inode);
+ }
+
+ if (pcontend != NULL) {
+ inodelk_contention_notify(this, pcontend);
+ }
+
+ return ret;
}
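
/* A minimal, single-threaded sketch of the "take a ref before publishing"
 * pattern described in the comment at the top of pl_inode_setlk(); the real
 * code uses inode_ref()/inode_unref() and serializes on pl_inode->mutex.
 * Everything below is illustrative. */
#include <stdlib.h>

struct obj {
    int refcount;
};

static void
obj_ref(struct obj *o)
{
    o->refcount++;
}

static void
obj_unref(struct obj *o)
{
    if (--o->refcount == 0)
        free(o);
}

static int
guarded_operation(struct obj *o, int (*op)(struct obj *))
{
    int ret;

    obj_ref(o);   /* extra ref up front: a concurrent cleanup can no longer
                     drop the last reference while this stack still uses o */
    ret = op(o);
    obj_unref(o); /* negated once this stack no longer needs o (the real code
                     keeps the ref while the lock stays granted or queued) */

    return ret;
}
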
/* Create a new inode_lock_t */
-pl_inode_lock_t *
-new_inode_lock (struct gf_flock *flock, void *transport, pid_t client_pid,
- uint64_t owner, const char *volume)
+static pl_inode_lock_t *
+new_inode_lock(struct gf_flock *flock, client_t *client, pid_t client_pid,
+ call_frame_t *frame, xlator_t *this, const char *volume,
+ char *conn_id, int32_t *op_errno)
{
- pl_inode_lock_t *lock = NULL;
+ pl_inode_lock_t *lock = NULL;
+
+ if (!pl_is_lk_owner_valid(&frame->root->lk_owner, frame->root->client)) {
+ *op_errno = EINVAL;
+ goto out;
+ }
+
+ lock = GF_CALLOC(1, sizeof(*lock), gf_locks_mt_pl_inode_lock_t);
+ if (!lock) {
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ lock->fl_start = flock->l_start;
+ lock->fl_type = flock->l_type;
+
+ if (flock->l_len == 0)
+ lock->fl_end = LLONG_MAX;
+ else
+ lock->fl_end = flock->l_start + flock->l_len - 1;
+
+ lock->client = client;
+ lock->client_pid = client_pid;
+ lock->volume = volume;
+ lock->owner = frame->root->lk_owner;
+ lock->frame = frame;
+ lock->this = this;
+
+ if (conn_id) {
+ lock->connection_id = gf_strdup(conn_id);
+ }
+
+ INIT_LIST_HEAD(&lock->list);
+ INIT_LIST_HEAD(&lock->blocked_locks);
+ INIT_LIST_HEAD(&lock->client_list);
+ INIT_LIST_HEAD(&lock->contend);
+ __pl_inodelk_ref(lock);
- lock = GF_CALLOC (1, sizeof (*lock),
- gf_locks_mt_pl_inode_lock_t);
- if (!lock) {
- return NULL;
- }
+out:
+ return lock;
+}
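
/* A small sketch of the flock-to-internal-range conversion performed by
 * new_inode_lock() above: l_len == 0 means "to the end of the file", and
 * otherwise the last covered byte is start + len - 1. */
#include <limits.h>

static long long
flock_end(long long l_start, long long l_len)
{
    return (l_len == 0) ? LLONG_MAX : l_start + l_len - 1;
}

/* flock_end(0, 0)      == LLONG_MAX   -> whole file
 * flock_end(0, 131072) == 131071      -> first 128KB */
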
- lock->fl_start = flock->l_start;
- lock->fl_type = flock->l_type;
+int32_t
+_pl_convert_volume(const char *volume, char **res)
+{
+ char *mdata_vol = NULL;
+ int ret = 0;
- if (flock->l_len == 0)
- lock->fl_end = LLONG_MAX;
- else
- lock->fl_end = flock->l_start + flock->l_len - 1;
+ mdata_vol = strrchr(volume, ':');
+    // if the volume already ends with :metadata, don't bother
+ if (mdata_vol && (strcmp(mdata_vol, ":metadata") == 0))
+ return 0;
- lock->transport = transport;
- lock->client_pid = client_pid;
- lock->owner = owner;
- lock->volume = volume;
+ ret = gf_asprintf(res, "%s:metadata", volume);
+ if (ret <= 0)
+ return ENOMEM;
+ return 0;
+}
- INIT_LIST_HEAD (&lock->list);
- INIT_LIST_HEAD (&lock->blocked_locks);
+int32_t
+_pl_convert_volume_for_special_range(struct gf_flock *flock, const char *volume,
+ char **res)
+{
+ int32_t ret = 0;
+
+ if ((flock->l_start == LLONG_MAX - 1) && (flock->l_len == 0)) {
+ ret = _pl_convert_volume(volume, res);
+ }
- return lock;
+ return ret;
}
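
/* A sketch of the special-range check driving the conversion above: an
 * inodelk on (LLONG_MAX - 1, len 0) is redirected to the "<volume>:metadata"
 * domain. The helper name is hypothetical. */
#include <limits.h>
#include <stdbool.h>

static bool
is_metadata_range(long long l_start, long long l_len)
{
    return (l_start == LLONG_MAX - 1) && (l_len == 0);
}
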
/* Common inodelk code called from pl_inodelk and pl_finodelk */
int
-pl_common_inodelk (call_frame_t *frame, xlator_t *this,
- const char *volume, inode_t *inode, int32_t cmd,
- struct gf_flock *flock, loc_t *loc, fd_t *fd)
-{
- int32_t op_ret = -1;
- int32_t op_errno = 0;
- int ret = -1;
- int can_block = 0;
- void * transport = NULL;
- pid_t client_pid = -1;
- uint64_t owner = -1;
- pl_inode_t * pinode = NULL;
- pl_inode_lock_t * reqlock = NULL;
- pl_dom_list_t * dom = NULL;
-
- VALIDATE_OR_GOTO (frame, out);
- VALIDATE_OR_GOTO (inode, unwind);
- VALIDATE_OR_GOTO (flock, unwind);
-
- if ((flock->l_start < 0) || (flock->l_len < 0)) {
- op_errno = EINVAL;
- goto unwind;
- }
-
- pl_trace_in (this, frame, fd, loc, cmd, flock, volume);
-
- transport = frame->root->trans;
- client_pid = frame->root->pid;
- owner = frame->root->lk_owner;
-
- pinode = pl_inode_get (this, inode);
- if (!pinode) {
- op_errno = ENOMEM;
- goto unwind;
+pl_common_inodelk(call_frame_t *frame, xlator_t *this, const char *volume,
+ inode_t *inode, int32_t cmd, struct gf_flock *flock,
+ loc_t *loc, fd_t *fd, dict_t *xdata)
+{
+ int32_t op_ret = -1;
+ int32_t op_errno = 0;
+ int ret = -1;
+ GF_UNUSED int dict_ret = -1;
+ int can_block = 0;
+ short lock_type = 0;
+ pl_inode_t *pinode = NULL;
+ pl_inode_lock_t *reqlock = NULL;
+ pl_dom_list_t *dom = NULL;
+ char *res = NULL;
+ char *res1 = NULL;
+ char *conn_id = NULL;
+ pl_ctx_t *ctx = NULL;
+
+ if (xdata)
+ dict_ret = dict_get_str(xdata, "connection-id", &conn_id);
+
+ VALIDATE_OR_GOTO(frame, out);
+ VALIDATE_OR_GOTO(inode, unwind);
+ VALIDATE_OR_GOTO(flock, unwind);
+
+ if ((flock->l_start < 0) || (flock->l_len < 0)) {
+ op_errno = EINVAL;
+ goto unwind;
+ }
+
+ op_errno = _pl_convert_volume_for_special_range(flock, volume, &res);
+ if (op_errno)
+ goto unwind;
+ if (res)
+ volume = res;
+
+ pl_trace_in(this, frame, fd, loc, cmd, flock, volume);
+
+ if (frame->root->client) {
+ ctx = pl_ctx_get(frame->root->client, this);
+ if (!ctx) {
+ op_errno = ENOMEM;
+ gf_log(this->name, GF_LOG_INFO, "pl_ctx_get() failed");
+ goto unwind;
}
+ }
- dom = get_domain (pinode, volume);
+ pinode = pl_inode_get(this, inode, NULL);
+ if (!pinode) {
+ op_errno = ENOMEM;
+ goto unwind;
+ }
- if (owner == 0) {
- /*
- special case: this means release all locks
- from this transport
- */
- gf_log (this->name, GF_LOG_TRACE,
- "Releasing all locks from transport %p", transport);
+ dom = get_domain(pinode, volume);
+ if (!dom) {
+ op_errno = ENOMEM;
+ goto unwind;
+ }
- release_inode_locks_of_transport (this, dom, inode, transport);
-
- op_ret = 0;
- goto unwind;
- }
+ reqlock = new_inode_lock(flock, frame->root->client, frame->root->pid,
+ frame, this, dom->domain, conn_id, &op_errno);
- reqlock = new_inode_lock (flock, transport, client_pid, owner, volume);
+ if (!reqlock) {
+ op_ret = -1;
+ goto unwind;
+ }
- if (!reqlock) {
- op_ret = -1;
- op_errno = ENOMEM;
- goto unwind;
- }
-
- reqlock->frame = frame;
- reqlock->this = this;
-
- switch (cmd) {
+ switch (cmd) {
case F_SETLKW:
- can_block = 1;
- reqlock->frame = frame;
- reqlock->this = this;
+ can_block = 1;
- /* fall through */
+ /* fall through */
case F_SETLK:
- memcpy (&reqlock->user_flock, flock, sizeof (struct gf_flock));
- ret = pl_inode_setlk (this, pinode, reqlock,
- can_block, dom);
-
- if (ret < 0) {
- if ((can_block) && (F_UNLCK != reqlock->fl_type)) {
- pl_trace_block (this, frame, fd, loc,
- cmd, flock, volume);
- goto out;
- }
- gf_log (this->name, GF_LOG_TRACE, "returning EAGAIN");
- op_errno = -ret;
- __destroy_inode_lock (reqlock);
- goto unwind;
+ lock_type = flock->l_type;
+ memcpy(&reqlock->user_flock, flock, sizeof(struct gf_flock));
+ ret = pl_inode_setlk(this, ctx, pinode, reqlock, can_block, dom,
+ inode);
+
+ if (ret < 0) {
+ if (ret == -EAGAIN) {
+ if (can_block && (F_UNLCK != lock_type)) {
+ goto out;
+ }
+ gf_log(this->name, GF_LOG_TRACE, "returning EAGAIN");
+ } else {
+ gf_log(this->name, GF_LOG_TRACE, "returning %d", ret);
}
- break;
+ op_errno = -ret;
+ goto unwind;
+ }
+ break;
default:
- op_errno = ENOTSUP;
- gf_log (this->name, GF_LOG_DEBUG,
- "Lock command F_GETLK not supported for [f]inodelk "
- "(cmd=%d)",
- cmd);
- goto unwind;
- }
+ op_errno = ENOTSUP;
+ gf_log(this->name, GF_LOG_DEBUG,
+ "Lock command F_GETLK not supported for [f]inodelk "
+ "(cmd=%d)",
+ cmd);
+ goto unwind;
+ }
- op_ret = 0;
+ op_ret = 0;
unwind:
- if ((inode != NULL) && (flock !=NULL)) {
- pl_update_refkeeper (this, inode);
- pl_trace_out (this, frame, fd, loc, cmd, flock, op_ret, op_errno, volume);
- }
+ if (flock != NULL)
+ pl_trace_out(this, frame, fd, loc, cmd, flock, op_ret, op_errno,
+ volume);
- STACK_UNWIND_STRICT (inodelk, frame, op_ret, op_errno);
+ STACK_UNWIND_STRICT(inodelk, frame, op_ret, op_errno, NULL);
out:
- return 0;
+ GF_FREE(res);
+ GF_FREE(res1);
+ return 0;
}
int
-pl_inodelk (call_frame_t *frame, xlator_t *this,
- const char *volume, loc_t *loc, int32_t cmd, struct gf_flock *flock)
+pl_inodelk(call_frame_t *frame, xlator_t *this, const char *volume, loc_t *loc,
+ int32_t cmd, struct gf_flock *flock, dict_t *xdata)
{
+ pl_common_inodelk(frame, this, volume, loc->inode, cmd, flock, loc, NULL,
+ xdata);
- pl_common_inodelk (frame, this, volume, loc->inode, cmd, flock, loc, NULL);
-
- return 0;
+ return 0;
}
int
-pl_finodelk (call_frame_t *frame, xlator_t *this,
- const char *volume, fd_t *fd, int32_t cmd, struct gf_flock *flock)
+pl_finodelk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
+ int32_t cmd, struct gf_flock *flock, dict_t *xdata)
{
+ pl_common_inodelk(frame, this, volume, fd->inode, cmd, flock, NULL, fd,
+ xdata);
- pl_common_inodelk (frame, this, volume, fd->inode, cmd, flock, NULL, fd);
-
- return 0;
-
+ return 0;
}
-
static int32_t
-__get_inodelk_count (xlator_t *this, pl_inode_t *pl_inode)
-{
- int32_t count = 0;
- pl_inode_lock_t *lock = NULL;
- pl_dom_list_t *dom = NULL;
-
- list_for_each_entry (dom, &pl_inode->dom_list, inode_list) {
- list_for_each_entry (lock, &dom->inodelk_list, list) {
-
- gf_log (this->name, GF_LOG_DEBUG,
- " XATTR DEBUG"
- " domain: %s %s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" "
- "state = Active",
- dom->domain,
- lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
- lock->client_pid,
- lock->owner,
- lock->user_flock.l_start,
- lock->user_flock.l_len);
-
- count++;
- }
-
- list_for_each_entry (lock, &dom->blocked_inodelks, blocked_locks) {
-
- gf_log (this->name, GF_LOG_DEBUG,
- " XATTR DEBUG"
- " domain: %s %s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" "
- "state = Blocked",
- dom->domain,
- lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
- lock->client_pid,
- lock->owner,
- lock->user_flock.l_start,
- lock->user_flock.l_len);
+__get_inodelk_dom_count(pl_dom_list_t *dom)
+{
+ pl_inode_lock_t *lock = NULL;
+ int32_t count = 0;
+
+ list_for_each_entry(lock, &dom->inodelk_list, list) { count++; }
+ list_for_each_entry(lock, &dom->blocked_inodelks, blocked_locks)
+ {
+ count++;
+ }
+ return count;
+}
- count++;
- }
+/* Returns the no. of locks (blocked/granted) held on a given domain name.
+ * If @domname is NULL, returns the no. of locks in all the domains present.
+ * If @domname is non-NULL and non-existent, returns 0. */
+int32_t
+__get_inodelk_count(xlator_t *this, pl_inode_t *pl_inode, char *domname)
+{
+ int32_t count = 0;
+ pl_dom_list_t *dom = NULL;
+
+ list_for_each_entry(dom, &pl_inode->dom_list, inode_list)
+ {
+ if (domname) {
+ if (strcmp(domname, dom->domain) == 0) {
+ count = __get_inodelk_dom_count(dom);
+ goto out;
+ }
+ } else {
+ /* Counting locks from all domains */
+ count += __get_inodelk_dom_count(dom);
}
+ }
- return count;
+out:
+ return count;
}
int32_t
-get_inodelk_count (xlator_t *this, inode_t *inode)
+get_inodelk_count(xlator_t *this, inode_t *inode, char *domname)
{
- pl_inode_t *pl_inode = NULL;
- uint64_t tmp_pl_inode = 0;
- int ret = 0;
- int32_t count = 0;
+ pl_inode_t *pl_inode = NULL;
+ uint64_t tmp_pl_inode = 0;
+ int ret = 0;
+ int32_t count = 0;
- ret = inode_ctx_get (inode, this, &tmp_pl_inode);
- if (ret != 0) {
- goto out;
- }
+ ret = inode_ctx_get(inode, this, &tmp_pl_inode);
+ if (ret != 0) {
+ goto out;
+ }
- pl_inode = (pl_inode_t *)(long) tmp_pl_inode;
+ pl_inode = (pl_inode_t *)(long)tmp_pl_inode;
- pthread_mutex_lock (&pl_inode->mutex);
- {
- count = __get_inodelk_count (this, pl_inode);
- }
- pthread_mutex_unlock (&pl_inode->mutex);
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ count = __get_inodelk_count(this, pl_inode, domname);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
out:
- return count;
+ return count;
}
diff --git a/xlators/features/locks/src/locks-mem-types.h b/xlators/features/locks/src/locks-mem-types.h
index c4e9e976146..a76605027b3 100644
--- a/xlators/features/locks/src/locks-mem-types.h
+++ b/xlators/features/locks/src/locks-mem-types.h
@@ -1,40 +1,28 @@
/*
- Copyright (c) 2008-2010 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
-
#ifndef __LOCKS_MEM_TYPES_H__
#define __LOCKS_MEM_TYPES_H__
-#include "mem-types.h"
+#include <glusterfs/mem-types.h>
enum gf_locks_mem_types_ {
- gf_locks_mt_pl_dom_list_t = gf_common_mt_end + 1,
- gf_locks_mt_pl_inode_t,
- gf_locks_mt_posix_lock_t,
- gf_locks_mt_pl_entry_lock_t,
- gf_locks_mt_pl_inode_lock_t,
- gf_locks_mt_truncate_ops,
- gf_locks_mt_pl_rw_req_t,
- gf_locks_mt_posix_locks_private_t,
- gf_locks_mt_pl_local_t,
- gf_locks_mt_pl_fdctx_t,
- gf_locks_mt_end
+ gf_locks_mt_pl_dom_list_t = gf_common_mt_end + 1,
+ gf_locks_mt_pl_inode_t,
+ gf_locks_mt_posix_lock_t,
+ gf_locks_mt_pl_entry_lock_t,
+ gf_locks_mt_pl_inode_lock_t,
+ gf_locks_mt_pl_rw_req_t,
+ gf_locks_mt_posix_locks_private_t,
+ gf_locks_mt_pl_fdctx_t,
+ gf_locks_mt_pl_meta_lock_t,
+ gf_locks_mt_end
};
#endif
-
diff --git a/xlators/features/locks/src/locks.h b/xlators/features/locks/src/locks.h
index 5a37c68f6d9..c868eb494a2 100644
--- a/xlators/features/locks/src/locks.h
+++ b/xlators/features/locks/src/locks.h
@@ -1,162 +1,292 @@
/*
- Copyright (c) 2006, 2007, 2008 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
+ Copyright (c) 2006-2012, 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
#ifndef __POSIX_LOCKS_H__
#define __POSIX_LOCKS_H__
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
-
-#include "compat-errno.h"
-#include "stack.h"
-#include "call-stub.h"
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/stack.h>
+#include <glusterfs/call-stub.h>
#include "locks-mem-types.h"
+#include <glusterfs/client_t.h>
+
+#include <glusterfs/lkowner.h>
+
+typedef enum {
+ MLK_NONE,
+ MLK_FILE_BASED,
+ MLK_FORCED,
+ MLK_OPTIMAL
+} mlk_mode_t; /* defines different mandatory locking modes */
-#define POSIX_LOCKS "posix-locks"
struct __pl_fd;
struct __posix_lock {
- struct list_head list;
+ struct list_head list;
+
+ off_t fl_start;
+ off_t fl_end;
+ uint32_t lk_flags;
+
+ short fl_type;
+ short blocked; /* waiting to acquire */
+ struct gf_flock user_flock; /* the flock supplied by the user */
+ xlator_t *this; /* required for blocked locks */
+ unsigned long fd_num;
- short fl_type;
- off_t fl_start;
- off_t fl_end;
+ fd_t *fd;
+ call_frame_t *frame;
- short blocked; /* waiting to acquire */
- struct gf_flock user_flock; /* the flock supplied by the user */
- xlator_t *this; /* required for blocked locks */
- unsigned long fd_num;
+ time_t blkd_time; /* time at which lock was queued into blkd list */
+ time_t granted_time; /* time at which lock was queued into active list */
- fd_t *fd;
- call_frame_t *frame;
+ /* These two together serve to uniquely identify each process
+ across nodes */
- /* These two together serve to uniquely identify each process
- across nodes */
+ void *client; /* to identify client node */
- void *transport; /* to identify client node */
- pid_t client_pid; /* pid of client process */
- uint64_t owner; /* lock owner from fuse */
+ /* This field uniquely identifies the client the lock belongs to. As
+ * lock migration is handled by rebalance, the client_t object will be
+ * overwritten by rebalance and can't be deemed as the owner of the
+ * lock on destination. Hence, the below field is migrated from
+ * source to destination by lock_migration_info_t and updated on the
+     * destination, so that on client-server disconnection, the server can
+     * clean up the locks properly. */
+
+ char *client_uid;
+ gf_lkowner_t owner;
+ pid_t client_pid; /* pid of client process */
+
+ int blocking;
};
typedef struct __posix_lock posix_lock_t;
struct __pl_inode_lock {
- struct list_head list;
- struct list_head blocked_locks; /* list_head pointing to blocked_inodelks */
+ struct list_head list;
+ struct list_head blocked_locks; /* list_head pointing to blocked_inodelks */
+ struct list_head contend; /* list of contending locks */
+ int ref;
+
+ off_t fl_start;
+ off_t fl_end;
+
+ const char *volume;
+
+ struct gf_flock user_flock; /* the flock supplied by the user */
+ xlator_t *this; /* required for blocked locks */
+ struct __pl_inode *pl_inode;
+
+ call_frame_t *frame;
+
+ time_t blkd_time; /* time at which lock was queued into blkd list */
+ time_t granted_time; /* time at which lock was queued into active list */
- short fl_type;
- off_t fl_start;
- off_t fl_end;
+ /*last time at which lock contention was detected and notified*/
+ struct timespec contention_time;
- const char *volume;
+ /* These two together serve to uniquely identify each process
+ across nodes */
- struct gf_flock user_flock; /* the flock supplied by the user */
- xlator_t *this; /* required for blocked locks */
- fd_t *fd;
+ void *client; /* to identify client node */
+ gf_lkowner_t owner;
+ pid_t client_pid; /* pid of client process */
- call_frame_t *frame;
+ char *connection_id; /* stores the client connection id */
- /* These two together serve to uniquely identify each process
- across nodes */
+ struct list_head client_list; /* list of all locks from a client */
+ short fl_type;
- void *transport; /* to identify client node */
- pid_t client_pid; /* pid of client process */
- uint64_t owner;
+ int32_t status; /* Error code when we try to grant a lock in blocked
+ state */
};
typedef struct __pl_inode_lock pl_inode_lock_t;
-struct __pl_rw_req_t {
- struct list_head list;
- call_stub_t *stub;
- posix_lock_t region;
+struct _pl_rw_req {
+ struct list_head list;
+ call_stub_t *stub;
+ posix_lock_t region;
};
-typedef struct __pl_rw_req_t pl_rw_req_t;
-
-struct __pl_dom_list_t {
- struct list_head inode_list; /* list_head back to pl_inode_t */
- const char *domain;
- struct list_head entrylk_list; /* List of entry locks */
- struct list_head blocked_entrylks; /* List of all blocked entrylks */
- struct list_head inodelk_list; /* List of inode locks */
- struct list_head blocked_inodelks; /* List of all blocked inodelks */
+typedef struct _pl_rw_req pl_rw_req_t;
+
+struct _pl_dom_list {
+ struct list_head inode_list; /* list_head back to pl_inode_t */
+ const char *domain;
+ struct list_head entrylk_list; /* List of entry locks */
+ struct list_head blocked_entrylks; /* List of all blocked entrylks */
+ struct list_head inodelk_list; /* List of inode locks */
+ struct list_head blocked_inodelks; /* List of all blocked inodelks */
};
-typedef struct __pl_dom_list_t pl_dom_list_t;
+typedef struct _pl_dom_list pl_dom_list_t;
struct __entry_lock {
- struct list_head domain_list; /* list_head back to pl_dom_list_t */
- struct list_head blocked_locks; /* list_head back to blocked_entrylks */
+ struct list_head domain_list; /* list_head back to pl_dom_list_t */
+ struct list_head blocked_locks; /* list_head back to blocked_entrylks */
+ struct list_head contend; /* list of contending locks */
+ int ref;
- call_frame_t *frame;
- xlator_t *this;
+ call_frame_t *frame;
+ xlator_t *this;
+ struct __pl_inode *pinode;
- const char *volume;
+ const char *volume;
- const char *basename;
- entrylk_type type;
+ const char *basename;
- void *trans;
- pid_t client_pid; /* pid of client process */
- uint64_t owner;
+ time_t blkd_time; /* time at which lock was queued into blkd list */
+ time_t granted_time; /* time at which lock was queued into active list */
+
+ /*last time at which lock contention was detected and notified*/
+ struct timespec contention_time;
+
+ void *client;
+ gf_lkowner_t owner;
+ pid_t client_pid; /* pid of client process */
+
+ char *connection_id; /* stores the client connection id */
+
+ struct list_head client_list; /* list of all locks from a client */
+ entrylk_type type;
};
typedef struct __entry_lock pl_entry_lock_t;
-
/* The "simulated" inode. This contains a list of all the locks associated
with this file */
struct __pl_inode {
- pthread_mutex_t mutex;
-
- struct list_head dom_list; /* list of domains */
- struct list_head ext_list; /* list of fcntl locks */
- struct list_head rw_list; /* list of waiting r/w requests */
- struct list_head reservelk_list; /* list of reservelks */
- struct list_head blocked_reservelks; /* list of blocked reservelks */
- struct list_head blocked_calls; /* List of blocked lock calls while a reserve is held*/
- int mandatory; /* if mandatory locking is enabled */
-
- inode_t *refkeeper; /* hold refs on an inode while locks are
- held to prevent pruning */
+ pthread_mutex_t mutex;
+
+ struct list_head dom_list; /* list of domains */
+ struct list_head ext_list; /* list of fcntl locks */
+ struct list_head rw_list; /* list of waiting r/w requests */
+ struct list_head reservelk_list; /* list of reservelks */
+ struct list_head blocked_reservelks; /* list of blocked reservelks */
+ struct list_head blocked_calls; /* List of blocked lock calls while a
+ reserve is held*/
+ struct list_head metalk_list; /* Meta lock list */
+ struct list_head queued_locks; /* This is to store the incoming lock
+ requests while meta lock is enabled */
+ struct list_head waiting; /* List of pending fops waiting to unlink/rmdir
+ the inode. */
+ int mandatory; /* if mandatory locking is enabled */
+
+ inode_t *refkeeper; /* hold refs on an inode while locks are
+ held to prevent pruning */
+ uuid_t gfid; /* placeholder for gfid of the inode */
+ inode_t *inode; /* pointer to be used for ref and unref
+ of inode_t as long as there are
+ locks on it */
+ gf_boolean_t migrated;
+
+ /* Flag to indicate whether to read mlock-enforce xattr from disk */
+ gf_boolean_t check_mlock_info;
+
+ /* Mandatory_lock enforce: IO will be allowed if and only if the lkowner has
+ held the lock.
+
+ Note: An xattr is set on the file to recover this information post
+ reboot. If client does not want mandatory lock to be enforced, then it
+ should remove this xattr explicitly
+ */
+ gf_boolean_t mlock_enforced;
+ /* There are scenarios where mandatory lock is granted but there are IOs
+ pending at posix level. To avoid this before preempting the previous lock
+ owner, we wait for all the fops to be unwound.
+ */
+ int fop_wind_count;
+ pthread_cond_t check_fop_wind_count;
+
+ gf_boolean_t track_fop_wind_count;
+
+ int32_t links; /* Number of hard links the inode has. */
+ uint32_t remove_running; /* Number of remove operations running. */
+ gf_boolean_t is_locked; /* Regular locks will be blocked. */
+ gf_boolean_t removed; /* The inode has been deleted. */
};
typedef struct __pl_inode pl_inode_t;
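The fop_wind_count / check_fop_wind_count pair above is what lets a new mandatory-lock owner wait for in-flight IO to drain before being granted. A minimal sketch of that wait, matching the broadcast done in pl_track_io_fop_count() further down in this diff (the function name here is an assumption, not code from this patch):

/* Hypothetical sketch: block until every wound fop on this inode has been
 * unwound, so the previous owner is not preempted over pending IO. */
static void
pl_wait_for_inflight_fops(pl_inode_t *pl_inode)
{
    pthread_mutex_lock(&pl_inode->mutex);
    {
        pl_inode->track_fop_wind_count = _gf_true;
        while (pl_inode->fop_wind_count > 0)
            pthread_cond_wait(&pl_inode->check_fop_wind_count,
                              &pl_inode->mutex);
    }
    pthread_mutex_unlock(&pl_inode->mutex);
}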
+struct __pl_metalk {
+ pthread_mutex_t mutex;
+ /* For pl_inode meta lock list */
+ struct list_head list;
+ /* For pl_ctx_t list */
+ struct list_head client_list;
+ char *client_uid;
-struct __pl_fd {
- gf_boolean_t nonblocking; /* whether O_NONBLOCK has been set */
+ pl_inode_t *pl_inode;
+ int ref;
};
-typedef struct __pl_fd pl_fd_t;
-
+typedef struct __pl_metalk pl_meta_lock_t;
typedef struct {
- gf_boolean_t mandatory; /* if mandatory locking is enabled */
- gf_boolean_t trace; /* trace lock requests in and out */
+ char *brickname;
+ uint32_t revocation_secs;
+ uint32_t revocation_max_blocked;
+ uint32_t notify_contention_delay;
+ mlk_mode_t mandatory_mode; /* holds current mandatory locking mode */
+ gf_boolean_t trace; /* trace lock requests in and out */
+ gf_boolean_t monkey_unlocking;
+ gf_boolean_t revocation_clear_all;
+ gf_boolean_t notify_contention;
+ gf_boolean_t mlock_enforced;
} posix_locks_private_t;
typedef struct {
- gf_boolean_t entrylk_count_req;
- gf_boolean_t inodelk_count_req;
- gf_boolean_t posixlk_count_req;
+ data_t *inodelk_dom_count_req;
+
+ dict_t *xdata;
+ loc_t loc[2];
+ fd_t *fd;
+ inode_t *inode;
+ off_t offset;
+ glusterfs_fop_t op;
+ gf_boolean_t entrylk_count_req;
+ gf_boolean_t inodelk_count_req;
+ gf_boolean_t posixlk_count_req;
+ gf_boolean_t parent_entrylk_req;
+ gf_boolean_t multiple_dom_lk_requests;
+ int update_mlock_enforced_flag;
} pl_local_t;
typedef struct {
- struct list_head locks_list;
+ struct list_head locks_list;
} pl_fdctx_t;
+struct _locker {
+ struct list_head lockers;
+ char *volume;
+ inode_t *inode;
+ gf_lkowner_t owner;
+};
+
+typedef struct _locks_ctx {
+ pthread_mutex_t lock;
+ struct list_head inodelk_lockers;
+ struct list_head entrylk_lockers;
+ struct list_head metalk_list;
+} pl_ctx_t;
+
+typedef struct _multi_dom_lk_data {
+ xlator_t *this;
+ inode_t *inode;
+ dict_t *xdata_rsp;
+ gf_boolean_t keep_max;
+} multi_dom_lk_data;
+
+typedef enum { DECREMENT, INCREMENT } pl_count_op_t;
+
+pl_ctx_t *
+pl_ctx_get(client_t *client, xlator_t *xlator);
+
+int
+pl_inodelk_client_cleanup(xlator_t *this, pl_ctx_t *ctx);
+
+int
+pl_entrylk_client_cleanup(xlator_t *this, pl_ctx_t *ctx);
+
#endif /* __POSIX_LOCKS_H__ */
diff --git a/xlators/features/locks/src/pl-messages.h b/xlators/features/locks/src/pl-messages.h
new file mode 100644
index 00000000000..e2d3d7ca974
--- /dev/null
+++ b/xlators/features/locks/src/pl-messages.h
@@ -0,0 +1,29 @@
+/*
+ Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _PL_MESSAGES_H_
+#define _PL_MESSAGES_H_
+
+#include <glusterfs/glfs-message-id.h>
+
+/* To add new message IDs, append new identifiers at the end of the list.
+ *
+ * Never remove a message ID. If it's not used anymore, you can rename it or
+ * leave it as it is, but not delete it. This is to prevent reutilization of
+ * IDs by other messages.
+ *
+ * The component name must match one of the entries defined in
+ * glfs-message-id.h.
+ */
+
+GLFS_MSGID(PL, PL_MSG_LOCK_NUMBER, PL_MSG_INODELK_CONTENTION_FAILED,
+ PL_MSG_ENTRYLK_CONTENTION_FAILED);
+
+#endif /* !_PL_MESSAGES_H_ */
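For context, the IDs generated by GLFS_MSGID() above are meant to be passed to gf_msg() in the same way posix.c does below; an illustrative call (message text and errno chosen here only as an example):

gf_msg(this->name, GF_LOG_WARNING, ENOMEM, PL_MSG_INODELK_CONTENTION_FAILED,
       "Failed to send inodelk contention notification");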
diff --git a/xlators/features/locks/src/posix.c b/xlators/features/locks/src/posix.c
index f8f26489926..cf0ae4c57dd 100644
--- a/xlators/features/locks/src/posix.c
+++ b/xlators/features/locks/src/posix.c
@@ -1,1926 +1,5095 @@
/*
- Copyright (c) 2006, 2007, 2008 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
+ Copyright (c) 2006-2012, 2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
#include <unistd.h>
#include <fcntl.h>
#include <limits.h>
#include <pthread.h>
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
-
-#include "glusterfs.h"
-#include "compat.h"
-#include "xlator.h"
-#include "inode.h"
-#include "logging.h"
-#include "common-utils.h"
+#include <glusterfs/compat.h>
+#include <glusterfs/logging.h>
#include "locks.h"
#include "common.h"
-#include "statedump.h"
+#include <glusterfs/statedump.h>
+#include "clear.h"
+#include <glusterfs/defaults.h>
+#include <glusterfs/syncop.h>
#ifndef LLONG_MAX
#define LLONG_MAX LONG_LONG_MAX /* compat with old gcc */
-#endif /* LLONG_MAX */
+#endif /* LLONG_MAX */
/* Forward declarations */
+void
+do_blocked_rw(pl_inode_t *);
+static int
+__rw_allowable(pl_inode_t *, posix_lock_t *, glusterfs_fop_t);
+static int
+format_brickname(char *);
+int
+pl_lockinfo_get_brickname(xlator_t *, inode_t *, int32_t *);
+static int
+fetch_pathinfo(xlator_t *, inode_t *, int32_t *, char **);
-void do_blocked_rw (pl_inode_t *);
-static int __rw_allowable (pl_inode_t *, posix_lock_t *, glusterfs_fop_t);
+/*
+ * The client is always requesting data, but older
+ * servers were not returning it. Newer ones are, so
+ * the client is receiving a mix of NULL and non-NULL
+ * xdata in the answers when bricks are of different
+ * versions. This triggers a bug in older clients.
+ * To prevent that, we avoid returning extra xdata to
+ * older clients (making the newer brick behave as
+ * an old brick).
+ */
+#define PL_STACK_UNWIND_FOR_CLIENT(fop, xdata, frame, op_ret, params...) \
+ do { \
+ pl_local_t *__local = NULL; \
+ if (frame->root->client && \
+ (frame->root->client->opversion < GD_OP_VERSION_3_10_0)) { \
+ __local = frame->local; \
+ PL_STACK_UNWIND_AND_FREE(__local, fop, frame, op_ret, params); \
+ } else { \
+ PL_STACK_UNWIND(fop, xdata, frame, op_ret, params); \
+ } \
+ } while (0)
+
+#define PL_STACK_UNWIND(fop, xdata, frame, op_ret, params...) \
+ do { \
+ pl_local_t *__local = NULL; \
+ inode_t *__parent = NULL; \
+ inode_t *__inode = NULL; \
+ char *__name = NULL; \
+ dict_t *__unref = NULL; \
+ int __i = 0; \
+ __local = frame->local; \
+ if (op_ret >= 0 && pl_needs_xdata_response(frame->local)) { \
+ if (xdata) \
+ dict_ref(xdata); \
+ else \
+ xdata = dict_new(); \
+ if (xdata) { \
+ __unref = xdata; \
+ while (__local->fd || __local->loc[__i].inode) { \
+ pl_get_xdata_rsp_args(__local, #fop, &__parent, &__inode, \
+ &__name, __i); \
+ pl_set_xdata_response(frame->this, __local, __parent, \
+ __inode, __name, xdata, __i > 0); \
+ if (__local->fd || __i == 1) \
+ break; \
+ __i++; \
+ } \
+ } \
+ } \
+ PL_STACK_UNWIND_AND_FREE(__local, fop, frame, op_ret, params); \
+ if (__unref) \
+ dict_unref(__unref); \
+ } while (0)
+
+#define PL_LOCAL_GET_REQUESTS(frame, this, xdata, __fd, __loc, __newloc) \
+ do { \
+ if (pl_has_xdata_requests(xdata)) { \
+ if (!frame->local) \
+ frame->local = mem_get0(this->local_pool); \
+ pl_local_t *__local = frame->local; \
+ if (__local) { \
+ if (__fd) { \
+ __local->fd = fd_ref(__fd); \
+ __local->inode = inode_ref(__fd->inode); \
+ } else { \
+ if (__loc) \
+ loc_copy(&__local->loc[0], __loc); \
+ if (__newloc) \
+ loc_copy(&__local->loc[1], __newloc); \
+ __local->inode = inode_ref(__local->loc[0].inode); \
+ } \
+ pl_get_xdata_requests(__local, xdata); \
+ } \
+ } \
+ } while (0)
+
+#define PL_CHECK_LOCK_ENFORCE_KEY(frame, dict, name, this, loc, fd, priv) \
+ do { \
+ if ((dict && (dict_get(dict, GF_ENFORCE_MANDATORY_LOCK))) || \
+ (name && (strcmp(name, GF_ENFORCE_MANDATORY_LOCK) == 0))) { \
+ inode_t *__inode = (loc ? loc->inode : fd->inode); \
+ pl_inode_t *__pl_inode = pl_inode_get(this, __inode, NULL); \
+ if (__pl_inode == NULL) { \
+ op_ret = -1; \
+ op_errno = ENOMEM; \
+ goto unwind; \
+ } \
+ if (!pl_is_mandatory_locking_enabled(__pl_inode) || \
+ !priv->mlock_enforced) { \
+ op_ret = -1; \
+ gf_msg(this->name, GF_LOG_DEBUG, EINVAL, 0, \
+ "option %s would need mandatory lock to be enabled " \
+ "and feature.enforce-mandatory-lock option to be set " \
+ "to on", \
+ GF_ENFORCE_MANDATORY_LOCK); \
+ op_errno = EINVAL; \
+ goto unwind; \
+ } \
+ \
+ op_ret = pl_local_init(frame, this, loc, fd); \
+ if (op_ret) { \
+ op_errno = ENOMEM; \
+ goto unwind; \
+ } \
+ \
+ ((pl_local_t *)(frame->local))->update_mlock_enforced_flag = 1; \
+ } \
+ } while (0)
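What triggers the check above is simply a (f)setxattr whose dict carries the GF_ENFORCE_MANDATORY_LOCK key. A rough caller-side sketch, illustrative only (subvol and loc are assumed to be set up by the caller; the int32 value is arbitrary since the macro only tests for the key's presence):

/* Hypothetical sketch: request mandatory-lock enforcement on one file. */
int ret = -1;
dict_t *xattr = dict_new();

if (xattr && dict_set_int32(xattr, GF_ENFORCE_MANDATORY_LOCK, 1) == 0)
    ret = syncop_setxattr(subvol, &loc, xattr, 0, NULL, NULL);
if (xattr)
    dict_unref(xattr);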
+
+#define PL_INODE_REMOVE(_fop, _frame, _xl, _loc1, _loc2, _cont, _cbk, \
+ _args...) \
+ ({ \
+ struct list_head contend; \
+ pl_inode_t *__pl_inode; \
+ call_stub_t *__stub; \
+ int32_t __error; \
+ INIT_LIST_HEAD(&contend); \
+ __error = pl_inode_remove_prepare(_xl, _frame, _loc2 ? _loc2 : _loc1, \
+ &__pl_inode, &contend); \
+ if (__error < 0) { \
+ __stub = fop_##_fop##_stub(_frame, _cont, ##_args); \
+ __error = pl_inode_remove_complete(_xl, __pl_inode, __stub, \
+ &contend); \
+ } else if (__error == 0) { \
+ PL_LOCAL_GET_REQUESTS(_frame, _xl, xdata, ((fd_t *)NULL), _loc1, \
+ _loc2); \
+ STACK_WIND_COOKIE(_frame, _cbk, __pl_inode, FIRST_CHILD(_xl), \
+ FIRST_CHILD(_xl)->fops->_fop, ##_args); \
+ } \
+ __error; \
+ })
+
+gf_boolean_t
+pl_has_xdata_requests(dict_t *xdata)
+{
+ static char *reqs[] = {GLUSTERFS_ENTRYLK_COUNT,
+ GLUSTERFS_INODELK_COUNT,
+ GLUSTERFS_INODELK_DOM_COUNT,
+ GLUSTERFS_POSIXLK_COUNT,
+ GLUSTERFS_PARENT_ENTRYLK,
+ GLUSTERFS_MULTIPLE_DOM_LK_CNT_REQUESTS,
+ NULL};
+ static int reqs_size[] = {SLEN(GLUSTERFS_ENTRYLK_COUNT),
+ SLEN(GLUSTERFS_INODELK_COUNT),
+ SLEN(GLUSTERFS_INODELK_DOM_COUNT),
+ SLEN(GLUSTERFS_POSIXLK_COUNT),
+ SLEN(GLUSTERFS_PARENT_ENTRYLK),
+ SLEN(GLUSTERFS_MULTIPLE_DOM_LK_CNT_REQUESTS),
+ 0};
+ int i = 0;
+
+ if (!xdata)
+ return _gf_false;
+
+ for (i = 0; reqs[i]; i++)
+ if (dict_getn(xdata, reqs[i], reqs_size[i]))
+ return _gf_true;
+
+ return _gf_false;
+}
-struct _truncate_ops {
- loc_t loc;
- fd_t *fd;
- off_t offset;
- enum {TRUNCATE, FTRUNCATE} op;
-};
+static int
+dict_delete_domain_key(dict_t *dict, char *key, data_t *value, void *data)
+{
+ dict_del(dict, key);
+ return 0;
+}
-static pl_fdctx_t *
-pl_new_fdctx ()
+void
+pl_get_xdata_requests(pl_local_t *local, dict_t *xdata)
{
- pl_fdctx_t *fdctx = NULL;
+ if (!local || !xdata)
+ return;
+
+ GF_ASSERT(local->xdata == NULL);
+ local->xdata = dict_copy_with_ref(xdata, NULL);
+
+ if (dict_get_sizen(xdata, GLUSTERFS_ENTRYLK_COUNT)) {
+ local->entrylk_count_req = 1;
+ dict_del_sizen(xdata, GLUSTERFS_ENTRYLK_COUNT);
+ }
+ if (dict_get_sizen(xdata, GLUSTERFS_INODELK_COUNT)) {
+ local->inodelk_count_req = 1;
+ dict_del_sizen(xdata, GLUSTERFS_INODELK_COUNT);
+ }
+ if (dict_get_sizen(xdata, GLUSTERFS_MULTIPLE_DOM_LK_CNT_REQUESTS)) {
+ local->multiple_dom_lk_requests = 1;
+ dict_del_sizen(xdata, GLUSTERFS_MULTIPLE_DOM_LK_CNT_REQUESTS);
+ dict_foreach_fnmatch(xdata, GLUSTERFS_INODELK_DOM_PREFIX "*",
+ dict_delete_domain_key, NULL);
+ }
+
+ local->inodelk_dom_count_req = dict_get_sizen(xdata,
+ GLUSTERFS_INODELK_DOM_COUNT);
+ if (local->inodelk_dom_count_req) {
+ data_ref(local->inodelk_dom_count_req);
+ dict_del_sizen(xdata, GLUSTERFS_INODELK_DOM_COUNT);
+ }
+
+ if (dict_get_sizen(xdata, GLUSTERFS_POSIXLK_COUNT)) {
+ local->posixlk_count_req = 1;
+ dict_del_sizen(xdata, GLUSTERFS_POSIXLK_COUNT);
+ }
+
+ if (dict_get_sizen(xdata, GLUSTERFS_PARENT_ENTRYLK)) {
+ local->parent_entrylk_req = 1;
+ dict_del_sizen(xdata, GLUSTERFS_PARENT_ENTRYLK);
+ }
+}
- fdctx = GF_CALLOC (1, sizeof (*fdctx),
- gf_locks_mt_pl_fdctx_t);
- GF_VALIDATE_OR_GOTO (POSIX_LOCKS, fdctx, out);
+gf_boolean_t
+pl_needs_xdata_response(pl_local_t *local)
+{
+ if (!local)
+ return _gf_false;
- INIT_LIST_HEAD (&fdctx->locks_list);
+ if (local->parent_entrylk_req || local->entrylk_count_req ||
+ local->inodelk_dom_count_req || local->inodelk_count_req ||
+ local->posixlk_count_req || local->multiple_dom_lk_requests)
+ return _gf_true;
-out:
- return fdctx;
+ return _gf_false;
}
-static pl_fdctx_t *
-pl_check_n_create_fdctx (xlator_t *this, fd_t *fd)
+void
+pl_get_xdata_rsp_args(pl_local_t *local, char *fop, inode_t **parent,
+ inode_t **inode, char **name, int i)
+{
+ if (strcmp(fop, "lookup") == 0) {
+ *parent = local->loc[0].parent;
+ *inode = local->loc[0].inode;
+ *name = (char *)local->loc[0].name;
+ } else {
+ if (local->fd) {
+ *inode = local->fd->inode;
+ } else {
+ *inode = local->loc[i].parent;
+ }
+ }
+}
+
+static inline int
+pl_track_io_fop_count(pl_local_t *local, xlator_t *this, pl_count_op_t op)
{
- int ret = 0;
- uint64_t tmp = 0;
- pl_fdctx_t *fdctx = NULL;
+ pl_inode_t *pl_inode = NULL;
- GF_VALIDATE_OR_GOTO (POSIX_LOCKS, this, out);
- GF_VALIDATE_OR_GOTO (this->name, fd, out);
+ if (!local)
+ return -1;
- LOCK (&fd->lock);
- {
- ret = __fd_ctx_get (fd, this, &tmp);
- if ((ret != 0) || (tmp == 0)) {
- fdctx = pl_new_fdctx ();
- if (fdctx == NULL) {
- goto unlock;
- }
- }
+ pl_inode = pl_inode_get(this, local->inode, NULL);
+ if (!pl_inode)
+ return -1;
- ret = __fd_ctx_set (fd, this, (uint64_t)(long)fdctx);
- if (ret != 0) {
- GF_FREE (fdctx);
- fdctx = NULL;
- gf_log (this->name, GF_LOG_DEBUG,
- "failed to set fd ctx");
+ if (pl_inode->mlock_enforced && pl_inode->track_fop_wind_count) {
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ if (op == DECREMENT) {
+ pl_inode->fop_wind_count--;
+ /* fop_wind_count can go negative when lock enforcement is
+                 * enabled on unwind path of an IO. Hence the "<" comparison.
+ */
+ if (pl_inode->fop_wind_count <= 0) {
+ pthread_cond_broadcast(&pl_inode->check_fop_wind_count);
+ pl_inode->track_fop_wind_count = _gf_false;
+ pl_inode->fop_wind_count = 0;
}
+ } else {
+ pl_inode->fop_wind_count++;
+ }
}
-unlock:
- UNLOCK (&fd->lock);
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
-out:
- return fdctx;
+ return 0;
}
-int
-pl_truncate_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf)
+static int32_t
+__get_posixlk_count(pl_inode_t *pl_inode)
{
- struct _truncate_ops *local = NULL;
+ posix_lock_t *lock = NULL;
+ int32_t count = 0;
- local = frame->local;
+ list_for_each_entry(lock, &pl_inode->ext_list, list) { count++; }
- if (local->op == TRUNCATE)
- loc_wipe (&local->loc);
+ return count;
+}
- STACK_UNWIND_STRICT (truncate, frame, op_ret, op_errno,
- prebuf, postbuf);
- return 0;
+int32_t
+get_posixlk_count(xlator_t *this, inode_t *inode)
+{
+ pl_inode_t *pl_inode = NULL;
+ uint64_t tmp_pl_inode = 0;
+ int32_t count = 0;
+
+ int ret = inode_ctx_get(inode, this, &tmp_pl_inode);
+ if (ret != 0) {
+ goto out;
+ }
+
+ pl_inode = (pl_inode_t *)(long)tmp_pl_inode;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ count = __get_posixlk_count(pl_inode);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+out:
+ return count;
}
+void
+pl_parent_entrylk_xattr_fill(xlator_t *this, inode_t *parent, char *basename,
+ dict_t *dict, gf_boolean_t keep_max)
+{
+ int32_t entrylk = 0;
+ int32_t maxcount = -1;
+ int ret = -1;
+
+ if (!parent || !basename)
+ goto out;
+ if (keep_max) {
+ ret = dict_get_int32_sizen(dict, GLUSTERFS_PARENT_ENTRYLK, &maxcount);
+ if (ret < 0)
+ gf_msg_debug(this->name, 0, " Failed to fetch the value for key %s",
+ GLUSTERFS_PARENT_ENTRYLK);
+ }
+ entrylk = check_entrylk_on_basename(this, parent, basename);
+ if (maxcount >= entrylk)
+ return;
+out:
+ ret = dict_set_int32_sizen(dict, GLUSTERFS_PARENT_ENTRYLK, entrylk);
+ if (ret < 0) {
+ gf_msg_debug(this->name, 0, " dict_set failed on key %s",
+ GLUSTERFS_PARENT_ENTRYLK);
+ }
+}
-static int
-truncate_allowed (pl_inode_t *pl_inode,
- void *transport, pid_t client_pid,
- uint64_t owner, off_t offset)
+void
+pl_entrylk_xattr_fill(xlator_t *this, inode_t *inode, dict_t *dict,
+ gf_boolean_t keep_max)
{
- posix_lock_t *l = NULL;
- posix_lock_t region = {.list = {0, }, };
- int ret = 1;
+ int32_t count = 0;
+ int32_t maxcount = -1;
+ int ret = -1;
+
+ if (keep_max) {
+ ret = dict_get_int32_sizen(dict, GLUSTERFS_ENTRYLK_COUNT, &maxcount);
+ if (ret < 0)
+ gf_msg_debug(this->name, 0, " Failed to fetch the value for key %s",
+ GLUSTERFS_ENTRYLK_COUNT);
+ }
+ count = get_entrylk_count(this, inode);
+ if (maxcount >= count)
+ return;
- region.fl_start = offset;
- region.fl_end = LLONG_MAX;
- region.transport = transport;
- region.client_pid = client_pid;
- region.owner = owner;
+ ret = dict_set_int32_sizen(dict, GLUSTERFS_ENTRYLK_COUNT, count);
+ if (ret < 0) {
+ gf_msg_debug(this->name, 0, " dict_set failed on key %s",
+ GLUSTERFS_ENTRYLK_COUNT);
+ }
+}
- pthread_mutex_lock (&pl_inode->mutex);
- {
- list_for_each_entry (l, &pl_inode->ext_list, list) {
- if (!l->blocked
- && locks_overlap (&region, l)
- && !same_owner (&region, l)) {
- ret = 0;
- gf_log (POSIX_LOCKS, GF_LOG_TRACE, "Truncate "
- "allowed");
- break;
- }
- }
- }
- pthread_mutex_unlock (&pl_inode->mutex);
+void
+pl_inodelk_xattr_fill(xlator_t *this, inode_t *inode, dict_t *dict,
+ char *domname, gf_boolean_t keep_max)
+{
+ int32_t count = 0;
+ int32_t maxcount = -1;
+ int ret = -1;
+
+ if (keep_max) {
+ ret = dict_get_int32_sizen(dict, GLUSTERFS_INODELK_COUNT, &maxcount);
+ if (ret < 0)
+ gf_msg_debug(this->name, 0, " Failed to fetch the value for key %s",
+ GLUSTERFS_INODELK_COUNT);
+ }
+ count = get_inodelk_count(this, inode, domname);
+ if (maxcount >= count)
+ return;
- return ret;
+ ret = dict_set_int32_sizen(dict, GLUSTERFS_INODELK_COUNT, count);
+ if (ret < 0) {
+ gf_msg_debug(this->name, 0,
+ "Failed to set count for "
+ "key %s",
+ GLUSTERFS_INODELK_COUNT);
+ }
+
+ return;
}
+void
+pl_posixlk_xattr_fill(xlator_t *this, inode_t *inode, dict_t *dict,
+ gf_boolean_t keep_max)
+{
+ int32_t count = 0;
+ int32_t maxcount = -1;
+ int ret = -1;
+
+ if (keep_max) {
+ ret = dict_get_int32_sizen(dict, GLUSTERFS_POSIXLK_COUNT, &maxcount);
+ if (ret < 0)
+ gf_msg_debug(this->name, 0, " Failed to fetch the value for key %s",
+ GLUSTERFS_POSIXLK_COUNT);
+ }
+ count = get_posixlk_count(this, inode);
+ if (maxcount >= count)
+ return;
+
+ ret = dict_set_int32_sizen(dict, GLUSTERFS_POSIXLK_COUNT, count);
+ if (ret < 0) {
+ gf_msg_debug(this->name, 0, " dict_set failed on key %s",
+ GLUSTERFS_POSIXLK_COUNT);
+ }
+}
+
+void
+pl_inodelk_xattr_fill_each(xlator_t *this, inode_t *inode, dict_t *dict,
+ char *domname, gf_boolean_t keep_max, char *key)
+{
+ int32_t count = 0;
+ int32_t maxcount = -1;
+ int ret = -1;
+
+ if (keep_max) {
+ ret = dict_get_int32(dict, key, &maxcount);
+ if (ret < 0)
+ gf_msg_debug(this->name, 0, " Failed to fetch the value for key %s",
+ GLUSTERFS_INODELK_COUNT);
+ }
+ count = get_inodelk_count(this, inode, domname);
+ if (maxcount >= count)
+ return;
+
+ ret = dict_set_int32(dict, key, count);
+ if (ret < 0) {
+ gf_msg_debug(this->name, 0,
+ "Failed to set count for "
+ "key %s",
+ key);
+ }
+
+ return;
+}
static int
-truncate_stat_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *buf)
+pl_inodelk_xattr_fill_multiple(dict_t *this, char *key, data_t *value,
+ void *data)
{
- posix_locks_private_t *priv = NULL;
- struct _truncate_ops *local = NULL;
- inode_t *inode = NULL;
- pl_inode_t *pl_inode = NULL;
+ multi_dom_lk_data *d = data;
+ char *tmp_key = NULL;
+ char *save_ptr = NULL;
+
+ tmp_key = gf_strdup(key);
+ if (!tmp_key)
+ return -1;
+
+ strtok_r(tmp_key, ":", &save_ptr);
+ if (!*save_ptr) {
+ if (tmp_key)
+ GF_FREE(tmp_key);
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, EINVAL,
+ "Could not tokenize domain string from key %s", key);
+ return -1;
+ }
+
+ pl_inodelk_xattr_fill_each(d->this, d->inode, d->xdata_rsp, save_ptr,
+ d->keep_max, key);
+ if (tmp_key)
+ GF_FREE(tmp_key);
+
+ return 0;
+}
+void
+pl_fill_multiple_dom_lk_requests(xlator_t *this, pl_local_t *local,
+ inode_t *inode, dict_t *dict,
+ gf_boolean_t keep_max)
+{
+ multi_dom_lk_data data;
- priv = this->private;
- local = frame->local;
+ data.this = this;
+ data.inode = inode;
+ data.xdata_rsp = dict;
+ data.keep_max = keep_max;
- if (op_ret != 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "got error (errno=%d, stderror=%s) from child",
- op_errno, strerror (op_errno));
- goto unwind;
- }
+ dict_foreach_fnmatch(local->xdata, GLUSTERFS_INODELK_DOM_PREFIX "*",
+ pl_inodelk_xattr_fill_multiple, &data);
+}
- if (local->op == TRUNCATE)
- inode = local->loc.inode;
- else
- inode = local->fd->inode;
+void
+pl_set_xdata_response(xlator_t *this, pl_local_t *local, inode_t *parent,
+ inode_t *inode, char *name, dict_t *xdata,
+ gf_boolean_t max_lock)
+{
+ if (!xdata || !local)
+ return;
- pl_inode = pl_inode_get (this, inode);
- if (!pl_inode) {
- op_ret = -1;
- op_errno = ENOMEM;
- goto unwind;
+ if (local->parent_entrylk_req && parent && name && name[0] != '\0')
+ pl_parent_entrylk_xattr_fill(this, parent, name, xdata, max_lock);
+
+ if (!inode)
+ return;
+
+ if (local->entrylk_count_req)
+ pl_entrylk_xattr_fill(this, inode, xdata, max_lock);
+
+ if (local->inodelk_dom_count_req)
+ pl_inodelk_xattr_fill(this, inode, xdata,
+ data_to_str(local->inodelk_dom_count_req),
+ max_lock);
+
+ if (local->inodelk_count_req)
+ pl_inodelk_xattr_fill(this, inode, xdata, NULL, max_lock);
+
+ if (local->posixlk_count_req)
+ pl_posixlk_xattr_fill(this, inode, xdata, max_lock);
+
+ if (local->multiple_dom_lk_requests)
+ pl_fill_multiple_dom_lk_requests(this, local, inode, xdata, max_lock);
+}
+
+/* Checks whether the region the fop is acting upon conflicts
+ * with existing locks. If there is no conflict, the function returns
+ * 1; otherwise it returns 0, with the can_block boolean set to
+ * indicate whether the fop should be blocked or failed.
+ */
+int
+pl_is_fop_allowed(pl_inode_t *pl_inode, posix_lock_t *region, fd_t *fd,
+ glusterfs_fop_t op, gf_boolean_t *can_block)
+{
+ int ret = 0;
+
+ if (!__rw_allowable(pl_inode, region, op)) {
+ if (pl_inode->mlock_enforced) {
+ *can_block = _gf_false;
+ } else if ((!fd) || (fd && (fd->flags & O_NONBLOCK))) {
+ gf_log("locks", GF_LOG_TRACE,
+ "returning EAGAIN"
+ " because fd is O_NONBLOCK");
+ *can_block = _gf_false;
+ } else {
+ *can_block = _gf_true;
}
+ } else {
+ ret = 1;
+ }
- if (priv->mandatory
- && pl_inode->mandatory
- && !truncate_allowed (pl_inode, frame->root->trans,
- frame->root->pid, frame->root->lk_owner,
- local->offset)) {
- op_ret = -1;
- op_errno = EAGAIN;
- goto unwind;
+ return ret;
+}
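The contract above implies the caller already holds pl_inode->mutex and has filled in a posix_lock_t region for the IO; pl_discard(), pl_zerofill() and the truncate path below all follow that pattern. A condensed sketch, with the helper name being an assumption:

/* Hypothetical sketch: consult pl_is_fop_allowed() under the inode mutex and
 * map the result to a wind/block/fail decision. */
static int
pl_check_io_region(pl_inode_t *pl_inode, fd_t *fd, posix_lock_t *region,
                   glusterfs_fop_t op, int *op_errno)
{
    gf_boolean_t can_block = _gf_true;
    int allowed = 0;

    pthread_mutex_lock(&pl_inode->mutex);
    {
        allowed = pl_is_fop_allowed(pl_inode, region, fd, op, &can_block);
    }
    pthread_mutex_unlock(&pl_inode->mutex);

    if (!allowed && !can_block)
        *op_errno = EAGAIN; /* fail immediately instead of queueing */

    return allowed;
}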
+
+static pl_fdctx_t *
+pl_new_fdctx()
+{
+ pl_fdctx_t *fdctx = GF_MALLOC(sizeof(*fdctx), gf_locks_mt_pl_fdctx_t);
+ GF_VALIDATE_OR_GOTO("posix-locks", fdctx, out);
+
+ INIT_LIST_HEAD(&fdctx->locks_list);
+
+out:
+ return fdctx;
+}
+
+static pl_fdctx_t *
+pl_check_n_create_fdctx(xlator_t *this, fd_t *fd)
+{
+ int ret = 0;
+ uint64_t tmp = 0;
+ pl_fdctx_t *fdctx = NULL;
+
+ GF_VALIDATE_OR_GOTO("posix-locks", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, fd, out);
+
+ LOCK(&fd->lock);
+ {
+ ret = __fd_ctx_get(fd, this, &tmp);
+ if ((ret != 0) || (tmp == 0)) {
+ fdctx = pl_new_fdctx();
+ if (fdctx == NULL) {
+ goto unlock;
+ }
}
- switch (local->op) {
- case TRUNCATE:
- STACK_WIND (frame, pl_truncate_cbk, FIRST_CHILD (this),
- FIRST_CHILD (this)->fops->truncate,
- &local->loc, local->offset);
- break;
- case FTRUNCATE:
- STACK_WIND (frame, pl_truncate_cbk, FIRST_CHILD (this),
- FIRST_CHILD (this)->fops->ftruncate,
- local->fd, local->offset);
- break;
+ ret = __fd_ctx_set(fd, this, (uint64_t)(long)fdctx);
+ if (ret != 0) {
+ GF_FREE(fdctx);
+ fdctx = NULL;
+ UNLOCK(&fd->lock);
+ gf_log(this->name, GF_LOG_DEBUG, "failed to set fd ctx");
+ goto out;
}
+ }
+unlock:
+ UNLOCK(&fd->lock);
- return 0;
+out:
+ return fdctx;
+}
+
+int32_t
+pl_discard_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ pl_track_io_fop_count(frame->local, this, DECREMENT);
+
+ PL_STACK_UNWIND(discard, xdata, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+ return 0;
+}
+int
+pl_discard_cont(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ size_t len, dict_t *xdata)
+{
+ pl_track_io_fop_count(frame->local, this, INCREMENT);
+
+ STACK_WIND(frame, pl_discard_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->discard, fd, offset, len, xdata);
+ return 0;
+}
+
+int32_t
+pl_discard(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ size_t len, dict_t *xdata)
+{
+ pl_local_t *local = NULL;
+ pl_inode_t *pl_inode = NULL;
+ pl_rw_req_t *rw = NULL;
+ posix_lock_t region = {
+ .list =
+ {
+ 0,
+ },
+ };
+ gf_boolean_t enabled = _gf_false;
+ gf_boolean_t can_block = _gf_true;
+ int op_ret = 0;
+ int op_errno = 0;
+ int allowed = 1;
+
+ GF_VALIDATE_OR_GOTO("locks", this, unwind);
+
+ local = mem_get0(this->local_pool);
+ if (!local) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ frame->local = local;
+ local->inode = inode_ref(fd->inode);
+ local->fd = fd_ref(fd);
+
+ pl_inode = pl_inode_get(this, fd->inode, local);
+ if (!pl_inode) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ if (frame->root->pid < 0)
+ enabled = _gf_false;
+ else
+ enabled = pl_is_mandatory_locking_enabled(pl_inode);
+
+ if (enabled) {
+ region.fl_start = offset;
+ region.fl_end = offset + len - 1;
+ region.client = frame->root->client;
+ region.fd_num = fd_to_fdnum(fd);
+ region.client_pid = frame->root->pid;
+ region.owner = frame->root->lk_owner;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ allowed = pl_is_fop_allowed(pl_inode, &region, fd, GF_FOP_DISCARD,
+ &can_block);
+ if (allowed == 1) {
+ if (pl_inode->mlock_enforced &&
+ pl_inode->track_fop_wind_count) {
+ pl_inode->fop_wind_count++;
+ }
+ goto unlock;
+ } else if (!can_block) {
+ op_errno = EAGAIN;
+ op_ret = -1;
+ goto unlock;
+ }
+
+ rw = GF_MALLOC(sizeof(*rw), gf_locks_mt_pl_rw_req_t);
+ if (!rw) {
+ op_errno = ENOMEM;
+ op_ret = -1;
+ goto unlock;
+ }
+
+ rw->stub = fop_discard_stub(frame, pl_discard_cont, fd, offset, len,
+ xdata);
+ if (!rw->stub) {
+ op_errno = ENOMEM;
+ op_ret = -1;
+ GF_FREE(rw);
+ goto unlock;
+ }
+
+ rw->region = region;
+
+ list_add_tail(&rw->list, &pl_inode->rw_list);
+ }
+ unlock:
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
+
+ if (allowed == 1)
+ STACK_WIND(frame, pl_discard_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->discard, fd, offset, len, xdata);
unwind:
- gf_log (this->name, GF_LOG_ERROR, "truncate failed with ret: %d, "
- "error: %s", op_ret, strerror (op_errno));
- if (local->op == TRUNCATE)
- loc_wipe (&local->loc);
+ if (op_ret == -1)
+ PL_STACK_UNWIND(discard, xdata, frame, op_ret, op_errno, NULL, NULL,
+ NULL);
- STACK_UNWIND_STRICT (truncate, frame, op_ret, op_errno, buf, NULL);
- return 0;
+ return 0;
}
+int32_t
+pl_zerofill_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ pl_track_io_fop_count(frame->local, this, DECREMENT);
+
+ PL_STACK_UNWIND(zerofill, xdata, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+ return 0;
+}
int
-pl_truncate (call_frame_t *frame, xlator_t *this,
- loc_t *loc, off_t offset)
+pl_zerofill_cont(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ off_t len, dict_t *xdata)
{
- struct _truncate_ops *local = NULL;
+ pl_track_io_fop_count(frame->local, this, INCREMENT);
+
+ STACK_WIND(frame, pl_zerofill_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->zerofill, fd, offset, len, xdata);
+ return 0;
+}
- local = GF_CALLOC (1, sizeof (struct _truncate_ops),
- gf_locks_mt_truncate_ops);
- GF_VALIDATE_OR_GOTO (this->name, local, unwind);
+int32_t
+pl_zerofill(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ off_t len, dict_t *xdata)
+{
+ pl_local_t *local = NULL;
+ pl_inode_t *pl_inode = NULL;
+ pl_rw_req_t *rw = NULL;
+ posix_lock_t region = {
+ .list =
+ {
+ 0,
+ },
+ };
+ gf_boolean_t enabled = _gf_false;
+ gf_boolean_t can_block = _gf_true;
+ int op_ret = 0;
+ int op_errno = 0;
+ int allowed = 1;
+
+ GF_VALIDATE_OR_GOTO("locks", this, unwind);
+
+ local = mem_get0(this->local_pool);
+ if (!local) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ frame->local = local;
+ local->inode = inode_ref(fd->inode);
+ local->fd = fd_ref(fd);
+
+ pl_inode = pl_inode_get(this, fd->inode, local);
+ if (!pl_inode) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ if (frame->root->pid < 0)
+ enabled = _gf_false;
+ else
+ enabled = pl_is_mandatory_locking_enabled(pl_inode);
+
+ if (enabled) {
+ region.fl_start = offset;
+ region.fl_end = offset + len - 1;
+ region.client = frame->root->client;
+ region.fd_num = fd_to_fdnum(fd);
+ region.client_pid = frame->root->pid;
+ region.owner = frame->root->lk_owner;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ allowed = pl_is_fop_allowed(pl_inode, &region, fd, GF_FOP_ZEROFILL,
+ &can_block);
+ if (allowed == 1) {
+ if (pl_inode->mlock_enforced &&
+ pl_inode->track_fop_wind_count) {
+ pl_inode->fop_wind_count++;
+ }
+ goto unlock;
+ } else if (!can_block) {
+ op_errno = EAGAIN;
+ op_ret = -1;
+ goto unlock;
+ }
- local->op = TRUNCATE;
- local->offset = offset;
- loc_copy (&local->loc, loc);
+ rw = GF_MALLOC(sizeof(*rw), gf_locks_mt_pl_rw_req_t);
+ if (!rw) {
+ op_errno = ENOMEM;
+ op_ret = -1;
+ goto unlock;
+ }
- frame->local = local;
+ rw->stub = fop_zerofill_stub(frame, pl_zerofill_cont, fd, offset,
+ len, xdata);
+ if (!rw->stub) {
+ op_errno = ENOMEM;
+ op_ret = -1;
+ GF_FREE(rw);
+ goto unlock;
+ }
- STACK_WIND (frame, truncate_stat_cbk, FIRST_CHILD (this),
- FIRST_CHILD (this)->fops->stat, loc);
+ rw->region = region;
- return 0;
+ list_add_tail(&rw->list, &pl_inode->rw_list);
+ }
+ unlock:
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
+ if (allowed == 1)
+ STACK_WIND(frame, pl_zerofill_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->zerofill, fd, offset, len, xdata);
unwind:
- gf_log (this->name, GF_LOG_ERROR, "truncate for %s failed with ret: %d, "
- "error: %s", loc->path, -1, strerror (ENOMEM));
- STACK_UNWIND_STRICT (truncate, frame, -1, ENOMEM, NULL, NULL);
+ if (op_ret == -1)
+ PL_STACK_UNWIND(zerofill, xdata, frame, op_ret, op_errno, NULL, NULL,
+ NULL);
- return 0;
+ return 0;
}
+int
+pl_truncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ pl_local_t *local = frame->local;
+
+ pl_track_io_fop_count(local, this, DECREMENT);
+
+ if (local->op == GF_FOP_TRUNCATE)
+ PL_STACK_UNWIND(truncate, xdata, frame, op_ret, op_errno, prebuf,
+ postbuf, xdata);
+ else
+ PL_STACK_UNWIND(ftruncate, xdata, frame, op_ret, op_errno, prebuf,
+ postbuf, xdata);
+ return 0;
+}
int
-pl_ftruncate (call_frame_t *frame, xlator_t *this,
- fd_t *fd, off_t offset)
+pl_ftruncate_cont(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ dict_t *xdata)
{
- struct _truncate_ops *local = NULL;
+ pl_track_io_fop_count(frame->local, this, INCREMENT);
- local = GF_CALLOC (1, sizeof (struct _truncate_ops),
- gf_locks_mt_truncate_ops);
- GF_VALIDATE_OR_GOTO (this->name, local, unwind);
+ STACK_WIND(frame, pl_truncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->ftruncate, fd, offset, xdata);
+ return 0;
+}
- local->op = FTRUNCATE;
- local->offset = offset;
- local->fd = fd;
+int
+pl_truncate_cont(call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset,
+ dict_t *xdata)
+{
+ pl_track_io_fop_count(frame->local, this, INCREMENT);
- frame->local = local;
+ STACK_WIND(frame, pl_truncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->truncate, loc, offset, xdata);
+ return 0;
+}
- STACK_WIND (frame, truncate_stat_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fstat, fd);
- return 0;
+static int
+truncate_stat_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf,
+ dict_t *xdata)
+{
+ pl_local_t *local = frame->local;
+ inode_t *inode = NULL;
+ pl_inode_t *pl_inode = NULL;
+ pl_rw_req_t *rw = NULL;
+ posix_lock_t region = {
+ .list =
+ {
+ 0,
+ },
+ };
+ gf_boolean_t enabled = _gf_false;
+ gf_boolean_t can_block = _gf_true;
+ int allowed = 1;
+
+ GF_VALIDATE_OR_GOTO("locks", this, unwind);
+
+ if (op_ret != 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+               "got error (errno=%d, strerror=%s) from child", op_errno,
+ strerror(op_errno));
+ goto unwind;
+ }
+
+ if (local->op == GF_FOP_TRUNCATE)
+ inode = local->loc[0].inode;
+ else
+ inode = local->fd->inode;
+
+ local->inode = inode_ref(inode);
+
+ pl_inode = pl_inode_get(this, inode, local);
+ if (!pl_inode) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ if (frame->root->pid < 0)
+ enabled = _gf_false;
+ else
+ enabled = pl_is_mandatory_locking_enabled(pl_inode);
+
+ if (enabled) {
+ region.fl_start = local->offset;
+ region.fl_end = LLONG_MAX;
+ region.client = frame->root->client;
+ region.fd_num = fd_to_fdnum(local->fd);
+ region.client_pid = frame->root->pid;
+ region.owner = frame->root->lk_owner;
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ allowed = pl_is_fop_allowed(pl_inode, &region, local->fd, local->op,
+ &can_block);
+
+ if (allowed == 1) {
+ if (pl_inode->mlock_enforced &&
+ pl_inode->track_fop_wind_count) {
+ pl_inode->fop_wind_count++;
+ }
+ goto unlock;
+ } else if (!can_block) {
+ op_errno = EAGAIN;
+ op_ret = -1;
+ goto unlock;
+ }
+
+ rw = GF_MALLOC(sizeof(*rw), gf_locks_mt_pl_rw_req_t);
+ if (!rw) {
+ op_errno = ENOMEM;
+ op_ret = -1;
+ goto unlock;
+ }
+
+ if (local->op == GF_FOP_TRUNCATE)
+ rw->stub = fop_truncate_stub(frame, pl_truncate_cont,
+ &local->loc[0], local->offset,
+ local->xdata);
+ else
+ rw->stub = fop_ftruncate_stub(frame, pl_ftruncate_cont,
+ local->fd, local->offset,
+ local->xdata);
+ if (!rw->stub) {
+ op_errno = ENOMEM;
+ op_ret = -1;
+ GF_FREE(rw);
+ goto unlock;
+ }
+
+ rw->region = region;
+ list_add_tail(&rw->list, &pl_inode->rw_list);
+ }
+ unlock:
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
+
+ if (allowed == 1) {
+ switch (local->op) {
+ case GF_FOP_TRUNCATE:
+ STACK_WIND(frame, pl_truncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->truncate, &local->loc[0],
+ local->offset, local->xdata);
+ break;
+ case GF_FOP_FTRUNCATE:
+ STACK_WIND(frame, pl_truncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->ftruncate, local->fd,
+ local->offset, local->xdata);
+ break;
+ default:
+ break;
+ }
+ }
unwind:
- gf_log (this->name, GF_LOG_ERROR, "ftruncate failed with ret: %d, "
- "error: %s", -1, strerror (ENOMEM));
- STACK_UNWIND_STRICT (ftruncate, frame, -1, ENOMEM, NULL, NULL);
+ if (op_ret == -1) {
+ gf_log(this ? this->name : "locks", GF_LOG_ERROR,
+ "truncate failed with "
+ "ret: %d, error: %s",
+ op_ret, strerror(op_errno));
- return 0;
+ switch (local->op) {
+ case GF_FOP_TRUNCATE:
+ PL_STACK_UNWIND(truncate, xdata, frame, op_ret, op_errno, buf,
+ NULL, xdata);
+ break;
+ case GF_FOP_FTRUNCATE:
+ PL_STACK_UNWIND(ftruncate, xdata, frame, op_ret, op_errno, buf,
+ NULL, xdata);
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
}
-static void
-delete_locks_of_fd (xlator_t *this, pl_inode_t *pl_inode, fd_t *fd)
+int
+pl_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset,
+ dict_t *xdata)
{
- posix_lock_t *tmp = NULL;
- posix_lock_t *l = NULL;
+ pl_local_t *local = NULL;
+ int ret = -1;
- struct list_head blocked_list;
+ GF_VALIDATE_OR_GOTO("locks", this, unwind);
- INIT_LIST_HEAD (&blocked_list);
+ local = mem_get0(this->local_pool);
+ GF_VALIDATE_OR_GOTO(this->name, local, unwind);
- pthread_mutex_lock (&pl_inode->mutex);
- {
+ local->op = GF_FOP_TRUNCATE;
+ local->offset = offset;
+ loc_copy(&local->loc[0], loc);
+ if (xdata)
+ local->xdata = dict_ref(xdata);
- list_for_each_entry_safe (l, tmp, &pl_inode->ext_list, list) {
- if ((l->fd_num == fd_to_fdnum(fd))) {
- if (l->blocked) {
- list_move_tail (&l->list, &blocked_list);
- continue;
- }
- __delete_lock (pl_inode, l);
- __destroy_lock (l);
- }
- }
+ frame->local = local;
- }
- pthread_mutex_unlock (&pl_inode->mutex);
+ STACK_WIND(frame, truncate_stat_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->stat, loc, NULL);
+ ret = 0;
- list_for_each_entry_safe (l, tmp, &blocked_list, list) {
- list_del_init(&l->list);
- STACK_UNWIND_STRICT (lk, l->frame, -1, EAGAIN, &l->user_flock);
- __destroy_lock (l);
- }
+unwind:
+ if (ret == -1) {
+ gf_log(this ? this->name : "locks", GF_LOG_ERROR,
+ "truncate on %s failed with"
+ " ret: %d, error: %s",
+ loc->path, -1, strerror(ENOMEM));
+ STACK_UNWIND_STRICT(truncate, frame, -1, ENOMEM, NULL, NULL, NULL);
+ }
+ return 0;
+}
- grant_blocked_locks (this, pl_inode);
+int
+pl_ftruncate(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ dict_t *xdata)
+{
+ pl_local_t *local = NULL;
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO("locks", this, unwind);
+ local = mem_get0(this->local_pool);
+ GF_VALIDATE_OR_GOTO(this->name, local, unwind);
+
+ local->op = GF_FOP_FTRUNCATE;
+ local->offset = offset;
+ local->fd = fd_ref(fd);
+ if (xdata)
+ local->xdata = dict_ref(xdata);
+
+ frame->local = local;
+
+ STACK_WIND(frame, truncate_stat_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fstat, fd, xdata);
+ ret = 0;
+unwind:
+ if (ret == -1) {
+ gf_log(this ? this->name : "locks", GF_LOG_ERROR,
+ "ftruncate failed with"
+ " ret: %d, error: %s",
+ -1, strerror(ENOMEM));
+ STACK_UNWIND_STRICT(ftruncate, frame, -1, ENOMEM, NULL, NULL, NULL);
+ }
+ return 0;
+}
- do_blocked_rw (pl_inode);
+int
+pl_locks_by_fd(pl_inode_t *pl_inode, fd_t *fd)
+{
+ posix_lock_t *l = NULL;
+ int found = 0;
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ list_for_each_entry(l, &pl_inode->ext_list, list)
+ {
+ if (l->fd_num == fd_to_fdnum(fd)) {
+ found = 1;
+ break;
+ }
+ }
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+ return found;
}
static void
-__delete_locks_of_owner (pl_inode_t *pl_inode,
- void *transport, uint64_t owner)
-{
- posix_lock_t *tmp = NULL;
- posix_lock_t *l = NULL;
-
- /* TODO: what if it is a blocked lock with pending l->frame */
-
- list_for_each_entry_safe (l, tmp, &pl_inode->ext_list, list) {
- if ((l->transport == transport)
- && (l->owner == owner)) {
- gf_log ("posix-locks", GF_LOG_TRACE,
- " Flushing lock"
- "%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" state: %s",
- l->fl_type == F_UNLCK ? "Unlock" : "Lock",
- l->client_pid,
- l->owner,
- l->user_flock.l_start,
- l->user_flock.l_len,
- l->blocked == 1 ? "Blocked" : "Active");
-
- __delete_lock (pl_inode, l);
- __destroy_lock (l);
+delete_locks_of_fd(xlator_t *this, pl_inode_t *pl_inode, fd_t *fd)
+{
+ posix_lock_t *tmp = NULL;
+ posix_lock_t *l = NULL;
+
+ struct list_head blocked_list;
+
+ INIT_LIST_HEAD(&blocked_list);
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ list_for_each_entry_safe(l, tmp, &pl_inode->ext_list, list)
+ {
+ if (l->fd_num == fd_to_fdnum(fd)) {
+ if (l->blocked) {
+ list_move_tail(&l->list, &blocked_list);
+ continue;
}
+ __delete_lock(l);
+ __destroy_lock(l);
+ }
}
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
- return;
+ list_for_each_entry_safe(l, tmp, &blocked_list, list)
+ {
+ list_del_init(&l->list);
+ STACK_UNWIND_STRICT(lk, l->frame, -1, EAGAIN, &l->user_flock, NULL);
+ __destroy_lock(l);
+ }
+
+ grant_blocked_locks(this, pl_inode);
+
+ do_blocked_rw(pl_inode);
+}
+
+static void
+__delete_locks_of_owner(pl_inode_t *pl_inode, client_t *client,
+ gf_lkowner_t *owner)
+{
+ posix_lock_t *tmp = NULL;
+ posix_lock_t *l = NULL;
+
+ /* TODO: what if it is a blocked lock with pending l->frame */
+
+ list_for_each_entry_safe(l, tmp, &pl_inode->ext_list, list)
+ {
+ if (l->blocked)
+ continue;
+ if ((l->client == client) && is_same_lkowner(&l->owner, owner)) {
+ gf_log("posix-locks", GF_LOG_TRACE,
+ " Flushing lock"
+ "%s (pid=%d) (lk-owner=%s) %" PRId64 " - %" PRId64
+ " state: %s",
+ l->fl_type == F_UNLCK ? "Unlock" : "Lock", l->client_pid,
+ lkowner_utoa(&l->owner), l->user_flock.l_start,
+ l->user_flock.l_len, l->blocked == 1 ? "Blocked" : "Active");
+
+ __delete_lock(l);
+ __destroy_lock(l);
+ }
+ }
+
+ return;
}
int32_t
-pl_opendir_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- fd_t *fd)
+pl_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict, dict_t *xdata)
{
- pl_fdctx_t *fdctx = NULL;
+ STACK_UNWIND_STRICT(getxattr, frame, op_ret, op_errno, dict, xdata);
+ return 0;
+}
- if (op_ret < 0)
- goto unwind;
+static int32_t
+pl_getxattr_clrlk(xlator_t *this, const char *name, inode_t *inode,
+ dict_t **dict, int32_t *op_errno)
+{
+ int32_t bcount = 0;
+ int32_t gcount = 0;
+ char *key = NULL;
+ char *lk_summary = NULL;
+ pl_inode_t *pl_inode = NULL;
+ clrlk_args args = {
+ 0,
+ };
+ char *brickname = NULL;
+ int32_t op_ret = -1;
+
+ *op_errno = EINVAL;
+
+ if (clrlk_parse_args(name, &args)) {
+ *op_errno = EINVAL;
+ goto out;
+ }
+
+ *dict = dict_new();
+ if (!*dict) {
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ pl_inode = pl_inode_get(this, inode, NULL);
+ if (!pl_inode) {
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ switch (args.type) {
+ case CLRLK_INODE:
+ case CLRLK_ENTRY:
+ op_ret = clrlk_clear_lks_in_all_domains(this, pl_inode, &args,
+ &bcount, &gcount, op_errno);
+ break;
+ case CLRLK_POSIX:
+ op_ret = clrlk_clear_posixlk(this, pl_inode, &args, &bcount,
+ &gcount, op_errno);
+ break;
+ default:
+ op_ret = -1;
+ *op_errno = EINVAL;
+ }
+ if (op_ret) {
+ if (args.type >= CLRLK_TYPE_MAX) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "clear locks: invalid lock type %d", args.type);
+ } else {
+ gf_log(this->name, GF_LOG_ERROR,
+ "clear locks of type %s failed: %s",
+ clrlk_type_names[args.type], strerror(*op_errno));
+ }
- fdctx = pl_check_n_create_fdctx (this, fd);
- if (!fdctx) {
- op_errno = ENOMEM;
- op_ret = -1;
- goto unwind;
+ goto out;
+ }
+
+ op_ret = fetch_pathinfo(this, inode, op_errno, &brickname);
+ if (op_ret) {
+ gf_log(this->name, GF_LOG_WARNING, "Couldn't get brickname");
+ } else {
+ op_ret = format_brickname(brickname);
+ if (op_ret) {
+ gf_log(this->name, GF_LOG_WARNING, "Couldn't format brickname");
+ GF_FREE(brickname);
+ brickname = NULL;
}
+ }
-unwind:
- STACK_UNWIND_STRICT (opendir,
- frame,
- op_ret,
- op_errno,
- fd);
- return 0;
+ if (!gcount && !bcount) {
+ if (gf_asprintf(&lk_summary, "No locks cleared.") == -1) {
+ op_ret = -1;
+ *op_errno = ENOMEM;
+ goto out;
+ }
+ } else if (gf_asprintf(&lk_summary,
+ "%s: %s blocked locks=%d "
+ "granted locks=%d",
+ (brickname == NULL) ? this->name : brickname,
+ clrlk_type_names[args.type], bcount, gcount) == -1) {
+ op_ret = -1;
+ *op_errno = ENOMEM;
+ goto out;
+ }
+ gf_log(this->name, GF_LOG_DEBUG, "%s", lk_summary);
+
+ key = gf_strdup(name);
+ if (!key) {
+ op_ret = -1;
+ goto out;
+ }
+ if (dict_set_dynstr(*dict, key, lk_summary)) {
+ op_ret = -1;
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ op_ret = 0;
+
+out:
+ GF_FREE(brickname);
+ GF_FREE(args.opts);
+ GF_FREE(key);
+ if (op_ret) {
+ GF_FREE(lk_summary);
+ }
+
+ return op_ret;
}
int32_t
-pl_opendir (call_frame_t *frame, xlator_t *this,
- loc_t *loc, fd_t *fd)
-{
- STACK_WIND (frame,
- pl_opendir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->opendir,
- loc, fd);
- return 0;
+pl_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, const char *name,
+ dict_t *xdata)
+{
+ int32_t op_errno = EINVAL;
+ int32_t op_ret = -1;
+ dict_t *dict = NULL;
+
+ if (!name)
+ goto usual;
+
+ if (strncmp(name, GF_XATTR_CLRLK_CMD, SLEN(GF_XATTR_CLRLK_CMD)))
+ goto usual;
+
+ op_ret = pl_getxattr_clrlk(this, name, loc->inode, &dict, &op_errno);
+
+ STACK_UNWIND_STRICT(getxattr, frame, op_ret, op_errno, dict, xdata);
+
+ if (dict)
+ dict_unref(dict);
+ return 0;
+usual:
+ STACK_WIND(frame, pl_getxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->getxattr, loc, name, xdata);
+ return 0;
}
-int
-pl_flush_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+static int
+format_brickname(char *brickname)
{
- STACK_UNWIND_STRICT (flush, frame, op_ret, op_errno);
+ int ret = -1;
+ char *hostname = NULL;
+ char *volume = NULL;
+ char *saveptr = NULL;
- return 0;
+ if (!brickname)
+ goto out;
+
+ strtok_r(brickname, ":", &saveptr);
+ hostname = gf_strdup(strtok_r(NULL, ":", &saveptr));
+ if (hostname == NULL)
+ goto out;
+ volume = gf_strdup(strtok_r(NULL, ".", &saveptr));
+ if (volume == NULL)
+ goto out;
+
+ sprintf(brickname, "%s:%s", hostname, volume);
+
+ ret = 0;
+out:
+ GF_FREE(hostname);
+ GF_FREE(volume);
+ return ret;
}
+static int
+fetch_pathinfo(xlator_t *this, inode_t *inode, int32_t *op_errno,
+ char **brickname)
+{
+ int ret = -1;
+ loc_t loc = {
+ 0,
+ };
+ dict_t *dict = NULL;
+
+ if (!brickname)
+ goto out;
+
+ if (!op_errno)
+ goto out;
+
+ gf_uuid_copy(loc.gfid, inode->gfid);
+ loc.inode = inode_ref(inode);
+
+ ret = syncop_getxattr(FIRST_CHILD(this), &loc, &dict, GF_XATTR_PATHINFO_KEY,
+ NULL, NULL);
+ if (ret < 0) {
+ *op_errno = -ret;
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str_sizen(dict, GF_XATTR_PATHINFO_KEY, brickname);
+ if (ret)
+ goto out;
+
+ *brickname = gf_strdup(*brickname);
+ if (*brickname == NULL) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (dict != NULL) {
+ dict_unref(dict);
+ }
+ loc_wipe(&loc);
+
+ return ret;
+}
int
-pl_flush (call_frame_t *frame, xlator_t *this,
- fd_t *fd)
+pl_lockinfo_get_brickname(xlator_t *this, inode_t *inode, int32_t *op_errno)
{
- pl_inode_t *pl_inode = NULL;
- uint64_t owner = -1;
-
- owner = frame->root->lk_owner;
+ posix_locks_private_t *priv = this->private;
+ char *brickname = NULL;
+ char *end = NULL;
+ char *tmp = NULL;
+
+ int ret = fetch_pathinfo(this, inode, op_errno, &brickname);
+ if (ret)
+ goto out;
+
+ end = strrchr(brickname, ':');
+ if (!end) {
+ GF_FREE(brickname);
+ ret = -1;
+ goto out;
+ }
+
+ tmp = brickname;
+ brickname = gf_strndup(brickname, (end - brickname));
+ if (brickname == NULL) {
+ ret = -1;
+ goto out;
+ }
+
+ priv->brickname = brickname;
+ ret = 0;
+out:
+ GF_FREE(tmp);
+ return ret;
+}
- pl_inode = pl_inode_get (this, fd->inode);
+char *
+pl_lockinfo_key(xlator_t *this, inode_t *inode, int32_t *op_errno)
+{
+ posix_locks_private_t *priv = this->private;
+ char *key = NULL;
+ int ret = 0;
- if (!pl_inode) {
- gf_log (this->name, GF_LOG_DEBUG, "Could not get inode.");
- STACK_UNWIND_STRICT (flush, frame, -1, EBADFD);
- return 0;
+ if (priv->brickname == NULL) {
+ ret = pl_lockinfo_get_brickname(this, inode, op_errno);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_WARNING, "cannot get brickname");
+ goto out;
}
+ }
- pl_trace_flush (this, frame, fd);
+ key = priv->brickname;
+out:
+ return key;
+}
- if (owner == 0) {
- /* Handle special case when protocol/server sets lk-owner to zero.
- * This usually happens due to a client disconnection. Hence, free
- * all locks opened with this fd.
- */
- gf_log (this->name, GF_LOG_TRACE,
- "Releasing all locks with fd %p", fd);
- delete_locks_of_fd (this, pl_inode, fd);
- goto wind;
+int32_t
+pl_fgetxattr_handle_lockinfo(xlator_t *this, fd_t *fd, dict_t *dict,
+ int32_t *op_errno)
+{
+ char *key = NULL, *buf = NULL;
+ int32_t op_ret = 0;
+ unsigned long fdnum = 0;
+ int32_t len = 0;
+ dict_t *tmp = NULL;
+
+ pl_inode_t *pl_inode = pl_inode_get(this, fd->inode, NULL);
+
+ if (!pl_inode) {
+ gf_log(this->name, GF_LOG_DEBUG, "Could not get inode.");
+ *op_errno = EBADFD;
+ op_ret = -1;
+ goto out;
+ }
+
+ if (!pl_locks_by_fd(pl_inode, fd)) {
+ op_ret = 0;
+ goto out;
+ }
+
+ fdnum = fd_to_fdnum(fd);
+
+ key = pl_lockinfo_key(this, fd->inode, op_errno);
+ if (key == NULL) {
+ op_ret = -1;
+ goto out;
+ }
+
+ tmp = dict_new();
+ if (tmp == NULL) {
+ op_ret = -1;
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ op_ret = dict_set_uint64(tmp, key, fdnum);
+ if (op_ret < 0) {
+ *op_errno = -op_ret;
+ op_ret = -1;
+ gf_log(this->name, GF_LOG_WARNING,
+ "setting lockinfo value "
+ "(%lu) for fd (ptr:%p inode-gfid:%s) failed (%s)",
+ fdnum, fd, uuid_utoa(fd->inode->gfid), strerror(*op_errno));
+ goto out;
+ }
+
+ op_ret = dict_allocate_and_serialize(tmp, (char **)&buf,
+ (unsigned int *)&len);
+ if (op_ret != 0) {
+ *op_errno = -op_ret;
+ op_ret = -1;
+ gf_log(this->name, GF_LOG_WARNING,
+ "dict_serialized_length failed (%s) while handling "
+ "lockinfo for fd (ptr:%p inode-gfid:%s)",
+ strerror(*op_errno), fd, uuid_utoa(fd->inode->gfid));
+ goto out;
+ }
+
+ op_ret = dict_set_dynptr(dict, GF_XATTR_LOCKINFO_KEY, buf, len);
+ if (op_ret < 0) {
+ *op_errno = -op_ret;
+ op_ret = -1;
+ gf_log(this->name, GF_LOG_WARNING,
+ "setting lockinfo value "
+ "(%lu) for fd (ptr:%p inode-gfid:%s) failed (%s)",
+ fdnum, fd, uuid_utoa(fd->inode->gfid), strerror(*op_errno));
+ goto out;
+ }
+
+ buf = NULL;
+out:
+ if (tmp != NULL) {
+ dict_unref(tmp);
+ }
+
+ if (buf != NULL) {
+ GF_FREE(buf);
+ }
+ return op_ret;
+}
+
+int32_t
+pl_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
+ dict_t *xdata)
+{
+ int32_t op_ret = 0, op_errno = 0;
+ dict_t *dict = NULL;
+
+ if (!name) {
+ goto usual;
+ }
+
+ if (strcmp(name, GF_XATTR_LOCKINFO_KEY) == 0) {
+ dict = dict_new();
+ if (dict == NULL) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
}
- pthread_mutex_lock (&pl_inode->mutex);
- {
- __delete_locks_of_owner (pl_inode, frame->root->trans,
- owner);
+
+ op_ret = pl_fgetxattr_handle_lockinfo(this, fd, dict, &op_errno);
+ if (op_ret < 0) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "getting lockinfo on fd (ptr:%p inode-gfid:%s) "
+ "failed (%s)",
+ fd, uuid_utoa(fd->inode->gfid), strerror(op_errno));
}
- pthread_mutex_unlock (&pl_inode->mutex);
- grant_blocked_locks (this, pl_inode);
+ goto unwind;
+ } else if (strncmp(name, GF_XATTR_CLRLK_CMD, SLEN(GF_XATTR_CLRLK_CMD)) ==
+ 0) {
+ op_ret = pl_getxattr_clrlk(this, name, fd->inode, &dict, &op_errno);
- do_blocked_rw (pl_inode);
+ goto unwind;
+ } else {
+ goto usual;
+ }
-wind:
- STACK_WIND (frame, pl_flush_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->flush, fd);
- return 0;
+unwind:
+ STACK_UNWIND_STRICT(fgetxattr, frame, op_ret, op_errno, dict, NULL);
+ if (dict != NULL) {
+ dict_unref(dict);
+ }
+
+ return 0;
+
+usual:
+ STACK_WIND(frame, default_fgetxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fgetxattr, fd, name, xdata);
+ return 0;
}
+int32_t
+pl_migrate_locks(call_frame_t *frame, fd_t *newfd, uint64_t oldfd_num,
+ int32_t *op_errno)
+{
+ posix_lock_t *l = NULL;
+ int32_t op_ret = 0;
+ uint64_t newfd_num = fd_to_fdnum(newfd);
+
+ pl_inode_t *pl_inode = pl_inode_get(frame->this, newfd->inode, NULL);
+ if (pl_inode == NULL) {
+ op_ret = -1;
+ *op_errno = EBADFD;
+ goto out;
+ }
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ list_for_each_entry(l, &pl_inode->ext_list, list)
+ {
+ if (l->fd_num == oldfd_num) {
+ l->fd_num = newfd_num;
+ l->client = frame->root->client;
+ }
+ }
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
-int
-pl_open_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, fd_t *fd)
+ op_ret = 0;
+out:
+ return op_ret;
+}
+
+int32_t
+pl_fsetxattr_handle_lockinfo(call_frame_t *frame, fd_t *fd, char *lockinfo_buf,
+ int len, int32_t *op_errno)
{
- pl_fdctx_t *fdctx = NULL;
+ int32_t op_ret = -1;
+ uint64_t oldfd_num = 0;
+ char *key = NULL;
+
+ dict_t *lockinfo = dict_new();
+ if (lockinfo == NULL) {
+ op_ret = -1;
+ *op_errno = ENOMEM;
+ goto out;
+ }
+
+ op_ret = dict_unserialize(lockinfo_buf, len, &lockinfo);
+ if (op_ret < 0) {
+ *op_errno = -op_ret;
+ op_ret = -1;
+ goto out;
+ }
+
+ key = pl_lockinfo_key(frame->this, fd->inode, op_errno);
+ if (key == NULL) {
+ op_ret = -1;
+ goto out;
+ }
+
+ op_ret = dict_get_uint64(lockinfo, key, &oldfd_num);
+
+ if (oldfd_num == 0) {
+ op_ret = 0;
+ goto out;
+ }
+
+ op_ret = pl_migrate_locks(frame, fd, oldfd_num, op_errno);
+ if (op_ret < 0) {
+ gf_log(frame->this->name, GF_LOG_WARNING,
+ "migration of locks from oldfd (ptr:%p) to newfd "
+ "(ptr:%p) (inode-gfid:%s)",
+ (void *)(uintptr_t)oldfd_num, fd, uuid_utoa(fd->inode->gfid));
+ goto out;
+ }
- if (op_ret < 0)
- goto unwind;
+out:
+ dict_unref(lockinfo);
- fdctx = pl_check_n_create_fdctx (this, fd);
- if (!fdctx) {
- op_errno = ENOMEM;
- op_ret = -1;
- goto unwind;
+ return op_ret;
+}
+
+int32_t
+pl_fsetxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ pl_local_t *local = NULL;
+ pl_inode_t *pl_inode = NULL;
+
+ local = frame->local;
+ if (local && local->update_mlock_enforced_flag && op_ret != -1) {
+ pl_inode = pl_inode_get(this, local->inode, NULL);
+ if (!pl_inode) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ pl_inode->mlock_enforced = _gf_true;
+ pl_inode->check_mlock_info = _gf_false;
}
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
unwind:
- STACK_UNWIND_STRICT (open, frame, op_ret, op_errno, fd);
+ PL_STACK_UNWIND_FOR_CLIENT(fsetxattr, xdata, frame, op_ret, op_errno,
+ xdata);
+ return 0;
+}
- return 0;
+int32_t
+pl_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *dict,
+ int32_t flags, dict_t *xdata)
+{
+ int32_t op_errno = 0;
+ void *lockinfo_buf = NULL;
+ int len = 0;
+ char *name = NULL;
+ posix_locks_private_t *priv = this->private;
+
+ int32_t op_ret = dict_get_ptr_and_len(dict, GF_XATTR_LOCKINFO_KEY,
+ &lockinfo_buf, &len);
+ if (lockinfo_buf == NULL) {
+ goto usual;
+ }
+
+ op_ret = pl_fsetxattr_handle_lockinfo(frame, fd, lockinfo_buf, len,
+ &op_errno);
+ if (op_ret < 0) {
+ goto unwind;
+ }
+
+usual:
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+
+ PL_CHECK_LOCK_ENFORCE_KEY(frame, dict, name, this, ((loc_t *)NULL), fd,
+ priv);
+
+ STACK_WIND(frame, pl_fsetxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags, xdata);
+ return 0;
+
+unwind:
+ PL_STACK_UNWIND_FOR_CLIENT(fsetxattr, xdata, frame, op_ret, op_errno, NULL);
+
+ return 0;
+}
+
+int32_t
+pl_opendir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd, dict_t *xdata)
+{
+ pl_fdctx_t *fdctx = NULL;
+
+ if (op_ret < 0)
+ goto unwind;
+
+ fdctx = pl_check_n_create_fdctx(this, fd);
+ if (!fdctx) {
+ op_errno = ENOMEM;
+ op_ret = -1;
+ goto unwind;
+ }
+
+unwind:
+ PL_STACK_UNWIND(opendir, xdata, frame, op_ret, op_errno, fd, xdata);
+
+ return 0;
}
+int32_t
+pl_opendir(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd,
+ dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+ STACK_WIND(frame, pl_opendir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->opendir, loc, fd, xdata);
+ return 0;
+}
int
-pl_open (call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
- fd_t *fd, int32_t wbflags)
+pl_flush_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, dict_t *xdata)
{
- /* why isn't O_TRUNC being handled ? */
- STACK_WIND (frame, pl_open_cbk,
- FIRST_CHILD(this), FIRST_CHILD(this)->fops->open,
- loc, flags & ~O_TRUNC, fd, wbflags);
+ PL_STACK_UNWIND_FOR_CLIENT(flush, xdata, frame, op_ret, op_errno, xdata);
- return 0;
+ return 0;
}
+int
+pl_flush(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+{
+ pl_inode_t *pl_inode = pl_inode_get(this, fd->inode, NULL);
+ if (!pl_inode) {
+ gf_log(this->name, GF_LOG_DEBUG, "Could not get inode.");
+ STACK_UNWIND_STRICT(flush, frame, -1, EBADFD, NULL);
+ return 0;
+ }
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ if (pl_inode->migrated) {
+ pthread_mutex_unlock(&pl_inode->mutex);
+ STACK_UNWIND_STRICT(flush, frame, -1, EREMOTE, NULL);
+ return 0;
+ }
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ pl_trace_flush(this, frame, fd);
+
+ if (frame->root->lk_owner.len == 0) {
+ /* Handle special case when protocol/server sets lk-owner to zero.
+ * This usually happens due to a client disconnection. Hence, free
+ * all locks opened with this fd.
+ */
+ gf_log(this->name, GF_LOG_TRACE, "Releasing all locks with fd %p", fd);
+ delete_locks_of_fd(this, pl_inode, fd);
+ goto wind;
+ }
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ __delete_locks_of_owner(pl_inode, frame->root->client,
+ &frame->root->lk_owner);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ grant_blocked_locks(this, pl_inode);
+
+ do_blocked_rw(pl_inode);
+
+wind:
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+ STACK_WIND(frame, pl_flush_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->flush, fd, xdata);
+ return 0;
+}
int
-pl_create_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno,
- fd_t *fd, inode_t *inode, struct iatt *buf,
- struct iatt *preparent, struct iatt *postparent)
+pl_open_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, fd_t *fd, dict_t *xdata)
{
- pl_fdctx_t *fdctx = NULL;
+ pl_fdctx_t *fdctx = NULL;
- if (op_ret < 0)
- goto unwind;
+ if (op_ret < 0)
+ goto unwind;
- fdctx = pl_check_n_create_fdctx (this, fd);
- if (!fdctx) {
- op_errno = ENOMEM;
- op_ret = -1;
- goto unwind;
- }
+ fdctx = pl_check_n_create_fdctx(this, fd);
+ if (!fdctx) {
+ op_errno = ENOMEM;
+ op_ret = -1;
+ goto unwind;
+ }
unwind:
- STACK_UNWIND_STRICT (create, frame, op_ret, op_errno, fd, inode, buf,
- preparent, postparent);
+ STACK_UNWIND_STRICT(open, frame, op_ret, op_errno, fd, xdata);
- return 0;
+ return 0;
}
-
int
-pl_create (call_frame_t *frame, xlator_t *this,
- loc_t *loc, int32_t flags, mode_t mode, fd_t *fd, dict_t *params)
+pl_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ fd_t *fd, dict_t *xdata)
{
- STACK_WIND (frame, pl_create_cbk,
- FIRST_CHILD (this), FIRST_CHILD (this)->fops->create,
- loc, flags, mode, fd, params);
- return 0;
+ int op_ret = -1;
+ int op_errno = EINVAL;
+ pl_inode_t *pl_inode = NULL;
+ posix_lock_t *l = NULL;
+ posix_locks_private_t *priv = this->private;
+
+ GF_VALIDATE_OR_GOTO("locks", this, unwind);
+
+ op_ret = 0, op_errno = 0;
+ pl_inode = pl_inode_get(this, fd->inode, NULL);
+ if (!pl_inode) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, ENOMEM, "Could not get inode");
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ /* As per design, under forced and file-based mandatory locking modes
+     * it doesn't matter whether the inode's lock list contains advisory or
+ * mandatory type locks. So we just check whether inode's lock list is
+ * empty or not to make sure that no locks are being held for the file.
+ * Whereas under optimal mandatory locking mode, we strictly fail open
+     * if and only if the lock list contains mandatory locks.
+ */
+ if (((priv->mandatory_mode == MLK_FILE_BASED) && pl_inode->mandatory) ||
+ priv->mandatory_mode == MLK_FORCED) {
+ if (fd->flags & O_TRUNC) {
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ if (!list_empty(&pl_inode->ext_list)) {
+ op_ret = -1;
+ op_errno = EAGAIN;
+ }
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
+ } else if (priv->mandatory_mode == MLK_OPTIMAL) {
+ if (fd->flags & O_TRUNC) {
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ list_for_each_entry(l, &pl_inode->ext_list, list)
+ {
+ if ((l->lk_flags & GF_LK_MANDATORY)) {
+ op_ret = -1;
+ op_errno = EAGAIN;
+ break;
+ }
+ }
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
+ }
+
+unwind:
+ if (op_ret == -1)
+ STACK_UNWIND_STRICT(open, frame, op_ret, op_errno, NULL, NULL);
+ else
+ STACK_WIND(frame, pl_open_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->open, loc, flags, fd, xdata);
+ return 0;
}
+int
+pl_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, fd_t *fd, inode_t *inode, struct iatt *buf,
+ struct iatt *preparent, struct iatt *postparent, dict_t *xdata)
+{
+ pl_fdctx_t *fdctx = NULL;
+
+ if (op_ret < 0)
+ goto unwind;
+
+ fdctx = pl_check_n_create_fdctx(this, fd);
+ if (!fdctx) {
+ op_errno = ENOMEM;
+ op_ret = -1;
+ goto unwind;
+ }
+
+unwind:
+ PL_STACK_UNWIND(create, xdata, frame, op_ret, op_errno, fd, inode, buf,
+ preparent, postparent, xdata);
+
+ return 0;
+}
int
-pl_readv_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno,
- struct iovec *vector, int32_t count, struct iatt *stbuf,
- struct iobref *iobref)
+pl_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ mode_t mode, mode_t umask, fd_t *fd, dict_t *xdata)
{
- STACK_UNWIND_STRICT (readv, frame, op_ret, op_errno,
- vector, count, stbuf, iobref);
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
- return 0;
+ STACK_WIND(frame, pl_create_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->create, loc, flags, mode, umask, fd,
+ xdata);
+ return 0;
}
int
-pl_writev_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf)
+pl_readv_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iovec *vector, int32_t count,
+ struct iatt *stbuf, struct iobref *iobref, dict_t *xdata)
{
- STACK_UNWIND_STRICT (writev, frame, op_ret, op_errno, prebuf, postbuf);
+ pl_track_io_fop_count(frame->local, this, DECREMENT);
- return 0;
+ PL_STACK_UNWIND(readv, xdata, frame, op_ret, op_errno, vector, count, stbuf,
+ iobref, xdata);
+
+ return 0;
}
+int
+pl_writev_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iatt *prebuf, struct iatt *postbuf,
+ dict_t *xdata)
+{
+ pl_track_io_fop_count(frame->local, this, DECREMENT);
+
+ PL_STACK_UNWIND(writev, xdata, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+
+ return 0;
+}
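+/* Walk the pending read/write requests queued on this inode and resume the
+ * stubs whose regions are no longer blocked by a conflicting lock. */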
void
-do_blocked_rw (pl_inode_t *pl_inode)
+do_blocked_rw(pl_inode_t *pl_inode)
{
- struct list_head wind_list;
- pl_rw_req_t *rw = NULL;
- pl_rw_req_t *tmp = NULL;
+ struct list_head wind_list;
+ pl_rw_req_t *rw = NULL;
+ pl_rw_req_t *tmp = NULL;
- INIT_LIST_HEAD (&wind_list);
+ INIT_LIST_HEAD(&wind_list);
- pthread_mutex_lock (&pl_inode->mutex);
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ list_for_each_entry_safe(rw, tmp, &pl_inode->rw_list, list)
{
- list_for_each_entry_safe (rw, tmp, &pl_inode->rw_list, list) {
- if (__rw_allowable (pl_inode, &rw->region,
- rw->stub->fop)) {
- list_del_init (&rw->list);
- list_add_tail (&rw->list, &wind_list);
- }
+ if (__rw_allowable(pl_inode, &rw->region, rw->stub->fop)) {
+ list_del_init(&rw->list);
+ list_add_tail(&rw->list, &wind_list);
+ if (pl_inode->mlock_enforced &&
+ pl_inode->track_fop_wind_count) {
+ pl_inode->fop_wind_count++;
}
+ }
}
- pthread_mutex_unlock (&pl_inode->mutex);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
- list_for_each_entry_safe (rw, tmp, &wind_list, list) {
- list_del_init (&rw->list);
- call_resume (rw->stub);
- GF_FREE (rw);
- }
+ list_for_each_entry_safe(rw, tmp, &wind_list, list)
+ {
+ list_del_init(&rw->list);
+ call_resume(rw->stub);
+ GF_FREE(rw);
+ }
- return;
+ return;
}
+/* When mandatory locking is enforced:
+   An IO request on a region that lies outside the boundary of the granted
+   mandatory lock will be rejected.
+
+   Note: There is no IO blocking when mandatory locking is enforced, since the
+   request may carry stale data from an old client.
+ */
+static gf_boolean_t
+within_range(posix_lock_t *existing, posix_lock_t *new)
+{
+ if (existing->fl_start <= new->fl_start && existing->fl_end >= new->fl_end)
+ return _gf_true;
+
+ return _gf_false;
+}
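+/* Illustration with hypothetical ranges: under lock enforcement, an existing
+ * mandatory lock on bytes [0, 99] allows the same owner to perform IO on
+ * [10, 19], while IO on [90, 109] falls outside the granted range and is
+ * rejected; overlapping IO from any other owner is rejected as well. */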
static int
-__rw_allowable (pl_inode_t *pl_inode, posix_lock_t *region,
- glusterfs_fop_t op)
+__rw_allowable(pl_inode_t *pl_inode, posix_lock_t *region, glusterfs_fop_t op)
{
- posix_lock_t *l = NULL;
- int ret = 1;
+ posix_lock_t *l = NULL;
+ posix_locks_private_t *priv = THIS->private;
+ int ret = 1;
- list_for_each_entry (l, &pl_inode->ext_list, list) {
- if (locks_overlap (l, region) && !same_owner (l, region)) {
- if ((op == GF_FOP_READ) && (l->fl_type != F_WRLCK))
- continue;
- ret = 0;
- break;
+ if (pl_inode->mlock_enforced) {
+ list_for_each_entry(l, &pl_inode->ext_list, list)
+ {
+ /*
+ with lock enforced (fencing) there should not be any blocking
+ lock coexisting.
+ */
+ if (same_owner(l, region)) {
+ /* Should range check be strict for same owner with fencing? */
+ if (locks_overlap(l, region)) {
+ if (within_range(l, region)) {
+ return 1;
+ } else {
+ /*
+ Should we allow read fop if it does not fit it in the
+ range?
+ if (op == GF_FOP_READ && l->fl_type != F_WRLCK) {
+ return 1;
+ }
+ */
+ return 0;
+ }
}
+ } else {
+ if (locks_overlap(l, region)) {
+ /*
+ with fencing should a read from a different owner be
+ allowed if the mandatory lock taken is F_RDLCK?
+ if (op == GF_FOP_READ && l->fl_type != F_WRLCK) {
+ return 1;
+ }
+ */
+ return 0;
+ }
+ }
}
- return ret;
-}
+ /* No lock has been taken by this owner */
+ return 0;
+ }
+
+ list_for_each_entry(l, &pl_inode->ext_list, list)
+ {
+ if (!l->blocked && locks_overlap(l, region) && !same_owner(l, region)) {
+ if ((op == GF_FOP_READ) && (l->fl_type != F_WRLCK))
+ continue;
+ /* Check for mandatory lock under optimal
+ * mandatory-locking mode */
+ if (priv->mandatory_mode == MLK_OPTIMAL &&
+ !(l->lk_flags & GF_LK_MANDATORY))
+ continue;
+ ret = 0;
+ break;
+ }
+ }
+ return ret;
+}
int
-pl_readv_cont (call_frame_t *frame, xlator_t *this,
- fd_t *fd, size_t size, off_t offset)
+pl_readv_cont(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata)
{
- STACK_WIND (frame, pl_readv_cbk,
- FIRST_CHILD (this), FIRST_CHILD (this)->fops->readv,
- fd, size, offset);
+ pl_track_io_fop_count(frame->local, this, INCREMENT);
- return 0;
-}
+ STACK_WIND(frame, pl_readv_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readv, fd, size, offset, flags, xdata);
+ return 0;
+}
int
-pl_readv (call_frame_t *frame, xlator_t *this,
- fd_t *fd, size_t size, off_t offset)
-{
- posix_locks_private_t *priv = NULL;
- pl_inode_t *pl_inode = NULL;
- pl_rw_req_t *rw = NULL;
- posix_lock_t region = {.list = {0, }, };
- int op_ret = 0;
- int op_errno = 0;
- char wind_needed = 1;
-
-
- priv = this->private;
- pl_inode = pl_inode_get (this, fd->inode);
-
- if (priv->mandatory && pl_inode->mandatory) {
- region.fl_start = offset;
- region.fl_end = offset + size - 1;
- region.transport = frame->root->trans;
- region.fd_num = fd_to_fdnum(fd);
- region.client_pid = frame->root->pid;
- region.owner = frame->root->lk_owner;
-
- pthread_mutex_lock (&pl_inode->mutex);
- {
- wind_needed = __rw_allowable (pl_inode, &region,
- GF_FOP_READ);
- if (wind_needed) {
- goto unlock;
- }
-
- if (fd->flags & O_NONBLOCK) {
- gf_log (this->name, GF_LOG_TRACE,
- "returning EAGAIN as fd is O_NONBLOCK");
- op_errno = EAGAIN;
- op_ret = -1;
- goto unlock;
- }
-
- rw = GF_CALLOC (1, sizeof (*rw),
- gf_locks_mt_pl_rw_req_t);
- if (!rw) {
- op_errno = ENOMEM;
- op_ret = -1;
- goto unlock;
- }
-
- rw->stub = fop_readv_stub (frame, pl_readv_cont,
- fd, size, offset);
- if (!rw->stub) {
- op_errno = ENOMEM;
- op_ret = -1;
- GF_FREE (rw);
- goto unlock;
- }
+pl_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata)
+{
+ pl_local_t *local = NULL;
+ pl_inode_t *pl_inode = NULL;
+ pl_rw_req_t *rw = NULL;
+ posix_lock_t region = {
+ .list =
+ {
+ 0,
+ },
+ };
+ gf_boolean_t enabled = _gf_false;
+ gf_boolean_t can_block = _gf_true;
+ int op_ret = 0;
+ int op_errno = 0;
+ int allowed = 1;
+
+ GF_VALIDATE_OR_GOTO("locks", this, unwind);
+
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+
+ if (!frame->local) {
+ frame->local = mem_get0(this->local_pool);
+ local = frame->local;
+ local->inode = inode_ref(fd->inode);
+ local->fd = fd_ref(fd);
+ }
+
+ pl_inode = pl_inode_get(this, fd->inode, local);
+ if (!pl_inode) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ if (frame->root->pid < 0)
+ enabled = _gf_false;
+ else
+ enabled = pl_is_mandatory_locking_enabled(pl_inode);
+
+ if (enabled) {
+ region.fl_start = offset;
+ region.fl_end = offset + size - 1;
+ region.client = frame->root->client;
+ region.fd_num = fd_to_fdnum(fd);
+ region.client_pid = frame->root->pid;
+ region.owner = frame->root->lk_owner;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ allowed = pl_is_fop_allowed(pl_inode, &region, fd, GF_FOP_READ,
+ &can_block);
+ if (allowed == 1) {
+ if (pl_inode->mlock_enforced &&
+ pl_inode->track_fop_wind_count) {
+ pl_inode->fop_wind_count++;
+ }
+ goto unlock;
+ } else if (!can_block) {
+ op_errno = EAGAIN;
+ op_ret = -1;
+ goto unlock;
+ }
- rw->region = region;
+ rw = GF_MALLOC(sizeof(*rw), gf_locks_mt_pl_rw_req_t);
+ if (!rw) {
+ op_errno = ENOMEM;
+ op_ret = -1;
+ goto unlock;
+ }
- list_add_tail (&rw->list, &pl_inode->rw_list);
- }
- unlock:
- pthread_mutex_unlock (&pl_inode->mutex);
- }
+ rw->stub = fop_readv_stub(frame, pl_readv_cont, fd, size, offset,
+ flags, xdata);
+ if (!rw->stub) {
+ op_errno = ENOMEM;
+ op_ret = -1;
+ GF_FREE(rw);
+ goto unlock;
+ }
+ rw->region = region;
- if (wind_needed) {
- STACK_WIND (frame, pl_readv_cbk,
- FIRST_CHILD (this), FIRST_CHILD (this)->fops->readv,
- fd, size, offset);
+ list_add_tail(&rw->list, &pl_inode->rw_list);
}
+ unlock:
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
+
+ if (allowed == 1) {
+ STACK_WIND(frame, pl_readv_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readv, fd, size, offset, flags,
+ xdata);
+ }
+unwind:
+ if (op_ret == -1)
+ PL_STACK_UNWIND(readv, xdata, frame, op_ret, op_errno, NULL, 0, NULL,
+ NULL, NULL);
- if (op_ret == -1)
- STACK_UNWIND_STRICT (readv, frame, -1, op_errno,
- NULL, 0, NULL, NULL);
-
- return 0;
+ return 0;
}
-
int
-pl_writev_cont (call_frame_t *frame, xlator_t *this, fd_t *fd,
- struct iovec *vector, int count, off_t offset,
- struct iobref *iobref)
+pl_writev_cont(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iovec *vector, int count, off_t offset, uint32_t flags,
+ struct iobref *iobref, dict_t *xdata)
{
- STACK_WIND (frame, pl_writev_cbk,
- FIRST_CHILD (this), FIRST_CHILD (this)->fops->writev,
- fd, vector, count, offset, iobref);
+ pl_track_io_fop_count(frame->local, this, INCREMENT);
- return 0;
-}
+ STACK_WIND(frame, pl_writev_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->writev, fd, vector, count, offset,
+ flags, iobref, xdata);
+ return 0;
+}
int
-pl_writev (call_frame_t *frame, xlator_t *this, fd_t *fd,
- struct iovec *vector, int32_t count, off_t offset,
- struct iobref *iobref)
-{
- posix_locks_private_t *priv = NULL;
- pl_inode_t *pl_inode = NULL;
- pl_rw_req_t *rw = NULL;
- posix_lock_t region = {.list = {0, }, };
- int op_ret = 0;
- int op_errno = 0;
- char wind_needed = 1;
-
-
- priv = this->private;
- pl_inode = pl_inode_get (this, fd->inode);
-
- if (priv->mandatory && pl_inode->mandatory) {
- region.fl_start = offset;
- region.fl_end = offset + iov_length (vector, count) - 1;
- region.transport = frame->root->trans;
- region.fd_num = fd_to_fdnum(fd);
- region.client_pid = frame->root->pid;
- region.owner = frame->root->lk_owner;
-
- pthread_mutex_lock (&pl_inode->mutex);
- {
- wind_needed = __rw_allowable (pl_inode, &region,
- GF_FOP_WRITE);
- if (wind_needed)
- goto unlock;
-
- if (fd->flags & O_NONBLOCK) {
- gf_log (this->name, GF_LOG_TRACE,
- "returning EAGAIN because fd is "
- "O_NONBLOCK");
- op_errno = EAGAIN;
- op_ret = -1;
- goto unlock;
- }
-
- rw = GF_CALLOC (1, sizeof (*rw),
- gf_locks_mt_pl_rw_req_t);
- if (!rw) {
- op_errno = ENOMEM;
- op_ret = -1;
- goto unlock;
- }
-
- rw->stub = fop_writev_stub (frame, pl_writev_cont,
- fd, vector, count, offset,
- iobref);
- if (!rw->stub) {
- op_errno = ENOMEM;
- op_ret = -1;
- GF_FREE (rw);
- goto unlock;
- }
+pl_writev(call_frame_t *frame, xlator_t *this, fd_t *fd, struct iovec *vector,
+ int32_t count, off_t offset, uint32_t flags, struct iobref *iobref,
+ dict_t *xdata)
+{
+ pl_local_t *local = NULL;
+ pl_inode_t *pl_inode = NULL;
+ pl_rw_req_t *rw = NULL;
+ posix_lock_t region = {
+ .list =
+ {
+ 0,
+ },
+ };
+ gf_boolean_t enabled = _gf_false;
+ gf_boolean_t can_block = _gf_true;
+ int op_ret = 0;
+ int op_errno = 0;
+ int allowed = 1;
+
+ GF_VALIDATE_OR_GOTO("locks", this, unwind);
+
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+
+ if (!frame->local) {
+ frame->local = mem_get0(this->local_pool);
+ local = frame->local;
+ local->inode = inode_ref(fd->inode);
+ local->fd = fd_ref(fd);
+ }
+
+ pl_inode = pl_inode_get(this, fd->inode, local);
+ if (!pl_inode) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ if (frame->root->pid < 0)
+ enabled = _gf_false;
+ else
+ enabled = pl_is_mandatory_locking_enabled(pl_inode);
+
+ if (enabled) {
+ region.fl_start = offset;
+ region.fl_end = offset + iov_length(vector, count) - 1;
+ region.client = frame->root->client;
+ region.fd_num = fd_to_fdnum(fd);
+ region.client_pid = frame->root->pid;
+ region.owner = frame->root->lk_owner;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ allowed = pl_is_fop_allowed(pl_inode, &region, fd, GF_FOP_WRITE,
+ &can_block);
+ if (allowed == 1) {
+ if (pl_inode->mlock_enforced &&
+ pl_inode->track_fop_wind_count) {
+ pl_inode->fop_wind_count++;
+ }
+ goto unlock;
+ } else if (!can_block) {
+ if (pl_inode->mlock_enforced) {
+ op_errno = EBUSY;
+ } else {
+ op_errno = EAGAIN;
+ }
- rw->region = region;
+ op_ret = -1;
+ goto unlock;
+ }
- list_add_tail (&rw->list, &pl_inode->rw_list);
- }
- unlock:
- pthread_mutex_unlock (&pl_inode->mutex);
- }
+ rw = GF_MALLOC(sizeof(*rw), gf_locks_mt_pl_rw_req_t);
+ if (!rw) {
+ op_errno = ENOMEM;
+ op_ret = -1;
+ goto unlock;
+ }
+ rw->stub = fop_writev_stub(frame, pl_writev_cont, fd, vector, count,
+ offset, flags, iobref, xdata);
+ if (!rw->stub) {
+ op_errno = ENOMEM;
+ op_ret = -1;
+ GF_FREE(rw);
+ goto unlock;
+ }
- if (wind_needed)
- STACK_WIND (frame, pl_writev_cbk,
- FIRST_CHILD (this), FIRST_CHILD (this)->fops->writev,
- fd, vector, count, offset, iobref);
+ rw->region = region;
- if (op_ret == -1)
- STACK_UNWIND_STRICT (writev, frame, -1, op_errno, NULL, NULL);
+ list_add_tail(&rw->list, &pl_inode->rw_list);
+ }
+ unlock:
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
+
+ if (allowed == 1) {
+ STACK_WIND(frame, pl_writev_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->writev, fd, vector, count, offset,
+ flags, iobref, xdata);
+ }
+unwind:
+ if (op_ret == -1)
+ PL_STACK_UNWIND(writev, xdata, frame, op_ret, op_errno, NULL, NULL,
+ NULL);
- return 0;
+ return 0;
}
static int
-__fd_has_locks (pl_inode_t *pl_inode, fd_t *fd)
+__fd_has_locks(pl_inode_t *pl_inode, fd_t *fd)
{
- int found = 0;
- posix_lock_t *l = NULL;
+ posix_lock_t *l = NULL;
- list_for_each_entry (l, &pl_inode->ext_list, list) {
- if ((l->fd_num == fd_to_fdnum(fd))) {
- found = 1;
- break;
- }
+ list_for_each_entry(l, &pl_inode->ext_list, list)
+ {
+ if (l->fd_num == fd_to_fdnum(fd)) {
+ return 1;
}
+ }
- return found;
+ return 0;
}
static posix_lock_t *
-lock_dup (posix_lock_t *lock)
+lock_dup(posix_lock_t *lock)
{
- posix_lock_t *new_lock = NULL;
-
- new_lock = new_posix_lock (&lock->user_flock, lock->transport,
- lock->client_pid, lock->owner,
- (fd_t *)lock->fd_num);
- return new_lock;
+ int32_t op_errno = 0;
+ return new_posix_lock(&lock->user_flock, lock->client, lock->client_pid,
+ &lock->owner, (fd_t *)lock->fd_num, lock->lk_flags,
+ lock->blocking, &op_errno);
}
static int
-__dup_locks_to_fdctx (pl_inode_t *pl_inode, fd_t *fd,
- pl_fdctx_t *fdctx)
-{
- posix_lock_t *l = NULL;
- posix_lock_t *duplock = NULL;
- int ret = 0;
-
- list_for_each_entry (l, &pl_inode->ext_list, list) {
- if ((l->fd_num == fd_to_fdnum(fd))) {
- duplock = lock_dup (l);
- if (!duplock) {
- ret = -1;
- break;
- }
+__dup_locks_to_fdctx(pl_inode_t *pl_inode, fd_t *fd, pl_fdctx_t *fdctx)
+{
+ posix_lock_t *l = NULL;
+ posix_lock_t *duplock = NULL;
+ int ret = 0;
+
+ list_for_each_entry(l, &pl_inode->ext_list, list)
+ {
+ if (l->fd_num == fd_to_fdnum(fd)) {
+ duplock = lock_dup(l);
+ if (!duplock) {
+ ret = -1;
+ break;
+ }
- list_add_tail (&duplock->list, &fdctx->locks_list);
- }
+ list_add_tail(&duplock->list, &fdctx->locks_list);
}
+ }
- return ret;
+ return ret;
}
static int
-__copy_locks_to_fdctx (pl_inode_t *pl_inode, fd_t *fd,
- pl_fdctx_t *fdctx)
+__copy_locks_to_fdctx(pl_inode_t *pl_inode, fd_t *fd, pl_fdctx_t *fdctx)
{
- int ret = 0;
-
- ret = __dup_locks_to_fdctx (pl_inode, fd, fdctx);
- if (ret)
- goto out;
-
-out:
- return ret;
-
+ return __dup_locks_to_fdctx(pl_inode, fd, fdctx);
}
static void
-pl_mark_eol_lock (posix_lock_t *lock)
+pl_mark_eol_lock(posix_lock_t *lock)
{
- lock->user_flock.l_type = GF_LK_EOL;
- return;
+ lock->user_flock.l_type = GF_LK_EOL;
+ return;
}
static posix_lock_t *
-__get_next_fdctx_lock (pl_fdctx_t *fdctx)
+__get_next_fdctx_lock(pl_fdctx_t *fdctx)
{
- posix_lock_t *lock = NULL;
+ posix_lock_t *lock = NULL;
- GF_ASSERT (fdctx);
+ GF_ASSERT(fdctx);
- if (list_empty (&fdctx->locks_list)) {
- gf_log (THIS->name, GF_LOG_DEBUG,
- "fdctx lock list empty");
- goto out;
- }
+ if (list_empty(&fdctx->locks_list)) {
+ gf_log(THIS->name, GF_LOG_DEBUG, "fdctx lock list empty");
+ goto out;
+ }
- lock = list_entry (fdctx->locks_list.next, typeof (*lock),
- list);
+ lock = list_entry(fdctx->locks_list.next, typeof(*lock), list);
- GF_ASSERT (lock);
+ GF_ASSERT(lock);
- list_del_init (&lock->list);
+ list_del_init(&lock->list);
out:
- return lock;
+ return lock;
}
static int
-__set_next_lock_fd (pl_fdctx_t *fdctx, posix_lock_t *reqlock)
+__set_next_lock_fd(pl_fdctx_t *fdctx, posix_lock_t *reqlock)
{
- posix_lock_t *lock = NULL;
- int ret = 0;
+ posix_lock_t *lock = NULL;
+ int ret = 0;
- GF_ASSERT (fdctx);
+ GF_ASSERT(fdctx);
- lock = __get_next_fdctx_lock (fdctx);
- if (!lock) {
- gf_log (THIS->name, GF_LOG_DEBUG,
- "marking EOL in reqlock");
- pl_mark_eol_lock (reqlock);
- goto out;
- }
+ lock = __get_next_fdctx_lock(fdctx);
+ if (!lock) {
+ gf_log(THIS->name, GF_LOG_DEBUG, "marking EOL in reqlock");
+ pl_mark_eol_lock(reqlock);
+ goto out;
+ }
- reqlock->user_flock = lock->user_flock;
- reqlock->fl_start = lock->fl_start;
- reqlock->fl_type = lock->fl_type;
- reqlock->fl_end = lock->fl_end;
- reqlock->owner = lock->owner;
+ reqlock->user_flock = lock->user_flock;
+ reqlock->fl_start = lock->fl_start;
+ reqlock->fl_type = lock->fl_type;
+ reqlock->fl_end = lock->fl_end;
+ reqlock->owner = lock->owner;
out:
- if (lock)
- __destroy_lock (lock);
+ if (lock)
+ __destroy_lock(lock);
- return ret;
+ return ret;
}
static int
-pl_getlk_fd (xlator_t *this, pl_inode_t *pl_inode,
- fd_t *fd, posix_lock_t *reqlock)
+pl_getlk_fd(xlator_t *this, pl_inode_t *pl_inode, fd_t *fd,
+ posix_lock_t *reqlock)
{
- uint64_t tmp = 0;
- pl_fdctx_t *fdctx = NULL;
- int ret = 0;
-
- pthread_mutex_lock (&pl_inode->mutex);
- {
- if (!__fd_has_locks (pl_inode, fd)) {
- gf_log (this->name, GF_LOG_DEBUG,
- "fd=%p has no active locks", fd);
- ret = 0;
- goto unlock;
- }
-
- gf_log (this->name, GF_LOG_DEBUG,
- "There are active locks on fd");
-
- ret = fd_ctx_get (fd, this, &tmp);
- fdctx = (pl_fdctx_t *)(long) tmp;
-
- if (list_empty (&fdctx->locks_list)) {
- gf_log (this->name, GF_LOG_TRACE,
- "no fdctx -> copying all locks on fd");
+ uint64_t tmp = 0;
+ pl_fdctx_t *fdctx = NULL;
+ int ret = 0;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ if (!__fd_has_locks(pl_inode, fd)) {
+ pthread_mutex_unlock(&pl_inode->mutex);
+ gf_log(this->name, GF_LOG_DEBUG, "fd=%p has no active locks", fd);
+ ret = 0;
+ goto out;
+ }
- ret = __copy_locks_to_fdctx (pl_inode, fd, fdctx);
- if (ret) {
- goto unlock;
- }
+ gf_log(this->name, GF_LOG_DEBUG, "There are active locks on fd");
- ret = __set_next_lock_fd (fdctx, reqlock);
+ ret = fd_ctx_get(fd, this, &tmp);
+ fdctx = (pl_fdctx_t *)(long)tmp;
- } else {
- gf_log (this->name, GF_LOG_TRACE,
- "fdctx present -> returning the next lock");
- ret = __set_next_lock_fd (fdctx, reqlock);
- if (ret) {
- gf_log (this->name, GF_LOG_DEBUG,
- "could not get next lock of fd");
- goto unlock;
- }
- }
+ if (list_empty(&fdctx->locks_list)) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "no fdctx -> copying all locks on fd");
+
+ ret = __copy_locks_to_fdctx(pl_inode, fd, fdctx);
+ if (ret) {
+ goto unlock;
+ }
+
+ ret = __set_next_lock_fd(fdctx, reqlock);
+
+ } else {
+ gf_log(this->name, GF_LOG_TRACE,
+ "fdctx present -> returning the next lock");
+ ret = __set_next_lock_fd(fdctx, reqlock);
+ if (ret) {
+ pthread_mutex_unlock(&pl_inode->mutex);
+ gf_log(this->name, GF_LOG_DEBUG,
+ "could not get next lock of fd");
+ goto out;
+ }
}
+ }
unlock:
- pthread_mutex_unlock (&pl_inode->mutex);
- return ret;
-
+ pthread_mutex_unlock(&pl_inode->mutex);
+out:
+ return ret;
}
int
-pl_lk (call_frame_t *frame, xlator_t *this,
- fd_t *fd, int32_t cmd, struct gf_flock *flock)
-{
- void *transport = NULL;
- pid_t client_pid = 0;
- uint64_t owner = 0;
- pl_inode_t *pl_inode = NULL;
- int op_ret = 0;
- int op_errno = 0;
- int can_block = 0;
- posix_lock_t *reqlock = NULL;
- posix_lock_t *conf = NULL;
- int ret = 0;
-
- transport = frame->root->trans;
- client_pid = frame->root->pid;
- owner = frame->root->lk_owner;
-
- if ((flock->l_start < 0) || (flock->l_len < 0)) {
- op_ret = -1;
- op_errno = EINVAL;
- goto unwind;
- }
+pl_metalock_is_active(pl_inode_t *pl_inode)
+{
+ if (list_empty(&pl_inode->metalk_list))
+ return 0;
+ else
+ return 1;
+}
- pl_inode = pl_inode_get (this, fd->inode);
- if (!pl_inode) {
- op_ret = -1;
- op_errno = ENOMEM;
- goto unwind;
- }
+void
+__pl_queue_lock(pl_inode_t *pl_inode, posix_lock_t *reqlock)
+{
+ list_add_tail(&reqlock->list, &pl_inode->queued_locks);
+}
- reqlock = new_posix_lock (flock, transport, client_pid,
- owner, fd);
+int
+pl_lk(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t cmd,
+ struct gf_flock *flock, dict_t *xdata)
+{
+ pl_inode_t *pl_inode = NULL;
+ int op_ret = 0;
+ int op_errno = 0;
+ int can_block = 0;
+ posix_lock_t *reqlock = NULL;
+ posix_lock_t *conf = NULL;
+ uint32_t lk_flags = 0;
+ posix_locks_private_t *priv = this->private;
+ pl_local_t *local = NULL;
+ short lock_type = 0;
+
+ int ret = dict_get_uint32(xdata, GF_LOCK_MODE, &lk_flags);
+ if (ret == 0) {
+ if (priv->mandatory_mode == MLK_NONE)
+ gf_log(this->name, GF_LOG_DEBUG,
+ "Lock flags received "
+ "in a non-mandatory locking environment, "
+ "continuing");
+ else
+ gf_log(this->name, GF_LOG_DEBUG,
+ "Lock flags received, "
+ "continuing");
+ }
+
+ if ((flock->l_start < 0) || ((flock->l_start + flock->l_len) < 0)) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto unwind;
+ }
+
+ /* As per 'man 3 fcntl', the value of l_len may be
+ * negative. In such cases, lock request should be
+ * considered for the range starting at 'l_start+l_len'
+ * and ending at 'l_start-1'. Update the fields accordingly.
+ */
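+    /* Worked example with hypothetical values: l_start=100, l_len=-10 is
+     * normalized below to l_start=90, l_len=10, i.e. the request covers
+     * bytes 90 through 99. */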
+ if (flock->l_len < 0) {
+ flock->l_start += flock->l_len;
+ flock->l_len = labs(flock->l_len);
+ }
+
+ local = mem_get0(this->local_pool);
+ if (!local) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ } else {
+ frame->local = local;
+ local->fd = fd_ref(fd);
+ }
- if (!reqlock) {
- op_ret = -1;
- op_errno = ENOMEM;
- goto unwind;
- }
+ pl_inode = pl_inode_get(this, fd->inode, local);
+ if (!pl_inode) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
- pl_trace_in (this, frame, fd, NULL, cmd, flock, NULL);
+ reqlock = new_posix_lock(flock, frame->root->client, frame->root->pid,
+ &frame->root->lk_owner, fd, lk_flags, can_block,
+ &op_errno);
- switch (cmd) {
+ if (!reqlock) {
+ op_ret = -1;
+ goto unwind;
+ }
+ pl_trace_in(this, frame, fd, NULL, cmd, flock, NULL);
+
+ switch (cmd) {
case F_RESLK_LCKW:
- can_block = 1;
+ can_block = 1;
- /* fall through */
+ /* fall through */
case F_RESLK_LCK:
- memcpy (&reqlock->user_flock, flock, sizeof (struct gf_flock));
- reqlock->frame = frame;
- reqlock->this = this;
+ reqlock->frame = frame;
+ reqlock->this = this;
- ret = pl_reserve_setlk (this, pl_inode, reqlock,
- can_block);
- if (ret < 0) {
- if (can_block)
- goto out;
+ ret = pl_reserve_setlk(this, pl_inode, reqlock, can_block);
+ if (ret < 0) {
+ if (can_block)
+ goto out;
- op_ret = -1;
- op_errno = -ret;
- __destroy_lock (reqlock);
- goto unwind;
- }
- /* Finally a getlk and return the call */
- conf = pl_getlk (pl_inode, reqlock);
- if (conf)
- posix_lock_to_flock (conf, flock);
- break;
+ op_ret = -1;
+ op_errno = -ret;
+ __destroy_lock(reqlock);
+ goto unwind;
+ }
+ /* Finally a getlk and return the call */
+ conf = pl_getlk(pl_inode, reqlock);
+ if (conf)
+ posix_lock_to_flock(conf, flock);
+ break;
case F_RESLK_UNLCK:
- reqlock->frame = frame;
- reqlock->this = this;
- ret = pl_reserve_unlock (this, pl_inode, reqlock);
- if (ret < 0) {
- op_ret = -1;
- op_errno = -ret;
- }
- __destroy_lock (reqlock);
- goto unwind;
+ reqlock->frame = frame;
+ reqlock->this = this;
+ ret = pl_reserve_unlock(this, pl_inode, reqlock);
+ if (ret < 0) {
+ op_ret = -1;
+ op_errno = -ret;
+ }
+ __destroy_lock(reqlock);
+ goto unwind;
- break;
+ break;
case F_GETLK_FD:
- reqlock->frame = frame;
- reqlock->this = this;
- ret = pl_verify_reservelk (this, pl_inode, reqlock, can_block);
- GF_ASSERT (ret >= 0);
-
- ret = pl_getlk_fd (this, pl_inode, fd, reqlock);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_DEBUG,
- "getting locks on fd failed");
- op_ret = -1;
- op_errno = ENOLCK;
- goto unwind;
- }
+ reqlock->frame = frame;
+ reqlock->this = this;
+ ret = pl_verify_reservelk(this, pl_inode, reqlock, can_block);
+ GF_ASSERT(ret >= 0);
+
+ ret = pl_getlk_fd(this, pl_inode, fd, reqlock);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_DEBUG, "getting locks on fd failed");
+ op_ret = -1;
+ op_errno = ENOLCK;
+ goto unwind;
+ }
- gf_log (this->name, GF_LOG_TRACE,
- "Replying with a lock on fd for healing");
+ gf_log(this->name, GF_LOG_TRACE,
+ "Replying with a lock on fd for healing");
- posix_lock_to_flock (reqlock, flock);
- __destroy_lock (reqlock);
+ posix_lock_to_flock(reqlock, flock);
+ __destroy_lock(reqlock);
- break;
+ break;
#if F_GETLK != F_GETLK64
case F_GETLK64:
#endif
case F_GETLK:
- conf = pl_getlk (pl_inode, reqlock);
- posix_lock_to_flock (conf, flock);
- __destroy_lock (reqlock);
+ conf = pl_getlk(pl_inode, reqlock);
+ posix_lock_to_flock(conf, flock);
+ __destroy_lock(reqlock);
- break;
+ break;
#if F_SETLKW != F_SETLKW64
case F_SETLKW64:
#endif
case F_SETLKW:
- can_block = 1;
- reqlock->frame = frame;
- reqlock->this = this;
-
- /* fall through */
+ can_block = 1;
+ reqlock->frame = frame;
+ reqlock->this = this;
+ reqlock->blocking = can_block;
+ /* fall through */
#if F_SETLK != F_SETLK64
case F_SETLK64:
#endif
case F_SETLK:
- memcpy (&reqlock->user_flock, flock, sizeof (struct gf_flock));
- ret = pl_verify_reservelk (this, pl_inode, reqlock, can_block);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_TRACE,
- "Lock blocked due to conflicting reserve lock");
- goto out;
+ reqlock->frame = frame;
+ reqlock->this = this;
+ lock_type = flock->l_type;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ if (pl_inode->migrated) {
+ op_errno = EREMOTE;
+ pthread_mutex_unlock(&pl_inode->mutex);
+ STACK_UNWIND_STRICT(lk, frame, -1, op_errno, flock, xdata);
+
+ __destroy_lock(reqlock);
+ goto out;
}
- ret = pl_setlk (this, pl_inode, reqlock,
- can_block);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+ ret = pl_verify_reservelk(this, pl_inode, reqlock, can_block);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "Lock blocked due to conflicting reserve lock");
+ goto out;
+ }
+
+ if (reqlock->fl_type != F_UNLCK && pl_inode->mlock_enforced) {
+ ret = pl_lock_preempt(pl_inode, reqlock);
if (ret == -1) {
- if ((can_block) && (F_UNLCK != reqlock->fl_type)) {
- pl_trace_block (this, frame, fd, NULL, cmd, flock, NULL);
- goto out;
- }
- gf_log (this->name, GF_LOG_DEBUG, "returning EAGAIN");
- op_ret = -1;
- op_errno = EAGAIN;
- __destroy_lock (reqlock);
+ gf_log(this->name, GF_LOG_ERROR, "lock preempt failed");
+ op_ret = -1;
+ op_errno = EAGAIN;
+ __destroy_lock(reqlock);
+ goto out;
}
- }
+
+ pl_trace_block(this, frame, fd, NULL, cmd, flock, NULL);
+ goto unwind;
+ }
+
+ ret = pl_setlk(this, pl_inode, reqlock, can_block);
+ if (ret == -1) {
+ if ((can_block) && (F_UNLCK != lock_type)) {
+ goto out;
+ }
+ gf_log(this->name, GF_LOG_DEBUG, "returning EAGAIN");
+ op_ret = -1;
+ op_errno = EAGAIN;
+ __destroy_lock(reqlock);
+ } else if (ret == -2) {
+ goto out;
+ } else if ((0 == ret) && (F_UNLCK == flock->l_type)) {
+ /* For NLM's last "unlock on fd" detection */
+ if (pl_locks_by_fd(pl_inode, fd))
+ flock->l_type = F_RDLCK;
+ else
+ flock->l_type = F_UNLCK;
+ }
+ }
unwind:
- pl_trace_out (this, frame, fd, NULL, cmd, flock, op_ret, op_errno, NULL);
- pl_update_refkeeper (this, fd->inode);
- STACK_UNWIND_STRICT (lk, frame, op_ret, op_errno, flock);
+ pl_trace_out(this, frame, fd, NULL, cmd, flock, op_ret, op_errno, NULL);
+ pl_update_refkeeper(this, fd->inode);
+
+ PL_STACK_UNWIND(lk, xdata, frame, op_ret, op_errno, flock, xdata);
out:
- return 0;
+ return 0;
}
-
/* TODO: this function just logs, no action required?? */
int
-pl_forget (xlator_t *this,
- inode_t *inode)
+pl_forget(xlator_t *this, inode_t *inode)
{
- pl_inode_t *pl_inode = NULL;
+ pl_inode_t *pl_inode = NULL;
- posix_lock_t *ext_tmp = NULL;
- posix_lock_t *ext_l = NULL;
- struct list_head posixlks_released;
+ posix_lock_t *ext_tmp = NULL;
+ posix_lock_t *ext_l = NULL;
+ struct list_head posixlks_released;
- pl_inode_lock_t *ino_tmp = NULL;
- pl_inode_lock_t *ino_l = NULL;
- struct list_head inodelks_released;
+ pl_inode_lock_t *ino_tmp = NULL;
+ pl_inode_lock_t *ino_l = NULL;
+ struct list_head inodelks_released;
- pl_rw_req_t *rw_tmp = NULL;
- pl_rw_req_t *rw_req = NULL;
+ pl_rw_req_t *rw_tmp = NULL;
+ pl_rw_req_t *rw_req = NULL;
- pl_entry_lock_t *entry_tmp = NULL;
- pl_entry_lock_t *entry_l = NULL;
- struct list_head entrylks_released;
+ pl_entry_lock_t *entry_tmp = NULL;
+ pl_entry_lock_t *entry_l = NULL;
+ struct list_head entrylks_released;
- pl_dom_list_t *dom = NULL;
- pl_dom_list_t *dom_tmp = NULL;
+ pl_dom_list_t *dom = NULL;
+ pl_dom_list_t *dom_tmp = NULL;
- INIT_LIST_HEAD (&posixlks_released);
- INIT_LIST_HEAD (&inodelks_released);
- INIT_LIST_HEAD (&entrylks_released);
+ INIT_LIST_HEAD(&posixlks_released);
+ INIT_LIST_HEAD(&inodelks_released);
+ INIT_LIST_HEAD(&entrylks_released);
- pl_inode = pl_inode_get (this, inode);
+ pl_inode = pl_inode_get(this, inode, NULL);
+ if (!pl_inode)
+ return 0;
- pthread_mutex_lock (&pl_inode->mutex);
- {
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ if (!list_empty(&pl_inode->rw_list)) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "Pending R/W requests found, releasing.");
+
+ list_for_each_entry_safe(rw_req, rw_tmp, &pl_inode->rw_list, list)
+ {
+ list_del(&rw_req->list);
+ call_stub_destroy(rw_req->stub);
+ GF_FREE(rw_req);
+ }
+ }
- if (!list_empty (&pl_inode->rw_list)) {
- gf_log (this->name, GF_LOG_WARNING,
- "Pending R/W requests found, releasing.");
+ if (!list_empty(&pl_inode->ext_list)) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "Pending fcntl locks found, releasing.");
- list_for_each_entry_safe (rw_req, rw_tmp, &pl_inode->rw_list,
- list) {
+ list_for_each_entry_safe(ext_l, ext_tmp, &pl_inode->ext_list, list)
+ {
+ __delete_lock(ext_l);
+ if (ext_l->blocked) {
+ list_add_tail(&ext_l->list, &posixlks_released);
+ continue;
+ }
+ __destroy_lock(ext_l);
+ }
+ }
- list_del (&rw_req->list);
- GF_FREE (rw_req);
- }
+ list_for_each_entry_safe(dom, dom_tmp, &pl_inode->dom_list, inode_list)
+ {
+ if (!list_empty(&dom->inodelk_list)) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "Pending inode locks found, releasing.");
+
+ list_for_each_entry_safe(ino_l, ino_tmp, &dom->inodelk_list,
+ list)
+ {
+ __delete_inode_lock(ino_l);
+ __pl_inodelk_unref(ino_l);
}
- if (!list_empty (&pl_inode->ext_list)) {
- gf_log (this->name, GF_LOG_WARNING,
- "Pending fcntl locks found, releasing.");
+ list_splice_init(&dom->blocked_inodelks, &inodelks_released);
+ }
+ if (!list_empty(&dom->entrylk_list)) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "Pending entry locks found, releasing.");
- list_for_each_entry_safe (ext_l, ext_tmp, &pl_inode->ext_list,
- list) {
+ list_for_each_entry_safe(entry_l, entry_tmp, &dom->entrylk_list,
+ domain_list)
+ {
+ list_del_init(&entry_l->domain_list);
- __delete_lock (pl_inode, ext_l);
- if (ext_l->blocked) {
- list_add_tail (&ext_l->list, &posixlks_released);
- continue;
- }
- __destroy_lock (ext_l);
- }
+ GF_FREE((char *)entry_l->basename);
+ GF_FREE(entry_l->connection_id);
+ GF_FREE(entry_l);
}
+ list_splice_init(&dom->blocked_entrylks, &entrylks_released);
+ }
+
+ list_del(&dom->inode_list);
+ gf_log("posix-locks", GF_LOG_TRACE, " Cleaning up domain: %s",
+ dom->domain);
+ GF_FREE((char *)(dom->domain));
+ GF_FREE(dom);
+ }
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ if (!list_empty(&posixlks_released)) {
+ list_for_each_entry_safe(ext_l, ext_tmp, &posixlks_released, list)
+ {
+ STACK_UNWIND_STRICT(lk, ext_l->frame, -1, 0, &ext_l->user_flock,
+ NULL);
+ __destroy_lock(ext_l);
+ }
+ }
- list_for_each_entry_safe (dom, dom_tmp, &pl_inode->dom_list, inode_list) {
+ if (!list_empty(&inodelks_released)) {
+ list_for_each_entry_safe(ino_l, ino_tmp, &inodelks_released,
+ blocked_locks)
+ {
+ STACK_UNWIND_STRICT(inodelk, ino_l->frame, -1, 0, NULL);
+ __pl_inodelk_unref(ino_l);
+ }
+ }
- if (!list_empty (&dom->inodelk_list)) {
- gf_log (this->name, GF_LOG_WARNING,
- "Pending inode locks found, releasing.");
+ if (!list_empty(&entrylks_released)) {
+ list_for_each_entry_safe(entry_l, entry_tmp, &entrylks_released,
+ blocked_locks)
+ {
+ STACK_UNWIND_STRICT(entrylk, entry_l->frame, -1, 0, NULL);
+ GF_FREE((char *)entry_l->basename);
+ GF_FREE(entry_l->connection_id);
+ GF_FREE(entry_l);
+ }
+ }
- list_for_each_entry_safe (ino_l, ino_tmp, &dom->inodelk_list, list) {
- __delete_inode_lock (ino_l);
- __destroy_inode_lock (ino_l);
- }
+ pthread_mutex_destroy(&pl_inode->mutex);
- list_splice_init (&dom->blocked_inodelks, &inodelks_released);
+ GF_FREE(pl_inode);
+ return 0;
+}
- }
- if (!list_empty (&dom->entrylk_list)) {
- gf_log (this->name, GF_LOG_WARNING,
- "Pending entry locks found, releasing.");
+int
+pl_release(xlator_t *this, fd_t *fd)
+{
+ pl_inode_t *pl_inode = NULL;
+ uint64_t tmp_pl_inode = 0;
+ int ret = -1;
+ uint64_t tmp = 0;
+ pl_fdctx_t *fdctx = NULL;
- list_for_each_entry_safe (entry_l, entry_tmp, &dom->entrylk_list, domain_list) {
- list_del_init (&entry_l->domain_list);
+ if (fd == NULL) {
+ goto out;
+ }
- if (entry_l->basename)
- GF_FREE ((char *)entry_l->basename);
- GF_FREE (entry_l);
- }
+ ret = inode_ctx_get(fd->inode, this, &tmp_pl_inode);
+ if (ret != 0)
+ goto clean;
- list_splice_init (&dom->blocked_entrylks, &entrylks_released);
- }
+ pl_inode = (pl_inode_t *)(long)tmp_pl_inode;
- list_del (&dom->inode_list);
- gf_log ("posix-locks", GF_LOG_TRACE,
- " Cleaning up domain: %s", dom->domain);
- GF_FREE ((char *)(dom->domain));
- GF_FREE (dom);
- }
+ pl_trace_release(this, fd);
- }
- pthread_mutex_unlock (&pl_inode->mutex);
+ gf_log(this->name, GF_LOG_TRACE, "Releasing all locks with fd %p", fd);
- list_for_each_entry_safe (ext_l, ext_tmp, &posixlks_released, list) {
+ delete_locks_of_fd(this, pl_inode, fd);
+ pl_update_refkeeper(this, fd->inode);
- STACK_UNWIND_STRICT (lk, ext_l->frame, -1, 0, &ext_l->user_flock);
- __destroy_lock (ext_l);
- }
+clean:
+ ret = fd_ctx_del(fd, this, &tmp);
+ if (ret) {
+ gf_log(this->name, GF_LOG_DEBUG, "Could not get fdctx");
+ goto out;
+ }
- list_for_each_entry_safe (ino_l, ino_tmp, &inodelks_released, blocked_locks) {
+ fdctx = (pl_fdctx_t *)(long)tmp;
- STACK_UNWIND_STRICT (inodelk, ino_l->frame, -1, 0);
- __destroy_inode_lock (ino_l);
- }
+ GF_FREE(fdctx);
+out:
+ return ret;
+}
- list_for_each_entry_safe (entry_l, entry_tmp, &entrylks_released, blocked_locks) {
+int
+pl_releasedir(xlator_t *this, fd_t *fd)
+{
+ int ret = -1;
+ uint64_t tmp = 0;
+ pl_fdctx_t *fdctx = NULL;
- STACK_UNWIND_STRICT (entrylk, entry_l->frame, -1, 0);
- if (entry_l->basename)
- GF_FREE ((char *)entry_l->basename);
- GF_FREE (entry_l);
+ if (fd == NULL) {
+ goto out;
+ }
- }
+ ret = fd_ctx_del(fd, this, &tmp);
+ if (ret) {
+ gf_log(this->name, GF_LOG_DEBUG, "Could not get fdctx");
+ goto out;
+ }
- GF_FREE (pl_inode);
+ fdctx = (pl_fdctx_t *)(long)tmp;
- return 0;
+ GF_FREE(fdctx);
+out:
+ return ret;
}
-int
-pl_release (xlator_t *this, fd_t *fd)
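+/* Ask the lower layers to also return the inode's link count in the lookup
+ * response by setting GET_LINK_COUNT in xdata; pl_lookup_cbk consumes the
+ * value via pl_check_link_count() below. */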
+static int32_t
+pl_request_link_count(dict_t **pxdata)
{
- pl_inode_t *pl_inode = NULL;
- uint64_t tmp_pl_inode = 0;
- int ret = -1;
- uint64_t tmp = 0;
- pl_fdctx_t *fdctx = NULL;
+ dict_t *xdata;
- if (fd == NULL) {
- goto out;
+ xdata = *pxdata;
+ if (xdata == NULL) {
+ xdata = dict_new();
+ if (xdata == NULL) {
+ return ENOMEM;
}
+ } else {
+ dict_ref(xdata);
+ }
- ret = inode_ctx_get (fd->inode, this, &tmp_pl_inode);
- if (ret != 0)
- goto out;
+ if (dict_set_uint32(xdata, GET_LINK_COUNT, 0) != 0) {
+ dict_unref(xdata);
+ return ENOMEM;
+ }
- pl_inode = (pl_inode_t *)(long)tmp_pl_inode;
+ *pxdata = xdata;
- pl_trace_release (this, fd);
+ return 0;
+}
- gf_log (this->name, GF_LOG_TRACE,
- "Releasing all locks with fd %p", fd);
+static int32_t
+pl_check_link_count(dict_t *xdata)
+{
+ int32_t count;
- delete_locks_of_fd (this, pl_inode, fd);
- pl_update_refkeeper (this, fd->inode);
+ /* In case we are unable to read the link count from xdata, we take a
+ * conservative approach and return -2, which will prevent the inode from
+ * being considered deleted. In fact it will cause link tracking for this
+ * inode to be disabled completely to avoid races. */
- ret = fd_ctx_del (fd, this, &tmp);
- if (ret) {
- gf_log (this->name, GF_LOG_DEBUG,
- "Could not get fdctx");
- goto out;
- }
+ if (xdata == NULL) {
+ return -2;
+ }
- fdctx = (pl_fdctx_t *)(long)tmp;
+ if (dict_get_int32(xdata, GET_LINK_COUNT, &count) != 0) {
+ return -2;
+ }
- GF_FREE (fdctx);
-out:
- return ret;
+ return count;
}
-int
-pl_releasedir (xlator_t *this, fd_t *fd)
+int32_t
+pl_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, inode_t *inode, struct iatt *buf, dict_t *xdata,
+ struct iatt *postparent)
{
- int ret = -1;
- uint64_t tmp = 0;
- pl_fdctx_t *fdctx = NULL;
-
- if (fd == NULL) {
- goto out;
+ pl_inode_t *pl_inode;
+
+ if (op_ret >= 0) {
+ pl_inode = pl_inode_get(this, inode, NULL);
+ if (pl_inode == NULL) {
+ PL_STACK_UNWIND(lookup, xdata, frame, -1, ENOMEM, NULL, NULL, NULL,
+ NULL);
+ return 0;
}
- ret = fd_ctx_del (fd, this, &tmp);
- if (ret) {
- gf_log (this->name, GF_LOG_DEBUG,
- "Could not get fdctx");
- goto out;
+ pthread_mutex_lock(&pl_inode->mutex);
+
+ /* We only update the link count if we previously didn't know it.
+ * Doing it always can lead to races since lookup is not executed
+ * atomically most of the times. */
+ if (pl_inode->links == -2) {
+ pl_inode->links = pl_check_link_count(xdata);
+ if (buf->ia_type == IA_IFDIR) {
+ /* Directories have at least 2 links. To avoid special handling
+ * for directories, we simply decrement the value here to make
+ * them equivalent to regular files. */
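+                /* e.g. an empty directory typically reports a link count of 2
+                 * ("." plus its entry in the parent), which the decrement
+                 * below turns into 1, matching a regular file with one link. */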
+ pl_inode->links--;
+ }
}
- fdctx = (pl_fdctx_t *)(long)tmp;
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
- GF_FREE (fdctx);
-out:
- return ret;
+ PL_STACK_UNWIND(lookup, xdata, frame, op_ret, op_errno, inode, buf, xdata,
+ postparent);
+ return 0;
}
-static int32_t
-__get_posixlk_count (xlator_t *this, pl_inode_t *pl_inode)
+int32_t
+pl_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
{
- posix_lock_t *lock = NULL;
- int32_t count = 0;
+ int32_t error;
+
+ error = pl_request_link_count(&xdata);
+ if (error == 0) {
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
+ STACK_WIND(frame, pl_lookup_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lookup, loc, xdata);
+ dict_unref(xdata);
+ } else {
+ STACK_UNWIND_STRICT(lookup, frame, -1, error, NULL, NULL, NULL, NULL);
+ }
+ return 0;
+}
- list_for_each_entry (lock, &pl_inode->ext_list, list) {
+int32_t
+pl_fstat_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iatt *buf, dict_t *xdata)
+{
+ PL_STACK_UNWIND(fstat, xdata, frame, op_ret, op_errno, buf, xdata);
+ return 0;
+}
- gf_log (this->name, GF_LOG_DEBUG,
- " XATTR DEBUG"
- "%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" state: %s",
- lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
- lock->client_pid,
- lock->owner,
- lock->user_flock.l_start,
- lock->user_flock.l_len,
- lock->blocked == 1 ? "Blocked" : "Active");
+int32_t
+pl_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+ STACK_WIND(frame, pl_fstat_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fstat, fd, xdata);
+ return 0;
+}
- count++;
- }
+int
+pl_readdirp_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, gf_dirent_t *entries, dict_t *xdata)
+{
+ pl_local_t *local = NULL;
+ gf_dirent_t *entry = NULL;
+
+ if (op_ret <= 0)
+ goto unwind;
+
+ local = frame->local;
+ if (!local)
+ goto unwind;
+
+ list_for_each_entry(entry, &entries->list, list)
+ {
+ pl_set_xdata_response(this, local, local->fd->inode, entry->inode,
+ entry->d_name, entry->dict, 0);
+ }
+
+unwind:
+ PL_STACK_UNWIND(readdirp, xdata, frame, op_ret, op_errno, entries, xdata);
- return count;
+ return 0;
}
-int32_t
-get_posixlk_count (xlator_t *this, inode_t *inode)
+int
+pl_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, dict_t *xdata)
{
- pl_inode_t *pl_inode = NULL;
- uint64_t tmp_pl_inode = 0;
- int ret = 0;
- int32_t count = 0;
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+ STACK_WIND(frame, pl_readdirp_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readdirp, fd, size, offset, xdata);
- ret = inode_ctx_get (inode, this, &tmp_pl_inode);
- if (ret != 0) {
- goto out;
- }
+ return 0;
+}
+
+lock_migration_info_t *
+gf_mig_info_for_lock(posix_lock_t *lock)
+{
+ lock_migration_info_t *new = GF_MALLOC(sizeof(lock_migration_info_t),
+ gf_common_mt_lock_mig);
+ if (new == NULL) {
+ goto out;
+ }
- pl_inode = (pl_inode_t *)(long) tmp_pl_inode;
+ INIT_LIST_HEAD(&new->list);
- pthread_mutex_lock (&pl_inode->mutex);
+ posix_lock_to_flock(lock, &new->flock);
+
+ new->lk_flags = lock->lk_flags;
+
+ new->client_uid = gf_strdup(lock->client_uid);
+
+out:
+ return new;
+}
+
+int
+pl_fill_active_locks(pl_inode_t *pl_inode, lock_migration_info_t *lmi)
+{
+ posix_lock_t *temp = NULL;
+ lock_migration_info_t *newlock = NULL;
+ int count = 0;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ if (list_empty(&pl_inode->ext_list)) {
+ count = 0;
+ goto unlock;
+ }
+
+ list_for_each_entry(temp, &pl_inode->ext_list, list)
{
- count =__get_posixlk_count (this, pl_inode);
+ if (temp->blocked)
+ continue;
+
+ newlock = gf_mig_info_for_lock(temp);
+ if (!newlock) {
+ pthread_mutex_unlock(&pl_inode->mutex);
+                gf_msg(THIS->name, GF_LOG_ERROR, 0, 0,
+                       "gf_mig_info_for_lock failed");
+ count = -1;
+ goto out;
+ }
+
+ list_add_tail(&newlock->list, &lmi->list);
+ count++;
}
- pthread_mutex_unlock (&pl_inode->mutex);
+ }
+unlock:
+ pthread_mutex_unlock(&pl_inode->mutex);
out:
- return count;
+ return count;
}
-void
-pl_entrylk_xattr_fill (xlator_t *this, inode_t *inode,
- dict_t *dict)
+/* This function reads only active locks */
+static int
+pl_getactivelk(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
{
- int32_t count = 0;
- int ret = -1;
+ pl_inode_t *pl_inode = NULL;
+ lock_migration_info_t locks;
+ int op_ret = 0;
+ int op_errno = 0;
+ int count = 0;
- count = get_entrylk_count (this, inode);
- ret = dict_set_int32 (dict, GLUSTERFS_ENTRYLK_COUNT, count);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_DEBUG,
- " dict_set failed on key %s", GLUSTERFS_ENTRYLK_COUNT);
- }
+ INIT_LIST_HEAD(&locks.list);
+ pl_inode = pl_inode_get(this, loc->inode, NULL);
+ if (!pl_inode) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "pl_inode_get failed");
+
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ count = pl_fill_active_locks(pl_inode, &locks);
+
+ op_ret = count;
+
+out:
+ STACK_UNWIND_STRICT(getactivelk, frame, op_ret, op_errno, &locks, NULL);
+
+ gf_free_mig_locks(&locks);
+
+ return 0;
}
void
-pl_inodelk_xattr_fill (xlator_t *this, inode_t *inode,
- dict_t *dict)
+pl_metalk_unref(pl_meta_lock_t *lock)
{
- int32_t count = 0;
- int ret = -1;
+ lock->ref--;
+ if (!lock->ref) {
+ GF_FREE(lock->client_uid);
+ GF_FREE(lock);
+ }
+}
- count = get_inodelk_count (this, inode);
- ret = dict_set_int32 (dict, GLUSTERFS_INODELK_COUNT, count);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_DEBUG,
- " dict_set failed on key %s", GLUSTERFS_INODELK_COUNT);
- }
+void
+__pl_metalk_ref(pl_meta_lock_t *lock)
+{
+ lock->ref++;
+}
+pl_meta_lock_t *
+new_meta_lock(call_frame_t *frame, xlator_t *this)
+{
+ pl_meta_lock_t *lock = GF_CALLOC(1, sizeof(*lock),
+ gf_locks_mt_pl_meta_lock_t);
+
+ if (!lock) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, ENOMEM,
+ "mem allocation"
+ " failed for meta lock");
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&lock->list);
+ INIT_LIST_HEAD(&lock->client_list);
+
+ lock->client_uid = gf_strdup(frame->root->client->client_uid);
+ if (!lock->client_uid) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, ENOMEM,
+ "mem allocation"
+ " failed for client_uid");
+ GF_FREE(lock);
+ lock = NULL;
+ goto out;
+ }
+
+ __pl_metalk_ref(lock);
+out:
+ return lock;
}
-void
-pl_posixlk_xattr_fill (xlator_t *this, inode_t *inode,
- dict_t *dict)
+int
+pl_insert_metalk(pl_inode_t *pl_inode, pl_ctx_t *ctx, pl_meta_lock_t *lock)
{
- int32_t count = 0;
- int ret = -1;
+ int ret = 0;
- count = get_posixlk_count (this, inode);
- ret = dict_set_int32 (dict, GLUSTERFS_POSIXLK_COUNT, count);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_DEBUG,
- " dict_set failed on key %s", GLUSTERFS_POSIXLK_COUNT);
+ if (!pl_inode || !ctx || !lock) {
+ gf_msg(THIS->name, GF_LOG_INFO, 0, 0, "NULL parameter");
+ ret = -1;
+ goto out;
+ }
+
+ lock->pl_inode = pl_inode;
+
+    /* Refer to pl_inode_setlk for more info on this ref.
+     * It should be unrefed on meta-unlock triggered by rebalance, or
+     * in cleanup on client disconnect. */
+ /*TODO: unref this in cleanup code for disconnect and meta-unlock*/
+ pl_inode->inode = inode_ref(pl_inode->inode);
+
+    /* NOTE: In case of a client-server disconnect we need to clean up the
+     * metalk. Hence, the metalk is added to pl_ctx_t as well. The mutex lock
+     * order should always be ctx first, then pl_inode. */
+
+ pthread_mutex_lock(&ctx->lock);
+ {
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ list_add_tail(&lock->list, &pl_inode->metalk_list);
}
+ pthread_mutex_unlock(&pl_inode->mutex);
+ list_add_tail(&lock->client_list, &ctx->metalk_list);
+ }
+ pthread_mutex_unlock(&ctx->lock);
+
+out:
+ return ret;
}
int32_t
-pl_lookup_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct iatt *buf,
- dict_t *dict,
- struct iatt *postparent)
+pl_metalk(call_frame_t *frame, xlator_t *this, inode_t *inode)
{
- pl_local_t *local = NULL;
+ pl_inode_t *pl_inode = NULL;
+ int ret = 0;
+ pl_meta_lock_t *reqlk = NULL;
+ pl_ctx_t *ctx = NULL;
+
+ pl_inode = pl_inode_get(this, inode, NULL);
+ if (!pl_inode) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, ENOMEM,
+               "pl_inode mem allocation failed");
+
+ ret = -1;
+ goto out;
+ }
+
+ /* Non rebalance process trying to do metalock */
+ if (frame->root->pid != GF_CLIENT_PID_DEFRAG) {
+ ret = -1;
+ goto out;
+ }
+
+    /* Note: In the current scheme of glusterfs, where lock migration is
+     * experimental, (ideally) only the rebalance process which is migrating
+     * the file should request a metalock. Hence, the metalock count
+     * should not be more than one for an inode. In the future, if there is
+     * a need for meta-locks from other clients, the following block can be
+     * removed.
+     *
+     * Since pl_metalk is called as part of a setxattr operation, any client
+     * process (non-rebalance) residing outside the trusted network could
+     * exhaust the server node's memory by issuing setxattr repeatedly on
+     * the metalock key. The following code makes sure that more than one
+     * metalock cannot be granted on an inode. */
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ if (pl_metalock_is_active(pl_inode)) {
+ ret = -1;
+ }
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ if (ret == -1) {
+ gf_msg(this->name, GF_LOG_WARNING, EINVAL, 0,
+ "More than one meta-lock cannot be granted on"
+ " the inode");
+ goto out;
+ }
+
+ if (frame->root->client) {
+ ctx = pl_ctx_get(frame->root->client, this);
+ if (!ctx) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "pl_ctx_get failed");
+
+ ret = -1;
+ goto out;
+ }
+ } else {
+ gf_msg(this->name, GF_LOG_INFO, 0, 0,
+ "frame-root-client "
+ "is NULL");
+
+ ret = -1;
+ goto out;
+ }
+
+ reqlk = new_meta_lock(frame, this);
+ if (!reqlk) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = pl_insert_metalk(pl_inode, ctx, reqlk);
+ if (ret < 0) {
+ pl_metalk_unref(reqlk);
+ }
- GF_VALIDATE_OR_GOTO (this->name, frame->local, out);
+out:
+ return ret;
+}
- if (op_ret) {
- goto out;
+static void
+__unwind_queued_locks(pl_inode_t *pl_inode, struct list_head *tmp_list)
+{
+ if (list_empty(&pl_inode->queued_locks))
+ return;
+
+ list_splice_init(&pl_inode->queued_locks, tmp_list);
+}
+
+static void
+__unwind_blocked_locks(pl_inode_t *pl_inode, struct list_head *tmp_list)
+{
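+    /* Collect every blocked lock on ext_list into tmp_list; the caller
+     * unwinds them (with EREMOTE) after dropping pl_inode->mutex. */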
+ posix_lock_t *lock = NULL;
+ posix_lock_t *tmp = NULL;
+
+ if (list_empty(&pl_inode->ext_list))
+ return;
+
+ list_for_each_entry_safe(lock, tmp, &pl_inode->ext_list, list)
+ {
+ if (!lock->blocking)
+ continue;
+
+ list_del_init(&lock->list);
+ list_add_tail(&lock->list, tmp_list);
+ }
+}
+
+int
+pl_metaunlock(call_frame_t *frame, xlator_t *this, inode_t *inode, dict_t *dict)
+{
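+    /* GF_META_UNLOCK_KEY handler: drop every meta-lock held by this client,
+     * record the migration status carried in "status", and unwind queued
+     * (and, on successful migration, blocked) posix locks with EREMOTE. */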
+ pl_inode_t *pl_inode = NULL;
+ int ret = 0;
+ pl_meta_lock_t *meta_lock = NULL;
+ pl_meta_lock_t *tmp_metalk = NULL;
+ pl_ctx_t *ctx = NULL;
+ posix_lock_t *posix_lock = NULL;
+ posix_lock_t *tmp_posixlk = NULL;
+ struct list_head tmp_posixlk_list;
+
+ INIT_LIST_HEAD(&tmp_posixlk_list);
+
+ if (frame->root->client) {
+ ctx = pl_ctx_get(frame->root->client, this);
+ if (!ctx) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "pl_ctx_get failed");
+
+ ret = -1;
+ goto out;
}
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "frame-root-client is "
+ "NULL");
+ ret = -1;
+ goto out;
+ }
+
+ pl_inode = pl_inode_get(this, inode, NULL);
+ if (!pl_inode) {
+ ret = -1;
+ goto out;
+ }
+
+ pthread_mutex_lock(&ctx->lock);
+ {
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ /* Unwind queued locks regardless of migration status */
+ __unwind_queued_locks(pl_inode, &tmp_posixlk_list);
- local = frame->local;
+ /* Unwind blocked locks only for successful migration */
+ if (dict_get_sizen(dict, "status")) {
+ /* unwind all blocked locks */
+ __unwind_blocked_locks(pl_inode, &tmp_posixlk_list);
+ }
- if (local->entrylk_count_req)
- pl_entrylk_xattr_fill (this, inode, dict);
- if (local->inodelk_count_req)
- pl_inodelk_xattr_fill (this, inode, dict);
- if (local->posixlk_count_req)
- pl_posixlk_xattr_fill (this, inode, dict);
+            /* Unlock the metalk. If this list is empty then
+             * pl_inode->metalk_list should be empty too: a meta-lock is
+             * always added to, and removed from, both pl_ctx_t and
+             * pl_inode. */
+ if (list_empty(&ctx->metalk_list))
+ goto unlock;
- frame->local = NULL;
+ list_for_each_entry_safe(meta_lock, tmp_metalk, &ctx->metalk_list,
+ client_list)
+ {
+ list_del_init(&meta_lock->client_list);
- if (local != NULL)
- GF_FREE (local);
+ pl_inode = meta_lock->pl_inode;
+
+ list_del_init(&meta_lock->list);
+
+ pl_metalk_unref(meta_lock);
+
+            /* The corresponding ref is taken in pl_insert_metalk. */
+ inode_unref(pl_inode->inode);
+ }
+
+ if (dict_get_sizen(dict, "status"))
+ pl_inode->migrated = _gf_true;
+ else
+ pl_inode->migrated = _gf_false;
+ }
+ unlock:
+
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
+ pthread_mutex_unlock(&ctx->lock);
out:
- STACK_UNWIND_STRICT (
- lookup,
- frame,
- op_ret,
- op_errno,
- inode,
- buf,
- dict,
- postparent);
- return 0;
+ list_for_each_entry_safe(posix_lock, tmp_posixlk, &tmp_posixlk_list, list)
+ {
+ list_del_init(&posix_lock->list);
+
+ STACK_UNWIND_STRICT(lk, posix_lock->frame, -1, EREMOTE,
+ &posix_lock->user_flock, NULL);
+
+ __destroy_lock(posix_lock);
+ }
+
+ return ret;
}
int32_t
-pl_lookup (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- dict_t *xattr_req)
+pl_setxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
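+    /* If this setxattr just turned on lock enforcement for the inode, wait
+     * for all in-flight fops on it to drain before marking it enforced. */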
- pl_local_t *local = NULL;
- int ret = -1;
+ pl_local_t *local = NULL;
+ pl_inode_t *pl_inode = NULL;
+ local = frame->local;
+ if (local && local->update_mlock_enforced_flag && op_ret != -1) {
+ pl_inode = pl_inode_get(this, local->inode, NULL);
+ if (!pl_inode) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
- VALIDATE_OR_GOTO (frame, out);
- VALIDATE_OR_GOTO (this, out);
- VALIDATE_OR_GOTO (loc, out);
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ while (pl_inode->fop_wind_count > 0) {
+ gf_msg(this->name, GF_LOG_INFO, 0, 0,
+ "waiting for existing fops (count %d) to drain for "
+ "gfid %s",
+ pl_inode->fop_wind_count, uuid_utoa(pl_inode->gfid));
+ pthread_cond_wait(&pl_inode->check_fop_wind_count,
+ &pl_inode->mutex);
+ }
+ pl_inode->mlock_enforced = _gf_true;
+ pl_inode->check_mlock_info = _gf_false;
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
- local = GF_CALLOC (1, sizeof (*local), gf_locks_mt_pl_local_t);
- GF_VALIDATE_OR_GOTO (this->name, local, out);
+unwind:
+ PL_STACK_UNWIND_FOR_CLIENT(setxattr, xdata, frame, op_ret, op_errno, xdata);
+ return 0;
+}
- if (xattr_req) {
- if (dict_get (xattr_req, GLUSTERFS_ENTRYLK_COUNT))
- local->entrylk_count_req = 1;
- if (dict_get (xattr_req, GLUSTERFS_INODELK_COUNT))
- local->inodelk_count_req = 1;
- if (dict_get (xattr_req, GLUSTERFS_POSIXLK_COUNT))
- local->posixlk_count_req = 1;
- }
+int32_t
+pl_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
+ int flags, dict_t *xdata)
+{
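+    /* Meta-lock and meta-unlock requests (GF_META_LOCK_KEY /
+     * GF_META_UNLOCK_KEY, used for lock migration) are handled and unwound
+     * here; any other key is wound down to the child translator. */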
+ int op_ret = 0;
+ int op_errno = EINVAL;
+ dict_t *xdata_rsp = NULL;
+ char *name = NULL;
+ posix_locks_private_t *priv = this->private;
- frame->local = local;
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
- STACK_WIND (frame,
- pl_lookup_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->lookup,
- loc,
- xattr_req);
- ret = 0;
-out:
- if (ret == -1)
- STACK_UNWIND_STRICT (lookup, frame, -1, 0, NULL, NULL, NULL, NULL);
+ if (dict_get_sizen(dict, GF_META_LOCK_KEY)) {
+ op_ret = pl_metalk(frame, this, loc->inode);
- return 0;
+ } else if (dict_get_sizen(dict, GF_META_UNLOCK_KEY)) {
+ op_ret = pl_metaunlock(frame, this, loc->inode, dict);
+ } else {
+ goto usual;
+ }
+
+ PL_STACK_UNWIND_FOR_CLIENT(setxattr, xdata_rsp, frame, op_ret, op_errno,
+ xdata_rsp);
+ return 0;
+
+usual:
+ PL_CHECK_LOCK_ENFORCE_KEY(frame, dict, name, this, loc, ((fd_t *)NULL),
+ priv);
+
+ STACK_WIND(frame, pl_setxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setxattr, loc, dict, flags, xdata);
+ return 0;
+
+unwind:
+ PL_STACK_UNWIND_FOR_CLIENT(setxattr, xdata, frame, op_ret, op_errno, xdata);
+
+ return 0;
}
void
-pl_dump_lock (char *str, int size, struct gf_flock *flock,
- uint64_t owner, void *trans)
+pl_dump_lock(char *str, int size, struct gf_flock *flock, gf_lkowner_t *owner,
+ void *trans, char *conn_id, time_t *granted_time,
+ time_t *blkd_time, gf_boolean_t active)
{
- char *type_str = NULL;
-
- switch (flock->l_type) {
+ char *type_str = NULL;
+ char granted[GF_TIMESTR_SIZE] = {
+ 0,
+ };
+ char blocked[GF_TIMESTR_SIZE] = {
+ 0,
+ };
+
+ if (granted_time)
+ gf_time_fmt(granted, sizeof(granted), *granted_time, gf_timefmt_FT);
+ if (blkd_time)
+ gf_time_fmt(blocked, sizeof(blocked), *blkd_time, gf_timefmt_FT);
+ switch (flock->l_type) {
case F_RDLCK:
- type_str = "READ";
- break;
+ type_str = "READ";
+ break;
case F_WRLCK:
- type_str = "WRITE";
- break;
+ type_str = "WRITE";
+ break;
case F_UNLCK:
- type_str = "UNLOCK";
- break;
+ type_str = "UNLOCK";
+ break;
default:
- type_str = "UNKNOWN";
- break;
+ type_str = "UNKNOWN";
+ break;
+ }
+
+ if (active) {
+ if (blkd_time && *blkd_time == 0) {
+ snprintf(str, size, RANGE_GRNTD_FMT, type_str, flock->l_whence,
+ (unsigned long long)flock->l_start,
+ (unsigned long long)flock->l_len,
+ (unsigned long long)flock->l_pid, lkowner_utoa(owner),
+ trans, conn_id, granted);
+ } else {
+ snprintf(str, size, RANGE_BLKD_GRNTD_FMT, type_str, flock->l_whence,
+ (unsigned long long)flock->l_start,
+ (unsigned long long)flock->l_len,
+ (unsigned long long)flock->l_pid, lkowner_utoa(owner),
+ trans, conn_id, blocked, granted);
}
+ } else {
+ snprintf(str, size, RANGE_BLKD_FMT, type_str, flock->l_whence,
+ (unsigned long long)flock->l_start,
+ (unsigned long long)flock->l_len,
+ (unsigned long long)flock->l_pid, lkowner_utoa(owner), trans,
+ conn_id, blocked);
+ }
+}
- snprintf (str, size, "type=%s, start=%llu, len=%llu, pid=%llu, lk-owner=%llu, transport=%p",
- type_str, (unsigned long long) flock->l_start,
- (unsigned long long) flock->l_len,
- (unsigned long long) flock->l_pid,
- (unsigned long long) owner,
- trans);
+void
+__dump_entrylks(pl_inode_t *pl_inode)
+{
+ pl_dom_list_t *dom = NULL;
+ pl_entry_lock_t *lock = NULL;
+ char blocked[GF_TIMESTR_SIZE] = {
+ 0,
+ };
+ char granted[GF_TIMESTR_SIZE] = {
+ 0,
+ };
+ int count = 0;
+ char key[GF_DUMP_MAX_BUF_LEN] = {
+ 0,
+ };
+ char *k = "xlator.feature.locks.lock-dump.domain.entrylk";
+
+ char tmp[4098];
+
+ list_for_each_entry(dom, &pl_inode->dom_list, inode_list)
+ {
+ count = 0;
+
+ gf_proc_dump_build_key(key, "lock-dump.domain", "domain");
+ gf_proc_dump_write(key, "%s", dom->domain);
+
+ list_for_each_entry(lock, &dom->entrylk_list, domain_list)
+ {
+ gf_time_fmt(granted, sizeof(granted), lock->granted_time,
+ gf_timefmt_FT);
+ gf_proc_dump_build_key(key, k, "entrylk[%d](ACTIVE)", count);
+ if (lock->blkd_time == 0) {
+ snprintf(tmp, sizeof(tmp), ENTRY_GRNTD_FMT,
+ lock->type == ENTRYLK_RDLCK ? "ENTRYLK_RDLCK"
+ : "ENTRYLK_WRLCK",
+ lock->basename, (unsigned long long)lock->client_pid,
+ lkowner_utoa(&lock->owner), lock->client,
+ lock->connection_id, granted);
+ } else {
+ gf_time_fmt(blocked, sizeof(blocked), lock->blkd_time,
+ gf_timefmt_FT);
+ snprintf(tmp, sizeof(tmp), ENTRY_BLKD_GRNTD_FMT,
+ lock->type == ENTRYLK_RDLCK ? "ENTRYLK_RDLCK"
+ : "ENTRYLK_WRLCK",
+ lock->basename, (unsigned long long)lock->client_pid,
+ lkowner_utoa(&lock->owner), lock->client,
+ lock->connection_id, blocked, granted);
+ }
+
+ gf_proc_dump_write(key, "%s", tmp);
+
+ count++;
+ }
+ list_for_each_entry(lock, &dom->blocked_entrylks, blocked_locks)
+ {
+ gf_time_fmt(blocked, sizeof(blocked), lock->blkd_time,
+ gf_timefmt_FT);
+
+ gf_proc_dump_build_key(key, k, "entrylk[%d](BLOCKED)", count);
+ snprintf(
+ tmp, sizeof(tmp), ENTRY_BLKD_FMT,
+ lock->type == ENTRYLK_RDLCK ? "ENTRYLK_RDLCK" : "ENTRYLK_WRLCK",
+ lock->basename, (unsigned long long)lock->client_pid,
+ lkowner_utoa(&lock->owner), lock->client, lock->connection_id,
+ blocked);
+ gf_proc_dump_write(key, "%s", tmp);
+
+ count++;
+ }
+ }
}
void
-__dump_entrylks (pl_inode_t *pl_inode)
+dump_entrylks(pl_inode_t *pl_inode)
{
- pl_dom_list_t *dom = NULL;
- pl_entry_lock_t *lock = NULL;
- int count = 0;
- char key[GF_DUMP_MAX_BUF_LEN];
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ __dump_entrylks(pl_inode);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+}
- char tmp[256];
+void
+__dump_inodelks(pl_inode_t *pl_inode)
+{
+ pl_dom_list_t *dom = NULL;
+ pl_inode_lock_t *lock = NULL;
+ int count = 0;
+ char key[GF_DUMP_MAX_BUF_LEN];
- list_for_each_entry (dom, &pl_inode->dom_list, inode_list) {
+ char tmp[4098];
- count = 0;
+ list_for_each_entry(dom, &pl_inode->dom_list, inode_list)
+ {
+ count = 0;
- gf_proc_dump_build_key(key,
- "xlator.feature.locks.lock-dump.domain",
- "domain");
- gf_proc_dump_write(key, "%s", dom->domain);
+ gf_proc_dump_build_key(key, "lock-dump.domain", "domain");
+ gf_proc_dump_write(key, "%s", dom->domain);
- list_for_each_entry (lock, &dom->entrylk_list, domain_list) {
+ list_for_each_entry(lock, &dom->inodelk_list, list)
+ {
+ gf_proc_dump_build_key(key, "inodelk", "inodelk[%d](ACTIVE)",
+ count);
- gf_proc_dump_build_key(key,
- "xlator.feature.locks.lock-dump.domain.entrylk",
- "entrylk[%d](ACTIVE)",count );
- snprintf (tmp, 256," %s on %s owner=%llu, transport=%p",
- lock->type == ENTRYLK_RDLCK ? "ENTRYLK_RDLCK" :
- "ENTRYLK_WRLCK", lock->basename,
- (unsigned long long) lock->owner,
- lock->trans);
+ SET_FLOCK_PID(&lock->user_flock, lock);
+ pl_dump_lock(tmp, sizeof(tmp), &lock->user_flock, &lock->owner,
+ lock->client, lock->connection_id, &lock->granted_time,
+ &lock->blkd_time, _gf_true);
+ gf_proc_dump_write(key, "%s", tmp);
- gf_proc_dump_write(key, tmp);
+ count++;
+ }
- count++;
- }
+ list_for_each_entry(lock, &dom->blocked_inodelks, blocked_locks)
+ {
+ gf_proc_dump_build_key(key, "inodelk", "inodelk[%d](BLOCKED)",
+ count);
+ SET_FLOCK_PID(&lock->user_flock, lock);
+ pl_dump_lock(tmp, sizeof(tmp), &lock->user_flock, &lock->owner,
+ lock->client, lock->connection_id, 0, &lock->blkd_time,
+ _gf_false);
+ gf_proc_dump_write(key, "%s", tmp);
+
+ count++;
+ }
+ }
+}
- list_for_each_entry (lock, &dom->blocked_entrylks, blocked_locks) {
+void
+dump_inodelks(pl_inode_t *pl_inode)
+{
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ __dump_inodelks(pl_inode);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+}
- gf_proc_dump_build_key(key,
- "xlator.feature.locks.lock-dump.domain.entrylk",
- "entrylk[%d](BLOCKED)",count );
- snprintf (tmp, 256," %s on %s state = Blocked",
- lock->type == ENTRYLK_RDLCK ? "ENTRYLK_RDLCK" :
- "ENTRYLK_WRLCK", lock->basename);
+void
+__dump_posixlks(pl_inode_t *pl_inode)
+{
+ posix_lock_t *lock = NULL;
+ int count = 0;
+ char key[GF_DUMP_MAX_BUF_LEN];
+
+ char tmp[4098];
+
+ list_for_each_entry(lock, &pl_inode->ext_list, list)
+ {
+ SET_FLOCK_PID(&lock->user_flock, lock);
+ gf_proc_dump_build_key(key, "posixlk", "posixlk[%d](%s)", count,
+ lock->blocked ? "BLOCKED" : "ACTIVE");
+ pl_dump_lock(tmp, sizeof(tmp), &lock->user_flock, &lock->owner,
+ lock->client, lock->client_uid, &lock->granted_time,
+ &lock->blkd_time, (lock->blocked) ? _gf_false : _gf_true);
+ gf_proc_dump_write(key, "%s", tmp);
+
+ count++;
+ }
+}
- gf_proc_dump_write(key, tmp);
+void
+dump_posixlks(pl_inode_t *pl_inode)
+{
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ __dump_posixlks(pl_inode);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+}
- count++;
- }
+int32_t
+pl_dump_inode_priv(xlator_t *this, inode_t *inode)
+{
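+    /* Statedump path: both inode->lock and pl_inode->mutex are taken with
+     * trylock so the dump never blocks; on contention the lock state is
+     * skipped and a failure note is emitted instead. */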
+ int ret = -1;
+ uint64_t tmp_pl_inode = 0;
+ pl_inode_t *pl_inode = NULL;
+ char *pathname = NULL;
+ gf_boolean_t section_added = _gf_false;
+
+ int count = 0;
+
+ if (!inode) {
+ errno = EINVAL;
+ goto out;
+ }
+
+ ret = TRY_LOCK(&inode->lock);
+ if (ret)
+ goto out;
+ {
+ ret = __inode_ctx_get(inode, this, &tmp_pl_inode);
+ if (ret)
+ goto unlock;
+ }
+unlock:
+ UNLOCK(&inode->lock);
+ if (ret)
+ goto out;
+
+ pl_inode = (pl_inode_t *)(long)tmp_pl_inode;
+ if (!pl_inode) {
+ ret = -1;
+ goto out;
+ }
+
+ gf_proc_dump_add_section("xlator.features.locks.%s.inode", this->name);
+ section_added = _gf_true;
+
+    /* We are safe to call __inode_path since we have the
+     * inode->table->lock. */
+ __inode_path(inode, NULL, &pathname);
+ if (pathname)
+ gf_proc_dump_write("path", "%s", pathname);
+
+ gf_proc_dump_write("mandatory", "%d", pl_inode->mandatory);
+
+ ret = pthread_mutex_trylock(&pl_inode->mutex);
+ if (ret)
+ goto out;
+ {
+ count = __get_entrylk_count(this, pl_inode);
+ if (count) {
+ gf_proc_dump_write("entrylk-count", "%d", count);
+ __dump_entrylks(pl_inode);
+ }
+ count = __get_inodelk_count(this, pl_inode, NULL);
+ if (count) {
+ gf_proc_dump_write("inodelk-count", "%d", count);
+ __dump_inodelks(pl_inode);
}
+ count = __get_posixlk_count(pl_inode);
+ if (count) {
+ gf_proc_dump_write("posixlk-count", "%d", count);
+ __dump_posixlks(pl_inode);
+ }
+
+ gf_proc_dump_write("links", "%d", pl_inode->links);
+ gf_proc_dump_write("removes_pending", "%u", pl_inode->remove_running);
+ gf_proc_dump_write("removed", "%u", pl_inode->removed);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+out:
+ GF_FREE(pathname);
+
+ if (ret && inode) {
+ if (!section_added)
+ gf_proc_dump_add_section(
+ "xlator.features.locks.%s."
+ "inode",
+ this->name);
+ gf_proc_dump_write("Unable to print lock state",
+ "(Lock "
+ "acquisition failure) %s",
+ uuid_utoa(inode->gfid));
+ }
+ return ret;
}
-void
-dump_entrylks (pl_inode_t *pl_inode)
+int32_t
+mem_acct_init(xlator_t *this)
{
- pthread_mutex_lock (&pl_inode->mutex);
- {
- __dump_entrylks (pl_inode);
- }
- pthread_mutex_unlock (&pl_inode->mutex);
+ int ret = -1;
+ if (!this)
+ return ret;
+
+ ret = xlator_mem_acct_init(this, gf_locks_mt_end + 1);
+
+ if (ret != 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+               "Memory accounting init failed");
+ return ret;
+ }
+
+ return ret;
}
-void
-__dump_inodelks (pl_inode_t *pl_inode)
+pl_ctx_t *
+pl_ctx_get(client_t *client, xlator_t *xlator)
{
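+    /* Get-or-create the per-client lock context. If another thread installs
+     * one concurrently, client_ctx_set() returns that instance and our
+     * freshly allocated context is destroyed. */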
- pl_dom_list_t *dom = NULL;
- pl_inode_lock_t *lock = NULL;
- int count = 0;
- char key[GF_DUMP_MAX_BUF_LEN];
+ void *tmp = NULL;
+ pl_ctx_t *ctx = NULL;
+ pl_ctx_t *setted_ctx = NULL;
- char tmp[256];
+ client_ctx_get(client, xlator, &tmp);
- list_for_each_entry (dom, &pl_inode->dom_list, inode_list) {
+ ctx = tmp;
- count = 0;
+ if (ctx != NULL)
+ goto out;
- gf_proc_dump_build_key(key,
- "xlator.feature.locks.lock-dump.domain",
- "domain");
- gf_proc_dump_write(key, "%s", dom->domain);
+ ctx = GF_CALLOC(1, sizeof(pl_ctx_t), gf_locks_mt_posix_lock_t);
- list_for_each_entry (lock, &dom->inodelk_list, list) {
+ if (ctx == NULL)
+ goto out;
- gf_proc_dump_build_key(key,
- "xlator.feature.locks.lock-dump.domain.inodelk",
- "inodelk[%d](ACTIVE)",count );
+ pthread_mutex_init(&ctx->lock, NULL);
+ INIT_LIST_HEAD(&ctx->inodelk_lockers);
+ INIT_LIST_HEAD(&ctx->entrylk_lockers);
+ INIT_LIST_HEAD(&ctx->metalk_list);
- pl_dump_lock (tmp, 256, &lock->user_flock,
- lock->owner, lock->transport);
- gf_proc_dump_write(key, tmp);
+ setted_ctx = client_ctx_set(client, xlator, ctx);
+ if (ctx != setted_ctx) {
+ pthread_mutex_destroy(&ctx->lock);
+ GF_FREE(ctx);
+ ctx = setted_ctx;
+ }
+out:
+ return ctx;
+}
- count++;
- }
+int
+pl_metalk_client_cleanup(xlator_t *this, pl_ctx_t *ctx)
+{
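+    /* Disconnect-time cleanup: release every meta-lock still held by this
+     * client and unwind its queued and blocked posix locks with EREMOTE. */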
+ pl_meta_lock_t *meta_lock = NULL;
+ pl_meta_lock_t *tmp_metalk = NULL;
+ pl_inode_t *pl_inode = NULL;
+ posix_lock_t *posix_lock = NULL;
+ posix_lock_t *tmp_posixlk = NULL;
+ struct list_head tmp_posixlk_list;
+
+ INIT_LIST_HEAD(&tmp_posixlk_list);
+
+ pthread_mutex_lock(&ctx->lock);
+ {
+        /* If this list is empty then pl_inode->metalk_list should be
+         * empty too: a meta-lock is always added to, and removed from,
+         * both pl_ctx_t and pl_inode. */
+ if (list_empty(&ctx->metalk_list))
+ goto unlock;
+
+ list_for_each_entry_safe(meta_lock, tmp_metalk, &ctx->metalk_list,
+ client_list)
+ {
+ list_del_init(&meta_lock->client_list);
- list_for_each_entry (lock, &dom->blocked_inodelks, blocked_locks) {
+ pl_inode = meta_lock->pl_inode;
- gf_proc_dump_build_key(key,
- "xlator.feature.locks.lock-dump.domain.inodelk",
- "inodelk[%d](BLOCKED)",count );
- pl_dump_lock (tmp, 256, &lock->user_flock,
- lock->owner, lock->transport);
- gf_proc_dump_write(key, tmp);
+ pthread_mutex_lock(&pl_inode->mutex);
- count++;
- }
+ {
+                /* Since the migration status is unknown here, unwind all
+                 * queued and blocked locks so the lock requesters can
+                 * recheck the migration status and find the correct
+                 * destination. */
+ __unwind_queued_locks(pl_inode, &tmp_posixlk_list);
+ __unwind_blocked_locks(pl_inode, &tmp_posixlk_list);
+
+ list_del_init(&meta_lock->list);
+
+ pl_metalk_unref(meta_lock);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+            /* The corresponding ref is taken in pl_insert_metalk. */
+ inode_unref(pl_inode->inode);
}
+ }
+
+unlock:
+ pthread_mutex_unlock(&ctx->lock);
+
+ list_for_each_entry_safe(posix_lock, tmp_posixlk, &tmp_posixlk_list, list)
+ {
+ list_del_init(&posix_lock->list);
+ STACK_UNWIND_STRICT(lk, posix_lock->frame, -1, EREMOTE,
+ &posix_lock->user_flock, NULL);
+
+ __destroy_lock(posix_lock);
+ }
+ return 0;
}
-void
-dump_inodelks (pl_inode_t *pl_inode)
+static int
+pl_client_disconnect_cbk(xlator_t *this, client_t *client)
{
- pthread_mutex_lock (&pl_inode->mutex);
- {
- __dump_inodelks (pl_inode);
- }
- pthread_mutex_unlock (&pl_inode->mutex);
+ pl_ctx_t *pl_ctx = pl_ctx_get(client, this);
+ if (pl_ctx) {
+ pl_inodelk_client_cleanup(this, pl_ctx);
+ pl_entrylk_client_cleanup(this, pl_ctx);
+ pl_metalk_client_cleanup(this, pl_ctx);
+ }
+
+ return 0;
+}
+static int
+pl_client_destroy_cbk(xlator_t *this, client_t *client)
+{
+ void *tmp = NULL;
+ pl_ctx_t *pl_ctx = NULL;
+
+ pl_client_disconnect_cbk(this, client);
+
+ client_ctx_del(client, this, &tmp);
+
+ if (tmp == NULL)
+ return 0;
+
+ pl_ctx = tmp;
+
+ GF_ASSERT(list_empty(&pl_ctx->inodelk_lockers));
+ GF_ASSERT(list_empty(&pl_ctx->entrylk_lockers));
+
+ pthread_mutex_destroy(&pl_ctx->lock);
+ GF_FREE(pl_ctx);
+
+ return 0;
}
-void
-__dump_posixlks (pl_inode_t *pl_inode)
+int
+reconfigure(xlator_t *this, dict_t *options)
{
- posix_lock_t *lock = NULL;
- int count = 0;
- char key[GF_DUMP_MAX_BUF_LEN];
+ posix_locks_private_t *priv = this->private;
+ int ret = -1;
+ char *tmp_str = NULL;
- char tmp[256];
+ GF_OPTION_RECONF("trace", priv->trace, options, bool, out);
- list_for_each_entry (lock, &pl_inode->ext_list, list) {
+ GF_OPTION_RECONF("monkey-unlocking", priv->monkey_unlocking, options, bool,
+ out);
- gf_proc_dump_build_key(key,
- "xlator.feature.locks.lock-dump.domain.posixlk",
- "posixlk[%d](%s)",
- count,
- lock->blocked ? "BLOCKED" : "ACTIVE");
- pl_dump_lock (tmp, 256, &lock->user_flock,
- lock->owner, lock->transport);
- gf_proc_dump_write(key, tmp);
+ GF_OPTION_RECONF("revocation-secs", priv->revocation_secs, options, uint32,
+ out);
- count++;
- }
+ GF_OPTION_RECONF("revocation-clear-all", priv->revocation_clear_all,
+ options, bool, out);
+
+ GF_OPTION_RECONF("revocation-max-blocked", priv->revocation_max_blocked,
+ options, uint32, out);
+
+ GF_OPTION_RECONF("notify-contention", priv->notify_contention, options,
+ bool, out);
+
+ GF_OPTION_RECONF("notify-contention-delay", priv->notify_contention_delay,
+ options, uint32, out);
+
+ GF_OPTION_RECONF("mandatory-locking", tmp_str, options, str, out);
+
+ GF_OPTION_RECONF("enforce-mandatory-lock", priv->mlock_enforced, options,
+ bool, out);
+
+ if (!strcmp(tmp_str, "forced"))
+ priv->mandatory_mode = MLK_FORCED;
+ else if (!strcmp(tmp_str, "file"))
+ priv->mandatory_mode = MLK_FILE_BASED;
+ else if (!strcmp(tmp_str, "optimal"))
+ priv->mandatory_mode = MLK_OPTIMAL;
+ else
+ priv->mandatory_mode = MLK_NONE;
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int
+init(xlator_t *this)
+{
+ posix_locks_private_t *priv = NULL;
+ xlator_list_t *trav = NULL;
+ char *tmp_str = NULL;
+ int ret = -1;
+
+ if (!this->children || this->children->next) {
+ gf_log(this->name, GF_LOG_CRITICAL,
+ "FATAL: posix-locks should have exactly one child");
+ goto out;
+ }
+
+ if (!this->parents) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "Volume is dangling. Please check the volume file.");
+ }
+
+ trav = this->children;
+ while (trav->xlator->children)
+ trav = trav->xlator->children;
+
+ if (strncmp("storage/", trav->xlator->type, 8)) {
+ gf_log(this->name, GF_LOG_CRITICAL,
+ "'locks' translator is not loaded over a storage "
+ "translator");
+ goto out;
+ }
+
+ priv = GF_CALLOC(1, sizeof(*priv), gf_locks_mt_posix_locks_private_t);
+
+ GF_OPTION_INIT("mandatory-locking", tmp_str, str, out);
+ if (!strcmp(tmp_str, "forced"))
+ priv->mandatory_mode = MLK_FORCED;
+ else if (!strcmp(tmp_str, "file"))
+ priv->mandatory_mode = MLK_FILE_BASED;
+ else if (!strcmp(tmp_str, "optimal"))
+ priv->mandatory_mode = MLK_OPTIMAL;
+ else
+ priv->mandatory_mode = MLK_NONE;
+
+ tmp_str = NULL;
+ GF_OPTION_INIT("trace", priv->trace, bool, out);
+ GF_OPTION_INIT("monkey-unlocking", priv->monkey_unlocking, bool, out);
+ GF_OPTION_INIT("revocation-secs", priv->revocation_secs, uint32, out);
+
+ GF_OPTION_INIT("revocation-clear-all", priv->revocation_clear_all, bool,
+ out);
+
+ GF_OPTION_INIT("revocation-max-blocked", priv->revocation_max_blocked,
+ uint32, out);
+
+ GF_OPTION_INIT("notify-contention", priv->notify_contention, bool, out);
+
+ GF_OPTION_INIT("notify-contention-delay", priv->notify_contention_delay,
+ uint32, out);
+
+ GF_OPTION_INIT("enforce-mandatory-lock", priv->mlock_enforced, bool, out);
+
+ this->local_pool = mem_pool_new(pl_local_t, 32);
+ if (!this->local_pool) {
+ ret = -1;
+ gf_log(this->name, GF_LOG_ERROR,
+ "failed to create local_t's memory pool");
+ goto out;
+ }
+
+ this->private = priv;
+ ret = 0;
+
+out:
+ if (ret) {
+ GF_FREE(priv);
+ }
+ return ret;
}
void
-dump_posixlks (pl_inode_t *pl_inode)
+fini(xlator_t *this)
{
- pthread_mutex_lock (&pl_inode->mutex);
- {
- __dump_posixlks (pl_inode);
- }
- pthread_mutex_unlock (&pl_inode->mutex);
+ posix_locks_private_t *priv = this->private;
+ if (!priv)
+ return;
+ this->private = NULL;
+ if (this->local_pool) {
+ mem_pool_destroy(this->local_pool);
+ this->local_pool = NULL;
+ }
+ GF_FREE(priv->brickname);
+ GF_FREE(priv);
+
+ return;
+}
+
+int
+pl_inodelk(call_frame_t *frame, xlator_t *this, const char *volume, loc_t *loc,
+ int32_t cmd, struct gf_flock *flock, dict_t *xdata);
+
+int
+pl_finodelk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
+ int32_t cmd, struct gf_flock *flock, dict_t *xdata);
+int
+pl_entrylk(call_frame_t *frame, xlator_t *this, const char *volume, loc_t *loc,
+ const char *basename, entrylk_cmd cmd, entrylk_type type,
+ dict_t *xdata);
+
+int
+pl_fentrylk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
+ const char *basename, entrylk_cmd cmd, entrylk_type type,
+ dict_t *xdata);
+
+int32_t
+pl_rename_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iatt *buf, struct iatt *preoldparent,
+ struct iatt *postoldparent, struct iatt *prenewparent,
+ struct iatt *postnewparent, dict_t *xdata)
+{
+ pl_inode_remove_cbk(this, cookie, op_ret < 0 ? op_errno : 0);
+
+ PL_STACK_UNWIND(rename, xdata, frame, op_ret, op_errno, buf, preoldparent,
+ postoldparent, prenewparent, postnewparent, xdata);
+
+ return 0;
}
int32_t
-pl_dump_inode_priv (xlator_t *this, inode_t *inode)
+pl_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ dict_t *xdata)
{
+ int32_t error;
- int ret = -1;
- uint64_t tmp_pl_inode = 0;
- pl_inode_t *pl_inode = NULL;
- char key[GF_DUMP_MAX_BUF_LEN];
+ error = PL_INODE_REMOVE(rename, frame, this, oldloc, newloc, pl_rename,
+ pl_rename_cbk, oldloc, newloc, xdata);
+ if (error > 0) {
+ STACK_UNWIND_STRICT(rename, frame, -1, error, NULL, NULL, NULL, NULL,
+ NULL, NULL);
+ }
- int count = 0;
+ return 0;
+}
- GF_VALIDATE_OR_GOTO (this->name, inode, out);
+posix_lock_t *
+gf_lkmig_info_to_posix_lock(call_frame_t *frame, lock_migration_info_t *lmi)
+{
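+    /* Convert a migrated-lock descriptor into a posix_lock_t; an l_len of 0
+     * means the lock extends to end-of-file, hence fl_end = LLONG_MAX. */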
+ posix_lock_t *lock = GF_CALLOC(1, sizeof(posix_lock_t),
+ gf_locks_mt_posix_lock_t);
+ if (!lock)
+ goto out;
- ret = inode_ctx_get (inode, this, &tmp_pl_inode);
+ lock->fl_start = lmi->flock.l_start;
+ lock->fl_type = lmi->flock.l_type;
- if (ret != 0)
- goto out;
+ if (lmi->flock.l_len == 0)
+ lock->fl_end = LLONG_MAX;
+ else
+ lock->fl_end = lmi->flock.l_start + lmi->flock.l_len - 1;
- pl_inode = (pl_inode_t *)(long)tmp_pl_inode;
+ lock->client = frame->root->client;
+
+ lock->lk_flags = lmi->lk_flags;
+
+ lock->client_uid = gf_strdup(lmi->client_uid);
+ if (lock->client_uid == NULL) {
+ GF_FREE(lock);
+ lock = NULL;
+ goto out;
+ }
+
+ lock->client_pid = lmi->flock.l_pid;
+ lock->owner = lmi->flock.l_owner;
+
+ INIT_LIST_HEAD(&lock->list);
+
+out:
+ return lock;
+}
+
+/* This function receives the active locks read from the source brick (in the
+ * rebalance context) and writes them here. Hence, it adds the locks directly
+ * to pl_inode->ext_list. */
+int
+pl_write_active_locks(call_frame_t *frame, pl_inode_t *pl_inode,
+ lock_migration_info_t *locklist)
+{
+ posix_lock_t *newlock = NULL;
+ lock_migration_info_t *temp = NULL;
+ int ret = 0;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+        /* Just making sure the activelk list is empty; it should never be
+         * non-empty at this point. */
+ if (!list_empty(&pl_inode->ext_list)) {
+ pthread_mutex_unlock(&pl_inode->mutex);
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, 0, "invalid locks found");
+
+ ret = -1;
+ goto out;
+ }
+
+ /* This list also should not be empty */
+ if (list_empty(&locklist->list)) {
+ pthread_mutex_unlock(&pl_inode->mutex);
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, 0, "empty lock list");
+
+ ret = -1;
+ goto out;
+ }
+
+ list_for_each_entry(temp, &locklist->list, list)
+ {
+ newlock = gf_lkmig_info_to_posix_lock(frame, temp);
+ if (!newlock) {
+ pthread_mutex_unlock(&pl_inode->mutex);
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, 0,
+ "mem allocation failed for newlock");
- if (!pl_inode) {
ret = -1;
goto out;
+ }
+ list_add_tail(&newlock->list, &pl_inode->ext_list);
}
+ }
+    /* TODO: What if a few lock additions fail with ENOMEM? Should the
+     * already added locks be cleared? */
+ pthread_mutex_unlock(&pl_inode->mutex);
+out:
+ return ret;
+}
- gf_proc_dump_build_key(key,
- "xlator.feature.locks.inode",
- "%ld.mandatory",inode->ino);
- gf_proc_dump_write(key, "%d", pl_inode->mandatory);
+static int
+pl_setactivelk(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ lock_migration_info_t *locklist, dict_t *xdata)
+{
+ int op_ret = 0;
+ int op_errno = 0;
+ int ret = 0;
+ pl_inode_t *pl_inode = pl_inode_get(this, loc->inode, NULL);
+ if (!pl_inode) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "pl_inode_get failed");
- count = get_entrylk_count (this, inode);
- gf_proc_dump_build_key(key,
- "xlator.feature.locks.entrylk-count",
- "%ld.entrylk-count", inode->ino);
- gf_proc_dump_write(key, "%d", count);
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto out;
+ }
+ ret = pl_write_active_locks(frame, pl_inode, locklist);
- dump_entrylks(pl_inode);
+ op_ret = ret;
- count = get_inodelk_count (this, inode);
- gf_proc_dump_build_key(key,
- "xlator.feature.locks.inodelk-count",
- "%ld.inodelk-count", inode->ino);
- gf_proc_dump_write(key, "%d", count);
+out:
+ STACK_UNWIND_STRICT(setactivelk, frame, op_ret, op_errno, NULL);
- dump_inodelks(pl_inode);
+ return 0;
+}
- count = get_posixlk_count (this, inode);
- gf_proc_dump_build_key(key,
- "xlator.feature.locks.posixlk-count",
- "%ld.posixlk-count", inode->ino);
- gf_proc_dump_write(key, "%d", count);
+int32_t
+pl_unlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iatt *preparent, struct iatt *postparent,
+ dict_t *xdata)
+{
+ pl_inode_remove_cbk(this, cookie, op_ret < 0 ? op_errno : 0);
- dump_posixlks(pl_inode);
+ PL_STACK_UNWIND(unlink, xdata, frame, op_ret, op_errno, preparent,
+ postparent, xdata);
+ return 0;
+}
-out:
- return ret;
+int32_t
+pl_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflag,
+ dict_t *xdata)
+{
+ int32_t error;
+
+ error = PL_INODE_REMOVE(unlink, frame, this, loc, NULL, pl_unlink,
+ pl_unlink_cbk, loc, xflag, xdata);
+ if (error > 0) {
+ STACK_UNWIND_STRICT(unlink, frame, -1, error, NULL, NULL, NULL);
+ }
+
+ return 0;
+}
+
+int32_t
+pl_mkdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, inode_t *inode, struct iatt *buf,
+ struct iatt *preparent, struct iatt *postparent, dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(mkdir, xdata, frame, op_ret, op_errno, inode,
+ buf, preparent, postparent, xdata);
+ return 0;
+}
+
+int
+pl_mkdir(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ mode_t umask, dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
+ STACK_WIND(frame, pl_mkdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mkdir, loc, mode, umask, xdata);
+ return 0;
}
+int32_t
+pl_stat_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iatt *buf, dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(stat, xdata, frame, op_ret, op_errno, buf,
+ xdata);
+ return 0;
+}
+int
+pl_stat(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
+ STACK_WIND(frame, pl_stat_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->stat, loc, xdata);
+ return 0;
+}
+
+int32_t
+pl_mknod_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, inode_t *inode, struct iatt *buf,
+ struct iatt *preparent, struct iatt *postparent, dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(mknod, xdata, frame, op_ret, op_errno, inode,
+ buf, preparent, postparent, xdata);
+ return 0;
+}
+
+int
+pl_mknod(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ dev_t rdev, mode_t umask, dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
+ STACK_WIND(frame, pl_mknod_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mknod, loc, mode, rdev, umask, xdata);
+ return 0;
+}
+
+int32_t
+pl_rmdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iatt *preparent, struct iatt *postparent,
+ dict_t *xdata)
+{
+ pl_inode_remove_cbk(this, cookie, op_ret < 0 ? op_errno : 0);
+
+ PL_STACK_UNWIND_FOR_CLIENT(rmdir, xdata, frame, op_ret, op_errno, preparent,
+ postparent, xdata);
+
+ return 0;
+}
+
+int
+pl_rmdir(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflags,
+ dict_t *xdata)
+{
+ int32_t error;
+
+ error = PL_INODE_REMOVE(rmdir, frame, this, loc, NULL, pl_rmdir,
+ pl_rmdir_cbk, loc, xflags, xdata);
+ if (error > 0) {
+ STACK_UNWIND_STRICT(rmdir, frame, -1, error, NULL, NULL, NULL);
+ }
+
+ return 0;
+}
+
+int32_t
+pl_symlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(symlink, xdata, frame, op_ret, op_errno, inode,
+ buf, preparent, postparent, xdata);
+ return 0;
+}
-/*
- * pl_dump_inode - inode dump function for posix locks
- *
- */
int
-pl_dump_inode (xlator_t *this)
+pl_symlink(call_frame_t *frame, xlator_t *this, const char *linkname,
+ loc_t *loc, mode_t umask, dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
+ STACK_WIND(frame, pl_symlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->symlink, linkname, loc, umask, xdata);
+ return 0;
+}
+
+int32_t
+pl_link_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, inode_t *inode, struct iatt *buf,
+ struct iatt *preparent, struct iatt *postparent, dict_t *xdata)
{
+ pl_inode_t *pl_inode = (pl_inode_t *)cookie;
- GF_ASSERT (this);
+ if (op_ret >= 0) {
+ pthread_mutex_lock(&pl_inode->mutex);
- if (this->itable) {
- inode_table_dump(this->itable,
- "xlator.features.locks.inode_table");
+        /* TODO: can pl_inode->links == 0 happen here? */
+ if (pl_inode->links >= 0) {
+ pl_inode->links++;
}
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
+
+ PL_STACK_UNWIND_FOR_CLIENT(link, xdata, frame, op_ret, op_errno, inode, buf,
+ preparent, postparent, xdata);
+ return 0;
+}
+
+int
+pl_link(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ dict_t *xdata)
+{
+ pl_inode_t *pl_inode;
+
+ pl_inode = pl_inode_get(this, oldloc->inode, NULL);
+ if (pl_inode == NULL) {
+ STACK_UNWIND_STRICT(link, frame, -1, ENOMEM, NULL, NULL, NULL, NULL,
+ NULL);
return 0;
+ }
+
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), oldloc, newloc);
+ STACK_WIND_COOKIE(frame, pl_link_cbk, pl_inode, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->link, oldloc, newloc, xdata);
+ return 0;
}
int32_t
-mem_acct_init (xlator_t *this)
+pl_fsync_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iatt *prebuf, struct iatt *postbuf,
+ dict_t *xdata)
{
- int ret = -1;
+ PL_STACK_UNWIND_FOR_CLIENT(fsync, xdata, frame, op_ret, op_errno, prebuf,
+ postbuf, xdata);
+ return 0;
+}
- if (!this)
- return ret;
+int
+pl_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
+ dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+ STACK_WIND(frame, pl_fsync_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsync, fd, datasync, xdata);
+ return 0;
+}
- ret = xlator_mem_acct_init (this, gf_locks_mt_end + 1);
+int32_t
+pl_readdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, gf_dirent_t *entries,
+ dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(readdir, xdata, frame, op_ret, op_errno, entries,
+ xdata);
+ return 0;
+}
- if (ret != 0) {
- gf_log (this->name, GF_LOG_ERROR, "Memory accounting init"
- "failed");
- return ret;
- }
+int
+pl_readdir(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+ STACK_WIND(frame, pl_readdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readdir, fd, size, offset, xdata);
+ return 0;
+}
- return ret;
+int32_t
+pl_fsyncdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(fsyncdir, xdata, frame, op_ret, op_errno, xdata);
+ return 0;
}
int
-init (xlator_t *this)
+pl_fsyncdir(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
+ dict_t *xdata)
{
- posix_locks_private_t *priv = NULL;
- xlator_list_t *trav = NULL;
- data_t *mandatory = NULL;
- data_t *trace = NULL;
- int ret = -1;
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+ STACK_WIND(frame, pl_fsyncdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsyncdir, fd, datasync, xdata);
+ return 0;
+}
- if (!this->children || this->children->next) {
- gf_log (this->name, GF_LOG_CRITICAL,
- "FATAL: posix-locks should have exactly one child");
- goto out;
+int32_t
+pl_statfs_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct statvfs *buf, dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(statfs, xdata, frame, op_ret, op_errno, buf,
+ xdata);
+ return 0;
+}
+
+int
+pl_statfs(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
+ STACK_WIND(frame, pl_statfs_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->statfs, loc, xdata);
+ return 0;
+}
+
+int32_t
+pl_removexattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ pl_local_t *local = NULL;
+ pl_inode_t *pl_inode = NULL;
+
+ local = frame->local;
+ if (local && local->update_mlock_enforced_flag && op_ret != -1) {
+ pl_inode = pl_inode_get(this, local->inode, NULL);
+ if (!pl_inode) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
}
- if (!this->parents) {
- gf_log (this->name, GF_LOG_WARNING,
- "Volume is dangling. Please check the volume file.");
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ pl_inode->mlock_enforced = _gf_false;
+ pl_inode->check_mlock_info = _gf_false;
+ pl_inode->track_fop_wind_count = _gf_true;
}
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
- trav = this->children;
- while (trav->xlator->children)
- trav = trav->xlator->children;
+unwind:
+ PL_STACK_UNWIND_FOR_CLIENT(removexattr, xdata, frame, op_ret, op_errno,
+ xdata);
+ return 0;
+}
- if (strncmp ("storage/", trav->xlator->type, 8)) {
- gf_log (this->name, GF_LOG_CRITICAL,
- "'locks' translator is not loaded over a storage "
- "translator");
- goto out;;
- }
+int
+pl_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, dict_t *xdata)
+{
+ int op_ret = 0;
+ int op_errno = EINVAL;
+ posix_locks_private_t *priv = this->private;
- priv = GF_CALLOC (1, sizeof (*priv),
- gf_locks_mt_posix_locks_private_t);
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
- mandatory = dict_get (this->options, "mandatory-locks");
- if (mandatory)
- gf_log (this->name, GF_LOG_WARNING,
- "mandatory locks not supported in this minor release.");
+ PL_CHECK_LOCK_ENFORCE_KEY(frame, ((dict_t *)NULL), name, this, loc,
+ ((fd_t *)NULL), priv);
- trace = dict_get (this->options, "trace");
- if (trace) {
- if (gf_string2boolean (trace->data,
- &priv->trace) == -1) {
- gf_log (this->name, GF_LOG_ERROR,
- "'trace' takes on only boolean values.");
- goto out;
- }
- }
+ STACK_WIND(frame, pl_removexattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->removexattr, loc, name, xdata);
+ return 0;
- this->private = priv;
- ret = 0;
+unwind:
+ PL_STACK_UNWIND_FOR_CLIENT(removexattr, xdata, frame, op_ret, op_errno,
+ NULL);
-out:
- if (ret) {
- if (priv)
- GF_FREE (priv);
+ return 0;
+}
+
+int32_t
+pl_fremovexattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ pl_local_t *local = NULL;
+ pl_inode_t *pl_inode = NULL;
+
+ local = frame->local;
+ if (local && local->update_mlock_enforced_flag && op_ret != -1) {
+ pl_inode = pl_inode_get(this, local->inode, NULL);
+ if (!pl_inode) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
}
- return ret;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ pl_inode->mlock_enforced = _gf_false;
+ pl_inode->check_mlock_info = _gf_false;
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+ }
+
+unwind:
+ PL_STACK_UNWIND_FOR_CLIENT(fremovexattr, xdata, frame, op_ret, op_errno,
+ xdata);
+ return 0;
+}
+
+int
+pl_fremovexattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
+ dict_t *xdata)
+{
+ int op_ret = -1;
+ int op_errno = EINVAL;
+ posix_locks_private_t *priv = this->private;
+
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+
+ PL_CHECK_LOCK_ENFORCE_KEY(frame, ((dict_t *)NULL), name, this,
+ ((loc_t *)NULL), fd, priv);
+
+ STACK_WIND(frame, pl_fremovexattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fremovexattr, fd, name, xdata);
+ return 0;
+
+unwind:
+ PL_STACK_UNWIND_FOR_CLIENT(fremovexattr, xdata, frame, op_ret, op_errno,
+ NULL);
+ return 0;
}
+int32_t
+pl_rchecksum_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, uint32_t weak_cksum,
+ uint8_t *strong_cksum, dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(rchecksum, xdata, frame, op_ret, op_errno,
+ weak_cksum, strong_cksum, xdata);
+ return 0;
+}
int
-fini (xlator_t *this)
+pl_rchecksum(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ int32_t len, dict_t *xdata)
{
- posix_locks_private_t *priv = NULL;
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+ STACK_WIND(frame, pl_rchecksum_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rchecksum, fd, offset, len, xdata);
+ return 0;
+}
- priv = this->private;
- if (!priv)
- return 0;
- this->private = NULL;
- GF_FREE (priv);
+int32_t
+pl_xattrop_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict, dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(xattrop, xdata, frame, op_ret, op_errno, dict,
+ xdata);
+ return 0;
+}
- return 0;
+int
+pl_xattrop(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
+ STACK_WIND(frame, pl_xattrop_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->xattrop, loc, optype, xattr, xdata);
+ return 0;
}
+int32_t
+pl_fxattrop_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict, dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(fxattrop, xdata, frame, op_ret, op_errno, dict,
+ xdata);
+ return 0;
+}
+
+int
+pl_fxattrop(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ gf_xattrop_flags_t optype, dict_t *xattr, dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+ STACK_WIND(frame, pl_fxattrop_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fxattrop, fd, optype, xattr, xdata);
+ return 0;
+}
+
+int32_t
+pl_setattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *statpre,
+ struct iatt *statpost, dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(setattr, xdata, frame, op_ret, op_errno, statpre,
+ statpost, xdata);
+ return 0;
+}
+
+int
+pl_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc, struct iatt *stbuf,
+ int32_t valid, dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
+ STACK_WIND(frame, pl_setattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setattr, loc, stbuf, valid, xdata);
+ return 0;
+}
+
+int32_t
+pl_fsetattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *statpre,
+ struct iatt *statpost, dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(fsetattr, xdata, frame, op_ret, op_errno,
+ statpre, statpost, xdata);
+ return 0;
+}
int
-pl_inodelk (call_frame_t *frame, xlator_t *this,
- const char *volume, loc_t *loc, int32_t cmd, struct gf_flock *flock);
+pl_fsetattr(call_frame_t *frame, xlator_t *this, fd_t *fd, struct iatt *stbuf,
+ int32_t valid, dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+ STACK_WIND(frame, pl_fsetattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetattr, fd, stbuf, valid, xdata);
+ return 0;
+}
+
+int32_t
+pl_fallocate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *pre,
+ struct iatt *post, dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(fallocate, xdata, frame, op_ret, op_errno, pre,
+ post, xdata);
+ return 0;
+}
int
-pl_finodelk (call_frame_t *frame, xlator_t *this,
- const char *volume, fd_t *fd, int32_t cmd, struct gf_flock *flock);
+pl_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t keep_size,
+ off_t offset, size_t len, dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+ STACK_WIND(frame, pl_fallocate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fallocate, fd, keep_size, offset, len,
+ xdata);
+ return 0;
+}
+
+int32_t
+pl_readlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, const char *path,
+ struct iatt *buf, dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(readlink, xdata, frame, op_ret, op_errno, path,
+ buf, xdata);
+ return 0;
+}
int
-pl_entrylk (call_frame_t *frame, xlator_t *this,
- const char *volume, loc_t *loc, const char *basename,
- entrylk_cmd cmd, entrylk_type type);
+pl_readlink(call_frame_t *frame, xlator_t *this, loc_t *loc, size_t size,
+ dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
+ STACK_WIND(frame, pl_readlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readlink, loc, size, xdata);
+ return 0;
+}
+
+int32_t
+pl_access_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(access, xdata, frame, op_ret, op_errno, xdata);
+ return 0;
+}
int
-pl_fentrylk (call_frame_t *frame, xlator_t *this,
- const char *volume, fd_t *fd, const char *basename,
- entrylk_cmd cmd, entrylk_type type);
+pl_access(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t mask,
+ dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, ((fd_t *)NULL), loc, NULL);
+ STACK_WIND(frame, pl_access_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->access, loc, mask, xdata);
+ return 0;
+}
+
+int32_t
+pl_seek_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, off_t offset, dict_t *xdata)
+{
+ PL_STACK_UNWIND_FOR_CLIENT(seek, xdata, frame, op_ret, op_errno, offset,
+ xdata);
+ return 0;
+}
+
+int32_t
+pl_seek(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ gf_seek_what_t what, dict_t *xdata)
+{
+ PL_LOCAL_GET_REQUESTS(frame, this, xdata, fd, NULL, NULL);
+ STACK_WIND(frame, pl_seek_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->seek, fd, offset, what, xdata);
+ return 0;
+}
struct xlator_fops fops = {
- .lookup = pl_lookup,
- .create = pl_create,
- .truncate = pl_truncate,
- .ftruncate = pl_ftruncate,
- .open = pl_open,
- .readv = pl_readv,
- .writev = pl_writev,
- .lk = pl_lk,
- .inodelk = pl_inodelk,
- .finodelk = pl_finodelk,
- .entrylk = pl_entrylk,
- .fentrylk = pl_fentrylk,
- .flush = pl_flush,
- .opendir = pl_opendir,
+ .lookup = pl_lookup,
+ .create = pl_create,
+ .fstat = pl_fstat,
+ .truncate = pl_truncate,
+ .ftruncate = pl_ftruncate,
+ .discard = pl_discard,
+ .zerofill = pl_zerofill,
+ .open = pl_open,
+ .readv = pl_readv,
+ .writev = pl_writev,
+ .lk = pl_lk,
+ .inodelk = pl_inodelk,
+ .finodelk = pl_finodelk,
+ .entrylk = pl_entrylk,
+ .fentrylk = pl_fentrylk,
+ .flush = pl_flush,
+ .opendir = pl_opendir,
+ .readdirp = pl_readdirp,
+ .setxattr = pl_setxattr,
+ .fsetxattr = pl_fsetxattr,
+ .getxattr = pl_getxattr,
+ .fgetxattr = pl_fgetxattr,
+ .removexattr = pl_removexattr,
+ .fremovexattr = pl_fremovexattr,
+ .rename = pl_rename,
+ .getactivelk = pl_getactivelk,
+ .setactivelk = pl_setactivelk,
+ .unlink = pl_unlink,
+ .access = pl_access,
+ .readlink = pl_readlink,
+ .fallocate = pl_fallocate,
+ .fsetattr = pl_fsetattr,
+ .setattr = pl_setattr,
+ .fxattrop = pl_fxattrop,
+ .xattrop = pl_xattrop,
+ .rchecksum = pl_rchecksum,
+ .statfs = pl_statfs,
+ .fsyncdir = pl_fsyncdir,
+ .readdir = pl_readdir,
+ .symlink = pl_symlink,
+ .link = pl_link,
+ .rmdir = pl_rmdir,
+ .mknod = pl_mknod,
+ .stat = pl_stat,
+ .seek = pl_seek,
};
struct xlator_dumpops dumpops = {
- .inodectx = pl_dump_inode_priv,
+ .inodectx = pl_dump_inode_priv,
};
struct xlator_cbks cbks = {
- .forget = pl_forget,
- .release = pl_release,
- .releasedir = pl_releasedir,
+ .forget = pl_forget,
+ .release = pl_release,
+ .releasedir = pl_releasedir,
+ .client_destroy = pl_client_destroy_cbk,
+ .client_disconnect = pl_client_disconnect_cbk,
};
-
struct volume_options options[] = {
- { .key = { "mandatory-locks", "mandatory" },
- .type = GF_OPTION_TYPE_BOOL
- },
- { .key = { "trace" },
- .type = GF_OPTION_TYPE_BOOL
- },
- { .key = {NULL} },
+ {.key = {"mandatory-locking"},
+ .type = GF_OPTION_TYPE_STR,
+ .default_value = "off",
+ .op_version = {GD_OP_VERSION_3_8_0},
+ .flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"locks"},
+     .description = "Specifies the mandatory-locking mode. Valid options "
+                    "are 'file' to use Linux-style mandatory locks, "
+                    "'forced' to use the volume strictly under mandatory "
+                    "lock semantics only, and 'optimal' to treat advisory "
+                    "and mandatory locks separately."},
+ {.key = {"trace"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .op_version = {GD_OP_VERSION_3_7_0},
+ .flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"locks"},
+ .description = "Trace the different lock requests "
+ "to logs."},
+ {.key = {"monkey-unlocking"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "false",
+ .op_version = {GD_OP_VERSION_3_9_0},
+ .flags = OPT_FLAG_SETTABLE,
+ .tags = {"locks"},
+ .description = "Ignore a random number of unlock requests. Useful "
+ "for testing/creating robust lock recovery mechanisms."},
+ {
+ .key = {"revocation-secs"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 0,
+ .max = INT_MAX,
+ .default_value = "0",
+ .op_version = {GD_OP_VERSION_3_9_0},
+ .flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"locks"},
+        .description = "Maximum time a lock can be taken out before "
+                       "being revoked.",
+ },
+ {
+ .key = {"revocation-clear-all"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "false",
+ .op_version = {GD_OP_VERSION_3_9_0},
+ .flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"locks"},
+ .description = "If set to true, will revoke BOTH granted and blocked "
+ "(pending) lock requests if a revocation threshold is "
+ "hit.",
+ },
+ {.key = {"revocation-max-blocked"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 0,
+ .max = INT_MAX,
+ .default_value = "0",
+ .op_version = {GD_OP_VERSION_3_9_0},
+ .flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .tags = {"locks"},
+     .description = "The number of blocked lock requests after which a lock "
+                    "will be revoked to allow the others to proceed. Can "
+                    "be used in conjunction with revocation-clear-all."},
+ {.key = {"notify-contention"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "yes",
+ .flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .op_version = {GD_OP_VERSION_4_0_0},
+ .tags = {"locks", "contention"},
+ .description = "When this option is enabled and a lock request "
+ "conflicts with a currently granted lock, an upcall "
+ "notification will be sent to the current owner of "
+ "the lock to request it to be released as soon as "
+ "possible."},
+ {.key = {"notify-contention-delay"},
+ .type = GF_OPTION_TYPE_INT,
+ .min = 0, /* An upcall notification is sent every time a conflict is
+ * detected. */
+ .max = 60,
+ .default_value = "5",
+ .flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC,
+ .op_version = {GD_OP_VERSION_4_0_0},
+ .tags = {"locks", "contention", "timeout"},
+ .description = "This value determines the minimum amount of time "
+ "(in seconds) between upcall contention notifications "
+ "on the same inode. If multiple lock requests are "
+ "received during this period, only one upcall will "
+ "be sent."},
+ {.key = {"enforce-mandatory-lock"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .flags = OPT_FLAG_SETTABLE,
+ .op_version = {GD_OP_VERSION_6_0},
+     .description = "Option to enable lock enforcement."},
+ {.key = {NULL}},
+};
+
+xlator_api_t xlator_api = {
+ .init = init,
+ .fini = fini,
+ .reconfigure = reconfigure,
+ .mem_acct_init = mem_acct_init,
+ .op_version = {1}, /* Present from the initial version */
+ .dumpops = &dumpops,
+ .fops = &fops,
+ .cbks = &cbks,
+ .options = options,
+ .identifier = "locks",
+ .category = GF_MAINTAINED,
};
diff --git a/xlators/features/locks/src/reservelk.c b/xlators/features/locks/src/reservelk.c
index 61110df79cb..604691fd887 100644
--- a/xlators/features/locks/src/reservelk.c
+++ b/xlators/features/locks/src/reservelk.c
@@ -1,450 +1,382 @@
/*
- Copyright (c) 2006, 2007, 2008 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
-
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
+ Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
-#include "glusterfs.h"
-#include "compat.h"
-#include "xlator.h"
-#include "inode.h"
-#include "logging.h"
-#include "common-utils.h"
-#include "list.h"
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/common-utils.h>
+#include <glusterfs/list.h>
#include "locks.h"
#include "common.h"
-void
-__delete_reserve_lock (posix_lock_t *lock)
-{
- list_del (&lock->list);
-}
-
-void
-__destroy_reserve_lock (posix_lock_t *lock)
-{
- GF_FREE (lock);
-}
-
/* Return true if the two reservelks have exactly same lock boundaries */
int
-reservelks_equal (posix_lock_t *l1, posix_lock_t *l2)
+reservelks_equal(posix_lock_t *l1, posix_lock_t *l2)
{
- if ((l1->fl_start == l2->fl_start) &&
- (l1->fl_end == l2->fl_end))
- return 1;
+ if ((l1->fl_start == l2->fl_start) && (l1->fl_end == l2->fl_end))
+ return 1;
- return 0;
+ return 0;
}
/* Determine if lock is grantable or not */
static posix_lock_t *
-__reservelk_grantable (pl_inode_t *pl_inode, posix_lock_t *lock)
+__reservelk_grantable(pl_inode_t *pl_inode, posix_lock_t *lock)
{
- xlator_t *this = NULL;
- posix_lock_t *l = NULL;
- posix_lock_t *ret_lock = NULL;
-
- this = THIS;
-
- if (list_empty (&pl_inode->reservelk_list)) {
- gf_log (this->name, GF_LOG_TRACE,
- "No reservelks in list");
- goto out;
- }
- list_for_each_entry (l, &pl_inode->reservelk_list, list){
- if (reservelks_equal (lock, l)) {
- ret_lock = l;
- break;
- }
+ xlator_t *this = THIS;
+ posix_lock_t *l = NULL;
+ posix_lock_t *ret_lock = NULL;
+
+ if (list_empty(&pl_inode->reservelk_list)) {
+ gf_log(this->name, GF_LOG_TRACE, "No reservelks in list");
+ goto out;
+ }
+ list_for_each_entry(l, &pl_inode->reservelk_list, list)
+ {
+ if (reservelks_equal(lock, l)) {
+ ret_lock = l;
+ break;
}
+ }
out:
- return ret_lock;
+ return ret_lock;
}
static int
-__same_owner_reservelk (posix_lock_t *l1, posix_lock_t *l2)
+__same_owner_reservelk(posix_lock_t *l1, posix_lock_t *l2)
{
- return ((l1->owner == l2->owner));
-
+ return (is_same_lkowner(&l1->owner, &l2->owner));
}
static posix_lock_t *
-__matching_reservelk (pl_inode_t *pl_inode, posix_lock_t *lock)
+__matching_reservelk(pl_inode_t *pl_inode, posix_lock_t *lock)
{
- posix_lock_t *l = NULL;
+ posix_lock_t *l = NULL;
- if (list_empty (&pl_inode->reservelk_list)) {
- gf_log ("posix-locks", GF_LOG_TRACE,
- "reservelk list empty");
- return NULL;
- }
+ if (list_empty(&pl_inode->reservelk_list)) {
+ gf_log("posix-locks", GF_LOG_TRACE, "reservelk list empty");
+ return NULL;
+ }
- list_for_each_entry (l, &pl_inode->reservelk_list, list) {
- if (reservelks_equal (l, lock)) {
- gf_log ("posix-locks", GF_LOG_TRACE,
- "equal reservelk found");
- break;
- }
+ list_for_each_entry(l, &pl_inode->reservelk_list, list)
+ {
+ if (reservelks_equal(l, lock)) {
+ gf_log("posix-locks", GF_LOG_TRACE, "equal reservelk found");
+ break;
}
+ }
- return l;
+ return l;
}
static int
-__reservelk_conflict (xlator_t *this, pl_inode_t *pl_inode,
- posix_lock_t *lock)
+__reservelk_conflict(xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock)
{
- posix_lock_t *conf = NULL;
- int ret = 0;
-
- conf = __matching_reservelk (pl_inode, lock);
- if (conf) {
- gf_log (this->name, GF_LOG_TRACE,
- "Matching reservelk found");
- if (__same_owner_reservelk (lock, conf)) {
- list_del_init (&conf->list);
- gf_log (this->name, GF_LOG_TRACE,
- "Removing the matching reservelk for setlk to progress");
- GF_FREE (conf);
- ret = 0;
- } else {
- gf_log (this->name, GF_LOG_TRACE,
- "Conflicting reservelk found");
- ret = 1;
- }
-
+ int ret = 0;
+
+ posix_lock_t *conf = __matching_reservelk(pl_inode, lock);
+ if (conf) {
+ gf_log(this->name, GF_LOG_TRACE, "Matching reservelk found");
+ if (__same_owner_reservelk(lock, conf)) {
+ list_del_init(&conf->list);
+ gf_log(this->name, GF_LOG_TRACE,
+ "Removing the matching reservelk for setlk to progress");
+ __destroy_lock(conf);
+ ret = 0;
+ } else {
+ gf_log(this->name, GF_LOG_TRACE, "Conflicting reservelk found");
+ ret = 1;
}
- return ret;
-
+ }
+ return ret;
}
int
-pl_verify_reservelk (xlator_t *this, pl_inode_t *pl_inode,
- posix_lock_t *lock, int can_block)
+pl_verify_reservelk(xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
+ const int can_block)
{
- int ret = 0;
-
- pthread_mutex_lock (&pl_inode->mutex);
- {
- if (__reservelk_conflict (this, pl_inode, lock)) {
- gf_log (this->name, GF_LOG_TRACE,
- "Found conflicting reservelk. Blocking until reservelk is unlocked.");
- lock->blocked = can_block;
- list_add_tail (&lock->list, &pl_inode->blocked_calls);
- ret = -1;
- goto unlock;
- }
-
- gf_log (this->name, GF_LOG_TRACE,
- "no conflicting reservelk found. Call continuing");
- ret = 0;
-
+ int ret = 0;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ if (__reservelk_conflict(this, pl_inode, lock)) {
+ lock->blocked = can_block;
+ list_add_tail(&lock->list, &pl_inode->blocked_calls);
+ pthread_mutex_unlock(&pl_inode->mutex);
+ gf_log(this->name, GF_LOG_TRACE,
+ "Found conflicting reservelk. Blocking until reservelk is "
+ "unlocked.");
+ ret = -1;
+ goto out;
}
-unlock:
- pthread_mutex_unlock (&pl_inode->mutex);
-
- return ret;
-
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+ gf_log(this->name, GF_LOG_TRACE,
+ "no conflicting reservelk found. Call continuing");
+ ret = 0;
+out:
+ return ret;
}
-
/* Determine whether the lock can be granted and, if so, add it to the
 * reservelk list. If it conflicts and the request may block, queue it on
 * blocked_reservelks instead.
 */
static int
-__lock_reservelk (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
- int can_block)
+__lock_reservelk(xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
+ const int can_block)
{
- posix_lock_t *conf = NULL;
- int ret = -EINVAL;
+ int ret = -EINVAL;
- conf = __reservelk_grantable (pl_inode, lock);
- if (conf){
- ret = -EAGAIN;
- if (can_block == 0)
- goto out;
+ posix_lock_t *conf = __reservelk_grantable(pl_inode, lock);
+ if (conf) {
+ ret = -EAGAIN;
+ if (can_block == 0)
+ goto out;
- list_add_tail (&lock->list, &pl_inode->blocked_reservelks);
+ list_add_tail(&lock->list, &pl_inode->blocked_reservelks);
- gf_log (this->name, GF_LOG_TRACE,
- "%s (pid=%d) lk-owner:%"PRIu64" %"PRId64" - %"PRId64" => Blocked",
- lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
- lock->client_pid,
- lock->owner,
- lock->user_flock.l_start,
- lock->user_flock.l_len);
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s (pid=%d) lk-owner:%s %" PRId64 " - %" PRId64 " => Blocked",
+ lock->fl_type == F_UNLCK ? "Unlock" : "Lock", lock->client_pid,
+ lkowner_utoa(&lock->owner), lock->user_flock.l_start,
+ lock->user_flock.l_len);
+ goto out;
+ }
- goto out;
- }
+ list_add(&lock->list, &pl_inode->reservelk_list);
- list_add (&lock->list, &pl_inode->reservelk_list);
-
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
static posix_lock_t *
-find_matching_reservelk (posix_lock_t *lock, pl_inode_t *pl_inode)
+find_matching_reservelk(posix_lock_t *lock, pl_inode_t *pl_inode)
{
- posix_lock_t *l = NULL;
- list_for_each_entry (l, &pl_inode->reservelk_list, list) {
- if (reservelks_equal (l, lock))
- return l;
- }
- return NULL;
+ posix_lock_t *l = NULL;
+ list_for_each_entry(l, &pl_inode->reservelk_list, list)
+ {
+ if (reservelks_equal(l, lock))
+ return l;
+ }
+ return NULL;
}
/* An F_UNLCK request removes the lock whose boundaries exactly match those
 * specified by the unlock. If no such lock is found, NULL is returned.
 */
static posix_lock_t *
-__reserve_unlock_lock (xlator_t *this, posix_lock_t *lock, pl_inode_t *pl_inode)
+__reserve_unlock_lock(xlator_t *this, posix_lock_t *lock, pl_inode_t *pl_inode)
{
-
- posix_lock_t *conf = NULL;
-
- conf = find_matching_reservelk (lock, pl_inode);
- if (!conf) {
- gf_log (this->name, GF_LOG_DEBUG,
- " Matching lock not found for unlock");
- goto out;
- }
- __delete_reserve_lock (conf);
- gf_log (this->name, GF_LOG_DEBUG,
- " Matching lock found for unlock");
+ posix_lock_t *conf = find_matching_reservelk(lock, pl_inode);
+ if (!conf) {
+ gf_log(this->name, GF_LOG_DEBUG, " Matching lock not found for unlock");
+ goto out;
+ }
+ __delete_lock(conf);
+ gf_log(this->name, GF_LOG_DEBUG, " Matching lock found for unlock");
out:
- return conf;
-
-
+ return conf;
}
static void
-__grant_blocked_reserve_locks (xlator_t *this, pl_inode_t *pl_inode,
- struct list_head *granted)
+__grant_blocked_reserve_locks(xlator_t *this, pl_inode_t *pl_inode,
+ struct list_head *granted)
{
- int bl_ret = 0;
- posix_lock_t *bl = NULL;
- posix_lock_t *tmp = NULL;
-
- struct list_head blocked_list;
+ int bl_ret = 0;
+ posix_lock_t *bl = NULL;
+ posix_lock_t *tmp = NULL;
- INIT_LIST_HEAD (&blocked_list);
- list_splice_init (&pl_inode->blocked_reservelks, &blocked_list);
+ struct list_head blocked_list;
- list_for_each_entry_safe (bl, tmp, &blocked_list, list) {
+ INIT_LIST_HEAD(&blocked_list);
+ list_splice_init(&pl_inode->blocked_reservelks, &blocked_list);
- list_del_init (&bl->list);
+ list_for_each_entry_safe(bl, tmp, &blocked_list, list)
+ {
+ list_del_init(&bl->list);
- bl_ret = __lock_reservelk (this, pl_inode, bl, 1);
+ bl_ret = __lock_reservelk(this, pl_inode, bl, 1);
- if (bl_ret == 0) {
- list_add (&bl->list, granted);
- }
+ if (bl_ret == 0) {
+ list_add(&bl->list, granted);
}
- return;
+ }
+ return;
}
/* Grant all reservelks blocked on lock(s) */
void
-grant_blocked_reserve_locks (xlator_t *this, pl_inode_t *pl_inode)
+grant_blocked_reserve_locks(xlator_t *this, pl_inode_t *pl_inode)
{
- struct list_head granted;
- posix_lock_t *lock = NULL;
- posix_lock_t *tmp = NULL;
-
- INIT_LIST_HEAD (&granted);
+ struct list_head granted;
+ posix_lock_t *lock = NULL;
+ posix_lock_t *tmp = NULL;
- if (list_empty (&pl_inode->blocked_reservelks)) {
- gf_log (this->name, GF_LOG_TRACE,
- "No blocked locks to be granted");
- return;
- }
-
- pthread_mutex_lock (&pl_inode->mutex);
- {
- __grant_blocked_reserve_locks (this, pl_inode, &granted);
- }
- pthread_mutex_unlock (&pl_inode->mutex);
-
- list_for_each_entry_safe (lock, tmp, &granted, list) {
- gf_log (this->name, GF_LOG_TRACE,
- "%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => Granted",
- lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
- lock->client_pid,
- lock->owner,
- lock->user_flock.l_start,
- lock->user_flock.l_len);
-
- STACK_UNWIND_STRICT (lk, lock->frame, 0, 0, &lock->user_flock);
- }
+ INIT_LIST_HEAD(&granted);
+ if (list_empty(&pl_inode->blocked_reservelks)) {
+ gf_log(this->name, GF_LOG_TRACE, "No blocked locks to be granted");
+ return;
+ }
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ __grant_blocked_reserve_locks(this, pl_inode, &granted);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ list_for_each_entry_safe(lock, tmp, &granted, list)
+ {
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s (pid=%d) (lk-owner=%s) %" PRId64 " - %" PRId64 " => Granted",
+ lock->fl_type == F_UNLCK ? "Unlock" : "Lock", lock->client_pid,
+ lkowner_utoa(&lock->owner), lock->user_flock.l_start,
+ lock->user_flock.l_len);
+
+ STACK_UNWIND_STRICT(lk, lock->frame, 0, 0, &lock->user_flock, NULL);
+ }
}
static void
-__grant_blocked_lock_calls (xlator_t *this, pl_inode_t *pl_inode,
- struct list_head *granted)
+__grant_blocked_lock_calls(xlator_t *this, pl_inode_t *pl_inode,
+ struct list_head *granted)
{
- int bl_ret = 0;
- posix_lock_t *bl = NULL;
- posix_lock_t *tmp = NULL;
+ int bl_ret = 0;
+ posix_lock_t *bl = NULL;
+ posix_lock_t *tmp = NULL;
- struct list_head blocked_list;
+ struct list_head blocked_list;
- INIT_LIST_HEAD (&blocked_list);
- list_splice_init (&pl_inode->blocked_reservelks, &blocked_list);
+ INIT_LIST_HEAD(&blocked_list);
+ list_splice_init(&pl_inode->blocked_reservelks, &blocked_list);
- list_for_each_entry_safe (bl, tmp, &blocked_list, list) {
+ list_for_each_entry_safe(bl, tmp, &blocked_list, list)
+ {
+ list_del_init(&bl->list);
- list_del_init (&bl->list);
+ bl_ret = pl_verify_reservelk(this, pl_inode, bl, bl->blocked);
- bl_ret = pl_verify_reservelk (this, pl_inode, bl, bl->blocked);
-
- if (bl_ret == 0) {
- list_add_tail (&bl->list, granted);
- }
+ if (bl_ret == 0) {
+ list_add_tail(&bl->list, granted);
}
- return;
+ }
+ return;
}
void
-grant_blocked_lock_calls (xlator_t *this, pl_inode_t *pl_inode)
+grant_blocked_lock_calls(xlator_t *this, pl_inode_t *pl_inode)
{
- struct list_head granted;
- posix_lock_t *lock = NULL;
- posix_lock_t *tmp = NULL;
- fd_t *fd = NULL;
-
- int can_block = 0;
- int32_t cmd = 0;
- int ret = 0;
-
- if (list_empty (&pl_inode->blocked_calls)) {
- gf_log (this->name, GF_LOG_TRACE,
- "No blocked lock calls to be granted");
- return;
- }
+ struct list_head granted;
+ posix_lock_t *lock = NULL;
+ posix_lock_t *tmp = NULL;
+ fd_t *fd = NULL;
- pthread_mutex_lock (&pl_inode->mutex);
- {
- __grant_blocked_lock_calls (this, pl_inode, &granted);
- }
- pthread_mutex_unlock (&pl_inode->mutex);
-
- list_for_each_entry_safe (lock, tmp, &granted, list) {
- fd = fd_from_fdnum (lock);
-
- if (lock->blocked) {
- can_block = 1;
- cmd = F_SETLKW;
- }
- else
- cmd = F_SETLK;
-
- lock->blocked = 0;
- ret = pl_setlk (this, pl_inode, lock, can_block);
- if (ret == -1) {
- if (can_block) {
- pl_trace_block (this, lock->frame, fd, NULL,
- cmd, &lock->user_flock, NULL);
- continue;
- } else {
- gf_log (this->name, GF_LOG_DEBUG, "returning EAGAIN");
- pl_trace_out (this, lock->frame, fd, NULL, cmd,
- &lock->user_flock, -1, EAGAIN, NULL);
- pl_update_refkeeper (this, fd->inode);
- STACK_UNWIND_STRICT (lk, lock->frame, -1, EAGAIN, &lock->user_flock);
- __destroy_lock (lock);
- }
- }
+ int can_block = 0;
+ int32_t cmd = 0;
+ int ret = 0;
+ if (list_empty(&pl_inode->blocked_calls)) {
+ gf_log(this->name, GF_LOG_TRACE, "No blocked lock calls to be granted");
+ return;
+ }
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ __grant_blocked_lock_calls(this, pl_inode, &granted);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ list_for_each_entry_safe(lock, tmp, &granted, list)
+ {
+ fd = fd_from_fdnum(lock);
+
+ if (lock->blocked) {
+ can_block = 1;
+ cmd = F_SETLKW;
+ } else
+ cmd = F_SETLK;
+
+ lock->blocked = 0;
+ ret = pl_setlk(this, pl_inode, lock, can_block);
+ if (ret == -1) {
+ if (can_block) {
+ continue;
+ } else {
+ gf_log(this->name, GF_LOG_DEBUG, "returning EAGAIN");
+ pl_trace_out(this, lock->frame, fd, NULL, cmd,
+ &lock->user_flock, -1, EAGAIN, NULL);
+ pl_update_refkeeper(this, fd->inode);
+ STACK_UNWIND_STRICT(lk, lock->frame, -1, EAGAIN,
+ &lock->user_flock, NULL);
+ __destroy_lock(lock);
+ }
}
-
+ }
}
-
int
-pl_reserve_unlock (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock)
+pl_reserve_unlock(xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock)
{
- posix_lock_t *retlock = NULL;
- int ret = -1;
-
- pthread_mutex_lock (&pl_inode->mutex);
- {
- retlock = __reserve_unlock_lock (this, lock, pl_inode);
- if (!retlock) {
- gf_log (this->name, GF_LOG_DEBUG,
- "Bad Unlock issued on Inode lock");
- ret = -EINVAL;
- goto out;
- }
-
- gf_log (this->name, GF_LOG_TRACE,
- "Reservelk Unlock successful");
- __destroy_reserve_lock (retlock);
- ret = 0;
+ posix_lock_t *retlock = NULL;
+ int ret = -1;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ retlock = __reserve_unlock_lock(this, lock, pl_inode);
+ if (!retlock) {
+ pthread_mutex_unlock(&pl_inode->mutex);
+ gf_log(this->name, GF_LOG_DEBUG, "Bad Unlock issued on Inode lock");
+ ret = -EINVAL;
+ goto out;
}
-out:
- pthread_mutex_unlock (&pl_inode->mutex);
-
- grant_blocked_reserve_locks (this, pl_inode);
- grant_blocked_lock_calls (this, pl_inode);
- return ret;
+ gf_log(this->name, GF_LOG_TRACE, "Reservelk Unlock successful");
+ __destroy_lock(retlock);
+ ret = 0;
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+out:
+ grant_blocked_reserve_locks(this, pl_inode);
+ grant_blocked_lock_calls(this, pl_inode);
+ return ret;
}
int
-pl_reserve_setlk (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
- int can_block)
+pl_reserve_setlk(xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock,
+ int can_block)
{
- int ret = -EINVAL;
-
- pthread_mutex_lock (&pl_inode->mutex);
- {
-
- ret = __lock_reservelk (this, pl_inode, lock, can_block);
- if (ret < 0)
- gf_log (this->name, GF_LOG_TRACE,
- "%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => NOK",
- lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
- lock->client_pid,
- lock->owner,
- lock->user_flock.l_start,
- lock->user_flock.l_len);
- else
- gf_log (this->name, GF_LOG_TRACE,
- "%s (pid=%d) (lk-owner=%"PRIu64") %"PRId64" - %"PRId64" => OK",
- lock->fl_type == F_UNLCK ? "Unlock" : "Lock",
- lock->client_pid,
- lock->owner,
- lock->fl_start,
- lock->fl_end);
-
- }
- pthread_mutex_unlock (&pl_inode->mutex);
- return ret;
+ int ret = -EINVAL;
+
+ pthread_mutex_lock(&pl_inode->mutex);
+ {
+ ret = __lock_reservelk(this, pl_inode, lock, can_block);
+ }
+ pthread_mutex_unlock(&pl_inode->mutex);
+
+ if (ret < 0)
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s (pid=%d) (lk-owner=%s) %" PRId64 " - %" PRId64 " => NOK",
+ lock->fl_type == F_UNLCK ? "Unlock" : "Lock", lock->client_pid,
+ lkowner_utoa(&lock->owner), lock->user_flock.l_start,
+ lock->user_flock.l_len);
+ else
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s (pid=%d) (lk-owner=%s) %" PRId64 " - %" PRId64 " => OK",
+ lock->fl_type == F_UNLCK ? "Unlock" : "Lock", lock->client_pid,
+ lkowner_utoa(&lock->owner), lock->fl_start, lock->fl_end);
+
+ return ret;
}
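
Note: the reservelk hunk above replaces the old integer comparison of lock owners (l1->owner == l2->owner) with is_same_lkowner() and switches the log format from %PRIu64 to lkowner_utoa(), since the owner is no longer a plain 64-bit value. The following is a minimal standalone sketch of the same idea; the types and helper names are hypothetical stand-ins, not the GlusterFS API.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for a lock-owner: an opaque byte buffer plus length. */
    typedef struct {
        int len;
        char data[16];
    } lkowner_t;

    /* Value comparison must look at the whole buffer, not a single integer. */
    static int same_lkowner(const lkowner_t *a, const lkowner_t *b)
    {
        return a->len == b->len && memcmp(a->data, b->data, a->len) == 0;
    }

    /* Render the owner as hex for log messages (analogous to lkowner_utoa). */
    static const char *lkowner_str(const lkowner_t *o, char *buf, size_t buflen)
    {
        size_t off = 0;
        for (int i = 0; i < o->len && off + 3 <= buflen; i++)
            off += snprintf(buf + off, buflen - off, "%02x",
                            (unsigned char)o->data[i]);
        return buf;
    }

    int main(void)
    {
        lkowner_t a = {8, {1, 2, 3, 4, 5, 6, 7, 8}};
        lkowner_t b = a;
        char buf[64];

        printf("owner %s equal: %d\n", lkowner_str(&a, buf, sizeof(buf)),
               same_lkowner(&a, &b));
        return 0;
    }
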
diff --git a/xlators/features/locks/tests/unit-test.c b/xlators/features/locks/tests/unit-test.c
index 48c9f301267..d285b12b5aa 100644
--- a/xlators/features/locks/tests/unit-test.c
+++ b/xlators/features/locks/tests/unit-test.c
@@ -1,75 +1,77 @@
/*
- Copyright (c) 2006-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
+ Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
-
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
-
-#include "glusterfs.h"
-#include "compat.h"
-#include "xlator.h"
-#include "inode.h"
-#include "logging.h"
-#include "common-utils.h"
-#include "list.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/common-utils.h>
+#include <glusterfs/list.h>
#include "locks.h"
#include "common.h"
-#define expect(cond) if (!(cond)) { goto out; }
+#define expect(cond) \
+ if (!(cond)) { \
+ goto out; \
+ }
-extern int lock_name (pl_inode_t *, const char *, entrylk_type);
-extern int unlock_name (pl_inode_t *, const char *, entrylk_type);
+extern int
+lock_name(pl_inode_t *, const char *, entrylk_type);
+extern int
+unlock_name(pl_inode_t *, const char *, entrylk_type);
-int main (int argc, char **argv)
+int
+main(int argc, char **argv)
{
- int ret = 1;
- int r = -1;
+ int ret = 1;
+ int r = -1;
+
+ pl_inode_t *pinode = CALLOC(sizeof(pl_inode_t), 1);
+ pthread_mutex_init(&pinode->dir_lock_mutex, NULL);
+ INIT_LIST_HEAD(&pinode->gf_dir_locks);
- pl_inode_t *pinode = CALLOC (sizeof (pl_inode_t), 1);
- pthread_mutex_init (&pinode->dir_lock_mutex, NULL);
- INIT_LIST_HEAD (&pinode->gf_dir_locks);
+ r = lock_name(pinode, NULL, ENTRYLK_WRLCK);
+ expect(r == 0);
+ {
+ r = lock_name(pinode, "foo", ENTRYLK_WRLCK);
+ expect(r == -EAGAIN);
+ }
+ r = unlock_name(pinode, NULL, ENTRYLK_WRLCK);
+ expect(r == 0);
- r = lock_name (pinode, NULL, ENTRYLK_WRLCK); expect (r == 0);
- {
- r = lock_name (pinode, "foo", ENTRYLK_WRLCK); expect (r == -EAGAIN);
- }
- r = unlock_name (pinode, NULL, ENTRYLK_WRLCK); expect (r == 0);
+ r = lock_name(pinode, "foo", ENTRYLK_RDLCK);
+ expect(r == 0);
+ {
+ r = lock_name(pinode, "foo", ENTRYLK_RDLCK);
+ expect(r == 0);
+ {
+ r = lock_name(pinode, "foo", ENTRYLK_WRLCK);
+ expect(r == -EAGAIN);
+ }
+ r = unlock_name(pinode, "foo", ENTRYLK_RDLCK);
+ expect(r == 0);
+ }
+ r = unlock_name(pinode, "foo", ENTRYLK_RDLCK);
+ expect(r == 0);
- r = lock_name (pinode, "foo", ENTRYLK_RDLCK); expect (r == 0);
- {
- r = lock_name (pinode, "foo", ENTRYLK_RDLCK); expect (r == 0);
- {
- r = lock_name (pinode, "foo", ENTRYLK_WRLCK); expect (r == -EAGAIN);
- }
- r = unlock_name (pinode, "foo", ENTRYLK_RDLCK); expect (r == 0);
- }
- r = unlock_name (pinode, "foo", ENTRYLK_RDLCK); expect (r == 0);
-
- r = lock_name (pinode, "foo", ENTRYLK_WRLCK); expect (r == 0);
- r = unlock_name (pinode, "foo", ENTRYLK_WRLCK); expect (r == 0);
+ r = lock_name(pinode, "foo", ENTRYLK_WRLCK);
+ expect(r == 0);
+ r = unlock_name(pinode, "foo", ENTRYLK_WRLCK);
+ expect(r == 0);
- r = lock_name (pinode, "baz", ENTRYLK_WRLCK); expect (r == 0);
- r = lock_name (pinode, "baz", ENTRYLK_RDLCK); expect (r == -EAGAIN);
+ r = lock_name(pinode, "baz", ENTRYLK_WRLCK);
+ expect(r == 0);
+ r = lock_name(pinode, "baz", ENTRYLK_RDLCK);
+ expect(r == -EAGAIN);
- ret = 0;
+ ret = 0;
out:
- return ret;
+ return ret;
}
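
Note: the expect() macro above is only reformatted into a braced multi-line form. A common hardening for statement-like macros of this kind is the do { ... } while (0) idiom, which stays safe when the macro is followed by a semicolon inside an unbraced if/else. A small standalone illustration (not the macro used in this test) follows.

    #include <stdio.h>

    /* Statement-like macro wrapped in do { } while (0) so that
     * "if (x) CHECK(cond); else ..." parses as intended. */
    #define CHECK(cond)                                                     \
        do {                                                                \
            if (!(cond)) {                                                  \
                fprintf(stderr, "check failed: %s\n", #cond);               \
                goto out;                                                   \
            }                                                               \
        } while (0)

    int main(void)
    {
        int ret = 1;

        CHECK(1 + 1 == 2);
        CHECK(2 > 1);

        ret = 0;
    out:
        return ret;
    }
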
diff --git a/xlators/features/mac-compat/src/Makefile.am b/xlators/features/mac-compat/src/Makefile.am
deleted file mode 100644
index 915c13e308f..00000000000
--- a/xlators/features/mac-compat/src/Makefile.am
+++ /dev/null
@@ -1,13 +0,0 @@
-xlator_LTLIBRARIES = mac-compat.la
-xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
-
-mac_compat_la_LDFLAGS = -module -avoidversion
-
-mac_compat_la_SOURCES = mac-compat.c
-mac_compat_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
-
-AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS) \
- -I$(top_srcdir)/libglusterfs/src -shared -nostartfiles $(GF_CFLAGS)
-
-CLEANFILES =
-
diff --git a/xlators/features/mac-compat/src/mac-compat.c b/xlators/features/mac-compat/src/mac-compat.c
deleted file mode 100644
index b1277ec1610..00000000000
--- a/xlators/features/mac-compat/src/mac-compat.c
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- Copyright (c) 2008-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
-
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
-
-#include "xlator.h"
-#include "defaults.h"
-#include "compat-errno.h"
-
-
-enum apple_xattr {
- GF_FINDER_INFO_XATTR,
- GF_RESOURCE_FORK_XATTR,
- GF_XATTR_ALL,
- GF_XATTR_NONE
-};
-
-static char *apple_xattr_name[] = {
- [GF_FINDER_INFO_XATTR] = "com.apple.FinderInfo",
- [GF_RESOURCE_FORK_XATTR] = "com.apple.ResourceFork"
-};
-
-static const char *apple_xattr_value[] = {
- [GF_FINDER_INFO_XATTR] =
- /* 1 2 3 4 5 6 7 8 */
- "\0\0\0\0\0\0\0\0"
- "\0\0\0\0\0\0\0\0"
- "\0\0\0\0\0\0\0\0"
- "\0\0\0\0\0\0\0\0",
- [GF_RESOURCE_FORK_XATTR] = ""
-};
-
-static int32_t apple_xattr_len[] = {
- [GF_FINDER_INFO_XATTR] = 32,
- [GF_RESOURCE_FORK_XATTR] = 1
-};
-
-
-int32_t
-maccomp_getxattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *dict)
-{
- intptr_t ax = (intptr_t)this->private;
- int i = 0;
-
- if ((ax == GF_XATTR_ALL && op_ret >= 0) || ax != GF_XATTR_NONE) {
- op_ret = op_errno = 0;
-
- for (i = 0; i < GF_XATTR_ALL; i++) {
- if (dict_get (dict, apple_xattr_name[i]))
- continue;
-
- if (dict_set (dict, apple_xattr_name[i],
- bin_to_data ((void *)apple_xattr_value[i],
- apple_xattr_len[i])) == -1) {
- op_ret = -1;
- op_errno = ENOMEM;
-
- break;
- }
- }
- }
-
- STACK_UNWIND_STRICT (getxattr, frame, op_ret, op_errno, dict);
-
- return 0;
-}
-
-
-int32_t
-maccomp_getxattr (call_frame_t *frame, xlator_t *this, loc_t *loc,
- const char *name)
-{
- intptr_t ax = GF_XATTR_NONE;
- int i = 0;
-
- if (name) {
- for (i = 0; i < GF_XATTR_ALL; i++) {
- if (strcmp (apple_xattr_name[i], name) == 0) {
- ax = i;
-
- break;
- }
- }
- } else
- ax = GF_XATTR_ALL;
-
- this->private = (void *)ax;
-
- STACK_WIND (frame, maccomp_getxattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->getxattr,
- loc, name);
- return 0;
-}
-
-
-int32_t
-maccomp_fgetxattr (call_frame_t *frame, xlator_t *this, fd_t *fd,
- const char *name)
-{
- intptr_t ax = GF_XATTR_NONE;
- int i = 0;
-
- if (name) {
- for (i = 0; i < GF_XATTR_ALL; i++) {
- if (strcmp (apple_xattr_name[i], name) == 0) {
- ax = i;
-
- break;
- }
- }
- } else
- ax = GF_XATTR_ALL;
-
- this->private = (void *)ax;
-
- STACK_WIND (frame, maccomp_getxattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fgetxattr,
- fd, name);
- return 0;
-}
-
-
-int32_t
-maccomp_setxattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
-{
- intptr_t ax = (intptr_t)this->private;
-
- if (op_ret == -1 && ax != GF_XATTR_NONE)
- op_ret = op_errno = 0;
-
- STACK_UNWIND_STRICT (setxattr, frame, op_ret, op_errno);
-
- return 0;
-}
-
-
-int32_t
-maccomp_setxattr (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
- int32_t flags)
-{
- intptr_t ax = GF_XATTR_NONE;
- int i = 0;
-
- for (i = 0; i < GF_XATTR_ALL; i++) {
- if (dict_get (dict, apple_xattr_name[i])) {
- ax = i;
-
- break;
- }
- }
-
- this->private = (void *)ax;
-
- STACK_WIND (frame, maccomp_setxattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->setxattr,
- loc, dict, flags);
- return 0;
-}
-
-
-int32_t
-maccomp_fsetxattr (call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *dict,
- int32_t flags)
-{
- intptr_t ax = GF_XATTR_NONE;
- int i = 0;
-
- for (i = 0; i < GF_XATTR_ALL; i++) {
- if (dict_get (dict, apple_xattr_name[i])) {
- ax = i;
-
- break;
- }
- }
-
- this->private = (void *)ax;
-
- STACK_WIND (frame, maccomp_setxattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fsetxattr,
- fd, dict, flags);
- return 0;
-}
-
-
-int32_t
-init (xlator_t *this)
-{
- if (!this->children || this->children->next) {
- gf_log (this->name, GF_LOG_ERROR,
- "translator not configured with exactly one child");
- return -1;
- }
-
- if (!this->parents) {
- gf_log (this->name, GF_LOG_WARNING,
- "dangling volume. check volfile ");
- }
-
- return 0;
-}
-
-
-void
-fini (xlator_t *this)
-{
- return;
-}
-
-
-struct xlator_fops fops = {
- .getxattr = maccomp_getxattr,
- .fgetxattr = maccomp_fgetxattr,
- .setxattr = maccomp_setxattr,
- .fsetxattr = maccomp_fsetxattr,
-};
-
-struct xlator_cbks cbks = {
-};
-
-struct volume_options options[] = {
- { .key = {NULL} },
-};
diff --git a/xlators/features/marker/Makefile.am b/xlators/features/marker/Makefile.am
index a6ba2de16ae..a985f42a877 100644
--- a/xlators/features/marker/Makefile.am
+++ b/xlators/features/marker/Makefile.am
@@ -1,3 +1,3 @@
-SUBDIRS = src @SYNCDAEMON_SUBDIR@
+SUBDIRS = src
CLEANFILES =
diff --git a/xlators/features/marker/src/Makefile.am b/xlators/features/marker/src/Makefile.am
index 501586a76b6..58056b36511 100644
--- a/xlators/features/marker/src/Makefile.am
+++ b/xlators/features/marker/src/Makefile.am
@@ -1,15 +1,24 @@
+if WITH_SERVER
xlator_LTLIBRARIES = marker.la
+endif
xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
-marker_la_LDFLAGS = -module -avoidversion
+marker_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+
+marker_la_SOURCES = marker.c marker-quota.c marker-quota-helper.c \
+ marker-common.c
-marker_la_SOURCES = marker.c marker-quota.c marker-quota-helper.c marker-common.c
marker_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
-noinst_HEADERS = marker-mem-types.h marker.h marker-quota.h marker-quota-helper.h marker-common.h $(top_builddir)/xlators/lib/src/libxlator.h
+noinst_HEADERS = marker-mem-types.h marker.h marker-quota.h \
+ marker-quota-helper.h marker-common.h \
+ $(top_builddir)/xlators/lib/src/libxlator.h
+
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \
+ -I$(top_srcdir)/xlators/lib/src
-AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -fno-strict-aliasing -D$(GF_HOST_OS) \
- -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/xlators/lib/src $(GF_CFLAGS) -shared -nostartfiles
+AM_CFLAGS = -Wall -fno-strict-aliasing $(GF_CFLAGS)
CLEANFILES =
diff --git a/xlators/features/marker/src/marker-common.c b/xlators/features/marker/src/marker-common.c
index 9d27167fc0d..9c9047005d6 100644
--- a/xlators/features/marker/src/marker-common.c
+++ b/xlators/features/marker/src/marker-common.c
@@ -1,86 +1,57 @@
-/*Copyright (c) 2008-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+/*
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
-
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
#include <fnmatch.h>
#include "marker-common.h"
marker_inode_ctx_t *
-marker_inode_ctx_new ()
+marker_inode_ctx_new()
{
- marker_inode_ctx_t *ctx = NULL;
+ marker_inode_ctx_t *ctx = NULL;
- ctx = GF_CALLOC (1, sizeof (marker_inode_ctx_t),
- gf_marker_mt_marker_inode_ctx_t);
- if (ctx == NULL)
- goto out;
+ ctx = GF_CALLOC(1, sizeof(marker_inode_ctx_t),
+ gf_marker_mt_marker_inode_ctx_t);
+ if (ctx == NULL)
+ goto out;
- ctx->quota_ctx = NULL;
+ ctx->quota_ctx = NULL;
out:
- return ctx;
+ return ctx;
}
int32_t
-marker_force_inode_ctx_get (inode_t *inode, xlator_t *this,
- marker_inode_ctx_t **ctx)
+marker_force_inode_ctx_get(inode_t *inode, xlator_t *this,
+ marker_inode_ctx_t **ctx)
{
- int32_t ret = -1;
- uint64_t ctx_int = 0;
-
- LOCK (&inode->lock);
- {
- ret = __inode_ctx_get (inode, this, &ctx_int);
- if (ret == 0)
- *ctx = (marker_inode_ctx_t *) (unsigned long)ctx_int;
- else {
- *ctx = marker_inode_ctx_new ();
- if (*ctx == NULL)
- goto unlock;
-
- ret = __inode_ctx_put (inode, this,
- (uint64_t )(unsigned long) *ctx);
- if (ret == -1) {
- GF_FREE (*ctx);
- goto unlock;
- }
- ret = 0;
- }
+ int32_t ret = -1;
+ uint64_t ctx_int = 0;
+
+ LOCK(&inode->lock);
+ {
+ ret = __inode_ctx_get(inode, this, &ctx_int);
+ if (ret == 0)
+ *ctx = (marker_inode_ctx_t *)(unsigned long)ctx_int;
+ else {
+ *ctx = marker_inode_ctx_new();
+ if (*ctx == NULL)
+ goto unlock;
+
+ ret = __inode_ctx_put(inode, this, (uint64_t)(unsigned long)*ctx);
+ if (ret == -1) {
+ GF_FREE(*ctx);
+ goto unlock;
+ }
+ ret = 0;
}
-unlock: UNLOCK (&inode->lock);
+ }
+unlock:
+ UNLOCK(&inode->lock);
- return ret;
-}
-
-void
-marker_filter_quota_xattr (dict_t *dict, char *key,
- data_t *value, void *data)
-{
- int ret = -1;
-
- GF_VALIDATE_OR_GOTO ("marker", dict, out);
- GF_VALIDATE_OR_GOTO ("marker", key, out);
-
- ret = fnmatch ("trusted.glusterfs.quota*", key, 0);
- if (ret == 0)
- dict_del (dict, key);
-out:
- return;
+ return ret;
}
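
Note: marker_force_inode_ctx_get() above follows a get-or-create pattern: look up the per-inode context under the inode lock and allocate it on first use. Below is a self-contained sketch of the same pattern using a plain pthread mutex; the struct and function names are hypothetical, not the marker API.

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
        int quota_dirty; /* stand-in for the real marker ctx fields */
    } marker_ctx_t;

    typedef struct {
        pthread_mutex_t lock;
        marker_ctx_t *ctx; /* lazily created */
    } inode_t;

    /* Return the inode's context, creating it on first access. */
    static marker_ctx_t *force_ctx_get(inode_t *inode)
    {
        marker_ctx_t *ctx = NULL;

        pthread_mutex_lock(&inode->lock);
        {
            if (inode->ctx == NULL)
                inode->ctx = calloc(1, sizeof(*inode->ctx));
            ctx = inode->ctx; /* may still be NULL if calloc failed */
        }
        pthread_mutex_unlock(&inode->lock);

        return ctx;
    }

    int main(void)
    {
        inode_t inode = {PTHREAD_MUTEX_INITIALIZER, NULL};

        marker_ctx_t *a = force_ctx_get(&inode);
        marker_ctx_t *b = force_ctx_get(&inode);

        printf("same ctx on both calls: %d\n", a != NULL && a == b);
        free(inode.ctx);
        return 0;
    }
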
diff --git a/xlators/features/marker/src/marker-common.h b/xlators/features/marker/src/marker-common.h
index 2533571c180..7f8cffe7d35 100644
--- a/xlators/features/marker/src/marker-common.h
+++ b/xlators/features/marker/src/marker-common.h
@@ -1,36 +1,19 @@
-/*Copyright (c) 2008-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+/*
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
-
#ifndef _MARKER_COMMON_H
#define _MARKER_COMMON_H
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
-
-#include "inode.h"
-#include "xlator.h"
+#include <glusterfs/xlator.h>
#include "marker.h"
int32_t
-marker_force_inode_ctx_get (inode_t *, xlator_t *, marker_inode_ctx_t **);
+marker_force_inode_ctx_get(inode_t *, xlator_t *, marker_inode_ctx_t **);
-void
-marker_filter_quota_xattr (dict_t *, char *, data_t *, void *);
#endif
diff --git a/xlators/features/marker/src/marker-mem-types.h b/xlators/features/marker/src/marker-mem-types.h
index 847bfa67c05..aedfdb4a1b7 100644
--- a/xlators/features/marker/src/marker-mem-types.h
+++ b/xlators/features/marker/src/marker-mem-types.h
@@ -1,37 +1,28 @@
/*
- Copyright (c) 2008-2010 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
-
#ifndef __MARKER_MEM_TYPES_H__
#define __MARKER_MEM_TYPES_H__
-#include "mem-types.h"
+#include <glusterfs/mem-types.h>
enum gf_marker_mem_types_ {
- gf_marker_mt_marker_local_t = gf_common_mt_end + 1,
- gf_marker_mt_marker_conf_t,
- gf_marker_mt_loc_t,
- gf_marker_mt_volume_mark,
- gf_marker_mt_int64_t,
- gf_marker_mt_quota_inode_ctx_t,
- gf_marker_mt_marker_inode_ctx_t,
- gf_marker_mt_quota_local_t,
- gf_marker_mt_inode_contribution_t,
- gf_marker_mt_end
+ /* Those are used by ALLOCATE_OR_GOTO macro */
+ gf_marker_mt_marker_conf_t = gf_common_mt_end + 1,
+ gf_marker_mt_loc_t,
+ gf_marker_mt_volume_mark,
+ gf_marker_mt_int64_t,
+ gf_marker_mt_quota_inode_ctx_t,
+ gf_marker_mt_marker_inode_ctx_t,
+ gf_marker_mt_inode_contribution_t,
+ gf_marker_mt_quota_meta_t,
+ gf_marker_mt_quota_synctask_t,
+ gf_marker_mt_end
};
#endif
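
Note: the enum above only re-orders and extends the translator's memory-accounting types; elsewhere in this patch each GF_CALLOC passes one of these constants so allocations can be attributed per type. A simplified standalone sketch of per-type allocation counting (not the libglusterfs mem-accounting implementation):

    #include <stdio.h>
    #include <stdlib.h>

    enum mem_types { MT_CTX = 0, MT_CONTRIBUTION, MT_LOCAL, MT_MAX };

    static size_t mem_count[MT_MAX]; /* live allocations per type */

    /* Allocate zeroed memory and record which subsystem type owns it. */
    static void *typed_calloc(size_t nmemb, size_t size, enum mem_types type)
    {
        void *p = calloc(nmemb, size);
        if (p)
            mem_count[type]++;
        return p;
    }

    static void typed_free(void *p, enum mem_types type)
    {
        if (p) {
            mem_count[type]--;
            free(p);
        }
    }

    int main(void)
    {
        void *ctx = typed_calloc(1, 64, MT_CTX);
        printf("live MT_CTX allocations: %zu\n", mem_count[MT_CTX]);
        typed_free(ctx, MT_CTX);
        printf("live MT_CTX allocations: %zu\n", mem_count[MT_CTX]);
        return 0;
    }
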
diff --git a/xlators/features/marker/src/marker-quota-helper.c b/xlators/features/marker/src/marker-quota-helper.c
index fba2cdd3f5d..ecd85d67b2b 100644
--- a/xlators/features/marker/src/marker-quota-helper.c
+++ b/xlators/features/marker/src/marker-quota-helper.c
@@ -1,375 +1,380 @@
-/*Copyright (c) 2008-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+/*
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
-
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
-
-#include "locking.h"
+#include <glusterfs/locking.h>
#include "marker-quota.h"
#include "marker-common.h"
#include "marker-quota-helper.h"
#include "marker-mem-types.h"
int
-quota_loc_fill (loc_t *loc, inode_t *inode, inode_t *parent, char *path)
+mq_loc_fill(loc_t *loc, inode_t *inode, inode_t *parent, char *path)
{
- int ret = -1;
+ int ret = -1;
- if (!loc)
- return ret;
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+ GF_VALIDATE_OR_GOTO("marker", inode, out);
+ GF_VALIDATE_OR_GOTO("marker", path, out);
+ /* Not checking for parent because while filling
+ * loc of root, parent will be NULL
+ */
- if (inode) {
- loc->inode = inode_ref (inode);
- loc->ino = inode->ino;
- }
+ if (inode) {
+ loc->inode = inode_ref(inode);
+ }
- if (parent)
- loc->parent = inode_ref (parent);
+ if (parent)
+ loc->parent = inode_ref(parent);
- loc->path = gf_strdup (path);
- if (!loc->path) {
- gf_log ("loc fill", GF_LOG_ERROR, "strdup failed");
- goto loc_wipe;
- }
+ if (!gf_uuid_is_null(inode->gfid))
+ gf_uuid_copy(loc->gfid, inode->gfid);
- loc->name = strrchr (loc->path, '/');
- if (loc->name)
- loc->name++;
- else
- goto loc_wipe;
+ loc->path = gf_strdup(path);
+ if (!loc->path) {
+ gf_log("loc fill", GF_LOG_ERROR, "strdup failed");
+ goto out;
+ }
- ret = 0;
-loc_wipe:
- if (ret < 0)
- loc_wipe (loc);
+ loc->name = strrchr(loc->path, '/');
+ if (loc->name)
+ loc->name++;
+ else
+ goto out;
- return ret;
-}
+ ret = 0;
+out:
+ if (ret < 0)
+ loc_wipe(loc);
+
+ return ret;
+}
int32_t
-quota_inode_loc_fill (const char *parent_gfid, inode_t *inode, loc_t *loc)
+mq_inode_loc_fill(const char *parent_gfid, inode_t *inode, loc_t *loc)
{
- char *resolvedpath = NULL;
- inode_t *parent = NULL;
- int ret = -1;
+ char *resolvedpath = NULL;
+ inode_t *parent = NULL;
+ quota_inode_ctx_t *ctx = NULL;
+ xlator_t *this = NULL;
+ int ret = -1;
+
+ this = THIS;
+
+ if (inode == NULL) {
+ gf_log_callingfn("marker", GF_LOG_ERROR,
+ "loc fill failed, "
+ "inode is NULL");
+ return ret;
+ }
- if ((!inode) || (!loc))
- return ret;
+ if (loc == NULL)
+ return ret;
- if ((inode) && (inode->ino == 1)) {
- loc->parent = NULL;
- goto ignore_parent;
- }
+ if ((inode) && __is_root_gfid(inode->gfid)) {
+ loc->parent = NULL;
+ goto ignore_parent;
+ }
- if (parent_gfid == NULL)
- parent = inode_parent (inode, 0, NULL);
- else
- parent = inode_find (inode->table,
- (unsigned char *) parent_gfid);
+ if (parent_gfid == NULL)
+ parent = inode_parent(inode, 0, NULL);
+ else
+ parent = inode_find(inode->table, (unsigned char *)parent_gfid);
- if (parent == NULL)
- goto err;
+ if (parent == NULL) {
+ gf_log("marker", GF_LOG_ERROR, "parent is NULL for %s",
+ uuid_utoa(inode->gfid));
+ goto err;
+ }
ignore_parent:
- ret = inode_path (inode, NULL, &resolvedpath);
- if (ret < 0)
- goto err;
-
- ret = quota_loc_fill (loc, inode, parent, resolvedpath);
- if (ret < 0)
- goto err;
+ ret = inode_path(inode, NULL, &resolvedpath);
+ if (ret < 0) {
+ gf_log("marker", GF_LOG_ERROR, "failed to resolve path for %s",
+ uuid_utoa(inode->gfid));
+ goto err;
+ }
+
+ ret = mq_loc_fill(loc, inode, parent, resolvedpath);
+ if (ret < 0)
+ goto err;
+
+ ret = mq_inode_ctx_get(inode, this, &ctx);
+ if (ret < 0 || ctx == NULL)
+ ctx = mq_inode_ctx_new(inode, this);
+ if (ctx == NULL) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "mq_inode_ctx_new "
+ "failed for %s",
+ uuid_utoa(inode->gfid));
+ ret = -1;
+ goto err;
+ }
+ ret = 0;
err:
- if (parent)
- inode_unref (parent);
+ if (parent)
+ inode_unref(parent);
- GF_FREE (resolvedpath);
+ GF_FREE(resolvedpath);
- return ret;
+ return ret;
}
-
quota_inode_ctx_t *
-quota_alloc_inode_ctx ()
-{
- int32_t ret = -1;
- quota_inode_ctx_t *ctx = NULL;
-
- QUOTA_ALLOC (ctx, quota_inode_ctx_t, ret);
- if (ret == -1)
- goto out;
-
- ctx->size = 0;
- ctx->dirty = 0;
- LOCK_INIT (&ctx->lock);
- INIT_LIST_HEAD (&ctx->contribution_head);
-out:
- return ctx;
-}
-
-inode_contribution_t *
-get_contribution_node (inode_t *inode, quota_inode_ctx_t *ctx)
+mq_alloc_inode_ctx()
{
- inode_contribution_t *contri = NULL;
- inode_contribution_t *temp = NULL;
-
- GF_VALIDATE_OR_GOTO ("marker", inode, out);
- GF_VALIDATE_OR_GOTO ("marker", ctx, out);
-
- list_for_each_entry (temp, &ctx->contribution_head, contri_list) {
- if (uuid_compare (temp->gfid, inode->gfid) == 0) {
- contri = temp;
- goto out;
- }
- }
+ int32_t ret = -1;
+ quota_inode_ctx_t *ctx = NULL;
+
+ QUOTA_ALLOC(ctx, quota_inode_ctx_t, ret);
+ if (ret == -1)
+ goto out;
+
+ ctx->size = 0;
+ ctx->dirty = 0;
+ ctx->updation_status = _gf_false;
+ LOCK_INIT(&ctx->lock);
+ INIT_LIST_HEAD(&ctx->contribution_head);
out:
- return contri;
+ return ctx;
}
-
-int32_t
-delete_contribution_node (dict_t *dict, char *key,
- inode_contribution_t *contribution)
+static void
+mq_contri_fini(inode_contribution_t *contri)
{
- if (dict_get (dict, key) != NULL)
- goto out;
-
- QUOTA_FREE_CONTRIBUTION_NODE (contribution);
-out:
- return 0;
+ LOCK_DESTROY(&contri->lock);
+ GF_FREE(contri);
}
-
inode_contribution_t *
-__add_new_contribution_node (xlator_t *this, quota_inode_ctx_t *ctx, loc_t *loc)
+mq_contri_init(inode_t *inode)
{
- int32_t ret = 0;
- inode_contribution_t *contribution = NULL;
-
- list_for_each_entry (contribution, &ctx->contribution_head, contri_list) {
- if (uuid_compare (contribution->gfid, loc->parent->gfid) == 0) {
- goto out;
- }
- }
+ inode_contribution_t *contri = NULL;
+ int32_t ret = 0;
- QUOTA_ALLOC (contribution, inode_contribution_t, ret);
- if (ret == -1)
- goto out;
+ QUOTA_ALLOC(contri, inode_contribution_t, ret);
+ if (ret == -1)
+ goto out;
- contribution->contribution = 0;
+ GF_REF_INIT(contri, mq_contri_fini);
- uuid_copy (contribution->gfid, loc->parent->gfid);
+ contri->contribution = 0;
+ contri->file_count = 0;
+ contri->dir_count = 0;
+ gf_uuid_copy(contri->gfid, inode->gfid);
- list_add_tail (&contribution->contri_list, &ctx->contribution_head);
+ LOCK_INIT(&contri->lock);
+ INIT_LIST_HEAD(&contri->contri_list);
out:
- return contribution;
+ return contri;
}
-
inode_contribution_t *
-add_new_contribution_node (xlator_t *this, quota_inode_ctx_t *ctx, loc_t *loc)
+mq_get_contribution_node(inode_t *inode, quota_inode_ctx_t *ctx)
{
- inode_contribution_t *contribution = NULL;
+ inode_contribution_t *contri = NULL;
+ inode_contribution_t *temp = NULL;
- if ((ctx == NULL) || (loc == NULL))
- return NULL;
+ if (!inode || !ctx)
+ goto out;
- if (strcmp (loc->path, "/") == 0)
- return NULL;
+ LOCK(&ctx->lock);
+ {
+ if (list_empty(&ctx->contribution_head))
+ goto unlock;
- LOCK (&ctx->lock);
+ list_for_each_entry(temp, &ctx->contribution_head, contri_list)
{
- contribution = __add_new_contribution_node (this, ctx, loc);
+ if (gf_uuid_compare(temp->gfid, inode->gfid) == 0) {
+ contri = temp;
+ GF_REF_GET(contri);
+ break;
+ }
}
- UNLOCK (&ctx->lock);
+ }
+unlock:
+ UNLOCK(&ctx->lock);
- return contribution;
+out:
+ return contri;
}
-
-int32_t
-dict_set_contribution (xlator_t *this, dict_t *dict,
- loc_t *loc)
+inode_contribution_t *
+__mq_add_new_contribution_node(xlator_t *this, quota_inode_ctx_t *ctx,
+ loc_t *loc)
{
- int32_t ret = -1;
- char contri_key [512] = {0, };
-
- GF_VALIDATE_OR_GOTO ("marker", this, out);
- GF_VALIDATE_OR_GOTO ("marker", dict, out);
- GF_VALIDATE_OR_GOTO ("marker", loc, out);
-
- GET_CONTRI_KEY (contri_key, loc->parent->gfid, ret);
- if (ret < 0) {
- ret = -1;
- goto out;
+ inode_contribution_t *contribution = NULL;
+
+ if (!loc->parent) {
+ if (!gf_uuid_is_null(loc->pargfid))
+ loc->parent = inode_find(loc->inode->table, loc->pargfid);
+
+ if (!loc->parent)
+ loc->parent = inode_parent(loc->inode, loc->pargfid, loc->name);
+ if (!loc->parent)
+ goto out;
+ }
+
+ list_for_each_entry(contribution, &ctx->contribution_head, contri_list)
+ {
+ if (loc->parent &&
+ gf_uuid_compare(contribution->gfid, loc->parent->gfid) == 0) {
+ goto out;
}
+ }
- ret = dict_set_int64 (dict, contri_key, 0);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_WARNING,
- "unable to set dict value on %s.",
- loc->path);
- goto out;
- }
+ contribution = mq_contri_init(loc->parent);
+ if (contribution == NULL)
+ goto out;
+
+ list_add_tail(&contribution->contri_list, &ctx->contribution_head);
- ret = 0;
out:
- return ret;
+ return contribution;
}
-
-int32_t
-quota_inode_ctx_get (inode_t *inode, xlator_t *this,
- quota_inode_ctx_t **ctx)
+inode_contribution_t *
+mq_add_new_contribution_node(xlator_t *this, quota_inode_ctx_t *ctx, loc_t *loc)
{
- int32_t ret = -1;
- uint64_t ctx_int = 0;
- marker_inode_ctx_t *mark_ctx = NULL;
-
- GF_VALIDATE_OR_GOTO ("marker", inode, out);
- GF_VALIDATE_OR_GOTO ("marker", this, out);
- GF_VALIDATE_OR_GOTO ("marker", ctx, out);
+ inode_contribution_t *contribution = NULL;
- ret = inode_ctx_get (inode, this, &ctx_int);
- if (ret < 0) {
- ret = -1;
- *ctx = NULL;
- goto out;
- }
-
- mark_ctx = (marker_inode_ctx_t *) (unsigned long)ctx_int;
- if (mark_ctx->quota_ctx == NULL) {
- ret = -1;
- goto out;
- }
+ if ((ctx == NULL) || (loc == NULL))
+ return NULL;
- *ctx = mark_ctx->quota_ctx;
+ if (((loc->path) && (strcmp(loc->path, "/") == 0)) ||
+ (!loc->path && gf_uuid_is_null(loc->pargfid)))
+ return NULL;
- ret = 0;
+ LOCK(&ctx->lock);
+ {
+ contribution = __mq_add_new_contribution_node(this, ctx, loc);
+ if (contribution)
+ GF_REF_GET(contribution);
+ }
+ UNLOCK(&ctx->lock);
-out:
- return ret;
+ return contribution;
}
-
-quota_inode_ctx_t *
-__quota_inode_ctx_new (inode_t *inode, xlator_t *this)
+int32_t
+mq_dict_set_contribution(xlator_t *this, dict_t *dict, loc_t *loc, uuid_t gfid,
+ char *contri_key)
{
- int32_t ret = -1;
- quota_inode_ctx_t *quota_ctx = NULL;
- marker_inode_ctx_t *mark_ctx = NULL;
-
- ret = marker_force_inode_ctx_get (inode, this, &mark_ctx);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR,
- "marker_force_inode_ctx_get() failed");
- goto out;
+ int32_t ret = -1;
+ char key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+
+ GF_VALIDATE_OR_GOTO("marker", this, out);
+ GF_VALIDATE_OR_GOTO("marker", dict, out);
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+
+ if (gfid && !gf_uuid_is_null(gfid)) {
+ GET_CONTRI_KEY(this, key, gfid, ret);
+ } else if (loc->parent) {
+ GET_CONTRI_KEY(this, key, loc->parent->gfid, ret);
+ } else {
+ /* nameless lookup, fetch contributions to all parents */
+ GET_CONTRI_KEY(this, key, NULL, ret);
+ }
+
+ if (ret < 0)
+ goto out;
+
+ ret = dict_set_int64(dict, key, 0);
+ if (ret < 0)
+ goto out;
+
+ if (contri_key)
+ if (snprintf(contri_key, QUOTA_KEY_MAX, "%s", key) >= QUOTA_KEY_MAX) {
+ ret = -1;
+ goto out;
}
- LOCK (&inode->lock);
- {
- if (mark_ctx->quota_ctx == NULL) {
- quota_ctx = quota_alloc_inode_ctx ();
- if (quota_ctx == NULL) {
- ret = -1;
- goto unlock;
- }
- mark_ctx->quota_ctx = quota_ctx;
- } else {
- quota_ctx = mark_ctx->quota_ctx;
- }
-
- ret = 0;
- }
-unlock:
- UNLOCK (&inode->lock);
out:
- return quota_ctx;
-}
+ if (ret < 0)
+ gf_log_callingfn(this ? this->name : "Marker", GF_LOG_ERROR,
+ "dict set failed");
-
-quota_inode_ctx_t *
-quota_inode_ctx_new (inode_t * inode, xlator_t *this)
-{
- return __quota_inode_ctx_new (inode, this);
+ return ret;
}
-quota_local_t *
-quota_local_new ()
+int32_t
+mq_inode_ctx_get(inode_t *inode, xlator_t *this, quota_inode_ctx_t **ctx)
{
- int32_t ret = -1;
- quota_local_t *local = NULL;
+ int32_t ret = -1;
+ uint64_t ctx_int = 0;
+ marker_inode_ctx_t *mark_ctx = NULL;
+
+ GF_VALIDATE_OR_GOTO("marker", inode, out);
+ GF_VALIDATE_OR_GOTO("marker", this, out);
+ GF_VALIDATE_OR_GOTO("marker", ctx, out);
- QUOTA_ALLOC (local, quota_local_t, ret);
- if (ret < 0)
- goto out;
+ ret = inode_ctx_get(inode, this, &ctx_int);
+ if (ret < 0) {
+ ret = -1;
+ *ctx = NULL;
+ goto out;
+ }
- local->ref = 1;
- local->delta = 0;
- local->err = 0;
- LOCK_INIT (&local->lock);
+ mark_ctx = (marker_inode_ctx_t *)(unsigned long)ctx_int;
+ if (mark_ctx->quota_ctx == NULL) {
+ ret = -1;
+ goto out;
+ }
- memset (&local->loc, 0, sizeof (loc_t));
- memset (&local->parent_loc, 0, sizeof (loc_t));
+ *ctx = mark_ctx->quota_ctx;
- local->ctx = NULL;
- local->contri = NULL;
+ ret = 0;
out:
- return local;
+ return ret;
}
-quota_local_t *
-quota_local_ref (quota_local_t *local)
+quota_inode_ctx_t *
+__mq_inode_ctx_new(inode_t *inode, xlator_t *this)
{
- LOCK (&local->lock);
- {
- local->ref ++;
+ int32_t ret = -1;
+ quota_inode_ctx_t *quota_ctx = NULL;
+ marker_inode_ctx_t *mark_ctx = NULL;
+
+ ret = marker_force_inode_ctx_get(inode, this, &mark_ctx);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR, "marker_force_inode_ctx_get() failed");
+ goto out;
+ }
+
+ LOCK(&inode->lock);
+ {
+ if (mark_ctx->quota_ctx == NULL) {
+ quota_ctx = mq_alloc_inode_ctx();
+ if (quota_ctx == NULL) {
+ ret = -1;
+ goto unlock;
+ }
+ mark_ctx->quota_ctx = quota_ctx;
+ } else {
+ quota_ctx = mark_ctx->quota_ctx;
}
- UNLOCK (&local->lock);
- return local;
+ ret = 0;
+ }
+unlock:
+ UNLOCK(&inode->lock);
+out:
+ return quota_ctx;
}
-
-int32_t
-quota_local_unref (xlator_t *this, quota_local_t *local)
+quota_inode_ctx_t *
+mq_inode_ctx_new(inode_t *inode, xlator_t *this)
{
- if (local == NULL)
- goto out;
-
- QUOTA_SAFE_DECREMENT (&local->lock, local->ref);
-
- if (local->ref > 0)
- goto out;
-
- if (local->fd != NULL)
- fd_unref (local->fd);
-
- loc_wipe (&local->loc);
-
- loc_wipe (&local->parent_loc);
-
- LOCK_DESTROY (&local->lock);
-out:
- return 0;
+ return __mq_inode_ctx_new(inode, this);
}
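
Note: mq_contri_init()/mq_contri_fini() above put contribution nodes under GF_REF_INIT/GF_REF_GET/GF_REF_PUT reference counting, so a node stays valid while a lookup holds it. The following is a simplified standalone sketch of that get/put pattern; it uses a plain counter with no locking or atomics, and the names are hypothetical.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct contri {
        int refcount;
        long contribution;
    } contri_t;

    static contri_t *contri_new(void)
    {
        contri_t *c = calloc(1, sizeof(*c));
        if (c)
            c->refcount = 1; /* the owning list holds the first reference */
        return c;
    }

    static contri_t *contri_get(contri_t *c)
    {
        c->refcount++;
        return c;
    }

    /* Drop one reference; free the node when the last one goes away. */
    static void contri_put(contri_t *c)
    {
        if (--c->refcount == 0)
            free(c);
    }

    int main(void)
    {
        contri_t *c = contri_new();

        contri_get(c);  /* e.g. a lookup that is about to use the node */
        c->contribution += 4096;
        printf("contribution=%ld refcount=%d\n", c->contribution, c->refcount);
        contri_put(c);  /* lookup done */

        contri_put(c);  /* list drops its reference; node is freed here */
        return 0;
    }
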
diff --git a/xlators/features/marker/src/marker-quota-helper.h b/xlators/features/marker/src/marker-quota-helper.h
index 9a24c8c3d94..d4091dd2180 100644
--- a/xlators/features/marker/src/marker-quota-helper.h
+++ b/xlators/features/marker/src/marker-quota-helper.h
@@ -1,76 +1,66 @@
-/*Copyright (c) 2008-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+/*
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
-#ifndef _MARKER_QUOTA_HELPER_H
-#define _MARKER_QUOTA_HELPER
-
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
-#include "marker-quota.h"
-
-#define QUOTA_FREE_CONTRIBUTION_NODE(_contribution) \
- do { \
- list_del (&_contribution->contri_list); \
- GF_FREE (_contribution); \
- } while (0)
-#define QUOTA_SAFE_INCREMENT(lock, var) \
- do { \
- LOCK (lock); \
- var ++; \
- UNLOCK (lock); \
- } while (0)
-
-#define QUOTA_SAFE_DECREMENT(lock, var) \
- do { \
- LOCK (lock); \
- var --; \
- UNLOCK (lock); \
- } while (0)
+#ifndef _MARKER_QUOTA_HELPER_H
+#define _MARKER_QUOTA_HELPER_H
+
+#include "marker.h"
+
+#define QUOTA_FREE_CONTRIBUTION_NODE(ctx, _contribution) \
+ do { \
+ LOCK(&ctx->lock); \
+ { \
+ list_del_init(&_contribution->contri_list); \
+ GF_REF_PUT(_contribution); \
+ } \
+ UNLOCK(&ctx->lock); \
+ } while (0)
+
+#define QUOTA_SAFE_INCREMENT(lock, var) \
+ do { \
+ LOCK(lock); \
+ var++; \
+ UNLOCK(lock); \
+ } while (0)
+
+#define QUOTA_SAFE_DECREMENT(lock, var, value) \
+ do { \
+ LOCK(lock); \
+ { \
+ value = --var; \
+ } \
+ UNLOCK(lock); \
+ } while (0)
inode_contribution_t *
-add_new_contribution_node (xlator_t *, quota_inode_ctx_t *, loc_t *);
+mq_add_new_contribution_node(xlator_t *, quota_inode_ctx_t *, loc_t *);
int32_t
-dict_set_contribution (xlator_t *, dict_t *, loc_t *);
+mq_dict_set_contribution(xlator_t *, dict_t *, loc_t *, uuid_t, char *);
quota_inode_ctx_t *
-quota_inode_ctx_new (inode_t *, xlator_t *);
+mq_inode_ctx_new(inode_t *, xlator_t *);
int32_t
-quota_inode_ctx_get (inode_t *, xlator_t *, quota_inode_ctx_t **);
+mq_inode_ctx_get(inode_t *, xlator_t *, quota_inode_ctx_t **);
int32_t
-delete_contribution_node (dict_t *, char *, inode_contribution_t *);
+mq_delete_contribution_node(dict_t *, char *, inode_contribution_t *);
int32_t
-quota_inode_loc_fill (const char *, inode_t *, loc_t *);
-
-quota_local_t *
-quota_local_new ();
-
-quota_local_t *
-quota_local_ref (quota_local_t *);
+mq_inode_loc_fill(const char *, inode_t *, loc_t *);
-int32_t
-quota_local_unref (xlator_t *, quota_local_t *);
+inode_contribution_t *
+mq_contri_init(inode_t *inode);
inode_contribution_t *
-get_contribution_node (inode_t *, quota_inode_ctx_t *);
+mq_get_contribution_node(inode_t *, quota_inode_ctx_t *);
+
#endif
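
Note: QUOTA_SAFE_DECREMENT above now takes a third argument that receives the post-decrement value, so the caller can act on the new count without re-taking the lock. A standalone illustration of the same shape with a plain pthread mutex (hypothetical macro and variable names):

    #include <pthread.h>
    #include <stdio.h>

    #define SAFE_DECREMENT(lockp, var, value)                               \
        do {                                                                \
            pthread_mutex_lock(lockp);                                      \
            {                                                               \
                (value) = --(var);                                          \
            }                                                               \
            pthread_mutex_unlock(lockp);                                    \
        } while (0)

    int main(void)
    {
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        int ref = 2;
        int now = 0;

        SAFE_DECREMENT(&lock, ref, now);
        if (now == 0)
            printf("last reference dropped\n");
        else
            printf("references remaining: %d\n", now);

        SAFE_DECREMENT(&lock, ref, now);
        if (now == 0)
            printf("last reference dropped\n");

        return 0;
    }
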
diff --git a/xlators/features/marker/src/marker-quota.c b/xlators/features/marker/src/marker-quota.c
index 8d4f17ea583..3de2ea1c92c 100644
--- a/xlators/features/marker/src/marker-quota.c
+++ b/xlators/features/marker/src/marker-quota.c
@@ -1,1972 +1,2297 @@
-/*Copyright (c) 2008-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+/*
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
-
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
-
-#include "dict.h"
-#include "xlator.h"
-#include "defaults.h"
+#include <glusterfs/dict.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
#include "libxlator.h"
-#include "common-utils.h"
-#include "byte-order.h"
+#include <glusterfs/common-utils.h>
+#include <glusterfs/byte-order.h>
#include "marker-quota.h"
#include "marker-quota-helper.h"
+#include <glusterfs/syncop.h>
+#include <glusterfs/quota-common-utils.h>
-void
-mq_assign_lk_owner (xlator_t *this, call_frame_t *frame)
+int
+mq_loc_copy(loc_t *dst, loc_t *src)
{
- marker_conf_t *conf = NULL;
- uint64_t lk_owner = 0;
+ int ret = -1;
- conf = this->private;
+ GF_VALIDATE_OR_GOTO("marker", dst, out);
+ GF_VALIDATE_OR_GOTO("marker", src, out);
- LOCK (&conf->lock);
- {
- if (++conf->quota_lk_owner == 0) {
- ++conf->quota_lk_owner;
- }
+ if (src->inode == NULL ||
+ ((src->parent == NULL) && (gf_uuid_is_null(src->pargfid)) &&
+ !__is_root_gfid(src->inode->gfid))) {
+ gf_log("marker", GF_LOG_WARNING, "src loc is not valid");
+ goto out;
+ }
- lk_owner = conf->quota_lk_owner;
- }
- UNLOCK (&conf->lock);
+ ret = loc_copy(dst, src);
+out:
+ return ret;
+}
- frame->root->lk_owner = lk_owner;
+static void
+mq_set_ctx_status(quota_inode_ctx_t *ctx, gf_boolean_t *flag,
+ gf_boolean_t status)
+{
+ LOCK(&ctx->lock);
+ {
+ *flag = status;
+ }
+ UNLOCK(&ctx->lock);
+}
- return;
+static void
+mq_test_and_set_ctx_status(quota_inode_ctx_t *ctx, gf_boolean_t *flag,
+ gf_boolean_t *status)
+{
+ gf_boolean_t temp = _gf_false;
+
+ LOCK(&ctx->lock);
+ {
+ temp = *status;
+ *status = *flag;
+ *flag = temp;
+ }
+ UNLOCK(&ctx->lock);
}
+static void
+mq_get_ctx_status(quota_inode_ctx_t *ctx, gf_boolean_t *flag,
+ gf_boolean_t *status)
+{
+ LOCK(&ctx->lock);
+ {
+ *status = *flag;
+ }
+ UNLOCK(&ctx->lock);
+}
int32_t
-loc_fill_from_name (xlator_t *this, loc_t *newloc, loc_t *oldloc,
- uint64_t ino, char *name)
+mq_get_ctx_updation_status(quota_inode_ctx_t *ctx, gf_boolean_t *status)
{
- int32_t ret = -1;
- int32_t len = 0;
- char *path = NULL;
-
- GF_VALIDATE_OR_GOTO ("marker", this, out);
- GF_VALIDATE_OR_GOTO ("marker", newloc, out);
- GF_VALIDATE_OR_GOTO ("marker", oldloc, out);
- GF_VALIDATE_OR_GOTO ("marker", name, out);
-
- newloc->ino = ino;
-
- newloc->inode = inode_new (oldloc->inode->table);
+ GF_VALIDATE_OR_GOTO("marker", ctx, out);
+ GF_VALIDATE_OR_GOTO("marker", status, out);
- if (!newloc->inode) {
- ret = -1;
- goto out;
- }
-
- newloc->parent = inode_ref (oldloc->inode);
-
- len = strlen (oldloc->path);
-
- if (oldloc->path [len - 1] == '/')
- ret = gf_asprintf ((char **) &path, "%s%s",
- oldloc->path, name);
- else
- ret = gf_asprintf ((char **) &path, "%s/%s",
- oldloc->path, name);
-
- if (ret < 0)
- goto out;
-
- newloc->path = path;
-
- newloc->name = strrchr (newloc->path, '/');
-
- if (newloc->name)
- newloc->name++;
-
- gf_log (this->name, GF_LOG_DEBUG, "path = %s name =%s",
- newloc->path, newloc->name);
+ mq_get_ctx_status(ctx, &ctx->updation_status, status);
+ return 0;
out:
- return ret;
+ return -1;
}
int32_t
-dirty_inode_updation_done (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+mq_set_ctx_updation_status(quota_inode_ctx_t *ctx, gf_boolean_t status)
{
- quota_local_t *local = NULL;
+ GF_VALIDATE_OR_GOTO("marker", ctx, out);
- local = frame->local;
-
- if (!local->err)
- QUOTA_SAFE_DECREMENT (&local->lock, local->ref);
- else
- frame->local = NULL;
-
- QUOTA_STACK_DESTROY (frame, this);
-
- return 0;
+ mq_set_ctx_status(ctx, &ctx->updation_status, status);
+ return 0;
+out:
+ return -1;
}
int32_t
-release_lock_on_dirty_inode (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+mq_test_and_set_ctx_updation_status(quota_inode_ctx_t *ctx,
+ gf_boolean_t *status)
{
- struct gf_flock lock;
- quota_local_t *local = NULL;
-
- local = frame->local;
-
- if (op_ret == -1) {
- local->err = -1;
-
- dirty_inode_updation_done (frame, NULL, this, 0, 0);
-
- return 0;
- }
+ GF_VALIDATE_OR_GOTO("marker", ctx, out);
+ GF_VALIDATE_OR_GOTO("marker", status, out);
- if (op_ret == 0)
- local->ctx->dirty = 0;
-
- lock.l_type = F_UNLCK;
- lock.l_whence = SEEK_SET;
- lock.l_start = 0;
- lock.l_len = 0;
- lock.l_pid = 0;
-
- STACK_WIND (frame,
- dirty_inode_updation_done,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->inodelk,
- this->name, &local->loc, F_SETLKW, &lock);
-
- return 0;
+ mq_test_and_set_ctx_status(ctx, &ctx->updation_status, status);
+ return 0;
+out:
+ return -1;
}
int32_t
-mark_inode_undirty (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *dict)
+mq_set_ctx_create_status(quota_inode_ctx_t *ctx, gf_boolean_t status)
{
- int32_t ret = -1;
- int64_t *size = NULL;
- dict_t *newdict = NULL;
- quota_local_t *local = NULL;
- marker_conf_t *priv = NULL;
-
- local = (quota_local_t *) frame->local;
-
- if (op_ret == -1)
- goto err;
-
- priv = (marker_conf_t *) this->private;
-
- if (!dict)
- goto wind;
+ GF_VALIDATE_OR_GOTO("marker", ctx, out);
- ret = dict_get_bin (dict, QUOTA_SIZE_KEY, (void **) &size);
- if (ret)
- goto wind;
-
- local->ctx->size = ntoh64 (*size);
-
-wind:
- newdict = dict_new ();
- if (!newdict)
- goto err;
-
- ret = dict_set_int8 (newdict, QUOTA_DIRTY_KEY, 0);
- if (ret)
- goto err;
-
- STACK_WIND (frame, release_lock_on_dirty_inode,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->setxattr,
- &local->loc, newdict, 0);
- ret = 0;
-
-err:
- if (op_ret == -1 || ret == -1) {
- local->err = -1;
-
- release_lock_on_dirty_inode (frame, NULL, this, 0, 0);
- }
-
- if (newdict)
- dict_unref (newdict);
-
- return 0;
+ mq_set_ctx_status(ctx, &ctx->create_status, status);
+ return 0;
+out:
+ return -1;
}
int32_t
-update_size_xattr (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, inode_t *inode,
- struct iatt *buf, dict_t *dict, struct iatt *postparent)
+mq_test_and_set_ctx_create_status(quota_inode_ctx_t *ctx, gf_boolean_t *status)
{
- int32_t ret = -1;
- dict_t *new_dict = NULL;
- int64_t *size = NULL;
- int64_t *delta = NULL;
- quota_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ GF_VALIDATE_OR_GOTO("marker", ctx, out);
+ GF_VALIDATE_OR_GOTO("marker", status, out);
- local = frame->local;
+ mq_test_and_set_ctx_status(ctx, &ctx->create_status, status);
+ return 0;
+out:
+ return -1;
+}
- if (op_ret == -1)
- goto err;
+static void
+mq_set_ctx_dirty_status(quota_inode_ctx_t *ctx, gf_boolean_t status)
+{
+ GF_VALIDATE_OR_GOTO("marker", ctx, out);
- priv = this->private;
+ mq_set_ctx_status(ctx, &ctx->dirty_status, status);
+out:
+ return;
+}
- if (dict == NULL) {
- gf_log (this->name, GF_LOG_WARNING,
- "Dict is null while updating the size xattr %s",
- local->loc.path?local->loc.path:"");
- goto err;
+int
+mq_build_ancestry(xlator_t *this, loc_t *loc)
+{
+ int32_t ret = -1;
+ fd_t *fd = NULL;
+ gf_dirent_t entries;
+ gf_dirent_t *entry = NULL;
+ dict_t *xdata = NULL;
+ inode_t *tmp_parent = NULL;
+ inode_t *tmp_inode = NULL;
+ inode_t *linked_inode = NULL;
+ quota_inode_ctx_t *ctx = NULL;
+
+ INIT_LIST_HEAD(&entries.list);
+
+ xdata = dict_new();
+ if (xdata == NULL) {
+ gf_log(this->name, GF_LOG_ERROR, "dict_new failed");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = dict_set_int8(xdata, GET_ANCESTRY_DENTRY_KEY, 1);
+ if (ret < 0)
+ goto out;
+
+ fd = fd_anonymous(loc->inode);
+ if (fd == NULL) {
+ gf_log(this->name, GF_LOG_ERROR, "fd creation failed");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ fd_bind(fd);
+
+ ret = syncop_readdirp(this, fd, 131072, 0, &entries, xdata, NULL);
+ if (ret < 0) {
+ gf_log(this->name,
+ (-ret == ENOENT || -ret == ESTALE) ? GF_LOG_DEBUG : GF_LOG_ERROR,
+ "readdirp failed "
+ "for %s: %s",
+ loc->path, strerror(-ret));
+ goto out;
+ }
+
+ if (list_empty(&entries.list)) {
+ ret = -1;
+ goto out;
+ }
+
+ list_for_each_entry(entry, &entries.list, list)
+ {
+ if (__is_root_gfid(entry->inode->gfid)) {
+ /* The list contains a sub-list for each possible path
+ * to the target inode. Each sub-list starts with the
+ * root entry of the tree and is followed by the child
+ * entries for a particular path to the target entry.
+ * The root entry is an implied sub-list delimiter,
+ * as it denotes we have started processing a new path.
+ * Reset the parent pointer and continue
+ */
+
+ tmp_parent = NULL;
+ } else {
+ linked_inode = inode_link(entry->inode, tmp_parent, entry->d_name,
+ &entry->d_stat);
+ if (linked_inode) {
+ tmp_inode = entry->inode;
+ entry->inode = linked_inode;
+ inode_unref(tmp_inode);
+ } else {
+ gf_log(this->name, GF_LOG_ERROR, "inode link failed");
+ ret = -EINVAL;
+ goto out;
+ }
}
- ret = dict_get_bin (dict, QUOTA_SIZE_KEY, (void **) &size);
- if (!size) {
- gf_log (this->name, GF_LOG_WARNING,
- "failed to get the size, %s",
- local->loc.path?local->loc.path:"");
- goto err;
+ ctx = mq_inode_ctx_new(entry->inode, this);
+ if (ctx == NULL) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "mq_inode_ctx_new "
+ "failed for %s",
+ uuid_utoa(entry->inode->gfid));
+ ret = -ENOMEM;
+ goto out;
}
- QUOTA_ALLOC_OR_GOTO (delta, int64_t, ret, err);
-
- *delta = hton64 (local->sum - ntoh64 (*size));
-
- gf_log (this->name, GF_LOG_DEBUG, "calculated size = %"PRId64", "
- "original size = %"PRIu64
- " path = %s diff = %"PRIu64, local->sum, ntoh64 (*size),
- local->loc.path, ntoh64 (*delta));
-
- new_dict = dict_new ();
- if (!new_dict);
+ /* For non-directory, posix_get_ancestry_non_directory returns
+ * all hard-links that are represented by nodes adjacent to
+ * each other in the dentry-list.
+ * (Unlike the directory case where adjacent nodes either have
+ * a parent/child relationship or belong to different paths).
+ */
+ if (entry->inode->ia_type == IA_IFDIR)
+ tmp_parent = entry->inode;
+ }
- ret = dict_set_bin (new_dict, QUOTA_SIZE_KEY, delta, 8);
- if (ret)
- goto err;
+ if (loc->parent)
+ inode_unref(loc->parent);
- STACK_WIND (frame, mark_inode_undirty, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->xattrop, &local->loc,
- GF_XATTROP_ADD_ARRAY64, new_dict);
+ loc->parent = inode_parent(loc->inode, 0, NULL);
+ if (loc->parent == NULL) {
+ ret = -1;
+ goto out;
+ }
- ret = 0;
+ ret = 0;
-err:
- if (op_ret == -1 || ret == -1) {
- local->err = -1;
+out:
+ gf_dirent_free(&entries);
- release_lock_on_dirty_inode (frame, NULL, this, 0, 0);
- }
+ if (fd)
+ fd_unref(fd);
- if (new_dict)
- dict_unref (new_dict);
+ if (xdata)
+ dict_unref(xdata);
- return 0;
+ return ret;
}
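
For illustration only, a minimal standalone sketch (plain C, not GlusterFS API) of the walk mq_build_ancestry performs over the dentry list returned for GET_ANCESTRY_DENTRY_KEY: a root entry marks the start of a new path, and each directory entry becomes the parent of the entry that follows it. The is_root and is_dir fields are stand-ins for the __is_root_gfid() and IA_IFDIR checks; the linking itself is reduced to a printf.

    #include <stdio.h>

    struct ancestry_entry {
        const char *name;
        int is_root;   /* stands in for __is_root_gfid() */
        int is_dir;    /* stands in for ia_type == IA_IFDIR */
    };

    int
    main(void)
    {
        /* two paths to the same target: /a/b/f and /c/f (a hard link) */
        struct ancestry_entry entries[] = {
            {"/", 1, 1}, {"a", 0, 1}, {"b", 0, 1}, {"f", 0, 0},
            {"/", 1, 1}, {"c", 0, 1}, {"f", 0, 0},
        };
        const char *parent = NULL;
        size_t i;

        for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
            if (entries[i].is_root) {
                parent = NULL;            /* new sub-list: a new path begins */
            } else {
                printf("link %s under %s\n", entries[i].name,
                       parent ? parent : "(unlinked)");
            }
            if (entries[i].is_dir)
                parent = entries[i].name; /* a directory parents the next entry */
        }
        return 0;
    }
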
-int32_t
-get_dirty_inode_size (call_frame_t *frame, xlator_t *this)
+/* This function should be used only in the inspect_directory and
+ * inspect_file functions to heal quota xattrs.
+ * The inode quota feature was introduced in 3.7.
+ * If a gluster setup is upgraded from 3.6 to 3.7, quota heal can cause
+ * getxattr and setxattr spikes because the inode-quota xattrs are missing.
+ * This wrapper function avoids those xattr spikes during the upgrade:
+ * it returns success even if the inode-quota xattrs are missing, in which
+ * case no healing is performed.
+ */
+static int32_t
+_quota_dict_get_meta(xlator_t *this, dict_t *dict, char *key, const int keylen,
+ quota_meta_t *meta, ia_type_t ia_type,
+ gf_boolean_t add_delta)
{
- int32_t ret = -1;
- dict_t *dict = NULL;
- quota_local_t *local = NULL;
- marker_conf_t *priv = NULL;
-
- local = (quota_local_t *) frame->local;
-
- priv = (marker_conf_t *) this->private;
-
- dict = dict_new ();
- if (!dict) {
- ret = -1;
- goto err;
- }
-
- ret = dict_set_int64 (dict, QUOTA_SIZE_KEY, 0);
- if (ret)
- goto err;
-
- STACK_WIND (frame, update_size_xattr, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->lookup, &local->loc, dict);
- ret =0;
-
-err:
- if (ret) {
- local->err = -1;
-
- release_lock_on_dirty_inode (frame, NULL, this, 0, 0);
+ int32_t ret = 0;
+ marker_conf_t *priv = NULL;
+
+ priv = this->private;
+
+ ret = quota_dict_get_inode_meta(dict, key, keylen, meta);
+ if (ret == -2 && (priv->feature_enabled & GF_INODE_QUOTA) == 0) {
+ /* quota_dict_get_inode_meta returns -2 if
+ * inode quota xattrs are not present.
+ * if inode quota self heal is turned off,
+ * then we should skip healing inode quotas
+ */
+
+ gf_log(this->name, GF_LOG_DEBUG,
+ "inode quota disabled. "
+ "inode quota self heal will not be performed");
+ ret = 0;
+ if (add_delta) {
+ if (ia_type == IA_IFDIR)
+ meta->dir_count = 1;
+ else
+ meta->file_count = 1;
}
+ }
- return 0;
+ return ret;
}
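
A minimal standalone sketch of the upgrade fallback described in the comment above, assuming a local mirror of quota_meta_t and using -2 as the "inode-quota xattrs not present" return, as quota_dict_get_inode_meta does. When inode-quota healing is disabled the entry still has to account for itself, so the relevant count is seeded instead of being healed.

    #include <inttypes.h>
    #include <stdio.h>

    typedef struct {
        int64_t size;
        int64_t file_count;
        int64_t dir_count;
    } quota_meta_sketch_t;

    /* lookup_ret == -2 stands for "inode-quota xattrs not present" */
    static int
    get_meta_with_fallback(int lookup_ret, int inode_quota_enabled, int is_dir,
                           quota_meta_sketch_t *meta)
    {
        if (lookup_ret == -2 && !inode_quota_enabled) {
            /* xattrs missing and healing disabled: report success and make
             * the entry at least count itself in later delta computations */
            if (is_dir)
                meta->dir_count = 1;
            else
                meta->file_count = 1;
            return 0;
        }
        return lookup_ret;
    }

    int
    main(void)
    {
        quota_meta_sketch_t meta = {0, 0, 0};
        int ret = get_meta_with_fallback(-2, 0 /* inode quota off */,
                                         1 /* directory */, &meta);

        printf("ret=%d dir_count=%" PRId64 "\n", ret, meta.dir_count);
        return 0;
    }
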
int32_t
-get_child_contribution (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct iatt *buf,
- dict_t *dict,
- struct iatt *postparent)
+quota_dict_set_size_meta(xlator_t *this, dict_t *dict, const quota_meta_t *meta)
{
- int32_t ret = -1;
- int32_t val = 0;
- char contri_key [512] = {0, };
- int64_t *contri = NULL;
- quota_local_t *local = NULL;
-
- local = frame->local;
-
- frame->local = NULL;
-
- QUOTA_STACK_DESTROY (frame, this);
-
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_ERROR, "%s", strerror (op_errno));
-
- local->err = -2;
-
- release_lock_on_dirty_inode (local->frame, NULL, this, 0, 0);
-
- goto out;
- }
-
- if (local->err)
- goto out;
-
- GET_CONTRI_KEY (contri_key, local->loc.inode->gfid, ret);
- if (ret < 0)
- goto out;
-
- if (!dict)
- goto out;
-
- if (dict_get_bin (dict, contri_key, (void **) &contri) == 0)
- local->sum += ntoh64 (*contri);
-
+ int32_t ret = -ENOMEM;
+ quota_meta_t *value = NULL;
+ char size_key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+
+ value = GF_MALLOC(2 * sizeof(quota_meta_t), gf_common_quota_meta_t);
+ if (value == NULL) {
+ goto out;
+ }
+ value[0].size = hton64(meta->size);
+ value[0].file_count = hton64(meta->file_count);
+ value[0].dir_count = hton64(meta->dir_count);
+
+ value[1].size = 0;
+ value[1].file_count = 0;
+ value[1].dir_count = hton64(1);
+
+ GET_SIZE_KEY(this, size_key, ret);
+ if (ret < 0)
+ goto out;
+ ret = dict_set_bin(dict, size_key, value, (sizeof(quota_meta_t) * 2));
+ if (ret < 0) {
+ gf_log_callingfn("quota", GF_LOG_ERROR, "dict set failed");
+ GF_FREE(value);
+ }
out:
- LOCK (&local->lock);
- {
- val = --local->dentry_child_count;
- }
- UNLOCK (&local->lock);
-
- if (val== 0) {
- if (local->err) {
- QUOTA_SAFE_DECREMENT (&local->lock, local->ref);
-
- quota_local_unref (this, local);
- } else
- quota_dirty_inode_readdir (local->frame, NULL, this,
- 0, 0, NULL);
- }
-
- return 0;
+ return ret;
}
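
The two-element array built above feeds GF_XATTROP_ADD_ARRAY64_WITH_DEFAULT; as read from this function, element [0] carries the delta to add and element [1] the default used when the xattr does not exist yet (dir_count = 1 so a fresh directory counts itself). A standalone sketch of that layout, with a local be64() standing in for gluster's hton64() and quota_meta_t mirrored locally:

    #include <inttypes.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct {
        int64_t size;
        int64_t file_count;
        int64_t dir_count;
    } quota_meta_sketch_t;

    /* store v as a big-endian bit pattern, whatever the host byte order */
    static int64_t
    be64(int64_t v)
    {
        uint64_t u = (uint64_t)v;
        unsigned char b[8];
        int64_t out;
        int i;

        for (i = 0; i < 8; i++)
            b[i] = (unsigned char)(u >> (56 - 8 * i));
        memcpy(&out, b, 8);
        return out;
    }

    int
    main(void)
    {
        quota_meta_sketch_t value[2];
        quota_meta_sketch_t delta = {4096, 1, 0};  /* one new 4 KiB file */

        value[0].size = be64(delta.size);          /* amount to add on disk */
        value[0].file_count = be64(delta.file_count);
        value[0].dir_count = be64(delta.dir_count);

        value[1].size = 0;                         /* default when xattr absent */
        value[1].file_count = 0;
        value[1].dir_count = be64(1);              /* a directory counts itself */

        printf("%zu-byte blob stored under the size key\n", sizeof(value));
        return 0;
    }
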
-int32_t
-quota_readdir_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- gf_dirent_t *entries)
+void
+mq_compute_delta(quota_meta_t *delta, const quota_meta_t *op1,
+ const quota_meta_t *op2)
{
- char contri_key [512] = {0, };
- loc_t loc;
- int32_t ret = 0;
- off_t offset = 0;
- int32_t count = 0;
- dict_t *dict = NULL;
- quota_local_t *local = NULL;
- gf_dirent_t *entry = NULL;
- call_frame_t *newframe = NULL;
-
- local = frame->local;
-
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_DEBUG,
- "readdir failed %s", strerror (op_errno));
- local->err = -1;
-
- release_lock_on_dirty_inode (frame, NULL, this, 0, 0);
-
- return 0;
- } else if (op_ret == 0) {
- get_dirty_inode_size (frame, this);
-
- return 0;
- }
-
- local->dentry_child_count = 0;
-
- list_for_each_entry (entry, (&entries->list), list) {
- gf_log (this->name, GF_LOG_DEBUG, "entry = %s", entry->d_name);
-
- if ((!strcmp (entry->d_name, ".")) || (!strcmp (entry->d_name,
- ".."))) {
- gf_log (this->name, GF_LOG_DEBUG, "entry = %s",
- entry->d_name);
- continue;
- }
- count++;
- }
-
- local->frame = frame;
-
- if (count > 0) {
- LOCK (&local->lock);
- {
- local->dentry_child_count = count;
- }
- UNLOCK (&local->lock);
- }
-
-
- list_for_each_entry (entry, (&entries->list), list) {
- gf_log (this->name, GF_LOG_DEBUG, "entry = %s", entry->d_name);
-
- if ((!strcmp (entry->d_name, ".")) || (!strcmp (entry->d_name,
- ".."))) {
- gf_log (this->name, GF_LOG_DEBUG, "entry = %s",
- entry->d_name);
- offset = entry->d_off;
- continue;
- }
-
- ret = loc_fill_from_name (this, &loc, &local->loc,
- entry->d_ino, entry->d_name);
- if (ret < 0)
- goto out;
-
- newframe = copy_frame (frame);
- if (!newframe) {
- ret = -1;
- goto out;
- }
-
- newframe->local = local;
-
- dict = dict_new ();
- if (!dict) {
- ret = -1;
- goto out;
- }
-
- GET_CONTRI_KEY (contri_key, local->loc.inode->gfid, ret);
- if (ret < 0)
- goto out;
-
- ret = dict_set_int64 (dict, contri_key, 0);
- if (ret)
- goto out;
-
- STACK_WIND (newframe,
- get_child_contribution,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->lookup,
- &loc, dict);
-
- offset = entry->d_off;
-
- loc_wipe (&loc);
-
- out:
- if (dict) {
- dict_unref (dict);
- dict = NULL;
- }
-
- if (ret) {
- LOCK (&local->lock);
- {
- if (local->dentry_child_count == 0)
- local->err = -1;
- else
- local->err = -2;
- }
- UNLOCK (&local->lock);
-
- if (newframe) {
- newframe->local = NULL;
-
- QUOTA_STACK_DESTROY (newframe, this);
- }
-
- break;
- }
- }
- gf_log (this->name, GF_LOG_DEBUG, "offset before =%"PRIu64,
- local->d_off);
- local->d_off +=offset;
- gf_log (this->name, GF_LOG_DEBUG, "offset after = %"PRIu64,
- local->d_off);
-
- if (ret)
- release_lock_on_dirty_inode (frame, NULL, this, 0, 0);
-
- else if (count == 0 )
- get_dirty_inode_size (frame, this);
-
- return 0;
+ delta->size = op1->size - op2->size;
+ delta->file_count = op1->file_count - op2->file_count;
+ delta->dir_count = op1->dir_count - op2->dir_count;
}
-int32_t
-quota_dirty_inode_readdir (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- fd_t *fd)
+void
+mq_add_meta(quota_meta_t *dst, const quota_meta_t *src)
{
- quota_local_t *local = NULL;
-
- local = frame->local;
-
- if (op_ret == -1) {
- local->err = -1;
- release_lock_on_dirty_inode (frame, NULL, this, 0, 0);
- return 0;
- }
-
- if (local->fd == NULL)
- local->fd = fd_ref (fd);
-
- STACK_WIND (frame,
- quota_readdir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->readdir,
- local->fd, READDIR_BUF, local->d_off);
+ dst->size += src->size;
+ dst->file_count += src->file_count;
+ dst->dir_count += src->dir_count;
+}
- return 0;
+void
+mq_sub_meta(quota_meta_t *dst, const quota_meta_t *src)
+{
+ if (src == NULL) {
+ dst->size = -dst->size;
+ dst->file_count = -dst->file_count;
+ dst->dir_count = -dst->dir_count;
+ } else {
+ dst->size = src->size - dst->size;
+ dst->file_count = src->file_count - dst->file_count;
+ dst->dir_count = src->dir_count - dst->dir_count;
+ }
}
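
Taken together, mq_compute_delta, mq_add_meta and mq_sub_meta implement the bookkeeping rule that drives the whole update path: the delta is what the child accounts for now minus what it last reported to its parent, and the same delta is applied both to the child's contribution xattr and to the parent's size xattr. A standalone worked example (quota_meta_t mirrored locally):

    #include <inttypes.h>
    #include <stdio.h>

    typedef struct {
        int64_t size;
        int64_t file_count;
        int64_t dir_count;
    } quota_meta_sketch_t;

    static void
    compute_delta(quota_meta_sketch_t *delta, const quota_meta_sketch_t *cur,
                  const quota_meta_sketch_t *contri)
    {
        delta->size = cur->size - contri->size;
        delta->file_count = cur->file_count - contri->file_count;
        delta->dir_count = cur->dir_count - contri->dir_count;
    }

    static void
    add_meta(quota_meta_sketch_t *dst, const quota_meta_sketch_t *src)
    {
        dst->size += src->size;
        dst->file_count += src->file_count;
        dst->dir_count += src->dir_count;
    }

    int
    main(void)
    {
        /* a directory grew from 1 MB / 3 files to 5 MB / 4 files */
        quota_meta_sketch_t current = {5 << 20, 4, 1};
        quota_meta_sketch_t contri = {1 << 20, 3, 1};
        quota_meta_sketch_t parent = {8 << 20, 10, 3};
        quota_meta_sketch_t delta;

        compute_delta(&delta, &current, &contri);
        add_meta(&contri, &delta);   /* contribution xattr on the child */
        add_meta(&parent, &delta);   /* size xattr on the parent        */

        printf("delta=%" PRId64 ", parent now %" PRId64 " bytes, %" PRId64
               " files\n", delta.size, parent.size, parent.file_count);
        return 0;
    }
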
int32_t
-check_if_still_dirty (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct iatt *buf,
- dict_t *dict,
- struct iatt *postparent)
+mq_are_xattrs_set(xlator_t *this, loc_t *loc, gf_boolean_t *contri_set,
+ gf_boolean_t *size_set)
{
- int8_t dirty = -1;
- int32_t ret = -1;
- fd_t *fd = NULL;
- quota_local_t *local = NULL;
- marker_conf_t *priv = NULL;
-
- local = frame->local;
-
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_ERROR, "failed to get "
- "the dirty xattr for %s", local->loc.path);
- goto err;
- }
-
- priv = this->private;
-
- if (!dict) {
- ret = -1;
- goto err;
- }
-
- ret = dict_get_int8 (dict, QUOTA_DIRTY_KEY, &dirty);
- if (ret)
- goto err;
-
- //the inode is not dirty anymore
- if (dirty == 0) {
- release_lock_on_dirty_inode (frame, NULL, this, 0, 0);
-
- return 0;
- }
-
- fd = fd_create (local->loc.inode, frame->root->pid);
-
- local->d_off = 0;
-
- STACK_WIND(frame,
- quota_dirty_inode_readdir,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->opendir,
- &local->loc, fd);
+ int32_t ret = -1;
+ char contri_key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+ char size_key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+ quota_meta_t meta = {
+ 0,
+ };
+ struct iatt stbuf = {
+ 0,
+ };
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+
+ dict = dict_new();
+ if (dict == NULL) {
+ gf_log(this->name, GF_LOG_ERROR, "dict_new failed");
+ goto out;
+ }
+
+ ret = mq_req_xattr(this, loc, dict, contri_key, size_key);
+ if (ret < 0)
+ goto out;
+
+ ret = syncop_lookup(FIRST_CHILD(this), loc, &stbuf, NULL, dict, &rsp_dict);
+ if (ret < 0) {
+ gf_log_callingfn(
+ this->name,
+ (-ret == ENOENT || -ret == ESTALE) ? GF_LOG_DEBUG : GF_LOG_ERROR,
+ "lookup failed "
+ "for %s: %s",
+ loc->path, strerror(-ret));
+ goto out;
+ }
+
+ if (rsp_dict == NULL)
+ goto out;
+
+ *contri_set = _gf_true;
+ *size_set = _gf_true;
+ if (loc->inode->ia_type == IA_IFDIR) {
+ ret = quota_dict_get_inode_meta(rsp_dict, size_key, strlen(size_key),
+ &meta);
+ if (ret < 0 || meta.dir_count == 0)
+ *size_set = _gf_false;
+ }
+
+ if (!loc_is_root(loc)) {
+ ret = quota_dict_get_inode_meta(rsp_dict, contri_key,
+ strlen(contri_key), &meta);
+ if (ret < 0)
+ *contri_set = _gf_false;
+ }
- ret = 0;
+ ret = 0;
+out:
+ if (dict)
+ dict_unref(dict);
-err:
- if (op_ret == -1 || ret == -1) {
- local->err = -1;
- release_lock_on_dirty_inode (frame, NULL, this, 0, 0);
- }
+ if (rsp_dict)
+ dict_unref(rsp_dict);
- return 0;
+ return ret;
}
int32_t
-get_dirty_xattr (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno)
+mq_create_size_xattrs(xlator_t *this, quota_inode_ctx_t *ctx, loc_t *loc)
{
- int32_t ret = -1;
- dict_t *xattr_req = NULL;
- quota_local_t *local = NULL;
- marker_conf_t *priv = NULL;
-
- if (op_ret == -1) {
- dirty_inode_updation_done (frame, NULL, this, 0, 0);
- return 0;
- }
+ int32_t ret = -1;
+ quota_meta_t size = {
+ 0,
+ };
+ dict_t *dict = NULL;
- priv = (marker_conf_t *) this->private;
-
- local = frame->local;
-
- xattr_req = dict_new ();
- if (xattr_req == NULL) {
- ret = -1;
- goto err;
- }
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+ GF_VALIDATE_OR_GOTO("marker", loc->inode, out);
- ret = dict_set_int8 (xattr_req, QUOTA_DIRTY_KEY, 0);
- if (ret)
- goto err;
-
- STACK_WIND (frame,
- check_if_still_dirty,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->lookup,
- &local->loc,
- xattr_req);
+ if (loc->inode->ia_type != IA_IFDIR) {
ret = 0;
+ goto out;
+ }
+
+ dict = dict_new();
+ if (!dict) {
+ gf_log(this->name, GF_LOG_ERROR, "dict_new failed");
+ ret = -1;
+ goto out;
+ }
+
+ ret = quota_dict_set_size_meta(this, dict, &size);
+ if (ret < 0)
+ goto out;
+
+ ret = syncop_xattrop(FIRST_CHILD(this), loc,
+ GF_XATTROP_ADD_ARRAY64_WITH_DEFAULT, dict, NULL, NULL,
+ NULL);
+
+ if (ret < 0) {
+ gf_log_callingfn(
+ this->name,
+ (-ret == ENOENT || -ret == ESTALE) ? GF_LOG_DEBUG : GF_LOG_ERROR,
+ "xattrop failed "
+ "for %s: %s",
+ loc->path, strerror(-ret));
+ goto out;
+ }
-err:
- if (ret) {
- local->err = -1;
- release_lock_on_dirty_inode(frame, NULL, this, 0, 0);
- }
-
- if (xattr_req)
- dict_unref (xattr_req);
+out:
+ if (dict)
+ dict_unref(dict);
- return 0;
+ return ret;
}
int32_t
-update_dirty_inode (xlator_t *this,
- loc_t *loc,
- quota_inode_ctx_t *ctx,
- inode_contribution_t *contribution)
+mq_lock(xlator_t *this, loc_t *loc, short l_type)
{
- int32_t ret = -1;
- quota_local_t *local = NULL;
- struct gf_flock lock = {0, };
- call_frame_t *frame = NULL;
-
- frame = create_frame (this, this->ctx->pool);
- if (frame == NULL) {
- ret = -1;
- goto out;
- }
-
- mq_assign_lk_owner (this, frame);
-
- local = quota_local_new ();
- if (local == NULL)
- goto fr_destroy;
-
- frame->local = local;
-
- ret = loc_copy (&local->loc, loc);
- if (ret < 0)
- goto fr_destroy;
-
- local->ctx = ctx;
-
- local->contri = contribution;
+ struct gf_flock lock = {
+ 0,
+ };
+ int32_t ret = -1;
+
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+ GF_VALIDATE_OR_GOTO("marker", loc->inode, out);
+
+ gf_log(this->name, GF_LOG_DEBUG, "set lock type %d on %s", l_type,
+ loc->path);
+
+ lock.l_len = 0;
+ lock.l_start = 0;
+ lock.l_type = l_type;
+ lock.l_whence = SEEK_SET;
+
+ ret = syncop_inodelk(FIRST_CHILD(this), this->name, loc, F_SETLKW, &lock,
+ NULL, NULL);
+ if (ret < 0)
+ gf_log_callingfn(
+ this->name,
+ (-ret == ENOENT || -ret == ESTALE) ? GF_LOG_DEBUG : GF_LOG_ERROR,
+ "inodelk failed "
+ "for %s: %s",
+ loc->path, strerror(-ret));
- lock.l_type = F_WRLCK;
- lock.l_whence = SEEK_SET;
- lock.l_start = 0;
- lock.l_len = 0;
-
- STACK_WIND (frame,
- get_dirty_xattr,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->inodelk,
- this->name, &local->loc, F_SETLKW, &lock);
- return 0;
-
-fr_destroy:
- QUOTA_STACK_DESTROY (frame, this);
out:
- return 0;
+ return ret;
}
-
int32_t
-quota_inode_creation_done (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+mq_get_dirty(xlator_t *this, loc_t *loc, int32_t *dirty)
{
- quota_local_t *local = NULL;
-
- if (frame == NULL)
- return 0;
+ int32_t ret = -1;
+ int8_t value = 0;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+ struct iatt stbuf = {
+ 0,
+ };
+
+ dict = dict_new();
+ if (dict == NULL) {
+ gf_log(this->name, GF_LOG_ERROR, "dict_new failed");
+ goto out;
+ }
+
+ ret = dict_set_int64(dict, QUOTA_DIRTY_KEY, 0);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_WARNING, "dict set failed");
+ goto out;
+ }
+
+ ret = syncop_lookup(FIRST_CHILD(this), loc, &stbuf, NULL, dict, &rsp_dict);
+ if (ret < 0) {
+ gf_log_callingfn(
+ this->name,
+ (-ret == ENOENT || -ret == ESTALE) ? GF_LOG_DEBUG : GF_LOG_ERROR,
+ "lookup failed "
+ "for %s: %s",
+ loc->path, strerror(-ret));
+ goto out;
+ }
+
+ ret = dict_get_int8(rsp_dict, QUOTA_DIRTY_KEY, &value);
+ if (ret < 0)
+ goto out;
+
+ *dirty = value;
- local = frame->local;
+out:
+ if (dict)
+ dict_unref(dict);
- QUOTA_STACK_DESTROY (frame, this);
+ if (rsp_dict)
+ dict_unref(rsp_dict);
- return 0;
+ return ret;
}
int32_t
-create_dirty_xattr (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *dict)
+mq_get_set_dirty(xlator_t *this, loc_t *loc, int32_t dirty, int32_t *prev_dirty)
{
- int32_t ret = -1;
- dict_t *newdict = NULL;
- quota_local_t *local = NULL;
- marker_conf_t *priv = NULL;
-
- if (op_ret == -1 && op_errno == ENOTCONN)
- goto err;
-
- local = frame->local;
-
- priv = (marker_conf_t *) this->private;
+ int32_t ret = -1;
+ int8_t value = 0;
+ quota_inode_ctx_t *ctx = NULL;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+ GF_VALIDATE_OR_GOTO("marker", loc->inode, out);
+ GF_VALIDATE_OR_GOTO("marker", prev_dirty, out);
+
+ ret = mq_inode_ctx_get(loc->inode, this, &ctx);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "failed to get inode ctx for "
+ "%s",
+ loc->path);
+ goto out;
+ }
+
+ dict = dict_new();
+ if (!dict) {
+ gf_log(this->name, GF_LOG_ERROR, "dict_new failed");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_int8(dict, QUOTA_DIRTY_KEY, dirty);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR, "dict_set failed");
+ goto out;
+ }
+
+ ret = syncop_xattrop(FIRST_CHILD(this), loc, GF_XATTROP_GET_AND_SET, dict,
+ NULL, NULL, &rsp_dict);
+ if (ret < 0) {
+ gf_log_callingfn(
+ this->name,
+ (-ret == ENOENT || -ret == ESTALE) ? GF_LOG_DEBUG : GF_LOG_ERROR,
+ "xattrop failed "
+ "for %s: %s",
+ loc->path, strerror(-ret));
+ goto out;
+ }
+
+ *prev_dirty = 0;
+ if (rsp_dict) {
+ ret = dict_get_int8(rsp_dict, QUOTA_DIRTY_KEY, &value);
+ if (ret == 0)
+ *prev_dirty = value;
+ }
- if (local->loc.inode->ia_type == IA_IFDIR) {
- newdict = dict_new ();
- if (!newdict)
- goto err;
+ LOCK(&ctx->lock);
+ {
+ ctx->dirty = dirty;
+ }
+ UNLOCK(&ctx->lock);
+ ret = 0;
+out:
+ if (dict)
+ dict_unref(dict);
- ret = dict_set_int8 (newdict, QUOTA_DIRTY_KEY, 0);
- if (ret == -1)
- goto err;
+ if (rsp_dict)
+ dict_unref(rsp_dict);
- STACK_WIND (frame, quota_inode_creation_done,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->setxattr,
- &local->loc, newdict, 0);
- } else
- quota_inode_creation_done (frame, NULL, this, 0, 0);
+ return ret;
+}
+int32_t
+mq_mark_dirty(xlator_t *this, loc_t *loc, int32_t dirty)
+{
+ int32_t ret = -1;
+ dict_t *dict = NULL;
+ quota_inode_ctx_t *ctx = NULL;
+
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+ GF_VALIDATE_OR_GOTO("marker", loc->inode, out);
+
+ ret = mq_inode_ctx_get(loc->inode, this, &ctx);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "failed to get inode ctx for "
+ "%s",
+ loc->path);
ret = 0;
+ goto out;
+ }
+
+ dict = dict_new();
+ if (!dict) {
+ ret = -1;
+ gf_log(this->name, GF_LOG_ERROR, "dict_new failed");
+ goto out;
+ }
+
+ ret = dict_set_int8(dict, QUOTA_DIRTY_KEY, dirty);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR, "dict_set failed");
+ goto out;
+ }
+
+ ret = syncop_setxattr(FIRST_CHILD(this), loc, dict, 0, NULL, NULL);
+ if (ret < 0) {
+ gf_log_callingfn(
+ this->name,
+ (-ret == ENOENT || -ret == ESTALE) ? GF_LOG_DEBUG : GF_LOG_ERROR,
+ "setxattr dirty = %d "
+ "failed for %s: %s",
+ dirty, loc->path, strerror(-ret));
+ goto out;
+ }
+
+ LOCK(&ctx->lock);
+ {
+ ctx->dirty = dirty;
+ }
+ UNLOCK(&ctx->lock);
-err:
- if (ret == -1)
- quota_inode_creation_done (frame, NULL, this, -1, 0);
-
- if (newdict)
- dict_unref (newdict);
+out:
+ if (dict)
+ dict_unref(dict);
- return 0;
+ return ret;
}
-
int32_t
-quota_set_inode_xattr (xlator_t *this, loc_t *loc)
+_mq_get_metadata(xlator_t *this, loc_t *loc, quota_meta_t *contri,
+ quota_meta_t *size, uuid_t contri_gfid)
{
- int32_t ret = 0;
- int64_t *value = NULL;
- int64_t *size = NULL;
- dict_t *dict = NULL;
- char key[512] = {0, };
- call_frame_t *frame = NULL;
- quota_local_t *local = NULL;
- marker_conf_t *priv = NULL;
- quota_inode_ctx_t *ctx = NULL;
- inode_contribution_t *contri = NULL;
-
- if (loc == NULL || this == NULL)
- return 0;
-
- priv = (marker_conf_t *) this->private;
-
- ret = quota_inode_ctx_get (loc->inode, this, &ctx);
+ int32_t ret = -1;
+ quota_meta_t meta = {
+ 0,
+ };
+ char contri_key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+ char size_key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+ int keylen = 0;
+ dict_t *dict = NULL;
+ dict_t *rsp_dict = NULL;
+ struct iatt stbuf = {
+ 0,
+ };
+
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+ GF_VALIDATE_OR_GOTO("marker", loc->inode, out);
+
+ if (size == NULL && contri == NULL)
+ goto out;
+
+ dict = dict_new();
+ if (dict == NULL) {
+ gf_log(this->name, GF_LOG_ERROR, "dict_new failed");
+ goto out;
+ }
+
+ if (size && loc->inode->ia_type == IA_IFDIR) {
+ GET_SIZE_KEY(this, size_key, keylen);
+ if (keylen < 0)
+ goto out;
+ ret = dict_set_int64(dict, size_key, 0);
if (ret < 0) {
- ctx = quota_inode_ctx_new (loc->inode, this);
- if (ctx == NULL) {
- gf_log (this->name, GF_LOG_WARNING,
- "quota_inode_ctx_new failed");
- ret = -1;
- goto out;
- }
+ gf_log(this->name, GF_LOG_ERROR, "dict_set failed.");
+ goto out;
}
+ }
- dict = dict_new ();
- if (!dict)
+ if (contri && !loc_is_root(loc)) {
+ ret = mq_dict_set_contribution(this, dict, loc, contri_gfid,
+ contri_key);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = syncop_lookup(FIRST_CHILD(this), loc, &stbuf, NULL, dict, &rsp_dict);
+ if (ret < 0) {
+ gf_log_callingfn(
+ this->name,
+ (-ret == ENOENT || -ret == ESTALE) ? GF_LOG_DEBUG : GF_LOG_ERROR,
+ "lookup failed "
+ "for %s: %s",
+ loc->path, strerror(-ret));
+ goto out;
+ }
+
+ if (size) {
+ if (loc->inode->ia_type == IA_IFDIR) {
+ ret = quota_dict_get_meta(rsp_dict, size_key, keylen, &meta);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR, "dict_get failed.");
goto out;
+ }
- if (loc->inode->ia_type == IA_IFDIR) {
- QUOTA_ALLOC_OR_GOTO (size, int64_t, ret, err);
- ret = dict_set_bin (dict, QUOTA_SIZE_KEY, size, 8);
- if (ret < 0)
- goto free_size;
+ size->size = meta.size;
+ size->file_count = meta.file_count;
+ size->dir_count = meta.dir_count;
+ } else {
+ size->size = stbuf.ia_blocks * 512;
+ size->file_count = 1;
+ size->dir_count = 0;
}
+ }
- //if '/' then dont set contribution xattr
- if (strcmp (loc->path, "/") == 0)
- goto wind;
-
- contri = add_new_contribution_node (this, ctx, loc);
- if (contri == NULL)
- goto err;
-
- QUOTA_ALLOC_OR_GOTO (value, int64_t, ret, err);
- GET_CONTRI_KEY (key, loc->parent->gfid, ret);
-
- ret = dict_set_bin (dict, key, value, 8);
- if (ret < 0)
- goto free_value;
-
-wind:
- frame = create_frame (this, this->ctx->pool);
- if (!frame) {
- ret = -1;
- goto err;
+ if (contri && !loc_is_root(loc)) {
+ ret = quota_dict_get_meta(rsp_dict, contri_key, strlen(contri_key),
+ &meta);
+ if (ret < 0) {
+ contri->size = 0;
+ contri->file_count = 0;
+ contri->dir_count = 0;
+ } else {
+ contri->size = meta.size;
+ contri->file_count = meta.file_count;
+ contri->dir_count = meta.dir_count;
}
+ }
- local = quota_local_new ();
- if (local == NULL)
- goto free_size;
+ ret = 0;
- local->ctx = ctx;
+out:
+ if (dict)
+ dict_unref(dict);
- local->contri = contri;
+ if (rsp_dict)
+ dict_unref(rsp_dict);
- ret = loc_copy (&local->loc, loc);
- if (ret < 0)
- quota_local_unref (this, local);
+ return ret;
+}
+
+int32_t
+mq_get_metadata(xlator_t *this, loc_t *loc, quota_meta_t *contri,
+ quota_meta_t *size, quota_inode_ctx_t *ctx,
+ inode_contribution_t *contribution)
+{
+ int32_t ret = -1;
- frame->local = local;
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+ GF_VALIDATE_OR_GOTO("marker", loc->inode, out);
+ GF_VALIDATE_OR_GOTO("marker", ctx, out);
+ GF_VALIDATE_OR_GOTO("marker", contribution, out);
- STACK_WIND (frame, create_dirty_xattr, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->xattrop, &local->loc,
- GF_XATTROP_ADD_ARRAY64, dict);
+ if (size == NULL && contri == NULL) {
ret = 0;
+ goto out;
+ }
-free_size:
- if (ret < 0)
- GF_FREE (size);
+ ret = _mq_get_metadata(this, loc, contri, size, contribution->gfid);
+ if (ret < 0)
+ goto out;
-free_value:
- if (ret < 0)
- GF_FREE (value);
+ if (size) {
+ LOCK(&ctx->lock);
+ {
+ ctx->size = size->size;
+ ctx->file_count = size->file_count;
+ ctx->dir_count = size->dir_count;
+ }
+ UNLOCK(&ctx->lock);
+ }
-err:
- dict_unref (dict);
+ if (contri) {
+ LOCK(&contribution->lock);
+ {
+ contribution->contribution = contri->size;
+ contribution->file_count = contri->file_count;
+ contribution->dir_count = contri->dir_count;
+ }
+ UNLOCK(&contribution->lock);
+ }
out:
- if (ret < 0)
- quota_inode_creation_done (NULL, NULL, this, -1, 0);
-
- return 0;
+ return ret;
}
-
int32_t
-get_parent_inode_local (xlator_t *this, quota_local_t *local)
+mq_get_delta(xlator_t *this, loc_t *loc, quota_meta_t *delta,
+ quota_inode_ctx_t *ctx, inode_contribution_t *contribution)
{
- int32_t ret;
- quota_inode_ctx_t *ctx = NULL;
-
- loc_wipe (&local->loc);
+ int32_t ret = -1;
+ quota_meta_t size = {
+ 0,
+ };
+ quota_meta_t contri = {
+ 0,
+ };
- loc_copy (&local->loc, &local->parent_loc);
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+ GF_VALIDATE_OR_GOTO("marker", loc->inode, out);
+ GF_VALIDATE_OR_GOTO("marker", ctx, out);
+ GF_VALIDATE_OR_GOTO("marker", contribution, out);
- loc_wipe (&local->parent_loc);
+ ret = mq_get_metadata(this, loc, &contri, &size, ctx, contribution);
+ if (ret < 0)
+ goto out;
- quota_inode_loc_fill (NULL, local->loc.parent, &local->parent_loc);
+ mq_compute_delta(delta, &size, &contri);
- ret = quota_inode_ctx_get (local->loc.inode, this, &ctx);
- if (ret < 0)
- return -1;
-
- local->ctx = ctx;
-
- local->contri = (inode_contribution_t *) ctx->contribution_head.next;
-
- return 0;
-}
-
-
-int32_t
-xattr_updation_done (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- dict_t *dict)
-{
- QUOTA_STACK_DESTROY (frame, this);
- return 0;
+out:
+ return ret;
}
-
int32_t
-quota_inodelk_cbk (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno)
+mq_remove_contri(xlator_t *this, loc_t *loc, quota_inode_ctx_t *ctx,
+ inode_contribution_t *contri, quota_meta_t *delta,
+ uint32_t nlink)
{
- int32_t ret = 0;
- quota_local_t *local = NULL;
-
- local = frame->local;
+ int32_t ret = -1;
+ char contri_key[QUOTA_KEY_MAX] = {
+ 0,
+ };
- if (op_ret == -1 || local->err) {
- gf_log (this->name, ((op_errno == ENOENT) ? GF_LOG_DEBUG :
- GF_LOG_INFO),
- "lock setting failed (%s)", strerror (op_errno));
- xattr_updation_done (frame, NULL, this, 0, 0, NULL);
-
- return 0;
+ if (nlink == 1) {
+        /* File was the last link and has been deleted */
+ ret = 0;
+ goto done;
+ }
+
+ GET_CONTRI_KEY(this, contri_key, contri->gfid, ret);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "get contri_key "
+ "failed for %s",
+ uuid_utoa(contri->gfid));
+ goto out;
+ }
+
+ ret = syncop_removexattr(FIRST_CHILD(this), loc, contri_key, 0, NULL);
+ if (ret < 0) {
+ if (-ret == ENOENT || -ret == ESTALE || -ret == ENODATA ||
+ -ret == ENOATTR) {
+            /* Removing contri is done when the unlink operation is
+             * performed, so return success on ENOENT/ESTALE.
+             * A rename operation removes the xattr earlier,
+             * so return success on ENODATA.
+             */
+ ret = 0;
+ } else {
+ gf_log_callingfn(this->name, GF_LOG_ERROR,
+ "removexattr %s failed for %s: %s", contri_key,
+ loc->path, strerror(-ret));
+ goto out;
}
+ }
- gf_log (this->name, GF_LOG_DEBUG,
- "inodelk released on %s", local->parent_loc.path);
+done:
+ LOCK(&contri->lock);
+ {
+ contri->contribution += delta->size;
+ contri->file_count += delta->file_count;
+ contri->dir_count += delta->dir_count;
+ }
+ UNLOCK(&contri->lock);
- if (strcmp (local->parent_loc.path, "/") == 0) {
- xattr_updation_done (frame, NULL, this, 0, 0, NULL);
- } else {
- ret = get_parent_inode_local (this, local);
- if (ret < 0) {
- xattr_updation_done (frame, NULL, this, 0, 0, NULL);
- goto out;
- }
+ ret = 0;
- get_lock_on_parent (frame, this);
- }
out:
- return 0;
-}
+ QUOTA_FREE_CONTRIBUTION_NODE(ctx, contri);
+ return ret;
+}
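
A standalone sketch of the errno normalisation used above: removing a contribution xattr is idempotent cleanup, so "already gone" errors are folded into success while anything else propagates. The error list mirrors the one in the function; the ENOATTR fallback below is only for platforms that do not define it.

    #include <errno.h>
    #include <stdio.h>

    #ifndef ENOATTR
    #define ENOATTR ENODATA
    #endif

    /* syncop-style calls return -errno on failure */
    static int
    normalise_removexattr_ret(int ret)
    {
        if (ret >= 0)
            return 0;
        if (-ret == ENOENT || -ret == ESTALE || -ret == ENODATA ||
            -ret == ENOATTR)
            return 0;   /* file or xattr already gone: nothing left to undo */
        return ret;     /* a real failure, propagate it */
    }

    int
    main(void)
    {
        printf("%d %d\n", normalise_removexattr_ret(-ENOENT),
               normalise_removexattr_ret(-EIO));
        return 0;
    }
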
-//now release lock on the parent inode
int32_t
-quota_release_parent_lock (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret,
- int32_t op_errno)
+mq_update_contri(xlator_t *this, loc_t *loc, inode_contribution_t *contri,
+ quota_meta_t *delta)
{
- int32_t ret = 0;
- struct gf_flock lock;
- quota_local_t *local = NULL;
- quota_inode_ctx_t *ctx = NULL;
-
- local = frame->local;
+ int32_t ret = -1;
+ char contri_key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+ dict_t *dict = NULL;
+
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+ GF_VALIDATE_OR_GOTO("marker", loc->inode, out);
+ GF_VALIDATE_OR_GOTO("marker", delta, out);
+ GF_VALIDATE_OR_GOTO("marker", contri, out);
+
+ if (quota_meta_is_null(delta)) {
+ ret = 0;
+ goto out;
+ }
+
+ dict = dict_new();
+ if (!dict) {
+ gf_log(this->name, GF_LOG_ERROR, "dict_new failed");
+ ret = -1;
+ goto out;
+ }
+
+ GET_CONTRI_KEY(this, contri_key, contri->gfid, ret);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "get contri_key "
+ "failed for %s",
+ uuid_utoa(contri->gfid));
+ goto out;
+ }
+
+ ret = quota_dict_set_meta(dict, contri_key, delta, loc->inode->ia_type);
+ if (ret < 0)
+ goto out;
+
+ ret = syncop_xattrop(FIRST_CHILD(this), loc, GF_XATTROP_ADD_ARRAY64, dict,
+ NULL, NULL, NULL);
+ if (ret < 0) {
+ gf_log_callingfn(
+ this->name,
+ (-ret == ENOENT || -ret == ESTALE) ? GF_LOG_DEBUG : GF_LOG_ERROR,
+ "xattrop failed "
+ "for %s: %s",
+ loc->path, strerror(-ret));
+ goto out;
+ }
+
+ LOCK(&contri->lock);
+ {
+ contri->contribution += delta->size;
+ contri->file_count += delta->file_count;
+ contri->dir_count += delta->dir_count;
+ }
+ UNLOCK(&contri->lock);
- ret = quota_inode_ctx_get (local->parent_loc.inode, this, &ctx);
- if (ret < 0)
- goto wind;
+out:
+ if (dict)
+ dict_unref(dict);
- LOCK (&ctx->lock);
- {
- ctx->dirty = 0;
- }
- UNLOCK (&ctx->lock);
-
-wind:
- lock.l_type = F_UNLCK;
- lock.l_whence = SEEK_SET;
- lock.l_start = 0;
- lock.l_len = 0;
- lock.l_pid = 0;
-
- STACK_WIND (frame,
- quota_inodelk_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->inodelk,
- this->name, &local->parent_loc,
- F_SETLKW, &lock);
-
- return 0;
+ return ret;
}
-
int32_t
-quota_mark_undirty (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- dict_t *dict)
+mq_update_size(xlator_t *this, loc_t *loc, quota_meta_t *delta)
{
- int32_t ret = -1;
- int64_t *size = NULL;
- dict_t *newdict = NULL;
- quota_local_t *local = NULL;
- quota_inode_ctx_t *ctx = NULL;
- marker_conf_t *priv = NULL;
+ int32_t ret = -1;
+ quota_inode_ctx_t *ctx = NULL;
+ dict_t *dict = NULL;
- local = frame->local;
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+ GF_VALIDATE_OR_GOTO("marker", loc->inode, out);
+ GF_VALIDATE_OR_GOTO("marker", delta, out);
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_WARNING, "%s occurred while"
- " updating the size of %s", strerror (op_errno),
- local->parent_loc.path);
+ if (quota_meta_is_null(delta)) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = mq_inode_ctx_get(loc->inode, this, &ctx);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "failed to get inode ctx for "
+ "%s",
+ loc->path);
+ goto out;
+ }
+
+ dict = dict_new();
+ if (!dict) {
+ gf_log(this->name, GF_LOG_ERROR, "dict_new failed");
+ ret = -1;
+ goto out;
+ }
+
+ ret = quota_dict_set_size_meta(this, dict, delta);
+ if (ret < 0)
+ goto out;
+
+ ret = syncop_xattrop(FIRST_CHILD(this), loc,
+ GF_XATTROP_ADD_ARRAY64_WITH_DEFAULT, dict, NULL, NULL,
+ NULL);
+ if (ret < 0) {
+ gf_log_callingfn(
+ this->name,
+ (-ret == ENOENT || -ret == ESTALE) ? GF_LOG_DEBUG : GF_LOG_ERROR,
+ "xattrop failed "
+ "for %s: %s",
+ loc->path, strerror(-ret));
+ goto out;
+ }
+
+ LOCK(&ctx->lock);
+ {
+ ctx->size += delta->size;
+ ctx->file_count += delta->file_count;
+ if (ctx->dir_count == 0)
+ ctx->dir_count += delta->dir_count + 1;
+ else
+ ctx->dir_count += delta->dir_count;
+ }
+ UNLOCK(&ctx->lock);
- goto err;
- }
+out:
+ if (dict)
+ dict_unref(dict);
- priv = this->private;
-
- //update the size of the parent inode
- if (dict != NULL) {
- ret = quota_inode_ctx_get (local->parent_loc.inode, this, &ctx);
- if (ret < 0)
- goto err;
-
- ret = dict_get_bin (dict, QUOTA_SIZE_KEY, (void **) &size);
- if (ret < 0)
- goto err;
-
- LOCK (&ctx->lock);
- {
- if (size)
- ctx->size = ntoh64 (*size);
- gf_log (this->name, GF_LOG_DEBUG, "%s %"PRId64,
- local->parent_loc.path, ctx->size);
- }
- UNLOCK (&ctx->lock);
- }
+ return ret;
+}
- newdict = dict_new ();
+int
+mq_synctask_cleanup(int ret, call_frame_t *frame, void *opaque)
+{
+ quota_synctask_t *args = NULL;
- if (!newdict)
- goto err;
+ GF_ASSERT(opaque);
- ret = dict_set_int8 (newdict, QUOTA_DIRTY_KEY, 0);
+ args = (quota_synctask_t *)opaque;
+ loc_wipe(&args->loc);
- if (ret == -1)
- goto err;
+ if (args->stub)
+ call_resume(args->stub);
- STACK_WIND (frame, quota_release_parent_lock,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->setxattr,
- &local->parent_loc, newdict, 0);
+ if (!args->is_static)
+ GF_FREE(args);
- ret = 0;
-err:
- if (op_ret == -1 || ret == -1) {
- local->err = 1;
+ return 0;
+}
- quota_release_parent_lock (frame, NULL, this, 0, 0);
+int
+mq_synctask1(xlator_t *this, synctask_fn_t task, gf_boolean_t spawn, loc_t *loc,
+ quota_meta_t *contri, uint32_t nlink, call_stub_t *stub)
+{
+ int32_t ret = -1;
+ quota_synctask_t *args = NULL;
+ quota_synctask_t static_args = {
+ 0,
+ };
+
+ if (spawn) {
+ QUOTA_ALLOC_OR_GOTO(args, quota_synctask_t, ret, out);
+ args->is_static = _gf_false;
+ } else {
+ args = &static_args;
+ args->is_static = _gf_true;
+ }
+
+ args->this = this;
+ args->stub = stub;
+ loc_copy(&args->loc, loc);
+ args->ia_nlink = nlink;
+
+ if (contri) {
+ args->contri = *contri;
+ } else {
+ args->contri.size = -1;
+ args->contri.file_count = -1;
+ args->contri.dir_count = -1;
+ }
+
+ if (spawn) {
+ ret = synctask_new1(this->ctx->env, 1024 * 16, task,
+ mq_synctask_cleanup, NULL, args);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "Failed to spawn "
+ "new synctask");
+ mq_synctask_cleanup(ret, NULL, args);
}
+ } else {
+ ret = task(args);
+ mq_synctask_cleanup(ret, NULL, args);
+ }
- if (newdict)
- dict_unref (newdict);
-
- return 0;
+out:
+ return ret;
}
+int
+mq_synctask(xlator_t *this, synctask_fn_t task, gf_boolean_t spawn, loc_t *loc)
+{
+ return mq_synctask1(this, task, spawn, loc, NULL, -1, NULL);
+}
int32_t
-quota_update_parent_size (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- dict_t *dict)
+mq_prevalidate_txn(xlator_t *this, loc_t *origin_loc, loc_t *loc,
+ quota_inode_ctx_t **ctx, struct iatt *buf)
{
- int64_t *size = NULL;
- int32_t ret = -1;
- dict_t *newdict = NULL;
- marker_conf_t *priv = NULL;
- quota_local_t *local = NULL;
- quota_inode_ctx_t *ctx = NULL;
+ int32_t ret = -1;
+ quota_inode_ctx_t *ctxtmp = NULL;
+
+ if (buf) {
+ if (buf->ia_type == IA_IFREG && IS_DHT_LINKFILE_MODE(buf))
+ goto out;
+
+ if (buf->ia_type != IA_IFREG && buf->ia_type != IA_IFLNK &&
+ buf->ia_type != IA_IFDIR)
+ goto out;
+ }
+
+ if (origin_loc == NULL || origin_loc->inode == NULL ||
+ gf_uuid_is_null(origin_loc->inode->gfid))
+ goto out;
+
+ loc_copy(loc, origin_loc);
+
+ if (gf_uuid_is_null(loc->gfid))
+ gf_uuid_copy(loc->gfid, loc->inode->gfid);
+
+ if (!loc_is_root(loc) && loc->parent == NULL)
+ loc->parent = inode_parent(loc->inode, 0, NULL);
+
+ ret = mq_inode_ctx_get(loc->inode, this, &ctxtmp);
+ if (ret < 0) {
+        gf_log_callingfn(this->name, GF_LOG_WARNING,
+                         "inode ctx is NULL for %s", loc->path);
+ goto out;
+ }
+ if (ctx)
+ *ctx = ctxtmp;
+
+ ret = 0;
+out:
+ return ret;
+}
- local = frame->local;
+int
+mq_create_xattrs_task(void *opaque)
+{
+ int32_t ret = -1;
+ gf_boolean_t locked = _gf_false;
+ gf_boolean_t contri_set = _gf_false;
+ gf_boolean_t size_set = _gf_false;
+ gf_boolean_t need_txn = _gf_false;
+ quota_synctask_t *args = NULL;
+ quota_inode_ctx_t *ctx = NULL;
+ xlator_t *this = NULL;
+ loc_t *loc = NULL;
+ gf_boolean_t status = _gf_false;
+
+ GF_ASSERT(opaque);
+
+ args = (quota_synctask_t *)opaque;
+ loc = &args->loc;
+ this = args->this;
+ THIS = this;
+
+ ret = mq_inode_ctx_get(loc->inode, this, &ctx);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "Failed to"
+ "get inode ctx, aborting quota create txn");
+ goto out;
+ }
+
+ if (loc->inode->ia_type == IA_IFDIR) {
+ /* lock not required for files */
+ ret = mq_lock(this, loc, F_WRLCK);
+ if (ret < 0)
+ goto out;
+ locked = _gf_true;
+ }
- if (op_ret == -1) {
- gf_log (this->name, ((op_errno == ENOENT) ? GF_LOG_DEBUG :
- GF_LOG_ERROR),
- "xattrop call failed: %s", strerror (op_errno));
+ ret = mq_are_xattrs_set(this, loc, &contri_set, &size_set);
+ if (ret < 0 || (contri_set && size_set))
+ goto out;
- goto err;
- }
+ mq_set_ctx_create_status(ctx, _gf_false);
+ status = _gf_true;
- local->contri->contribution += local->delta;
+ if (loc->inode->ia_type == IA_IFDIR && size_set == _gf_false) {
+ ret = mq_create_size_xattrs(this, ctx, loc);
+ if (ret < 0)
+ goto out;
+ }
- gf_log (this->name, GF_LOG_DEBUG, "%s %"PRId64 "%"PRId64,
- local->loc.path, local->ctx->size,
- local->contri->contribution);
+ need_txn = _gf_true;
+out:
+ if (locked)
+ ret = mq_lock(this, loc, F_UNLCK);
- priv = this->private;
+ if (status == _gf_false)
+ mq_set_ctx_create_status(ctx, _gf_false);
- if (dict == NULL)
- goto err;
+ if (need_txn)
+ ret = mq_initiate_quota_blocking_txn(this, loc, NULL);
- ret = quota_inode_ctx_get (local->parent_loc.inode, this, &ctx);
- if (ret < 0)
- goto err;
+ return ret;
+}
- newdict = dict_new ();
- if (!newdict) {
- ret = -1;
- goto err;
+static int
+_mq_create_xattrs_txn(xlator_t *this, loc_t *origin_loc, struct iatt *buf,
+ gf_boolean_t spawn)
+{
+ int32_t ret = -1;
+ quota_inode_ctx_t *ctx = NULL;
+ gf_boolean_t status = _gf_true;
+ loc_t loc = {
+ 0,
+ };
+ inode_contribution_t *contribution = NULL;
+
+ ret = mq_prevalidate_txn(this, origin_loc, &loc, &ctx, buf);
+ if (ret < 0)
+ goto out;
+
+ ret = mq_test_and_set_ctx_create_status(ctx, &status);
+ if (ret < 0 || status == _gf_true)
+ goto out;
+
+ if (!loc_is_root(&loc) && loc.parent) {
+ contribution = mq_add_new_contribution_node(this, ctx, &loc);
+ if (contribution == NULL) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "cannot add a new contribution node "
+ "(%s)",
+ uuid_utoa(loc.gfid));
+ ret = -1;
+ goto out;
+ } else {
+ GF_REF_PUT(contribution);
}
+ }
- QUOTA_ALLOC_OR_GOTO (size, int64_t, ret, err);
+ ret = mq_synctask(this, mq_create_xattrs_task, spawn, &loc);
+out:
+ if (ret < 0 && status == _gf_false)
+ mq_set_ctx_create_status(ctx, _gf_false);
- *size = hton64 (local->delta);
+ loc_wipe(&loc);
+ return ret;
+}
- ret = dict_set_bin (newdict, QUOTA_SIZE_KEY, size, 8);
- if (ret < 0)
- goto err;
-
- STACK_WIND (frame,
- quota_mark_undirty,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->xattrop,
- &local->parent_loc,
- GF_XATTROP_ADD_ARRAY64,
- newdict);
- ret = 0;
-err:
- if (op_ret == -1 || ret < 0) {
- local->err = 1;
- quota_release_parent_lock (frame, NULL, this, 0, 0);
- }
+int
+mq_create_xattrs_txn(xlator_t *this, loc_t *loc, struct iatt *buf)
+{
+ int32_t ret = -1;
- if (newdict)
- dict_unref (newdict);
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+ GF_VALIDATE_OR_GOTO("marker", loc->inode, out);
- return 0;
+ ret = _mq_create_xattrs_txn(this, loc, buf, _gf_true);
+out:
+ return ret;
}
int32_t
-quota_update_inode_contribution (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret,
- int32_t op_errno, inode_t *inode,
- struct iatt *buf, dict_t *dict,
- struct iatt *postparent)
+mq_reduce_parent_size_task(void *opaque)
{
- int32_t ret = -1;
- int64_t *size = NULL;
- int64_t *contri = NULL;
- int64_t *delta = NULL;
- char contri_key [512] = {0, };
- dict_t *newdict = NULL;
- quota_local_t *local = NULL;
- quota_inode_ctx_t *ctx = NULL;
- marker_conf_t *priv = NULL;
- inode_contribution_t *contribution = NULL;
-
- local = frame->local;
-
- if (op_ret == -1) {
- gf_log (this->name, ((op_errno == ENOENT) ? GF_LOG_DEBUG :
- GF_LOG_WARNING),
- "failed to get size and contribution with %s error",
- strerror (op_errno));
- goto err;
+ int32_t ret = -1;
+ int32_t prev_dirty = 0;
+ quota_inode_ctx_t *ctx = NULL;
+ quota_inode_ctx_t *parent_ctx = NULL;
+ inode_contribution_t *contribution = NULL;
+ quota_meta_t delta = {
+ 0,
+ };
+ quota_meta_t contri = {
+ 0,
+ };
+ loc_t parent_loc = {
+ 0,
+ };
+ gf_boolean_t locked = _gf_false;
+ gf_boolean_t dirty = _gf_false;
+ quota_synctask_t *args = NULL;
+ xlator_t *this = NULL;
+ loc_t *loc = NULL;
+ gf_boolean_t remove_xattr = _gf_true;
+ uint32_t nlink = 0;
+
+ GF_ASSERT(opaque);
+
+ args = (quota_synctask_t *)opaque;
+ loc = &args->loc;
+ contri = args->contri;
+ nlink = args->ia_nlink;
+ this = args->this;
+ THIS = this;
+
+ ret = mq_inode_loc_fill(NULL, loc->parent, &parent_loc);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "parent_loc fill failed for "
+ "child inode %s: ",
+ uuid_utoa(loc->inode->gfid));
+ goto out;
+ }
+
+ ret = mq_lock(this, &parent_loc, F_WRLCK);
+ if (ret < 0)
+ goto out;
+ locked = _gf_true;
+
+ if (contri.size >= 0) {
+        /* The contri parameter is supplied only for the rename operation.
+         * The removexattr has already been performed, so we need to skip
+         * removexattr for the rename operation.
+         */
+ remove_xattr = _gf_false;
+ delta.size = contri.size;
+ delta.file_count = contri.file_count;
+ delta.dir_count = contri.dir_count;
+ } else {
+ remove_xattr = _gf_true;
+
+ ret = mq_inode_ctx_get(loc->inode, this, &ctx);
+ if (ret < 0) {
+ gf_log_callingfn(this->name, GF_LOG_WARNING,
+ "ctx for"
+ " the node %s is NULL",
+ loc->path);
+ goto out;
}
- priv = this->private;
-
- ctx = local->ctx;
- contribution = local->contri;
-
- //prepare to update size & contribution of the inode
- GET_CONTRI_KEY (contri_key, contribution->gfid, ret);
- if (ret == -1)
- goto err;
+ contribution = mq_get_contribution_node(loc->parent, ctx);
+ if (contribution == NULL) {
+ ret = -1;
+ gf_log(this->name, GF_LOG_DEBUG,
+ "contribution for the node %s is NULL", loc->path);
+ goto out;
+ }
- LOCK (&ctx->lock);
+ LOCK(&contribution->lock);
{
- if (local->loc.inode->ia_type == IA_IFDIR ) {
- ret = dict_get_bin (dict, QUOTA_SIZE_KEY,
- (void **) &size);
- if (ret < 0)
- goto unlock;
-
- ctx->size = ntoh64 (*size);
- } else
- ctx->size = buf->ia_blocks * 512;
-
- ret = dict_get_bin (dict, contri_key, (void **) &contri);
- if (ret < 0)
- contribution->contribution = 0;
- else
- contribution->contribution = ntoh64 (*contri);
-
- ret = 0;
+ delta.size = contribution->contribution;
+ delta.file_count = contribution->file_count;
+ delta.dir_count = contribution->dir_count;
}
-unlock:
- UNLOCK (&ctx->lock);
+ UNLOCK(&contribution->lock);
+ }
- if (ret < 0)
- goto err;
+ ret = mq_get_set_dirty(this, &parent_loc, 1, &prev_dirty);
+ if (ret < 0)
+ goto out;
+ dirty = _gf_true;
- gf_log (this->name, GF_LOG_DEBUG, "%s %"PRId64 "%"PRId64,
- local->loc.path, ctx->size, contribution->contribution);
- newdict = dict_new ();
- if (newdict == NULL) {
- ret = -1;
- goto err;
- }
+ mq_sub_meta(&delta, NULL);
- local->delta = ctx->size - contribution->contribution;
+ if (remove_xattr) {
+ ret = mq_remove_contri(this, loc, ctx, contribution, &delta, nlink);
+ if (ret < 0)
+ goto out;
+ }
- QUOTA_ALLOC_OR_GOTO (delta, int64_t, ret, err);
+ if (quota_meta_is_null(&delta))
+ goto out;
- *delta = hton64 (local->delta);
+ ret = mq_update_size(this, &parent_loc, &delta);
+ if (ret < 0)
+ goto out;
- ret = dict_set_bin (newdict, contri_key, delta, 8);
- if (ret < 0) {
- ret = -1;
- goto err;
+out:
+ if (dirty) {
+ if (ret < 0 || prev_dirty) {
+ /* On failure clear dirty status flag.
+ * In the next lookup inspect_directory_xattr
+ * can set the status flag and fix the
+ * dirty directory.
+ * Do the same if dir was dirty before
+ * the txn
+ */
+ ret = mq_inode_ctx_get(parent_loc.inode, this, &parent_ctx);
+ if (ret == 0)
+ mq_set_ctx_dirty_status(parent_ctx, _gf_false);
+ } else {
+ ret = mq_mark_dirty(this, &parent_loc, 0);
}
+ }
- STACK_WIND (frame,
- quota_update_parent_size,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->xattrop,
- &local->loc,
- GF_XATTROP_ADD_ARRAY64,
- newdict);
- ret = 0;
+ if (locked)
+ ret = mq_lock(this, &parent_loc, F_UNLCK);
-err:
- if (op_ret == -1 || ret < 0) {
- local->err = 1;
+ if (ret >= 0)
+ ret = mq_initiate_quota_blocking_txn(this, &parent_loc, NULL);
- quota_release_parent_lock (frame, NULL, this, 0, 0);
- }
+ loc_wipe(&parent_loc);
- if (newdict)
- dict_unref (newdict);
+ if (contribution)
+ GF_REF_PUT(contribution);
- return 0;
+ return ret;
}
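
A standalone sketch (plain C, gluster types replaced by a small struct) of the dirty-flag protocol that this task and mq_initiate_quota_task both follow: mark the parent dirty on disk before touching its size, and clear the flag only if the update succeeded and the directory was not already dirty, so an interrupted transaction leaves the flag behind for lookup-time heal.

    #include <stdio.h>

    struct dir_state {
        int dirty;      /* stands in for the QUOTA_DIRTY_KEY xattr */
        long long size; /* stands in for the size xattr            */
    };

    static int
    update_parent(struct dir_state *parent, long long delta, int fail_midway)
    {
        int prev_dirty = parent->dirty;

        parent->dirty = 1;       /* mq_get_set_dirty(..., 1, &prev_dirty) */

        if (fail_midway)
            return -1;           /* crash/error: flag stays set for heal  */

        parent->size += delta;   /* mq_update_size()                      */

        if (!prev_dirty)
            parent->dirty = 0;   /* mq_mark_dirty(..., 0)                 */
        return 0;
    }

    int
    main(void)
    {
        struct dir_state parent = {0, 1 << 20};

        update_parent(&parent, -4096, 0);
        printf("size=%lld dirty=%d\n", parent.size, parent.dirty);

        update_parent(&parent, -4096, 1);
        printf("size=%lld dirty=%d (left for lookup-time heal)\n",
               parent.size, parent.dirty);
        return 0;
    }
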
int32_t
-quota_fetch_child_size_and_contri (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret,
- int32_t op_errno)
+mq_reduce_parent_size_txn(xlator_t *this, loc_t *origin_loc,
+ quota_meta_t *contri, uint32_t nlink,
+ call_stub_t *stub)
{
- int32_t ret = -1;
- char contri_key [512] = {0, };
- dict_t *newdict = NULL;
- quota_local_t *local = NULL;
- marker_conf_t *priv = NULL;
- quota_inode_ctx_t *ctx = NULL;
-
- local = frame->local;
-
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_ERROR,
- "%s couldnt mark dirty", local->parent_loc.path);
- goto err;
- }
-
- VALIDATE_OR_GOTO (local->ctx, err);
- VALIDATE_OR_GOTO (local->contri, err);
-
- gf_log (this->name, GF_LOG_DEBUG, "%s marked dirty", local->parent_loc.path);
-
- priv = this->private;
+ int32_t ret = -1;
+ loc_t loc = {
+ 0,
+ };
+ gf_boolean_t resume_stub = _gf_true;
- //update parent ctx
- ret = quota_inode_ctx_get (local->parent_loc.inode, this, &ctx);
- if (ret == -1)
- goto err;
+ GF_VALIDATE_OR_GOTO("marker", this, out);
+ GF_VALIDATE_OR_GOTO("marker", origin_loc, out);
- LOCK (&ctx->lock);
- {
- ctx->dirty = 1;
- }
- UNLOCK (&ctx->lock);
-
- newdict = dict_new ();
- if (newdict == NULL)
- goto err;
-
- if (local->loc.inode->ia_type == IA_IFDIR) {
- ret = dict_set_int64 (newdict, QUOTA_SIZE_KEY, 0);
- }
-
- GET_CONTRI_KEY (contri_key, local->contri->gfid, ret);
- if (ret < 0)
- goto err;
-
- ret = dict_set_int64 (newdict, contri_key, 0);
-
- STACK_WIND (frame, quota_update_inode_contribution, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->lookup, &local->loc, newdict);
+ ret = mq_prevalidate_txn(this, origin_loc, &loc, NULL, NULL);
+ if (ret < 0)
+ goto out;
+ if (loc_is_root(&loc)) {
ret = 0;
+ goto out;
+ }
-err:
- if (op_ret == -1 || ret == -1) {
- local->err = 1;
+ resume_stub = _gf_false;
+ ret = mq_synctask1(this, mq_reduce_parent_size_task, _gf_true, &loc, contri,
+ nlink, stub);
+out:
+ loc_wipe(&loc);
- quota_release_parent_lock (frame, NULL, this, 0, 0);
- }
+ if (resume_stub && stub)
+ call_resume(stub);
- if (newdict)
- dict_unref (newdict);
+ if (ret)
+ gf_log_callingfn(this ? this->name : "Marker", GF_LOG_ERROR,
+ "mq_reduce_parent_size_txn failed");
- return 0;
+ return ret;
}
-int32_t
-quota_markdirty (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno)
+int
+mq_initiate_quota_task(void *opaque)
{
- int32_t ret = -1;
- dict_t *dict = NULL;
- quota_local_t *local = NULL;
- marker_conf_t *priv = NULL;
-
- local = frame->local;
-
- if (op_ret == -1){
- gf_log (this->name, GF_LOG_ERROR,
- "lock setting failed on %s (%s)",
- local->parent_loc.path, strerror (op_errno));
-
- local->err = 1;
-
- quota_inodelk_cbk (frame, NULL, this, 0, 0);
-
- return 0;
+ int32_t ret = -1;
+ int32_t prev_dirty = 0;
+ loc_t child_loc = {
+ 0,
+ };
+ loc_t parent_loc = {
+ 0,
+ };
+ gf_boolean_t locked = _gf_false;
+ gf_boolean_t dirty = _gf_false;
+ gf_boolean_t status = _gf_false;
+ quota_meta_t delta = {
+ 0,
+ };
+ quota_synctask_t *args = NULL;
+ xlator_t *this = NULL;
+ loc_t *loc = NULL;
+ inode_contribution_t *contri = NULL;
+ quota_inode_ctx_t *ctx = NULL;
+ quota_inode_ctx_t *parent_ctx = NULL;
+ inode_t *tmp_parent = NULL;
+
+ GF_VALIDATE_OR_GOTO("marker", opaque, out);
+
+ args = (quota_synctask_t *)opaque;
+ loc = &args->loc;
+ this = args->this;
+
+ GF_VALIDATE_OR_GOTO("marker", this, out);
+ THIS = this;
+
+ GF_VALIDATE_OR_GOTO(this->name, loc, out);
+ GF_VALIDATE_OR_GOTO(this->name, loc->inode, out);
+
+ ret = mq_loc_copy(&child_loc, loc);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR, "loc copy failed");
+ goto out;
+ }
+
+ while (!__is_root_gfid(child_loc.gfid)) {
+ ret = mq_inode_ctx_get(child_loc.inode, this, &ctx);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "inode ctx get failed for %s, "
+ "aborting update txn",
+ child_loc.path);
+ goto out;
+ }
+
+ /* To improve performance, abort current transaction
+ * if one is already in progress for same inode
+ */
+ if (status == _gf_true) {
+            /* status will already be set before the txn starts,
+             * so it should not be set in the first
+             * loop iteration
+             */
+ ret = mq_test_and_set_ctx_updation_status(ctx, &status);
+ if (ret < 0 || status == _gf_true)
+ goto out;
}
- gf_log (this->name, GF_LOG_TRACE,
- "inodelk succeeded on %s", local->parent_loc.path);
+ if (child_loc.parent == NULL) {
+ ret = mq_build_ancestry(this, &child_loc);
+ if (ret < 0 || child_loc.parent == NULL) {
+ /* If application performs parallel remove
+ * operations on same set of files/directories
+ * then we may get ENOENT/ESTALE
+ */
+ gf_log(this->name,
+ (-ret == ENOENT || -ret == ESTALE) ? GF_LOG_DEBUG
+ : GF_LOG_ERROR,
+ "build ancestry failed for inode %s",
+ uuid_utoa(child_loc.inode->gfid));
+ ret = -1;
+ goto out;
+ }
+ }
- priv = this->private;
+ ret = mq_inode_loc_fill(NULL, child_loc.parent, &parent_loc);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "parent_loc fill "
+ "failed for child inode %s: ",
+ uuid_utoa(child_loc.inode->gfid));
+ goto out;
+ }
- dict = dict_new ();
- if (!dict) {
+ ret = mq_lock(this, &parent_loc, F_WRLCK);
+ if (ret < 0)
+ goto out;
+ locked = _gf_true;
+
+ mq_set_ctx_updation_status(ctx, _gf_false);
+ status = _gf_true;
+
+        /* The contribution node can be NULL in the scenarios below, and
+           should be created if needed:
+
+           Scenario 1)
+           Create a new contribution node.
+           Suppose a hard link for a file f1 present in directory d1 is
+           created in directory d2 (as f2). Now, since d2's
+           contribution is not there in f1's inode ctx, d2's
+           contribution xattr won't be created and will create problems
+           for quota operations.
+
+           Don't create the contribution if the parent has changed after
+           taking the lock; this can happen when a rename is performed
+           and a write is still in progress for the same file.
+
+           Scenario 2)
+           When a rename operation is performed, the contribution node
+           for the old path will be removed.
+
+           Create the contribution node only if the old parent is the
+           same as the new parent.
+           Consider the example below:
+           1) rename FOP invoked on file 'x'
+           2) write is still in progress for file 'x'
+           3) rename takes a lock on old-parent
+           4) write-update txn blocked on old-parent to acquire lock
+           5) in rename_cbk, contri xattrs are removed and contribution
+              is deleted and lock is released
+           6) now write-update txn gets the lock and updates the
+              wrong parent as it was holding lock on old parent,
+              so validate the parent once the lock is acquired
+
+           For more information on this problem, please see the
+           doc for marker_rename in file marker.c
+        */
+ contri = mq_get_contribution_node(child_loc.parent, ctx);
+ if (contri == NULL) {
+ tmp_parent = inode_parent(child_loc.inode, 0, NULL);
+ if (tmp_parent == NULL) {
+ /* This can happen if application performs
+ * parallel remove operations on same set
+ * of files/directories
+ */
+ gf_log(this->name, GF_LOG_WARNING,
+ "parent is "
+ "NULL for inode %s",
+ uuid_utoa(child_loc.inode->gfid));
+ ret = -1;
+ goto out;
+ }
+ if (gf_uuid_compare(tmp_parent->gfid, parent_loc.gfid)) {
+ /* abort txn if parent has changed */
+ ret = 0;
+ goto out;
+ }
+
+ inode_unref(tmp_parent);
+ tmp_parent = NULL;
+
+ contri = mq_add_new_contribution_node(this, ctx, &child_loc);
+ if (contri == NULL) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "Failed to "
+ "create contribution node for %s, "
+ "abort update txn",
+ child_loc.path);
ret = -1;
- goto err;
+ goto out;
+ }
}
- ret = dict_set_int8 (dict, QUOTA_DIRTY_KEY, 1);
- if (ret == -1)
- goto err;
+ ret = mq_get_delta(this, &child_loc, &delta, ctx, contri);
+ if (ret < 0)
+ goto out;
- STACK_WIND (frame, quota_fetch_child_size_and_contri,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->setxattr,
- &local->parent_loc, dict, 0);
+ if (quota_meta_is_null(&delta))
+ goto out;
- ret = 0;
-err:
- if (ret == -1) {
- local->err = 1;
+ ret = mq_get_set_dirty(this, &parent_loc, 1, &prev_dirty);
+ if (ret < 0)
+ goto out;
+ dirty = _gf_true;
+
+ ret = mq_update_contri(this, &child_loc, contri, &delta);
+ if (ret < 0)
+ goto out;
+
+ ret = mq_update_size(this, &parent_loc, &delta);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_DEBUG,
+ "rollback "
+ "contri updation");
+ mq_sub_meta(&delta, NULL);
+ mq_update_contri(this, &child_loc, contri, &delta);
+ goto out;
+ }
- quota_release_parent_lock (frame, NULL, this, 0, 0);
+ if (prev_dirty == 0) {
+ ret = mq_mark_dirty(this, &parent_loc, 0);
+ } else {
+ ret = mq_inode_ctx_get(parent_loc.inode, this, &parent_ctx);
+ if (ret == 0)
+ mq_set_ctx_dirty_status(parent_ctx, _gf_false);
}
+ dirty = _gf_false;
+ prev_dirty = 0;
- if (dict)
- dict_unref (dict);
+ ret = mq_lock(this, &parent_loc, F_UNLCK);
+ locked = _gf_false;
- return 0;
-}
+ if (__is_root_gfid(parent_loc.gfid))
+ break;
+        /* Repeat the above steps upwards till the root */
+ loc_wipe(&child_loc);
+ ret = mq_loc_copy(&child_loc, &parent_loc);
+ if (ret < 0)
+ goto out;
-int32_t
-get_lock_on_parent (call_frame_t *frame, xlator_t *this)
-{
- struct gf_flock lock;
- quota_local_t *local = NULL;
+ loc_wipe(&parent_loc);
+ GF_REF_PUT(contri);
+ contri = NULL;
+ }
- GF_VALIDATE_OR_GOTO ("marker", frame, fr_destroy);
+out:
+ if ((dirty) && (ret < 0)) {
+ /* On failure clear dirty status flag.
+ * In the next lookup inspect_directory_xattr
+ * can set the status flag and fix the
+ * dirty directory.
+ * Do the same if the dir was dirty before
+ * txn
+ */
+ ret = mq_inode_ctx_get(parent_loc.inode, this, &parent_ctx);
+ if (ret == 0)
+ mq_set_ctx_dirty_status(parent_ctx, _gf_false);
+ }
- local = frame->local;
- gf_log (this->name, GF_LOG_DEBUG, "taking lock on %s",
- local->parent_loc.path);
+ if (locked)
+ ret = mq_lock(this, &parent_loc, F_UNLCK);
- lock.l_len = 0;
- lock.l_start = 0;
- lock.l_type = F_WRLCK;
- lock.l_whence = SEEK_SET;
+ if (ctx && status == _gf_false)
+ mq_set_ctx_updation_status(ctx, _gf_false);
- STACK_WIND (frame,
- quota_markdirty,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->inodelk,
- this->name, &local->parent_loc, F_SETLKW, &lock);
+ loc_wipe(&child_loc);
+ loc_wipe(&parent_loc);
- return 0;
+ if (tmp_parent)
+ inode_unref(tmp_parent);
-fr_destroy:
- QUOTA_STACK_DESTROY (frame, this);
+ if (contri)
+ GF_REF_PUT(contri);
- return 0;
+ return 0;
}
-
int
-start_quota_txn (xlator_t *this, loc_t *loc,
- quota_inode_ctx_t *ctx,
- inode_contribution_t *contri)
+_mq_initiate_quota_txn(xlator_t *this, loc_t *origin_loc, struct iatt *buf,
+ gf_boolean_t spawn)
{
- int32_t ret = -1;
- call_frame_t *frame = NULL;
- quota_local_t *local = NULL;
-
- frame = create_frame (this, this->ctx->pool);
- if (frame == NULL)
- goto err;
-
- mq_assign_lk_owner (this, frame);
-
- local = quota_local_new ();
- if (local == NULL)
- goto fr_destroy;
-
- frame->local = local;
-
- ret = loc_copy (&local->loc, loc);
- if (ret < 0)
- goto fr_destroy;
-
- ret = quota_inode_loc_fill (NULL, local->loc.parent,
- &local->parent_loc);
- if (ret < 0)
- goto fr_destroy;
-
- local->ctx = ctx;
- local->contri = contri;
+ int32_t ret = -1;
+ quota_inode_ctx_t *ctx = NULL;
+ gf_boolean_t status = _gf_true;
+ loc_t loc = {
+ 0,
+ };
+
+ ret = mq_prevalidate_txn(this, origin_loc, &loc, &ctx, buf);
+ if (ret < 0)
+ goto out;
+
+ if (loc_is_root(&loc)) {
+ ret = 0;
+ goto out;
+ }
- get_lock_on_parent (frame, this);
+ ret = mq_test_and_set_ctx_updation_status(ctx, &status);
+ if (ret < 0 || status == _gf_true)
+ goto out;
- return 0;
+ ret = mq_synctask(this, mq_initiate_quota_task, spawn, &loc);
-fr_destroy:
- QUOTA_STACK_DESTROY (frame, this);
+out:
+ if (ret < 0 && status == _gf_false)
+ mq_set_ctx_updation_status(ctx, _gf_false);
-err:
- return -1;
+ loc_wipe(&loc);
+ return ret;
}
-
int
-initiate_quota_txn (xlator_t *this, loc_t *loc)
+mq_initiate_quota_txn(xlator_t *this, loc_t *loc, struct iatt *buf)
{
- int32_t ret = -1;
- quota_inode_ctx_t *ctx = NULL;
- inode_contribution_t *contribution = NULL;
+ int32_t ret = -1;
- VALIDATE_OR_GOTO (loc, out);
+ GF_VALIDATE_OR_GOTO("marker", this, out);
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+ GF_VALIDATE_OR_GOTO("marker", loc->inode, out);
- ret = quota_inode_ctx_get (loc->inode, this, &ctx);
- if (ret == -1) {
- gf_log (this->name, GF_LOG_WARNING,
- "inode ctx get failed, aborting quota txn");
- ret = -1;
- goto out;
- }
-
- contribution = get_contribution_node (loc->parent, ctx);
- if (contribution == NULL)
- goto out;
-
- start_quota_txn (this, loc, ctx, contribution);
+ ret = _mq_initiate_quota_txn(this, loc, buf, _gf_true);
out:
- return 0;
+ return ret;
}
-
-int32_t
-validate_inode_size_contribution (xlator_t *this,
- loc_t *loc,
- quota_inode_ctx_t *ctx,
- inode_contribution_t *contribution)
+int
+mq_initiate_quota_blocking_txn(xlator_t *this, loc_t *loc, struct iatt *buf)
{
- if (ctx->size != contribution->contribution)
- initiate_quota_txn (this, loc);
+ int32_t ret = -1;
- return 0;
-}
+ GF_VALIDATE_OR_GOTO("marker", this, out);
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+ GF_VALIDATE_OR_GOTO("marker", loc->inode, out);
+ ret = _mq_initiate_quota_txn(this, loc, buf, _gf_false);
+out:
+ return ret;
+}
-int32_t
-inspect_directory_xattr (xlator_t *this,
- loc_t *loc,
- dict_t *dict,
- struct iatt buf)
+int
+mq_update_dirty_inode_task(void *opaque)
{
- int32_t ret = 0;
- int8_t dirty = -1;
- int64_t *size = NULL;
- int64_t *contri = NULL;
- char contri_key [512] = {0, };
- marker_conf_t *priv = NULL;
- gf_boolean_t not_root = _gf_false;
- quota_inode_ctx_t *ctx = NULL;
- inode_contribution_t *contribution = NULL;
-
- priv = this->private;
-
- ret = quota_inode_ctx_get (loc->inode, this, &ctx);
+ int32_t ret = -1;
+ fd_t *fd = NULL;
+ off_t offset = 0;
+ gf_dirent_t entries;
+ gf_dirent_t *entry = NULL;
+ gf_boolean_t locked = _gf_false;
+ gf_boolean_t updated = _gf_false;
+ int32_t dirty = 0;
+ quota_meta_t contri = {
+ 0,
+ };
+ quota_meta_t size = {
+ 0,
+ };
+ quota_meta_t contri_sum = {
+ 0,
+ };
+ quota_meta_t delta = {
+ 0,
+ };
+ quota_synctask_t *args = NULL;
+ xlator_t *this = NULL;
+ loc_t *loc = NULL;
+ quota_inode_ctx_t *ctx = NULL;
+ dict_t *xdata = NULL;
+ char contri_key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+ int keylen = 0;
+
+ GF_ASSERT(opaque);
+
+ args = (quota_synctask_t *)opaque;
+ loc = &args->loc;
+ this = args->this;
+ THIS = this;
+ INIT_LIST_HEAD(&entries.list);
+
+ ret = mq_inode_ctx_get(loc->inode, this, &ctx);
+ if (ret < 0)
+ goto out;
+
+ GET_CONTRI_KEY(this, contri_key, loc->gfid, keylen);
+ if (keylen < 0) {
+ ret = keylen;
+ goto out;
+ }
+
+ xdata = dict_new();
+ if (xdata == NULL) {
+ gf_log(this->name, GF_LOG_ERROR, "dict_new failed");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_int64(xdata, contri_key, 0);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR, "dict_set failed");
+ goto out;
+ }
+
+ ret = mq_lock(this, loc, F_WRLCK);
+ if (ret < 0)
+ goto out;
+ locked = _gf_true;
+
+ ret = mq_get_dirty(this, loc, &dirty);
+ if (ret < 0 || dirty == 0) {
+ ret = 0;
+ goto out;
+ }
+
+ fd = fd_create(loc->inode, 0);
+ if (!fd) {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to create fd");
+ ret = -1;
+ goto out;
+ }
+
+ ret = syncop_opendir(this, loc, fd, NULL, NULL);
+ if (ret < 0) {
+ gf_log(this->name,
+ (-ret == ENOENT || -ret == ESTALE) ? GF_LOG_DEBUG : GF_LOG_ERROR,
+ "opendir failed "
+ "for %s: %s",
+ loc->path, strerror(-ret));
+ goto out;
+ }
+
+ fd_bind(fd);
+ while ((ret = syncop_readdirp(this, fd, 131072, offset, &entries, xdata,
+ NULL)) != 0) {
if (ret < 0) {
- ctx = quota_inode_ctx_new (loc->inode, this);
- if (ctx == NULL) {
- gf_log (this->name, GF_LOG_WARNING,
- "quota_inode_ctx_new failed");
- ret = -1;
- goto out;
- }
+ gf_log(this->name,
+ (-ret == ENOENT || -ret == ESTALE) ? GF_LOG_DEBUG
+ : GF_LOG_ERROR,
+ "readdirp failed "
+ "for %s: %s",
+ loc->path, strerror(-ret));
+ goto out;
}
- ret = dict_get_bin (dict, QUOTA_SIZE_KEY, (void **) &size);
- if (ret < 0)
- goto out;
-
- ret = dict_get_int8 (dict, QUOTA_DIRTY_KEY, &dirty);
- if (ret < 0)
- goto out;
+ if (list_empty(&entries.list))
+ break;
- if (strcmp (loc->path, "/") != 0) {
- not_root = _gf_true;
-
- contribution = add_new_contribution_node (this, ctx, loc);
- if (contribution == NULL) {
- gf_log (this->name, GF_LOG_DEBUG,
- "cannot add a new contributio node");
- goto out;
- }
+ list_for_each_entry(entry, &entries.list, list)
+ {
+ offset = entry->d_off;
- GET_CONTRI_KEY (contri_key, contribution->gfid, ret);
- if (ret < 0)
- goto out;
+ if (!strcmp(entry->d_name, ".") || !strcmp(entry->d_name, ".."))
+ continue;
- ret = dict_get_bin (dict, contri_key, (void **) &contri);
- if (ret < 0)
- goto out;
+ memset(&contri, 0, sizeof(contri));
+ quota_dict_get_meta(entry->dict, contri_key, keylen, &contri);
+ if (quota_meta_is_null(&contri))
+ continue;
- contribution->contribution = ntoh64 (*contri);
+ mq_add_meta(&contri_sum, &contri);
}
- ctx->size = ntoh64 (*size);
+ gf_dirent_free(&entries);
+ }
+    /* Include the directory itself in the count */
+ contri_sum.dir_count++;
- gf_log (this->name, GF_LOG_DEBUG, "size=%"PRId64
- " contri=%"PRId64, ctx->size,
- contribution?contribution->contribution:0);
+ ret = _mq_get_metadata(this, loc, NULL, &size, 0);
+ if (ret < 0)
+ goto out;
- ctx->dirty = dirty;
- if (ctx->dirty == 1) {
- update_dirty_inode (this, loc, ctx, contribution);
- } else if (not_root == _gf_true &&
- ctx->size != contribution->contribution) {
- initiate_quota_txn (this, loc);
- }
-
- ret = 0;
-out:
- if (ret)
- quota_set_inode_xattr (this, loc);
+ mq_compute_delta(&delta, &contri_sum, &size);
- return 0;
-}
+ if (quota_meta_is_null(&delta))
+ goto out;
-int32_t
-inspect_file_xattr (xlator_t *this,
- loc_t *loc,
- dict_t *dict,
- struct iatt buf)
-{
- int32_t ret = -1;
- uint64_t contri_int = 0;
- int64_t *contri_ptr = NULL;
- char contri_key [512] = {0, };
- marker_conf_t *priv = NULL;
- quota_inode_ctx_t *ctx = NULL;
- inode_contribution_t *contribution = NULL;
+ gf_log(this->name, GF_LOG_INFO,
+ "calculated size = %" PRId64 ", original size = %" PRIu64
+ ", diff = %" PRIu64 ", path = %s ",
+ contri_sum.size, size.size, delta.size, loc->path);
- priv = this->private;
+ gf_log(this->name, GF_LOG_INFO,
+ "calculated f_count = %" PRId64 ", original f_count = %" PRIu64
+ ", diff = %" PRIu64 ", path = %s ",
+ contri_sum.file_count, size.file_count, delta.file_count, loc->path);
- ret = quota_inode_ctx_get (loc->inode, this, &ctx);
- if (ret < 0) {
- ctx = quota_inode_ctx_new (loc->inode, this);
- if (ctx == NULL) {
- gf_log (this->name, GF_LOG_WARNING,
- "quota_inode_ctx_new failed");
- ret = -1;
- goto out;
- }
- }
+ gf_log(this->name, GF_LOG_INFO,
+ "calculated d_count = %" PRId64 ", original d_count = %" PRIu64
+ ", diff = %" PRIu64 ", path = %s ",
+ contri_sum.dir_count, size.dir_count, delta.dir_count, loc->path);
- contribution = add_new_contribution_node (this, ctx, loc);
- if (contribution == NULL)
- goto out;
+ ret = mq_update_size(this, loc, &delta);
+ if (ret < 0)
+ goto out;
- LOCK (&ctx->lock);
- {
- ctx->size = 512 * buf.ia_blocks;
- }
- UNLOCK (&ctx->lock);
+ updated = _gf_true;
- list_for_each_entry (contribution, &ctx->contribution_head, contri_list) {
- GET_CONTRI_KEY (contri_key, contribution->gfid, ret);
- if (ret < 0)
- continue;
+out:
+ gf_dirent_free(&entries);
- ret = dict_get_bin (dict, contri_key, (void **) &contri_int);
- if (ret == 0) {
- contri_ptr = (int64_t *)(unsigned long)contri_int;
+ if (fd)
+ fd_unref(fd);
- contribution->contribution = ntoh64 (*contri_ptr);
+ if (xdata)
+ dict_unref(xdata);
- gf_log (this->name, GF_LOG_DEBUG,
- "size=%"PRId64 " contri=%"PRId64, ctx->size,
- contribution->contribution);
+ if (ret < 0) {
+ /* On failure clear dirty status flag.
+ * In the next lookup inspect_directory_xattr
+ * can set the status flag and fix the
+ * dirty directory
+ */
+ if (ctx)
+ mq_set_ctx_dirty_status(ctx, _gf_false);
+ } else if (dirty) {
+ mq_mark_dirty(this, loc, 0);
+ }
- ret = validate_inode_size_contribution
- (this, loc, ctx, contribution);
- } else
- initiate_quota_txn (this, loc);
- }
+ if (locked)
+ mq_lock(this, loc, F_UNLCK);
-out:
- return ret;
-}
+ if (updated)
+ mq_initiate_quota_blocking_txn(this, loc, NULL);
-int32_t
-quota_xattr_state (xlator_t *this,
- loc_t *loc,
- dict_t *dict,
- struct iatt buf)
-{
- if (buf.ia_type == IA_IFREG ||
- buf.ia_type == IA_IFLNK) {
- k ++;
- inspect_file_xattr (this, loc, dict, buf);
- } else if (buf.ia_type == IA_IFDIR)
- inspect_directory_xattr (this, loc, dict, buf);
-
- return 0;
+ return ret;
}
int32_t
-quota_req_xattr (xlator_t *this,
- loc_t *loc,
- dict_t *dict)
+mq_update_dirty_inode_txn(xlator_t *this, loc_t *loc, quota_inode_ctx_t *ctx)
{
- int32_t ret = -1;
- marker_conf_t *priv = NULL;
-
- GF_VALIDATE_OR_GOTO ("marker", this, out);
- GF_VALIDATE_OR_GOTO ("marker", loc, out);
- GF_VALIDATE_OR_GOTO ("marker", dict, out);
+ int32_t ret = -1;
+ gf_boolean_t status = _gf_true;
- priv = this->private;
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+ GF_VALIDATE_OR_GOTO("marker", loc->inode, out);
- //if not "/" then request contribution
- if (strcmp (loc->path, "/") == 0)
- goto set_size;
-
- ret = dict_set_contribution (this, dict, loc);
- if (ret == -1)
- goto out;
-
-set_size:
- ret = dict_set_uint64 (dict, QUOTA_SIZE_KEY, 0);
- if (ret < 0) {
- ret = -1;
- goto out;
- }
-
- ret = dict_set_int8 (dict, QUOTA_DIRTY_KEY, 0);
- if (ret < 0) {
- ret = -1;
- goto out;
- }
-
- ret = 0;
+ mq_test_and_set_ctx_status(ctx, &ctx->dirty_status, &status);
+ if (status == _gf_true)
+ goto out;
+ ret = mq_synctask(this, mq_update_dirty_inode_task, _gf_true, loc);
out:
- return ret;
-}
-
+ if (ret < 0 && status == _gf_false)
+ mq_set_ctx_dirty_status(ctx, _gf_false);
-int32_t
-quota_removexattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
-{
- QUOTA_STACK_DESTROY (frame, this);
-
- return 0;
+ return ret;
}
int32_t
-quota_inode_remove_done (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+mq_inspect_directory_xattr(xlator_t *this, quota_inode_ctx_t *ctx,
+ inode_contribution_t *contribution, loc_t *loc,
+ dict_t *dict)
{
- int32_t ret = 0;
- char contri_key [512] = {0, };
- quota_local_t *local = NULL;
-
- local = (quota_local_t *) frame->local;
+ int32_t ret = -1;
+ int8_t dirty = -1;
+ quota_meta_t size = {
+ 0,
+ };
+ quota_meta_t contri = {
+ 0,
+ };
+ quota_meta_t delta = {
+ 0,
+ };
+ char contri_key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+ char size_key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+ int keylen = 0;
+ gf_boolean_t status = _gf_false;
+
+ ret = dict_get_int8(dict, QUOTA_DIRTY_KEY, &dirty);
+ if (ret < 0) {
+ /* dirty is set only on the first file write operation
+ * so ignore this error
+ */
+ ret = 0;
+ dirty = 0;
+ }
+
+ GET_SIZE_KEY(this, size_key, keylen);
+ if (keylen < 0) {
+ ret = -1;
+ goto out;
+ }
+ ret = _quota_dict_get_meta(this, dict, size_key, keylen, &size, IA_IFDIR,
+ _gf_false);
+ if (ret < 0)
+ goto create_xattr;
+
+ if (!contribution)
+ goto create_xattr;
+
+ if (!loc_is_root(loc)) {
+ GET_CONTRI_KEY(this, contri_key, contribution->gfid, keylen);
+ if (keylen < 0) {
+ ret = -1;
+ goto out;
+ }
+ ret = _quota_dict_get_meta(this, dict, contri_key, keylen, &contri,
+ IA_IFDIR, _gf_false);
+ if (ret < 0)
+ goto create_xattr;
- if (op_ret == -1 || local->err == -1) {
- quota_removexattr_cbk (frame, NULL, this, -1, 0);
- return 0;
- }
+ LOCK(&contribution->lock);
+ {
+ contribution->contribution = contri.size;
+ contribution->file_count = contri.file_count;
+ contribution->dir_count = contri.dir_count;
+ }
+ UNLOCK(&contribution->lock);
+ }
+
+ LOCK(&ctx->lock);
+ {
+ ctx->size = size.size;
+ ctx->file_count = size.file_count;
+ ctx->dir_count = size.dir_count;
+ ctx->dirty = dirty;
+ }
+ UNLOCK(&ctx->lock);
- frame->local = NULL;
+ ret = mq_get_ctx_updation_status(ctx, &status);
+ if (ret < 0 || status == _gf_true) {
+ /* If the update txn is in progress abort inspection */
+ ret = 0;
+ goto out;
+ }
- if (local->hl_count > 1) {
- GET_CONTRI_KEY (contri_key, local->contri->gfid, ret);
+ mq_compute_delta(&delta, &size, &contri);
- STACK_WIND (frame, quota_removexattr_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->removexattr,
- &local->loc, contri_key);
- ret = 0;
- } else {
- quota_removexattr_cbk (frame, NULL, this, 0, 0);
- }
+ if (dirty) {
+ ret = mq_update_dirty_inode_txn(this, loc, ctx);
+ goto out;
+ }
- if (strcmp (local->parent_loc.path, "/") != 0) {
- get_parent_inode_local (this, local);
+ if (!loc_is_root(loc) && !quota_meta_is_null(&delta))
+ mq_initiate_quota_txn(this, loc, NULL);
- start_quota_txn (this, &local->loc, local->ctx, local->contri);
- }
+ ret = 0;
+ goto out;
- /* TODO: free local in quota_local_unref only*/
- quota_local_unref (this, local);
- GF_FREE (local);
+create_xattr:
+ if (ret < 0)
+ ret = mq_create_xattrs_txn(this, loc, NULL);
- return 0;
+out:
+ return ret;
}
int32_t
-mq_inode_remove_done (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *dict)
+mq_inspect_file_xattr(xlator_t *this, quota_inode_ctx_t *ctx,
+ inode_contribution_t *contribution, loc_t *loc,
+ dict_t *dict, struct iatt *buf)
{
- int32_t ret;
- struct gf_flock lock;
- quota_inode_ctx_t *ctx;
- quota_local_t *local = NULL;
-
- local = frame->local;
- if (op_ret == -1)
- local->err = -1;
-
- ret = quota_inode_ctx_get (local->parent_loc.inode, this, &ctx);
- if (ret == 0)
- ctx->size -= local->contri->contribution;
-
- local->contri->contribution = 0;
-
- lock.l_type = F_UNLCK;
- lock.l_whence = SEEK_SET;
- lock.l_start = 0;
- lock.l_len = 0;
- lock.l_pid = 0;
-
- STACK_WIND (frame,
- quota_inode_remove_done,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->inodelk,
- this->name, &local->parent_loc,
- F_SETLKW, &lock);
- return 0;
-}
-
-static int32_t
-mq_reduce_parent_size_xattr (call_frame_t *frame, void *cookie,
- xlator_t *this, int32_t op_ret, int32_t op_errno)
-{
- int32_t ret = -1;
- int64_t *size = NULL;
- dict_t *dict = NULL;
- marker_conf_t *priv = NULL;
- quota_local_t *local = NULL;
- inode_contribution_t *contribution = NULL;
-
- local = frame->local;
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_WARNING,
- "inodelk set failed on %s", local->parent_loc.path);
- QUOTA_STACK_DESTROY (frame, this);
- return 0;
+ int32_t ret = -1;
+ quota_meta_t size = {
+ 0,
+ };
+ quota_meta_t contri = {
+ 0,
+ };
+ quota_meta_t delta = {
+ 0,
+ };
+ char contri_key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+ int keylen = 0;
+ gf_boolean_t status = _gf_false;
+
+ if (!buf || !contribution || !ctx)
+ goto out;
+
+ LOCK(&ctx->lock);
+ {
+ ctx->size = 512 * buf->ia_blocks;
+ ctx->file_count = 1;
+ ctx->dir_count = 0;
+
+ size.size = ctx->size;
+ size.file_count = ctx->file_count;
+ size.dir_count = ctx->dir_count;
+ }
+ UNLOCK(&ctx->lock);
+
+ GET_CONTRI_KEY(this, contri_key, contribution->gfid, keylen);
+ if (keylen < 0) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = _quota_dict_get_meta(this, dict, contri_key, keylen, &contri,
+ IA_IFREG, _gf_true);
+ if (ret < 0) {
+ ret = mq_create_xattrs_txn(this, loc, NULL);
+ } else {
+ LOCK(&contribution->lock);
+ {
+ contribution->contribution = contri.size;
+ contribution->file_count = contri.file_count;
+ contribution->dir_count = contri.dir_count;
}
+ UNLOCK(&contribution->lock);
- VALIDATE_OR_GOTO (local->contri, err);
-
- priv = this->private;
-
- contribution = local->contri;
-
- dict = dict_new ();
- if (dict == NULL) {
- ret = -1;
- goto err;
+ ret = mq_get_ctx_updation_status(ctx, &status);
+ if (ret < 0 || status == _gf_true) {
+ /* If the update txn is in progress abort inspection */
+ ret = 0;
+ goto out;
}
- QUOTA_ALLOC_OR_GOTO (size, int64_t, ret, err);
-
- *size = hton64 (-contribution->contribution);
-
-
- ret = dict_set_bin (dict, QUOTA_SIZE_KEY, size, 8);
- if (ret < 0)
- goto err;
+ mq_compute_delta(&delta, &size, &contri);
+ if (!quota_meta_is_null(&delta))
+ mq_initiate_quota_txn(this, loc, NULL);
+ }
+    /* TODO: revisit this code when fixing hardlinks */
-
- STACK_WIND (frame, mq_inode_remove_done, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->xattrop, &local->parent_loc,
- GF_XATTROP_ADD_ARRAY64, dict);
- dict_unref (dict);
- return 0;
-
-err:
- local->err = 1;
- mq_inode_remove_done (frame, NULL, this, -1, 0, NULL);
- if (dict)
- dict_unref (dict);
- return 0;
+out:
+ return ret;
}
int32_t
-reduce_parent_size (xlator_t *this, loc_t *loc)
+mq_xattr_state(xlator_t *this, loc_t *origin_loc, dict_t *dict,
+ struct iatt *buf)
{
- int32_t ret = -1;
- struct gf_flock lock = {0,};
- call_frame_t *frame = NULL;
- marker_conf_t *priv = NULL;
- quota_local_t *local = NULL;
- quota_inode_ctx_t *ctx = NULL;
- inode_contribution_t *contribution = NULL;
-
- GF_VALIDATE_OR_GOTO ("marker", this, out);
- GF_VALIDATE_OR_GOTO ("marker", loc, out);
-
- priv = this->private;
-
- ret = quota_inode_ctx_get (loc->inode, this, &ctx);
- if (ret < 0)
- goto out;
-
- contribution = get_contribution_node (loc->parent, ctx);
- if (contribution == NULL)
- goto out;
-
- local = quota_local_new ();
- if (local == NULL) {
- ret = -1;
- goto out;
- }
-
- ret = loc_copy (&local->loc, loc);
- if (ret < 0)
- goto out;
-
- local->ctx = ctx;
- local->contri = contribution;
-
- ret = quota_inode_loc_fill (NULL, loc->parent, &local->parent_loc);
- if (ret < 0)
- goto out;
-
- frame = create_frame (this, this->ctx->pool);
- if (!frame) {
- ret = -1;
- goto out;
- }
-
- mq_assign_lk_owner (this, frame);
-
- frame->local = local;
-
- lock.l_len = 0;
- lock.l_start = 0;
- lock.l_type = F_WRLCK;
- lock.l_whence = SEEK_SET;
-
- STACK_WIND (frame,
- mq_reduce_parent_size_xattr,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->inodelk,
- this->name, &local->parent_loc, F_SETLKW, &lock);
- ret = 0;
+ int32_t ret = -1;
+ quota_inode_ctx_t *ctx = NULL;
+ loc_t loc = {
+ 0,
+ };
+ inode_contribution_t *contribution = NULL;
+
+ ret = mq_prevalidate_txn(this, origin_loc, &loc, &ctx, buf);
+ if (ret < 0 || loc.parent == NULL)
+ goto out;
+
+ if (!loc_is_root(&loc)) {
+ contribution = mq_add_new_contribution_node(this, ctx, &loc);
+ if (contribution == NULL) {
+ if (!gf_uuid_is_null(loc.inode->gfid))
+ gf_log(this->name, GF_LOG_WARNING,
+ "cannot add a new contribution node "
+ "(%s)",
+ uuid_utoa(loc.gfid));
+ ret = -1;
+ goto out;
+ }
+ if (buf->ia_type == IA_IFDIR)
+ mq_inspect_directory_xattr(this, ctx, contribution, &loc, dict);
+ else
+ mq_inspect_file_xattr(this, ctx, contribution, &loc, dict, buf);
+ } else {
+ mq_inspect_directory_xattr(this, ctx, 0, &loc, dict);
+ }
out:
- if (ret < 0) {
- quota_local_unref (this, local);
- GF_FREE (local);
- }
- return ret;
-}
+ loc_wipe(&loc);
+ if (contribution)
+ GF_REF_PUT(contribution);
-int32_t
-init_quota_priv (xlator_t *this)
-{
- strcpy (volname, "quota");
-
- return 0;
+ return ret;
}
-
int32_t
-quota_rename_update_newpath (xlator_t *this, loc_t *loc)
+mq_req_xattr(xlator_t *this, loc_t *loc, dict_t *dict, char *contri_key,
+ char *size_key)
{
- int32_t ret = -1;
- quota_inode_ctx_t *ctx = NULL;
- inode_contribution_t *contribution = NULL;
+ int32_t ret = -1;
+ char key[QUOTA_KEY_MAX] = {
+ 0,
+ };
- GF_VALIDATE_OR_GOTO ("marker", this, out);
- GF_VALIDATE_OR_GOTO ("marker", loc, out);
- GF_VALIDATE_OR_GOTO ("marker", loc->inode, out);
+ GF_VALIDATE_OR_GOTO("marker", this, out);
+ GF_VALIDATE_OR_GOTO("marker", loc, out);
+ GF_VALIDATE_OR_GOTO("marker", dict, out);
- ret = quota_inode_ctx_get (loc->inode, this, &ctx);
+ if (!loc_is_root(loc)) {
+ ret = mq_dict_set_contribution(this, dict, loc, NULL, contri_key);
if (ret < 0)
- goto out;
+ goto out;
+ }
- contribution = add_new_contribution_node (this, ctx, loc);
- if (contribution == NULL) {
- ret = -1;
- goto out;
+ GET_SIZE_KEY(this, key, ret);
+ if (ret < 0)
+ goto out;
+ if (size_key)
+ if (snprintf(size_key, QUOTA_KEY_MAX, "%s", key) >= QUOTA_KEY_MAX) {
+ ret = -1;
+ goto out;
}
- initiate_quota_txn (this, loc);
+ ret = dict_set_uint64(dict, key, 0);
+ if (ret < 0)
+ goto out;
+
+ ret = dict_set_int8(dict, QUOTA_DIRTY_KEY, 0);
+
out:
- return ret;
+ if (ret < 0)
+ gf_log_callingfn(this ? this->name : "Marker", GF_LOG_ERROR,
+ "dict set failed");
+ return ret;
}
int32_t
-quota_forget (xlator_t *this, quota_inode_ctx_t *ctx)
+mq_forget(xlator_t *this, quota_inode_ctx_t *ctx)
{
- inode_contribution_t *contri = NULL;
- inode_contribution_t *next = NULL;
+ inode_contribution_t *contri = NULL;
+ inode_contribution_t *next = NULL;
- GF_VALIDATE_OR_GOTO ("marker", this, out);
- GF_VALIDATE_OR_GOTO ("marker", ctx, out);
+ GF_VALIDATE_OR_GOTO("marker", this, out);
+ GF_VALIDATE_OR_GOTO("marker", ctx, out);
- list_for_each_entry_safe (contri, next, &ctx->contribution_head,
- contri_list) {
- list_del (&contri->contri_list);
- GF_FREE (contri);
- }
+ list_for_each_entry_safe(contri, next, &ctx->contribution_head, contri_list)
+ {
+ list_del_init(&contri->contri_list);
+ GF_REF_PUT(contri);
+ }
- LOCK_DESTROY (&ctx->lock);
- GF_FREE (ctx);
+ LOCK_DESTROY(&ctx->lock);
+ GF_FREE(ctx);
out:
- return 0;
+ return 0;
}
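
A minimal sketch (not part of the patch) of the per-directory accounting
the update transaction above performs with mq_get_delta, mq_update_contri
and mq_update_size: the delta between what a child currently uses and what
its parent has recorded for it is applied to both the child's contribution
xattr and the parent's size xattr, and is re-applied in the opposite
direction to roll back if the parent update fails. The struct and helper
names below are illustrative only, assuming the three counters (size,
file_count, dir_count) kept in quota_meta_t:

#include <stdint.h>

/* Illustrative stand-in for quota_meta_t: bytes, files, directories. */
typedef struct {
    int64_t size;
    int64_t file_count;
    int64_t dir_count;
} demo_meta_t;

/* delta = current usage of the child minus its recorded contribution
 * (the computation mq_compute_delta performs before the parent update). */
static void
demo_compute_delta(demo_meta_t *delta, const demo_meta_t *usage,
                   const demo_meta_t *contri)
{
    delta->size = usage->size - contri->size;
    delta->file_count = usage->file_count - contri->file_count;
    delta->dir_count = usage->dir_count - contri->dir_count;
}

/* Negate a delta in place; the rollback path above uses mq_sub_meta for
 * this purpose before re-applying the contribution update. */
static void
demo_negate(demo_meta_t *delta)
{
    delta->size = -delta->size;
    delta->file_count = -delta->file_count;
    delta->dir_count = -delta->dir_count;
}
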
diff --git a/xlators/features/marker/src/marker-quota.h b/xlators/features/marker/src/marker-quota.h
index ea54bb6315c..4bbf6878b22 100644
--- a/xlators/features/marker/src/marker-quota.h
+++ b/xlators/features/marker/src/marker-quota.h
@@ -1,170 +1,140 @@
-/*Copyright (c) 2008-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+/*
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
-
#ifndef _MARKER_QUOTA_H
#define _MARKER_QUOTA_H
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
-
-#include "marker.h"
-#include "xlator.h"
+#include <glusterfs/xlator.h>
#include "marker-mem-types.h"
+#include <glusterfs/refcount.h>
+#include <glusterfs/quota-common-utils.h>
+#include <glusterfs/call-stub.h>
#define QUOTA_XATTR_PREFIX "trusted.glusterfs"
#define QUOTA_DIRTY_KEY "trusted.glusterfs.quota.dirty"
#define CONTRIBUTION "contri"
-#define VOL_NAME volname
-#define CONTRI_KEY_MAX 512
+#define QUOTA_KEY_MAX 512
#define READDIR_BUF 4096
-char volname [40];
-
-#define QUOTA_STACK_DESTROY(_frame, _this) \
- do { \
- quota_local_t *_local = NULL; \
- _local = _frame->local; \
- _frame->local = NULL; \
- STACK_DESTROY (_frame->root); \
- quota_local_unref (_this, _local); \
- GF_FREE (_local); \
- } while (0)
-
-
-#define QUOTA_ALLOC(var, type, ret) \
- do { \
- ret = 0; \
- var = GF_CALLOC (sizeof (type), 1, \
- gf_marker_mt_##type); \
- if (!var) { \
- gf_log ("", GF_LOG_ERROR, \
- "out of memory"); \
- ret = -1; \
- } \
- } while (0);
-
-#define QUOTA_ALLOC_OR_GOTO(var, type, ret, label) \
- do { \
- var = GF_CALLOC (sizeof (type), 1, \
- gf_marker_mt_##type); \
- if (!var) { \
- gf_log ("", GF_LOG_ERROR, \
- "out of memory"); \
- ret = -1; \
- goto label; \
- } \
- ret = 0; \
- } while (0);
-
-#define GET_CONTRI_KEY(var, _gfid, _ret) \
- do { \
- char _gfid_unparsed[40]; \
- uuid_unparse (_gfid, _gfid_unparsed); \
- _ret = snprintf (var, CONTRI_KEY_MAX, QUOTA_XATTR_PREFIX \
- ".%s.%s." CONTRIBUTION, VOL_NAME, \
- _gfid_unparsed); \
- } while (0);
-
-#define QUOTA_SAFE_INCREMENT(lock, var) \
- do { \
- LOCK (lock); \
- var ++; \
- UNLOCK (lock); \
- } while (0)
-
-#define QUOTA_SAFE_DECREMENT(lock, var) \
- do { \
- LOCK (lock); \
- var --; \
- UNLOCK (lock); \
- } while (0)
-
+#define QUOTA_ALLOC(var, type, ret) \
+ do { \
+ ret = 0; \
+ var = GF_CALLOC(sizeof(type), 1, gf_marker_mt_##type); \
+ if (!var) { \
+ ret = -1; \
+ } \
+ } while (0);
+
+#define QUOTA_ALLOC_OR_GOTO(var, type, ret, label) \
+ do { \
+ var = GF_CALLOC(sizeof(type), 1, gf_marker_mt_##type); \
+ if (!var) { \
+ gf_log("", GF_LOG_ERROR, "out of memory"); \
+ ret = -1; \
+ goto label; \
+ } \
+ ret = 0; \
+ } while (0);
+
+#define GET_QUOTA_KEY(_this, var, key, _ret) \
+ do { \
+ marker_conf_t *_priv = _this->private; \
+ if (_priv->version > 0) \
+ _ret = snprintf(var, QUOTA_KEY_MAX, "%s.%d", key, _priv->version); \
+ else \
+ _ret = snprintf(var, QUOTA_KEY_MAX, "%s", key); \
+ } while (0)
+
+#define GET_CONTRI_KEY(_this, var, _gfid, _ret) \
+ do { \
+ char _tmp_var[QUOTA_KEY_MAX] = { \
+ 0, \
+ }; \
+ if (_gfid != NULL) { \
+ char _gfid_unparsed[40]; \
+ gf_uuid_unparse(_gfid, _gfid_unparsed); \
+ _ret = snprintf(_tmp_var, QUOTA_KEY_MAX, \
+ QUOTA_XATTR_PREFIX ".%s.%s." CONTRIBUTION, \
+ "quota", _gfid_unparsed); \
+ } else { \
+ _ret = snprintf(_tmp_var, QUOTA_KEY_MAX, \
+ QUOTA_XATTR_PREFIX ".%s.." CONTRIBUTION, "quota"); \
+ } \
+ GET_QUOTA_KEY(_this, var, _tmp_var, _ret); \
+ } while (0)
+
+#define GET_SIZE_KEY(_this, var, _ret) \
+ { \
+ GET_QUOTA_KEY(_this, var, QUOTA_SIZE_KEY, _ret); \
+ }
+
+#define QUOTA_SAFE_INCREMENT(lock, var) \
+ do { \
+ LOCK(lock); \
+ var++; \
+ UNLOCK(lock); \
+ } while (0)
struct quota_inode_ctx {
- int64_t size;
- int8_t dirty;
- gf_lock_t lock;
- struct list_head contribution_head;
+ int64_t size;
+ int64_t file_count;
+ int64_t dir_count;
+ int8_t dirty;
+ gf_boolean_t create_status;
+ gf_boolean_t updation_status;
+ gf_boolean_t dirty_status;
+ gf_lock_t lock;
+ struct list_head contribution_head;
};
typedef struct quota_inode_ctx quota_inode_ctx_t;
-struct inode_contribution {
- struct list_head contri_list;
- int64_t contribution;
- uuid_t gfid;
+struct quota_synctask {
+ xlator_t *this;
+ loc_t loc;
+ quota_meta_t contri;
+ gf_boolean_t is_static;
+ uint32_t ia_nlink;
+ call_stub_t *stub;
};
-typedef struct inode_contribution inode_contribution_t;
+typedef struct quota_synctask quota_synctask_t;
-struct quota_local {
- int64_t delta;
- int64_t d_off;
- int32_t err;
- int32_t ref;
- int64_t sum;
- int32_t hl_count;
- int32_t dentry_child_count;
-
- fd_t *fd;
- call_frame_t *frame;
- gf_lock_t lock;
-
- loc_t loc;
- loc_t parent_loc;
-
- quota_inode_ctx_t *ctx;
- inode_contribution_t *contri;
+struct inode_contribution {
+ struct list_head contri_list;
+ int64_t contribution;
+ int64_t file_count;
+ int64_t dir_count;
+ uuid_t gfid;
+ gf_lock_t lock;
+ GF_REF_DECL;
};
-typedef struct quota_local quota_local_t;
-
-int32_t
-get_lock_on_parent (call_frame_t *, xlator_t *);
-
-int32_t
-quota_req_xattr (xlator_t *, loc_t *, dict_t *);
-
-int32_t
-init_quota_priv (xlator_t *);
+typedef struct inode_contribution inode_contribution_t;
int32_t
-quota_xattr_state (xlator_t *, loc_t *, dict_t *, struct iatt);
+mq_req_xattr(xlator_t *, loc_t *, dict_t *, char *, char *);
int32_t
-quota_set_inode_xattr (xlator_t *, loc_t *);
+mq_xattr_state(xlator_t *, loc_t *, dict_t *, struct iatt *);
int
-initiate_quota_txn (xlator_t *, loc_t *);
+mq_initiate_quota_txn(xlator_t *, loc_t *, struct iatt *);
-int32_t
-quota_dirty_inode_readdir (call_frame_t *, void *, xlator_t *,
- int32_t, int32_t, fd_t *);
-
-int32_t
-reduce_parent_size (xlator_t *, loc_t *);
+int
+mq_initiate_quota_blocking_txn(xlator_t *, loc_t *, struct iatt *);
-int32_t
-quota_rename_update_newpath (xlator_t *, loc_t *);
+int
+mq_create_xattrs_txn(xlator_t *this, loc_t *loc, struct iatt *buf);
int32_t
-inspect_file_xattr (xlator_t *this, loc_t *loc, dict_t *dict, struct iatt buf);
+mq_reduce_parent_size_txn(xlator_t *, loc_t *, quota_meta_t *, uint32_t nlink,
+ call_stub_t *stub);
int32_t
-quota_forget (xlator_t *, quota_inode_ctx_t *);
+mq_forget(xlator_t *, quota_inode_ctx_t *);
#endif
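
For reference, a short illustrative sketch of the versioned xattr key names
the GET_QUOTA_KEY / GET_CONTRI_KEY macros above produce; the gfid string and
version value are made up, and the helper below is not part of the patch:

#include <stdio.h>

#define DEMO_KEY_MAX 512

/* Mirrors GET_CONTRI_KEY + GET_QUOTA_KEY: with a positive version the key
 * becomes "trusted.glusterfs.quota.<gfid>.contri.<version>"; with version 0
 * the unversioned, pre-upgrade key name is kept. */
static int
demo_contri_key(char *out, const char *gfid_str, int version)
{
    char base[DEMO_KEY_MAX];

    snprintf(base, sizeof(base), "trusted.glusterfs.quota.%s.contri",
             gfid_str);
    if (version > 0)
        return snprintf(out, DEMO_KEY_MAX, "%s.%d", base, version);
    return snprintf(out, DEMO_KEY_MAX, "%s", base);
}
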
diff --git a/xlators/features/marker/src/marker.c b/xlators/features/marker/src/marker.c
index d7b792bc471..1375ccc498c 100644
--- a/xlators/features/marker/src/marker.c
+++ b/xlators/features/marker/src/marker.c
@@ -1,1317 +1,2320 @@
-/*Copyright (c) 2008-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+/*
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
-
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
-
-#include "xlator.h"
-#include "defaults.h"
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
#include "libxlator.h"
#include "marker.h"
#include "marker-mem-types.h"
#include "marker-quota.h"
+#include "marker-quota-helper.h"
#include "marker-common.h"
+#include <glusterfs/byte-order.h>
+#include <glusterfs/syncop.h>
+#include <glusterfs/syscall.h>
+
+#include <fnmatch.h>
+
+#define _GF_UID_GID_CHANGED 1
+
+static char *mq_ext_xattrs[] = {
+ QUOTA_SIZE_KEY,
+ QUOTA_LIMIT_KEY,
+ QUOTA_LIMIT_OBJECTS_KEY,
+ NULL,
+};
void
-fini (xlator_t *this);
+fini(xlator_t *this);
int32_t
-marker_start_setxattr (call_frame_t *, xlator_t *);
+marker_start_setxattr(call_frame_t *, xlator_t *);
+
+/* When a client/quotad requests quota xattrs,
+ * replace the key-name by appending the version number
+ * to the end of the key-name.
+ * In the cbk, the result is returned under the original
+ * key-name.
+ * The functions marker_key_replace_with_ver and
+ * marker_key_set_ver below are used for adding/removing
+ * the version in the key-name.
+ */
+int
+marker_key_replace_with_ver(xlator_t *this, dict_t *dict)
+{
+ int ret = -1;
+ int i = 0;
+ marker_conf_t *priv = NULL;
+ char key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+
+ priv = this->private;
+
+ if (dict == NULL || priv->version <= 0) {
+ ret = 0;
+ goto out;
+ }
+
+ for (i = 0; mq_ext_xattrs[i]; i++) {
+ if (dict_get(dict, mq_ext_xattrs[i])) {
+ GET_QUOTA_KEY(this, key, mq_ext_xattrs[i], ret);
+ if (ret < 0)
+ goto out;
+
+ ret = dict_set(dict, key, dict_get(dict, mq_ext_xattrs[i]));
+ if (ret < 0)
+ goto out;
+
+ dict_del(dict, mq_ext_xattrs[i]);
+ }
+ }
+
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int
+marker_key_set_ver(xlator_t *this, dict_t *dict)
+{
+ int ret = -1;
+ int i = -1;
+ marker_conf_t *priv = NULL;
+ char key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+
+ priv = this->private;
+
+ if (dict == NULL || priv->version <= 0) {
+ ret = 0;
+ goto out;
+ }
+
+ for (i = 0; mq_ext_xattrs[i]; i++) {
+ GET_QUOTA_KEY(this, key, mq_ext_xattrs[i], ret);
+ if (ret < 0)
+ goto out;
+
+ if (dict_get(dict, key))
+ dict_set(dict, mq_ext_xattrs[i], dict_get(dict, key));
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
marker_local_t *
-marker_local_ref (marker_local_t *local)
+marker_local_ref(marker_local_t *local)
{
- GF_VALIDATE_OR_GOTO ("marker", local, err);
+ GF_VALIDATE_OR_GOTO("marker", local, err);
- LOCK (&local->lock);
- {
- local->ref++;
- }
- UNLOCK (&local->lock);
+ LOCK(&local->lock);
+ {
+ local->ref++;
+ }
+ UNLOCK(&local->lock);
- return local;
+ return local;
err:
- return NULL;
+ return NULL;
}
int
-marker_loc_fill (loc_t *loc, inode_t *inode, inode_t *parent, char *path)
+marker_loc_fill(loc_t *loc, inode_t *inode, inode_t *parent, char *path)
{
- int ret = -1;
+ int ret = -1;
- if (!loc)
- return ret;
+ if (!loc)
+ return ret;
- if (inode) {
- loc->inode = inode_ref (inode);
- loc->ino = inode->ino;
+ if (inode) {
+ loc->inode = inode_ref(inode);
+ if (gf_uuid_is_null(loc->gfid)) {
+ gf_uuid_copy(loc->gfid, loc->inode->gfid);
}
+ }
- if (parent)
- loc->parent = inode_ref (parent);
+ if (parent)
+ loc->parent = inode_ref(parent);
- loc->path = gf_strdup (path);
+ if (path) {
+ loc->path = gf_strdup(path);
if (!loc->path) {
- gf_log ("loc fill", GF_LOG_ERROR, "strdup failed");
- goto loc_wipe;
+ gf_log("loc fill", GF_LOG_ERROR, "strdup failed");
+ goto loc_wipe;
}
- loc->name = strrchr (loc->path, '/');
+ loc->name = strrchr(loc->path, '/');
if (loc->name)
- loc->name++;
- else
- goto loc_wipe;
+ loc->name++;
+ }
- ret = 0;
+ ret = 0;
loc_wipe:
- if (ret < 0)
- loc_wipe (loc);
+ if (ret < 0)
+ loc_wipe(loc);
- return ret;
+ return ret;
}
int
-marker_inode_loc_fill (inode_t *inode, loc_t *loc)
+_marker_inode_loc_fill(inode_t *inode, inode_t *parent, char *name, loc_t *loc)
{
- char *resolvedpath = NULL;
- inode_t *parent = NULL;
- int ret = -1;
+ char *resolvedpath = NULL;
+ int ret = -1;
+ gf_boolean_t free_parent = _gf_false;
- if ((!inode) || (!loc))
- return ret;
+ if ((!inode) || (!loc))
+ return ret;
- if ((inode) && (inode->ino == 1)) {
- loc->parent = NULL;
- goto ignore_parent;
- }
+ if (parent && name)
+ ret = inode_path(parent, name, &resolvedpath);
+ else
+ ret = inode_path(inode, NULL, &resolvedpath);
+ if (ret < 0)
+ goto err;
- parent = inode_parent (inode, 0, NULL);
- if (!parent) {
- goto err;
- }
+ if (parent == NULL) {
+ parent = inode_parent(inode, NULL, NULL);
+ free_parent = _gf_true;
+ }
-ignore_parent:
- ret = inode_path (inode, NULL, &resolvedpath);
- if (ret < 0)
- goto err;
-
- ret = marker_loc_fill (loc, inode, parent, resolvedpath);
- if (ret < 0)
- goto err;
+ ret = marker_loc_fill(loc, inode, parent, resolvedpath);
+ if (ret < 0)
+ goto err;
err:
- if (parent)
- inode_unref (parent);
+ if (free_parent)
+ inode_unref(parent);
- if (resolvedpath)
- GF_FREE (resolvedpath);
+ GF_FREE(resolvedpath);
- return ret;
+ return ret;
+}
+
+int
+marker_inode_loc_fill(inode_t *inode, loc_t *loc)
+{
+ return _marker_inode_loc_fill(inode, NULL, NULL, loc);
}
int32_t
-marker_trav_parent (marker_local_t *local)
+marker_trav_parent(marker_local_t *local)
{
- int32_t ret = 0;
- loc_t loc = {0, };
+ int32_t ret = 0;
+ loc_t loc = {
+ 0,
+ };
+ inode_t *parent = NULL;
+ int8_t need_unref = 0;
+
+ if (!local->loc.parent) {
+ parent = inode_parent(local->loc.inode, NULL, NULL);
+ if (parent)
+ need_unref = 1;
+ } else
+ parent = local->loc.parent;
- ret = marker_inode_loc_fill (local->loc.parent, &loc);
+ ret = marker_inode_loc_fill(parent, &loc);
- if (ret < 0) {
- ret = -1;
- goto out;
- }
+ if (ret < 0) {
+ ret = -1;
+ goto out;
+ }
- loc_wipe (&local->loc);
+ loc_wipe(&local->loc);
- local->loc = loc;
+ local->loc = loc;
out:
- return ret;
+ if (need_unref)
+ inode_unref(parent);
+
+ return ret;
}
-int32_t
-marker_error_handler (xlator_t *this)
+void
+marker_error_handler(xlator_t *this, marker_local_t *local, int32_t op_errno)
{
- marker_conf_t *priv = NULL;
-
- priv = (marker_conf_t *) this->private;
-
- unlink (priv->timestamp_file);
-
- return 0;
+ marker_conf_t *priv = (marker_conf_t *)this->private;
+ const char *path = local ? ((local->loc.path) ? local->loc.path
+ : uuid_utoa(local->loc.gfid))
+ : "<nul>";
+
+ gf_log(this->name, GF_LOG_CRITICAL,
+ "Indexing gone corrupt at %s (reason: %s)."
+ " Geo-replication slave content needs to be revalidated",
+ path, strerror(op_errno));
+ sys_unlink(priv->timestamp_file);
}
int32_t
-marker_local_unref (marker_local_t *local)
+marker_local_unref(marker_local_t *local)
{
- int32_t var = 0;
+ int32_t var = 0;
- if (local == NULL)
- return -1;
+ if (local == NULL)
+ return -1;
- LOCK (&local->lock);
- {
- var = --local->ref;
- }
- UNLOCK (&local->lock);
+ LOCK(&local->lock);
+ {
+ var = --local->ref;
+ }
+ UNLOCK(&local->lock);
- if (var != 0)
- goto out;
+ if (var != 0)
+ goto out;
- loc_wipe (&local->loc);
+ loc_wipe(&local->loc);
+ loc_wipe(&local->parent_loc);
+ if (local->xdata)
+ dict_unref(local->xdata);
- if (local->oplocal) {
- loc_wipe (&local->oplocal->loc);
- GF_FREE (local->oplocal);
- }
- GF_FREE (local);
+ if (local->lk_frame) {
+ STACK_DESTROY(local->lk_frame->root);
+ local->lk_frame = NULL;
+ }
+
+ if (local->oplocal) {
+ marker_local_unref(local->oplocal);
+ local->oplocal = NULL;
+ }
+ mem_put(local);
out:
- return 0;
+ return 0;
}
int32_t
-stat_stampfile (xlator_t *this, marker_conf_t *priv, struct volume_mark **status)
+stat_stampfile(xlator_t *this, marker_conf_t *priv, struct volume_mark **status)
{
- struct stat buf;
- struct volume_mark *vol_mark;
+ struct stat buf = {
+ 0,
+ };
+ struct volume_mark *vol_mark = NULL;
- vol_mark = GF_CALLOC (sizeof (struct volume_mark), 1,
- gf_marker_mt_volume_mark);
+ vol_mark = GF_CALLOC(sizeof(struct volume_mark), 1,
+ gf_marker_mt_volume_mark);
- vol_mark->major = 1;
- vol_mark->minor = 0;
+ vol_mark->major = 1;
+ vol_mark->minor = 0;
- GF_ASSERT (sizeof (priv->volume_uuid_bin) == 16);
- memcpy (vol_mark->uuid, priv->volume_uuid_bin, 16);
+ GF_ASSERT(sizeof(priv->volume_uuid_bin) == 16);
+ memcpy(vol_mark->uuid, priv->volume_uuid_bin, 16);
- if (stat (priv->timestamp_file, &buf) != -1) {
- vol_mark->retval = 0;
- vol_mark->sec = htonl (buf.st_ctime);
- vol_mark->usec = htonl (ST_CTIM_NSEC (&buf)/1000);
- } else
- vol_mark->retval = 1;
+ if (sys_stat(priv->timestamp_file, &buf) != -1) {
+ vol_mark->retval = 0;
+ vol_mark->sec = htonl(buf.st_mtime);
+ vol_mark->usec = htonl(ST_MTIM_NSEC(&buf) / 1000);
+ } else
+ vol_mark->retval = 1;
- *status = vol_mark;
+ *status = vol_mark;
- return 0;
+ return 0;
}
int32_t
-marker_getxattr_stampfile_cbk (call_frame_t *frame, xlator_t *this,
- const char *name, struct volume_mark *vol_mark)
+marker_getxattr_stampfile_cbk(call_frame_t *frame, xlator_t *this,
+ const char *name, struct volume_mark *vol_mark,
+ dict_t *xdata)
{
- int32_t ret;
- dict_t *dict = NULL;
+ int32_t ret = -1;
+ dict_t *dict = NULL;
- if (vol_mark == NULL){
- STACK_UNWIND_STRICT (getxattr, frame, -1, ENOMEM, NULL);
+ if (vol_mark == NULL) {
+ STACK_UNWIND_STRICT(getxattr, frame, -1, ENOMEM, NULL, NULL);
- goto out;
- }
+ goto out;
+ }
- dict = dict_new ();
+ dict = dict_new();
- ret = dict_set_bin (dict, (char *)name, vol_mark,
- sizeof (struct volume_mark));
+ ret = dict_set_bin(dict, (char *)name, vol_mark,
+ sizeof(struct volume_mark));
+ if (ret) {
+ GF_FREE(vol_mark);
+ gf_log(this->name, GF_LOG_WARNING, "failed to set key %s", name);
+ }
- STACK_UNWIND_STRICT (getxattr, frame, 0, 0, dict);
+ STACK_UNWIND_STRICT(getxattr, frame, 0, 0, dict, xdata);
- dict_unref (dict);
+ if (dict)
+ dict_unref(dict);
out:
- return 0;
+ return 0;
}
-int32_t
-call_from_special_client (call_frame_t *frame, xlator_t *this, const char *name)
+gf_boolean_t
+call_from_special_client(call_frame_t *frame, xlator_t *this, const char *name)
{
- struct volume_mark *vol_mark = NULL;
- marker_conf_t *priv = NULL;
- gf_boolean_t ret = _gf_true;
+ struct volume_mark *vol_mark = NULL;
+ marker_conf_t *priv = NULL;
+ gf_boolean_t is_true = _gf_true;
- priv = (marker_conf_t *)this->private;
+ priv = (marker_conf_t *)this->private;
- if (frame->root->pid != -1 || name == NULL ||
- strcmp (name, MARKER_XATTR_PREFIX "." VOLUME_MARK) != 0) {
- ret = _gf_false;
- goto out;
- }
+ if (frame->root->pid != GF_CLIENT_PID_GSYNCD || name == NULL ||
+ strcmp(name, MARKER_XATTR_PREFIX "." VOLUME_MARK) != 0) {
+ is_true = _gf_false;
+ goto out;
+ }
- stat_stampfile (this, priv, &vol_mark);
+ stat_stampfile(this, priv, &vol_mark);
- marker_getxattr_stampfile_cbk (frame, this, name, vol_mark);
+ marker_getxattr_stampfile_cbk(frame, this, name, vol_mark, NULL);
out:
- return ret;
+ return is_true;
}
-int32_t
-marker_getxattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *dict)
+static gf_boolean_t
+_is_quota_internal_xattr(dict_t *d, char *k, data_t *v, void *data)
{
- if (cookie) {
- gf_log (this->name, GF_LOG_DEBUG,
- "Filtering the quota extended attributes");
+ int i = 0;
+ char **external_xattrs = data;
- dict_foreach (dict, marker_filter_quota_xattr, NULL);
- }
- STACK_UNWIND_STRICT (getxattr, frame, op_ret, op_errno, dict);
- return 0;
+ for (i = 0; external_xattrs && external_xattrs[i]; i++) {
+ if (strcmp(k, external_xattrs[i]) == 0)
+ return _gf_false;
+ }
+
+ if (fnmatch("trusted.glusterfs.quota*", k, 0) == 0)
+ return _gf_true;
+
+ /* It would be nice if posix filters pgfid xattrs. But since marker
+ * also takes up responsibility to clean these up, adding the filtering
+ * here (Check 'quota_xattr_cleaner')
+ */
+ if (fnmatch(PGFID_XATTR_KEY_PREFIX "*", k, 0) == 0)
+ return _gf_true;
+
+ return _gf_false;
}
-int32_t
-marker_getxattr (call_frame_t *frame, xlator_t *this, loc_t *loc,
- const char *name)
+static void
+marker_filter_internal_xattrs(xlator_t *this, dict_t *xattrs)
{
- gf_boolean_t ret = _gf_false;
- marker_conf_t *priv = NULL;
- unsigned long cookie = 0;
+ marker_conf_t *priv = NULL;
+ char **ext = NULL;
- priv = this->private;
+ priv = this->private;
+ if (priv->feature_enabled & GF_QUOTA)
+ ext = mq_ext_xattrs;
- if (priv == NULL || (priv->feature_enabled & GF_XTIME) == 0)
- goto wind;
+ dict_foreach_match(xattrs, _is_quota_internal_xattr, ext,
+ dict_remove_foreach_fn, NULL);
+}
- gf_log (this->name, GF_LOG_DEBUG, "USER:PID = %d", frame->root->pid);
+static void
+marker_filter_gsyncd_xattrs(call_frame_t *frame, xlator_t *this, dict_t *xattrs)
+{
+ marker_conf_t *priv = NULL;
- ret = call_from_special_client (frame, this, name);
-wind:
- if (ret == _gf_false) {
- if (name == NULL) {
- /* Signifies that marker translator
- * has to filter the quota's xattr's,
- * this is to prevent afr from performing
- * self healing on marker-quota xattrs'
- */
- cookie = 1;
- }
- STACK_WIND_COOKIE (frame, marker_getxattr_cbk, (void *)cookie,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->getxattr, loc,
- name);
- }
+ priv = this->private;
+ GF_ASSERT(priv);
+ GF_ASSERT(frame);
- return 0;
+ if (xattrs && frame->root->pid != GF_CLIENT_PID_GSYNCD) {
+ GF_REMOVE_INTERNAL_XATTR(GF_XATTR_XTIME_PATTERN, xattrs);
+ }
+ return;
}
+int32_t
+marker_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
+{
+ int32_t ret = -1;
+ if (op_ret < 0)
+ goto unwind;
+
+ ret = marker_key_set_ver(this, dict);
+ if (ret < 0) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ if (cookie) {
+ gf_log(this->name, GF_LOG_DEBUG,
+ "Filtering the quota extended attributes");
+
+ /* If the getxattr is from a non special client, then do not
+ copy the quota related xattrs (except the quota limit key
+ i.e trusted.glusterfs.quota.limit-set which has been set by
+ glusterd on the directory on which quota limit is set.) for
+ directories. Let the healing of xattrs happen upon lookup.
+ NOTE: setting of trusted.glusterfs.quota.limit-set as of now
+ happens from glusterd. It should be moved to quotad. Also
+ trusted.glusterfs.quota.limit-set is set on directory which
+ is permanent till quota is removed on that directory or limit
+ is changed. So let that xattr be healed by other xlators
+ properly whenever directory healing is done.
+ */
+ /*
+ * Except limit-set xattr, rest of the xattrs are maintained
+ * by quota xlator. Don't expose them to other xlators.
+ * This filter makes sure quota xattrs are not healed as part of
+ * metadata self-heal
+ */
+ marker_filter_internal_xattrs(frame->this, dict);
+ }
+
+ /* Filter gsyncd xtime xattr for non gsyncd clients */
+ marker_filter_gsyncd_xattrs(frame, frame->this, dict);
+
+unwind:
+ MARKER_STACK_UNWIND(getxattr, frame, op_ret, op_errno, dict, xdata);
+ return 0;
+}
int32_t
-marker_setxattr_done (call_frame_t *frame)
+marker_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, dict_t *xdata)
{
- marker_local_t *local = NULL;
+ gf_boolean_t is_true = _gf_false;
+ marker_conf_t *priv = NULL;
+ unsigned long cookie = 0;
+ marker_local_t *local = NULL;
+ char key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+ int32_t ret = -1;
+ int32_t i = 0;
+
+ priv = this->private;
+
+ if (name) {
+ for (i = 0; mq_ext_xattrs[i]; i++) {
+ if (strcmp(name, mq_ext_xattrs[i]))
+ continue;
+
+ GET_QUOTA_KEY(this, key, mq_ext_xattrs[i], ret);
+ if (ret < 0)
+ goto out;
+ name = key;
+ break;
+ }
+ }
- local = (marker_local_t *) frame->local;
+ frame->local = mem_get0(this->local_pool);
+ local = frame->local;
+ if (local == NULL)
+ goto out;
- frame->local = NULL;
+ MARKER_INIT_LOCAL(frame, local);
- STACK_DESTROY (frame->root);
+ if ((loc_copy(&local->loc, loc)) < 0)
+ goto out;
- marker_local_unref (local);
+ gf_log(this->name, GF_LOG_DEBUG, "USER:PID = %d", frame->root->pid);
- return 0;
+ if (priv && priv->feature_enabled & GF_XTIME)
+ is_true = call_from_special_client(frame, this, name);
+
+ if (is_true == _gf_false) {
+ if (name == NULL) {
+ /* Signifies that marker translator
+ * has to filter the quota's xattr's,
+ * this is to prevent afr from performing
+ * self healing on marker-quota xattrs'
+ */
+ cookie = 1;
+ }
+ STACK_WIND_COOKIE(frame, marker_getxattr_cbk, (void *)cookie,
+ FIRST_CHILD(this), FIRST_CHILD(this)->fops->getxattr,
+ loc, name, xdata);
+ }
+
+ return 0;
+out:
+ MARKER_STACK_UNWIND(getxattr, frame, -1, ENOMEM, NULL, NULL);
+ return 0;
}
-int
-marker_specific_setxattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+int32_t
+marker_setxattr_done(call_frame_t *frame)
{
- int32_t ret = 0;
- int32_t done = 0;
- marker_local_t *local = NULL;
+ marker_local_t *local = NULL;
- local = (marker_local_t*) frame->local;
+ local = (marker_local_t *)frame->local;
- if (op_ret == -1 && op_errno == ENOSPC) {
- marker_error_handler (this);
- done = 1;
- goto out;
- }
+ frame->local = NULL;
- if (strcmp (local->loc.path, "/") == 0) {
- done = 1;
- goto out;
- }
+ STACK_DESTROY(frame->root);
- ret = marker_trav_parent (local);
+ marker_local_unref(local);
- if (ret == -1) {
- gf_log (this->name, GF_LOG_DEBUG, "Error occured "
- "while traversing to the parent, stopping marker");
+ return 0;
+}
- done = 1;
+int
+marker_specific_setxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ int32_t ret = 0;
+ int32_t done = 1;
+ marker_local_t *local = NULL;
- goto out;
+ local = (marker_local_t *)frame->local;
+
+ if (op_ret == -1 && op_errno == ENOSPC) {
+ marker_error_handler(this, local, op_errno);
+ goto out;
+ }
+
+ if (local) {
+ if (local->loc.path && strcmp(local->loc.path, "/") == 0) {
+ goto out;
+ }
+ if (__is_root_gfid(local->loc.gfid)) {
+ goto out;
}
+ }
- marker_start_setxattr (frame, this);
+ ret = (local) ? marker_trav_parent(local) : -1;
+ if (ret == -1) {
+ gf_log(this->name, GF_LOG_DEBUG,
+ "Error occurred "
+ "while traversing to the parent, stopping marker");
+ goto out;
+ }
+
+ marker_start_setxattr(frame, this);
+ done = 0;
out:
- if (done) {
- marker_setxattr_done (frame);
- }
+ if (done) {
+ marker_setxattr_done(frame);
+ }
- return 0;
+ return 0;
}
int32_t
-marker_start_setxattr (call_frame_t *frame, xlator_t *this)
+marker_start_setxattr(call_frame_t *frame, xlator_t *this)
{
- int32_t ret = 0;
- dict_t *dict = NULL;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ int32_t ret = -1;
+ dict_t *dict = NULL;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- local = (marker_local_t*) frame->local;
+ local = (marker_local_t *)frame->local;
- dict = dict_new ();
+ if (!local)
+ goto out;
- ret = dict_set_static_bin (dict, priv->marker_xattr,
- (void *)local->timebuf, 8);
+ dict = dict_new();
- gf_log (this->name, GF_LOG_DEBUG, "path = %s", local->loc.path);
+ if (!dict)
+ goto out;
- STACK_WIND (frame, marker_specific_setxattr_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->setxattr, &local->loc, dict, 0);
+ if (local->loc.inode && gf_uuid_is_null(local->loc.gfid))
+ gf_uuid_copy(local->loc.gfid, local->loc.inode->gfid);
- dict_unref (dict);
+ GF_UUID_ASSERT(local->loc.gfid);
- return 0;
+ ret = dict_set_static_bin(dict, priv->marker_xattr, (void *)local->timebuf,
+ 8);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING, "failed to set marker xattr (%s)",
+ local->loc.path);
+ goto out;
+ }
+
+ STACK_WIND(frame, marker_specific_setxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setxattr, &local->loc, dict, 0, NULL);
+
+ ret = 0;
+out:
+ if (dict)
+ dict_unref(dict);
+
+ return ret;
}
void
-marker_gettimeofday (marker_local_t *local)
+marker_gettimeofday(marker_local_t *local)
{
- struct timeval tv;
+ struct timeval tv = {
+ 0,
+ };
- gettimeofday (&tv, NULL);
+ gettimeofday(&tv, NULL);
- local->timebuf [0] = htonl (tv.tv_sec);
- local->timebuf [1] = htonl (tv.tv_usec);
+ local->timebuf[0] = htonl(tv.tv_sec);
+ local->timebuf[1] = htonl(tv.tv_usec);
- return;
+ return;
}
int32_t
-marker_create_frame (xlator_t *this, marker_local_t *local)
+marker_create_frame(xlator_t *this, marker_local_t *local)
{
- call_frame_t *frame = NULL;
+ call_frame_t *frame = NULL;
+
+ frame = create_frame(this, this->ctx->pool);
- frame = create_frame (this, this->ctx->pool);
+ if (!frame)
+ return -1;
- frame->local = (void *) local;
+ frame->local = (void *)local;
- marker_start_setxattr (frame, this);
+ marker_start_setxattr(frame, this);
- return 0;
+ return 0;
}
int32_t
-marker_xtime_update_marks (xlator_t *this, marker_local_t *local)
+marker_xtime_update_marks(xlator_t *this, marker_local_t *local)
{
- marker_gettimeofday (local);
+ marker_conf_t *priv = NULL;
- marker_local_ref (local);
+ GF_VALIDATE_OR_GOTO("marker", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, local, out);
- marker_create_frame (this, local);
+ priv = this->private;
- return 0;
-}
+ if ((local->pid == GF_CLIENT_PID_GSYNCD &&
+ !(priv->feature_enabled & GF_XTIME_GSYNC_FORCE)) ||
+ (local->pid == GF_CLIENT_PID_DEFRAG))
+ goto out;
+
+ marker_gettimeofday(local);
+ marker_local_ref(local);
+
+ marker_create_frame(this, local);
+out:
+ return 0;
+}
int32_t
-marker_mkdir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, inode_t *inode,
- struct iatt *buf, struct iatt *preparent,
- struct iatt *postparent)
+marker_mkdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
{
- marker_conf_t *priv = NULL;
- marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ quota_inode_ctx_t *ctx = NULL;
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_TRACE, "error occurred "
- "while Creating a file %s", strerror (op_errno));
- }
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "error occurred "
+ "while creating directory %s",
+ strerror(op_errno));
+ }
- local = (marker_local_t *) frame->local;
+ local = (marker_local_t *)frame->local;
- frame->local = NULL;
+ frame->local = NULL;
+ priv = this->private;
- STACK_UNWIND_STRICT (mkdir, frame, op_ret, op_errno, inode,
- buf, preparent, postparent);
+ if (op_ret >= 0 && inode && (priv->feature_enabled & GF_QUOTA)) {
+ ctx = mq_inode_ctx_new(inode, this);
+ if (ctx == NULL) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "mq_inode_ctx_new "
+ "failed for %s",
+ uuid_utoa(inode->gfid));
+ op_ret = -1;
+ op_errno = ENOMEM;
+ }
+ }
- if (op_ret == -1 || local == NULL)
- goto out;
+ STACK_UNWIND_STRICT(mkdir, frame, op_ret, op_errno, inode, buf, preparent,
+ postparent, xdata);
+
+ if (op_ret == -1 || local == NULL)
+ goto out;
- priv = this->private;
+ if (gf_uuid_is_null(local->loc.gfid))
+ gf_uuid_copy(local->loc.gfid, buf->ia_gfid);
- if (priv->feature_enabled & GF_QUOTA)
- quota_set_inode_xattr (this, &local->loc);
+ if (priv->feature_enabled & GF_QUOTA)
+ mq_create_xattrs_txn(this, &local->loc, NULL);
- if (priv->feature_enabled & GF_XTIME)
- marker_xtime_update_marks (this, local);
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
out:
- marker_local_unref (local);
+ marker_local_unref(local);
- return 0;
+ return 0;
}
int
-marker_mkdir (call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
- dict_t *params)
+marker_mkdir(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ mode_t umask, dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled == 0)
- goto wind;
+ if (priv->feature_enabled == 0)
+ goto wind;
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ local = mem_get0(this->local_pool);
- MARKER_INIT_LOCAL (frame, local);
+ MARKER_INIT_LOCAL(frame, local);
- ret = loc_copy (&local->loc, loc);
+ ret = loc_copy(&local->loc, loc);
- if (ret == -1)
- goto err;
+ if (ret == -1)
+ goto err;
wind:
- STACK_WIND (frame, marker_mkdir_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->mkdir, loc, mode, params);
+ STACK_WIND(frame, marker_mkdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mkdir, loc, mode, umask, xdata);
- return 0;
+ return 0;
err:
- STACK_UNWIND_STRICT (mkdir, frame, -1, ENOMEM, NULL,
- NULL, NULL, NULL);
- return 0;
-}
+ MARKER_STACK_UNWIND(mkdir, frame, -1, ENOMEM, NULL, NULL, NULL, NULL, NULL);
+ return 0;
+}
int32_t
-marker_create_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, fd_t *fd, inode_t *inode,
- struct iatt *buf, struct iatt *preparent,
- struct iatt *postparent)
+marker_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
{
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
+ quota_inode_ctx_t *ctx = NULL;
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_TRACE, "error occurred "
- "while Creating a file %s", strerror (op_errno));
- }
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "error occurred "
+ "while creating file %s",
+ strerror(op_errno));
+ }
- local = (marker_local_t *) frame->local;
+ local = (marker_local_t *)frame->local;
- frame->local = NULL;
+ frame->local = NULL;
+ priv = this->private;
- STACK_UNWIND_STRICT (create, frame, op_ret, op_errno, fd, inode, buf,
- preparent, postparent);
+ if (op_ret >= 0 && inode && (priv->feature_enabled & GF_QUOTA)) {
+ ctx = mq_inode_ctx_new(inode, this);
+ if (ctx == NULL) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "mq_inode_ctx_new "
+ "failed for %s",
+ uuid_utoa(inode->gfid));
+ op_ret = -1;
+ op_errno = ENOMEM;
+ }
+ }
- if (op_ret == -1 || local == NULL)
- goto out;
+ STACK_UNWIND_STRICT(create, frame, op_ret, op_errno, fd, inode, buf,
+ preparent, postparent, xdata);
- priv = this->private;
+ if (op_ret == -1 || local == NULL)
+ goto out;
- if (priv->feature_enabled & GF_QUOTA)
- inspect_file_xattr (this, &local->loc, NULL, *buf);
+ if (gf_uuid_is_null(local->loc.gfid))
+ gf_uuid_copy(local->loc.gfid, buf->ia_gfid);
- if (priv->feature_enabled & GF_XTIME)
- marker_xtime_update_marks (this, local);
+ if (priv->feature_enabled & GF_QUOTA)
+ mq_create_xattrs_txn(this, &local->loc, buf);
+
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
out:
- marker_local_unref (local);
+ marker_local_unref(local);
- return 0;
+ return 0;
}
int32_t
-marker_create (call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
- mode_t mode, fd_t *fd, dict_t *params)
+marker_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ mode_t mode, mode_t umask, fd_t *fd, dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled == 0)
- goto wind;
+ if (priv->feature_enabled == 0)
+ goto wind;
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ local = mem_get0(this->local_pool);
- MARKER_INIT_LOCAL (frame, local);
+ MARKER_INIT_LOCAL(frame, local);
- ret = loc_copy (&local->loc, loc);
+ ret = loc_copy(&local->loc, loc);
- if (ret == -1)
- goto err;
+ if (ret == -1)
+ goto err;
wind:
- STACK_WIND (frame, marker_create_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->create, loc, flags, mode, fd,
- params);
- return 0;
+ STACK_WIND(frame, marker_create_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->create, loc, flags, mode, umask, fd,
+ xdata);
+ return 0;
err:
- STACK_UNWIND_STRICT (create, frame, -1, ENOMEM, NULL, NULL, NULL, NULL, NULL);
+ MARKER_STACK_UNWIND(create, frame, -1, ENOMEM, NULL, NULL, NULL, NULL, NULL,
+ NULL);
- return 0;
+ return 0;
}
-
int32_t
-marker_writev_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf)
+marker_writev_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
{
- marker_conf_t *priv = NULL;
- marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_TRACE, "error occurred "
- "while write, %s", strerror (op_errno));
- }
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "error occurred "
+ "while write, %s",
+ strerror(op_errno));
+ }
- local = (marker_local_t *) frame->local;
+ local = (marker_local_t *)frame->local;
- frame->local = NULL;
+ frame->local = NULL;
- STACK_UNWIND_STRICT (writev, frame, op_ret, op_errno, prebuf, postbuf);
+ STACK_UNWIND_STRICT(writev, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
- if (op_ret == -1 || local == NULL)
- goto out;
+ if (op_ret == -1 || local == NULL)
+ goto out;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled & GF_QUOTA)
- initiate_quota_txn (this, &local->loc);
+ if (priv->feature_enabled & GF_QUOTA)
+ mq_initiate_quota_txn(this, &local->loc, postbuf);
- if (priv->feature_enabled & GF_XTIME)
- marker_xtime_update_marks (this, local);
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
out:
- marker_local_unref (local);
+ marker_local_unref(local);
- return 0;
+ return 0;
}
int32_t
-marker_writev (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- struct iovec *vector,
- int32_t count,
- off_t offset,
- struct iobref *iobref)
+marker_writev(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iovec *vector, int32_t count, off_t offset, uint32_t flags,
+ struct iobref *iobref, dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled == 0)
- goto wind;
+ if (priv->feature_enabled == 0)
+ goto wind;
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ local = mem_get0(this->local_pool);
- MARKER_INIT_LOCAL (frame, local);
+ MARKER_INIT_LOCAL(frame, local);
- ret = marker_inode_loc_fill (fd->inode, &local->loc);
+ ret = marker_inode_loc_fill(fd->inode, &local->loc);
- if (ret == -1)
- goto err;
+ if (ret == -1)
+ goto err;
wind:
- STACK_WIND (frame, marker_writev_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->writev, fd, vector, count, offset,
- iobref);
- return 0;
+ STACK_WIND(frame, marker_writev_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->writev, fd, vector, count, offset,
+ flags, iobref, xdata);
+ return 0;
err:
- STACK_UNWIND_STRICT (writev, frame, -1, ENOMEM, NULL, NULL);
+ MARKER_STACK_UNWIND(writev, frame, -1, ENOMEM, NULL, NULL, NULL);
- return 0;
+ return 0;
}
+int32_t
+marker_rmdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ call_stub_t *stub = NULL;
+
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "error occurred "
+ "rmdir %s",
+ strerror(op_errno));
+ }
+
+ local = (marker_local_t *)frame->local;
+
+ frame->local = NULL;
+ priv = this->private;
+
+ if (op_ret == -1 || local == NULL)
+ goto out;
+
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
+
+ if (priv->feature_enabled & GF_QUOTA) {
+ /* If a 'rm -rf' is performed by a client, rmdir can be faster
+ than marker background mq_reduce_parent_size_txn.
+ In this case, as part of rmdir parent child association
+ will be removed in the server protocol.
+ This can lead to mq_reduce_parent_size_txn failures.
+
+ So perform mq_reduce_parent_size_txn in foreground
+ and unwind to server once txn is complete
+ */
+
+ stub = fop_rmdir_cbk_stub(frame, default_rmdir_cbk, op_ret, op_errno,
+ preparent, postparent, xdata);
+ mq_reduce_parent_size_txn(this, &local->loc, NULL, 1, stub);
+
+ if (stub) {
+ marker_local_unref(local);
+ return 0;
+ }
+ }
+
+out:
+ STACK_UNWIND_STRICT(rmdir, frame, op_ret, op_errno, preparent, postparent,
+ xdata);
+
+ marker_local_unref(local);
+
+ return 0;
+}
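The comment in the callback above describes the fix for the 'rm -rf' race: the pending reply is captured as a call stub, mq_reduce_parent_size_txn runs in the foreground, and the transaction resumes the stub (unwinding to the server) only once accounting is complete. A minimal, self-contained model of that defer-reply pattern, with hypothetical names (none of these are GlusterFS APIs):

    #include <stdio.h>

    /* stand-in for a call stub: a saved reply that can be resumed later */
    typedef struct {
        void (*resume)(int op_ret, int op_errno);
        int op_ret;
        int op_errno;
    } toy_stub_t;

    static void reply_rmdir(int op_ret, int op_errno)
    {
        printf("unwind rmdir: ret=%d errno=%d\n", op_ret, op_errno);
    }

    /* stand-in for mq_reduce_parent_size_txn(): here it runs synchronously
     * and resumes the stub itself once the accounting is finished */
    static void reduce_parent_size_txn(const char *path, toy_stub_t *stub)
    {
        printf("reduce accounted size of the parent of %s\n", path);
        stub->resume(stub->op_ret, stub->op_errno);
    }

    int main(void)
    {
        toy_stub_t stub = { reply_rmdir, 0, 0 };

        /* accounting first, reply to the client only afterwards */
        reduce_parent_size_txn("/vol/dir/removed", &stub);
        return 0;
    }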
int32_t
-marker_rmdir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *preparent,
- struct iatt *postparent)
+marker_rmdir(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+ dict_t *xdata)
{
- marker_conf_t *priv = NULL;
- marker_local_t *local = NULL;
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_TRACE, "error occurred "
- "rmdir %s", strerror (op_errno));
- }
+ priv = this->private;
- local = (marker_local_t *) frame->local;
+ if (priv->feature_enabled == 0)
+ goto wind;
- frame->local = NULL;
+ local = mem_get0(this->local_pool);
- STACK_UNWIND_STRICT (rmdir, frame, op_ret, op_errno, preparent,
- postparent);
+ MARKER_INIT_LOCAL(frame, local);
- if (op_ret == -1 || local == NULL)
- goto out;
+ ret = loc_copy(&local->loc, loc);
+
+ if (ret == -1)
+ goto err;
+wind:
+ STACK_WIND(frame, marker_rmdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rmdir, loc, flags, xdata);
+ return 0;
+err:
+ MARKER_STACK_UNWIND(rmdir, frame, -1, ENOMEM, NULL, NULL, NULL);
+
+ return 0;
+}
+
+int32_t
+marker_unlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ uint32_t nlink = -1;
+ GF_UNUSED int32_t ret = 0;
+ call_stub_t *stub = NULL;
+
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE, "%s occurred in unlink",
+ strerror(op_errno));
+ }
+
+ local = (marker_local_t *)frame->local;
+
+ frame->local = NULL;
+ priv = this->private;
+
+ if (op_ret == -1 || local == NULL)
+ goto out;
+
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
+
+ if (priv->feature_enabled & GF_QUOTA) {
+ if (local->skip_txn)
+ goto out;
+
+ if (xdata) {
+ ret = dict_get_uint32(xdata, GF_RESPONSE_LINK_COUNT_XDATA, &nlink);
+ if (ret) {
+ gf_log(this->name, GF_LOG_TRACE, "dict get failed %s ",
+ strerror(-ret));
+ }
+ }
- priv = this->private;
+ /* If a 'rm -rf' is performed by a client, unlink can be faster
+ than marker background mq_reduce_parent_size_txn.
+ In this case, as part of unlink parent child association
+ will be removed in the server protocol.
+ This can lead to mq_reduce_parent_size_txn failures.
- if (priv->feature_enabled & GF_QUOTA)
- reduce_parent_size (this, &local->loc);
+ So perform mq_reduce_parent_size_txn in foreground
+ and unwind to server once txn is complete
+ */
+
+ stub = fop_unlink_cbk_stub(frame, default_unlink_cbk, op_ret, op_errno,
+ preparent, postparent, xdata);
+ mq_reduce_parent_size_txn(this, &local->loc, NULL, nlink, stub);
+
+ if (stub) {
+ marker_local_unref(local);
+ return 0;
+ }
+ }
- if (priv->feature_enabled & GF_XTIME)
- marker_xtime_update_marks (this, local);
out:
- marker_local_unref (local);
+ STACK_UNWIND_STRICT(unlink, frame, op_ret, op_errno, preparent, postparent,
+ xdata);
- return 0;
+ marker_local_unref(local);
+
+ return 0;
}
int32_t
-marker_rmdir (call_frame_t *frame, xlator_t *this, loc_t *loc, int flags)
+marker_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflag,
+ dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
+ gf_boolean_t dict_free = _gf_false;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled == 0)
- goto wind;
+ if (priv->feature_enabled == 0)
+ goto unlink_wind;
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ local = mem_get0(this->local_pool);
+ local->xflag = xflag;
+ if (xdata)
+ local->xdata = dict_ref(xdata);
+ MARKER_INIT_LOCAL(frame, local);
- MARKER_INIT_LOCAL (frame, local);
+ ret = loc_copy(&local->loc, loc);
- ret = loc_copy (&local->loc, loc);
+ if (ret == -1)
+ goto err;
+
+ if (xdata && dict_get(xdata, GLUSTERFS_MARKER_DONT_ACCOUNT_KEY)) {
+ local->skip_txn = 1;
+ goto unlink_wind;
+ }
+
+ if (xdata == NULL) {
+ xdata = dict_new();
+ dict_free = _gf_true;
+ }
+
+ ret = dict_set_int32(xdata, GF_REQUEST_LINK_COUNT_XDATA, 1);
+ if (ret < 0)
+ goto err;
+
+unlink_wind:
+ STACK_WIND(frame, marker_unlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->unlink, loc, xflag, xdata);
+ goto out;
- if (ret == -1)
- goto err;
-wind:
- STACK_WIND (frame, marker_rmdir_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->rmdir, loc, flags);
- return 0;
err:
- STACK_UNWIND_STRICT (rmdir, frame, -1, ENOMEM, NULL, NULL);
+ MARKER_STACK_UNWIND(unlink, frame, -1, ENOMEM, NULL, NULL, NULL);
- return 0;
+out:
+ if (dict_free)
+ dict_unref(xdata);
+ return 0;
}
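Before winding, marker_unlink sets GF_REQUEST_LINK_COUNT_XDATA in xdata so the lower layers report the surviving link count, which marker_unlink_cbk reads back as GF_RESPONSE_LINK_COUNT_XDATA and feeds into mq_reduce_parent_size_txn. A toy model of that request/response hint, using a plain struct instead of dict_t (all toy_* names are hypothetical):

    #include <stdio.h>

    struct toy_xdata {
        int want_link_count;    /* request: "please report nlink" */
        unsigned int nlink;     /* response: links left after the unlink */
    };

    /* stand-in for the server-side unlink filling in the response key */
    static void toy_unlink(struct toy_xdata *xdata)
    {
        if (xdata->want_link_count)
            xdata->nlink = 0;   /* pretend the last link was removed */
    }

    /* stand-in for marker_unlink_cbk using the hint for accounting */
    static void toy_unlink_cbk(const struct toy_xdata *xdata)
    {
        printf("reduce parent size, surviving nlink = %u\n", xdata->nlink);
    }

    int main(void)
    {
        struct toy_xdata xdata = { 1, (unsigned int)-1 };

        toy_unlink(&xdata);      /* wind */
        toy_unlink_cbk(&xdata);  /* unwind */
        return 0;
    }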
-
int32_t
-marker_unlink_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *preparent,
- struct iatt *postparent)
+marker_link_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
{
- marker_conf_t *priv = NULL;
- marker_local_t *local = NULL;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_TRACE,
- "%s occurred in unlink", strerror (op_errno));
- }
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s occurred while "
+ "linking a file ",
+ strerror(op_errno));
+ }
- local = (marker_local_t *) frame->local;
+ local = (marker_local_t *)frame->local;
- frame->local = NULL;
+ frame->local = NULL;
- STACK_UNWIND_STRICT (unlink, frame, op_ret, op_errno, preparent,
- postparent);
+ STACK_UNWIND_STRICT(link, frame, op_ret, op_errno, inode, buf, preparent,
+ postparent, xdata);
- if (op_ret == -1 || local == NULL)
- goto out;
+ if (op_ret == -1 || local == NULL)
+ goto out;
- priv = this->private;
+ priv = this->private;
- if ((priv->feature_enabled & GF_QUOTA) && (local->ia_nlink == 1))
- reduce_parent_size (this, &local->loc);
+ if (priv->feature_enabled & GF_QUOTA) {
+ if (!local->skip_txn)
+ mq_create_xattrs_txn(this, &local->loc, buf);
+ }
- if (priv->feature_enabled & GF_XTIME)
- marker_xtime_update_marks (this, local);
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
out:
- marker_local_unref (local);
+ marker_local_unref(local);
- return 0;
+ return 0;
}
-
int32_t
-marker_unlink_stat_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *buf)
+marker_link(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ dict_t *xdata)
{
- marker_local_t *local = NULL;
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- if (op_ret < 0) {
- goto err;
- }
+ priv = this->private;
- local = frame->local;
- if (local == NULL) {
- goto err;
- }
+ if (priv->feature_enabled == 0)
+ goto wind;
- local->ia_nlink = buf->ia_nlink;
+ local = mem_get0(this->local_pool);
- STACK_WIND (frame, marker_unlink_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->unlink, &local->loc);
- return 0;
+ MARKER_INIT_LOCAL(frame, local);
+
+ ret = loc_copy(&local->loc, newloc);
+
+ if (ret == -1)
+ goto err;
+
+ if (xdata && dict_get(xdata, GLUSTERFS_MARKER_DONT_ACCOUNT_KEY))
+ local->skip_txn = 1;
+wind:
+ STACK_WIND(frame, marker_link_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->link, oldloc, newloc, xdata);
+ return 0;
err:
- STACK_UNWIND_STRICT (unlink, frame, -1, ENOMEM, NULL, NULL);
+ MARKER_STACK_UNWIND(link, frame, -1, ENOMEM, NULL, NULL, NULL, NULL, NULL);
- return 0;
+ return 0;
}
-
int32_t
-marker_unlink (call_frame_t *frame, xlator_t *this, loc_t *loc)
+marker_rename_done(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL, *oplocal = NULL;
+ loc_t newloc = {
+ 0,
+ };
+ marker_conf_t *priv = NULL;
+
+ local = frame->local;
+ oplocal = local->oplocal;
+
+ priv = this->private;
+
+ frame->local = NULL;
+
+ if (op_ret < 0) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "inodelk (UNLOCK) failed on path:%s (gfid:%s) (%s)",
+ oplocal->parent_loc.path,
+ uuid_utoa(oplocal->parent_loc.inode->gfid), strerror(op_errno));
+ }
+
+ if (local->err != 0)
+ goto err;
+
+ mq_reduce_parent_size_txn(this, &oplocal->loc, &oplocal->contribution, -1,
+ NULL);
+
+ if (local->loc.inode != NULL) {
+ /* If the destination file existed before the rename, it would have
+ * been unlinked while renaming the file
+ */
+ mq_reduce_parent_size_txn(this, &local->loc, NULL, local->ia_nlink,
+ NULL);
+ }
+
+ newloc.inode = inode_ref(oplocal->loc.inode);
+ newloc.path = gf_strdup(local->loc.path);
+ newloc.name = strrchr(newloc.path, '/');
+ if (newloc.name)
+ newloc.name++;
+ newloc.parent = inode_ref(local->loc.parent);
+
+ mq_create_xattrs_txn(this, &newloc, &local->buf);
+
+ loc_wipe(&newloc);
+
+ if (priv->feature_enabled & GF_XTIME) {
+ if (!local->loc.inode)
+ local->loc.inode = inode_ref(oplocal->loc.inode);
+ // update marks on oldpath
+ gf_uuid_copy(local->loc.gfid, oplocal->loc.inode->gfid);
+ marker_xtime_update_marks(this, oplocal);
+ marker_xtime_update_marks(this, local);
+ }
- priv = this->private;
+err:
+ marker_local_unref(local);
+ marker_local_unref(oplocal);
- if (priv->feature_enabled == 0)
- goto unlink_wind;
+ return 0;
+}
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+void
+marker_rename_release_oldp_lock(marker_local_t *local, xlator_t *this)
+{
+ marker_local_t *oplocal = NULL;
+ call_frame_t *lk_frame = NULL;
+ struct gf_flock lock = {
+ 0,
+ };
- MARKER_INIT_LOCAL (frame, local);
+ oplocal = local->oplocal;
+ lk_frame = local->lk_frame;
- ret = loc_copy (&local->loc, loc);
+ if (lk_frame == NULL)
+ goto err;
- if (ret == -1)
- goto err;
+ lock.l_type = F_UNLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 0;
+ lock.l_pid = 0;
- STACK_WIND (frame, marker_unlink_stat_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->stat, loc);
- return 0;
+ STACK_WIND(lk_frame, marker_rename_done, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->inodelk, this->name,
+ &oplocal->parent_loc, F_SETLKW, &lock, NULL);
-unlink_wind:
- STACK_WIND (frame, marker_unlink_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->unlink, loc);
- return 0;
-err:
- STACK_UNWIND_STRICT (unlink, frame, -1, ENOMEM, NULL, NULL);
+ return;
- return 0;
+err:
+ marker_local_unref(local);
+ marker_local_unref(oplocal);
}
+int32_t
+marker_rename_unwind(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ marker_local_t *local = NULL;
+ marker_local_t *oplocal = NULL;
+ quota_inode_ctx_t *ctx = NULL;
+ inode_contribution_t *contri = NULL;
+
+ local = frame->local;
+ oplocal = local->oplocal;
+ frame->local = NULL;
+
+ // Reset frame uid and gid if set.
+ if (cookie == (void *)_GF_UID_GID_CHANGED)
+ MARKER_RESET_UID_GID(frame, frame->root, local);
+
+ if (op_ret < 0)
+ local->err = op_errno ? op_errno : EINVAL;
+
+ if (local->stub != NULL) {
+ /* Remove contribution node from in-memory even if
+ * remove-xattr has failed as the rename is already performed
+ * if local->stub is set, which means rename was successful
+ */
+ (void)mq_inode_ctx_get(oplocal->loc.inode, this, &ctx);
+ if (ctx) {
+ contri = mq_get_contribution_node(oplocal->loc.parent, ctx);
+ if (contri) {
+ QUOTA_FREE_CONTRIBUTION_NODE(ctx, contri);
+ GF_REF_PUT(contri);
+ }
+ }
+
+ call_resume(local->stub);
+ local->stub = NULL;
+ local->err = 0;
+ } else if (local->err != 0) {
+ STACK_UNWIND_STRICT(rename, frame, -1, local->err, NULL, NULL, NULL,
+ NULL, NULL, NULL);
+ } else {
+ gf_log(this->name, GF_LOG_CRITICAL,
+ "continuation stub to unwind the call is absent, hence "
+ "call will be hung (call-stack id = %" PRIu64 ")",
+ frame->root->unique);
+ }
+
+ /* If there are in-progress writes on the old path during the rename
+ * operation, the update txn will update the wrong path if the lock
+ * is released before the rename unwind.
+ * So release the lock only after the rename unwind
+ */
+ marker_rename_release_oldp_lock(local, this);
+
+ return 0;
+}
int32_t
-marker_link_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, inode_t *inode,
- struct iatt *buf, struct iatt *preparent,
- struct iatt *postparent)
+marker_rename_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf,
+ struct iatt *preoldparent, struct iatt *postoldparent,
+ struct iatt *prenewparent, struct iatt *postnewparent,
+ dict_t *xdata)
{
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ marker_local_t *oplocal = NULL;
+ call_stub_t *stub = NULL;
+ int32_t ret = 0;
+ char contri_key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+ loc_t newloc = {
+ 0,
+ };
+
+ local = (marker_local_t *)frame->local;
+
+ if (local != NULL) {
+ oplocal = local->oplocal;
+ }
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_TRACE, "%s occured while "
- "linking a file ", strerror (op_errno));
+ priv = this->private;
+
+ if (op_ret < 0) {
+ if (local != NULL) {
+ local->err = op_errno;
}
- local = (marker_local_t *) frame->local;
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s occurred while "
+ "renaming a file ",
+ strerror(op_errno));
+ }
- frame->local = NULL;
+ if (priv->feature_enabled & GF_QUOTA) {
+ if ((op_ret < 0) || (local == NULL)) {
+ goto quota_err;
+ }
- STACK_UNWIND_STRICT (link, frame, op_ret, op_errno, inode, buf,
- preparent, postparent);
+ local->ia_nlink = 0;
+ if (xdata)
+ ret = dict_get_uint32(xdata, GF_RESPONSE_LINK_COUNT_XDATA,
+ &local->ia_nlink);
+
+ local->buf = *buf;
+ stub = fop_rename_cbk_stub(frame, default_rename_cbk, op_ret, op_errno,
+ buf, preoldparent, postoldparent,
+ prenewparent, postnewparent, xdata);
+ if (stub == NULL) {
+ local->err = ENOMEM;
+ goto quota_err;
+ }
- if (op_ret == -1 || local == NULL)
- goto out;
+ local->stub = stub;
+
+ GET_CONTRI_KEY(this, contri_key, oplocal->loc.parent->gfid, ret);
+ if (ret < 0) {
+ local->err = ENOMEM;
+ goto quota_err;
+ }
+
+ /* Removexattr requires uid and gid to be 0,
+ * reset them in the callback.
+ */
+ MARKER_SET_UID_GID(frame, local, frame->root);
+
+ newloc.inode = inode_ref(oplocal->loc.inode);
+ newloc.path = gf_strdup(local->loc.path);
+ newloc.name = strrchr(newloc.path, '/');
+ if (newloc.name)
+ newloc.name++;
+ newloc.parent = inode_ref(local->loc.parent);
+ gf_uuid_copy(newloc.gfid, oplocal->loc.inode->gfid);
+
+ STACK_WIND_COOKIE(
+ frame, marker_rename_unwind, frame->cookie, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->removexattr, &newloc, contri_key, NULL);
+
+ loc_wipe(&newloc);
+ } else {
+ frame->local = NULL;
+
+ STACK_UNWIND_STRICT(rename, frame, op_ret, op_errno, buf, preoldparent,
+ postoldparent, prenewparent, postnewparent, xdata);
- priv = this->private;
+ if ((op_ret < 0) || (local == NULL)) {
+ goto out;
+ }
- if (priv->feature_enabled & GF_QUOTA)
- initiate_quota_txn (this, &local->loc);
+ if (priv->feature_enabled & GF_XTIME) {
+ // update marks on oldpath
+ if (!local->loc.inode)
+ local->loc.inode = inode_ref(oplocal->loc.inode);
+ gf_uuid_copy(local->loc.gfid, oplocal->loc.inode->gfid);
+ marker_xtime_update_marks(this, oplocal);
+ marker_xtime_update_marks(this, local);
+ }
+ }
- if (priv->feature_enabled & GF_XTIME)
- marker_xtime_update_marks (this, local);
out:
- marker_local_unref (local);
+ if (!(priv->feature_enabled & GF_QUOTA)) {
+ marker_local_unref(local);
+ marker_local_unref(oplocal);
+ }
- return 0;
+ return 0;
+
+quota_err:
+ marker_rename_unwind(frame, NULL, this, 0, 0, NULL);
+ return 0;
+}
+
+int32_t
+marker_do_rename(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict, dict_t *xdata)
+{
+ marker_local_t *local = NULL;
+ marker_local_t *oplocal = NULL;
+ char contri_key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+ int keylen = 0;
+ quota_meta_t contribution = {
+ 0,
+ };
+
+ local = frame->local;
+ oplocal = local->oplocal;
+
+ // Reset frame uid and gid if set.
+ if (cookie == (void *)_GF_UID_GID_CHANGED)
+ MARKER_RESET_UID_GID(frame, frame->root, local);
+
+ if ((op_ret < 0) && (op_errno != ENOATTR) && (op_errno != ENODATA)) {
+ local->err = op_errno ? op_errno : EINVAL;
+ gf_log(this->name, GF_LOG_WARNING,
+ "fetching contribution values from %s (gfid:%s) "
+ "failed (%s)",
+ oplocal->loc.path, uuid_utoa(oplocal->loc.inode->gfid),
+ strerror(op_errno));
+ goto err;
+ }
+
+ GET_CONTRI_KEY(this, contri_key, oplocal->loc.parent->gfid, keylen);
+ if (keylen < 0) {
+ local->err = errno ? errno : ENOMEM;
+ goto err;
+ }
+ quota_dict_get_meta(dict, contri_key, keylen, &contribution);
+ oplocal->contribution = contribution;
+
+ STACK_WIND(frame, marker_rename_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rename, &oplocal->loc, &local->loc,
+ local->xdata);
+
+ return 0;
+
+err:
+ marker_rename_unwind(frame, NULL, this, 0, 0, NULL);
+ return 0;
}
int32_t
-marker_link (call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc)
+marker_get_oldpath_contribution(call_frame_t *lk_frame, void *cookie,
+ xlator_t *this, int32_t op_ret,
+ int32_t op_errno, dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ call_frame_t *frame = NULL;
+ marker_local_t *local = NULL;
+ marker_local_t *oplocal = NULL;
+ char contri_key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+ int32_t ret = 0;
+
+ local = lk_frame->local;
+ oplocal = local->oplocal;
+ frame = local->frame;
+
+ if (op_ret < 0) {
+ local->err = op_errno ? op_errno : EINVAL;
+ gf_log(this->name, GF_LOG_WARNING,
+ "cannot hold inodelk on %s (gfid:%s) (%s)", oplocal->loc.path,
+ uuid_utoa(oplocal->loc.inode->gfid), strerror(op_errno));
+ if (local->lk_frame) {
+ STACK_DESTROY(local->lk_frame->root);
+ local->lk_frame = NULL;
+ }
+ goto err;
+ }
- priv = this->private;
+ GET_CONTRI_KEY(this, contri_key, oplocal->loc.parent->gfid, ret);
+ if (ret < 0) {
+ local->err = errno ? errno : ENOMEM;
+ goto err;
+ }
- if (priv->feature_enabled == 0)
- goto wind;
+ /* getxattr requires uid and gid to be 0,
+ * reset them in the callback.
+ */
+ MARKER_SET_UID_GID(frame, local, frame->root);
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ if (gf_uuid_is_null(oplocal->loc.gfid))
+ gf_uuid_copy(oplocal->loc.gfid, oplocal->loc.inode->gfid);
- MARKER_INIT_LOCAL (frame, local);
+ GF_UUID_ASSERT(oplocal->loc.gfid);
- ret = loc_copy (&local->loc, newloc);
+ STACK_WIND_COOKIE(frame, marker_do_rename, frame->cookie, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->getxattr, &oplocal->loc,
+ contri_key, NULL);
- if (ret == -1)
- goto err;
-wind:
- STACK_WIND (frame, marker_link_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->link, oldloc, newloc);
- return 0;
+ return 0;
err:
- STACK_UNWIND_STRICT (link, frame, -1, ENOMEM, NULL, NULL, NULL, NULL);
-
- return 0;
+ marker_rename_unwind(frame, NULL, this, 0, 0, NULL);
+ return 0;
}
-
+/* For a marker_rename FOP, following is the algorithm used for Quota
+ * accounting. The use-case considered is:
+ * 1. rename (src, dst)
+ * 2. both src and dst exist
+ * 3. there are parallel operations on src and dst (let's say through fds
+ * opened on them before rename was initiated).
+ *
+ * PS: We've not thought through whether this algo works in the presence of
+ * hardlinks to src and/or dst.
+ *
+ * Algorithm:
+ * ==========
+ *
+ * 1) set inodelk on src-parent
+ * As part of rename operation, parent can change for the file.
+ * We need to remove contribution (both on disk xattr and in-memory one)
+ * to src-parent (and its ancestors) and add the contribution to dst-parent
+ * (and its ancestors). While we are doing these operations, contribution of
+ * the file/directory shouldn't be changing as we want to be sure that
+ * a) what we subtract from src-parent is exactly what we add to dst-parent
+ * b) we should subtract from src-parent exactly what we contributed to
+ * src-parent
+ * So, we hold a lock on src-parent to block any parallel transactions on
+ * src-inode (since that's the one which survives rename).
+ *
+ * If there are any parallel transactions on dst-inode they keep succeeding
+ * till the association of dst-inode with dst-parent is broken because of an
+ * inode_rename after unwind of rename fop from marker. Only after unwind
+ * (and hence inode_rename), we delete and subtract the contribution of
+ * dst-inode to dst-parent. That way we are making sure we subtract exactly
+ * what dst-inode contributed to dst-parent.
+ *
+ * 2) lookup contribution to src-parent on src-inode.
+ * We need to save the contribution info for use at step-8.
+ *
+ * 3) wind rename
+ * Perform rename on disk
+ *
+ * 4) remove xattr on src-loc
+ * After rename, parent can change, so
+ * need to remove xattrs storing contribution to src-parent.
+ *
+ * 5) remove contribution node corresponding to src-parent from the in-memory
+ * list.
+ * After rename, contri gfid can change and we have
+ * also removed xattr from file.
+ * We need to remove the in-memory contribution node to prevent updates to
+ * src-parent even after a successful rename
+ *
+ * 6) unwind rename
+ * This will ensure that rename is done in the server
+ * inode table. An inode_rename disassociates src-inode from src-parent and
+ * associates it with dst-parent. It also disassociates dst-inode from
+ * dst-parent. After inode_rename, inode_parent on src-inode will give
+ * dst-parent and inode_parent on dst-inode will return NULL (assuming
+ * dst-inode doesn't have any hardlinks).
+ *
+ * 7) release inodelk on src-parent
+ * Lock on src-parent should be released only after
+ * rename on disk, remove xattr and rename_unwind (and hence inode_rename)
+ * operations. If lock is released before inode_rename, a parallel
+ * transaction on src-inode can still update src-parent (as inode_parent on
+ * src-inode can still return src-parent). This would make the
+ * contribution from src-inode to src-parent stored in step-2 stale.
+ *
+ * 8) Initiate mq_reduce_parent_size_txn on src-parent to remove contribution
+ * of src-inode to src-parent. We use the contribution stored in step-2.
+ * Since we have held the lock on src-parent all the way from step-2 through
+ * inode_rename, we can be sure that a parallel transaction wouldn't have
+ * added a delta to src-parent.
+ *
+ * 9) Initiate mq_reduce_parent_size_txn on dst-parent if dst-inode exists.
+ * The size reduced from dst-parent and its ancestors is the
+ * size stored as contribution to dst-parent in dst-inode.
+ * If the destination file had existed, rename will unlink the
+ * destination file as part of its operation.
+ * We need to reduce the size on the dest parent similarly to
+ * unlink. Since we initiate the reduce-parent-size transaction after
+ * inode_rename, we can be sure that a parallel transaction wouldn't add
+ * delta to dst-parent while we are reducing the contribution of dst-inode
+ * from its ancestors before rename.
+ *
+ * 10) create contribution xattr to dst-parent on src-inode.
+ */
int32_t
-marker_rename_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *buf,
- struct iatt *preoldparent, struct iatt *postoldparent,
- struct iatt *prenewparent, struct iatt *postnewparent)
-{
- marker_conf_t *priv = NULL;
- marker_local_t *local = NULL;
- marker_local_t *oplocal = NULL;
- loc_t newloc = {0, };
-
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_TRACE, "%s occured while "
- "renaming a file ", strerror (op_errno));
- }
+marker_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ dict_t *xdata)
+{
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_local_t *oplocal = NULL;
+ marker_conf_t *priv = NULL;
+ struct gf_flock lock = {
+ 0,
+ };
- local = (marker_local_t *) frame->local;
+ priv = this->private;
- frame->local = NULL;
+ if (priv->feature_enabled == 0)
+ goto rename_wind;
- STACK_UNWIND_STRICT (rename, frame, op_ret, op_errno, buf, preoldparent,
- postoldparent, prenewparent, postnewparent);
+ local = mem_get0(this->local_pool);
- if (op_ret == -1 || local == NULL)
- goto out;
+ MARKER_INIT_LOCAL(frame, local);
- oplocal = local->oplocal;
- local->oplocal = NULL;
+ oplocal = mem_get0(this->local_pool);
- priv = this->private;
+ MARKER_INIT_LOCAL(frame, oplocal);
- if (priv->feature_enabled & GF_QUOTA) {
- reduce_parent_size (this, &oplocal->loc);
+ frame->local = local;
- if (local->loc.inode != NULL) {
- reduce_parent_size (this, &local->loc);
- }
+ local->oplocal = marker_local_ref(oplocal);
- newloc.inode = inode_ref (oplocal->loc.inode);
- newloc.path = gf_strdup (local->loc.path);
- newloc.name = gf_strdup (local->loc.name);
- newloc.parent = inode_ref (local->loc.parent);
- newloc.ino = oplocal->loc.inode->ino;
+ ret = loc_copy(&local->loc, newloc);
+ if (ret < 0)
+ goto err;
- quota_rename_update_newpath (this, &newloc);
+ ret = loc_copy(&oplocal->loc, oldloc);
+ if (ret < 0)
+ goto err;
- loc_wipe (&newloc);
- }
+ if (!(priv->feature_enabled & GF_QUOTA)) {
+ goto rename_wind;
+ }
+
+ ret = mq_inode_loc_fill(NULL, newloc->parent, &local->parent_loc);
+ if (ret < 0)
+ goto err;
+
+ ret = mq_inode_loc_fill(NULL, oldloc->parent, &oplocal->parent_loc);
+ if (ret < 0)
+ goto err;
+
+ lock.l_len = 0;
+ lock.l_start = 0;
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+
+ local->xdata = xdata ? dict_ref(xdata) : dict_new();
+ ret = dict_set_int32(local->xdata, GF_REQUEST_LINK_COUNT_XDATA, 1);
+ if (ret < 0)
+ goto err;
+
+ local->frame = frame;
+ local->lk_frame = create_frame(this, this->ctx->pool);
+ if (local->lk_frame == NULL)
+ goto err;
+
+ local->lk_frame->root->uid = 0;
+ local->lk_frame->root->gid = 0;
+ local->lk_frame->local = local;
+ set_lk_owner_from_ptr(&local->lk_frame->root->lk_owner,
+ local->lk_frame->root);
+
+ STACK_WIND(local->lk_frame, marker_get_oldpath_contribution,
+ FIRST_CHILD(this), FIRST_CHILD(this)->fops->inodelk, this->name,
+ &oplocal->parent_loc, F_SETLKW, &lock, NULL);
+
+ return 0;
+
+rename_wind:
+ STACK_WIND(frame, marker_rename_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rename, oldloc, newloc, xdata);
+
+ return 0;
+err:
+ MARKER_STACK_UNWIND(rename, frame, -1, ENOMEM, NULL, NULL, NULL, NULL, NULL,
+ NULL);
+ marker_local_unref(oplocal);
+
+ return 0;
+}
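marker_rename and the callbacks above implement the ten-step algorithm from the block comment. The following standalone sketch compresses that ordering into one function so the sequence is easy to follow; every helper is a printf stand-in, not a real mq_* or locking call, and the sizes are made up:

    #include <stdio.h>

    static void lock_parent(const char *p)   { printf("inodelk LOCK   %s\n", p); }
    static void unlock_parent(const char *p) { printf("inodelk UNLOCK %s\n", p); }
    static long read_contribution(const char *f, const char *p)
    { printf("getxattr: contribution of %s towards %s\n", f, p); return 4096; }
    static void rename_on_disk(const char *s, const char *d)
    { printf("rename %s -> %s\n", s, d); }
    static void drop_contribution(const char *f, const char *p)
    { printf("removexattr + drop in-memory contri of %s towards %s\n", f, p); }
    static void unwind_rename(void)
    { printf("unwind rename (inode_rename happens in the server)\n"); }
    static void reduce_parent(const char *p, long size)
    { printf("reduce accounted size of %s by %ld\n", p, size); }
    static void add_contribution(const char *f, const char *p)
    { printf("create contribution xattr of %s towards %s\n", f, p); }

    int main(void)
    {
        long src_contri, dst_contri = 512;                    /* made-up sizes */
        int dst_existed = 1;

        lock_parent("src-parent");                            /* step 1  */
        src_contri = read_contribution("src", "src-parent");  /* step 2  */
        rename_on_disk("src", "dst");                         /* step 3  */
        drop_contribution("src", "src-parent");               /* steps 4-5 */
        unwind_rename();                                      /* step 6  */
        unlock_parent("src-parent");                          /* step 7  */
        reduce_parent("src-parent", src_contri);              /* step 8  */
        if (dst_existed)
            reduce_parent("dst-parent", dst_contri);          /* step 9  */
        add_contribution("renamed file", "dst-parent");       /* step 10 */
        return 0;
    }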
+
+int32_t
+marker_truncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
+
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s occurred while "
+ "truncating a file ",
+ strerror(op_errno));
+ }
+
+ local = (marker_local_t *)frame->local;
+
+ frame->local = NULL;
+
+ STACK_UNWIND_STRICT(truncate, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+
+ if (op_ret == -1 || local == NULL)
+ goto out;
+
+ priv = this->private;
+
+ if (priv->feature_enabled & GF_QUOTA) {
+ /* DHT Rebalance process, at the end of migration will
+ * first make the src file as a linkto file and then
+ * truncate the file. By doing a truncate after making the
+ * src file as linkto file, the contri which is already
+ * accounted is left over.
+ * So, we need to account for the linkto file when a truncate
+ * happens, thereby updating the contri properly.
+ * By passing NULL for postbuf, mq_prevalidate does not check
+ * for linkto file.
+ * Same happens with ftruncate as well.
+ */
+ if (postbuf && IS_DHT_LINKFILE_MODE(postbuf))
+ mq_initiate_quota_txn(this, &local->loc, NULL);
+ else
+ mq_initiate_quota_txn(this, &local->loc, postbuf);
+ }
+
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
- if (priv->feature_enabled & GF_XTIME) {
- //update marks on oldpath
- marker_xtime_update_marks (this, oplocal);
- marker_xtime_update_marks (this, local);
- }
out:
- marker_local_unref (local);
- marker_local_unref (oplocal);
- return 0;
+ marker_local_unref(local);
+
+ return 0;
}
+int32_t
+marker_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset,
+ dict_t *xdata)
+{
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
+
+ priv = this->private;
+
+ if (priv->feature_enabled == 0)
+ goto wind;
+
+ local = mem_get0(this->local_pool);
+
+ MARKER_INIT_LOCAL(frame, local);
+
+ ret = loc_copy(&local->loc, loc);
+
+ if (ret == -1)
+ goto err;
+wind:
+ STACK_WIND(frame, marker_truncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->truncate, loc, offset, xdata);
+ return 0;
+err:
+ MARKER_STACK_UNWIND(truncate, frame, -1, ENOMEM, NULL, NULL, NULL);
+
+ return 0;
+}
int32_t
-marker_quota_removexattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+marker_ftruncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
{
- marker_local_t *local = NULL, *oplocal = NULL;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- if ((op_ret < 0) && (op_errno != ENOATTR)) {
- goto unwind;
- }
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s occurred while "
+ "truncating a file ",
+ strerror(op_errno));
+ }
- local = frame->local;
- oplocal = local->oplocal;
+ local = (marker_local_t *)frame->local;
- STACK_WIND (frame, marker_rename_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->rename, &oplocal->loc,
- &local->loc);
- return 0;
+ frame->local = NULL;
-unwind:
- STACK_UNWIND_STRICT (rename, frame, -1, ENOMEM, NULL,
- NULL, NULL, NULL, NULL);
- if (local) {
- local->oplocal = NULL;
- marker_local_unref (local);
- GF_FREE (local);
- }
- if (oplocal) {
- marker_local_unref (oplocal);
- GF_FREE (oplocal);
- }
- return 0;
+ STACK_UNWIND_STRICT(ftruncate, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
+
+ if (op_ret == -1 || local == NULL)
+ goto out;
+
+ priv = this->private;
+
+ if (priv->feature_enabled & GF_QUOTA) {
+ if (postbuf && IS_DHT_LINKFILE_MODE(postbuf))
+ mq_initiate_quota_txn(this, &local->loc, NULL);
+ else
+ mq_initiate_quota_txn(this, &local->loc, postbuf);
+ }
+
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
+out:
+ marker_local_unref(local);
+
+ return 0;
}
+int32_t
+marker_ftruncate(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ dict_t *xdata)
+{
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
+
+ priv = this->private;
+
+ if (priv->feature_enabled == 0)
+ goto wind;
+
+ local = mem_get0(this->local_pool);
+
+ MARKER_INIT_LOCAL(frame, local);
+
+ ret = marker_inode_loc_fill(fd->inode, &local->loc);
+
+ if (ret == -1)
+ goto err;
+wind:
+ STACK_WIND(frame, marker_ftruncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->ftruncate, fd, offset, xdata);
+ return 0;
+err:
+ MARKER_STACK_UNWIND(ftruncate, frame, -1, ENOMEM, NULL, NULL, NULL);
+
+ return 0;
+}
int32_t
-marker_rename (call_frame_t *frame, xlator_t *this, loc_t *oldloc,
- loc_t *newloc)
+marker_symlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_local_t *oplocal = NULL;
- marker_conf_t *priv = NULL;
- char contri_key[512] = {0,};
+ marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ quota_inode_ctx_t *ctx = NULL;
- priv = this->private;
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s occurred while "
+ "creating symlinks ",
+ strerror(op_errno));
+ }
- if (priv->feature_enabled == 0)
- goto rename_wind;
+ local = (marker_local_t *)frame->local;
- GET_CONTRI_KEY (contri_key, oldloc->parent->gfid, ret);
- if (ret < 0)
- goto err;
+ frame->local = NULL;
+ priv = this->private;
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ if (op_ret >= 0 && inode && (priv->feature_enabled & GF_QUOTA)) {
+ ctx = mq_inode_ctx_new(inode, this);
+ if (ctx == NULL) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "mq_inode_ctx_new "
+ "failed for %s",
+ uuid_utoa(inode->gfid));
+ op_ret = -1;
+ op_errno = ENOMEM;
+ }
+ }
- MARKER_INIT_LOCAL (frame, local);
+ STACK_UNWIND_STRICT(symlink, frame, op_ret, op_errno, inode, buf, preparent,
+ postparent, xdata);
- ALLOCATE_OR_GOTO (oplocal, marker_local_t, err);
+ if (op_ret == -1 || local == NULL)
+ goto out;
- MARKER_INIT_LOCAL (frame, oplocal);
+ if (gf_uuid_is_null(local->loc.gfid))
+ gf_uuid_copy(local->loc.gfid, buf->ia_gfid);
- frame->local = local;
+ if (priv->feature_enabled & GF_QUOTA) {
+ mq_create_xattrs_txn(this, &local->loc, buf);
+ }
- local->oplocal = oplocal;
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
+out:
+ marker_local_unref(local);
- ret = loc_copy (&local->loc, newloc);
- if (ret == -1)
- goto err;
+ return 0;
+}
- ret = loc_copy (&oplocal->loc, oldloc);
- if (ret == -1)
- goto err;
+int
+marker_symlink(call_frame_t *frame, xlator_t *this, const char *linkpath,
+ loc_t *loc, mode_t umask, dict_t *xdata)
+{
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- STACK_WIND (frame, marker_quota_removexattr_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->removexattr, oldloc, contri_key);
- return 0;
+ priv = this->private;
-rename_wind:
- STACK_WIND (frame, marker_rename_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->rename, oldloc, newloc);
+ if (priv->feature_enabled == 0)
+ goto wind;
- return 0;
+ local = mem_get0(this->local_pool);
+
+ MARKER_INIT_LOCAL(frame, local);
+
+ ret = loc_copy(&local->loc, loc);
+
+ if (ret == -1)
+ goto err;
+wind:
+ STACK_WIND(frame, marker_symlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->symlink, linkpath, loc, umask, xdata);
+ return 0;
err:
- STACK_UNWIND_STRICT (rename, frame, -1, ENOMEM, NULL,
- NULL, NULL, NULL, NULL);
+ MARKER_STACK_UNWIND(symlink, frame, -1, ENOMEM, NULL, NULL, NULL, NULL,
+ NULL);
- return 0;
+ return 0;
}
-
int32_t
-marker_truncate_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf)
+marker_mknod_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
{
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
+ quota_inode_ctx_t *ctx = NULL;
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_TRACE, "%s occured while "
- "truncating a file ", strerror (op_errno));
- }
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s occurred with "
+ "mknod ",
+ strerror(op_errno));
+ }
- local = (marker_local_t *) frame->local;
+ local = (marker_local_t *)frame->local;
- frame->local = NULL;
+ frame->local = NULL;
+ priv = this->private;
- STACK_UNWIND_STRICT (truncate, frame, op_ret, op_errno, prebuf,
- postbuf);
+ if (op_ret >= 0 && inode && (priv->feature_enabled & GF_QUOTA)) {
+ ctx = mq_inode_ctx_new(inode, this);
+ if (ctx == NULL) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "mq_inode_ctx_new "
+ "failed for %s",
+ uuid_utoa(inode->gfid));
+ op_ret = -1;
+ op_errno = ENOMEM;
+ }
+ }
- if (op_ret == -1 || local == NULL)
- goto out;
+ STACK_UNWIND_STRICT(mknod, frame, op_ret, op_errno, inode, buf, preparent,
+ postparent, xdata);
- priv = this->private;
+ if (op_ret == -1 || local == NULL)
+ goto out;
- if (priv->feature_enabled & GF_QUOTA)
- initiate_quota_txn (this, &local->loc);
+ if (gf_uuid_is_null(local->loc.gfid))
+ gf_uuid_copy(local->loc.gfid, buf->ia_gfid);
- if (priv->feature_enabled & GF_XTIME)
- marker_xtime_update_marks (this, local);
+ if ((priv->feature_enabled & GF_QUOTA) && (S_ISREG(local->mode))) {
+ mq_create_xattrs_txn(this, &local->loc, buf);
+ }
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
out:
- marker_local_unref (local);
+ marker_local_unref(local);
- return 0;
+ return 0;
}
-int32_t
-marker_truncate (call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset)
+int
+marker_mknod(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ dev_t rdev, mode_t umask, dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled == 0)
- goto wind;
+ if (priv->feature_enabled == 0)
+ goto wind;
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ local = mem_get0(this->local_pool);
- MARKER_INIT_LOCAL (frame, local);
+ MARKER_INIT_LOCAL(frame, local);
- ret = loc_copy (&local->loc, loc);
+ ret = loc_copy(&local->loc, loc);
- if (ret == -1)
- goto err;
+ local->mode = mode;
+
+ if (ret == -1)
+ goto err;
wind:
- STACK_WIND (frame, marker_truncate_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->truncate, loc, offset);
- return 0;
+ STACK_WIND(frame, marker_mknod_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mknod, loc, mode, rdev, umask, xdata);
+ return 0;
err:
- STACK_UNWIND_STRICT (truncate, frame, -1, ENOMEM, NULL, NULL);
+ MARKER_STACK_UNWIND(mknod, frame, -1, ENOMEM, NULL, NULL, NULL, NULL, NULL);
- return 0;
+ return 0;
}
-
int32_t
-marker_ftruncate_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf)
+marker_fallocate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
{
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_TRACE, "%s occured while "
- "truncating a file ", strerror (op_errno));
- }
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s occurred while "
+ "fallocating a file ",
+ strerror(op_errno));
+ }
- local = (marker_local_t *) frame->local;
+ local = (marker_local_t *)frame->local;
- frame->local = NULL;
+ frame->local = NULL;
- STACK_UNWIND_STRICT (ftruncate, frame, op_ret, op_errno, prebuf,
- postbuf);
+ STACK_UNWIND_STRICT(fallocate, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
- if (op_ret == -1 || local == NULL)
- goto out;
+ if (op_ret == -1 || local == NULL)
+ goto out;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled & GF_QUOTA)
- initiate_quota_txn (this, &local->loc);
+ if (priv->feature_enabled & GF_QUOTA)
+ mq_initiate_quota_txn(this, &local->loc, postbuf);
- if (priv->feature_enabled & GF_XTIME)
- marker_xtime_update_marks (this, local);
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
out:
- marker_local_unref (local);
+ marker_local_unref(local);
- return 0;
+ return 0;
}
int32_t
-marker_ftruncate (call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset)
+marker_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t mode,
+ off_t offset, size_t len, dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled == 0)
- goto wind;
+ if (priv->feature_enabled == 0)
+ goto wind;
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ local = mem_get0(this->local_pool);
- MARKER_INIT_LOCAL (frame, local);
+ MARKER_INIT_LOCAL(frame, local);
- ret = marker_inode_loc_fill (fd->inode, &local->loc);
+ ret = marker_inode_loc_fill(fd->inode, &local->loc);
- if (ret == -1)
- goto err;
+ if (ret == -1)
+ goto err;
wind:
- STACK_WIND (frame, marker_ftruncate_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->ftruncate, fd, offset);
- return 0;
+ STACK_WIND(frame, marker_fallocate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fallocate, fd, mode, offset, len,
+ xdata);
+ return 0;
err:
- STACK_UNWIND_STRICT (ftruncate, frame, -1, ENOMEM, NULL, NULL);
+ MARKER_STACK_UNWIND(fallocate, frame, -1, ENOMEM, NULL, NULL, NULL);
- return 0;
+ return 0;
}
-
int32_t
-marker_symlink_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, inode_t *inode,
- struct iatt *buf, struct iatt *preparent,
- struct iatt *postparent)
+marker_discard_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
{
- marker_conf_t *priv = NULL;
- marker_local_t *local = NULL;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_TRACE, "%s occured while "
- "creating symlinks ", strerror (op_errno));
- }
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE, "%s occurred during discard",
+ strerror(op_errno));
+ }
- local = (marker_local_t *) frame->local;
+ local = (marker_local_t *)frame->local;
- frame->local = NULL;
+ frame->local = NULL;
- STACK_UNWIND_STRICT (symlink, frame, op_ret, op_errno, inode, buf,
- preparent, postparent);
+ STACK_UNWIND_STRICT(discard, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
- if (op_ret == -1 || local == NULL)
- goto out;
+ if (op_ret == -1 || local == NULL)
+ goto out;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled & GF_QUOTA)
- inspect_file_xattr (this, &local->loc, NULL, *buf);
+ if (priv->feature_enabled & GF_QUOTA)
+ mq_initiate_quota_txn(this, &local->loc, postbuf);
- if (priv->feature_enabled & GF_XTIME)
- marker_xtime_update_marks (this, local);
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
out:
- marker_local_unref (local);
+ marker_local_unref(local);
- return 0;
+ return 0;
}
-int
-marker_symlink (call_frame_t *frame, xlator_t *this, const char *linkpath,
- loc_t *loc, dict_t *params)
+int32_t
+marker_discard(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ size_t len, dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled == 0)
- goto wind;
+ if (priv->feature_enabled == 0)
+ goto wind;
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ local = mem_get0(this->local_pool);
- MARKER_INIT_LOCAL (frame, local);
+ MARKER_INIT_LOCAL(frame, local);
- ret = loc_copy (&local->loc, loc);
+ ret = marker_inode_loc_fill(fd->inode, &local->loc);
- if (ret == -1)
- goto err;
+ if (ret == -1)
+ goto err;
wind:
- STACK_WIND (frame, marker_symlink_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->symlink, linkpath, loc, params);
- return 0;
+ STACK_WIND(frame, marker_discard_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->discard, fd, offset, len, xdata);
+ return 0;
err:
- STACK_UNWIND_STRICT (symlink, frame, -1, ENOMEM, NULL,
- NULL, NULL, NULL);
- return 0;
-}
+ MARKER_STACK_UNWIND(discard, frame, -1, ENOMEM, NULL, NULL, NULL);
+ return 0;
+}
int32_t
-marker_mknod_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, inode_t *inode,
- struct iatt *buf, struct iatt *preparent,
- struct iatt *postparent)
+marker_zerofill_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
{
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_TRACE, "%s occured while "
- "creating symlinks ", strerror (op_errno));
- }
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE, "%s occurred during zerofill",
+ strerror(op_errno));
+ }
- local = (marker_local_t *) frame->local;
+ local = (marker_local_t *)frame->local;
- frame->local = NULL;
+ frame->local = NULL;
- STACK_UNWIND_STRICT (mknod, frame, op_ret, op_errno, inode,
- buf, preparent, postparent);
+ STACK_UNWIND_STRICT(zerofill, frame, op_ret, op_errno, prebuf, postbuf,
+ xdata);
- if (op_ret == -1 || local == NULL)
- goto out;
+ if (op_ret == -1 || local == NULL)
+ goto out;
- priv = this->private;
+ priv = this->private;
- if ((priv->feature_enabled & GF_QUOTA) && (S_ISREG (local->mode))) {
- inspect_file_xattr (this, &local->loc, NULL, *buf);
- }
+ if (priv->feature_enabled & GF_QUOTA)
+ mq_initiate_quota_txn(this, &local->loc, postbuf);
- if (priv->feature_enabled & GF_XTIME)
- marker_xtime_update_marks (this, local);
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
out:
- marker_local_unref (local);
+ marker_local_unref(local);
- return 0;
+ return 0;
}
-int
-marker_mknod (call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
- dev_t rdev, dict_t *parms)
+int32_t
+marker_zerofill(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ off_t len, dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled == 0)
- goto wind;
+ if (priv->feature_enabled == 0)
+ goto wind;
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ local = mem_get0(this->local_pool);
- MARKER_INIT_LOCAL (frame, local);
+ MARKER_INIT_LOCAL(frame, local);
- ret = loc_copy (&local->loc, loc);
+ ret = marker_inode_loc_fill(fd->inode, &local->loc);
- local->mode = mode;
-
- if (ret == -1)
- goto err;
+ if (ret == -1)
+ goto err;
wind:
- STACK_WIND (frame, marker_mknod_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->mknod, loc, mode, rdev, parms);
- return 0;
+ STACK_WIND(frame, marker_zerofill_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->zerofill, fd, offset, len, xdata);
+ return 0;
err:
- STACK_UNWIND_STRICT (mknod, frame, -1, ENOMEM, NULL,
- NULL, NULL, NULL);
- return 0;
-}
+ MARKER_STACK_UNWIND(zerofill, frame, -1, ENOMEM, NULL, NULL, NULL);
+ return 0;
+}
/* when a call from the special client is received on
* key trusted.glusterfs.volume-mark with value "RESET"
@@ -1320,759 +2323,1246 @@ err:
* timestamp file.
*/
int32_t
-call_from_sp_client_to_reset_tmfile (call_frame_t *frame,
- xlator_t *this,
- dict_t *dict)
+call_from_sp_client_to_reset_tmfile(call_frame_t *frame, xlator_t *this,
+ dict_t *dict)
{
- int32_t fd = 0;
- int32_t op_ret = 0;
- int32_t op_errno = 0;
- data_t *data = NULL;
- marker_conf_t *priv = NULL;
-
- if (frame == NULL || this == NULL || dict == NULL)
- return -1;
+ int32_t fd = 0;
+ int32_t op_ret = 0;
+ int32_t op_errno = 0;
+ data_t *data = NULL;
+ marker_conf_t *priv = NULL;
- priv = this->private;
+ if (frame == NULL || this == NULL || dict == NULL)
+ return -1;
- data = dict_get (dict, "trusted.glusterfs.volume-mark");
- if (data == NULL)
- return -1;
+ priv = this->private;
- if (frame->root->pid != -1) {
- op_ret = -1;
- op_errno = EPERM;
+ data = dict_get(dict, "trusted.glusterfs.volume-mark");
+ if (data == NULL)
+ return -1;
- goto out;
+ if (frame->root->pid != GF_CLIENT_PID_GSYNCD) {
+ op_ret = -1;
+ op_errno = EPERM;
+
+ goto out;
+ }
+
+ if (data->len == 0 ||
+ (data->len == 5 && memcmp(data->data, "RESET", 5) == 0)) {
+ fd = open(priv->timestamp_file, O_WRONLY | O_TRUNC);
+ if (fd != -1) {
+ /* TODO check whether the O_TRUNC would update the
+             * timestamps on a zero length file on all machines.
+ */
+ sys_close(fd);
}
- if (data->len == 0 || (data->len == 5 &&
- memcmp (data->data, "RESET", 5) == 0)) {
- fd = open (priv->timestamp_file, O_WRONLY|O_TRUNC);
- if (fd != -1) {
- /* TODO check whether the O_TRUNC would update the
-                         * timestamps on a zero length file on all machines.
- */
- close (fd);
- }
-
- if (fd != -1 || errno == ENOENT) {
- op_ret = 0;
- op_errno = 0;
- } else {
- op_ret = -1;
- op_errno = errno;
- }
+ if (fd != -1 || errno == ENOENT) {
+ op_ret = 0;
+ op_errno = 0;
} else {
- op_ret = -1;
- op_errno = EINVAL;
+ op_ret = -1;
+ op_errno = errno;
}
+ } else {
+ op_ret = -1;
+ op_errno = EINVAL;
+ }
out:
- STACK_UNWIND_STRICT (setxattr, frame, op_ret, op_errno);
+ STACK_UNWIND_STRICT(setxattr, frame, op_ret, op_errno, NULL);
- return 0;
+ return 0;
}
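
The RESET branch above boils down to portable libc logic: truncate the timestamp file when it exists, treat a missing file (ENOENT) as success, and report anything else as a real failure. A minimal standalone sketch of that decision follows; the path used in main() is only an illustrative placeholder, not the configured timestamp-file.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Sketch: reset a timestamp file the way the RESET branch above does it --
 * truncate if present, and treat a missing file (ENOENT) as success. */
static int
reset_timestamp_file(const char *path)
{
    int fd = open(path, O_WRONLY | O_TRUNC);

    if (fd != -1)
        close(fd);

    if (fd != -1 || errno == ENOENT)
        return 0;          /* reset done (or nothing to reset) */

    return -errno;         /* propagate the real failure */
}

int
main(void)
{
    /* "/tmp/marker.tstamp" is only an illustrative path. */
    int ret = reset_timestamp_file("/tmp/marker.tstamp");
    printf("reset returned %d (%s)\n", ret, ret ? strerror(-ret) : "ok");
    return 0;
}
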
-
int32_t
-marker_setxattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+marker_setxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_TRACE, "%s occured while "
- "creating symlinks ", strerror (op_errno));
- }
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s occurred in "
+ "setxattr ",
+ strerror(op_errno));
+ }
- local = (marker_local_t *) frame->local;
+ local = (marker_local_t *)frame->local;
- frame->local = NULL;
+ frame->local = NULL;
- STACK_UNWIND_STRICT (setxattr, frame, op_ret, op_errno);
+ STACK_UNWIND_STRICT(setxattr, frame, op_ret, op_errno, xdata);
- if (op_ret == -1 || local == NULL)
- goto out;
+ if (op_ret == -1 || local == NULL)
+ goto out;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled & GF_XTIME)
- marker_xtime_update_marks (this, local);
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
out:
- marker_local_unref (local);
+ marker_local_unref(local);
- return 0;
+ return 0;
+}
+
+int
+remove_quota_keys(dict_t *dict, char *k, data_t *v, void *data)
+{
+ call_frame_t *frame = data;
+ marker_local_t *local = frame->local;
+ xlator_t *this = frame->this;
+ marker_conf_t *priv = NULL;
+ char ver_str[NAME_MAX] = {
+ 0,
+ };
+ char *dot = NULL;
+ int ret = -1;
+
+ priv = this->private;
+
+    /* If quota is enabled immediately after being disabled,
+     * quota healing starts creating new xattrs before the
+     * cleanup operation completes. So check whether the
+     * xattr is a new one, and do not remove it if its
+     * version matches the current quota version.
+     */
+ if ((priv->feature_enabled & GF_QUOTA) && priv->version > 0) {
+ snprintf(ver_str, sizeof(ver_str), ".%d", priv->version);
+ dot = strrchr(k, '.');
+ if (dot && !strcmp(dot, ver_str))
+ return 0;
+ }
+
+ ret = syncop_removexattr(FIRST_CHILD(this), &local->loc, k, 0, NULL);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "%s: Failed to remove "
+ "extended attribute: %s",
+ local->loc.path, k);
+ return -1;
+ }
+ return 0;
+}
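
remove_quota_keys() keeps any quota xattr whose trailing ".N" suffix matches the currently active quota version and removes the rest, so keys recreated by quota healing survive the cleanup. A self-contained sketch of that suffix comparison, with invented keys and an assumed active version of 2:

#include <stdio.h>
#include <string.h>

/* Sketch of the version check in remove_quota_keys(): an xattr key is kept
 * when its trailing ".N" suffix matches the currently active quota version,
 * and removed otherwise.  Keys and version below are illustrative only. */
static int
should_remove_key(const char *key, int active_version)
{
    char ver_str[64];
    const char *dot;

    if (active_version > 0) {
        snprintf(ver_str, sizeof(ver_str), ".%d", active_version);
        dot = strrchr(key, '.');
        if (dot && strcmp(dot, ver_str) == 0)
            return 0;   /* current-version key: keep it */
    }
    return 1;           /* stale key: remove it */
}

int
main(void)
{
    const char *keys[] = {"trusted.glusterfs.quota.size.2",
                          "trusted.glusterfs.quota.size.1",
                          "trusted.glusterfs.quota.dirty"};
    for (int i = 0; i < 3; i++)
        printf("%-37s -> %s\n", keys[i],
               should_remove_key(keys[i], 2) ? "remove" : "keep");
    return 0;
}
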
+
+int
+quota_xattr_cleaner_cbk(int ret, call_frame_t *frame, void *args)
+{
+ dict_t *xdata = args;
+ int op_ret = -1;
+ int op_errno = 0;
+
+ op_ret = (ret < 0) ? -1 : 0;
+ op_errno = -ret;
+
+ MARKER_STACK_UNWIND(setxattr, frame, op_ret, op_errno, xdata);
+ return ret;
+}
+
+int
+quota_xattr_cleaner(void *args)
+{
+ struct synctask *task = NULL;
+ call_frame_t *frame = NULL;
+ xlator_t *this = NULL;
+ marker_local_t *local = NULL;
+ dict_t *xdata = NULL;
+ int ret = -1;
+
+ task = synctask_get();
+ if (!task)
+ goto out;
+
+ frame = task->frame;
+ this = frame->this;
+ local = frame->local;
+
+ ret = syncop_listxattr(FIRST_CHILD(this), &local->loc, &xdata, NULL, NULL);
+ if (ret == -1) {
+ ret = -errno;
+ goto out;
+ }
+
+ ret = dict_foreach_fnmatch(xdata, "trusted.glusterfs.quota.*",
+ remove_quota_keys, frame);
+ if (ret == -1) {
+ ret = -errno;
+ goto out;
+ }
+ ret = dict_foreach_fnmatch(xdata, PGFID_XATTR_KEY_PREFIX "*",
+ remove_quota_keys, frame);
+ if (ret == -1) {
+ ret = -errno;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (xdata)
+ dict_unref(xdata);
+
+ return ret;
+}
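
quota_xattr_cleaner() selects the keys to delete with dict_foreach_fnmatch() and two glob patterns. The same matching can be sketched with fnmatch(3); the key list below is invented, and "trusted.pgfid." is assumed here as the expansion of PGFID_XATTR_KEY_PREFIX:

#include <fnmatch.h>
#include <stdio.h>

/* Sketch of the pattern matching behind dict_foreach_fnmatch() above:
 * only keys matching the quota / pgfid patterns are handed to the
 * removal callback.  The key list is made up for illustration. */
int
main(void)
{
    const char *patterns[] = {"trusted.glusterfs.quota.*", "trusted.pgfid.*"};
    const char *keys[] = {"trusted.glusterfs.quota.size.1",
                          "trusted.pgfid.00000000-0000-0000-0000-000000000001",
                          "trusted.glusterfs.dht"};

    for (int i = 0; i < 3; i++) {
        int matched = 0;
        for (int j = 0; j < 2; j++)
            if (fnmatch(patterns[j], keys[i], 0) == 0)
                matched = 1;
        printf("%-55s %s\n", keys[i], matched ? "cleaned up" : "left alone");
    }
    return 0;
}
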
+
+int
+marker_do_xattr_cleanup(call_frame_t *frame, xlator_t *this, dict_t *xdata,
+ loc_t *loc)
+{
+ int ret = -1;
+ marker_local_t *local = NULL;
+
+ local = mem_get0(this->local_pool);
+ if (!local)
+ goto out;
+
+ MARKER_INIT_LOCAL(frame, local);
+
+ loc_copy(&local->loc, loc);
+ ret = synctask_new(this->ctx->env, quota_xattr_cleaner,
+ quota_xattr_cleaner_cbk, frame, xdata);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "Failed to create synctask "
+ "for cleaning up quota extended attributes");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret)
+ MARKER_STACK_UNWIND(setxattr, frame, -1, ENOMEM, xdata);
+
+ return ret;
+}
+
+static gf_boolean_t
+marker_xattr_cleanup_cmd(dict_t *dict)
+{
+ return (dict_get(dict, VIRTUAL_QUOTA_XATTR_CLEANUP_KEY) != NULL);
}
int32_t
-marker_setxattr (call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
- int32_t flags)
+marker_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
+ int32_t flags, dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
+ int op_errno = ENOMEM;
+
+ priv = this->private;
+
+ if (marker_xattr_cleanup_cmd(dict)) {
+ if (frame->root->uid != 0 || frame->root->gid != 0) {
+ op_errno = EPERM;
+ ret = -1;
+ goto err;
+ }
- priv = this->private;
+ /* The following function does the cleanup and then unwinds the
+         * corresponding call */
+ loc_path(loc, NULL);
+ marker_do_xattr_cleanup(frame, this, xdata, loc);
+ return 0;
+ }
- if (priv->feature_enabled == 0)
- goto wind;
+ ret = marker_key_replace_with_ver(this, dict);
+ if (ret < 0)
+ goto err;
- ret = call_from_sp_client_to_reset_tmfile (frame, this, dict);
- if (ret == 0)
- return 0;
+ if (priv->feature_enabled == 0)
+ goto wind;
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ ret = call_from_sp_client_to_reset_tmfile(frame, this, dict);
+ if (ret == 0)
+ return 0;
- MARKER_INIT_LOCAL (frame, local);
+ local = mem_get0(this->local_pool);
- ret = loc_copy (&local->loc, loc);
+ MARKER_INIT_LOCAL(frame, local);
- if (ret == -1)
- goto err;
+ ret = loc_copy(&local->loc, loc);
+
+ if (ret == -1)
+ goto err;
wind:
- STACK_WIND (frame, marker_setxattr_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->setxattr, loc, dict, flags);
- return 0;
+ STACK_WIND(frame, marker_setxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setxattr, loc, dict, flags, xdata);
+ return 0;
err:
- STACK_UNWIND_STRICT (setxattr, frame, -1, ENOMEM);
+ MARKER_STACK_UNWIND(setxattr, frame, -1, op_errno, NULL);
- return 0;
+ return 0;
}
-
int32_t
-marker_fsetxattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+marker_fsetxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_TRACE, "%s occured while "
- "creating symlinks ", strerror (op_errno));
- }
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s occurred in "
+ "fsetxattr",
+ strerror(op_errno));
+ }
- local = (marker_local_t *) frame->local;
+ local = (marker_local_t *)frame->local;
- frame->local = NULL;
+ frame->local = NULL;
- STACK_UNWIND_STRICT (fsetxattr, frame, op_ret, op_errno);
+ STACK_UNWIND_STRICT(fsetxattr, frame, op_ret, op_errno, xdata);
- if (op_ret == -1 || local == NULL)
- goto out;
+ if (op_ret == -1 || local == NULL)
+ goto out;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled & GF_XTIME)
- marker_xtime_update_marks (this, local);
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
out:
- marker_local_unref (local);
+ marker_local_unref(local);
- return 0;
+ return 0;
}
int32_t
-marker_fsetxattr (call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *dict,
- int32_t flags)
+marker_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *dict,
+ int32_t flags, dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled == 0)
- goto wind;
+ if (priv->feature_enabled == 0)
+ goto wind;
- ret = call_from_sp_client_to_reset_tmfile (frame, this, dict);
- if (ret == 0)
- return 0;
+ ret = call_from_sp_client_to_reset_tmfile(frame, this, dict);
+ if (ret == 0)
+ return 0;
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ local = mem_get0(this->local_pool);
- MARKER_INIT_LOCAL (frame, local);
+ MARKER_INIT_LOCAL(frame, local);
- ret = marker_inode_loc_fill (fd->inode, &local->loc);
+ ret = marker_inode_loc_fill(fd->inode, &local->loc);
- if (ret == -1)
- goto err;
+ if (ret == -1)
+ goto err;
wind:
- STACK_WIND (frame, marker_fsetxattr_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags);
- return 0;
+ STACK_WIND(frame, marker_fsetxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags, xdata);
+ return 0;
err:
- STACK_UNWIND_STRICT (fsetxattr, frame, -1, ENOMEM);
+ MARKER_STACK_UNWIND(fsetxattr, frame, -1, ENOMEM, NULL);
- return 0;
+ return 0;
}
-
int32_t
-marker_fsetattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *statpre,
- struct iatt *statpost)
+marker_fsetattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *statpre,
+ struct iatt *statpost, dict_t *xdata)
{
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_ERROR, "%s occured while "
- "creating symlinks ", strerror (op_errno));
- }
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s occurred in "
+ "fsetattr ",
+ strerror(op_errno));
+ }
- local = (marker_local_t *) frame->local;
+ local = (marker_local_t *)frame->local;
- frame->local = NULL;
+ frame->local = NULL;
- STACK_UNWIND_STRICT (fsetattr, frame, op_ret, op_errno, statpre,
- statpost);
+ STACK_UNWIND_STRICT(fsetattr, frame, op_ret, op_errno, statpre, statpost,
+ xdata);
- if (op_ret == -1 || local == NULL)
- goto out;
+ if (op_ret == -1 || local == NULL)
+ goto out;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled & GF_XTIME)
- marker_xtime_update_marks (this, local);
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
out:
- marker_local_unref (local);
+ marker_local_unref(local);
- return 0;
+ return 0;
}
-
int32_t
-marker_fsetattr (call_frame_t *frame, xlator_t *this, fd_t *fd,
- struct iatt *stbuf, int32_t valid)
+marker_fsetattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iatt *stbuf, int32_t valid, dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled == 0)
- goto wind;
+ if (priv->feature_enabled == 0)
+ goto wind;
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ local = mem_get0(this->local_pool);
- MARKER_INIT_LOCAL (frame, local);
+ MARKER_INIT_LOCAL(frame, local);
- ret = marker_inode_loc_fill (fd->inode, &local->loc);
+ ret = marker_inode_loc_fill(fd->inode, &local->loc);
- if (ret == -1)
- goto err;
+ if (ret == -1)
+ goto err;
wind:
- STACK_WIND (frame, marker_fsetattr_cbk, FIRST_CHILD (this),
- FIRST_CHILD (this)->fops->fsetattr, fd, stbuf, valid);
- return 0;
+ STACK_WIND(frame, marker_fsetattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetattr, fd, stbuf, valid, xdata);
+ return 0;
err:
- STACK_UNWIND_STRICT (fsetattr, frame, -1, ENOMEM, NULL, NULL);
+ MARKER_STACK_UNWIND(fsetattr, frame, -1, ENOMEM, NULL, NULL, NULL);
- return 0;
+ return 0;
}
-
int32_t
-marker_setattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *statpre,
- struct iatt *statpost)
+marker_setattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *statpre,
+ struct iatt *statpost, dict_t *xdata)
{
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- local = (marker_local_t *) frame->local;
+ local = (marker_local_t *)frame->local;
- frame->local = NULL;
+ frame->local = NULL;
- if (op_ret == -1) {
- gf_log (this->name, ((op_errno == ENOENT) ? GF_LOG_DEBUG :
- GF_LOG_ERROR),
- "%s occured during setattr of %s",
- strerror (op_errno),
- (local ? local->loc.path : "<nul>"));
- }
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE, "%s occurred during setattr of %s",
+ strerror(op_errno), (local ? local->loc.path : "<nul>"));
+ }
- STACK_UNWIND_STRICT (setattr, frame, op_ret, op_errno, statpre,
- statpost);
+ STACK_UNWIND_STRICT(setattr, frame, op_ret, op_errno, statpre, statpost,
+ xdata);
- if (op_ret == -1 || local == NULL)
- goto out;
+ if (op_ret == -1 || local == NULL)
+ goto out;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled & GF_XTIME)
- marker_xtime_update_marks (this, local);
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
out:
- marker_local_unref (local);
+ marker_local_unref(local);
- return 0;
+ return 0;
}
int32_t
-marker_setattr (call_frame_t *frame, xlator_t *this, loc_t *loc,
- struct iatt *stbuf, int32_t valid)
+marker_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ struct iatt *stbuf, int32_t valid, dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled == 0)
- goto wind;
+ if (priv->feature_enabled == 0)
+ goto wind;
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ local = mem_get0(this->local_pool);
- MARKER_INIT_LOCAL (frame, local);
+ MARKER_INIT_LOCAL(frame, local);
- ret = loc_copy (&local->loc, loc);
+ ret = loc_copy(&local->loc, loc);
- if (ret == -1)
- goto err;
+ if (ret == -1)
+ goto err;
wind:
- STACK_WIND (frame, marker_setattr_cbk, FIRST_CHILD (this),
- FIRST_CHILD (this)->fops->setattr, loc, stbuf, valid);
- return 0;
+ STACK_WIND(frame, marker_setattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setattr, loc, stbuf, valid, xdata);
+ return 0;
err:
- STACK_UNWIND_STRICT (setattr, frame, -1, ENOMEM, NULL, NULL);
+ MARKER_STACK_UNWIND(setattr, frame, -1, ENOMEM, NULL, NULL, NULL);
- return 0;
+ return 0;
}
-
int32_t
-marker_removexattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+marker_removexattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_ERROR, "%s occured while "
- "creating symlinks ", strerror (op_errno));
- }
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE,
+ "%s occurred while "
+ "removing extended attribute",
+ strerror(op_errno));
+ }
- local = (marker_local_t *) frame->local;
+ local = (marker_local_t *)frame->local;
- frame->local = NULL;
+ frame->local = NULL;
- STACK_UNWIND_STRICT (removexattr, frame, op_ret, op_errno);
+ STACK_UNWIND_STRICT(removexattr, frame, op_ret, op_errno, xdata);
- if (op_ret == -1 || local == NULL)
- goto out;
+ if (op_ret == -1 || local == NULL)
+ goto out;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled & GF_XTIME)
- marker_xtime_update_marks (this, local);
+ if (priv->feature_enabled & GF_XTIME)
+ marker_xtime_update_marks(this, local);
out:
- marker_local_unref (local);
+ marker_local_unref(local);
- return 0;
+ return 0;
}
int32_t
-marker_removexattr (call_frame_t *frame, xlator_t *this, loc_t *loc,
- const char *name)
+marker_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, dict_t *xdata)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
-
- priv = this->private;
+ int32_t ret = -1;
+ int32_t i = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
+ char key[QUOTA_KEY_MAX] = {
+ 0,
+ };
+
+ priv = this->private;
+
+ if (name) {
+ for (i = 0; mq_ext_xattrs[i]; i++) {
+ if (strcmp(name, mq_ext_xattrs[i]))
+ continue;
+
+ GET_QUOTA_KEY(this, key, mq_ext_xattrs[i], ret);
+ if (ret < 0)
+ goto err;
+ name = key;
+ break;
+ }
+ }
- if (priv->feature_enabled == 0)
- goto wind;
+ if (priv->feature_enabled == 0)
+ goto wind;
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ local = mem_get0(this->local_pool);
- MARKER_INIT_LOCAL (frame, local);
+ MARKER_INIT_LOCAL(frame, local);
- ret = loc_copy (&local->loc, loc);
+ ret = loc_copy(&local->loc, loc);
- if (ret == -1)
- goto err;
+ if (ret == -1)
+ goto err;
wind:
- STACK_WIND (frame, marker_removexattr_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->removexattr, loc, name);
- return 0;
+ STACK_WIND(frame, marker_removexattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->removexattr, loc, name, xdata);
+ return 0;
err:
- STACK_UNWIND_STRICT (removexattr, frame, -1, ENOMEM);
+ MARKER_STACK_UNWIND(removexattr, frame, -1, ENOMEM, NULL);
- return 0;
+ return 0;
}
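
In marker_removexattr() above, GET_QUOTA_KEY rewrites an external quota xattr name into the versioned on-disk key before the fop is wound down, so removexattr acts on the key the brick actually stores. A rough standalone sketch of that mapping; appending ".N" only when the version is positive is an assumption taken from the suffix check in remove_quota_keys():

#include <stdio.h>
#include <string.h>

/* Sketch of the external-to-versioned key mapping done before winding the
 * removexattr fop down.  The key name and version are illustrative. */
static int
versioned_quota_key(char *out, size_t len, const char *name, int version)
{
    if (version > 0)
        return snprintf(out, len, "%s.%d", name, version);
    return snprintf(out, len, "%s", name);
}

int
main(void)
{
    char key[256];

    versioned_quota_key(key, sizeof(key), "trusted.glusterfs.quota.size", 2);
    printf("removexattr will act on: %s\n", key);
    return 0;
}
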
+static gf_boolean_t
+__has_quota_xattrs(dict_t *xattrs)
+{
+ if (dict_foreach_match(xattrs, _is_quota_internal_xattr, NULL,
+ dict_null_foreach_fn, NULL) > 0)
+ return _gf_true;
+
+ return _gf_false;
+}
int32_t
-marker_lookup_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, inode_t *inode,
- struct iatt *buf, dict_t *dict, struct iatt *postparent)
+marker_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, dict_t *dict, struct iatt *postparent)
{
- marker_conf_t *priv = NULL;
- marker_local_t *local = NULL;
-
- if (op_ret == -1) {
- gf_log (this->name, GF_LOG_TRACE, "lookup failed with %s",
- strerror (op_errno));
+ marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ dict_t *xattrs = NULL;
+ quota_inode_ctx_t *ctx = NULL;
+ int32_t ret = -1;
+
+ priv = this->private;
+ local = (marker_local_t *)frame->local;
+ frame->local = NULL;
+
+ if (op_ret == -1) {
+ gf_log(this->name, GF_LOG_TRACE, "lookup failed with %s",
+ strerror(op_errno));
+ goto unwind;
+ }
+
+ ret = marker_key_set_ver(this, dict);
+ if (ret < 0) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+
+ if (dict && __has_quota_xattrs(dict)) {
+ xattrs = dict_copy_with_ref(dict, NULL);
+ if (!xattrs) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ } else {
+ marker_filter_internal_xattrs(this, xattrs);
}
+ } else if (dict) {
+ xattrs = dict_ref(dict);
+ }
- local = (marker_local_t *) frame->local;
-
- frame->local = NULL;
+ if (op_ret >= 0 && inode && (priv->feature_enabled & GF_QUOTA)) {
+ ctx = mq_inode_ctx_new(inode, this);
+ if (ctx == NULL) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "mq_inode_ctx_new "
+ "failed for %s",
+ uuid_utoa(inode->gfid));
+ op_ret = -1;
+ op_errno = ENOMEM;
+ }
+ }
- STACK_UNWIND_STRICT (lookup, frame, op_ret, op_errno, inode, buf,
- dict, postparent);
+unwind:
+ STACK_UNWIND_STRICT(lookup, frame, op_ret, op_errno, inode, buf, xattrs,
+ postparent);
- if (op_ret == -1 || local == NULL)
- goto out;
+ if (op_ret == -1 || local == NULL)
+ goto out;
- priv = this->private;
+    /* copy the gfid from the stat structure instead of the inode,
+     * since on a fresh lookup the inode has not yet been linked
+     * into the inode table; that linking happens in
+     * protocol/server.
+     */
+ if (gf_uuid_is_null(local->loc.gfid))
+ gf_uuid_copy(local->loc.gfid, buf->ia_gfid);
- if (priv->feature_enabled & GF_QUOTA) {
- quota_xattr_state (this, &local->loc, dict, *buf);
- }
+ if (priv->feature_enabled & GF_QUOTA) {
+ mq_xattr_state(this, &local->loc, dict, buf);
+ }
out:
- marker_local_unref (local);
+ marker_local_unref(local);
+ if (xattrs)
+ dict_unref(xattrs);
- return 0;
+ return 0;
}
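
As the comment in marker_lookup_cbk() notes, on a fresh lookup the loc may not carry a gfid yet, so it is copied from the returned iatt before the quota state is inspected. A toy illustration of that null-check-then-copy, using a plain 16-byte array in place of the real uuid type:

#include <stdio.h>
#include <string.h>

/* Sketch of the gfid handling in marker_lookup_cbk(): copy the gfid from
 * the returned stat only when the loc's gfid is still all-zero.  The type
 * here is a simplified byte array, not the GlusterFS uuid type. */
typedef unsigned char gfid_t[16];

static int
gfid_is_null(const gfid_t g)
{
    static const gfid_t zero = {0};
    return memcmp(g, zero, sizeof(gfid_t)) == 0;
}

int
main(void)
{
    gfid_t loc_gfid = {0};                        /* fresh lookup */
    gfid_t stat_gfid = {0xde, 0xad, 0xbe, 0xef};  /* from the iatt */

    if (gfid_is_null(loc_gfid))
        memcpy(loc_gfid, stat_gfid, sizeof(gfid_t));

    printf("loc gfid now starts with %02x%02x%02x%02x\n",
           loc_gfid[0], loc_gfid[1], loc_gfid[2], loc_gfid[3]);
    return 0;
}
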
int32_t
-marker_lookup (call_frame_t *frame, xlator_t *this,
- loc_t *loc, dict_t *xattr_req)
+marker_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ dict_t *xattr_req)
{
- int32_t ret = 0;
- marker_local_t *local = NULL;
- marker_conf_t *priv = NULL;
+ int32_t ret = 0;
+ marker_local_t *local = NULL;
+ marker_conf_t *priv = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->feature_enabled == 0)
- goto wind;
+ xattr_req = xattr_req ? dict_ref(xattr_req) : dict_new();
+ if (!xattr_req)
+ goto err;
- ALLOCATE_OR_GOTO (local, marker_local_t, err);
+ ret = marker_key_replace_with_ver(this, xattr_req);
+ if (ret < 0)
+ goto err;
- MARKER_INIT_LOCAL (frame, local);
+ if (priv->feature_enabled == 0)
+ goto wind;
- ret = loc_copy (&local->loc, loc);
- if (ret == -1)
- goto err;
+ local = mem_get0(this->local_pool);
+ if (local == NULL)
+ goto err;
+
+ MARKER_INIT_LOCAL(frame, local);
+
+ ret = loc_copy(&local->loc, loc);
+ if (ret == -1)
+ goto err;
+
+ if ((priv->feature_enabled & GF_QUOTA))
+ mq_req_xattr(this, loc, xattr_req, NULL, NULL);
- if ((priv->feature_enabled & GF_QUOTA) && xattr_req)
- quota_req_xattr (this, loc, xattr_req);
wind:
- STACK_WIND (frame, marker_lookup_cbk, FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->lookup, loc, xattr_req);
- return 0;
+ STACK_WIND(frame, marker_lookup_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lookup, loc, xattr_req);
+
+ dict_unref(xattr_req);
+
+ return 0;
err:
- STACK_UNWIND_STRICT (lookup, frame, -1, 0, NULL, NULL, NULL, NULL);
+ MARKER_STACK_UNWIND(lookup, frame, -1, ENOMEM, NULL, NULL, NULL, NULL);
- return 0;
+ if (xattr_req)
+ dict_unref(xattr_req);
+
+ return 0;
}
-int32_t
-mem_acct_init (xlator_t *this)
+int
+marker_build_ancestry_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, gf_dirent_t *entries,
+ dict_t *xdata)
{
- int ret = -1;
+ gf_dirent_t *entry = NULL;
+ quota_inode_ctx_t *ctx = NULL;
+ int ret = -1;
- if (!this)
- return ret;
+ if ((op_ret <= 0) || (entries == NULL)) {
+ goto out;
+ }
- ret = xlator_mem_acct_init (this, gf_marker_mt_end + 1);
+ list_for_each_entry(entry, &entries->list, list)
+ {
+ if (entry->inode == NULL)
+ continue;
- if (ret != 0) {
- gf_log(this->name, GF_LOG_ERROR, "Memory accounting init"
- "failed");
- return ret;
+ ret = marker_key_set_ver(this, entry->dict);
+ if (ret < 0) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ break;
}
- return ret;
+ ctx = mq_inode_ctx_new(entry->inode, this);
+ if (ctx == NULL)
+ gf_log(this->name, GF_LOG_WARNING,
+ "mq_inode_ctx_new "
+ "failed for %s",
+ uuid_utoa(entry->inode->gfid));
+ }
+
+out:
+ STACK_UNWIND_STRICT(readdirp, frame, op_ret, op_errno, entries, xdata);
+ return 0;
}
+int
+marker_readdirp_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int op_ret, int op_errno, gf_dirent_t *entries,
+ dict_t *xdata)
+{
+ gf_dirent_t *entry = NULL;
+ marker_conf_t *priv = NULL;
+ marker_local_t *local = NULL;
+ loc_t loc = {
+ 0,
+ };
+ int ret = -1;
+ char *resolvedpath = NULL;
+ quota_inode_ctx_t *ctx = NULL;
+
+ if (op_ret <= 0)
+ goto unwind;
+
+ priv = this->private;
+ local = frame->local;
+
+ if (!(priv->feature_enabled & GF_QUOTA) || (local == NULL)) {
+ goto unwind;
+ }
+
+ list_for_each_entry(entry, &entries->list, list)
+ {
+ if ((strcmp(entry->d_name, ".") == 0) ||
+ (strcmp(entry->d_name, "..") == 0) || entry->inode == NULL)
+ continue;
+
+ loc.parent = inode_ref(local->loc.inode);
+ loc.inode = inode_ref(entry->inode);
+ ret = inode_path(loc.parent, entry->d_name, &resolvedpath);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "failed to get the "
+ "path for the entry %s",
+ entry->d_name);
+ loc_wipe(&loc);
+ continue;
+ }
+
+ loc.path = resolvedpath;
+ resolvedpath = NULL;
+
+ ctx = mq_inode_ctx_new(loc.inode, this);
+ if (ctx == NULL)
+ gf_log(this->name, GF_LOG_WARNING,
+ "mq_inode_ctx_new "
+ "failed for %s",
+ uuid_utoa(loc.inode->gfid));
+
+ mq_xattr_state(this, &loc, entry->dict, &entry->d_stat);
+ loc_wipe(&loc);
+
+ ret = marker_key_set_ver(this, entry->dict);
+ if (ret < 0) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto unwind;
+ }
+ }
+
+unwind:
+ MARKER_STACK_UNWIND(readdirp, frame, op_ret, op_errno, entries, xdata);
+
+ return 0;
+}
+
+int
+marker_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, dict_t *dict)
+{
+ marker_conf_t *priv = NULL;
+ loc_t loc = {
+ 0,
+ };
+ marker_local_t *local = NULL;
+ int ret = -1;
+
+ priv = this->private;
+
+ dict = dict ? dict_ref(dict) : dict_new();
+ if (!dict)
+ goto unwind;
+
+ ret = marker_key_replace_with_ver(this, dict);
+ if (ret < 0)
+ goto unwind;
+
+ if (dict_get(dict, GET_ANCESTRY_DENTRY_KEY)) {
+ STACK_WIND(frame, marker_build_ancestry_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readdirp, fd, size, offset, dict);
+ } else {
+ if (priv->feature_enabled & GF_QUOTA) {
+ local = mem_get0(this->local_pool);
+
+ MARKER_INIT_LOCAL(frame, local);
+
+ loc.parent = local->loc.inode = inode_ref(fd->inode);
+
+ mq_req_xattr(this, &loc, dict, NULL, NULL);
+ }
+
+ STACK_WIND(frame, marker_readdirp_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readdirp, fd, size, offset, dict);
+ }
+
+ dict_unref(dict);
+ return 0;
+unwind:
+ MARKER_STACK_UNWIND(readdirp, frame, -1, ENOMEM, NULL, NULL);
+ return 0;
+}
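
marker_readdirp_cbk() skips ".", ".." and entries without an inode before creating quota contexts or reading xattr state. A small standalone sketch of that filter over an invented entry list:

#include <stdio.h>
#include <string.h>

/* Sketch of the entry filter in marker_readdirp_cbk(): "." and ".." and
 * entries without an inode are skipped before any quota bookkeeping. */
static int
should_process_entry(const char *name, int has_inode)
{
    if (!has_inode)
        return 0;
    if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0)
        return 0;
    return 1;
}

int
main(void)
{
    struct { const char *name; int has_inode; } entries[] = {
        {".", 1}, {"..", 1}, {"file1", 1}, {"stale", 0},
    };

    for (unsigned i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
        printf("%-6s -> %s\n", entries[i].name,
               should_process_entry(entries[i].name, entries[i].has_inode)
                   ? "process" : "skip");
    return 0;
}
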
int32_t
-init_xtime_priv (xlator_t *this, dict_t *options)
+mem_acct_init(xlator_t *this)
{
- data_t *data = NULL;
- int32_t ret = -1;
- marker_conf_t *priv = NULL;
+ int ret = -1;
- GF_VALIDATE_OR_GOTO ("marker", this, out);
- GF_VALIDATE_OR_GOTO (this->name, options, out);
- GF_VALIDATE_OR_GOTO (this->name, this->private, out);
+ if (!this)
+ return ret;
- priv = this->private;
+ ret = xlator_mem_acct_init(this, gf_marker_mt_end + 1);
- if((data = dict_get (options, VOLUME_UUID)) != NULL) {
- priv->volume_uuid = data->data;
+ if (ret != 0) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "Memory accounting init"
+ " failed");
+ return ret;
+ }
- ret = uuid_parse (priv->volume_uuid, priv->volume_uuid_bin);
- if (ret == -1) {
- gf_log (this->name, GF_LOG_ERROR, "invalid volume uuid %s", priv->volume_uuid);
- goto out;
- }
+ return ret;
+}
- ret = gf_asprintf (& (priv->marker_xattr), "%s.%s.%s",
- MARKER_XATTR_PREFIX, priv->volume_uuid, XTIME);
+int32_t
+init_xtime_priv(xlator_t *this, dict_t *options)
+{
+ int32_t ret = -1;
+ marker_conf_t *priv = NULL;
+ char *tmp_opt = NULL;
- if (ret == -1){
- priv->marker_xattr = NULL;
+ GF_VALIDATE_OR_GOTO("marker", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, options, out);
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
- gf_log (this->name, GF_LOG_ERROR,
- "Failed to allocate memory");
- goto out;
- }
+ priv = this->private;
- gf_log (this->name, GF_LOG_DEBUG,
- "the volume-uuid = %s", priv->volume_uuid);
- } else {
- priv->volume_uuid = NULL;
+ ret = dict_get_str(options, "volume-uuid", &tmp_opt);
- gf_log (this->name, GF_LOG_ERROR,
- "please specify the volume-uuid"
- "in the translator options");
+ if (ret) {
+ priv->volume_uuid = NULL;
+ tmp_opt = "";
- return -1;
- }
+ gf_log(this->name, GF_LOG_ERROR,
+ "please specify the volume-uuid"
+ "in the translator options");
- if ((data = dict_get (options, TIMESTAMP_FILE)) != NULL) {
- priv->timestamp_file = data->data;
+ return -1;
+ }
+ gf_asprintf(&priv->volume_uuid, "%s", tmp_opt);
- gf_log (this->name, GF_LOG_DEBUG,
- "the timestamp-file is = %s",
- priv->timestamp_file);
+ ret = gf_uuid_parse(priv->volume_uuid, priv->volume_uuid_bin);
- } else {
- priv->timestamp_file = NULL;
+ if (ret == -1) {
+ gf_log(this->name, GF_LOG_ERROR, "invalid volume uuid %s",
+ priv->volume_uuid);
+ goto out;
+ }
- gf_log (this->name, GF_LOG_ERROR,
- "please specify the timestamp-file"
- "in the translator options");
+ ret = gf_asprintf(&(priv->marker_xattr), "%s.%s.%s", MARKER_XATTR_PREFIX,
+ priv->volume_uuid, XTIME);
- goto out;
- }
+ if (ret == -1) {
+ priv->marker_xattr = NULL;
+ goto out;
+ }
- ret = 0;
+ gf_log(this->name, GF_LOG_DEBUG, "volume-uuid = %s", priv->volume_uuid);
+
+ ret = dict_get_str(options, "timestamp-file", &tmp_opt);
+ if (ret) {
+ priv->timestamp_file = NULL;
+ tmp_opt = "";
+
+ gf_log(this->name, GF_LOG_ERROR,
+ "please specify the timestamp-file"
+ "in the translator options");
+
+ goto out;
+ }
+
+ ret = gf_asprintf(&priv->timestamp_file, "%s", tmp_opt);
+ if (ret == -1) {
+ priv->timestamp_file = NULL;
+ goto out;
+ }
+
+ gf_log(this->name, GF_LOG_DEBUG, "the timestamp-file is = %s",
+ priv->timestamp_file);
+
+ ret = 0;
out:
- return ret;
+ return ret;
}
void
-marker_xtime_priv_cleanup (xlator_t *this)
+marker_xtime_priv_cleanup(xlator_t *this)
{
- marker_conf_t *priv = NULL;
+ marker_conf_t *priv = NULL;
- GF_VALIDATE_OR_GOTO ("marker", this, out);
+ GF_VALIDATE_OR_GOTO("marker", this, out);
- priv = (marker_conf_t *) this->private;
+ priv = (marker_conf_t *)this->private;
- GF_VALIDATE_OR_GOTO (this->name, priv, out);
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
- if (priv->volume_uuid != NULL)
- GF_FREE (priv->volume_uuid);
+ GF_FREE(priv->volume_uuid);
- if (priv->timestamp_file != NULL)
- GF_FREE (priv->timestamp_file);
+ GF_FREE(priv->timestamp_file);
- if (priv->marker_xattr != NULL)
- GF_FREE (priv->marker_xattr);
+ GF_FREE(priv->marker_xattr);
out:
- return;
+ return;
}
void
-marker_priv_cleanup (xlator_t *this)
+marker_priv_cleanup(xlator_t *this)
{
- marker_conf_t *priv = NULL;
+ marker_conf_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO("marker", this, out);
+
+ priv = (marker_conf_t *)this->private;
- GF_VALIDATE_OR_GOTO ("marker", this, out);
+ GF_VALIDATE_OR_GOTO(this->name, priv, out);
- priv = (marker_conf_t *) this->private;
+ marker_xtime_priv_cleanup(this);
- GF_VALIDATE_OR_GOTO (this->name, priv, out);
+ LOCK_DESTROY(&priv->lock);
- marker_xtime_priv_cleanup (this);
+ GF_FREE(priv);
- LOCK_DESTROY (&priv->lock);
+ if (this->local_pool) {
+ mem_pool_destroy(this->local_pool);
+ this->local_pool = NULL;
+ }
- GF_FREE (priv);
out:
- return;
+ return;
}
int32_t
-reconfigure (xlator_t *this, dict_t *options)
-{
- int32_t ret = -1;
- data_t *data = NULL;
- gf_boolean_t flag = _gf_false;
- marker_conf_t *priv = NULL;
-
- GF_ASSERT (this);
- GF_ASSERT (this->private);
-
- priv = this->private;
-
- priv->feature_enabled = 0;
-
- GF_VALIDATE_OR_GOTO (this->name, options, out);
-
- data = dict_get (options, "quota");
- if (data) {
- ret = gf_string2boolean (data->data, &flag);
- if (ret == 0 && flag == _gf_true) {
- ret = init_quota_priv (this);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_WARNING,
- "failed to initialize quota private");
- } else {
- priv->feature_enabled |= GF_QUOTA;
- }
- }
- }
-
- data = dict_get (options, "xtime");
- if (data) {
- ret = gf_string2boolean (data->data, &flag);
- if (ret == 0 && flag == _gf_true) {
- marker_xtime_priv_cleanup (this);
-
- ret = init_xtime_priv (this, options);
- if (ret < 0) {
- gf_log (this->name, GF_LOG_WARNING,
- "failed to initialize xtime private, "
- "xtime updation will fail");
- } else {
- priv->feature_enabled |= GF_XTIME;
- }
- }
+reconfigure(xlator_t *this, dict_t *options)
+{
+ int32_t ret = 0;
+ data_t *data = NULL;
+ gf_boolean_t flag = _gf_false;
+ marker_conf_t *priv = NULL;
+ int32_t version = 0;
+
+ GF_ASSERT(this);
+ GF_ASSERT(this->private);
+
+ priv = this->private;
+
+ priv->feature_enabled = 0;
+
+ GF_VALIDATE_OR_GOTO(this->name, options, out);
+
+ data = dict_get(options, "quota");
+ if (data) {
+ ret = gf_string2boolean(data->data, &flag);
+ if (ret == 0 && flag == _gf_true)
+ priv->feature_enabled |= GF_QUOTA;
+ }
+
+ data = dict_get(options, "inode-quota");
+ if (data) {
+ ret = gf_string2boolean(data->data, &flag);
+ if (ret == 0 && flag == _gf_true)
+ priv->feature_enabled |= GF_INODE_QUOTA;
+ }
+
+ data = dict_get(options, "quota-version");
+ if (data)
+ ret = gf_string2int32(data->data, &version);
+
+ if (priv->feature_enabled) {
+ if (version >= 0)
+ priv->version = version;
+ else
+ gf_log(this->name, GF_LOG_ERROR,
+ "Invalid quota "
+ "version %d",
+ priv->version);
+ }
+
+ data = dict_get(options, "xtime");
+ if (data) {
+ ret = gf_string2boolean(data->data, &flag);
+ if (ret == 0 && flag == _gf_true) {
+ marker_xtime_priv_cleanup(this);
+
+ ret = init_xtime_priv(this, options);
+ if (ret < 0) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "failed to initialize xtime private, "
+ "xtime updation will fail");
+ } else {
+ priv->feature_enabled |= GF_XTIME;
+ data = dict_get(options, "gsync-force-xtime");
+ if (!data)
+ goto out;
+ ret = gf_string2boolean(data->data, &flag);
+ if (ret == 0 && flag)
+ priv->feature_enabled |= GF_XTIME_GSYNC_FORCE;
+ }
}
+ }
out:
- return 0;
+ return ret;
}
-
int32_t
-init (xlator_t *this)
-{
- dict_t *options = NULL;
- data_t *data = NULL;
- int32_t ret = 0;
- gf_boolean_t flag = _gf_false;
- marker_conf_t *priv = NULL;
-
- if (!this->children) {
- gf_log (this->name, GF_LOG_ERROR,
- "marker translator needs subvolume defined.");
- return -1;
- }
+init(xlator_t *this)
+{
+ dict_t *options = NULL;
+ data_t *data = NULL;
+ int32_t ret = 0;
+ gf_boolean_t flag = _gf_false;
+ marker_conf_t *priv = NULL;
+
+ if (!this->children) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "marker translator needs subvolume defined.");
+ return -1;
+ }
- if (!this->parents) {
- gf_log (this->name, GF_LOG_WARNING,
- "Volume is dangling.");
- return -1;
- }
+ if (!this->parents) {
+ gf_log(this->name, GF_LOG_WARNING, "Volume is dangling.");
+ return -1;
+ }
- options = this->options;
+ options = this->options;
- ALLOCATE_OR_GOTO (this->private, marker_conf_t, err);
+ ALLOCATE_OR_GOTO(this->private, marker_conf_t, err);
- priv = this->private;
+ priv = this->private;
- priv->feature_enabled = 0;
+ priv->feature_enabled = 0;
+ priv->version = 0;
- LOCK_INIT (&priv->lock);
+ LOCK_INIT(&priv->lock);
- data = dict_get (options, "quota");
- if (data) {
- ret = gf_string2boolean (data->data, &flag);
- if (ret == 0 && flag == _gf_true) {
- ret = init_quota_priv (this);
- if (ret < 0)
- goto err;
+ data = dict_get(options, "quota");
+ if (data) {
+ ret = gf_string2boolean(data->data, &flag);
+ if (ret == 0 && flag == _gf_true)
+ priv->feature_enabled |= GF_QUOTA;
+ }
- priv->feature_enabled |= GF_QUOTA;
- }
- }
+ data = dict_get(options, "inode-quota");
+ if (data) {
+ ret = gf_string2boolean(data->data, &flag);
+ if (ret == 0 && flag == _gf_true)
+ priv->feature_enabled |= GF_INODE_QUOTA;
+ }
+
+ data = dict_get(options, "quota-version");
+ if (data)
+ ret = gf_string2int32(data->data, &priv->version);
- data = dict_get (options, "xtime");
- if (data) {
- ret = gf_string2boolean (data->data, &flag);
- if (ret == 0 && flag == _gf_true) {
- ret = init_xtime_priv (this, options);
- if (ret < 0)
- goto err;
+ if ((ret == 0) && priv->feature_enabled && priv->version < 0) {
+ gf_log(this->name, GF_LOG_ERROR, "Invalid quota version %d",
+ priv->version);
+ goto err;
+ }
+
+ data = dict_get(options, "xtime");
+ if (data) {
+ ret = gf_string2boolean(data->data, &flag);
+ if (ret == 0 && flag == _gf_true) {
+ ret = init_xtime_priv(this, options);
+ if (ret < 0)
+ goto err;
- priv->feature_enabled |= GF_XTIME;
- }
+ priv->feature_enabled |= GF_XTIME;
+ data = dict_get(options, "gsync-force-xtime");
+ if (!data)
+ goto cont;
+ ret = gf_string2boolean(data->data, &flag);
+ if (ret == 0 && flag)
+ priv->feature_enabled |= GF_XTIME_GSYNC_FORCE;
}
+ }
- return 0;
+cont:
+ this->local_pool = mem_pool_new(marker_local_t, 128);
+ if (!this->local_pool) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "failed to create local_t's memory pool");
+ goto err;
+ }
+
+ return 0;
err:
- marker_priv_cleanup (this);
+ marker_priv_cleanup(this);
- return -1;
+ return -1;
}
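
init() and reconfigure() fold the quota, inode-quota, xtime and quota-version options into priv->feature_enabled and priv->version. A simplified, self-contained sketch of that folding with hard-coded option strings; the real code obtains them from the options dict via gf_string2boolean() and gf_string2int32():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch of how init()/reconfigure() above build the feature bitmask.
 * Option values are hard-coded for illustration only. */
enum { GF_QUOTA = 1, GF_XTIME = 2, GF_XTIME_GSYNC_FORCE = 4, GF_INODE_QUOTA = 8 };

static int
str_to_bool(const char *s)
{
    return (strcmp(s, "on") == 0 || strcmp(s, "true") == 0 || strcmp(s, "1") == 0);
}

int
main(void)
{
    const char *quota = "on", *inode_quota = "on", *xtime = "off";
    const char *quota_version = "2";
    int features = 0, version = 0;

    if (str_to_bool(quota))
        features |= GF_QUOTA;
    if (str_to_bool(inode_quota))
        features |= GF_INODE_QUOTA;
    if (str_to_bool(xtime))
        features |= GF_XTIME;

    version = atoi(quota_version);
    if (features && version < 0) {
        fprintf(stderr, "Invalid quota version %d\n", version);
        return 1;
    }

    printf("feature_enabled = 0x%x, quota version = %d\n", features, version);
    return 0;
}
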
int32_t
-marker_forget (xlator_t *this, inode_t *inode)
+marker_forget(xlator_t *this, inode_t *inode)
{
- marker_inode_ctx_t *ctx = NULL;
- uint64_t value = 0;
+ marker_inode_ctx_t *ctx = NULL;
+ uint64_t value = 0;
- if (inode_ctx_del (inode, this, &value) != 0)
- goto out;
+ if (inode_ctx_del(inode, this, &value) != 0)
+ goto out;
- ctx = (marker_inode_ctx_t *)(unsigned long)value;
- if (ctx == NULL) {
- goto out;
- }
+ ctx = (marker_inode_ctx_t *)(unsigned long)value;
+ if (ctx == NULL) {
+ goto out;
+ }
- quota_forget (this, ctx->quota_ctx);
+ mq_forget(this, ctx->quota_ctx);
- GF_FREE (ctx);
+ GF_FREE(ctx);
out:
- return 0;
+ return 0;
}
void
-fini (xlator_t *this)
+fini(xlator_t *this)
{
- marker_priv_cleanup (this);
+ marker_priv_cleanup(this);
}
struct xlator_fops fops = {
- .lookup = marker_lookup,
- .create = marker_create,
- .mkdir = marker_mkdir,
- .writev = marker_writev,
- .truncate = marker_truncate,
- .ftruncate = marker_ftruncate,
- .symlink = marker_symlink,
- .link = marker_link,
- .unlink = marker_unlink,
- .rmdir = marker_rmdir,
- .rename = marker_rename,
- .mknod = marker_mknod,
- .setxattr = marker_setxattr,
- .fsetxattr = marker_fsetxattr,
- .setattr = marker_setattr,
- .fsetattr = marker_fsetattr,
- .removexattr = marker_removexattr,
- .getxattr = marker_getxattr
+ .lookup = marker_lookup,
+ .create = marker_create,
+ .mkdir = marker_mkdir,
+ .writev = marker_writev,
+ .truncate = marker_truncate,
+ .ftruncate = marker_ftruncate,
+ .symlink = marker_symlink,
+ .link = marker_link,
+ .unlink = marker_unlink,
+ .rmdir = marker_rmdir,
+ .rename = marker_rename,
+ .mknod = marker_mknod,
+ .setxattr = marker_setxattr,
+ .fsetxattr = marker_fsetxattr,
+ .setattr = marker_setattr,
+ .fsetattr = marker_fsetattr,
+ .removexattr = marker_removexattr,
+ .getxattr = marker_getxattr,
+ .readdirp = marker_readdirp,
+ .fallocate = marker_fallocate,
+ .discard = marker_discard,
+ .zerofill = marker_zerofill,
};
-struct xlator_cbks cbks = {
- .forget = marker_forget
-};
+struct xlator_cbks cbks = {.forget = marker_forget};
struct volume_options options[] = {
- {.key = {"volume-uuid"}},
- {.key = {"timestamp-file"}},
- {.key = {"quota"}},
- {.key = {"xtime"}},
- {.key = {NULL}}
+ {.key = {"volume-uuid"}, .default_value = "{{ volume.id }}"},
+ {.key = {"timestamp-file"}},
+ {
+ .key = {"quota"},
+ .op_version = {1},
+ .flags = OPT_FLAG_NONE,
+ .tags = {},
+ },
+ {
+ .key = {"inode-quota"},
+ .op_version = {1},
+ .flags = OPT_FLAG_NONE,
+ .tags = {},
+ },
+ {
+ .key = {"xtime"},
+ .op_version = {1},
+ .flags = OPT_FLAG_SETTABLE | OPT_FLAG_FORCE,
+ .tags = {},
+ },
+ {
+ .key = {"gsync-force-xtime"},
+ .op_version = {2},
+ .flags = OPT_FLAG_SETTABLE | OPT_FLAG_FORCE,
+ .tags = {},
+ },
+ {
+ .key = {"quota-version"},
+ .flags = OPT_FLAG_NONE,
+ },
+ {.key = {NULL}}};
+
+xlator_api_t xlator_api = {
+ .init = init,
+ .fini = fini,
+ .reconfigure = reconfigure,
+ .mem_acct_init = mem_acct_init,
+ .op_version = {1}, /* Present from the initial version */
+ .fops = &fops,
+ .cbks = &cbks,
+ .options = options,
+ .identifier = "marker",
+ .category = GF_MAINTAINED,
};
diff --git a/xlators/features/marker/src/marker.h b/xlators/features/marker/src/marker.h
index ea1f5cc0a99..4821094c14b 100644
--- a/xlators/features/marker/src/marker.h
+++ b/xlators/features/marker/src/marker.h
@@ -1,95 +1,148 @@
-/*Copyright (c) 2008-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
+/*
+ Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
#ifndef _MARKER_H
#define _MARKER_H
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
-
#include "marker-quota.h"
-#include "xlator.h"
-#include "defaults.h"
-#include "uuid.h"
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/compat-uuid.h>
+#include <glusterfs/call-stub.h>
#define MARKER_XATTR_PREFIX "trusted.glusterfs"
-#define XTIME "xtime"
-#define VOLUME_MARK "volume-mark"
-#define VOLUME_UUID "volume-uuid"
-#define TIMESTAMP_FILE "timestamp-file"
+#define XTIME "xtime"
+#define VOLUME_MARK "volume-mark"
+#define VOLUME_UUID "volume-uuid"
+#define TIMESTAMP_FILE "timestamp-file"
enum {
- GF_QUOTA=1,
- GF_XTIME=2
+ GF_QUOTA = 1,
+ GF_XTIME = 2,
+ GF_XTIME_GSYNC_FORCE = 4,
+ GF_INODE_QUOTA = 8,
};
/*initialize the local variable*/
-#define MARKER_INIT_LOCAL(_frame,_local) do { \
- _frame->local = _local; \
- _local->pid = _frame->root->pid; \
- memset (&_local->loc, 0, sizeof (loc_t)); \
- _local->ref = 1; \
- LOCK_INIT (&_local->lock); \
- _local->oplocal = NULL; \
- } while (0)
+#define MARKER_INIT_LOCAL(_frame, _local) \
+ do { \
+ _frame->local = _local; \
+ _local->pid = _frame->root->pid; \
+ memset(&_local->loc, 0, sizeof(loc_t)); \
+ _local->ref = 1; \
+ _local->uid = -1; \
+ _local->gid = -1; \
+ LOCK_INIT(&_local->lock); \
+ _local->oplocal = NULL; \
+ } while (0)
/* try alloc and if it fails, goto label */
-#define ALLOCATE_OR_GOTO(var, type, label) do { \
- var = GF_CALLOC (sizeof (type), 1, \
- gf_marker_mt_##type); \
- if (!var) { \
- gf_log (this->name, GF_LOG_ERROR, \
- "out of memory :("); \
- goto label; \
- } \
- } while (0)
-
-struct marker_local{
- uint32_t timebuf[2];
- pid_t pid;
- loc_t loc;
- int32_t ref;
- int32_t ia_nlink;
- gf_lock_t lock;
- mode_t mode;
- struct marker_local *oplocal;
+#define ALLOCATE_OR_GOTO(var, type, label) \
+ do { \
+ var = GF_CALLOC(sizeof(type), 1, gf_marker_mt_##type); \
+ if (!var) { \
+ gf_log(this->name, GF_LOG_ERROR, "out of memory :("); \
+ goto label; \
+ } \
+ } while (0)
+
+#define _MARKER_SET_UID_GID(dest, src) \
+ do { \
+ if (src->uid != -1 && src->gid != -1) { \
+ dest->uid = src->uid; \
+ dest->gid = src->gid; \
+ } \
+ } while (0)
+
+#define MARKER_SET_UID_GID(frame, dest, src) \
+ do { \
+ _MARKER_SET_UID_GID(dest, src); \
+ frame->root->uid = 0; \
+ frame->root->gid = 0; \
+ frame->cookie = (void *)_GF_UID_GID_CHANGED; \
+ } while (0)
+
+#define MARKER_RESET_UID_GID(frame, dest, src) \
+ do { \
+ _MARKER_SET_UID_GID(dest, src); \
+ frame->cookie = NULL; \
+ } while (0)
+
+#define MARKER_STACK_UNWIND(fop, frame, params...) \
+ do { \
+ quota_local_t *_local = NULL; \
+ if (frame) { \
+ _local = frame->local; \
+ frame->local = NULL; \
+ } \
+ STACK_UNWIND_STRICT(fop, frame, params); \
+ if (_local) \
+ marker_local_unref(_local); \
+ } while (0)
+
+struct marker_local {
+ uint32_t timebuf[2];
+ pid_t pid;
+ loc_t loc;
+ loc_t parent_loc;
+ uid_t uid;
+ gid_t gid;
+ int32_t ref;
+ uint32_t ia_nlink;
+ struct iatt buf;
+ gf_lock_t lock;
+ mode_t mode;
+ int32_t err;
+ call_stub_t *stub;
+ call_frame_t *lk_frame;
+ quota_meta_t contribution;
+ struct marker_local *oplocal;
+
+ /* marker quota specific */
+ int64_t delta;
+ int64_t d_off;
+ int64_t sum;
+ int64_t size;
+ int32_t hl_count;
+ int32_t dentry_child_count;
+
+ fd_t *fd;
+ call_frame_t *frame;
+
+ quota_inode_ctx_t *ctx;
+ inode_contribution_t *contri;
+
+ int xflag;
+ dict_t *xdata;
+ gf_boolean_t skip_txn;
};
typedef struct marker_local marker_local_t;
+#define quota_local_t marker_local_t
+
struct marker_inode_ctx {
- struct quota_inode_ctx *quota_ctx;
+ struct quota_inode_ctx *quota_ctx;
};
typedef struct marker_inode_ctx marker_inode_ctx_t;
-struct marker_conf{
- char feature_enabled;
- char *size_key;
- char *dirty_key;
- char *volume_uuid;
- uuid_t volume_uuid_bin;
- char *timestamp_file;
- char *marker_xattr;
- uint64_t quota_lk_owner;
- gf_lock_t lock;
+struct marker_conf {
+ char feature_enabled;
+ char *size_key;
+ char *dirty_key;
+ char *volume_uuid;
+ uuid_t volume_uuid_bin;
+ char *timestamp_file;
+ char *marker_xattr;
+ uint64_t quota_lk_owner;
+ gf_lock_t lock;
+ int32_t version;
};
typedef struct marker_conf marker_conf_t;
-int32_t k;
#endif
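
The MARKER_STACK_UNWIND macro added to marker.h detaches the local from the frame before the reply is unwound and releases it only afterwards, so the fop reply can never race with the local's destruction. The same ordering can be sketched with toy stand-in types (these are not the GlusterFS frame/local structures):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the frame/local pair; only the detach-then-release
 * ordering of MARKER_STACK_UNWIND is modelled here. */
struct toy_local { int ref; };
struct toy_frame { struct toy_local *local; };

static void
toy_local_unref(struct toy_local *local)
{
    if (local && --local->ref == 0)
        free(local);
}

static void
toy_unwind(struct toy_frame *frame, int op_ret, int op_errno)
{
    (void)frame;
    printf("reply sent: op_ret=%d op_errno=%d\n", op_ret, op_errno);
}

static void
marker_style_unwind(struct toy_frame *frame, int op_ret, int op_errno)
{
    struct toy_local *local = frame ? frame->local : NULL;

    if (frame)
        frame->local = NULL;      /* detach first */
    toy_unwind(frame, op_ret, op_errno);
    toy_local_unref(local);       /* release only after the reply */
}

int
main(void)
{
    struct toy_frame frame = { .local = calloc(1, sizeof(struct toy_local)) };

    if (!frame.local)
        return 1;
    frame.local->ref = 1;
    marker_style_unwind(&frame, -1, 12 /* ENOMEM */);
    return 0;
}
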
diff --git a/xlators/features/marker/utils/Makefile.am b/xlators/features/marker/utils/Makefile.am
deleted file mode 100644
index 84e926c00eb..00000000000
--- a/xlators/features/marker/utils/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-SUBDIRS = syncdaemon
-
-gsyncddir = $(libexecdir)/glusterfs
-
-gsyncd_SCRIPTS = gsyncd
-
-CLEANFILES =
diff --git a/xlators/features/marker/utils/gsyncd.in b/xlators/features/marker/utils/gsyncd.in
deleted file mode 100755
index a7af8c0b0b4..00000000000
--- a/xlators/features/marker/utils/gsyncd.in
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/bin/sh
-
-prefix="@prefix@"
-exec_prefix="@exec_prefix@"
-libexecdir=`eval echo "@libexecdir@"`
-sbindir=`eval echo "@sbindir@"`
-
-gluster="$sbindir"/gluster
-
-# glusterd service autodetection
-
-config_wanted=1
-if [ "$_GLUSTERD_CALLED_" = 1 ]; then
- # OK, we know glusterd called us, no need to look for further config
- config_wanted=0
- # ... altough this conclusion should not inherit to our children
- unset _GLUSTERD_CALLED_
-else
- # look for a -c option -- if present, we are already configured.
-
- for a in "$@"; do
- # -c found, see if it has an argument
- if [ "$one_more_arg" = 1 ]; then
- if echo "$a" | grep -qv ^-; then
- config_wanted=0
- break
- fi
- one_more_arg=0
- fi
-
- if [ "$a" = -c ] || [ "$a" = --config-file ]; then
- one_more_arg=1
- continue
- fi
-
- if echo $a | grep -qE '^(-c.|--config-file=)'; then
- config_wanted=0;
- break
- fi
- done
-
-fi
-
-if [ $config_wanted = 1 ]; then
- wd="`${gluster} system:: getwd`"
- if [ $? -eq 0 ]; then
- config_file="$wd/geo-replication/gsyncd.conf"
- fi
-fi
-
-if [ -z "$config_file" ]; then
- exec @PYTHON@ "$libexecdir"/glusterfs/python/syncdaemon/gsyncd.py "$@"
-else
- exec @PYTHON@ "$libexecdir"/glusterfs/python/syncdaemon/gsyncd.py -c "$config_file" "$@"
-fi
diff --git a/xlators/features/marker/utils/syncdaemon/Makefile.am b/xlators/features/marker/utils/syncdaemon/Makefile.am
deleted file mode 100644
index ef2dc9aea2c..00000000000
--- a/xlators/features/marker/utils/syncdaemon/Makefile.am
+++ /dev/null
@@ -1,5 +0,0 @@
-syncdaemondir = $(libexecdir)/glusterfs/python/syncdaemon
-
-syncdaemon_PYTHON = gconf.py gsyncd.py __init__.py master.py README.md repce.py resource.py configinterface.py syncdutils.py monitor.py libcxattr.py
-
-CLEANFILES =
diff --git a/xlators/features/marker/utils/syncdaemon/README.md b/xlators/features/marker/utils/syncdaemon/README.md
deleted file mode 100644
index d45006932d1..00000000000
--- a/xlators/features/marker/utils/syncdaemon/README.md
+++ /dev/null
@@ -1,81 +0,0 @@
-gsycnd, the Gluster Syncdaemon
-==============================
-
-REQUIREMENTS
-------------
-
-_gsyncd_ is a program which can operate either in _master_ or in _slave_ mode.
-Requirements are categorized according to this.
-
-* supported OS is GNU/Linux
-* Python >= 2.5, or 2.4 with Ctypes (see below) (both)
-* OpenSSH >= 4.0 (master) / SSH2 compliant sshd (eg. openssh) (slave)
-* rsync (both)
-* glusterfs with marker support (master); glusterfs (optional on slave)
-* FUSE; for supported versions consult glusterfs
-
-INSTALLATION
-------------
-
-As of now, the supported way of operation is running from the source directory.
-
-If you use Python 2.4.x, you need to install the [Ctypes module](http://python.net/crew/theller/ctypes/).
-
-CONFIGURATION
--------------
-
-gsyncd tunables are a subset of the long command-line options; for listing them,
-type
-
- gsyncd.py --help
-
-and see the long options up to "--config-file". (The leading double dash should be omitted;
-interim underscores and dashes are interchangeable.) The set of options bear some resemblance
-to those of glusterfs and rsync.
-
-The config file format matches the following syntax:
-
- <option1>: <value1>
- <option2>: <value2>
- # comment
-
-By default (unless specified by the option `-c`), gsyncd looks for config file at _conf/gsyncd.conf_
-in the source tree.
-
-USAGE
------
-
-gsyncd is a utilitly for continous mirroring, ie. it mirrors master to slave incrementally.
-Assume we have a gluster volume _pop_ at localhost. We try to set up the following mirrors
-for it with gysncd:
-
-1. _/data/mirror_
-2. local gluster volume _yow_
-3. _/data/far_mirror_ at example.com
-4. gluster volume _moz_ at example.com
-
-The respective gsyncd invocations are (demoing some syntax sugaring):
-
-1.
-
- gsyncd.py gluster://localhost:pop file:///data/mirror
-
- or short form
-
- gsyncd.py :pop /data/mirror
-
-2. `gsyncd :pop :yow`
-3.
-
- gsyncd.py :pop ssh://example.com:/data/far_mirror
-
- or short form
-
- gsyncd.py :pop example.com:/data/far_mirror
-
-4. `gsyncd.py :pop example.com::moz`
-
-gsyncd has to be available on both sides; it's location on the remote side has to be specified
-via the "--remote-gsyncd" option (or "remote-gsyncd" config file parameter). (This option can also be
-used for setting options on the remote side, although the suggested mode of operation is to
-set parameters like log file / pid file in the configuration file.)
diff --git a/xlators/features/marker/utils/syncdaemon/__init__.py b/xlators/features/marker/utils/syncdaemon/__init__.py
deleted file mode 100644
index e69de29bb2d..00000000000
--- a/xlators/features/marker/utils/syncdaemon/__init__.py
+++ /dev/null
diff --git a/xlators/features/marker/utils/syncdaemon/configinterface.py b/xlators/features/marker/utils/syncdaemon/configinterface.py
deleted file mode 100644
index a170b2236cb..00000000000
--- a/xlators/features/marker/utils/syncdaemon/configinterface.py
+++ /dev/null
@@ -1,185 +0,0 @@
-try:
- import ConfigParser
-except ImportError:
- # py 3
- import configparser as ConfigParser
-import re
-from string import Template
-
-from syncdutils import escape, unescape, norm, update_file
-
-SECT_ORD = '__section_order__'
-SECT_META = '__meta__'
-config_version = 2.0
-
-re_type = type(re.compile(''))
-
-
-class MultiDict(object):
-
- def __init__(self, *dd):
- self.dicts = dd
-
- def __getitem__(self, key):
- val = None
- for d in self.dicts:
- if d.get(key):
- val = d[key]
- if not val:
- raise KeyError(key)
- return val
-
-
-class GConffile(object):
-
- def _normconfig(self):
- for n, s in self.config._sections.items():
- if n.find('__') == 0:
- continue
- s2 = type(s)()
- for k, v in s.items():
- if k.find('__') != 0:
- k = norm(k)
- s2[k] = v
- self.config._sections[n] = s2
-
- def __init__(self, path, peers, *dd):
- self.peers = peers
- self.path = path
- self.auxdicts = dd
- self.config = ConfigParser.RawConfigParser()
- self.config.read(path)
- self._normconfig()
-
- def section(self, rx=False):
- peers = self.peers
- if not peers:
- peers = ['.', '.']
- rx = True
- if rx:
- st = 'peersrx'
- else:
- st = 'peers'
- return ' '.join([st] + [escape(u) for u in peers])
-
- @staticmethod
- def parse_section(section):
- sl = section.split()
- st = sl.pop(0)
- sl = [unescape(u) for u in sl]
- if st == 'peersrx':
- sl = [re.compile(u) for u in sl]
- return sl
-
- def ord_sections(self):
- """Return an ordered list of sections.
-
- Ordering happens based on the auxiliary
- SECT_ORD section storing indices for each
- section added through the config API.
-
- To not to go corrupt in case of manually
- written config files, we take care to append
- also those sections which are not registered
- in SECT_ORD.
-
- Needed for python 2.{4,5} where ConfigParser
- cannot yet order sections/options internally.
- """
- so = {}
- if self.config.has_section(SECT_ORD):
- so = self.config._sections[SECT_ORD]
- so2 = {}
- for k, v in so.items():
- if k != '__name__':
- so2[k] = int(v)
- tv = 0
- if so2:
- tv = max(so2.values()) + 1
- ss = [s for s in self.config.sections() if s.find('__') != 0]
- for s in ss:
- if s in so.keys():
- continue
- so2[s] = tv
- tv += 1
- def scmp(x, y):
- return cmp(*(so2[s] for s in (x, y)))
- ss.sort(scmp)
- return ss
-
- def update_to(self, dct, allow_unresolved=False):
- if not self.peers:
- raise RuntimeError('no peers given, cannot select matching options')
- def update_from_sect(sect, mud):
- for k, v in self.config._sections[sect].items():
- if k == '__name__':
- continue
- if allow_unresolved:
- dct[k] = Template(v).safe_substitute(mud)
- else:
- dct[k] = Template(v).substitute(mud)
- for sect in self.ord_sections():
- sp = self.parse_section(sect)
- if isinstance(sp[0], re_type) and len(sp) == len(self.peers):
- match = True
- mad = {}
- for i in range(len(sp)):
- m = sp[i].search(self.peers[i])
- if not m:
- match = False
- break
- for j in range(len(m.groups())):
- mad['match%d_%d' % (i+1, j+1)] = m.groups()[j]
- if match:
- update_from_sect(sect, MultiDict(dct, mad, *self.auxdicts))
- if self.config.has_section(self.section()):
- update_from_sect(self.section(), MultiDict(dct, mad, *self.auxdicts))
-
- def get(self, opt=None):
- d = {}
- self.update_to(d, allow_unresolved = True)
- if opt:
- opt = norm(opt)
- v = d.get(opt)
- if v:
- print v
- else:
- for k, v in d.iteritems():
- if k == '__name__':
- continue
- print("%s: %s" % (k, v))
-
- def write(self, trfn, opt, *a, **kw):
- def mergeconf(f):
- self.config = ConfigParser.RawConfigParser()
- self.config.readfp(f)
- self._normconfig()
- if not self.config.has_section(SECT_META):
- self.config.add_section(SECT_META)
- self.config.set(SECT_META, 'version', config_version)
- return trfn(norm(opt), *a, **kw)
- def updateconf(f):
- self.config.write(f)
- update_file(self.path, updateconf, mergeconf)
-
- def _set(self, opt, val, rx=False):
- sect = self.section(rx)
- if not self.config.has_section(sect):
- self.config.add_section(sect)
- # regarding SECT_ORD, cf. ord_sections
- if not self.config.has_section(SECT_ORD):
- self.config.add_section(SECT_ORD)
- self.config.set(SECT_ORD, sect, len(self.config._sections[SECT_ORD]))
- self.config.set(sect, opt, val)
- return True
-
- def set(self, opt, *a, **kw):
- self.write(self._set, opt, *a, **kw)
-
- def _delete(self, opt, rx=False):
- sect = self.section(rx)
- if self.config.has_section(sect):
- return self.config.remove_option(sect, opt)
-
- def delete(self, opt, *a, **kw):
- self.write(self._delete, opt, *a, **kw)
diff --git a/xlators/features/marker/utils/syncdaemon/gconf.py b/xlators/features/marker/utils/syncdaemon/gconf.py
deleted file mode 100644
index 24165b6191a..00000000000
--- a/xlators/features/marker/utils/syncdaemon/gconf.py
+++ /dev/null
@@ -1,15 +0,0 @@
-import os
-
-class GConf(object):
- ssh_ctl_dir = None
- ssh_ctl_args = None
- cpid = None
- pid_file_owned = False
- permanent_handles = []
-
- @classmethod
- def setup_ssh_ctl(cls, ctld):
- cls.ssh_ctl_dir = ctld
- cls.ssh_ctl_args = ["-oControlMaster=auto", "-S", os.path.join(ctld, "gsycnd-ssh-%r@%h:%p")]
-
-gconf = GConf()
diff --git a/xlators/features/marker/utils/syncdaemon/gsyncd.py b/xlators/features/marker/utils/syncdaemon/gsyncd.py
deleted file mode 100644
index 193af9d5f37..00000000000
--- a/xlators/features/marker/utils/syncdaemon/gsyncd.py
+++ /dev/null
@@ -1,302 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import os.path
-import sys
-import time
-import logging
-import signal
-import select
-import optparse
-import fcntl
-from optparse import OptionParser, SUPPRESS_HELP
-from logging import Logger
-from errno import EEXIST, ENOENT
-
-from gconf import gconf
-from syncdutils import FreeObject, norm, grabpidfile, finalize, log_raise_exception
-from configinterface import GConffile
-import resource
-from monitor import monitor
-
-class GLogger(Logger):
-
- def makeRecord(self, name, level, *a):
- rv = Logger.makeRecord(self, name, level, *a)
- rv.nsecs = (rv.created - int(rv.created)) * 1000000
- fr = sys._getframe(4)
- callee = fr.f_locals.get('self')
- if callee:
- ctx = str(type(callee)).split("'")[1].split('.')[-1]
- else:
- ctx = '<top>'
- if not hasattr(rv, 'funcName'):
- rv.funcName = fr.f_code.co_name
- rv.lvlnam = logging.getLevelName(level)[0]
- rv.ctx = ctx
- return rv
-
- @classmethod
- def setup(cls, **kw):
- lbl = kw.get('label', "")
- if lbl:
- lbl = '(' + lbl + ')'
- lprm = {'datefmt': "%Y-%m-%d %H:%M:%S",
- 'format': "[%(asctime)s.%(nsecs)d] %(lvlnam)s [%(module)s" + lbl + ":%(lineno)s:%(funcName)s] %(ctx)s: %(message)s"}
- lprm.update(kw)
- lvl = kw.get('level', logging.INFO)
- lprm['level'] = lvl
- logging.root = cls("root", lvl)
- logging.setLoggerClass(cls)
- logging.getLogger().handlers = []
- logging.basicConfig(**lprm)
-
-
-def startup(**kw):
- if getattr(gconf, 'pid_file', None) and kw.get('go_daemon') != 'postconn':
- if not grabpidfile():
- sys.stderr.write("pidfile is taken, exiting.\n")
- sys.exit(2)
- gconf.pid_file_owned = True
-
- if kw.get('go_daemon') == 'should':
- x, y = os.pipe()
- gconf.cpid = os.fork()
- if gconf.cpid:
- os.close(x)
- sys.exit()
- os.close(y)
- os.setsid()
- dn = os.open(os.devnull, os.O_RDWR)
- for f in (sys.stdin, sys.stdout, sys.stderr):
- os.dup2(dn, f.fileno())
- if getattr(gconf, 'pid_file', None):
- if not grabpidfile(gconf.pid_file + '.tmp'):
-                raise RuntimeError("cannot grab temporary pidfile")
- os.rename(gconf.pid_file + '.tmp', gconf.pid_file)
- # wait for parent to terminate
- # so we can start up with
- # no messing from the dirty
- # ol' bustard
- select.select((x,), (), ())
- os.close(x)
-
- lkw = {}
- if gconf.log_level:
- lkw['level'] = gconf.log_level
- if kw.get('log_file'):
- if kw['log_file'] in ('-', '/dev/stderr'):
- lkw['stream'] = sys.stderr
- elif kw['log_file'] == '/dev/stdout':
- lkw['stream'] = sys.stdout
- else:
- lkw['filename'] = kw['log_file']
- GLogger.setup(label=kw.get('label'), **lkw)
-
-def main():
- signal.signal(signal.SIGTERM, lambda *a: finalize(*a, **{'exval': 1}))
- GLogger.setup()
- excont = FreeObject(exval = 0)
- try:
- try:
- main_i()
- except:
- log_raise_exception(excont)
- finally:
- finalize(exval = excont.exval)
-
-def main_i():
- rconf = {'go_daemon': 'should'}
-
- def store_abs(opt, optstr, val, parser):
- if val and val != '-':
- val = os.path.abspath(val)
- setattr(parser.values, opt.dest, val)
- def store_local(opt, optstr, val, parser):
- rconf[opt.dest] = val
- def store_local_curry(val):
- return lambda o, oo, vx, p: store_local(o, oo, val, p)
- def store_local_obj(op, dmake):
- return lambda o, oo, vx, p: store_local(o, oo, FreeObject(op=op, **dmake(vx)), p)
-
- op = OptionParser(usage="%prog [options...] <master> <slave>", version="%prog 0.0.1")
- op.add_option('--gluster-command', metavar='CMD', default='glusterfs')
- op.add_option('--gluster-log-file', metavar='LOGF', default=os.devnull, type=str, action='callback', callback=store_abs)
- op.add_option('--gluster-log-level', metavar='LVL')
- op.add_option('-p', '--pid-file', metavar='PIDF', type=str, action='callback', callback=store_abs)
- op.add_option('-l', '--log-file', metavar='LOGF', type=str, action='callback', callback=store_abs)
- op.add_option('--state-file', metavar='STATF', type=str, action='callback', callback=store_abs)
- op.add_option('-L', '--log-level', metavar='LVL')
- op.add_option('-r', '--remote-gsyncd', metavar='CMD', default=os.path.abspath(sys.argv[0]))
- op.add_option('--volume-id', metavar='UUID')
- op.add_option('--session-owner', metavar='ID')
- op.add_option('-s', '--ssh-command', metavar='CMD', default='ssh')
- op.add_option('--rsync-command', metavar='CMD', default='rsync')
- op.add_option('--rsync-extra', metavar='ARGS', default='-sS', help=SUPPRESS_HELP)
- op.add_option('--timeout', metavar='SEC', type=int, default=120)
- op.add_option('--sync-jobs', metavar='N', type=int, default=3)
- op.add_option('--turns', metavar='N', type=int, default=0, help=SUPPRESS_HELP)
-
- op.add_option('-c', '--config-file', metavar='CONF', type=str, action='callback', callback=store_local)
- # duh. need to specify dest or value will be mapped to None :S
- op.add_option('--monitor', dest='monitor', action='callback', callback=store_local_curry(True))
- op.add_option('--feedback-fd', dest='feedback_fd', type=int, help=SUPPRESS_HELP, action='callback', callback=store_local)
- op.add_option('--listen', dest='listen', help=SUPPRESS_HELP, action='callback', callback=store_local_curry(True))
- op.add_option('-N', '--no-daemon', dest="go_daemon", action='callback', callback=store_local_curry('dont'))
- op.add_option('--debug', dest="go_daemon", action='callback', callback=lambda *a: (store_local_curry('dont')(*a),
- setattr(a[-1].values, 'log_file', '-'),
- setattr(a[-1].values, 'log_level', 'DEBUG'))),
-
- for a in ('check', 'get'):
- op.add_option('--config-' + a, metavar='OPT', type=str, dest='config', action='callback',
- callback=store_local_obj(a, lambda vx: {'opt': vx}))
- op.add_option('--config-get-all', dest='config', action='callback', callback=store_local_obj('get', lambda vx: {'opt': None}))
- for m in ('', '-rx'):
- # call this code 'Pythonic' eh?
- # have to define a one-shot local function to be able to inject (a value depending on the)
- # iteration variable into the inner lambda
- def conf_mod_opt_regex_variant(rx):
- op.add_option('--config-set' + m, metavar='OPT VAL', type=str, nargs=2, dest='config', action='callback',
- callback=store_local_obj('set', lambda vx: {'opt': vx[0], 'val': vx[1], 'rx': rx}))
- op.add_option('--config-del' + m, metavar='OPT', type=str, dest='config', action='callback',
- callback=store_local_obj('del', lambda vx: {'opt': vx, 'rx': rx}))
- conf_mod_opt_regex_variant(not not m)
-
- op.add_option('--canonicalize-url', dest='do_canon', action='callback', callback=store_local_curry('raw'))
- op.add_option('--canonicalize-escape-url', dest='do_canon', action='callback', callback=store_local_curry('escaped'))
-
- tunables = [ norm(o.get_opt_string()[2:]) for o in op.option_list if o.callback in (store_abs, None) and o.get_opt_string() not in ('--version', '--help') ]
-
- # precedence for sources of values: 1) commandline, 2) cfg file, 3) defaults
- # -- for this to work out we need to tell apart defaults from explicitly set
- # options... so churn out the defaults here and call the parser with virgin
- # values container.
- defaults = op.get_default_values()
- opts, args = op.parse_args(values=optparse.Values())
- confdata = rconf.get('config')
- if not (len(args) == 2 or \
- (len(args) == 1 and rconf.get('listen')) or \
- (len(args) <= 2 and confdata) or \
- rconf.get('do_canon')):
- sys.stderr.write("error: incorrect number of arguments\n\n")
- sys.stderr.write(op.get_usage() + "\n")
- sys.exit(1)
-
- if getattr(confdata, 'rx', None):
- # peers are regexen, don't try to parse them
- canon_peers = args
- namedict = {}
- else:
- rscs = [resource.parse_url(u) for u in args]
- dc = rconf.get('do_canon')
- if dc:
- for r in rscs:
- print(r.get_url(canonical=True, escaped=(dc=='escaped')))
- return
- local = remote = None
- if rscs:
- local = rscs[0]
- if len(rscs) > 1:
- remote = rscs[1]
- if not local.can_connect_to(remote):
- raise RuntimeError("%s cannot work with %s" % (local.path, remote and remote.path))
- pa = ([], [], [])
- urlprms = ({}, {'canonical': True}, {'canonical': True, 'escaped': True})
- for x in rscs:
- for i in range(len(pa)):
- pa[i].append(x.get_url(**urlprms[i]))
- peers, canon_peers, canon_esc_peers = pa
-        # creating the namedict, a dict representing various ways of referring to / representing
- # peers to be fillable in config templates
- mods = (lambda x: x, lambda x: x[0].upper() + x[1:], lambda x: 'e' + x[0].upper() + x[1:])
- if remote:
- rmap = { local: ('local', 'master'), remote: ('remote', 'slave') }
- else:
- rmap = { local: ('local', 'slave') }
- namedict = {}
- for i in range(len(rscs)):
- x = rscs[i]
- for name in rmap[x]:
- for j in range(3):
- namedict[mods[j](name)] = pa[j][i]
- if x.scheme == 'gluster':
- namedict[name + 'vol'] = x.volume
- if not 'config_file' in rconf:
- rconf['config_file'] = os.path.join(os.path.dirname(sys.argv[0]), "conf/gsyncd.conf")
- gcnf = GConffile(rconf['config_file'], canon_peers, defaults.__dict__, opts.__dict__, namedict)
-
- if confdata:
- opt_ok = norm(confdata.opt) in tunables + [None]
- if confdata.op == 'check':
- if opt_ok:
- sys.exit(0)
- else:
- sys.exit(1)
- elif not opt_ok:
- raise RuntimeError("not a valid option: " + confdata.opt)
- if confdata.op == 'get':
- gcnf.get(confdata.opt)
- elif confdata.op == 'set':
- gcnf.set(confdata.opt, confdata.val, confdata.rx)
- elif confdata.op == 'del':
- gcnf.delete(confdata.opt, confdata.rx)
- return
-
- gconf.__dict__.update(defaults.__dict__)
- gcnf.update_to(gconf.__dict__)
- gconf.__dict__.update(opts.__dict__)
- gconf.configinterface = gcnf
-
- ffd = rconf.get('feedback_fd')
- if ffd:
- fcntl.fcntl(ffd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)
-
- #normalize loglevel
- lvl0 = gconf.log_level
- if isinstance(lvl0, str):
- lvl1 = lvl0.upper()
- lvl2 = logging.getLevelName(lvl1)
- # I have _never_ _ever_ seen such an utterly braindead
- # error condition
- if lvl2 == "Level " + lvl1:
- raise RuntimeError('cannot recognize log level "%s"' % lvl0)
- gconf.log_level = lvl2
-
- go_daemon = rconf['go_daemon']
- be_monitor = rconf.get('monitor')
-
- if not be_monitor and isinstance(remote, resource.SSH) and \
- go_daemon == 'should':
- go_daemon = 'postconn'
- log_file = None
- else:
- log_file = gconf.log_file
- if be_monitor:
- label = 'monitor'
- elif remote:
- #master
- label = ''
- else:
- label = 'slave'
- startup(go_daemon=go_daemon, log_file=log_file, label=label)
-
- if be_monitor:
- return monitor()
-
- logging.info("syncing: %s" % " -> ".join(peers))
- if remote:
- go_daemon = remote.connect_remote(go_daemon=go_daemon)
- if go_daemon:
- startup(go_daemon=go_daemon, log_file=gconf.log_file)
- # complete remote connection in child
- remote.connect_remote(go_daemon='done')
- local.connect()
- if ffd:
- os.close(ffd)
- local.service_loop(*[r for r in [remote] if r])
-
- logging.info("exiting.")
-
-
-if __name__ == "__main__":
- main()
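
The value-precedence logic in main_i() above (explicit command-line options override the config file, which in turn overrides the parser defaults) boils down to three successive dictionary updates onto the gconf namespace. A minimal sketch of that layering, with purely illustrative option values rather than the real gsyncd tunables:

    # defaults < config file < explicitly passed command-line options
    defaults = {'log_level': 'INFO', 'timeout': 120}   # parser defaults
    from_config = {'log_level': 'DEBUG'}               # values found in the config file
    from_cli = {'timeout': 60}                         # only options the user actually passed

    effective = {}
    effective.update(defaults)       # lowest precedence
    effective.update(from_config)    # overrides defaults
    effective.update(from_cli)       # highest precedence
    print(effective)                 # {'log_level': 'DEBUG', 'timeout': 60}
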
diff --git a/xlators/features/marker/utils/syncdaemon/libcxattr.py b/xlators/features/marker/utils/syncdaemon/libcxattr.py
deleted file mode 100644
index fdc016c47ce..00000000000
--- a/xlators/features/marker/utils/syncdaemon/libcxattr.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import os
-from ctypes import *
-from ctypes.util import find_library
-
-class Xattr(object):
-
- libc = CDLL(find_library("libc"))
-
- @classmethod
- def geterrno(cls):
- return c_int.in_dll(cls.libc, 'errno').value
-
- @classmethod
- def raise_oserr(cls):
- errn = cls.geterrno()
- raise OSError(errn, os.strerror(errn))
-
- @classmethod
- def _query_xattr(cls, path, siz, syscall, *a):
- if siz:
- buf = create_string_buffer('\0' * siz)
- else:
- buf = None
- ret = getattr(cls.libc, syscall)(*((path,) + a + (buf, siz)))
- if ret == -1:
- cls.raise_oserr()
- if siz:
- return buf.raw[:ret]
- else:
- return ret
-
- @classmethod
- def lgetxattr(cls, path, attr, siz=0):
- return cls._query_xattr( path, siz, 'lgetxattr', attr)
-
- @classmethod
- def llistxattr(cls, path, siz=0):
- ret = cls._query_xattr(path, siz, 'llistxattr')
- if isinstance(ret, str):
- ret = ret.split('\0')
- return ret
-
- @classmethod
- def lsetxattr(cls, path, attr, val):
- ret = cls.libc.lsetxattr(path, attr, val, len(val), 0)
- if ret == -1:
- cls.raise_oserr()
-
- @classmethod
- def lremovexattr(cls, path, attr):
- ret = cls.libc.lremovexattr(path, attr)
- if ret == -1:
- cls.raise_oserr()
-
- @classmethod
- def llistxattr_buf(cls, path):
- size = cls.llistxattr(path)
- if size == -1:
- cls.raise_oserr()
- if size == 0:
- return []
- return cls.llistxattr(path, size)
diff --git a/xlators/features/marker/utils/syncdaemon/master.py b/xlators/features/marker/utils/syncdaemon/master.py
deleted file mode 100644
index 35dc4ee06aa..00000000000
--- a/xlators/features/marker/utils/syncdaemon/master.py
+++ /dev/null
@@ -1,366 +0,0 @@
-import os
-import sys
-import time
-import stat
-import signal
-import logging
-import errno
-from errno import ENOENT, ENODATA
-from threading import currentThread, Condition, Lock
-
-from gconf import gconf
-from syncdutils import FreeObject, Thread
-
-URXTIME = (-1, 0)
-
-class GMaster(object):
-
- KFGN = 0
- KNAT = 1
-
- def get_sys_volinfo(self):
- fgn_vis, nat_vi = self.master.server.foreign_volume_infos(), \
- self.master.server.native_volume_info()
- fgn_vi = None
- if fgn_vis:
- if len(fgn_vis) > 1:
- raise RuntimeError("cannot work with multiple foreign masters")
- fgn_vi = fgn_vis[0]
- return fgn_vi, nat_vi
-
- @property
- def uuid(self):
- if self.volinfo:
- return self.volinfo['uuid']
-
- @property
- def volmark(self):
- if self.volinfo:
- return self.volinfo['volume_mark']
-
- @property
- def inter_master(self):
- return self.volinfo_state[self.KFGN] and True or False
-
- def xtime(self, path, *a, **opts):
- if a:
- rsc = a[0]
- else:
- rsc = self.master
- if not 'create' in opts:
- opts['create'] = (rsc == self.master and not self.inter_master)
- if not 'default_xtime' in opts:
- if rsc == self.master and self.inter_master:
- opts['default_xtime'] = ENODATA
- else:
- opts['default_xtime'] = URXTIME
- xt = rsc.server.xtime(path, self.uuid)
- if isinstance(xt, int) and xt != ENODATA:
- return xt
- invalid_xtime = (xt == ENODATA or xt < self.volmark)
- if invalid_xtime and opts['create']:
- t = time.time()
- sec = int(t)
- nsec = int((t - sec) * 1000000)
- xt = (sec, nsec)
- rsc.server.set_xtime(path, self.uuid, xt)
- if invalid_xtime:
- xt = opts['default_xtime']
- return xt
-
- def __init__(self, master, slave):
- self.master = master
- self.slave = slave
- self.jobtab = {}
- self.syncer = Syncer(slave)
- self.total_turns = int(gconf.turns)
- self.turns = 0
- self.start = None
- self.change_seen = None
-        # the authoritative (foreign, native) volinfo pair
- # which lets us deduce what to do when we refetch
- # the volinfos from system
- uuid_preset = getattr(gconf, 'volume_id', None)
- self.volinfo_state = (uuid_preset and {'uuid': uuid_preset}, None)
- # the actual volinfo we make use of
- self.volinfo = None
- self.terminate = False
-
- def crawl_loop(self):
- timo = int(gconf.timeout or 0)
- if timo > 0:
- def keep_alive():
- while True:
- gap = timo * 0.5
- # first grab a reference as self.volinfo
- # can be changed in main thread
- vi = self.volinfo
- if vi:
- # then have a private copy which we can mod
- vi = vi.copy()
- vi['timeout'] = int(time.time()) + timo
- else:
- # send keep-alives more frequently to
- # avoid a delay in announcing our volume info
- # to slave if it becomes established in the
- # meantime
- gap = min(10, gap)
- self.slave.server.keep_alive(vi)
- time.sleep(gap)
- t = Thread(target=keep_alive)
- t.start()
- while not self.terminate:
- self.crawl()
-
- def add_job(self, path, label, job, *a, **kw):
- if self.jobtab.get(path) == None:
- self.jobtab[path] = []
- self.jobtab[path].append((label, a, lambda : job(*a, **kw)))
-
- def add_failjob(self, path, label):
- logging.debug('salvaged: ' + label)
- self.add_job(path, label, lambda: False)
-
- def wait(self, path, *args):
- jobs = self.jobtab.pop(path, [])
- succeed = True
- for j in jobs:
- ret = j[-1]()
- if not ret:
- succeed = False
- if succeed:
- self.sendmark(path, *args)
- return succeed
-
- def sendmark(self, path, mark, adct=None):
- if adct:
- self.slave.server.setattr(path, adct)
- self.slave.server.set_xtime(path, self.uuid, mark)
-
- @staticmethod
- def volinfo_state_machine(volinfo_state, volinfo_sys):
- # store the value below "boxed" to emulate proper closures
-        # (variables of the enclosing scope are available to inner functions
-        # provided they are not reassigned; mutation is OK).
- param = FreeObject(relax_mismatch = False, state_change = None, index=-1)
- def select_vi(vi0, vi):
- param.index += 1
- if vi and (not vi0 or vi0['uuid'] == vi['uuid']):
- if not vi0 and not param.relax_mismatch:
- param.state_change = param.index
- # valid new value found; for the rest, we are graceful about
- # uuid mismatch
- param.relax_mismatch = True
- return vi
- if vi0 and vi and vi0['uuid'] != vi['uuid'] and not param.relax_mismatch:
- # uuid mismatch for master candidate, bail out
- raise RuntimeError("aborting on uuid change from %s to %s" % \
- (vi0['uuid'], vi['uuid']))
- # fall back to old
- return vi0
- newstate = tuple(select_vi(*vip) for vip in zip(volinfo_state, volinfo_sys))
- srep = lambda vi: vi and vi['uuid'][0:8]
- logging.debug('(%s, %s) << (%s, %s) -> (%s, %s)' % \
- tuple(srep(vi) for vi in volinfo_state + volinfo_sys + newstate))
- return newstate, param.state_change
-
- def crawl(self, path='.', xtl=None):
- if path == '.':
- if self.start:
- logging.info("... done, took %.6f seconds" % (time.time() - self.start))
- time.sleep(1)
- self.start = time.time()
- volinfo_sys = self.get_sys_volinfo()
- self.volinfo_state, state_change = self.volinfo_state_machine(self.volinfo_state,
- volinfo_sys)
- if self.inter_master:
- self.volinfo = volinfo_sys[self.KFGN]
- else:
- self.volinfo = volinfo_sys[self.KNAT]
- if state_change == self.KFGN or (state_change == self.KNAT and not self.inter_master):
- logging.info('new master is %s', self.uuid)
- if state_change == self.KFGN:
- gconf.configinterface.set('volume_id', self.uuid)
- if self.volinfo:
- if self.volinfo['retval']:
- raise RuntimeError ("master is corrupt")
- logging.info("%s master with volume id %s ..." % \
- (self.inter_master and "intermediate" or "primary", self.uuid))
- else:
- if self.inter_master:
- logging.info("waiting for being synced from %s ..." % \
- self.volinfo_state[self.KFGN]['uuid'])
- else:
- logging.info("waiting for volume info ...")
- return
- logging.debug("entering " + path)
- if not xtl:
- xtl = self.xtime(path)
- if isinstance(xtl, int):
- self.add_failjob(path, 'no-local-node')
- return
- xtr0 = self.xtime(path, self.slave)
- if isinstance(xtr0, int):
- if xtr0 != ENOENT:
- self.slave.server.purge(path)
- try:
- self.slave.server.mkdir(path)
- except OSError:
- self.add_failjob(path, 'no-remote-node')
- return
- xtr = URXTIME
- else:
- xtr = xtr0
- if xtr > xtl:
- raise RuntimeError("timestamp corruption for " + path)
- if xtl == xtr:
- if path == '.' and self.total_turns and self.change_seen:
- self.turns += 1
- self.change_seen = False
- logging.info("finished turn #%s/%s" % (self.turns, self.total_turns))
- if self.turns == self.total_turns:
- logging.info("reached turn limit")
- self.terminate = True
- return
- if path == '.':
- self.change_seen = True
- try:
- dem = self.master.server.entries(path)
- except OSError:
- self.add_failjob(path, 'local-entries-fail')
- return
- try:
- des = self.slave.server.entries(path)
- except OSError:
- self.slave.server.purge(path)
- try:
- self.slave.server.mkdir(path)
- des = self.slave.server.entries(path)
- except OSError:
- self.add_failjob(path, 'remote-entries-fail')
- return
- dd = set(des) - set(dem)
- if dd:
- self.slave.server.purge(path, dd)
- chld = []
- for e in dem:
- e = os.path.join(path, e)
- xte = self.xtime(e)
- if isinstance(xte, int):
- logging.warn("irregular xtime for %s: %s" % (e, errno.errorcode[xte]))
- elif xte > xtr:
- chld.append((e, xte))
- def indulgently(e, fnc, blame=None):
- if not blame:
- blame = path
- try:
- return fnc(e)
- except (IOError, OSError):
- ex = sys.exc_info()[1]
- if ex.errno == ENOENT:
-                    logging.warn("salvaged ENOENT for " + e)
- self.add_failjob(blame, 'by-indulgently')
- return False
- else:
- raise
- for e, xte in chld:
- st = indulgently(e, lambda e: os.lstat(e))
- if st == False:
- continue
- mo = st.st_mode
- adct = {'own': (st.st_uid, st.st_gid)}
- if stat.S_ISLNK(mo):
- if indulgently(e, lambda e: self.slave.server.symlink(os.readlink(e), e)) == False:
- continue
- self.sendmark(e, xte, adct)
- elif stat.S_ISREG(mo):
- logging.debug("syncing %s ..." % e)
- pb = self.syncer.add(e)
- def regjob(e, xte, pb):
- if pb.wait():
- logging.debug("synced " + e)
- self.sendmark(e, xte)
- return True
- else:
- logging.error("failed to sync " + e)
- self.add_job(path, 'reg', regjob, e, xte, pb)
- elif stat.S_ISDIR(mo):
- adct['mode'] = mo
- if indulgently(e, lambda e: (self.add_job(path, 'cwait', self.wait, e, xte, adct),
- self.crawl(e, xte),
- True)[-1], blame=e) == False:
- continue
- else:
- # ignore fifos, sockets and special files
- pass
- if path == '.':
- self.wait(path, xtl)
-
-class BoxClosedErr(Exception):
- pass
-
-class PostBox(list):
-
- def __init__(self, *a):
- list.__init__(self, *a)
- self.lever = Condition()
- self.open = True
- self.done = False
-
- def wait(self):
- self.lever.acquire()
- if not self.done:
- self.lever.wait()
- self.lever.release()
- return self.result
-
- def wakeup(self, data):
- self.result = data
- self.lever.acquire()
- self.done = True
- self.lever.notifyAll()
- self.lever.release()
-
- def append(self, e):
- self.lever.acquire()
- if not self.open:
- raise BoxClosedErr
- list.append(self, e)
- self.lever.release()
-
- def close(self):
- self.lever.acquire()
- self.open = False
- self.lever.release()
-
-class Syncer(object):
-
- def __init__(self, slave):
- self.slave = slave
- self.lock = Lock()
- self.pb = PostBox()
- for i in range(int(gconf.sync_jobs)):
- t = Thread(target=self.syncjob)
- t.start()
-
- def syncjob(self):
- while True:
- pb = None
- while True:
- self.lock.acquire()
- if self.pb:
- pb, self.pb = self.pb, PostBox()
- self.lock.release()
- if pb:
- break
- time.sleep(0.5)
- pb.close()
- pb.wakeup(self.slave.rsync(pb))
-
- def add(self, e):
- while True:
- try:
- self.pb.append(e)
- return self.pb
- except BoxClosedErr:
- pass
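
The crawl above is driven purely by per-directory xtime comparison: a subtree whose slave-side xtime equals the master-side xtime is skipped, a slave xtime newer than the master's is treated as corruption, and only entries whose xtime exceeds the slave's are recursed into or resynced. A rough standalone model of that decision (the real code additionally handles missing xtimes, purges and job bookkeeping):

    URXTIME = (-1, 0)   # placeholder xtime that sorts before any real (sec, usec) pair

    def needs_sync(xtime_master, xtime_slave):
        if xtime_slave is None:              # slave has no xtime yet: assume never synced
            xtime_slave = URXTIME
        if xtime_slave > xtime_master:
            raise RuntimeError("timestamp corruption")
        return xtime_master > xtime_slave    # equal xtimes mean already in sync

    assert needs_sync((10, 0), (9, 500)) is True
    assert needs_sync((10, 0), (10, 0)) is False
    assert needs_sync((10, 0), None) is True
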
diff --git a/xlators/features/marker/utils/syncdaemon/monitor.py b/xlators/features/marker/utils/syncdaemon/monitor.py
deleted file mode 100644
index 365e91435fd..00000000000
--- a/xlators/features/marker/utils/syncdaemon/monitor.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import os
-import sys
-import time
-import logging
-import select
-from signal import SIGKILL
-from gconf import gconf
-from syncdutils import update_file
-
-class Monitor(object):
-
- def __init__(self):
- self.state = None
-
- def set_state(self, state):
- if state == self.state:
- return
- self.state = state
- logging.info('new state: %s' % state)
- if getattr(gconf, 'state_file', None):
- update_file(gconf.state_file, lambda f: f.write(state + '\n'))
-
- def monitor(self):
- argv = sys.argv[:]
- for o in ('-N', '--no-daemon', '--monitor'):
- while o in argv:
- argv.remove(o)
- argv.extend(('-N', '-p', ''))
- argv.insert(0, os.path.basename(sys.executable))
-
- self.set_state('starting...')
- ret = 0
- def nwait(p, o=0):
- p2, r = os.waitpid(p, o)
- if not p2:
- return
- if os.WIFEXITED(r):
- return os.WEXITSTATUS(r)
- return 1
- conn_timeout = 60
- while ret in (0, 1):
- logging.info('-' * conn_timeout)
- logging.info('starting gsyncd worker')
- pr, pw = os.pipe()
- cpid = os.fork()
- if cpid == 0:
- os.close(pr)
- os.execv(sys.executable, argv + ['--feedback-fd', str(pw)])
- os.close(pw)
- t0 = time.time()
- so = select.select((pr,), (), (), conn_timeout)[0]
- os.close(pr)
- if so:
- ret = nwait(cpid, os.WNOHANG)
- if ret != None:
- logging.debug("worker died before establishing connection")
- else:
- logging.debug("worker seems to be connected (?? racy check)")
- while time.time() < t0 + conn_timeout:
- ret = nwait(cpid, os.WNOHANG)
- if ret != None:
- logging.debug("worker died in startup phase")
- break
- time.sleep(1)
- else:
- logging.debug("worker not confirmed in %d sec, aborting it" % \
- conn_timeout)
- os.kill(cpid, SIGKILL)
- ret = nwait(cpid)
- if ret == None:
- self.set_state('OK')
- ret = nwait(cpid)
- elif ret in (0, 1):
- self.set_state('faulty')
- time.sleep(10)
- self.set_state('inconsistent')
- return ret
-
-def monitor():
- return Monitor().monitor()
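
nwait() above reduces the two-part status returned by os.waitpid() to a single exit code, which the monitor loop then maps to the OK/faulty/inconsistent states. The decoding step in isolation, shown with a throwaway child process:

    import os

    def exit_status(status):                 # same reduction nwait() performs
        if os.WIFEXITED(status):
            return os.WEXITSTATUS(status)
        return 1                             # abnormal termination counts as failure

    pid = os.fork()
    if pid == 0:
        os._exit(3)                          # child exits with code 3
    _, status = os.waitpid(pid, 0)
    print(exit_status(status))               # prints 3
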
diff --git a/xlators/features/marker/utils/syncdaemon/repce.py b/xlators/features/marker/utils/syncdaemon/repce.py
deleted file mode 100644
index 47691301e29..00000000000
--- a/xlators/features/marker/utils/syncdaemon/repce.py
+++ /dev/null
@@ -1,162 +0,0 @@
-import os
-import sys
-import select
-import time
-import logging
-from threading import Condition
-try:
- import thread
-except ImportError:
- # py 3
- import _thread as thread
-try:
- from Queue import Queue
-except ImportError:
- # py 3
- from queue import Queue
-try:
- import cPickle as pickle
-except ImportError:
- # py 3
- import pickle
-
-from syncdutils import Thread
-
-pickle_proto = -1
-repce_version = 1.0
-
-def ioparse(i, o):
- if isinstance(i, int):
- i = os.fdopen(i)
- # rely on duck typing for recognizing
- # streams as that works uniformly
- # in py2 and py3
- if hasattr(o, 'fileno'):
- o = o.fileno()
- return (i, o)
-
-def send(out, *args):
- os.write(out, pickle.dumps(args, pickle_proto))
-
-def recv(inf):
- return pickle.load(inf)
-
-
-class RepceServer(object):
-
- def __init__(self, obj, i, o, wnum=6):
- self.obj = obj
- self.inf, self.out = ioparse(i, o)
- self.wnum = wnum
- self.q = Queue()
-
- def service_loop(self):
- for i in range(self.wnum):
- t = Thread(target=self.worker)
- t.start()
- try:
- while True:
- self.q.put(recv(self.inf))
- except EOFError:
- logging.info("terminating on reaching EOF.")
-
- def worker(self):
- while True:
- in_data = self.q.get(True)
- rid = in_data[0]
- rmeth = in_data[1]
- exc = False
- if rmeth == '__repce_version__':
- res = repce_version
- else:
- try:
- res = getattr(self.obj, rmeth)(*in_data[2:])
- except:
- res = sys.exc_info()[1]
- exc = True
- logging.exception("call failed: ")
- send(self.out, rid, exc, res)
-
-
-class RepceJob(object):
-
- def __init__(self, cbk):
- self.rid = (os.getpid(), thread.get_ident(), time.time())
- self.cbk = cbk
- self.lever = Condition()
- self.done = False
-
- def __repr__(self):
- return ':'.join([str(x) for x in self.rid])
-
- def wait(self):
- self.lever.acquire()
- if not self.done:
- self.lever.wait()
- self.lever.release()
- return self.result
-
- def wakeup(self, data):
- self.result = data
- self.lever.acquire()
- self.done = True
- self.lever.notify()
- self.lever.release()
-
-
-class RepceClient(object):
-
- def __init__(self, i, o):
- self.inf, self.out = ioparse(i, o)
- self.jtab = {}
- t = Thread(target = self.listen)
- t.start()
-
- def listen(self):
- while True:
- select.select((self.inf,), (), ())
- rid, exc, res = recv(self.inf)
- rjob = self.jtab.pop(rid)
- if rjob.cbk:
- rjob.cbk(rjob, [exc, res])
-
- def push(self, meth, *args, **kw):
- cbk = kw.get('cbk')
- if not cbk:
- def cbk(rj, res):
- if res[0]:
- raise res[1]
- rjob = RepceJob(cbk)
- self.jtab[rjob.rid] = rjob
- logging.debug("call %s %s%s ..." % (repr(rjob), meth, repr(args)))
- send(self.out, rjob.rid, meth, *args)
- return rjob
-
- def __call__(self, meth, *args):
- rjob = self.push(meth, *args, **{'cbk': lambda rj, res: rj.wakeup(res)})
- exc, res = rjob.wait()
- if exc:
- logging.error('call %s (%s) failed on peer with %s' % (repr(rjob), meth, str(type(res).__name__)))
- raise res
- logging.debug("call %s %s -> %s" % (repr(rjob), meth, repr(res)))
- return res
-
- class mprx(object):
-
- def __init__(self, ins, meth):
- self.ins = ins
- self.meth = meth
-
- def __call__(self, *a):
- return self.ins(self.meth, *a)
-
- def __getattr__(self, meth):
- return self.mprx(self, meth)
-
- def __version__(self):
- d = {'proto': self('__repce_version__')}
- try:
- d['object'] = self('version')
- except AttributeError:
- pass
- return d
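
RePCe, as defined above, is a small pickle-over-pipe RPC: the client writes a pickled tuple of (request id, method name, arguments) and the server answers with (request id, exception flag, result). A self-contained round trip of just the framing, reusing the same send/recv idea (illustrative values only):

    import os, pickle

    pickle_proto = -1

    def send(out_fd, *args):                     # mirrors repce.send()
        os.write(out_fd, pickle.dumps(args, pickle_proto))

    def recv(in_file):                           # mirrors repce.recv()
        return pickle.load(in_file)

    r, w = os.pipe()
    rid = (os.getpid(), 1, 0.0)                  # request id: (pid, thread id, timestamp)
    send(w, rid, 'version')                      # request frame: (rid, method, *args)
    print(recv(os.fdopen(r, 'rb')))              # server side would reply with (rid, exc, res)
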
diff --git a/xlators/features/marker/utils/syncdaemon/resource.py b/xlators/features/marker/utils/syncdaemon/resource.py
deleted file mode 100644
index 800d297bacd..00000000000
--- a/xlators/features/marker/utils/syncdaemon/resource.py
+++ /dev/null
@@ -1,467 +0,0 @@
-import re
-import os
-import sys
-import pwd
-import stat
-import time
-import errno
-import struct
-import select
-import socket
-import logging
-import tempfile
-from errno import EEXIST, ENOENT, ENODATA, ENOTDIR, ELOOP, EISDIR
-
-from gconf import gconf
-import repce
-from repce import RepceServer, RepceClient
-from master import GMaster
-import syncdutils
-
-UrlRX = re.compile('\A(\w+)://(.*)')
-HostRX = re.compile('[a-z\d](?:[a-z\d.-]*[a-z\d])?', re.I)
-UserRX = re.compile("[\w!\#$%&'*+-\/=?^_`{|}~]+")
-
-def sup(x, *a, **kw):
- return getattr(super(type(x), x), sys._getframe(1).f_code.co_name)(*a, **kw)
-
-def desugar(ustr):
- m = re.match('([^:]*):(.*)', ustr)
- if m:
- if not m.groups()[0]:
- return "gluster://localhost" + ustr
- elif '@' in m.groups()[0] or re.search('[:/]', m.groups()[1]):
- return "ssh://" + ustr
- else:
- return "gluster://" + ustr
- else:
- if ustr[0] != '/':
- raise RuntimeError("cannot resolve sugared url '%s'" % ustr)
- ap = os.path.normpath(ustr)
- if ap.startswith('//'):
- ap = ap[1:]
- return "file://" + ap
-
-def parse_url(ustr):
- m = UrlRX.match(ustr)
- if not m:
- ustr = desugar(ustr)
- m = UrlRX.match(ustr)
- if not m:
- raise RuntimeError("malformed url")
- sch, path = m.groups()
- this = sys.modules[__name__]
- if not hasattr(this, sch.upper()):
- raise RuntimeError("unknown url scheme " + sch)
- return getattr(this, sch.upper())(path)
-
-
-class _MetaXattr(object):
-
- # load Xattr stuff on-demand
-
- def __getattr__(self, meth):
- from libcxattr import Xattr as LXattr
- xmeth = [ m for m in dir(LXattr) if m[0] != '_' ]
- if not meth in xmeth:
- return
- for m in xmeth:
- setattr(self, m, getattr(LXattr, m))
- return getattr(self, meth)
-
-Xattr = _MetaXattr()
-
-
-class Server(object):
-
- GX_NSPACE = "trusted.glusterfs"
- NTV_FMTSTR = "!" + "B"*19 + "II"
- FRGN_XTRA_FMT = "I"
- FRGN_FMTSTR = NTV_FMTSTR + FRGN_XTRA_FMT
-
- @staticmethod
- def entries(path):
- # prevent symlinks being followed
- if not stat.S_ISDIR(os.lstat(path).st_mode):
- raise OSError(ENOTDIR, os.strerror(ENOTDIR))
- return os.listdir(path)
-
- @classmethod
- def purge(cls, path, entries=None):
- me_also = entries == None
- if not entries:
- try:
- # if it's a symlink, prevent
- # following it
- try:
- os.unlink(path)
- return
- except OSError:
- ex = sys.exc_info()[1]
- if ex.errno == EISDIR:
- entries = os.listdir(path)
- else:
- raise
- except OSError:
- ex = sys.exc_info()[1]
- if ex.errno in (ENOTDIR, ENOENT, ELOOP):
- try:
- os.unlink(path)
- return
- except OSError:
- ex = sys.exc_info()[1]
- if ex.errno == ENOENT:
- return
- raise
- else:
- raise
- for e in entries:
- cls.purge(os.path.join(path, e))
- if me_also:
- os.rmdir(path)
-
- @classmethod
- def _create(cls, path, ctor):
- try:
- ctor(path)
- except OSError:
- ex = sys.exc_info()[1]
- if ex.errno == EEXIST:
- cls.purge(path)
- return ctor(path)
- raise
-
- @classmethod
- def mkdir(cls, path):
- cls._create(path, os.mkdir)
-
- @classmethod
- def symlink(cls, lnk, path):
- cls._create(path, lambda p: os.symlink(lnk, p))
-
- @classmethod
- def xtime(cls, path, uuid):
- try:
- return struct.unpack('!II', Xattr.lgetxattr(path, '.'.join([cls.GX_NSPACE, uuid, 'xtime']), 8))
- except OSError:
- ex = sys.exc_info()[1]
- if ex.errno in (ENOENT, ENODATA, ENOTDIR):
- return ex.errno
- else:
- raise
-
- @classmethod
- def set_xtime(cls, path, uuid, mark):
- Xattr.lsetxattr(path, '.'.join([cls.GX_NSPACE, uuid, 'xtime']), struct.pack('!II', *mark))
-
- @staticmethod
- def setattr(path, adct):
- own = adct.get('own')
- if own:
- os.lchown(path, *own)
- mode = adct.get('mode')
- if mode:
- os.chmod(path, stat.S_IMODE(mode))
- times = adct.get('times')
- if times:
- os.utime(path, times)
-
- @staticmethod
- def pid():
- return os.getpid()
-
- last_keep_alive = 0
- @classmethod
- def keep_alive(cls, dct):
- if dct:
- key = '.'.join([cls.GX_NSPACE, 'volume-mark', dct['uuid']])
- val = struct.pack(cls.FRGN_FMTSTR,
- *(dct['version'] +
- tuple(int(x,16) for x in re.findall('(?:[\da-f]){2}', dct['uuid'])) +
- (dct['retval'],) + dct['volume_mark'][0:2] + (dct['timeout'],)))
- Xattr.lsetxattr('.', key, val)
- cls.last_keep_alive += 1
- return cls.last_keep_alive
-
- @staticmethod
- def version():
- return 1.0
-
-
-class SlaveLocal(object):
-
- def can_connect_to(self, remote):
- return not remote
-
- def service_loop(self):
- repce = RepceServer(self.server, sys.stdin, sys.stdout, int(gconf.sync_jobs))
- t = syncdutils.Thread(target=repce.service_loop)
- t.start()
- logging.info("slave listening")
- if gconf.timeout and int(gconf.timeout) > 0:
- while True:
- lp = self.server.last_keep_alive
- time.sleep(int(gconf.timeout))
- if lp == self.server.last_keep_alive:
- logging.info("connection inactive for %d seconds, stopping" % int(gconf.timeout))
- break
- else:
- select.select((), (), ())
-
-class SlaveRemote(object):
-
- def connect_remote(self, rargs=[], **opts):
- slave = opts.get('slave', self.url)
- ix, ox = os.pipe()
- iy, oy = os.pipe()
- pid = os.fork()
- if not pid:
- os.close(ox)
- os.dup2(ix, sys.stdin.fileno())
- os.close(iy)
- os.dup2(oy, sys.stdout.fileno())
- so = getattr(gconf, 'session_owner', None)
- if so:
- so_args = ['--session-owner', so]
- else:
- so_args = []
- argv = rargs + gconf.remote_gsyncd.split() + so_args + \
- ['-N', '--listen', '--timeout', str(gconf.timeout), slave]
- os.execvp(argv[0], argv)
- os.close(ix)
- os.close(oy)
- return self.start_fd_client(iy, ox, **opts)
-
- def start_fd_client(self, i, o, **opts):
- self.server = RepceClient(i, o)
- rv = self.server.__version__()
- exrv = {'proto': repce.repce_version, 'object': Server.version()}
- da0 = (rv, exrv)
- da1 = ({}, {})
- for i in range(2):
- for k, v in da0[i].iteritems():
- da1[i][k] = int(v)
- if da1[0] != da1[1]:
- raise RuntimeError("RePCe major version mismatch: local %s, remote %s" % (exrv, rv))
-
- def rsync(self, files, *args):
- if not files:
- raise RuntimeError("no files to sync")
- logging.debug("files: " + ", ".join(files))
- argv = gconf.rsync_command.split() + gconf.rsync_extra.split() + ['-aR'] + files + list(args)
- return os.spawnvp(os.P_WAIT, argv[0], argv) == 0
-
-
-class AbstractUrl(object):
-
- def __init__(self, path, pattern):
- m = re.search(pattern, path)
- if not m:
- raise RuntimeError("malformed path")
- self.path = path
- return m.groups()
-
- @property
- def scheme(self):
- return type(self).__name__.lower()
-
- def canonical_path(self):
- return self.path
-
- def get_url(self, canonical=False, escaped=False):
- if canonical:
- pa = self.canonical_path()
- else:
- pa = self.path
- u = "://".join((self.scheme, pa))
- if escaped:
- u = syncdutils.escape(u)
- return u
-
- @property
- def url(self):
- return self.get_url()
-
-
- ### Concrete resource classes ###
-
-
-class FILE(AbstractUrl, SlaveLocal, SlaveRemote):
-
- class FILEServer(Server):
- pass
-
- server = FILEServer
-
- def __init__(self, path):
- sup(self, path, '^/')
-
- def connect(self):
- os.chdir(self.path)
-
- def rsync(self, files):
- return sup(self, files, self.path)
-
-
-class GLUSTER(AbstractUrl, SlaveLocal, SlaveRemote):
-
- class GLUSTERServer(Server):
-
- @classmethod
- def _attr_unpack_dict(cls, xattr, extra_fields = ''):
- fmt_string = cls.NTV_FMTSTR + extra_fields
- buf = Xattr.lgetxattr('.', xattr, struct.calcsize(fmt_string))
- vm = struct.unpack(fmt_string, buf)
- m = re.match('(.{8})(.{4})(.{4})(.{4})(.{12})', "".join(['%02x' % x for x in vm[2:18]]))
- uuid = '-'.join(m.groups())
- volinfo = { 'version': vm[0:2],
- 'uuid' : uuid,
- 'retval' : vm[18],
- 'volume_mark': vm[19:21],
- }
- if extra_fields:
- return volinfo, vm[-len(extra_fields):]
- else:
- return volinfo
-
- @classmethod
- def foreign_volume_infos(cls):
- dict_list = []
- xattr_list = Xattr.llistxattr_buf('.')
- for ele in xattr_list:
- if ele.find('.'.join([cls.GX_NSPACE, 'volume-mark', ''])) == 0:
- d, x = cls._attr_unpack_dict(ele, cls.FRGN_XTRA_FMT)
- now = int(time.time())
- if x[0] > now:
- logging.debug("volinfo[%s] expires: %d (%d sec later)" % \
- (d['uuid'], x[0], x[0] - now))
- dict_list.append(d)
- else:
- try:
- Xattr.lremovexattr('.', ele)
- except OSError:
- pass
- return dict_list
-
- @classmethod
- def native_volume_info(cls):
- try:
- return cls._attr_unpack_dict('.'.join([cls.GX_NSPACE, 'volume-mark']))
- except OSError:
- ex = sys.exc_info()[1]
- if ex.errno != ENODATA:
- raise
-
- server = GLUSTERServer
-
- def __init__(self, path):
- self.host, self.volume = sup(self, path, '^(%s):(.+)' % HostRX.pattern)
-
- def canonical_path(self):
- return ':'.join([socket.gethostbyname(self.host), self.volume])
-
- def can_connect_to(self, remote):
- return True
-
- def connect(self):
- def umount_l(d):
- time.sleep(0.2) # XXX temporary workaround
- argv = ['umount', '-l', d]
- return os.spawnvp(os.P_WAIT, argv[0], argv)
- d = tempfile.mkdtemp(prefix='gsyncd-aux-mount-')
- mounted = False
- try:
- argv = gconf.gluster_command.split() + \
- (gconf.gluster_log_level and ['-L', gconf.gluster_log_level] or []) + \
- ['-l', gconf.gluster_log_file, '-s', self.host,
- '--volfile-id', self.volume, '--client-pid=-1', d]
- if os.spawnvp(os.P_WAIT, argv[0], argv):
- raise RuntimeError("command failed: " + " ".join(argv))
- mounted = True
- logging.debug('auxiliary glusterfs mount in place')
- os.chdir(d)
- if umount_l(d) != 0:
- raise RuntimeError("umounting %s failed" % d)
- mounted = False
- finally:
- try:
- if mounted:
- umount_l(d)
- os.rmdir(d)
- except:
- logging.warn('stale mount possibly left behind on ' + d)
- logging.debug('auxiliary glusterfs mount prepared')
-
- def connect_remote(self, *a, **kw):
- sup(self, *a, **kw)
- self.slavedir = "/proc/%d/cwd" % self.server.pid()
-
- def service_loop(self, *args):
- if args:
- GMaster(self, args[0]).crawl_loop()
- else:
- sup(self, *args)
-
- def rsync(self, files):
- return sup(self, files, self.slavedir)
-
-
-class SSH(AbstractUrl, SlaveRemote):
-
- def __init__(self, path):
- self.remote_addr, inner_url = sup(self, path,
- '^((?:%s@)?%s):(.+)' % tuple([ r.pattern for r in (UserRX, HostRX) ]))
- self.inner_rsc = parse_url(inner_url)
-
- def canonical_path(self):
- m = re.match('([^@]+)@(.+)', self.remote_addr)
- if m:
- u, h = m.groups()
- else:
- u, h = pwd.getpwuid(os.geteuid()).pw_name, self.remote_addr
- remote_addr = '@'.join([u, socket.gethostbyname(h)])
- return ':'.join([remote_addr, self.inner_rsc.get_url(canonical=True)])
-
- def can_connect_to(self, remote):
- return False
-
- def start_fd_client(self, *a, **opts):
- if opts.get('deferred'):
- return a
- sup(self, *a)
- ityp = type(self.inner_rsc)
- if ityp == FILE:
- slavepath = self.inner_rsc.path
- elif ityp == GLUSTER:
- slavepath = "/proc/%d/cwd" % self.server.pid()
- else:
- raise NotImplementedError
- self.slaveurl = ':'.join([self.remote_addr, slavepath])
-
- def connect_remote(self, go_daemon=None):
- if go_daemon == 'done':
- return self.start_fd_client(*self.fd_pair)
- gconf.setup_ssh_ctl(tempfile.mkdtemp(prefix='gsyncd-aux-ssh-'))
- deferred = go_daemon == 'postconn'
- ret = sup(self, gconf.ssh_command.split() + gconf.ssh_ctl_args + [self.remote_addr], slave=self.inner_rsc.url, deferred=deferred)
- if deferred:
- # send a message to peer so that we can wait for
- # the answer from which we know connection is
- # established and we can proceed with daemonization
- # (doing that too early robs the ssh passwd prompt...)
- # However, we'd better not start the RepceClient
- # before daemonization (that's not preserved properly
-            # in daemon), we just do an ad-hoc linear put/get.
- i, o = ret
- inf = os.fdopen(i)
- repce.send(o, None, '__repce_version__')
- select.select((inf,), (), ())
- repce.recv(inf)
- # hack hack hack: store a global reference to the file
- # to save it from getting GC'd which implies closing it
- gconf.permanent_handles.append(inf)
- self.fd_pair = (i, o)
- return 'should'
-
- def rsync(self, files):
- return sup(self, files, '-ze', " ".join(gconf.ssh_command.split() + gconf.ssh_ctl_args), self.slaveurl)
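
Server.xtime()/set_xtime() above keep the per-session sync timestamp in a trusted.glusterfs.<session uuid>.xtime extended attribute, packed as two network-order 32-bit integers. The key construction and packing on their own (illustrative uuid; no real xattr is touched):

    import struct

    GX_NSPACE = "trusted.glusterfs"
    uuid = "6a4c6e4c-9f0a-4c9b-8f6d-0123456789ab"     # illustrative session uuid

    key = '.'.join([GX_NSPACE, uuid, 'xtime'])
    mark = (1311926000, 437269)                       # (seconds, sub-second part)

    blob = struct.pack('!II', *mark)                  # 8 bytes, network byte order
    assert struct.unpack('!II', blob) == mark
    print(key, len(blob))
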
diff --git a/xlators/features/marker/utils/syncdaemon/syncdutils.py b/xlators/features/marker/utils/syncdaemon/syncdutils.py
deleted file mode 100644
index 4bf51da746e..00000000000
--- a/xlators/features/marker/utils/syncdaemon/syncdutils.py
+++ /dev/null
@@ -1,160 +0,0 @@
-import os
-import sys
-import time
-import fcntl
-import shutil
-import logging
-from threading import Lock, Thread as baseThread
-from errno import EACCES, EAGAIN
-from signal import SIGTERM, SIGKILL
-from time import sleep
-
-from gconf import gconf
-
-try:
- # py 3
- from urllib import parse as urllib
-except ImportError:
- import urllib
-
-def escape(s):
- return urllib.quote_plus(s)
-
-def unescape(s):
- return urllib.unquote_plus(s)
-
-def norm(s):
- if s:
- return s.replace('-', '_')
-
-def update_file(path, updater, merger = lambda f: True):
- """update a file in a transaction-like manner"""
-
- fr = fw = None
- try:
- fd = os.open(path, os.O_CREAT|os.O_RDWR)
- try:
- fr = os.fdopen(fd, 'r+b')
- except:
- os.close(fd)
- raise
- fcntl.lockf(fr, fcntl.LOCK_EX)
- if not merger(fr):
- return
-
- tmpp = path + '.tmp.' + str(os.getpid())
- fd = os.open(tmpp, os.O_CREAT|os.O_EXCL|os.O_WRONLY)
- try:
- fw = os.fdopen(fd, 'wb', 0)
- except:
- os.close(fd)
- raise
- updater(fw)
- os.fsync(fd)
- os.rename(tmpp, path)
- finally:
- for fx in (fr, fw):
- if fx:
- fx.close()
-
-def grabfile(fname, content=None):
- # damn those messy open() mode codes
- fd = os.open(fname, os.O_CREAT|os.O_RDWR)
- f = os.fdopen(fd, 'r+b', 0)
- try:
- fcntl.lockf(f, fcntl.LOCK_EX|fcntl.LOCK_NB)
- except:
- ex = sys.exc_info()[1]
- f.close()
- if isinstance(ex, IOError) and ex.errno in (EACCES, EAGAIN):
- # cannot grab, it's taken
- return
- raise
- if content:
- try:
- f.truncate()
- f.write(content)
- except:
- f.close()
- raise
- gconf.permanent_handles.append(f)
- return f
-
-def grabpidfile(fname=None, setpid=True):
- if not fname:
- fname = gconf.pid_file
- content = None
- if setpid:
- content = str(os.getpid()) + '\n'
- return grabfile(fname, content=content)
-
-final_lock = Lock()
-
-def finalize(*a, **kw):
- final_lock.acquire()
- if getattr(gconf, 'pid_file', None):
- rm_pidf = gconf.pid_file_owned
- if gconf.cpid:
- # exit path from parent branch of daemonization
- rm_pidf = False
- while True:
- f = grabpidfile(setpid=False)
- if not f:
- # child has already taken over pidfile
- break
- if os.waitpid(gconf.cpid, os.WNOHANG)[0] == gconf.cpid:
- # child has terminated
- rm_pidf = True
- break;
- time.sleep(0.1)
- if rm_pidf:
- try:
- os.unlink(gconf.pid_file)
- except:
- ex = sys.exc_info()[1]
- if ex.errno == ENOENT:
- pass
- else:
- raise
- if gconf.ssh_ctl_dir and not gconf.cpid:
- shutil.rmtree(gconf.ssh_ctl_dir)
- sys.stdout.flush()
- sys.stderr.flush()
- os._exit(kw.get('exval', 0))
-
-def log_raise_exception(excont):
- exc = sys.exc_info()[1]
- if isinstance(exc, SystemExit):
- excont.exval = exc.code or 0
- raise
- else:
- logging.exception("FAIL: ")
- sys.stderr.write("failed with %s.\n" % type(exc).__name__)
- excont.exval = 1
- sys.exit(excont.exval)
-
-
-class FreeObject(object):
- """wildcard class for which any attribute can be set"""
-
- def __init__(self, **kw):
- for k,v in kw.iteritems():
- setattr(self, k, v)
-
-class Thread(baseThread):
-
- def __init__(self, *a, **kw):
- tf = kw.get('target')
- if tf:
- def twrap(*aa):
- excont = FreeObject(exval = 0)
- try:
- tf(*aa)
- except:
- try:
- log_raise_exception(excont)
- finally:
- finalize(exval = excont.exval)
- kw['target'] = twrap
- baseThread.__init__(self, *a, **kw)
- self.setDaemon(True)
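
update_file() above follows the classic write-temp-then-rename pattern so that readers never observe a half-written state file. A reduced version of the same idea, leaving out the locking and the merger hook of the real helper:

    import os

    def update_file(path, updater):
        tmp = path + '.tmp.' + str(os.getpid())
        with open(tmp, 'w') as f:
            updater(f)
            f.flush()
            os.fsync(f.fileno())                 # make sure data hits disk first
        os.rename(tmp, path)                     # atomic replacement

    update_file('/tmp/gsyncd.state.example', lambda f: f.write('OK\n'))
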
diff --git a/xlators/features/metadisp/Makefile.am b/xlators/features/metadisp/Makefile.am
new file mode 100644
index 00000000000..a985f42a877
--- /dev/null
+++ b/xlators/features/metadisp/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/xlators/features/metadisp/src/Makefile.am b/xlators/features/metadisp/src/Makefile.am
new file mode 100644
index 00000000000..1520ad8c424
--- /dev/null
+++ b/xlators/features/metadisp/src/Makefile.am
@@ -0,0 +1,38 @@
+noinst_PYTHON = gen-fops.py
+
+EXTRA_DIST = fops-tmpl.c
+
+xlator_LTLIBRARIES = metadisp.la
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
+
+nodist_metadisp_la_SOURCES = fops.c
+
+BUILT_SOURCES = fops.c
+
+metadisp_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+
+metadisp_la_SOURCES = metadisp.c \
+ metadisp-unlink.c \
+ metadisp-stat.c \
+ metadisp-lookup.c \
+ metadisp-readdir.c \
+ metadisp-create.c \
+ metadisp-open.c \
+ metadisp-fsync.c \
+ metadisp-setattr.c \
+ backend.c
+
+metadisp_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+
+noinst_HEADERS = metadisp.h metadisp-fops.h
+
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src
+
+AM_CFLAGS = -Wall $(GF_CFLAGS)
+
+fops.c: fops-tmpl.c $(top_srcdir)/libglusterfs/src/generator.py gen-fops.py
+ PYTHONPATH=$(top_srcdir)/libglusterfs/src \
+ $(PYTHON) $(srcdir)/gen-fops.py $(srcdir)/fops-tmpl.c > $@
+
+CLEANFILES = $(nodist_metadisp_la_SOURCES)
diff --git a/xlators/features/metadisp/src/backend.c b/xlators/features/metadisp/src/backend.c
new file mode 100644
index 00000000000..ee2c25bfaa7
--- /dev/null
+++ b/xlators/features/metadisp/src/backend.c
@@ -0,0 +1,45 @@
+#define GFID_STR_LEN 37
+
+#include "metadisp.h"
+
+/*
+ * backend.c
+ *
+ * functions responsible for converting user-facing paths to backend-style
+ * "/$GFID" paths.
+ */
+
+int32_t
+build_backend_loc(uuid_t gfid, loc_t *src_loc, loc_t *dst_loc)
+{
+ static uuid_t root = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1};
+ char gfid_buf[GFID_STR_LEN + 1] = {
+ 0,
+ };
+ char *path = NULL;
+
+ GF_VALIDATE_OR_GOTO("metadisp", src_loc, out);
+ GF_VALIDATE_OR_GOTO("metadisp", dst_loc, out);
+
+ loc_copy(dst_loc, src_loc);
+ memcpy(dst_loc->pargfid, root, sizeof(root));
+ GF_FREE((char *)dst_loc->path); // we are overwriting path so nuke
+ // whatever loc_copy gave us
+
+ uuid_utoa_r(gfid, gfid_buf);
+
+ path = GF_CALLOC(GFID_STR_LEN + 1, sizeof(char),
+ gf_common_mt_char); // freed via loc_wipe
+
+ path[0] = '/';
+ strncpy(path + 1, gfid_buf, GFID_STR_LEN);
+ path[GFID_STR_LEN] = 0;
+ dst_loc->path = path;
+ if (src_loc->name)
+ dst_loc->name = strrchr(dst_loc->path, '/');
+ if (dst_loc->name)
+ dst_loc->name++;
+ return 0;
+out:
+ return -1;
+}
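
build_backend_loc() above flattens a user-facing location into the backend's gfid namespace: the parent gfid becomes the root gfid and the path becomes a single slash followed by the canonical text form of the entry's gfid. The same path transformation expressed in Python for illustration (the gfid bytes are made up):

    import uuid

    def backend_path(gfid_bytes):
        # "/<canonical gfid>" is what build_backend_loc() stores in dst_loc->path
        return '/' + str(uuid.UUID(bytes=gfid_bytes))

    gfid = bytes(range(16))                      # illustrative 16-byte gfid
    print(backend_path(gfid))                    # /00010203-0405-0607-0809-0a0b0c0d0e0f
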
diff --git a/xlators/features/metadisp/src/fops-tmpl.c b/xlators/features/metadisp/src/fops-tmpl.c
new file mode 100644
index 00000000000..4385b7dd5b7
--- /dev/null
+++ b/xlators/features/metadisp/src/fops-tmpl.c
@@ -0,0 +1,10 @@
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <glusterfs/xlator.h>
+#include "metadisp.h"
+#include "metadisp-fops.h"
+
+#pragma generate
diff --git a/xlators/features/metadisp/src/gen-fops.py b/xlators/features/metadisp/src/gen-fops.py
new file mode 100644
index 00000000000..8b5e120fdec
--- /dev/null
+++ b/xlators/features/metadisp/src/gen-fops.py
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+
+import sys
+from generator import fop_subs, generate
+
+FN_METADATA_CHILD_GENERIC = """
+int32_t
+metadisp_@NAME@ (call_frame_t *frame, xlator_t *this,
+ @LONG_ARGS@)
+{
+ METADISP_TRACE("@NAME@ metadata");
+ STACK_WIND (frame, default_@NAME@_cbk,
+ METADATA_CHILD(this), METADATA_CHILD(this)->fops->@NAME@,
+ @SHORT_ARGS@);
+ return 0;
+}
+"""
+
+FN_GENERIC_TEMPLATE = """
+int32_t
+metadisp_@NAME@ (call_frame_t *frame, xlator_t *this,
+ @LONG_ARGS@)
+{
+ METADISP_TRACE("@NAME@ generic");
+ STACK_WIND (frame, default_@NAME@_cbk,
+ DATA_CHILD(this), DATA_CHILD(this)->fops->@NAME@,
+ @SHORT_ARGS@);
+ return 0;
+}
+"""
+
+FN_DATAFD_TEMPLATE = """
+int32_t
+metadisp_@NAME@ (call_frame_t *frame, xlator_t *this,
+ @LONG_ARGS@)
+{
+ METADISP_TRACE("@NAME@ datafd");
+ xlator_t *child = NULL;
+ child = DATA_CHILD(this);
+ STACK_WIND (frame, default_@NAME@_cbk,
+ child, child->fops->@NAME@,
+ @SHORT_ARGS@);
+ return 0;
+}
+"""
+
+FN_DATALOC_TEMPLATE = """
+int32_t
+metadisp_@NAME@ (call_frame_t *frame, xlator_t *this,
+ @LONG_ARGS@)
+{
+ METADISP_TRACE("@NAME@ dataloc");
+ loc_t backend_loc = {
+ 0,
+ };
+ if (build_backend_loc(loc->gfid, loc, &backend_loc)) {
+ goto unwind;
+ }
+ xlator_t *child = NULL;
+ child = DATA_CHILD(this);
+ STACK_WIND (frame, default_@NAME@_cbk,
+ child, child->fops->@NAME@,
+ @SHORT_ARGS@);
+ return 0;
+
+unwind:
+ STACK_UNWIND_STRICT(lookup, frame, -1, EINVAL, NULL, NULL, NULL, NULL);
+ return 0;
+}
+"""
+
+FOPS_LINE_TEMPLATE = "\t.@NAME@ = metadisp_@NAME@,"
+
+skipped = [
+ "readdir",
+ "readdirp",
+ "lookup",
+ "fsync",
+ "stat",
+ "open",
+ "create",
+ "unlink",
+ "setattr",
+ # TODO: implement "inodelk",
+]
+
+
+def gen_fops():
+ done = skipped
+
+ #
+ # these are fops that wind to the DATA_CHILD
+ #
+ # NOTE: re-written in order from google doc:
+ # https://docs.google.com/document/d/1KEwVtSNvDhs4qb63gWx2ulCp5GJjge77NGJk4p_Ms4Q
+ for name in [
+ "writev",
+ "readv",
+ "ftruncate",
+ "zerofill",
+ "discard",
+ "seek",
+ "fstat",
+ ]:
+ done = done + [name]
+ print(generate(FN_DATAFD_TEMPLATE, name, fop_subs))
+
+ for name in ["truncate"]:
+ done = done + [name]
+ print(generate(FN_DATALOC_TEMPLATE, name, fop_subs))
+
+ # these are fops that operate solely on dentries, folders,
+ # or extended attributes. Therefore, they must always
+ # wind to METADATA_CHILD and should never perform
+ # any path rewriting
+ #
+ # NOTE: re-written in order from google doc:
+ # https://docs.google.com/document/d/1KEwVtSNvDhs4qb63gWx2ulCp5GJjge77NGJk4p_Ms4Q
+ for name in [
+ "mkdir",
+ "symlink",
+ "link",
+ "rename",
+ "mknod",
+ "opendir",
+        # "readdir",  # special-cased
+        # "readdirp",  # special-cased
+ "fsyncdir",
+ # "setattr", # special-cased
+ "readlink",
+ "fentrylk",
+ "access",
+ # TODO: these wind to both,
+ # data for backend-attributes and metadata for the rest
+ "xattrop",
+ "setxattr",
+ "getxattr",
+ "removexattr",
+ "fgetxattr",
+ "fsetxattr",
+ "fremovexattr",
+ ]:
+
+ done = done + [name]
+ print(generate(FN_METADATA_CHILD_GENERIC, name, fop_subs))
+
+ print("struct xlator_fops fops = {")
+ for name in done:
+ print(generate(FOPS_LINE_TEMPLATE, name, fop_subs))
+
+ print("};")
+
+
+for l in open(sys.argv[1], "r").readlines():
+ if l.find("#pragma generate") != -1:
+ print("/* BEGIN GENERATED CODE - DO NOT MODIFY */")
+ gen_fops()
+ print("/* END GENERATED CODE */")
+ else:
+ print(l[:-1])
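
gen-fops.py stamps out one wrapper per fop by substituting placeholders such as @NAME@ into the templates above through the tree's generator.py. The substitution itself is plain string templating; a reduced stand-in that only handles @NAME@ (not the real generate()/fop_subs machinery):

    FOPS_LINE_TEMPLATE = "\t.@NAME@ = metadisp_@NAME@,"

    def render(template, name):
        return template.replace('@NAME@', name)

    for fop in ('writev', 'readv', 'ftruncate'):
        print(render(FOPS_LINE_TEMPLATE, fop))
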
diff --git a/xlators/features/metadisp/src/metadisp-create.c b/xlators/features/metadisp/src/metadisp-create.c
new file mode 100644
index 00000000000..f8c9798dd59
--- /dev/null
+++ b/xlators/features/metadisp/src/metadisp-create.c
@@ -0,0 +1,101 @@
+#include "metadisp.h"
+#include <glusterfs/call-stub.h>
+
+/**
+ * Create, like stat, is a two-step process. We send a create
+ * to the METADATA_CHILD, then send another create to the DATA_CHILD.
+ *
+ * We do the metadata child first to ensure that the ACLs are enforced.
+ */
+
+int32_t
+metadisp_create_dentry_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd,
+ inode_t *inode, struct iatt *buf,
+ struct iatt *preparent, struct iatt *postparent,
+ dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(create, frame, op_ret, op_errno, fd, inode, buf,
+ preparent, postparent, xdata);
+ return 0;
+}
+
+int32_t
+metadisp_create_resume(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ int32_t flags, mode_t mode, mode_t umask, fd_t *fd,
+ dict_t *xdata)
+{
+ // create the backend data inode
+ STACK_WIND(frame, metadisp_create_dentry_cbk, DATA_CHILD(this),
+ DATA_CHILD(this)->fops->create, loc, flags, mode, umask, fd,
+ xdata);
+ return 0;
+}
+
+int32_t
+metadisp_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd, inode_t *inode,
+ struct iatt *buf, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ METADISP_TRACE("%d %d", op_ret, op_errno);
+ call_stub_t *stub = cookie;
+ if (op_ret != 0) {
+ STACK_UNWIND_STRICT(create, frame, op_ret, op_errno, fd, inode, buf,
+ preparent, postparent, xdata);
+ return 0;
+ }
+
+ if (stub == NULL) {
+ goto unwind;
+ }
+
+ if (stub->poison) {
+ call_stub_destroy(stub);
+ return 0;
+ }
+
+ call_resume(stub);
+ return 0;
+
+unwind:
+ STACK_UNWIND_STRICT(create, frame, -1, EINVAL, NULL, NULL, NULL, NULL, NULL,
+ NULL);
+ return 0;
+}
+
+int32_t
+metadisp_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ mode_t mode, mode_t umask, fd_t *fd, dict_t *xdata)
+{
+ METADISP_TRACE(".");
+
+ loc_t backend_loc = {
+ 0,
+ };
+ call_stub_t *stub = NULL;
+ uuid_t *gfid_req = NULL;
+
+ RESOLVE_GFID_REQ(xdata, gfid_req, out);
+
+ if (build_backend_loc(*gfid_req, loc, &backend_loc)) {
+ goto unwind;
+ }
+
+ frame->local = loc;
+
+ stub = fop_create_stub(frame, metadisp_create_resume, &backend_loc, flags,
+ mode, umask, fd, xdata);
+
+ STACK_WIND_COOKIE(frame, metadisp_create_cbk, stub, METADATA_CHILD(this),
+ METADATA_CHILD(this)->fops->create, loc, flags, mode,
+ umask, fd, xdata);
+ return 0;
+
+unwind:
+ STACK_UNWIND_STRICT(create, frame, -1, EINVAL, NULL, NULL, NULL, NULL, NULL,
+ NULL);
+ return 0;
+out:
+ return -1;
+}
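
The create path above is a two-phase wind: the request first goes to METADATA_CHILD so that the dentry is created and ACLs are enforced on the metadata view, and only on success is the saved call stub resumed to create the data file on DATA_CHILD under its /$GFID backend path. A toy Python model of that control flow (all names are illustrative, not part of the xlator API):

    def metadata_child_create(path):
        print("METADATA child: create dentry, enforce ACLs for", path)
        return 0                                  # op_ret == 0 means success

    def data_child_create(backend_path):
        print("DATA child: create backend file", backend_path)
        return 0

    def metadisp_create(path, gfid):
        if metadata_child_create(path) != 0:      # phase 1: metadata first
            return -1                             # unwind with the metadata error
        return data_child_create('/' + gfid)      # phase 2: resume stub on /$GFID

    metadisp_create('/dir/file', '00000000-0000-0000-0000-000000000001')
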
diff --git a/xlators/features/metadisp/src/metadisp-fops.h b/xlators/features/metadisp/src/metadisp-fops.h
new file mode 100644
index 00000000000..56dd427cf34
--- /dev/null
+++ b/xlators/features/metadisp/src/metadisp-fops.h
@@ -0,0 +1,51 @@
+#ifndef GF_METADISP_FOPS_H_
+#define GF_METADISP_FOPS_H_
+
+#include <glusterfs/xlator.h>
+#include <glusterfs/dict.h>
+#include <glusterfs/glusterfs.h>
+
+#include <sys/types.h>
+
+/* fops in here are defined in their own file. Every other fop is just defined
+ * inline in fops.c */
+
+int
+metadisp_readdir(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t off, dict_t *xdata);
+
+int
+metadisp_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t off, dict_t *dict);
+
+int
+metadisp_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata);
+
+int
+metadisp_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ mode_t mode, mode_t umask, fd_t *fd, dict_t *xdata);
+
+int
+metadisp_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ fd_t *fd, dict_t *xdata);
+
+int
+metadisp_stat(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata);
+
+int
+metadisp_inodelk(call_frame_t *frame, xlator_t *this, const char *volume,
+ loc_t *loc, int32_t cmd, struct gf_flock *lock, dict_t *xdata);
+
+int
+metadisp_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags,
+ dict_t *xdata);
+
+int
+metadisp_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflag,
+ dict_t *xdata);
+
+int
+metadisp_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ struct iatt *stbuf, int32_t valid, dict_t *xdata);
+
+#endif
diff --git a/xlators/features/metadisp/src/metadisp-fsync.c b/xlators/features/metadisp/src/metadisp-fsync.c
new file mode 100644
index 00000000000..2e46fa84eac
--- /dev/null
+++ b/xlators/features/metadisp/src/metadisp-fsync.c
@@ -0,0 +1,54 @@
+
+#include "metadisp.h"
+#include <glusterfs/call-stub.h>
+
+int32_t
+metadisp_fsync_resume(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ int32_t flags, dict_t *xdata)
+{
+ STACK_WIND(frame, default_fsync_cbk, DATA_CHILD(this),
+ DATA_CHILD(this)->fops->fsync, fd, flags, xdata);
+ return 0;
+}
+
+int32_t
+metadisp_fsync_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
+ call_stub_t *stub = NULL;
+ if (cookie) {
+ stub = cookie;
+ }
+
+ if (op_ret != 0) {
+ goto unwind;
+ }
+
+ if (stub->poison) {
+ call_stub_destroy(stub);
+ stub = NULL;
+ return 0;
+ }
+
+ call_resume(stub);
+ return 0;
+
+unwind:
+ if (stub) {
+ call_stub_destroy(stub);
+ }
+ STACK_UNWIND_STRICT(fsync, frame, op_ret, op_errno, prebuf, postbuf, xdata);
+ return 0;
+}
+
+int32_t
+metadisp_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags,
+ dict_t *xdata)
+{
+ call_stub_t *stub = NULL;
+ stub = fop_fsync_stub(frame, metadisp_fsync_resume, fd, flags, xdata);
+ STACK_WIND_COOKIE(frame, metadisp_fsync_cbk, stub, METADATA_CHILD(this),
+ METADATA_CHILD(this)->fops->fsync, fd, flags, xdata);
+ return 0;
+}
diff --git a/xlators/features/metadisp/src/metadisp-lookup.c b/xlators/features/metadisp/src/metadisp-lookup.c
new file mode 100644
index 00000000000..27d90c9f746
--- /dev/null
+++ b/xlators/features/metadisp/src/metadisp-lookup.c
@@ -0,0 +1,90 @@
+#include "metadisp.h"
+#include <glusterfs/call-stub.h>
+
+/**
+ * Lookup, like stat, is a two-step process for grabbing the metadata details
+ * as well as the data details.
+ */
+
+int32_t
+metadisp_backend_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, dict_t *xdata,
+ struct iatt *postparent)
+{
+ METADISP_TRACE("backend_lookup_cbk");
+ if (op_errno == ENOENT) {
+ op_errno = ENODATA;
+ op_ret = -1;
+ }
+ STACK_UNWIND_STRICT(lookup, frame, op_ret, op_errno, inode, buf, xdata,
+ postparent);
+ return 0;
+}
+
+int32_t
+metadisp_backend_lookup_resume(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ dict_t *xdata)
+{
+ METADISP_TRACE("backend_lookup_resume");
+ loc_t backend_loc = {
+ 0,
+ };
+ if (build_backend_loc(loc->gfid, loc, &backend_loc)) {
+ goto unwind;
+ }
+
+ STACK_WIND(frame, metadisp_backend_lookup_cbk, DATA_CHILD(this),
+ DATA_CHILD(this)->fops->lookup, &backend_loc, xdata);
+ return 0;
+
+unwind:
+ STACK_UNWIND_STRICT(lookup, frame, -1, EINVAL, NULL, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+metadisp_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, dict_t *xdata, struct iatt *postparent)
+{
+ METADISP_TRACE("%d %d", op_ret, op_errno);
+ call_stub_t *stub = NULL;
+ stub = cookie;
+
+ if (op_ret != 0) {
+ goto unwind;
+ }
+
+ if (!IA_ISREG(buf->ia_type)) {
+ goto unwind;
+ } else if (!stub) {
+ op_errno = EINVAL;
+ goto unwind;
+ }
+
+ METADISP_TRACE("resuming stub");
+
+ // memcpy(stub->args.loc.gfid, buf->ia_gfid, sizeof(uuid_t));
+ call_resume(stub);
+ return 0;
+unwind:
+ METADISP_TRACE("unwinding %d %d", op_ret, op_errno);
+ STACK_UNWIND_STRICT(lookup, frame, op_ret, op_errno, inode, buf, xdata,
+ postparent);
+ if (stub) {
+ call_stub_destroy(stub);
+ }
+ return 0;
+}
+
+int32_t
+metadisp_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ METADISP_TRACE("lookup");
+ call_stub_t *stub = NULL;
+ stub = fop_lookup_stub(frame, metadisp_backend_lookup_resume, loc, xdata);
+ STACK_WIND_COOKIE(frame, metadisp_lookup_cbk, stub, METADATA_CHILD(this),
+ METADATA_CHILD(this)->fops->lookup, loc, xdata);
+ return 0;
+}
diff --git a/xlators/features/metadisp/src/metadisp-open.c b/xlators/features/metadisp/src/metadisp-open.c
new file mode 100644
index 00000000000..64814afe636
--- /dev/null
+++ b/xlators/features/metadisp/src/metadisp-open.c
@@ -0,0 +1,70 @@
+#include <glusterfs/call-stub.h>
+#include "metadisp.h"
+
+int32_t
+metadisp_open_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd, dict_t *xdata)
+{
+ METADISP_TRACE("got open results %d %d", op_ret, op_errno);
+
+ call_stub_t *stub = NULL;
+ if (cookie) {
+ stub = cookie;
+ }
+
+ if (op_ret != 0) {
+ goto unwind;
+ }
+
+ if (!stub) {
+ goto unwind;
+ }
+
+ if (stub->poison) {
+ call_stub_destroy(stub);
+ stub = NULL;
+ return 0;
+ }
+
+ call_resume(stub);
+ return 0;
+
+unwind:
+ if (stub) {
+ call_stub_destroy(stub);
+ }
+ STACK_UNWIND_STRICT(open, frame, op_ret, op_errno, fd, xdata);
+ return 0;
+}
+
+int32_t
+metadisp_open_resume(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ int32_t flags, fd_t *fd, dict_t *xdata)
+{
+ STACK_WIND_COOKIE(frame, metadisp_open_cbk, NULL, DATA_CHILD(this),
+ DATA_CHILD(this)->fops->open, loc, flags, fd, xdata);
+ return 0;
+}
+
+int32_t
+metadisp_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ fd_t *fd, dict_t *xdata)
+{
+ call_stub_t *stub = NULL;
+ loc_t backend_loc = {
+ 0,
+ };
+
+ if (build_backend_loc(loc->gfid, loc, &backend_loc)) {
+ goto unwind;
+ }
+
+ stub = fop_open_stub(frame, metadisp_open_resume, &backend_loc, flags, fd,
+ xdata);
+ STACK_WIND_COOKIE(frame, metadisp_open_cbk, stub, METADATA_CHILD(this),
+ METADATA_CHILD(this)->fops->open, loc, flags, fd, xdata);
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT(open, frame, -1, EINVAL, NULL, NULL);
+ return 0;
+}
diff --git a/xlators/features/metadisp/src/metadisp-readdir.c b/xlators/features/metadisp/src/metadisp-readdir.c
new file mode 100644
index 00000000000..5f840b1e88f
--- /dev/null
+++ b/xlators/features/metadisp/src/metadisp-readdir.c
@@ -0,0 +1,65 @@
+#include "metadisp.h"
+
+/**
+ * With a small change to the posix xlator, readdir and readdirp become
+ * surprisingly simple.
+ *
+ * The issue with separating the files' backend data from their metadata
+ * is that a readdir must now read from multiple sources to coalesce the
+ * directory entries.
+ *
+ * The way we do this is to tell the METADATA_CHILD that, while running
+ * readdirp, each file entry should have its stat wound to the
+ * 'stat-source-of-truth' xlator.
+ *
+ * See metadisp_stat for how it handles the winds coming back from posix.
+ */
+
+int32_t
+metadisp_readdir(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t off, dict_t *xdata)
+{
+ METADISP_TRACE(".");
+ /*
+ * Always use readdirp, even if the original was readdir. Why? Because NFS.
+ * There are multiple translations between Gluster, UNIX, and NFS stat
+ * structures in that path. One of them uses the type etc. from the stat
+ * structure, which is only filled in by readdirp. If we use readdir, the
+ * entries do actually go all the way back to the client and are visible in
+ * getdents, but then the readdir throws them away because of the
+ * uninitialized type.
+ */
+ GF_UNUSED int32_t ret;
+ if (!xdata) {
+ xdata = dict_new();
+ }
+
+ // ret = dict_set_int32 (xdata, "list-xattr", 1);
+
+ // I'm my own source of truth!
+ ret = dict_set_static_ptr(xdata, "stat-source-of-truth", (void *)this);
+
+ STACK_WIND(frame, default_readdirp_cbk, METADATA_CHILD(this),
+ METADATA_CHILD(this)->fops->readdirp, fd, size, off, xdata);
+
+ return 0;
+}
+
+int32_t
+metadisp_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t off, dict_t *xdata)
+{
+ METADISP_TRACE(".");
+ if (!xdata) {
+ xdata = dict_new();
+ }
+ GF_UNUSED int32_t ret;
+ // ret = dict_set_int32 (xdata, "list-xattr", 1);
+
+ // I'm my own source of truth!
+ ret = dict_set_static_ptr(xdata, "stat-source-of-truth", (void *)this);
+
+ STACK_WIND(frame, default_readdirp_cbk, METADATA_CHILD(this),
+ METADATA_CHILD(this)->fops->readdirp, fd, size, off, xdata);
+ return 0;
+}
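
A hedged sketch of the consumer side hinted at in the comment above: how the metadata child might use the 'stat-source-of-truth' pointer while filling readdirp entries. Only the two dict keys and the idea of stashing this xlator's pointer come from this patch; the helper name, its parameters, and the syncop-based wiring (assuming the usual syncop_stat() signature with xdata in/out arguments) are illustrative assumptions.

    /* Hypothetical helper on the metadata child's side. */
    static int
    fill_entry_from_source_of_truth(dict_t *xdata, loc_t *entry_loc,
                                    struct iatt *stbuf)
    {
        xlator_t *source = NULL;
        dict_t *req = NULL;
        int ret = -1;

        if (dict_get_ptr(xdata, "stat-source-of-truth", (void **)&source))
            return 0; /* no hint; keep the locally generated iatt */

        req = dict_new();
        if (!req)
            return -ENOMEM;

        /* Mark the stat as internal so metadisp_stat forwards it straight
         * to the data child instead of looping through the metadata side. */
        ret = dict_set_int32(req, "syncop-internal-from-posix", 1);
        if (ret == 0)
            ret = syncop_stat(source, entry_loc, stbuf, req, NULL);

        dict_unref(req);
        return ret;
    }
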
diff --git a/xlators/features/metadisp/src/metadisp-setattr.c b/xlators/features/metadisp/src/metadisp-setattr.c
new file mode 100644
index 00000000000..6991cf644f3
--- /dev/null
+++ b/xlators/features/metadisp/src/metadisp-setattr.c
@@ -0,0 +1,90 @@
+#include "metadisp.h"
+#include <glusterfs/call-stub.h>
+
+int32_t
+metadisp_backend_setattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ struct iatt *statpre, struct iatt *statpost,
+ dict_t *xdata)
+
+{
+ METADISP_TRACE("backend_setattr_cbk");
+ if (op_errno == ENOENT) {
+ op_errno = ENODATA;
+ op_ret = -1;
+ }
+ STACK_UNWIND_STRICT(setattr, frame, op_ret, op_errno, statpre, statpost,
+ xdata);
+ return 0;
+}
+
+int32_t
+metadisp_backend_setattr_resume(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ struct iatt *stbuf, int32_t valid,
+ dict_t *xdata)
+
+{
+ METADISP_TRACE("backend_setattr_resume");
+ loc_t backend_loc = {
+ 0,
+ };
+ if (build_backend_loc(loc->gfid, loc, &backend_loc)) {
+ goto unwind;
+ }
+
+ STACK_WIND(frame, metadisp_backend_setattr_cbk, DATA_CHILD(this),
+ DATA_CHILD(this)->fops->setattr, &backend_loc, stbuf, valid,
+ xdata);
+ return 0;
+
+unwind:
+ STACK_UNWIND_STRICT(setattr, frame, -1, EINVAL, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+metadisp_setattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *statpre,
+ struct iatt *statpost, dict_t *xdata)
+{
+ METADISP_TRACE("%d %d", op_ret, op_errno);
+ call_stub_t *stub = NULL;
+ stub = cookie;
+
+ if (op_ret != 0) {
+ goto unwind;
+ }
+
+ if (!IA_ISREG(statpost->ia_type)) {
+ goto unwind;
+ } else if (!stub) {
+ op_errno = EINVAL;
+ goto unwind;
+ }
+
+ METADISP_TRACE("resuming stub");
+ call_resume(stub);
+ return 0;
+unwind:
+ METADISP_TRACE("unwinding %d %d", op_ret, op_errno);
+ STACK_UNWIND_STRICT(setattr, frame, op_ret, op_errno, statpre, statpost,
+ xdata);
+ if (stub) {
+ call_stub_destroy(stub);
+ }
+ return 0;
+}
+
+int32_t
+metadisp_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ struct iatt *stbuf, int32_t valid, dict_t *xdata)
+{
+ METADISP_TRACE("setattr");
+ call_stub_t *stub = NULL;
+ stub = fop_setattr_stub(frame, metadisp_backend_setattr_resume, loc, stbuf,
+ valid, xdata);
+ STACK_WIND_COOKIE(frame, metadisp_setattr_cbk, stub, METADATA_CHILD(this),
+ METADATA_CHILD(this)->fops->setattr, loc, stbuf, valid,
+ xdata);
+ return 0;
+}
diff --git a/xlators/features/metadisp/src/metadisp-stat.c b/xlators/features/metadisp/src/metadisp-stat.c
new file mode 100644
index 00000000000..b06d0dbcddd
--- /dev/null
+++ b/xlators/features/metadisp/src/metadisp-stat.c
@@ -0,0 +1,124 @@
+#include "metadisp.h"
+#include <glusterfs/call-stub.h>
+
+/**
+ * The stat flow in METADISP is complicated because we must
+ * ensure a few things:
+ * 1. stat, on the path within the metadata layer,
+ * MUST get the backend FD of the data layer.
+ * --- we wind to the metadata layer, then the data layer.
+ *
+ * 2. the metadata layer MUST be able to ask the data
+ * layer for stat information.
+ * --- this is 'syncop-internal-from-posix'
+ *
+ * 3. when the metadata exists BUT the data is missing,
+ * we MUST mark the backend file as bad and heal it.
+ */
+
+int32_t
+metadisp_stat_backend_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf,
+ dict_t *xdata)
+{
+ METADISP_TRACE("got backend stat results %d %d", op_ret, op_errno);
+ if (op_errno == ENOENT) {
+        STACK_UNWIND_STRICT(stat, frame, -1, ENODATA, NULL, NULL);
+ return 0;
+ }
+ STACK_UNWIND_STRICT(stat, frame, op_ret, op_errno, buf, xdata);
+ return 0;
+}
+
+int32_t
+metadisp_stat_resume(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ dict_t *xdata)
+{
+ METADISP_TRACE("winding stat to path %s", loc->path);
+ if (gf_uuid_is_null(loc->gfid)) {
+ METADISP_TRACE("bad object, sending EUCLEAN");
+        STACK_UNWIND_STRICT(stat, frame, -1, EUCLEAN, NULL, NULL);
+ return 0;
+ }
+
+ STACK_WIND(frame, metadisp_stat_backend_cbk, SECOND_CHILD(this),
+ SECOND_CHILD(this)->fops->stat, loc, xdata);
+ return 0;
+}
+
+int32_t
+metadisp_stat_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf,
+ dict_t *xdata)
+{
+ call_stub_t *stub = NULL;
+
+ METADISP_TRACE("got stat results %d %d", op_ret, op_errno);
+
+ if (cookie) {
+ stub = cookie;
+ }
+
+ if (op_ret != 0) {
+ goto unwind;
+ }
+
+    // only use the stub for regular files
+ if (!IA_ISREG(buf->ia_type)) {
+ goto unwind;
+ }
+
+ if (stub->poison) {
+ call_stub_destroy(stub);
+ stub = NULL;
+ return 0;
+ }
+
+ call_resume(stub);
+ return 0;
+
+unwind:
+ if (stub) {
+ call_stub_destroy(stub);
+ }
+ STACK_UNWIND_STRICT(stat, frame, op_ret, op_errno, buf, xdata);
+ return 0;
+}
+
+int32_t
+metadisp_stat(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ call_stub_t *stub = NULL;
+ int32_t ret = 0;
+ loc_t backend_loc = {
+ 0,
+ };
+ METADISP_FILTER_ROOT(stat, loc, xdata);
+
+ if (build_backend_loc(loc->gfid, loc, &backend_loc)) {
+ goto unwind;
+ }
+
+ if (dict_get_int32(xdata, "syncop-internal-from-posix", &ret) == 0) {
+ // if we've just been sent a stat from posix, then we know
+ // that we must send down a stat for a file to the second child.
+ //
+ // that means we can skip the stat for the first child and just
+ // send to the data disk.
+ METADISP_TRACE("got syncop-internal-from-posix");
+ STACK_WIND(frame, default_stat_cbk, DATA_CHILD(this),
+ DATA_CHILD(this)->fops->stat, &backend_loc, xdata);
+ return 0;
+ }
+
+ // we do not know if the request is for a file, folder, etc. wind
+ // to first child to find out.
+ stub = fop_stat_stub(frame, metadisp_stat_resume, &backend_loc, xdata);
+ METADISP_TRACE("winding stat to first child %s", loc->path);
+ STACK_WIND_COOKIE(frame, metadisp_stat_cbk, stub, METADATA_CHILD(this),
+ METADATA_CHILD(this)->fops->stat, loc, xdata);
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT(stat, frame, -1, EINVAL, NULL, NULL);
+ return 0;
+}
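
To make the three requirements in the header comment easier to follow, here is a compressed view of the dispatch metadisp_stat performs (names exactly as in this file, no new behaviour):

    metadisp_stat(loc)
        path == "/"                         -> metadata child only (root filter)
        "syncop-internal-from-posix" set    -> data child directly, using the
                                               gfid-based backend_loc
        otherwise                           -> metadata child first; for regular
                                               files metadisp_stat_cbk resumes a
                                               stub that stats the backend_loc on
                                               the data child (ENOENT there is
                                               reported as ENODATA, EUCLEAN if the
                                               gfid is still null)
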
diff --git a/xlators/features/metadisp/src/metadisp-unlink.c b/xlators/features/metadisp/src/metadisp-unlink.c
new file mode 100644
index 00000000000..1f6a8eb35ce
--- /dev/null
+++ b/xlators/features/metadisp/src/metadisp-unlink.c
@@ -0,0 +1,160 @@
+
+#include "metadisp.h"
+#include <glusterfs/call-stub.h>
+
+/**
+ * The unlink flow in metadisp is complicated because we must
+ * ensure that UNLINK removes both the metadata objects
+ * and the data objects.
+ */
+
+int32_t
+metadisp_unlink_resume(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ int xflag, dict_t *xdata)
+{
+ METADISP_TRACE("winding backend unlink to path %s", loc->path);
+ STACK_WIND(frame, default_unlink_cbk, DATA_CHILD(this),
+ DATA_CHILD(this)->fops->unlink, loc, xflag, xdata);
+ return 0;
+}
+
+int32_t
+metadisp_unlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *preparent,
+ struct iatt *postparent, dict_t *xdata)
+{
+ METADISP_TRACE(". %d %d", op_ret, op_errno);
+
+ int ret = 0;
+ call_stub_t *stub = NULL;
+    uint32_t nlink = 0;
+
+ if (cookie) {
+ stub = cookie;
+ }
+
+ if (op_ret != 0) {
+ goto unwind;
+ }
+
+ if (stub->poison) {
+ call_stub_destroy(stub);
+ stub = NULL;
+ return 0;
+ }
+
+ ret = dict_get_uint32(xdata, GF_RESPONSE_LINK_COUNT_XDATA, &nlink);
+ if (ret != 0) {
+ op_errno = EINVAL;
+ op_ret = -1;
+ goto unwind;
+ }
+ METADISP_TRACE("frontend hardlink count %d %d", ret, nlink);
+ if (nlink > 1) {
+ goto unwind;
+ }
+
+ call_resume(stub);
+ return 0;
+
+unwind:
+ if (stub) {
+ call_stub_destroy(stub);
+ }
+ STACK_UNWIND_STRICT(unlink, frame, op_ret, op_errno, preparent, postparent,
+ xdata);
+ return 0;
+}
+
+int32_t
+metadisp_unlink_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, dict_t *xdata,
+ struct iatt *postparent)
+{
+ call_stub_t *stub = NULL;
+
+ if (cookie) {
+ stub = cookie;
+ }
+
+ if (op_ret != 0) {
+ goto unwind;
+ }
+
+ // fail fast on empty gfid so we don't loop forever
+ if (gf_uuid_is_null(buf->ia_gfid)) {
+ op_ret = -1;
+ op_errno = ENODATA;
+ goto unwind;
+ }
+
+ // fill gfid since the stub is incomplete
+ memcpy(stub->args.loc.gfid, buf->ia_gfid, sizeof(uuid_t));
+ memcpy(stub->args.loc.pargfid, postparent->ia_gfid, sizeof(uuid_t));
+
+ if (stub->poison) {
+ call_stub_destroy(stub);
+ stub = NULL;
+ return 0;
+ }
+
+ call_resume(stub);
+ return 0;
+
+unwind:
+ if (stub) {
+ call_stub_destroy(stub);
+ }
+ STACK_UNWIND_STRICT(unlink, frame, op_ret, op_errno, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+metadisp_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflag,
+ dict_t *xdata)
+{
+ call_stub_t *stub = NULL;
+ loc_t backend_loc = {
+ 0,
+ };
+
+ if (gf_uuid_is_null(loc->gfid)) {
+ METADISP_TRACE("winding lookup for unlink to path %s", loc->path);
+
+ // loop back to ourselves after a lookup
+ stub = fop_unlink_stub(frame, metadisp_unlink, loc, xflag, xdata);
+ STACK_WIND_COOKIE(frame, metadisp_unlink_lookup_cbk, stub,
+ METADATA_CHILD(this),
+ METADATA_CHILD(this)->fops->lookup, loc, xdata);
+ return 0;
+ }
+
+ if (build_backend_loc(loc->gfid, loc, &backend_loc)) {
+ goto unwind;
+ }
+
+ //
+ // ensure we get the link count on the unlink response, so we can
+ // account for hardlinks before winding to the backend.
+ // NOTE:
+ // multiple xlators use GF_REQUEST_LINK_COUNT_XDATA. confirmation
+ // is needed to ensure that multiple requests will work in the same
+ // xlator stack.
+ //
+ if (!xdata) {
+ xdata = dict_new();
+ }
+ dict_set_int32(xdata, GF_REQUEST_LINK_COUNT_XDATA, 1);
+
+ METADISP_TRACE("winding frontend unlink to path %s", loc->path);
+ stub = fop_unlink_stub(frame, metadisp_unlink_resume, &backend_loc, xflag,
+ xdata);
+
+ STACK_WIND_COOKIE(frame, metadisp_unlink_cbk, stub, METADATA_CHILD(this),
+ METADATA_CHILD(this)->fops->unlink, loc, xflag, xdata);
+ return 0;
+unwind:
+ STACK_UNWIND_STRICT(unlink, frame, -1, EINVAL, NULL, NULL, NULL);
+ return 0;
+}
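
The hardlink handling in metadisp_unlink_cbk boils down to a single decision, restated below as a purely illustrative helper (not part of the translator); nlink is the value the metadata child reports in GF_RESPONSE_LINK_COUNT_XDATA for the unlinked name:

    static gf_boolean_t
    should_remove_backend_data(uint32_t nlink)
    {
        /* More than one remaining link means other names still reference
         * the data object, so keep it; otherwise resume the stub and
         * unlink the backend data as well. */
        return (nlink > 1) ? _gf_false : _gf_true;
    }
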
diff --git a/xlators/features/metadisp/src/metadisp.c b/xlators/features/metadisp/src/metadisp.c
new file mode 100644
index 00000000000..3c8f150cebc
--- /dev/null
+++ b/xlators/features/metadisp/src/metadisp.c
@@ -0,0 +1,46 @@
+#include <glusterfs/call-stub.h>
+
+#include "metadisp.h"
+#include "metadisp-fops.h"
+
+int32_t
+init(xlator_t *this)
+{
+ if (!this->children) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "not configured with children. exiting");
+ return -1;
+ }
+
+ if (!this->parents) {
+ gf_log(this->name, GF_LOG_WARNING, "dangling volume. check volfile ");
+ }
+
+ return 0;
+}
+
+void
+fini(xlator_t *this)
+{
+ return;
+}
+
+/* defined in fops.c */
+struct xlator_fops fops;
+
+struct xlator_cbks cbks = {};
+
+struct volume_options options[] = {
+ {.key = {NULL}},
+};
+
+xlator_api_t xlator_api = {
+ .init = init,
+ .fini = fini,
+ .fops = &fops,
+ .cbks = &cbks,
+ .options = options,
+ .op_version = {1},
+ .identifier = "metadisp",
+ .category = GF_EXPERIMENTAL,
+};
diff --git a/xlators/features/metadisp/src/metadisp.h b/xlators/features/metadisp/src/metadisp.h
new file mode 100644
index 00000000000..c8fd7a13c04
--- /dev/null
+++ b/xlators/features/metadisp/src/metadisp.h
@@ -0,0 +1,45 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef GF_METADISP_H_
+#define GF_METADISP_H_
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/dict.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+
+#define METADATA_CHILD(_this) FIRST_CHILD(_this)
+#define DATA_CHILD(_this) SECOND_CHILD(_this)
+
+int32_t
+build_backend_loc(uuid_t gfid, loc_t *src_loc, loc_t *dst_loc);
+
+#define METADISP_TRACE(_args...) gf_log("metadisp", GF_LOG_INFO, _args)
+
+#define METADISP_FILTER_ROOT(_op, _args...) \
+ if (strcmp(loc->path, "/") == 0) { \
+ STACK_WIND(frame, default_##_op##_cbk, METADATA_CHILD(this), \
+ METADATA_CHILD(this)->fops->_op, _args); \
+ return 0; \
+ }
+
+#define METADISP_FILTER_ROOT_BY_GFID(_op, _gfid, _args...) \
+ if (__is_root_gfid(_gfid)) { \
+ STACK_WIND(frame, default_##_op##_cbk, METADATA_CHILD(this), \
+ METADATA_CHILD(this)->fops->_op, _args); \
+ return 0; \
+ }
+
+#define RESOLVE_GFID_REQ(_dict, _dest, _lbl) \
+ VALIDATE_OR_GOTO(dict_get_ptr(_dict, "gfid-req", (void **)&_dest) == 0, \
+ _lbl)
+
+#endif /* GF_METADISP_H_ */
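
For orientation, a guessed volfile fragment showing how the two children are expected to be ordered: per the macros above, the first subvolume is the metadata side and the second is the data side. The type string and subvolume names are assumptions, not taken from this patch:

    volume metadisp-0
        type features/metadisp
        subvolumes meta-posix data-posix
    end-volume
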
diff --git a/xlators/features/namespace/Makefile.am b/xlators/features/namespace/Makefile.am
new file mode 100644
index 00000000000..a985f42a877
--- /dev/null
+++ b/xlators/features/namespace/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/xlators/features/namespace/src/Makefile.am b/xlators/features/namespace/src/Makefile.am
new file mode 100644
index 00000000000..e355d42cf4e
--- /dev/null
+++ b/xlators/features/namespace/src/Makefile.am
@@ -0,0 +1,17 @@
+xlator_LTLIBRARIES = namespace.la
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
+
+namespace_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+
+namespace_la_SOURCES = namespace.c
+namespace_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+
+noinst_HEADERS = namespace.h
+
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \
+ -I$(top_srcdir)/xlators/lib/src
+
+AM_CFLAGS = -Wall $(GF_CFLAGS)
+
+CLEANFILES =
diff --git a/xlators/features/namespace/src/namespace.c b/xlators/features/namespace/src/namespace.c
new file mode 100644
index 00000000000..86c5ebee900
--- /dev/null
+++ b/xlators/features/namespace/src/namespace.c
@@ -0,0 +1,1344 @@
+/*
+ * Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ *
+ * xlators/features/namespace:
+ * This translator tags each request with a namespace hash,
+ * which then can be used in later translators to track and
+ * throttle fops per namespace.
+ */
+
+#include <sys/types.h>
+
+#include <glusterfs/defaults.h>
+#include <glusterfs/hashfn.h>
+#include <glusterfs/logging.h>
+#include "namespace.h"
+
+/* Return codes for common path parsing functions. */
+enum _path_parse_result {
+ PATH_PARSE_RESULT_NO_PATH = 0,
+ PATH_PARSE_RESULT_FOUND = 1,
+ PATH_PARSE_RESULT_IS_GFID = 2,
+};
+
+typedef enum _path_parse_result path_parse_result_t;
+
+/* Clean up an ns_local struct. Wipe a loc (its inode is ref'd, so we're good.)
+ */
+static inline void
+ns_local_cleanup(ns_local_t *local)
+{
+ if (!local) {
+ return;
+ }
+
+ loc_wipe(&local->loc);
+ GF_FREE(local);
+}
+
+/* Create a new ns_local. We ref the inode, fake a new loc struct, and stash
+ * the stub given to us. */
+static inline ns_local_t *
+ns_local_new(call_stub_t *stub, inode_t *inode)
+{
+ ns_local_t *local = NULL;
+ loc_t loc = {
+ 0,
+ };
+
+ if (!stub || !inode) {
+ goto out;
+ }
+
+ local = GF_CALLOC(1, sizeof(ns_local_t), 0);
+ if (local == NULL) {
+ goto out;
+ }
+
+ /* Set up a fake loc_t struct to give to the getxattr call. */
+ gf_uuid_copy(loc.gfid, inode->gfid);
+ loc.inode = inode_ref(inode);
+
+ /* If for some reason inode_ref() fails, then just give up. */
+ if (!loc.inode) {
+ GF_FREE(local);
+ goto out;
+ }
+
+ local->stub = stub;
+ local->loc = loc;
+
+out:
+ return local;
+}
+
+/* Try parsing a path string. If the path string is a GFID, return
+ * PATH_PARSE_RESULT_IS_GFID. If the path is empty, return
+ * PATH_PARSE_RESULT_NO_PATH. Otherwise hash the top-level directory name
+ * (or '/' itself for the top level) and store it in the info struct. */
+static path_parse_result_t
+parse_path(ns_info_t *info, const char *path)
+{
+ int len = 0;
+ const char *ns_begin = path;
+ const char *ns_end = NULL;
+
+ if (!path || strlen(path) == 0) {
+ return PATH_PARSE_RESULT_NO_PATH;
+ }
+
+ if (path[0] == '<') {
+ return PATH_PARSE_RESULT_IS_GFID;
+ }
+
+ /* Right now we only want the top-level directory, so
+ * skip the initial '/' and read until the next '/'. */
+ while (*ns_begin == '/') {
+ ns_begin++;
+ }
+
+ /* ns_end will point to the next '/' or NULL if there is no delimiting
+ * '/' (i.e. "/directory" or the top level "/") */
+ ns_end = strchr(ns_begin, '/');
+ len = ns_end ? (ns_end - ns_begin) : strlen(ns_begin);
+
+ if (len != 0) {
+ info->hash = SuperFastHash(ns_begin, len);
+ } else {
+ /* If our substring is empty, then we can hash '/' instead.
+ * '/' is used in the namespace config for the top-level
+ * namespace. */
+ info->hash = SuperFastHash("/", 1);
+ }
+
+ info->found = _gf_true;
+ return PATH_PARSE_RESULT_FOUND;
+}
+
+/* Cache namespace info stored in the stack (info) into the inode. */
+static int
+ns_inode_ctx_put(inode_t *inode, xlator_t *this, ns_info_t *info)
+{
+ ns_info_t *cached_ns_info = NULL;
+ uint64_t ns_as_64 = 0;
+ int ret = -1;
+
+ if (!inode || !this) {
+ gf_log(this ? this->name : "namespace", GF_LOG_WARNING,
+ "Need a valid inode and xlator to cache ns_info.");
+ ret = -1;
+ goto out;
+ }
+
+ cached_ns_info = GF_CALLOC(1, sizeof(ns_info_t), 0);
+
+ /* If we've run out of memory, then return ENOMEM. */
+ if (cached_ns_info == NULL) {
+ gf_log(this->name, GF_LOG_WARNING, "No memory to cache ns_info.");
+ ret = -(ENOMEM);
+ goto out;
+ }
+
+ *cached_ns_info = *info;
+ ns_as_64 = (uint64_t)(uintptr_t)cached_ns_info;
+
+ ret = inode_ctx_put(inode, this, ns_as_64);
+
+ if (ret) {
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret && cached_ns_info) {
+ GF_FREE(cached_ns_info);
+ }
+
+ return ret;
+}
+
+/* Retrieve namespace info cached in the inode into the stack for use in later
+ * translators. */
+static int
+ns_inode_ctx_get(inode_t *inode, xlator_t *this, ns_info_t *info)
+{
+ ns_info_t *cached_ns_info = NULL;
+ uint64_t ns_as_64 = 0;
+ int ret = -1;
+
+ if (!inode) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ ret = inode_ctx_get(inode, this, &ns_as_64);
+
+ if (!ret) {
+ cached_ns_info = (ns_info_t *)(uintptr_t)ns_as_64;
+ *info = *cached_ns_info;
+ }
+
+out:
+ return ret;
+}
+
+/* This callback is the top of the unwind path of our attempt to get the path
+ * manually from the posix translator. We'll try to parse the path returned
+ * if it exists, then cache the hash if possible. Then just return to the
+ * default stub that we provide in the local, since there's nothing else to do
+ * once we've gotten the namespace hash. */
+int32_t
+get_path_resume_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = PATH_PARSE_RESULT_NO_PATH;
+ call_frame_t *resume_frame = NULL;
+ ns_local_t *local = NULL;
+ call_stub_t *stub = NULL;
+ ns_info_t *info = NULL;
+ char *path = NULL;
+
+ GF_VALIDATE_OR_GOTO(this->name, frame, out);
+ local = frame->local;
+
+ GF_VALIDATE_OR_GOTO(this->name, local, out);
+ stub = local->stub;
+
+ GF_VALIDATE_OR_GOTO(this->name, stub, out);
+ /* Get the ns_info from the frame that we will eventually resume,
+ * not the frame that we're going to destroy (frame). */
+ resume_frame = stub->frame;
+
+ GF_VALIDATE_OR_GOTO(this->name, resume_frame, out);
+ GF_VALIDATE_OR_GOTO(this->name, resume_frame->root, out);
+ info = &resume_frame->root->ns_info;
+
+ GF_VALIDATE_OR_GOTO(this->name, dict, out);
+
+ /* If we get a value back for the GET_ANCESTRY_PATH_KEY, then we
+ * try to access it and parse it like a path. */
+ if (!op_ret && !dict_get_str(dict, GET_ANCESTRY_PATH_KEY, &path)) {
+ gf_log(this->name, GF_LOG_DEBUG, "G>P %s retrieved path %s",
+ uuid_utoa(local->loc.gfid), path);
+ /* Now let's parse a path, finally. */
+ ret = parse_path(info, path);
+ }
+
+ if (ret == PATH_PARSE_RESULT_FOUND) {
+ /* If we finally found namespace, then stash it. */
+ ns_inode_ctx_put(local->loc.inode, this, info);
+
+ gf_log(this->name, GF_LOG_DEBUG, "G>P %s %10u namespace found %s",
+ uuid_utoa(local->loc.inode->gfid), info->hash, path);
+ } else if (ret == PATH_PARSE_RESULT_NO_PATH) {
+ gf_log(this->name, GF_LOG_WARNING, "G>P %s has no path",
+ uuid_utoa(local->loc.inode->gfid));
+ } else if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "G>P %s winding failed, still have gfid",
+ uuid_utoa(local->loc.inode->gfid));
+ }
+
+out:
+ /* Make sure to clean up local finally. */
+
+ if (frame) {
+ frame->local = NULL;
+ STACK_DESTROY(frame->root);
+ }
+
+ if (local) {
+ ns_local_cleanup(local);
+ }
+
+ if (stub) {
+ call_resume(stub);
+ }
+
+ return 0;
+}
+
+/* This function tries first to set a namespace based on the information that
+ * it can retrieve from an `loc_t`. This includes first looking for a cached
+ * namespace in the inode, then trying to parse the path string in the `loc_t`
+ * struct. If this fails, then it will try to call inode_path. */
+static path_parse_result_t
+set_ns_from_loc(const char *fn, call_frame_t *frame, xlator_t *this, loc_t *loc)
+{
+ path_parse_result_t ret = PATH_PARSE_RESULT_NO_PATH;
+ ns_private_t *priv = (ns_private_t *)this->private;
+ ns_info_t *info = &frame->root->ns_info;
+ char *path = NULL;
+
+ info->hash = 0;
+ info->found = _gf_false;
+
+ if (!priv->tag_namespaces) {
+ return ret;
+ }
+
+ /* This is our first pass at trying to get a path. Try getting
+ * from the inode context, then from the loc's path itself. */
+ if (!loc || !loc->path || !loc->inode) {
+ ret = PATH_PARSE_RESULT_NO_PATH;
+ } else if (!ns_inode_ctx_get(loc->inode, this, info)) {
+ ret = PATH_PARSE_RESULT_FOUND;
+ } else {
+ ret = parse_path(info, loc->path);
+ gf_log(this->name, GF_LOG_DEBUG, "%s: LOC retrieved path %s", fn,
+ loc->path);
+
+ if (ret == PATH_PARSE_RESULT_FOUND) {
+ ns_inode_ctx_put(loc->inode, this, info);
+ }
+ }
+
+ /* Keep trying by calling inode_path next, making sure to copy
+ the loc's gfid into its inode if necessary. */
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ if (gf_uuid_is_null(loc->inode->gfid)) {
+ gf_uuid_copy(loc->inode->gfid, loc->gfid);
+ }
+
+ if (inode_path(loc->inode, NULL, &path) >= 0 && path) {
+            ret = parse_path(info, path);
+ gf_log(this->name, GF_LOG_DEBUG, "%s: LOC retrieved path %s", fn,
+ path);
+
+ if (ret == PATH_PARSE_RESULT_FOUND) {
+ ns_inode_ctx_put(loc->inode, this, info);
+ }
+ }
+
+ if (path) {
+ GF_FREE(path);
+ }
+ }
+
+ /* Report our status, and if we have a GFID, we'll eventually try a
+ * GET_ANCESTRY_PATH_KEY wind when we return from this function. */
+ if (ret == PATH_PARSE_RESULT_FOUND) {
+ gf_log(this->name, GF_LOG_DEBUG,
+ "%s: LOC %s %10u namespace found for %s", fn,
+ uuid_utoa(loc->inode->gfid), info->hash, loc->path);
+ } else if (ret == PATH_PARSE_RESULT_NO_PATH) {
+ gf_log(this->name, GF_LOG_WARNING, "%s: LOC has no path", fn);
+ } else if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ /* Make sure to copy the inode's gfid for the eventual wind. */
+ if (gf_uuid_is_null(loc->inode->gfid)) {
+ gf_uuid_copy(loc->inode->gfid, loc->gfid);
+ }
+
+ gf_log(this->name, GF_LOG_DEBUG, "%s: LOC %s winding, looking for path",
+ fn, uuid_utoa(loc->inode->gfid));
+ }
+
+ return ret;
+}
+
+/* This function tries first to set a namespace based on the information that
+ * it can retrieve from an `fd_t`. This includes first looking for a cached
+ * namespace in the inode, then trying to call inode_path manually. */
+static path_parse_result_t
+set_ns_from_fd(const char *fn, call_frame_t *frame, xlator_t *this, fd_t *fd)
+{
+ path_parse_result_t ret = PATH_PARSE_RESULT_NO_PATH;
+ ns_private_t *priv = (ns_private_t *)this->private;
+ ns_info_t *info = &frame->root->ns_info;
+ char *path = NULL;
+
+ info->hash = 0;
+ info->found = _gf_false;
+
+ if (!priv->tag_namespaces) {
+ return ret;
+ }
+
+ /* This is our first pass at trying to get a path. Try getting
+ * from the inode context, then inode_path. */
+ if (!fd || !fd->inode) {
+ ret = PATH_PARSE_RESULT_NO_PATH;
+ } else if (!ns_inode_ctx_get(fd->inode, this, info)) {
+ ret = PATH_PARSE_RESULT_FOUND;
+ } else if (inode_path(fd->inode, NULL, &path) >= 0 && path) {
+ ret = parse_path(info, path);
+ gf_log(this->name, GF_LOG_DEBUG, "%s: FD retrieved path %s", fn, path);
+
+ if (ret == PATH_PARSE_RESULT_FOUND) {
+ ns_inode_ctx_put(fd->inode, this, info);
+ }
+ }
+
+ if (path) {
+ GF_FREE(path);
+ }
+
+ /* Report our status, and if we have a GFID, we'll eventually try a
+ * GET_ANCESTRY_PATH_KEY wind when we return from this function. */
+ if (ret == PATH_PARSE_RESULT_FOUND) {
+ gf_log(this->name, GF_LOG_DEBUG, "%s: FD %s %10u namespace found", fn,
+ uuid_utoa(fd->inode->gfid), info->hash);
+ } else if (ret == PATH_PARSE_RESULT_NO_PATH) {
+ gf_log(this->name, GF_LOG_WARNING, "%s: FD has no path", fn);
+ } else if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ gf_log(this->name, GF_LOG_DEBUG, "%s: FD %s winding, looking for path",
+ fn, uuid_utoa(fd->inode->gfid));
+ }
+
+ return ret;
+}
+
+/* This macro does the work of winding down a call of `getxattr` in the case
+ * that we have to retrieve the path manually. It assumes that there is a label
+ * called `wind` and the existence of several basic variables (frame, this),
+ * but otherwise is general enough for any fop (fd- or loc-based.) */
+#define GET_ANCESTRY_PATH_WIND(fop, inode, args...) \
+ do { \
+ ns_info_t *info = &frame->root->ns_info; \
+ call_frame_t *new_frame = NULL; \
+ ns_local_t *local = NULL; \
+ call_stub_t *stub = NULL; \
+ \
+ gf_log(this->name, GF_LOG_DEBUG, " %s winding, looking for path", \
+ uuid_utoa(inode->gfid)); \
+ \
+ new_frame = create_frame(this, this->ctx->pool); \
+ if (!new_frame) { \
+ gf_log(this->name, GF_LOG_ERROR, \
+ "Cannot allocate new call frame."); \
+ goto wind; \
+ } \
+ \
+ stub = fop_##fop##_stub(frame, default_##fop, args); \
+ if (!stub) { \
+ gf_log(this->name, GF_LOG_ERROR, \
+ "Cannot allocate function stub."); \
+ goto wind; \
+ } \
+ \
+ new_frame->root->uid = 0; \
+ new_frame->root->gid = 0; \
+ /* Put a phony "not found" NS info into this call. */ \
+ new_frame->root->ns_info = *info; \
+ \
+ local = ns_local_new(stub, inode); \
+ if (!local) { \
+ gf_log(this->name, GF_LOG_ERROR, \
+ "Cannot allocate function local."); \
+ goto wind; \
+ } \
+ \
+ new_frame->local = local; \
+ /* After allocating a new frame, a call stub (to \
+ * resume our current fop), and a local variables \
+ * struct (for our loc to getxattr and our resume \
+ * stub), call getxattr and unwind to get_path_resume_cbk. \
+ */ \
+ STACK_WIND(new_frame, get_path_resume_cbk, FIRST_CHILD(this), \
+ FIRST_CHILD(this)->fops->getxattr, &local->loc, \
+ GET_ANCESTRY_PATH_KEY, NULL); \
+ } while (0)
+
+int32_t
+ns_rmdir(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflags,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(rmdir, loc->inode, loc, xflags, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_rmdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rmdir, loc, xflags, xdata);
+ return 0;
+}
+
+int32_t
+ns_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflags,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(unlink, loc->inode, loc, xflags, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_unlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->unlink, loc, xflags, xdata);
+ return 0;
+}
+
+int32_t
+ns_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this,
+ newloc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(rename, newloc->inode, oldloc, newloc, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_rename_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rename, oldloc, newloc, xdata);
+ return 0;
+}
+
+int32_t
+ns_link(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this,
+ newloc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(link, newloc->inode, oldloc, newloc, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_link_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->link, oldloc, newloc, xdata);
+ return 0;
+}
+
+int32_t
+ns_mkdir(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ mode_t umask, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(mkdir, loc->inode, loc, mode, umask, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_mkdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mkdir, loc, mode, umask, xdata);
+ return 0;
+}
+
+int32_t
+ns_symlink(call_frame_t *frame, xlator_t *this, const char *linkname,
+ loc_t *loc, mode_t umask, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(symlink, loc->inode, linkname, loc, umask,
+ xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_symlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->symlink, linkname, loc, umask, xdata);
+ return 0;
+}
+
+int32_t
+ns_mknod(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ dev_t dev, mode_t umask, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(mknod, loc->inode, loc, mode, dev, umask, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_mknod_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mknod, loc, mode, dev, umask, xdata);
+ return 0;
+}
+
+int32_t
+ns_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ mode_t mode, mode_t umask, fd_t *fd, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(create, loc->inode, loc, flags, mode, umask, fd,
+ xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_create_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->create, loc, flags, mode, umask, fd,
+ xdata);
+ return 0;
+}
+
+int32_t
+ns_fsetattr(call_frame_t *frame, xlator_t *this, fd_t *fd, struct iatt *stbuf,
+ int32_t valid, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(fsetattr, fd->inode, fd, stbuf, valid, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_fsetattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetattr, fd, stbuf, valid, xdata);
+ return 0;
+}
+
+int32_t
+ns_setattr(call_frame_t *frame, xlator_t *this, loc_t *loc, struct iatt *stbuf,
+ int32_t valid, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(setattr, loc->inode, loc, stbuf, valid, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_setattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setattr, loc, stbuf, valid, xdata);
+ return 0;
+}
+
+int32_t
+ns_fremovexattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(fremovexattr, fd->inode, fd, name, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_fremovexattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fremovexattr, fd, name, xdata);
+ return 0;
+}
+
+int32_t
+ns_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(removexattr, loc->inode, loc, name, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_removexattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->removexattr, loc, name, xdata);
+ return 0;
+}
+
+int32_t
+ns_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
+ int32_t flags, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(setxattr, loc->inode, loc, dict, flags, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_setxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setxattr, loc, dict, flags, xdata);
+ return 0;
+}
+
+int32_t
+ns_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *dict,
+ int32_t flags, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(fsetxattr, fd->inode, fd, dict, flags, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_fsetxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags, xdata);
+ return 0;
+}
+
+int32_t
+ns_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(truncate, loc->inode, loc, offset, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_truncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->truncate, loc, offset, xdata);
+ return 0;
+}
+
+int32_t
+ns_ftruncate(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(ftruncate, fd->inode, fd, offset, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_ftruncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->ftruncate, fd, offset, xdata);
+ return 0;
+}
+
+int32_t
+ns_writev(call_frame_t *frame, xlator_t *this, fd_t *fd, struct iovec *vector,
+ int32_t count, off_t offset, uint32_t flags, struct iobref *iobref,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(writev, fd->inode, fd, vector, count, offset,
+ flags, iobref, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_writev_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->writev, fd, vector, count, offset,
+ flags, iobref, xdata);
+ return 0;
+}
+
+int32_t
+ns_lookup(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(lookup, loc->inode, loc, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_lookup_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lookup, loc, xdata);
+ return 0;
+}
+
+int32_t
+ns_stat(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(stat, loc->inode, loc, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_stat_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->stat, loc, xdata);
+ return 0;
+}
+
+int32_t
+ns_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(fstat, fd->inode, fd, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_fstat_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fstat, fd, xdata);
+ return 0;
+}
+
+int32_t
+ns_readlink(call_frame_t *frame, xlator_t *this, loc_t *loc, size_t size,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(readlink, loc->inode, loc, size, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_readlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readlink, loc, size, xdata);
+ return 0;
+}
+
+int32_t
+ns_access(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t mask,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(access, loc->inode, loc, mask, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_access_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->access, loc, mask, xdata);
+ return 0;
+}
+
+int32_t
+ns_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ fd_t *fd, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(open, fd->inode, loc, flags, fd, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_open_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->open, loc, flags, fd, xdata);
+ return 0;
+}
+
+int32_t
+ns_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(readv, fd->inode, fd, size, offset, flags,
+ xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_readv_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readv, fd, size, offset, flags, xdata);
+ return 0;
+}
+
+int32_t
+ns_flush(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(flush, fd->inode, fd, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_flush_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->flush, fd, xdata);
+ return 0;
+}
+
+int32_t
+ns_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(fsync, fd->inode, fd, datasync, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_fsync_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsync, fd, datasync, xdata);
+ return 0;
+}
+
+int32_t
+ns_opendir(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(opendir, loc->inode, loc, fd, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_opendir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->opendir, loc, fd, xdata);
+ return 0;
+}
+
+int32_t
+ns_fsyncdir(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t datasync,
+ dict_t *xdata)
+
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(fsyncdir, fd->inode, fd, datasync, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_fsyncdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsyncdir, fd, datasync, xdata);
+ return 0;
+}
+
+int32_t
+ns_rchecksum(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ int32_t len, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(rchecksum, fd->inode, fd, offset, len, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_rchecksum_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rchecksum, fd, offset, len, xdata);
+ return 0;
+}
+
+int32_t
+ns_statfs(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(statfs, loc->inode, loc, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_statfs_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->statfs, loc, xdata);
+ return 0;
+}
+
+int32_t
+ns_inodelk(call_frame_t *frame, xlator_t *this, const char *volume, loc_t *loc,
+ int32_t cmd, struct gf_flock *flock, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(inodelk, loc->inode, volume, loc, cmd, flock,
+ xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_inodelk_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->inodelk, volume, loc, cmd, flock,
+ xdata);
+ return 0;
+}
+
+int32_t
+ns_finodelk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
+ int32_t cmd, struct gf_flock *flock, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(finodelk, fd->inode, volume, fd, cmd, flock,
+ xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_finodelk_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->finodelk, volume, fd, cmd, flock,
+ xdata);
+ return 0;
+}
+
+int32_t
+ns_entrylk(call_frame_t *frame, xlator_t *this, const char *volume, loc_t *loc,
+ const char *basename, entrylk_cmd cmd, entrylk_type type,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(entrylk, loc->inode, volume, loc, basename, cmd,
+ type, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_entrylk_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->entrylk, volume, loc, basename, cmd,
+ type, xdata);
+ return 0;
+}
+
+int32_t
+ns_fentrylk(call_frame_t *frame, xlator_t *this, const char *volume, fd_t *fd,
+ const char *basename, entrylk_cmd cmd, entrylk_type type,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(fentrylk, fd->inode, volume, fd, basename, cmd,
+ type, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_fentrylk_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fentrylk, volume, fd, basename, cmd,
+ type, xdata);
+ return 0;
+}
+
+int32_t
+ns_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(fgetxattr, fd->inode, fd, name, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_fgetxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fgetxattr, fd, name, xdata);
+ return 0;
+}
+
+int32_t
+ns_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, const char *name,
+ dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(getxattr, loc->inode, loc, name, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_getxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->getxattr, loc, name, xdata);
+ return 0;
+}
+
+int32_t
+ns_lk(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t cmd,
+ struct gf_flock *flock, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(lk, fd->inode, fd, cmd, flock, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_lk_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->lk, fd, cmd, flock, xdata);
+ return 0;
+}
+
+int32_t
+ns_readdir(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(readdir, fd->inode, fd, size, offset, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_readdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readdir, fd, size, offset, xdata);
+
+ return 0;
+}
+
+int32_t
+ns_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, dict_t *dict)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(readdirp, fd->inode, fd, size, offset, dict);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_readdirp_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readdirp, fd, size, offset, dict);
+ return 0;
+}
+
+int32_t
+ns_xattrop(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ gf_xattrop_flags_t flags, dict_t *dict, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_loc(__FUNCTION__, frame, this, loc);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(xattrop, loc->inode, loc, flags, dict, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_xattrop_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->xattrop, loc, flags, dict, xdata);
+
+ return 0;
+}
+
+int32_t
+ns_fxattrop(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ gf_xattrop_flags_t flags, dict_t *dict, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(fxattrop, fd->inode, fd, flags, dict, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_fxattrop_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fxattrop, fd, flags, dict, xdata);
+
+ return 0;
+}
+
+int32_t
+ns_getspec(call_frame_t *frame, xlator_t *this, const char *key, int32_t flag)
+{
+ STACK_WIND(frame, default_getspec_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->getspec, key, flag);
+ return 0;
+}
+
+int32_t
+ns_fallocate(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t keep_size,
+ off_t offset, size_t len, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(fallocate, fd->inode, fd, keep_size, offset, len,
+ xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_fallocate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fallocate, fd, keep_size, offset, len,
+ xdata);
+ return 0;
+}
+
+int32_t
+ns_discard(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ size_t len, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(discard, fd->inode, fd, offset, len, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_discard_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->discard, fd, offset, len, xdata);
+ return 0;
+}
+
+int32_t
+ns_zerofill(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ off_t len, dict_t *xdata)
+{
+ path_parse_result_t ret = set_ns_from_fd(__FUNCTION__, frame, this, fd);
+
+ if (ret == PATH_PARSE_RESULT_IS_GFID) {
+ GET_ANCESTRY_PATH_WIND(zerofill, fd->inode, fd, offset, len, xdata);
+ return 0;
+ }
+wind:
+ STACK_WIND(frame, default_zerofill_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->zerofill, fd, offset, len, xdata);
+ return 0;
+}
+
+int
+ns_forget(xlator_t *this, inode_t *inode)
+{
+ uint64_t ns_as_64 = 0;
+ ns_info_t *info = NULL;
+
+ inode_ctx_del(inode, this, &ns_as_64);
+
+ if (!ns_as_64) {
+ return 0;
+ }
+
+ info = (ns_info_t *)(uintptr_t)ns_as_64;
+ GF_FREE(info);
+
+ return 0;
+}
+
+int32_t
+init(xlator_t *this)
+{
+ int32_t ret = -1;
+ ns_private_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO(GF_NAMESPACE, this, out);
+
+ if (!this->children || this->children->next) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "translator needs a single subvolume.");
+ goto out;
+ }
+
+ if (!this->parents) {
+ gf_log(this->name, GF_LOG_ERROR,
+ "dangling volume. please check volfile.");
+ goto out;
+ }
+
+ priv = GF_CALLOC(1, sizeof(ns_private_t), 0);
+
+ if (!priv) {
+ gf_log(this->name, GF_LOG_ERROR, "Can't allocate ns_priv structure.");
+ goto out;
+ }
+
+ GF_OPTION_INIT("tag-namespaces", priv->tag_namespaces, bool, out);
+
+ gf_log(this->name, GF_LOG_INFO, "Namespace xlator loaded");
+ this->private = priv;
+ ret = 0;
+
+out:
+ if (ret) {
+ GF_FREE(priv);
+ }
+
+ return ret;
+}
+
+void
+fini(xlator_t *this)
+{
+ GF_FREE(this->private);
+}
+
+int
+reconfigure(xlator_t *this, dict_t *options)
+{
+ int ret = -1;
+ ns_private_t *priv = NULL;
+
+ GF_VALIDATE_OR_GOTO(this->name, this->private, out);
+ GF_VALIDATE_OR_GOTO(this->name, options, out);
+
+ priv = (ns_private_t *)this->private;
+
+ GF_OPTION_RECONF("tag-namespaces", priv->tag_namespaces, options, bool,
+ out);
+
+ ret = 0;
+out:
+ return ret;
+}
+
+struct xlator_fops fops = {
+ .lookup = ns_lookup,
+ .stat = ns_stat,
+ .fstat = ns_fstat,
+ .truncate = ns_truncate,
+ .ftruncate = ns_ftruncate,
+ .access = ns_access,
+ .readlink = ns_readlink,
+ .mknod = ns_mknod,
+ .mkdir = ns_mkdir,
+ .unlink = ns_unlink,
+ .rmdir = ns_rmdir,
+ .symlink = ns_symlink,
+ .rename = ns_rename,
+ .link = ns_link,
+ .create = ns_create,
+ .open = ns_open,
+ .readv = ns_readv,
+ .writev = ns_writev,
+ .flush = ns_flush,
+ .fsync = ns_fsync,
+ .opendir = ns_opendir,
+ .readdir = ns_readdir,
+ .readdirp = ns_readdirp,
+ .fsyncdir = ns_fsyncdir,
+ .statfs = ns_statfs,
+ .setxattr = ns_setxattr,
+ .getxattr = ns_getxattr,
+ .fsetxattr = ns_fsetxattr,
+ .fgetxattr = ns_fgetxattr,
+ .removexattr = ns_removexattr,
+ .fremovexattr = ns_fremovexattr,
+ .lk = ns_lk,
+ .inodelk = ns_inodelk,
+ .finodelk = ns_finodelk,
+ .entrylk = ns_entrylk,
+ .fentrylk = ns_fentrylk,
+ .rchecksum = ns_rchecksum,
+ .xattrop = ns_xattrop,
+ .fxattrop = ns_fxattrop,
+ .setattr = ns_setattr,
+ .fsetattr = ns_fsetattr,
+ .getspec = ns_getspec,
+ .fallocate = ns_fallocate,
+ .discard = ns_discard,
+ .zerofill = ns_zerofill,
+};
+
+struct xlator_cbks cbks = {
+ .forget = ns_forget,
+};
+
+struct xlator_dumpops dumpops;
+
+struct volume_options options[] = {
+ {
+ .key = {"tag-namespaces"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "off",
+ .description = "This option enables this translator's functionality "
+ "that tags every fop with a namespace hash for later "
+ "throttling, stats collection, logging, etc.",
+ .op_version = {GD_OP_VERSION_4_1_0},
+ .tags = {"namespace"},
+ .flags = OPT_FLAG_SETTABLE | OPT_FLAG_CLIENT_OPT | OPT_FLAG_DOC,
+ },
+ {.key = {NULL}},
+};
+
+xlator_api_t xlator_api = {
+ .init = init,
+ .fini = fini,
+ .reconfigure = reconfigure,
+ .op_version = {GD_OP_VERSION_3_12_0},
+ .dumpops = &dumpops,
+ .fops = &fops,
+ .cbks = &cbks,
+ .options = options,
+ .identifier = "namespace",
+ .category = GF_TECH_PREVIEW,
+};
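
As a quick, informal illustration of how the static parse_path() defined earlier in this file classifies paths (the sample paths are made up, and hash values are omitted since they depend on SuperFastHash):

    ns_info_t info = {0, };

    parse_path(&info, "/webroot/index.html"); /* FOUND: hashes "webroot"          */
    parse_path(&info, "/");                   /* FOUND: hashes "/" (top level)    */
    parse_path(&info, "<gfid:1234>");         /* IS_GFID: caller retries via
                                                 inode_path() or the getxattr wind */
    parse_path(&info, "");                    /* NO_PATH: nothing to hash         */
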
diff --git a/xlators/features/namespace/src/namespace.h b/xlators/features/namespace/src/namespace.h
new file mode 100644
index 00000000000..3a9b84d6426
--- /dev/null
+++ b/xlators/features/namespace/src/namespace.h
@@ -0,0 +1,23 @@
+#ifndef __NAMESPACE_H__
+#define __NAMESPACE_H__
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <glusterfs/xlator.h>
+#include <glusterfs/call-stub.h>
+
+#define GF_NAMESPACE "namespace"
+
+typedef struct {
+ gf_boolean_t tag_namespaces;
+} ns_private_t;
+
+typedef struct {
+ loc_t loc; /* We store a "fake" loc_t for the getxattr wind. */
+ call_stub_t *stub; /* A stub back to the function we're resuming. */
+} ns_local_t;
+
+#endif /* __NAMESPACE_H__ */
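
A minimal sketch (not part of this patch) of how a local like ns_local_t is typically consumed, assuming the usual call-stub pattern suggested by the struct comments: the original fop is parked in local->stub, a getxattr is wound on the "fake" local->loc, and the stub is resumed from the callback. The callback name below is hypothetical.

static int32_t
ns_demo_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                     int32_t op_ret, int32_t op_errno, dict_t *dict,
                     dict_t *xdata)
{
    ns_local_t *local = frame->local;

    /* Hand control back to the parked fop; the stub carries its original
     * arguments. Cleanup of the local and the fake loc is omitted here. */
    if (local && local->stub)
        call_resume(local->stub);

    return 0;
}
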
diff --git a/xlators/features/path-convertor/src/Makefile.am b/xlators/features/path-convertor/src/Makefile.am
deleted file mode 100644
index 58cfed0f983..00000000000
--- a/xlators/features/path-convertor/src/Makefile.am
+++ /dev/null
@@ -1,14 +0,0 @@
-
-xlator_LTLIBRARIES = path-converter.la
-xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/testing/features
-
-path_converter_la_LDFLAGS = -module -avoidversion
-
-path_converter_la_SOURCES = path.c
-path_converter_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
-
-AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS)\
- -I$(top_srcdir)/libglusterfs/src -shared -nostartfiles $(GF_CFLAGS)
-
-CLEANFILES =
-
diff --git a/xlators/features/path-convertor/src/path-mem-types.h b/xlators/features/path-convertor/src/path-mem-types.h
deleted file mode 100644
index a0b4d90cce3..00000000000
--- a/xlators/features/path-convertor/src/path-mem-types.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- Copyright (c) 2008-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
-
-#ifndef __PATH_MEM_TYPES_H__
-#define __PATH_MEM_TYPES_H__
-
-#include "mem-types.h"
-
-enum gf_path_mem_types_ {
- gf_path_mt_path_private_t = gf_common_mt_end + 1,
- gf_path_mt_char,
- gf_path_mt_regex_t,
- gf_path_mt_end
-};
-#endif
-
diff --git a/xlators/features/path-convertor/src/path.c b/xlators/features/path-convertor/src/path.c
deleted file mode 100644
index 858c6b2d05f..00000000000
--- a/xlators/features/path-convertor/src/path.c
+++ /dev/null
@@ -1,1239 +0,0 @@
-/*
- Copyright (c) 2008-2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
-
-/* TODO: add gf_log to all the cases returning errors */
-
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
-
-/**
- * xlators/features/path-translator:
- * This translator converts the path it gets into user specified targets.
- */
-
-#include <sys/types.h>
-#include <regex.h>
-#include <time.h>
-#include <errno.h>
-#include "glusterfs.h"
-#include "xlator.h"
-#include "path-mem-types.h"
-
-typedef struct path_private
-{
- int32_t this_len;
- int32_t start_off;
- int32_t end_off;
- char *this;
- char *that;
- char *path;
- regex_t *preg;
-} path_private_t;
-
-static char *
-name_this_to_that (xlator_t *xl, const char *path, const char *name)
-{
- path_private_t *priv = xl->private;
- char priv_path[ZR_PATH_MAX] = {0,};
- char *tmp_name = NULL;
- int32_t path_len = strlen (path);
- int32_t name_len = strlen (name) - ZR_FILE_CONTENT_STRLEN;
- int32_t total_len = path_len + name_len;
- int32_t i = 0, j = 0;
-
- if (path_len >= priv->end_off)
- return (char *)name;
-
- if (priv->end_off && (total_len > priv->end_off)) {
- j = priv->start_off;
- tmp_name = GF_CALLOC (1, (total_len +
- ZR_FILE_CONTENT_STRLEN),
- gf_path_mt_char);
- ERR_ABORT (tmp_name);
-
- /* Get the complete path for the file first */
- strcpy (tmp_name, path);
- strcat (tmp_name, name + ZR_FILE_CONTENT_STRLEN);
-
- strncpy (priv_path, tmp_name, priv->start_off);
- for (i = priv->start_off; i < priv->end_off; i++) {
- if (tmp_name[i] == '/')
- continue;
- priv_path[j++] = tmp_name[i];
- }
- memcpy ((priv_path + j),
- (tmp_name + priv->end_off),
- (total_len - priv->end_off));
- priv_path[(total_len - (priv->end_off - j))] = '\0';
-
- strcpy (tmp_name, ZR_FILE_CONTENT_STR);
- strcat (tmp_name, priv_path);
-
- return tmp_name;
- }
-
- return (char *)name;
-}
-
-/* This function should return
- * NULL -
- * converted path - if path match
- * same path - if it doesn't match
- */
-static char *
-path_this_to_that (xlator_t *xl, const char *path)
-{
- path_private_t *priv = xl->private;
- char *priv_path = NULL;
- int32_t path_len = strlen (path);
- int32_t i = 0, j = 0;
-
- if (priv->end_off && (path_len > priv->start_off)) {
- priv_path = GF_CALLOC (1, path_len, gf_path_mt_char);
- ERR_ABORT (priv_path);
-
- if (priv->start_off && (path_len > priv->start_off))
- memcpy (priv_path, path, priv->start_off);
- if (path_len > priv->end_off) {
- j = priv->start_off;
- for (i = priv->start_off; i < priv->end_off; i++) {
- if (path[i] == '/')
- continue;
- priv_path[j++] = path[i];
- }
- memcpy ((priv_path + j),
- (path + priv->end_off),
- (path_len - priv->end_off));
- priv_path[(path_len - (priv->end_off - j))] = '\0';
- }
- return priv_path;
- }
- return (char *)path;
-}
-
-int32_t
-path_create_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- fd_t *fd,
- inode_t *inode,
- struct iatt *buf,
- struct iatt *preparent,
- struct iatt *postparent)
-{
- STACK_UNWIND (frame, op_ret, op_errno, fd, inode, buf);
- return 0;
-}
-
-int32_t
-path_open_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- fd_t *fd)
-{
- STACK_UNWIND (frame, op_ret, op_errno, fd);
- return 0;
-}
-
-int32_t
-path_getdents_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- dir_entry_t *entries,
- int32_t count)
-{
- STACK_UNWIND (frame, op_ret, op_errno, entries, count);
- return 0;
-}
-
-int32_t
-path_readdir_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- gf_dirent_t *buf)
-{
- STACK_UNWIND (frame, op_ret, op_errno, buf);
- return 0;
-}
-
-
-int32_t
-path_readlink_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- const char *buf,
- struct iatt *sbuf)
-{
- STACK_UNWIND (frame, op_ret, op_errno, buf, sbuf);
- return 0;
-}
-
-int32_t
-path_lookup_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct iatt *buf,
- dict_t *xattr,
- struct iatt *postparent)
-{
- STACK_UNWIND (frame, op_ret, op_errno, inode, buf, xattr);
- return 0;
-}
-
-
-int32_t
-path_symlink_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct iatt *buf,
- struct iatt *preparent,
- struct iatt *postparent)
-{
- STACK_UNWIND (frame, op_ret, op_errno, inode, buf);
- return 0;
-}
-
-int32_t
-path_mknod_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct iatt *buf,
- struct iatt *preparent,
- struct iatt *postparent)
-{
- STACK_UNWIND (frame, op_ret, op_errno, inode, buf);
- return 0;
-}
-
-
-int32_t
-path_mkdir_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct iatt *buf,
- struct iatt *preparent,
- struct iatt *postparent)
-{
- STACK_UNWIND (frame, op_ret, op_errno, inode, buf);
- return 0;
-}
-
-int32_t
-path_link_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- inode_t *inode,
- struct iatt *buf,
- struct iatt *preparent,
- struct iatt *postparent)
-{
- STACK_UNWIND (frame, op_ret, op_errno, inode, buf);
- return 0;
-}
-
-int32_t
-path_opendir_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- fd_t *fd)
-{
- STACK_UNWIND (frame, op_ret, op_errno, fd);
- return 0;
-}
-
-
-int32_t
-path_rename_buf_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- struct iatt *buf,
- struct iatt *preoldparent,
- struct iatt *postoldparent,
- struct iatt *prenewparent,
- struct iatt *postnewparent)
-{
- STACK_UNWIND (frame, op_ret, op_errno, buf);
- return 0;
-}
-
-
-
-int32_t
-path_common_buf_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- struct iatt *buf)
-{
- STACK_UNWIND (frame, op_ret, op_errno, buf);
- return 0;
-}
-
-int32_t
-path_common_dict_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- dict_t *dict)
-{
- STACK_UNWIND (frame, op_ret, op_errno, dict);
- return 0;
-}
-
-int32_t
-path_common_remove_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno,struct iatt *preparent,
- struct iatt *postparent)
-{
- STACK_UNWIND (frame, op_ret, op_errno);
- return 0;
-}
-
-int32_t
-path_truncate_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno,struct iatt *prebuf,
- struct iatt *postbuf)
-{
- STACK_UNWIND (frame, op_ret, op_errno, prebuf, postbuf);
- return 0;
-}
-
-
-int32_t
-path_common_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno)
-{
- STACK_UNWIND (frame, op_ret, op_errno);
- return 0;
-}
-
-/* */
-int32_t
-path_lookup (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- dict_t *xattr_req)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame, path_lookup_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->lookup,
- loc, xattr_req);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-int32_t
-path_stat (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_common_buf_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->stat,
- loc);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-int32_t
-path_readlink (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- size_t size)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_readlink_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->readlink,
- loc,
- size);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-int32_t
-path_mknod (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- mode_t mode,
- dev_t dev)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_mknod_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->mknod,
- loc,
- mode,
- dev);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-int32_t
-path_mkdir (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- mode_t mode)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_mkdir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->mkdir,
- loc,
- mode);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-int32_t
-path_unlink (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_common_remove_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->unlink,
- loc);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-int32_t
-path_rmdir (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_common_remove_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->rmdir,
- loc);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-int32_t
-path_symlink (call_frame_t *frame,
- xlator_t *this,
- const char *linkpath,
- loc_t *loc)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_symlink_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->symlink,
- linkpath,
- loc);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-int32_t
-path_rename (call_frame_t *frame,
- xlator_t *this,
- loc_t *oldloc,
- loc_t *newloc)
-{
- char *oldloc_path = (char *)oldloc->path;
- char *tmp_oldloc_path = NULL;
-
- char *newloc_path = (char *)newloc->path;
- char *tmp_newloc_path = NULL;
-
- if (!(tmp_oldloc_path = path_this_to_that (this, oldloc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- oldloc->path = tmp_oldloc_path;
-
- if (!(tmp_newloc_path = path_this_to_that (this, newloc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- newloc->path = tmp_newloc_path;
-
- STACK_WIND (frame,
- path_rename_buf_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->rename,
- oldloc,
- newloc);
-
- oldloc->path = oldloc_path;
- if (tmp_oldloc_path != oldloc_path)
- GF_FREE (tmp_oldloc_path);
-
- newloc->path = newloc_path;
- if (tmp_newloc_path != newloc_path)
- GF_FREE (tmp_newloc_path);
-
- return 0;
-}
-
-int32_t
-path_link (call_frame_t *frame,
- xlator_t *this,
- loc_t *oldloc,
- loc_t *newloc)
-{
- char *oldloc_path = (char *)oldloc->path;
- char *tmp_oldloc_path = NULL;
-
- char *newloc_path = (char *)newloc->path;
- char *tmp_newloc_path = NULL;
-
- if (!(tmp_oldloc_path = path_this_to_that (this, oldloc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- oldloc->path = tmp_oldloc_path;
-
- if (!(tmp_newloc_path = path_this_to_that (this, newloc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- newloc->path = tmp_newloc_path;
-
- STACK_WIND (frame,
- path_link_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->link,
- oldloc,
- newloc);
-
- oldloc->path = oldloc_path;
- if (tmp_oldloc_path != oldloc_path)
- GF_FREE (tmp_oldloc_path);
-
- newloc->path = newloc_path;
- if (tmp_newloc_path != newloc_path)
- GF_FREE (tmp_newloc_path);
-
- return 0;
-}
-
-int32_t
-path_setattr_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- struct iatt *preop,
- struct iatt *postop)
-{
- STACK_UNWIND (frame, op_ret, op_errno, preop, postop);
- return 0;
-}
-
-int32_t
-path_setattr (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- struct iatt *stbuf,
- int32_t valid)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_setattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->setattr,
- loc,
- stbuf, valid);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-
-int32_t
-path_truncate (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- off_t offset)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_truncate_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->truncate,
- loc,
- offset);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-
-int32_t
-path_open (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- int32_t flags,
- fd_t *fd,
- int32_t wbflags)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_open_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->open,
- loc,
- flags,
- fd,
- wbflags);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-int32_t
-path_create (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- int32_t flags,
- mode_t mode,
- fd_t *fd)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_create_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->create,
- loc,
- flags,
- mode,
- fd);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-int32_t
-path_setxattr (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- dict_t *dict,
- int32_t flags)
-{
- char *tmp_name = NULL;
- data_pair_t *trav = dict->members_list;
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- if (ZR_FILE_CONTENT_REQUEST(trav->key)) {
- tmp_name = name_this_to_that (this, loc->path, trav->key);
- if (tmp_name != trav->key) {
- trav->key = tmp_name;
- } else {
- tmp_name = NULL;
- }
- }
-
- STACK_WIND (frame,
- path_common_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->setxattr,
- loc,
- dict,
- flags);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- if (tmp_name)
- GF_FREE (tmp_name);
-
- return 0;
-}
-
-int32_t
-path_getxattr (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- const char *name)
-{
- char *tmp_name = (char *)name;
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- if (ZR_FILE_CONTENT_REQUEST(name)) {
- tmp_name = name_this_to_that (this, loc->path, name);
- }
-
- STACK_WIND (frame,
- path_common_dict_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->getxattr,
- loc,
- tmp_name);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- if (tmp_name != name)
- GF_FREE (tmp_name);
-
- return 0;
-}
-
-int32_t
-path_removexattr (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- const char *name)
-{
- char *tmp_name = (char *)name;
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- if (ZR_FILE_CONTENT_REQUEST(name)) {
- tmp_name = name_this_to_that (this, loc->path, name);
- }
-
- STACK_WIND (frame,
- path_common_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->removexattr,
- loc,
- tmp_name);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- if (tmp_name != name)
- GF_FREE (tmp_name);
-
- return 0;
-}
-
-int32_t
-path_opendir (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- fd_t *fd)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_opendir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->opendir,
- loc,
- fd);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-int32_t
-path_access (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- int32_t mask)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_common_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->access,
- loc,
- mask);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-int32_t
-path_checksum_cbk (call_frame_t *frame,
- void *cookie,
- xlator_t *this,
- int32_t op_ret,
- int32_t op_errno,
- uint8_t *fchecksum,
- uint8_t *dchecksum)
-{
- STACK_UNWIND (frame, op_ret, op_errno, fchecksum, dchecksum);
- return 0;
-}
-
-int32_t
-path_checksum (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- int32_t flag)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_checksum_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->checksum,
- loc,
- flag);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-
-int32_t
-path_entrylk (call_frame_t *frame, xlator_t *this,
- const char *volume, loc_t *loc, const char *basename,
- entrylk_cmd cmd, entrylk_type type)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame, path_common_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->entrylk,
- volume, loc, basename, cmd, type);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-int32_t
-path_inodelk (call_frame_t *frame, xlator_t *this,
- const char *volume, loc_t *loc, int32_t cmd, struct gf_flock *lock)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_common_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->inodelk,
- volume, loc, cmd, lock);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-
-int32_t
-path_xattrop (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- gf_xattrop_flags_t flags,
- dict_t *dict)
-{
- char *loc_path = (char *)loc->path;
- char *tmp_path = NULL;
-
- if (!(tmp_path = path_this_to_that (this, loc->path))) {
- STACK_UNWIND (frame, -1, ENOENT, NULL, NULL);
- return 0;
- }
- loc->path = tmp_path;
-
- STACK_WIND (frame,
- path_common_dict_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->xattrop,
- loc,
- flags,
- dict);
-
- loc->path = loc_path;
- if (tmp_path != loc_path)
- GF_FREE (tmp_path);
-
- return 0;
-}
-
-int32_t
-mem_acct_init (xlator_t *this)
-{
- int ret = -1;
-
- if (!this)
- return ret;
-
- ret = xlator_mem_acct_init (this, gf_path_mt_end + 1);
-
- if (ret != 0) {
- gf_log (this->name, GF_LOG_ERROR, "Memory accounting init"
- "failed");
- return ret;
- }
-
- return ret;
-}
-
-int32_t
-init (xlator_t *this)
-{
- dict_t *options = this->options;
- path_private_t *priv = NULL;
-
- if (!this->children || this->children->next) {
- gf_log (this->name, GF_LOG_ERROR,
- "path translator requires exactly one subvolume");
- return -1;
- }
-
- if (!this->parents) {
- gf_log (this->name, GF_LOG_WARNING,
- "dangling volume. check volfile ");
- }
-
- priv = GF_CALLOC (1, sizeof (*priv), gf_path_mt_path_private_t);
- ERR_ABORT (priv);
- if (dict_get (options, "start-offset")) {
- priv->start_off = data_to_int32 (dict_get (options,
- "start-offset"));
- }
- if (dict_get (options, "end-offset")) {
- priv->end_off = data_to_int32 (dict_get (options,
- "end-offset"));
- }
-
- if (dict_get (options, "regex")) {
- int32_t ret = 0;
- priv->preg = GF_CALLOC (1, sizeof (regex_t),
- gf_path_mt_regex_t);
- ERR_ABORT (priv->preg);
- ret = regcomp (priv->preg,
- data_to_str (dict_get (options, "regex")),
- REG_EXTENDED);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Failed to compile the 'option regex'");
- GF_FREE (priv);
- return -1;
- }
- if (dict_get (options, "replace-with")) {
- priv->that = data_to_str (dict_get (options,
- "replace-with"));
- } else {
- priv->that = "";
- }
- }
-
- this->private = priv;
- return 0;
-}
-
-void
-fini (xlator_t *this)
-{
- return;
-}
-
-struct xlator_fops fops = {
- .stat = path_stat,
- .readlink = path_readlink,
- .mknod = path_mknod,
- .mkdir = path_mkdir,
- .unlink = path_unlink,
- .rmdir = path_rmdir,
- .symlink = path_symlink,
- .rename = path_rename,
- .link = path_link,
- .truncate = path_truncate,
- .open = path_open,
- .setxattr = path_setxattr,
- .getxattr = path_getxattr,
- .removexattr = path_removexattr,
- .opendir = path_opendir,
- .access = path_access,
- .create = path_create,
- .lookup = path_lookup,
- .checksum = path_checksum,
- .xattrop = path_xattrop,
- .entrylk = path_entrylk,
- .inodelk = path_inodelk,
- .setattr = path_setattr,
-};
-
-struct xlator_cbks cbks = {
-};
-
-struct volume_options options[] = {
- { .key = {"start-offset"},
- .type = GF_OPTION_TYPE_INT,
- .min = 0,
- .max = 4095
- },
- { .key = {"end-offset"},
- .type = GF_OPTION_TYPE_INT,
- .min = 1,
- .max = 4096
- },
- { .key = {"replace-with"},
- .type = GF_OPTION_TYPE_ANY
- },
- { .key = {NULL} },
-};
diff --git a/xlators/features/quiesce/src/Makefile.am b/xlators/features/quiesce/src/Makefile.am
index e8ab4cb2417..74ea999c045 100644
--- a/xlators/features/quiesce/src/Makefile.am
+++ b/xlators/features/quiesce/src/Makefile.am
@@ -1,14 +1,16 @@
xlator_LTLIBRARIES = quiesce.la
xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
-quiesce_la_LDFLAGS = -module -avoidversion
+quiesce_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
quiesce_la_SOURCES = quiesce.c
quiesce_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
-noinst_HEADERS = quiesce.h quiesce-mem-types.h
+noinst_HEADERS = quiesce.h quiesce-mem-types.h quiesce-messages.h
-AM_CFLAGS = -fPIC -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -Wall -D$(GF_HOST_OS) \
- -I$(top_srcdir)/libglusterfs/src -shared -nostartfiles $(GF_CFLAGS)
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src
+
+AM_CFLAGS = -Wall $(GF_CFLAGS)
CLEANFILES =
diff --git a/xlators/features/quiesce/src/quiesce-mem-types.h b/xlators/features/quiesce/src/quiesce-mem-types.h
index f9154173a18..416456b13af 100644
--- a/xlators/features/quiesce/src/quiesce-mem-types.h
+++ b/xlators/features/quiesce/src/quiesce-mem-types.h
@@ -1,30 +1,21 @@
/*
- Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
+ Copyright (c) 2010-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
*/
-
#ifndef __QUIESCE_MEM_TYPES_H__
#define __QUIESCE_MEM_TYPES_H__
-#include "mem-types.h"
+#include <glusterfs/mem-types.h>
enum gf_quiesce_mem_types_ {
- gf_quiesce_mt_priv_t = gf_common_mt_end + 1,
- gf_quiesce_mt_end
+ gf_quiesce_mt_priv_t = gf_common_mt_end + 1,
+ gf_quiesce_mt_failover_hosts,
+ gf_quiesce_mt_end
};
#endif
diff --git a/xlators/features/quiesce/src/quiesce-messages.h b/xlators/features/quiesce/src/quiesce-messages.h
new file mode 100644
index 00000000000..32ffd409807
--- /dev/null
+++ b/xlators/features/quiesce/src/quiesce-messages.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#ifndef __QUIESCE_MESSAGES_H__
+#define __QUIESCE_MESSAGES_H__
+
+#include <glusterfs/glfs-message-id.h>
+
+/* To add new message IDs, append new identifiers at the end of the list.
+ *
+ * Never remove a message ID. If it's not used anymore, you can rename it or
+ * leave it as it is, but not delete it. This is to prevent reutilization of
+ * IDs by other messages.
+ *
+ * The component name must match one of the entries defined in
+ * glfs-message-id.h.
+ */
+
+GLFS_MSGID(QUIESCE, QUIESCE_MSG_INVAL_HOST, QUIESCE_MSG_FAILOVER_FAILED);
+
+#endif /* __QUIESCE_MESSAGES_H__ */
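
Per the comment above, a new message is added by appending its identifier to the end of the GLFS_MSGID() list, so existing IDs keep their values. A sketch with a hypothetical identifier (QUIESCE_MSG_TIMER_FAILED is not defined by this patch):

GLFS_MSGID(QUIESCE, QUIESCE_MSG_INVAL_HOST, QUIESCE_MSG_FAILOVER_FAILED,
           QUIESCE_MSG_TIMER_FAILED);
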
diff --git a/xlators/features/quiesce/src/quiesce.c b/xlators/features/quiesce/src/quiesce.c
index c9828054c1c..0e5eb60a16f 100644
--- a/xlators/features/quiesce/src/quiesce.c
+++ b/xlators/features/quiesce/src/quiesce.c
@@ -1,748 +1,827 @@
/*
- Copyright (c) 2010 Gluster, Inc. <http://www.gluster.com>
- This file is part of GlusterFS.
-
- GlusterFS is free software; you can redistribute it and/or modify
- it under the terms of the GNU Affero General Public License as published
- by the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- GlusterFS is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Affero General Public License for more details.
-
- You should have received a copy of the GNU Affero General Public License
- along with this program. If not, see
- <http://www.gnu.org/licenses/>.
-*/
-
-#ifndef _CONFIG_H
-#define _CONFIG_H
-#include "config.h"
-#endif
+ Copyright (c) 2010-2012 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
#include "quiesce.h"
-#include "defaults.h"
-#include "call-stub.h"
+#include <glusterfs/defaults.h>
+#include <glusterfs/call-stub.h>
/* TODO: */
/* Think about 'writev/_*_lk/setattr/xattrop/' fops to do re-transmittion */
+void
+gf_quiesce_timeout(void *data);
/* Quiesce Specific Functions */
void
-gf_quiesce_local_wipe (xlator_t *this, quiesce_local_t *local)
+gf_quiesce_local_wipe(xlator_t *this, quiesce_local_t *local)
{
- quiesce_priv_t *priv = NULL;
+ if (!local || !this || !this->private)
+ return;
+
+ if (local->loc.inode)
+ loc_wipe(&local->loc);
+ if (local->fd)
+ fd_unref(local->fd);
+ GF_FREE(local->name);
+ GF_FREE(local->volname);
+ if (local->dict)
+ dict_unref(local->dict);
+ if (local->iobref)
+ iobref_unref(local->iobref);
+ GF_FREE(local->vector);
- if (!local || !this || !this->private)
- return;
+ mem_put(local);
+}
- priv = this->private;
+void
+__gf_quiesce_start_timer(xlator_t *this, quiesce_priv_t *priv)
+{
+ struct timespec timeout = {
+ 0,
+ };
- if (local->loc.inode)
- loc_wipe (&local->loc);
- if (local->fd)
- fd_unref (local->fd);
- if (local->name)
- GF_FREE (local->name);
- if (local->volname)
- GF_FREE (local->volname);
- if (local->dict)
- dict_unref (local->dict);
- if (local->iobref)
- iobref_unref (local->iobref);
- if (local->vector)
- GF_FREE (local->vector);
+ if (!priv->timer) {
+ timeout.tv_sec = priv->timeout;
+ timeout.tv_nsec = 0;
- mem_put (priv->local_pool, local);
+ priv->timer = gf_timer_call_after(this->ctx, timeout,
+ gf_quiesce_timeout, (void *)this);
+ if (priv->timer == NULL) {
+ gf_log(this->name, GF_LOG_ERROR, "Cannot create timer");
+ }
+ }
}
-call_stub_t *
-gf_quiesce_dequeue (xlator_t *this)
+static void
+__gf_quiesce_cleanup_failover_hosts(xlator_t *this, quiesce_priv_t *priv)
{
- call_stub_t *stub = NULL;
- quiesce_priv_t *priv = NULL;
+ quiesce_failover_hosts_t *tmp = NULL;
+ quiesce_failover_hosts_t *failover_host = NULL;
- priv = this->private;
+ list_for_each_entry_safe(failover_host, tmp, &priv->failover_list, list)
+ {
+ GF_FREE(failover_host->addr);
+ list_del(&failover_host->list);
+ GF_FREE(failover_host);
+ }
+ return;
+}
- if (!priv || list_empty (&priv->req))
- return NULL;
+void
+gf_quiesce_populate_failover_hosts(xlator_t *this, quiesce_priv_t *priv,
+ const char *value)
+{
+ char *dup_val = NULL;
+ char *addr_tok = NULL;
+ char *save_ptr = NULL;
+ quiesce_failover_hosts_t *failover_host = NULL;
+
+ if (!value)
+ goto out;
+
+ dup_val = gf_strdup(value);
+ if (!dup_val)
+ goto out;
+
+ addr_tok = strtok_r(dup_val, ",", &save_ptr);
+ LOCK(&priv->lock);
+ {
+ if (!list_empty(&priv->failover_list))
+ __gf_quiesce_cleanup_failover_hosts(this, priv);
+
+ while (addr_tok) {
+ if (!valid_internet_address(addr_tok, _gf_true, _gf_false)) {
+ gf_msg(this->name, GF_LOG_INFO, 0, QUIESCE_MSG_INVAL_HOST,
+ "Specified "
+ "invalid internet address:%s",
+ addr_tok);
+ addr_tok = strtok_r(NULL, ",", &save_ptr);
+ continue;
+ }
+ failover_host = GF_CALLOC(1, sizeof(*failover_host),
+ gf_quiesce_mt_failover_hosts);
+ failover_host->addr = gf_strdup(addr_tok);
+ INIT_LIST_HEAD(&failover_host->list);
+ list_add(&failover_host->list, &priv->failover_list);
+ addr_tok = strtok_r(NULL, ",", &save_ptr);
+ }
+ }
+ UNLOCK(&priv->lock);
+ GF_FREE(dup_val);
+out:
+ return;
+}
- LOCK (&priv->lock);
- {
- stub = list_entry (priv->req.next, call_stub_t, list);
- list_del_init (&stub->list);
- priv->queue_size--;
- }
- UNLOCK (&priv->lock);
+int32_t
+gf_quiesce_failover_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ quiesce_priv_t *priv = NULL;
+
+ if (op_ret < 0) {
+ /* Failure here doesn't mean the failover to another host didn't
+ * succeed, we will know if failover succeeds or not by the
+ * CHILD_UP/CHILD_DOWN event. A failure here indicates something
+ * went wrong with the submission of failover command, hence
+ * just abort the failover attempts without retrying with other
+ * hosts.
+ */
+ gf_msg(this->name, GF_LOG_INFO, op_errno, QUIESCE_MSG_FAILOVER_FAILED,
+ "Initiating failover to host:%s failed:", (char *)cookie);
+ }
- return stub;
+ GF_FREE(cookie);
+ STACK_DESTROY(frame->root);
+
+ priv = this->private;
+ __gf_quiesce_start_timer(this, priv);
+
+ return 0;
}
-void *
-gf_quiesce_dequeue_start (void *data)
+int
+__gf_quiesce_perform_failover(xlator_t *this)
+{
+ int ret = 0;
+ call_frame_t *frame = NULL;
+ dict_t *dict = NULL;
+ quiesce_priv_t *priv = NULL;
+ quiesce_failover_hosts_t *failover_host = NULL;
+ quiesce_failover_hosts_t *host = NULL;
+
+ priv = this->private;
+
+ if (priv->pass_through) {
+ gf_msg_trace(this->name, 0,
+ "child is up, hence not "
+ "performing any failover");
+ goto out;
+ }
+
+ list_for_each_entry(failover_host, &priv->failover_list, list)
+ {
+ if (failover_host->tried == 0) {
+ host = failover_host;
+ failover_host->tried = 1;
+ break;
+ }
+ }
+ if (!host) {
+ /*TODO: Keep trying until any of the gfproxy hosts comes back up.
+ Currently it tries failing over once to each host;
+ if that doesn't succeed, an error is returned to the mount point.
+ list_for_each_entry (failover_host,
+ &priv->failover_list, list) {
+ failover_host->tried = 0;
+ }*/
+ gf_msg_debug(this->name, 0,
+ "all the failover hosts have "
+ "been tried and looks like didn't succeed");
+ ret = -1;
+ goto out;
+ }
+
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame) {
+ gf_msg_debug(this->name, 0, "failed to create the frame");
+ ret = -1;
+ goto out;
+ }
+
+ dict = dict_new();
+
+ ret = dict_set_dynstr(dict, CLIENT_CMD_CONNECT, gf_strdup(host->addr));
+
+ gf_msg_trace(this->name, 0, "Initiating failover to:%s", host->addr);
+
+ STACK_WIND_COOKIE(frame, gf_quiesce_failover_cbk, NULL, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setxattr, NULL, dict, 0, NULL);
+out:
+
+ if (dict)
+ dict_unref(dict);
+
+ return ret;
+}
+
+call_stub_t *
+gf_quiesce_dequeue(xlator_t *this)
{
- xlator_t *this = NULL;
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
- this = data;
- priv = this->private;
- THIS = this;
+ priv = this->private;
- while (!list_empty (&priv->req)) {
- stub = gf_quiesce_dequeue (this);
- if (stub) {
- call_resume (stub);
- }
- }
+ if (!priv || list_empty(&priv->req))
+ return NULL;
- return 0;
-}
+ LOCK(&priv->lock);
+ {
+ stub = list_entry(priv->req.next, call_stub_t, list);
+ list_del_init(&stub->list);
+ priv->queue_size--;
+ }
+ UNLOCK(&priv->lock);
+ return stub;
+}
-void
-gf_quiesce_timeout (void *data)
+void *
+gf_quiesce_dequeue_start(void *data)
{
- xlator_t *this = NULL;
- quiesce_priv_t *priv = NULL;
- int need_dequeue = 0;
+ xlator_t *this = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- this = data;
- priv = this->private;
- THIS = this;
+ this = data;
+ priv = this->private;
+ THIS = this;
- LOCK (&priv->lock);
- {
- priv->pass_through = _gf_true;
- need_dequeue = (priv->queue_size)? 1:0;
+ while (!list_empty(&priv->req)) {
+ stub = gf_quiesce_dequeue(this);
+ if (stub) {
+ call_resume(stub);
}
- UNLOCK (&priv->lock);
+ }
- gf_quiesce_dequeue_start (this);
-
- return;
+ return 0;
}
void
-gf_quiesce_enqueue (xlator_t *this, call_stub_t *stub)
+gf_quiesce_timeout(void *data)
{
- quiesce_priv_t *priv = NULL;
- struct timeval timeout = {0,};
+ xlator_t *this = NULL;
+ quiesce_priv_t *priv = NULL;
+ int ret = -1;
- priv = this->private;
- if (!priv) {
- gf_log_callingfn (this->name, GF_LOG_ERROR,
- "this->private == NULL");
- return;
- }
+ this = data;
+ priv = this->private;
+ THIS = this;
- LOCK (&priv->lock);
- {
- list_add_tail (&stub->list, &priv->req);
- priv->queue_size++;
+ LOCK(&priv->lock);
+ {
+ priv->timer = NULL;
+ if (priv->pass_through) {
+ UNLOCK(&priv->lock);
+ goto out;
}
- UNLOCK (&priv->lock);
+ ret = __gf_quiesce_perform_failover(THIS);
+ }
+ UNLOCK(&priv->lock);
- if (!priv->timer) {
- timeout.tv_sec = 20;
- timeout.tv_usec = 0;
+ if (ret < 0) {
+ priv->pass_through = _gf_true;
+ gf_quiesce_dequeue_start(this);
+ }
- priv->timer = gf_timer_call_after (this->ctx,
- timeout,
- gf_quiesce_timeout,
- (void *) this);
- }
+out:
+ return;
+}
+
+void
+gf_quiesce_enqueue(xlator_t *this, call_stub_t *stub)
+{
+ quiesce_priv_t *priv = NULL;
+ priv = this->private;
+ if (!priv) {
+ gf_log_callingfn(this->name, GF_LOG_ERROR, "this->private == NULL");
return;
-}
+ }
+ LOCK(&priv->lock);
+ {
+ list_add_tail(&stub->list, &priv->req);
+ priv->queue_size++;
+ __gf_quiesce_start_timer(this, priv);
+ }
+ UNLOCK(&priv->lock);
+ return;
+}
/* _CBK function section */
int32_t
-quiesce_lookup_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, inode_t *inode,
- struct iatt *buf, dict_t *dict, struct iatt *postparent)
+quiesce_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, inode_t *inode,
+ struct iatt *buf, dict_t *dict, struct iatt *postparent)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
-
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_lookup_stub (frame, default_lookup_resume,
- &local->loc, local->dict);
- if (!stub) {
- STACK_UNWIND_STRICT (lookup, frame, -1, ENOMEM,
- NULL, NULL, NULL, NULL);
- goto out;
- }
-
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_lookup_stub(frame, default_lookup_resume, &local->loc,
+ local->dict);
+ if (!stub) {
+ STACK_UNWIND_STRICT(lookup, frame, -1, ENOMEM, NULL, NULL, NULL,
+ NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (lookup, frame, op_ret, op_errno, inode, buf,
- dict, postparent);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(lookup, frame, op_ret, op_errno, inode, buf, dict,
+ postparent);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
int32_t
-quiesce_stat_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *buf)
+quiesce_stat_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
-
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_stat_stub (frame, default_stat_resume,
- &local->loc);
- if (!stub) {
- STACK_UNWIND_STRICT (stat, frame, -1, ENOMEM,
- NULL);
- goto out;
- }
-
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_stat_stub(frame, default_stat_resume, &local->loc, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(stat, frame, -1, ENOMEM, NULL, NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (stat, frame, op_ret, op_errno, buf);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(stat, frame, op_ret, op_errno, buf, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
int32_t
-quiesce_access_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+quiesce_access_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
-
- priv = this->private;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_access_stub (frame, default_access_resume,
- &local->loc, local->flag);
- if (!stub) {
- STACK_UNWIND_STRICT (access, frame, -1, ENOMEM);
- goto out;
- }
-
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_access_stub(frame, default_access_resume, &local->loc,
+ local->flag, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(access, frame, -1, ENOMEM, NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (access, frame, op_ret, op_errno);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(access, frame, op_ret, op_errno, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
int32_t
-quiesce_readlink_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, const char *path,
- struct iatt *buf)
+quiesce_readlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, const char *path,
+ struct iatt *buf, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
-
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_readlink_stub (frame, default_readlink_resume,
- &local->loc, local->size);
- if (!stub) {
- STACK_UNWIND_STRICT (readlink, frame, -1, ENOMEM,
- NULL, NULL);
- goto out;
- }
-
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_readlink_stub(frame, default_readlink_resume, &local->loc,
+ local->size, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(readlink, frame, -1, ENOMEM, NULL, NULL, NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (readlink, frame, op_ret, op_errno, path, buf);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(readlink, frame, op_ret, op_errno, path, buf, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
int32_t
-quiesce_open_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, fd_t *fd)
+quiesce_open_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
-
- priv = this->private;
-
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_open_stub (frame, default_open_resume,
- &local->loc, local->flag, local->fd,
- local->wbflags);
- if (!stub) {
- STACK_UNWIND_STRICT (open, frame, -1, ENOMEM,
- NULL);
- goto out;
- }
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_open_stub(frame, default_open_resume, &local->loc,
+ local->flag, local->fd, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(open, frame, -1, ENOMEM, NULL, NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (open, frame, op_ret, op_errno, fd);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(open, frame, op_ret, op_errno, fd, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
int32_t
-quiesce_readv_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iovec *vector,
- int32_t count, struct iatt *stbuf, struct iobref *iobref)
+quiesce_readv_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iovec *vector,
+ int32_t count, struct iatt *stbuf, struct iobref *iobref,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
-
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_readv_stub (frame, default_readv_resume,
- local->fd, local->size, local->offset);
- if (!stub) {
- STACK_UNWIND_STRICT (readv, frame, -1, ENOMEM,
- NULL, 0, NULL, NULL);
- goto out;
- }
-
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_readv_stub(frame, default_readv_resume, local->fd,
+ local->size, local->offset, local->io_flag,
+ xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(readv, frame, -1, ENOMEM, NULL, 0, NULL, NULL,
+ NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (readv, frame, op_ret, op_errno, vector, count,
- stbuf, iobref);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(readv, frame, op_ret, op_errno, vector, count, stbuf,
+ iobref, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
int32_t
-quiesce_flush_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+quiesce_flush_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
-
- priv = this->private;
-
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_flush_stub (frame, default_flush_resume,
- local->fd);
- if (!stub) {
- STACK_UNWIND_STRICT (flush, frame, -1, ENOMEM);
- goto out;
- }
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_flush_stub(frame, default_flush_resume, local->fd, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(flush, frame, -1, ENOMEM, NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (flush, frame, op_ret, op_errno);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(flush, frame, op_ret, op_errno, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
-
-
int32_t
-quiesce_fsync_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf)
+quiesce_fsync_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
-
- priv = this->private;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_fsync_stub (frame, default_fsync_resume,
- local->fd, local->flag);
- if (!stub) {
- STACK_UNWIND_STRICT (fsync, frame, -1, ENOMEM,
- NULL, NULL);
- goto out;
- }
-
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_fsync_stub(frame, default_fsync_resume, local->fd,
+ local->flag, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(fsync, frame, -1, ENOMEM, NULL, NULL, NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (fsync, frame, op_ret, op_errno, prebuf, postbuf);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(fsync, frame, op_ret, op_errno, prebuf, postbuf, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
int32_t
-quiesce_fstat_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct iatt *buf)
+quiesce_fstat_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *buf,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
-
- priv = this->private;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_fstat_stub (frame, default_fstat_resume,
- local->fd);
- if (!stub) {
- STACK_UNWIND_STRICT (fstat, frame, -1, ENOMEM,
- NULL);
- goto out;
- }
-
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_fstat_stub(frame, default_fstat_resume, local->fd, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(fstat, frame, -1, ENOMEM, NULL, NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (fstat, frame, op_ret, op_errno, buf);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(fstat, frame, op_ret, op_errno, buf, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
int32_t
-quiesce_opendir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, fd_t *fd)
+quiesce_opendir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, fd_t *fd, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
-
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_opendir_stub (frame, default_opendir_resume,
- &local->loc, local->fd);
- if (!stub) {
- STACK_UNWIND_STRICT (opendir, frame, -1, ENOMEM,
- NULL);
- goto out;
- }
-
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_opendir_stub(frame, default_opendir_resume, &local->loc,
+ local->fd, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(opendir, frame, -1, ENOMEM, NULL, NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (opendir, frame, op_ret, op_errno, fd);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(opendir, frame, op_ret, op_errno, fd, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
int32_t
-quiesce_fsyncdir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+quiesce_fsyncdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
-
- priv = this->private;
-
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_fsyncdir_stub (frame, default_fsyncdir_resume,
- local->fd, local->flag);
- if (!stub) {
- STACK_UNWIND_STRICT (fsyncdir, frame, -1, ENOMEM);
- goto out;
- }
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_fsyncdir_stub(frame, default_fsyncdir_resume, local->fd,
+ local->flag, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(fsyncdir, frame, -1, ENOMEM, NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (fsyncdir, frame, op_ret, op_errno);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(fsyncdir, frame, op_ret, op_errno, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
int32_t
-quiesce_statfs_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct statvfs *buf)
+quiesce_statfs_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct statvfs *buf,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
-
- priv = this->private;
-
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_statfs_stub (frame, default_statfs_resume,
- &local->loc);
- if (!stub) {
- STACK_UNWIND_STRICT (statfs, frame, -1, ENOMEM,
- NULL);
- goto out;
- }
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_statfs_stub(frame, default_statfs_resume, &local->loc,
+ xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(statfs, frame, -1, ENOMEM, NULL, NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (statfs, frame, op_ret, op_errno, buf);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(statfs, frame, op_ret, op_errno, buf, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
int32_t
-quiesce_fgetxattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *dict)
+quiesce_fgetxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
-
- priv = this->private;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_fgetxattr_stub (frame, default_fgetxattr_resume,
- local->fd, local->name);
- if (!stub) {
- STACK_UNWIND_STRICT (fgetxattr, frame, -1, ENOMEM,
- NULL);
- goto out;
- }
-
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_fgetxattr_stub(frame, default_fgetxattr_resume, local->fd,
+ local->name, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(fgetxattr, frame, -1, ENOMEM, NULL, NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (fgetxattr, frame, op_ret, op_errno, dict);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(fgetxattr, frame, op_ret, op_errno, dict, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
-
int32_t
-quiesce_getxattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *dict)
+quiesce_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
-
- priv = this->private;
-
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_getxattr_stub (frame, default_getxattr_resume,
- &local->loc, local->name);
- if (!stub) {
- STACK_UNWIND_STRICT (getxattr, frame, -1, ENOMEM,
- NULL);
- goto out;
- }
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_getxattr_stub(frame, default_getxattr_resume, &local->loc,
+ local->name, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(getxattr, frame, -1, ENOMEM, NULL, NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (getxattr, frame, op_ret, op_errno, dict);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(getxattr, frame, op_ret, op_errno, dict, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
-
int32_t
-quiesce_rchecksum_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, uint32_t weak_checksum,
- uint8_t *strong_checksum)
+quiesce_rchecksum_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, uint32_t weak_checksum,
+ uint8_t *strong_checksum, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
-
- priv = this->private;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_rchecksum_stub (frame, default_rchecksum_resume,
- local->fd, local->offset, local->flag);
- if (!stub) {
- STACK_UNWIND_STRICT (rchecksum, frame, -1, ENOMEM,
- 0, NULL);
- goto out;
- }
-
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_rchecksum_stub(frame, default_rchecksum_resume, local->fd,
+ local->offset, local->flag, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(rchecksum, frame, -1, ENOMEM, 0, NULL, NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (rchecksum, frame, op_ret, op_errno, weak_checksum,
- strong_checksum);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(rchecksum, frame, op_ret, op_errno, weak_checksum,
+ strong_checksum, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
-
int32_t
-quiesce_readdir_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, gf_dirent_t *entries)
+quiesce_readdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, gf_dirent_t *entries,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
-
- priv = this->private;
-
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_readdir_stub (frame, default_readdir_resume,
- local->fd, local->size, local->offset);
- if (!stub) {
- STACK_UNWIND_STRICT (readdir, frame, -1, ENOMEM,
- NULL);
- goto out;
- }
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_readdir_stub(frame, default_readdir_resume, local->fd,
+ local->size, local->offset, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(readdir, frame, -1, ENOMEM, NULL, NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (readdir, frame, op_ret, op_errno, entries);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(readdir, frame, op_ret, op_errno, entries, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
-
int32_t
-quiesce_readdirp_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, gf_dirent_t *entries)
+quiesce_readdirp_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, gf_dirent_t *entries,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
-
- priv = this->private;
-
- local = frame->local;
- frame->local = NULL;
- if ((op_ret == -1) && (op_errno == ENOTCONN)) {
- /* Re-transmit (by putting in the queue) */
- stub = fop_readdirp_stub (frame, default_readdirp_resume,
- local->fd, local->size, local->offset);
- if (!stub) {
- STACK_UNWIND_STRICT (readdirp, frame, -1, ENOMEM,
- NULL);
- goto out;
- }
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- gf_quiesce_enqueue (this, stub);
- goto out;
+ local = frame->local;
+ frame->local = NULL;
+ if ((op_ret == -1) && (op_errno == ENOTCONN)) {
+ /* Re-transmit (by putting in the queue) */
+ stub = fop_readdirp_stub(frame, default_readdirp_resume, local->fd,
+ local->size, local->offset, local->dict);
+ if (!stub) {
+ STACK_UNWIND_STRICT(readdirp, frame, -1, ENOMEM, NULL, NULL);
+ goto out;
}
- STACK_UNWIND_STRICT (readdirp, frame, op_ret, op_errno, entries);
+ gf_quiesce_enqueue(this, stub);
+ goto out;
+ }
+
+ STACK_UNWIND_STRICT(readdirp, frame, op_ret, op_errno, entries, xdata);
out:
- gf_quiesce_local_wipe (this, local);
+ gf_quiesce_local_wipe(this, local);
- return 0;
+ return 0;
}
-
#if 0
int32_t
quiesce_writev_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
- struct iatt *postbuf)
+ struct iatt *postbuf, dict_t *xdata)
{
quiesce_priv_t *priv = NULL;
call_stub_t *stub = NULL;
@@ -756,10 +835,11 @@ quiesce_writev_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
/* Re-transmit (by putting in the queue) */
stub = fop_writev_stub (frame, default_writev_resume,
local->fd, local->vector, local->flag,
- local->offset, local->iobref);
+ local->offset, local->io_flags,
+ local->iobref, xdata);
if (!stub) {
STACK_UNWIND_STRICT (writev, frame, -1, ENOMEM,
- NULL, NULL);
+ NULL, NULL, NULL);
goto out;
}
@@ -767,7 +847,7 @@ quiesce_writev_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
goto out;
}
- STACK_UNWIND_STRICT (writev, frame, op_ret, op_errno, prebuf, postbuf);
+ STACK_UNWIND_STRICT (writev, frame, op_ret, op_errno, prebuf, postbuf, xdata);
out:
gf_quiesce_local_wipe (this, local);
@@ -776,7 +856,7 @@ out:
int32_t
quiesce_xattrop_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *dict)
+ int32_t op_ret, int32_t op_errno, dict_t *dict, dict_t *xdata)
{
quiesce_priv_t *priv = NULL;
call_stub_t *stub = NULL;
@@ -790,10 +870,10 @@ quiesce_xattrop_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
/* Re-transmit (by putting in the queue) */
stub = fop_xattrop_stub (frame, default_xattrop_resume,
&local->loc, local->xattrop_flags,
- local->dict);
+ local->dict, xdata);
if (!stub) {
STACK_UNWIND_STRICT (xattrop, frame, -1, ENOMEM,
- NULL);
+ NULL, NULL);
goto out;
}
@@ -801,7 +881,7 @@ quiesce_xattrop_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
goto out;
}
- STACK_UNWIND_STRICT (xattrop, frame, op_ret, op_errno, dict);
+ STACK_UNWIND_STRICT (xattrop, frame, op_ret, op_errno, dict, xdata);
out:
gf_quiesce_local_wipe (this, local);
@@ -810,7 +890,7 @@ out:
int32_t
quiesce_fxattrop_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, dict_t *dict)
+ int32_t op_ret, int32_t op_errno, dict_t *dict, dict_t *xdata)
{
quiesce_priv_t *priv = NULL;
call_stub_t *stub = NULL;
@@ -824,10 +904,10 @@ quiesce_fxattrop_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
/* Re-transmit (by putting in the queue) */
stub = fop_fxattrop_stub (frame, default_fxattrop_resume,
local->fd, local->xattrop_flags,
- local->dict);
+ local->dict, xdata);
if (!stub) {
STACK_UNWIND_STRICT (fxattrop, frame, -1, ENOMEM,
- NULL);
+ NULL, NULL);
goto out;
}
@@ -835,7 +915,7 @@ quiesce_fxattrop_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
goto out;
}
- STACK_UNWIND_STRICT (fxattrop, frame, op_ret, op_errno, dict);
+ STACK_UNWIND_STRICT (fxattrop, frame, op_ret, op_errno, dict, xdata);
out:
gf_quiesce_local_wipe (this, local);
@@ -844,7 +924,7 @@ out:
int32_t
quiesce_lk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, struct gf_flock *lock)
+ int32_t op_ret, int32_t op_errno, struct gf_flock *lock, dict_t *xdata)
{
quiesce_priv_t *priv = NULL;
call_stub_t *stub = NULL;
@@ -857,10 +937,10 @@ quiesce_lk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
if ((op_ret == -1) && (op_errno == ENOTCONN)) {
/* Re-transmit (by putting in the queue) */
stub = fop_lk_stub (frame, default_lk_resume,
- local->fd, local->flag, &local->flock);
+ local->fd, local->flag, &local->flock, xdata);
if (!stub) {
STACK_UNWIND_STRICT (lk, frame, -1, ENOMEM,
- NULL);
+ NULL, NULL);
goto out;
}
@@ -868,7 +948,7 @@ quiesce_lk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
goto out;
}
- STACK_UNWIND_STRICT (lk, frame, op_ret, op_errno, lock);
+ STACK_UNWIND_STRICT (lk, frame, op_ret, op_errno, lock, xdata);
out:
gf_quiesce_local_wipe (this, local);
@@ -877,7 +957,7 @@ out:
int32_t
quiesce_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
quiesce_priv_t *priv = NULL;
call_stub_t *stub = NULL;
@@ -891,9 +971,9 @@ quiesce_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
/* Re-transmit (by putting in the queue) */
stub = fop_inodelk_stub (frame, default_inodelk_resume,
local->volname, &local->loc,
- local->flag, &local->flock);
+ local->flag, &local->flock, xdata);
if (!stub) {
- STACK_UNWIND_STRICT (inodelk, frame, -1, ENOMEM);
+ STACK_UNWIND_STRICT (inodelk, frame, -1, ENOMEM, NULL);
goto out;
}
@@ -901,7 +981,7 @@ quiesce_inodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
goto out;
}
- STACK_UNWIND_STRICT (inodelk, frame, op_ret, op_errno);
+ STACK_UNWIND_STRICT (inodelk, frame, op_ret, op_errno, xdata);
out:
gf_quiesce_local_wipe (this, local);
@@ -911,7 +991,7 @@ out:
int32_t
quiesce_finodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
quiesce_priv_t *priv = NULL;
call_stub_t *stub = NULL;
@@ -925,9 +1005,9 @@ quiesce_finodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
/* Re-transmit (by putting in the queue) */
stub = fop_finodelk_stub (frame, default_finodelk_resume,
local->volname, local->fd,
- local->flag, &local->flock);
+ local->flag, &local->flock, xdata);
if (!stub) {
- STACK_UNWIND_STRICT (finodelk, frame, -1, ENOMEM);
+ STACK_UNWIND_STRICT (finodelk, frame, -1, ENOMEM, NULL);
goto out;
}
@@ -935,7 +1015,7 @@ quiesce_finodelk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
goto out;
}
- STACK_UNWIND_STRICT (finodelk, frame, op_ret, op_errno);
+ STACK_UNWIND_STRICT (finodelk, frame, op_ret, op_errno, xdata);
out:
gf_quiesce_local_wipe (this, local);
@@ -944,7 +1024,7 @@ out:
int32_t
quiesce_entrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
quiesce_priv_t *priv = NULL;
call_stub_t *stub = NULL;
@@ -958,9 +1038,9 @@ quiesce_entrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
/* Re-transmit (by putting in the queue) */
stub = fop_entrylk_stub (frame, default_entrylk_resume,
local->volname, &local->loc,
- local->name, local->cmd, local->type);
+ local->name, local->cmd, local->type, xdata);
if (!stub) {
- STACK_UNWIND_STRICT (entrylk, frame, -1, ENOMEM);
+ STACK_UNWIND_STRICT (entrylk, frame, -1, ENOMEM, NULL);
goto out;
}
@@ -968,7 +1048,7 @@ quiesce_entrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
goto out;
}
- STACK_UNWIND_STRICT (entrylk, frame, op_ret, op_errno);
+ STACK_UNWIND_STRICT (entrylk, frame, op_ret, op_errno, xdata);
out:
gf_quiesce_local_wipe (this, local);
@@ -977,7 +1057,7 @@ out:
int32_t
quiesce_fentrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno)
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
quiesce_priv_t *priv = NULL;
call_stub_t *stub = NULL;
@@ -991,9 +1071,9 @@ quiesce_fentrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
/* Re-transmit (by putting in the queue) */
stub = fop_fentrylk_stub (frame, default_fentrylk_resume,
local->volname, local->fd,
- local->name, local->cmd, local->type);
+ local->name, local->cmd, local->type, xdata);
if (!stub) {
- STACK_UNWIND_STRICT (fentrylk, frame, -1, ENOMEM);
+ STACK_UNWIND_STRICT (fentrylk, frame, -1, ENOMEM, NULL);
goto out;
}
@@ -1001,7 +1081,7 @@ quiesce_fentrylk_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
goto out;
}
- STACK_UNWIND_STRICT (fentrylk, frame, op_ret, op_errno);
+ STACK_UNWIND_STRICT (fentrylk, frame, op_ret, op_errno, xdata);
out:
gf_quiesce_local_wipe (this, local);
@@ -1011,7 +1091,7 @@ out:
int32_t
quiesce_setattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno, struct iatt *statpre,
- struct iatt *statpost)
+ struct iatt *statpost, dict_t *xdata)
{
quiesce_priv_t *priv = NULL;
call_stub_t *stub = NULL;
@@ -1024,10 +1104,10 @@ quiesce_setattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
if ((op_ret == -1) && (op_errno == ENOTCONN)) {
/* Re-transmit (by putting in the queue) */
stub = fop_setattr_stub (frame, default_setattr_resume,
- &local->loc, &local->stbuf, local->flag);
+ &local->loc, &local->stbuf, local->flag, xdata);
if (!stub) {
STACK_UNWIND_STRICT (setattr, frame, -1, ENOMEM,
- NULL, NULL);
+ NULL, NULL, NULL);
goto out;
}
@@ -1036,7 +1116,7 @@ quiesce_setattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
}
STACK_UNWIND_STRICT (setattr, frame, op_ret, op_errno, statpre,
- statpost);
+ statpost, xdata);
out:
gf_quiesce_local_wipe (this, local);
@@ -1046,7 +1126,7 @@ out:
int32_t
quiesce_fsetattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno, struct iatt *statpre,
- struct iatt *statpost)
+ struct iatt *statpost, dict_t *xdata)
{
quiesce_priv_t *priv = NULL;
call_stub_t *stub = NULL;
@@ -1060,10 +1140,10 @@ quiesce_fsetattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
if ((op_ret == -1) && (op_errno == ENOTCONN)) {
/* Re-transmit (by putting in the queue) */
stub = fop_fsetattr_stub (frame, default_fsetattr_resume,
- local->fd, &local->stbuf, local->flag);
+ local->fd, &local->stbuf, local->flag, xdata);
if (!stub) {
STACK_UNWIND_STRICT (fsetattr, frame, -1, ENOMEM,
- NULL, NULL);
+ NULL, NULL, NULL);
goto out;
}
@@ -1072,7 +1152,7 @@ quiesce_fsetattr_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
}
STACK_UNWIND_STRICT (fsetattr, frame, op_ret, op_errno, statpre,
- statpost);
+ statpost, xdata);
out:
gf_quiesce_local_wipe (this, local);
@@ -1081,1596 +1161,1544 @@ out:
#endif /* if 0 */
-
/* FOP */
/* No retransmission */
int32_t
-quiesce_removexattr (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- const char *name)
+quiesce_removexattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->pass_through) {
- STACK_WIND (frame,
- default_removexattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->removexattr,
- loc,
- name);
- return 0;
- }
+ if (priv->pass_through) {
+ STACK_WIND(frame, default_removexattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->removexattr, loc, name, xdata);
+ return 0;
+ }
- stub = fop_removexattr_stub (frame, default_removexattr_resume,
- loc, name);
- if (!stub) {
- STACK_UNWIND_STRICT (removexattr, frame, -1, ENOMEM);
- return 0;
- }
+ stub = fop_removexattr_stub(frame, default_removexattr_resume, loc, name,
+ xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(removexattr, frame, -1, ENOMEM, NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
int32_t
-quiesce_truncate (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- off_t offset)
+quiesce_fremovexattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ const char *name, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->pass_through) {
- STACK_WIND (frame,
- default_truncate_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->truncate,
- loc,
- offset);
- return 0;
- }
+ if (priv->pass_through) {
+ STACK_WIND(frame, default_fremovexattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fremovexattr, fd, name, xdata);
+ return 0;
+ }
- stub = fop_truncate_stub (frame, default_truncate_resume, loc, offset);
- if (!stub) {
- STACK_UNWIND_STRICT (truncate, frame, -1, ENOMEM, NULL, NULL);
- return 0;
- }
+ stub = fop_fremovexattr_stub(frame, default_fremovexattr_resume, fd, name,
+ xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(fremovexattr, frame, -1, ENOMEM, NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
int32_t
-quiesce_fsetxattr (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- dict_t *dict,
- int32_t flags)
+quiesce_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->pass_through) {
- STACK_WIND (frame,
- default_fsetxattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fsetxattr,
- fd,
- dict,
- flags);
- return 0;
- }
+ if (priv->pass_through) {
+ STACK_WIND(frame, default_truncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->truncate, loc, offset, xdata);
+ return 0;
+ }
- stub = fop_fsetxattr_stub (frame, default_fsetxattr_resume,
- fd, dict, flags);
- if (!stub) {
- STACK_UNWIND_STRICT (fsetxattr, frame, -1, ENOMEM);
- return 0;
- }
+ stub = fop_truncate_stub(frame, default_truncate_resume, loc, offset,
+ xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(truncate, frame, -1, ENOMEM, NULL, NULL, NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
int32_t
-quiesce_setxattr (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- dict_t *dict,
- int32_t flags)
+quiesce_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *dict,
+ int32_t flags, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->pass_through) {
- STACK_WIND (frame,
- default_setxattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->setxattr,
- loc,
- dict,
- flags);
- return 0;
- }
+ if (priv->pass_through) {
+ STACK_WIND(frame, default_fsetxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags, xdata);
+ return 0;
+ }
- stub = fop_setxattr_stub (frame, default_setxattr_resume,
- loc, dict, flags);
- if (!stub) {
- STACK_UNWIND_STRICT (setxattr, frame, -1, ENOMEM);
- return 0;
- }
+ stub = fop_fsetxattr_stub(frame, default_fsetxattr_resume, fd, dict, flags,
+ xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(fsetxattr, frame, -1, ENOMEM, NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
int32_t
-quiesce_create (call_frame_t *frame, xlator_t *this,
- loc_t *loc, int32_t flags, mode_t mode,
- fd_t *fd, dict_t *params)
+quiesce_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
+ int32_t flags, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->pass_through) {
-                /* Don't send O_APPEND below, as write() re-transmissions can
-                   fail with O_APPEND */
- STACK_WIND (frame, default_create_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->create,
- loc, (flags & ~O_APPEND), mode, fd, params);
- return 0;
- }
+ if (priv->pass_through) {
+ STACK_WIND(frame, default_setxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setxattr, loc, dict, flags, xdata);
+ return 0;
+ }
- stub = fop_create_stub (frame, default_create_resume,
- loc, (flags & ~O_APPEND), mode, fd, params);
- if (!stub) {
- STACK_UNWIND_STRICT (create, frame, -1, ENOMEM,
- NULL, NULL, NULL, NULL, NULL);
- return 0;
- }
+ stub = fop_setxattr_stub(frame, default_setxattr_resume, loc, dict, flags,
+ xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(setxattr, frame, -1, ENOMEM, NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
int32_t
-quiesce_link (call_frame_t *frame,
- xlator_t *this,
- loc_t *oldloc,
- loc_t *newloc)
+quiesce_create(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ mode_t mode, mode_t umask, fd_t *fd, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->pass_through) {
- STACK_WIND (frame,
- default_link_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->link,
- oldloc, newloc);
- return 0;
- }
+ if (priv->pass_through) {
+        /* Don't send O_APPEND below, as write() re-transmissions can
+           fail with O_APPEND */
+ STACK_WIND(frame, default_create_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->create, loc, (flags & ~O_APPEND),
+ mode, umask, fd, xdata);
+ return 0;
+ }
- stub = fop_link_stub (frame, default_link_resume, oldloc, newloc);
- if (!stub) {
- STACK_UNWIND_STRICT (link, frame, -1, ENOMEM,
- NULL, NULL, NULL, NULL);
- return 0;
- }
+ stub = fop_create_stub(frame, default_create_resume, loc,
+ (flags & ~O_APPEND), mode, umask, fd, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(create, frame, -1, ENOMEM, NULL, NULL, NULL, NULL,
+ NULL, NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
int32_t
-quiesce_rename (call_frame_t *frame,
- xlator_t *this,
- loc_t *oldloc,
- loc_t *newloc)
+quiesce_link(call_frame_t *frame, xlator_t *this, loc_t *oldloc, loc_t *newloc,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->pass_through) {
- STACK_WIND (frame,
- default_rename_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->rename,
- oldloc, newloc);
- return 0;
- }
+ if (priv->pass_through) {
+ STACK_WIND(frame, default_link_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->link, oldloc, newloc, xdata);
+ return 0;
+ }
- stub = fop_rename_stub (frame, default_rename_resume, oldloc, newloc);
- if (!stub) {
- STACK_UNWIND_STRICT (rename, frame, -1, ENOMEM,
- NULL, NULL, NULL, NULL, NULL);
- return 0;
- }
+ stub = fop_link_stub(frame, default_link_resume, oldloc, newloc, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(link, frame, -1, ENOMEM, NULL, NULL, NULL, NULL,
+ NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
+int32_t
+quiesce_rename(call_frame_t *frame, xlator_t *this, loc_t *oldloc,
+ loc_t *newloc, dict_t *xdata)
+{
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
+
+ priv = this->private;
+
+ if (priv->pass_through) {
+ STACK_WIND(frame, default_rename_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rename, oldloc, newloc, xdata);
+ return 0;
+ }
+
+ stub = fop_rename_stub(frame, default_rename_resume, oldloc, newloc, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(rename, frame, -1, ENOMEM, NULL, NULL, NULL, NULL,
+ NULL, NULL);
+ return 0;
+ }
+
+ gf_quiesce_enqueue(this, stub);
+
+ return 0;
+}
int
-quiesce_symlink (call_frame_t *frame, xlator_t *this,
- const char *linkpath, loc_t *loc, dict_t *params)
+quiesce_symlink(call_frame_t *frame, xlator_t *this, const char *linkpath,
+ loc_t *loc, mode_t umask, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->pass_through) {
- STACK_WIND (frame, default_symlink_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->symlink,
- linkpath, loc, params);
- return 0;
- }
+ if (priv->pass_through) {
+ STACK_WIND(frame, default_symlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->symlink, linkpath, loc, umask,
+ xdata);
+ return 0;
+ }
- stub = fop_symlink_stub (frame, default_symlink_resume,
- linkpath, loc, params);
- if (!stub) {
- STACK_UNWIND_STRICT (symlink, frame, -1, ENOMEM,
- NULL, NULL, NULL, NULL);
- return 0;
- }
+ stub = fop_symlink_stub(frame, default_symlink_resume, linkpath, loc, umask,
+ xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(symlink, frame, -1, ENOMEM, NULL, NULL, NULL, NULL,
+ NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
-
int
-quiesce_rmdir (call_frame_t *frame, xlator_t *this, loc_t *loc, int flags)
+quiesce_rmdir(call_frame_t *frame, xlator_t *this, loc_t *loc, int flags,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->pass_through) {
- STACK_WIND (frame, default_rmdir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->rmdir,
- loc, flags);
- return 0;
- }
+ if (priv->pass_through) {
+ STACK_WIND(frame, default_rmdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->rmdir, loc, flags, xdata);
+ return 0;
+ }
- stub = fop_rmdir_stub (frame, default_rmdir_resume, loc, flags);
- if (!stub) {
- STACK_UNWIND_STRICT (rmdir, frame, -1, ENOMEM, NULL, NULL);
- return 0;
- }
+ stub = fop_rmdir_stub(frame, default_rmdir_resume, loc, flags, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(rmdir, frame, -1, ENOMEM, NULL, NULL, NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
int32_t
-quiesce_unlink (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc)
+quiesce_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int xflag,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->pass_through) {
- STACK_WIND (frame,
- default_unlink_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->unlink,
- loc);
- return 0;
- }
+ if (priv->pass_through) {
+ STACK_WIND(frame, default_unlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->unlink, loc, xflag, xdata);
+ return 0;
+ }
- stub = fop_unlink_stub (frame, default_unlink_resume, loc);
- if (!stub) {
- STACK_UNWIND_STRICT (unlink, frame, -1, ENOMEM, NULL, NULL);
- return 0;
- }
+ stub = fop_unlink_stub(frame, default_unlink_resume, loc, xflag, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(unlink, frame, -1, ENOMEM, NULL, NULL, NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
int
-quiesce_mkdir (call_frame_t *frame, xlator_t *this,
- loc_t *loc, mode_t mode, dict_t *params)
+quiesce_mkdir(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ mode_t umask, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->pass_through) {
- STACK_WIND (frame, default_mkdir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->mkdir,
- loc, mode, params);
- return 0;
- }
+ if (priv->pass_through) {
+ STACK_WIND(frame, default_mkdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mkdir, loc, mode, umask, xdata);
+ return 0;
+ }
- stub = fop_mkdir_stub (frame, default_mkdir_resume,
- loc, mode, params);
- if (!stub) {
- STACK_UNWIND_STRICT (mkdir, frame, -1, ENOMEM,
- NULL, NULL, NULL, NULL);
- return 0;
- }
+ stub = fop_mkdir_stub(frame, default_mkdir_resume, loc, mode, umask, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(mkdir, frame, -1, ENOMEM, NULL, NULL, NULL, NULL,
+ NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
-
int
-quiesce_mknod (call_frame_t *frame, xlator_t *this,
- loc_t *loc, mode_t mode, dev_t rdev, dict_t *parms)
+quiesce_mknod(call_frame_t *frame, xlator_t *this, loc_t *loc, mode_t mode,
+ dev_t rdev, mode_t umask, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->pass_through) {
- STACK_WIND (frame, default_mknod_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->mknod,
- loc, mode, rdev, parms);
- return 0;
- }
+ if (priv->pass_through) {
+ STACK_WIND(frame, default_mknod_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->mknod, loc, mode, rdev, umask,
+ xdata);
+ return 0;
+ }
- stub = fop_mknod_stub (frame, default_mknod_resume,
- loc, mode, rdev, parms);
- if (!stub) {
- STACK_UNWIND_STRICT (mknod, frame, -1, ENOMEM,
- NULL, NULL, NULL, NULL);
- return 0;
- }
+ stub = fop_mknod_stub(frame, default_mknod_resume, loc, mode, rdev, umask,
+ xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(mknod, frame, -1, ENOMEM, NULL, NULL, NULL, NULL,
+ NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
int32_t
-quiesce_ftruncate (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- off_t offset)
+quiesce_ftruncate(call_frame_t *frame, xlator_t *this, fd_t *fd, off_t offset,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv->pass_through) {
- STACK_WIND (frame,
- default_ftruncate_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->ftruncate,
- fd,
- offset);
- return 0;
- }
+ if (priv->pass_through) {
+ STACK_WIND(frame, default_ftruncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->ftruncate, fd, offset, xdata);
+ return 0;
+ }
- stub = fop_ftruncate_stub (frame, default_ftruncate_resume, fd, offset);
- if (!stub) {
- STACK_UNWIND_STRICT (ftruncate, frame, -1, ENOMEM, NULL, NULL);
- return 0;
- }
+ stub = fop_ftruncate_stub(frame, default_ftruncate_resume, fd, offset,
+ xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(ftruncate, frame, -1, ENOMEM, NULL, NULL, NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
/* Re-transmission */
int32_t
-quiesce_readlink (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- size_t size)
+quiesce_readlink(call_frame_t *frame, xlator_t *this, loc_t *loc, size_t size,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv && priv->pass_through) {
- local = mem_get0 (priv->local_pool);
- loc_dup (loc, &local->loc);
- local->size = size;
- frame->local = local;
-
- STACK_WIND (frame,
- quiesce_readlink_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->readlink,
- loc,
- size);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ local = mem_get0(priv->local_pool);
+ loc_dup(loc, &local->loc);
+ local->size = size;
+ frame->local = local;
- stub = fop_readlink_stub (frame, default_readlink_resume, loc, size);
- if (!stub) {
- STACK_UNWIND_STRICT (readlink, frame, -1, ENOMEM, NULL, NULL);
- return 0;
- }
-
- gf_quiesce_enqueue (this, stub);
+ STACK_WIND(frame, quiesce_readlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readlink, loc, size, xdata);
+ return 0;
+ }
+ stub = fop_readlink_stub(frame, default_readlink_resume, loc, size, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(readlink, frame, -1, ENOMEM, NULL, NULL, NULL);
return 0;
-}
+ }
+ gf_quiesce_enqueue(this, stub);
+
+ return 0;
+}
int32_t
-quiesce_access (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- int32_t mask)
+quiesce_access(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t mask,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv && priv->pass_through) {
- local = mem_get0 (priv->local_pool);
- loc_dup (loc, &local->loc);
- local->flag = mask;
- frame->local = local;
-
- STACK_WIND (frame,
- quiesce_access_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->access,
- loc,
- mask);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ local = mem_get0(priv->local_pool);
+ loc_dup(loc, &local->loc);
+ local->flag = mask;
+ frame->local = local;
- stub = fop_access_stub (frame, default_access_resume, loc, mask);
- if (!stub) {
- STACK_UNWIND_STRICT (access, frame, -1, ENOMEM);
- return 0;
- }
-
- gf_quiesce_enqueue (this, stub);
+ STACK_WIND(frame, quiesce_access_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->access, loc, mask, xdata);
+ return 0;
+ }
+ stub = fop_access_stub(frame, default_access_resume, loc, mask, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(access, frame, -1, ENOMEM, NULL);
return 0;
+ }
+
+ gf_quiesce_enqueue(this, stub);
+
+ return 0;
}
int32_t
-quiesce_fgetxattr (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- const char *name)
+quiesce_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ const char *name, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv && priv->pass_through) {
- local = mem_get0 (priv->local_pool);
- local->fd = fd_ref (fd);
- if (name)
- local->name = gf_strdup (name);
-
- frame->local = local;
-
- STACK_WIND (frame,
- quiesce_fgetxattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fgetxattr,
- fd,
- name);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ local = mem_get0(priv->local_pool);
+ local->fd = fd_ref(fd);
+ if (name)
+ local->name = gf_strdup(name);
- stub = fop_fgetxattr_stub (frame, default_fgetxattr_resume, fd, name);
- if (!stub) {
- STACK_UNWIND_STRICT (fgetxattr, frame, -1, ENOMEM, NULL);
- return 0;
- }
+ frame->local = local;
- gf_quiesce_enqueue (this, stub);
+ STACK_WIND(frame, quiesce_fgetxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fgetxattr, fd, name, xdata);
+ return 0;
+ }
+ stub = fop_fgetxattr_stub(frame, default_fgetxattr_resume, fd, name, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(fgetxattr, frame, -1, ENOMEM, NULL, NULL);
return 0;
+ }
+
+ gf_quiesce_enqueue(this, stub);
+
+ return 0;
}
int32_t
-quiesce_statfs (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc)
+quiesce_statfs(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv && priv->pass_through) {
- local = mem_get0 (priv->local_pool);
- loc_dup (loc, &local->loc);
- frame->local = local;
-
- STACK_WIND (frame,
- quiesce_statfs_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->statfs,
- loc);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ local = mem_get0(priv->local_pool);
+ loc_dup(loc, &local->loc);
+ frame->local = local;
- stub = fop_statfs_stub (frame, default_statfs_resume, loc);
- if (!stub) {
- STACK_UNWIND_STRICT (statfs, frame, -1, ENOMEM, NULL);
- return 0;
- }
-
- gf_quiesce_enqueue (this, stub);
+ STACK_WIND(frame, quiesce_statfs_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->statfs, loc, xdata);
+ return 0;
+ }
+ stub = fop_statfs_stub(frame, default_statfs_resume, loc, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(statfs, frame, -1, ENOMEM, NULL, NULL);
return 0;
+ }
+
+ gf_quiesce_enqueue(this, stub);
+
+ return 0;
}
int32_t
-quiesce_fsyncdir (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- int32_t flags)
+quiesce_fsyncdir(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv && priv->pass_through) {
- local = mem_get0 (priv->local_pool);
- local->fd = fd_ref (fd);
- local->flag = flags;
- frame->local = local;
-
- STACK_WIND (frame,
- quiesce_fsyncdir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fsyncdir,
- fd,
- flags);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ local = mem_get0(priv->local_pool);
+ local->fd = fd_ref(fd);
+ local->flag = flags;
+ frame->local = local;
- stub = fop_fsyncdir_stub (frame, default_fsyncdir_resume, fd, flags);
- if (!stub) {
- STACK_UNWIND_STRICT (fsyncdir, frame, -1, ENOMEM);
- return 0;
- }
-
- gf_quiesce_enqueue (this, stub);
+ STACK_WIND(frame, quiesce_fsyncdir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsyncdir, fd, flags, xdata);
+ return 0;
+ }
+ stub = fop_fsyncdir_stub(frame, default_fsyncdir_resume, fd, flags, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(fsyncdir, frame, -1, ENOMEM, NULL);
return 0;
+ }
+
+ gf_quiesce_enqueue(this, stub);
+
+ return 0;
}
int32_t
-quiesce_opendir (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc, fd_t *fd)
+quiesce_opendir(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv && priv->pass_through) {
- local = mem_get0 (priv->local_pool);
- loc_dup (loc, &local->loc);
- local->fd = fd_ref (fd);
- frame->local = local;
-
- STACK_WIND (frame,
- quiesce_opendir_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->opendir,
- loc, fd);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ local = mem_get0(priv->local_pool);
+ loc_dup(loc, &local->loc);
+ local->fd = fd_ref(fd);
+ frame->local = local;
- stub = fop_opendir_stub (frame, default_opendir_resume, loc, fd);
- if (!stub) {
- STACK_UNWIND_STRICT (opendir, frame, -1, ENOMEM, NULL);
- return 0;
- }
-
- gf_quiesce_enqueue (this, stub);
+ STACK_WIND(frame, quiesce_opendir_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->opendir, loc, fd, xdata);
+ return 0;
+ }
+ stub = fop_opendir_stub(frame, default_opendir_resume, loc, fd, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(opendir, frame, -1, ENOMEM, NULL, NULL);
return 0;
+ }
+
+ gf_quiesce_enqueue(this, stub);
+
+ return 0;
}
int32_t
-quiesce_fstat (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd)
+quiesce_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
-
- priv = this->private;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- if (priv && priv->pass_through) {
- local = mem_get0 (priv->local_pool);
- local->fd = fd_ref (fd);
- frame->local = local;
-
- STACK_WIND (frame,
- quiesce_fstat_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fstat,
- fd);
- return 0;
- }
+ priv = this->private;
- stub = fop_fstat_stub (frame, default_fstat_resume, fd);
- if (!stub) {
- STACK_UNWIND_STRICT (fstat, frame, -1, ENOMEM, NULL);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ local = mem_get0(priv->local_pool);
+ local->fd = fd_ref(fd);
+ frame->local = local;
- gf_quiesce_enqueue (this, stub);
+ STACK_WIND(frame, quiesce_fstat_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fstat, fd, xdata);
+ return 0;
+ }
+ stub = fop_fstat_stub(frame, default_fstat_resume, fd, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(fstat, frame, -1, ENOMEM, NULL, NULL);
return 0;
+ }
+
+ gf_quiesce_enqueue(this, stub);
+
+ return 0;
}
int32_t
-quiesce_fsync (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- int32_t flags)
+quiesce_fsync(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t flags,
+ dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv && priv->pass_through) {
- local = mem_get0 (priv->local_pool);
- local->fd = fd_ref (fd);
- local->flag = flags;
- frame->local = local;
-
- STACK_WIND (frame,
- quiesce_fsync_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fsync,
- fd,
- flags);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ local = mem_get0(priv->local_pool);
+ local->fd = fd_ref(fd);
+ local->flag = flags;
+ frame->local = local;
- stub = fop_fsync_stub (frame, default_fsync_resume, fd, flags);
- if (!stub) {
- STACK_UNWIND_STRICT (fsync, frame, -1, ENOMEM, NULL, NULL);
- return 0;
- }
-
- gf_quiesce_enqueue (this, stub);
+ STACK_WIND(frame, quiesce_fsync_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsync, fd, flags, xdata);
+ return 0;
+ }
+ stub = fop_fsync_stub(frame, default_fsync_resume, fd, flags, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(fsync, frame, -1, ENOMEM, NULL, NULL, NULL);
return 0;
+ }
+
+ gf_quiesce_enqueue(this, stub);
+
+ return 0;
}
int32_t
-quiesce_flush (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd)
+quiesce_flush(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv && priv->pass_through) {
- local = mem_get0 (priv->local_pool);
- local->fd = fd_ref (fd);
- frame->local = local;
-
- STACK_WIND (frame,
- quiesce_flush_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->flush,
- fd);
- return 0;
- }
-
- stub = fop_flush_stub (frame, default_flush_resume, fd);
- if (!stub) {
- STACK_UNWIND_STRICT (flush, frame, -1, ENOMEM);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ local = mem_get0(priv->local_pool);
+ local->fd = fd_ref(fd);
+ frame->local = local;
- gf_quiesce_enqueue (this, stub);
+ STACK_WIND(frame, quiesce_flush_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->flush, fd, xdata);
+ return 0;
+ }
+ stub = fop_flush_stub(frame, default_flush_resume, fd, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(flush, frame, -1, ENOMEM, NULL);
return 0;
+ }
+
+ gf_quiesce_enqueue(this, stub);
+
+ return 0;
}
int32_t
-quiesce_writev (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- struct iovec *vector,
- int32_t count,
- off_t off,
- struct iobref *iobref)
+quiesce_writev(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ struct iovec *vector, int32_t count, off_t off, uint32_t flags,
+ struct iobref *iobref, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv && priv->pass_through) {
- STACK_WIND (frame,
- default_writev_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->writev,
- fd,
- vector,
- count,
- off,
- iobref);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ STACK_WIND(frame, default_writev_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->writev, fd, vector, count, off,
+ flags, iobref, xdata);
+ return 0;
+ }
- stub = fop_writev_stub (frame, default_writev_resume,
- fd, vector, count, off, iobref);
- if (!stub) {
- STACK_UNWIND_STRICT (writev, frame, -1, ENOMEM, NULL, NULL);
- return 0;
- }
+ stub = fop_writev_stub(frame, default_writev_resume, fd, vector, count, off,
+ flags, iobref, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(writev, frame, -1, ENOMEM, NULL, NULL, NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
int32_t
-quiesce_readv (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- size_t size,
- off_t offset)
+quiesce_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
-
- if (priv && priv->pass_through) {
- local = mem_get0 (priv->local_pool);
- local->fd = fd_ref (fd);
- local->size = size;
- local->offset = offset;
- frame->local = local;
-
- STACK_WIND (frame,
- quiesce_readv_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->readv,
- fd,
- size,
- offset);
- return 0;
- }
+ priv = this->private;
- stub = fop_readv_stub (frame, default_readv_resume, fd, size, offset);
- if (!stub) {
- STACK_UNWIND_STRICT (readv, frame, -1, ENOMEM,
- NULL, 0, NULL, NULL);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ local = mem_get0(priv->local_pool);
+ local->fd = fd_ref(fd);
+ local->size = size;
+ local->offset = offset;
+ local->io_flag = flags;
+ frame->local = local;
- gf_quiesce_enqueue (this, stub);
+ STACK_WIND(frame, quiesce_readv_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readv, fd, size, offset, flags,
+ xdata);
+ return 0;
+ }
+ stub = fop_readv_stub(frame, default_readv_resume, fd, size, offset, flags,
+ xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(readv, frame, -1, ENOMEM, NULL, 0, NULL, NULL,
+ NULL);
return 0;
-}
+ }
+
+ gf_quiesce_enqueue(this, stub);
+ return 0;
+}
int32_t
-quiesce_open (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- int32_t flags, fd_t *fd,
- int32_t wbflags)
+quiesce_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ fd_t *fd, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv && priv->pass_through) {
- local = mem_get0 (priv->local_pool);
- loc_dup (loc, &local->loc);
- local->fd = fd_ref (fd);
-
-                /* Don't send O_APPEND below, as write() re-transmissions can
-                   fail with O_APPEND */
- local->flag = (flags & ~O_APPEND);
- local->wbflags = wbflags;
- frame->local = local;
-
- STACK_WIND (frame,
- quiesce_open_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->open,
- loc, (flags & ~O_APPEND), fd, wbflags);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ local = mem_get0(priv->local_pool);
+ loc_dup(loc, &local->loc);
+ local->fd = fd_ref(fd);
- stub = fop_open_stub (frame, default_open_resume, loc,
- (flags & ~O_APPEND), fd, wbflags);
- if (!stub) {
- STACK_UNWIND_STRICT (open, frame, -1, ENOMEM, NULL);
- return 0;
- }
+        /* Don't send O_APPEND below, as write() re-transmissions can
+           fail with O_APPEND */
+ local->flag = (flags & ~O_APPEND);
+ frame->local = local;
- gf_quiesce_enqueue (this, stub);
+ STACK_WIND(frame, quiesce_open_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->open, loc, (flags & ~O_APPEND), fd,
+ xdata);
+ return 0;
+ }
+ stub = fop_open_stub(frame, default_open_resume, loc, (flags & ~O_APPEND),
+ fd, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(open, frame, -1, ENOMEM, NULL, NULL);
return 0;
+ }
+
+ gf_quiesce_enqueue(this, stub);
+
+ return 0;
}
int32_t
-quiesce_getxattr (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- const char *name)
+quiesce_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ const char *name, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
- quiesce_local_t *local = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
+ quiesce_local_t *local = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv && priv->pass_through) {
- local = mem_get0 (priv->local_pool);
- loc_dup (loc, &local->loc);
- if (name)
- local->name = gf_strdup (name);
-
- frame->local = local;
-
- STACK_WIND (frame,
- quiesce_getxattr_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->getxattr,
- loc,
- name);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ local = mem_get0(priv->local_pool);
+ loc_dup(loc, &local->loc);
+ if (name)
+ local->name = gf_strdup(name);
- stub = fop_getxattr_stub (frame, default_getxattr_resume, loc, name);
- if (!stub) {
- STACK_UNWIND_STRICT (getxattr, frame, -1, ENOMEM, NULL);
- return 0;
- }
+ frame->local = local;
- gf_quiesce_enqueue (this, stub);
+ STACK_WIND(frame, quiesce_getxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->getxattr, loc, name, xdata);
+ return 0;
+ }
+ stub = fop_getxattr_stub(frame, default_getxattr_resume, loc, name, xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(getxattr, frame, -1, ENOMEM, NULL, NULL);
return 0;
-}
+ }
+ gf_quiesce_enqueue(this, stub);
+
+ return 0;
+}
int32_t
-quiesce_xattrop (call_frame_t *frame,
- xlator_t *this,
- loc_t *loc,
- gf_xattrop_flags_t flags,
- dict_t *dict)
+quiesce_xattrop(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ gf_xattrop_flags_t flags, dict_t *dict, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv && priv->pass_through) {
- STACK_WIND (frame,
- default_xattrop_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->xattrop,
- loc,
- flags,
- dict);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ STACK_WIND(frame, default_xattrop_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->xattrop, loc, flags, dict, xdata);
+ return 0;
+ }
- stub = fop_xattrop_stub (frame, default_xattrop_resume,
- loc, flags, dict);
- if (!stub) {
- STACK_UNWIND_STRICT (xattrop, frame, -1, ENOMEM, NULL);
- return 0;
- }
+ stub = fop_xattrop_stub(frame, default_xattrop_resume, loc, flags, dict,
+ xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(xattrop, frame, -1, ENOMEM, NULL, NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
int32_t
-quiesce_fxattrop (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- gf_xattrop_flags_t flags,
- dict_t *dict)
+quiesce_fxattrop(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ gf_xattrop_flags_t flags, dict_t *dict, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv && priv->pass_through) {
- STACK_WIND (frame,
- default_fxattrop_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->fxattrop,
- fd,
- flags,
- dict);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ STACK_WIND(frame, default_fxattrop_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fxattrop, fd, flags, dict, xdata);
+ return 0;
+ }
- stub = fop_fxattrop_stub (frame, default_fxattrop_resume,
- fd, flags, dict);
- if (!stub) {
- STACK_UNWIND_STRICT (fxattrop, frame, -1, ENOMEM, NULL);
- return 0;
- }
+ stub = fop_fxattrop_stub(frame, default_fxattrop_resume, fd, flags, dict,
+ xdata);
+ if (!stub) {
+ STACK_UNWIND_STRICT(fxattrop, frame, -1, ENOMEM, NULL, NULL);
+ return 0;
+ }
- gf_quiesce_enqueue (this, stub);
+ gf_quiesce_enqueue(this, stub);
- return 0;
+ return 0;
}
int32_t
-quiesce_lk (call_frame_t *frame,
- xlator_t *this,
- fd_t *fd,
- int32_t cmd,
- struct gf_flock *lock)
+quiesce_lk(call_frame_t *frame, xlator_t *this, fd_t *fd, int32_t cmd,
+ struct gf_flock *lock, dict_t *xdata)
{
- quiesce_priv_t *priv = NULL;
- call_stub_t *stub = NULL;
+ quiesce_priv_t *priv = NULL;
+ call_stub_t *stub = NULL;
- priv = this->private;
+ priv = this->private;
- if (priv && priv->pass_through) {
- STACK_WIND (frame,
- default_lk_cbk,
- FIRST_CHILD(this),
- FIRST_CHILD(this)->fops->lk,
- fd,
- cmd,
- lock);
- return 0;
- }
+ if (priv && priv->pass_through) {
+ STACK_WIND(frame, d