path: root/xlators/features/cloudsync
Diffstat (limited to 'xlators/features/cloudsync')
-rw-r--r--  xlators/features/cloudsync/Makefile.am |    3
-rw-r--r--  xlators/features/cloudsync/src/Makefile.am |   46
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.c |   30
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.h |   24
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-common.c |   60
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-common.h |  134
-rwxr-xr-x  xlators/features/cloudsync/src/cloudsync-fops-c.py |  324
-rwxr-xr-x  xlators/features/cloudsync/src/cloudsync-fops-h.py |   31
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-mem-types.h |   22
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-messages.h |   16
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/Makefile.am |    3
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/src/Makefile.am |   11
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/Makefile.am |    3
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/Makefile.am |   12
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3-mem-types.h |   19
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.c |  584
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.h |   50
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.sym |    1
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/Makefile.am |    3
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/Makefile.am |   12
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/archivestore.h |  203
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/cvlt-messages.h |   30
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcloudsynccvlt.sym |    1
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt-mem-types.h |   19
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.c |  842
-rw-r--r--  xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.h |   84
-rw-r--r--  xlators/features/cloudsync/src/cloudsync.c | 2076
-rw-r--r--  xlators/features/cloudsync/src/cloudsync.h |  123
28 files changed, 4766 insertions, 0 deletions
diff --git a/xlators/features/cloudsync/Makefile.am b/xlators/features/cloudsync/Makefile.am
new file mode 100644
index 00000000000..a985f42a877
--- /dev/null
+++ b/xlators/features/cloudsync/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/xlators/features/cloudsync/src/Makefile.am b/xlators/features/cloudsync/src/Makefile.am
new file mode 100644
index 00000000000..e2a277e372b
--- /dev/null
+++ b/xlators/features/cloudsync/src/Makefile.am
@@ -0,0 +1,46 @@
+SUBDIRS = cloudsync-plugins
+
+xlator_LTLIBRARIES = cloudsync.la
+
+xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/features
+
+cloudsync_sources = cloudsync.c
+
+CLOUDSYNC_SRC = $(top_srcdir)/xlators/features/cloudsync/src
+CLOUDSYNC_BLD = $(top_builddir)/xlators/features/cloudsync/src
+
+cloudsynccommon_sources = $(CLOUDSYNC_SRC)/cloudsync-common.c
+
+noinst_HEADERS = $(CLOUDSYNC_BLD)/cloudsync.h \
+ $(CLOUDSYNC_BLD)/cloudsync-mem-types.h \
+ $(CLOUDSYNC_BLD)/cloudsync-messages.h \
+ $(CLOUDSYNC_BLD)/cloudsync-common.h
+
+cloudsync_la_SOURCES = $(cloudsync_sources) $(cloudsynccommon_sources)
+
+nodist_cloudsync_la_SOURCES = cloudsync-autogen-fops.c cloudsync-autogen-fops.h
+BUILT_SOURCES = cloudsync-autogen-fops.h
+
+cloudsync_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
+
+cloudsync_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la $(LIB_DL)
+
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \
+ -DCS_PLUGINDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/cloudsync-plugins\"
+AM_CFLAGS = -Wall -fno-strict-aliasing $(GF_CFLAGS)
+
+noinst_PYTHON = cloudsync-fops-c.py cloudsync-fops-h.py
+EXTRA_DIST = cloudsync-autogen-fops-tmpl.c cloudsync-autogen-fops-tmpl.h
+
+cloudsync-autogen-fops.c: cloudsync-fops-c.py cloudsync-autogen-fops-tmpl.c
+ $(PYTHON) $(CLOUDSYNC_SRC)/cloudsync-fops-c.py \
+ $(CLOUDSYNC_SRC)/cloudsync-autogen-fops-tmpl.c > $@
+
+cloudsync-autogen-fops.h: cloudsync-fops-h.py cloudsync-autogen-fops-tmpl.h
+ $(PYTHON) $(CLOUDSYNC_SRC)/cloudsync-fops-h.py \
+ $(CLOUDSYNC_SRC)/cloudsync-autogen-fops-tmpl.h > $@
+
+CLEANFILES = $(nodist_cloudsync_la_SOURCES)
+
+uninstall-local:
+ rm -f $(DESTDIR)$(xlatordir)/cloudsync.so
diff --git a/xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.c b/xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.c
new file mode 100644
index 00000000000..ee63f983980
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.c
@@ -0,0 +1,30 @@
+/*
+ Copyright (c) 2008-2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+/* File: cloudsync-autogen-fops-tmpl.c
+ * This file is the template for the cloudsync autogenerated FOPs. It is run
+ * through the code generator cloudsync-fops-c.py (which uses generator.py)
+ * to generate the required FOPs. */
+
+#ifndef _CONFIG_H
+#define _CONFIG_H
+#include "config.h"
+#endif
+
+#include <dlfcn.h>
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+#include "cloudsync.h"
+#include "cloudsync-common.h"
+#include <glusterfs/call-stub.h>
+
+#pragma generate
diff --git a/xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.h b/xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.h
new file mode 100644
index 00000000000..d922c77d8aa
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-autogen-fops-tmpl.h
@@ -0,0 +1,24 @@
+/*
+ Copyright (c) 2008-2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+/* File: cloudsync-autogen-fops-tmpl.h
+ * This file contains the cloudsync autogenerated FOPs declarations.
+ */
+
+#ifndef _CLOUDSYNC_AUTOGEN_FOPS_H
+#define _CLOUDSYNC_AUTOGEN_FOPS_H
+
+#include <glusterfs/xlator.h>
+#include "cloudsync.h"
+#include "cloudsync-common.h"
+
+#pragma generate
+
+#endif /* _CLOUDSYNC_AUTOGEN_FOPS_H */
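Editor's note: the "#pragma generate" line above is a placeholder. The generator scripts added later in this change (cloudsync-fops-h.py for this header, cloudsync-fops-c.py for the .c template) rewrite that line into a block bracketed by BEGIN/END markers, one entry per FOP. As a rough illustration only (the exact expansion depends on the substitutions in generator.py), the generated header region for the stat FOP would look roughly like:

    /* BEGIN GENERATED CODE - DO NOT MODIFY */

    int32_t
    cs_stat (call_frame_t *frame, xlator_t *this,
             loc_t *loc, dict_t *xdata);

    /* END GENERATED CODE */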
diff --git a/xlators/features/cloudsync/src/cloudsync-common.c b/xlators/features/cloudsync/src/cloudsync-common.c
new file mode 100644
index 00000000000..445a31b90e7
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-common.c
@@ -0,0 +1,60 @@
+/*
+ Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "cloudsync-common.h"
+
+void
+cs_xattrinfo_wipe(cs_local_t *local)
+{
+ if (local->xattrinfo.lxattr) {
+ if (local->xattrinfo.lxattr->file_path)
+ GF_FREE(local->xattrinfo.lxattr->file_path);
+
+ if (local->xattrinfo.lxattr->volname)
+ GF_FREE(local->xattrinfo.lxattr->volname);
+
+ GF_FREE(local->xattrinfo.lxattr);
+ }
+}
+
+void
+cs_local_wipe(xlator_t *this, cs_local_t *local)
+{
+ if (!local)
+ return;
+
+ loc_wipe(&local->loc);
+
+ if (local->fd) {
+ fd_unref(local->fd);
+ local->fd = NULL;
+ }
+
+ if (local->stub) {
+ call_stub_destroy(local->stub);
+ local->stub = NULL;
+ }
+
+ if (local->xattr_req)
+ dict_unref(local->xattr_req);
+
+ if (local->xattr_rsp)
+ dict_unref(local->xattr_rsp);
+
+ if (local->dlfd)
+ fd_unref(local->dlfd);
+
+ if (local->remotepath)
+ GF_FREE(local->remotepath);
+
+ cs_xattrinfo_wipe(local);
+
+ mem_put(local);
+}
diff --git a/xlators/features/cloudsync/src/cloudsync-common.h b/xlators/features/cloudsync/src/cloudsync-common.h
new file mode 100644
index 00000000000..11d233460a4
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-common.h
@@ -0,0 +1,134 @@
+/*
+ Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _CLOUDSYNC_COMMON_H
+#define _CLOUDSYNC_COMMON_H
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/syncop.h>
+#include <glusterfs/compat-errno.h>
+#include "cloudsync-mem-types.h"
+#include "cloudsync-messages.h"
+
+typedef struct cs_loc_xattr {
+ char *file_path;
+ uuid_t uuid;
+ uuid_t gfid;
+ char *volname;
+} cs_loc_xattr_t;
+
+typedef struct cs_size_xattr {
+ uint64_t size;
+ uint64_t blksize;
+ uint64_t blocks;
+} cs_size_xattr_t;
+
+typedef struct cs_local {
+ loc_t loc;
+ fd_t *fd;
+ call_stub_t *stub;
+ call_frame_t *main_frame;
+ int op_errno;
+ int op_ret;
+ fd_t *dlfd;
+ off_t dloffset;
+ struct iatt stbuf;
+ dict_t *xattr_rsp;
+ dict_t *xattr_req;
+ glusterfs_fop_t fop;
+ gf_boolean_t locked;
+ int call_cnt;
+ inode_t *inode;
+ char *remotepath;
+
+ struct {
+ /* offset, flags and size are the information needed
+ * by read fop for remote read operation. These will be
+ * populated in cloudsync read fop, before being passed
+ * on to the plugin performing remote read.
+ */
+ off_t offset;
+ uint32_t flags;
+ size_t size;
+ cs_loc_xattr_t *lxattr;
+ } xattrinfo;
+
+} cs_local_t;
+
+typedef int (*fop_download_t)(call_frame_t *frame, void *config);
+
+typedef int (*fop_remote_read_t)(call_frame_t *, void *);
+
+typedef void *(*store_init)(xlator_t *this);
+
+typedef int (*store_reconfigure)(xlator_t *this, dict_t *options);
+
+typedef void (*store_fini)(void *config);
+
+struct cs_remote_stores {
+ char *name; /* store name */
+ void *config; /* store related information */
+ fop_download_t dlfop; /* store specific download function */
+ fop_remote_read_t rdfop; /* store specific read function */
+ store_init init; /* store init to initialize store config */
+ store_reconfigure reconfigure; /* reconfigure store config */
+ store_fini fini;
+ void *handle; /* shared library handle*/
+};
+
+typedef struct cs_private {
+ xlator_t *this;
+ struct cs_remote_stores *stores;
+ gf_boolean_t abortdl;
+ pthread_spinlock_t lock;
+ gf_boolean_t remote_read;
+} cs_private_t;
+
+void
+cs_local_wipe(xlator_t *this, cs_local_t *local);
+
+void
+cs_xattrinfo_wipe(cs_local_t *local);
+
+#define CS_STACK_UNWIND(fop, frame, params...) \
+ do { \
+ cs_local_t *__local = NULL; \
+ xlator_t *__xl = NULL; \
+ if (frame) { \
+ __xl = frame->this; \
+ __local = frame->local; \
+ frame->local = NULL; \
+ } \
+ STACK_UNWIND_STRICT(fop, frame, params); \
+ cs_local_wipe(__xl, __local); \
+ } while (0)
+
+#define CS_STACK_DESTROY(frame) \
+ do { \
+ cs_local_t *__local = NULL; \
+ xlator_t *__xl = NULL; \
+ __xl = frame->this; \
+ __local = frame->local; \
+ frame->local = NULL; \
+ STACK_DESTROY(frame->root); \
+ cs_local_wipe(__xl, __local); \
+ } while (0)
+
+typedef struct store_methods {
+ int (*fop_download)(call_frame_t *frame, void *config);
+ int (*fop_remote_read)(call_frame_t *, void *);
+ /* return type should be the store config */
+ void *(*fop_init)(xlator_t *this);
+ int (*fop_reconfigure)(xlator_t *this, dict_t *options);
+ void (*fop_fini)(void *config);
+} store_methods_t;
+
+#endif /* _CLOUDSYNC_COMMON_H */
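Editor's note on CS_STACK_UNWIND: the macro detaches frame->local before calling STACK_UNWIND_STRICT, so the cs_local_t (and the fd/dict references it holds) is wiped only after the parent frame has been resumed, avoiding use-after-free in the unwound path. A minimal usage sketch, not part of this patch (the generated callbacks below use the macro the same way):

    int32_t
    example_flush_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                      int32_t op_ret, int32_t op_errno, dict_t *xdata)
    {
        /* frame->local is detached inside the macro and handed to
         * cs_local_wipe() only after STACK_UNWIND_STRICT returns. */
        CS_STACK_UNWIND(flush, frame, op_ret, op_errno, xdata);
        return 0;
    }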
diff --git a/xlators/features/cloudsync/src/cloudsync-fops-c.py b/xlators/features/cloudsync/src/cloudsync-fops-c.py
new file mode 100755
index 00000000000..c27df97ae58
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-fops-c.py
@@ -0,0 +1,324 @@
+#!/usr/bin/python3
+
+from __future__ import print_function
+import os
+import sys
+
+curdir = os.path.dirname(sys.argv[0])
+gendir = os.path.join(curdir, '../../../../libglusterfs/src')
+sys.path.append(gendir)
+from generator import ops, fop_subs, cbk_subs, generate
+
+FD_DATA_MODIFYING_OP_FOP_TEMPLATE = """
+int32_t
+cs_@NAME@ (call_frame_t *frame, xlator_t *this,
+ @LONG_ARGS@)
+{
+ int op_errno = EINVAL ;
+ cs_local_t *local = NULL;
+ int ret = 0;
+ cs_inode_ctx_t *ctx = NULL;
+ gf_cs_obj_state state = -1;
+
+ VALIDATE_OR_GOTO (frame, err);
+ VALIDATE_OR_GOTO (this, err);
+ VALIDATE_OR_GOTO (fd, err);
+
+ local = cs_local_init (this, frame, NULL, fd, GF_FOP_@UPNAME@);
+ if (!local) {
+
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "local init failed");
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ __cs_inode_ctx_get (this, fd->inode, &ctx);
+
+ if (ctx)
+ state = __cs_get_file_state (fd->inode, ctx);
+ else
+ state = GF_CS_LOCAL;
+
+ xdata = xdata ? dict_ref (xdata) : dict_new ();
+
+ if (!xdata) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "insufficient memory");
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ local->xattr_req = xdata;
+
+ ret = dict_set_uint32 (local->xattr_req, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "dict_set failed key:"
+ " %s", GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+
+ local->stub = fop_@NAME@_stub (frame, cs_resume_@NAME@,
+ @SHORT_ARGS@);
+ if (!local->stub) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "insufficient memory");
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+
+ if (state == GF_CS_LOCAL) {
+ STACK_WIND (frame, cs_@NAME@_cbk,
+ FIRST_CHILD(this), FIRST_CHILD(this)->fops->@NAME@,
+ @SHORT_ARGS@);
+ } else {
+ local->call_cnt++;
+ ret = locate_and_execute (frame);
+ if (ret) {
+ op_errno = ENOMEM;
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ CS_STACK_UNWIND (@NAME@, frame, -1, op_errno, @CBK_ERROR_ARGS@);
+
+ return 0;
+}
+"""
+
+FD_DATA_MODIFYING_RESUME_OP_FOP_TEMPLATE = """
+int32_t
+cs_resume_@NAME@ (call_frame_t *frame, xlator_t *this,
+ @LONG_ARGS@)
+{
+ int ret = 0;
+
+ ret = cs_resume_postprocess (this, frame, fd->inode);
+ if (ret) {
+ goto unwind;
+ }
+
+ cs_inodelk_unlock (frame);
+
+ STACK_WIND (frame, cs_@NAME@_cbk,
+ FIRST_CHILD(this), FIRST_CHILD(this)->fops->@NAME@,
+ @SHORT_ARGS@);
+
+ return 0;
+
+unwind:
+
+ cs_inodelk_unlock (frame);
+
+ cs_common_cbk (frame);
+
+ return 0;
+}
+"""
+FD_DATA_MODIFYING_OP_FOP_CBK_TEMPLATE = """
+int32_t
+cs_@NAME@_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ @LONG_ARGS@)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+ uint64_t val = 0;
+ fd_t *fd = NULL;
+
+ local = frame->local;
+ fd = local->fd;
+
+ /* Do we need lock here? */
+ local->call_cnt++;
+
+ if (op_ret == -1) {
+ ret = dict_get_uint64 (xdata, GF_CS_OBJECT_STATUS, &val);
+ if (ret == 0) {
+ if (val == GF_CS_ERROR) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0,
+ "could not get file state, unwinding");
+ op_ret = -1;
+ op_errno = EIO;
+ goto unwind;
+ } else {
+ __cs_inode_ctx_update (this, fd->inode, val);
+ gf_msg (this->name, GF_LOG_INFO, 0, 0,
+ " state = %" PRIu64, val);
+
+ if (local->call_cnt == 1 &&
+ (val == GF_CS_REMOTE ||
+ val == GF_CS_DOWNLOADING)) {
+ gf_msg (this->name, GF_LOG_INFO, 0,
+ 0, " will repair and download "
+ "the file, current state : %"
+ PRIu64, val);
+ goto repair;
+ } else {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0,
+ "second @NAME@, Unwinding");
+ goto unwind;
+ }
+ }
+ } else {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "file state "
+ "could not be figured, unwinding");
+ goto unwind;
+ }
+ } else {
+ /* successful @NAME@ => file is local */
+ __cs_inode_ctx_update (this, fd->inode, GF_CS_LOCAL);
+ gf_msg (this->name, GF_LOG_INFO, 0, 0, "state : GF_CS_LOCAL"
+ ", @NAME@ successful");
+
+ goto unwind;
+ }
+
+repair:
+ ret = locate_and_execute (frame);
+ if (ret) {
+ goto unwind;
+ }
+
+ return 0;
+
+unwind:
+ CS_STACK_UNWIND (@NAME@, frame, op_ret, op_errno, @SHORT_ARGS@);
+
+ return 0;
+}
+"""
+
+LOC_STAT_OP_FOP_TEMPLATE = """
+int32_t
+cs_@NAME@ (call_frame_t *frame, xlator_t *this,
+ @LONG_ARGS@)
+{
+ int op_errno = EINVAL;
+ cs_local_t *local = NULL;
+ int ret = 0;
+
+ local = cs_local_init (this, frame, loc, NULL, GF_FOP_@UPNAME@);
+ if (!local) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "local is NULL");
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ if (loc->inode->ia_type == IA_IFDIR)
+ goto wind;
+
+ xdata = xdata ? dict_ref (xdata) : dict_new ();
+
+ if (!xdata) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "insufficient memory");
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ local->xattr_req = xdata;
+
+ ret = dict_set_uint32 (local->xattr_req, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0, "dict_set failed key:"
+ " %s", GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+
+wind:
+ STACK_WIND (frame, cs_@NAME@_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->@NAME@,
+ @SHORT_ARGS@);
+
+ return 0;
+err:
+ CS_STACK_UNWIND (@NAME@, frame, -1, op_errno, @CBK_ERROR_ARGS@);
+
+ return 0;
+}
+"""
+
+LOC_STAT_OP_FOP_CBK_TEMPLATE = """
+int32_t
+cs_@NAME@_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno,
+ @LONG_ARGS@)
+{
+ int ret = 0;
+ uint64_t val = 0;
+ loc_t *loc = NULL;
+ cs_local_t *local = NULL;
+
+ local = frame->local;
+
+ loc = &local->loc;
+
+ if (op_ret == 0) {
+ ret = dict_get_uint64 (xdata, GF_CS_OBJECT_STATUS, &val);
+ if (!ret) {
+ ret = __cs_inode_ctx_update (this, loc->inode, val);
+ if (ret) {
+ gf_msg (this->name, GF_LOG_ERROR, 0, 0,
+ "ctx update failed");
+ }
+ }
+ } else {
+ cs_inode_ctx_reset (this, loc->inode);
+ }
+
+ CS_STACK_UNWIND (@NAME@, frame, op_ret, op_errno, @SHORT_ARGS@);
+
+ return 0;
+}
+"""
+
+# All xlator FOPs are covered in the following section purely for clarity.
+# The lists themselves are not used.
+entry_ops = ['mknod', 'mkdir', 'unlink', 'rmdir', 'symlink', 'rename', 'link',
+ 'create']
+special_ops = ['statfs', 'lookup', 'ipc', 'compound', 'icreate', 'namelink']
+ignored_ops = ['getspec']
+inode_ops = ['stat', 'readlink', 'truncate', 'open', 'setxattr', 'getxattr',
+ 'removexattr', 'opendir', 'access', 'inodelk', 'entrylk',
+ 'xattrop', 'setattr', 'lease', 'getactivelk', 'setactivelk',
+ 'discover']
+fd_ops = ['readv', 'writev', 'flush', 'fsync', 'fsyncdir', 'ftruncate',
+ 'fstat', 'lk', 'readdir', 'finodelk', 'fentrylk', 'fxattrop',
+ 'fsetxattr', 'fgetxattr', 'rchecksum', 'fsetattr', 'readdirp',
+ 'fremovexattr', 'fallocate', 'discard', 'zerofill', 'seek']
+
+
+# These are the current actual lists used to generate the code
+
+# The following list contains fd-based fops that modify data
+fd_data_modify_op_fop_template = ['writev', 'flush', 'fsync',
+ 'ftruncate', 'rchecksum', 'fallocate',
+ 'discard', 'zerofill', 'seek']
+
+# The following list contains fops which are entry based and do not change
+# data
+loc_stat_op_fop_template = ['lookup', 'stat', 'discover', 'access', 'setattr',
+ 'getattr']
+
+# These fops need a separate implementation
+special_fops = ['statfs', 'setxattr', 'unlink', 'getxattr',
+ 'truncate', 'fstat', 'readv', 'readdirp']
+
+def gen_defaults():
+ for name in ops:
+ if name in fd_data_modify_op_fop_template:
+ print(generate(FD_DATA_MODIFYING_OP_FOP_CBK_TEMPLATE, name, cbk_subs))
+ print(generate(FD_DATA_MODIFYING_RESUME_OP_FOP_TEMPLATE, name, fop_subs))
+ print(generate(FD_DATA_MODIFYING_OP_FOP_TEMPLATE, name, fop_subs))
+ elif name in loc_stat_op_fop_template:
+ print(generate(LOC_STAT_OP_FOP_CBK_TEMPLATE, name, cbk_subs))
+ print(generate(LOC_STAT_OP_FOP_TEMPLATE, name, fop_subs))
+
+for l in open(sys.argv[1], 'r').readlines():
+ if l.find('#pragma generate') != -1:
+ print("/* BEGIN GENERATED CODE - DO NOT MODIFY */")
+ gen_defaults()
+ print("/* END GENERATED CODE */")
+ else:
+ print(l[:-1])
diff --git a/xlators/features/cloudsync/src/cloudsync-fops-h.py b/xlators/features/cloudsync/src/cloudsync-fops-h.py
new file mode 100755
index 00000000000..faa2de651a7
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-fops-h.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python3
+
+from __future__ import print_function
+import os
+import sys
+
+curdir = os.path.dirname(sys.argv[0])
+gendir = os.path.join(curdir, '../../../../libglusterfs/src')
+sys.path.append(gendir)
+from generator import ops, fop_subs, cbk_subs, generate
+
+OP_FOP_TEMPLATE = """
+int32_t
+cs_@NAME@ (call_frame_t *frame, xlator_t *this,
+ @LONG_ARGS@);
+"""
+
+def gen_defaults():
+ for name, value in ops.items():
+ if name == 'getspec':
+ continue
+ print(generate(OP_FOP_TEMPLATE, name, fop_subs))
+
+
+for l in open(sys.argv[1], 'r').readlines():
+ if l.find('#pragma generate') != -1:
+ print("/* BEGIN GENERATED CODE - DO NOT MODIFY */")
+ gen_defaults()
+ print("/* END GENERATED CODE */")
+ else:
+ print(l[:-1])
diff --git a/xlators/features/cloudsync/src/cloudsync-mem-types.h b/xlators/features/cloudsync/src/cloudsync-mem-types.h
new file mode 100644
index 00000000000..220346405d0
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-mem-types.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#ifndef __CLOUDSYNC_MEM_TYPES_H__
+#define __CLOUDSYNC_MEM_TYPES_H__
+
+#include <glusterfs/mem-types.h>
+enum cs_mem_types_ {
+ gf_cs_mt_cs_private_t = gf_common_mt_end + 1,
+ gf_cs_mt_cs_remote_stores_t,
+ gf_cs_mt_cs_inode_ctx_t,
+ gf_cs_mt_cs_lxattr_t,
+ gf_cs_mt_end
+};
+#endif /* __CLOUDSYNC_MEM_TYPES_H__ */
diff --git a/xlators/features/cloudsync/src/cloudsync-messages.h b/xlators/features/cloudsync/src/cloudsync-messages.h
new file mode 100644
index 00000000000..fb08f72de7f
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-messages.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#ifndef __CLOUDSYNC_MESSAGES_H__
+#define __CLOUDSYNC_MESSAGES_H__
+
+/*TODO: define relevant message ids */
+
+#endif /* __CLOUDSYNC_MESSAGES_H__ */
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/Makefile.am b/xlators/features/cloudsync/src/cloudsync-plugins/Makefile.am
new file mode 100644
index 00000000000..a985f42a877
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/Makefile.am b/xlators/features/cloudsync/src/cloudsync-plugins/src/Makefile.am
new file mode 100644
index 00000000000..fb6b0580c6d
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/Makefile.am
@@ -0,0 +1,11 @@
+if BUILD_AMAZONS3_PLUGIN
+ AMAZONS3_DIR = cloudsyncs3
+endif
+
+if BUILD_CVLT_PLUGIN
+ CVLT_DIR = cvlt
+endif
+
+SUBDIRS = ${AMAZONS3_DIR} ${CVLT_DIR}
+
+CLEANFILES =
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/Makefile.am b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/Makefile.am
new file mode 100644
index 00000000000..a985f42a877
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/Makefile.am b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/Makefile.am
new file mode 100644
index 00000000000..6509426ef87
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/Makefile.am
@@ -0,0 +1,12 @@
+csp_LTLIBRARIES = cloudsyncs3.la
+cspdir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/cloudsync-plugins
+
+cloudsyncs3_la_SOURCES = libcloudsyncs3.c $(top_srcdir)/xlators/features/cloudsync/src/cloudsync-common.c
+cloudsyncs3_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+cloudsyncs3_la_LDFLAGS = -module -export-symbols $(top_srcdir)/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.sym $(GF_XLATOR_LDFLAGS)
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src -lcurlpp -lcryptopp
+noinst_HEADERS = libcloudsyncs3.h libcloudsyncs3-mem-types.h
+AM_CFLAGS = -Wall -fno-strict-aliasing $(GF_CFLAGS) -lcurl -lcrypto -I$(top_srcdir)/xlators/features/cloudsync/src
+CLEANFILES =
+
+EXTRA_DIST = libcloudsyncs3.sym
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3-mem-types.h b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3-mem-types.h
new file mode 100644
index 00000000000..7ccfcc9f4b6
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3-mem-types.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#ifndef __LIBAWS_MEM_TYPES_H__
+#define __LIBAWS_MEM_TYPES_H__
+
+#include <glusterfs/mem-types.h>
+enum libaws_mem_types_ {
+ gf_libaws_mt_aws_private_t = gf_common_mt_end + 1,
+ gf_libaws_mt_end
+};
+#endif /* __LIBAWS_MEM_TYPES_H__ */
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.c b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.c
new file mode 100644
index 00000000000..23c3599825a
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.c
@@ -0,0 +1,584 @@
+/*
+ Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <stdlib.h>
+#include <openssl/hmac.h>
+#include <openssl/evp.h>
+#include <openssl/bio.h>
+#include <openssl/buffer.h>
+#include <openssl/crypto.h>
+#include <curl/curl.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/glusterfs.h>
+#include "libcloudsyncs3.h"
+#include "cloudsync-common.h"
+
+#define RESOURCE_SIZE 4096
+
+store_methods_t store_ops = {
+ .fop_download = aws_download_s3,
+ .fop_init = aws_init,
+ .fop_reconfigure = aws_reconfigure,
+ .fop_fini = aws_fini,
+};
+
+typedef struct aws_private {
+ char *hostname;
+ char *bucketid;
+ char *awssekey;
+ char *awskeyid;
+ gf_boolean_t abortdl;
+ pthread_spinlock_t lock;
+} aws_private_t;
+
+void *
+aws_init(xlator_t *this)
+{
+ aws_private_t *priv = NULL;
+ char *temp_str = NULL;
+ int ret = 0;
+
+ priv = GF_CALLOC(1, sizeof(aws_private_t), gf_libaws_mt_aws_private_t);
+ if (!priv) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "insufficient memory");
+ return NULL;
+ }
+
+ priv->abortdl = _gf_false;
+
+ pthread_spin_init(&priv->lock, PTHREAD_PROCESS_PRIVATE);
+
+ pthread_spin_lock(&(priv->lock));
+ {
+ if (dict_get_str(this->options, "s3plugin-seckey", &temp_str) == 0) {
+ priv->awssekey = gf_strdup(temp_str);
+ if (!priv->awssekey) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws secret key failed");
+ ret = -1;
+ goto unlock;
+ }
+ }
+
+ if (dict_get_str(this->options, "s3plugin-keyid", &temp_str) == 0) {
+ priv->awskeyid = gf_strdup(temp_str);
+ if (!priv->awskeyid) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws key ID failed");
+ ret = -1;
+ goto unlock;
+ }
+ }
+
+ if (dict_get_str(this->options, "s3plugin-bucketid", &temp_str) == 0) {
+ priv->bucketid = gf_strdup(temp_str);
+ if (!priv->bucketid) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws bucketid failed");
+
+ ret = -1;
+ goto unlock;
+ }
+ }
+
+ if (dict_get_str(this->options, "s3plugin-hostname", &temp_str) == 0) {
+ priv->hostname = gf_strdup(temp_str);
+ if (!priv->hostname) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws hostname failed");
+
+ ret = -1;
+ goto unlock;
+ }
+ }
+
+ gf_msg_debug(this->name, 0,
+ "stored key: %s id: %s "
+ "bucketid %s hostname: %s",
+ priv->awssekey, priv->awskeyid, priv->bucketid,
+ priv->hostname);
+ }
+unlock:
+ pthread_spin_unlock(&(priv->lock));
+
+ if (ret == -1) {
+ GF_FREE(priv->awskeyid);
+ GF_FREE(priv->awssekey);
+ GF_FREE(priv->bucketid);
+ GF_FREE(priv->hostname);
+ GF_FREE(priv);
+ priv = NULL;
+ }
+
+ return (void *)priv;
+}
+
+int
+aws_reconfigure(xlator_t *this, dict_t *options)
+{
+ aws_private_t *priv = NULL;
+ char *temp_str = NULL;
+ int ret = 0;
+ cs_private_t *cspriv = NULL;
+
+ cspriv = this->private;
+
+ priv = cspriv->stores->config;
+
+ if (!priv) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "null priv");
+ return -1;
+ }
+
+ pthread_spin_lock(&(priv->lock));
+ {
+ if (dict_get_str(options, "s3plugin-seckey", &temp_str) == 0) {
+ priv->awssekey = gf_strdup(temp_str);
+ if (!priv->awssekey) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws secret key failed");
+ ret = -1;
+ goto out;
+ }
+ }
+
+ if (dict_get_str(options, "s3plugin-keyid", &temp_str) == 0) {
+ priv->awskeyid = gf_strdup(temp_str);
+ if (!priv->awskeyid) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws key ID failed");
+ ret = -1;
+ goto out;
+ }
+ }
+
+ if (dict_get_str(options, "s3plugin-bucketid", &temp_str) == 0) {
+ priv->bucketid = gf_strdup(temp_str);
+ if (!priv->bucketid) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws bucketid failed");
+ ret = -1;
+ goto out;
+ }
+ }
+
+ if (dict_get_str(options, "s3plugin-hostname", &temp_str) == 0) {
+ priv->hostname = gf_strdup(temp_str);
+ if (!priv->hostname) {
+ gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0,
+ "initializing aws hostname failed");
+ ret = -1;
+ goto out;
+ }
+ }
+ }
+out:
+ pthread_spin_unlock(&(priv->lock));
+
+ gf_msg_debug(this->name, 0,
+ "stored key: %s id: %s "
+ "bucketid %s hostname: %s",
+ priv->awssekey, priv->awskeyid, priv->bucketid,
+ priv->hostname);
+
+ return ret;
+}
+
+void
+aws_fini(void *config)
+{
+ aws_private_t *priv = NULL;
+
+    priv = (aws_private_t *)config;
+
+ if (priv) {
+ GF_FREE(priv->hostname);
+ GF_FREE(priv->bucketid);
+ GF_FREE(priv->awssekey);
+ GF_FREE(priv->awskeyid);
+
+ pthread_spin_destroy(&priv->lock);
+ GF_FREE(priv);
+ }
+}
+
+int32_t
+mem_acct_init(xlator_t *this)
+{
+ int ret = -1;
+
+    GF_VALIDATE_OR_GOTO("CS", this, out);
+
+ ret = xlator_mem_acct_init(this, gf_libaws_mt_end + 1);
+
+ if (ret != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "Memory accounting init failed");
+ return ret;
+ }
+out:
+ return ret;
+}
+char *
+aws_form_request(char *resource, char **date, char *reqtype, char *bucketid,
+ char *filepath)
+{
+ char httpdate[256];
+ time_t ctime;
+ struct tm *gtime = NULL;
+ char *sign_req = NULL;
+ int signreq_len = -1;
+ int date_len = -1;
+ int res_len = -1;
+
+ ctime = gf_time();
+ gtime = gmtime(&ctime);
+
+ date_len = strftime(httpdate, sizeof(httpdate),
+ "%a, %d %b %Y %H:%M:%S +0000", gtime);
+
+ *date = gf_strndup(httpdate, date_len);
+ if (*date == NULL) {
+ gf_msg("CS", GF_LOG_ERROR, ENOMEM, 0,
+ "memory allocation "
+ "failure for date");
+ goto out;
+ }
+
+ res_len = snprintf(resource, RESOURCE_SIZE, "%s/%s", bucketid, filepath);
+
+ gf_msg_debug("CS", 0, "resource %s", resource);
+
+ /* 6 accounts for the 4 new line chars, one forward slash and
+ * one null char */
+ signreq_len = res_len + date_len + strlen(reqtype) + 6;
+
+ sign_req = GF_MALLOC(signreq_len, gf_common_mt_char);
+ if (sign_req == NULL) {
+ gf_msg("CS", GF_LOG_ERROR, ENOMEM, 0,
+ "memory allocation "
+ "failure for sign_req");
+ goto out;
+ }
+
+ snprintf(sign_req, signreq_len, "%s\n\n%s\n%s\n/%s", reqtype, "", *date,
+ resource);
+
+out:
+ return sign_req;
+}
+
+char *
+aws_b64_encode(const unsigned char *input, int length)
+{
+ BIO *bio, *b64;
+ BUF_MEM *bptr;
+ char *buff = NULL;
+
+ b64 = BIO_new(BIO_f_base64());
+ bio = BIO_new(BIO_s_mem());
+ b64 = BIO_push(b64, bio);
+ BIO_write(b64, input, length);
+ BIO_flush(b64);
+ BIO_get_mem_ptr(b64, &bptr);
+
+ buff = GF_MALLOC(bptr->length, gf_common_mt_char);
+ memcpy(buff, bptr->data, bptr->length - 1);
+ buff[bptr->length - 1] = 0;
+
+ BIO_free_all(b64);
+
+ return buff;
+}
+
+char *
+aws_sign_request(char *const str, char *awssekey)
+{
+#if (OPENSSL_VERSION_NUMBER < 0x1010002f)
+ HMAC_CTX ctx;
+#endif
+    HMAC_CTX *pctx = NULL;
+
+ unsigned char md[256];
+ unsigned len;
+ char *base64 = NULL;
+
+#if (OPENSSL_VERSION_NUMBER < 0x1010002f)
+ HMAC_CTX_init(&ctx);
+ pctx = &ctx;
+#else
+ pctx = HMAC_CTX_new();
+#endif
+ HMAC_Init_ex(pctx, awssekey, strlen(awssekey), EVP_sha1(), NULL);
+ HMAC_Update(pctx, (unsigned char *)str, strlen(str));
+ HMAC_Final(pctx, (unsigned char *)md, &len);
+
+#if (OPENSSL_VERSION_NUMBER < 0x1010002f)
+ HMAC_CTX_cleanup(pctx);
+#else
+ HMAC_CTX_free(pctx);
+#endif
+ base64 = aws_b64_encode(md, len);
+
+ return base64;
+}
+
+int
+aws_dlwritev_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, struct iatt *prebuf, struct iatt *postbuf,
+ dict_t *xdata)
+{
+ aws_private_t *priv = NULL;
+
+ if (op_ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, op_errno,
+ "write failed "
+ ". Aborting Download");
+
+ priv = this->private;
+ pthread_spin_lock(&(priv->lock));
+ {
+ priv->abortdl = _gf_true;
+ }
+ pthread_spin_unlock(&(priv->lock));
+ }
+
+ CS_STACK_DESTROY(frame);
+
+ return op_ret;
+}
+
+size_t
+aws_write_callback(void *dlbuf, size_t size, size_t nitems, void *mainframe)
+{
+ call_frame_t *frame = NULL;
+ fd_t *dlfd = NULL;
+ int ret = 0;
+ cs_local_t *local = NULL;
+ struct iovec iov = {
+ 0,
+ };
+ struct iobref *iobref = NULL;
+ struct iobuf *iobuf = NULL;
+ struct iovec dliov = {
+ 0,
+ };
+ size_t tsize = 0;
+ xlator_t *this = NULL;
+ cs_private_t *xl_priv = NULL;
+ aws_private_t *priv = NULL;
+ call_frame_t *dlframe = NULL;
+
+ frame = (call_frame_t *)mainframe;
+ this = frame->this;
+ xl_priv = this->private;
+ priv = xl_priv->stores->config;
+
+ pthread_spin_lock(&(priv->lock));
+ {
+ /* returning size other than the size passed from curl will
+ * abort further download*/
+ if (priv->abortdl) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "aborting download");
+ pthread_spin_unlock(&(priv->lock));
+ return 0;
+ }
+ }
+ pthread_spin_unlock(&(priv->lock));
+
+ local = frame->local;
+ dlfd = local->dlfd;
+ tsize = size * nitems;
+
+ dliov.iov_base = (void *)dlbuf;
+ dliov.iov_len = tsize;
+
+ ret = iobuf_copy(this->ctx->iobuf_pool, &dliov, 1, &iobref, &iobuf, &iov);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "iobuf_copy failed");
+ goto out;
+ }
+
+ /* copy frame */
+ dlframe = copy_frame(frame);
+ if (!dlframe) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "copy_frame failed");
+ tsize = 0;
+ goto out;
+ }
+
+ STACK_WIND(dlframe, aws_dlwritev_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->writev, dlfd, &iov, 1, local->dloffset,
+ 0, iobref, NULL);
+
+ local->dloffset += tsize;
+
+out:
+ if (iobuf)
+ iobuf_unref(iobuf);
+ if (iobref)
+ iobref_unref(iobref);
+
+ return tsize;
+}
+
+int
+aws_download_s3(call_frame_t *frame, void *config)
+{
+ char *buf;
+ int bufsize = -1;
+ CURL *handle = NULL;
+ struct curl_slist *slist = NULL;
+ struct curl_slist *tmp = NULL;
+ xlator_t *this = NULL;
+ int ret = 0;
+ int debug = 1;
+ CURLcode res;
+ char errbuf[CURL_ERROR_SIZE];
+ size_t len = 0;
+ long responsecode;
+ char *sign_req = NULL;
+ char *date = NULL;
+ char *const reqtype = "GET";
+ char *signature = NULL;
+ cs_local_t *local = NULL;
+ char resource[RESOURCE_SIZE] = {
+ 0,
+ };
+ aws_private_t *priv = NULL;
+
+ this = frame->this;
+
+ local = frame->local;
+
+ priv = (aws_private_t *)config;
+
+ if (!priv->bucketid || !priv->hostname || !priv->awssekey ||
+ !priv->awskeyid) {
+ ret = -1;
+ goto out;
+ }
+
+ sign_req = aws_form_request(resource, &date, reqtype, priv->bucketid,
+ local->remotepath);
+ if (!sign_req) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "null sign_req, "
+ "aborting download");
+ ret = -1;
+ goto out;
+ }
+
+ gf_msg_debug("CS", 0, "sign_req %s date %s", sign_req, date);
+
+ signature = aws_sign_request(sign_req, priv->awssekey);
+ if (!signature) {
+ gf_msg("CS", GF_LOG_ERROR, 0, 0,
+ "null signature, "
+ "aborting download");
+ ret = -1;
+ goto out;
+ }
+
+ handle = curl_easy_init();
+ this = frame->this;
+
+ /* special numbers 6, 20, 10 accounts for static characters in the
+ * below snprintf string format arguments*/
+ bufsize = strlen(date) + 6 + strlen(priv->awskeyid) + strlen(signature) +
+ 20 + strlen(priv->hostname) + 10;
+
+ buf = (char *)alloca(bufsize);
+ if (!buf) {
+ gf_msg("CS", GF_LOG_ERROR, ENOMEM, 0,
+ "mem allocation "
+ "failed for buf");
+ ret = -1;
+ goto out;
+ }
+
+ snprintf(buf, bufsize, "Date: %s", date);
+ slist = curl_slist_append(slist, buf);
+ snprintf(buf, bufsize, "Authorization: AWS %s:%s", priv->awskeyid,
+ signature);
+ slist = curl_slist_append(slist, buf);
+ snprintf(buf, bufsize, "https://%s/%s", priv->hostname, resource);
+
+ if (gf_log_get_loglevel() >= GF_LOG_DEBUG) {
+ tmp = slist;
+ while (tmp) {
+ gf_msg_debug(this->name, 0, "slist for curl - %s", tmp->data);
+ tmp = tmp->next;
+ }
+ }
+
+ curl_easy_setopt(handle, CURLOPT_HTTPHEADER, slist);
+ curl_easy_setopt(handle, CURLOPT_URL, buf);
+ curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, aws_write_callback);
+ curl_easy_setopt(handle, CURLOPT_WRITEDATA, frame);
+ curl_easy_setopt(handle, CURLOPT_VERBOSE, debug);
+ curl_easy_setopt(handle, CURLOPT_ERRORBUFFER, errbuf);
+
+ res = curl_easy_perform(handle);
+ if (res != CURLE_OK) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "download failed. err: %s\n",
+ curl_easy_strerror(res));
+ ret = -1;
+ len = strlen(errbuf);
+ if (len) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "curl failure %s", errbuf);
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "curl error "
+ "%s\n",
+ curl_easy_strerror(res));
+ }
+ }
+
+ if (res == CURLE_OK) {
+ curl_easy_getinfo(handle, CURLINFO_RESPONSE_CODE, &responsecode);
+ gf_msg_debug(this->name, 0, "response code %ld", responsecode);
+ if (responsecode != 200) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "curl download failed");
+ }
+ }
+
+ curl_slist_free_all(slist);
+ curl_easy_cleanup(handle);
+
+out:
+ if (sign_req)
+ GF_FREE(sign_req);
+ if (date)
+ GF_FREE(date);
+ if (signature)
+ GF_FREE(signature);
+
+ return ret;
+}
+
+struct volume_options cs_options[] = {
+ {.key = {"s3plugin-seckey"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "aws secret key"},
+ {.key = {"s3plugin-keyid"},
+ .type = GF_OPTION_TYPE_STR,
+     .description = "aws key ID"},
+ {.key = {"s3plugin-bucketid"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "aws bucketid"},
+ {.key = {"s3plugin-hostname"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "aws hostname e.g. s3.amazonaws.com"},
+ {.key = {NULL}},
+};
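Editor's note: the request built by aws_form_request() and signed by aws_sign_request() follows the AWS signature version 2 scheme. With hypothetical bucket, path and date values, the string-to-sign and the headers that aws_download_s3() ends up sending look roughly like this (illustrative only, not part of the patch):

    const char *string_to_sign =
        "GET\n"
        "\n"                                  /* Content-MD5 (empty) */
        "\n"                                  /* Content-Type (empty) */
        "Tue, 27 Mar 2018 19:36:42 +0000\n"   /* same value as the Date header */
        "/examplebucket/dir/file.txt";        /* "/<bucketid>/<remotepath>" */

    /* Headers sent with the GET request:
     *   Date: Tue, 27 Mar 2018 19:36:42 +0000
     *   Authorization: AWS <awskeyid>:<base64(HMAC-SHA1(awssekey, string_to_sign))>
     */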
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.h b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.h
new file mode 100644
index 00000000000..85ae669486b
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.h
@@ -0,0 +1,50 @@
+/*
+ Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _LIBAWS_H
+#define _LIBAWS_H
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/syncop.h>
+#include <curl/curl.h>
+#include "cloudsync-common.h"
+#include "libcloudsyncs3-mem-types.h"
+
+char *
+aws_b64_encode(const unsigned char *input, int length);
+
+size_t
+aws_write_callback(void *dlbuf, size_t size, size_t nitems, void *mainframe);
+
+int
+aws_download_s3(call_frame_t *frame, void *config);
+
+int
+aws_dlwritev_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, struct iatt *prebuf, struct iatt *postbuf,
+ dict_t *xdata);
+
+void *
+aws_init(xlator_t *this);
+
+int
+aws_reconfigure(xlator_t *this, dict_t *options);
+
+char *
+aws_form_request(char *resource, char **date, char *reqtype, char *bucketid,
+ char *filepath);
+char *
+aws_sign_request(char *const str, char *awssekey);
+
+void
+aws_fini(void *config);
+
+#endif
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.sym b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.sym
new file mode 100644
index 00000000000..0bc273670d5
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.sym
@@ -0,0 +1 @@
+store_ops
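Editor's note: the .sym file limits the plugin's exported symbols to the single store_ops table. cloudsync.c is not part of this diffstat, but given CS_PLUGINDIR in src/Makefile.am and the handle field in struct cs_remote_stores, the xlator is presumably expected to dlopen() the plugin from the plugin directory and resolve that symbol along these lines. This is a hypothetical sketch; cs_load_store_ops and csname are illustrative names, and the cloudsync-common.h declarations are assumed to be in scope:

    #include <dlfcn.h>
    #include <limits.h>

    static store_methods_t *
    cs_load_store_ops(xlator_t *this, const char *csname, void **handle)
    {
        char path[PATH_MAX] = {0};
        store_methods_t *ops = NULL;

        /* CS_PLUGINDIR is defined via AM_CPPFLAGS in src/Makefile.am */
        snprintf(path, sizeof(path), "%s/%s.so", CS_PLUGINDIR, csname);

        *handle = dlopen(path, RTLD_NOW);
        if (!*handle) {
            gf_msg(this->name, GF_LOG_ERROR, 0, 0, "dlopen(%s) failed: %s",
                   path, dlerror());
            return NULL;
        }

        /* "store_ops" is the only symbol listed in libcloudsyncs3.sym */
        ops = dlsym(*handle, "store_ops");
        if (!ops)
            gf_msg(this->name, GF_LOG_ERROR, 0, 0,
                   "dlsym(store_ops) failed: %s", dlerror());
        return ops;
    }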
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/Makefile.am b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/Makefile.am
new file mode 100644
index 00000000000..a985f42a877
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/Makefile.am b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/Makefile.am
new file mode 100644
index 00000000000..b512464f157
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/Makefile.am
@@ -0,0 +1,12 @@
+csp_LTLIBRARIES = cloudsynccvlt.la
+cspdir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/cloudsync-plugins
+
+cloudsynccvlt_la_SOURCES = libcvlt.c $(top_srcdir)/xlators/features/cloudsync/src/cloudsync-common.c
+cloudsynccvlt_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
+cloudsynccvlt_la_LDFLAGS = -module -avoid-version -export-symbols $(top_srcdir)/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcloudsynccvlt.sym
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src
+noinst_HEADERS = archivestore.h libcvlt.h libcvlt-mem-types.h cvlt-messages.h
+AM_CFLAGS = -Wall -fno-strict-aliasing $(GF_CFLAGS) -I$(top_srcdir)/xlators/features/cloudsync/src
+CLEANFILES =
+
+EXTRA_DIST = libcloudsynccvlt.sym
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/archivestore.h b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/archivestore.h
new file mode 100644
index 00000000000..7230ef77337
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/archivestore.h
@@ -0,0 +1,203 @@
+/*
+ Copyright (c) 2018 Commvault Systems, Inc. <http://www.commvault.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __ARCHIVESTORE_H__
+#define __ARCHIVESTORE_H__
+
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <dlfcn.h>
+#include <uuid/uuid.h>
+
+#define CS_XATTR_ARCHIVE_UUID "trusted.cloudsync.uuid"
+#define CS_XATTR_PRODUCT_ID "trusted.cloudsync.product-id"
+#define CS_XATTR_STORE_ID "trusted.cloudsync.store-id"
+
+struct _archstore_methods;
+typedef struct _archstore_methods archstore_methods_t;
+
+struct _archstore_desc {
+ void *priv; /* Private field for store mgmt. */
+ /* To be used only by archive store*/
+};
+typedef struct _archstore_desc archstore_desc_t;
+
+struct _archstore_info {
+ char *id; /* Identifier for the archivestore */
+ uint32_t idlen; /* Length of identifier string */
+ char *prod; /* Name of the data mgmt. product */
+ uint32_t prodlen; /* Length of the product string */
+};
+typedef struct _archstore_info archstore_info_t;
+
+struct _archstore_fileinfo {
+ uuid_t uuid; /* uuid of the file */
+ char *path; /* file path */
+ uint32_t pathlength; /* length of file path */
+};
+typedef struct _archstore_fileinfo archstore_fileinfo_t;
+
+struct _app_callback_info {
+ archstore_info_t *src_archstore;
+ archstore_fileinfo_t *src_archfile;
+ archstore_info_t *dest_archstore;
+ archstore_fileinfo_t *dest_archfile;
+};
+typedef struct _app_callback_info app_callback_info_t;
+
+typedef void (*app_callback_t)(archstore_desc_t *, app_callback_info_t *,
+ void *, int64_t, int32_t);
+
+enum _archstore_scan_type { FULL = 1, INCREMENTAL = 2 };
+typedef enum _archstore_scan_type archstore_scan_type_t;
+
+typedef int32_t archstore_errno_t;
+
+/*
+ * Initialize archive store.
+ * arg1 pointer to structure containing archive store information
+ * arg2 error number if any generated during the initialization
+ * arg3 name of the log file
+ */
+typedef int32_t (*init_archstore_t)(archstore_desc_t *, archstore_errno_t *,
+ const char *);
+
+/*
+ * Clean up archive store.
+ * arg1 pointer to structure containing archive store information
+ * arg2 error number if any generated during the cleanup
+ */
+typedef int32_t (*term_archstore_t)(archstore_desc_t *, archstore_errno_t *);
+
+/*
+ * Read the contents of the file from archive store
+ * arg1 pointer to structure containing archive store description
+ * arg2 pointer to structure containing archive store information
+ * arg3 pointer to structure containing information about file to be read
+ * arg4 offset in the file from which data should be read
+ * arg5 buffer where the data should be read
+ * arg6 number of bytes of data to be read
+ * arg7 error number if any generated during the read from file
+ * arg8 callback handler to be invoked after the data is read
+ * arg9 cookie to be passed when callback is invoked
+ */
+typedef int32_t (*read_archstore_t)(archstore_desc_t *, archstore_info_t *,
+ archstore_fileinfo_t *, off_t, char *,
+ size_t, archstore_errno_t *, app_callback_t,
+ void *);
+
+/*
+ * Restore the contents of the file from archive store
+ * This is basically in-place restore
+ * arg1 pointer to structure containing archive store description
+ * arg2 pointer to structure containing archive store information
+ * arg3 pointer to structure containing information about file to be restored
+ * arg4 error number if any generated during the file restore
+ * arg5 callback to be invoked after the file is restored
+ * arg6 cookie to be passed when callback is invoked
+ */
+typedef int32_t (*recall_archstore_t)(archstore_desc_t *, archstore_info_t *,
+ archstore_fileinfo_t *,
+ archstore_errno_t *, app_callback_t,
+ void *);
+
+/*
+ * Restore the contents of the file from archive store to a different store
+ * This is basically out-of-place restore
+ * arg1 pointer to structure containing archive store description
+ * arg2 pointer to structure containing source archive store information
+ * arg3 pointer to structure containing information about file to be restored
+ * arg4 pointer to structure containing destination archive store information
+ * arg5 pointer to structure containing information about the location to
+ which the file will be restored
+ * arg6 error number if any generated during the file restore
+ * arg7 callback to be invoked after the file is restored
+ * arg8 cookie to be passed when callback is invoked
+ */
+typedef int32_t (*restore_archstore_t)(archstore_desc_t *, archstore_info_t *,
+ archstore_fileinfo_t *,
+ archstore_info_t *,
+ archstore_fileinfo_t *,
+ archstore_errno_t *, app_callback_t,
+ void *);
+
+/*
+ * Archive the contents of the file to archive store
+ * arg1 pointer to structure containing archive store description
+ * arg2 pointer to structure containing source archive store information
+ * arg3 pointer to structure containing information about files to be archived
+ * arg4 pointer to structure containing destination archive store information
+ * arg5 pointer to structure containing information about files that failed
+ * to be archived
+ * arg6 error number if any generated during the file archival
+ * arg7 callback to be invoked after the file is archived
+ * arg8 cookie to be passed when callback is invoked
+ */
+typedef int32_t (*archive_archstore_t)(archstore_desc_t *, archstore_info_t *,
+ archstore_fileinfo_t *,
+ archstore_info_t *,
+ archstore_fileinfo_t *,
+ archstore_errno_t *, app_callback_t,
+ void *);
+
+/*
+ * Backup list of files provided in the input file
+ * arg1 pointer to structure containing archive store description
+ * arg2 pointer to structure containing source archive store information
+ * arg3 pointer to structure containing information about files to be backed up
+ * arg4 pointer to structure containing destination archive store information
+ * arg5 pointer to structure containing information about files that failed
+ * to be backed up
+ * arg6 error number if any generated during the file archival
+ * arg7 callback to be invoked after the file is archived
+ * arg8 cookie to be passed when callback is invoked
+ */
+typedef int32_t (*backup_archstore_t)(archstore_desc_t *, archstore_info_t *,
+ archstore_fileinfo_t *,
+ archstore_info_t *,
+ archstore_fileinfo_t *,
+ archstore_errno_t *, app_callback_t,
+ void *);
+
+/*
+ * Scan the contents of a store and determine the files which need to be
+ * backed up.
+ * arg1 pointer to structure containing archive store description
+ * arg2 pointer to structure containing archive store information
+ * arg3 type of scan whether full or incremental
+ * arg4 path to file that contains list of files to be backed up
+ * arg5 error number if any generated during scan operation
+ */
+typedef int32_t (*scan_archstore_t)(archstore_desc_t *, archstore_info_t *,
+ archstore_scan_type_t, char *,
+ archstore_errno_t *);
+
+struct _archstore_methods {
+ init_archstore_t init;
+ term_archstore_t fini;
+ backup_archstore_t backup;
+ archive_archstore_t archive;
+ scan_archstore_t scan;
+ restore_archstore_t restore;
+ recall_archstore_t recall;
+ read_archstore_t read;
+};
+
+typedef int (*get_archstore_methods_t)(archstore_methods_t *);
+
+/*
+ * Single function that will be invoked by applications for extracting
+ * the function pointers to all data management functions.
+ */
+int32_t
+get_archstore_methods(archstore_methods_t *);
+
+#endif /* End of __ARCHIVESTORE_H__ */
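Editor's note: on the provider side, a data-management library (such as the libopenarchive.so that the cvlt plugin dlopen()s below) implements the typedefs above and publishes them through get_archstore_methods(). A minimal hedged sketch; the my_* functions are hypothetical stubs, not part of this patch:

    #include "archivestore.h"

    static int32_t
    my_init(archstore_desc_t *desc, archstore_errno_t *err, const char *logfile)
    {
        (void)desc;
        (void)logfile;
        *err = 0;
        return 0;
    }

    static int32_t
    my_fini(archstore_desc_t *desc, archstore_errno_t *err)
    {
        (void)desc;
        *err = 0;
        return 0;
    }

    int32_t
    get_archstore_methods(archstore_methods_t *m)
    {
        if (!m)
            return -1;
        m->init = my_init;
        m->fini = my_fini;
        /* a real store would also wire up backup, archive, scan,
         * restore, recall and read here */
        return 0;
    }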
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/cvlt-messages.h b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/cvlt-messages.h
new file mode 100644
index 00000000000..57c9aa77da0
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/cvlt-messages.h
@@ -0,0 +1,30 @@
+/*
+ Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+ */
+
+#ifndef _CVLT_MESSAGES_H_
+#define _CVLT_MESSAGES_H_
+
+#include <glusterfs/glfs-message-id.h>
+
+/* To add new message IDs, append new identifiers at the end of the list.
+ *
+ * Never remove a message ID. If it's not used anymore, you can rename it or
+ * leave it as it is, but not delete it. This is to prevent reutilization of
+ * IDs by other messages.
+ *
+ * The component name must match one of the entries defined in
+ * glfs-message-id.h.
+ */
+
+GLFS_MSGID(CVLT, CVLT_EXTRACTION_FAILED, CVLT_FREE,
+ CVLT_RESOURCE_ALLOCATION_FAILED, CVLT_RESTORE_FAILED,
+ CVLT_READ_FAILED, CVLT_NO_MEMORY, CVLT_DLOPEN_FAILED);
+
+#endif /* !_CVLT_MESSAGES_H_ */
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcloudsynccvlt.sym b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcloudsynccvlt.sym
new file mode 100644
index 00000000000..0bc273670d5
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcloudsynccvlt.sym
@@ -0,0 +1 @@
+store_ops
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt-mem-types.h b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt-mem-types.h
new file mode 100644
index 00000000000..c24fab8bfe7
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt-mem-types.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) 2018 Commvault Systems, Inc. <http://www.commvault.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#ifndef __LIBCVLT_MEM_TYPES_H__
+#define __LIBCVLT_MEM_TYPES_H__
+
+#include <glusterfs/mem-types.h>
+enum libcvlt_mem_types_ {
+ gf_libcvlt_mt_cvlt_private_t = gf_common_mt_end + 1,
+ gf_libcvlt_mt_end
+};
+#endif /* __LIBCVLT_MEM_TYPES_H__ */
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.c b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.c
new file mode 100644
index 00000000000..5b7272bb448
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.c
@@ -0,0 +1,842 @@
+#include <stdlib.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/glusterfs.h>
+#include "libcvlt.h"
+#include "cloudsync-common.h"
+#include "cvlt-messages.h"
+
+#define LIBARCHIVE_SO "libopenarchive.so"
+#define ALIGN_SIZE 4096
+#define CVLT_TRAILER "cvltv1"
+
+store_methods_t store_ops = {
+ .fop_download = cvlt_download,
+ .fop_init = cvlt_init,
+ .fop_reconfigure = cvlt_reconfigure,
+ .fop_fini = cvlt_fini,
+ .fop_remote_read = cvlt_read,
+};
+
+static const int32_t num_req = 32;
+static const int32_t num_iatt = 32;
+static char *plugin = "cvlt_cloudSync";
+
+int32_t
+mem_acct_init(xlator_t *this)
+{
+ int ret = -1;
+
+ if (!this)
+ return ret;
+
+ ret = xlator_mem_acct_init(this, gf_libcvlt_mt_end + 1);
+
+ if (ret != 0) {
+ return ret;
+ }
+
+ return ret;
+}
+
+static void
+cvlt_free_resources(archive_t *arch)
+{
+ /*
+ * We will release all the resources that were allocated by the xlator.
+ * Check whether there are any buffers which have not been released
+ * back to a mempool.
+ */
+
+ if (arch->handle) {
+ dlclose(arch->handle);
+ }
+
+ if (arch->iobuf_pool) {
+ iobuf_pool_destroy(arch->iobuf_pool);
+ }
+
+ if (arch->req_pool) {
+ mem_pool_destroy(arch->req_pool);
+ arch->req_pool = NULL;
+ }
+
+ return;
+}
+
+static int32_t
+cvlt_extract_store_fops(xlator_t *this, archive_t *arch)
+{
+ int32_t op_ret = -1;
+ get_archstore_methods_t get_archstore_methods;
+
+ /*
+ * libopenarchive.so defines methods for performing data management
+     * operations. We will extract the methods from the library and these
+     * methods will be invoked for moving data between the glusterfs volume
+ * and the data management product.
+ */
+
+ VALIDATE_OR_GOTO(arch, err);
+
+ arch->handle = dlopen(LIBARCHIVE_SO, RTLD_NOW);
+ if (!arch->handle) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_DLOPEN_FAILED,
+ " failed to open %s ", LIBARCHIVE_SO);
+ return op_ret;
+ }
+
+ dlerror(); /* Clear any existing error */
+
+ get_archstore_methods = dlsym(arch->handle, "get_archstore_methods");
+ if (!get_archstore_methods) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " Error extracting get_archstore_methods()");
+ dlclose(arch->handle);
+ arch->handle = NULL;
+ return op_ret;
+ }
+
+ op_ret = get_archstore_methods(&(arch->fops));
+ if (op_ret) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " Failed to extract methods in get_archstore_methods");
+ dlclose(arch->handle);
+ arch->handle = NULL;
+ return op_ret;
+ }
+
+err:
+ return op_ret;
+}
+
+static int32_t
+cvlt_alloc_resources(xlator_t *this, archive_t *arch, int num_req, int num_iatt)
+{
+ /*
+ * Initialize information about all the memory pools that will be
+ * used by this xlator.
+ */
+ arch->nreqs = 0;
+
+ arch->req_pool = NULL;
+
+ arch->handle = NULL;
+ arch->xl = this;
+
+ arch->req_pool = mem_pool_new(cvlt_request_t, num_req);
+ if (!arch->req_pool) {
+ goto err;
+ }
+
+ arch->iobuf_pool = iobuf_pool_new();
+ if (!arch->iobuf_pool) {
+ goto err;
+ }
+
+ if (cvlt_extract_store_fops(this, arch)) {
+ goto err;
+ }
+
+ return 0;
+
+err:
+
+ return -1;
+}
+
+static void
+cvlt_req_init(cvlt_request_t *req)
+{
+ sem_init(&(req->sem), 0, 0);
+
+ return;
+}
+
+static void
+cvlt_req_destroy(cvlt_request_t *req)
+{
+ if (req->iobuf) {
+ iobuf_unref(req->iobuf);
+ }
+
+ if (req->iobref) {
+ iobref_unref(req->iobref);
+ }
+
+ sem_destroy(&(req->sem));
+
+ return;
+}
+
+static cvlt_request_t *
+cvlt_alloc_req(archive_t *arch)
+{
+ cvlt_request_t *reqptr = NULL;
+
+ if (!arch) {
+ goto err;
+ }
+
+ if (arch->req_pool) {
+ reqptr = mem_get0(arch->req_pool);
+ if (reqptr) {
+ cvlt_req_init(reqptr);
+ }
+ }
+
+ if (reqptr) {
+ LOCK(&(arch->lock));
+ arch->nreqs++;
+ UNLOCK(&(arch->lock));
+ }
+
+err:
+ return reqptr;
+}
+
+static int32_t
+cvlt_free_req(archive_t *arch, cvlt_request_t *reqptr)
+{
+ if (!reqptr) {
+ goto err;
+ }
+
+ if (!arch) {
+ goto err;
+ }
+
+ if (arch->req_pool) {
+ /*
+ * Free the request resources if they exist.
+ */
+
+ cvlt_req_destroy(reqptr);
+ mem_put(reqptr);
+
+ LOCK(&(arch->lock));
+ arch->nreqs--;
+ UNLOCK(&(arch->lock));
+ }
+
+ return 0;
+
+err:
+ return -1;
+}
+
+static int32_t
+cvlt_init_xlator(xlator_t *this, archive_t *arch, int num_req, int num_iatt)
+{
+ int32_t ret = -1;
+ int32_t errnum = -1;
+ int32_t locked = 0;
+
+ /*
+     * Perform all the initializations needed for bringing up the xlator.
+ */
+ if (!arch) {
+ goto err;
+ }
+
+ LOCK_INIT(&(arch->lock));
+ LOCK(&(arch->lock));
+
+ locked = 1;
+
+ ret = cvlt_alloc_resources(this, arch, num_req, num_iatt);
+
+ if (ret) {
+ goto err;
+ }
+
+ /*
+     * Now that the fops have been extracted, initialize the store
+ */
+ ret = arch->fops.init(&(arch->descinfo), &errnum, plugin);
+ if (ret) {
+ goto err;
+ }
+
+ UNLOCK(&(arch->lock));
+ locked = 0;
+ ret = 0;
+
+ return ret;
+
+err:
+ if (arch) {
+ cvlt_free_resources(arch);
+
+ if (locked) {
+ UNLOCK(&(arch->lock));
+ }
+ }
+
+ return ret;
+}
+
+static int32_t
+cvlt_term_xlator(archive_t *arch)
+{
+ int32_t errnum = -1;
+
+ if (!arch) {
+ goto err;
+ }
+
+ LOCK(&(arch->lock));
+
+ /*
+ * Release the resources that have been allocated inside store
+ */
+ arch->fops.fini(&(arch->descinfo), &errnum);
+
+ cvlt_free_resources(arch);
+
+ UNLOCK(&(arch->lock));
+
+ GF_FREE(arch);
+
+ return 0;
+
+err:
+ return -1;
+}
+
+static int32_t
+cvlt_init_store_info(archive_t *priv, archstore_info_t *store_info)
+{
+ if (!store_info) {
+ return -1;
+ }
+
+ store_info->prod = priv->product_id;
+ store_info->prodlen = strlen(priv->product_id);
+
+ store_info->id = priv->store_id;
+ store_info->idlen = strlen(priv->store_id);
+
+ return 0;
+}
+
+static int32_t
+cvlt_init_file_info(cs_loc_xattr_t *xattr, archstore_fileinfo_t *file_info)
+{
+ if (!xattr || !file_info) {
+ return -1;
+ }
+
+ gf_uuid_copy(file_info->uuid, xattr->uuid);
+ file_info->path = xattr->file_path;
+ file_info->pathlength = strlen(xattr->file_path);
+
+ return 0;
+}
+
+static int32_t
+cvlt_init_gluster_store_info(cs_loc_xattr_t *xattr,
+ archstore_info_t *store_info)
+{
+ static char *product = "glusterfs";
+
+ if (!xattr || !store_info) {
+ return -1;
+ }
+
+ store_info->prod = product;
+ store_info->prodlen = strlen(product);
+
+ store_info->id = xattr->volname;
+ store_info->idlen = strlen(xattr->volname);
+
+ return 0;
+}
+
+static int32_t
+cvlt_init_gluster_file_info(cs_loc_xattr_t *xattr,
+ archstore_fileinfo_t *file_info)
+{
+ if (!xattr || !file_info) {
+ return -1;
+ }
+
+ gf_uuid_copy(file_info->uuid, xattr->gfid);
+ file_info->path = xattr->file_path;
+ file_info->pathlength = strlen(xattr->file_path);
+
+ return 0;
+}
+
+static void
+cvlt_copy_stat_info(struct iatt *buf, cs_size_xattr_t *xattrs)
+{
+ /*
+     * If the file was archived then the reported size will not be
+     * correct. We need to fix it.
+ */
+ if (buf && xattrs) {
+ buf->ia_size = xattrs->size;
+ buf->ia_blksize = xattrs->blksize;
+ buf->ia_blocks = xattrs->blocks;
+ }
+
+ return;
+}
+
+static void
+cvlt_readv_complete(archstore_desc_t *desc, app_callback_info_t *cbkinfo,
+ void *cookie, int64_t op_ret, int32_t op_errno)
+{
+ struct iovec iov;
+ xlator_t *this = NULL;
+ struct iatt postbuf = {
+ 0,
+ };
+ call_frame_t *frame = NULL;
+ cvlt_request_t *req = (cvlt_request_t *)cookie;
+ cs_local_t *local = NULL;
+ cs_private_t *cspriv = NULL;
+ archive_t *priv = NULL;
+
+ frame = req->frame;
+ this = frame->this;
+ local = frame->local;
+
+ cspriv = this->private;
+ priv = (archive_t *)cspriv->stores->config;
+
+ if (strcmp(priv->trailer, CVLT_TRAILER)) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto out;
+ }
+
+ gf_msg_debug(plugin, 0,
+ " Read callback invoked offset:%" PRIu64 "bytes: %" PRIu64
+ " op : %d ret : %" PRId64 " errno : %d",
+ req->offset, req->bytes, req->op_type, op_ret, op_errno);
+
+ if (op_ret < 0) {
+ goto out;
+ }
+
+ req->iobref = iobref_new();
+ if (!req->iobref) {
+ op_ret = -1;
+ op_errno = ENOMEM;
+ goto out;
+ }
+
+ iobref_add(req->iobref, req->iobuf);
+ iov.iov_base = iobuf_ptr(req->iobuf);
+ iov.iov_len = op_ret;
+
+ cvlt_copy_stat_info(&postbuf, &(req->szxattr));
+
+ /*
+ * Hack to notify higher layers of EOF.
+ */
+ if (!postbuf.ia_size || (req->offset + iov.iov_len >= postbuf.ia_size)) {
+ gf_msg_debug(plugin, 0, " signalling end-of-file for uuid=%s",
+ uuid_utoa(req->file_info.uuid));
+ op_errno = ENOENT;
+ }
+
+out:
+
+ STACK_UNWIND_STRICT(readv, frame, op_ret, op_errno, &iov, 1, &postbuf,
+ req->iobref, local->xattr_rsp);
+
+ cvlt_free_req(priv, req);
+
+ return;
+}
+
+static void
+cvlt_download_complete(archstore_desc_t *store, app_callback_info_t *cbk_info,
+ void *cookie, int64_t ret, int errcode)
+{
+ cvlt_request_t *req = (cvlt_request_t *)cookie;
+
+ gf_msg_debug(plugin, 0,
+ " Download callback invoked ret : %" PRId64 " errno : %d",
+ ret, errcode);
+
+ req->op_ret = ret;
+ req->op_errno = errcode;
+ sem_post(&(req->sem));
+
+ return;
+}
+
+void *
+cvlt_init(xlator_t *this)
+{
+ int ret = 0;
+ archive_t *priv = NULL;
+
+ if (!this->children || this->children->next) {
+ gf_msg(plugin, GF_LOG_ERROR, ENOMEM, 0,
+ "should have exactly one child");
+ ret = -1;
+ goto out;
+ }
+
+ if (!this->parents) {
+ gf_msg(plugin, GF_LOG_ERROR, ENOMEM, 0,
+ "dangling volume. check volfile");
+ ret = -1;
+ goto out;
+ }
+
+ priv = GF_CALLOC(1, sizeof(archive_t), gf_libcvlt_mt_cvlt_private_t);
+ if (!priv) {
+ ret = -1;
+ goto out;
+ }
+
+ priv->trailer = CVLT_TRAILER;
+ if (cvlt_init_xlator(this, priv, num_req, num_iatt)) {
+ gf_msg(plugin, GF_LOG_ERROR, ENOMEM, 0, "xlator init failed");
+ ret = -1;
+ goto out;
+ }
+
+ GF_OPTION_INIT("cloudsync-store-id", priv->store_id, str, out);
+ GF_OPTION_INIT("cloudsync-product-id", priv->product_id, str, out);
+
+ gf_msg(plugin, GF_LOG_INFO, 0, 0,
+ "store id is : %s "
+ "product id is : %s.",
+ priv->store_id, priv->product_id);
+out:
+ if (ret == -1) {
+ cvlt_term_xlator(priv);
+ return (NULL);
+ }
+ return priv;
+}
+
+int
+cvlt_reconfigure(xlator_t *this, dict_t *options)
+{
+ cs_private_t *cspriv = NULL;
+ archive_t *priv = NULL;
+
+ cspriv = this->private;
+ priv = (archive_t *)cspriv->stores->config;
+
+ if (strcmp(priv->trailer, CVLT_TRAILER))
+ goto out;
+
+ GF_OPTION_RECONF("cloudsync-store-id", priv->store_id, options, str, out);
+
+ GF_OPTION_RECONF("cloudsync-product-id", priv->product_id, options, str,
+ out);
+ gf_msg_debug(plugin, 0,
+ "store id is : %s "
+ "product id is : %s.",
+ priv->store_id, priv->product_id);
+ return 0;
+out:
+ return -1;
+}
+
+void
+cvlt_fini(void *config)
+{
+ archive_t *priv = NULL;
+
+ priv = (archive_t *)config;
+
+ if (strcmp(priv->trailer, CVLT_TRAILER))
+ return;
+
+ cvlt_term_xlator(priv);
+ gf_msg(plugin, GF_LOG_INFO, 0, CVLT_FREE, " released xlator resources");
+ return;
+}
+
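+/*
+ * Download (restore) entry point invoked by the cloudsync xlator. The
+ * restore request is submitted to the archive store and this thread then
+ * blocks on the request semaphore until cvlt_download_complete posts it,
+ * so the operation is synchronous from the caller's point of view.
+ */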
+int
+cvlt_download(call_frame_t *frame, void *config)
+{
+ archive_t *parch = NULL;
+ cs_local_t *local = frame->local;
+ cs_loc_xattr_t *locxattr = local->xattrinfo.lxattr;
+ cvlt_request_t *req = NULL;
+ archstore_info_t dest_storeinfo;
+ archstore_fileinfo_t dest_fileinfo;
+ int32_t op_ret, op_errno;
+
+ parch = (archive_t *)config;
+
+ if (strcmp(parch->trailer, CVLT_TRAILER)) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto err;
+ }
+
+ gf_msg_debug(plugin, 0, " download invoked for uuid = %s gfid=%s ",
+ locxattr->uuid, uuid_utoa(locxattr->gfid));
+
+ if (!(parch->fops.restore)) {
+ op_errno = ELIBBAD;
+ goto err;
+ }
+
+ /*
+ * Download needs to be processed. Allocate a request.
+ */
+ req = cvlt_alloc_req(parch);
+
+ if (!req) {
+ gf_msg(plugin, GF_LOG_ERROR, ENOMEM, CVLT_RESOURCE_ALLOCATION_FAILED,
+ " failed to allocated request for gfid=%s",
+ uuid_utoa(locxattr->gfid));
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ /*
+ * Initialize the request object.
+ */
+ req->op_type = CVLT_RESTORE_OP;
+ req->frame = frame;
+
+ /*
+ * The file is currently residing inside a data management store.
+ * To restore the file contents we need to provide the information
+ * about data management store.
+ */
+ op_ret = cvlt_init_store_info(parch, &(req->store_info));
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " failed to extract store info for gfid=%s",
+ uuid_utoa(locxattr->gfid));
+ goto err;
+ }
+
+ op_ret = cvlt_init_file_info(locxattr, &(req->file_info));
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " failed to extract file info for gfid=%s",
+ uuid_utoa(locxattr->gfid));
+ goto err;
+ }
+
+ /*
+ * We need to perform in-place restore of the file from data management
+     * store to glusterfs volume.
+ */
+ op_ret = cvlt_init_gluster_store_info(locxattr, &dest_storeinfo);
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " failed to extract destination store info for gfid=%s",
+ uuid_utoa(locxattr->gfid));
+ goto err;
+ }
+
+ op_ret = cvlt_init_gluster_file_info(locxattr, &dest_fileinfo);
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " failed to extract file info for gfid=%s",
+ uuid_utoa(locxattr->gfid));
+ goto err;
+ }
+
+ /*
+ * Submit the restore request.
+ */
+ op_ret = parch->fops.restore(&(parch->descinfo), &(req->store_info),
+ &(req->file_info), &dest_storeinfo,
+ &dest_fileinfo, &op_errno,
+ cvlt_download_complete, req);
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_RESTORE_FAILED,
+ " failed to restore file gfid=%s from data management store",
+ uuid_utoa(locxattr->gfid));
+ goto err;
+ }
+
+ /*
+ * Wait for the restore to complete.
+ */
+ sem_wait(&(req->sem));
+
+ if (req->op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_RESTORE_FAILED,
+ " restored failed for gfid=%s", uuid_utoa(locxattr->gfid));
+ goto err;
+ }
+
+ if (req) {
+ cvlt_free_req(parch, req);
+ }
+
+ return 0;
+
+err:
+
+ if (req) {
+ cvlt_free_req(parch, req);
+ }
+
+ return -1;
+}
+
+int
+cvlt_read(call_frame_t *frame, void *config)
+{
+ int32_t op_ret = -1;
+ int32_t op_errno = 0;
+ archive_t *parch = NULL;
+ cvlt_request_t *req = NULL;
+ struct iovec iov = {
+ 0,
+ };
+ struct iobref *iobref;
+ size_t size = 0;
+ off_t off = 0;
+
+ cs_local_t *local = frame->local;
+ cs_loc_xattr_t *locxattr = local->xattrinfo.lxattr;
+
+ size = local->xattrinfo.size;
+ off = local->xattrinfo.offset;
+
+ parch = (archive_t *)config;
+
+ if (strcmp(parch->trailer, CVLT_TRAILER)) {
+ op_ret = -1;
+ op_errno = EINVAL;
+ goto err;
+ }
+
+ gf_msg_debug(plugin, 0,
+ " read invoked for gfid = %s offset = %" PRIu64
+ " file_size = %" PRIu64,
+ uuid_utoa(locxattr->gfid), off, local->stbuf.ia_size);
+
+ if (off >= local->stbuf.ia_size) {
+ /*
+ * Hack to notify higher layers of EOF.
+ */
+
+ op_errno = ENOENT;
+ op_ret = 0;
+
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_READ_FAILED,
+ " reporting end-of-file for gfid=%s", uuid_utoa(locxattr->gfid));
+
+ goto err;
+ }
+
+ if (!size) {
+ op_errno = EINVAL;
+
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_READ_FAILED,
+ " zero size read attempted on gfid=%s",
+ uuid_utoa(locxattr->gfid));
+ goto err;
+ }
+
+ if (!(parch->fops.read)) {
+ op_errno = ELIBBAD;
+ goto err;
+ }
+
+ /*
+     * The read request needs to be processed. Allocate a request.
+ */
+ req = cvlt_alloc_req(parch);
+
+ if (!req) {
+ gf_msg(plugin, GF_LOG_ERROR, ENOMEM, CVLT_NO_MEMORY,
+ " failed to allocated request for gfid=%s",
+ uuid_utoa(locxattr->gfid));
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ req->iobuf = iobuf_get_page_aligned(parch->iobuf_pool, size, ALIGN_SIZE);
+ if (!req->iobuf) {
+ op_errno = ENOMEM;
+ goto err;
+ }
+
+ /*
+ * Initialize the request object.
+ */
+ req->op_type = CVLT_READ_OP;
+ req->offset = off;
+ req->bytes = size;
+ req->frame = frame;
+ req->szxattr.size = local->stbuf.ia_size;
+ req->szxattr.blocks = local->stbuf.ia_blocks;
+ req->szxattr.blksize = local->stbuf.ia_blksize;
+
+ /*
+ * The file is currently residing inside a data management store.
+ * To read the file contents we need to provide the information
+ * about data management store.
+ */
+ op_ret = cvlt_init_store_info(parch, &(req->store_info));
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " failed to extract store info for gfid=%s"
+ " offset=%" PRIu64 " size=%" GF_PRI_SIZET
+ ", "
+ " buf=%p",
+ uuid_utoa(locxattr->gfid), off, size, req->iobuf->ptr);
+ goto err;
+ }
+
+ op_ret = cvlt_init_file_info(locxattr, &(req->file_info));
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " failed to extract file info for gfid=%s"
+ " offset=%" PRIu64 " size=%" GF_PRI_SIZET
+ ", "
+ " buf=%p",
+ uuid_utoa(locxattr->gfid), off, size, req->iobuf->ptr);
+ goto err;
+ }
+
+ /*
+ * Submit the read request.
+ */
+ op_ret = parch->fops.read(&(parch->descinfo), &(req->store_info),
+ &(req->file_info), off, req->iobuf->ptr, size,
+ &op_errno, cvlt_readv_complete, req);
+
+ if (op_ret < 0) {
+ gf_msg(plugin, GF_LOG_ERROR, 0, CVLT_EXTRACTION_FAILED,
+ " read failed on gfid=%s"
+ " offset=%" PRIu64 " size=%" GF_PRI_SIZET
+ ", "
+ " buf=%p",
+ uuid_utoa(locxattr->gfid), off, size, req->iobuf->ptr);
+ goto err;
+ }
+
+ return 0;
+
+err:
+
+ iobref = iobref_new();
+ gf_msg_debug(plugin, 0, " read unwinding stack op_ret = %d, op_errno = %d",
+ op_ret, op_errno);
+
+ STACK_UNWIND_STRICT(readv, frame, op_ret, op_errno, &iov, 1,
+ &(local->stbuf), iobref, local->xattr_rsp);
+
+ if (iobref) {
+ iobref_unref(iobref);
+ }
+
+ if (req) {
+ cvlt_free_req(parch, req);
+ }
+
+ return 0;
+}
diff --git a/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.h b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.h
new file mode 100644
index 00000000000..c45ac948f6c
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.h
@@ -0,0 +1,84 @@
+/*
+ Copyright (c) 2018 Commvault Systems, Inc. <http://www.commvault.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+#ifndef _LIBCVLT_H
+#define _LIBCVLT_H
+
+#include <semaphore.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/syncop.h>
+#include <glusterfs/compat-errno.h>
+#include "cloudsync-common.h"
+#include "libcvlt-mem-types.h"
+#include "archivestore.h"
+
+enum _cvlt_op {
+ CVLT_READ_OP = 1,
+ CVLT_WRITE_OP = 2,
+ CVLT_RESTORE_OP = 3,
+ CVLT_ARCHIVE_OP = 4,
+ CVLT_LOOKUP_OP = 5,
+ CVLT_XATTR_OP = 6,
+ CVLT_STAT_OP = 7,
+ CVLT_FSTAT_op = 8,
+ CVLT_UNDEF_OP = 127
+};
+typedef enum _cvlt_op cvlt_op_t;
+
+struct _archive;
+struct _cvlt_request {
+ uint64_t offset;
+ uint64_t bytes;
+ struct iobuf *iobuf;
+ struct iobref *iobref;
+ call_frame_t *frame;
+ cvlt_op_t op_type;
+ int32_t op_ret;
+ int32_t op_errno;
+ xlator_t *this;
+ sem_t sem;
+ archstore_info_t store_info;
+ archstore_fileinfo_t file_info;
+ cs_size_xattr_t szxattr;
+};
+typedef struct _cvlt_request cvlt_request_t;
+
+struct _archive {
+ gf_lock_t lock; /* lock for controlling access */
+ xlator_t *xl; /* xlator */
+ void *handle; /* handle returned from dlopen */
+ int32_t nreqs; /* num requests active */
+ struct mem_pool *req_pool; /* pool for requests */
+    struct iobuf_pool *iobuf_pool; /* iobuf pool */
+ archstore_desc_t descinfo; /* Archive store descriptor info */
+ archstore_methods_t fops; /* function pointers */
+    char *product_id;              /* data management product identifier */
+    char *store_id;                /* remote store identifier */
+    char *trailer;                 /* trailer used to validate plugin config */
+};
+typedef struct _archive archive_t;
+
+void *
+cvlt_init(xlator_t *);
+
+int
+cvlt_reconfigure(xlator_t *, dict_t *);
+
+void
+cvlt_fini(void *);
+
+int
+cvlt_download(call_frame_t *, void *);
+
+int
+cvlt_read(call_frame_t *, void *);
+
+#endif
diff --git a/xlators/features/cloudsync/src/cloudsync.c b/xlators/features/cloudsync/src/cloudsync.c
new file mode 100644
index 00000000000..7f0b9e563b8
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync.c
@@ -0,0 +1,2076 @@
+/*
+ * Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+#include "cloudsync.h"
+#include "cloudsync-common.h"
+#include <glusterfs/call-stub.h>
+#include "cloudsync-autogen-fops.h"
+
+#include <string.h>
+#include <dlfcn.h>
+
+static void
+cs_cleanup_private(cs_private_t *priv)
+{
+ if (priv) {
+ if (priv->stores) {
+ priv->stores->fini(priv->stores->config);
+ GF_FREE(priv->stores);
+ }
+
+ pthread_spin_destroy(&priv->lock);
+ GF_FREE(priv);
+ }
+
+ return;
+}
+
+static struct cs_plugin plugins[] = {
+ {.name = "cloudsyncs3",
+ .library = "cloudsyncs3.so",
+ .description = "cloudsync s3 store."},
+#if defined(__linux__)
+ {.name = "cvlt",
+ .library = "cloudsynccvlt.so",
+ .description = "Commvault content store."},
+#endif
+ {.name = NULL},
+};
+
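+/*
+ * Initialize the xlator: allocate the private structure and, when a
+ * cloudsync-storetype is configured, dlopen the matching plugin, resolve
+ * its "store_ops" table and call the plugin's init fop to obtain the
+ * store configuration kept in priv->stores->config.
+ */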
+int
+cs_init(xlator_t *this)
+{
+ cs_private_t *priv = NULL;
+ gf_boolean_t per_vol = _gf_false;
+ int ret = 0;
+ char *libpath = NULL;
+ store_methods_t *store_methods = NULL;
+ void *handle = NULL;
+ char *temp_str = NULL;
+ int index = 0;
+ char *libname = NULL;
+
+ priv = GF_CALLOC(1, sizeof(*priv), gf_cs_mt_cs_private_t);
+ if (!priv) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "insufficient memory");
+ goto out;
+ }
+
+ priv->this = this;
+
+ this->local_pool = mem_pool_new(cs_local_t, 512);
+ if (!this->local_pool) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, ENOMEM, "initialisation failed.");
+ ret = -1;
+ goto out;
+ }
+
+ this->private = priv;
+
+ GF_OPTION_INIT("cloudsync-remote-read", priv->remote_read, bool, out);
+
+    /* temp workaround. Should be configurable through glusterd */
+ per_vol = _gf_true;
+
+ if (per_vol) {
+ if (dict_get_str_sizen(this->options, "cloudsync-storetype",
+ &temp_str) == 0) {
+ for (index = 0; plugins[index].name; index++) {
+ if (!strcmp(temp_str, plugins[index].name)) {
+ libname = plugins[index].library;
+ break;
+ }
+ }
+ } else {
+ ret = 0;
+ }
+
+ if (!libname) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, 0, "no plugin enabled");
+ ret = 0;
+ goto out;
+ }
+
+ ret = gf_asprintf(&libpath, "%s/%s", CS_PLUGINDIR, libname);
+ if (ret == -1) {
+ goto out;
+ }
+
+ handle = dlopen(libpath, RTLD_NOW);
+ if (!handle) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, 0,
+ "could not "
+ "load the required library. %s",
+ dlerror());
+ ret = 0;
+ goto out;
+ } else {
+ gf_msg(this->name, GF_LOG_INFO, 0, 0,
+ "loading library:%s successful", libname);
+ }
+
+ priv->stores = GF_CALLOC(1, sizeof(struct cs_remote_stores),
+ gf_cs_mt_cs_remote_stores_t);
+ if (!priv->stores) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "Could not "
+ "allocate memory for priv->stores");
+ ret = -1;
+ goto out;
+ }
+
+ (void)dlerror(); /* clear out previous error string */
+
+ /* load library methods */
+ store_methods = (store_methods_t *)dlsym(handle, "store_ops");
+ if (!store_methods) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "null store_methods %s",
+ dlerror());
+ ret = -1;
+ goto out;
+ }
+
+ (void)dlerror();
+
+ if (priv->remote_read) {
+ priv->stores->rdfop = store_methods->fop_remote_read;
+ if (!priv->stores->rdfop) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "failed to get"
+ " read fop %s",
+ dlerror());
+ ret = -1;
+ goto out;
+ }
+ }
+
+ priv->stores->dlfop = store_methods->fop_download;
+ if (!priv->stores->dlfop) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "failed to get"
+ " download fop %s",
+ dlerror());
+ ret = -1;
+ goto out;
+ }
+
+ (void)dlerror();
+ priv->stores->init = store_methods->fop_init;
+ if (!priv->stores->init) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "failed to get"
+ " init fop %s",
+ dlerror());
+ ret = -1;
+ goto out;
+ }
+
+ (void)dlerror();
+ priv->stores->reconfigure = store_methods->fop_reconfigure;
+ if (!priv->stores->reconfigure) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "failed to get"
+ " reconfigure fop %s",
+ dlerror());
+ ret = -1;
+ goto out;
+ }
+
+ priv->stores->handle = handle;
+
+ priv->stores->config = (void *)((priv->stores->init)(this));
+ if (!priv->stores->config) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "null config");
+ ret = -1;
+ goto out;
+ }
+ }
+
+ ret = 0;
+
+out:
+ if (ret == -1) {
+ if (this->local_pool) {
+ mem_pool_destroy(this->local_pool);
+ this->local_pool = NULL;
+ }
+
+ cs_cleanup_private(priv);
+
+ if (handle) {
+ dlclose(handle);
+ }
+ }
+
+ GF_FREE(libpath);
+
+ return ret;
+}
+
+int
+cs_forget(xlator_t *this, inode_t *inode)
+{
+ uint64_t ctx_int = 0;
+ cs_inode_ctx_t *ctx = NULL;
+
+ inode_ctx_del(inode, this, &ctx_int);
+ if (!ctx_int)
+ return 0;
+
+ ctx = (cs_inode_ctx_t *)(uintptr_t)ctx_int;
+
+ GF_FREE(ctx);
+ return 0;
+}
+
+void
+cs_fini(xlator_t *this)
+{
+ cs_private_t *priv = NULL;
+ priv = this->private;
+
+ cs_cleanup_private(priv);
+}
+
+int
+cs_reconfigure(xlator_t *this, dict_t *options)
+{
+ cs_private_t *priv = NULL;
+ int ret = 0;
+
+ priv = this->private;
+ if (!priv) {
+ ret = -1;
+ goto out;
+ }
+
+ GF_OPTION_RECONF("cloudsync-remote-read", priv->remote_read, options, bool,
+ out);
+
+ /* needed only for per volume configuration*/
+ ret = priv->stores->reconfigure(this, options);
+
+out:
+ return ret;
+}
+
+int32_t
+cs_mem_acct_init(xlator_t *this)
+{
+ int ret = -1;
+
+ GF_VALIDATE_OR_GOTO("cloudsync", this, out);
+
+ ret = xlator_mem_acct_init(this, gf_cs_mt_end + 1);
+
+ if (ret != 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "Memory accounting init failed");
+ return ret;
+ }
+out:
+ return ret;
+}
+
+int32_t
+cs_readdirp(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t off, dict_t *xdata)
+{
+ int ret = 0;
+ int op_errno = ENOMEM;
+
+ if (!xdata) {
+ xdata = dict_new();
+ if (!xdata) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, ENOMEM,
+ "failed to create "
+ "dict");
+ goto err;
+ }
+ }
+
+ ret = dict_set_uint32(xdata, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "dict_set failed key:"
+ " %s",
+ GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+
+ STACK_WIND(frame, default_readdirp_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readdirp, fd, size, off, xdata);
+ return 0;
+err:
+ STACK_UNWIND_STRICT(readdirp, frame, -1, op_errno, NULL, NULL);
+ return 0;
+}
+
+int32_t
+cs_truncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata)
+{
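+    /*
+     * A truncate that fails because the file is still remote (or being
+     * downloaded) is repaired by downloading the file and retrying once;
+     * a failure on the retried truncate is unwound to the caller.
+     */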
+ cs_local_t *local = NULL;
+ int ret = 0;
+ uint64_t val = 0;
+
+ local = frame->local;
+
+ local->call_cnt++;
+
+ if (op_ret == -1) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "truncate failed");
+ ret = dict_get_uint64(xdata, GF_CS_OBJECT_STATUS, &val);
+ if (ret == 0) {
+ if (val == GF_CS_ERROR) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "could not get file state, unwinding");
+ op_ret = -1;
+ op_errno = EIO;
+ goto unwind;
+ } else {
+ __cs_inode_ctx_update(this, local->loc.inode, val);
+ gf_msg(this->name, GF_LOG_INFO, 0, 0, " state = %" PRIu64, val);
+
+ if (local->call_cnt == 1 &&
+ (val == GF_CS_REMOTE || val == GF_CS_DOWNLOADING)) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, 0,
+ "will repair and download "
+ "the file, current state : %" PRIu64,
+ val);
+ goto repair;
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "second truncate, Unwinding");
+ goto unwind;
+ }
+ }
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "file state "
+ "could not be figured, unwinding");
+ goto unwind;
+ }
+ } else {
+ /* successful write => file is local */
+ __cs_inode_ctx_update(this, local->loc.inode, GF_CS_LOCAL);
+ gf_msg(this->name, GF_LOG_INFO, 0, 0,
+ "state : GF_CS_LOCAL"
+ ", truncate successful");
+
+ goto unwind;
+ }
+
+repair:
+ ret = locate_and_execute(frame);
+ if (ret) {
+ goto unwind;
+ }
+
+ return 0;
+
+unwind:
+ CS_STACK_UNWIND(truncate, frame, op_ret, op_errno, prebuf, postbuf, xdata);
+ return 0;
+}
+
+int32_t
+cs_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc, off_t offset,
+ dict_t *xdata)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+ cs_inode_ctx_t *ctx = NULL;
+ gf_cs_obj_state state = -1;
+
+ VALIDATE_OR_GOTO(frame, err);
+ VALIDATE_OR_GOTO(this, err);
+ VALIDATE_OR_GOTO(loc, err);
+
+ local = cs_local_init(this, frame, loc, NULL, GF_FOP_TRUNCATE);
+ if (!local) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "local init failed");
+ goto err;
+ }
+
+ __cs_inode_ctx_get(this, loc->inode, &ctx);
+
+ if (ctx)
+ state = __cs_get_file_state(loc->inode, ctx);
+ else
+ state = GF_CS_LOCAL;
+
+ local->xattr_req = xdata ? dict_ref(xdata) : (xdata = dict_new());
+
+ ret = dict_set_uint32(local->xattr_req, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "dict_set failed key:"
+ " %s",
+ GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+
+ local->stub = fop_truncate_stub(frame, cs_resume_truncate, loc, offset,
+ xdata);
+ if (!local->stub) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "insufficient memory");
+ goto err;
+ }
+
+ if (state == GF_CS_LOCAL) {
+ STACK_WIND(frame, cs_truncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->truncate, loc, offset, xdata);
+
+ } else {
+ local->call_cnt++;
+ ret = locate_and_execute(frame);
+ if (ret) {
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ CS_STACK_UNWIND(truncate, frame, -1, ENOMEM, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+cs_statfs_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct statvfs *buf, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(statfs, frame, op_ret, op_errno, buf, xdata);
+ return 0;
+}
+
+int32_t
+cs_statfs(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *xdata)
+{
+ STACK_WIND(frame, cs_statfs_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->statfs, loc, xdata);
+ return 0;
+}
+
+int32_t
+cs_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(getxattr, frame, op_ret, op_errno, dict, xdata);
+ return 0;
+}
+
+int32_t
+cs_getxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, const char *name,
+ dict_t *xattr_req)
+{
+ STACK_WIND(frame, cs_getxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->getxattr, loc, name, xattr_req);
+ return 0;
+}
+
+int32_t
+cs_setxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ cs_local_t *local = NULL;
+
+ local = frame->local;
+
+ if (local->locked)
+ cs_inodelk_unlock(frame);
+
+ CS_STACK_UNWIND(setxattr, frame, op_ret, op_errno, xdata);
+
+ return 0;
+}
+
+int32_t
+cs_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc, dict_t *dict,
+ int32_t flags, dict_t *xdata)
+{
+ data_t *tmp = NULL;
+ cs_local_t *local = NULL;
+ int ret = 0;
+
+ VALIDATE_OR_GOTO(frame, err);
+ VALIDATE_OR_GOTO(this, err);
+
+ local = cs_local_init(this, frame, loc, NULL, GF_FOP_SETXATTR);
+ if (!local) {
+ ret = -1;
+ goto err;
+ }
+
+ local->xattr_req = xdata ? dict_ref(xdata) : (xdata = dict_new());
+
+ tmp = dict_get_sizen(dict, GF_CS_OBJECT_UPLOAD_COMPLETE);
+ if (tmp) {
+ /* Value of key should be the atime */
+ local->stub = fop_setxattr_stub(frame, cs_resume_setxattr, loc, dict,
+ flags, xdata);
+
+ if (!local->stub)
+ goto err;
+
+ ret = locate_and_execute(frame);
+ if (ret) {
+ goto err;
+ }
+
+ return 0;
+ }
+
+ STACK_WIND(frame, cs_setxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setxattr, loc, dict, flags, xdata);
+ return 0;
+err:
+ CS_STACK_UNWIND(setxattr, frame, -1, errno, NULL);
+ return 0;
+}
+
+int32_t
+cs_fgetxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *dict, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(fgetxattr, frame, op_ret, op_errno, dict, xdata);
+ return 0;
+}
+
+int32_t
+cs_fgetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, const char *name,
+ dict_t *xdata)
+{
+ STACK_WIND(frame, cs_fgetxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fgetxattr, fd, name, xdata);
+ return 0;
+}
+
+int32_t
+cs_fsetxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(fsetxattr, frame, op_ret, op_errno, xdata);
+ return 0;
+}
+
+int32_t
+cs_fsetxattr(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *dict,
+ int32_t flags, dict_t *xdata)
+{
+ STACK_WIND(frame, cs_fsetxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fsetxattr, fd, dict, flags, xdata);
+ return 0;
+}
+
+int32_t
+cs_unlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iatt *preparent, struct iatt *postparent,
+ dict_t *xdata)
+{
+ STACK_UNWIND_STRICT(unlink, frame, op_ret, op_errno, preparent, postparent,
+ xdata);
+ return 0;
+}
+
+int32_t
+cs_unlink(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ dict_t *xattr_req)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+
+ local = cs_local_init(this, frame, loc, NULL, GF_FOP_UNLINK);
+ if (!local)
+ goto err;
+
+ local->xattr_req = xattr_req ? dict_ref(xattr_req) : dict_new();
+
+ ret = dict_set_uint32(local->xattr_req, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "dict_set failed key:"
+ " %s",
+ GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+ STACK_WIND(frame, cs_unlink_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->unlink, loc, flags, local->xattr_req);
+ return 0;
+err:
+ CS_STACK_UNWIND(unlink, frame, -1, errno, NULL, NULL, NULL);
+ return 0;
+}
+
+int32_t
+cs_open_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, fd_t *fd, dict_t *xdata)
+{
+ int ret = 0;
+ uint64_t val = 0;
+
+ if (op_ret == 0) {
+ ret = dict_get_uint64(xdata, GF_CS_OBJECT_STATUS, &val);
+ if (!ret) {
+ ret = __cs_inode_ctx_update(this, fd->inode, val);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "ctx update failed");
+ }
+ }
+ } else {
+ cs_inode_ctx_reset(this, fd->inode);
+ }
+
+ CS_STACK_UNWIND(open, frame, op_ret, op_errno, fd, xdata);
+ return 0;
+}
+
+int32_t
+cs_open(call_frame_t *frame, xlator_t *this, loc_t *loc, int32_t flags,
+ fd_t *fd, dict_t *xattr_req)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+
+ local = cs_local_init(this, frame, NULL, fd, GF_FOP_OPEN);
+ if (!local)
+ goto err;
+
+ local->xattr_req = xattr_req ? dict_ref(xattr_req) : dict_new();
+
+ ret = dict_set_uint32(local->xattr_req, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "dict_set failed key:"
+ " %s",
+ GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+
+ STACK_WIND(frame, cs_open_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->open, loc, flags, fd, local->xattr_req);
+ return 0;
+err:
+ CS_STACK_UNWIND(open, frame, -1, errno, NULL, NULL);
+ return 0;
+}
+
+int32_t
+cs_fstat_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iatt *buf, dict_t *xdata)
+{
+ int ret = 0;
+ uint64_t val = 0;
+ fd_t *fd = NULL;
+ cs_local_t *local = NULL;
+
+ local = frame->local;
+
+ fd = local->fd;
+
+ if (op_ret == 0) {
+ ret = dict_get_uint64(xdata, GF_CS_OBJECT_STATUS, &val);
+ if (!ret) {
+ gf_msg_debug(this->name, 0, "state %" PRIu64, val);
+ ret = __cs_inode_ctx_update(this, fd->inode, val);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "ctx update failed");
+ }
+ }
+ } else {
+ cs_inode_ctx_reset(this, fd->inode);
+ }
+
+ CS_STACK_UNWIND(fstat, frame, op_ret, op_errno, buf, xdata);
+
+ return 0;
+}
+
+int32_t
+cs_fstat(call_frame_t *frame, xlator_t *this, fd_t *fd, dict_t *xattr_req)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+
+ local = cs_local_init(this, frame, NULL, fd, GF_FOP_FSTAT);
+ if (!local)
+ goto err;
+
+ if (fd->inode->ia_type == IA_IFDIR)
+ goto wind;
+
+ local->xattr_req = xattr_req ? dict_ref(xattr_req) : dict_new();
+
+ ret = dict_set_uint32(local->xattr_req, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "dict_set failed key:"
+ " %s",
+ GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+
+wind:
+ STACK_WIND(frame, cs_fstat_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fstat, fd, local->xattr_req);
+ return 0;
+err:
+ CS_STACK_UNWIND(fstat, frame, -1, errno, NULL, NULL);
+ return 0;
+}
+
+cs_local_t *
+cs_local_init(xlator_t *this, call_frame_t *frame, loc_t *loc, fd_t *fd,
+ glusterfs_fop_t fop)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+
+ local = mem_get0(this->local_pool);
+ if (!local)
+ goto out;
+
+ if (loc) {
+ ret = loc_copy(&local->loc, loc);
+ if (ret)
+ goto out;
+ }
+
+ if (fd) {
+ local->fd = fd_ref(fd);
+ }
+
+ local->op_ret = -1;
+ local->op_errno = EUCLEAN;
+ local->fop = fop;
+ local->dloffset = 0;
+ frame->local = local;
+ local->locked = _gf_false;
+ local->call_cnt = 0;
+out:
+ if (ret) {
+ if (local)
+ mem_put(local);
+ local = NULL;
+ }
+
+ return local;
+}
+
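+/*
+ * Create a dedicated frame for inodelk requests. The copy gets its own
+ * lock owner derived from the parent frame so the lock can be taken and
+ * released independently of the fop frame.
+ */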
+call_frame_t *
+cs_lock_frame(call_frame_t *parent_frame)
+{
+ call_frame_t *lock_frame = NULL;
+
+ lock_frame = copy_frame(parent_frame);
+
+ if (lock_frame == NULL)
+ goto out;
+
+ set_lk_owner_from_ptr(&lock_frame->root->lk_owner, parent_frame->root);
+
+out:
+ return lock_frame;
+}
+
+void
+cs_lock_wipe(call_frame_t *lock_frame)
+{
+ CS_STACK_DESTROY(lock_frame);
+}
+
+int32_t
+cs_inodelk_unlock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ cs_lock_wipe(frame);
+
+ return 0;
+}
+
+int
+cs_inodelk_unlock(call_frame_t *main_frame)
+{
+ xlator_t *this = NULL;
+ struct gf_flock flock = {
+ 0,
+ };
+ call_frame_t *lock_frame = NULL;
+ cs_local_t *lock_local = NULL;
+ cs_local_t *main_local = NULL;
+ int ret = 0;
+
+ this = main_frame->this;
+ main_local = main_frame->local;
+
+ lock_frame = cs_lock_frame(main_frame);
+ if (!lock_frame)
+ goto out;
+
+ lock_local = cs_local_init(this, lock_frame, NULL, NULL, 0);
+ if (!lock_local)
+ goto out;
+
+ ret = cs_build_loc(&lock_local->loc, main_frame);
+ if (ret) {
+ goto out;
+ }
+
+ flock.l_type = F_UNLCK;
+
+ main_local->locked = _gf_false;
+
+ STACK_WIND(lock_frame, cs_inodelk_unlock_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->inodelk, CS_LOCK_DOMAIN,
+ &lock_local->loc, F_SETLKW, &flock, NULL);
+
+ return 0;
+
+out:
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "Stale lock would be found on"
+ " server");
+
+ if (lock_frame)
+ cs_lock_wipe(lock_frame);
+
+ return 0;
+}
+
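+/*
+ * Synchronous download helper: mark the file as downloading via an
+ * fsetxattr, invoke the plugin's download fop, and on success remove the
+ * remote/downloading xattrs; on failure the partially written data is
+ * truncated away.
+ */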
+int
+cs_download_task(void *arg)
+{
+ call_frame_t *frame = NULL;
+ xlator_t *this = NULL;
+ cs_private_t *priv = NULL;
+ int ret = -1;
+ char *sign_req = NULL;
+ fd_t *fd = NULL;
+ cs_local_t *local = NULL;
+ dict_t *dict = NULL;
+
+ frame = (call_frame_t *)arg;
+
+ this = frame->this;
+
+ priv = this->private;
+
+ if (!priv->stores) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "No remote store "
+ "plugins found");
+ ret = -1;
+ goto out;
+ }
+
+ local = frame->local;
+
+ if (local->fd)
+ fd = fd_anonymous(local->fd->inode);
+ else
+ fd = fd_anonymous(local->loc.inode);
+
+ if (!fd) {
+ gf_msg("CS", GF_LOG_ERROR, 0, 0, "fd creation failed");
+ ret = -1;
+ goto out;
+ }
+
+ local->dlfd = fd;
+ local->dloffset = 0;
+
+ dict = dict_new();
+ if (!dict) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, ENOMEM,
+ "failed to create "
+ "dict");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_uint32(dict, GF_CS_OBJECT_DOWNLOADING, 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "dict_set failed");
+ ret = -1;
+ goto out;
+ }
+
+ ret = syncop_fsetxattr(this, local->fd, dict, 0, NULL, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "fsetxattr failed "
+ "key %s",
+ GF_CS_OBJECT_DOWNLOADING);
+ ret = -1;
+ goto out;
+ }
+    /* this calling convention is for the per-volume configuration */
+ ret = priv->stores->dlfop(frame, priv->stores->config);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "download failed"
+ ", remotepath: %s",
+ local->remotepath);
+
+ /*using dlfd as it is anonymous and have RDWR flag*/
+ ret = syncop_ftruncate(FIRST_CHILD(this), local->dlfd, 0, NULL, NULL,
+ NULL, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, -ret, "ftruncate failed");
+ } else {
+ gf_msg_debug(this->name, 0, "ftruncate succeed");
+ }
+
+ ret = -1;
+ goto out;
+ } else {
+ gf_msg(this->name, GF_LOG_INFO, 0, 0,
+ "download success, path"
+ " : %s",
+ local->remotepath);
+
+ ret = syncop_fremovexattr(this, local->fd, GF_CS_OBJECT_REMOTE, NULL,
+ NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, -ret,
+ "removexattr failed, remotexattr");
+ ret = -1;
+ goto out;
+ } else {
+ gf_msg_debug(this->name, 0,
+ "fremovexattr success, "
+ "path : %s",
+ local->remotepath);
+ }
+
+ ret = syncop_fremovexattr(this, local->fd, GF_CS_OBJECT_DOWNLOADING,
+ NULL, NULL);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, -ret,
+ "removexattr failed, downloading xattr, path %s",
+ local->remotepath);
+ ret = -1;
+ goto out;
+ } else {
+ gf_msg_debug(this->name, 0,
+ "fremovexattr success"
+ " path %s",
+ local->remotepath);
+ }
+ }
+
+out:
+ GF_FREE(sign_req);
+
+ if (dict)
+ dict_unref(dict);
+
+ if (fd) {
+ fd_unref(fd);
+ local->dlfd = NULL;
+ }
+
+ return ret;
+}
+
+int
+cs_download(call_frame_t *frame)
+{
+ int ret = 0;
+ cs_local_t *local = NULL;
+ xlator_t *this = NULL;
+
+ local = frame->local;
+ this = frame->this;
+
+ if (!local->remotepath) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "remote path not"
+ " available. Check posix logs to resolve");
+ goto out;
+ }
+
+ ret = cs_download_task((void *)frame);
+out:
+ return ret;
+}
+
+int
+cs_set_xattr_req(call_frame_t *frame)
+{
+ cs_local_t *local = NULL;
+ GF_UNUSED int ret = 0;
+
+ local = frame->local;
+
+ /* When remote reads are performed (i.e. reads on remote store),
+ * there needs to be a way to associate a file on gluster volume
+     * with its corresponding file on the remote store. In order to do
+     * that, a unique key can be maintained as an xattr
+     * (GF_CS_XATTR_ARCHIVE_UUID) on the stub file on gluster bricks.
+ * This xattr should be provided to the plugin to
+ * perform the read fop on the correct file. This assumes that the file
+ * hierarchy and name need not be the same on remote store as that of
+ * the gluster volume.
+ */
+ ret = dict_set_sizen_str_sizen(local->xattr_req, GF_CS_XATTR_ARCHIVE_UUID,
+ "1");
+
+ return 0;
+}
+
+int
+cs_update_xattrs(call_frame_t *frame, dict_t *xdata)
+{
+ cs_local_t *local = NULL;
+ xlator_t *this = NULL;
+ int size = -1;
+ GF_UNUSED int ret = 0;
+
+ local = frame->local;
+ this = frame->this;
+
+ local->xattrinfo.lxattr = GF_CALLOC(1, sizeof(cs_loc_xattr_t),
+ gf_cs_mt_cs_lxattr_t);
+ if (!local->xattrinfo.lxattr) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto err;
+ }
+
+ gf_uuid_copy(local->xattrinfo.lxattr->gfid, local->loc.gfid);
+
+ if (local->remotepath) {
+ local->xattrinfo.lxattr->file_path = gf_strdup(local->remotepath);
+ if (!local->xattrinfo.lxattr->file_path) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto err;
+ }
+ }
+
+ ret = dict_get_gfuuid(xdata, GF_CS_XATTR_ARCHIVE_UUID,
+ &(local->xattrinfo.lxattr->uuid));
+
+ if (ret) {
+ gf_uuid_clear(local->xattrinfo.lxattr->uuid);
+ }
+ size = strlen(this->name) - strlen("-cloudsync") + 1;
+ local->xattrinfo.lxattr->volname = GF_CALLOC(1, size, gf_common_mt_char);
+ if (!local->xattrinfo.lxattr->volname) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto err;
+ }
+ strncpy(local->xattrinfo.lxattr->volname, this->name, size - 1);
+ local->xattrinfo.lxattr->volname[size - 1] = '\0';
+
+ return 0;
+err:
+ cs_xattrinfo_wipe(local);
+ return -1;
+}
+
+int
+cs_serve_readv(call_frame_t *frame, off_t offset, size_t size, uint32_t flags)
+{
+ xlator_t *this = NULL;
+ cs_private_t *priv = NULL;
+ int ret = -1;
+ fd_t *fd = NULL;
+ cs_local_t *local = NULL;
+
+ local = frame->local;
+ this = frame->this;
+ priv = this->private;
+
+ if (!local->remotepath) {
+ ret = -1;
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "remote path not"
+ " available. Check posix logs to resolve");
+ goto out;
+ }
+
+ if (!priv->stores) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "No remote store "
+ "plugins found");
+ ret = -1;
+ goto out;
+ }
+
+ if (local->fd) {
+ fd = fd_anonymous(local->fd->inode);
+ } else {
+ fd = fd_anonymous(local->loc.inode);
+ }
+
+ local->xattrinfo.size = size;
+ local->xattrinfo.offset = offset;
+ local->xattrinfo.flags = flags;
+
+ if (!fd) {
+ gf_msg("CS", GF_LOG_ERROR, 0, 0, "fd creation failed");
+ ret = -1;
+ goto out;
+ }
+
+ local->dlfd = fd;
+ local->dloffset = offset;
+
+    /* this calling convention is for the per-volume configuration */
+ ret = priv->stores->rdfop(frame, priv->stores->config);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "read failed"
+ ", remotepath: %s",
+ local->remotepath);
+ ret = -1;
+ goto out;
+ } else {
+ gf_msg(this->name, GF_LOG_INFO, 0, 0,
+ "read success, path"
+ " : %s",
+ local->remotepath);
+ }
+
+out:
+ if (fd) {
+ fd_unref(fd);
+ local->dlfd = NULL;
+ }
+ return ret;
+}
+
+int32_t
+cs_readv_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iovec *vector, int32_t count,
+ struct iatt *stbuf, struct iobref *iobref, dict_t *xdata)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+ uint64_t val = 0;
+ fd_t *fd = NULL;
+
+ local = frame->local;
+ fd = local->fd;
+
+ local->call_cnt++;
+
+ if (op_ret == -1) {
+ ret = dict_get_uint64(xdata, GF_CS_OBJECT_STATUS, &val);
+ if (ret == 0) {
+ if (val == GF_CS_ERROR) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "could not get file state, unwinding");
+ op_ret = -1;
+ op_errno = EIO;
+ goto unwind;
+ } else {
+ __cs_inode_ctx_update(this, fd->inode, val);
+ gf_msg(this->name, GF_LOG_INFO, 0, 0, " state = %" PRIu64, val);
+
+ if (local->call_cnt == 1 &&
+ (val == GF_CS_REMOTE || val == GF_CS_DOWNLOADING)) {
+ gf_msg(this->name, GF_LOG_INFO, 0, 0,
+ " will read from remote : %" PRIu64, val);
+ goto repair;
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "second readv, Unwinding");
+ goto unwind;
+ }
+ }
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "file state "
+ "could not be figured, unwinding");
+ goto unwind;
+ }
+ } else {
+ /* successful readv => file is local */
+ __cs_inode_ctx_update(this, fd->inode, GF_CS_LOCAL);
+ gf_msg(this->name, GF_LOG_INFO, 0, 0,
+ "state : GF_CS_LOCAL"
+ ", readv successful");
+
+ goto unwind;
+ }
+
+repair:
+ ret = locate_and_execute(frame);
+ if (ret) {
+ goto unwind;
+ }
+
+ return 0;
+
+unwind:
+ CS_STACK_UNWIND(readv, frame, op_ret, op_errno, vector, count, stbuf,
+ iobref, xdata);
+
+ return 0;
+}
+
+int32_t
+cs_resume_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata)
+{
+ int ret = 0;
+
+ ret = cs_resume_postprocess(this, frame, fd->inode);
+ if (ret) {
+ goto unwind;
+ }
+
+ cs_inodelk_unlock(frame);
+
+ STACK_WIND(frame, cs_readv_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readv, fd, size, offset, flags, xdata);
+
+ return 0;
+
+unwind:
+ cs_inodelk_unlock(frame);
+
+ cs_common_cbk(frame);
+
+ return 0;
+}
+
+int32_t
+cs_resume_remote_readv(call_frame_t *frame, xlator_t *this, fd_t *fd,
+ size_t size, off_t offset, uint32_t flags, dict_t *xdata)
+{
+ int ret = 0;
+ cs_local_t *local = NULL;
+ gf_cs_obj_state state = -1;
+ cs_inode_ctx_t *ctx = NULL;
+
+ cs_inodelk_unlock(frame);
+
+ local = frame->local;
+ if (!local) {
+ ret = -1;
+ goto unwind;
+ }
+
+ __cs_inode_ctx_get(this, fd->inode, &ctx);
+
+ state = __cs_get_file_state(fd->inode, ctx);
+ if (state == GF_CS_ERROR) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "status is GF_CS_ERROR."
+ " Aborting readv");
+ local->op_ret = -1;
+ local->op_errno = EREMOTE;
+ ret = -1;
+ goto unwind;
+ }
+
+ /* Serve readv from remote store only if it is remote. */
+ gf_msg_debug(this->name, 0, "status of file %s is %d",
+ local->remotepath ? local->remotepath : "", state);
+
+ /* We will reach this condition if local inode ctx had REMOTE
+ * state when the control was in cs_readv but after stat
+ * we got an updated state saying that the file is LOCAL.
+ */
+ if (state == GF_CS_LOCAL) {
+ STACK_WIND(frame, cs_readv_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readv, fd, size, offset, flags,
+ xdata);
+ } else if (state == GF_CS_REMOTE) {
+ ret = cs_resume_remote_readv_postprocess(this, frame, fd->inode, offset,
+ size, flags);
+ /* Failed to submit the remote readv fop to plugin */
+ if (ret) {
+ local->op_ret = -1;
+ local->op_errno = EREMOTE;
+ goto unwind;
+ }
+ /* When the file is in any other intermediate state,
+ * we should not perform remote reads.
+ */
+ } else {
+ local->op_ret = -1;
+ local->op_errno = EINVAL;
+ goto unwind;
+ }
+
+ return 0;
+
+unwind:
+ cs_common_cbk(frame);
+
+ return 0;
+}
+
+int32_t
+cs_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata)
+{
+ int op_errno = ENOMEM;
+ cs_local_t *local = NULL;
+ int ret = 0;
+ cs_inode_ctx_t *ctx = NULL;
+ gf_cs_obj_state state = -1;
+ cs_private_t *priv = NULL;
+
+ VALIDATE_OR_GOTO(frame, err);
+ VALIDATE_OR_GOTO(this, err);
+ VALIDATE_OR_GOTO(fd, err);
+
+ priv = this->private;
+
+ local = cs_local_init(this, frame, NULL, fd, GF_FOP_READ);
+ if (!local) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "local init failed");
+ goto err;
+ }
+
+ __cs_inode_ctx_get(this, fd->inode, &ctx);
+
+ if (ctx)
+ state = __cs_get_file_state(fd->inode, ctx);
+ else
+ state = GF_CS_LOCAL;
+
+ local->xattr_req = xdata ? dict_ref(xdata) : (xdata = dict_new());
+
+ ret = dict_set_uint32(local->xattr_req, GF_CS_OBJECT_STATUS, 1);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "dict_set failed key:"
+ " %s",
+ GF_CS_OBJECT_STATUS);
+ goto err;
+ }
+
+ if (priv->remote_read) {
+ local->stub = fop_readv_stub(frame, cs_resume_remote_readv, fd, size,
+ offset, flags, xdata);
+ } else {
+ local->stub = fop_readv_stub(frame, cs_resume_readv, fd, size, offset,
+ flags, xdata);
+ }
+ if (!local->stub) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "insufficient memory");
+ goto err;
+ }
+
+ if (state == GF_CS_LOCAL) {
+ STACK_WIND(frame, cs_readv_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->readv, fd, size, offset, flags,
+ xdata);
+ } else {
+ local->call_cnt++;
+ ret = locate_and_execute(frame);
+ if (ret) {
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ CS_STACK_UNWIND(readv, frame, -1, op_errno, NULL, -1, NULL, NULL, NULL);
+
+ return 0;
+}
+
+int
+cs_resume_remote_readv_postprocess(xlator_t *this, call_frame_t *frame,
+ inode_t *inode, off_t offset, size_t size,
+ uint32_t flags)
+{
+ int ret = 0;
+
+ ret = cs_serve_readv(frame, offset, size, flags);
+
+ return ret;
+}
+
+int
+cs_stat_check_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
+ int op_errno, struct iatt *stbuf, dict_t *xdata)
+{
+ cs_local_t *local = NULL;
+ call_stub_t *stub = NULL;
+ char *filepath = NULL;
+ int ret = 0;
+ inode_t *inode = NULL;
+ uint64_t val = 0;
+
+ local = frame->local;
+
+ if (op_ret == -1) {
+ local->op_ret = op_ret;
+ local->op_errno = op_errno;
+ gf_msg(this->name, GF_LOG_ERROR, 0, op_errno, "stat check failed");
+ goto err;
+ } else {
+ if (local->fd)
+ inode = local->fd->inode;
+ else
+ inode = local->loc.inode;
+
+ if (!inode) {
+ local->op_ret = -1;
+ local->op_errno = EINVAL;
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "null inode "
+ "returned");
+ goto err;
+ }
+
+ ret = dict_get_uint64(xdata, GF_CS_OBJECT_STATUS, &val);
+ if (ret == 0) {
+ if (val == GF_CS_ERROR) {
+ cs_inode_ctx_reset(this, inode);
+ local->op_ret = -1;
+ local->op_errno = EIO;
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "status = GF_CS_ERROR. failed to get "
+ " file state");
+ goto err;
+ } else {
+ ret = __cs_inode_ctx_update(this, inode, val);
+ gf_msg_debug(this->name, 0, "status : %" PRIu64, val);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "ctx update failed");
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto err;
+ }
+ }
+ } else {
+ gf_msg_debug(this->name, 0, "status not found in dict");
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto err;
+ }
+
+ ret = dict_get_str_sizen(xdata, GF_CS_OBJECT_REMOTE, &filepath);
+ if (filepath) {
+ gf_msg_debug(this->name, 0, "filepath returned %s", filepath);
+ local->remotepath = gf_strdup(filepath);
+ if (!local->remotepath) {
+ local->op_ret = -1;
+ local->op_errno = ENOMEM;
+ goto err;
+ }
+ } else {
+ gf_msg_debug(this->name, 0, "NULL filepath");
+ }
+
+ ret = cs_update_xattrs(frame, xdata);
+ if (ret)
+ goto err;
+
+ local->op_ret = 0;
+ local->xattr_rsp = dict_ref(xdata);
+ memcpy(&local->stbuf, stbuf, sizeof(struct iatt));
+ }
+
+ stub = local->stub;
+ local->stub = NULL;
+ call_resume(stub);
+
+ return 0;
+err:
+ cs_inodelk_unlock(frame);
+
+ cs_common_cbk(frame);
+
+ return 0;
+}
+
+int
+cs_do_stat_check(call_frame_t *main_frame)
+{
+ cs_local_t *local = NULL;
+ xlator_t *this = NULL;
+ int ret = 0;
+
+ local = main_frame->local;
+ this = main_frame->this;
+
+ ret = dict_set_uint32(local->xattr_req, GF_CS_OBJECT_REPAIR, 256);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "dict_set failed");
+ goto err;
+ }
+
+ cs_set_xattr_req(main_frame);
+
+ if (local->fd) {
+ STACK_WIND(main_frame, cs_stat_check_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->fstat, local->fd, local->xattr_req);
+ } else {
+ STACK_WIND(main_frame, cs_stat_check_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->stat, &local->loc,
+ local->xattr_req);
+ }
+
+ return 0;
+
+err:
+ cs_inodelk_unlock(main_frame);
+
+ cs_common_cbk(main_frame);
+
+ return 0;
+}
+
+void
+cs_common_cbk(call_frame_t *frame)
+{
+ glusterfs_fop_t fop = -1;
+ cs_local_t *local = NULL;
+
+ local = frame->local;
+
+ fop = local->fop;
+
+    /* Note: only the failure case needs to be handled here, since for a
+     * successful stat check the fop resumes anyway. The unwind then happens
+     * from the fop cbk, and each cbk can unlock the inodelk in case a lock
+     * was taken before. The lock status is stored in the frame. */
+
+ /* for failure case */
+
+ /*TODO: add other fops*/
+ switch (fop) {
+ case GF_FOP_WRITE:
+ CS_STACK_UNWIND(writev, frame, local->op_ret, local->op_errno, NULL,
+ NULL, NULL);
+ break;
+
+ case GF_FOP_SETXATTR:
+ CS_STACK_UNWIND(setxattr, frame, local->op_ret, local->op_errno,
+ NULL);
+ break;
+ case GF_FOP_READ:
+ CS_STACK_UNWIND(readv, frame, local->op_ret, local->op_errno, NULL,
+ 0, NULL, NULL, NULL);
+ break;
+ case GF_FOP_FTRUNCATE:
+ CS_STACK_UNWIND(ftruncate, frame, local->op_ret, local->op_errno,
+ NULL, NULL, NULL);
+ break;
+
+ case GF_FOP_TRUNCATE:
+ CS_STACK_UNWIND(truncate, frame, local->op_ret, local->op_errno,
+ NULL, NULL, NULL);
+ break;
+ default:
+ break;
+ }
+
+ return;
+}
+
+int
+cs_blocking_inodelk_cbk(call_frame_t *lock_frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata)
+{
+ cs_local_t *main_local = NULL;
+ call_frame_t *main_frame = NULL;
+ cs_local_t *lock_local = NULL;
+
+ lock_local = lock_frame->local;
+
+ main_frame = lock_local->main_frame;
+ main_local = main_frame->local;
+
+ if (op_ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "inodelk failed");
+ main_local->op_errno = op_errno;
+ main_local->op_ret = op_ret;
+ goto err;
+ }
+
+ main_local->locked = _gf_true;
+
+ cs_lock_wipe(lock_frame);
+
+ cs_do_stat_check(main_frame);
+
+ return 0;
+err:
+ cs_common_cbk(main_frame);
+
+ cs_lock_wipe(lock_frame);
+
+ return 0;
+}
+
+int
+cs_build_loc(loc_t *loc, call_frame_t *frame)
+{
+ cs_local_t *local = NULL;
+ int ret = -1;
+
+ local = frame->local;
+
+ if (local->fd) {
+ loc->inode = inode_ref(local->fd->inode);
+ if (loc->inode) {
+ gf_uuid_copy(loc->gfid, loc->inode->gfid);
+ ret = 0;
+ goto out;
+ } else {
+ ret = -1;
+ goto out;
+ }
+ } else {
+ loc->inode = inode_ref(local->loc.inode);
+ if (loc->inode) {
+ gf_uuid_copy(loc->gfid, loc->inode->gfid);
+ ret = 0;
+ goto out;
+ } else {
+ ret = -1;
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+int
+cs_blocking_inodelk(call_frame_t *parent_frame)
+{
+ call_frame_t *lock_frame = NULL;
+ cs_local_t *lock_local = NULL;
+ xlator_t *this = NULL;
+ struct gf_flock flock = {
+ 0,
+ };
+ int ret = 0;
+
+ this = parent_frame->this;
+
+ lock_frame = cs_lock_frame(parent_frame);
+ if (!lock_frame) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "insuffcient memory");
+ goto err;
+ }
+
+ lock_local = cs_local_init(this, lock_frame, NULL, NULL, 0);
+ if (!lock_local) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "local init failed");
+ goto err;
+ }
+
+ lock_local->main_frame = parent_frame;
+
+ flock.l_type = F_WRLCK;
+
+ ret = cs_build_loc(&lock_local->loc, parent_frame);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "build_loc failed");
+ goto err;
+ }
+
+ STACK_WIND(lock_frame, cs_blocking_inodelk_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->inodelk, CS_LOCK_DOMAIN,
+ &lock_local->loc, F_SETLKW, &flock, NULL);
+
+ return 0;
+err:
+ if (lock_frame)
+ cs_lock_wipe(lock_frame);
+
+ return -1;
+}
+
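+/*
+ * Entry point of the repair path: take a blocking inodelk, then run the
+ * remote-status stat check; on success the stored fop stub is resumed
+ * from cs_stat_check_cbk.
+ */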
+int
+locate_and_execute(call_frame_t *frame)
+{
+ int ret = 0;
+
+ ret = cs_blocking_inodelk(frame);
+
+ if (ret)
+ return -1;
+ else
+ return 0;
+}
+
+int32_t
+cs_resume_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ off_t offset, dict_t *xattr_req)
+{
+ cs_local_t *local = NULL;
+ int ret = 0;
+
+ local = frame->local;
+
+ ret = cs_resume_postprocess(this, frame, loc->inode);
+ if (ret) {
+ goto unwind;
+ }
+
+ cs_inodelk_unlock(frame);
+
+ STACK_WIND(frame, cs_truncate_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->truncate, loc, offset,
+ local->xattr_req);
+
+ return 0;
+
+unwind:
+ cs_inodelk_unlock(frame);
+
+ cs_common_cbk(frame);
+
+ return 0;
+}
+
+int32_t
+cs_resume_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ dict_t *dict, int32_t flags, dict_t *xdata)
+{
+ cs_local_t *local = NULL;
+ cs_inode_ctx_t *ctx = NULL;
+ gf_cs_obj_state state = GF_CS_ERROR;
+
+ local = frame->local;
+
+ __cs_inode_ctx_get(this, loc->inode, &ctx);
+
+ state = __cs_get_file_state(loc->inode, ctx);
+
+ if (state == GF_CS_ERROR) {
+        /* could not determine the file state */
+ local->op_ret = -1;
+ local->op_errno = EINVAL;
+ gf_msg(this->name, GF_LOG_WARNING, 0, 0,
+ "file %s , could not figure file state", loc->path);
+ goto unwind;
+ }
+
+ if (state == GF_CS_REMOTE) {
+ /* file is already remote */
+ local->op_ret = -1;
+ local->op_errno = EINVAL;
+ gf_msg(this->name, GF_LOG_WARNING, 0, EINVAL,
+ "file %s is already remote", loc->path);
+ goto unwind;
+ }
+
+ if (state == GF_CS_DOWNLOADING) {
+ gf_msg(this->name, GF_LOG_WARNING, 0, 0,
+ " file is in downloading state.");
+ local->op_ret = -1;
+ local->op_errno = EINVAL;
+ goto unwind;
+ }
+
+ STACK_WIND(frame, cs_setxattr_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->setxattr, loc, dict, flags,
+ local->xattr_req);
+
+ return 0;
+unwind:
+ cs_inodelk_unlock(frame);
+
+ cs_common_cbk(frame);
+
+ return 0;
+}
+
+gf_cs_obj_state
+__cs_get_file_state(inode_t *inode, cs_inode_ctx_t *ctx)
+{
+ gf_cs_obj_state state = -1;
+
+ if (!ctx)
+ return GF_CS_ERROR;
+
+ LOCK(&inode->lock);
+ {
+ state = ctx->state;
+ }
+ UNLOCK(&inode->lock);
+
+ return state;
+}
+
+void
+__cs_inode_ctx_get(xlator_t *this, inode_t *inode, cs_inode_ctx_t **ctx)
+{
+ uint64_t ctxint = 0;
+ int ret = 0;
+
+ LOCK(&inode->lock);
+ {
+ ret = __inode_ctx_get(inode, this, &ctxint);
+ }
+ UNLOCK(&inode->lock);
+
+ if (ret)
+ *ctx = NULL;
+ else
+ *ctx = (cs_inode_ctx_t *)(uintptr_t)ctxint;
+
+ return;
+}
+
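+/* Create the inode ctx on first use and record the given state; on later
+ * calls just update the state in place. */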
+int
+__cs_inode_ctx_update(xlator_t *this, inode_t *inode, uint64_t val)
+{
+ cs_inode_ctx_t *ctx = NULL;
+ uint64_t ctxint = 0;
+ int ret = 0;
+
+ LOCK(&inode->lock);
+ {
+ ret = __inode_ctx_get(inode, this, &ctxint);
+ if (ret) {
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_cs_mt_cs_inode_ctx_t);
+ if (!ctx) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0, "ctx allocation failed");
+ ret = -1;
+ goto out;
+ }
+
+ ctx->state = val;
+
+ ctxint = (uint64_t)(uintptr_t)ctx;
+
+ ret = __inode_ctx_set(inode, this, &ctxint);
+ if (ret) {
+ GF_FREE(ctx);
+ goto out;
+ }
+ } else {
+ ctx = (cs_inode_ctx_t *)(uintptr_t)ctxint;
+
+ ctx->state = val;
+ }
+ }
+
+out:
+ UNLOCK(&inode->lock);
+
+ return ret;
+}
+
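+/* Delete the cloudsync ctx from the inode and free it. */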
+int
+cs_inode_ctx_reset(xlator_t *this, inode_t *inode)
+{
+ cs_inode_ctx_t *ctx = NULL;
+ uint64_t ctxint = 0;
+
+ inode_ctx_del(inode, this, &ctxint);
+ if (!ctxint) {
+ return 0;
+ }
+
+ ctx = (cs_inode_ctx_t *)(uintptr_t)ctxint;
+
+ GF_FREE(ctx);
+ return 0;
+}
+
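+/* Common pre-resume step: if the file is remote or still downloading, fetch
+ * it via cs_download(); failures are mapped to EREMOTE so the caller unwinds. */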
+int
+cs_resume_postprocess(xlator_t *this, call_frame_t *frame, inode_t *inode)
+{
+ cs_local_t *local = NULL;
+ gf_cs_obj_state state = GF_CS_ERROR;
+ cs_inode_ctx_t *ctx = NULL;
+ int ret = 0;
+
+ local = frame->local;
+ if (!local) {
+ ret = -1;
+ goto out;
+ }
+
+ __cs_inode_ctx_get(this, inode, &ctx);
+
+ state = __cs_get_file_state(inode, ctx);
+ if (state == GF_CS_ERROR) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "file state is GF_CS_ERROR, aborting fop");
+ local->op_ret = -1;
+ local->op_errno = EREMOTE;
+ ret = -1;
+ goto out;
+ }
+
+ if (state == GF_CS_REMOTE || state == GF_CS_DOWNLOADING) {
+ gf_msg_debug(this->name, 0, "status is %d", state);
+ ret = cs_download(frame);
+ if (ret == 0) {
+ gf_msg_debug(this->name, 0, "Winding for Final Write");
+ } else {
+ gf_msg(this->name, GF_LOG_ERROR, 0, 0,
+ "download failed, unwinding fop");
+ local->op_ret = -1;
+ local->op_errno = EREMOTE;
+ ret = -1;
+ }
+ }
+out:
+ return ret;
+}
+
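+/* Statedump callbacks below are placeholders; nothing is dumped yet. */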
+int32_t
+cs_fdctx_to_dict(xlator_t *this, fd_t *fd, dict_t *dict)
+{
+ return 0;
+}
+
+int32_t
+cs_inode(xlator_t *this)
+{
+ return 0;
+}
+
+int32_t
+cs_inode_to_dict(xlator_t *this, dict_t *dict)
+{
+ return 0;
+}
+
+int32_t
+cs_history(xlator_t *this)
+{
+ return 0;
+}
+
+int32_t
+cs_fd(xlator_t *this)
+{
+ return 0;
+}
+
+int32_t
+cs_fd_to_dict(xlator_t *this, dict_t *dict)
+{
+ return 0;
+}
+
+int32_t
+cs_fdctx(xlator_t *this, fd_t *fd)
+{
+ return 0;
+}
+
+int32_t
+cs_inodectx(xlator_t *this, inode_t *ino)
+{
+ return 0;
+}
+
+int32_t
+cs_inodectx_to_dict(xlator_t *this, inode_t *ino, dict_t *dict)
+{
+ return 0;
+}
+
+int32_t
+cs_priv_to_dict(xlator_t *this, dict_t *dict, char *brickname)
+{
+ return 0;
+}
+
+int32_t
+cs_priv(xlator_t *this)
+{
+ return 0;
+}
+
+int
+cs_notify(xlator_t *this, int event, void *data, ...)
+{
+ return default_notify(this, event, data);
+}
+
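+/* Fops intercepted by cloudsync; anything not listed falls through to the
+ * default pass-through implementations. */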
+struct xlator_fops cs_fops = {
+ .stat = cs_stat,
+ .readdirp = cs_readdirp,
+ .truncate = cs_truncate,
+ .seek = cs_seek,
+ .statfs = cs_statfs,
+ .fallocate = cs_fallocate,
+ .discard = cs_discard,
+ .getxattr = cs_getxattr,
+ .writev = cs_writev,
+ .setxattr = cs_setxattr,
+ .fgetxattr = cs_fgetxattr,
+ .lookup = cs_lookup,
+ .fsetxattr = cs_fsetxattr,
+ .readv = cs_readv,
+ .ftruncate = cs_ftruncate,
+ .rchecksum = cs_rchecksum,
+ .unlink = cs_unlink,
+ .open = cs_open,
+ .fstat = cs_fstat,
+ .zerofill = cs_zerofill,
+};
+
+struct xlator_cbks cs_cbks = {
+ .forget = cs_forget,
+};
+
+struct xlator_dumpops cs_dumpops = {
+ .fdctx_to_dict = cs_fdctx_to_dict,
+ .inode = cs_inode,
+ .inode_to_dict = cs_inode_to_dict,
+ .history = cs_history,
+ .fd = cs_fd,
+ .fd_to_dict = cs_fd_to_dict,
+ .fdctx = cs_fdctx,
+ .inodectx = cs_inodectx,
+ .inodectx_to_dict = cs_inodectx_to_dict,
+ .priv_to_dict = cs_priv_to_dict,
+ .priv = cs_priv,
+};
+
+struct volume_options cs_options[] = {
+ {.key = {"cloudsync-storetype"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "Defines which remote store is enabled"},
+ {.key = {"cloudsync-remote-read"},
+ .type = GF_OPTION_TYPE_BOOL,
+ .description = "Defines a remote read fop when on"},
+ {.key = {"cloudsync-store-id"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "Defines a volume wide store id"},
+ {.key = {"cloudsync-product-id"},
+ .type = GF_OPTION_TYPE_STR,
+ .description = "Defines a volume wide product id"},
+ {.key = {NULL}},
+};
+
+xlator_api_t xlator_api = {
+ .init = cs_init,
+ .fini = cs_fini,
+ .notify = cs_notify,
+ .reconfigure = cs_reconfigure,
+ .mem_acct_init = cs_mem_acct_init,
+ .dumpops = &cs_dumpops,
+ .fops = &cs_fops,
+ .cbks = &cs_cbks,
+ .options = cs_options,
+ .identifier = "cloudsync",
+ .category = GF_TECH_PREVIEW,
+};
diff --git a/xlators/features/cloudsync/src/cloudsync.h b/xlators/features/cloudsync/src/cloudsync.h
new file mode 100644
index 00000000000..d24141978d6
--- /dev/null
+++ b/xlators/features/cloudsync/src/cloudsync.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#ifndef __CLOUDSYNC_H__
+#define __CLOUDSYNC_H__
+
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/syncop.h>
+#include <glusterfs/call-stub.h>
+#include "cloudsync-common.h"
+#include "cloudsync-autogen-fops.h"
+
+#define ALIGN_SIZE 4096
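+/* Inodelk domain used to serialize changes to a file's remote/local state */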
+#define CS_LOCK_DOMAIN "cs.protect.file.stat"
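+/* Stashed write arguments (offset, iovec, count, iobref, flags), kept so the
+ * write can be replayed after the remote file has been downloaded */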
+typedef struct cs_dlstore {
+ off_t off;
+ struct iovec *vector;
+ int32_t count;
+ struct iobref *iobref;
+ uint32_t flags;
+} cs_dlstore;
+
+typedef struct cs_inode_ctx {
+ cs_loc_xattr_t locxattr;
+ gf_cs_obj_state state;
+} cs_inode_ctx_t;
+
+struct cs_plugin {
+ char *name; /* store name */
+ char *library; /* library to load for the given store */
+ char *description; /* description about the store */
+};
+
+cs_local_t *
+cs_local_init(xlator_t *this, call_frame_t *frame, loc_t *loc, fd_t *fd,
+ glusterfs_fop_t fop);
+
+int
+locate_and_execute(call_frame_t *frame);
+
+int32_t
+cs_resume_setxattr(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ dict_t *dict, int32_t flags, dict_t *xdata);
+
+int32_t
+cs_inodelk_unlock_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata);
+
+size_t
+cs_write_callback(void *lcurlbuf, size_t size, size_t nitems, void *frame);
+
+void
+cs_common_cbk(call_frame_t *frame);
+
+gf_boolean_t
+cs_is_file_remote(struct iatt *stbuf, dict_t *xattr);
+
+int32_t
+cs_setxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata);
+int
+cs_build_loc(loc_t *loc, call_frame_t *frame);
+
+int
+cs_blocking_inodelk_cbk(call_frame_t *lock_frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, dict_t *xdata);
+
+int
+cs_read_authinfo(xlator_t *this);
+
+int
+__cs_inode_ctx_update(xlator_t *this, inode_t *inode, uint64_t val);
+
+int
+cs_inode_ctx_reset(xlator_t *this, inode_t *inode);
+
+void
+__cs_inode_ctx_get(xlator_t *this, inode_t *inode, cs_inode_ctx_t **ctx);
+
+gf_cs_obj_state
+__cs_get_file_state(inode_t *inode, cs_inode_ctx_t *ctx);
+
+int
+cs_inodelk_unlock(call_frame_t *main_frame);
+
+int
+cs_resume_postprocess(xlator_t *this, call_frame_t *frame, inode_t *inode);
+
+int32_t
+cs_truncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
+ int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
+ struct iatt *postbuf, dict_t *xdata);
+int32_t
+cs_resume_truncate(call_frame_t *frame, xlator_t *this, loc_t *loc,
+ off_t offset, dict_t *xattr_req);
+
+int32_t
+cs_readv_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, struct iovec *vector, int32_t count,
+ struct iatt *stbuf, struct iobref *iobref, dict_t *xdata);
+int32_t
+cs_resume_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata);
+int32_t
+cs_readv(call_frame_t *frame, xlator_t *this, fd_t *fd, size_t size,
+ off_t offset, uint32_t flags, dict_t *xdata);
+
+int
+cs_resume_remote_readv_postprocess(xlator_t *this, call_frame_t *frame,
+ inode_t *inode, off_t offset, size_t size,
+ uint32_t flags);
+int
+cs_serve_readv(call_frame_t *frame, off_t offset, size_t size, uint32_t flags);
+#endif /* __CLOUDSYNC_H__ */