Diffstat (limited to 'extras')
-rw-r--r--  extras/LinuxRPM/Makefile.am | 10
-rw-r--r--  extras/Makefile.am | 66
-rw-r--r--  extras/benchmarking/glfs-bm.c | 509
-rw-r--r--  extras/benchmarking/rdd.c | 1001
-rwxr-xr-x  extras/checkpatch.pl | 4337
-rwxr-xr-x  extras/clang-checker.sh | 301
-rw-r--r--  extras/cliutils/Makefile.am | 4
-rw-r--r--  extras/cliutils/README.md | 233
-rw-r--r--  extras/cliutils/__init__.py | 31
-rw-r--r--  extras/cliutils/cliutils.py | 237
-rwxr-xr-x  extras/collect-system-stats.sh | 52
-rw-r--r--  extras/command-completion/gluster.bash | 40
-rwxr-xr-x  extras/control-cpu-load.sh | 116
-rwxr-xr-x  extras/control-mem.sh | 128
-rwxr-xr-x  extras/create_new_xlator/generate_xlator.py | 104
-rw-r--r--  extras/create_new_xlator/new-xlator-tmpl.c | 89
-rw-r--r--  extras/create_new_xlator/new-xlator.c.tmpl | 151
-rw-r--r--  extras/devel-tools/devel-vagrant/Vagrantfile | 21
-rw-r--r--  extras/devel-tools/devel-vagrant/ansible/roles/cluster/tasks/main.yml | 5
-rw-r--r--  extras/devel-tools/devel-vagrant/ansible/roles/compile-gluster/tasks/main.yml | 9
-rw-r--r--  extras/devel-tools/devel-vagrant/ansible/setup.yml | 9
-rwxr-xr-x  extras/devel-tools/print-backtrace.sh | 115
-rwxr-xr-x  extras/devel-tools/strace-brick.sh | 55
-rw-r--r--  extras/distributed-testing/README | 28
-rw-r--r--  extras/distributed-testing/distributed-test-build-env | 20
-rwxr-xr-x  extras/distributed-testing/distributed-test-build.sh | 27
-rw-r--r--  extras/distributed-testing/distributed-test-env | 48
-rwxr-xr-x  extras/distributed-testing/distributed-test-runner.py | 859
-rwxr-xr-x  extras/distributed-testing/distributed-test.sh | 95
-rw-r--r--  extras/ec-heal-script/README.md | 69
-rwxr-xr-x  extras/ec-heal-script/correct_pending_heals.sh | 415
-rwxr-xr-x  extras/ec-heal-script/gfid_needing_heal_parallel.sh | 278
-rwxr-xr-x  extras/failed-tests.py | 253
-rw-r--r--  extras/firewalld/Makefile.am | 2
-rw-r--r--  extras/firewalld/glusterfs.xml | 1
-rw-r--r--  extras/ganesha/config/ganesha-ha.conf.sample | 3
-rw-r--r--  extras/ganesha/ocf/Makefile.am | 1
-rw-r--r--  extras/ganesha/ocf/ganesha_grace | 1
-rw-r--r--  extras/ganesha/ocf/ganesha_mon | 1
-rw-r--r--  extras/ganesha/ocf/ganesha_nfsd | 3
-rw-r--r--  extras/ganesha/scripts/Makefile.am | 8
-rwxr-xr-x  extras/ganesha/scripts/copy-export-ganesha.sh | 97
-rwxr-xr-x  extras/ganesha/scripts/create-export-ganesha.sh | 55
-rwxr-xr-x  extras/ganesha/scripts/dbus-send.sh | 74
-rw-r--r--  extras/ganesha/scripts/ganesha-ha.sh | 437
-rwxr-xr-x  extras/ganesha/scripts/generate-epoch.py | 6
-rw-r--r--  extras/geo-rep/Makefile.am | 7
-rw-r--r--  extras/geo-rep/gsync-sync-gfid.c | 168
-rw-r--r--  extras/geo-rep/schedule_georep.py.in | 111
-rwxr-xr-x  extras/git-branch-diff.py | 285
-rw-r--r--  extras/glusterd.vol.in | 5
-rw-r--r--  extras/glusterfs-georep-logrotate | 24
-rwxr-xr-x  extras/glusterfs-georep-upgrade.py | 77
-rw-r--r--  extras/glusterfs-logrotate | 35
-rw-r--r--  extras/glusterfs-mode.el | 225
-rwxr-xr-x [-rw-r--r--]  extras/gnfs-loganalyse.py | 5
-rw-r--r--  extras/group-db-workload | 12
-rw-r--r--  extras/group-distributed-virt | 10
-rw-r--r--  extras/group-gluster-block | 27
-rw-r--r--  extras/group-metadata-cache | 6
-rw-r--r--  extras/group-nl-cache | 5
-rw-r--r--  extras/group-samba | 11
-rw-r--r--  extras/group-virt.example | 20
-rw-r--r--  extras/hook-scripts/Makefile.am | 2
-rwxr-xr-x  extras/hook-scripts/S40ufo-stop.py | 2
-rwxr-xr-x  extras/hook-scripts/S56glusterd-geo-rep-create-post.sh | 24
-rw-r--r--  extras/hook-scripts/add-brick/post/Makefile.am | 6
-rwxr-xr-x  extras/hook-scripts/add-brick/post/S10selinux-label-brick.sh | 100
-rwxr-xr-x  extras/hook-scripts/add-brick/post/S13create-subdir-mounts.sh | 86
-rwxr-xr-x  extras/hook-scripts/add-brick/post/disabled-quota-root-xattr-heal.sh | 152
-rw-r--r--  extras/hook-scripts/add-brick/pre/Makefile.am | 2
-rwxr-xr-x  extras/hook-scripts/add-brick/pre/S28Quota-enable-root-xattr-heal.sh | 15
-rw-r--r--  extras/hook-scripts/create/Makefile.am | 1
-rw-r--r--  extras/hook-scripts/create/post/Makefile.am | 8
-rwxr-xr-x  extras/hook-scripts/create/post/S10selinux-label-brick.sh | 65
-rw-r--r--  extras/hook-scripts/delete/Makefile.am | 1
-rw-r--r--  extras/hook-scripts/delete/pre/Makefile.am | 8
-rwxr-xr-x  extras/hook-scripts/delete/pre/S10selinux-del-fcontext.sh | 73
-rw-r--r--  extras/hook-scripts/set/post/Makefile.am | 2
-rwxr-xr-x  extras/hook-scripts/set/post/S30samba-set.sh | 36
-rwxr-xr-x  extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh | 31
-rw-r--r--  extras/hook-scripts/start/post/Makefile.am | 2
-rwxr-xr-x  extras/hook-scripts/start/post/S29CTDBsetup.sh | 36
-rwxr-xr-x  extras/hook-scripts/start/post/S30samba-start.sh | 68
-rwxr-xr-x  extras/hook-scripts/start/post/S31ganesha-start.sh | 47
-rw-r--r--  extras/hook-scripts/stop/pre/Makefile.am | 2
-rwxr-xr-x  extras/hook-scripts/stop/pre/S29CTDB-teardown.sh | 22
-rwxr-xr-x  extras/hook-scripts/stop/pre/S30samba-stop.sh | 44
-rwxr-xr-x  extras/identify-hangs.sh | 53
-rw-r--r--  extras/init.d/Makefile.am | 12
-rw-r--r--  extras/init.d/glustereventsd-Debian.in | 91
-rw-r--r--  extras/init.d/glustereventsd-FreeBSD.in | 19
-rw-r--r--  extras/init.d/glustereventsd-Redhat.in | 129
-rwxr-xr-x  extras/init.d/rhel5-load-fuse.modules | 7
-rwxr-xr-x  extras/mount-shared-storage.sh | 39
-rwxr-xr-x  extras/ocf/volume.in | 42
-rwxr-xr-x  extras/profiler/glusterfs-profiler | 4
-rwxr-xr-x  extras/prot_filter.py | 144
-rw-r--r--  extras/python/Makefile.am | 7
-rw-r--r--  extras/python/__init__.py | 2
-rwxr-xr-x  extras/quota/contri-add.sh (renamed from extras/contri-add.sh) | 0
-rwxr-xr-x  extras/quota/log_accounting.sh | 26
-rwxr-xr-x  extras/quota/quota_fsck.py | 377
-rwxr-xr-x  extras/quota/xattr_analysis.py | 73
-rwxr-xr-x  extras/rebalance.py | 106
-rw-r--r--  extras/run-gluster.tmpfiles.in | 2
-rw-r--r--  extras/snap_scheduler/Makefile.am | 6
-rw-r--r--  extras/snap_scheduler/conf.py.in | 11
-rwxr-xr-x  extras/snap_scheduler/gcron.py | 38
-rwxr-xr-x  extras/snap_scheduler/snap_scheduler.py | 307
-rwxr-xr-x  extras/statedumpparse.rb | 208
-rwxr-xr-x  extras/stop-all-gluster-processes.sh | 119
-rw-r--r--  extras/stripe-merge.c | 689
-rw-r--r--  extras/systemd/Makefile.am | 16
-rw-r--r--  extras/systemd/gluster-ta-volume.service.in | 13
-rw-r--r--  extras/systemd/glusterd.service.in | 15
-rw-r--r--  extras/systemd/glustereventsd.service.in | 16
-rw-r--r--  extras/systemd/glusterfssharedstorage.service.in | 13
-rw-r--r--  extras/test/ld-preload-test/ld-preload-lib.c | 598
-rw-r--r--  extras/test/ld-preload-test/ld-preload-test.c | 505
-rw-r--r--  extras/test/open-fd-tests.c | 85
-rw-r--r--  extras/test/test-ffop.c | 1640
-rwxr-xr-x  extras/thin-arbiter/setup-thin-arbiter.sh | 184
-rw-r--r--  extras/thin-arbiter/thin-arbiter.vol | 57
-rw-r--r--  extras/volfilter.py | 45
-rw-r--r--  extras/who-wrote-glusterfs/gitdm.aliases | 8
-rw-r--r--  extras/who-wrote-glusterfs/gitdm.domain-map | 15
127 files changed, 9699 insertions, 8294 deletions
diff --git a/extras/LinuxRPM/Makefile.am b/extras/LinuxRPM/Makefile.am
index d1b78ea8974..f02853798c0 100644
--- a/extras/LinuxRPM/Makefile.am
+++ b/extras/LinuxRPM/Makefile.am
@@ -15,10 +15,10 @@ glusterrpms_without_autogen: prep srcrpm rpms
-rm -rf rpmbuild
autogen:
- cd ../.. \
- && rm -rf autom4te.cache \
- && ./autogen.sh \
- && ./configure --with-previous-options
+ cd ../.. && \
+ rm -rf autom4te.cache && \
+ ./autogen.sh && \
+ ./configure --enable-gnfs --with-previous-options
prep:
$(MAKE) -C ../.. dist;
@@ -36,7 +36,7 @@ srcrpm:
mv rpmbuild/SRPMS/* .
rpms:
- rpmbuild --define '_topdir $(shell pwd)/rpmbuild' -bb rpmbuild/SPECS/glusterfs.spec
+ rpmbuild --define '_topdir $(shell pwd)/rpmbuild' --with gnfs -bb rpmbuild/SPECS/glusterfs.spec
mv rpmbuild/RPMS/*/* .
# EPEL-5 does not like new versions of rpmbuild and requires some
diff --git a/extras/Makefile.am b/extras/Makefile.am
index 609d497f5b8..983f014cca6 100644
--- a/extras/Makefile.am
+++ b/extras/Makefile.am
@@ -1,29 +1,58 @@
-addonexecdir = $(libexecdir)/glusterfs
-addonexec_SCRIPTS = peer_add_secret_pub
+addonexecdir = $(GLUSTERFS_LIBEXECDIR)
+addonexec_SCRIPTS =
+if WITH_SERVER
+addonexec_SCRIPTS += peer_add_secret_pub
+if USE_SYSTEMD
+addonexec_SCRIPTS += mount-shared-storage.sh
+endif
+endif
EditorModedir = $(docdir)
EditorMode_DATA = glusterfs-mode.el glusterfs.vim
SUBDIRS = init.d systemd benchmarking hook-scripts $(OCF_SUBDIR) LinuxRPM \
- $(GEOREP_EXTRAS_SUBDIR) ganesha snap_scheduler firewalld
+ $(GEOREP_EXTRAS_SUBDIR) snap_scheduler firewalld cliutils python \
+ ganesha
confdir = $(sysconfdir)/glusterfs
+if WITH_SERVER
conf_DATA = glusterfs-logrotate gluster-rsyslog-7.2.conf gluster-rsyslog-5.8.conf \
- logger.conf.example glusterfs-georep-logrotate group-virt.example
+ logger.conf.example glusterfs-georep-logrotate group-virt.example \
+ group-metadata-cache group-gluster-block group-nl-cache \
+ group-db-workload group-distributed-virt group-samba
+endif
voldir = $(sysconfdir)/glusterfs
-vol_DATA = glusterd.vol
+vol_DATA = thin-arbiter/thin-arbiter.vol
+if WITH_SERVER
+vol_DATA += glusterd.vol
+endif
+
scriptsdir = $(datadir)/glusterfs/scripts
-scripts_SCRIPTS = post-upgrade-script-for-quota.sh \
+scripts_SCRIPTS = thin-arbiter/setup-thin-arbiter.sh
+if WITH_SERVER
+scripts_SCRIPTS += post-upgrade-script-for-quota.sh \
pre-upgrade-script-for-quota.sh stop-all-gluster-processes.sh
+if USE_SYSTEMD
+scripts_SCRIPTS += control-cpu-load.sh
+scripts_SCRIPTS += control-mem.sh
+endif
+endif
-EXTRA_DIST = $(conf_DATA) specgen.scm glusterfs-mode.el glusterfs.vim \
- migrate-unify-to-distribute.sh backend-xattr-sanitize.sh backend-cleanup.sh \
- disk_usage_sync.sh clear_xattrs.sh glusterd-sysconfig glusterd.vol \
- post-upgrade-script-for-quota.sh pre-upgrade-script-for-quota.sh \
- command-completion/gluster.bash command-completion/Makefile \
- command-completion/README stop-all-gluster-processes.sh
+EXTRA_DIST = glusterfs-logrotate gluster-rsyslog-7.2.conf gluster-rsyslog-5.8.conf \
+ logger.conf.example glusterfs-georep-logrotate group-virt.example \
+ group-metadata-cache group-gluster-block group-nl-cache \
+ group-db-workload group-samba specgen.scm glusterfs-mode.el glusterfs.vim \
+ migrate-unify-to-distribute.sh backend-xattr-sanitize.sh \
+ backend-cleanup.sh disk_usage_sync.sh clear_xattrs.sh \
+ glusterd-sysconfig glusterd.vol post-upgrade-script-for-quota.sh \
+ pre-upgrade-script-for-quota.sh command-completion/gluster.bash \
+ command-completion/Makefile command-completion/README \
+ stop-all-gluster-processes.sh clang-checker.sh mount-shared-storage.sh \
+ control-cpu-load.sh control-mem.sh group-distributed-virt \
+ thin-arbiter/thin-arbiter.vol thin-arbiter/setup-thin-arbiter.sh
+if WITH_SERVER
install-data-local:
if [ -n "$(tmpfilesdir)" ]; then \
$(mkdir_p) $(DESTDIR)$(tmpfilesdir); \
@@ -33,3 +62,16 @@ install-data-local:
$(mkdir_p) $(DESTDIR)$(GLUSTERD_WORKDIR)/groups
$(INSTALL_DATA) $(top_srcdir)/extras/group-virt.example \
$(DESTDIR)$(GLUSTERD_WORKDIR)/groups/virt
+ $(INSTALL_DATA) $(top_srcdir)/extras/group-metadata-cache \
+ $(DESTDIR)$(GLUSTERD_WORKDIR)/groups/metadata-cache
+ $(INSTALL_DATA) $(top_srcdir)/extras/group-gluster-block \
+ $(DESTDIR)$(GLUSTERD_WORKDIR)/groups/gluster-block
+ $(INSTALL_DATA) $(top_srcdir)/extras/group-nl-cache \
+ $(DESTDIR)$(GLUSTERD_WORKDIR)/groups/nl-cache
+ $(INSTALL_DATA) $(top_srcdir)/extras/group-db-workload \
+ $(DESTDIR)$(GLUSTERD_WORKDIR)/groups/db-workload
+ $(INSTALL_DATA) $(top_srcdir)/extras/group-distributed-virt \
+ $(DESTDIR)$(GLUSTERD_WORKDIR)/groups/distributed-virt
+ $(INSTALL_DATA) $(top_srcdir)/extras/group-samba \
+ $(DESTDIR)$(GLUSTERD_WORKDIR)/groups/samba
+endif
diff --git a/extras/benchmarking/glfs-bm.c b/extras/benchmarking/glfs-bm.c
index dc717f33c16..f7f5873f84d 100644
--- a/extras/benchmarking/glfs-bm.c
+++ b/extras/benchmarking/glfs-bm.c
@@ -25,365 +25,338 @@
#include <sys/time.h>
struct state {
- char need_op_write:1;
- char need_op_read:1;
+ char need_op_write : 1;
+ char need_op_read : 1;
- char need_iface_fileio:1;
- char need_iface_xattr:1;
+ char need_iface_fileio : 1;
+ char need_iface_xattr : 1;
- char need_mode_posix:1;
+ char need_mode_posix : 1;
- char prefix[512];
- long int count;
+ char prefix[512];
+ long int count;
- size_t block_size;
+ size_t block_size;
- char *specfile;
+ char *specfile;
- long int io_size;
+ long int io_size;
};
-
-#define MEASURE(func, arg) measure (func, #func, arg)
-
+#define MEASURE(func, arg) measure(func, #func, arg)
void
-tv_difference (struct timeval *tv_stop,
- struct timeval *tv_start,
- struct timeval *tv_diff)
+tv_difference(struct timeval *tv_stop, struct timeval *tv_start,
+ struct timeval *tv_diff)
{
- if (tv_stop->tv_usec < tv_start->tv_usec) {
- tv_diff->tv_usec = (tv_stop->tv_usec + 1000000) - tv_start->tv_usec;
- tv_diff->tv_sec = (tv_stop->tv_sec - 1 - tv_start->tv_sec);
- } else {
- tv_diff->tv_usec = tv_stop->tv_usec - tv_start->tv_usec;
- tv_diff->tv_sec = tv_stop->tv_sec - tv_start->tv_sec;
- }
+ if (tv_stop->tv_usec < tv_start->tv_usec) {
+ tv_diff->tv_usec = (tv_stop->tv_usec + 1000000) - tv_start->tv_usec;
+ tv_diff->tv_sec = (tv_stop->tv_sec - 1 - tv_start->tv_sec);
+ } else {
+ tv_diff->tv_usec = tv_stop->tv_usec - tv_start->tv_usec;
+ tv_diff->tv_sec = tv_stop->tv_sec - tv_start->tv_sec;
+ }
}
-
void
-measure (int (*func)(struct state *state),
- char *func_name, struct state *state)
+measure(int (*func)(struct state *state), char *func_name, struct state *state)
{
- struct timeval tv_start, tv_stop, tv_diff;
- state->io_size = 0;
- long int count;
+ struct timeval tv_start, tv_stop, tv_diff;
+ state->io_size = 0;
+ long int count;
- gettimeofday (&tv_start, NULL);
- count = func (state);
- gettimeofday (&tv_stop, NULL);
+ gettimeofday(&tv_start, NULL);
+ count = func(state);
+ gettimeofday(&tv_stop, NULL);
- tv_difference (&tv_stop, &tv_start, &tv_diff);
+ tv_difference(&tv_stop, &tv_start, &tv_diff);
- fprintf (stdout, "%s: count=%ld, size=%ld, time=%ld:%ld\n",
- func_name, count, state->io_size,
- tv_diff.tv_sec, tv_diff.tv_usec);
+ fprintf(stdout, "%s: count=%ld, size=%ld, time=%ld:%ld\n", func_name, count,
+ state->io_size, tv_diff.tv_sec, tv_diff.tv_usec);
}
-
static error_t
-parse_opts (int key, char *arg,
- struct argp_state *_state)
+parse_opts(int key, char *arg, struct argp_state *_state)
{
- struct state *state = _state->input;
+ struct state *state = _state->input;
- switch (key)
- {
+ switch (key) {
case 'o':
- if (strcasecmp (arg, "read") == 0) {
- state->need_op_write = 0;
- state->need_op_read = 1;
- } else if (strcasecmp (arg, "write") == 0) {
- state->need_op_write = 1;
- state->need_op_read = 0;
- } else if (strcasecmp (arg, "both") == 0) {
- state->need_op_write = 1;
- state->need_op_read = 1;
- } else {
- fprintf (stderr, "unknown op: %s\n", arg);
- return -1;
- }
- break;
+ if (strcasecmp(arg, "read") == 0) {
+ state->need_op_write = 0;
+ state->need_op_read = 1;
+ } else if (strcasecmp(arg, "write") == 0) {
+ state->need_op_write = 1;
+ state->need_op_read = 0;
+ } else if (strcasecmp(arg, "both") == 0) {
+ state->need_op_write = 1;
+ state->need_op_read = 1;
+ } else {
+ fprintf(stderr, "unknown op: %s\n", arg);
+ return -1;
+ }
+ break;
case 'i':
- if (strcasecmp (arg, "fileio") == 0) {
- state->need_iface_fileio = 1;
- state->need_iface_xattr = 0;
- } else if (strcasecmp (arg, "xattr") == 0) {
- state->need_iface_fileio = 0;
- state->need_iface_xattr = 1;
- } else if (strcasecmp (arg, "both") == 0) {
- state->need_iface_fileio = 1;
- state->need_iface_xattr = 1;
- } else {
- fprintf (stderr, "unknown interface: %s\n", arg);
- return -1;
- }
- break;
- case 'b':
- {
- size_t block_size = atoi (arg);
- if (!block_size) {
- fprintf (stderr, "incorrect size: %s\n", arg);
- return -1;
- }
- state->block_size = block_size;
- }
- break;
+ if (strcasecmp(arg, "fileio") == 0) {
+ state->need_iface_fileio = 1;
+ state->need_iface_xattr = 0;
+ } else if (strcasecmp(arg, "xattr") == 0) {
+ state->need_iface_fileio = 0;
+ state->need_iface_xattr = 1;
+ } else if (strcasecmp(arg, "both") == 0) {
+ state->need_iface_fileio = 1;
+ state->need_iface_xattr = 1;
+ } else {
+ fprintf(stderr, "unknown interface: %s\n", arg);
+ return -1;
+ }
+ break;
+ case 'b': {
+ size_t block_size = atoi(arg);
+ if (!block_size) {
+ fprintf(stderr, "incorrect size: %s\n", arg);
+ return -1;
+ }
+ state->block_size = block_size;
+ } break;
case 's':
- state->specfile = strdup (arg);
- break;
+ state->specfile = strdup(arg);
+ break;
case 'p':
- fprintf (stderr, "using prefix: %s\n", arg);
- strncpy (state->prefix, arg, 512);
- break;
- case 'c':
- {
- long count = atol (arg);
- if (!count) {
- fprintf (stderr, "incorrect count: %s\n", arg);
- return -1;
- }
- state->count = count;
- }
- break;
+ fprintf(stderr, "using prefix: %s\n", arg);
+ strncpy(state->prefix, arg, 512);
+ break;
+ case 'c': {
+ long count = atol(arg);
+ if (!count) {
+ fprintf(stderr, "incorrect count: %s\n", arg);
+ return -1;
+ }
+ state->count = count;
+ } break;
case ARGP_KEY_NO_ARGS:
- break;
+ break;
case ARGP_KEY_ARG:
- break;
- }
+ break;
+ }
- return 0;
+ return 0;
}
int
-do_mode_posix_iface_fileio_write (struct state *state)
+do_mode_posix_iface_fileio_write(struct state *state)
{
- long int i;
- int ret = -1;
- char block[state->block_size];
-
- for (i=0; i<state->count; i++) {
- int fd = -1;
- char filename[512];
-
- sprintf (filename, "%s.%06ld", state->prefix, i);
-
- fd = open (filename, O_CREAT|O_WRONLY, 00600);
- if (fd == -1) {
- fprintf (stderr, "open(%s) => %s\n", filename, strerror (errno));
- break;
- }
- ret = write (fd, block, state->block_size);
- if (ret != state->block_size) {
- fprintf (stderr, "write (%s) => %d/%s\n", filename, ret,
- strerror (errno));
- close (fd);
- break;
- }
- close (fd);
- state->io_size += ret;
+ long int i;
+ int ret = -1;
+ char block[state->block_size];
+
+ for (i = 0; i < state->count; i++) {
+ int fd = -1;
+ char filename[512];
+
+ sprintf(filename, "%s.%06ld", state->prefix, i);
+
+ fd = open(filename, O_CREAT | O_WRONLY, 00600);
+ if (fd == -1) {
+ fprintf(stderr, "open(%s) => %s\n", filename, strerror(errno));
+ break;
+ }
+ ret = write(fd, block, state->block_size);
+ if (ret != state->block_size) {
+ fprintf(stderr, "write (%s) => %d/%s\n", filename, ret,
+ strerror(errno));
+ close(fd);
+ break;
}
+ close(fd);
+ state->io_size += ret;
+ }
- return i;
+ return i;
}
-
int
-do_mode_posix_iface_fileio_read (struct state *state)
+do_mode_posix_iface_fileio_read(struct state *state)
{
- long int i;
- int ret = -1;
- char block[state->block_size];
-
- for (i=0; i<state->count; i++) {
- int fd = -1;
- char filename[512];
-
- sprintf (filename, "%s.%06ld", state->prefix, i);
-
- fd = open (filename, O_RDONLY);
- if (fd == -1) {
- fprintf (stderr, "open(%s) => %s\n", filename, strerror (errno));
- break;
- }
- ret = read (fd, block, state->block_size);
- if (ret == -1) {
- fprintf (stderr, "read(%s) => %d/%s\n", filename, ret, strerror (errno));
- close (fd);
- break;
- }
- close (fd);
- state->io_size += ret;
+ long int i;
+ int ret = -1;
+ char block[state->block_size];
+
+ for (i = 0; i < state->count; i++) {
+ int fd = -1;
+ char filename[512];
+
+ sprintf(filename, "%s.%06ld", state->prefix, i);
+
+ fd = open(filename, O_RDONLY);
+ if (fd == -1) {
+ fprintf(stderr, "open(%s) => %s\n", filename, strerror(errno));
+ break;
}
+ ret = read(fd, block, state->block_size);
+ if (ret == -1) {
+ fprintf(stderr, "read(%s) => %d/%s\n", filename, ret,
+ strerror(errno));
+ close(fd);
+ break;
+ }
+ close(fd);
+ state->io_size += ret;
+ }
- return i;
+ return i;
}
-
int
-do_mode_posix_iface_fileio (struct state *state)
+do_mode_posix_iface_fileio(struct state *state)
{
- if (state->need_op_write)
- MEASURE (do_mode_posix_iface_fileio_write, state);
+ if (state->need_op_write)
+ MEASURE(do_mode_posix_iface_fileio_write, state);
- if (state->need_op_read)
- MEASURE (do_mode_posix_iface_fileio_read, state);
+ if (state->need_op_read)
+ MEASURE(do_mode_posix_iface_fileio_read, state);
- return 0;
+ return 0;
}
-
int
-do_mode_posix_iface_xattr_write (struct state *state)
+do_mode_posix_iface_xattr_write(struct state *state)
{
- long int i;
- int ret = -1;
- char block[state->block_size];
- char *dname = NULL, *dirc = NULL;
- char *bname = NULL, *basec = NULL;
-
- dirc = strdup (state->prefix);
- basec = strdup (state->prefix);
- dname = dirname (dirc);
- bname = basename (basec);
-
- for (i=0; i<state->count; i++) {
- char key[512];
-
- sprintf (key, "glusterfs.file.%s.%06ld", bname, i);
-
- ret = lsetxattr (dname, key, block, state->block_size, 0);
-
- if (ret != 0) {
- fprintf (stderr, "lsetxattr (%s, %s, %p) => %s\n",
- dname, key, block, strerror (errno));
- break;
- }
- state->io_size += state->block_size;
+ long int i;
+ int ret = -1;
+ char block[state->block_size];
+ char *dname = NULL, *dirc = NULL;
+ char *bname = NULL, *basec = NULL;
+
+ dirc = strdup(state->prefix);
+ basec = strdup(state->prefix);
+ dname = dirname(dirc);
+ bname = basename(basec);
+
+ for (i = 0; i < state->count; i++) {
+ char key[512];
+
+ sprintf(key, "glusterfs.file.%s.%06ld", bname, i);
+
+ ret = lsetxattr(dname, key, block, state->block_size, 0);
+
+ if (ret != 0) {
+ fprintf(stderr, "lsetxattr (%s, %s, %p) => %s\n", dname, key, block,
+ strerror(errno));
+ break;
}
+ state->io_size += state->block_size;
+ }
- free (dirc);
- free (basec);
+ free(dirc);
+ free(basec);
- return i;
+ return i;
}
-
int
-do_mode_posix_iface_xattr_read (struct state *state)
+do_mode_posix_iface_xattr_read(struct state *state)
{
- long int i;
- int ret = -1;
- char block[state->block_size];
- char *dname = NULL, *dirc = NULL;
- char *bname = NULL, *basec = NULL;
-
- dirc = strdup (state->prefix);
- basec = strdup (state->prefix);
- dname = dirname (dirc);
- bname = basename (basec);
-
- for (i=0; i<state->count; i++) {
- char key[512];
-
- sprintf (key, "glusterfs.file.%s.%06ld", bname, i);
-
- ret = lgetxattr (dname, key, block, state->block_size);
-
- if (ret < 0) {
- fprintf (stderr, "lgetxattr (%s, %s, %p) => %s\n",
- dname, key, block, strerror (errno));
- break;
- }
- state->io_size += ret;
+ long int i;
+ int ret = -1;
+ char block[state->block_size];
+ char *dname = NULL, *dirc = NULL;
+ char *bname = NULL, *basec = NULL;
+
+ dirc = strdup(state->prefix);
+ basec = strdup(state->prefix);
+ dname = dirname(dirc);
+ bname = basename(basec);
+
+ for (i = 0; i < state->count; i++) {
+ char key[512];
+
+ sprintf(key, "glusterfs.file.%s.%06ld", bname, i);
+
+ ret = lgetxattr(dname, key, block, state->block_size);
+
+ if (ret < 0) {
+ fprintf(stderr, "lgetxattr (%s, %s, %p) => %s\n", dname, key, block,
+ strerror(errno));
+ break;
}
+ state->io_size += ret;
+ }
- return i;
+ return i;
}
-
int
-do_mode_posix_iface_xattr (struct state *state)
+do_mode_posix_iface_xattr(struct state *state)
{
- if (state->need_op_write)
- MEASURE (do_mode_posix_iface_xattr_write, state);
+ if (state->need_op_write)
+ MEASURE(do_mode_posix_iface_xattr_write, state);
- if (state->need_op_read)
- MEASURE (do_mode_posix_iface_xattr_read, state);
+ if (state->need_op_read)
+ MEASURE(do_mode_posix_iface_xattr_read, state);
- return 0;
+ return 0;
}
int
-do_mode_posix (struct state *state)
+do_mode_posix(struct state *state)
{
- if (state->need_iface_fileio)
- do_mode_posix_iface_fileio (state);
+ if (state->need_iface_fileio)
+ do_mode_posix_iface_fileio(state);
- if (state->need_iface_xattr)
- do_mode_posix_iface_xattr (state);
+ if (state->need_iface_xattr)
+ do_mode_posix_iface_xattr(state);
- return 0;
+ return 0;
}
-
int
-do_actions (struct state *state)
+do_actions(struct state *state)
{
- if (state->need_mode_posix)
- do_mode_posix (state);
+ if (state->need_mode_posix)
+ do_mode_posix(state);
- return 0;
+ return 0;
}
static struct argp_option options[] = {
- {"op", 'o', "OPERATIONS", 0,
- "WRITE|READ|BOTH - defaults to BOTH"},
- {"iface", 'i', "INTERFACE", 0,
- "FILEIO|XATTR|BOTH - defaults to FILEIO"},
- {"block", 'b', "BLOCKSIZE", 0,
- "<NUM> - defaults to 4096"},
- {"specfile", 's', "SPECFILE", 0,
- "absolute path to specfile"},
- {"prefix", 'p', "PREFIX", 0,
- "filename prefix"},
- {"count", 'c', "COUNT", 0,
- "number of files"},
- {0, 0, 0, 0, 0}
-};
+ {"op", 'o', "OPERATIONS", 0, "WRITE|READ|BOTH - defaults to BOTH"},
+ {"iface", 'i', "INTERFACE", 0, "FILEIO|XATTR|BOTH - defaults to FILEIO"},
+ {"block", 'b', "BLOCKSIZE", 0, "<NUM> - defaults to 4096"},
+ {"specfile", 's', "SPECFILE", 0, "absolute path to specfile"},
+ {"prefix", 'p', "PREFIX", 0, "filename prefix"},
+ {"count", 'c', "COUNT", 0, "number of files"},
+ {0, 0, 0, 0, 0}};
-static struct argp argp = {
- options,
- parse_opts,
- "tool",
- "tool to benchmark small file performance"
-};
+static struct argp argp = {options, parse_opts, "tool",
+ "tool to benchmark small file performance"};
int
-main (int argc, char *argv[])
+main(int argc, char *argv[])
{
- struct state state = {0, };
+ struct state state = {
+ 0,
+ };
- state.need_op_write = 1;
- state.need_op_read = 1;
+ state.need_op_write = 1;
+ state.need_op_read = 1;
- state.need_iface_fileio = 1;
- state.need_iface_xattr = 0;
+ state.need_iface_fileio = 1;
+ state.need_iface_xattr = 0;
- state.need_mode_posix = 1;
+ state.need_mode_posix = 1;
- state.block_size = 4096;
+ state.block_size = 4096;
- strcpy (state.prefix, "tmpfile");
- state.count = 1048576;
+ strcpy(state.prefix, "tmpfile");
+ state.count = 1048576;
- if (argp_parse (&argp, argc, argv, 0, 0, &state) != 0) {
- fprintf (stderr, "argp_parse() failed\n");
- return 1;
- }
+ if (argp_parse(&argp, argc, argv, 0, 0, &state) != 0) {
+ fprintf(stderr, "argp_parse() failed\n");
+ return 1;
+ }
- do_actions (&state);
+ do_actions(&state);
- return 0;
+ return 0;
}
diff --git a/extras/benchmarking/rdd.c b/extras/benchmarking/rdd.c
index a667c6a1d65..efc9d342a37 100644
--- a/extras/benchmarking/rdd.c
+++ b/extras/benchmarking/rdd.c
@@ -20,633 +20,586 @@
#define TWO_POWER(power) (2UL << (power))
-#define RDD_INTEGER_VALUE ((TWO_POWER ((sizeof (int) * 8))) - 1)
+#define RDD_INTEGER_VALUE ((TWO_POWER((sizeof(int) * 8))) - 1)
#ifndef UNIX_PATH_MAX
#define UNIX_PATH_MAX 108
#endif
#define UNIT_KB 1024ULL
-#define UNIT_MB UNIT_KB*1024ULL
-#define UNIT_GB UNIT_MB*1024ULL
-#define UNIT_TB UNIT_GB*1024ULL
-#define UNIT_PB UNIT_TB*1024ULL
+#define UNIT_MB UNIT_KB * 1024ULL
+#define UNIT_GB UNIT_MB * 1024ULL
+#define UNIT_TB UNIT_GB * 1024ULL
+#define UNIT_PB UNIT_TB * 1024ULL
-#define UNIT_KB_STRING "KB"
-#define UNIT_MB_STRING "MB"
-#define UNIT_GB_STRING "GB"
-#define UNIT_TB_STRING "TB"
-#define UNIT_PB_STRING "PB"
+#define UNIT_KB_STRING "KB"
+#define UNIT_MB_STRING "MB"
+#define UNIT_GB_STRING "GB"
+#define UNIT_TB_STRING "TB"
+#define UNIT_PB_STRING "PB"
struct rdd_file {
- char path[UNIX_PATH_MAX];
- struct stat st;
- int fd;
+ char path[UNIX_PATH_MAX];
+ struct stat st;
+ int fd;
};
struct rdd_config {
- long iters;
- long max_ops_per_seq;
- size_t max_bs;
- size_t min_bs;
- int thread_count;
- pthread_t *threads;
- pthread_barrier_t barrier;
- pthread_mutex_t lock;
- struct rdd_file in_file;
- struct rdd_file out_file;
- ssize_t file_size;
+ long iters;
+ long max_ops_per_seq;
+ size_t max_bs;
+ size_t min_bs;
+ int thread_count;
+ pthread_t *threads;
+ pthread_barrier_t barrier;
+ pthread_mutex_t lock;
+ struct rdd_file in_file;
+ struct rdd_file out_file;
+ ssize_t file_size;
};
static struct rdd_config rdd_config;
enum rdd_keys {
- RDD_MIN_BS_KEY = 1,
- RDD_MAX_BS_KEY,
+ RDD_MIN_BS_KEY = 1,
+ RDD_MAX_BS_KEY,
};
static error_t
-rdd_parse_opts (int key, char *arg,
- struct argp_state *_state)
+rdd_parse_opts(int key, char *arg, struct argp_state *_state)
{
- switch (key) {
- case 'o':
- {
- int len = 0;
- len = strlen (arg);
- if (len > UNIX_PATH_MAX) {
- fprintf (stderr, "output file name too long (%s)\n",
- arg);
- return -1;
- }
-
- strncpy (rdd_config.out_file.path, arg, len);
- }
- break;
-
- case 'i':
- {
- int len = 0;
- len = strlen (arg);
- if (len > UNIX_PATH_MAX) {
- fprintf (stderr, "input file name too long (%s)\n",
- arg);
- return -1;
- }
-
- strncpy (rdd_config.in_file.path, arg, len);
- rdd_config.in_file.path[len] = '\0';
- }
- break;
-
- case 'f':
- {
- char *tmp = NULL;
- unsigned long long fs = 0;
- if (string2bytesize (arg, &fs) == -1) {
- fprintf (stderr, "invalid argument for file size "
- "(%s)\n", arg);
- return -1;
- }
-
- rdd_config.file_size = fs;
- }
- break;
-
- case RDD_MIN_BS_KEY:
- {
- char *tmp = NULL;
- long bs = 0;
- bs = strtol (arg, &tmp, 10);
- if ((bs == LONG_MAX) || (bs == LONG_MIN) || (tmp && *tmp)) {
- fprintf (stderr, "invalid argument for minimum block"
- "size (%s)\n", arg);
- return -1;
- }
-
- rdd_config.min_bs = bs;
- }
- break;
-
- case RDD_MAX_BS_KEY:
- {
- char *tmp = NULL;
- long bs = 0;
- bs = strtol (arg, &tmp, 10);
- if ((bs == LONG_MAX) || (bs == LONG_MIN) || (tmp && *tmp)) {
- fprintf (stderr, "invalid argument for maximum block"
- "size (%s)\n", arg);
- return -1;
- }
-
- rdd_config.max_bs = bs;
- }
- break;
-
- case 'r':
- {
- char *tmp = NULL;
- long iters = 0;
- iters = strtol (arg, &tmp, 10);
- if ((iters == LONG_MAX) ||
- (iters == LONG_MIN) ||
- (tmp && *tmp)) {
- fprintf (stderr, "invalid argument for iterations"
- "(%s)\n", arg);
- return -1;
- }
-
- rdd_config.iters = iters;
- }
- break;
-
- case 'm':
- {
- char *tmp = NULL;
- long max_ops = 0;
- max_ops = strtol (arg, &tmp, 10);
- if ((max_ops == LONG_MAX) ||
- (max_ops == LONG_MIN) ||
- (tmp && *tmp)) {
- fprintf (stderr, "invalid argument for max-ops"
- "(%s)\n", arg);
- return -1;
- }
+ switch (key) {
+ case 'o': {
+ int len = 0;
+ len = strlen(arg);
+ if (len > UNIX_PATH_MAX) {
+ fprintf(stderr, "output file name too long (%s)\n", arg);
+ return -1;
+ }
- rdd_config.max_ops_per_seq = max_ops;
- }
- break;
+ strncpy(rdd_config.out_file.path, arg, len);
+ } break;
- case 't':
- {
- char *tmp = NULL;
- long threads = 0;
- threads = strtol (arg, &tmp, 10);
- if ((threads == LONG_MAX) ||
- (threads == LONG_MIN) ||
- (tmp && *tmp)) {
- fprintf (stderr, "invalid argument for thread count"
- "(%s)\n", arg);
- return -1;
- }
+ case 'i': {
+ int len = 0;
+ len = strlen(arg);
+ if (len > UNIX_PATH_MAX) {
+ fprintf(stderr, "input file name too long (%s)\n", arg);
+ return -1;
+ }
+
+ strncpy(rdd_config.in_file.path, arg, len);
+ rdd_config.in_file.path[len] = '\0';
+ } break;
+
+ case 'f': {
+ char *tmp = NULL;
+ unsigned long long fs = 0;
+ if (string2bytesize(arg, &fs) == -1) {
+ fprintf(stderr,
+ "invalid argument for file size "
+ "(%s)\n",
+ arg);
+ return -1;
+ }
+
+ rdd_config.file_size = fs;
+ } break;
+
+ case RDD_MIN_BS_KEY: {
+ char *tmp = NULL;
+ long bs = 0;
+ bs = strtol(arg, &tmp, 10);
+ if ((bs == LONG_MAX) || (bs == LONG_MIN) || (tmp && *tmp)) {
+ fprintf(stderr,
+ "invalid argument for minimum block"
+ "size (%s)\n",
+ arg);
+ return -1;
+ }
+
+ rdd_config.min_bs = bs;
+ } break;
+
+ case RDD_MAX_BS_KEY: {
+ char *tmp = NULL;
+ long bs = 0;
+ bs = strtol(arg, &tmp, 10);
+ if ((bs == LONG_MAX) || (bs == LONG_MIN) || (tmp && *tmp)) {
+ fprintf(stderr,
+ "invalid argument for maximum block"
+ "size (%s)\n",
+ arg);
+ return -1;
+ }
+
+ rdd_config.max_bs = bs;
+ } break;
+
+ case 'r': {
+ char *tmp = NULL;
+ long iters = 0;
+ iters = strtol(arg, &tmp, 10);
+ if ((iters == LONG_MAX) || (iters == LONG_MIN) || (tmp && *tmp)) {
+ fprintf(stderr,
+ "invalid argument for iterations"
+ "(%s)\n",
+ arg);
+ return -1;
+ }
+
+ rdd_config.iters = iters;
+ } break;
+
+ case 'm': {
+ char *tmp = NULL;
+ long max_ops = 0;
+ max_ops = strtol(arg, &tmp, 10);
+ if ((max_ops == LONG_MAX) || (max_ops == LONG_MIN) ||
+ (tmp && *tmp)) {
+ fprintf(stderr,
+ "invalid argument for max-ops"
+ "(%s)\n",
+ arg);
+ return -1;
+ }
+
+ rdd_config.max_ops_per_seq = max_ops;
+ } break;
+
+ case 't': {
+ char *tmp = NULL;
+ long threads = 0;
+ threads = strtol(arg, &tmp, 10);
+ if ((threads == LONG_MAX) || (threads == LONG_MIN) ||
+ (tmp && *tmp)) {
+ fprintf(stderr,
+ "invalid argument for thread count"
+ "(%s)\n",
+ arg);
+ return -1;
+ }
- rdd_config.thread_count = threads;
- }
- break;
+ rdd_config.thread_count = threads;
+ } break;
case ARGP_KEY_NO_ARGS:
- break;
+ break;
case ARGP_KEY_ARG:
- break;
+ break;
case ARGP_KEY_END:
- if (_state->argc == 1) {
- argp_usage (_state);
- }
+ if (_state->argc == 1) {
+ argp_usage(_state);
+ }
+ }
- }
-
- return 0;
+ return 0;
}
int
-string2bytesize (const char *str, unsigned long long *n)
+string2bytesize(const char *str, unsigned long long *n)
{
- unsigned long long value = 0ULL;
- char *tail = NULL;
- int old_errno = 0;
- const char *s = NULL;
-
- if (str == NULL || n == NULL)
- {
- errno = EINVAL;
- return -1;
- }
-
- for (s = str; *s != '\0'; s++)
- {
- if (isspace (*s))
- {
- continue;
- }
- if (*s == '-')
- {
- return -1;
- }
- break;
+ unsigned long long value = 0ULL;
+ char *tail = NULL;
+ int old_errno = 0;
+ const char *s = NULL;
+
+ if (str == NULL || n == NULL) {
+ errno = EINVAL;
+ return -1;
+ }
+
+ for (s = str; *s != '\0'; s++) {
+ if (isspace(*s)) {
+ continue;
}
-
- old_errno = errno;
- errno = 0;
- value = strtoull (str, &tail, 10);
-
- if (errno == ERANGE || errno == EINVAL)
- {
- return -1;
+ if (*s == '-') {
+ return -1;
}
-
- if (errno == 0)
- {
- errno = old_errno;
+ break;
+ }
+
+ old_errno = errno;
+ errno = 0;
+ value = strtoull(str, &tail, 10);
+
+ if (errno == ERANGE || errno == EINVAL) {
+ return -1;
+ }
+
+ if (errno == 0) {
+ errno = old_errno;
+ }
+
+ if (tail[0] != '\0') {
+ if (strcasecmp(tail, UNIT_KB_STRING) == 0) {
+ value *= UNIT_KB;
+ } else if (strcasecmp(tail, UNIT_MB_STRING) == 0) {
+ value *= UNIT_MB;
+ } else if (strcasecmp(tail, UNIT_GB_STRING) == 0) {
+ value *= UNIT_GB;
+ } else if (strcasecmp(tail, UNIT_TB_STRING) == 0) {
+ value *= UNIT_TB;
+ } else if (strcasecmp(tail, UNIT_PB_STRING) == 0) {
+ value *= UNIT_PB;
}
- if (tail[0] != '\0')
- {
- if (strcasecmp (tail, UNIT_KB_STRING) == 0)
- {
- value *= UNIT_KB;
- }
- else if (strcasecmp (tail, UNIT_MB_STRING) == 0)
- {
- value *= UNIT_MB;
- }
- else if (strcasecmp (tail, UNIT_GB_STRING) == 0)
- {
- value *= UNIT_GB;
- }
- else if (strcasecmp (tail, UNIT_TB_STRING) == 0)
- {
- value *= UNIT_TB;
- }
- else if (strcasecmp (tail, UNIT_PB_STRING) == 0)
- {
- value *= UNIT_PB;
- }
-
- else
- {
- return -1;
- }
+ else {
+ return -1;
}
+ }
- *n = value;
+ *n = value;
- return 0;
+ return 0;
}
static struct argp_option rdd_options[] = {
- {"if", 'i', "INPUT_FILE", 0, "input-file"},
- {"of", 'o', "OUTPUT_FILE", 0, "output-file"},
- {"threads", 't', "COUNT", 0, "number of threads to spawn (defaults to 2)"},
- {"min-bs", RDD_MIN_BS_KEY, "MIN_BLOCK_SIZE", 0,
- "Minimum block size in bytes (defaults to 1024)"},
- {"max-bs", RDD_MAX_BS_KEY, "MAX_BLOCK_SIZE", 0,
- "Maximum block size in bytes (defaults to 4096)"},
- {"iters", 'r', "ITERS", 0,
- "Number of read-write sequences (defaults to 1000000)"},
- {"max-ops", 'm', "MAXOPS", 0,
- "maximum number of read-writes to be performed in a sequence (defaults to 1)"},
- {"file-size", 'f', "FILESIZE", 0,
- "the size of the file which will be created and upon it I/O will be done"
- " (defaults to 100MB"},
- {0, 0, 0, 0, 0}
-};
+ {"if", 'i', "INPUT_FILE", 0, "input-file"},
+ {"of", 'o', "OUTPUT_FILE", 0, "output-file"},
+ {"threads", 't', "COUNT", 0, "number of threads to spawn (defaults to 2)"},
+ {"min-bs", RDD_MIN_BS_KEY, "MIN_BLOCK_SIZE", 0,
+ "Minimum block size in bytes (defaults to 1024)"},
+ {"max-bs", RDD_MAX_BS_KEY, "MAX_BLOCK_SIZE", 0,
+ "Maximum block size in bytes (defaults to 4096)"},
+ {"iters", 'r', "ITERS", 0,
+ "Number of read-write sequences (defaults to 1000000)"},
+ {"max-ops", 'm', "MAXOPS", 0,
+ "maximum number of read-writes to be performed in a sequence (defaults to "
+ "1)"},
+ {"file-size", 'f', "FILESIZE", 0,
+ "the size of the file which will be created and upon it I/O will be done"
+ " (defaults to 100MB"},
+ {0, 0, 0, 0, 0}};
static struct argp argp = {
- rdd_options,
- rdd_parse_opts,
- "",
- "random dd - tool to do a sequence of random block-sized continuous"
- "read writes starting at a random offset"
-};
-
+ rdd_options, rdd_parse_opts, "",
+ "random dd - tool to do a sequence of random block-sized continuous"
+ "read writes starting at a random offset"};
static void
-rdd_default_config (void)
+rdd_default_config(void)
{
- char *tmp_path = "rdd.in";
-
- rdd_config.thread_count = 2;
- rdd_config.iters = 1000000;
- rdd_config.max_bs = 4096;
- rdd_config.min_bs = 1024;
- rdd_config.in_file.fd = rdd_config.out_file.fd = -1;
- rdd_config.max_ops_per_seq = 1;
- strncpy (rdd_config.in_file.path, tmp_path, strlen (tmp_path));
- rdd_config.file_size = 104857600;
-
- return;
+ char *tmp_path = "rdd.in";
+
+ rdd_config.thread_count = 2;
+ rdd_config.iters = 1000000;
+ rdd_config.max_bs = 4096;
+ rdd_config.min_bs = 1024;
+ rdd_config.in_file.fd = rdd_config.out_file.fd = -1;
+ rdd_config.max_ops_per_seq = 1;
+ strncpy(rdd_config.in_file.path, tmp_path, strlen(tmp_path));
+ rdd_config.file_size = 104857600;
+
+ return;
}
-
static char
-rdd_valid_config (void)
+rdd_valid_config(void)
{
- char ret = 1;
- int fd = -1;
+ char ret = 1;
+ int fd = -1;
- fd = open (rdd_config.in_file.path, O_RDONLY);
- if (fd == -1 && (errno != ENOENT)) {
- fprintf (stderr, "open: (%s)", strerror (errno));
- ret = 0;
- goto out;
- }
- close (fd);
-
- if (rdd_config.min_bs > rdd_config.max_bs) {
- fprintf (stderr, "minimum blocksize %ld is greater than the "
- "maximum blocksize %ld", rdd_config.min_bs,
- rdd_config.max_bs);
- ret = 0;
- goto out;
- }
+ fd = open(rdd_config.in_file.path, O_RDONLY);
+ if (fd == -1 && (errno != ENOENT)) {
+ fprintf(stderr, "open: (%s)", strerror(errno));
+ ret = 0;
+ goto out;
+ }
+ close(fd);
+
+ if (rdd_config.min_bs > rdd_config.max_bs) {
+ fprintf(stderr,
+ "minimum blocksize %ld is greater than the "
+ "maximum blocksize %ld",
+ rdd_config.min_bs, rdd_config.max_bs);
+ ret = 0;
+ goto out;
+ }
- if (strlen (rdd_config.out_file.path) == 0) {
- sprintf (rdd_config.out_file.path, "%s.rddout",
- rdd_config.in_file.path);
- }
+ if (strlen(rdd_config.out_file.path) == 0) {
+ sprintf(rdd_config.out_file.path, "%s.rddout", rdd_config.in_file.path);
+ }
out:
- return ret;
+ return ret;
}
-
static void *
-rdd_read_write (void *arg)
+rdd_read_write(void *arg)
{
- int i = 0, ret = 0;
- size_t bs = 0;
- off_t offset = 0;
- long rand = 0;
- long max_ops = 0;
- char *buf = NULL;
-
- buf = calloc (1, rdd_config.max_bs);
- if (!buf) {
- fprintf (stderr, "calloc failed (%s)\n", strerror (errno));
+ int i = 0, ret = 0;
+ size_t bs = 0;
+ off_t offset = 0;
+ long rand = 0;
+ long max_ops = 0;
+ char *buf = NULL;
+
+ buf = calloc(1, rdd_config.max_bs);
+ if (!buf) {
+ fprintf(stderr, "calloc failed (%s)\n", strerror(errno));
+ ret = -1;
+ goto out;
+ }
+
+ for (i = 0; i < rdd_config.iters; i++) {
+ pthread_mutex_lock(&rdd_config.lock);
+ {
+ int bytes = 0;
+ rand = random();
+
+ if (rdd_config.min_bs == rdd_config.max_bs) {
+ bs = rdd_config.max_bs;
+ } else {
+ bs = rdd_config.min_bs +
+ (rand % (rdd_config.max_bs - rdd_config.min_bs));
+ }
+
+ offset = rand % rdd_config.in_file.st.st_size;
+ max_ops = rand % rdd_config.max_ops_per_seq;
+ if (!max_ops) {
+ max_ops++;
+ }
+
+ ret = lseek(rdd_config.in_file.fd, offset, SEEK_SET);
+ if (ret != offset) {
+ fprintf(stderr, "lseek failed (%s)\n", strerror(errno));
ret = -1;
- goto out;
- }
+ goto unlock;
+ }
- for (i = 0; i < rdd_config.iters; i++)
- {
- pthread_mutex_lock (&rdd_config.lock);
- {
- int bytes = 0;
- rand = random ();
-
- if (rdd_config.min_bs == rdd_config.max_bs) {
- bs = rdd_config.max_bs;
- } else {
- bs = rdd_config.min_bs +
- (rand %
- (rdd_config.max_bs -
- rdd_config.min_bs));
- }
-
- offset = rand % rdd_config.in_file.st.st_size;
- max_ops = rand % rdd_config.max_ops_per_seq;
- if (!max_ops) {
- max_ops ++;
- }
-
- ret = lseek (rdd_config.in_file.fd, offset, SEEK_SET);
- if (ret != offset) {
- fprintf (stderr, "lseek failed (%s)\n",
- strerror (errno));
- ret = -1;
- goto unlock;
- }
-
- ret = lseek (rdd_config.out_file.fd, offset, SEEK_SET);
- if (ret != offset) {
- fprintf (stderr, "lseek failed (%s)\n",
- strerror (errno));
- ret = -1;
- goto unlock;
- }
-
- while (max_ops--)
- {
- bytes = read (rdd_config.in_file.fd, buf, bs);
- if (!bytes) {
- break;
- }
-
- if (bytes == -1) {
- fprintf (stderr, "read failed (%s)\n",
- strerror (errno));
- ret = -1;
- goto unlock;
- }
-
- if (write (rdd_config.out_file.fd, buf, bytes)
- != bytes) {
- fprintf (stderr, "write failed (%s)\n",
- strerror (errno));
- ret = -1;
- goto unlock;
- }
- }
+ ret = lseek(rdd_config.out_file.fd, offset, SEEK_SET);
+ if (ret != offset) {
+ fprintf(stderr, "lseek failed (%s)\n", strerror(errno));
+ ret = -1;
+ goto unlock;
+ }
+
+ while (max_ops--) {
+ bytes = read(rdd_config.in_file.fd, buf, bs);
+ if (!bytes) {
+ break;
}
- unlock:
- pthread_mutex_unlock (&rdd_config.lock);
- if (ret == -1) {
- goto out;
+
+ if (bytes == -1) {
+ fprintf(stderr, "read failed (%s)\n", strerror(errno));
+ ret = -1;
+ goto unlock;
+ }
+
+ if (write(rdd_config.out_file.fd, buf, bytes) != bytes) {
+ fprintf(stderr, "write failed (%s)\n", strerror(errno));
+ ret = -1;
+ goto unlock;
}
- ret = 0;
+ }
}
+ unlock:
+ pthread_mutex_unlock(&rdd_config.lock);
+ if (ret == -1) {
+ goto out;
+ }
+ ret = 0;
+ }
out:
- free (buf);
- pthread_barrier_wait (&rdd_config.barrier);
+ free(buf);
+ pthread_barrier_wait(&rdd_config.barrier);
- return NULL;
+ return NULL;
}
static void
-cleanup (void)
+cleanup(void)
{
- close (rdd_config.in_file.fd);
- close (rdd_config.out_file.fd);
- rdd_config.in_file.fd = rdd_config.out_file.fd = -1;
+ close(rdd_config.in_file.fd);
+ close(rdd_config.out_file.fd);
+ rdd_config.in_file.fd = rdd_config.out_file.fd = -1;
}
static int
-check_and_create (void)
+check_and_create(void)
{
- int ret = -1;
- char buf[4096] = {0,};
- struct stat stbuf = {0,};
- int fd[2] = {-1,};
- size_t total_size = -1;
-
- total_size = rdd_config.file_size;
-
- ret = stat (rdd_config.in_file.path, &stbuf);
- if (ret == -1 && (errno != ENOENT))
+ int ret = -1;
+ char buf[4096] = {
+ 0,
+ };
+ struct stat stbuf = {
+ 0,
+ };
+ int fd[2] = {
+ -1,
+ };
+ size_t total_size = -1;
+
+ total_size = rdd_config.file_size;
+
+ ret = stat(rdd_config.in_file.path, &stbuf);
+ if (ret == -1 && (errno != ENOENT))
+ goto out;
+
+ fd[1] = open(rdd_config.in_file.path, O_CREAT | O_WRONLY | O_TRUNC);
+ if (fd[1] == -1)
+ goto out;
+
+ fd[0] = open("/dev/urandom", O_RDONLY);
+ if (fd[0] == -1)
+ goto out;
+
+ while (total_size > 0) {
+ if (total_size >= 4096) {
+ ret = read(fd[0], buf, 4096);
+ if (ret == -1)
goto out;
-
- fd[1] = open (rdd_config.in_file.path, O_CREAT | O_WRONLY | O_TRUNC);
- if (fd[1] == -1)
+ ret = write(fd[1], buf, 4096);
+ if (ret == -1)
goto out;
-
- fd[0] = open ("/dev/urandom", O_RDONLY);
- if (fd[0] == -1)
+ total_size = total_size - 4096;
+ } else {
+ ret = read(fd[0], buf, total_size);
+ if (ret == -1)
goto out;
-
- while (total_size > 0) {
- if (total_size >= 4096) {
- ret = read (fd[0], buf, 4096);
- if (ret == -1)
- goto out;
- ret = write (fd[1], buf, 4096);
- if (ret == -1)
- goto out;
- total_size = total_size - 4096;
- } else {
- ret = read (fd[0], buf, total_size);
- if (ret == -1)
- goto out;
- ret = write (fd[1], buf, total_size);
- if (ret == -1)
- goto out;
- total_size = total_size - total_size;
- }
-
+ ret = write(fd[1], buf, total_size);
+ if (ret == -1)
+ goto out;
+ total_size = total_size - total_size;
}
+ }
- ret = 0;
+ ret = 0;
out:
- if (fd[0] > 0)
- close (fd[0]);
- if (fd[1] > 0)
- close (fd[1]);
- return ret;
+ if (fd[0] > 0)
+ close(fd[0]);
+ if (fd[1] > 0)
+ close(fd[1]);
+ return ret;
}
static int
-rdd_spawn_threads (void)
+rdd_spawn_threads(void)
{
- int i = 0, ret = -1, fd = -1;
- char buf[4096];
-
- ret = check_and_create ();
- if (ret == -1)
- goto out;
-
- fd = open (rdd_config.in_file.path, O_RDONLY);
- if (fd < 0) {
- fprintf (stderr, "cannot open %s (%s)\n",
- rdd_config.in_file.path, strerror (errno));
- ret = -1;
- goto out;
- }
- ret = fstat (fd, &rdd_config.in_file.st);
- if (ret != 0) {
- close (fd);
- fprintf (stderr, "cannot stat %s (%s)\n",
- rdd_config.in_file.path, strerror (errno));
- ret = -1;
- goto out;
- }
- rdd_config.in_file.fd = fd;
-
- fd = open (rdd_config.out_file.path, O_WRONLY | O_CREAT | O_TRUNC,
- S_IRWXU | S_IROTH);
- if (fd < 0) {
- close (rdd_config.in_file.fd);
- rdd_config.in_file.fd = -1;
- fprintf (stderr, "cannot open %s (%s)\n",
- rdd_config.out_file.path, strerror (errno));
- ret = -1;
- goto out;
- }
- rdd_config.out_file.fd = fd;
-
- while ((ret = read (rdd_config.in_file.fd, buf, 4096)) > 0) {
- if (write (rdd_config.out_file.fd, buf, ret) != ret) {
- fprintf (stderr, "write failed (%s)\n",
- strerror (errno));
- cleanup ();
- ret = -1;
- goto out;
- }
- }
-
- rdd_config.threads = calloc (rdd_config.thread_count,
- sizeof (pthread_t));
- if (rdd_config.threads == NULL) {
- fprintf (stderr, "calloc() failed (%s)\n", strerror (errno));
-
- ret = -1;
- cleanup ();
- goto out;
- }
-
- ret = pthread_barrier_init (&rdd_config.barrier, NULL,
- rdd_config.thread_count + 1);
- if (ret != 0) {
- fprintf (stderr, "pthread_barrier_init() failed (%s)\n",
- strerror (ret));
-
- free (rdd_config.threads);
- cleanup ();
- ret = -1;
- goto out;
+ int i = 0, ret = -1, fd = -1;
+ char buf[4096];
+
+ ret = check_and_create();
+ if (ret == -1)
+ goto out;
+
+ fd = open(rdd_config.in_file.path, O_RDONLY);
+ if (fd < 0) {
+ fprintf(stderr, "cannot open %s (%s)\n", rdd_config.in_file.path,
+ strerror(errno));
+ ret = -1;
+ goto out;
+ }
+ ret = fstat(fd, &rdd_config.in_file.st);
+ if (ret != 0) {
+ close(fd);
+ fprintf(stderr, "cannot stat %s (%s)\n", rdd_config.in_file.path,
+ strerror(errno));
+ ret = -1;
+ goto out;
+ }
+ rdd_config.in_file.fd = fd;
+
+ fd = open(rdd_config.out_file.path, O_WRONLY | O_CREAT | O_TRUNC,
+ S_IRWXU | S_IROTH);
+ if (fd < 0) {
+ close(rdd_config.in_file.fd);
+ rdd_config.in_file.fd = -1;
+ fprintf(stderr, "cannot open %s (%s)\n", rdd_config.out_file.path,
+ strerror(errno));
+ ret = -1;
+ goto out;
+ }
+ rdd_config.out_file.fd = fd;
+
+ while ((ret = read(rdd_config.in_file.fd, buf, 4096)) > 0) {
+ if (write(rdd_config.out_file.fd, buf, ret) != ret) {
+ fprintf(stderr, "write failed (%s)\n", strerror(errno));
+ cleanup();
+ ret = -1;
+ goto out;
}
-
- ret = pthread_mutex_init (&rdd_config.lock, NULL);
+ }
+
+ rdd_config.threads = calloc(rdd_config.thread_count, sizeof(pthread_t));
+ if (rdd_config.threads == NULL) {
+ fprintf(stderr, "calloc() failed (%s)\n", strerror(errno));
+
+ ret = -1;
+ cleanup();
+ goto out;
+ }
+
+ ret = pthread_barrier_init(&rdd_config.barrier, NULL,
+ rdd_config.thread_count + 1);
+ if (ret != 0) {
+ fprintf(stderr, "pthread_barrier_init() failed (%s)\n", strerror(ret));
+
+ free(rdd_config.threads);
+ cleanup();
+ ret = -1;
+ goto out;
+ }
+
+ ret = pthread_mutex_init(&rdd_config.lock, NULL);
+ if (ret != 0) {
+ fprintf(stderr, "pthread_mutex_init() failed (%s)\n", strerror(ret));
+
+ free(rdd_config.threads);
+ pthread_barrier_destroy(&rdd_config.barrier);
+ cleanup();
+ ret = -1;
+ goto out;
+ }
+
+ for (i = 0; i < rdd_config.thread_count; i++) {
+ ret = pthread_create(&rdd_config.threads[i], NULL, rdd_read_write,
+ NULL);
if (ret != 0) {
- fprintf (stderr, "pthread_mutex_init() failed (%s)\n",
- strerror (ret));
-
- free (rdd_config.threads);
- pthread_barrier_destroy (&rdd_config.barrier);
- cleanup ();
- ret = -1;
- goto out;
- }
-
- for (i = 0; i < rdd_config.thread_count; i++)
- {
- ret = pthread_create (&rdd_config.threads[i], NULL,
- rdd_read_write, NULL);
- if (ret != 0) {
- fprintf (stderr, "pthread_create failed (%s)\n",
- strerror (errno));
- exit (1);
- }
+ fprintf(stderr, "pthread_create failed (%s)\n", strerror(errno));
+ exit(1);
}
+ }
out:
- return ret;
+ return ret;
}
static void
-rdd_wait_for_completion (void)
+rdd_wait_for_completion(void)
{
- pthread_barrier_wait (&rdd_config.barrier);
+ pthread_barrier_wait(&rdd_config.barrier);
}
-
int
-main (int argc, char *argv[])
+main(int argc, char *argv[])
{
- int ret = -1;
+ int ret = -1;
- rdd_default_config ();
+ rdd_default_config();
- ret = argp_parse (&argp, argc, argv, 0, 0, NULL);
- if (ret != 0) {
- ret = -1;
- fprintf (stderr, "%s: argp_parse() failed\n", argv[0]);
- goto err;
- }
+ ret = argp_parse(&argp, argc, argv, 0, 0, NULL);
+ if (ret != 0) {
+ ret = -1;
+ fprintf(stderr, "%s: argp_parse() failed\n", argv[0]);
+ goto err;
+ }
- if (!rdd_valid_config ()) {
- ret = -1;
- fprintf (stderr, "%s: configuration validation failed\n",
- argv[0]);
- goto err;
- }
+ if (!rdd_valid_config()) {
+ ret = -1;
+ fprintf(stderr, "%s: configuration validation failed\n", argv[0]);
+ goto err;
+ }
- ret = rdd_spawn_threads ();
- if (ret != 0) {
- fprintf (stderr, "%s: spawning threads failed\n", argv[0]);
- goto err;
- }
+ ret = rdd_spawn_threads();
+ if (ret != 0) {
+ fprintf(stderr, "%s: spawning threads failed\n", argv[0]);
+ goto err;
+ }
- rdd_wait_for_completion ();
+ rdd_wait_for_completion();
err:
- return ret;
+ return ret;
}
diff --git a/extras/checkpatch.pl b/extras/checkpatch.pl
deleted file mode 100755
index dee070c8c7c..00000000000
--- a/extras/checkpatch.pl
+++ /dev/null
@@ -1,4337 +0,0 @@
-#!/usr/bin/perl -w
-# (c) 2001, Dave Jones. (the file handling bit)
-# (c) 2005, Joel Schopp <jschopp@austin.ibm.com> (the ugly bit)
-# (c) 2007,2008, Andy Whitcroft <apw@uk.ibm.com> (new conditions, test suite)
-# (c) 2008-2010 Andy Whitcroft <apw@canonical.com>
-# (c) 2014 Gluster Community <gluster-devel@gluster.org>
-# Licensed under the terms of the GNU GPL License version 2
-
-use strict;
-use POSIX;
-
-my $P = $0;
-$P =~ s@.*/@@g;
-
-my $V = '0.32.1';
-
-use Getopt::Long qw(:config no_auto_abbrev);
-
-my $quiet = 0;
-my $tree = 1;
-my $chk_signoff = 1;
-my $chk_patch = 1;
-my $tst_only;
-my $emacs = 0;
-my $terse = 0;
-my $file = 0;
-my $check = 0;
-my $check_orig = 0;
-my $summary = 1;
-my $mailback = 0;
-my $summary_file = 0;
-my $show_types = 0;
-my $fix = 0;
-my $fix_inplace = 0;
-my $root;
-my %debug;
-my %camelcase = ();
-my %use_type = ();
-my @use = ();
-my %ignore_type = ();
-my @ignore = ();
-my $help = 0;
-my $configuration_file = ".checkpatch.conf";
-my $max_line_length = 80;
-my $ignore_perl_version = 0;
-my $minimum_perl_version = 5.10.0;
-my $gerrit_url = $ENV{GERRIT_URL};
-
-sub help {
- my ($exitcode) = @_;
-
- print << "EOM";
-Usage: $P [OPTION]... [FILE]...
-Version: $V
-
-Options:
- -q, --quiet quiet
- --patch treat FILE as patchfile (default)
- --emacs emacs compile window format
- --gerrit-url=STRING URL the patch was reviewed at
- --terse one line per report
- -f, --file treat FILE as regular source file
- --subjective, --strict enable more subjective tests
- --types TYPE(,TYPE2...) show only these comma separated message types
- --ignore TYPE(,TYPE2...) ignore various comma separated message types
- --max-line-length=n set the maximum line length, if exceeded, warn
- --show-types show the message "types" in the output
- --root=PATH PATH to the glusterfs tree root
- --no-summary suppress the per-file summary
- --mailback only produce a report in case of warnings/errors
- --summary-file include the filename in summary
- --debug KEY=[0|1] turn on/off debugging of KEY, where KEY is one of
- 'values', 'possible', 'type', and 'attr' (default
- is all off)
- --test-only=WORD report only warnings/errors containing WORD literally
- --fix EXPERIMENTAL - may create horrible results
- If correctable single-line errors exist, create
- "<inputfile>.EXPERIMENTAL-checkpatch-fixes"
- with potential errors corrected to the preferred
- checkpatch style
- --fix-inplace EXPERIMENTAL - may create horrible results
- Is the same as --fix, but overwrites the input
- file. It's your fault if there's no backup or git
- --ignore-perl-version override checking of perl version. expect
- runtime errors.
- -h, --help, --version display this help and exit
-
-When FILE is - read standard input.
-EOM
-
-exit($exitcode);
-}
-
-my $conf = which_conf($configuration_file);
-if (-f $conf) {
- my @conf_args;
- open(my $conffile, '<', "$conf")
- or warn "$P: Can't find a readable $configuration_file file $!\n";
-
- while (<$conffile>) {
- my $line = $_;
-
- $line =~ s/\s*\n?$//g;
- $line =~ s/^\s*//g;
- $line =~ s/\s+/ /g;
-
- next if ($line =~ m/^\s*#/);
- next if ($line =~ m/^\s*$/);
-
- my @words = split(" ", $line);
- foreach my $word (@words) {
- last if ($word =~ m/^#/);
- push (@conf_args, $word);
- }
- }
- close($conffile);
- unshift(@ARGV, @conf_args) if @conf_args;
-}
-
-GetOptions(
- 'q|quiet+' => \$quiet,
- 'patch!' => \$chk_patch,
- 'emacs!' => \$emacs,
- 'gerrit-url=s' => \$gerrit_url,
- 'terse!' => \$terse,
- 'f|file!' => \$file,
- 'subjective!' => \$check,
- 'strict!' => \$check,
- 'ignore=s' => \@ignore,
- 'types=s' => \@use,
- 'show-types!' => \$show_types,
- 'max-line-length=i' => \$max_line_length,
- 'root=s' => \$root,
- 'summary!' => \$summary,
- 'mailback!' => \$mailback,
- 'summary-file!' => \$summary_file,
- 'fix!' => \$fix,
- 'fix-inplace!' => \$fix_inplace,
- 'ignore-perl-version!' => \$ignore_perl_version,
- 'debug=s' => \%debug,
- 'test-only=s' => \$tst_only,
- 'h|help' => \$help,
- 'version' => \$help
-) or help(1);
-
-help(0) if ($help);
-
-$fix = 1 if ($fix_inplace);
-$check_orig = $check;
-
-my $exit = 0;
-
-if ($^V && $^V lt $minimum_perl_version) {
- printf "$P: requires at least perl version %vd\n", $minimum_perl_version;
- if (!$ignore_perl_version) {
- exit(1);
- }
-}
-
-if ($#ARGV < 0) {
- print "$P: no input files\n";
- exit(1);
-}
-
-sub hash_save_array_words {
- my ($hashRef, $arrayRef) = @_;
-
- my @array = split(/,/, join(',', @$arrayRef));
- foreach my $word (@array) {
- $word =~ s/\s*\n?$//g;
- $word =~ s/^\s*//g;
- $word =~ s/\s+/ /g;
- $word =~ tr/[a-z]/[A-Z]/;
-
- next if ($word =~ m/^\s*#/);
- next if ($word =~ m/^\s*$/);
-
- $hashRef->{$word}++;
- }
-}
-
-sub hash_show_words {
- my ($hashRef, $prefix) = @_;
-
- if ($quiet == 0 && keys %$hashRef) {
- print "NOTE: $prefix message types:";
- foreach my $word (sort keys %$hashRef) {
- print " $word";
- }
- print "\n\n";
- }
-}
-
-hash_save_array_words(\%ignore_type, \@ignore);
-hash_save_array_words(\%use_type, \@use);
-
-my $dbg_values = 0;
-my $dbg_possible = 0;
-my $dbg_type = 0;
-my $dbg_attr = 0;
-for my $key (keys %debug) {
- ## no critic
- eval "\${dbg_$key} = '$debug{$key}';";
- die "$@" if ($@);
-}
-
-my $rpt_cleaners = 0;
-
-if ($terse) {
- $emacs = 1;
- $quiet++;
-}
-
-if ($tree) {
- if (defined $root) {
- if (!top_of_glusterfs_tree($root)) {
- die "$P: $root: --root does not point at a valid tree\n";
- }
- } else {
- if (top_of_glusterfs_tree('.')) {
- $root = '.';
- } elsif ($0 =~ m@(.*)/extras/[^/]*$@ &&
- top_of_glusterfs_tree($1)) {
- $root = $1;
- }
- }
-
- if (!defined $root) {
- print "Must be run from the top-level dir. of a GlusterFS tree\n";
- exit(2);
- }
-}
-
-my $emitted_corrupt = 0;
-
-our $Ident = qr{
- [A-Za-z_][A-Za-z\d_]*
- (?:\s*\#\#\s*[A-Za-z_][A-Za-z\d_]*)*
- }x;
-our $Storage = qr{extern|static|asmlinkage};
-our $Sparse = qr{
- __user|
- __kernel|
- __force|
- __iomem|
- __must_check|
- __init_refok|
- __kprobes|
- __ref|
- __rcu
- }x;
-our $InitAttributePrefix = qr{__(?:mem|cpu|dev|net_|)};
-our $InitAttributeData = qr{$InitAttributePrefix(?:initdata\b)};
-our $InitAttributeConst = qr{$InitAttributePrefix(?:initconst\b)};
-our $InitAttributeInit = qr{$InitAttributePrefix(?:init\b)};
-our $InitAttribute = qr{$InitAttributeData|$InitAttributeConst|$InitAttributeInit};
-
-# Notes to $Attribute:
-# We need \b after 'init' otherwise 'initconst' will cause a false positive in a check
-our $Attribute = qr{
- const|
- __percpu|
- __nocast|
- __safe|
- __bitwise__|
- __packed__|
- __packed2__|
- __naked|
- __maybe_unused|
- __always_unused|
- __noreturn|
- __used|
- __cold|
- __noclone|
- __deprecated|
- __read_mostly|
- __kprobes|
- $InitAttribute|
- ____cacheline_aligned|
- ____cacheline_aligned_in_smp|
- ____cacheline_internodealigned_in_smp|
- __weak
- }x;
-our $Modifier;
-our $Inline = qr{inline|__always_inline|noinline|__inline|__inline__};
-our $Member = qr{->$Ident|\.$Ident|\[[^]]*\]};
-our $Lval = qr{$Ident(?:$Member)*};
-
-our $Int_type = qr{(?i)llu|ull|ll|lu|ul|l|u};
-our $Binary = qr{(?i)0b[01]+$Int_type?};
-our $Hex = qr{(?i)0x[0-9a-f]+$Int_type?};
-our $Int = qr{[0-9]+$Int_type?};
-our $Octal = qr{0[0-7]+$Int_type?};
-our $Float_hex = qr{(?i)0x[0-9a-f]+p-?[0-9]+[fl]?};
-our $Float_dec = qr{(?i)(?:[0-9]+\.[0-9]*|[0-9]*\.[0-9]+)(?:e-?[0-9]+)?[fl]?};
-our $Float_int = qr{(?i)[0-9]+e-?[0-9]+[fl]?};
-our $Float = qr{$Float_hex|$Float_dec|$Float_int};
-our $Constant = qr{$Float|$Binary|$Octal|$Hex|$Int};
-our $Assignment = qr{\*\=|/=|%=|\+=|-=|<<=|>>=|&=|\^=|\|=|=};
-our $Compare = qr{<=|>=|==|!=|<|(?<!-)>};
-our $Arithmetic = qr{\+|-|\*|\/|%};
-our $Operators = qr{
- <=|>=|==|!=|
- =>|->|<<|>>|<|>|!|~|
- &&|\|\||,|\^|\+\+|--|&|\||$Arithmetic
- }x;
-
-our $c90_Keywords = qr{do|for|while|if|else|return|goto|continue|switch|default|case|break}x;
-
-our $NonptrType;
-our $NonptrTypeWithAttr;
-our $Type;
-our $Declare;
-
-our $NON_ASCII_UTF8 = qr{
- [\xC2-\xDF][\x80-\xBF] # non-overlong 2-byte
- | \xE0[\xA0-\xBF][\x80-\xBF] # excluding overlongs
- | [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2} # straight 3-byte
- | \xED[\x80-\x9F][\x80-\xBF] # excluding surrogates
- | \xF0[\x90-\xBF][\x80-\xBF]{2} # planes 1-3
- | [\xF1-\xF3][\x80-\xBF]{3} # planes 4-15
- | \xF4[\x80-\x8F][\x80-\xBF]{2} # plane 16
-}x;
-
-our $UTF8 = qr{
- [\x09\x0A\x0D\x20-\x7E] # ASCII
- | $NON_ASCII_UTF8
-}x;
-
-our $typeTypedefs = qr{(?x:
- (?:__)?(?:u|s|be|le)(?:8|16|32|64)|
- atomic_t
-)};
-
-our $logFunctions = qr{(?x:
- printk(?:_ratelimited|_once|)|
- (?:[a-z0-9]+_){1,2}(?:printk|emerg|alert|crit|err|warning|warn|notice|info|debug|dbg|vdbg|devel|cont|WARN)(?:_ratelimited|_once|)|
- WARN(?:_RATELIMIT|_ONCE|)|
- panic|
- MODULE_[A-Z_]+|
- seq_vprintf|seq_printf|seq_puts
-)};
-
-our $signature_tags = qr{(?xi:
- Signed-off-by:|
- Acked-by:|
- Tested-by:|
- Reviewed-by:|
- Reviewed-on:|
- Reported-by:|
- Original-author:|
- Original-Author:|
- Original-Authors:|
- Suggested-by:|
- To:|
- Cc:
-)};
-
-our $url_tags = qr{http:|https:};
-
-our @typeList = (
- qr{void},
- qr{(?:unsigned\s+)?char},
- qr{(?:unsigned\s+)?short},
- qr{(?:unsigned\s+)?int},
- qr{(?:unsigned\s+)?long},
- qr{(?:unsigned\s+)?long\s+int},
- qr{(?:unsigned\s+)?long\s+long},
- qr{(?:unsigned\s+)?long\s+long\s+int},
- qr{unsigned},
- qr{float},
- qr{double},
- qr{bool},
- qr{struct\s+$Ident},
- qr{union\s+$Ident},
- qr{enum\s+$Ident},
- qr{${Ident}_t},
- qr{${Ident}_handler},
- qr{${Ident}_handler_fn},
-);
-our @typeListWithAttr = (
- @typeList,
- qr{struct\s+$InitAttribute\s+$Ident},
- qr{union\s+$InitAttribute\s+$Ident},
-);
-
-our @modifierList = (
- qr{fastcall},
-);
-
-our @mode_permission_funcs = (
- ["module_param", 3],
- ["module_param_(?:array|named|string)", 4],
- ["module_param_array_named", 5],
- ["debugfs_create_(?:file|u8|u16|u32|u64|x8|x16|x32|x64|size_t|atomic_t|bool|blob|regset32|u32_array)", 2],
- ["proc_create(?:_data|)", 2],
- ["(?:CLASS|DEVICE|SENSOR)_ATTR", 2],
-);
-
-#Create a search pattern for all these functions to speed up a loop below
-our $mode_perms_search = "";
-foreach my $entry (@mode_permission_funcs) {
- $mode_perms_search .= '|' if ($mode_perms_search ne "");
- $mode_perms_search .= $entry->[0];
-}
-
-our $declaration_macros = qr{(?x:
- (?:$Storage\s+)?(?:DECLARE|DEFINE)_[A-Z]+\s*\(|
- (?:$Storage\s+)?LIST_HEAD\s*\(
-)};
-
-our $allowed_asm_includes = qr{(?x:
- irq|
- memory
-)};
-# memory.h: ARM has a custom one
-
-sub build_types {
- my $mods = "(?x:\n" . join("|\n ", @modifierList) . "\n)";
- my $all = "(?x:\n" . join("|\n ", @typeList) . "\n)";
- my $allWithAttr = "(?x:\n" . join("|\n ", @typeListWithAttr) . "\n)";
- $Modifier = qr{(?:$Attribute|$Sparse|$mods)};
- $NonptrType = qr{
- (?:$Modifier\s+|const\s+)*
- (?:
- (?:typeof|__typeof__)\s*\([^\)]*\)|
- (?:$typeTypedefs\b)|
- (?:${all}\b)
- )
- (?:\s+$Modifier|\s+const)*
- }x;
- $NonptrTypeWithAttr = qr{
- (?:$Modifier\s+|const\s+)*
- (?:
- (?:typeof|__typeof__)\s*\([^\)]*\)|
- (?:$typeTypedefs\b)|
- (?:${allWithAttr}\b)
- )
- (?:\s+$Modifier|\s+const)*
- }x;
- $Type = qr{
- $NonptrType
- (?:(?:\s|\*|\[\])+\s*const|(?:\s|\*|\[\])+|(?:\s*\[\s*\])+)?
- (?:\s+$Inline|\s+$Modifier)*
- }x;
- $Declare = qr{(?:$Storage\s+(?:$Inline\s+)?)?$Type};
-}
-build_types();
-
-our $Typecast = qr{\s*(\(\s*$NonptrType\s*\)){0,1}\s*};
-
-# Using $balanced_parens, $LvalOrFunc, or $FuncArg
-# requires at least perl version v5.10.0
-# Any use must be runtime checked with $^V
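-# ($balanced_parens relies on the recursive (?-1) subpattern, so it matches
-# parentheses nested to any depth.)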
-
-our $balanced_parens = qr/(\((?:[^\(\)]++|(?-1))*\))/;
-our $LvalOrFunc = qr{((?:[\&\*]\s*)?$Lval)\s*($balanced_parens{0,1})\s*};
-our $FuncArg = qr{$Typecast{0,1}($LvalOrFunc|$Constant)};
-
-sub deparenthesize {
- my ($string) = @_;
- return "" if (!defined($string));
-
- while ($string =~ /^\s*\(.*\)\s*$/) {
- $string =~ s@^\s*\(\s*@@;
- $string =~ s@\s*\)\s*$@@;
- }
-
- $string =~ s@\s+@ @g;
-
- return $string;
-}
-
-sub seed_camelcase_file {
- my ($file) = @_;
-
- return if (!(-f $file));
-
- local $/;
-
- open(my $include_file, '<', "$file")
- or warn "$P: Can't read '$file' $!\n";
- my $text = <$include_file>;
- close($include_file);
-
- my @lines = split('\n', $text);
-
- foreach my $line (@lines) {
- next if ($line !~ /(?:[A-Z][a-z]|[a-z][A-Z])/);
- if ($line =~ /^[ \t]*(?:#[ \t]*define|typedef\s+$Type)\s+(\w*(?:[A-Z][a-z]|[a-z][A-Z])\w*)/) {
- $camelcase{$1} = 1;
- } elsif ($line =~ /^\s*$Declare\s+(\w*(?:[A-Z][a-z]|[a-z][A-Z])\w*)\s*[\(\[,;]/) {
- $camelcase{$1} = 1;
- } elsif ($line =~ /^\s*(?:union|struct|enum)\s+(\w*(?:[A-Z][a-z]|[a-z][A-Z])\w*)\s*[;\{]/) {
- $camelcase{$1} = 1;
- }
- }
-}
-
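-# Seed %camelcase from the tree's header files, caching the result in a
-# .checkpatch-camelcase.* file keyed by the latest include/ commit (git trees)
-# or the newest header mtime (non-git trees).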
-my $camelcase_seeded = 0;
-sub seed_camelcase_includes {
- return if ($camelcase_seeded);
-
- my $files;
- my $camelcase_cache = "";
- my @include_files = ();
-
- $camelcase_seeded = 1;
-
- if (-e ".git") {
- my $git_last_include_commit = `git log --no-merges --pretty=format:"%h%n" -1 -- include`;
- chomp $git_last_include_commit;
- $camelcase_cache = ".checkpatch-camelcase.git.$git_last_include_commit";
- } else {
- my $last_mod_date = 0;
- $files = `find $root/include -name "*.h"`;
- @include_files = split('\n', $files);
- foreach my $file (@include_files) {
- my $date = POSIX::strftime("%Y%m%d%H%M",
- localtime((stat $file)[9]));
- $last_mod_date = $date if ($last_mod_date < $date);
- }
- $camelcase_cache = ".checkpatch-camelcase.date.$last_mod_date";
- }
-
- if ($camelcase_cache ne "" && -f $camelcase_cache) {
- open(my $camelcase_file, '<', "$camelcase_cache")
- or warn "$P: Can't read '$camelcase_cache' $!\n";
- while (<$camelcase_file>) {
- chomp;
- $camelcase{$_} = 1;
- }
- close($camelcase_file);
- return;
- }
-
- if (-e ".git") {
- $files = `git ls-files "include/*.h"`;
- @include_files = split('\n', $files);
- }
-
- foreach my $file (@include_files) {
- seed_camelcase_file($file);
- }
-
- if ($camelcase_cache ne "") {
- unlink glob ".checkpatch-camelcase.*";
- open(my $camelcase_file, '>', "$camelcase_cache")
- or warn "$P: Can't write '$camelcase_cache' $!\n";
- foreach (sort { lc($a) cmp lc($b) } keys(%camelcase)) {
- print $camelcase_file ("$_\n");
- }
- close($camelcase_file);
- }
-}
-
-$chk_signoff = 0 if ($file);
-
-my @rawlines = ();
-my @lines = ();
-my @fixed = ();
-my $vname;
-for my $filename (@ARGV) {
- my $FILE;
- if ($file) {
- open($FILE, '-|', "diff -u /dev/null $filename") ||
- die "$P: $filename: diff failed - $!\n";
- } elsif ($filename eq '-') {
- open($FILE, '<&STDIN');
- } else {
- open($FILE, '<', "$filename") ||
- die "$P: $filename: open failed - $!\n";
- }
- if ($filename eq '-') {
- $vname = 'Your patch';
- } else {
- $vname = $filename;
- }
- while (<$FILE>) {
- chomp;
- push(@rawlines, $_);
- }
- close($FILE);
- if (!process($filename)) {
- $exit = 1;
- }
- @rawlines = ();
- @lines = ();
- @fixed = ();
-}
-
-exit($exit);
-
-sub top_of_glusterfs_tree {
- my ($root) = @_;
-
- # Add here if the tree changes
- my @tree_check = (
- "api",
- "AUTHORS",
- "autogen.sh",
- "build-aux",
- "ChangeLog",
- "cli",
- "configure.ac",
- "contrib",
- "CONTRIBUTING",
- "COPYING-GPLV2",
- "COPYING-LGPLV3",
- "doc",
- "extras",
- "geo-replication",
- "glusterfs-api.pc.in",
- "glusterfsd",
- "glusterfs.spec.in",
- "heal",
- "INSTALL",
- "libgfchangelog.pc.in",
- "libglusterfs",
- "MAINTAINERS",
- "Makefile.am",
- "NEWS",
- "README.md",
- "rfc.sh",
- "rpc",
- "run-tests.sh",
- "tests",
- "THANKS",
- "xlators",
- );
-
- foreach my $check (@tree_check) {
- if (! -e $root . '/' . $check) {
- return 0;
- }
- }
- return 1;
-}
-
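-# parse_email(): split "Name <addr>", "<addr>" or a bare address into
-# (name, address, trailing comment).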
-sub parse_email {
- my ($formatted_email) = @_;
-
- my $name = "";
- my $address = "";
- my $comment = "";
-
- if ($formatted_email =~ /^(.*)<(\S+\@\S+)>(.*)$/) {
- $name = $1;
- $address = $2;
- $comment = $3 if defined $3;
- } elsif ($formatted_email =~ /^\s*<(\S+\@\S+)>(.*)$/) {
- $address = $1;
- $comment = $2 if defined $2;
- } elsif ($formatted_email =~ /(\S+\@\S+)(.*)$/) {
- $address = $1;
- $comment = $2 if defined $2;
- $formatted_email =~ s/$address.*$//;
- $name = $formatted_email;
- $name = trim($name);
- $name =~ s/^\"|\"$//g;
- # If there's a name left after stripping spaces and
- # leading quotes, and the address doesn't have both
- # leading and trailing angle brackets, the address
-		# is invalid, i.e.:
- # "joe smith joe@smith.com" bad
- # "joe smith <joe@smith.com" bad
- if ($name ne "" && $address !~ /^<[^>]+>$/) {
- $name = "";
- $address = "";
- $comment = "";
- }
- }
-
- $name = trim($name);
- $name =~ s/^\"|\"$//g;
- $address = trim($address);
- $address =~ s/^\<|\>$//g;
-
- if ($name =~ /[^\w \-]/i) { ##has "must quote" chars
- $name =~ s/(?<!\\)"/\\"/g; ##escape quotes
- $name = "\"$name\"";
- }
-
- return ($name, $address, $comment);
-}
-
-sub format_email {
- my ($name, $address) = @_;
-
- my $formatted_email;
-
- $name = trim($name);
- $name =~ s/^\"|\"$//g;
- $address = trim($address);
-
- if ($name =~ /[^\w \-]/i) { ##has "must quote" chars
- $name =~ s/(?<!\\)"/\\"/g; ##escape quotes
- $name = "\"$name\"";
- }
-
- if ("$name" eq "") {
- $formatted_email = "$address";
- } else {
- $formatted_email = "$name <$address>";
- }
-
- return $formatted_email;
-}
-
-sub which_conf {
- my ($conf) = @_;
-
- foreach my $path (split(/:/, ".:$ENV{HOME}:.scripts")) {
- if (-e "$path/$conf") {
- return "$path/$conf";
- }
- }
-
- return "";
-}
-
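-# expand_tabs(): expand tabs to 8-column tab stops so length and indent
-# checks measure the rendered width of a line.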
-sub expand_tabs {
- my ($str) = @_;
-
- my $res = '';
- my $n = 0;
- for my $c (split(//, $str)) {
- if ($c eq "\t") {
- $res .= ' ';
- $n++;
- for (; ($n % 8) != 0; $n++) {
- $res .= ' ';
- }
- next;
- }
- $res .= $c;
- $n++;
- }
- return $res;
-}
-sub copy_spacing {
- (my $res = shift) =~ tr/\t/ /c;
- return $res;
-}
-
-sub line_stats {
- my ($line) = @_;
-
- # Drop the diff line leader and expand tabs
- $line =~ s/^.//;
- $line = expand_tabs($line);
-
- # Pick the indent from the front of the line.
- my ($white) = ($line =~ /^(\s*)/);
-
- return (length($line), length($white));
-}
-
-my $sanitise_quote = '';
-
-sub sanitise_line_reset {
- my ($in_comment) = @_;
-
- if ($in_comment) {
- $sanitise_quote = '*/';
- } else {
- $sanitise_quote = '';
- }
-}
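-# sanitise_line(): overwrite the contents of comments and string/char
-# literals with placeholder characters (preserving line length) so the
-# checks below never match tokens inside them.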
-sub sanitise_line {
- my ($line) = @_;
-
- my $res = '';
- my $l = '';
-
- my $qlen = 0;
- my $off = 0;
- my $c;
-
- # Always copy over the diff marker.
- $res = substr($line, 0, 1);
-
- for ($off = 1; $off < length($line); $off++) {
- $c = substr($line, $off, 1);
-
-		# Comments we are whacking completely, including the begin
-		# and end, all to $;.
- if ($sanitise_quote eq '' && substr($line, $off, 2) eq '/*') {
- $sanitise_quote = '*/';
-
- substr($res, $off, 2, "$;$;");
- $off++;
- next;
- }
- if ($sanitise_quote eq '*/' && substr($line, $off, 2) eq '*/') {
- $sanitise_quote = '';
- substr($res, $off, 2, "$;$;");
- $off++;
- next;
- }
- if ($sanitise_quote eq '' && substr($line, $off, 2) eq '//') {
- $sanitise_quote = '//';
-
- substr($res, $off, 2, $sanitise_quote);
- $off++;
- next;
- }
-
- # A \ in a string means ignore the next character.
- if (($sanitise_quote eq "'" || $sanitise_quote eq '"') &&
- $c eq "\\") {
- substr($res, $off, 2, 'XX');
- $off++;
- next;
- }
- # Regular quotes.
- if ($c eq "'" || $c eq '"') {
- if ($sanitise_quote eq '') {
- $sanitise_quote = $c;
-
- substr($res, $off, 1, $c);
- next;
- } elsif ($sanitise_quote eq $c) {
- $sanitise_quote = '';
- }
- }
-
- #print "c<$c> SQ<$sanitise_quote>\n";
- if ($off != 0 && $sanitise_quote eq '*/' && $c ne "\t") {
- substr($res, $off, 1, $;);
- } elsif ($off != 0 && $sanitise_quote eq '//' && $c ne "\t") {
- substr($res, $off, 1, $;);
- } elsif ($off != 0 && $sanitise_quote && $c ne "\t") {
- substr($res, $off, 1, 'X');
- } else {
- substr($res, $off, 1, $c);
- }
- }
-
- if ($sanitise_quote eq '//') {
- $sanitise_quote = '';
- }
-
- # The pathname on a #include may be surrounded by '<' and '>'.
- if ($res =~ /^.\s*\#\s*include\s+\<(.*)\>/) {
- my $clean = 'X' x length($1);
- $res =~ s@\<.*\>@<$clean>@;
-
- # The whole of a #error is a string.
- } elsif ($res =~ /^.\s*\#\s*(?:error|warning)\s+(.*)\b/) {
- my $clean = 'X' x length($1);
- $res =~ s@(\#\s*(?:error|warning)\s+).*@$1$clean@;
- }
-
- return $res;
-}
-
-sub get_quoted_string {
- my ($line, $rawline) = @_;
-
- return "" if ($line !~ m/(\"[X]+\")/g);
- return substr($rawline, $-[0], $+[0] - $-[0]);
-}
-
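-# ctx_statement_block(): starting at $linenr/$off, pull in enough following
-# hunk lines to cover one complete statement; returns the statement, its
-# leading conditional, and where the scan should resume.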
-sub ctx_statement_block {
- my ($linenr, $remain, $off) = @_;
- my $line = $linenr - 1;
- my $blk = '';
- my $soff = $off;
- my $coff = $off - 1;
- my $coff_set = 0;
-
- my $loff = 0;
-
- my $type = '';
- my $level = 0;
- my @stack = ();
- my $p;
- my $c;
- my $len = 0;
-
- my $remainder;
- while (1) {
- @stack = (['', 0]) if ($#stack == -1);
-
- #warn "CSB: blk<$blk> remain<$remain>\n";
- # If we are about to drop off the end, pull in more
- # context.
- if ($off >= $len) {
- for (; $remain > 0; $line++) {
- last if (!defined $lines[$line]);
- next if ($lines[$line] =~ /^-/);
- $remain--;
- $loff = $len;
- $blk .= $lines[$line] . "\n";
- $len = length($blk);
- $line++;
- last;
- }
- # Bail if there is no further context.
- #warn "CSB: blk<$blk> off<$off> len<$len>\n";
- if ($off >= $len) {
- last;
- }
- if ($level == 0 && substr($blk, $off) =~ /^.\s*#\s*define/) {
- $level++;
- $type = '#';
- }
- }
- $p = $c;
- $c = substr($blk, $off, 1);
- $remainder = substr($blk, $off);
-
- #warn "CSB: c<$c> type<$type> level<$level> remainder<$remainder> coff_set<$coff_set>\n";
-
- # Handle nested #if/#else.
- if ($remainder =~ /^#\s*(?:ifndef|ifdef|if)\s/) {
- push(@stack, [ $type, $level ]);
- } elsif ($remainder =~ /^#\s*(?:else|elif)\b/) {
- ($type, $level) = @{$stack[$#stack - 1]};
- } elsif ($remainder =~ /^#\s*endif\b/) {
- ($type, $level) = @{pop(@stack)};
- }
-
- # Statement ends at the ';' or a close '}' at the
- # outermost level.
- if ($level == 0 && $c eq ';') {
- last;
- }
-
-		# An else is really a conditional as long as it's not an else if
- if ($level == 0 && $coff_set == 0 &&
- (!defined($p) || $p =~ /(?:\s|\}|\+)/) &&
- $remainder =~ /^(else)(?:\s|{)/ &&
- $remainder !~ /^else\s+if\b/) {
- $coff = $off + length($1) - 1;
- $coff_set = 1;
- #warn "CSB: mark coff<$coff> soff<$soff> 1<$1>\n";
- #warn "[" . substr($blk, $soff, $coff - $soff + 1) . "]\n";
- }
-
- if (($type eq '' || $type eq '(') && $c eq '(') {
- $level++;
- $type = '(';
- }
- if ($type eq '(' && $c eq ')') {
- $level--;
- $type = ($level != 0)? '(' : '';
-
- if ($level == 0 && $coff < $soff) {
- $coff = $off;
- $coff_set = 1;
- #warn "CSB: mark coff<$coff>\n";
- }
- }
- if (($type eq '' || $type eq '{') && $c eq '{') {
- $level++;
- $type = '{';
- }
- if ($type eq '{' && $c eq '}') {
- $level--;
- $type = ($level != 0)? '{' : '';
-
- if ($level == 0) {
- if (substr($blk, $off + 1, 1) eq ';') {
- $off++;
- }
- last;
- }
- }
- # Preprocessor commands end at the newline unless escaped.
- if ($type eq '#' && $c eq "\n" && $p ne "\\") {
- $level--;
- $type = '';
- $off++;
- last;
- }
- $off++;
- }
- # We are truly at the end, so shuffle to the next line.
- if ($off == $len) {
- $loff = $len + 1;
- $line++;
- $remain--;
- }
-
- my $statement = substr($blk, $soff, $off - $soff + 1);
- my $condition = substr($blk, $soff, $coff - $soff + 1);
-
- #warn "STATEMENT<$statement>\n";
- #warn "CONDITION<$condition>\n";
-
- #print "coff<$coff> soff<$off> loff<$loff>\n";
-
- return ($statement, $condition,
- $line, $remain + 1, $off - $loff + 1, $level);
-}
-
-sub statement_lines {
- my ($stmt) = @_;
-
- # Strip the diff line prefixes and rip blank lines at start and end.
- $stmt =~ s/(^|\n)./$1/g;
- $stmt =~ s/^\s*//;
- $stmt =~ s/\s*$//;
-
- my @stmt_lines = ($stmt =~ /\n/g);
-
- return $#stmt_lines + 2;
-}
-
-sub statement_rawlines {
- my ($stmt) = @_;
-
- my @stmt_lines = ($stmt =~ /\n/g);
-
- return $#stmt_lines + 2;
-}
-
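-# statement_block_size(): size of a { } block, taken as the larger of its
-# line count and its ';'-terminated statement count.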
-sub statement_block_size {
- my ($stmt) = @_;
-
- $stmt =~ s/(^|\n)./$1/g;
- $stmt =~ s/^\s*{//;
- $stmt =~ s/}\s*$//;
- $stmt =~ s/^\s*//;
- $stmt =~ s/\s*$//;
-
- my @stmt_lines = ($stmt =~ /\n/g);
- my @stmt_statements = ($stmt =~ /;/g);
-
- my $stmt_lines = $#stmt_lines + 2;
- my $stmt_statements = $#stmt_statements + 1;
-
- if ($stmt_lines > $stmt_statements) {
- return $stmt_lines;
- } else {
- return $stmt_statements;
- }
-}
-
-sub ctx_statement_full {
- my ($linenr, $remain, $off) = @_;
- my ($statement, $condition, $level);
-
- my (@chunks);
-
- # Grab the first conditional/block pair.
- ($statement, $condition, $linenr, $remain, $off, $level) =
- ctx_statement_block($linenr, $remain, $off);
- #print "F: c<$condition> s<$statement> remain<$remain>\n";
- push(@chunks, [ $condition, $statement ]);
- if (!($remain > 0 && $condition =~ /^\s*(?:\n[+-])?\s*(?:if|else|do)\b/s)) {
- return ($level, $linenr, @chunks);
- }
-
- # Pull in the following conditional/block pairs and see if they
- # could continue the statement.
- for (;;) {
- ($statement, $condition, $linenr, $remain, $off, $level) =
- ctx_statement_block($linenr, $remain, $off);
- #print "C: c<$condition> s<$statement> remain<$remain>\n";
- last if (!($remain > 0 && $condition =~ /^(?:\s*\n[+-])*\s*(?:else|do)\b/s));
- #print "C: push\n";
- push(@chunks, [ $condition, $statement ]);
- }
-
- return ($level, $linenr, @chunks);
-}
-
-sub ctx_block_get {
- my ($linenr, $remain, $outer, $open, $close, $off) = @_;
- my $line;
- my $start = $linenr - 1;
- my $blk = '';
- my @o;
- my @c;
- my @res = ();
-
- my $level = 0;
- my @stack = ($level);
- for ($line = $start; $remain > 0; $line++) {
- next if ($rawlines[$line] =~ /^-/);
- $remain--;
-
- $blk .= $rawlines[$line];
-
- # Handle nested #if/#else.
- if ($lines[$line] =~ /^.\s*#\s*(?:ifndef|ifdef|if)\s/) {
- push(@stack, $level);
- } elsif ($lines[$line] =~ /^.\s*#\s*(?:else|elif)\b/) {
- $level = $stack[$#stack - 1];
- } elsif ($lines[$line] =~ /^.\s*#\s*endif\b/) {
- $level = pop(@stack);
- }
-
- foreach my $c (split(//, $lines[$line])) {
- ##print "C<$c>L<$level><$open$close>O<$off>\n";
- if ($off > 0) {
- $off--;
- next;
- }
-
- if ($c eq $close && $level > 0) {
- $level--;
- last if ($level == 0);
- } elsif ($c eq $open) {
- $level++;
- }
- }
-
- if (!$outer || $level <= 1) {
- push(@res, $rawlines[$line]);
- }
-
- last if ($level == 0);
- }
-
- return ($level, @res);
-}
-sub ctx_block_outer {
- my ($linenr, $remain) = @_;
-
- my ($level, @r) = ctx_block_get($linenr, $remain, 1, '{', '}', 0);
- return @r;
-}
-sub ctx_block {
- my ($linenr, $remain) = @_;
-
- my ($level, @r) = ctx_block_get($linenr, $remain, 0, '{', '}', 0);
- return @r;
-}
-sub ctx_statement {
- my ($linenr, $remain, $off) = @_;
-
- my ($level, @r) = ctx_block_get($linenr, $remain, 0, '(', ')', $off);
- return @r;
-}
-sub ctx_block_level {
- my ($linenr, $remain) = @_;
-
- return ctx_block_get($linenr, $remain, 0, '{', '}', 0);
-}
-sub ctx_statement_level {
- my ($linenr, $remain, $off) = @_;
-
- return ctx_block_get($linenr, $remain, 0, '(', ')', $off);
-}
-
-sub ctx_locate_comment {
- my ($first_line, $end_line) = @_;
-
- # Catch a comment on the end of the line itself.
- my ($current_comment) = ($rawlines[$end_line - 1] =~ m@.*(/\*.*\*/)\s*(?:\\\s*)?$@);
- return $current_comment if (defined $current_comment);
-
- # Look through the context and try and figure out if there is a
- # comment.
- my $in_comment = 0;
- $current_comment = '';
- for (my $linenr = $first_line; $linenr < $end_line; $linenr++) {
- my $line = $rawlines[$linenr - 1];
- #warn " $line\n";
- if ($linenr == $first_line and $line =~ m@^.\s*\*@) {
- $in_comment = 1;
- }
- if ($line =~ m@/\*@) {
- $in_comment = 1;
- }
- if (!$in_comment && $current_comment ne '') {
- $current_comment = '';
- }
- $current_comment .= $line . "\n" if ($in_comment);
- if ($line =~ m@\*/@) {
- $in_comment = 0;
- }
- }
-
- chomp($current_comment);
- return($current_comment);
-}
-sub ctx_has_comment {
- my ($first_line, $end_line) = @_;
- my $cmt = ctx_locate_comment($first_line, $end_line);
-
- ##print "LINE: $rawlines[$end_line - 1 ]\n";
- ##print "CMMT: $cmt\n";
-
- return ($cmt ne '');
-}
-
-sub raw_line {
- my ($linenr, $cnt) = @_;
-
- my $offset = $linenr - 1;
- $cnt++;
-
- my $line;
- while ($cnt) {
- $line = $rawlines[$offset++];
- next if (defined($line) && $line =~ /^-/);
- $cnt--;
- }
- return $line;
-}
-
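-# cat_vet(): make control characters visible ('^X' notation plus a trailing
-# '$', as 'cat -vE' would) for whitespace error reports.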
-sub cat_vet {
- my ($vet) = @_;
- my ($res, $coded);
-
- $res = '';
- while ($vet =~ /([^[:cntrl:]]*)([[:cntrl:]]|$)/g) {
- $res .= $1;
- if ($2 ne '') {
- $coded = sprintf("^%c", unpack('C', $2) + 64);
- $res .= $coded;
- }
- }
- $res =~ s/$/\$/;
- return $res;
-}
-
-my $av_preprocessor = 0;
-my $av_pending;
-my @av_paren_type;
-my $av_pend_colon;
-
-sub annotate_reset {
- $av_preprocessor = 0;
- $av_pending = '_';
- @av_paren_type = ('E');
- $av_pend_colon = 'O';
-}
-
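-# annotate_values(): walk a sanitised statement and label each character with
-# a context code: 'E' end of statement, 'N' expecting a value, 'V' value or
-# identifier, 'T' type, 'c' cast, 'C' after a comma. The second return value
-# marks ambiguous operators ('-', '+', '*', '&', '&&') as binary 'B' or unary 'U'.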
-sub annotate_values {
- my ($stream, $type) = @_;
-
- my $res;
- my $var = '_' x length($stream);
- my $cur = $stream;
-
- print "$stream\n" if ($dbg_values > 1);
-
- while (length($cur)) {
- @av_paren_type = ('E') if ($#av_paren_type < 0);
- print " <" . join('', @av_paren_type) .
- "> <$type> <$av_pending>" if ($dbg_values > 1);
- if ($cur =~ /^(\s+)/o) {
- print "WS($1)\n" if ($dbg_values > 1);
- if ($1 =~ /\n/ && $av_preprocessor) {
- $type = pop(@av_paren_type);
- $av_preprocessor = 0;
- }
-
- } elsif ($cur =~ /^(\(\s*$Type\s*)\)/ && $av_pending eq '_') {
- print "CAST($1)\n" if ($dbg_values > 1);
- push(@av_paren_type, $type);
- $type = 'c';
-
- } elsif ($cur =~ /^($Type)\s*(?:$Ident|,|\)|\(|\s*$)/) {
- print "DECLARE($1)\n" if ($dbg_values > 1);
- $type = 'T';
-
- } elsif ($cur =~ /^($Modifier)\s*/) {
- print "MODIFIER($1)\n" if ($dbg_values > 1);
- $type = 'T';
-
- } elsif ($cur =~ /^(\#\s*define\s*$Ident)(\(?)/o) {
- print "DEFINE($1,$2)\n" if ($dbg_values > 1);
- $av_preprocessor = 1;
- push(@av_paren_type, $type);
- if ($2 ne '') {
- $av_pending = 'N';
- }
- $type = 'E';
-
- } elsif ($cur =~ /^(\#\s*(?:undef\s*$Ident|include\b))/o) {
- print "UNDEF($1)\n" if ($dbg_values > 1);
- $av_preprocessor = 1;
- push(@av_paren_type, $type);
-
- } elsif ($cur =~ /^(\#\s*(?:ifdef|ifndef|if))/o) {
- print "PRE_START($1)\n" if ($dbg_values > 1);
- $av_preprocessor = 1;
-
- push(@av_paren_type, $type);
- push(@av_paren_type, $type);
- $type = 'E';
-
- } elsif ($cur =~ /^(\#\s*(?:else|elif))/o) {
- print "PRE_RESTART($1)\n" if ($dbg_values > 1);
- $av_preprocessor = 1;
-
- push(@av_paren_type, $av_paren_type[$#av_paren_type]);
-
- $type = 'E';
-
- } elsif ($cur =~ /^(\#\s*(?:endif))/o) {
- print "PRE_END($1)\n" if ($dbg_values > 1);
-
- $av_preprocessor = 1;
-
- # Assume all arms of the conditional end as this
- # one does, and continue as if the #endif was not here.
- pop(@av_paren_type);
- push(@av_paren_type, $type);
- $type = 'E';
-
- } elsif ($cur =~ /^(\\\n)/o) {
- print "PRECONT($1)\n" if ($dbg_values > 1);
-
- } elsif ($cur =~ /^(__attribute__)\s*\(?/o) {
- print "ATTR($1)\n" if ($dbg_values > 1);
- $av_pending = $type;
- $type = 'N';
-
- } elsif ($cur =~ /^(sizeof)\s*(\()?/o) {
- print "SIZEOF($1)\n" if ($dbg_values > 1);
- if (defined $2) {
- $av_pending = 'V';
- }
- $type = 'N';
-
- } elsif ($cur =~ /^(if|while|for)\b/o) {
- print "COND($1)\n" if ($dbg_values > 1);
- $av_pending = 'E';
- $type = 'N';
-
- } elsif ($cur =~/^(case)/o) {
- print "CASE($1)\n" if ($dbg_values > 1);
- $av_pend_colon = 'C';
- $type = 'N';
-
- } elsif ($cur =~/^(return|else|goto|typeof|__typeof__)\b/o) {
- print "KEYWORD($1)\n" if ($dbg_values > 1);
- $type = 'N';
-
- } elsif ($cur =~ /^(\()/o) {
- print "PAREN('$1')\n" if ($dbg_values > 1);
- push(@av_paren_type, $av_pending);
- $av_pending = '_';
- $type = 'N';
-
- } elsif ($cur =~ /^(\))/o) {
- my $new_type = pop(@av_paren_type);
- if ($new_type ne '_') {
- $type = $new_type;
- print "PAREN('$1') -> $type\n"
- if ($dbg_values > 1);
- } else {
- print "PAREN('$1')\n" if ($dbg_values > 1);
- }
-
- } elsif ($cur =~ /^($Ident)\s*\(/o) {
- print "FUNC($1)\n" if ($dbg_values > 1);
- $type = 'V';
- $av_pending = 'V';
-
- } elsif ($cur =~ /^($Ident\s*):(?:\s*\d+\s*(,|=|;))?/) {
- if (defined $2 && $type eq 'C' || $type eq 'T') {
- $av_pend_colon = 'B';
- } elsif ($type eq 'E') {
- $av_pend_colon = 'L';
- }
- print "IDENT_COLON($1,$type>$av_pend_colon)\n" if ($dbg_values > 1);
- $type = 'V';
-
- } elsif ($cur =~ /^($Ident|$Constant)/o) {
- print "IDENT($1)\n" if ($dbg_values > 1);
- $type = 'V';
-
- } elsif ($cur =~ /^($Assignment)/o) {
- print "ASSIGN($1)\n" if ($dbg_values > 1);
- $type = 'N';
-
- } elsif ($cur =~/^(;|{|})/) {
- print "END($1)\n" if ($dbg_values > 1);
- $type = 'E';
- $av_pend_colon = 'O';
-
- } elsif ($cur =~/^(,)/) {
- print "COMMA($1)\n" if ($dbg_values > 1);
- $type = 'C';
-
- } elsif ($cur =~ /^(\?)/o) {
- print "QUESTION($1)\n" if ($dbg_values > 1);
- $type = 'N';
-
- } elsif ($cur =~ /^(:)/o) {
- print "COLON($1,$av_pend_colon)\n" if ($dbg_values > 1);
-
- substr($var, length($res), 1, $av_pend_colon);
- if ($av_pend_colon eq 'C' || $av_pend_colon eq 'L') {
- $type = 'E';
- } else {
- $type = 'N';
- }
- $av_pend_colon = 'O';
-
- } elsif ($cur =~ /^(\[)/o) {
- print "CLOSE($1)\n" if ($dbg_values > 1);
- $type = 'N';
-
- } elsif ($cur =~ /^(-(?![->])|\+(?!\+)|\*|\&\&|\&)/o) {
- my $variant;
-
- print "OPV($1)\n" if ($dbg_values > 1);
- if ($type eq 'V') {
- $variant = 'B';
- } else {
- $variant = 'U';
- }
-
- substr($var, length($res), 1, $variant);
- $type = 'N';
-
- } elsif ($cur =~ /^($Operators)/o) {
- print "OP($1)\n" if ($dbg_values > 1);
- if ($1 ne '++' && $1 ne '--') {
- $type = 'N';
- }
-
- } elsif ($cur =~ /(^.)/o) {
- print "C($1)\n" if ($dbg_values > 1);
- }
- if (defined $1) {
- $cur = substr($cur, length($1));
- $res .= $type x length($1);
- }
- }
-
- return ($res, $var);
-}
-
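-# possible(): remember identifiers seen where a type or modifier could appear,
-# add them to @typeList/@modifierList and rebuild the $Type patterns via
-# build_types().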
-sub possible {
- my ($possible, $line) = @_;
- my $notPermitted = qr{(?:
- ^(?:
- $Modifier|
- $Storage|
- $Type|
- DEFINE_\S+
- )$|
- ^(?:
- goto|
- return|
- case|
- else|
- asm|__asm__|
- do|
- \#|
- \#\#|
- )(?:\s|$)|
- ^(?:typedef|struct|enum)\b
- )}x;
- warn "CHECK<$possible> ($line)\n" if ($dbg_possible > 2);
- if ($possible !~ $notPermitted) {
- # Check for modifiers.
- $possible =~ s/\s*$Storage\s*//g;
- $possible =~ s/\s*$Sparse\s*//g;
- if ($possible =~ /^\s*$/) {
-
- } elsif ($possible =~ /\s/) {
- $possible =~ s/\s*$Type\s*//g;
- for my $modifier (split(' ', $possible)) {
- if ($modifier !~ $notPermitted) {
- warn "MODIFIER: $modifier ($possible) ($line)\n" if ($dbg_possible);
- push(@modifierList, $modifier);
- }
- }
-
- } else {
- warn "POSSIBLE: $possible ($line)\n" if ($dbg_possible);
- push(@typeList, $possible);
- }
- build_types();
- } else {
- warn "NOTPOSS: $possible ($line)\n" if ($dbg_possible > 1);
- }
-}
-
-my $prefix = '';
-
-sub show_type {
- my ($type) = @_;
-
- return defined $use_type{$type} if (scalar keys %use_type > 0);
-
- return !defined $ignore_type{$type};
-}
-
-sub report {
- my ($level, $type, $msg) = @_;
-
- if (!show_type($type) ||
- (defined $tst_only && $msg !~ /\Q$tst_only\E/)) {
- return 0;
- }
- my $line;
- if ($show_types) {
- $line = "$prefix$level:$type: $msg\n";
- } else {
- $line = "$prefix$level: $msg\n";
- }
- $line = (split('\n', $line))[0] . "\n" if ($terse);
-
- if ($quiet == 0) {
- push(our @report, $line);
- }
- return 1;
-}
-
-sub report_dump {
- our @report;
-}
-
-sub ERROR {
- my ($type, $msg) = @_;
-
- if (report("ERROR", $type, $msg)) {
- our $clean = 0;
- our $cnt_error++;
- return 1;
- }
- return 0;
-}
-sub WARN {
- my ($type, $msg) = @_;
-
- if (report("WARNING", $type, $msg)) {
- ## Warning is okay to submit
- our $clean = 0;
- our $cnt_warn++;
- return 1;
- }
- return 0;
-}
-sub CHK {
- my ($type, $msg) = @_;
-
- if ($check && report("CHECK", $type, $msg)) {
- our $clean = 0;
- our $cnt_chk++;
- return 1;
- }
- return 0;
-}
-
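-# check_absolute_file(): if an absolute path mentioned in changelog text
-# resolves to a file inside the tree, suggest a relative path instead.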
-sub check_absolute_file {
- my ($absolute, $herecurr) = @_;
- my $file = $absolute;
-
- ##print "absolute<$absolute>\n";
-
- # See if any suffix of this path is a path within the tree.
- while ($file =~ s@^[^/]*/@@) {
- if (-f "$root/$file") {
- ##print "file<$file>\n";
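-# In --file mode each named file is fed through 'diff -u /dev/null <file>'
-# so the whole file is checked as one added hunk.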
- last;
- }
- }
- if (! -f _) {
- return 0;
- }
-
- # It is, so see if the prefix is acceptable.
- my $prefix = $absolute;
- substr($prefix, -length($file)) = '';
-
- ##print "prefix<$prefix>\n";
- if ($prefix ne ".../") {
- WARN("USE_RELATIVE_PATH",
- "use relative pathname instead of absolute in changelog text\n" . $herecurr);
- }
-}
-
-sub trim {
- my ($string) = @_;
-
- $string =~ s/^\s+|\s+$//g;
-
- return $string;
-}
-
-sub ltrim {
- my ($string) = @_;
-
- $string =~ s/^\s+//;
-
- return $string;
-}
-
-sub rtrim {
- my ($string) = @_;
-
- $string =~ s/\s+$//;
-
- return $string;
-}
-
-sub string_find_replace {
- my ($string, $find, $replace) = @_;
-
- $string =~ s/$find/$replace/g;
-
- return $string;
-}
-
-sub tabify {
- my ($leading) = @_;
-
- my $source_indent = 8;
- my $max_spaces_before_tab = $source_indent - 1;
- my $spaces_to_tab = " " x $source_indent;
-
- #convert leading spaces to tabs
- 1 while $leading =~ s@^([\t]*)$spaces_to_tab@$1\t@g;
- #Remove spaces before a tab
- 1 while $leading =~ s@^([\t]*)( {1,$max_spaces_before_tab})\t@$1\t@g;
- return "$leading";
-}
-
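-# pos_last_openparen(): return the tab-expanded column just past the last
-# unmatched '(' on the line (or -1 if the parentheses balance), used for the
-# continuation-line alignment check.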
-sub pos_last_openparen {
- my ($line) = @_;
-
- my $pos = 0;
-
- my $opens = $line =~ tr/\(/\(/;
- my $closes = $line =~ tr/\)/\)/;
-
- my $last_openparen = 0;
-
- if (($opens == 0) || ($closes >= $opens)) {
- return -1;
- }
-
- my $len = length($line);
-
- for ($pos = 0; $pos < $len; $pos++) {
- my $string = substr($line, $pos);
- if ($string =~ /^($FuncArg|$balanced_parens)/) {
- $pos += length($1) - 1;
- } elsif (substr($line, $pos, 1) eq '(') {
- $last_openparen = $pos;
- } elsif (index($string, '(') == -1) {
- last;
- }
- }
-
- return length(expand_tabs(substr($line, 0, $last_openparen))) + 1;
-}
-
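-# process(): pass one sanitises each hunk line; pass two walks the lines
-# again, tracking the real file/line numbers and running the per-line checks,
-# accumulating results in @report.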
-sub process {
- my $filename = shift;
-
- my $linenr=0;
- my $prevline="";
- my $prevrawline="";
- my $stashline="";
- my $stashrawline="";
-
- my $length;
- my $indent;
- my $previndent=0;
- my $stashindent=0;
-
- our $clean = 1;
- my $signoff = 0;
- my $subject_trailing_dot = 0;
- my $is_patch = 0;
-
- my $in_header_lines = 1;
- my $in_commit_log = 0; #Scanning lines before patch
-
- my $non_utf8_charset = 0;
-
- our @report = ();
- our $cnt_lines = 0;
- our $cnt_error = 0;
- our $cnt_warn = 0;
- our $cnt_chk = 0;
-
- # Trace the real file/line as we go.
- my $realfile = '';
- my $realline = 0;
- my $realcnt = 0;
- my $here = '';
- my $in_comment = 0;
- my $comment_edge = 0;
- my $first_line = 0;
- my $p1_prefix = '';
-
- my $prev_values = 'E';
-
- # suppression flags
- my %suppress_ifbraces;
- my %suppress_whiletrailers;
- my %suppress_export;
- my $suppress_statement = 0;
-
- my %signatures = ();
-
- # Pre-scan the patch sanitizing the lines.
- # Pre-scan the patch looking for any __setup documentation.
- #
- my @setup_docs = ();
- my $setup_docs = 0;
-
- my $camelcase_file_seeded = 0;
-
- sanitise_line_reset();
- my $line;
- foreach my $rawline (@rawlines) {
- $linenr++;
- $line = $rawline;
-
- push(@fixed, $rawline) if ($fix);
-
- if ($rawline=~/^\+\+\+\s+(\S+)/) {
- $setup_docs = 0;
- if ($1 =~ m@Documentation/kernel-parameters.txt$@) {
- $setup_docs = 1;
- }
- #next;
- }
- if ($rawline=~/^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) {
- $realline=$1-1;
- if (defined $2) {
- $realcnt=$3+1;
- } else {
- $realcnt=1+1;
- }
- $in_comment = 0;
-
-			# Guesstimate if this is a continuing comment. Run
- # the context looking for a comment "edge". If this
- # edge is a close comment then we must be in a comment
- # at context start.
- my $edge;
- my $cnt = $realcnt;
- for (my $ln = $linenr + 1; $cnt > 0; $ln++) {
- next if (defined $rawlines[$ln - 1] &&
- $rawlines[$ln - 1] =~ /^-/);
- $cnt--;
- #print "RAW<$rawlines[$ln - 1]>\n";
- last if (!defined $rawlines[$ln - 1]);
- if ($rawlines[$ln - 1] =~ m@(/\*|\*/)@ &&
- $rawlines[$ln - 1] !~ m@"[^"]*(?:/\*|\*/)[^"]*"@) {
- ($edge) = $1;
- last;
- }
- }
- if (defined $edge && $edge eq '*/') {
- $in_comment = 1;
- }
-
-			# Guesstimate if this is a continuing comment. If this
- # is the start of a diff block and this line starts
- # ' *' then it is very likely a comment.
- if (!defined $edge &&
- $rawlines[$linenr] =~ m@^.\s*(?:\*\*+| \*)(?:\s|$)@)
- {
- $in_comment = 1;
- }
-
- ##print "COMMENT:$in_comment edge<$edge> $rawline\n";
- sanitise_line_reset($in_comment);
-
- } elsif ($realcnt && $rawline =~ /^(?:\+| |$)/) {
- # Standardise the strings and chars within the input to
- # simplify matching -- only bother with positive lines.
- $line = sanitise_line($rawline);
- }
- push(@lines, $line);
-
- if ($realcnt > 1) {
- $realcnt-- if ($line =~ /^(?:\+| |$)/);
- } else {
- $realcnt = 0;
- }
-
- #print "==>$rawline\n";
- #print "-->$line\n";
-
- if ($setup_docs && $line =~ /^\+/) {
- push(@setup_docs, $line);
- }
- }
-
- $prefix = '';
-
- $realcnt = 0;
- $linenr = 0;
- foreach my $line (@lines) {
- $linenr++;
- my $sline = $line; #copy of $line
- $sline =~ s/$;/ /g; #with comments as spaces
-
- my $rawline = $rawlines[$linenr - 1];
-
-#extract the line range in the file after the patch is applied
- if ($line=~/^\@\@ -\d+(?:,\d+)? \+(\d+)(,(\d+))? \@\@/) {
- $is_patch = 1;
- $first_line = $linenr + 1;
- $realline=$1-1;
- if (defined $2) {
- $realcnt=$3+1;
- } else {
- $realcnt=1+1;
- }
- annotate_reset();
- $prev_values = 'E';
-
- %suppress_ifbraces = ();
- %suppress_whiletrailers = ();
- %suppress_export = ();
- $suppress_statement = 0;
- next;
-
-# track the line number as we move through the hunk; note that
-# new versions of GNU diff omit the leading space on completely
-# blank context lines, so we need to count those too.
- } elsif ($line =~ /^( |\+|$)/) {
- $realline++;
- $realcnt-- if ($realcnt != 0);
-
- # Measure the line length and indent.
- ($length, $indent) = line_stats($rawline);
-
- # Track the previous line.
- ($prevline, $stashline) = ($stashline, $line);
- ($previndent, $stashindent) = ($stashindent, $indent);
- ($prevrawline, $stashrawline) = ($stashrawline, $rawline);
-
- #warn "line<$line>\n";
-
- } elsif ($realcnt == 1) {
- $realcnt--;
- }
-
- my $hunk_line = ($realcnt != 0);
-
-#make up the handle for any error we report on this line
- $prefix = "$filename:$realline: " if ($emacs && $file);
- $prefix = "$filename:$linenr: " if ($emacs && !$file);
-
- $here = "#$linenr: " if (!$file);
- $here = "#$realline: " if ($file);
-
- my $found_file = 0;
- # extract the filename as it passes
- if ($line =~ /^diff --git.*?(\S+)$/) {
- $realfile = $1;
- $realfile =~ s@^([^/]*)/@@ if (!$file);
- $in_commit_log = 0;
- $found_file = 1;
- } elsif ($line =~ /^\+\+\+\s+(\S+)/) {
- $realfile = $1;
- $realfile =~ s@^([^/]*)/@@ if (!$file);
- $in_commit_log = 0;
-
- $p1_prefix = $1;
- if (!$file && $tree && $p1_prefix ne '' &&
- -e "$root/$p1_prefix") {
- WARN("PATCH_PREFIX",
- "patch prefix '$p1_prefix' exists, appears to be a -p0 patch\n");
- }
-
- $found_file = 1;
- }
-
- if ($found_file) {
- if ($realfile =~ m@^(drivers/net/|net/)@) {
- $check = 1;
- } else {
- $check = $check_orig;
- }
- next;
- }
-
- $here .= "FILE: $realfile:$realline:" if ($realcnt != 0);
-
- my $hereline = "$here\n$rawline\n";
- my $herecurr = "$here\n$rawline\n";
- my $hereprev = "$here\n$prevrawline\n$rawline\n";
-
- $cnt_lines++ if ($realcnt != 0);
-
-# Check for incorrect file permissions
- if ($line =~ /^new (file )?mode.*[7531]\d{0,2}$/) {
- my $permhere = $here . "FILE: $realfile\n";
- if ($realfile !~ m@scripts/@ &&
- $realfile !~ /\.(py|pl|awk|sh|t)$/) {
- ERROR("EXECUTE_PERMISSIONS",
- "do not set execute permissions for source files\n" . $permhere);
- }
- }
-
- next if ($realfile =~ /(checkpatch.pl)/);
- next if ($realfile =~ /\.(md|txt|doc|8|pdf|tex)$/);
-
-# Check that the subject does not have a trailing dot
- if ($in_header_lines &&
- $line =~ /^Subject: \[PATCH\] (.+)\.(\s*)$/) {
- $subject_trailing_dot++;
- }
-
-# Check the patch for a signoff:
- if ($line =~ /^\s*signed-off-by:/i) {
- $signoff++;
- $in_commit_log = 0;
- }
-
-# Check signature styles
- if (!$in_header_lines &&
- $line =~ /^(\s*)([a-z0-9_-]+by:|$signature_tags)(\s*)(.*)/i) {
- my $space_before = $1;
- my $sign_off = $2;
- my $space_after = $3;
- my $email = $4;
- my $ucfirst_sign_off = ucfirst(lc($sign_off));
-
- if ($sign_off !~ /$signature_tags/) {
- WARN("BAD_SIGN_OFF",
- "Non-standard signature: $sign_off\n" . $herecurr);
- }
- if (defined $space_before && $space_before ne "") {
- if (WARN("BAD_SIGN_OFF",
- "Do not use whitespace before $ucfirst_sign_off\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =
- "$ucfirst_sign_off $email";
- }
- }
- if ($sign_off =~ /-by:$/i && $sign_off ne $ucfirst_sign_off) {
- if (WARN("BAD_SIGN_OFF",
- "'$ucfirst_sign_off' is the preferred signature form\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =
- "$ucfirst_sign_off $email";
- }
-
- }
- if (!defined $space_after || $space_after ne " ") {
- if (WARN("BAD_SIGN_OFF",
- "Use a single space after $ucfirst_sign_off\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =
- "$ucfirst_sign_off $email";
- }
- }
-
- # Check if email is really Gerrit URL
- if ($email =~ /^($url_tags)(.*)/) {
- my $uri = $1;
- my $url = $2;
- if ($uri && $url !~ /$gerrit_url/) {
- ERROR("BAD_URL",
- "Unrecognized url address: '$email'\n" . $herecurr);
- }
- } else {
- my ($email_name, $email_address, $comment) = parse_email($email);
- my $suggested_email = format_email(($email_name, $email_address));
- if ($suggested_email eq "") {
- ERROR("BAD_SIGN_OFF",
- "Unrecognized email address: '$email'\n" . $herecurr);
- } else {
- my $dequoted = $suggested_email;
- $dequoted =~ s/^"//;
- $dequoted =~ s/" </ </;
- # Don't force email to have quotes
- # Allow just an angle bracketed address
- if ("$dequoted$comment" ne $email &&
- "<$email_address>$comment" ne $email &&
- "$suggested_email$comment" ne $email) {
- WARN("BAD_SIGN_OFF",
- "email address '$email' might be better as '$suggested_email$comment'\n" . $herecurr);
- }
- }
- }
-
-# Check for duplicate signatures
- my $sig_nospace = $line;
- $sig_nospace =~ s/\s//g;
- $sig_nospace = lc($sig_nospace);
- if (defined $signatures{$sig_nospace}) {
- WARN("BAD_SIGN_OFF",
- "Duplicate signature\n" . $herecurr);
- } else {
- $signatures{$sig_nospace} = 1;
- }
- }
-
-# Check for wrappage within a valid hunk of the file
- if ($realcnt != 0 && $line !~ m{^(?:\+|-| |\\ No newline|$)}) {
- ERROR("CORRUPTED_PATCH",
- "patch seems to be corrupt (line wrapped?)\n" .
- $herecurr) if (!$emitted_corrupt++);
- }
-
-# Check for absolute kernel paths.
- if ($tree) {
- while ($line =~ m{(?:^|\s)(/\S*)}g) {
- my $file = $1;
-
- if ($file =~ m{^(.*?)(?::\d+)+:?$} &&
- check_absolute_file($1, $herecurr)) {
- #
- } else {
- check_absolute_file($file, $herecurr);
- }
- }
- }
-
-# UTF-8 regex found at http://www.w3.org/International/questions/qa-forms-utf-8.en.php
- if (($realfile =~ /^$/ || $line =~ /^\+/) &&
- $rawline !~ m/^$UTF8*$/) {
- my ($utf8_prefix) = ($rawline =~ /^($UTF8*)/);
-
- my $blank = copy_spacing($rawline);
- my $ptr = substr($blank, 0, length($utf8_prefix)) . "^";
- my $hereptr = "$hereline$ptr\n";
-
- CHK("INVALID_UTF8",
- "Invalid UTF-8, patch and commit message should be encoded in UTF-8\n" . $hereptr);
- }
-
-# Check if it's the start of a commit log
-# (not a header line and we haven't seen the patch filename)
- if ($in_header_lines && $realfile =~ /^$/ &&
- $rawline !~ /^(commit\b|from\b|[\w-]+:).+$/i) {
- $in_header_lines = 0;
- $in_commit_log = 1;
- }
-
-# Check if there is UTF-8 in a commit log when a mail header has explicitly
-# declined it, i.e. declared some charset other than UTF-8.
- if ($in_header_lines &&
- $rawline =~ /^Content-Type:.+charset="(.+)".*$/ &&
- $1 !~ /utf-8/i) {
- $non_utf8_charset = 1;
- }
-
- if ($in_commit_log && $non_utf8_charset && $realfile =~ /^$/ &&
- $rawline =~ /$NON_ASCII_UTF8/) {
- WARN("UTF8_BEFORE_PATCH",
- "8-bit UTF-8 used in possible commit log\n" . $herecurr);
- }
-
-# ignore non-hunk lines and lines being removed
- next if (!$hunk_line || $line =~ /^-/);
-
-#trailing whitespace
- if ($line =~ /^\+.*\015/) {
- my $herevet = "$here\n" . cat_vet($rawline) . "\n";
- if (ERROR("DOS_LINE_ENDINGS",
- "DOS line endings\n" . $herevet) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/[\s\015]+$//;
- }
- } elsif ($rawline =~ /^\+.*\S\s+$/ || $rawline =~ /^\+\s+$/) {
- my $herevet = "$here\n" . cat_vet($rawline) . "\n";
- if (ERROR("TRAILING_WHITESPACE",
- "trailing whitespace\n" . $herevet) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/\s+$//;
- }
-
- $rpt_cleaners = 1;
- }
-
- if (($realfile =~ /Makefile.*/) &&
- ($line =~ /\+(EXTRA_[A-Z]+FLAGS).*/)) {
- my $flag = $1;
- my $replacement = {
- 'EXTRA_AFLAGS' => 'asflags-y',
- 'EXTRA_CFLAGS' => 'ccflags-y',
- 'EXTRA_CPPFLAGS' => 'cppflags-y',
- 'EXTRA_LDFLAGS' => 'ldflags-y',
- };
-
- WARN("DEPRECATED_VARIABLE",
- "Use of $flag is deprecated, please use \`$replacement->{$flag} instead.\n" . $herecurr) if ($replacement->{$flag});
- }
-
-# if we are in the .spec file, ignore this hunk
- next if ($realfile eq "glusterfs.spec.in");
-
-# check we are in a valid source file; if not, ignore this hunk
- next if ($realfile !~ /\.(h|c|pl|py|l|y|sh|in)$/);
-
-#line length limit
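-# (exempt: lines consisting only of a quoted string, or of a logging call
-# whose long quoted format string is what overflows)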
- if ($line =~ /^\+/ && $prevrawline !~ /\/\*\*/ &&
- $rawline !~ /^.\s*\*\s*\@$Ident\s/ &&
- !($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(KERN_\S+\s*|[^"]*))?"[X\t]*"\s*(?:|,|\)\s*;)\s*$/ ||
- $line =~ /^\+\s*"[^"]*"\s*(?:\s*|,|\)\s*;)\s*$/) &&
- $length > $max_line_length)
- {
- WARN("LONG_LINE",
- "line over $max_line_length characters\n" . $herecurr);
- }
-
-# check for spaces before a quoted newline
- if ($rawline =~ /^.*\".*\s\\n/) {
- if (WARN("QUOTED_WHITESPACE_BEFORE_NEWLINE",
- "unnecessary whitespace before a quoted newline\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/^(\+.*\".*)\s+\\n/$1\\n/;
- }
-
- }
-
-# check for adding lines without a newline.
- if ($line =~ /^\+/ && defined $lines[$linenr] && $lines[$linenr] =~ /^\\ No newline at end of file/) {
- WARN("MISSING_EOF_NEWLINE",
- "adding a line without newline at end of file\n" . $herecurr);
- }
-
-# check we are in a valid C or Perl source file; if not, ignore this hunk
- next if ($realfile !~ /\.(h|c|pl)$/);
-
-# check for space before tabs.
- if ($rawline =~ /^\+/ && $rawline =~ / \t/) {
- my $herevet = "$here\n" . cat_vet($rawline) . "\n";
- if (WARN("SPACE_BEFORE_TAB",
- "please, no space before tabs\n" . $herevet) &&
- $fix) {
- while ($fixed[$linenr - 1] =~
- s/(^\+.*) {8,8}\t/$1\t\t/) {}
- while ($fixed[$linenr - 1] =~
- s/(^\+.*) +\t/$1\t/) {}
- }
- }
-
-# check for && or || at the start of a line
- if ($rawline =~ /^\+\s*(&&|\|\|)/) {
- CHK("LOGICAL_CONTINUATIONS",
- "Logical continuations should be on the previous line\n" . $hereprev);
- }
-
-# check multi-line statement indentation matches previous line
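-# (a continuation line should line up one column past the previous line's
-# last open parenthesis, indented with tabs then spaces, or spaces only)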
- if ($^V && $^V ge 5.10.0 &&
- $prevline =~ /^\+([ \t]*)((?:$c90_Keywords(?:\s+if)\s*)|(?:$Declare\s*)?(?:$Ident|\(\s*\*\s*$Ident\s*\))\s*|$Ident\s*=\s*$Ident\s*)\(.*(\&\&|\|\||,)\s*$/) {
- $prevline =~ /^\+(\t*)(.*)$/;
- my $oldindent = $1;
- my $rest = $2;
-
- my $pos = pos_last_openparen($rest);
- if ($pos >= 0) {
- $line =~ /^(\+| )([ \t]*)/;
- my $newindent = $2;
-
- my $goodtabindent = $oldindent .
- "\t" x ($pos / 8) .
- " " x ($pos % 8);
- my $goodspaceindent = $oldindent . " " x $pos;
-
- if ($newindent ne $goodtabindent &&
- $newindent ne $goodspaceindent) {
-
- if (CHK("PARENTHESIS_ALIGNMENT",
- "Alignment should match open parenthesis\n" . $hereprev) &&
- $fix && $line =~ /^\+/) {
- $fixed[$linenr - 1] =~
- s/^\+[ \t]*/\+$goodtabindent/;
- }
- }
- }
- }
-
- if ($line =~ /^\+.*\*[ \t]*\)[ \t]+(?!$Assignment|$Arithmetic)/) {
- if (CHK("SPACING",
- "No space is necessary after a cast\n" . $hereprev) &&
- $fix) {
- $fixed[$linenr - 1] =~
- s/^(\+.*\*[ \t]*\))[ \t]+/$1/;
- }
- }
-
-
-# check for missing blank lines after declarations
- if ($sline =~ /^\+\s+\S/ && #Not at char 1
- # actual declarations
- ($prevline =~ /^\+\s+$Declare\s*$Ident\s*[=,;:\[]/ ||
- # foo bar; where foo is some local typedef or #define
- $prevline =~ /^\+\s+$Ident(?:\s+|\s*\*\s*)$Ident\s*[=,;\[]/ ||
- # known declaration macros
- $prevline =~ /^\+\s+$declaration_macros/) &&
- # for "else if" which can look like "$Ident $Ident"
- !($prevline =~ /^\+\s+$c90_Keywords\b/ ||
- # other possible extensions of declaration lines
- $prevline =~ /(?:$Compare|$Assignment|$Operators)\s*$/ ||
- # not starting a section or a macro "\" extended line
- $prevline =~ /(?:\{\s*|\\)$/) &&
- # looks like a declaration
- !($sline =~ /^\+\s+$Declare\s*$Ident\s*[=,;:\[]/ ||
- # foo bar; where foo is some local typedef or #define
- $sline =~ /^\+\s+$Ident(?:\s+|\s*\*\s*)$Ident\s*[=,;\[]/ ||
- # known declaration macros
- $sline =~ /^\+\s+$declaration_macros/ ||
- # start of struct or union or enum
- $sline =~ /^\+\s+(?:union|struct|enum|typedef)\b/ ||
- # start or end of block or continuation of declaration
- $sline =~ /^\+\s+(?:$|[\{\}\.\#\"\?\:\(\[])/ ||
- # bitfield continuation
- $sline =~ /^\+\s+$Ident\s*:\s*\d+\s*[,;]/ ||
- # other possible extensions of declaration lines
- $sline =~ /^\+\s+\(?\s*(?:$Compare|$Assignment|$Operators)/) &&
- # indentation of previous and current line are the same
- (($prevline =~ /\+(\s+)\S/) && $sline =~ /^\+$1\S/)) {
- WARN("SPACING",
- "Missing a blank line after declarations\n" . $hereprev);
- }
-
-# check we are in a valid C source file; if not, ignore this hunk
- next if ($realfile !~ /\.(h|c)$/);
-
-# check for RCS/CVS revision markers
- if ($rawline =~ /^\+.*\$(Revision|Log|Id)(?:\$|)/) {
- WARN("CVS_KEYWORD",
- "CVS style keyword markers, these will _not_ be updated\n". $herecurr);
- }
-
-# Check for potential 'bare' types
- my ($stat, $cond, $line_nr_next, $remain_next, $off_next,
- $realline_next);
-#print "LINE<$line>\n";
- if ($linenr >= $suppress_statement &&
- $realcnt && $sline =~ /.\s*\S/) {
- ($stat, $cond, $line_nr_next, $remain_next, $off_next) =
- ctx_statement_block($linenr, $realcnt, 0);
- $stat =~ s/\n./\n /g;
- $cond =~ s/\n./\n /g;
-
-#print "linenr<$linenr> <$stat>\n";
-		# If this statement has no statement boundaries within
-		# it, there is no point in retrying a statement scan
-		# until we hit the end of it.
- my $frag = $stat; $frag =~ s/;+\s*$//;
- if ($frag !~ /(?:{|;)/) {
-#print "skip<$line_nr_next>\n";
- $suppress_statement = $line_nr_next;
- }
-
- # Find the real next line.
- $realline_next = $line_nr_next;
- if (defined $realline_next &&
- (!defined $lines[$realline_next - 1] ||
- substr($lines[$realline_next - 1], $off_next) =~ /^\s*$/)) {
- $realline_next++;
- }
-
- my $s = $stat;
- $s =~ s/{.*$//s;
-
- # Ignore goto labels.
- if ($s =~ /$Ident:\*$/s) {
-
- # Ignore functions being called
- } elsif ($s =~ /^.\s*$Ident\s*\(/s) {
-
- } elsif ($s =~ /^.\s*else\b/s) {
-
- # declarations always start with types
- } elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?((?:\s*$Ident)+?)\b(?:\s+$Sparse)?\s*\**\s*(?:$Ident|\(\*[^\)]*\))(?:\s*$Modifier)?\s*(?:;|=|,|\()/s) {
- my $type = $1;
- $type =~ s/\s+/ /g;
- possible($type, "A:" . $s);
-
- # definitions in global scope can only start with types
- } elsif ($s =~ /^.(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?($Ident)\b\s*(?!:)/s) {
- possible($1, "B:" . $s);
- }
-
- # any (foo ... *) is a pointer cast, and foo is a type
- while ($s =~ /\(($Ident)(?:\s+$Sparse)*[\s\*]+\s*\)/sg) {
- possible($1, "C:" . $s);
- }
-
- # Check for any sort of function declaration.
- # int foo(something bar, other baz);
- # void (*store_gdt)(x86_descr_ptr *);
- if ($prev_values eq 'E' && $s =~ /^(.(?:typedef\s*)?(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*(?:\b$Ident|\(\*\s*$Ident\))\s*)\(/s) {
- my ($name_len) = length($1);
-
- my $ctx = $s;
- substr($ctx, 0, $name_len + 1, '');
- $ctx =~ s/\)[^\)]*$//;
-
- for my $arg (split(/\s*,\s*/, $ctx)) {
- if ($arg =~ /^(?:const\s+)?($Ident)(?:\s+$Sparse)*\s*\**\s*(:?\b$Ident)?$/s || $arg =~ /^($Ident)$/s) {
-
- possible($1, "D:" . $s);
- }
- }
- }
-
- }
-
-#
-# Checks which may be anchored in the context.
-#
-
-# Check for switch () and associated case and default
-# statements should be at the same indent.
- if ($line=~/\bswitch\s*\(.*\)/) {
- my $err = '';
- my $sep = '';
- my @ctx = ctx_block_outer($linenr, $realcnt);
- shift(@ctx);
- for my $ctx (@ctx) {
- my ($clen, $cindent) = line_stats($ctx);
- if ($ctx =~ /^\+\s*(case\s+|default:)/ &&
- $indent != $cindent) {
- $err .= "$sep$ctx\n";
- $sep = '';
- } else {
- $sep = "[...]\n";
- }
- }
- if ($err ne '') {
- ERROR("SWITCH_CASE_INDENT_LEVEL",
- "switch and case should be at the same indent\n$hereline$err");
- }
- }
-
-# if/while/etc braces do not go on the next line, unless defining a do while loop,
-# or if that brace on the next line is for something else
- if ($line =~ /(.*)\b((?:if|while|for|switch)\s*\(|do\b|else\b)/ && $line !~ /^.\s*\#/) {
- my $pre_ctx = "$1$2";
-
- my ($level, @ctx) = ctx_statement_level($linenr, $realcnt, 0);
-
- if ($line =~ /^\+\t{6,}/) {
- WARN("DEEP_INDENTATION",
- "Too many leading tabs - consider code refactoring\n" . $herecurr);
- }
-
- my $ctx_cnt = $realcnt - $#ctx - 1;
- my $ctx = join("\n", @ctx);
-
- my $ctx_ln = $linenr;
- my $ctx_skip = $realcnt;
-
- while ($ctx_skip > $ctx_cnt || ($ctx_skip == $ctx_cnt &&
- defined $lines[$ctx_ln - 1] &&
- $lines[$ctx_ln - 1] =~ /^-/)) {
- ##print "SKIP<$ctx_skip> CNT<$ctx_cnt>\n";
- $ctx_skip-- if (!defined $lines[$ctx_ln - 1] || $lines[$ctx_ln - 1] !~ /^-/);
- $ctx_ln++;
- }
-
- #print "realcnt<$realcnt> ctx_cnt<$ctx_cnt>\n";
- #print "pre<$pre_ctx>\nline<$line>\nctx<$ctx>\nnext<$lines[$ctx_ln - 1]>\n";
-
- if ($ctx !~ /{\s*/ && defined($lines[$ctx_ln -1]) && $lines[$ctx_ln - 1] =~ /^\+\s*{/) {
- ERROR("OPEN_BRACE",
- "that open brace { should be on the previous line\n" .
- "$here\n$ctx\n$rawlines[$ctx_ln - 1]\n");
- }
- if ($level == 0 && $pre_ctx !~ /}\s*while\s*\($/ &&
- $ctx =~ /\)\s*\;\s*$/ &&
- defined $lines[$ctx_ln - 1])
- {
- my ($nlength, $nindent) = line_stats($lines[$ctx_ln - 1]);
- if ($nindent > $indent) {
- WARN("TRAILING_SEMICOLON",
- "trailing semicolon indicates no statements, indent implies otherwise\n" .
- "$here\n$ctx\n$rawlines[$ctx_ln - 1]\n");
- }
- }
- }
-
-# Check relative indent for conditionals and blocks.
- if ($line =~ /\b(?:(?:if|while|for)\s*\(|do\b)/ && $line !~ /^.\s*#/ && $line !~ /\}\s*while\s*/) {
- ($stat, $cond, $line_nr_next, $remain_next, $off_next) =
- ctx_statement_block($linenr, $realcnt, 0)
- if (!defined $stat);
- my ($s, $c) = ($stat, $cond);
-
- substr($s, 0, length($c), '');
-
- # Make sure we remove the line prefixes as we have
-			# none on the first line, and are going to re-add them
- # where necessary.
- $s =~ s/\n./\n/gs;
-
- # Find out how long the conditional actually is.
- my @newlines = ($c =~ /\n/gs);
- my $cond_lines = 1 + $#newlines;
-
- # We want to check the first line inside the block
- # starting at the end of the conditional, so remove:
- # 1) any blank line termination
- # 2) any opening brace { on end of the line
- # 3) any do (...) {
- my $continuation = 0;
- my $check = 0;
- $s =~ s/^.*\bdo\b//;
- $s =~ s/^\s*{//;
- if ($s =~ s/^\s*\\//) {
- $continuation = 1;
- }
- if ($s =~ s/^\s*?\n//) {
- $check = 1;
- $cond_lines++;
- }
-
- # Also ignore a loop construct at the end of a
- # preprocessor statement.
- if (($prevline =~ /^.\s*#\s*define\s/ ||
- $prevline =~ /\\\s*$/) && $continuation == 0) {
- $check = 0;
- }
-
- my $cond_ptr = -1;
- $continuation = 0;
- while ($cond_ptr != $cond_lines) {
- $cond_ptr = $cond_lines;
-
- # If we see an #else/#elif then the code
- # is not linear.
- if ($s =~ /^\s*\#\s*(?:else|elif)/) {
- $check = 0;
- }
-
- # Ignore:
- # 1) blank lines, they should be at 0,
- # 2) preprocessor lines, and
- # 3) labels.
- if ($continuation ||
- $s =~ /^\s*?\n/ ||
- $s =~ /^\s*#\s*?/ ||
- $s =~ /^\s*$Ident\s*:/) {
- $continuation = ($s =~ /^.*?\\\n/) ? 1 : 0;
- if ($s =~ s/^.*?\n//) {
- $cond_lines++;
- }
- }
- }
-
- my (undef, $sindent) = line_stats("+" . $s);
- my $stat_real = raw_line($linenr, $cond_lines);
-
- # Check if either of these lines are modified, else
- # this is not this patch's fault.
- if (!defined($stat_real) ||
- $stat !~ /^\+/ && $stat_real !~ /^\+/) {
- $check = 0;
- }
- if (defined($stat_real) && $cond_lines > 1) {
- $stat_real = "[...]\n$stat_real";
- }
-
- #print "line<$line> prevline<$prevline> indent<$indent> sindent<$sindent> check<$check> continuation<$continuation> s<$s> cond_lines<$cond_lines> stat_real<$stat_real> stat<$stat>\n";
-
- if ($check && (($sindent % 8) != 0 ||
- ($sindent <= $indent && $s ne ''))) {
- WARN("SUSPECT_CODE_INDENT",
- "suspect code indent for conditional statements ($indent, $sindent)\n" . $herecurr . "$stat_real\n");
- }
- }
-
- # Track the 'values' across context and added lines.
- my $opline = $line; $opline =~ s/^./ /;
- my ($curr_values, $curr_vars) =
- annotate_values($opline . "\n", $prev_values);
- $curr_values = $prev_values . $curr_values;
- if ($dbg_values) {
- my $outline = $opline; $outline =~ s/\t/ /g;
- print "$linenr > .$outline\n";
- print "$linenr > $curr_values\n";
- print "$linenr > $curr_vars\n";
- }
- $prev_values = substr($curr_values, -1);
-
-#ignore lines not being added
- next if ($line =~ /^[^\+]/);
-
-# TEST: allow direct testing of the type matcher.
- if ($dbg_type) {
- if ($line =~ /^.\s*$Declare\s*$/) {
- ERROR("TEST_TYPE",
- "TEST: is type\n" . $herecurr);
- } elsif ($dbg_type > 1 && $line =~ /^.+($Declare)/) {
- ERROR("TEST_NOT_TYPE",
- "TEST: is not type ($1 is)\n". $herecurr);
- }
- next;
- }
-# TEST: allow direct testing of the attribute matcher.
- if ($dbg_attr) {
- if ($line =~ /^.\s*$Modifier\s*$/) {
- ERROR("TEST_ATTR",
- "TEST: is attr\n" . $herecurr);
- } elsif ($dbg_attr > 1 && $line =~ /^.+($Modifier)/) {
- ERROR("TEST_NOT_ATTR",
- "TEST: is not attr ($1 is)\n". $herecurr);
- }
- next;
- }
-
-# check for initialisation to aggregates open brace on the next line
- if ($line =~ /^.\s*{/ &&
- $prevline =~ /(?:^|[^=])=\s*$/) {
- ERROR("OPEN_BRACE",
- "that open brace { should be on the previous line\n" . $hereprev);
- }
-
-#
-# Checks which are anchored on the added line.
-#
-
-# check for malformed paths in #include statements (uses RAW line)
- if ($rawline =~ m{^.\s*\#\s*include\s+[<"](.*)[">]}) {
- my $path = $1;
- if ($path =~ m{//}) {
- ERROR("MALFORMED_INCLUDE",
- "malformed #include filename\n" . $herecurr);
- }
- if ($path =~ "^uapi/" && $realfile =~ m@\binclude/uapi/@) {
- ERROR("UAPI_INCLUDE",
- "No #include in ...include/uapi/... should use a uapi/ path prefix\n" . $herecurr);
- }
- }
-
-# no C99 // comments
- if ($line =~ m{//}) {
- if (ERROR("C99_COMMENTS",
- "do not use C99 // comments\n" . $herecurr) &&
- $fix) {
- my $line = $fixed[$linenr - 1];
- if ($line =~ /\/\/(.*)$/) {
- my $comment = trim($1);
- $fixed[$linenr - 1] =~ s@\/\/(.*)$@/\* $comment \*/@;
- }
- }
- }
- # Remove C99 comments.
- $line =~ s@//.*@@;
- $opline =~ s@//.*@@;
-
-# EXPORT_SYMBOL should immediately follow the thing it is exporting, consider
-# the whole statement.
-#print "APW <$lines[$realline_next - 1]>\n";
- if (defined $realline_next &&
- exists $lines[$realline_next - 1] &&
- !defined $suppress_export{$realline_next} &&
- ($lines[$realline_next - 1] =~ /EXPORT_SYMBOL.*\((.*)\)/ ||
- $lines[$realline_next - 1] =~ /EXPORT_UNUSED_SYMBOL.*\((.*)\)/)) {
- # Handle definitions which produce identifiers with
- # a prefix:
- # XXX(foo);
- # EXPORT_SYMBOL(something_foo);
- my $name = $1;
- if ($stat =~ /^(?:.\s*}\s*\n)?.([A-Z_]+)\s*\(\s*($Ident)/ &&
- $name =~ /^${Ident}_$2/) {
-#print "FOO C name<$name>\n";
- $suppress_export{$realline_next} = 1;
-
- } elsif ($stat !~ /(?:
- \n.}\s*$|
- ^.DEFINE_$Ident\(\Q$name\E\)|
- ^.DECLARE_$Ident\(\Q$name\E\)|
- ^.LIST_HEAD\(\Q$name\E\)|
- ^.(?:$Storage\s+)?$Type\s*\(\s*\*\s*\Q$name\E\s*\)\s*\(|
- \b\Q$name\E(?:\s+$Attribute)*\s*(?:;|=|\[|\()
- )/x) {
-#print "FOO A<$lines[$realline_next - 1]> stat<$stat> name<$name>\n";
- $suppress_export{$realline_next} = 2;
- } else {
- $suppress_export{$realline_next} = 1;
- }
- }
- if (!defined $suppress_export{$linenr} &&
- $prevline =~ /^.\s*$/ &&
- ($line =~ /EXPORT_SYMBOL.*\((.*)\)/ ||
- $line =~ /EXPORT_UNUSED_SYMBOL.*\((.*)\)/)) {
-#print "FOO B <$lines[$linenr - 1]>\n";
- $suppress_export{$linenr} = 2;
- }
- if (defined $suppress_export{$linenr} &&
- $suppress_export{$linenr} == 2) {
- WARN("EXPORT_SYMBOL",
- "EXPORT_SYMBOL(foo); should immediately follow its function/variable\n" . $herecurr);
- }
-
-# check for global initialisers.
- if ($line =~ /^\+(\s*$Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/) {
- if (ERROR("GLOBAL_INITIALISERS",
- "do not initialise globals to 0 or NULL\n" .
- $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/($Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/$1;/;
- }
- }
-# check for static initialisers.
- if ($line =~ /^\+.*\bstatic\s.*=\s*(0|NULL|false)\s*;/) {
- if (ERROR("INITIALISED_STATIC",
- "do not initialise statics to 0 or NULL\n" .
- $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/(\bstatic\s.*?)\s*=\s*(0|NULL|false)\s*;/$1;/;
- }
- }
-
-# check for static const char * arrays.
- if ($line =~ /\bstatic\s+const\s+char\s*\*\s*(\w+)\s*\[\s*\]\s*=\s*/) {
- WARN("STATIC_CONST_CHAR_ARRAY",
- "static const char * array should probably be static const char * const\n" .
- $herecurr);
- }
-
-# check for static char foo[] = "bar" declarations.
- if ($line =~ /\bstatic\s+char\s+(\w+)\s*\[\s*\]\s*=\s*"/) {
- WARN("STATIC_CONST_CHAR_ARRAY",
- "static char array declaration should probably be static const char\n" .
- $herecurr);
- }
-
-# check for non-global char *foo[] = {"bar", ...} declarations.
- if ($line =~ /^.\s+(?:static\s+|const\s+)?char\s+\*\s*\w+\s*\[\s*\]\s*=\s*\{/) {
- WARN("STATIC_CONST_CHAR_ARRAY",
- "char * array declaration might be better as static const\n" .
- $herecurr);
- }
-
-# check for function declarations without arguments like "int foo()"
- if ($line =~ /(\b$Type\s+$Ident)\s*\(\s*\)/) {
- if (ERROR("FUNCTION_WITHOUT_ARGS",
- "Bad function definition - $1() should probably be $1(void)\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/(\b($Type)\s+($Ident))\s*\(\s*\)/$2 $3(void)/;
- }
- }
-
-# check for new typedefs, only function parameters and sparse annotations
-# make sense.
- if ($line =~ /\btypedef\s/ &&
- $line !~ /\btypedef\s+$Type\s*\(\s*\*?$Ident\s*\)\s*\(/ &&
- $line !~ /\btypedef\s+$Type\s+$Ident\s*\(/ &&
- $line !~ /\b$typeTypedefs\b/ &&
- $line !~ /\b__bitwise(?:__|)\b/) {
- WARN("NEW_TYPEDEFS",
- "do not add new typedefs\n" . $herecurr);
- }
-
-# * goes on variable not on type
- # (char*[ const])
- while ($line =~ m{(\($NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)\))}g) {
- #print "AA<$1>\n";
- my ($ident, $from, $to) = ($1, $2, $2);
-
- # Should start with a space.
- $to =~ s/^(\S)/ $1/;
- # Should not end with a space.
- $to =~ s/\s+$//;
- # '*'s should not have spaces between.
- while ($to =~ s/\*\s+\*/\*\*/) {
- }
-
-## print "1: from<$from> to<$to> ident<$ident>\n";
- if ($from ne $to) {
- if (ERROR("POINTER_LOCATION",
- "\"(foo$from)\" should be \"(foo$to)\"\n" . $herecurr) &&
- $fix) {
- my $sub_from = $ident;
- my $sub_to = $ident;
- $sub_to =~ s/\Q$from\E/$to/;
- $fixed[$linenr - 1] =~
- s@\Q$sub_from\E@$sub_to@;
- }
- }
- }
- while ($line =~ m{(\b$NonptrType(\s*(?:$Modifier\b\s*|\*\s*)+)($Ident))}g) {
- #print "BB<$1>\n";
- my ($match, $from, $to, $ident) = ($1, $2, $2, $3);
-
- # Should start with a space.
- $to =~ s/^(\S)/ $1/;
- # Should not end with a space.
- $to =~ s/\s+$//;
- # '*'s should not have spaces between.
- while ($to =~ s/\*\s+\*/\*\*/) {
- }
- # Modifiers should have spaces.
- $to =~ s/(\b$Modifier$)/$1 /;
-
-## print "2: from<$from> to<$to> ident<$ident>\n";
- if ($from ne $to && $ident !~ /^$Modifier$/) {
- if (ERROR("POINTER_LOCATION",
- "\"foo${from}bar\" should be \"foo${to}bar\"\n" . $herecurr) &&
- $fix) {
-
- my $sub_from = $match;
- my $sub_to = $match;
- $sub_to =~ s/\Q$from\E/$to/;
- $fixed[$linenr - 1] =~
- s@\Q$sub_from\E@$sub_to@;
- }
- }
- }
-
-# function brace can't be on same line, except for #defines of do while,
-# or if closed on same line
- if (($line=~/$Type\s*$Ident\(.*\).*\s\{/) and
- !($line=~/\#\s*define.*do\s\{/) and !($line=~/}/)) {
- ERROR("OPEN_BRACE",
- "open brace '{' following function declarations go on the next line\n" . $herecurr);
- }
-
-# open braces for enum, union and struct go on the same line.
- if ($line =~ /^.\s*{/ &&
- $prevline =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident)?\s*$/) {
- ERROR("OPEN_BRACE",
- "open brace '{' following $1 go on the same line\n" . $hereprev);
- }
-
-# missing space after union, struct or enum definition
- if ($line =~ /^.\s*(?:typedef\s+)?(enum|union|struct)(?:\s+$Ident){1,2}[=\{]/) {
- if (WARN("SPACING",
- "missing space after $1 definition\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~
- s/^(.\s*(?:typedef\s+)?(?:enum|union|struct)(?:\s+$Ident){1,2})([=\{])/$1 $2/;
- }
- }
-
-# Function pointer declarations
-# check spacing between type, funcptr, and args
-# canonical declaration is "type (*funcptr)(args...)"
- if ($line =~ /^.\s*($Declare)\((\s*)\*(\s*)($Ident)(\s*)\)(\s*)\(/) {
- my $declare = $1;
- my $pre_pointer_space = $2;
- my $post_pointer_space = $3;
- my $funcname = $4;
- my $post_funcname_space = $5;
- my $pre_args_space = $6;
-
-# the $Declare variable will capture all spaces after the type
-# so check it for a missing trailing missing space but pointer return types
-# don't need a space so don't warn for those.
- my $post_declare_space = "";
- if ($declare =~ /(\s+)$/) {
- $post_declare_space = $1;
- $declare = rtrim($declare);
- }
- if ($declare !~ /\*$/ && $post_declare_space =~ /^$/) {
- WARN("SPACING",
- "missing space after return type\n" . $herecurr);
- $post_declare_space = " ";
- }
-
-# unnecessary space "type ( *funcptr)(args...)"
- if (defined $pre_pointer_space &&
- $pre_pointer_space =~ /^\s/) {
- WARN("SPACING",
- "Unnecessary space after function pointer open parenthesis\n" . $herecurr);
- }
-
-# unnecessary space "type (* funcptr)(args...)"
- if (defined $post_pointer_space &&
- $post_pointer_space =~ /^\s/) {
- WARN("SPACING",
- "Unnecessary space before function pointer name\n" . $herecurr);
- }
-
-# unnecessary space "type (*funcptr )(args...)"
- if (defined $post_funcname_space &&
- $post_funcname_space =~ /^\s/) {
- WARN("SPACING",
- "Unnecessary space after function pointer name\n" . $herecurr);
- }
-
-# unnecessary space "type (*funcptr) (args...)"
- if (defined $pre_args_space &&
- $pre_args_space =~ /^\s/) {
- WARN("SPACING",
- "Unnecessary space before function pointer arguments\n" . $herecurr);
- }
-
- if (show_type("SPACING") && $fix) {
- $fixed[$linenr - 1] =~
- s/^(.\s*)$Declare\s*\(\s*\*\s*$Ident\s*\)\s*\(/$1 . $declare . $post_declare_space . '(*' . $funcname . ')('/ex;
- }
- }
-
-# check for spacing round square brackets; allowed:
-# 1. with a type on the left -- int [] a;
-# 2. at the beginning of a line for slice initialisers -- [0...10] = 5,
-# 3. inside a curly brace -- = { [0...10] = 5 }
- while ($line =~ /(.*?\s)\[/g) {
- my ($where, $prefix) = ($-[1], $1);
- if ($prefix !~ /$Type\s+$/ &&
- ($where != 0 || $prefix !~ /^.\s+$/) &&
- $prefix !~ /[{,]\s+$/) {
- if (ERROR("BRACKET_SPACE",
- "space prohibited before open square bracket '['\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~
- s/^(\+.*?)\s+\[/$1\[/;
- }
- }
- }
-
-# Check operator spacing.
- if (!($line=~/\#\s*include/)) {
- my $fixed_line = "";
- my $line_fixed = 0;
-
- my $ops = qr{
- <<=|>>=|<=|>=|==|!=|
- \+=|-=|\*=|\/=|%=|\^=|\|=|&=|
- =>|->|<<|>>|<|>|=|!|~|
- &&|\|\||,|\^|\+\+|--|&|\||\+|-|\*|\/|%|
- \?:|\?|:
- }x;
- my @elements = split(/($ops|;)/, $opline);
-
-## print("element count: <" . $#elements . ">\n");
-## foreach my $el (@elements) {
-## print("el: <$el>\n");
-## }
-
- my @fix_elements = ();
- my $off = 0;
-
- foreach my $el (@elements) {
- push(@fix_elements, substr($rawline, $off, length($el)));
- $off += length($el);
- }
-
- $off = 0;
-
- my $blank = copy_spacing($opline);
- my $last_after = -1;
-
- for (my $n = 0; $n < $#elements; $n += 2) {
-
- my $good = $fix_elements[$n] . $fix_elements[$n + 1];
-
-## print("n: <$n> good: <$good>\n");
-
- $off += length($elements[$n]);
-
- # Pick up the preceding and succeeding characters.
- my $ca = substr($opline, 0, $off);
- my $cc = '';
- if (length($opline) >= ($off + length($elements[$n + 1]))) {
- $cc = substr($opline, $off + length($elements[$n + 1]));
- }
- my $cb = "$ca$;$cc";
-
- my $a = '';
- $a = 'V' if ($elements[$n] ne '');
- $a = 'W' if ($elements[$n] =~ /\s$/);
- $a = 'C' if ($elements[$n] =~ /$;$/);
- $a = 'B' if ($elements[$n] =~ /(\[|\()$/);
- $a = 'O' if ($elements[$n] eq '');
- $a = 'E' if ($ca =~ /^\s*$/);
-
- my $op = $elements[$n + 1];
-
- my $c = '';
- if (defined $elements[$n + 2]) {
- $c = 'V' if ($elements[$n + 2] ne '');
- $c = 'W' if ($elements[$n + 2] =~ /^\s/);
- $c = 'C' if ($elements[$n + 2] =~ /^$;/);
- $c = 'B' if ($elements[$n + 2] =~ /^(\)|\]|;)/);
- $c = 'O' if ($elements[$n + 2] eq '');
- $c = 'E' if ($elements[$n + 2] =~ /^\s*\\$/);
- } else {
- $c = 'E';
- }
-
- my $ctx = "${a}x${c}";
-
- my $at = "(ctx:$ctx)";
-
- my $ptr = substr($blank, 0, $off) . "^";
- my $hereptr = "$hereline$ptr\n";
-
- # Pull out the value of this operator.
- my $op_type = substr($curr_values, $off + 1, 1);
-
- # Get the full operator variant.
- my $opv = $op . substr($curr_vars, $off, 1);
-
- # Ignore operators passed as parameters.
- if ($op_type ne 'V' &&
- $ca =~ /\s$/ && $cc =~ /^\s*,/) {
-
-# # Ignore comments
-# } elsif ($op =~ /^$;+$/) {
-
- # ; should have either the end of line or a space or \ after it
- } elsif ($op eq ';') {
- if ($ctx !~ /.x[WEBC]/ &&
- $cc !~ /^\\/ && $cc !~ /^;/) {
- if (ERROR("SPACING",
- "space required after that '$op' $at\n" . $hereptr)) {
- $good = $fix_elements[$n] . trim($fix_elements[$n + 1]) . " ";
- $line_fixed = 1;
- }
- }
-
- # // is a comment
- } elsif ($op eq '//') {
-
- # : when part of a bitfield
- } elsif ($opv eq ':B') {
- # skip the bitfield test for now
-
- # No spaces for:
- # ->
- } elsif ($op eq '->') {
- if ($ctx =~ /Wx.|.xW/) {
- if (ERROR("SPACING",
- "spaces prohibited around that '$op' $at\n" . $hereptr)) {
- $good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
- if (defined $fix_elements[$n + 2]) {
- $fix_elements[$n + 2] =~ s/^\s+//;
- }
- $line_fixed = 1;
- }
- }
-
- # , must have a space on the right.
- } elsif ($op eq ',') {
- if ($ctx !~ /.x[WEC]/ && $cc !~ /^}/) {
- if (ERROR("SPACING",
- "space required after that '$op' $at\n" . $hereptr)) {
- $good = $fix_elements[$n] . trim($fix_elements[$n + 1]) . " ";
- $line_fixed = 1;
- $last_after = $n;
- }
- }
-
- # '*' as part of a type definition -- reported already.
- } elsif ($opv eq '*_') {
- #warn "'*' is part of type\n";
-
- # unary operators should have a space before and
- # none after. May be left adjacent to another
- # unary operator, or a cast
- } elsif ($op eq '!' || $op eq '~' ||
- $opv eq '*U' || $opv eq '-U' ||
- $opv eq '&U' || $opv eq '&&U') {
- if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) {
- if (ERROR("SPACING",
- "space required before that '$op' $at\n" . $hereptr)) {
- if ($n != $last_after + 2) {
- $good = $fix_elements[$n] . " " . ltrim($fix_elements[$n + 1]);
- $line_fixed = 1;
- }
- }
- }
- if ($op eq '*' && $cc =~/\s*$Modifier\b/) {
- # A unary '*' may be const
-
- } elsif ($ctx =~ /.xW/) {
- if (ERROR("SPACING",
- "space prohibited after that '$op' $at\n" . $hereptr)) {
- $good = $fix_elements[$n] . rtrim($fix_elements[$n + 1]);
- if (defined $fix_elements[$n + 2]) {
- $fix_elements[$n + 2] =~ s/^\s+//;
- }
- $line_fixed = 1;
- }
- }
-
- # unary ++ and unary -- are allowed no space on one side.
- } elsif ($op eq '++' or $op eq '--') {
- if ($ctx !~ /[WEOBC]x[^W]/ && $ctx !~ /[^W]x[WOBEC]/) {
- if (ERROR("SPACING",
- "space required one side of that '$op' $at\n" . $hereptr)) {
- $good = $fix_elements[$n] . trim($fix_elements[$n + 1]) . " ";
- $line_fixed = 1;
- }
- }
- if ($ctx =~ /Wx[BE]/ ||
- ($ctx =~ /Wx./ && $cc =~ /^;/)) {
- if (ERROR("SPACING",
- "space prohibited before that '$op' $at\n" . $hereptr)) {
- $good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
- $line_fixed = 1;
- }
- }
- if ($ctx =~ /ExW/) {
- if (ERROR("SPACING",
- "space prohibited after that '$op' $at\n" . $hereptr)) {
- $good = $fix_elements[$n] . trim($fix_elements[$n + 1]);
- if (defined $fix_elements[$n + 2]) {
- $fix_elements[$n + 2] =~ s/^\s+//;
- }
- $line_fixed = 1;
- }
- }
-
- # << and >> may either have or not have spaces both sides
- } elsif ($op eq '<<' or $op eq '>>' or
- $op eq '&' or $op eq '^' or $op eq '|' or
- $op eq '+' or $op eq '-' or
- $op eq '*' or $op eq '/' or
- $op eq '%')
- {
- if ($ctx =~ /Wx[^WCE]|[^WCE]xW/) {
- if (ERROR("SPACING",
- "need consistent spacing around '$op' $at\n" . $hereptr)) {
- $good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
- if (defined $fix_elements[$n + 2]) {
- $fix_elements[$n + 2] =~ s/^\s+//;
- }
- $line_fixed = 1;
- }
- }
-
- # A colon needs no spaces before when it is
- # terminating a case value or a label.
- } elsif ($opv eq ':C' || $opv eq ':L') {
- if ($ctx =~ /Wx./) {
- if (ERROR("SPACING",
- "space prohibited before that '$op' $at\n" . $hereptr)) {
- $good = rtrim($fix_elements[$n]) . trim($fix_elements[$n + 1]);
- $line_fixed = 1;
- }
- }
-
- # All the others need spaces both sides.
- } elsif ($ctx !~ /[EWC]x[CWE]/) {
- my $ok = 0;
-
- # Ignore email addresses <foo@bar>
- if (($op eq '<' &&
- $cc =~ /^\S+\@\S+>/) ||
- ($op eq '>' &&
- $ca =~ /<\S+\@\S+$/))
- {
- $ok = 1;
- }
-
- # messages are ERROR, but ?: are CHK
- if ($ok == 0) {
- my $msg_type = \&ERROR;
- $msg_type = \&CHK if (($op eq '?:' || $op eq '?' || $op eq ':') && $ctx =~ /VxV/);
-
- if (&{$msg_type}("SPACING",
- "spaces required around that '$op' $at\n" . $hereptr)) {
- $good = rtrim($fix_elements[$n]) . " " . trim($fix_elements[$n + 1]) . " ";
- if (defined $fix_elements[$n + 2]) {
- $fix_elements[$n + 2] =~ s/^\s+//;
- }
- $line_fixed = 1;
- }
- }
- }
- $off += length($elements[$n + 1]);
-
-## print("n: <$n> GOOD: <$good>\n");
-
- $fixed_line = $fixed_line . $good;
- }
-
- if (($#elements % 2) == 0) {
- $fixed_line = $fixed_line . $fix_elements[$#elements];
- }
-
- if ($fix && $line_fixed && $fixed_line ne $fixed[$linenr - 1]) {
- $fixed[$linenr - 1] = $fixed_line;
- }
-
-
- }
-
-# check for whitespace before a non-naked semicolon
- if ($line =~ /^\+.*\S\s+;\s*$/) {
- if (WARN("SPACING",
- "space prohibited before semicolon\n" . $herecurr) &&
- $fix) {
- 1 while $fixed[$linenr - 1] =~
- s/^(\+.*\S)\s+;/$1;/;
- }
- }
-
-# check for multiple assignments
- if ($line =~ /^.\s*$Lval\s*=\s*$Lval\s*=(?!=)/) {
- CHK("MULTIPLE_ASSIGNMENTS",
- "multiple assignments should be avoided\n" . $herecurr);
- }
-
-## # check for multiple declarations, allowing for a function declaration
-## # continuation.
-## if ($line =~ /^.\s*$Type\s+$Ident(?:\s*=[^,{]*)?\s*,\s*$Ident.*/ &&
-## $line !~ /^.\s*$Type\s+$Ident(?:\s*=[^,{]*)?\s*,\s*$Type\s*$Ident.*/) {
-##
-## # Remove any bracketed sections to ensure we do not
-## # falsly report the parameters of functions.
-## my $ln = $line;
-## while ($ln =~ s/\([^\(\)]*\)//g) {
-## }
-## if ($ln =~ /,/) {
-## WARN("MULTIPLE_DECLARATION",
-## "declaring multiple variables together should be avoided\n" . $herecurr);
-## }
-## }
-
-#need space before brace following if, while, etc
- if (($line =~ /\(.*\)\{/ && $line !~ /\($Type\)\{/) ||
- $line =~ /do\{/) {
- if (ERROR("SPACING",
- "space required before the open brace '{'\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/^(\+.*(?:do|\))){/$1 {/;
- }
- }
-
-## # check for blank lines before declarations
-## if ($line =~ /^.\t+$Type\s+$Ident(?:\s*=.*)?;/ &&
-## $prevrawline =~ /^.\s*$/) {
-## WARN("SPACING",
-## "No blank lines before declarations\n" . $hereprev);
-## }
-##
-
-# closing brace should have a space following it when it has anything
-# on the line
- if ($line =~ /}(?!(?:,|;|\)))\S/) {
- if (ERROR("SPACING",
- "space required after that close brace '}'\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~
- s/}((?!(?:,|;|\)))\S)/} $1/;
- }
- }
-
-# check spacing on square brackets
- if ($line =~ /\[\s/ && $line !~ /\[\s*$/) {
- if (ERROR("SPACING",
- "space prohibited after that open square bracket '['\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~
- s/\[\s+/\[/;
- }
- }
- if ($line =~ /\s\]/) {
- if (ERROR("SPACING",
- "space prohibited before that close square bracket ']'\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~
- s/\s+\]/\]/;
- }
- }
-
-# check spacing on parentheses
- if ($line =~ /\(\s/ && $line !~ /\(\s*(?:\\)?$/ &&
- $line !~ /for\s*\(\s+;/) {
- if (ERROR("SPACING",
- "space prohibited after that open parenthesis '('\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~
- s/\(\s+/\(/;
- }
- }
- if ($line =~ /(\s+)\)/ && $line !~ /^.\s*\)/ &&
- $line !~ /for\s*\(.*;\s+\)/ &&
- $line !~ /:\s+\)/) {
- if (ERROR("SPACING",
- "space prohibited before that close parenthesis ')'\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~
- s/\s+\)/\)/;
- }
- }
-
-#goto labels aren't indented, allow a single space however
- if ($line=~/^.\s+[A-Za-z\d_]+:(?![0-9]+)/ and
- !($line=~/^. [A-Za-z\d_]+:/) and !($line=~/^.\s+default:/)) {
- if (WARN("INDENTED_LABEL",
- "labels should not be indented\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~
- s/^(.)\s+/$1/;
- }
- }
-
-# return is not a function
- if (defined($stat) && $stat =~ /^.\s*return(\s*)\(/s) {
- my $spacing = $1;
- if ($^V && $^V ge 5.10.0 &&
- $stat =~ /^.\s*return\s*($balanced_parens)\s*;\s*$/) {
- my $value = $1;
- $value = deparenthesize($value);
- if ($value =~ m/^\s*$FuncArg\s*(?:\?|$)/) {
- ERROR("RETURN_PARENTHESES",
- "return is not a function, parentheses are not required\n" . $herecurr);
- }
- } elsif ($spacing !~ /\s+/) {
- ERROR("SPACING",
- "space required before the open parenthesis '('\n" . $herecurr);
- }
- }
-
-# unnecessary return in a void function
-# at end-of-function, with the previous line a single leading tab, then return;
-# and the line before that not a goto label target like "out:"
- if ($sline =~ /^[ \+]}\s*$/ &&
- $prevline =~ /^\+\treturn\s*;\s*$/ &&
- $linenr >= 3 &&
- $lines[$linenr - 3] =~ /^[ +]/ &&
- $lines[$linenr - 3] !~ /^[ +]\s*$Ident\s*:/) {
- WARN("RETURN_VOID",
- "void function return statements are not generally useful\n" . $hereprev);
- }
-
-# if statements using unnecessary parentheses - ie: if ((foo == bar))
- if ($^V && $^V ge 5.10.0 &&
- $line =~ /\bif\s*((?:\(\s*){2,})/) {
- my $openparens = $1;
- my $count = $openparens =~ tr@\(@\(@;
- my $msg = "";
- if ($line =~ /\bif\s*(?:\(\s*){$count,$count}$LvalOrFunc\s*($Compare)\s*$LvalOrFunc(?:\s*\)){$count,$count}/) {
- my $comp = $4; #Not $1 because of $LvalOrFunc
- $msg = " - maybe == should be = ?" if ($comp eq "==");
- WARN("UNNECESSARY_PARENTHESES",
- "Unnecessary parentheses$msg\n" . $herecurr);
- }
- }
-
-# Return of what appears to be an errno should normally be -'ve
- if ($line =~ /^.\s*return\s*(E[A-Z]*)\s*;/) {
- my $name = $1;
- if ($name ne 'EOF' && $name ne 'ERROR') {
- WARN("USE_NEGATIVE_ERRNO",
- "return of an errno should typically be -ve (return -$1)\n" . $herecurr);
- }
- }
-
-# Need a space before open parenthesis after if, while etc
- if ($line =~ /\b(if|while|for|switch)\(/) {
- if (ERROR("SPACING",
- "space required before the open parenthesis '('\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~
- s/\b(if|while|for|switch)\(/$1 \(/;
- }
- }
-
-# Check for illegal assignment in if conditional -- and check for trailing
-# statements after the conditional.
- if ($line =~ /do\s*(?!{)/) {
- ($stat, $cond, $line_nr_next, $remain_next, $off_next) =
- ctx_statement_block($linenr, $realcnt, 0)
- if (!defined $stat);
- my ($stat_next) = ctx_statement_block($line_nr_next,
- $remain_next, $off_next);
- $stat_next =~ s/\n./\n /g;
- ##print "stat<$stat> stat_next<$stat_next>\n";
-
- if ($stat_next =~ /^\s*while\b/) {
- # If the statement carries leading newlines,
- # then count those as offsets.
- my ($whitespace) =
- ($stat_next =~ /^((?:\s*\n[+-])*\s*)/s);
- my $offset =
- statement_rawlines($whitespace) - 1;
-
- $suppress_whiletrailers{$line_nr_next +
- $offset} = 1;
- }
- }
- if (!defined $suppress_whiletrailers{$linenr} &&
- defined($stat) && defined($cond) &&
- $line =~ /\b(?:if|while|for)\s*\(/ && $line !~ /^.\s*#/) {
- my ($s, $c) = ($stat, $cond);
-
- if ($c =~ /\bif\s*\(.*[^<>!=]=[^=].*/s) {
- ERROR("ASSIGN_IN_IF",
- "do not use assignment in if condition\n" . $herecurr);
- }
-
- # Find out what is on the end of the line after the
- # conditional.
- substr($s, 0, length($c), '');
- $s =~ s/\n.*//g;
- $s =~ s/$;//g; # Remove any comments
- if (length($c) && $s !~ /^\s*{?\s*\\*\s*$/ &&
- $c !~ /}\s*while\s*/)
- {
- # Find out how long the conditional actually is.
- my @newlines = ($c =~ /\n/gs);
- my $cond_lines = 1 + $#newlines;
- my $stat_real = '';
-
- $stat_real = raw_line($linenr, $cond_lines)
- . "\n" if ($cond_lines);
- if (defined($stat_real) && $cond_lines > 1) {
- $stat_real = "[...]\n$stat_real";
- }
-
- ERROR("TRAILING_STATEMENTS",
- "trailing statements should be on next line\n" . $herecurr . $stat_real);
- }
- }
-
-# Check for bitwise tests written as boolean
- if ($line =~ /
- (?:
- (?:\[|\(|\&\&|\|\|)
- \s*0[xX][0-9]+\s*
- (?:\&\&|\|\|)
- |
- (?:\&\&|\|\|)
- \s*0[xX][0-9]+\s*
- (?:\&\&|\|\||\)|\])
- )/x)
- {
- WARN("HEXADECIMAL_BOOLEAN_TEST",
- "boolean test with hexadecimal, perhaps just 1 \& or \|?\n" . $herecurr);
- }
-
-# if and else should not have general statements after it
- if ($line =~ /^.\s*(?:}\s*)?else\b(.*)/) {
- my $s = $1;
- $s =~ s/$;//g; # Remove any comments
- if ($s !~ /^\s*(?:\sif|(?:{|)\s*\\?\s*$)/) {
- ERROR("TRAILING_STATEMENTS",
- "trailing statements should be on next line\n" . $herecurr);
- }
- }
-# if should not continue a brace
- if ($line =~ /}\s*if\b/) {
- ERROR("TRAILING_STATEMENTS",
- "trailing statements should be on next line\n" .
- $herecurr);
- }
-# case and default should not have general statements after them
- if ($line =~ /^.\s*(?:case\s*.*|default\s*):/g &&
- $line !~ /\G(?:
- (?:\s*$;*)(?:\s*{)?(?:\s*$;*)(?:\s*\\)?\s*$|
- \s*return\s+
- )/xg)
- {
- ERROR("TRAILING_STATEMENTS",
- "trailing statements should be on next line\n" . $herecurr);
- }
-
- # Check for }<nl>else {, these must be at the same
- # indent level to be relevant to each other.
- if ($prevline=~/}\s*$/ and $line=~/^.\s*else\s*/ and
- $previndent == $indent) {
- ERROR("ELSE_AFTER_BRACE",
- "else should follow close brace '}'\n" . $hereprev);
- }
-
- if ($prevline=~/}\s*$/ and $line=~/^.\s*while\s*/ and
- $previndent == $indent) {
- my ($s, $c) = ctx_statement_block($linenr, $realcnt, 0);
-
- # Find out what is on the end of the line after the
- # conditional.
- substr($s, 0, length($c), '');
- $s =~ s/\n.*//g;
-
- if ($s =~ /^\s*;/) {
- ERROR("WHILE_AFTER_BRACE",
- "while should follow close brace '}'\n" . $hereprev);
- }
- }
-
-#Specific variable tests
- while ($line =~ m{($Constant|$Lval)}g) {
- my $var = $1;
-
-#gcc binary extension
- if ($var =~ /^$Binary$/) {
- if (WARN("GCC_BINARY_CONSTANT",
- "Avoid gcc v4.3+ binary constant extension: <$var>\n" . $herecurr) &&
- $fix) {
- my $hexval = sprintf("0x%x", oct($var));
- $fixed[$linenr - 1] =~
- s/\b$var\b/$hexval/;
- }
- }
-
-#CamelCase
- if ($var !~ /^$Constant$/ &&
- $var =~ /[A-Z][a-z]|[a-z][A-Z]/ &&
-#Ignore Page<foo> variants
- $var !~ /^(?:Clear|Set|TestClear|TestSet|)Page[A-Z]/ &&
-#Ignore SI style variants like nS, mV and dB (ie: max_uV, regulator_min_uA_show)
- $var !~ /^(?:[a-z_]*?)_?[a-z][A-Z](?:_[a-z_]+)?$/) {
- while ($var =~ m{($Ident)}g) {
- my $word = $1;
- next if ($word !~ /[A-Z][a-z]|[a-z][A-Z]/);
- if ($check) {
- seed_camelcase_includes();
- if (!$file && !$camelcase_file_seeded) {
- seed_camelcase_file($realfile);
- $camelcase_file_seeded = 1;
- }
- }
- if (!defined $camelcase{$word}) {
- $camelcase{$word} = 1;
- CHK("CAMELCASE",
- "Avoid CamelCase: <$word>\n" . $herecurr);
- }
- }
- }
- }
-
-#no spaces allowed after \ in define
- if ($line =~ /\#\s*define.*\\\s+$/) {
- if (WARN("WHITESPACE_AFTER_LINE_CONTINUATION",
- "Whitespace after \\ makes next lines useless\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/\s+$//;
- }
- }
-
-#warn if <asm/foo.h> is #included and <linux/foo.h> is available (uses RAW line)
- if ($tree && $rawline =~ m{^.\s*\#\s*include\s*\<asm\/(.*)\.h\>}) {
- my $file = "$1.h";
- my $checkfile = "include/linux/$file";
- if (-f "$root/$checkfile" &&
- $realfile ne $checkfile &&
- $1 !~ /$allowed_asm_includes/)
- {
- if ($realfile =~ m{^arch/}) {
- CHK("ARCH_INCLUDE_LINUX",
- "Consider using #include <linux/$file> instead of <asm/$file>\n" . $herecurr);
- } else {
- WARN("INCLUDE_LINUX",
- "Use #include <linux/$file> instead of <asm/$file>\n" . $herecurr);
- }
- }
- }
-
-# multi-statement macros should be enclosed in a do while loop, grab the
-# first statement and ensure its the whole macro if its not enclosed
-# in a known good container
- if ($realfile !~ m@/vmlinux.lds.h$@ &&
- $line =~ /^.\s*\#\s*define\s*$Ident(\()?/) {
- my $ln = $linenr;
- my $cnt = $realcnt;
- my ($off, $dstat, $dcond, $rest);
- my $ctx = '';
- ($dstat, $dcond, $ln, $cnt, $off) =
- ctx_statement_block($linenr, $realcnt, 0);
- $ctx = $dstat;
- #print "dstat<$dstat> dcond<$dcond> cnt<$cnt> off<$off>\n";
- #print "LINE<$lines[$ln-1]> len<" . length($lines[$ln-1]) . "\n";
-
- $dstat =~ s/^.\s*\#\s*define\s+$Ident(?:\([^\)]*\))?\s*//;
- $dstat =~ s/$;//g;
- $dstat =~ s/\\\n.//g;
- $dstat =~ s/^\s*//s;
- $dstat =~ s/\s*$//s;
-
- # Flatten any parentheses and braces
- while ($dstat =~ s/\([^\(\)]*\)/1/ ||
- $dstat =~ s/\{[^\{\}]*\}/1/ ||
- $dstat =~ s/\[[^\[\]]*\]/1/)
- {
- }
-
- # Flatten any obvious string concatentation.
- while ($dstat =~ s/("X*")\s*$Ident/$1/ ||
- $dstat =~ s/$Ident\s*("X*")/$1/)
- {
- }
-
- my $exceptions = qr{
- $Declare|
- module_param_named|
- MODULE_PARM_DESC|
- DECLARE_PER_CPU|
- DEFINE_PER_CPU|
- __typeof__\(|
- union|
- struct|
- \.$Ident\s*=\s*|
- ^\"|\"$
- }x;
- #print "REST<$rest> dstat<$dstat> ctx<$ctx>\n";
- if ($dstat ne '' &&
- $dstat !~ /^(?:$Ident|-?$Constant),$/ && # 10, // foo(),
- $dstat !~ /^(?:$Ident|-?$Constant);$/ && # foo();
- $dstat !~ /^[!~-]?(?:$Lval|$Constant)$/ && # 10 // foo() // !foo // ~foo // -foo // foo->bar // foo.bar->baz
- $dstat !~ /^'X'$/ && # character constants
- $dstat !~ /$exceptions/ &&
- $dstat !~ /^\.$Ident\s*=/ && # .foo =
- $dstat !~ /^(?:\#\s*$Ident|\#\s*$Constant)\s*$/ && # stringification #foo
- $dstat !~ /^do\s*$Constant\s*while\s*$Constant;?$/ && # do {...} while (...); // do {...} while (...)
- $dstat !~ /^for\s*$Constant$/ && # for (...)
- $dstat !~ /^for\s*$Constant\s+(?:$Ident|-?$Constant)$/ && # for (...) bar()
- $dstat !~ /^do\s*{/ && # do {...
- $dstat !~ /^\(\{/ && # ({...
- $ctx !~ /^.\s*#\s*define\s+TRACE_(?:SYSTEM|INCLUDE_FILE|INCLUDE_PATH)\b/)
- {
- $ctx =~ s/\n*$//;
- my $herectx = $here . "\n";
- my $cnt = statement_rawlines($ctx);
-
- for (my $n = 0; $n < $cnt; $n++) {
- $herectx .= raw_line($linenr, $n) . "\n";
- }
-
- if ($dstat =~ /;/) {
- ERROR("MULTISTATEMENT_MACRO_USE_DO_WHILE",
- "Macros with multiple statements should be enclosed in a do - while loop\n" . "$herectx");
- } else {
- ERROR("COMPLEX_MACRO",
- "Macros with complex values should be enclosed in parenthesis\n" . "$herectx");
- }
- }
-
-# check for line continuations outside of #defines, preprocessor #, and asm
-
- } else {
- if ($prevline !~ /^..*\\$/ &&
- $line !~ /^\+\s*\#.*\\$/ && # preprocessor
- $line !~ /^\+.*\b(__asm__|asm)\b.*\\$/ && # asm
- $line =~ /^\+.*\\$/) {
- WARN("LINE_CONTINUATIONS",
- "Avoid unnecessary line continuations\n" . $herecurr);
- }
- }
-
-# do {} while (0) macro tests:
-# single-statement macros do not need to be enclosed in do while (0) loop,
-# macro should not end with a semicolon
- if ($^V && $^V ge 5.10.0 &&
- $realfile !~ m@/vmlinux.lds.h$@ &&
- $line =~ /^.\s*\#\s*define\s+$Ident(\()?/) {
- my $ln = $linenr;
- my $cnt = $realcnt;
- my ($off, $dstat, $dcond, $rest);
- my $ctx = '';
- ($dstat, $dcond, $ln, $cnt, $off) =
- ctx_statement_block($linenr, $realcnt, 0);
- $ctx = $dstat;
-
- $dstat =~ s/\\\n.//g;
-
- if ($dstat =~ /^\+\s*#\s*define\s+$Ident\s*${balanced_parens}\s*do\s*{(.*)\s*}\s*while\s*\(\s*0\s*\)\s*([;\s]*)\s*$/) {
- my $stmts = $2;
- my $semis = $3;
-
- $ctx =~ s/\n*$//;
- my $cnt = statement_rawlines($ctx);
- my $herectx = $here . "\n";
-
- for (my $n = 0; $n < $cnt; $n++) {
- $herectx .= raw_line($linenr, $n) . "\n";
- }
-
- if (($stmts =~ tr/;/;/) == 1 &&
- $stmts !~ /^\s*(if|while|for|switch)\b/) {
- WARN("SINGLE_STATEMENT_DO_WHILE_MACRO",
- "Single statement macros should not use a do {} while (0) loop\n" . "$herectx");
- }
- if (defined $semis && $semis ne "") {
- WARN("DO_WHILE_MACRO_WITH_TRAILING_SEMICOLON",
- "do {} while (0) macros should not be semicolon terminated\n" . "$herectx");
- }
- } elsif ($dstat =~ /^\+\s*#\s*define\s+$Ident.*;\s*$/) {
- $ctx =~ s/\n*$//;
- my $cnt = statement_rawlines($ctx);
- my $herectx = $here . "\n";
-
- for (my $n = 0; $n < $cnt; $n++) {
- $herectx .= raw_line($linenr, $n) . "\n";
- }
-
- WARN("TRAILING_SEMICOLON",
- "macros should not use a trailing semicolon\n" . "$herectx");
- }
- }
-
-# make sure symbols are always wrapped with VMLINUX_SYMBOL() ...
-# all assignments may have only one of the following with an assignment:
-# .
-# ALIGN(...)
-# VMLINUX_SYMBOL(...)
- if ($realfile eq 'vmlinux.lds.h' && $line =~ /(?:(?:^|\s)$Ident\s*=|=\s*$Ident(?:\s|$))/) {
- WARN("MISSING_VMLINUX_SYMBOL",
- "vmlinux.lds.h needs VMLINUX_SYMBOL() around C-visible symbols\n" . $herecurr);
- }
-
-# check for redundant bracing round if etc
- if ($line =~ /(^.*)\bif\b/ && $1 !~ /else\s*$/) {
- my ($level, $endln, @chunks) =
- ctx_statement_full($linenr, $realcnt, 1);
- #print "chunks<$#chunks> linenr<$linenr> endln<$endln> level<$level>\n";
- #print "APW: <<$chunks[1][0]>><<$chunks[1][1]>>\n";
- if ($#chunks > 0 && $level == 0) {
- my @allowed = ();
- my $allow = 0;
- my $seen = 0;
- my $herectx = $here . "\n";
- my $ln = $linenr - 1;
- for my $chunk (@chunks) {
- my ($cond, $block) = @{$chunk};
-
- # If the condition carries leading newlines, then count those as offsets.
- my ($whitespace) = ($cond =~ /^((?:\s*\n[+-])*\s*)/s);
- my $offset = statement_rawlines($whitespace) - 1;
-
- $allowed[$allow] = 0;
- #print "COND<$cond> whitespace<$whitespace> offset<$offset>\n";
-
- # We have looked at and allowed this specific line.
- $suppress_ifbraces{$ln + $offset} = 1;
-
- $herectx .= "$rawlines[$ln + $offset]\n[...]\n";
- $ln += statement_rawlines($block) - 1;
-
- substr($block, 0, length($cond), '');
-
- $seen++ if ($block =~ /^\s*{/);
-
- #print "cond<$cond> block<$block> allowed<$allowed[$allow]>\n";
- if (statement_lines($cond) > 1) {
- #print "APW: ALLOWED: cond<$cond>\n";
- $allowed[$allow] = 1;
- }
- if ($block =~/\b(?:if|for|while)\b/) {
- #print "APW: ALLOWED: block<$block>\n";
- $allowed[$allow] = 1;
- }
- if (statement_block_size($block) > 1) {
- #print "APW: ALLOWED: lines block<$block>\n";
- $allowed[$allow] = 1;
- }
- $allow++;
- }
- if ($seen) {
- my $sum_allowed = 0;
- foreach (@allowed) {
- $sum_allowed += $_;
- }
- if ($sum_allowed != 0 && $sum_allowed != $allow
- && $seen != $allow) {
- CHK("BRACES",
- "braces {} should be used on all arms of this statement\n" . $herectx);
- }
- }
- }
- }
- if (!defined $suppress_ifbraces{$linenr - 1} &&
- $line =~ /\b(if|while|for|else)\b/) {
- my $allowed = 0;
-
- # Check the pre-context.
- if (substr($line, 0, $-[0]) =~ /(\}\s*)$/) {
- #print "APW: ALLOWED: pre<$1>\n";
- $allowed = 1;
- }
-
- my ($level, $endln, @chunks) =
- ctx_statement_full($linenr, $realcnt, $-[0]);
-
- # Check the condition.
- my ($cond, $block) = @{$chunks[0]};
- #print "CHECKING<$linenr> cond<$cond> block<$block>\n";
- if (defined $cond) {
- substr($block, 0, length($cond), '');
- }
- if (statement_lines($cond) > 1) {
- #print "APW: ALLOWED: cond<$cond>\n";
- $allowed = 1;
- }
- if ($block =~/\b(?:if|for|while)\b/) {
- #print "APW: ALLOWED: block<$block>\n";
- $allowed = 1;
- }
- if (statement_block_size($block) > 1) {
- #print "APW: ALLOWED: lines block<$block>\n";
- $allowed = 1;
- }
- # Check the post-context.
- if (defined $chunks[1]) {
- my ($cond, $block) = @{$chunks[1]};
- if (defined $cond) {
- substr($block, 0, length($cond), '');
- }
- if ($block =~ /^\s*\{/) {
- #print "APW: ALLOWED: chunk-1 block<$block>\n";
- $allowed = 1;
- }
- }
- }
-
-# check for unnecessary blank lines around braces
- if (($line =~ /^.\s*}\s*$/ && $prevrawline =~ /^.\s*$/)) {
- CHK("BRACES",
- "Blank lines aren't necessary before a close brace '}'\n" . $hereprev);
- }
- if (($rawline =~ /^.\s*$/ && $prevline =~ /^..*{\s*$/)) {
- CHK("BRACES",
- "Blank lines aren't necessary after an open brace '{'\n" . $hereprev);
- }
-
-# no volatiles please
- my $asm_volatile = qr{\b(__asm__|asm)\s+(__volatile__|volatile)\b};
- if ($line =~ /\bvolatile\b/ && $line !~ /$asm_volatile/) {
- WARN("VOLATILE",
- "Use of volatile is usually wrong: see Documentation/volatile-considered-harmful.txt\n" . $herecurr);
- }
-
-# warn about #if 0
- if ($line =~ /^.\s*\#\s*if\s+0\b/) {
- CHK("REDUNDANT_CODE",
- "if this code is redundant consider removing it\n" .
- $herecurr);
- }
-
-# check for needless "if (<foo>) fn(<foo>)" uses
- if ($prevline =~ /\bif\s*\(\s*($Lval)\s*\)/) {
- my $expr = '\s*\(\s*' . quotemeta($1) . '\s*\)\s*;';
- if ($line =~ /\b(kfree|usb_free_urb|debugfs_remove(?:_recursive)?)$expr/) {
- WARN('NEEDLESS_IF',
- "$1(NULL) is safe this check is probably not required\n" . $hereprev);
- }
- }
-
-# check for bad placement of section $InitAttribute (e.g.: __initdata)
- if ($line =~ /(\b$InitAttribute\b)/) {
- my $attr = $1;
- if ($line =~ /^\+\s*static\s+(?:const\s+)?(?:$attr\s+)?($NonptrTypeWithAttr)\s+(?:$attr\s+)?($Ident(?:\[[^]]*\])?)\s*[=;]/) {
- my $ptr = $1;
- my $var = $2;
- if ((($ptr =~ /\b(union|struct)\s+$attr\b/ &&
- ERROR("MISPLACED_INIT",
- "$attr should be placed after $var\n" . $herecurr)) ||
- ($ptr !~ /\b(union|struct)\s+$attr\b/ &&
- WARN("MISPLACED_INIT",
- "$attr should be placed after $var\n" . $herecurr))) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/(\bstatic\s+(?:const\s+)?)(?:$attr\s+)?($NonptrTypeWithAttr)\s+(?:$attr\s+)?($Ident(?:\[[^]]*\])?)\s*([=;])\s*/"$1" . trim(string_find_replace($2, "\\s*$attr\\s*", " ")) . " " . trim(string_find_replace($3, "\\s*$attr\\s*", "")) . " $attr" . ("$4" eq ";" ? ";" : " = ")/e;
- }
- }
- }
-
-# check for $InitAttributeData (ie: __initdata) with const
- if ($line =~ /\bconst\b/ && $line =~ /($InitAttributeData)/) {
- my $attr = $1;
- $attr =~ /($InitAttributePrefix)(.*)/;
- my $attr_prefix = $1;
- my $attr_type = $2;
- if (ERROR("INIT_ATTRIBUTE",
- "Use of const init definition must use ${attr_prefix}initconst\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~
- s/$InitAttributeData/${attr_prefix}initconst/;
- }
- }
-
-# check for $InitAttributeConst (ie: __initconst) without const
- if ($line !~ /\bconst\b/ && $line =~ /($InitAttributeConst)/) {
- my $attr = $1;
- if (ERROR("INIT_ATTRIBUTE",
- "Use of $attr requires a separate use of const\n" . $herecurr) &&
- $fix) {
- my $lead = $fixed[$linenr - 1] =~
- /(^\+\s*(?:static\s+))/;
- $lead = rtrim($1);
- $lead = "$lead " if ($lead !~ /^\+$/);
- $lead = "${lead}const ";
- $fixed[$linenr - 1] =~ s/(^\+\s*(?:static\s+))/$lead/;
- }
- }
-
-# don't use __constant_<foo> functions outside of include/uapi/
- if ($realfile !~ m@^include/uapi/@ &&
- $line =~ /(__constant_(?:htons|ntohs|[bl]e(?:16|32|64)_to_cpu|cpu_to_[bl]e(?:16|32|64)))\s*\(/) {
- my $constant_func = $1;
- my $func = $constant_func;
- $func =~ s/^__constant_//;
- if (WARN("CONSTANT_CONVERSION",
- "$constant_func should be $func\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/\b$constant_func\b/$func/g;
- }
- }
-
-# prefer usleep_range over udelay
- if ($line =~ /\budelay\s*\(\s*(\d+)\s*\)/) {
- my $delay = $1;
- # ignore udelay's < 10, however
- if (! ($delay < 10) ) {
- CHK("USLEEP_RANGE",
- "usleep_range is preferred over udelay; see Documentation/timers/timers-howto.txt\n" . $herecurr);
- }
- if ($delay > 2000) {
- WARN("LONG_UDELAY",
- "long udelay - prefer mdelay; see arch/arm/include/asm/delay.h\n" . $herecurr);
- }
- }
-
-# warn about unexpectedly long msleep's
- if ($line =~ /\bmsleep\s*\((\d+)\);/) {
- if ($1 < 20) {
- WARN("MSLEEP",
- "msleep < 20ms can sleep for up to 20ms; see Documentation/timers/timers-howto.txt\n" . $herecurr);
- }
- }
-
-# check for comparisons of jiffies
- if ($line =~ /\bjiffies\s*$Compare|$Compare\s*jiffies\b/) {
- WARN("JIFFIES_COMPARISON",
- "Comparing jiffies is almost always wrong; prefer time_after, time_before and friends\n" . $herecurr);
- }
-
-# check for comparisons of get_jiffies_64()
- if ($line =~ /\bget_jiffies_64\s*\(\s*\)\s*$Compare|$Compare\s*get_jiffies_64\s*\(\s*\)/) {
- WARN("JIFFIES_COMPARISON",
- "Comparing get_jiffies_64() is almost always wrong; prefer time_after64, time_before64 and friends\n" . $herecurr);
- }
-
-# warn about spacing in #ifdefs
- if ($line =~ /^.\s*\#\s*(ifdef|ifndef|elif)\s\s+/) {
- if (ERROR("SPACING",
- "exactly one space required after that #$1\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~
- s/^(.\s*\#\s*(ifdef|ifndef|elif))\s{2,}/$1 /;
- }
-
- }
-
-# check for spinlock_t definitions without a comment.
- if ($line =~ /^.\s*(struct\s+mutex|spinlock_t)\s+\S+;/ ||
- $line =~ /^.\s*(DEFINE_MUTEX)\s*\(/) {
- my $which = $1;
- if (!ctx_has_comment($first_line, $linenr)) {
- CHK("UNCOMMENTED_DEFINITION",
- "$1 definition without comment\n" . $herecurr);
- }
- }
-# check for memory barriers without a comment.
- if ($line =~ /\b(mb|rmb|wmb|read_barrier_depends|smp_mb|smp_rmb|smp_wmb|smp_read_barrier_depends)\(/) {
- if (!ctx_has_comment($first_line, $linenr)) {
- WARN("MEMORY_BARRIER",
- "memory barrier without comment\n" . $herecurr);
- }
- }
-# check of hardware specific defines
- if ($line =~ m@^.\s*\#\s*if.*\b(__i386__|__powerpc64__|__sun__|__s390x__)\b@ && $realfile !~ m@include/asm-@) {
- CHK("ARCH_DEFINES",
- "architecture specific defines should be avoided\n" . $herecurr);
- }
-
-# Check that the storage class is at the beginning of a declaration
- if ($line =~ /\b$Storage\b/ && $line !~ /^.\s*$Storage\b/) {
- WARN("STORAGE_CLASS",
- "storage class should be at the beginning of the declaration\n" . $herecurr)
- }
-
-# check the location of the inline attribute, that it is between
-# storage class and type.
- if ($line =~ /\b$Type\s+$Inline\b/ ||
- $line =~ /\b$Inline\s+$Storage\b/) {
- ERROR("INLINE_LOCATION",
- "inline keyword should sit between storage class and type\n" . $herecurr);
- }
-
-# Check for __inline__ and __inline, prefer inline
- if ($realfile !~ m@\binclude/uapi/@ &&
- $line =~ /\b(__inline__|__inline)\b/) {
- if (WARN("INLINE",
- "plain inline is preferred over $1\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/\b(__inline__|__inline)\b/inline/;
-
- }
- }
-
-# Check for __attribute__ packed, prefer __packed
- if ($realfile !~ m@\binclude/uapi/@ &&
- $line =~ /\b__attribute__\s*\(\s*\(.*\bpacked\b/) {
- WARN("PREFER_PACKED",
- "__packed is preferred over __attribute__((packed))\n" . $herecurr);
- }
-
-# Check for __attribute__ aligned, prefer __aligned
- if ($realfile !~ m@\binclude/uapi/@ &&
- $line =~ /\b__attribute__\s*\(\s*\(.*aligned/) {
- WARN("PREFER_ALIGNED",
- "__aligned(size) is preferred over __attribute__((aligned(size)))\n" . $herecurr);
- }
-
-# Check for __attribute__ format(printf, prefer __printf
- if ($realfile !~ m@\binclude/uapi/@ &&
- $line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf/) {
- if (WARN("PREFER_PRINTF",
- "__printf(string-index, first-to-check) is preferred over __attribute__((format(printf, string-index, first-to-check)))\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/\b__attribute__\s*\(\s*\(\s*format\s*\(\s*printf\s*,\s*(.*)\)\s*\)\s*\)/"__printf(" . trim($1) . ")"/ex;
-
- }
- }
-
-# Check for __attribute__ format(scanf, prefer __scanf
- if ($realfile !~ m@\binclude/uapi/@ &&
- $line =~ /\b__attribute__\s*\(\s*\(\s*format\s*\(\s*scanf\b/) {
- if (WARN("PREFER_SCANF",
- "__scanf(string-index, first-to-check) is preferred over __attribute__((format(scanf, string-index, first-to-check)))\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/\b__attribute__\s*\(\s*\(\s*format\s*\(\s*scanf\s*,\s*(.*)\)\s*\)\s*\)/"__scanf(" . trim($1) . ")"/ex;
- }
- }
-
-# check for sizeof(&)
- if ($line =~ /\bsizeof\s*\(\s*\&/) {
- WARN("SIZEOF_ADDRESS",
- "sizeof(& should be avoided\n" . $herecurr);
- }
-
-# check for sizeof without parenthesis
- if ($line =~ /\bsizeof\s+((?:\*\s*|)$Lval|$Type(?:\s+$Lval|))/) {
- if (WARN("SIZEOF_PARENTHESIS",
- "sizeof $1 should be sizeof($1)\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/\bsizeof\s+((?:\*\s*|)$Lval|$Type(?:\s+$Lval|))/"sizeof(" . trim($1) . ")"/ex;
- }
- }
-
-# check for line continuations in quoted strings with odd counts of "
- if ($rawline =~ /\\$/ && $rawline =~ tr/"/"/ % 2) {
- WARN("LINE_CONTINUATIONS",
- "Avoid line continuations in quoted strings\n" . $herecurr);
- }
-
-# check for struct spinlock declarations
- if ($line =~ /^.\s*\bstruct\s+spinlock\s+\w+\s*;/) {
- WARN("USE_SPINLOCK_T",
- "struct spinlock should be spinlock_t\n" . $herecurr);
- }
-
-# check for seq_printf uses that could be seq_puts
- if ($sline =~ /\bseq_printf\s*\(.*"\s*\)\s*;\s*$/) {
- my $fmt = get_quoted_string($line, $rawline);
- if ($fmt ne "" && $fmt !~ /[^\\]\%/) {
- if (WARN("PREFER_SEQ_PUTS",
- "Prefer seq_puts to seq_printf\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/\bseq_printf\b/seq_puts/;
- }
- }
- }
-
-# Check for misused memsets
- if ($^V && $^V ge 5.10.0 &&
- defined $stat &&
- $stat =~ /^\+(?:.*?)\bmemset\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\,\s*$FuncArg\s*\)/s) {
-
- my $ms_addr = $2;
- my $ms_val = $7;
- my $ms_size = $12;
-
- if ($ms_size =~ /^(0x|)0$/i) {
- ERROR("MEMSET",
- "memset to 0's uses 0 as the 2nd argument, not the 3rd\n" . "$here\n$stat\n");
- } elsif ($ms_size =~ /^(0x|)1$/i) {
- WARN("MEMSET",
- "single byte memset is suspicious. Swapped 2nd/3rd argument?\n" . "$here\n$stat\n");
- }
- }
-
-# typecasts on min/max could be min_t/max_t
- if ($^V && $^V ge 5.10.0 &&
- defined $stat &&
- $stat =~ /^\+(?:.*?)\b(min|max)\s*\(\s*$FuncArg\s*,\s*$FuncArg\s*\)/) {
- if (defined $2 || defined $7) {
- my $call = $1;
- my $cast1 = deparenthesize($2);
- my $arg1 = $3;
- my $cast2 = deparenthesize($7);
- my $arg2 = $8;
- my $cast;
-
- if ($cast1 ne "" && $cast2 ne "" && $cast1 ne $cast2) {
- $cast = "$cast1 or $cast2";
- } elsif ($cast1 ne "") {
- $cast = $cast1;
- } else {
- $cast = $cast2;
- }
- WARN("MINMAX",
- "$call() should probably be ${call}_t($cast, $arg1, $arg2)\n" . "$here\n$stat\n");
- }
- }
-
-# check usleep_range arguments
- if ($^V && $^V ge 5.10.0 &&
- defined $stat &&
- $stat =~ /^\+(?:.*?)\busleep_range\s*\(\s*($FuncArg)\s*,\s*($FuncArg)\s*\)/) {
- my $min = $1;
- my $max = $7;
- if ($min eq $max) {
- WARN("USLEEP_RANGE",
- "usleep_range should not use min == max args; see Documentation/timers/timers-howto.txt\n" . "$here\n$stat\n");
- } elsif ($min =~ /^\d+$/ && $max =~ /^\d+$/ &&
- $min > $max) {
- WARN("USLEEP_RANGE",
- "usleep_range args reversed, use min then max; see Documentation/timers/timers-howto.txt\n" . "$here\n$stat\n");
- }
- }
-
-# check for naked sscanf
- if ($^V && $^V ge 5.10.0 &&
- defined $stat &&
- $line =~ /\bsscanf\b/ &&
- ($stat !~ /$Ident\s*=\s*sscanf\s*$balanced_parens/ &&
- $stat !~ /\bsscanf\s*$balanced_parens\s*(?:$Compare)/ &&
- $stat !~ /(?:$Compare)\s*\bsscanf\s*$balanced_parens/)) {
- my $lc = $stat =~ tr@\n@@;
- $lc = $lc + $linenr;
- my $stat_real = raw_line($linenr, 0);
- for (my $count = $linenr + 1; $count <= $lc; $count++) {
- $stat_real = $stat_real . "\n" . raw_line($count, 0);
- }
- WARN("NAKED_SSCANF",
- "unchecked sscanf return value\n" . "$here\n$stat_real\n");
- }
-
-# check for simple sscanf that should be kstrto<foo>
- if ($^V && $^V ge 5.10.0 &&
- defined $stat &&
- $line =~ /\bsscanf\b/) {
- my $lc = $stat =~ tr@\n@@;
- $lc = $lc + $linenr;
- my $stat_real = raw_line($linenr, 0);
- for (my $count = $linenr + 1; $count <= $lc; $count++) {
- $stat_real = $stat_real . "\n" . raw_line($count, 0);
- }
- if ($stat_real =~ /\bsscanf\b\s*\(\s*$FuncArg\s*,\s*("[^"]+")/) {
- my $format = $6;
- my $count = $format =~ tr@%@%@;
- if ($count == 1 &&
- $format =~ /^"\%(?i:ll[udxi]|[udxi]ll|ll|[hl]h?[udxi]|[udxi][hl]h?|[hl]h?|[udxi])"$/) {
- WARN("SSCANF_TO_KSTRTO",
- "Prefer kstrto<type> to single variable sscanf\n" . "$here\n$stat_real\n");
- }
- }
- }
-
-# check for new externs in .h files.
- if ($realfile =~ /\.h$/ &&
- $line =~ /^\+\s*(extern\s+)$Type\s*$Ident\s*\(/s) {
- if (CHK("AVOID_EXTERNS",
- "extern prototypes should be avoided in .h files\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/(.*)\bextern\b\s*(.*)/$1$2/;
- }
- }
-
-# check for new externs in .c files.
- if ($realfile =~ /\.c$/ && defined $stat &&
- $stat =~ /^.\s*(?:extern\s+)?$Type\s+($Ident)(\s*)\(/s)
- {
- my $function_name = $1;
- my $paren_space = $2;
-
- my $s = $stat;
- if (defined $cond) {
- substr($s, 0, length($cond), '');
- }
- if ($s =~ /^\s*;/ &&
- $function_name ne 'uninitialized_var')
- {
- WARN("AVOID_EXTERNS",
- "externs should be avoided in .c files\n" . $herecurr);
- }
-
- if ($paren_space =~ /\n/) {
- WARN("FUNCTION_ARGUMENTS",
- "arguments for function declarations should follow identifier\n" . $herecurr);
- }
-
- } elsif ($realfile =~ /\.c$/ && defined $stat &&
- $stat =~ /^.\s*extern\s+/)
- {
- WARN("AVOID_EXTERNS",
- "externs should be avoided in .c files\n" . $herecurr);
- }
-
-# check for pointless casting of kmalloc return
- if ($line =~ /\*\s*\)\s*[kv][czm]alloc(_node){0,1}\b/) {
- WARN("UNNECESSARY_CASTS",
- "unnecessary cast may hide bugs, see http://c-faq.com/malloc/mallocnocast.html\n" . $herecurr);
- }
-
-# alloc style
-# p = alloc(sizeof(struct foo), ...) should be p = alloc(sizeof(*p), ...)
- if ($^V && $^V ge 5.10.0 &&
- $line =~ /\b($Lval)\s*\=\s*(?:$balanced_parens)?\s*([kv][mz]alloc(?:_node)?)\s*\(\s*(sizeof\s*\(\s*struct\s+$Lval\s*\))/) {
- CHK("ALLOC_SIZEOF_STRUCT",
- "Prefer $3(sizeof(*$1)...) over $3($4...)\n" . $herecurr);
- }
-
-# check for multiple semicolons
- if ($line =~ /;\s*;\s*$/) {
- if (WARN("ONE_SEMICOLON",
- "Statements terminations use 1 semicolon\n" . $herecurr) &&
- $fix) {
- $fixed[$linenr - 1] =~ s/(\s*;\s*){2,}$/;/g;
- }
- }
-
-# check for case / default statements not preceeded by break/fallthrough/switch
- if ($line =~ /^.\s*(?:case\s+(?:$Ident|$Constant)\s*|default):/) {
- my $has_break = 0;
- my $has_statement = 0;
- my $count = 0;
- my $prevline = $linenr;
- while ($prevline > 1 && $count < 3 && !$has_break) {
- $prevline--;
- my $rline = $rawlines[$prevline - 1];
- my $fline = $lines[$prevline - 1];
- last if ($fline =~ /^\@\@/);
- next if ($fline =~ /^\-/);
- next if ($fline =~ /^.(?:\s*(?:case\s+(?:$Ident|$Constant)[\s$;]*|default):[\s$;]*)*$/);
- $has_break = 1 if ($rline =~ /fall[\s_-]*(through|thru)/i);
- next if ($fline =~ /^.[\s$;]*$/);
- $has_statement = 1;
- $count++;
- $has_break = 1 if ($fline =~ /\bswitch\b|\b(?:break\s*;[\s$;]*$|return\b|goto\b|continue\b)/);
- }
- if (!$has_break && $has_statement) {
- WARN("MISSING_BREAK",
- "Possible switch case/default not preceeded by break or fallthrough comment\n" . $herecurr);
- }
- }
-
-# check for switch/default statements without a break;
- if ($^V && $^V ge 5.10.0 &&
- defined $stat &&
- $stat =~ /^\+[$;\s]*(?:case[$;\s]+\w+[$;\s]*:[$;\s]*|)*[$;\s]*\bdefault[$;\s]*:[$;\s]*;/g) {
- my $ctx = '';
- my $herectx = $here . "\n";
- my $cnt = statement_rawlines($stat);
- for (my $n = 0; $n < $cnt; $n++) {
- $herectx .= raw_line($linenr, $n) . "\n";
- }
- WARN("DEFAULT_NO_BREAK",
- "switch default: should use break\n" . $herectx);
- }
-
-# check for comparisons against true and false
- if ($line =~ /\+\s*(.*?)\b(true|false|$Lval)\s*(==|\!=)\s*(true|false|$Lval)\b(.*)$/i) {
- my $lead = $1;
- my $arg = $2;
- my $test = $3;
- my $otype = $4;
- my $trail = $5;
- my $op = "!";
-
- ($arg, $otype) = ($otype, $arg) if ($arg =~ /^(?:true|false)$/i);
-
- my $type = lc($otype);
- if ($type =~ /^(?:true|false)$/) {
- if (("$test" eq "==" && "$type" eq "true") ||
- ("$test" eq "!=" && "$type" eq "false")) {
- $op = "";
- }
-
- CHK("BOOL_COMPARISON",
- "Using comparison to $otype is error prone\n" . $herecurr);
- }
- }
-
-# check for semaphores initialized locked
- if ($line =~ /^.\s*sema_init.+,\W?0\W?\)/) {
- WARN("CONSIDER_COMPLETION",
- "consider using a completion\n" . $herecurr);
- }
-
-# check for %L{u,d,i} in strings
- my $string;
- while ($line =~ /(?:^|")([X\t]*)(?:"|$)/g) {
- $string = substr($rawline, $-[1], $+[1] - $-[1]);
- $string =~ s/%%/__/g;
- if ($string =~ /(?<!%)%L[udi]/) {
- WARN("PRINTF_L",
- "\%Ld/%Lu are not-standard C, use %lld/%llu\n" . $herecurr);
- last;
- }
- }
-
-
-# Mode permission misuses where it seems decimal should be octal
-# This uses a shortcut match to avoid unnecessary uses of a slow foreach loop
- if ($^V && $^V ge 5.10.0 &&
- $line =~ /$mode_perms_search/) {
- foreach my $entry (@mode_permission_funcs) {
- my $func = $entry->[0];
- my $arg_pos = $entry->[1];
-
- my $skip_args = "";
- if ($arg_pos > 1) {
- $arg_pos--;
- $skip_args = "(?:\\s*$FuncArg\\s*,\\s*){$arg_pos,$arg_pos}";
- }
- my $test = "\\b$func\\s*\\(${skip_args}([\\d]+)\\s*[,\\)]";
- if ($line =~ /$test/) {
- my $val = $1;
- $val = $6 if ($skip_args ne "");
-
- if ($val !~ /^0$/ &&
- (($val =~ /^$Int$/ && $val !~ /^$Octal$/) ||
- length($val) ne 4)) {
- ERROR("NON_OCTAL_PERMISSIONS",
- "Use 4 digit octal (0777) not decimal permissions\n" . $herecurr);
- }
- }
- }
- }
- }
-
- # If we have no input at all, then there is nothing to report on
- # so just keep quiet.
- if ($#rawlines == -1) {
- exit(0);
- }
-
- # In mailback mode only produce a report in the negative, for
- # things that appear to be patches.
- if ($mailback && ($clean == 1 || !$is_patch)) {
- exit(0);
- }
-
- # This is not a patch, and we are are in 'no-patch' mode so
- # just keep quiet.
- if (!$chk_patch && !$is_patch) {
- exit(0);
- }
-
- if (!$is_patch) {
- ERROR("NOT_UNIFIED_DIFF",
- "Does not appear to be a unified-diff format patch\n");
- }
- if ($is_patch && $subject_trailing_dot != 0) {
- ERROR("SUBJECT_TRAILING_DOT",
- "The subject of the patch should not end with a dot.\n");
- }
- if ($is_patch && $chk_signoff && $signoff == 0) {
- ERROR("MISSING_SIGN_OFF",
- "Missing Signed-off-by: line(s)\n");
- }
-
- print report_dump();
- if ($summary && !($clean == 1 && $quiet == 1)) {
- print "$filename " if ($summary_file);
- if ($cnt_error > 0) {
- print "Patch not according to coding guidelines! please fix.\n";
- print "total: $cnt_error errors, $cnt_warn warnings, " .
- (($check)? "$cnt_chk checks, " : "") .
- "$cnt_lines lines checked\n"; exit 1;
- } else {
- print "total: $cnt_warn warnings, " .
- (($check)? "$cnt_chk checks, " : "") .
- "$cnt_lines lines checked\n";
- print "Patch found to have warnings, please fix if necessary.\n" if ($cnt_warn > 0);
- exit 2;
- }
- print "\n" if ($quiet == 0);
- }
-
- if ($quiet == 0) {
-
- if ($^V lt 5.10.0) {
- print("NOTE: perl $^V is not modern enough to detect all possible issues.\n");
- print("An upgrade to at least perl v5.10.0 is suggested.\n\n");
- }
-
- # If there were whitespace errors which cleanpatch can fix
- # then suggest that.
- if ($rpt_cleaners) {
- print "NOTE: whitespace errors detected, you may wish to use scripts/cleanpatch or\n";
- print " scripts/cleanfile\n\n";
- $rpt_cleaners = 0;
- }
- }
-
- hash_show_words(\%use_type, "Used");
- hash_show_words(\%ignore_type, "Ignored");
-
- if ($clean == 0 && $fix && "@rawlines" ne "@fixed") {
- my $newfile = $filename;
- $newfile .= ".EXPERIMENTAL-checkpatch-fixes" if (!$fix_inplace);
- my $linecount = 0;
- my $f;
-
- open($f, '>', $newfile)
- or die "$P: Can't open $newfile for write\n";
- foreach my $fixed_line (@fixed) {
- $linecount++;
- if ($file) {
- if ($linecount > 3) {
- $fixed_line =~ s/^\+//;
- print $f $fixed_line. "\n";
- }
- } else {
- print $f $fixed_line . "\n";
- }
- }
- close($f);
-
- if (!$quiet) {
- print << "EOM";
-Wrote EXPERIMENTAL --fix correction(s) to '$newfile'
-
-Do _NOT_ trust the results written to this file.
-Do _NOT_ submit these changes without inspecting them for correctness.
-
-This EXPERIMENTAL file is simply a convenience to help rewrite patches.
-No warranties, expressed or implied...
-EOM
- }
- }
-
- if ($clean == 1 && $quiet == 0) {
- print "$vname has no obvious style problems and is ready for submission.\n"
- }
- if ($clean == 0 && $quiet == 0) {
- print << "EOM";
-$vname has style problems, please review.
-
-If any of these errors are false positives, please report
-them to the maintainer, see MAINTAINERS
-EOM
- }
-
- return $clean;
-}
diff --git a/extras/clang-checker.sh b/extras/clang-checker.sh
new file mode 100755
index 00000000000..4909d3adfcd
--- /dev/null
+++ b/extras/clang-checker.sh
@@ -0,0 +1,301 @@
+#!/usr/bin/env bash
+#*******************************************************************************
+# *
+# Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com> *
+# This file is part of GlusterFS. *
+# *
+# This file is licensed to you under your choice of the GNU Lesser *
+# General Public License, version 3 or any later version (LGPLv3 or *
+# later), or the GNU General Public License, version 2 (GPLv2), in all *
+# cases as published by the Free Software Foundation. *
+#------------------------------------------------------------------------------*
+# *
+# clang-checker.sh: This script runs the clang static analyzer through        *
+#                   'scan-build', a Perl wrapper. After you commit your       *
+#                   patch, i.e. right before executing rfc.sh to push the     *
+#                   patch to the repository, it is recommended that you       *
+#                   run clang-checker.sh to perform static analysis and       *
+#                   check whether the code has any possible bugs.             *
+#                                                                             *
+#                   The script performs the static analysis both with and     *
+#                   without the HEAD commit, runs the analyzer only in the    *
+#                   directories where changes have been made, and finally     *
+#                   diffs the number of bugs between the two cases (with      *
+#                   and without your commit) and gives a summary that         *
+#                   explains the eligibility of your patch.                   *
+# *
+# Usage: $ cd $PATH_TO_GLUSTERFS *
+# $ extras/clang-checker.sh (or) $ make clang-check *
+# *
+# Author: Prasanna Kumar Kalever <prasanna.kalever@redhat.com> *
+# *
+#*******************************************************************************
+
+REPORTS_DIR=$(pwd)
+BASELINE_DIR=${REPORTS_DIR}/baseline
+BRESULTS_DIR=${BASELINE_DIR}/results
+BBACKUP_DIR=${BASELINE_DIR}/backup
+TARGET_DIR=${REPORTS_DIR}/target
+TRESULTS_DIR=${TARGET_DIR}/results
+TBACKUP_DIR=${TARGET_DIR}/backup
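+# scan-build drops its HTML reports under the */results directories; reports
+# from a previous run are rotated into */backup by run_scanbuild().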
+
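+# DICT_B / DICT_T map a bug-category name to its count for the baseline run
+# (without the HEAD commit) and the target run (with the HEAD commit); ARR
+# collects the directories touched by the commit and FILES the subset of
+# those directories that actually contain a Makefile.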
+declare -A DICT_B
+declare -A DICT_T
+declare -A ARR
+declare -A FILES
+
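+# Parse 'git show --name-status' for the HEAD commit and fill FILES with the
+# modified/added directories that contain a Makefile, skipping 'doc' and the
+# top-level directory.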
+function identify_changes () {
+ MODIFIED_DATA=$(git show --name-status --oneline | tail -n +2)
+ FLAG=0
+ for i in ${MODIFIED_DATA}; do
+ if [ $FLAG -eq 1 ]; then
+ ARR+="$(dirname $i) ";
+ FLAG=0;
+ fi
+ if [ $i = 'M' ] || [ $i = 'A' ]; then
+ FLAG=1;
+ fi
+ done
+
+ MODIFIED_DIR=$(echo "${ARR[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' ')
+ for i in $MODIFIED_DIR; do
+        # run only in directories which have a Makefile
+        if [ $(find ./$i -iname "makefile*" | wc -c) -gt 0 ]; then
+            # skip the 'doc' and '.' (top-level) directories
+ if [ "xx$i" != "xxdoc" ] && [ "xx$i" != "xx." ]; then
+ FILES+="$i "
+ fi
+ fi
+ done
+    if [ -z "${FILES}" ]; then
+        echo "Probably no changes were made to C files"
+ exit;
+ fi
+}
+
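+# Bail out early, with installation hints, if clang or scan-build is missing.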
+function check_prerequisites () {
+ if ! type "clang" 2> /dev/null; then
+ echo -e "\ntry after installing clang and scan-build..."
+ echo "useful info at http://clang-analyzer.llvm.org/installation.html\n"
+ echo -e "hint: 'dnf -y install clang-analyzer.noarch'\n"
+ exit 1;
+ elif ! type "scan-build" 2> /dev/null; then
+ echo -e "\ntry after installing scan-build..."
+ echo "useful info at http://clang-analyzer.llvm.org/installation.html"
+ echo -e "hint: 'dnf -y install clang-analyzer.noarch'\n"
+ exit 1;
+ fi
+}
+
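+# Signal handler active while HEAD is temporarily reset to HEAD^: re-apply the
+# saved patch so the tree is not left one commit behind, then clean up.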
+function force_terminate () {
+ echo -e "\nreceived a signal to force terminate ..\n"
+ git am --abort 2> /dev/null
+ git am ${PATCH_NAME}
+ rm -f ${REPORTS_DIR}/${PATCH_NAME}
+ exit 1;
+}
+
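+# Run scan-build over the touched directories twice: once on HEAD^ (baseline)
+# and once with the commit re-applied (target). The commit is saved with
+# 'git format-patch' and restored with 'git am' once the baseline run is done.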
+function run_scanbuild () {
+ local CLANG=$(which clang)
+ local SCAN_BUILD=$(which scan-build)
+ local ORIG_COMMIT=$(git rev-parse --verify HEAD^)
+ PATCH_NAME=$(git format-patch $ORIG_COMMIT)
+
+ echo -e "\n| Performing clang analysis on:" \
+ "$(git log --pretty=format:"%h - '%s' by %an" -1) ... |\n"
+ echo -e "Changes are identified in '${FILES[@]}' directorie[s]\n"
+
+ if [ -d "${BRESULTS_DIR}" ]; then
+ mkdir -p ${BBACKUP_DIR} ${TBACKUP_DIR}
+ mv ${BRESULTS_DIR} \
+ ${BBACKUP_DIR}/results_$(ls -l ${BBACKUP_DIR} | wc -l)
+ mv ${TRESULTS_DIR} \
+ ${TBACKUP_DIR}/results_$(ls -l ${TBACKUP_DIR} | wc -l)
+ fi
+ for DIR in ${FILES[@]}; do
+ mkdir -p ${BRESULTS_DIR}/$(echo ${DIR} | sed 's/\//_/g')
+ mkdir -p ${TRESULTS_DIR}/$(echo ${DIR} | sed 's/\//_/g')
+ done
+ # get nproc info
+ case $(uname -s) in
+ 'Linux')
+ local NPROC=$(getconf _NPROCESSORS_ONLN)
+ ;;
+ 'NetBSD')
+ local NPROC=$(getconf NPROCESSORS_ONLN)
+ ;;
+ esac
+
+ trap force_terminate INT TERM QUIT EXIT
+
+ git reset --hard HEAD^
+
+    # build the complete source tree for the sake of dependencies
+ echo -e "\n# make -j${NPROC} ..."
+ make -j${NPROC} 1>/dev/null
+
+ for DIR in ${FILES[@]}; do
+        if [ $(find ./${DIR} -iname "makefile*" | wc -c) -gt 0 ]; then
+ make clean -C ${DIR} 1>/dev/null
+ echo -e "\n| Analyzing ${DIR} without commit ... |\n"
+            # run the analyzer only in the directories where changes were made
+ ${SCAN_BUILD} -o ${BRESULTS_DIR}/$(echo ${DIR} | sed 's/\//_/g') \
+ --use-analyzer=${CLANG} make -j${NPROC} -C ${DIR}
+ fi
+ done
+
+ echo -e "\n| Analyzing without commit complete ... |\n"
+
+ git am ${PATCH_NAME}
+ trap - INT TERM QUIT EXIT
+
+    # In case the commit changes the configure/build machinery, regenerate and rebuild
+ echo -e "\n# make clean ..."
+ make clean 1>/dev/null
+ echo -e "\n# ./autogen.sh && ./configure --with-previous-options ..."
+ ${REPORTS_DIR}/autogen.sh 2>/dev/null
+ ${REPORTS_DIR}/configure --with-previous-options 1>/dev/null
+ echo -e "\n# make -j${NPROC} ..."
+ make -j${NPROC} 1>/dev/null
+
+ for DIR in ${FILES[@]}; do
+        if [ $(find ./${DIR} -iname "makefile*" | wc -c) -gt 0 ]; then
+ make clean -C ${DIR} 1>/dev/null
+ echo -e "\n| Analyzing ${DIR} with commit ... |\n"
+ # run only in directory where changes are made
+ ${SCAN_BUILD} -o ${TRESULTS_DIR}/$(echo ${DIR} | sed 's/\//_/g') \
+ --use-analyzer=${CLANG} make -j${NPROC} -C ${DIR}
+ fi
+ done
+
+ echo -e "\n| Analyzing with commit complete ... |\n"
+
+ rm -f ${REPORTS_DIR}/${PATCH_NAME}
+}
+
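+# Parse the scan-build index.html summary tables (SUMM_DESC rows) produced by
+# the baseline run and aggregate the per-bug-type counts into DICT_B.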
+function count_for_baseline () {
+ for DIR in ${FILES[@]}; do
+ HTMLS_DIR=${BRESULTS_DIR}/$(echo ${DIR} |
+ sed 's/\//_/g')/$(ls ${BRESULTS_DIR}/$(echo ${DIR} |
+ sed 's/\//_/g')/);
+
+ local NAMES_OF_BUGS_B=$(grep -n "SUMM_DESC" ${HTMLS_DIR}/index.html |
+ cut -d"<" -f3 | cut -d">" -f2 |
+ sed 's/[^a-zA-Z0]/_/g' | tr '\n' ' ')
+ local NO_OF_BUGS_B=$(grep -n "SUMM_DESC" ${HTMLS_DIR}/index.html |
+ cut -d"<" -f5 | cut -d">" -f2 | tr '\n' ' ')
+ local count_B=0;
+
+ read -a BUG_NAME_B <<<$NAMES_OF_BUGS_B
+ read -a BUG_COUNT_B <<<$NO_OF_BUGS_B
+ for i in ${BUG_NAME_B[@]};
+ do
+ if [ ! -z ${DICT_B[$i]} ]; then
+ DICT_B[$i]=$(expr ${BUG_COUNT_B[count_B]} + ${DICT_B[$i]});
+ else
+ DICT_B+=([$i]=${BUG_COUNT_B[count_B]});
+ fi
+ count_B=$(expr $count_B + 1)
+ done
+ done
+
+ echo -e "\nBASELINE BUGS LIST (before applying patch):"
+ echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+ for key_B in ${!DICT_B[@]}; do
+ echo "${key_B} --> ${DICT_B[${key_B}]}" | sed 's/_/ /g' | tr -s ' '
+ done
+}
+
+function count_for_target () {
+ for DIR in ${FILES[@]}; do
+ HTMLS_DIR=${TRESULTS_DIR}/$(echo ${DIR} |
+ sed 's/\//_/g')/$(ls ${TRESULTS_DIR}/$(echo ${DIR} |
+ sed 's/\//_/g')/);
+
+ local NAME_OF_BUGS_T=$(grep -n "SUMM_DESC" ${HTMLS_DIR}/index.html |
+ cut -d"<" -f3 | cut -d">" -f2 |
+ sed 's/[^a-zA-Z0]/_/g'| tr '\n' ' ')
+ local NO_OF_BUGS_T=$(grep -n "SUMM_DESC" ${HTMLS_DIR}/index.html |
+ cut -d"<" -f5 | cut -d">" -f2 | tr '\n' ' ')
+ local count_T=0;
+
+ read -a BUG_NAME_T <<<$NAME_OF_BUGS_T
+ read -a BUG_COUNT_T <<<$NO_OF_BUGS_T
+
+ for i in ${BUG_NAME_T[@]};
+ do
+ if [ ! -z ${DICT_T[$i]} ]; then
+ DICT_T[$i]=$(expr ${BUG_COUNT_T[count_T]} + ${DICT_T[$i]});
+ else
+ DICT_T+=([$i]=${BUG_COUNT_T[count_T]});
+ fi
+ count_T=$(expr $count_T + 1)
+ done
+ done
+
+ echo -e "\nTARGET BUGS LIST (after applying patch):"
+ echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+ for key_T in ${!DICT_T[@]}; do
+ echo "${key_T} --> ${DICT_T[${key_T}]}" | sed 's/_/ /g' | tr -s ' '
+ done
+}
+
+function array_contains () {
+ local SEEKING=$1; shift
+ local IN=1
+ for ELEMENT; do
+ if [[ $ELEMENT == $SEEKING ]]; then
+ IN=0
+ break
+ fi
+ done
+ return $IN
+}
+
+function main () {
+ echo -e "\n================ Clang analyzer in progress ================\n"
+ check_prerequisites
+ identify_changes
+ run_scanbuild
+ clear
+ count_for_baseline
+ count_for_target
+ echo -e "\nSUMMARY OF CLANG-ANALYZER:"
+ echo "~~~~~~~~~~~~~~~~~~~~~~~~~~"
+
+ FLAG=0
+ for BUG in ${!DICT_T[@]}; do
+ array_contains $BUG "${!DICT_B[@]}"
+ if [ $? -eq 1 ]; then
+ echo "New ${DICT_T[${BUG}]} Bug[s] introduced: $(echo $BUG |
+ sed 's/_/ /g' |
+ tr -s ' ')"
+ FLAG=1
+ else
+ if [ ${BUG} != "All_Bugs" ]; then
+ if [ ${DICT_B[${BUG}]} -lt \
+ ${DICT_T[${BUG}]} ]; then
+ echo "Extra $(expr ${DICT_T[${BUG}]} - \
+ ${DICT_B[${BUG}]}) Bug[s] Introduced in: $(echo $BUG |
+ sed 's/_/ /g' | tr -s ' ')"
+ FLAG=1
+ fi
+ fi
+ fi
+ done
+
+ echo
+ if [ $FLAG -eq 0 ]; then
+ echo -e "Patch Value given by Clang analyzer '+1'\n"
+ else
+ echo -e "Patch Value given by Clang analyzer '-1'\n"
+ fi
+ echo -e "\nExplore complete results at:"
+ find ${BRESULTS_DIR}/ -iname "index.html"
+ find ${TRESULTS_DIR}/ -iname "index.html"
+ echo -e "\n================= Done with Clang Analysis =================\n"
+
+ exit ${FLAG}
+}
+
+main
diff --git a/extras/cliutils/Makefile.am b/extras/cliutils/Makefile.am
new file mode 100644
index 00000000000..7039703e275
--- /dev/null
+++ b/extras/cliutils/Makefile.am
@@ -0,0 +1,4 @@
+EXTRA_DIST= cliutils.py __init__.py
+
+cliutilsdir = @BUILD_PYTHON_SITE_PACKAGES@/gluster/cliutils
+cliutils_PYTHON = cliutils.py __init__.py
diff --git a/extras/cliutils/README.md b/extras/cliutils/README.md
new file mode 100644
index 00000000000..309beb1ca25
--- /dev/null
+++ b/extras/cliutils/README.md
@@ -0,0 +1,233 @@
+# CLI utility for creating cluster-aware CLI tools for Gluster
+cliutils is a Python library which provides a wrapper around the `gluster
+system:: execute` command to extend the functionality of Gluster.
+
+Example use cases:
+- Start a service in all peer nodes of Cluster
+- Collect the status of a service from all peer nodes
+- Collect the config values from each peer node and display the latest
+  config based on version.
+- Copy a file present in GLUSTERD_WORKDIR from one peer node to all
+  other peer nodes. (Geo-replication create push-pem uses this to
+  distribute the SSH public keys from all master nodes to all slave
+  nodes.)
+- Generate pem keys on all peer nodes and collect all the public keys
+  in one place. (Geo-replication gsec_create does this.)
+- Provide Config sync CLIs for new features like `gluster-eventsapi`,
+ `gluster-restapi`, `gluster-mountbroker` etc.
+
+## Introduction
+
+If an executable file (filename starting with `peer_`) is present in the
+`$GLUSTER_LIBEXEC` directory on all peer nodes, then it can be executed
+by running the `gluster system:: execute` command from any one peer node.
+
+- This command will not copy any executables to peer nodes; the script
+  should exist on all peer nodes to use this infrastructure. An error is
+  raised if the script does not exist on any one of the peer nodes.
+- The filename should start with `peer_` and the file should exist in the
+  `$GLUSTER_LIBEXEC` directory.
+- This command cannot be called from outside the cluster.
+
+To understand the functionality, create an executable file `peer_hello`
+under the `$GLUSTER_LIBEXEC` directory and copy it to all peer nodes.
+
+ #!/usr/bin/env bash
+ echo "Hello from $(gluster system:: uuid get)"
+
+Now run the following command from any one gluster node,
+
+ gluster system:: execute hello
+
+**Note:** Gluster will not copy the executable script to all nodes;
+  copy the `peer_hello` script to all peer nodes to use the `gluster
+  system:: execute` infrastructure.
+
+It runs the `peer_hello` executable on all peer nodes and shows the
+output from each node (the example below shows the output from a
+two-node cluster):
+
+ Hello from UUID: e7a3c5c8-e7ad-47ad-aa9c-c13907c4da84
+ Hello from UUID: c680fc0a-01f9-4c93-a062-df91cc02e40f
+
+## cliutils
+A Python wrapper around the `gluster system:: execute` command is created
+to address the following issues:
+
+- If a node is down in the cluster, `system:: execute` just skips it
+  and runs only on the nodes that are up.
+- `system:: execute` commands are not user friendly.
+- It captures only stdout, so handling errors is tricky.
+
+**Advantages of cliutils:**
+
+- A single executable file acts as the node component as well as the user CLI.
+- The `execute_in_peers` utility function merges the `gluster system::
+  execute` output with `gluster peer status` to identify offline nodes.
+- Easy CLI arguments handling.
+- If a node component returns a non-zero value, `gluster system::
+  execute` fails to aggregate the output from the other nodes. The
+  `node_output_ok` and `node_output_notok` utility functions return zero
+  in both the success and error cases, but emit JSON with `ok: true` or
+  `ok: false` respectively.
+- Easy to iterate over the node outputs.
+- Better error handling - the Geo-rep CLIs `gluster system:: execute
+  mountbroker`, `gluster system:: execute gsec_create` and `gluster
+  system:: add_secret_pub` suffer from poor error handling. These tools
+  do not notify the user about failures during execute or about a node
+  being down during execute.
+
+### Hello World
+Create a file in `$LIBEXEC/glusterfs/peer_message.py` with following
+content.
+
+ #!/usr/bin/python3
+ from gluster.cliutils import Cmd, runcli, execute_in_peers, node_output_ok
+
+ class NodeHello(Cmd):
+ name = "node-hello"
+
+ def run(self, args):
+ node_output_ok("Hello")
+
+ class Hello(Cmd):
+ name = "hello"
+
+ def run(self, args):
+ out = execute_in_peers("node-hello")
+ for row in out:
+ print ("{0} from {1}".format(row.output, row.hostname))
+
+ if __name__ == "__main__":
+ runcli()
+
+When we run `python peer_message.py`, it will have two subcommands,
+"node-hello" and "hello". This file should be copied to the
+`$LIBEXEC/glusterfs` directory on all peer nodes. The user calls the
+subcommand "hello" from any one peer node, which internally calls
+`gluster system:: execute message.py node-hello` (this runs on all peer
+nodes and collects the outputs).
+
+In the node component, do not print the output directly; use the
+`node_output_ok` or `node_output_notok` functions. `node_output_ok`
+additionally collects the node UUID and prints it in JSON
+format. The `execute_in_peers` function will collect this output and
+merge it with `peers list` so that we don't miss the node information if
+that node is offline.
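+
+For example, a node component that can fail might report errors like
+this (a minimal sketch; `do_something()` is a hypothetical helper
+standing in for the node's real work):
+
+    #!/usr/bin/python3
+    from gluster.cliutils import Cmd, runcli, node_output_ok, node_output_notok
+
+    class NodeWork(Cmd):
+        name = "node-work"
+
+        def run(self, args):
+            try:
+                result = do_something()   # hypothetical helper
+                node_output_ok(result)
+            except Exception as err:
+                # Also exits with return code zero, so `gluster system::
+                # execute` can still aggregate outputs; "ok" will be false.
+                node_output_notok(str(err))
+
+    if __name__ == "__main__":
+        runcli()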
+
+As you may have observed already, the `args` method is optional; if a
+subcommand does not take arguments, there is no need to define it. When
+we run the file, we will have two subcommands. For example,
+
+ python peer_message.py hello
+ python peer_message.py node-hello
+
+First subcommand calls second subcommand in all peer nodes. Basically
+`execute_in_peers(NAME, ARGS)` will be converted into
+
+    CMD_NAME = FILENAME without "peer_"
+ gluster system:: execute <CMD_NAME> <SUBCOMMAND> <ARGS>
+
+In our example,
+
+ filename = "peer_message.py"
+ cmd_name = "message.py"
+ gluster system:: execute ${cmd_name} node-hello
+
+Now create a symlink in the `/usr/bin` or `/usr/sbin` directory,
+depending on the use case. (This is an optional step for usability.)
+
+ ln -s /usr/libexec/glusterfs/peer_message.py /usr/bin/gluster-message
+
+Now users can use `gluster-message` instead of calling
+`/usr/libexec/glusterfs/peer_message.py`
+
+ gluster-message hello
+
+### Showing CLI output as Table
+
+The following example uses the prettytable library, which can be installed
+using `pip install prettytable` or `dnf install python-prettytable`:
+
+ #!/usr/bin/python3
+ from prettytable import PrettyTable
+ from gluster.cliutils import Cmd, runcli, execute_in_peers, node_output_ok
+
+ class NodeHello(Cmd):
+ name = "node-hello"
+
+ def run(self, args):
+ node_output_ok("Hello")
+
+ class Hello(Cmd):
+ name = "hello"
+
+ def run(self, args):
+ out = execute_in_peers("node-hello")
+ # Initialize the CLI table
+ table = PrettyTable(["ID", "NODE", "NODE STATUS", "MESSAGE"])
+ table.align["NODE STATUS"] = "r"
+ for row in out:
+ table.add_row([row.nodeid,
+ row.hostname,
+ "UP" if row.node_up else "DOWN",
+ row.output if row.ok else row.error])
+
+            print(table)
+
+ if __name__ == "__main__":
+ runcli()
+
+
+Example output,
+
+ +--------------------------------------+-----------+-------------+---------+
+ | ID | NODE | NODE STATUS | MESSAGE |
+ +--------------------------------------+-----------+-------------+---------+
+ | e7a3c5c8-e7ad-47ad-aa9c-c13907c4da84 | localhost | UP | Hello |
+ | bb57a4c4-86eb-4af5-865d-932148c2759b | vm2 | UP | Hello |
+ | f69b918f-1ffa-4fe5-b554-ee10f051294e | vm3 | DOWN | N/A |
+ +--------------------------------------+-----------+-------------+---------+
+
+## How to package in Gluster
+If the project is created in `$GLUSTER_SRC/tools/message`,
+
+add "message" to the SUBDIRS list in `$GLUSTER_SRC/tools/Makefile.am`
+
+and then create a `Makefile.am` in the `$GLUSTER_SRC/tools/message`
+directory with the following content.
+
+ EXTRA_DIST = peer_message.py
+
+ peertoolsdir = $(libexecdir)/glusterfs/
+ peertools_SCRIPTS = peer_message.py
+
+ install-exec-hook:
+ $(mkdir_p) $(DESTDIR)$(bindir)
+ rm -f $(DESTDIR)$(bindir)/gluster-message
+ ln -s $(libexecdir)/glusterfs/peer_message.py \
+ $(DESTDIR)$(bindir)/gluster-message
+
+ uninstall-hook:
+ rm -f $(DESTDIR)$(bindir)/gluster-message
+
+That's all. Add the following files to `glusterfs.spec.in` (under the
+`%files` section) if packaging is required.
+
+ %{_libexecdir}/glusterfs/peer_message.py*
+ %{_bindir}/gluster-message
+
+## Who is using cliutils
+- gluster-mountbroker http://review.gluster.org/14544
+- gluster-eventsapi http://review.gluster.org/14248
+- gluster-georep-sshkey http://review.gluster.org/14732
+- gluster-restapi https://github.com/gluster/restapi
+
+## Limitations/TODOs
+- Not yet possible to create a CLI without any subcommand, for example
+  `gluster-message` without any arguments.
+- Hiding node subcommands in `--help` (`gluster-message --help` will
+  show all subcommands, including node subcommands).
+- Only positional arguments are supported for node arguments; optional
+  arguments can be used for other commands.
+- API documentation
diff --git a/extras/cliutils/__init__.py b/extras/cliutils/__init__.py
new file mode 100644
index 00000000000..8765cc85099
--- /dev/null
+++ b/extras/cliutils/__init__.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# Reexporting the utility funcs and classes
+from .cliutils import (runcli,
+ sync_file_to_peers,
+ execute_in_peers,
+ execute,
+ node_output_ok,
+ node_output_notok,
+ output_error,
+ oknotok,
+ yesno,
+ get_node_uuid,
+ Cmd,
+ GlusterCmdException,
+ set_common_args_func)
+
+
+# This will be useful when `from cliutils import *`
+__all__ = ["runcli",
+ "sync_file_to_peers",
+ "execute_in_peers",
+ "execute",
+ "node_output_ok",
+ "node_output_notok",
+ "output_error",
+ "oknotok",
+ "yesno",
+ "get_node_uuid",
+ "Cmd",
+ "GlusterCmdException",
+ "set_common_args_func"]
diff --git a/extras/cliutils/cliutils.py b/extras/cliutils/cliutils.py
new file mode 100644
index 00000000000..55fbaf56704
--- /dev/null
+++ b/extras/cliutils/cliutils.py
@@ -0,0 +1,237 @@
+# -*- coding: utf-8 -*-
+from __future__ import print_function
+from argparse import ArgumentParser, RawDescriptionHelpFormatter
+import inspect
+import subprocess
+import os
+import xml.etree.cElementTree as etree
+import json
+import sys
+
+MY_UUID = None
+parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
+ description=__doc__)
+subparsers = parser.add_subparsers(dest="mode")
+
+subcommands = {}
+cache_data = {}
+ParseError = etree.ParseError if hasattr(etree, 'ParseError') else SyntaxError
+_common_args_func = lambda p: True
+
+
+class GlusterCmdException(Exception):
+ def __init__(self, message):
+ self.message = message
+ try:
+ # Python 3
+ super().__init__(message)
+ except TypeError:
+ # Python 2
+ super(GlusterCmdException, self).__init__(message)
+
+
+def get_node_uuid():
+    # Caches the node UUID in a global variable; the
+    # gluster system:: uuid get command is executed only
+    # the first time this function is called
+ global MY_UUID
+ if MY_UUID is not None:
+ return MY_UUID
+
+ cmd = ["gluster", "system::", "uuid", "get", "--xml"]
+ rc, out, err = execute(cmd)
+
+ if rc != 0:
+ return None
+
+ tree = etree.fromstring(out)
+ uuid_el = tree.find("uuidGenerate/uuid")
+ MY_UUID = uuid_el.text
+ return MY_UUID
+
+
+def yesno(flag):
+ return "Yes" if flag else "No"
+
+
+def oknotok(flag):
+ return "OK" if flag else "NOT OK"
+
+
+def output_error(message, errcode=1):
+ print (message, file=sys.stderr)
+ sys.exit(errcode)
+
+
+def node_output_ok(message=""):
+ # Prints Success JSON output and exits with returncode zero
+ out = {"ok": True, "nodeid": get_node_uuid(), "output": message}
+ print (json.dumps(out))
+ sys.exit(0)
+
+
+def node_output_notok(message):
+ # Prints Error JSON output and exits with returncode zero
+ out = {"ok": False, "nodeid": get_node_uuid(), "error": message}
+ print (json.dumps(out))
+ sys.exit(0)
+
+
+def execute(cmd):
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ universal_newlines=True)
+ out, err = p.communicate()
+ return p.returncode, out, err
+
+
+def get_pool_list():
+ cmd = ["gluster", "--mode=script", "pool", "list", "--xml"]
+ rc, out, err = execute(cmd)
+ if rc != 0:
+ output_error("Failed to get Pool Info: {0}".format(err))
+
+ tree = etree.fromstring(out)
+
+ pool = []
+ try:
+ for p in tree.findall('peerStatus/peer'):
+ pool.append({"nodeid": p.find("uuid").text,
+ "hostname": p.find("hostname").text,
+ "connected": (True if p.find("connected").text == "1"
+ else False)})
+ except (ParseError, AttributeError, ValueError) as e:
+ output_error("Failed to parse Pool Info: {0}".format(e))
+
+ return pool
+
+
+class NodeOutput(object):
+ def __init__(self, **kwargs):
+ self.nodeid = kwargs.get("nodeid", "")
+ self.hostname = kwargs.get("hostname", "")
+ self.node_up = kwargs.get("node_up", False)
+ self.ok = kwargs.get("ok", False)
+ self.output = kwargs.get("output", "N/A")
+ self.error = kwargs.get("error", "N/A")
+
+
+def execute_in_peers(name, args=[]):
+    # Get the file name of the caller. If the file name is peer_example.py
+    # then the Gluster peer command will be gluster system:: execute example.py
+    # (the command name is the file name without the peer_ prefix)
+ frame = inspect.stack()[1]
+ module = inspect.getmodule(frame[0])
+ actual_file = module.__file__
+ # If file is symlink then find actual file
+ if os.path.islink(actual_file):
+ actual_file = os.readlink(actual_file)
+
+ # Get the name of file without peer_
+ cmd_name = os.path.basename(actual_file).replace("peer_", "")
+ cmd = ["gluster", "system::", "execute", cmd_name, name] + args
+ rc, out, err = execute(cmd)
+ if rc != 0:
+ raise GlusterCmdException((rc, out, err, " ".join(cmd)))
+
+ out = out.strip().splitlines()
+
+ # JSON decode each line and construct one object with node id as key
+ all_nodes_data = {}
+ for node_data in out:
+ data = json.loads(node_data)
+ all_nodes_data[data["nodeid"]] = {
+ "nodeid": data.get("nodeid"),
+ "ok": data.get("ok"),
+ "output": data.get("output", ""),
+ "error": data.get("error", "")}
+
+ # gluster pool list
+ pool_list = get_pool_list()
+
+ data_out = []
+ # Iterate pool_list and merge all_nodes_data collected above
+ # If a peer node is down then set node_up = False
+ for p in pool_list:
+ p_data = all_nodes_data.get(p.get("nodeid"), None)
+ row_data = NodeOutput(node_up=False,
+ hostname=p.get("hostname"),
+ nodeid=p.get("nodeid"),
+ ok=False)
+
+ if p_data is not None:
+ # Node is UP
+ row_data.node_up = True
+ row_data.ok = p_data.get("ok")
+ row_data.output = p_data.get("output")
+ row_data.error = p_data.get("error")
+
+ data_out.append(row_data)
+
+ return data_out
+
+
+def sync_file_to_peers(fname):
+ # Copy file from current node to all peer nodes, fname
+ # is path after GLUSTERD_WORKDIR
+ cmd = ["gluster", "system::", "copy", "file", fname]
+ rc, out, err = execute(cmd)
+ if rc != 0:
+ raise GlusterCmdException((rc, out, err))
+
+
+class Cmd(object):
+ name = ""
+
+ def run(self, args):
+        # Required method. Raise NotImplementedError if the derived class
+        # has not implemented this method
+ raise NotImplementedError("\"run(self, args)\" method is "
+ "not implemented by \"{0}\"".format(
+ self.__class__.__name__))
+
+
+def runcli():
+ # Get list of Classes derived from class "Cmd" and create
+ # a subcommand as specified in the Class name. Call the args
+ # method by passing subcommand parser, Derived class can add
+ # arguments to the subcommand parser.
+ metavar_data = []
+ for c in Cmd.__subclasses__():
+ cls = c()
+ if getattr(cls, "name", "") == "":
+ raise NotImplementedError("\"name\" is not added "
+ "to \"{0}\"".format(
+ cls.__class__.__name__))
+
+ # Do not show in help message if subcommand starts with node-
+ if not cls.name.startswith("node-"):
+ metavar_data.append(cls.name)
+
+ p = subparsers.add_parser(cls.name)
+ args_func = getattr(cls, "args", None)
+ if args_func is not None:
+ args_func(p)
+
+ # Apply common args if any
+ _common_args_func(p)
+
+ # A dict to save subcommands, key is name of the subcommand
+ subcommands[cls.name] = cls
+
+ # Hide node commands in Help message
+ subparsers.metavar = "{" + ",".join(metavar_data) + "}"
+
+ # Get all parsed arguments
+ args = parser.parse_args()
+
+ # Get the subcommand to execute
+ cls = subcommands.get(args.mode, None)
+
+ # Run
+ if cls is not None:
+ cls.run(args)
+
+
+def set_common_args_func(func):
+ global _common_args_func
+ _common_args_func = func
diff --git a/extras/collect-system-stats.sh b/extras/collect-system-stats.sh
new file mode 100755
index 00000000000..865e70bbc11
--- /dev/null
+++ b/extras/collect-system-stats.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+################################################################################
+# Usage: collect-system-stats.sh <delay-in-seconds>
+# This script starts sar/top/iostat/vmstat processes which collect system stats
+# at the interval <delay-in-seconds> given as an argument to the script. When
+# the script is stopped, either by entering any input or by Ctrl+C, the list of
+# files where the output is captured is printed on the screen; these files can
+# be examined to find any problems/bottlenecks.
+###############################################################################
+
+function stop_processes {
+ echo "Stopping the monitoring processes"
+ echo "sar pid:$sar_pid", "top pid: $top_pid", "iostat pid: $iostat_pid", "vmstat pid: $vmstat_pid"
+ kill "$sar_pid" "$top_pid" "$iostat_pid" "$vmstat_pid"
+ echo "Files created: ${timestamp}-network.out, ${timestamp}-top.out, ${timestamp}-iostat.out, ${timestamp}-vmstat.out"
+}
+
+function check_dependent_commands_exist()
+{
+ declare -a arr=("sar" "top" "iostat" "vmstat")
+ for i in "${arr[@]}"
+ do
+ if ! command -v "$i" > /dev/null 2>&1
+ then
+ echo "ERROR: '$i' command is not found"
+ exit 1
+ fi
+ done
+
+}
+
+case "$1" in
+ ''|*[!0-9]*) echo "Usage: $0 <delay-between-successive-metrics-collection-in-seconds>"; exit 1 ;;
+ *) interval="$1" ;;
+esac
+
+timestamp=$(date +"%s")
+
+check_dependent_commands_exist
+sar -n DEV "$interval" > "${timestamp}"-network.out &
+sar_pid="$!"
+top -bHd "$interval" > "${timestamp}"-top.out &
+top_pid="$!"
+iostat -Ntkdx "$interval" > "${timestamp}"-iostat.out &
+iostat_pid="$!"
+vmstat -t "$interval" > "${timestamp}"-vmstat.out &
+vmstat_pid="$!"
+echo "Started sar, vmstat, iostat, top for collecting stats"
+
+
+trap stop_processes EXIT
+read -r -p "Press anything and ENTER to exit";
diff --git a/extras/command-completion/gluster.bash b/extras/command-completion/gluster.bash
index 680ecd964d5..73d16098875 100644
--- a/extras/command-completion/gluster.bash
+++ b/extras/command-completion/gluster.bash
@@ -26,28 +26,28 @@ GLUSTER_TOP_SUBOPTIONS2="
"
GLUSTER_TOP_OPTIONS="
{open
- [ $TOP_SUBOPTIONS1 ]
+ [ $GLUSTER_TOP_SUBOPTIONS1 ]
},
{read
- [ $TOP_SUBOPTIONS1 ]
+ [ $GLUSTER_TOP_SUBOPTIONS1 ]
},
{write
- [ $TOP_SUBOPTIONS1 ]
+ [ $GLUSTER_TOP_SUBOPTIONS1 ]
},
{opendir
- [ $TOP_SUBOPTIONS1 ]
+ [ $GLUSTER_TOP_SUBOPTIONS1 ]
},
{readdir
- [ $TOP_SUBOPTIONS1 ]
+ [ $GLUSTER_TOP_SUBOPTIONS1 ]
},
{clear
- [ $TOP_SUBOPTIONS1 ]
+ [ $GLUSTER_TOP_SUBOPTIONS1 ]
},
{read-perf
- [ $TOP_SUBOPTIONS2 ]
+ [ $GLUSTER_TOP_SUBOPTIONS2 ]
},
{write-perf
- [ $TOP_SUBOPTIONS2 ]
+ [ $GLUSTER_TOP_SUBOPTIONS2 ]
}
"
@@ -282,16 +282,16 @@ _gluster_throw () {
exit
}
-declare FINAL_LIST=''
-declare LIST=''
-declare -i TOP=0
+declare GLUSTER_FINAL_LIST=''
+declare GLUSTER_LIST=''
+declare -i GLUSTER_TOP=0
_gluster_push () {
- TOP=$((TOP + 1))
- return $TOP
+ GLUSTER_TOP=$((GLUSTER_TOP + 1))
+ return $GLUSTER_TOP
}
_gluster_pop () {
- TOP=$((TOP - 1))
- return $TOP
+ GLUSTER_TOP=$((GLUSTER_TOP - 1))
+ return $GLUSTER_TOP
}
_gluster_goto_end ()
@@ -333,7 +333,7 @@ _gluster_form_list ()
top=$?
read -r key
if [ "X$cur_word" == "X" -o "${cur_word:0:1}" == "${key:0:1}" -o "${key:0:1}" == "_" ]; then
- LIST="$LIST $key"
+ GLUSTER_LIST="$GLUSTER_LIST $key"
fi
_gluster_goto_end $top
@@ -452,10 +452,10 @@ _gluster_parse ()
elif [ "$token" == '{' ]; then
read -r tmp_token
- LIST="$tmp_token"
+ GLUSTER_LIST="$tmp_token"
fi
- echo $LIST
+ echo $GLUSTER_LIST
}
_gluster_handle_list ()
@@ -479,12 +479,12 @@ _gluster_handle_list ()
_gluster_completion ()
{
- FINAL_LIST=`echo $GLUSTER_COMMAND_TREE | \
+ GLUSTER_FINAL_LIST=`echo $GLUSTER_COMMAND_TREE | \
egrep -ao --color=never "([A-Za-z0-9_.-]+)|[[:space:]]+|." | \
egrep -v --color=never "^[[:space:]]*$" | \
_gluster_parse`
- ARG="FINAL_LIST"
+ ARG="GLUSTER_FINAL_LIST"
_gluster_handle_list $ARG ${COMP_WORDS[COMP_CWORD]}
return
}
diff --git a/extras/control-cpu-load.sh b/extras/control-cpu-load.sh
new file mode 100755
index 00000000000..52dcf62fd9f
--- /dev/null
+++ b/extras/control-cpu-load.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+USAGE="This script provides a utility to control CPU utilization for any
+gluster daemon.In this, we use cgroup framework to configure CPU quota
+for a process(like selfheal daemon). Before running this script, make
+sure that daemon is running.Every time daemon restarts, it is required
+to rerun this command to set CPU quota on new daemon process id.
+User can enter any value between 10 to 100 for CPU quota.
+Recommended value of quota period is 25. 25 means, kernel will allocate
+25 ms period to this group of tasks in every 100 ms period. This 25ms
+could be considered as the maximum percentage of CPU quota daemon can take.
+This value will be reflected on CPU usage of "top" command.If provided pid
+is the only process and no other process is in competition to get CPU, more
+ than 25% could be allocated to daemon to speed up the process."
+
+if [ $# -ge 1 ]; then
+ case $1 in
+ -h|--help) echo " " "$USAGE" | sed -r -e 's/^[ ]+//g'
+ exit 0;
+ ;;
+        *) echo "Please provide correct input to the script."
+           echo "For help, the correct options are -h or --help."
+ exit 1;
+ ;;
+ esac
+fi
+
+DIR_EXIST=0
+LOC="/sys/fs/cgroup/cpu,cpuacct/system.slice/glusterd.service"
+echo "Enter gluster daemon pid for which you want to control CPU."
+read daemon_pid
+
+if expr ${daemon_pid} + 0 > /dev/null 2>&1 ;then
+ CHECK_PID=$(pgrep -f gluster | grep ${daemon_pid})
+ if [ -z "${CHECK_PID}" ]; then
+    echo "No daemon is running or pid ${daemon_pid} does not match"
+    echo "any running gluster process."
+ exit 1
+ fi
+else
+  echo "Entered daemon_pid is not numeric, so rerun the script."
+ exit 1
+fi
+
+
+if [ -f ${LOC}/tasks ];then
+ CHECK_CGROUP=$(grep ${daemon_pid} ${LOC}/tasks)
+ if [ ${CHECK_CGROUP} ]; then
+ echo "pid ${daemon_pid} is attached with glusterd.service cgroup."
+ fi
+fi
+
+cgroup_name=cgroup_gluster_${daemon_pid}
+if [ -f ${LOC}/${cgroup_name}/tasks ]; then
+ CHECK_CGROUP=$(grep ${daemon_pid} ${LOC}/${cgroup_name}/tasks)
+ if [ ${CHECK_CGROUP} ]; then
+ val=`cat ${LOC}/${cgroup_name}/cpu.cfs_quota_us`
+ qval=$((val / 1000))
+ echo "pid ${daemon_pid} is already attached ${cgroup_name} with quota value ${qval}."
+ echo "Press n if you don't want to reassign ${daemon_pid} with new quota value."
+ DIR_EXIST=1
+ else
+ echo "pid ${daemon_pid} is not attached with ${cgroup_name}."
+ fi
+fi
+
+read -p "If you want to continue the script to attach ${daemon_pid} with new ${cgroup_name} cgroup Press (y/n)?" choice
+case "$choice" in
+ y|Y ) echo "yes";;
+ n|N ) echo "no";exit;;
+ * ) echo "invalid";exit;;
+esac
+
+systemctl set-property glusterd.service CPUShares=1024
+
+if [ ${DIR_EXIST} -eq 0 ];then
+ echo "Creating child cgroup directory '${cgroup_name} cgroup' for glusterd.service."
+ mkdir -p ${LOC}/${cgroup_name}
+ if [ ! -f ${LOC}/${cgroup_name}/tasks ];then
+ echo "Not able to create ${cgroup_name} directory so exit."
+ exit 1
+ fi
+fi
+
+echo "Enter quota value in range [10,100]: "
+
+read quota_value
+if expr ${quota_value} + 0 > /dev/null 2>&1 ;then
+ if [ ${quota_value} -lt 10 ] || [ ${quota_value} -gt 100 ]; then
+    echo "Entered quota value is not correct; it should be in the range"
+    echo "10-100. The ideal value is 25."
+    echo "Rerun the script with a correct value."
+ exit 1
+ else
+ echo "Entered quota value is $quota_value"
+ fi
+else
+  echo "Entered quota value is not numeric, so rerun the script."
+ exit 1
+fi
+
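+# Convert the quota percentage into microseconds for cpu.cfs_quota_us
+# (measured against the default 100000 us cfs period, i.e. 100 ms).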
+quota_value=$((quota_value * 1000))
+echo "Setting $quota_value to cpu.cfs_quota_us for gluster_cgroup."
+echo ${quota_value} > ${LOC}/${cgroup_name}/cpu.cfs_quota_us
+
+if ps -T -p ${daemon_pid} | grep gluster > /dev/null; then
+ for thid in `ps -T -p ${daemon_pid} | grep -v SPID | awk -F " " '{print $2}'`;
+ do
+ echo ${thid} > ${LOC}/${cgroup_name}/tasks ;
+ done
+ if cat /proc/${daemon_pid}/cgroup | grep -w ${cgroup_name} > /dev/null; then
+    echo "Tasks of ${daemon_pid} are attached successfully to ${cgroup_name}."
+ else
+ echo "Tasks are not attached successfully."
+ fi
+fi
diff --git a/extras/control-mem.sh b/extras/control-mem.sh
new file mode 100755
index 00000000000..91b36f8107a
--- /dev/null
+++ b/extras/control-mem.sh
@@ -0,0 +1,128 @@
+#!/bin/bash
+
+USAGE="This commands provides a utility to control MEMORY utilization for any
+gluster daemon.In this, we use cgroup framework to configure MEMORY limit for
+a process. Before running this script, make sure that daemon is running.Every
+time daemon restarts, it is required to rerun this command to set memory limit
+(in bytes) on new daemon process id.User can enter any value between 100
+(in Mega bytes) to 8000000000000 for Memory limit in Mega bytes.
+Memory limit value is depends on how much maximum memory user wants to restrict
+for specific daemon process.If a process will try to consume memore more than
+configured value then cgroup will hang/sleep this task and to resume the task
+rerun the script with new increase memory limit value ."
+
+if [ $# -ge 1 ]; then
+ case $1 in
+ -h|--help) echo " " "$USAGE" | sed -r -e 's/^[ ]+//g'
+ exit 0;
+ ;;
+       *) echo "Please provide correct input to the script."
+          echo "For help, the correct options are -h or --help."
+ exit 1;
+ ;;
+ esac
+fi
+
+DIR_EXIST=0
+LOC="/sys/fs/cgroup/memory/system.slice/glusterd.service"
+echo "Enter Any gluster daemon pid for that you want to control MEMORY."
+read daemon_pid
+
+if expr ${daemon_pid} + 0 > /dev/null 2>&1 ;then
+ CHECK_PID=$(pgrep -f gluster | grep ${daemon_pid})
+ if [ -z "${CHECK_PID}" ]; then
+    echo "No daemon is running or pid ${daemon_pid} does not match"
+    echo "any running gluster process."
+ exit 1
+ fi
+else
+  echo "Entered daemon_pid is not numeric, so rerun the script."
+ exit 1
+fi
+
+
+if [ -f ${LOC}/tasks ]; then
+ CHECK_CGROUP=$(grep ${daemon_pid} ${LOC}/tasks)
+ if [ ${CHECK_CGROUP} ] ;then
+ echo "pid ${daemon_pid} is attached with default glusterd.service cgroup."
+ fi
+fi
+
+cgroup_name=cgroup_gluster_${daemon_pid}
+if [ -f ${LOC}/${cgroup_name}/tasks ];then
+ CHECK_CGROUP=$(grep ${daemon_pid} ${LOC}/${cgroup_name}/tasks)
+ if [ ${CHECK_CGROUP} ]; then
+ val=`cat ${LOC}/${cgroup_name}/memory.limit_in_bytes`
+ mval=$((val / 1024 / 1024))
+ echo "pid ${daemon_pid} is already attached ${cgroup_name} with mem value ${mval}."
+ echo "Press n if you don't want to reassign ${daemon_pid} with new mem value."
+ DIR_EXIST=1
+ else
+ echo "pid ${daemon_pid} is not attached with ${cgroup_name}."
+ fi
+fi
+
+read -p "If you want to continue the script to attach daeomon with new cgroup. Press (y/n)?" choice
+case "$choice" in
+ y|Y ) echo "yes";;
+ n|N ) echo "no";exit;;
+ * ) echo "invalid";exit;;
+esac
+
+systemctl set-property glusterd.service CPUShares=1024
+
+if [ ${DIR_EXIST} -eq 0 ];then
+ echo "Creating child cgroup directory '${cgroup_name} cgroup' for glusterd.service."
+ mkdir -p ${LOC}/${cgroup_name}
+ if [ ! -f ${LOC}/${cgroup_name}/tasks ];then
+ echo "Not able to create ${LOC}/${cgroup_name} directory so exit."
+ exit 1
+ fi
+fi
+
+echo "Enter Memory value in Mega bytes [100,8000000000000]: "
+
+read mem_value
+if expr ${mem_value} + 0 > /dev/null 2>&1 ;then
+ if [ ${mem_value} -lt 100 ] || [ ${mem_value} -gt 8000000000000 ]; then
+    echo "Entered memory value is not correct; it should be in the range"
+    echo "100-8000000000000. Rerun the script with a correct value."
+ exit 1
+ else
+ echo "Entered memory limit value is ${mem_value}."
+ fi
+else
+  echo "Entered memory value is not numeric, so rerun the script."
+ exit 1
+fi
+
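+# Convert the user-entered value from megabytes to bytes before writing it
+# to the cgroup's memory.limit_in_bytes.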
+mem_value=$(($mem_value * 1024 * 1024))
+if [ ${DIR_EXIST} -eq 0 ];then
+ echo "Setting ${mem_value} to memory.limit_in_bytes for ${LOC}/${cgroup_name}."
+ echo ${mem_value} > ${LOC}/${cgroup_name}/memory.limit_in_bytes
+ #Set memory value to memory.memsw.limit_in_bytes
+ echo ${mem_value} > ${LOC}/${cgroup_name}/memory.memsw.limit_in_bytes
+ # disable oom_control so that kernel will not send kill signal to the
+ # task once limit has reached
+ echo 1 > ${LOC}/${cgroup_name}/memory.oom_control
+else
+ #Increase mem_value to memory.memsw.limit_in_bytes
+ echo ${mem_value} > ${LOC}/${cgroup_name}/memory.memsw.limit_in_bytes
+ echo "Increase ${mem_value} to memory.limit_in_bytes for ${LOC}/${cgroup_name}."
+ echo ${mem_value} > ${LOC}/${cgroup_name}/memory.limit_in_bytes
+ # disable oom_control so that kernel will not send kill signal to the
+ # task once limit has reached
+ echo 1 > ${LOC}/${cgroup_name}/memory.oom_control
+fi
+
+if ps -T -p ${daemon_pid} | grep gluster > /dev/null; then
+ for thid in `ps -T -p ${daemon_pid} | grep -v SPID | awk -F " " '{print $2}'`;
+ do
+ echo ${thid} > ${LOC}/${cgroup_name}/tasks ;
+ done
+ if cat /proc/${daemon_pid}/cgroup | grep -iw ${cgroup_name} > /dev/null; then
+    echo "Tasks of ${daemon_pid} are attached successfully to ${cgroup_name}."
+ else
+ echo "Tasks are not attached successfully."
+ fi
+fi
diff --git a/extras/create_new_xlator/generate_xlator.py b/extras/create_new_xlator/generate_xlator.py
index dd45b1ef55e..983868c04db 100755
--- a/extras/create_new_xlator/generate_xlator.py
+++ b/extras/create_new_xlator/generate_xlator.py
@@ -1,4 +1,6 @@
-#!/usr/bin/python
+#!/usr/bin/python3
+
+from __future__ import print_function
import os
import re
import sys
@@ -11,11 +13,12 @@ from generator import ops, xlator_cbks, xlator_dumpops
MAKEFILE_FMT = """
xlator_LTLIBRARIES = @XL_NAME@.la
xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/@XL_TYPE@
-@XL_NAME_NO_HYPHEN@_la_LDFLAGS = -module -avoid-version
+@XL_NAME_NO_HYPHEN@_la_LDFLAGS = -module $(GF_XLATOR_DEFAULT_LDFLAGS)
@XL_NAME_NO_HYPHEN@_la_SOURCES = @XL_NAME@.c
@XL_NAME_NO_HYPHEN@_la_LIBADD = $(top_builddir)/libglusterfs/src/libglusterfs.la
noinst_HEADERS = @XL_NAME@.h @XL_NAME@-mem-types.h @XL_NAME@-messages.h
-AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src
+AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src
AM_CFLAGS = -Wall -fno-strict-aliasing $(GF_CFLAGS)
CLEANFILES =
"""
@@ -33,11 +36,11 @@ def get_error_arg(type_str):
def get_param(names, types):
# Convert two separate tuples to one of (name, type) sub-tuples.
- as_tuples = zip(types, names)
+ as_tuples = list(zip(types, names))
# Convert each sub-tuple into a "type name" string.
- as_strings = map(string.join, as_tuples)
+ as_strings = [' '.join(item) for item in as_tuples]
# Join all of those into one big string.
- return string.join(as_strings, ",\n\t")
+ return ',\n\t'.join(as_strings)
def generate(tmpl, name, table):
@@ -53,18 +56,18 @@ def generate(tmpl, name, table):
sdict = {}
#Parameters are (t1, var1), (t2, var2)...
#Args are (var1, var2,...)
- sdict["@WIND_ARGS@"] = string.join(w_arg_names, ", ")
- sdict["@UNWIND_ARGS@"] = string.join(u_arg_names, ", ")
- sdict["@ERROR_ARGS@"] = string.join(map(get_error_arg, u_arg_types), ", ")
+ sdict["@WIND_ARGS@"] = ', '.join(w_arg_names)
+ sdict["@UNWIND_ARGS@"] = ', '.join(u_arg_names)
+ sdict["@ERROR_ARGS@"] = ', '.join(list(map(get_error_arg, u_arg_types)))
sdict["@WIND_PARAMS@"] = get_param(w_arg_names, w_arg_types)
sdict["@UNWIND_PARAMS@"] = get_param(u_arg_names, u_arg_types)
sdict["@FUNC_PARAMS@"] = get_param(fn_arg_names, fn_arg_types)
sdict["@NAME@"] = name
sdict["@FOP_PREFIX@"] = fop_prefix
- sdict["@RET_TYPE@"] = string.join(ret_type, "")
- sdict["@RET_VAR@"] = string.join(ret_var, "")
+ sdict["@RET_TYPE@"] = ''.join(ret_type)
+ sdict["@RET_VAR@"] = ''.join(ret_var)
- for old, new in sdict.iteritems():
+ for old, new in sdict.items():
tmpl = tmpl.replace(old, new)
# TBD: reindent/reformat the result for maximum readability.
return tmpl
@@ -73,42 +76,44 @@ def generate(tmpl, name, table):
def gen_xlator():
xl = open(src_dir_path+"/"+xl_name+".c", 'w+')
- print >> xl, COPYRIGHT
- print >> xl, fragments["INCLUDE_IN_SRC_FILE"].replace("@XL_NAME@",
- xl_name)
+ print(COPYRIGHT, file=xl)
+ print(fragments["INCLUDE_IN_SRC_FILE"].replace("@XL_NAME@",
+ xl_name), file=xl)
#Generate cbks and fops
for fop in ops:
- print >> xl, generate(fragments["CBK_TEMPLATE"], fop, ops)
- print >> xl, generate(fragments["FOP_TEMPLATE"], fop, ops)
+ print(generate(fragments["CBK_TEMPLATE"], fop, ops), file=xl)
+ print(generate(fragments["FOP_TEMPLATE"], fop, ops), file=xl)
for cbk in xlator_cbks:
- print >> xl, generate(fragments["FUNC_TEMPLATE"], cbk,
- xlator_cbks)
+ print(generate(fragments["FUNC_TEMPLATE"], cbk,
+ xlator_cbks), file=xl)
for dops in xlator_dumpops:
- print >> xl, generate(fragments["FUNC_TEMPLATE"], dops,
- xlator_dumpops)
-
- print >> xl, fragments["XLATOR_METHODS"]
+ print(generate(fragments["FUNC_TEMPLATE"], dops,
+ xlator_dumpops), file=xl)
#Generate fop table
- print >> xl, "struct xlator_fops fops = {"
+ print("struct xlator_fops fops = {", file=xl)
for fop in ops:
- print >> xl, " .{0:20} = {1}_{2},".format(fop, fop_prefix, fop)
- print >> xl, "};"
+ print(" .{0:20} = {1}_{2},".format(fop, fop_prefix, fop), file=xl)
+ print("};", file=xl)
#Generate xlator_cbks table
- print >> xl, "struct xlator_cbks cbks = {"
+ print("struct xlator_cbks cbks = {", file=xl)
for cbk in xlator_cbks:
- print >> xl, " .{0:20} = {1}_{2},".format(cbk, fop_prefix, cbk)
- print >> xl, "};"
+ print(" .{0:20} = {1}_{2},".format(cbk, fop_prefix, cbk), file=xl)
+ print("};", file=xl)
#Generate xlator_dumpops table
- print >> xl, "struct xlator_dumpops dumpops = {"
+ print("struct xlator_dumpops dumpops = {", file=xl)
for dops in xlator_dumpops:
- print >> xl, " .{0:20} = {1}_{2},".format(dops, fop_prefix, dops)
- print >> xl, "};"
+ print(" .{0:20} = {1}_{2},".format(dops, fop_prefix, dops), file=xl)
+ print("};", file=xl)
+
+ xlator_methods = fragments["XLATOR_METHODS"].replace("@XL_NAME@", xl_name)
+ xlator_methods = xlator_methods.replace("@FOP_PREFIX@", fop_prefix)
+ print(xlator_methods, file=xl)
xl.close()
@@ -121,38 +126,37 @@ def create_dir_struct():
def gen_header_files():
upname = xl_name_no_hyphen.upper()
h = open(src_dir_path+"/"+xl_name+".h", 'w+')
- print >> h, COPYRIGHT
+ print(COPYRIGHT, file=h)
txt = fragments["HEADER_FMT"].replace("@HFL_NAME@", upname)
- txt2 = fragments["INCLUDE_IN_HEADER_FILE"].replace("@XL_NAME@", xl_name)
- txt = txt.replace("@INCLUDE_SECT@",txt2)
- print >> h, txt
+ txt = txt.replace("@XL_NAME@", xl_name)
+ print(txt, file=h)
h.close()
h = open(src_dir_path+"/"+xl_name+"-mem-types.h", 'w+')
- print >> h, COPYRIGHT
- txt = fragments["HEADER_FMT"].replace("@HFL_NAME@", upname+"_MEM_TYPES")
- txt = txt.replace("@INCLUDE_SECT@", '#include "mem-types.h"')
- print >> h, txt
+ print(COPYRIGHT, file=h)
+ txt = fragments["MEM_HEADER_FMT"].replace("@HFL_NAME@", upname+"_MEM_TYPES")
+ txt = txt.replace("@FOP_PREFIX@", fop_prefix)
+ print(txt, file=h)
h.close()
h = open(src_dir_path+"/"+xl_name+"-messages.h", 'w+')
- print >> h, COPYRIGHT
- txt = fragments["HEADER_FMT"].replace("@HFL_NAME@", upname+"_MESSAGES")
- txt = txt.replace("@INCLUDE_SECT@", '')
- print >> h, txt
+ print(COPYRIGHT, file=h)
+ txt = fragments["MSG_HEADER_FMT"].replace("@HFL_NAME@", upname+"_MESSAGES")
+ txt = txt.replace("@FOP_PREFIX@", fop_prefix.upper())
+ print(txt, file=h)
h.close()
def gen_makefiles():
m = open(dir_path+"/Makefile.am", 'w+')
- print >> m, "SUBDIRS = src\n\nCLEANFILES ="
+ print("SUBDIRS = src\n\nCLEANFILES =", file=m)
m.close()
m = open(src_dir_path+"/Makefile.am", 'w+')
txt = MAKEFILE_FMT.replace("@XL_NAME@", xl_name)
txt = txt.replace("@XL_NAME_NO_HYPHEN@", xl_name_no_hyphen)
- txt = txt.replace("@XL_TYPE@",xlator_type)
- print >> m, txt
+ txt = txt.replace("@XL_TYPE@", xlator_type)
+ print(txt, file=m)
m.close()
def get_copyright ():
@@ -165,8 +169,8 @@ def load_fragments ():
cur_value = ""
result = {}
basepath = os.path.abspath(os.path.dirname(__file__))
- fragpath = basepath + "/new-xlator-tmpl.c"
- for line in open(fragpath,"r").readlines():
+ fragpath = basepath + "/new-xlator.c.tmpl"
+ for line in open(fragpath, "r").readlines():
m = pragma_re.search(line)
if m:
if cur_symbol:
@@ -182,7 +186,7 @@ def load_fragments ():
if __name__ == '__main__':
if len(sys.argv) < 3:
- print "USAGE: ./gen_xlator <XLATOR_DIR> <XLATOR_NAME> <FOP_PREFIX>"
+ print("USAGE: ./gen_xlator <XLATOR_DIR> <XLATOR_NAME> <FOP_PREFIX>")
sys.exit(0)
xl_name = sys.argv[2]
diff --git a/extras/create_new_xlator/new-xlator-tmpl.c b/extras/create_new_xlator/new-xlator-tmpl.c
deleted file mode 100644
index ac08f3732a7..00000000000
--- a/extras/create_new_xlator/new-xlator-tmpl.c
+++ /dev/null
@@ -1,89 +0,0 @@
-#pragma fragment CBK_TEMPLATE
-int32_t
-@FOP_PREFIX@_@NAME@_cbk (call_frame_t *frame, void *cookie, xlator_t *this,
- int32_t op_ret, int32_t op_errno, @UNWIND_PARAMS@)
-{
- STACK_UNWIND_STRICT (@NAME@, frame, op_ret, op_errno,
- @UNWIND_ARGS@);
- return 0;
-}
-
-#pragma fragment COMMENT
-If you are generating the leaf xlators, remove the STACK_WIND
-and replace the @ERROR_ARGS@ to @UNWIND_ARGS@ if necessary
-
-#pragma fragment FOP_TEMPLATE
-int32_t
-@FOP_PREFIX@_@NAME@ (call_frame_t *frame, xlator_t *this,
- @WIND_PARAMS@)
-{
- STACK_WIND (frame, @FOP_PREFIX@_@NAME@_cbk,
- FIRST_CHILD(this), FIRST_CHILD(this)->fops->@NAME@,
- @WIND_ARGS@);
- return 0;
-err:
- STACK_UNWIND_STRICT (@NAME@, frame, -1, errno,
- @ERROR_ARGS@);
- return 0;
-}
-
-#pragma fragment FUNC_TEMPLATE
-@RET_TYPE@
-@FOP_PREFIX@_@NAME@ (@FUNC_PARAMS@)
-{
- return @RET_VAR@;
-}
-
-#pragma fragment CP
-/*
- * Copyright (c) @CURRENT_YEAR@ Red Hat, Inc. <http://www.redhat.com>
- * This file is part of GlusterFS.
- *
- * This file is licensed to you under your choice of the GNU Lesser
- * General Public License, version 3 or any later version (LGPLv3 or
- * later), or the GNU General Public License, version 2 (GPLv2), in all
- * cases as published by the Free Software Foundation.
- */
-
-#pragma fragment INCLUDE_IN_SRC_FILE
-#include "@XL_NAME@.h"
-
-#pragma fragment INCLUDE_IN_HEADER_FILE
-#include "@XL_NAME@-mem-types.h"
-#include "@XL_NAME@-messages.h"
-#include "glusterfs.h"
-#include "xlator.h"
-#include "defaults.h"
-
-#pragma fragment XLATOR_METHODS
-int32_t
-init (xlator_t *this)
-{
- return 0;
-}
-
-void
-fini (xlator_t *this)
-{
- return;
-}
-
-int32_t
-reconfigure (xlator_t *this, dict_t *dict)
-{
- return 0;
-}
-
-int
-notify (xlator_t *this, int event, void *data, ...)
-{
- return default_notify (this, event, data);
-}
-
-#pragma fragment HEADER_FMT
-#ifndef __@HFL_NAME@_H__
-#define __@HFL_NAME@_H__
-
-@INCLUDE_SECT@
-
-#endif /* __@HFL_NAME@_H__ */
diff --git a/extras/create_new_xlator/new-xlator.c.tmpl b/extras/create_new_xlator/new-xlator.c.tmpl
new file mode 100644
index 00000000000..fe9735bfcf1
--- /dev/null
+++ b/extras/create_new_xlator/new-xlator.c.tmpl
@@ -0,0 +1,151 @@
+#pragma fragment CBK_TEMPLATE
+int32_t @FOP_PREFIX@_@NAME@_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int32_t op_ret,
+ int32_t op_errno, @UNWIND_PARAMS@)
+{
+ STACK_UNWIND_STRICT(@NAME@, frame, op_ret, op_errno, @UNWIND_ARGS@);
+ return 0;
+}
+
+#pragma fragment COMMENT
+If you are generating the leaf xlators, remove the STACK_WIND and replace the
+ @ERROR_ARGS@ to @UNWIND_ARGS@ if necessary
+
+#pragma fragment FOP_TEMPLATE
+ int32_t @FOP_PREFIX@_@NAME@(call_frame_t *frame, xlator_t *this, @WIND_PARAMS@)
+{
+ STACK_WIND(frame, @FOP_PREFIX@_@NAME@_cbk, FIRST_CHILD(this),
+ FIRST_CHILD(this)->fops->@NAME@, @WIND_ARGS@);
+ return 0;
+err:
+ STACK_UNWIND_STRICT(@NAME@, frame, -1, errno, @ERROR_ARGS@);
+ return 0;
+}
+
+#pragma fragment FUNC_TEMPLATE
+@RET_TYPE@ @FOP_PREFIX@_@NAME@(@FUNC_PARAMS@)
+{
+ return @RET_VAR@;
+}
+
+#pragma fragment CP
+/*
+ * Copyright (c) @CURRENT_YEAR@ Red Hat, Inc. <http://www.redhat.com>
+ * This file is part of GlusterFS.
+ *
+ * This file is licensed to you under your choice of the GNU Lesser
+ * General Public License, version 3 or any later version (LGPLv3 or
+ * later), or the GNU General Public License, version 2 (GPLv2), in all
+ * cases as published by the Free Software Foundation.
+ */
+
+#pragma fragment INCLUDE_IN_SRC_FILE
+#include "@XL_NAME@.h"
+
+#pragma fragment XLATOR_METHODS
+
+static int32_t @FOP_PREFIX@_init(xlator_t *this)
+{
+ return 0;
+}
+
+static void @FOP_PREFIX@_fini(xlator_t *this)
+{
+ return;
+}
+
+static int32_t @FOP_PREFIX@_reconfigure(xlator_t *this, dict_t *dict)
+{
+ return 0;
+}
+
+static int @FOP_PREFIX@_notify(xlator_t *this, int event, void *data, ...)
+{
+ return default_notify(this, event, data);
+}
+
+static int32_t @FOP_PREFIX@_mem_acct_init(xlator_t *this)
+{
+ int ret = -1;
+
+ ret = xlator_mem_acct_init(this, gf_@FOP_PREFIX@_mt_end + 1);
+ return ret;
+}
+
+static int32_t @FOP_PREFIX@_dump_metrics(xlator_t *this, int fd)
+{
+ return 0;
+}
+
+struct volume_options @FOP_PREFIX@_options[] = {
+ /*{ .key = {""},
+ .type = GF_OPTION_TYPE_BOOL,
+ .default_value = "",
+ .op_version = {GD_OP_VERSION_},
+ .flags = OPT_FLAG_SETTABLE | OPT_FLAG_DOC | OPT_FLAG_CLIENT_OPT,
+ .tags = {""},
+ .description = "",
+ .category = GF_EXPERIMENTAL,
+ },
+ { .key = {NULL} },
+ */
+};
+
+xlator_api_t xlator_api = {
+ .init = @FOP_PREFIX@_init,
+ .fini = @FOP_PREFIX@_fini,
+ .notify = @FOP_PREFIX@_notify,
+ .reconfigure = @FOP_PREFIX@_reconfigure,
+ .mem_acct_init = @FOP_PREFIX@_mem_acct_init,
+ .dump_metrics = @FOP_PREFIX@_dump_metrics,
+ .op_version = {GD_OP_VERSION_},
+ .dumpops = &@FOP_PREFIX@_dumpops,
+ .fops = &@FOP_PREFIX@_fops,
+    .cbks = &@FOP_PREFIX@_cbks,
+ .options = @FOP_PREFIX@_options,
+ .identifier = "@XL_NAME@",
+ .category = GF_EXPERIMENTAL,
+};
+#pragma fragment HEADER_FMT
+#ifndef __@HFL_NAME@_H__
+#define __@HFL_NAME@_H__
+
+#include "@XL_NAME@-mem-types.h"
+#include "@XL_NAME@-messages.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/defaults.h>
+
+#endif /* __@HFL_NAME@_H__ */
+
+#pragma fragment MEM_HEADER_FMT
+#ifndef __@HFL_NAME@_H__
+#define __@HFL_NAME@_H__
+
+#include <glusterfs/mem-types.h>
+
+enum gf_@FOP_PREFIX@_mem_types_ {
+ gf_@FOP_PREFIX@_mt_ = gf_common_mt_end + 1,
+ gf_@FOP_PREFIX@_mt_end
+};
+
+#endif /* __@HFL_NAME@_H__ */
+
+#pragma fragment MSG_HEADER_FMT
+#ifndef __@HFL_NAME@_H__
+#define __@HFL_NAME@_H__
+
+#include <glusterfs/glfs-message-id.h>
+
+/* To add new message IDs, append new identifiers at the end of the list.
+ *
+ * Never remove a message ID. If it's not used anymore, you can rename it or
+ * leave it as it is, but not delete it. This is to prevent reutilization of
+ * IDs by other messages.
+ *
+ * The component name must match one of the entries defined in
+ * glfs-message-id.h.
+ */
+
+GLFS_MSGID(@FOP_PREFIX@, @FOP_PREFIX@_MSG_NO_MEMORY);
+
+#endif /* __@HFL_NAME@_H__ */
diff --git a/extras/devel-tools/devel-vagrant/Vagrantfile b/extras/devel-tools/devel-vagrant/Vagrantfile
index 43783e441cb..78dc29bdc68 100644
--- a/extras/devel-tools/devel-vagrant/Vagrantfile
+++ b/extras/devel-tools/devel-vagrant/Vagrantfile
@@ -13,6 +13,8 @@ node_count = 0
disk_count = -1
node_name = "Node"
ipbase="192.168.99."
+source_path = "/source/glusterfs"
+target_path = "/mnt/src"
if ARGV[0] == "up"
environment = open('vagrant_env.conf', 'w')
@@ -40,9 +42,16 @@ if ARGV[0] == "up"
end
end
+ print "\e[1;37mEnter GlusterFS source location? Default: \"#{source_path}\" : \e[32m"
+ tmploc = $stdin.gets.strip.to_s
+ if tmploc != ""
+ source_path = "#{tmploc}"
+ end
+
environment.puts("# BEWARE: Do NOT modify ANY settings in here or your vagrant environment will be messed up")
environment.puts(node_count.to_s)
environment.puts(disk_count.to_s)
+ environment.puts(source_path)
print "\e[32m\nOK I will provision #{node_count} VMs for you and each one will have #{disk_count} disks for bricks\e[37m\n\n"
system "sleep 1"
@@ -52,6 +61,7 @@ else # So that we destroy and can connect to all VMs...
environment.readline # Skip the comment on top
node_count = environment.readline.to_i
disk_count = environment.readline.to_i
+ source_path = environment.readline.gsub(/\s+/, "")
if ARGV[0] != "ssh-config"
puts "Detected settings from previous vagrant up:"
@@ -85,13 +95,17 @@ def attachDisks(numDisk, provider)
$devnamecreated = true
end
+
+$ansivar["src_path"].push "#{source_path}"
+$ansivar["trg_path"].push "#{target_path}"
+
groups = Hash.new{ |hash,key| hash[key] = [] }
groups["origin"].push "#{node_name}1"
groups["all"].push "#{node_name}1"
(2..node_count).each do |num|
- groups["group1"].push "#{node_name}#{num}"
+ $ansivar["peer_nodes"].push "#{node_name}#{num}"
groups["all"].push "#{node_name}#{num}"
end
@@ -100,7 +114,6 @@ hostsFile = "\n"
hostsFile += "#{ipbase}#{( 100 + num).to_s} #{node_name}#{num.to_s}\n"
end
-
Vagrant.configure("2") do |config|
(1..node_count).each do |num|
config.vm.define "#{node_name}#{num}" do |node|
@@ -110,7 +123,7 @@ Vagrant.configure("2") do |config|
node.vm.box_url = box_url
node.vm.hostname = "#{node_name}#{num}"
node.ssh.insert_key = false
- node.vm.synced_folder "/work/source", "/work/source", type: "nfs"
+ node.vm.synced_folder "#{source_path}", "#{target_path}", type: "nfs"
# Define basic config for VM, memory, cpu, storage pool
node.vm.provider "libvirt" do |virt|
@@ -124,7 +137,7 @@ Vagrant.configure("2") do |config|
node.vm.post_up_message = "\e[37mBuilding of this VM is finished \n"
"You can access it now with: \n"
"vagrant ssh #{node_name}#{num.to_s}\n\n"
- "/work/source directory in VM #{node_name}#{num.to_s}"
+ "#{target_path} directory in VM #{node_name}#{num.to_s}"
"is synced with Host machine. \nSo any changes done in this"
"directory will be reflected in the host machine as well\n"
"Beware of this when you delete content from this directory\e[32m"
diff --git a/extras/devel-tools/devel-vagrant/ansible/roles/cluster/tasks/main.yml b/extras/devel-tools/devel-vagrant/ansible/roles/cluster/tasks/main.yml
index 1f1ab6116d6..3306c7a3dc2 100644
--- a/extras/devel-tools/devel-vagrant/ansible/roles/cluster/tasks/main.yml
+++ b/extras/devel-tools/devel-vagrant/ansible/roles/cluster/tasks/main.yml
@@ -1,6 +1,5 @@
---
-- name: Gluster peer probe
+- name: gluster peer probe
shell: gluster peer probe {{ item }}
- with_items: groups ['group1']
-
+ with_items: "{{ peer_nodes | default([]) }}"
diff --git a/extras/devel-tools/devel-vagrant/ansible/roles/compile-gluster/tasks/main.yml b/extras/devel-tools/devel-vagrant/ansible/roles/compile-gluster/tasks/main.yml
index 1807dc05f33..6ee258c7780 100644
--- a/extras/devel-tools/devel-vagrant/ansible/roles/compile-gluster/tasks/main.yml
+++ b/extras/devel-tools/devel-vagrant/ansible/roles/compile-gluster/tasks/main.yml
@@ -1,9 +1,10 @@
---
- name: autogen.sh
- shell: chdir=/work/source/glusterfs ./autogen.sh
+ shell: chdir={{ item }} ./autogen.sh
+ with_items: "{{ trg_path }}"
- name: configure
- shell: chdir=/work/source/glusterfs CFLAGS="-g -O0 -Werror -Wall -Wno-error=cpp -Wno-error=maybe-uninitialized" \
+ shell: chdir={{ item }} CFLAGS="-g -O0 -Werror -Wall -Wno-error=cpp -Wno-error=maybe-uninitialized" \
./configure \
--prefix=/usr \
--exec-prefix=/usr \
@@ -20,7 +21,9 @@
--infodir=/usr/share/info \
--libdir=/usr/lib64 \
--enable-debug
+ with_items: "{{ trg_path }}"
- name: make install
- shell: chdir=/work/source/glusterfs make install
+ shell: chdir={{ item }} make install
+ with_items: "{{ trg_path }}"
diff --git a/extras/devel-tools/devel-vagrant/ansible/setup.yml b/extras/devel-tools/devel-vagrant/ansible/setup.yml
index 764078f3669..c26bd7d6051 100644
--- a/extras/devel-tools/devel-vagrant/ansible/setup.yml
+++ b/extras/devel-tools/devel-vagrant/ansible/setup.yml
@@ -1,6 +1,7 @@
---
- hosts: all
- sudo: true
+ become: yes
+ become_method: sudo
roles:
- install-pkgs
- prepare-brick
@@ -8,13 +9,15 @@
- iptables
- hosts: all
- sudo: true
+ become: yes
+ become_method: sudo
serial: 1
roles:
- compile-gluster
- service
- hosts: origin
- sudo: true
+ become: yes
+ become_method: sudo
roles:
- cluster
diff --git a/extras/devel-tools/print-backtrace.sh b/extras/devel-tools/print-backtrace.sh
new file mode 100755
index 00000000000..33fbae288bc
--- /dev/null
+++ b/extras/devel-tools/print-backtrace.sh
@@ -0,0 +1,115 @@
+#!/bin/bash
+# sample unresolved backtrace lines picked up from a brick log that should go
+# into a backtrace file eg. bt-file.txt:
+# /usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x3ec81)[0x7fe4bc271c81]
+# /usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x3eecd)[0x7fe4bc271ecd]
+# /usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x404cb)[0x7fe4bc2734cb]
+# /usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x3d2b6)[0x7fe4bc2702b6]
+# /usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x3d323)[0x7fe4bc270323]
+#
+# following is the output of the script for the above backtrace lines:
+# /usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x3ec81)[0x7fe4bc271c81] __afr_selfheal_data_finalize_source inlined at /usr/src/debug/glusterfs-3.8.4/xlators/cluster/afr/src/afr-self-heal-data.c:684 in __afr_selfheal_data_prepare /usr/src/debug/glusterfs-3.8.4/xlators/cluster/afr/src/afr-self-heal-data.c:603
+# /usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x3eecd)[0x7fe4bc271ecd] __afr_selfheal_data /usr/src/debug/glusterfs-3.8.4/xlators/cluster/afr/src/afr-self-heal-data.c:740
+# /usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x404cb)[0x7fe4bc2734cb] afr_selfheal_data /usr/src/debug/glusterfs-3.8.4/xlators/cluster/afr/src/afr-self-heal-data.c:883
+# /usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x3d2b6)[0x7fe4bc2702b6] afr_selfheal_do /usr/src/debug/glusterfs-3.8.4/xlators/cluster/afr/src/afr-self-heal-common.c:1968
+# /usr/lib64/glusterfs/3.8.4/xlator/cluster/replicate.so(+0x3d323)[0x7fe4bc270323] afr_selfheal /usr/src/debug/glusterfs-3.8.4/xlators/cluster/afr/src/afr-self-heal-common.c:2015
+#
+# Usage with debuginfo RPM:
+# print-backtrace.sh $HOME/Downloads/glusterfs-debuginfo-3.8.4-10.el7.x86_64.rpm bt-file.txt
+#
+# Usage with source install:
+# print-backtrace.sh none bt-file.txt
+
+function version_compare() { test $(echo $1|awk -F '.' '{print $1 $2 $3}') -gt $(echo $2|awk -F '.' '{print $1 $2 $3}'); }
+
+function Usage()
+{
+ echo -e "Usage:\n\t$0 { none | <debuginfo-rpm> } <backtrace-file>"
+ echo "none: implies we don't have a debuginfo rpm but want to resolve"
+ echo " against a source install which already has the debuginfo"
+ echo " NOTE: in this case you should have configured the build"
+ echo " with --enable-debug and the linker options should"
+ echo " have the option -rdynamic"
+}
+
+debuginfo_rpm=$1
+backtrace_file=$2
+
+if [ ! $debuginfo_rpm ] || [ ! $backtrace_file ]; then
+ Usage
+ exit 1
+fi
+
+if [ $debuginfo_rpm != "none" ]; then
+ if [ ! -f $debuginfo_rpm ]; then
+ echo "no such rpm file: $debuginfo_rpm"
+ exit 1
+ fi
+fi
+
+if [ ! -f $backtrace_file ]; then
+ echo "no such backtrace file: $backtrace_file"
+ exit 1
+fi
+
+if [ "$debuginfo_rpm" != "none" ]; then
+ if ! file $debuginfo_rpm | grep RPM >/dev/null 2>&1 ; then
+ echo "file does not look like an rpm: $debuginfo_rpm"
+ exit 1
+ fi
+fi
+
+cpio_version=$(cpio --version|grep cpio|cut -f 2 -d ')'|sed -e 's/^[[:space:]]*//')
+rpm_name=""
+debuginfo_path=""
+debuginfo_extension=""
+
+if [ $debuginfo_rpm != "none" ]; then
+ # extract the gluster debuginfo rpm to resolve the symbols against
+ rpm_name=$(basename $debuginfo_rpm '.rpm')
+ if [ -d $rpm_name ]; then
+ echo "directory already exists: $rpm_name"
+ echo "please remove/move it and reattempt"
+ exit 1
+ fi
+ mkdir -p $rpm_name
+ if version_compare $cpio_version "2.11"; then
+ rpm2cpio $debuginfo_rpm | cpio --quiet --extract --make-directories --preserve-modification-time --directory=$rpm_name
+ ret=$?
+ else
+ current_dir="$PWD"
+ cd $rpm_name
+ rpm2cpio $debuginfo_rpm | cpio --quiet --extract --make-directories --preserve-modification-time
+ ret=$?
+ cd $current_dir
+ fi
+ if [ $ret -eq 1 ]; then
+ echo "failed to extract rpm $debuginfo_rpm to $PWD/$rpm_name directory"
+ rm -rf $rpm_name
+ exit 1
+ fi
+ debuginfo_path="$PWD/$rpm_name/usr/lib/debug"
+ debuginfo_extension=".debug"
+else
+ debuginfo_path=""
+ debuginfo_extension=""
+fi
+
+# NOTE: backtrace file should contain only the lines which need to be resolved
+for bt in $(cat $backtrace_file)
+do
+ libname=$(echo $bt | cut -f 1 -d '(')
+ addr=$(echo $bt | cut -f 2 -d '(' | cut -f 1 -d ')')
+ libpath=${debuginfo_path}${libname}${debuginfo_extension}
+ if [ ! -f $libpath ]; then
+ continue
+ fi
+ newbt=( $(eu-addr2line --functions --exe=$libpath $addr) )
+ echo "$bt ${newbt[*]}"
+done
+
+# remove the temporary directory
+if [ -d $rpm_name ]; then
+ rm -rf $rpm_name
+fi
+
diff --git a/extras/devel-tools/strace-brick.sh b/extras/devel-tools/strace-brick.sh
new file mode 100755
index 00000000000..a140729111c
--- /dev/null
+++ b/extras/devel-tools/strace-brick.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+# Usage:
+# nice -n -19 strace-brick.sh glusterfsd 50
+
+brick_process_name=$1
+min_watch_cpu=$2
+if [ ! $brick_process_name ]; then
+ brick_process_name=glusterfsd
+fi
+
+if [ ! $min_watch_cpu ]; then
+ min_watch_cpu=50
+fi
+
+echo "min_watch_cpu: $min_watch_cpu"
+
+break=false
+
+while ! $break;
+do
+ mypids=( $(pgrep $brick_process_name) )
+ echo "mypids: ${mypids[*]}"
+
+ pid_args=$(echo ${mypids[*]} | sed -e 's/ / -p /g;s/^/-p /')
+ echo "pid_args: $pid_args"
+
+ pcpu=( $(ps $pid_args -o pcpu -h ) )
+ echo "pcpu: ${pcpu[*]}"
+
+ wait_longer=false
+
+ for i in $( seq 0 $((${#pcpu[*]} - 1)) )
+ do
+ echo "i: $i"
+ echo "mypids[$i]: ${mypids[$i]}"
+
+ int_pcpu=$(echo ${pcpu[$i]} | cut -f 1 -d '.')
+ echo "int_pcpu: $int_pcpu"
+ if [ ! $int_pcpu ] || [ ! $min_watch_cpu ]; then
+ break=true
+ echo "breaking"
+ fi
+ if [ $int_pcpu -ge $min_watch_cpu ]; then
+ wait_longer=true
+ mydirname="${brick_process_name}-${mypids[$i]}-$(date --utc +'%Y%m%d-%H%M%S.%N')"
+ ( mkdir $mydirname && cd $mydirname && timeout --kill-after=5 --signal=KILL 60 nice -n -19 strace -p ${mypids[$i]} -ff -tt -T -o $brick_process_name ) &
+ fi
+ done
+
+ if $wait_longer; then
+ sleep 90
+ else
+ sleep 15
+ fi
+done
diff --git a/extras/distributed-testing/README b/extras/distributed-testing/README
new file mode 100644
index 00000000000..928d943f211
--- /dev/null
+++ b/extras/distributed-testing/README
@@ -0,0 +1,28 @@
+PROBLEM
+
+The testing methodology of Gluster is extremely slow: it takes a very long time (6+ hours) to run the basic tests on a single machine, and about 20+ hours to run the code-analysis variants of the tests (valgrind, asan, tsan, etc.).
+
+SOLUTION
+
+The fundamental problem is that the tests cannot be parallelized on a single machine. The natural solution is to run these tests on a cluster of machines. In a nutshell, apply map-reduce to run unit tests.
+
+WORK @ Facebook
+
+At Facebook we have applied the map-reduce approach to testing and have observed 10X improvements.
+
+The solution supports the following:
+
+- Distribute tests across machines and collect results/logs
+- Share a worker pool across different testers
+- Retry a failed test 3 times on 3 different machines before declaring it a failure
+- Support running asan, valgrind, and asan-noleaks variants
+- Self-management of worker pools: the clients manage the worker pool, including version updates, so no manual maintenance is required
+
+WORK
+
+Port the code from gluster-fb-3.8 to gluster master
+
+HOW TO RUN
+
+./extras/distributed-testing/distributed-test.sh --hosts '<h1> <h2> <h3>'
+
+All hosts must allow passwordless ssh as root from the machine running the tests. This can be achieved by setting up ssh keys on the client and the server machines.
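+
+For example, to set up key-based access (h1, h2 and h3 are placeholder hostnames; adjust to your environment):
+
+    ssh-keygen -t rsa        # skip if a key already exists
+    for h in h1 h2 h3; do ssh-copy-id root@$h; done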
diff --git a/extras/distributed-testing/distributed-test-build-env b/extras/distributed-testing/distributed-test-build-env
new file mode 100644
index 00000000000..cd68ff717da
--- /dev/null
+++ b/extras/distributed-testing/distributed-test-build-env
@@ -0,0 +1,20 @@
+#!/bin/bash
+
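+# Build environment sourced by distributed-test-build.sh: configure options and CFLAGS for debug test builds.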
+GF_CONF_OPTS="--localstatedir=/var --sysconfdir /var/lib --prefix /usr --libdir /usr/lib64 \
+ --enable-bd-xlator=yes --enable-debug --enable-gnfs"
+
+if [ -x /usr/lib/rpm/redhat/dist.sh ]; then
+ REDHAT_MAJOR=$(/usr/lib/rpm/redhat/dist.sh --distnum)
+else
+ REDHAT_MAJOR=0
+fi
+
+ASAN_ENABLED=${ASAN_ENABLED:=0}
+if [ "$ASAN_ENABLED" -eq "1" ]; then
+ GF_CONF_OPTS="$GF_CONF_OPTS --with-asan"
+fi
+
+GF_CONF_OPTS="$GF_CONF_OPTS --with-systemd"
+export GF_CONF_OPTS
+
+export CFLAGS="-O0 -ggdb -fPIC -Wall"
diff --git a/extras/distributed-testing/distributed-test-build.sh b/extras/distributed-testing/distributed-test-build.sh
new file mode 100755
index 00000000000..e8910d8425c
--- /dev/null
+++ b/extras/distributed-testing/distributed-test-build.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+set -e
+
+EXTRA_CONFIGURE_ARGS="$@"
+ASAN_REQUESTED=false
+for arg in $EXTRA_CONFIGURE_ARGS; do
+ if [ $arg == "--with-asan" ]; then
+ echo "Requested ASAN, cleaning build first."
+ make -j distclean || true
+ touch .with_asan
+ ASAN_REQUESTED=true
+ fi
+done
+
+if [ $ASAN_REQUESTED == false ]; then
+ if [ -f .with_asan ]; then
+ echo "Previous build was with ASAN, cleaning build first."
+ make -j distclean || true
+ rm -v .with_asan
+ fi
+fi
+
+source extras/distributed-testing/distributed-test-build-env
+./autogen.sh
+./configure $GF_CONF_OPTS $EXTRA_CONFIGURE_ARGS
+make -j
diff --git a/extras/distributed-testing/distributed-test-env b/extras/distributed-testing/distributed-test-env
new file mode 100644
index 00000000000..36fdd82e5dd
--- /dev/null
+++ b/extras/distributed-testing/distributed-test-env
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+SMOKE_TESTS="\
+ tests/basic/*.t\
+ tests/basic/afr/*.t\
+ tests/basic/distribute/*.t\
+ tests/bugs/fb*.t\
+ tests/features/brick-min-free-space.t\
+"
+
+KNOWN_FLAKY_TESTS="\
+"
+
+BROKEN_TESTS="\
+ tests/features/lock_revocation.t\
+ tests/features/recon.t\
+ tests/features/fdl-overflow.t\
+ tests/features/fdl.t\
+ tests/features/ipc.t\
+ tests/bugs/distribute/bug-1247563.t\
+ tests/bugs/distribute/bug-1543279.t\
+ tests/bugs/distribute/bug-1066798.t\
+ tests/bugs/ec/bug-1304988.t\
+ tests/bugs/unclassified/bug-1357397.t\
+ tests/bugs/quota/bug-1235182.t\
+ tests/bugs/fuse/bug-1309462.t\
+ tests/bugs/glusterd/bug-1238706-daemons-stop-on-peer-cleanup.t\
+ tests/bugs/stripe/bug-1002207.t\
+ tests/bugs/stripe/bug-1111454.t\
+ tests/bugs/snapshot/bug-1140162-file-snapshot-features-encrypt-opts-validation.t\
+ tests/bugs/write-behind/bug-1279730.t\
+ tests/bugs/gfapi/bug-1093594.t\
+ tests/bugs/replicate/bug-1473026.t\
+ tests/bugs/replicate/bug-802417.t\
+ tests/basic/inode-leak.t\
+ tests/basic/distribute/force-migration.t\
+ tests/basic/ec/heal-info.t\
+ tests/basic/ec/ec-seek.t\
+ tests/basic/jbr/jbr-volgen.t\
+ tests/basic/jbr/jbr.t\
+ tests/basic/afr/tarissue.t\
+ tests/basic/tier/tierd_check.t\
+ tests/basic/gfapi/bug1291259.t\
+"
+
+SMOKE_TESTS=$(echo $SMOKE_TESTS | tr -s ' ' ' ')
+KNOWN_FLAKY_TESTS=$(echo $KNOWN_FLAKY_TESTS | tr -s ' ' ' ')
+BROKEN_TESTS=$(echo $BROKEN_TESTS | tr -s ' ' ' ')
diff --git a/extras/distributed-testing/distributed-test-runner.py b/extras/distributed-testing/distributed-test-runner.py
new file mode 100755
index 00000000000..5a07e2feab1
--- /dev/null
+++ b/extras/distributed-testing/distributed-test-runner.py
@@ -0,0 +1,859 @@
+#!/usr/bin/python2
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import unicode_literals
+from __future__ import print_function
+import re
+import sys
+import fcntl
+import base64
+import threading
+import socket
+import os
+import shlex
+import argparse
+import subprocess
+import time
+import SimpleXMLRPCServer
+import xmlrpclib
+import md5
+import httplib
+import uuid
+
+DEFAULT_PORT = 9999
+TEST_TIMEOUT_S = 15 * 60
+CLIENT_CONNECT_TIMEOUT_S = 10
+CLIENT_TIMEOUT_S = 60
+PATCH_FILE_UID = str(uuid.uuid4())
+SSH_TIMEOUT_S = 10
+MAX_ATTEMPTS = 3
+ADDRESS_FAMILY = 'IPv4'
+
+
+def socket_instance(address_family):
+    if address_family.upper() == 'IPV4':
+        return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    elif address_family.upper() == 'IPV6':
+ return socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+ else:
+ Log.error("Invalid IP address family")
+ sys.exit(1)
+
+
+def patch_file():
+ return "/tmp/%s-patch.tar.gz" % PATCH_FILE_UID
+
+# ..............................................................................
+# SimpleXMLRPCServer IPvX Wrapper
+# ..............................................................................
+
+
+class GeneralXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer):
+ def __init__(self, addr):
+ SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self, addr)
+
+ def server_bind(self):
+ if self.socket:
+ self.socket.close()
+ self.socket = socket_instance(args.address_family)
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ SimpleXMLRPCServer.SimpleXMLRPCServer.server_bind(self)
+
+
+class HTTPConnection(httplib.HTTPConnection):
+ def __init__(self, host):
+ self.host = host
+ httplib.HTTPConnection.__init__(self, host)
+
+ def connect(self):
+ old_timeout = socket.getdefaulttimeout()
+ self.sock = socket.create_connection((self.host, self.port),
+ timeout=CLIENT_CONNECT_TIMEOUT_S)
+ self.sock.settimeout(old_timeout)
+
+
+class IPTransport(xmlrpclib.Transport):
+ def __init__(self, *args, **kwargs):
+ xmlrpclib.Transport.__init__(self, *args, **kwargs)
+
+ def make_connection(self, host):
+ return HTTPConnection(host)
+
+
+# ..............................................................................
+# Common
+# ..............................................................................
+
+
+class Timer:
+ def __init__(self):
+ self.start = time.time()
+
+ def elapsed_s(self):
+ return int(time.time() - self.start)
+
+ def reset(self):
+ ret = self.elapsed_s()
+ self.start = time.time()
+ return ret
+
+
+def encode(buf):
+ return base64.b16encode(buf)
+
+
+def decode(buf):
+ return base64.b16decode(buf)
+
+
+def get_file_content(path):
+ with open(path, "r") as f:
+ return f.read()
+
+
+def write_to_file(path, data):
+ with open(path, "w") as f:
+ f.write(data)
+
+
+def failsafe(fn, args=()):
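+    # Call fn(*args); return (True, result) on success, or log the exception and return (False, None).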
+ try:
+ return (True, fn(*args))
+ except (xmlrpclib.Fault, xmlrpclib.ProtocolError, xmlrpclib.ResponseError,
+ Exception) as err:
+ Log.debug(str(err))
+ return (False, None)
+
+
+class LogLevel:
+ DEBUG = 2
+ ERROR = 1
+ CLI = 0
+
+
+class Log:
+ LOGLEVEL = LogLevel.ERROR
+
+ @staticmethod
+ def _normalize(msg):
+ return msg[:100]
+
+ @staticmethod
+ def debug(msg):
+ if Log.LOGLEVEL >= LogLevel.DEBUG:
+ sys.stdout.write("<debug> %s\n" % Log._normalize(msg))
+ sys.stdout.flush()
+
+ @staticmethod
+ def error(msg):
+ sys.stderr.write("<error> %s\n" % Log._normalize(msg))
+
+ @staticmethod
+ def header(msg):
+ sys.stderr.write("* %s *\n" % Log._normalize(msg))
+
+ @staticmethod
+ def cli(msg):
+ sys.stderr.write("%s\n" % msg)
+
+
+class Shell:
+ def __init__(self, cwd=None, logpath=None):
+ self.cwd = cwd
+ self.shell = True
+        self.redirect = open(os.devnull if not logpath else logpath, "w+")
+
+ def __del__(self):
+ self.redirect.close()
+
+ def cd(self, cwd):
+ Log.debug("cd %s" % cwd)
+ self.cwd = cwd
+
+ def truncate(self):
+ self.redirect.truncate(0)
+
+ def read_logs(self):
+ self.redirect.seek(0)
+ return self.redirect.read()
+
+ def check_call(self, cmd):
+ status = self.call(cmd)
+ if status:
+ raise Exception("Error running command %s. status=%s"
+ % (cmd, status))
+
+ def call(self, cmd):
+ if isinstance(cmd, list):
+ return self._calls(cmd)
+
+ return self._call(cmd)
+
+ def ssh(self, hostname, cmd, id_rsa=None):
+ flags = "" if not id_rsa else "-i " + id_rsa
+ return self.call("timeout %s ssh %s root@%s \"%s\"" %
+ (SSH_TIMEOUT_S, flags, hostname, cmd))
+
+ def scp(self, hostname, src, dest, id_rsa=None):
+ flags = "" if not id_rsa else "-i " + id_rsa
+ return self.call("timeout %s scp %s %s root@%s:%s" %
+ (SSH_TIMEOUT_S, flags, src, hostname, dest))
+
+ def output(self, cmd, cwd=None):
+ Log.debug("%s> %s" % (cwd, cmd))
+ return subprocess.check_output(shlex.split(cmd), cwd=self.cwd)
+
+ def _calls(self, cmds):
+ Log.debug("Running commands. %s" % cmds)
+ for c in cmds:
+ status = self.call(c)
+ if status:
+ Log.error("Commands failed with %s" % status)
+ return status
+ return 0
+
+ def _call(self, cmd):
+ if not self.shell:
+ cmd = shlex.split(cmd)
+
+ Log.debug("%s> %s" % (self.cwd, cmd))
+
+ status = subprocess.call(cmd, cwd=self.cwd, shell=self.shell,
+ stdout=self.redirect, stderr=self.redirect)
+
+ Log.debug("return %s" % status)
+ return status
+
+
+# ..............................................................................
+# Server role
+# ..............................................................................
+
+class TestServer:
+ def __init__(self, port, scratchdir):
+ self.port = port
+ self.scratchdir = scratchdir
+ self.shell = Shell()
+ self.rpc = None
+ self.pidf = None
+
+ self.shell.check_call("mkdir -p %s" % self.scratchdir)
+ self._process_lock()
+
+ def __del__(self):
+ if self.pidf:
+ self.pidf.close()
+
+ def init(self):
+ Log.debug("Starting xmlrpc server on port %s" % self.port)
+ self.rpc = GeneralXMLRPCServer(("", self.port))
+ self.rpc.register_instance(Handlers(self.scratchdir))
+
+ def serve(self):
+ (status, _) = failsafe(self.rpc.serve_forever)
+ Log.cli("== End ==")
+
+ def _process_lock(self):
+ pid_filename = os.path.basename(__file__).replace("/", "-")
+ pid_filepath = "%s/%s.pid" % (self.scratchdir, pid_filename)
+ self.pidf = open(pid_filepath, "w")
+ try:
+ fcntl.lockf(self.pidf, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ # We have the lock, kick anybody listening on this port
+ self.shell.call("kill $(lsof -t -i:%s)" % self.port)
+ except IOError:
+ Log.error("Another process instance is running")
+ sys.exit(0)
+
+#
+# Server Handler
+#
+
+
+handler_lock = threading.Lock()
+handler_serving_since = Timer()
+
+
+def synchronized(func):
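+    # Serialize handler calls under handler_lock; on error, log the handler's output and re-raise; always refresh the client-activity timer.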
+ def decorator(*args, **kws):
+ handler_lock.acquire()
+ h = args[0]
+ try:
+ h.shell.truncate()
+ ret = func(*args, **kws)
+ return ret
+        except Exception as err:
+ Log.error(str(err))
+ Log.error(decode(h._log_content()))
+ raise
+ finally:
+ handler_lock.release()
+ handler_serving_since.reset()
+
+ return decorator
+
+
+class Handlers:
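+    """Server-side RPC handlers: accept a patch tarball, build and install it, and run tests for one client at a time."""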
+ def __init__(self, scratchdir):
+ self.client_id = None
+ self.scratchdir = scratchdir
+ self.gluster_root = "%s/glusterfs" % self.scratchdir
+ self.shell = Shell(logpath="%s/test-handlers.log" % self.scratchdir)
+
+ def hello(self, id):
+ if not handler_lock.acquire(False):
+ return False
+ try:
+ return self._hello_locked(id)
+ finally:
+ handler_lock.release()
+
+ def _hello_locked(self, id):
+ if handler_serving_since.elapsed_s() > CLIENT_TIMEOUT_S:
+ Log.debug("Disconnected client %s" % self.client_id)
+ self.client_id = None
+
+ if not self.client_id:
+ self.client_id = id
+ handler_serving_since.reset()
+ return True
+
+ return (id == self.client_id)
+
+ @synchronized
+ def ping(self, id=None):
+ if id:
+ return id == self.client_id
+ return True
+
+ @synchronized
+ def bye(self, id):
+ assert id == self.client_id
+ self.client_id = None
+ handler_serving_since.reset()
+ return True
+
+ @synchronized
+ def cleanup(self, id):
+ assert id == self.client_id
+ self.shell.cd(self.gluster_root)
+ self.shell.check_call("PATH=.:$PATH; sudo ./clean_gfs_devserver.sh")
+ return True
+
+ @synchronized
+ def copy(self, id, name, content):
+ with open("%s/%s" % (self.scratchdir, name), "w+") as f:
+ f.write(decode(content))
+ return True
+
+ @synchronized
+ def copygzip(self, id, content):
+ assert id == self.client_id
+ gzipfile = "%s/tmp.tar.gz" % self.scratchdir
+ tarfile = "%s/tmp.tar" % self.scratchdir
+ self.shell.check_call("rm -f %s" % gzipfile)
+ self.shell.check_call("rm -f %s" % tarfile)
+ write_to_file(gzipfile, decode(content))
+
+ self.shell.cd(self.scratchdir)
+ self.shell.check_call("rm -r -f %s" % self.gluster_root)
+ self.shell.check_call("mkdir -p %s" % self.gluster_root)
+
+ self.shell.cd(self.gluster_root)
+ cmds = [
+ "gunzip -f -q %s" % gzipfile,
+ "tar -xvf %s" % tarfile
+ ]
+ return self.shell.call(cmds) == 0
+
+ @synchronized
+ def build(self, id, asan=False):
+ assert id == self.client_id
+ self.shell.cd(self.gluster_root)
+ self.shell.call("make clean")
+ env = "ASAN_ENABLED=1" if asan else ""
+ return self.shell.call(
+ "%s ./extras/distributed-testing/distributed-test-build.sh" % env) == 0
+
+ @synchronized
+ def install(self, id):
+ assert id == self.client_id
+ self.shell.cd(self.gluster_root)
+ return self.shell.call("make install") == 0
+
+ @synchronized
+ def prove(self, id, test, timeout, valgrind="no", asan_noleaks=True):
+ assert id == self.client_id
+ self.shell.cd(self.gluster_root)
+ env = "DEBUG=1 "
+ if valgrind == "memcheck" or valgrind == "yes":
+ cmd = "valgrind"
+ cmd += " --tool=memcheck --leak-check=full --track-origins=yes"
+ cmd += " --show-leak-kinds=all -v prove -v"
+ elif valgrind == "drd":
+ cmd = "valgrind"
+ cmd += " --tool=drd -v prove -v"
+ elif asan_noleaks:
+ cmd = "prove -v"
+ env += "ASAN_OPTIONS=detect_leaks=0 "
+ else:
+ cmd = "prove -v"
+
+ status = self.shell.call(
+ "%s timeout %s %s %s" % (env, timeout, cmd, test))
+
+ if status != 0:
+ return (False, self._log_content())
+ return (True, "")
+
+ def _log_content(self):
+ return encode(self.shell.read_logs())
+
+# ..............................................................................
+# Cli role
+# ..............................................................................
+
+
+class RPCConnection((threading.Thread)):
+ def __init__(self, host, port, path, cb):
+ threading.Thread.__init__(self)
+ self.host = host
+ self.port = port
+ self.path = path
+ self.shell = Shell()
+ self.cb = cb
+ self.stop = False
+ self.proxy = None
+ self.logid = "%s:%s" % (self.host, self.port)
+
+ def connect(self):
+ (status, ret) = failsafe(self._connect)
+ return (status and ret)
+
+ def _connect(self):
+ url = "http://%s:%s" % (self.host, self.port)
+ self.proxy = xmlrpclib.ServerProxy(url, transport=IPTransport())
+ return self.proxy.hello(self.cb.id)
+
+ def disconnect(self):
+ self.stop = True
+
+ def ping(self):
+ return self.proxy.ping()
+
+ def init(self):
+ return self._copy() and self._compile_and_install()
+
+ def run(self):
+ (status, ret) = failsafe(self.init)
+ if not status:
+ self.cb.note_lost_connection(self)
+ return
+ elif not ret:
+ self.cb.note_setup_failed(self)
+ return
+
+ while not self.stop:
+ (status, ret) = failsafe(self._run)
+ if not status or not ret:
+ self.cb.note_lost_connection(self)
+ break
+ time.sleep(0)
+
+ failsafe(self.proxy.bye, (self.cb.id,))
+ Log.debug("%s connection thread stopped" % self.host)
+
+ def _run(self):
+ test = self.cb.next_test()
+ (status, _) = failsafe(self._execute_next, (test,))
+ if not status:
+ self.cb.note_retry(test)
+ return False
+ return True
+
+ def _execute_next(self, test):
+ if not test:
+ time.sleep(1)
+ return
+
+ (status, error) = self.proxy.prove(self.cb.id, test,
+ self.cb.test_timeout,
+ self.cb.valgrind,
+ self.cb.asan_noleaks)
+ if status:
+ self.cb.note_done(test)
+ else:
+ self.cb.note_error(test, error)
+
+ def _compile_and_install(self):
+ Log.debug("<%s> Build " % self.logid)
+ asan = self.cb.asan or self.cb.asan_noleaks
+ return (self.proxy.build(self.cb.id, asan) and
+ self.proxy.install(self.cb.id))
+
+ def _copy(self):
+ return self._copy_gzip()
+
+ def _copy_gzip(self):
+ Log.cli("<%s> copying and compiling %s to remote" %
+ (self.logid, self.path))
+ data = encode(get_file_content(patch_file()))
+ Log.debug("GZIP size = %s B" % len(data))
+ return self.proxy.copygzip(self.cb.id, data)
+
+
+class RPCConnectionPool:
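+    """Pool of RPC connections to worker hosts: a scanner thread connects to available hosts and a kicker thread restarts unreachable workers over ssh."""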
+ def __init__(self, gluster_path, hosts, n, id_rsa):
+ self.gluster_path = gluster_path
+ self.hosts = hosts
+ self.conns = []
+ self.faulty = []
+ self.n = int(len(hosts) / 2) + 1 if not n else n
+ self.id_rsa = id_rsa
+ self.stop = False
+ self.scanner = threading.Thread(target=self._scan_hosts_loop)
+ self.kicker = threading.Thread(target=self._kick_hosts_loop)
+ self.shell = Shell()
+ self.since_start = Timer()
+
+ self.shell.check_call("rm -f %s" % patch_file())
+ self.shell.check_call("tar -zcvf %s ." % patch_file())
+ self.id = md5.new(get_file_content(patch_file())).hexdigest()
+ Log.cli("client UID %s" % self.id)
+ Log.cli("patch UID %s" % PATCH_FILE_UID)
+
+ def __del__(self):
+ self.shell.check_call("rm -f %s" % patch_file())
+
+ def pool_status(self):
+ elapsed_m = int(self.since_start.elapsed_s() / 60)
+ return "%s/%s connected, %smin elapsed" % (len(self.conns), self.n,
+ elapsed_m)
+
+ def connect(self):
+ Log.debug("Starting scanner")
+ self.scanner.start()
+ self.kicker.start()
+
+ def disconnect(self):
+ self.stop = True
+ for conn in self.conns:
+ conn.disconnect()
+
+ def note_lost_connection(self, conn):
+ Log.cli("lost connection to %s" % conn.host)
+ self.conns.remove(conn)
+ self.hosts.append((conn.host, conn.port))
+
+ def note_setup_failed(self, conn):
+ Log.error("Setup failed on %s:%s" % (conn.host, conn.port))
+ self.conns.remove(conn)
+ self.faulty.append((conn.host, conn.port))
+
+ def _scan_hosts_loop(self):
+ Log.debug("Scanner thread started")
+ while not self.stop:
+ failsafe(self._scan_hosts)
+ time.sleep(5)
+
+ def _scan_hosts(self):
+ if len(self.hosts) == 0 and len(self.conns) == 0:
+ Log.error("no more hosts available to loadbalance")
+ sys.exit(1)
+
+ for (host, port) in self.hosts:
+ if (len(self.conns) >= self.n) or self.stop:
+ break
+ self._scan_host(host, port)
+
+ def _scan_host(self, host, port):
+ Log.debug("scanning %s:%s" % (host, port))
+ c = RPCConnection(host, port, self.gluster_path, self)
+ (status, result) = failsafe(c.connect)
+ if status and result:
+ self.hosts.remove((host, port))
+ Log.debug("Connected to %s:%s" % (host, port))
+ self.conns.append(c)
+ c.start()
+ Log.debug("%s / %s connected" % (len(self.conns), self.n))
+ else:
+ Log.debug("Failed to connect to %s:%s" % (host, port))
+
+ def _kick_hosts_loop(self):
+ Log.debug("Kick thread started")
+ while not self.stop:
+ time.sleep(10)
+ failsafe(self._kick_hosts)
+
+ Log.debug("Kick thread stopped")
+
+ def _is_pingable(self, host, port):
+ c = RPCConnection(host, port, self.gluster_path, self)
+ failsafe(c.connect)
+ (status, result) = failsafe(c.ping)
+ return status and result
+
+ def _kick_hosts(self):
+ # Do not kick hosts if we have the optimal number of connections
+ if (len(self.conns) >= self.n) or self.stop:
+ Log.debug("Skip kicking hosts")
+ return
+
+ # Check and if dead kick all hosts
+ for (host, port) in self.hosts:
+ if self.stop:
+ Log.debug("Break kicking hosts")
+ break
+
+ if self._is_pingable(host, port):
+ Log.debug("Host=%s is alive. Won't kick" % host)
+ continue
+
+ Log.debug("Kicking %s" % host)
+ mypath = sys.argv[0]
+ myname = os.path.basename(mypath)
+ destpath = "/tmp/%s" % myname
+ sh = Shell()
+ sh.scp(host, mypath, destpath, self.id_rsa)
+ sh.ssh(host, "nohup %s --server &>> %s.log &" %
+ (destpath, destpath), self.id_rsa)
+
+ def join(self):
+ self.scanner.join()
+ self.kicker.join()
+ for c in self.conns:
+ c.join()
+
+
+# ..............................................................................
+# test role
+# ..............................................................................
+
+class TestRunner(RPCConnectionPool):
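+    """Distributes the test list across connected workers, retrying failures up to MAX_ATTEMPTS times and collecting logs of failed runs."""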
+ def __init__(self, gluster_path, hosts, n, tests, flaky_tests, valgrind,
+ asan, asan_noleaks, id_rsa, test_timeout):
+ RPCConnectionPool.__init__(self, gluster_path, self._parse_hosts(hosts),
+ n, id_rsa)
+ self.flaky_tests = flaky_tests.split(" ")
+ self.pending = []
+ self.done = []
+ self.error = []
+ self.retry = {}
+ self.error_logs = []
+ self.stats_timer = Timer()
+ self.valgrind = valgrind
+ self.asan = asan
+ self.asan_noleaks = asan_noleaks
+ self.test_timeout = test_timeout
+
+ self.tests = self._get_tests(tests)
+
+ Log.debug("tests: %s" % self.tests)
+
+ def _get_tests(self, tests):
+ if not tests or tests == "all":
+ return self._not_flaky(self._all())
+ elif tests == "flaky":
+ return self.flaky_tests
+ else:
+ return self._not_flaky(tests.strip().split(" "))
+
+ def run(self):
+ self.connect()
+ self.join()
+ return len(self.error)
+
+ def _pretty_print(self, data):
+ if isinstance(data, list):
+ str = ""
+ for i in data:
+ str = "%s %s" % (str, i)
+ return str
+ return "%s" % data
+
+ def print_result(self):
+ Log.cli("== RESULTS ==")
+ Log.cli("SUCCESS : %s" % len(self.done))
+ Log.cli("ERRORS : %s" % len(self.error))
+ Log.cli("== ERRORS ==")
+ Log.cli(self._pretty_print(self.error))
+ Log.cli("== LOGS ==")
+ Log.cli(self._pretty_print(self.error_logs))
+ Log.cli("== END ==")
+
+ def next_test(self):
+ if len(self.tests):
+ test = self.tests.pop()
+ self.pending.append(test)
+ return test
+
+ if not len(self.pending):
+ self.disconnect()
+
+ return None
+
+ def _pct_completed(self):
+ total = len(self.tests) + len(self.pending) + len(self.done)
+ total += len(self.error)
+ completed = len(self.done) + len(self.error)
+ return 0 if not total else int(completed / total * 100)
+
+ def note_done(self, test):
+ Log.cli("%s PASS (%s%% done) (%s)" % (test, self._pct_completed(),
+ self.pool_status()))
+ self.pending.remove(test)
+ self.done.append(test)
+ if test in self.retry:
+ del self.retry[test]
+
+ def note_error(self, test, errstr):
+ Log.cli("%s FAIL" % test)
+ self.pending.remove(test)
+ if test not in self.retry:
+ self.retry[test] = 1
+
+ if errstr:
+ path = "%s/%s-%s.log" % ("/tmp", test.replace("/", "-"),
+ self.retry[test])
+ failsafe(write_to_file, (path, decode(errstr),))
+ self.error_logs.append(path)
+
+ if self.retry[test] < MAX_ATTEMPTS:
+ self.retry[test] += 1
+ Log.debug("retry test %s attempt %s" % (test, self.retry[test]))
+ self.tests.append(test)
+ else:
+ Log.debug("giveup attempt test %s" % test)
+ del self.retry[test]
+ self.error.append(test)
+
+ def note_retry(self, test):
+ Log.cli("retry %s on another host" % test)
+ self.pending.remove(test)
+ self.tests.append(test)
+
+ #
+ # test classifications
+ #
+ def _all(self):
+ return self._list_tests(["tests"], recursive=True)
+
+ def _not_flaky(self, tests):
+ for t in self.flaky_tests:
+ if t in tests:
+ tests.remove(t)
+ return tests
+
+ def _list_tests(self, prefixes, recursive=False, ignore_ifnotexist=False):
+ tests = []
+ for prefix in prefixes:
+ real_path = "%s/%s" % (self.gluster_path, prefix)
+ if not os.path.exists(real_path) and ignore_ifnotexist:
+ continue
+ for f in os.listdir(real_path):
+ if os.path.isdir(real_path + "/" + f):
+ if recursive:
+ tests += self._list_tests([prefix + "/" + f], recursive)
+ else:
+ if re.match(r".*\.t$", f):
+ tests += [prefix + "/" + f]
+ return tests
+
+ def _parse_hosts(self, hosts):
+ ret = []
+        for h in hosts.split(" "):
+ ret.append((h, DEFAULT_PORT))
+ Log.debug(ret)
+ return ret
+
+# ..............................................................................
+# Roles entry point
+# ..............................................................................
+
+
+def run_as_server(args):
+ if not args.server_path:
+ Log.error("please provide server path")
+ return 1
+
+ server = TestServer(args.port, args.server_path)
+ server.init()
+ server.serve()
+ return 0
+
+
+def run_as_tester(args):
+ Log.header("GLUSTER TEST CLI")
+
+ Log.debug("args = %s" % args)
+
+ tests = TestRunner(args.gluster_path, args.hosts, args.n, args.tests,
+ args.flaky_tests, valgrind=args.valgrind,
+ asan=args.asan, asan_noleaks=args.asan_noleaks,
+ id_rsa=args.id_rsa, test_timeout=args.test_timeout)
+ result = tests.run()
+ tests.print_result()
+ return result
+
+# ..............................................................................
+# main
+# ..............................................................................
+
+
+def main(args):
+ if args.v:
+ Log.LOGLEVEL = LogLevel.DEBUG
+
+ if args.server and args.tester:
+ Log.error("Invalid arguments. More than one role specified")
+ sys.exit(1)
+
+ if args.server:
+ sys.exit(run_as_server(args))
+ elif args.tester:
+ sys.exit(run_as_tester(args))
+ else:
+ Log.error("please specify a mode for CI")
+ parser.print_help()
+ sys.exit(1)
+
+
+parser = argparse.ArgumentParser(description="Gluster CI")
+
+# server role
+parser.add_argument("--server", help="start server", action="store_true")
+parser.add_argument("--server_path", help="server scratch space",
+ default="/tmp/gluster-test")
+parser.add_argument("--host", help="server address to listen", default="")
+parser.add_argument("--port", help="server port to listen",
+ type=int, default=DEFAULT_PORT)
+# test role
+parser.add_argument("--tester", help="start tester", action="store_true")
+parser.add_argument("--valgrind[=memcheck,drd]",
+ help="run tests with valgrind tool 'memcheck' or 'drd'",
+ default="no")
+parser.add_argument("--asan", help="test with asan enabled",
+ action="store_true")
+parser.add_argument("--asan-noleaks", help="test with asan but no mem leaks",
+ action="store_true")
+parser.add_argument("--tests", help="all/flaky/list of tests", default=None)
+parser.add_argument("--flaky_tests", help="list of flaky tests", default=None)
+parser.add_argument("--n", help="max number of machines to use", type=int,
+ default=0)
+parser.add_argument("--hosts", help="list of worker machines")
+parser.add_argument("--gluster_path", help="gluster path to test",
+ default=os.getcwd())
+parser.add_argument("--id-rsa", help="private key to use for ssh",
+ default=None)
+parser.add_argument("--test-timeout",
+ help="test timeout in sec (default 15min)",
+ default=TEST_TIMEOUT_S)
+# general
+parser.add_argument("-v", help="verbose", action="store_true")
+parser.add_argument("--address_family", help="IPv6 or IPv4 to use",
+ default=ADDRESS_FAMILY)
+
+args = parser.parse_args()
+
+main(args)
diff --git a/extras/distributed-testing/distributed-test.sh b/extras/distributed-testing/distributed-test.sh
new file mode 100755
index 00000000000..8f1e0310f33
--- /dev/null
+++ b/extras/distributed-testing/distributed-test.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+source ./extras/distributed-testing/distributed-test-env
+
+N=0
+TESTS='all'
+FLAKY=$KNOWN_FLAKY_TESTS
+BROKEN=$BROKEN_TESTS
+TEST_TIMEOUT_S=900
+ADDRESS_FAMILY='IPv4'
+
+FLAGS=""
+
+function print_env {
+ echo "Settings:"
+ echo "N=$N"
+ echo -e "-------\nHOSTS\n$HOSTS\n-------"
+ echo -e "TESTS\n$TESTS\n-------"
+ echo -e "SKIP\n$FLAKY $BROKEN\n-------"
+ echo -e "TEST_TIMEOUT_S=$TEST_TIMEOUT_S s\n"
+}
+
+function cleanup {
+ rm -f /tmp/test*.log
+}
+
+function usage {
+ echo "Usage: $0 [-h or --help] [-v or --verbose]
+ [--all] [--flaky] [--smoke] [--broken]
+ [--valgrind] [--asan] [--asan-noleaks]
+ [--hosts <hosts>] [-n <parallelism>]
+ [--tests <tests>]
+ [--id-rsa <ssh private key>]
+ [--address_family <IPv4 or IPv6>]
+ "
+}
+
+function parse_args () {
+ args=`getopt \
+ -o hvn: \
+ --long help,verbose,address_family:,valgrind,asan,asan-noleaks,all,\
+smoke,flaky,broken,hosts:,tests:,id-rsa:,test-timeout: \
+ -n 'distributed-test.sh' -- "$@"`
+
+ if [ $? != 0 ]; then
+ echo "Error parsing getopt"
+ exit 1
+ fi
+
+ eval set -- "$args"
+
+ while true; do
+ case "$1" in
+ -h | --help) usage ; exit 1 ;;
+ -v | --verbose) FLAGS="$FLAGS -v" ; shift ;;
+ --address_family) ADDRESS_FAMILY=$2; shift 2 ;;
+ --valgrind) FLAGS="$FLAGS --valgrind" ; shift ;;
+ --asan-noleaks) FLAGS="$FLAGS --asan-noleaks"; shift ;;
+ --asan) FLAGS="$FLAGS --asan" ; shift ;;
+ --hosts) HOSTS=$2; shift 2 ;;
+ --tests) TESTS=$2; FLAKY= ; BROKEN= ; shift 2 ;;
+ --test-timeout) TEST_TIMEOUT_S=$2; shift 2 ;;
+ --all) TESTS='all' ; shift 1 ;;
+ --flaky) TESTS=$FLAKY; FLAKY= ; shift 1 ;;
+ --smoke) TESTS=$SMOKE_TESTS; shift 1 ;;
+ --broken) TESTS=$BROKEN_TESTS; FLAKY= ; BROKEN= ; shift 1 ;;
+ --id-rsa) FLAGS="$FLAGS --id-rsa $2" ; shift 2 ;;
+ -n) N=$2; shift 2 ;;
+ *) break ;;
+ esac
+ done
+ run_tests_args="$@"
+}
+
+function main {
+ parse_args "$@"
+
+ if [ -z "$HOSTS" ]; then
+ echo "Please provide hosts to run the tests in"
+ exit -1
+ fi
+
+ print_env
+
+ cleanup
+
+ "extras/distributed-testing/distributed-test-runner.py" $FLAGS --tester \
+ --n "$N" --hosts "$HOSTS" --tests "$TESTS" \
+ --flaky_tests "$FLAKY $BROKEN" --test-timeout "$TEST_TIMEOUT_S" \
+ --address_family "$ADDRESS_FAMILY"
+
+ exit $?
+}
+
+main "$@"
diff --git a/extras/ec-heal-script/README.md b/extras/ec-heal-script/README.md
new file mode 100644
index 00000000000..aaefd6681f6
--- /dev/null
+++ b/extras/ec-heal-script/README.md
@@ -0,0 +1,69 @@
+# gluster-heal-scripts
+Scripts to correct the extended attributes of file fragments so that the files become healable.
+
+The following are guidelines and suggestions for using these scripts.
+
+1 - Passwordless ssh should be set up for all the nodes of the cluster.
+
+2 - Scripts should be executed from one of these nodes.
+
+3 - Make sure NO I/O is going on for the files on which these two scripts are
+being run.
+
+4 - There should be no heal going on for the files whose xattrs are being
+set by correct_pending_heals.sh. Disable self-heal while running this script.
+
+5 - All the bricks of the volume should be UP to identify good and bad fragments
+and to decide whether an entry is healable or not.
+
+6 - If correct_pending_heals.sh is stopped in the middle of processing
+healable entries, it is suggested to re-run gfid_needing_heal_parallel.sh to create
+the latest list of healable and non-healable entries and regenerate the "potential_heal" and "can_not_heal" files.
+
+7 - Depending on the number of entries, these scripts might take time to get and set the
+stats and xattrs of the entries.
+
+8 - A backup of the fragments will be taken in the <brick path>/.glusterfs/correct_pending_heals
+ directory, with a file name the same as the gfid.
+
+9 - Once the correctness of the file has been verified by the user, these backups should be removed.
+
+10 - Make sure there is enough space on the bricks to take these backups.
+
+11 - At the end this will create two files -
+ 1 - modified_and_backedup_files - Contains the list of files which have been modified and should be healed.
+ 2 - can_not_heal - Contains the list of files which cannot be healed.
+
+12 - It is suggested that the user check the integrity of the data of the files
+ which were modified and healed.
+
+
+Usage:
+
+The following is the sequence of steps to use these scripts -
+
+1 - ./gfid_needing_heal_parallel.sh <volume name>
+
+ Execute gfid_needing_heal_parallel.sh with the volume name to create lists of the files which
+ can and cannot be healed. It creates the "potential_heal" and "can_not_heal" files.
+ During execution, it also displays the list of files on the console along with the verdict.
+
+2 - ./correct_pending_heals.sh
+
+ Execute correct_pending_heals.sh without any argument. This script processes the entries present
+ in the "potential_heal" file. It asks the user how many files to process in one attempt.
+ Once the count is provided, the script fetches the entries one by one from the "potential_heal" file and takes the necessary action.
+ If at this point a file still cannot be healed, it is pushed to the "can_not_heal" file.
+ If a file can be healed, the script modifies the xattrs of that file's fragments and creates an entry in the "modified_and_backedup_files" file.
+
+3 - At the end, all the entries of "potential_heal" will have been processed and, based on the processing, only two files will be left.
+
+ 1 - modified_and_backedup_files - Contains the list of files which have been modified and should be healed.
+ 2 - can_not_heal - Contains the list of files which cannot be healed.
+
+Logs and other files -
+
+1 - modified_and_backedup_files - Contains all the files which could be healed and the location of the backup of each fragment.
+2 - can_not_heal - Contains all the files which cannot be healed.
+3 - potential_heal - List of files which could be healed and should be processed by "correct_pending_heals.sh"
+4 - /var/log/glusterfs/ec-heal-script.log - Contains the logs of both scripts.
diff --git a/extras/ec-heal-script/correct_pending_heals.sh b/extras/ec-heal-script/correct_pending_heals.sh
new file mode 100755
index 00000000000..c9f19dd7c89
--- /dev/null
+++ b/extras/ec-heal-script/correct_pending_heals.sh
@@ -0,0 +1,415 @@
+#!/bin/bash
+# Copyright (c) 2019-2020 Red Hat, Inc. <http://www.redhat.com>
+# This file is part of GlusterFS.
+#
+# This file is licensed to you under your choice of the GNU Lesser
+# General Public License, version 3 or any later version (LGPLv3 or
+# later), or the GNU General Public License, version 2 (GPLv2), in all
+# cases as published by the Free Software Foundation.
+
+# This script resets the xattrs of all the fragments of a file
+# which can be healed as per gfid_needing_heal_parallel.sh.
+# gfid_needing_heal_parallel.sh produces two files, potential_heal and can_not_heal.
+# This script takes potential_heal as input and resets the xattrs of all the fragments
+# of the files listed there which can be healed as per the
+# trusted.ec.size xattr of the file; otherwise it places the entry in the can_not_heal
+# file. Entries whose xattrs were reset are recorded in the modified_and_backedup_files
+# file so that the user can track those files.
+
+
+MOD_BACKUP_FILES="modified_and_backedup_files"
+CAN_NOT_HEAL="can_not_heal"
+LOG_DIR="/var/log/glusterfs"
+LOG_FILE="$LOG_DIR/ec-heal-script.log"
+LINE_SEP="==================================================="
+
+function heal_log()
+{
+ echo "$1" >> "$LOG_FILE"
+}
+
+function desc ()
+{
+ echo ""
+ echo "This script finally resets the xattrs of all the fragments of a file
+which can be healed as per gfid_needing_heal_parallel.sh.
+gfid_needing_heal_parallel.sh will produce two files, potential_heal and can_not_heal.
+This script takes potential_heal as input and resets xattrs of all the fragments
+of those files present in this file and which could be healed as per
+trusted.ec.size xattar of the file else it will place the entry in can_not_heal
+file. Those entries which must be healed will be place in must_heal file
+after setting xattrs so that user can track those files."
+}
+
+function _init ()
+{
+ if [ $# -ne 0 ]
+ then
+ echo "usage: $0"
+ desc
+ exit 2
+ fi
+
+ if [ ! -f "potential_heal" ]
+ then
+ echo "Nothing to correct. File "potential_heal" does not exist"
+ echo ""
+ desc
+ exit 2
+ fi
+}
+
+function total_file_size_in_hex()
+{
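+ # Original file size = fragment size * 4 data fragments (the scripts assume a 4+2 erasure-coded volume, matching the quorum of 4 used elsewhere).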
+ local frag_size=$1
+ local size=0
+ local hex_size=""
+
+ size=$((frag_size * 4))
+ hex_size=$(printf '0x%016x' $size)
+ echo "$hex_size"
+}
+
+function backup_file_fragment()
+{
+ local file_host=$1
+ local file_entry=$2
+ local gfid_actual_paths=$3
+ local brick_root=""
+ local temp=""
+ local backup_dir=""
+ local cmd=""
+ local gfid=""
+
+ brick_root=$(echo "$file_entry" | cut -d "#" -f 1)
+ temp=$(echo "$(basename "$BASH_SOURCE")" | cut -d '.' -f 1)
+ backup_dir=$(echo "${brick_root}/.glusterfs/${temp}")
+ file_entry=${file_entry//#}
+
+ gfid=$(echo "${gfid_actual_paths}" | cut -d '|' -f 1 | cut -d '/' -f 5)
+ echo "${file_host}:${backup_dir}/${gfid}" >> "$MOD_BACKUP_FILES"
+
+ cmd="mkdir -p ${backup_dir} && yes | cp -af ${file_entry} ${backup_dir}/${gfid} 2>/dev/null"
+ ssh -n "${file_host}" "${cmd}"
+}
+
+function set_frag_xattr ()
+{
+ local file_host=$1
+ local file_entry=$2
+ local good=$3
+ local cmd1=""
+ local cmd2=""
+ local cmd=""
+ local version="0x00000000000000010000000000000001"
+ local dirty="0x00000000000000010000000000000001"
+
+ if [[ $good -eq 0 ]]
+ then
+ version="0x00000000000000000000000000000000"
+ fi
+
+ cmd1=" setfattr -n trusted.ec.version -v ${version} ${file_entry} &&"
+ cmd2=" setfattr -n trusted.ec.dirty -v ${dirty} ${file_entry}"
+ cmd=${cmd1}${cmd2}
+ ssh -n "${file_host}" "${cmd}"
+}
+
+function set_version_dirty_xattr ()
+{
+ local file_paths=$1
+ local good=$2
+ local gfid_actual_paths=$3
+ local file_entry=""
+ local file_host=""
+ local bpath=""
+
+ for bpath in ${file_paths//,/ }
+ do
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+ backup_file_fragment "$file_host" "$file_entry" "$gfid_actual_paths"
+ file_entry=${file_entry//#}
+ set_frag_xattr "$file_host" "$file_entry" "$good"
+ done
+}
+
+function match_size_xattr_quorum ()
+{
+ local file_paths=$1
+ local file_entry=""
+ local file_host=""
+ local cmd=""
+ local size_xattr=""
+ local bpath=""
+ declare -A xattr_count
+
+ for bpath in ${file_paths//,/ }
+ do
+ size_xattr=""
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+ file_entry=${file_entry//#}
+
+ cmd="getfattr -n trusted.ec.size -d -e hex ${file_entry} 2>/dev/null | grep -w "trusted.ec.size" | cut -d '=' -f 2"
+ size_xattr=$(ssh -n "${file_host}" "${cmd}")
+ if [[ -n $size_xattr ]]
+ then
+ count=$((xattr_count["$size_xattr"] + 1))
+ xattr_count["$size_xattr"]=${count}
+ if [[ $count -ge 4 ]]
+ then
+ echo "${size_xattr}"
+ return
+ fi
+ fi
+ done
+ echo "False"
+}
+
+function match_version_xattr ()
+{
+ local file_paths=$1
+ local file_entry=""
+ local file_host=""
+ local cmd=""
+ local version=""
+ local bpath=""
+ declare -A ver_count
+
+ for bpath in ${file_paths//,/ }
+ do
+ version=""
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+ file_entry=${file_entry//#}
+
+ cmd="getfattr -n trusted.ec.version -d -e hex ${file_entry} 2>/dev/null | grep -w "trusted.ec.version" | cut -d '=' -f 2"
+ version=$(ssh -n "${file_host}" "${cmd}")
+ ver_count["$version"]=$((ver_count["$version"] + 1))
+ done
+ for key in "${ver_count[@]}"
+ do
+ if [[ $key -ge 4 ]]
+ then
+ echo "True"
+ return
+ else
+ echo "False"
+ return
+ fi
+ done
+}
+
+function match_stat_size_with_xattr ()
+{
+ local bpath=$1
+ local size=$2
+ local file_stat=$3
+ local xattr=$4
+ local file_entry=""
+ local file_host=""
+ local cmd=""
+ local stat_output=""
+ local hex_size=""
+
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+
+ file_entry=${file_entry//#}
+ cmd="stat --format=%F:%B:%s $file_entry 2>/dev/null"
+ stat_output=$(ssh -n "${file_host}" "${cmd}")
+ echo "$stat_output" | grep -w "${file_stat}" > /dev/null
+
+ if [[ $? -eq 0 ]]
+ then
+ cmd="getfattr -n trusted.ec.size -d -e hex ${file_entry} 2>/dev/null | grep -w "trusted.ec.size" | cut -d '=' -f 2"
+ hex_size=$(ssh -n "${file_host}" "${cmd}")
+
+ if [[ -z $hex_size || "$hex_size" != "$xattr" ]]
+ then
+ echo "False"
+ return
+ fi
+ size_diff=$(printf '%d' $(( size - hex_size )))
+ if [[ $size_diff -gt 2047 ]]
+ then
+ echo "False"
+ return
+ else
+ echo "True"
+ return
+ fi
+ else
+ echo "False"
+ return
+ fi
+}
+
+function find_file_paths ()
+{
+ local bpath=$1
+ local file_entry=""
+ local file_host=""
+ local cmd=""
+ local brick_root=""
+ local gfid=""
+ local actual_path=""
+ local gfid_path=""
+
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+ brick_root=$(echo "$file_entry" | cut -d "#" -f 1)
+
+ gfid=$(echo "${file_entry}" | grep ".glusterfs")
+ if [[ -n "$gfid" ]]
+ then
+ gfid_path=$(echo "$file_entry" | cut -d "#" -f 2)
+ file_entry=${file_entry//#}
+ cmd="find -L '$brick_root' -samefile '$file_entry' 2>/dev/null | grep -v '.glusterfs' "
+ actual_path=$(ssh -n "${file_host}" "${cmd}")
+ #removing absolute path so that user can refer this from mount point
+ actual_path=${actual_path#"$brick_root"}
+ else
+ actual_path=$(echo "$file_entry" | cut -d "#" -f 2)
+ file_entry=${file_entry//#}
+ cmd="find -L '$brick_root' -samefile '$file_entry' 2>/dev/null | grep '.glusterfs' "
+ gfid_path=$(ssh -n "${file_host}" "${cmd}")
+ gfid_path=${gfid_path#"$brick_root"}
+ fi
+
+ echo "${gfid_path}|${actual_path}"
+}
+
+function log_can_not_heal ()
+{
+ local gfid_actual_paths=$1
+ local file_paths=$2
+ file_paths=${file_paths//#}
+
+ echo "${LINE_SEP}" >> "$CAN_NOT_HEAL"
+ echo "Can Not Heal : $(echo "$gfid_actual_paths" | cut -d '|' -f 2)" >> "$CAN_NOT_HEAL"
+ for bpath in ${file_paths//,/ }
+ do
+ echo "${bpath}" >> "$CAN_NOT_HEAL"
+ done
+}
+
+function check_all_frag_and_set_xattr ()
+{
+ local file_paths=$1
+ local total_size=$2
+ local file_stat=$3
+ local bpath=""
+ local healthy_count=0
+ local match="False"
+ local matching_bricks=""
+ local bad_bricks=""
+ local gfid_actual_paths=""
+
+ for bpath in ${file_paths//,/ }
+ do
+ if [[ -n "$gfid_actual_paths" ]]
+ then
+ break
+ fi
+ gfid_actual_paths=$(find_file_paths "$bpath")
+ done
+
+ match=$(match_size_xattr_quorum "$file_paths")
+
+# echo "${match} : $bpath" >> "$MOD_BACKUP_FILES"
+
+ if [[ "$match" != "False" ]]
+ then
+ xattr="$match"
+ for bpath in ${file_paths//,/ }
+ do
+ match="False"
+ match=$(match_stat_size_with_xattr "$bpath" "$total_size" "$file_stat" "$xattr")
+ if [[ "$match" == "True" ]]
+ then
+ matching_bricks="${bpath},${matching_bricks}"
+ healthy_count=$((healthy_count + 1))
+ else
+ bad_bricks="${bpath},${bad_bricks}"
+ fi
+ done
+ fi
+
+ if [[ $healthy_count -ge 4 ]]
+ then
+ match="True"
+ echo "${LINE_SEP}" >> "$MOD_BACKUP_FILES"
+ echo "Modified : $(echo "$gfid_actual_paths" | cut -d '|' -f 2)" >> "$MOD_BACKUP_FILES"
+ set_version_dirty_xattr "$matching_bricks" 1 "$gfid_actual_paths"
+ set_version_dirty_xattr "$bad_bricks" 0 "$gfid_actual_paths"
+ else
+ log_can_not_heal "$gfid_actual_paths" "${file_paths}"
+ fi
+
+ echo "$match"
+}
+function set_xattr()
+{
+ local count=$1
+ local heal_entry=""
+ local file_stat=""
+ local frag_size=""
+ local total_size=""
+ local file_paths=""
+ local num=""
+ local can_heal_count=0
+
+ heal_log "Started $(basename $BASH_SOURCE) on $(date) "
+
+ while read -r heal_entry
+ do
+ heal_log "$LINE_SEP"
+ heal_log "${heal_entry}"
+
+ file_stat=$(echo "$heal_entry" | cut -d "|" -f 1)
+ frag_size=$(echo "$file_stat" | rev | cut -d ":" -f 1 | rev)
+ total_size="$(total_file_size_in_hex "$frag_size")"
+ file_paths=$(echo "$heal_entry" | cut -d "|" -f 2)
+ match=$(check_all_frag_and_set_xattr "$file_paths" "$total_size" "$file_stat")
+ if [[ "$match" == "True" ]]
+ then
+ can_heal_count=$((can_heal_count + 1))
+ fi
+
+ sed -i '1d' potential_heal
+ count=$((count - 1))
+ if [ $count == 0 ]
+ then
+ num=$(cat potential_heal | wc -l)
+ heal_log "$LINE_SEP"
+ heal_log "${1} : Processed"
+ heal_log "${can_heal_count} : Modified to Heal"
+ heal_log "$((${1} - can_heal_count)) : Moved to can_not_heal."
+ heal_log "${num} : Pending as Potential Heal"
+ exit 0
+ fi
+
+ done < potential_heal
+}
+
+function main ()
+{
+ local count=0
+
+ read -p "Number of files to correct: [choose between 1-1000] (0 for All):" count
+ if [[ $count -lt 0 || $count -gt 1000 ]]
+ then
+ echo "Provide correct value:"
+ exit 2
+ fi
+
+ if [[ $count -eq 0 ]]
+ then
+ count=$(cat potential_heal | wc -l)
+ fi
+ set_xattr "$count"
+}
+
+_init "$@" && main "$@"
diff --git a/extras/ec-heal-script/gfid_needing_heal_parallel.sh b/extras/ec-heal-script/gfid_needing_heal_parallel.sh
new file mode 100755
index 00000000000..d7f53c97c33
--- /dev/null
+++ b/extras/ec-heal-script/gfid_needing_heal_parallel.sh
@@ -0,0 +1,278 @@
+#!/bin/bash
+# Copyright (c) 2019-2020 Red Hat, Inc. <http://www.redhat.com>
+# This file is part of GlusterFS.
+#
+# This file is licensed to you under your choice of the GNU Lesser
+# General Public License, version 3 or any later version (LGPLv3 or
+# later), or the GNU General Public License, version 2 (GPLv2), in all
+# cases as published by the Free Software Foundation.
+
+# This script provides a list of all the files which can or cannot be healed.
+# It also generates two files, potential_heal and can_not_heal, which contain the information
+# about all those files. These files can be used by correct_pending_heals.sh to correct
+# the fragments so that the files can be healed by shd.
+
+CAN_NOT_HEAL="can_not_heal"
+CAN_HEAL="potential_heal"
+LINE_SEP="==================================================="
+LOG_DIR="/var/log/glusterfs"
+LOG_FILE="$LOG_DIR/ec-heal-script.log"
+
+function heal_log()
+{
+ echo "$1" >> "$LOG_FILE"
+}
+
+function _init ()
+{
+ if [ $# -ne 1 ]; then
+ echo "usage: $0 <gluster volume name>";
+ echo "This script provides a list of all the files which can be healed or not healed.
+It also generates two files, potential_heal and can_not_heal, which contains the information
+of all theose files. These files could be used by correct_pending_heals.sh to correct
+the fragmnets so that files could be healed by shd."
+ exit 2;
+ fi
+
+ volume=$1;
+}
+
+function get_pending_entries ()
+{
+ local volume_name=$1
+
+ gluster volume heal "$volume_name" info | grep -v ":/" | grep -v "Number of entries" | grep -v "Status:" | sort -u | sed '/^$/d'
+}
+
+function get_entry_path_on_brick()
+{
+ local path="$1"
+ local gfid_string=""
+ if [[ "${path:0:1}" == "/" ]];
+ then
+ echo "$path"
+ else
+ gfid_string="$(echo "$path" | cut -f2 -d':' | cut -f1 -d '>')"
+ echo "/.glusterfs/${gfid_string:0:2}/${gfid_string:2:2}/$gfid_string"
+ fi
+}
+
+function run_command_on_server()
+{
+ local subvolume="$1"
+ local host="$2"
+ local cmd="$3"
+ local output
+ output=$(ssh -n "${host}" "${cmd}")
+ if [ -n "$output" ]
+ then
+ echo "$subvolume:$output"
+ fi
+}
+
+function get_entry_path_all_bricks ()
+{
+ local entry="$1"
+ local bricks="$2"
+ local cmd=""
+ for brick in $bricks
+ do
+ echo "${brick}#$(get_entry_path_on_brick "$entry")"
+ done | tr '\n' ','
+}
+
+function get_stat_for_entry_from_all_bricks ()
+{
+ local entry="$1"
+ local bricks="$2"
+ local subvolume=0
+ local host=""
+ local bpath=""
+ local cmd=""
+
+ for brick in $bricks
+ do
+ if [[ "$((subvolume % 6))" == "0" ]]
+ then
+ subvolume=$((subvolume+1))
+ fi
+ host=$(echo "$brick" | cut -f1 -d':')
+ bpath=$(echo "$brick" | cut -f2 -d':')
+
+ cmd="stat --format=%F:%B:%s $bpath$(get_entry_path_on_brick "$entry") 2>/dev/null"
+ run_command_on_server "$subvolume" "${host}" "${cmd}" &
+ done | sort | uniq -c | sort -rnk1
+}
+
+function get_bricks_from_volume()
+{
+ local v=$1
+ gluster volume info "$v" | grep -E "^Brick[0-9][0-9]*:" | cut -f2- -d':'
+}
+
+function print_entry_gfid()
+{
+ local host="$1"
+ local dirpath="$2"
+ local entry="$3"
+ local gfid
+ gfid="$(ssh -n "${host}" "getfattr -d -m. -e hex $dirpath/$entry 2>/dev/null | grep trusted.gfid=|cut -f2 -d'='")"
+ echo "$entry" - "$gfid"
+}
+
+function print_brick_directory_info()
+{
+ local h="$1"
+ local dirpath="$2"
+ while read -r e
+ do
+ print_entry_gfid "${h}" "${dirpath}" "${e}"
+ done < <(ssh -n "${h}" "ls $dirpath 2>/dev/null")
+}
+
+function print_directory_info()
+{
+ local entry="$1"
+ local bricks="$2"
+ local h
+ local b
+ local gfid
+ for brick in $bricks;
+ do
+ h="$(echo "$brick" | cut -f1 -d':')"
+ b="$(echo "$brick" | cut -f2 -d':')"
+ dirpath="$b$(get_entry_path_on_brick "$entry")"
+ print_brick_directory_info "${h}" "${dirpath}" &
+ done | sort | uniq -c
+}
+
+function print_entries_needing_heal()
+{
+ local quorum=0
+ local entry="$1"
+ local bricks="$2"
+ while read -r line
+ do
+ quorum=$(echo "$line" | awk '{print $1}')
+ if [[ "$quorum" -lt 4 ]]
+ then
+ echo "$line - Not in Quorum"
+ else
+ echo "$line - In Quorum"
+ fi
+ done < <(print_directory_info "$entry" "$bricks")
+}
+
+function find_file_paths ()
+{
+ local bpath=$1
+ local file_entry=""
+ local file_host=""
+ local cmd=""
+ local brick_root=""
+ local gfid=""
+ local actual_path=""
+ local gfid_path=""
+
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+ brick_root=$(echo "$file_entry" | cut -d "#" -f 1)
+
+ gfid=$(echo "${file_entry}" | grep ".glusterfs")
+
+ if [[ -n "$gfid" ]]
+ then
+ gfid_path=$(echo "$file_entry" | cut -d "#" -f 2)
+ file_entry=${file_entry//#}
+ cmd="find -L '$brick_root' -samefile '$file_entry' 2>/dev/null | grep -v '.glusterfs' "
+ actual_path=$(ssh -n "${file_host}" "${cmd}")
+ #removing absolute path so that user can refer this from mount point
+ actual_path=${actual_path#"$brick_root"}
+ else
+ actual_path=$(echo "$file_entry" | cut -d "#" -f 2)
+ file_entry=${file_entry//#}
+ cmd="find -L '$brick_root' -samefile '$file_entry' 2>/dev/null | grep '.glusterfs' "
+ gfid_path=$(ssh -n "${file_host}" "${cmd}")
+ gfid_path=${gfid_path#"$brick_root"}
+ fi
+
+ echo "${gfid_path}|${actual_path}"
+}
+
+function log_can_not_heal ()
+{
+ local gfid_actual_paths=$1
+ local file_paths=$2
+ file_paths=${file_paths//#}
+
+ echo "${LINE_SEP}" >> "$CAN_NOT_HEAL"
+ echo "Can Not Heal : $(echo "$gfid_actual_paths" | cut -d '|' -f 2)" >> "$CAN_NOT_HEAL"
+ for bpath in ${file_paths//,/ }
+ do
+ echo "${bpath}" >> "$CAN_NOT_HEAL"
+ done
+}
+
+function main ()
+{
+ local bricks=""
+ local quorum=0
+ local stat_info=""
+ local file_type=""
+ local gfid_actual_paths=""
+ local bpath=""
+ local file_paths=""
+ local good=0
+ local bad=0
+ bricks=$(get_bricks_from_volume "$volume")
+ rm -f "$CAN_HEAL"
+ rm -f "$CAN_NOT_HEAL"
+ mkdir "$LOG_DIR" -p
+
+ heal_log "Started $(basename "$BASH_SOURCE") on $(date) "
+ while read -r heal_entry
+ do
+ heal_log "------------------------------------------------------------------"
+ heal_log "$heal_entry"
+
+ gfid_actual_paths=""
+ file_paths="$(get_entry_path_all_bricks "$heal_entry" "$bricks")"
+ stat_info="$(get_stat_for_entry_from_all_bricks "$heal_entry" "$bricks")"
+ heal_log "$stat_info"
+
+ quorum=$(echo "$stat_info" | head -1 | awk '{print $1}')
+ good_stat=$(echo "$stat_info" | head -1 | awk '{print $3}')
+ file_type="$(echo "$stat_info" | head -1 | cut -f2 -d':')"
+ if [[ "$file_type" == "directory" ]]
+ then
+ print_entries_needing_heal "$heal_entry" "$bricks"
+ else
+ if [[ "$quorum" -ge 4 ]]
+ then
+ good=$((good + 1))
+ heal_log "Verdict: Healable"
+
+ echo "${good_stat}|$file_paths" >> "$CAN_HEAL"
+ else
+ bad=$((bad + 1))
+ heal_log "Verdict: Not Healable"
+ for bpath in ${file_paths//,/ }
+ do
+ if [[ -z "$gfid_actual_paths" ]]
+ then
+ gfid_actual_paths=$(find_file_paths "$bpath")
+ else
+ break
+ fi
+ done
+ log_can_not_heal "$gfid_actual_paths" "${file_paths}"
+ fi
+ fi
+ done < <(get_pending_entries "$volume")
+ heal_log "========================================="
+ heal_log "Total number of potential heal : ${good}"
+ heal_log "Total number of can not heal : ${bad}"
+ heal_log "========================================="
+}
+
+_init "$@" && main "$@"
diff --git a/extras/failed-tests.py b/extras/failed-tests.py
index 11dcb0ccea8..f7f110246b5 100755
--- a/extras/failed-tests.py
+++ b/extras/failed-tests.py
@@ -1,123 +1,180 @@
-#!/usr/bin/python
+#!/usr/bin/python3
+from __future__ import print_function
import blessings
-import HTMLParser
import requests
-import sys
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
+import re
+import argparse
from collections import defaultdict
-from datetime import date, timedelta, datetime
-from dateutil.parser import parse
+from datetime import timedelta, datetime
+from pystache import render
-## This tool goes though the Gluster regression links and checks for failures
-#
-# Usage: failed-tests.py [<regression links,..> | get-summary \
-# <last number of days> <regression link>]
-#
-# When no options specified, goes through centos regression
-# @build.gluster.org/job/rackspace-regression-2GB-triggered/ and gets the
-# summary of last 30 builds
-# When other regression links (Eg:/job/rackspace-netbsd7-regression-triggered/)
-# are specified it goes through those links and prints the summary of last 30
-# builds in those links
-# When get-summary is specified, it goes through the link specified and gets the
-# summary of the builds that have happened in the last number of days specified.
+# This tool goes through the Gluster regression links and checks for failures
-BASE='https://build.gluster.org'
-TERM=blessings.Terminal()
-MAX_BUILDS=100
-summary=defaultdict(list)
+BASE = 'https://build.gluster.org'
+TERM = blessings.Terminal()
+MAX_BUILDS = 1000
+summary = defaultdict(list)
+VERBOSE = None
-def process_failure (url, cut_off_date):
- text = requests.get(url,verify=False).text
+
+def process_failure(url, node):
+ text = requests.get(url, verify=False).text
accum = []
for t in text.split('\n'):
- if t.find("BUILD_TIMESTAMP=") != -1 and cut_off_date != None:
- build_date = parse (t, fuzzy=True)
- if build_date.date() < cut_off_date:
- return 1
- elif t == 'Result: FAIL':
- print TERM.red + ('FAILURE on %s' % BASE+url) + TERM.normal
+ if t.find("Result: FAIL") != -1:
for t2 in accum:
- print t2.encode('utf-8')
+ if VERBOSE:
+ print(t2.encode('utf-8'))
if t2.find("Wstat") != -1:
- summary[t2.split(" ")[0]].append(url)
- accum = []
- elif t == 'Result: PASS':
+ test_case = re.search('\./tests/.*\.t', t2)
+ if test_case:
+ summary[test_case.group()].append((url, node))
accum = []
elif t.find("cur_cores=/") != -1:
summary["core"].append([t.split("/")[1]])
summary["core"].append(url)
else:
accum.append(t)
- return 0
-class FailureFinder (HTMLParser.HTMLParser):
- def __init__ (*args):
- apply(HTMLParser.HTMLParser.__init__,args)
- self = args[0]
- self.last_href = None
- def handle_starttag (self, tag, attrs):
- if tag == 'a':
- return self.is_a_tag (attrs)
- if tag == 'img':
- return self.is_img_tag (attrs)
- def is_a_tag (self, attrs):
- attrs_dict = dict(attrs)
- try:
- if attrs_dict['class'] != 'build-status-link':
- return
- except KeyError:
- return
- self.last_href = attrs_dict['href']
- def is_img_tag (self, attrs):
- if self.last_href == None:
- return
- attrs_dict = dict(attrs)
- try:
- if attrs_dict['alt'].find('Failed') == -1:
- return
- except KeyError:
- return
- process_failure(BASE+self.last_href, None)
- self.last_href = None
-def main (url):
- parser = FailureFinder()
- text = requests.get(url,verify=False).text
- parser.feed(text)
+def print_summary(failed_builds, total_builds, html=False):
+ # All the templates
+ count = [
+ '{{failed}} of {{total}} regressions failed',
+ '<p><b>{{failed}}</b> of <b>{{total}}</b> regressions failed</p>'
+ ]
+ regression_link = [
+ '\tRegression Link: {{link}}\n'
+ '\tNode: {{node}}',
+ '<p>&emsp;Regression Link: {{link}}</p>'
+ '<p>&emsp;Node: {{node}}</p>'
+ ]
+ component = [
+ '\tComponent: {{comp}}',
+ '<p>&emsp;Component: {{comp}}</p>'
+ ]
+ failure_count = [
+ ''.join([
+ TERM.red,
+ '{{test}} ; Failed {{count}} times',
+ TERM.normal
+ ]),
+ (
+ '<p><font color="red"><b>{{test}};</b> Failed <b>{{count}}'
+ '</b> times</font></p>'
+ )
+ ]
-def print_summary():
- for k,v in summary.iteritems():
+ template = 0
+ if html:
+ template = 1
+ print(render(
+ count[template],
+ {'failed': failed_builds, 'total': total_builds}
+ ))
+ for k, v in summary.items():
if k == 'core':
- print TERM.red + "Found cores:" + TERM.normal
- for cmp,lnk in zip(v[::2], v[1::2]):
- print "\tComponent: %s" % (cmp)
- print "\tRegression Link: %s" % (lnk)
+ print(''.join([TERM.red, "Found cores:", TERM.normal]))
+ for comp, link in zip(v[::2], v[1::2]):
+ print(render(component[template], {'comp': comp}))
+ print(render(
+ regression_link[template],
+ {'link': link[0], 'node': link[1]}
+ ))
+ else:
+ print(render(failure_count[template], {'test': k, 'count': len(v)}))
+ for link in v:
+ print(render(
+ regression_link[template],
+ {'link': link[0], 'node': link[1]}
+ ))
+
+
+def get_summary(cut_off_date, reg_link):
+ '''
+ Get links to the failed jobs
+ '''
+ success_count = 0
+ failure_count = 0
+ for page in range(0, MAX_BUILDS, 100):
+ build_info = requests.get(''.join([
+ BASE,
+ reg_link,
+ 'api/json?depth=1&tree=allBuilds'
+ '[url,result,timestamp,builtOn]',
+ '{{{0},{1}}}'.format(page, page+100)
+ ]), verify=False).json()
+ for build in build_info.get('allBuilds'):
+ if datetime.fromtimestamp(build['timestamp']/1000) < cut_off_date:
+ # stop when timestamp older than cut off date
+ return failure_count, failure_count + success_count
+ if build['result'] in [None, 'SUCCESS']:
+ # pass when build is a success or ongoing
+ success_count += 1
+ continue
+ if VERBOSE:
+ print(''.join([
+ TERM.red,
+ 'FAILURE on {0}'.format(build['url']),
+ TERM.normal
+ ]))
+ url = ''.join([build['url'], 'consoleText'])
+ failure_count += 1
+ process_failure(url, build['builtOn'])
+ return failure_count, failure_count + success_count
+
+
+def main(num_days, regression_link, html_report):
+ cut_off_date = datetime.today() - timedelta(days=num_days)
+ failure = 0
+ total = 0
+ for reg in regression_link:
+ if reg == 'centos':
+ reg_link = '/job/centos6-regression/'
+ elif reg == 'netbsd':
+ reg_link = '/job/netbsd7-regression/'
else:
- print TERM.red + "%s ; Failed %d times" % (k, len(v)) + TERM.normal
- for lnk in v:
- print "\tRegression Links: %s" % (lnk)
+ reg_link = reg
+ counts = get_summary(cut_off_date, reg_link)
+ failure += counts[0]
+ total += counts[1]
+ print_summary(failure, total, html_report)
-def get_summary (build_id, cut_off_date, reg_link):
- for i in xrange(build_id, build_id-MAX_BUILDS, -1):
- url=BASE+reg_link+str(i)+"/consoleFull"
- ret = process_failure(url, cut_off_date)
- if ret == 1:
- return
if __name__ == '__main__':
- if len(sys.argv) < 2:
- main(BASE+'/job/rackspace-regression-2GB-triggered/')
- elif sys.argv[1].find("get-summary") != -1:
- if len(sys.argv) < 4:
- print "Usage: failed-tests.py get-summary <last_no_of_days> <regression_link>"
- sys.exit(0)
- num_days=int(sys.argv[2])
- cut_off_date=date.today() - timedelta(days=num_days)
- reg_link = sys.argv[3]
- build_id = int(requests.get(BASE+reg_link+"lastBuild/buildNumber", verify=False).text)
- get_summary(build_id, cut_off_date, reg_link)
- else:
- for u in sys.argv[1:]:
- main(BASE+u)
- print_summary()
+ requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+ parser = argparse.ArgumentParser()
+ parser.add_argument("get-summary")
+ parser.add_argument(
+ "last_no_of_days",
+ default=1,
+ type=int,
+ help="Regression summary of last number of days"
+ )
+ parser.add_argument(
+ "regression_link",
+ default="centos",
+ nargs='+',
+ help="\"centos\" | \"netbsd\" | any other regression link"
+ )
+ parser.add_argument(
+ "--verbose",
+ default=False,
+ action="store_true",
+        help="Print a detailed report of each failed test case"
+ )
+ parser.add_argument(
+ "--html-report",
+ default=False,
+ action="store_true",
+ help="Print a brief report of failed regressions in html format"
+ )
+ args = parser.parse_args()
+ VERBOSE = args.verbose
+ main(
+ num_days=args.last_no_of_days,
+ regression_link=args.regression_link,
+ html_report=args.html_report
+ )
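The rewritten get_summary() above walks the Jenkins JSON API in pages of 100 builds and stops as soon as a build's timestamp falls before the cut-off date. A minimal standalone sketch of that query pattern, assuming build.gluster.org is reachable and using /job/centos6-regression/ and a seven-day window as placeholders:

    # Sketch only: page through a Jenkins job's builds and stop at a
    # cut-off date, mirroring the pagination used by get_summary() above.
    import requests
    from datetime import datetime, timedelta

    BASE = 'https://build.gluster.org'        # assumed reachable
    JOB = '/job/centos6-regression/'          # assumed job path
    cutoff = datetime.today() - timedelta(days=7)

    for page in range(0, 1000, 100):
        url = ('{0}{1}api/json?depth=1&tree=allBuilds'
               '[url,result,timestamp,builtOn]{{{2},{3}}}'.format(
                   BASE, JOB, page, page + 100))
        for build in requests.get(url, verify=False).json().get('allBuilds', []):
            # Jenkins timestamps are milliseconds since the epoch
            if datetime.fromtimestamp(build['timestamp'] / 1000.0) < cutoff:
                raise SystemExit(0)
            print('{0} {1} {2}'.format(build['result'], build['url'],
                                       build['builtOn']))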
diff --git a/extras/firewalld/Makefile.am b/extras/firewalld/Makefile.am
index a5c11b0b783..530881fb8eb 100644
--- a/extras/firewalld/Makefile.am
+++ b/extras/firewalld/Makefile.am
@@ -1,6 +1,8 @@
EXTRA_DIST = glusterfs.xml
if USE_FIREWALLD
+if WITH_SERVER
staticdir = /usr/lib/firewalld/services/
static_DATA = glusterfs.xml
endif
+endif
diff --git a/extras/firewalld/glusterfs.xml b/extras/firewalld/glusterfs.xml
index f8efd90c3b5..7e176442f5b 100644
--- a/extras/firewalld/glusterfs.xml
+++ b/extras/firewalld/glusterfs.xml
@@ -4,6 +4,7 @@
<description>Default ports for gluster-distributed storage</description>
<port protocol="tcp" port="24007"/> <!--For glusterd -->
<port protocol="tcp" port="24008"/> <!--For glusterd RDMA port management -->
+<port protocol="tcp" port="24009"/> <!--For glustereventsd -->
<port protocol="tcp" port="38465"/> <!--Gluster NFS service -->
<port protocol="tcp" port="38466"/> <!--Gluster NFS service -->
<port protocol="tcp" port="38467"/> <!--Gluster NFS service -->
diff --git a/extras/ganesha/config/ganesha-ha.conf.sample b/extras/ganesha/config/ganesha-ha.conf.sample
index 2077800d255..c22892bde56 100644
--- a/extras/ganesha/config/ganesha-ha.conf.sample
+++ b/extras/ganesha/config/ganesha-ha.conf.sample
@@ -2,9 +2,6 @@
# must be unique within the subnet
HA_NAME="ganesha-ha-360"
#
-# The gluster server from which to mount the shared data volume.
-HA_VOL_SERVER="server1"
-#
# N.B. you may use short names or long names; you may not use IP addrs.
# Once you select one, stay with it as it will be mildly unpleasant to
# clean up if you switch later on. Ensure that all names - short and/or
diff --git a/extras/ganesha/ocf/Makefile.am b/extras/ganesha/ocf/Makefile.am
index 6aed9548a0f..990a609f254 100644
--- a/extras/ganesha/ocf/Makefile.am
+++ b/extras/ganesha/ocf/Makefile.am
@@ -9,4 +9,3 @@ ocfdir = $(prefix)/lib/ocf
radir = $(ocfdir)/resource.d/heartbeat
ra_SCRIPTS = ganesha_grace ganesha_mon ganesha_nfsd
-
diff --git a/extras/ganesha/ocf/ganesha_grace b/extras/ganesha/ocf/ganesha_grace
index cb6dcc4e867..825f7164597 100644
--- a/extras/ganesha/ocf/ganesha_grace
+++ b/extras/ganesha/ocf/ganesha_grace
@@ -219,4 +219,3 @@ rc=$?
# The resource agent may optionally log a debug message
ocf_log debug "${OCF_RESOURCE_INSTANCE} ${__OCF_ACTION} returned $rc"
exit $rc
-
diff --git a/extras/ganesha/ocf/ganesha_mon b/extras/ganesha/ocf/ganesha_mon
index 7d2c268d412..2b4a9d6da84 100644
--- a/extras/ganesha/ocf/ganesha_mon
+++ b/extras/ganesha/ocf/ganesha_mon
@@ -232,4 +232,3 @@ rc=$?
# The resource agent may optionally log a debug message
ocf_log debug "${OCF_RESOURCE_INSTANCE} ${__OCF_ACTION} returned $rc"
exit $rc
-
diff --git a/extras/ganesha/ocf/ganesha_nfsd b/extras/ganesha/ocf/ganesha_nfsd
index 29e333ca903..f91e8b6b8f7 100644
--- a/extras/ganesha/ocf/ganesha_nfsd
+++ b/extras/ganesha/ocf/ganesha_nfsd
@@ -36,7 +36,7 @@ else
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
fi
-OCF_RESKEY_ha_vol_mnt_default="/var/run/gluster/shared_storage"
+OCF_RESKEY_ha_vol_mnt_default="/run/gluster/shared_storage"
: ${OCF_RESKEY_ha_vol_mnt=${OCF_RESKEY_ha_vol_mnt_default}}
ganesha_meta_data() {
@@ -165,4 +165,3 @@ rc=$?
# The resource agent may optionally log a debug message
ocf_log debug "${OCF_RESOURCE_INSTANCE} ${__OCF_ACTION} returned $rc"
exit $rc
-
diff --git a/extras/ganesha/scripts/Makefile.am b/extras/ganesha/scripts/Makefile.am
index c326fc2f136..7e345fd5f19 100644
--- a/extras/ganesha/scripts/Makefile.am
+++ b/extras/ganesha/scripts/Makefile.am
@@ -1,6 +1,6 @@
-EXTRA_DIST= ganesha-ha.sh dbus-send.sh create-export-ganesha.sh \
- generate-epoch.py copy-export-ganesha.sh
+EXTRA_DIST= create-export-ganesha.sh generate-epoch.py dbus-send.sh \
+ ganesha-ha.sh
scriptsdir = $(libexecdir)/ganesha
-scripts_SCRIPTS = create-export-ganesha.sh dbus-send.sh ganesha-ha.sh \
- generate-epoch.py copy-export-ganesha.sh
+scripts_SCRIPTS = create-export-ganesha.sh dbus-send.sh generate-epoch.py \
+ ganesha-ha.sh
diff --git a/extras/ganesha/scripts/copy-export-ganesha.sh b/extras/ganesha/scripts/copy-export-ganesha.sh
deleted file mode 100755
index 9c4afa02d68..00000000000
--- a/extras/ganesha/scripts/copy-export-ganesha.sh
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/bash
-
-#This script is called by glusterd when in case of
-#reboot.An export file specific to a volume
-#is copied in GANESHA_DIR/exports from online node.
-
-# Try loading the config from any of the distro
-# specific configuration locations
-if [ -f /etc/sysconfig/ganesha ]
- then
- . /etc/sysconfig/ganesha
-fi
-if [ -f /etc/conf.d/ganesha ]
- then
- . /etc/conf.d/ganesha
-fi
-if [ -f /etc/default/ganesha ]
- then
- . /etc/default/ganesha
-fi
-
-GANESHA_DIR=${1%/}
-VOL=$2
-CONF=
-host=$(hostname -s)
-SECRET_PEM="/var/lib/glusterd/nfs/secret.pem"
-
-function check_cmd_status()
-{
- if [ "$1" != "0" ]
- then
- rm -rf $GANESHA_DIR/exports/export.$VOL.conf
- exit 1
- fi
-}
-
-
-if [ ! -d "$GANESHA_DIR/exports" ];
- then
- mkdir $GANESHA_DIR/exports
- check_cmd_status `echo $?`
-fi
-
-function find_rhel7_conf
-{
- while [[ $# > 0 ]]
- do
- key="$1"
- case $key in
- -f)
- CONFFILE="$2"
- ;;
- *)
- ;;
- esac
- shift
- done
-}
-
-if [ -z $CONFFILE ]; then
- find_rhel7_conf $OPTIONS
-
-fi
-CONF=${CONFFILE:-/etc/ganesha/ganesha.conf}
-
-#remove the old export entry from NFS-Ganesha
-#if already exported
-dbus-send --type=method_call --print-reply --system \
- --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
- org.ganesha.nfsd.exportmgr.ShowExports \
- | grep -w -q "/$VOL"
-if [ $? -eq 0 ]; then
- removed_id=`cat $GANESHA_DIR/exports/export.$VOL.conf |\
- grep Export_Id | awk -F"[=,;]" '{print$2}' | tr -d '[[:space:]]'`
-
- dbus-send --print-reply --system --dest=org.ganesha.nfsd \
- /org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.RemoveExport \
- uint16:$removed_id 2>&1
-fi
-
-ha_servers=$(pcs status | grep "Online:" | grep -o '\[.*\]' | sed -e 's/\[//' | sed -e 's/\]//')
-IFS=$' '
-for server in ${ha_servers} ; do
- current_host=`echo $server | cut -d "." -f 1`
- if [ $host != $current_host ]
- then
- scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
- ${SECRET_PEM} $server:$GANESHA_DIR/exports/export.$VOL.conf \
- $GANESHA_DIR/exports/export.$VOL.conf
- break
- fi
-done
-
-if ! (cat $CONF | grep $VOL.conf\"$ )
-then
-echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF
-fi
diff --git a/extras/ganesha/scripts/create-export-ganesha.sh b/extras/ganesha/scripts/create-export-ganesha.sh
index a1a35dba58a..3040e8138b0 100755
--- a/extras/ganesha/scripts/create-export-ganesha.sh
+++ b/extras/ganesha/scripts/create-export-ganesha.sh
@@ -21,14 +21,17 @@ if [ -f /etc/default/ganesha ]
fi
GANESHA_DIR=${1%/}
-VOL=$2
-CONF=
+OPTION=$2
+VOL=$3
+CONF=$GANESHA_DIR"/ganesha.conf"
+declare -i EXPORT_ID
function check_cmd_status()
{
if [ "$1" != "0" ]
then
rm -rf $GANESHA_DIR/exports/export.$VOL.conf
+ sed -i /$VOL.conf/d $CONF
exit 1
fi
}
@@ -40,28 +43,6 @@ if [ ! -d "$GANESHA_DIR/exports" ];
check_cmd_status `echo $?`
fi
-function find_rhel7_conf
-{
- while [[ $# > 0 ]]
- do
- key="$1"
- case $key in
- -f)
- CONFFILE="$2"
- ;;
- *)
- ;;
- esac
- shift
- done
-}
-
-if [ -z $CONFFILE ]; then
- find_rhel7_conf $OPTIONS
-
-fi
-CONF=${CONFFILE:-/etc/ganesha/ganesha.conf}
-
function write_conf()
{
echo -e "# WARNING : Using Gluster CLI will overwrite manual
@@ -83,11 +64,29 @@ echo " Pseudo=\"/$VOL\";"
echo ' Protocols = "3", "4" ;'
echo ' Transports = "UDP","TCP";'
echo ' SecType = "sys";'
+echo ' Security_Label = False;'
echo " }"
}
-
-write_conf $@ > $GANESHA_DIR/exports/export.$VOL.conf
-if ! (cat $CONF | grep $VOL.conf\"$ )
+if [ "$OPTION" = "on" ];
then
-echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF
+ if ! (cat $CONF | grep $VOL.conf\"$ )
+ then
+ write_conf $@ > $GANESHA_DIR/exports/export.$VOL.conf
+ echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF
+ count=`ls -l $GANESHA_DIR/exports/*.conf | wc -l`
+ if [ "$count" = "1" ] ; then
+ EXPORT_ID=2
+ else
+ EXPORT_ID=`cat $GANESHA_DIR/.export_added`
+ check_cmd_status `echo $?`
+ EXPORT_ID=EXPORT_ID+1
+ sed -i s/Export_Id.*/"Export_Id= $EXPORT_ID ;"/ \
+ $GANESHA_DIR/exports/export.$VOL.conf
+ check_cmd_status `echo $?`
+ fi
+ echo $EXPORT_ID > $GANESHA_DIR/.export_added
+ fi
+else
+ rm -rf $GANESHA_DIR/exports/export.$VOL.conf
+ sed -i /$VOL.conf/d $CONF
fi
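The create-export-ganesha.sh change above moves export-id bookkeeping into this script: the first export gets id 2, and each subsequent export reads the last id from $GANESHA_DIR/.export_added, increments it, and rewrites the Export_Id line of the generated config. A minimal sketch of that counter logic, with placeholder paths and the state-file existence used in place of the script's conf-file count:

    # Sketch only: allocate the next Export_Id roughly the way the script
    # does, using a small state file next to the generated export configs.
    import os
    import re

    def next_export_id(ganesha_dir, conf_path):
        state = os.path.join(ganesha_dir, '.export_added')
        if not os.path.exists(state):
            export_id = 2                     # first export always gets id 2
        else:
            with open(state) as f:
                export_id = int(f.read().strip()) + 1
            with open(conf_path) as f:
                text = f.read()
            with open(conf_path, 'w') as f:   # rewrite the Export_Id line
                f.write(re.sub(r'Export_Id.*',
                               'Export_Id= {0} ;'.format(export_id), text))
        with open(state, 'w') as f:
            f.write(str(export_id))
        return export_id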
diff --git a/extras/ganesha/scripts/dbus-send.sh b/extras/ganesha/scripts/dbus-send.sh
index 14af095464d..9d613a0e7ad 100755
--- a/extras/ganesha/scripts/dbus-send.sh
+++ b/extras/ganesha/scripts/dbus-send.sh
@@ -15,71 +15,22 @@ if [ -f /etc/default/ganesha ]
. /etc/default/ganesha
fi
-declare -i EXPORT_ID
GANESHA_DIR=${1%/}
OPTION=$2
VOL=$3
-CONF=
-
-function find_rhel7_conf
-{
- while [[ $# > 0 ]]
- do
- key="$1"
- case $key in
- -f)
- CONFFILE="$2"
- break;
- ;;
- *)
- ;;
- esac
- shift
- done
-}
-
-if [ -z $CONFFILE ]
- then
- find_rhel7_conf $OPTIONS
-
-fi
-
-CONF=${CONFFILE:-/etc/ganesha/ganesha.conf}
+CONF=$GANESHA_DIR"/ganesha.conf"
function check_cmd_status()
{
if [ "$1" != "0" ]
- then
- rm -rf $GANESHA_DIR/exports/export.$VOL.conf
- sed -i /$VOL.conf/d $CONF
- exit 1
+ then
+            logger "dynamic export failed on node: $(hostname -s)"
fi
}
#This function keeps track of export IDs and increments it with every new entry
function dynamic_export_add()
{
- count=`ls -l $GANESHA_DIR/exports/*.conf | wc -l`
- if [ "$count" = "1" ] ;
- then
- EXPORT_ID=2
- else
- #if [ -s /var/lib/ganesha/export_removed ];
- # then
- # EXPORT_ID=`head -1 /var/lib/ganesha/export_removed`
- # sed -i -e "1d" /var/lib/ganesha/export_removed
- # else
-
- EXPORT_ID=`cat $GANESHA_DIR/.export_added`
- check_cmd_status `echo $?`
- EXPORT_ID=EXPORT_ID+1
- #fi
- fi
- echo $EXPORT_ID > $GANESHA_DIR/.export_added
- check_cmd_status `echo $?`
- sed -i s/Export_Id.*/"Export_Id= $EXPORT_ID ;"/ \
-$GANESHA_DIR/exports/export.$VOL.conf
- check_cmd_status `echo $?`
dbus-send --system \
--dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
org.ganesha.nfsd.exportmgr.AddExport string:$GANESHA_DIR/exports/export.$VOL.conf \
@@ -90,16 +41,22 @@ string:"EXPORT(Path=/$VOL)"
#This function removes an export dynamically(uses the export_id of the export)
function dynamic_export_remove()
{
- removed_id=`cat $GANESHA_DIR/exports/export.$VOL.conf |\
-grep Export_Id | awk -F"[=,;]" '{print$2}'| tr -d '[[:space:]]'`
- check_cmd_status `echo $?`
+    # The block below fetches all exports from the ShowExports command,
+    # searches for the export entry matching the path, and then extracts
+    # its export id. The path may cover either the entire volume or a
+    # subdir; both cases are handled. Only the first entry in the list
+    # is removed, on the assumption that the entry exported via the CLI
+    # has the lowest export id value.
+ removed_id=$(dbus-send --type=method_call --print-reply --system \
+ --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
+ org.ganesha.nfsd.exportmgr.ShowExports | grep -B 1 -we \
+ "/"$VOL -e "/"$VOL"/" | grep uint16 | awk '{print $2}' \
+ | head -1)
+
dbus-send --print-reply --system \
--dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
org.ganesha.nfsd.exportmgr.RemoveExport uint16:$removed_id
check_cmd_status `echo $?`
- sed -i /$VOL.conf/d $CONF
- rm -rf $GANESHA_DIR/exports/export.$VOL.conf
-
}
if [ "$OPTION" = "on" ];
@@ -111,4 +68,3 @@ if [ "$OPTION" = "off" ];
then
dynamic_export_remove $@
fi
-
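The reworked dynamic_export_remove() above derives the export id by scraping the dbus-send ShowExports reply with grep -B 1, awk and head. A rough Python equivalent of that parsing, purely to illustrate the idea; it assumes nfs-ganesha is registered on the system bus and that the reply prints the uint16 export id on the line preceding the export path, as the pipeline above relies on:

    # Sketch only: find the export id for a volume (or its subdir export)
    # from the ShowExports reply, keeping the first match like `head -1`.
    import subprocess

    def export_id_for_volume(volume):
        reply = subprocess.check_output([
            'dbus-send', '--type=method_call', '--print-reply', '--system',
            '--dest=org.ganesha.nfsd', '/org/ganesha/nfsd/ExportMgr',
            'org.ganesha.nfsd.exportmgr.ShowExports'
        ]).decode('utf-8')
        last_id = None
        for line in (l.strip() for l in reply.splitlines()):
            if line.startswith('uint16'):
                last_id = line.split()[1]        # candidate export id
            elif line.startswith('string') and (
                    '"/{0}"'.format(volume) in line or
                    '"/{0}/'.format(volume) in line):
                return last_id                   # first matching entry wins
        return None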
diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index ac8c91f194e..9790a719e10 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -20,16 +20,26 @@
# ensure that the NFS GRACE DBUS signal is sent after the VIP moves to
# the new host.
+GANESHA_HA_SH=$(realpath $0)
HA_NUM_SERVERS=0
HA_SERVERS=""
-HA_CONFDIR="/etc/ganesha"
HA_VOL_NAME="gluster_shared_storage"
-HA_VOL_MNT="/var/run/gluster/shared_storage"
+HA_VOL_MNT="/run/gluster/shared_storage"
+HA_CONFDIR=$HA_VOL_MNT"/nfs-ganesha"
SERVICE_MAN="DISTRO_NOT_FOUND"
-RHEL6_PCS_CNAME_OPTION="--name"
+# rhel, fedora id, version
+ID=""
+VERSION_ID=""
+
+PCS9OR10_PCS_CNAME_OPTION=""
+PCS9OR10_PCS_CLONE_OPTION="clone"
SECRET_PEM="/var/lib/glusterd/nfs/secret.pem"
+# The UNBLOCK RA uses shared_storage, which may become unavailable
+# while any of the nodes is rebooting. Hence the increased timeout value.
+PORTBLOCK_UNBLOCK_TIMEOUT="60s"
+
# Try loading the config from any of the distro
# specific configuration locations
if [ -f /etc/sysconfig/ganesha ]
@@ -64,9 +74,9 @@ function find_rhel7_conf
done
}
-if [ -z $CONFFILE ]
+if [ -z ${CONFFILE} ]
then
- find_rhel7_conf $OPTIONS
+ find_rhel7_conf ${OPTIONS}
fi
@@ -86,9 +96,9 @@ usage() {
determine_service_manager () {
- if [ -e "/usr/bin/systemctl" ];
+ if [ -e "/bin/systemctl" ];
then
- SERVICE_MAN="/usr/bin/systemctl"
+ SERVICE_MAN="/bin/systemctl"
elif [ -e "/sbin/invoke-rc.d" ];
then
SERVICE_MAN="/sbin/invoke-rc.d"
@@ -96,9 +106,9 @@ determine_service_manager () {
then
SERVICE_MAN="/sbin/service"
fi
- if [ "$SERVICE_MAN" == "DISTRO_NOT_FOUND" ]
+ if [[ "${SERVICE_MAN}X" == "DISTRO_NOT_FOUNDX" ]]
then
- echo "Service manager not recognized, exiting"
+ logger "Service manager not recognized, exiting"
exit 1
fi
}
@@ -107,13 +117,23 @@ manage_service ()
{
local action=${1}
local new_node=${2}
- if [ "$SERVICE_MAN" == "/usr/bin/systemctl" ]
+ local option=
+
+ if [[ "${action}" == "start" ]]; then
+ option="yes"
+ else
+ option="no"
+ fi
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${new_node} "${GANESHA_HA_SH} --setup-ganesha-conf-files $HA_CONFDIR $option"
+
+ if [[ "${SERVICE_MAN}" == "/bin/systemctl" ]]
then
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} root@${new_node} "$SERVICE_MAN ${action} nfs-ganesha"
+${SECRET_PEM} root@${new_node} "${SERVICE_MAN} ${action} nfs-ganesha"
else
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} root@${new_node} "$SERVICE_MAN nfs-ganesha ${action}"
+${SECRET_PEM} root@${new_node} "${SERVICE_MAN} nfs-ganesha ${action}"
fi
}
@@ -125,7 +145,7 @@ check_cluster_exists()
if [ -e /var/run/corosync.pid ]; then
cluster_name=$(pcs status | grep "Cluster name:" | cut -d ' ' -f 3)
- if [ ${cluster_name} -a ${cluster_name} = ${name} ]; then
+ if [[ "${cluster_name}X" == "${name}X" ]]; then
logger "$name already exists, exiting"
exit 0
fi
@@ -140,7 +160,7 @@ determine_servers()
local tmp_ifs=${IFS}
local ha_servers=""
- if [ "X${cmd}X" != "XsetupX" -a "X${cmd}X" != "XstatusX" ]; then
+ if [ "${cmd}X" != "setupX" -a "${cmd}X" != "statusX" ]; then
ha_servers=$(pcs status | grep "Online:" | grep -o '\[.*\]' | sed -e 's/\[//' | sed -e 's/\]//')
IFS=$' '
for server in ${ha_servers} ; do
@@ -160,6 +180,13 @@ determine_servers()
fi
}
+stop_ganesha_all()
+{
+ local serverlist=${1}
+ for node in ${serverlist} ; do
+ manage_service "stop" ${node}
+ done
+}
setup_cluster()
{
@@ -169,16 +196,23 @@ setup_cluster()
local unclean=""
local quorum_policy="stop"
-
logger "setting up cluster ${name} with the following ${servers}"
- pcs cluster auth ${servers}
- # pcs cluster setup --name ${name} ${servers}
- pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} --transport udpu ${servers}
+ # pcs cluster setup --force ${PCS9OR10_PCS_CNAME_OPTION} ${name} ${servers}
+ pcs cluster setup --force ${PCS9OR10_PCS_CNAME_OPTION} ${name} --enable ${servers}
if [ $? -ne 0 ]; then
- logger "pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} ${servers} failed"
+ logger "pcs cluster setup ${PCS9OR10_PCS_CNAME_OPTION} ${name} --enable ${servers} failed, shutting down ganesha and bailing out"
+        # setup failed; stop all ganesha processes and clean up symlinks in the cluster
+ stop_ganesha_all "${servers}"
exit 1;
fi
+
+ # pcs cluster auth ${servers}
+ pcs cluster auth
+ if [ $? -ne 0 ]; then
+ logger "pcs cluster auth failed"
+ fi
+
pcs cluster start --all
if [ $? -ne 0 ]; then
logger "pcs cluster start failed"
@@ -194,7 +228,7 @@ setup_cluster()
done
unclean=$(pcs status | grep -u "UNCLEAN")
- while [[ "${unclean}X" = "UNCLEANX" ]]; do
+ while [[ "${unclean}X" == "UNCLEANX" ]]; do
sleep 1
unclean=$(pcs status | grep -u "UNCLEAN")
done
@@ -221,86 +255,38 @@ setup_finalize_ha()
local stopped=""
stopped=$(pcs status | grep -u "Stopped")
- while [[ "${stopped}X" = "StoppedX" ]]; do
+ while [[ "${stopped}X" == "StoppedX" ]]; do
sleep 1
stopped=$(pcs status | grep -u "Stopped")
done
}
-setup_copy_config()
-{
- local short_host=$(hostname -s)
- local tganesha_conf=$(mktemp -u)
-
- if [ -e ${SECRET_PEM} ]; then
- while [[ ${1} ]]; do
- current_host=`echo ${1} | cut -d "." -f 1`
- if [ ${short_host} != ${current_host} ]; then
- scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${HA_CONFDIR}/ganesha-ha.conf ${1}:${HA_CONFDIR}/
- if [ $? -ne 0 ]; then
- logger "warning: scp ganesha-ha.conf to ${1} failed"
- fi
- fi
- shift
- done
- else
- logger "warning: scp ganesha-ha.conf to ${1} failed"
- fi
-}
-
refresh_config ()
{
local short_host=$(hostname -s)
local VOL=${1}
local HA_CONFDIR=${2}
- local tganesha_vol_conf=$(mktemp)
local short_host=$(hostname -s)
- cp ${HA_CONFDIR}/exports/export.$VOL.conf \
-${tganesha_vol_conf}
+ local export_id=$(grep ^[[:space:]]*Export_Id $HA_CONFDIR/exports/export.$VOL.conf |\
+ awk -F"[=,;]" '{print $2}' | tr -d '[[:space:]]')
+
if [ -e ${SECRET_PEM} ]; then
while [[ ${3} ]]; do
current_host=`echo ${3} | cut -d "." -f 1`
- if [ ${short_host} != ${current_host} ]; then
- removed_id=$(ssh -oPasswordAuthentication=no \
--oStrictHostKeyChecking=no -i ${SECRET_PEM} root@${current_host} \
-"cat $HA_CONFDIR/exports/export.$VOL.conf |\
-grep Export_Id | awk -F\"[=,;]\" '{print \$2}' | tr -d '[[:space:]]'")
-
+ if [[ ${short_host} != ${current_host} ]]; then
output=$(ssh -oPasswordAuthentication=no \
-oStrictHostKeyChecking=no -i ${SECRET_PEM} root@${current_host} \
"dbus-send --print-reply --system --dest=org.ganesha.nfsd \
-/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.RemoveExport \
-uint16:$removed_id 2>&1")
- ret=$?
- logger <<< "${output}"
- if [ ${ret} -ne 0 ]; then
- echo "Error: refresh-config failed on ${current_host}."
- exit 1
- fi
- sleep 1
- sed -i s/Export_Id.*/"Export_Id= $removed_id ;"/ \
- ${tganesha_vol_conf}
-
- scp -q -oPasswordAuthentication=no \
--oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${tganesha_vol_conf} \
-${current_host}:${HA_CONFDIR}/exports/export.$VOL.conf
-
- output=$(ssh -oPasswordAuthentication=no \
--oStrictHostKeyChecking=no -i ${SECRET_PEM} root@${current_host} \
-"dbus-send --print-reply --system --dest=org.ganesha.nfsd \
-/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.AddExport \
+/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.UpdateExport \
string:$HA_CONFDIR/exports/export.$VOL.conf \
-string:\"EXPORT(Path=/$removed_id)\" 2>&1")
+string:\"EXPORT(Export_Id=$export_id)\" 2>&1")
ret=$?
logger <<< "${output}"
if [ ${ret} -ne 0 ]; then
- echo "Error: refresh-config failed on ${current_host}."
- exit 1
+ echo "Refresh-config failed on ${current_host}. Please check logs on ${current_host}"
else
echo "Refresh-config completed on ${current_host}."
fi
@@ -314,66 +300,17 @@ string:\"EXPORT(Path=/$removed_id)\" 2>&1")
fi
# Run the same command on the localhost,
- removed_id=`cat $HA_CONFDIR/exports/export.$VOL.conf |\
-grep Export_Id | awk -F"[=,;]" '{print$2}' | tr -d '[[:space:]]'`
output=$(dbus-send --print-reply --system --dest=org.ganesha.nfsd \
-/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.RemoveExport \
-uint16:$removed_id 2>&1)
- ret=$?
- logger <<< "${output}"
- if [ ${ret} -ne 0 ]; then
- echo "Error: refresh-config failed on localhost."
- exit 1
- fi
- sleep 1
- output=$(dbus-send --print-reply --system --dest=org.ganesha.nfsd \
-/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.AddExport \
+/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.UpdateExport \
string:$HA_CONFDIR/exports/export.$VOL.conf \
-string:"EXPORT(Path=/$removed_id)" 2>&1)
+string:"EXPORT(Export_Id=$export_id)" 2>&1)
ret=$?
logger <<< "${output}"
if [ ${ret} -ne 0 ] ; then
- echo "Error: refresh-config failed on localhost."
- exit 1
+ echo "Refresh-config failed on localhost."
else
echo "Success: refresh-config completed."
fi
- rm -f ${tganesha_vol_conf}
-
-}
-
-copy_export_config ()
-{
- local new_node=${1}
- local tganesha_conf=$(mktemp)
- local tganesha_exports=$(mktemp -d)
- local short_host=$(hostname -s)
- # avoid prompting for password, even with password-less scp
- # scp $host1:$file $host2:$file prompts for the password
- # Ideally all the existing nodes in the cluster should have same
- # copy of the configuration files. Maybe for sanity check, copy
- # the state from HA_VOL_SERVER?
- if [ "${HA_VOL_SERVER}" == $(hostname) ]
- then
- cp ${GANESHA_CONF} ${tganesha_conf}
- else
- scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${HA_VOL_SERVER}:${GANESHA_CONF} $short_host:${tganesha_conf}
- fi
- scp -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${tganesha_conf} ${new_node}:${GANESHA_CONF}
- rm -f ${tganesha_conf}
-
- if [ "${HA_VOL_SERVER}" == $(hostname) ]
- then
- cp -r ${HA_CONFDIR}/exports ${tganesha_exports}
- else
- scp -r -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${HA_VOL_SERVER}:${HA_CONFDIR}/exports/ $short_host:${tganesha_exports}
- fi
- scp -r -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
-${SECRET_PEM} ${tganesha_exports}/exports ${new_node}:${HA_CONFDIR}/
- rm -rf ${tganesha_exports}
}
@@ -420,13 +357,10 @@ teardown_cluster()
cleanup_ganesha_config ()
{
- rm -rf ${HA_CONFDIR}/exports/*.conf
- rm -rf ${HA_CONFDIR}/.export_added
+ rm -f /etc/corosync/corosync.conf
rm -rf /etc/cluster/cluster.conf*
rm -rf /var/lib/pacemaker/cib/*
- rm -f /etc/corosync/corosync.conf
- sed -r -i -e '/^%include[[:space:]]+".+\.conf"$/d' ${GANESHA_CONF}
- rm -rf ${HA_VOL_MNT}/nfs-ganesha
+ sed -r -i -e '/^%include[[:space:]]+".+\.conf"$/d' $HA_CONFDIR/ganesha.conf
}
do_create_virt_ip_constraints()
@@ -437,17 +371,17 @@ do_create_virt_ip_constraints()
# first a constraint location rule that says the VIP must be where
# there's a ganesha.nfsd running
- pcs -f ${cibfile} constraint location ${primary}-cluster_ip-1 rule score=-INFINITY ganesha-active ne 1
+ pcs -f ${cibfile} constraint location ${primary}-group rule score=-INFINITY ganesha-active ne 1
if [ $? -ne 0 ]; then
- logger "warning: pcs constraint location ${primary}-cluster_ip-1 rule score=-INFINITY ganesha-active ne 1 failed"
+ logger "warning: pcs constraint location ${primary}-group rule score=-INFINITY ganesha-active ne 1 failed"
fi
# then a set of constraint location prefers to set the prefered order
# for where a VIP should move
while [[ ${1} ]]; do
- pcs -f ${cibfile} constraint location ${primary}-cluster_ip-1 prefers ${1}=${weight}
+ pcs -f ${cibfile} constraint location ${primary}-group prefers ${1}=${weight}
if [ $? -ne 0 ]; then
- logger "warning: pcs constraint location ${primary}-cluster_ip-1 prefers ${1}=${weight} failed"
+ logger "warning: pcs constraint location ${primary}-group prefers ${1}=${weight} failed"
fi
weight=$(expr ${weight} + 1000)
shift
@@ -457,9 +391,9 @@ do_create_virt_ip_constraints()
# on Fedora setting appears to be additive, so to get the desired
# value we adjust the weight
# weight=$(expr ${weight} - 100)
- pcs -f ${cibfile} constraint location ${primary}-cluster_ip-1 prefers ${primary}=${weight}
+ pcs -f ${cibfile} constraint location ${primary}-group prefers ${primary}=${weight}
if [ $? -ne 0 ]; then
- logger "warning: pcs constraint location ${primary}-cluster_ip-1 prefers ${primary}=${weight} failed"
+ logger "warning: pcs constraint location ${primary}-group prefers ${primary}=${weight} failed"
fi
}
@@ -475,7 +409,7 @@ wrap_create_virt_ip_constraints()
# the result is "node2 node3 node4"; for node2, "node3 node4 node1"
# and so on.
while [[ ${1} ]]; do
- if [ "${1}" = "${primary}" ]; then
+ if [[ ${1} == ${primary} ]]; then
shift
while [[ ${1} ]]; do
tail=${tail}" "${1}
@@ -506,15 +440,15 @@ setup_create_resources()
local cibfile=$(mktemp -u)
# fixup /var/lib/nfs
- logger "pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} --clone"
- pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} --clone
+ logger "pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION}"
+ pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION}
if [ $? -ne 0 ]; then
- logger "warning: pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} --clone failed"
+ logger "warning: pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION} failed"
fi
- pcs resource create nfs-mon ocf:heartbeat:ganesha_mon --clone
+ pcs resource create nfs-mon ocf:heartbeat:ganesha_mon ${PCS9OR10_PCS_CLONE_OPTION}
if [ $? -ne 0 ]; then
- logger "warning: pcs resource create nfs-mon ocf:heartbeat:ganesha_mon --clone failed"
+ logger "warning: pcs resource create nfs-mon ocf:heartbeat:ganesha_mon ${PCS9OR10_PCS_CLONE_OPTION} failed"
fi
# see comment in (/usr/lib/ocf/resource.d/heartbeat/ganesha_grace
@@ -522,9 +456,9 @@ setup_create_resources()
# ganesha-active crm_attribute
sleep 5
- pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone meta notify=true
+ pcs resource create nfs-grace ocf:heartbeat:ganesha_grace ${PCS9OR10_PCS_CLONE_OPTION} notify=true
if [ $? -ne 0 ]; then
- logger "warning: pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone failed"
+ logger "warning: pcs resource create nfs-grace ocf:heartbeat:ganesha_grace ${PCS9OR10_PCS_CLONE_OPTION} failed"
fi
pcs constraint location nfs-grace-clone rule score=-INFINITY grace-active ne 1
@@ -554,9 +488,16 @@ setup_create_resources()
eval tmp_ipaddr=\$${clean_name}
ipaddr=${tmp_ipaddr//_/.}
- pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} cidr_netmask=32 op monitor interval=15s
+ pcs -f ${cibfile} resource create ${1}-nfs_block ocf:heartbeat:portblock protocol=tcp \
+ portno=2049 action=block ip=${ipaddr} --group ${1}-group
if [ $? -ne 0 ]; then
- logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} cidr_netmask=32 op monitor interval=15s failed"
+ logger "warning pcs resource create ${1}-nfs_block failed"
+ fi
+ pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+ cidr_netmask=32 op monitor interval=15s --group ${1}-group --after ${1}-nfs_block
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+ cidr_netmask=32 op monitor interval=15s failed"
fi
pcs -f ${cibfile} constraint order nfs-grace-clone then ${1}-cluster_ip-1
@@ -564,6 +505,16 @@ setup_create_resources()
logger "warning: pcs constraint order nfs-grace-clone then ${1}-cluster_ip-1 failed"
fi
+ pcs -f ${cibfile} resource create ${1}-nfs_unblock ocf:heartbeat:portblock protocol=tcp \
+ portno=2049 action=unblock ip=${ipaddr} reset_local_on_unblock_stop=true \
+ tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${1}-group --after ${1}-cluster_ip-1 \
+ op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} \
+ op monitor interval=10s timeout=${PORTBLOCK_UNBLOCK_TIMEOUT}
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-nfs_unblock failed"
+ fi
+
+
shift
done
@@ -602,9 +553,9 @@ teardown_resources()
fi
while [[ ${1} ]]; do
- pcs resource delete ${1}-cluster_ip-1
+ pcs resource delete ${1}-group
if [ $? -ne 0 ]; then
- logger "warning: pcs resource delete ${1}-cluster_ip-1 failed"
+ logger "warning: pcs resource delete ${1}-group failed"
fi
shift
done
@@ -627,9 +578,16 @@ recreate_resources()
eval tmp_ipaddr=\$${clean_name}
ipaddr=${tmp_ipaddr//_/.}
- pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} cidr_netmask=32 op monitor interval=15s
+ pcs -f ${cibfile} resource create ${1}-nfs_block ocf:heartbeat:portblock protocol=tcp \
+ portno=2049 action=block ip=${ipaddr} --group ${1}-group
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-nfs_block failed"
+ fi
+ pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+ cidr_netmask=32 op monitor interval=15s --group ${1}-group --after ${1}-nfs_block
if [ $? -ne 0 ]; then
- logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} cidr_netmask=32 op monitor interval=10s failed"
+ logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+ cidr_netmask=32 op monitor interval=15s failed"
fi
pcs -f ${cibfile} constraint order nfs-grace-clone then ${1}-cluster_ip-1
@@ -637,6 +595,15 @@ recreate_resources()
logger "warning: pcs constraint order nfs-grace-clone then ${1}-cluster_ip-1 failed"
fi
+ pcs -f ${cibfile} resource create ${1}-nfs_unblock ocf:heartbeat:portblock protocol=tcp \
+ portno=2049 action=unblock ip=${ipaddr} reset_local_on_unblock_stop=true \
+ tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${1}-group --after ${1}-cluster_ip-1 \
+ op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} \
+ op monitor interval=10s timeout=${PORTBLOCK_UNBLOCK_TIMEOUT}
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-nfs_unblock failed"
+ fi
+
shift
done
}
@@ -650,15 +617,32 @@ addnode_recreate_resources()
recreate_resources ${cibfile} ${HA_SERVERS}
- pcs -f ${cibfile} resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${add_vip} cidr_netmask=32 op monitor interval=15s
+ pcs -f ${cibfile} resource create ${add_node}-nfs_block ocf:heartbeat:portblock \
+ protocol=tcp portno=2049 action=block ip=${add_vip} --group ${add_node}-group
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${add_node}-nfs_block failed"
+ fi
+ pcs -f ${cibfile} resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr \
+ ip=${add_vip} cidr_netmask=32 op monitor interval=15s --group ${add_node}-group \
+ --after ${add_node}-nfs_block
if [ $? -ne 0 ]; then
- logger "warning pcs resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${add_vip} cidr_netmask=32 op monitor interval=10s failed"
+ logger "warning pcs resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr \
+ ip=${add_vip} cidr_netmask=32 op monitor interval=15s failed"
fi
pcs -f ${cibfile} constraint order nfs-grace-clone then ${add_node}-cluster_ip-1
if [ $? -ne 0 ]; then
logger "warning: pcs constraint order nfs-grace-clone then ${add_node}-cluster_ip-1 failed"
fi
+ pcs -f ${cibfile} resource create ${add_node}-nfs_unblock ocf:heartbeat:portblock \
+ protocol=tcp portno=2049 action=unblock ip=${add_vip} reset_local_on_unblock_stop=true \
+ tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${add_node}-group --after \
+ ${add_node}-cluster_ip-1 op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start \
+ timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op monitor interval=10s \
+ timeout=${PORTBLOCK_UNBLOCK_TIMEOUT}
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${add_node}-nfs_unblock failed"
+ fi
}
@@ -667,9 +651,9 @@ clear_resources()
local cibfile=${1}; shift
while [[ ${1} ]]; do
- pcs -f ${cibfile} resource delete ${1}-cluster_ip-1
+ pcs -f ${cibfile} resource delete ${1}-group
if [ $? -ne 0 ]; then
- logger "warning: pcs -f ${cibfile} resource delete ${1}-cluster_ip-1"
+ logger "warning: pcs -f ${cibfile} resource delete ${1}-group"
fi
shift
@@ -746,7 +730,7 @@ deletenode_update_haconfig()
local clean_name=${name//[-.]/_}
ha_servers=$(echo ${HA_SERVERS} | sed -e "s/ /,/")
- sed -i -e "s/^HA_CLUSTER_NODES=.*$/HA_CLUSTER_NODES=\"${ha_servers// /,}\"/" -e "s/^${clean_name}=.*$//" -e "/^$/d" ${HA_CONFDIR}/ganesha-ha.conf
+ sed -i -e "s/^HA_CLUSTER_NODES=.*$/HA_CLUSTER_NODES=\"${ha_servers// /,}\"/" -e "s/^${name}=.*$//" -e "/^$/d" ${HA_CONFDIR}/ganesha-ha.conf
}
@@ -769,7 +753,9 @@ setup_state_volume()
dirname=${1}${dname}
fi
-
+ if [ ! -d ${mnt}/nfs-ganesha/tickle_dir ]; then
+ mkdir ${mnt}/nfs-ganesha/tickle_dir
+ fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname} ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}
fi
@@ -781,9 +767,11 @@ setup_state_volume()
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd
fi
if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/state ]; then
touch ${mnt}/nfs-ganesha/${dirname}/nfs/state
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/state
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov
@@ -793,15 +781,17 @@ setup_state_volume()
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
fi
if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state ]; then
touch ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state
fi
for server in ${HA_SERVERS} ; do
- if [ ${server} != ${dirname} ]; then
+ if [[ ${server} != ${dirname} ]]; then
ln -s ${mnt}/nfs-ganesha/${server}/nfs/ganesha ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/${server}
ln -s ${mnt}/nfs-ganesha/${server}/nfs/statd ${mnt}/nfs-ganesha/${dirname}/nfs/statd/${server}
fi
@@ -812,6 +802,21 @@ setup_state_volume()
}
+enable_pacemaker()
+{
+ while [[ ${1} ]]; do
+ if [[ "${SERVICE_MAN}" == "/bin/systemctl" ]]; then
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${1} "${SERVICE_MAN} enable pacemaker"
+ else
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${1} "${SERVICE_MAN} pacemaker enable"
+ fi
+ shift
+ done
+}
+
+
addnode_state_volume()
{
local newnode=${1}; shift
@@ -840,9 +845,11 @@ addnode_state_volume()
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd
fi
if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/state ]; then
touch ${mnt}/nfs-ganesha/${dirname}/nfs/state
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/state
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov
@@ -852,15 +859,18 @@ addnode_state_volume()
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
fi
if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak ]; then
mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
+ chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
fi
if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state ]; then
touch ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state
fi
for server in ${HA_SERVERS} ; do
+
if [[ ${server} != ${dirname} ]]; then
ln -s ${mnt}/nfs-ganesha/${server}/nfs/ganesha ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/${server}
ln -s ${mnt}/nfs-ganesha/${server}/nfs/statd ${mnt}/nfs-ganesha/${dirname}/nfs/statd/${server}
@@ -893,7 +903,7 @@ delnode_state_volume()
rm -rf ${mnt}/nfs-ganesha/${dirname}
for server in ${HA_SERVERS} ; do
- if [[ "${server}" != "${dirname}" ]]; then
+ if [[ ${server} != ${dirname} ]]; then
rm -f ${mnt}/nfs-ganesha/${server}/nfs/ganesha/${dirname}
rm -f ${mnt}/nfs-ganesha/${server}/nfs/statd/${dirname}
fi
@@ -909,8 +919,9 @@ status()
local index=1
local nodes
- # change tabs to spaces, strip leading spaces
- pcs status | sed -e "s/\t/ /g" -e "s/^[ ]*//" > ${scratch}
+ # change tabs to spaces, strip leading spaces, including any
+ # new '*' at the beginning of a line introduced in pcs-0.10.x
+ pcs status | sed -e "s/\t/ /g" -e "s/^[ ]*\*//" -e "s/^[ ]*//" > ${scratch}
nodes[0]=${1}; shift
@@ -925,7 +936,7 @@ status()
done
# print the nodes that are expected to be online
- grep -E "^Online:" ${scratch}
+ grep -E "Online:" ${scratch}
echo
@@ -934,12 +945,18 @@ status()
echo
- # check if the VIP RAs are on the expected nodes
+ # check if the VIP and port block/unblock RAs are on the expected nodes
for n in ${nodes[*]}; do
+ grep -E -x "${n}-nfs_block \(ocf::heartbeat:portblock\): Started ${n}" > /dev/null 2>&1 ${scratch}
+ result=$?
+ ((healthy+=${result}))
grep -E -x "${n}-cluster_ip-1 \(ocf::heartbeat:IPaddr\): Started ${n}" > /dev/null 2>&1 ${scratch}
result=$?
((healthy+=${result}))
+ grep -E -x "${n}-nfs_unblock \(ocf::heartbeat:portblock\): Started ${n}" > /dev/null 2>&1 ${scratch}
+ result=$?
+ ((healthy+=${result}))
done
grep -E "\):\ Stopped|FAILED" > /dev/null 2>&1 ${scratch}
@@ -956,6 +973,43 @@ status()
rm -f ${scratch}
}
+create_ganesha_conf_file()
+{
+ if [[ "$1" == "yes" ]];
+ then
+ if [ -e $GANESHA_CONF ];
+ then
+ rm -rf $GANESHA_CONF
+ fi
+        # The symlink /etc/ganesha/ganesha.conf needs to be
+        # created from the ganesha conf file kept on the
+        # shared storage. Every node then carries only this
+        # link, while the actual file lives on shared storage,
+        # so that editing the ganesha conf stays easy and the
+        # configuration remains consistent across nodes.
+
+ ln -s $HA_CONFDIR/ganesha.conf $GANESHA_CONF
+ else
+ # Restoring previous file
+ rm -rf $GANESHA_CONF
+ cp $HA_CONFDIR/ganesha.conf $GANESHA_CONF
+ sed -r -i -e '/^%include[[:space:]]+".+\.conf"$/d' $GANESHA_CONF
+ fi
+}
+
+set_quorum_policy()
+{
+ local quorum_policy="stop"
+ local num_servers=${1}
+
+ if [ ${num_servers} -lt 3 ]; then
+ quorum_policy="ignore"
+ fi
+ pcs property set no-quorum-policy=${quorum_policy}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs property set no-quorum-policy=${quorum_policy} failed"
+ fi
+}
main()
{
@@ -965,6 +1019,18 @@ main()
usage
exit 0
fi
+
+ if (selinuxenabled) ;then
+ semanage boolean -m gluster_use_execmem --on
+ fi
+
+ local osid=""
+
+ osid=$(grep ^ID= /etc/os-release)
+ eval $(echo ${osid} | grep -F ID=)
+ osid=$(grep ^VERSION_ID= /etc/os-release)
+ eval $(echo ${osid} | grep -F VERSION_ID=)
+
HA_CONFDIR=${1%/}; shift
local ha_conf=${HA_CONFDIR}/ganesha-ha.conf
local node=""
@@ -985,7 +1051,19 @@ main()
determine_servers "setup"
- if [ "X${HA_NUM_SERVERS}X" != "X1X" ]; then
+    # Fedora 29+ and rhel/centos 8 have pcs-0.10.x.
+    # Default to the pcs-0.10.x options, but check for
+    # rhel/centos 7 (pcs-0.9.x) and adjust accordingly.
+ if [[ ! ${ID} =~ {rhel,centos} ]]; then
+ if [[ ${VERSION_ID} == 7.* ]]; then
+ PCS9OR10_PCS_CNAME_OPTION="--name"
+ PCS9OR10_PCS_CLONE_OPTION="--clone"
+ fi
+ fi
+
+ if [[ "${HA_NUM_SERVERS}X" != "1X" ]]; then
+
+ determine_service_manager
setup_cluster ${HA_NAME} ${HA_NUM_SERVERS} "${HA_SERVERS}"
@@ -995,7 +1073,7 @@ main()
setup_state_volume ${HA_SERVERS}
- setup_copy_config ${HA_SERVERS}
+ enable_pacemaker ${HA_SERVERS}
else
@@ -1025,8 +1103,6 @@ main()
logger "adding ${node} with ${vip} to ${HA_NAME}"
- copy_export_config ${node} ${HA_CONFDIR}
-
determine_service_manager
manage_service "start" ${node}
@@ -1045,7 +1121,7 @@ main()
# newly added node to the file so that the resources specfic
# to this node is correctly recreated in the future.
clean_node=${node//[-.]/_}
- echo "VIP_$clean_node=\"${vip}\"" >> ${HA_CONFDIR}/ganesha-ha.conf
+ echo "VIP_${node}=\"${vip}\"" >> ${HA_CONFDIR}/ganesha-ha.conf
NEW_NODES="$HA_CLUSTER_NODES,${node}"
@@ -1054,7 +1130,11 @@ $HA_CONFDIR/ganesha-ha.conf
addnode_state_volume ${node}
- setup_copy_config ${HA_SERVERS}
+ # addnode_create_resources() already appended ${node} to
+ # HA_SERVERS, so only need to increment HA_NUM_SERVERS
+ # and set quorum policy
+ HA_NUM_SERVERS=$(expr ${HA_NUM_SERVERS} + 1)
+ set_quorum_policy ${HA_NUM_SERVERS}
;;
delete | --delete)
@@ -1073,13 +1153,14 @@ $HA_CONFDIR/ganesha-ha.conf
deletenode_update_haconfig ${node}
- setup_copy_config ${HA_SERVERS}
-
delnode_state_volume ${node}
determine_service_manager
manage_service "stop" ${node}
+
+ HA_NUM_SERVERS=$(expr ${HA_NUM_SERVERS} - 1)
+ set_quorum_policy ${HA_NUM_SERVERS}
;;
status | --status)
@@ -1096,6 +1177,11 @@ $HA_CONFDIR/ganesha-ha.conf
refresh_config ${VOL} ${HA_CONFDIR} ${HA_SERVERS}
;;
+ setup-ganesha-conf-files | --setup-ganesha-conf-files)
+
+ create_ganesha_conf_file ${1}
+ ;;
+
*)
# setup and teardown are not intended to be used by a
# casual user
@@ -1104,7 +1190,10 @@ $HA_CONFDIR/ganesha-ha.conf
;;
esac
+
+ if (selinuxenabled) ;then
+ semanage boolean -m gluster_use_execmem --off
+ fi
}
main $*
-
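Several of the ganesha-ha.sh changes above hinge on reading ID and VERSION_ID from /etc/os-release so the script can pick pcs-0.9.x or pcs-0.10.x style options. A small Python illustration of the same detection; the field names come from os-release itself, while the pcs-version mapping is only an example of the rule stated in the comments above, not a transcription of the script's exact test:

    # Illustration only: read ID and VERSION_ID from /etc/os-release and
    # decide whether the older pcs-0.9.x option spellings are needed.
    def read_os_release(path='/etc/os-release'):
        fields = {}
        with open(path) as f:
            for line in f:
                line = line.strip()
                if '=' in line and not line.startswith('#'):
                    key, _, value = line.partition('=')
                    fields[key] = value.strip('"')
        return fields

    osr = read_os_release()
    needs_old_pcs_options = (osr.get('ID') in ('rhel', 'centos') and
                             osr.get('VERSION_ID', '').startswith('7'))
    print('{0} {1} -> old pcs options: {2}'.format(
        osr.get('ID'), osr.get('VERSION_ID'), needs_old_pcs_options))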
diff --git a/extras/ganesha/scripts/generate-epoch.py b/extras/ganesha/scripts/generate-epoch.py
index 5db5e56b480..77af014bab9 100755
--- a/extras/ganesha/scripts/generate-epoch.py
+++ b/extras/ganesha/scripts/generate-epoch.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
#
# Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
# This file is part of GlusterFS.
@@ -36,13 +36,13 @@ def epoch_uuid():
uuid_bin = binascii.unhexlify(glusterd_uuid.replace("-",""))
- epoch_uuid = int(uuid_bin.encode('hex'), 32) & 0xFFFF0000
+ epoch_uuid = int(binascii.hexlify(uuid_bin), 32) & 0xFFFF0000
return epoch_uuid
# Construct epoch as follows -
# first 32-bit contains the now() time
# rest 32-bit value contains the local glusterd node uuid
epoch = (epoch_now() | epoch_uuid())
-print str(epoch)
+print((str(epoch)))
exit(0)
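The comment above describes the epoch layout: the high 32 bits carry the current time and the remaining bits carry a value derived from the local glusterd UUID, with the two halves combined by a bitwise OR. A simplified illustration of that packing; the shift, the mask, and the random UUID below are placeholders for demonstration, not the script's exact arithmetic:

    # Illustration only: pack a timestamp into the upper 32 bits and a
    # UUID-derived value into the lower bits, then OR them together.
    import time
    import uuid

    time_part = int(time.time()) << 32         # seconds since the epoch, shifted up
    uuid_part = uuid.uuid4().int & 0xFFFF0000   # placeholder for the glusterd UUID bits
    epoch = time_part | uuid_part
    print(epoch)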
diff --git a/extras/geo-rep/Makefile.am b/extras/geo-rep/Makefile.am
index da3dd1f1d39..09eff308ac4 100644
--- a/extras/geo-rep/Makefile.am
+++ b/extras/geo-rep/Makefile.am
@@ -1,13 +1,14 @@
-scriptsdir = $(datadir)/glusterfs/scripts
+scriptsdir = $(libexecdir)/glusterfs/scripts
scripts_SCRIPTS = gsync-upgrade.sh generate-gfid-file.sh get-gfid.sh \
- slave-upgrade.sh schedule_georep.py
+ slave-upgrade.sh schedule_georep.py
scripts_PROGRAMS = gsync-sync-gfid
gsync_sync_gfid_CFLAGS = $(GF_CFLAGS) -Wall -I$(top_srcdir)/libglusterfs/src
gsync_sync_gfid_LDFLAGS = $(GF_LDFLAGS)
gsync_sync_gfid_LDADD = $(GF_LDADD) $(top_builddir)/libglusterfs/src/libglusterfs.la
gsync_sync_gfid_SOURCES = gsync-sync-gfid.c
-gsync_sync_gfid_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src
+gsync_sync_gfid_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src
EXTRA_DIST = gsync-sync-gfid.c gsync-upgrade.sh generate-gfid-file.sh \
get-gfid.sh slave-upgrade.sh schedule_georep.py.in
diff --git a/extras/geo-rep/gsync-sync-gfid.c b/extras/geo-rep/gsync-sync-gfid.c
index e9b9e633402..47dca0413e9 100644
--- a/extras/geo-rep/gsync-sync-gfid.c
+++ b/extras/geo-rep/gsync-sync-gfid.c
@@ -7,103 +7,103 @@
#include <libgen.h>
#include <ctype.h>
#include <stdlib.h>
-#include "glusterfs.h"
-#include "syscall.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/syscall.h>
#ifndef UUID_CANONICAL_FORM_LEN
#define UUID_CANONICAL_FORM_LEN 36
#endif
#ifndef GF_FUSE_AUX_GFID_HEAL
-#define GF_FUSE_AUX_GFID_HEAL "glusterfs.gfid.heal"
+#define GF_FUSE_AUX_GFID_HEAL "glusterfs.gfid.heal"
#endif
-#define GLFS_LINE_MAX (PATH_MAX + (2 * UUID_CANONICAL_FORM_LEN))
+#define GLFS_LINE_MAX (PATH_MAX + (2 * UUID_CANONICAL_FORM_LEN))
int
-main (int argc, char *argv[])
+main(int argc, char *argv[])
{
- char *file = NULL;
- char *tmp = NULL;
- char *tmp1 = NULL;
- char *parent_dir = NULL;
- char *gfid = NULL;
- char *bname = NULL;
- int ret = -1;
- int len = 0;
- FILE *fp = NULL;
- char line[GLFS_LINE_MAX] = {0,};
- char *path = NULL;
- void *blob = NULL;
- void *tmp_blob = NULL;
-
- if (argc != 2) {
- /* each line in the file has the following format
- * uuid-in-canonical-form path-relative-to-gluster-mount.
- * Both uuid and relative path are from master mount.
- */
- fprintf (stderr, "usage: %s <file-of-paths-to-be-synced>\n",
- argv[0]);
- goto out;
+ char *file = NULL;
+ char *tmp = NULL;
+ char *tmp1 = NULL;
+ char *parent_dir = NULL;
+ char *gfid = NULL;
+ char *bname = NULL;
+ int ret = -1;
+ int len = 0;
+ FILE *fp = NULL;
+ char line[GLFS_LINE_MAX] = {
+ 0,
+ };
+ char *path = NULL;
+ void *blob = NULL;
+ void *tmp_blob = NULL;
+
+ if (argc != 2) {
+ /* each line in the file has the following format
+ * uuid-in-canonical-form path-relative-to-gluster-mount.
+ * Both uuid and relative path are from master mount.
+ */
+ fprintf(stderr, "usage: %s <file-of-paths-to-be-synced>\n", argv[0]);
+ goto out;
+ }
+
+ file = argv[1];
+
+ fp = fopen(file, "r");
+ if (fp == NULL) {
+ fprintf(stderr, "cannot open %s for reading (%s)\n", file,
+ strerror(errno));
+ goto out;
+ }
+
+ while (fgets(line, GLFS_LINE_MAX, fp) != NULL) {
+ tmp = line;
+ path = gfid = line;
+
+ path += UUID_CANONICAL_FORM_LEN + 1;
+
+ while (isspace(*path))
+ path++;
+
+ len = strlen(line);
+ if ((len < GLFS_LINE_MAX) && (line[len - 1] == '\n'))
+ line[len - 1] = '\0';
+
+ line[UUID_CANONICAL_FORM_LEN] = '\0';
+
+ tmp = strdup(path);
+ tmp1 = strdup(path);
+ parent_dir = dirname(tmp);
+ bname = basename(tmp1);
+
+ /* gfid + '\0' + bname + '\0' */
+ len = UUID_CANONICAL_FORM_LEN + 1 + strlen(bname) + 1;
+
+ blob = malloc(len);
+
+ memcpy(blob, gfid, UUID_CANONICAL_FORM_LEN);
+
+ tmp_blob = blob + UUID_CANONICAL_FORM_LEN + 1;
+
+ memcpy(tmp_blob, bname, strlen(bname));
+
+ ret = sys_lsetxattr(parent_dir, GF_FUSE_AUX_GFID_HEAL, blob, len, 0);
+ if (ret < 0) {
+ fprintf(stderr, "setxattr on %s/%s failed (%s)\n", parent_dir,
+ bname, strerror(errno));
}
+ memset(line, 0, GLFS_LINE_MAX);
- file = argv[1];
+ free(blob);
+ free(tmp);
+ free(tmp1);
+ blob = NULL;
+ }
- fp = fopen (file, "r");
- if (fp == NULL) {
- fprintf (stderr, "cannot open %s for reading (%s)\n",
- file, strerror (errno));
- goto out;
- }
-
- while (fgets (line, GLFS_LINE_MAX, fp) != NULL) {
- tmp = line;
- path = gfid = line;
-
- path += UUID_CANONICAL_FORM_LEN + 1;
-
- while(isspace (*path))
- path++;
-
- if ((strlen (line) < GLFS_LINE_MAX) &&
- (line[strlen (line) - 1] == '\n'))
- line[strlen (line) - 1] = '\0';
-
- line[UUID_CANONICAL_FORM_LEN] = '\0';
-
- tmp = strdup (path);
- tmp1 = strdup (path);
- parent_dir = dirname (tmp);
- bname = basename (tmp1);
-
- /* gfid + '\0' + bname + '\0' */
- len = UUID_CANONICAL_FORM_LEN + 1 + strlen (bname) + 1;
-
- blob = calloc (1, len);
-
- memcpy (blob, gfid, UUID_CANONICAL_FORM_LEN);
-
- tmp_blob = blob + UUID_CANONICAL_FORM_LEN + 1;
-
- memcpy (tmp_blob, bname, strlen (bname));
-
- ret = sys_lsetxattr (parent_dir, GF_FUSE_AUX_GFID_HEAL,
- blob, len, 0);
- if (ret < 0) {
- fprintf (stderr, "setxattr on %s/%s failed (%s)\n",
- parent_dir, bname, strerror (errno));
- }
- memset (line, 0, GLFS_LINE_MAX);
-
- free (blob);
- free (tmp); free (tmp1);
- blob = NULL;
- }
-
- ret = 0;
+ ret = 0;
out:
- if (fp)
- fclose(fp);
- return ret;
+ if (fp)
+ fclose(fp);
+ return ret;
}
-
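The reformatted gsync-sync-gfid.c above reads lines of the form "<canonical-gfid> <path relative to the mount>" and, for each one, sets the glusterfs.gfid.heal aux xattr on the parent directory with a blob laid out as gfid, NUL, basename, NUL. A hedged Python sketch of the same blob construction, assuming a FUSE mount that honours the aux xattr and using Python 3's os.setxattr:

    # Sketch only: build the gfid-heal blob the way gsync-sync-gfid.c does
    # and set it on the parent directory of the target path.
    import os

    GF_FUSE_AUX_GFID_HEAL = 'glusterfs.gfid.heal'

    def heal_gfid(mount, gfid, relpath):
        target = os.path.join(mount, relpath)
        parent = os.path.dirname(target)
        bname = os.path.basename(target)
        blob = gfid.encode('ascii') + b'\0' + bname.encode('utf-8') + b'\0'
        os.setxattr(parent, GF_FUSE_AUX_GFID_HEAL, blob)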
diff --git a/extras/geo-rep/schedule_georep.py.in b/extras/geo-rep/schedule_georep.py.in
index c931111b365..48b2b507060 100644
--- a/extras/geo-rep/schedule_georep.py.in
+++ b/extras/geo-rep/schedule_georep.py.in
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python3
"""
Schedule Geo-replication
------------------------
@@ -43,7 +43,7 @@ SESSION_MOUNT_LOG_FILE = ("/var/log/glusterfs/geo-replication"
"/schedule_georep.mount.log")
USE_CLI_COLOR = True
-
+mnt_list = []
class GlusterBadXmlFormat(Exception):
"""
@@ -83,13 +83,15 @@ def execute(cmd, success_msg="", failure_msg="", exitcode=-1):
On success it can print message in stdout if specified.
On failure, exits after writing to stderr.
"""
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
if p.returncode == 0:
if success_msg:
output_ok(success_msg)
return out
else:
+ if exitcode == 0:
+ return
err_msg = err if err else out
output_notok(failure_msg, err=err_msg, exitcode=exitcode)
@@ -112,12 +114,12 @@ def cleanup(hostname, volname, mnt):
"""
Unmount the Volume and Remove the temporary directory
"""
- execute(["umount", mnt],
+ execute(["umount", "-l", mnt],
failure_msg="Unable to Unmount Gluster Volume "
"{0}:{1}(Mounted at {2})".format(hostname, volname, mnt))
execute(["rmdir", mnt],
failure_msg="Unable to Remove temp directory "
- "{0}".format(mnt))
+ "{0}".format(mnt), exitcode=0)
@contextmanager
@@ -130,6 +132,7 @@ def glustermount(hostname, volname):
Automatically unmounts it in case of Exceptions/out of context
"""
mnt = tempfile.mkdtemp(prefix="georepsetup_")
+ mnt_list.append(mnt)
execute(["@SBIN_DIR@/glusterfs",
"--volfile-server", hostname,
"--volfile-id", volname,
@@ -297,6 +300,7 @@ def get_summary(mastervol, slave_url):
status_data = get(mastervol, slave_url)
for session in status_data:
+ session_name = ""
summary = {
"active": 0,
"passive": 0,
@@ -339,7 +343,8 @@ def get_summary(mastervol, slave_url):
if summary["faulty"] == 0 and summary["offline"] == 0:
summary["ok"] = True
- out.append([session_name, summary, faulty_rows, down_rows])
+ if session_name != "":
+ out.append([session_name, summary, faulty_rows, down_rows])
return out
@@ -347,7 +352,7 @@ def get_summary(mastervol, slave_url):
def touch_mount_root(mastervol):
# Create a Mount and Touch the Mount point root,
# Hack to make sure some event available after
- # setting Checkpoint. Without this their is a chance of
+    # setting Checkpoint. Without this there is a chance that the
# Checkpoint never completes.
with glustermount("localhost", mastervol) as mnt:
execute(["touch", mnt])
@@ -376,14 +381,14 @@ def main(args):
output_ok("Started Geo-replication and watching Status for "
"Checkpoint completion")
- touch_mount_root(args.mastervol)
-
start_time = int(time.time())
duration = 0
# Sleep till Geo-rep initializes
time.sleep(60)
+ touch_mount_root(args.mastervol)
+
slave_url = "{0}::{1}".format(args.slave, args.slavevol)
# Loop to Check the Geo-replication Status and Checkpoint
@@ -397,41 +402,39 @@ def main(args):
# or any other error. Gluster cmd still produces XML output
# with different message
output_warning("Unable to get Geo-replication Status")
- time.sleep(1)
- continue
-
- session_name, summary, faulty_rows, down_rows = session_summary[0]
- chkpt_status = "COMPLETE" if summary["checkpoints_ok"] else \
- "NOT COMPLETE"
- ok_status = "OK" if summary["ok"] else "NOT OK"
-
- if summary["ok"]:
- output_ok("All Checkpoints {1}, "
- "All status {2} (Turns {0:>3})".format(
- turns, chkpt_status, ok_status))
- else:
- output_warning("All Checkpoints {1}, "
- "All status {2} (Turns {0:>3})".format(
- turns, chkpt_status, ok_status))
-
- output_warning("Geo-rep workers Faulty/Offline, "
- "Faulty: {0} Offline: {1}".format(
- repr(faulty_rows),
- repr(down_rows)))
-
- if summary["checkpoints_ok"]:
- output_ok("Stopping Geo-replication session now")
- cmd = ["@SBIN_DIR@/gluster", "volume", "geo-replication",
- args.mastervol,
- "%s::%s" % (args.slave, args.slavevol), "stop"]
- execute(cmd)
- break
else:
- # If Checkpoint is not complete after a iteration means brick
- # was down and came online now. SETATTR on mount is not
- # recorded, So again issue touch on mount root So that
- # Stime will increase and Checkpoint will complete.
- touch_mount_root(args.mastervol)
+ session_name, summary, faulty_rows, down_rows = session_summary[0]
+ chkpt_status = "COMPLETE" if summary["checkpoints_ok"] else \
+ "NOT COMPLETE"
+ ok_status = "OK" if summary["ok"] else "NOT OK"
+
+ if summary["ok"]:
+ output_ok("All Checkpoints {1}, "
+ "All status {2} (Turns {0:>3})".format(
+ turns, chkpt_status, ok_status))
+ else:
+ output_warning("All Checkpoints {1}, "
+ "All status {2} (Turns {0:>3})".format(
+ turns, chkpt_status, ok_status))
+
+ output_warning("Geo-rep workers Faulty/Offline, "
+ "Faulty: {0} Offline: {1}".format(
+ repr(faulty_rows),
+ repr(down_rows)))
+
+ if summary["checkpoints_ok"]:
+ output_ok("Stopping Geo-replication session now")
+ cmd = ["@SBIN_DIR@/gluster", "volume", "geo-replication",
+ args.mastervol,
+ "%s::%s" % (args.slave, args.slavevol), "stop"]
+ execute(cmd)
+ break
+ else:
+                # If the Checkpoint is not complete after an iteration, a brick
+                # was down and has come online now. The SETATTR on the mount is
+                # not recorded, so issue a touch on the mount root again so that
+                # the Stime will increase and the Checkpoint will complete.
+ touch_mount_root(args.mastervol)
# Increment the turns and Sleep for 10 sec
turns += 1
@@ -446,13 +449,18 @@ def main(args):
time.sleep(args.interval)
+ for mnt in mnt_list:
+ execute(["rmdir", mnt],
+ failure_msg="Unable to Remove temp directory "
+ "{0}".format(mnt), exitcode=0)
+
if __name__ == "__main__":
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
description=__doc__)
parser.add_argument("mastervol", help="Master Volume Name")
parser.add_argument("slave",
- help="SLAVEHOST or root@SLAVEHOST "
- "or user@SLAVEHOST",
+ help="Slave hostname "
+ "(<username>@SLAVEHOST or SLAVEHOST)",
metavar="SLAVE")
parser.add_argument("slavevol", help="Slave Volume Name")
parser.add_argument("--interval", help="Interval in Seconds. "
@@ -462,12 +470,23 @@ if __name__ == "__main__":
"stop Geo-replication if Checkpoint is not complete "
"in the specified timeout time", type=int,
default=0)
- parser.add_argument("--no-color", help="Use Color in CLI output",
+ parser.add_argument("--no-color", help="Don't use Color in CLI output",
action="store_true")
args = parser.parse_args()
if args.no_color:
USE_CLI_COLOR = False
try:
+ # Check for session existence
+ cmd = ["@SBIN_DIR@/gluster", "volume", "geo-replication",
+ args.mastervol, "%s::%s" % (args.slave, args.slavevol), "status"]
+ execute(cmd)
main(args)
except KeyboardInterrupt:
+ for mnt in mnt_list:
+ execute(["umount", "-l", mnt],
+ failure_msg="Unable to Unmount Gluster Volume "
+ "Mounted at {0}".format(mnt), exitcode=0)
+ execute(["rmdir", mnt],
+ failure_msg="Unable to Remove temp directory "
+ "{0}".format(mnt), exitcode=0)
output_notok("Exiting...")
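A minimal sketch of the checkpoint-wait loop that the comments in the hunk above describe: poll the geo-replication status, and re-touch the mount root whenever the checkpoint is not yet complete so the stime keeps moving. get_summary() and touch_root() are placeholder names, not the script's actual helpers.

    import time

    def wait_for_checkpoint(get_summary, touch_root, interval=10):
        # get_summary() -> dict with "checkpoints_ok", or None on failure
        while True:
            summary = get_summary()
            if summary is None:
                time.sleep(1)          # status fetch failed, retry shortly
                continue
            if summary["checkpoints_ok"]:
                return True            # safe to stop the geo-rep session
            touch_root()               # SETATTR alone is not recorded, so touch
            time.sleep(interval)       # the mount root to move the stime forward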
diff --git a/extras/git-branch-diff.py b/extras/git-branch-diff.py
new file mode 100755
index 00000000000..382513e069e
--- /dev/null
+++ b/extras/git-branch-diff.py
@@ -0,0 +1,285 @@
+#!/bin/python2
+
+"""
+ Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+"""
+
+"""
+ ABOUT:
+ This script helps in visualizing backported and missed commits between two
+ different branches, tags or commit ranges. In the list of missed commits,
+  it will help you identify patches which are posted for review on the gerrit server.
+
+ USAGE:
+ $ ./extras/git-branch-diff.py --help
+ usage: git-branch-diff.py [-h] [-s SOURCE] -t TARGET [-a AUTHOR] [-p PATH]
+ [-o OPTIONS]
+
+ git wrapper to diff local or remote branches/tags/commit-ranges
+
+ optional arguments:
+ -h, --help show this help message and exit
+ -s SOURCE, --source SOURCE
+ source pattern, it could be a branch, tag or a commit
+ range
+ -t TARGET, --target TARGET
+ target pattern, it could be a branch, tag or a commit
+ range
+ -a AUTHOR, --author AUTHOR
+                          default: git config name/email; to provide multiple
+                          authors, use comma-separated values
+    -p PATH, --path PATH  show source and target diff w.r.t. the given path;
+                          to provide multiple paths, separate them with spaces
+ -o OPTIONS, --options OPTIONS
+ add other git options such as --after=<>, --before=<>
+                          etc. for expert use
+
+ SAMPLE EXECUTIONS:
+ $ ./extras/git-branch-diff.py -t origin/release-3.8
+
+ $ ./extras/git-branch-diff.py -s local_branch -t origin/release-3.7
+
+ $ ./extras/git-branch-diff.py -s 4517bf8..e66add8 -t origin/release-3.7
+ $ ./extras/git-branch-diff.py -s HEAD..c4efd39 -t origin/release-3.7
+
+ $ ./extras/git-branch-diff.py -t v3.7.11 --author="author@redhat.com"
+ $ ./extras/git-branch-diff.py -t v3.7.11 --author="authorX, authorY, authorZ"
+
+ $ ./extras/git-branch-diff.py -t origin/release-3.8 --path="xlators/"
+ $ ./extras/git-branch-diff.py -t origin/release-3.8 --path="./xlators ./rpc"
+
+ $ ./extras/git-branch-diff.py -t origin/release-3.6 --author="*"
+ $ ./extras/git-branch-diff.py -t origin/release-3.6 --author="All"
+ $ ./extras/git-branch-diff.py -t origin/release-3.6 --author="Null"
+
+ $ ./extras/git-branch-diff.py -t v3.7.11 --options="--after=2015-03-01 \
+ --before=2016-01-30"
+
+ DECLARATION:
+  While backporting a commit to another branch, usually only the subject of
+  the patch remains unchanged; everything else, such as the commit message,
+  commit Id, change Id and bug Id, may change. This script therefore uses the
+  commit subject as the key for comparing two git branches, which can be
+  local or remote.
+
+  Note: This script cannot match backports whose commit subjects were altered
+  while backporting. Also, this script doesn't have any intelligence to detect
+  squashed commits.
+
+ AUTHOR:
+ Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
+"""
+
+from __future__ import print_function
+import os
+import sys
+import argparse
+import commands
+import subprocess
+import requests
+
+class GitBranchDiff:
+ def __init__ (self):
+ " color symbols"
+ self.tick = u'\033[1;32m[ \u2714 ]\033[0m'
+ self.cross = u'\033[1;31m[ \u2716 ]\033[0m'
+ self.green_set = u'\033[1;34m'
+ self.yello_set = u'\033[4;33m'
+ self.color_unset = '\033[0m'
+
+ self.parse_cmd_args()
+
+ " replace default values with actual values from command args"
+ self.g_author = self.argsdict['author']
+ self.s_pattern = self.argsdict['source']
+ self.t_pattern = self.argsdict['target']
+ self.r_path = self.argsdict['path']
+ self.options = ' '.join(self.argsdict['options'])
+
+ self.gerrit_server = "http://review.gluster.org"
+
+ def check_dir_exist (self, os_path):
+        " check whether the given paths exist"
+ path_list = os_path.split()
+ for path in path_list:
+ if not os.path.exists(path):
+ raise argparse.ArgumentTypeError("'%s' path %s is not valid"
+ %(os_path, path))
+ return os_path
+
+ def check_pattern_exist (self):
+        " sanity-check that the given branches/patterns exist"
+ status_sbr, op = commands.getstatusoutput('git log ' +
+ self.s_pattern)
+ status_tbr, op = commands.getstatusoutput('git log ' +
+ self.t_pattern)
+ if status_sbr != 0:
+            print("Error: --source=" + self.s_pattern + " doesn't exist\n")
+ self.parser.print_help()
+ exit(status_sbr)
+ elif status_tbr != 0:
+            print("Error: --target=" + self.t_pattern + " doesn't exist\n")
+ self.parser.print_help()
+ exit(status_tbr)
+
+ def check_author_exist (self):
+        " check that the given author(s) exist; format them when multiple"
+ contrib_list = ['', '*', 'all', 'All', 'ALL', 'null', 'Null', 'NULL']
+ if self.g_author in contrib_list:
+ self.g_author = ""
+ else:
+ ide_list = self.g_author.split(',')
+ for ide in ide_list:
+ cmd4 = 'git log ' + self.s_pattern + ' --author=' + ide
+ c_list = subprocess.check_output(cmd4, shell = True)
+ if len(c_list) is 0:
+                    print("Error: --author=%s doesn't exist" %self.g_author)
+ print("see '%s --help'" %__file__)
+ exit(1)
+ if len(ide_list) > 1:
+ self.g_author = "\|".join(ide_list)
+
+ def connected_to_gerrit (self):
+ "check if gerrit server is reachable"
+ try:
+ r = requests.get(self.gerrit_server, timeout=3)
+ return True
+ except requests.Timeout as err:
+ " request timed out"
+ print("Warning: failed to get list of open review commits on " \
+ "gerrit.\n" \
+                  "hint: Request timed out! gerrit server could possibly be " \
+ "slow ...\n")
+ return False
+ except requests.RequestException as err:
+ " handle other errors"
+ print("Warning: failed to get list of open review commits on " \
+ "gerrit\n" \
+                  "hint: check your internet connection ...\n")
+ return False
+
+ def parse_cmd_args (self):
+ " command line parser"
+ author = subprocess.check_output('git config user.email',
+ shell = True).rstrip('\n')
+ source = "remotes/origin/master"
+ options = [' --pretty=format:"%h %s" ']
+ path = subprocess.check_output('git rev-parse --show-toplevel',
+ shell = True).rstrip('\n')
+ self.parser = argparse.ArgumentParser(description = 'git wrapper to '
+ 'diff local or remote branches/'
+ 'tags/commit-ranges')
+ self.parser.add_argument('-s',
+ '--source',
+ help = 'source pattern, it could be a branch,'
+ ' tag or a commit range',
+ default = source,
+ dest = 'source')
+ self.parser.add_argument('-t',
+ '--target',
+ help = 'target pattern, it could be a branch,'
+ ' tag or a commit range',
+ required = True,
+ dest = 'target')
+ self.parser.add_argument('-a',
+ '--author',
+                                 help = 'default: git config name/email; '
+                                        'to provide multiple authors, use '
+                                        'comma-separated values',
+ default = author,
+ dest = 'author')
+ self.parser.add_argument('-p',
+ '--path',
+ type = self.check_dir_exist,
+                                 help = 'show source and target diff w.r.t. '
+                                        'the given path; to provide multiple '
+                                        'paths, separate them with spaces',
+ default = path,
+ dest = 'path')
+ self.parser.add_argument('-o',
+ '--options',
+ help = 'add other git options such as '
+ '--after=<>, --before=<> etc. '
+                                        'for expert use',
+ default = options,
+ dest = 'options',
+ action='append')
+ self.argsdict = vars(self.parser.parse_args())
+
+ def print_output (self):
+ " display the result list"
+ print("\n------------------------------------------------------------\n")
+ print(self.tick + " Successfully Backported changes:")
+ print(' {' + 'from: ' + self.s_pattern + \
+ ' to: '+ self.t_pattern + '}\n')
+ for key, value in self.s_dict.items():
+ if value in self.t_dict.itervalues():
+ print("[%s%s%s] %s" %(self.yello_set,
+ key,
+ self.color_unset,
+ value))
+ print("\n------------------------------------------------------------\n")
+ print(self.cross + " Missing patches in " + self.t_pattern + ':\n')
+ if self.connected_to_gerrit():
+ cmd3 = "git review -r origin -l"
+ review_list = subprocess.check_output(cmd3, shell = True).split('\n')
+ else:
+ review_list = []
+
+ for key, value in self.s_dict.items():
+ if value not in self.t_dict.itervalues():
+ if any(value in s for s in review_list):
+ print("[%s%s%s] %s %s(under review)%s" %(self.yello_set,
+ key,
+ self.color_unset,
+ value,
+ self.green_set,
+ self.color_unset))
+ else:
+ print("[%s%s%s] %s" %(self.yello_set,
+ key,
+ self.color_unset,
+ value))
+ print("\n------------------------------------------------------------\n")
+
+ def main (self):
+ self.check_pattern_exist()
+ self.check_author_exist()
+
+ " actual git commands"
+ cmd1 = 'git log' + self.options + ' ' + self.s_pattern + \
+ ' --author=\'' + self.g_author + '\' ' + self.r_path
+
+ " could be backported by anybody so --author doesn't apply here"
+ cmd2 = 'git log' + self.options + ' ' + self.t_pattern + \
+ ' ' + self.r_path
+
+ s_list = subprocess.check_output(cmd1, shell = True).split('\n')
+ t_list = subprocess.check_output(cmd2, shell = True)
+
+ if len(t_list) is 0:
+ print("No commits in the target: %s" %self.t_pattern)
+ print("see '%s --help'" %__file__)
+ exit()
+ else:
+ t_list = t_list.split('\n')
+
+ self.s_dict = dict()
+ self.t_dict = dict()
+
+ for item in s_list:
+ self.s_dict.update(dict([item.split(' ', 1)]))
+ for item in t_list:
+ self.t_dict.update(dict([item.split(' ', 1)]))
+
+ self.print_output()
+
+
+if __name__ == '__main__':
+ run = GitBranchDiff()
+ run.main()
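As the DECLARATION of the new script explains, the comparison is keyed on commit subjects. A minimal standalone sketch of that matching step, assuming Python 3 inside a git checkout; the refs are illustrative and the helper below is not part of the patch.

    import subprocess

    def subjects(ref):
        # map commit subject -> short hash for the given ref
        out = subprocess.check_output(
            ["git", "log", "--pretty=format:%h %s", ref]).decode()
        return {line.split(" ", 1)[1]: line.split(" ", 1)[0]
                for line in out.splitlines() if " " in line}

    src = subjects("remotes/origin/master")   # source ref (example)
    tgt = subjects("origin/release-3.8")      # target ref (example)

    backported = [s for s in src if s in tgt]
    missing = [s for s in src if s not in tgt]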
diff --git a/extras/glusterd.vol.in b/extras/glusterd.vol.in
index 957b277801c..5d7bad0e4c8 100644
--- a/extras/glusterd.vol.in
+++ b/extras/glusterd.vol.in
@@ -1,12 +1,15 @@
volume management
type mgmt/glusterd
option working-directory @GLUSTERD_WORKDIR@
- option transport-type socket,rdma
+ option transport-type socket
option transport.socket.keepalive-time 10
option transport.socket.keepalive-interval 2
option transport.socket.read-fail-log off
+ option transport.socket.listen-port 24007
option ping-timeout 0
option event-threads 1
+# option lock-timer 180
# option transport.address-family inet6
# option base-port 49152
+ option max-port 60999
end-volume
diff --git a/extras/glusterfs-georep-logrotate b/extras/glusterfs-georep-logrotate
index 6fdb8c65aaf..3e7ecf373a1 100644
--- a/extras/glusterfs-georep-logrotate
+++ b/extras/glusterfs-georep-logrotate
@@ -1,6 +1,12 @@
/var/log/glusterfs/geo-replication/*/*.log {
sharedscripts
- rotate 52
+ weekly
+ maxsize 10M
+ minsize 100k
+
+ # 6 months of logs are good enough
+ rotate 26
+
missingok
compress
delaycompress
@@ -15,7 +21,13 @@
/var/log/glusterfs/geo-replication-slaves/*.log {
sharedscripts
- rotate 52
+ weekly
+ maxsize 10M
+ minsize 100k
+
+ # 6 months of logs are good enough
+ rotate 26
+
missingok
compress
delaycompress
@@ -30,7 +42,13 @@
/var/log/glusterfs/geo-replication-slaves/*/*.log {
sharedscripts
- rotate 52
+ weekly
+ maxsize 10M
+ minsize 100k
+
+ # 6 months of logs are good enough
+ rotate 26
+
missingok
compress
delaycompress
diff --git a/extras/glusterfs-georep-upgrade.py b/extras/glusterfs-georep-upgrade.py
new file mode 100755
index 00000000000..634576058d6
--- /dev/null
+++ b/extras/glusterfs-georep-upgrade.py
@@ -0,0 +1,77 @@
+#!/usr/bin/python3
+"""
+
+Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
+This file is part of GlusterFS.
+
+This file is licensed to you under your choice of the GNU Lesser
+General Public License, version 3 or any later version (LGPLv3 or
+later), or the GNU General Public License, version 2 (GPLv2), in all
+cases as published by the Free Software Foundation.
+
+"""
+
+import argparse
+import errno
+import os, sys
+import shutil
+from datetime import datetime
+
+def find_htime_path(brick_path):
+ dirs = []
+ htime_dir = os.path.join(brick_path, '.glusterfs/changelogs/htime')
+ for file in os.listdir(htime_dir):
+ if os.path.isfile(os.path.join(htime_dir,file)) and file.startswith("HTIME"):
+ dirs.append(os.path.join(htime_dir, file))
+ else:
+ raise FileNotFoundError("%s unavailable" % (os.path.join(htime_dir, file)))
+ return dirs
+
+def modify_htime_file(brick_path):
+ htime_file_path_list = find_htime_path(brick_path)
+
+ for htime_file_path in htime_file_path_list:
+ changelog_path = os.path.join(brick_path, '.glusterfs/changelogs')
+ temp_htime_path = os.path.join(changelog_path, 'htime/temp_htime_file')
+ with open(htime_file_path, 'r') as htime_file, open(temp_htime_path, 'w') as temp_htime_file:
+            #read the NUL-separated changelog paths from the htime file
+ paths = htime_file.read().split("\x00")
+
+ for pth in paths:
+ epoch_no = pth.split(".")[-1]
+ changelog = os.path.basename(pth)
+ #convert epoch time to year, month and day
+ if epoch_no != '':
+ date=(datetime.fromtimestamp(float(int(epoch_no))).strftime("%Y/%m/%d"))
+ #update paths in temp htime file
+ temp_htime_file.write("%s/%s/%s\x00" % (changelog_path, date, changelog))
+                    #create directory in the format year/month/day
+ path = os.path.join(changelog_path, date)
+
+ if changelog.startswith("CHANGELOG."):
+ try:
+ os.makedirs(path, mode = 0o600);
+ except OSError as exc:
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise
+
+ #copy existing changelogs to new directory structure, delete old changelog files
+ shutil.copyfile(pth, os.path.join(path, changelog))
+ os.remove(pth)
+
+        #back up the original htime file and replace it with the temp htime file
+ os.rename(htime_file_path, os.path.join('%s.bak'%htime_file_path))
+ os.rename(temp_htime_path, htime_file_path)
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument('brick_path', help="This upgrade script, which is to be run on\
+ server side, takes brick path as the argument, \
+ updates paths inside htime file and alters the directory structure \
+                         above the changelog files in order to support the new optimised format \
+ of the directory structure as per \
+ https://review.gluster.org/#/c/glusterfs/+/23733/")
+ args = parser.parse_args()
+ modify_htime_file(args.brick_path)
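The help text above summarizes the conversion: every flat CHANGELOG.<epoch> entry recorded in the HTIME file is moved under a year/month/day directory derived from its epoch suffix. A small sketch of the per-entry path rewrite, with an illustrative brick path that is not taken from the patch.

    import os
    from datetime import datetime

    old = "/bricks/b1/.glusterfs/changelogs/CHANGELOG.1598432522"  # example entry

    epoch = old.split(".")[-1]
    date = datetime.fromtimestamp(int(epoch)).strftime("%Y/%m/%d")
    new = os.path.join("/bricks/b1/.glusterfs/changelogs", date,
                       os.path.basename(old))
    # e.g. /bricks/b1/.glusterfs/changelogs/2020/08/26/CHANGELOG.1598432522
    # (exact date depends on the local timezone)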
diff --git a/extras/glusterfs-logrotate b/extras/glusterfs-logrotate
index 575c0eee771..6ba6ef18e9f 100644
--- a/extras/glusterfs-logrotate
+++ b/extras/glusterfs-logrotate
@@ -2,7 +2,12 @@
/var/log/glusterfs/*.log {
sharedscripts
weekly
- rotate 52
+ maxsize 10M
+ minsize 100k
+
+# 6 months of logs are good enough
+ rotate 26
+
missingok
compress
delaycompress
@@ -17,7 +22,12 @@
/var/log/glusterfs/bricks/*.log {
sharedscripts
weekly
- rotate 52
+ maxsize 10M
+ minsize 100k
+
+# 6 months of logs are good enough
+ rotate 26
+
missingok
compress
delaycompress
@@ -35,3 +45,24 @@
compress
delaycompress
}
+
+# Rotate snapd log
+/var/log/glusterfs/snaps/*/*.log {
+ sharedscripts
+ weekly
+ maxsize 10M
+ minsize 100k
+
+ # 6 months of logs are good enough
+ rotate 26
+
+ missingok
+ compress
+ delaycompress
+ notifempty
+ postrotate
+ for pid in `ps -aef | grep glusterfs | egrep "snapd" | awk '{print $2}'`; do
+ /usr/bin/kill -HUP $pid > /dev/null 2>&1 || true
+ done
+ endscript
+}
diff --git a/extras/glusterfs-mode.el b/extras/glusterfs-mode.el
index d4f6dc568b6..a9ed2335ab3 100644
--- a/extras/glusterfs-mode.el
+++ b/extras/glusterfs-mode.el
@@ -1,112 +1,113 @@
-;;; Copyright (C) 2007-2011 Gluster Inc. <http://www.gluster.com>
-;;;
-;;; This program is free software; you can redistribute it and/or modify
-;;; it under the terms of the GNU General Public License as published by
-;;; the Free Software Foundation; either version 2 of the License, or
-;;; (at your option) any later version.
-;;;
-;;; This program is distributed in the hope that it will be useful,
-;;; but WITHOUT ANY WARRANTY; without even the implied warranty of
-;;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-;;; GNU General Public License for more details.
-;;;
-;;; You should have received a copy of the GNU General Public License
-;;; along with this program; if not, write to the Free Software
-;;; Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-;;;
-
-(defvar glusterfs-mode-hook nil)
-
-;; (defvar glusterfs-mode-map
-;; (let ((glusterfs-mode-map (make-keymap)))
-;; (define-key glusterfs-mode-map "\C-j" 'newline-and-indent)
-;; glusterfs-mode-map)
-;; "Keymap for WPDL major mode")
-
-(add-to-list 'auto-mode-alist '("\\.vol\\'" . glusterfs-mode))
-
-(defconst glusterfs-font-lock-keywords-1
- (list
- ; "cluster/{unify,afr,stripe}"
- ; "performance/{io-cache,io-threads,write-behind,read-ahead,stat-prefetch}"
- ; "protocol/{client/server}"
- ; "features/{trash,posix-locks,fixed-id,filter}"
- ; "stroage/posix"
- ; "encryption/rot-13"
- ; "debug/trace"
- '("\\<\\(cluster/\\(unify\\|afr\\|replicate\\|stripe\\|ha\\|dht\\|distribute\\)\\|\\performance/\\(io-\\(cache\\|threads\\)\\|write-behind\\|read-ahead\\|symlink-cache\\)\\|protocol/\\(server\\|client\\)\\|features/\\(trash\\|posix-locks\\|locks\\|path-converter\\|filter\\)\\|storage/\\(posix\\|bdb\\)\\|encryption/rot-13\\|debug/trace\\)\\>" . font-lock-keyword-face))
-"Additional Keywords to highlight in GlusterFS mode.")
-
-(defconst glusterfs-font-lock-keywords-2
- (append glusterfs-font-lock-keywords-1
- (list
- ; "replicate" "namespace" "scheduler" "remote-subvolume" "remote-host"
- ; "auth.addr" "block-size" "remote-port" "listen-port" "transport-type"
- ; "limits.min-free-disk" "directory"
- ; TODO: add all the keys here.
- '("\\<\\(inode-lru-limit\\|replicate\\|namespace\\|scheduler\\|username\\|password\\|allow\\|reject\\|block-size\\|listen-port\\|transport-type\\|transport-timeout\\|directory\\|page-size\\|page-count\\|aggregate-size\\|non-blocking-io\\|client-volume-filename\\|bind-address\\|self-heal\\|read-only-subvolumes\\|read-subvolume\\|thread-count\\|cache-size\\|window-size\\|force-revalidate-timeout\\|priority\\|include\\|exclude\\|remote-\\(host\\|subvolume\\|port\\)\\|auth.\\(addr\\|login\\)\\|limits.\\(min-disk-free\\|transaction-size\\|ib-verbs-\\(work-request-\\(send-\\|recv-\\(count\\|size\\)\\)\\|port\\|mtu\\|device-name\\)\\)\\)\ \\>" . font-lock-constant-face)))
- "option keys in GlusterFS mode.")
-
-(defconst glusterfs-font-lock-keywords-3
- (append glusterfs-font-lock-keywords-2
- (list
- ; "option" "volume" "end-volume" "subvolumes" "type"
- '("\\<\\(option\ \\|volume\ \\|subvolumes\ \\|type\ \\|end-volume\\)\\>" . font-lock-builtin-face)))
- ;'((regexp-opt (" option " "^volume " "^end-volume" "subvolumes " " type ") t) . font-lock-builtin-face))
- "Minimal highlighting expressions for GlusterFS mode.")
-
-
-(defvar glusterfs-font-lock-keywords glusterfs-font-lock-keywords-3
- "Default highlighting expressions for GlusterFS mode.")
-
-(defvar glusterfs-mode-syntax-table
- (let ((glusterfs-mode-syntax-table (make-syntax-table)))
- (modify-syntax-entry ?\# "<" glusterfs-mode-syntax-table)
- (modify-syntax-entry ?* ". 23" glusterfs-mode-syntax-table)
- (modify-syntax-entry ?\n ">#" glusterfs-mode-syntax-table)
- glusterfs-mode-syntax-table)
- "Syntax table for glusterfs-mode")
-
-;; TODO: add an indentation table
-
-(defun glusterfs-indent-line ()
- "Indent current line as GlusterFS code"
- (interactive)
- (beginning-of-line)
- (if (bobp)
- (indent-line-to 0) ; First line is always non-indented
- (let ((not-indented t) cur-indent)
- (if (looking-at "^[ \t]*volume\ ")
- (progn
- (save-excursion
- (forward-line -1)
- (setq not-indented nil)
- (setq cur-indent 0))))
- (if (looking-at "^[ \t]*end-volume")
- (progn
- (save-excursion
- (forward-line -1)
- (setq cur-indent 0))
- (if (< cur-indent 0) ; We can't indent past the left margin
- (setq cur-indent 0)))
- (save-excursion
- (while not-indented ; Iterate backwards until we find an indentation hint
- (progn
- (setq cur-indent 2) ; Do the actual indenting
- (setq not-indented nil)))))
- (if cur-indent
- (indent-line-to cur-indent)
- (indent-line-to 0)))))
-
-(defun glusterfs-mode ()
- (interactive)
- (kill-all-local-variables)
- ;; (use-local-map glusterfs-mode-map)
- (set-syntax-table glusterfs-mode-syntax-table)
- (set (make-local-variable 'indent-line-function) 'glusterfs-indent-line)
- (set (make-local-variable 'font-lock-defaults) '(glusterfs-font-lock-keywords))
- (setq major-mode 'glusterfs-mode)
- (setq mode-name "GlusterFS")
- (run-hooks 'glusterfs-mode-hook))
-
-(provide 'glusterfs-mode)
+;;; Copyright (C) 2007-2017 Red Hat, Inc. <http://www.redhat.com>
+;;; Copyright (C) 2007-2011 Gluster Inc. <http://www.gluster.com>
+;;;
+;;; This program is free software; you can redistribute it and/or
+;;; modify it under the terms of the GNU General Public License
+;;; as published by the Free Software Foundation; either version 2
+;;; of the License, or (at your option) any later version.
+;;;
+;;; This program is distributed in the hope that it will be useful,
+;;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;;; GNU General Public License for more details.
+;;;
+;;; You should have received a copy of the GNU General Public License
+;;; along with this program; if not, write to the Free Software
+;;; Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+;;;
+
+(defvar glusterfs-mode-hook nil)
+
+;; (defvar glusterfs-mode-map
+;; (let ((glusterfs-mode-map (make-keymap)))
+;; (define-key glusterfs-mode-map "\C-j" 'newline-and-indent)
+;; glusterfs-mode-map)
+;; "Keymap for WPDL major mode")
+
+(add-to-list 'auto-mode-alist '("\\.vol\\'" . glusterfs-mode))
+
+(defconst glusterfs-font-lock-keywords-1
+ (list
+ ; "cluster/{unify,afr,stripe}"
+ ; "performance/{io-cache,io-threads,write-behind,read-ahead,stat-prefetch}"
+ ; "protocol/{client/server}"
+ ; "features/{trash,posix-locks,fixed-id,filter}"
+ ; "storage/posix"
+ ; "encryption/rot-13"
+ ; "debug/trace"
+ '("\\<\\(cluster/\\(unify\\|afr\\|replicate\\|stripe\\|ha\\|dht\\|distribute\\)\\|\\performance/\\(io-\\(cache\\|threads\\)\\|write-behind\\|read-ahead\\|symlink-cache\\)\\|protocol/\\(server\\|client\\)\\|features/\\(trash\\|posix-locks\\|locks\\|path-converter\\|filter\\)\\|storage/\\(posix\\|bdb\\)\\|encryption/rot-13\\|debug/trace\\)\\>" . font-lock-keyword-face))
+"Additional Keywords to highlight in GlusterFS mode.")
+
+(defconst glusterfs-font-lock-keywords-2
+ (append glusterfs-font-lock-keywords-1
+ (list
+ ; "replicate" "namespace" "scheduler" "remote-subvolume" "remote-host"
+ ; "auth.addr" "block-size" "remote-port" "listen-port" "transport-type"
+ ; "limits.min-free-disk" "directory"
+ ; TODO: add all the keys here.
+ '("\\<\\(inode-lru-limit\\|replicate\\|namespace\\|scheduler\\|username\\|password\\|allow\\|reject\\|block-size\\|listen-port\\|transport-type\\|transport-timeout\\|directory\\|page-size\\|page-count\\|aggregate-size\\|non-blocking-io\\|client-volume-filename\\|bind-address\\|self-heal\\|read-only-subvolumes\\|read-subvolume\\|thread-count\\|cache-size\\|window-size\\|force-revalidate-timeout\\|priority\\|include\\|exclude\\|remote-\\(host\\|subvolume\\|port\\)\\|auth.\\(addr\\|login\\)\\|limits.\\(min-disk-free\\|transaction-size\\|ib-verbs-\\(work-request-\\(send-\\|recv-\\(count\\|size\\)\\)\\|port\\|mtu\\|device-name\\)\\)\\)\ \\>" . font-lock-constant-face)))
+ "option keys in GlusterFS mode.")
+
+(defconst glusterfs-font-lock-keywords-3
+ (append glusterfs-font-lock-keywords-2
+ (list
+ ; "option" "volume" "end-volume" "subvolumes" "type"
+ '("\\<\\(option\ \\|volume\ \\|subvolumes\ \\|type\ \\|end-volume\\)\\>" . font-lock-builtin-face)))
+ ;'((regexp-opt (" option " "^volume " "^end-volume" "subvolumes " " type ") t) . font-lock-builtin-face))
+ "Minimal highlighting expressions for GlusterFS mode.")
+
+
+(defvar glusterfs-font-lock-keywords glusterfs-font-lock-keywords-3
+ "Default highlighting expressions for GlusterFS mode.")
+
+(defvar glusterfs-mode-syntax-table
+ (let ((glusterfs-mode-syntax-table (make-syntax-table)))
+ (modify-syntax-entry ?\# "<" glusterfs-mode-syntax-table)
+ (modify-syntax-entry ?* ". 23" glusterfs-mode-syntax-table)
+ (modify-syntax-entry ?\n ">#" glusterfs-mode-syntax-table)
+ glusterfs-mode-syntax-table)
+ "Syntax table for glusterfs-mode")
+
+;; TODO: add an indentation table
+
+(defun glusterfs-indent-line ()
+ "Indent current line as GlusterFS code"
+ (interactive)
+ (beginning-of-line)
+ (if (bobp)
+ (indent-line-to 0) ; First line is always non-indented
+ (let ((not-indented t) cur-indent)
+ (if (looking-at "^[ \t]*volume\ ")
+ (progn
+ (save-excursion
+ (forward-line -1)
+ (setq not-indented nil)
+ (setq cur-indent 0))))
+ (if (looking-at "^[ \t]*end-volume")
+ (progn
+ (save-excursion
+ (forward-line -1)
+ (setq cur-indent 0))
+ (if (< cur-indent 0) ; We can't indent past the left margin
+ (setq cur-indent 0)))
+ (save-excursion
+ (while not-indented ; Iterate backwards until we find an indentation hint
+ (progn
+ (setq cur-indent 2) ; Do the actual indenting
+ (setq not-indented nil)))))
+ (if cur-indent
+ (indent-line-to cur-indent)
+ (indent-line-to 0)))))
+
+(defun glusterfs-mode ()
+ (interactive)
+ (kill-all-local-variables)
+ ;; (use-local-map glusterfs-mode-map)
+ (set-syntax-table glusterfs-mode-syntax-table)
+ (set (make-local-variable 'indent-line-function) 'glusterfs-indent-line)
+ (set (make-local-variable 'font-lock-defaults) '(glusterfs-font-lock-keywords))
+ (setq major-mode 'glusterfs-mode)
+ (setq mode-name "GlusterFS")
+ (run-hooks 'glusterfs-mode-hook))
+
+(provide 'glusterfs-mode)
diff --git a/extras/gnfs-loganalyse.py b/extras/gnfs-loganalyse.py
index 71e79b6be4e..6341d007188 100644..100755
--- a/extras/gnfs-loganalyse.py
+++ b/extras/gnfs-loganalyse.py
@@ -10,6 +10,7 @@
"""
+from __future__ import print_function
import os
import string
import sys
@@ -72,7 +73,7 @@ class NFSRequest:
self.replygfid = tokens [gfididx + 1].strip(",")
def dump (self):
- print "ReqLine: " + str(self.calllinecount) + " TimeStamp: " + self.timestamp + ", XID: " + self.xid + " " + self.op + " ARGS: " + self.opdata + " RepLine: " + str(self.replylinecount) + " " + self.replydata
+ print("ReqLine: " + str(self.calllinecount) + " TimeStamp: " + self.timestamp + ", XID: " + self.xid + " " + self.op + " ARGS: " + self.opdata + " RepLine: " + str(self.replylinecount) + " " + self.replydata)
class NFSLogAnalyzer:
@@ -149,7 +150,7 @@ class NFSLogAnalyzer:
return
rcount = len (self.xid_request_map.keys ())
orphancount = len (self.orphan_replies.keys ())
- print "Requests: " + str(rcount) + ", Orphans: " + str(orphancount)
+ print("Requests: " + str(rcount) + ", Orphans: " + str(orphancount))
def dump (self):
self.getStats ()
diff --git a/extras/group-db-workload b/extras/group-db-workload
new file mode 100644
index 00000000000..9334d6fb942
--- /dev/null
+++ b/extras/group-db-workload
@@ -0,0 +1,12 @@
+performance.open-behind=on
+performance.write-behind=off
+performance.stat-prefetch=off
+performance.quick-read=off
+performance.strict-o-direct=on
+performance.read-ahead=off
+performance.io-cache=off
+performance.readdir-ahead=off
+performance.client-io-threads=on
+server.event-threads=4
+client.event-threads=4
+performance.read-after-open=yes
diff --git a/extras/group-distributed-virt b/extras/group-distributed-virt
new file mode 100644
index 00000000000..a960b76c694
--- /dev/null
+++ b/extras/group-distributed-virt
@@ -0,0 +1,10 @@
+performance.quick-read=off
+performance.read-ahead=off
+performance.io-cache=off
+performance.low-prio-threads=32
+network.remote-dio=enable
+features.shard=on
+user.cifs=off
+client.event-threads=4
+server.event-threads=4
+performance.client-io-threads=on
diff --git a/extras/group-gluster-block b/extras/group-gluster-block
new file mode 100644
index 00000000000..1e398019e6b
--- /dev/null
+++ b/extras/group-gluster-block
@@ -0,0 +1,27 @@
+performance.quick-read=off
+performance.read-ahead=off
+performance.io-cache=off
+performance.stat-prefetch=off
+performance.open-behind=off
+performance.readdir-ahead=off
+performance.strict-o-direct=on
+performance.client-io-threads=on
+performance.io-thread-count=32
+performance.high-prio-threads=32
+performance.normal-prio-threads=32
+performance.low-prio-threads=32
+performance.least-prio-threads=4
+client.event-threads=8
+server.event-threads=8
+network.remote-dio=disable
+cluster.eager-lock=enable
+cluster.quorum-type=auto
+cluster.data-self-heal-algorithm=full
+cluster.locking-scheme=granular
+cluster.shd-max-threads=8
+cluster.shd-wait-qlength=10000
+features.shard=on
+features.shard-block-size=64MB
+user.cifs=off
+server.allow-insecure=on
+cluster.choose-local=off
diff --git a/extras/group-metadata-cache b/extras/group-metadata-cache
new file mode 100644
index 00000000000..b890b288fc7
--- /dev/null
+++ b/extras/group-metadata-cache
@@ -0,0 +1,6 @@
+features.cache-invalidation=on
+features.cache-invalidation-timeout=600
+performance.stat-prefetch=on
+performance.cache-invalidation=on
+performance.md-cache-timeout=600
+network.inode-lru-limit=200000
diff --git a/extras/group-nl-cache b/extras/group-nl-cache
new file mode 100644
index 00000000000..897807e8933
--- /dev/null
+++ b/extras/group-nl-cache
@@ -0,0 +1,5 @@
+features.cache-invalidation=on
+features.cache-invalidation-timeout=600
+performance.nl-cache=on
+performance.nl-cache-timeout=600
+network.inode-lru-limit=200000
diff --git a/extras/group-samba b/extras/group-samba
new file mode 100644
index 00000000000..eeee6e06031
--- /dev/null
+++ b/extras/group-samba
@@ -0,0 +1,11 @@
+features.cache-invalidation=on
+features.cache-invalidation-timeout=600
+performance.cache-samba-metadata=on
+performance.stat-prefetch=on
+performance.cache-invalidation=on
+performance.md-cache-timeout=600
+network.inode-lru-limit=200000
+performance.nl-cache=on
+performance.nl-cache-timeout=600
+performance.readdir-ahead=on
+performance.parallel-readdir=on
diff --git a/extras/group-virt.example b/extras/group-virt.example
index 4fe3760be2c..cc37c98a25c 100644
--- a/extras/group-virt.example
+++ b/extras/group-virt.example
@@ -1,10 +1,24 @@
performance.quick-read=off
performance.read-ahead=off
performance.io-cache=off
-performance.stat-prefetch=off
+performance.low-prio-threads=32
+network.remote-dio=disable
+performance.strict-o-direct=on
cluster.eager-lock=enable
-network.remote-dio=enable
cluster.quorum-type=auto
cluster.server-quorum-type=server
-features.shard=on
cluster.data-self-heal-algorithm=full
+cluster.locking-scheme=granular
+cluster.shd-max-threads=8
+cluster.shd-wait-qlength=10000
+features.shard=on
+user.cifs=off
+cluster.choose-local=off
+client.event-threads=4
+server.event-threads=4
+performance.client-io-threads=on
+network.ping-timeout=20
+server.tcp-user-timeout=20
+server.keepalive-time=10
+server.keepalive-interval=2
+server.keepalive-count=5
diff --git a/extras/hook-scripts/Makefile.am b/extras/hook-scripts/Makefile.am
index 771b37e3fdf..26059d7dbb9 100644
--- a/extras/hook-scripts/Makefile.am
+++ b/extras/hook-scripts/Makefile.am
@@ -1,5 +1,5 @@
EXTRA_DIST = S40ufo-stop.py S56glusterd-geo-rep-create-post.sh
-SUBDIRS = add-brick set start stop reset
+SUBDIRS = add-brick create delete set start stop reset
scriptsdir = $(GLUSTERD_WORKDIR)/hooks/1/gsync-create/post/
if USE_GEOREP
diff --git a/extras/hook-scripts/S40ufo-stop.py b/extras/hook-scripts/S40ufo-stop.py
index 107f1968355..2c79eb1d54a 100755
--- a/extras/hook-scripts/S40ufo-stop.py
+++ b/extras/hook-scripts/S40ufo-stop.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python3
import os
from optparse import OptionParser
diff --git a/extras/hook-scripts/S56glusterd-geo-rep-create-post.sh b/extras/hook-scripts/S56glusterd-geo-rep-create-post.sh
index a5e472e9267..7d6052315bb 100755
--- a/extras/hook-scripts/S56glusterd-geo-rep-create-post.sh
+++ b/extras/hook-scripts/S56glusterd-geo-rep-create-post.sh
@@ -77,18 +77,28 @@ if [ "$val" == "" ]; then
exit;
fi
SSH_PORT=`echo $val`
+SSH_OPT="-oPasswordAuthentication=no -oStrictHostKeyChecking=no"
if [ -f $pub_file ]; then
# For a non-root user copy the pub file to the user's home directory
# For a root user copy the pub files to priv_dir->geo-rep.
if [ "$slave_user" != "root" ]; then
- slave_user_home_dir=`ssh -p ${SSH_PORT} $slave_user@$slave_ip "getent passwd $slave_user | cut -d ':' -f 6"`
- scp -P ${SSH_PORT} $pub_file $slave_user@$slave_ip:$slave_user_home_dir/common_secret.pem.pub_tmp
- ssh -p ${SSH_PORT} $slave_user@$slave_ip "mv $slave_user_home_dir/common_secret.pem.pub_tmp $slave_user_home_dir/${mastervol}_${slavevol}_common_secret.pem.pub"
+ slave_user_home_dir=`ssh -p ${SSH_PORT} ${SSH_OPT} $slave_user@$slave_ip "getent passwd $slave_user | cut -d ':' -f 6"`
+ scp -P ${SSH_PORT} ${SSH_OPT} $pub_file $slave_user@$slave_ip:$slave_user_home_dir/common_secret.pem.pub_tmp
+ ssh -p ${SSH_PORT} ${SSH_OPT} $slave_user@$slave_ip "mv $slave_user_home_dir/common_secret.pem.pub_tmp $slave_user_home_dir/${mastervol}_${slavevol}_common_secret.pem.pub"
else
- scp -P ${SSH_PORT} $pub_file $slave_ip:$pub_file_tmp
- ssh -p ${SSH_PORT} $slave_ip "mv $pub_file_tmp ${pub_file_dname}/${mastervol}_${slavevol}_${pub_file_bname}"
- ssh -p ${SSH_PORT} $slave_ip "gluster system:: copy file /geo-replication/${mastervol}_${slavevol}_common_secret.pem.pub > /dev/null"
- ssh -p ${SSH_PORT} $slave_ip "gluster system:: execute add_secret_pub root geo-replication/${mastervol}_${slavevol}_common_secret.pem.pub > /dev/null"
+ if [[ -z "${GR_SSH_IDENTITY_KEY}" ]]; then
+ scp -P ${SSH_PORT} ${SSH_OPT} $pub_file $slave_ip:$pub_file_tmp
+ ssh -p ${SSH_PORT} ${SSH_OPT} $slave_ip "mv $pub_file_tmp ${pub_file_dname}/${mastervol}_${slavevol}_${pub_file_bname}"
+ ssh -p ${SSH_PORT} ${SSH_OPT} $slave_ip "gluster system:: copy file /geo-replication/${mastervol}_${slavevol}_common_secret.pem.pub > /dev/null"
+ ssh -p ${SSH_PORT} ${SSH_OPT} $slave_ip "gluster system:: execute add_secret_pub root geo-replication/${mastervol}_${slavevol}_common_secret.pem.pub > /dev/null"
+ ssh -p ${SSH_PORT} ${SSH_OPT} $slave_ip "gluster vol set ${slavevol} features.read-only on"
+ else
+ scp -P ${SSH_PORT} -i ${GR_SSH_IDENTITY_KEY} ${SSH_OPT} $pub_file $slave_ip:$pub_file_tmp
+ ssh -p ${SSH_PORT} -i ${GR_SSH_IDENTITY_KEY} ${SSH_OPT} $slave_ip "mv $pub_file_tmp ${pub_file_dname}/${mastervol}_${slavevol}_${pub_file_bname}"
+ ssh -p ${SSH_PORT} -i ${GR_SSH_IDENTITY_KEY} ${SSH_OPT} $slave_ip "gluster system:: copy file /geo-replication/${mastervol}_${slavevol}_common_secret.pem.pub > /dev/null"
+ ssh -p ${SSH_PORT} -i ${GR_SSH_IDENTITY_KEY} ${SSH_OPT} $slave_ip "gluster system:: execute add_secret_pub root geo-replication/${mastervol}_${slavevol}_common_secret.pem.pub > /dev/null"
+ ssh -p ${SSH_PORT} -i ${GR_SSH_IDENTITY_KEY} ${SSH_OPT} $slave_ip "gluster vol set ${slavevol} features.read-only on"
+ fi
fi
fi
diff --git a/extras/hook-scripts/add-brick/post/Makefile.am b/extras/hook-scripts/add-brick/post/Makefile.am
index 5ca5a669de9..9b236df096d 100644
--- a/extras/hook-scripts/add-brick/post/Makefile.am
+++ b/extras/hook-scripts/add-brick/post/Makefile.am
@@ -1,4 +1,6 @@
-EXTRA_DIST = disabled-quota-root-xattr-heal.sh
+EXTRA_DIST = disabled-quota-root-xattr-heal.sh S10selinux-label-brick.sh S13create-subdir-mounts.sh
hookdir = $(GLUSTERD_WORKDIR)/hooks/1/add-brick/post/
-hook_SCRIPTS = disabled-quota-root-xattr-heal.sh
+if WITH_SERVER
+hook_SCRIPTS = disabled-quota-root-xattr-heal.sh S10selinux-label-brick.sh S13create-subdir-mounts.sh
+endif
diff --git a/extras/hook-scripts/add-brick/post/S10selinux-label-brick.sh b/extras/hook-scripts/add-brick/post/S10selinux-label-brick.sh
new file mode 100755
index 00000000000..4a17c993a77
--- /dev/null
+++ b/extras/hook-scripts/add-brick/post/S10selinux-label-brick.sh
@@ -0,0 +1,100 @@
+#!/bin/bash
+#
+# Install to hooks/<HOOKS_VER>/add-brick/post
+#
+# Add an SELinux file context for each brick using the glusterd_brick_t type.
+# This ensures that the brick is relabeled correctly on an SELinux restart or
+# restore. Subsequently, run a restore on the brick path to set the selinux
+# labels.
+#
+###
+
+PROGNAME="Sselinux"
+OPTSPEC="volname:,version:,gd-workdir:,volume-op:"
+VOL=
+
+parse_args () {
+ ARGS=$(getopt -o '' -l ${OPTSPEC} -n ${PROGNAME} -- "$@")
+ eval set -- "${ARGS}"
+
+ while true; do
+ case ${1} in
+ --volname)
+ shift
+ VOL=${1}
+ ;;
+ --gd-workdir)
+ shift
+ GLUSTERD_WORKDIR=$1
+ ;;
+ --version)
+ shift
+ ;;
+ --volume-op)
+ shift
+ ;;
+ *)
+ shift
+ break
+ ;;
+ esac
+ shift
+ done
+}
+
+set_brick_labels()
+{
+ local volname="${1}"
+ local fctx
+ local list=()
+
+ fctx="$(semanage fcontext --list -C)"
+
+ # wait for new brick path to be updated under
+ # ${GLUSTERD_WORKDIR}/vols/${volname}/bricks/
+ sleep 5
+
+ # grab the path for each local brick
+ brickpath="${GLUSTERD_WORKDIR}/vols/${volname}/bricks/"
+ brickdirs=$(
+ find "${brickpath}" -type f -exec grep '^path=' {} \; | \
+ cut -d= -f 2 | \
+ sort -u
+ )
+
+ # create a list of bricks for which custom SELinux
+ # label doesn't exist
+ for b in ${brickdirs}; do
+ pattern="${b}(/.*)?"
+ echo "${fctx}" | grep "^${pattern}\s" >/dev/null
+ if [[ $? -ne 0 ]]; then
+ list+=("${pattern}")
+ fi
+ done
+
+ # Add a file context for each brick path in the list and associate with the
+ # glusterd_brick_t SELinux type.
+ for p in ${list[@]}
+ do
+ semanage fcontext --add -t glusterd_brick_t -r s0 "${p}"
+ done
+
+    # Restore the labels on the brick paths for which a file context was added above
+ for b in ${brickdirs}
+ do
+ echo "${list[@]}" | grep "${b}" >/dev/null
+ if [[ $? -eq 0 ]]; then
+ restorecon -R "${b}"
+ fi
+ done
+}
+
+SELINUX_STATE=$(which getenforce && getenforce)
+[ "${SELINUX_STATE}" = 'Disabled' ] && exit 0
+
+parse_args "$@"
+[ -z "${VOL}" ] && exit 1
+
+set_brick_labels "${VOL}"
+
+exit 0
diff --git a/extras/hook-scripts/add-brick/post/S13create-subdir-mounts.sh b/extras/hook-scripts/add-brick/post/S13create-subdir-mounts.sh
new file mode 100755
index 00000000000..1a6923ee7aa
--- /dev/null
+++ b/extras/hook-scripts/add-brick/post/S13create-subdir-mounts.sh
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+##---------------------------------------------------------------------------
+## This script triggers self-heal of the directories which are expected to
+## be present because they are exported as subdirectory mounts.
+##---------------------------------------------------------------------------
+
+MOUNT_DIR=`mktemp -d -t ${0##*/}.XXXXXX`;
+OPTSPEC="volname:,version:,gd-workdir:,volume-op:"
+PROGNAME="add-brick-create-subdir"
+VOL_NAME=test
+GLUSTERD_WORKDIR="/var/lib/glusterd"
+
+cleanup_mountpoint ()
+{
+ umount -f $MOUNT_DIR;
+ if [ 0 -ne $? ]
+ then
+ return $?
+ fi
+
+ rmdir $MOUNT_DIR;
+ if [ 0 -ne $? ]
+ then
+ return $?
+ fi
+}
+
+##------------------------------------------
+## Parse the arguments
+##------------------------------------------
+ARGS=$(getopt -l $OPTSPEC -name $PROGNAME $@)
+eval set -- "$ARGS"
+
+while true;
+do
+ case $1 in
+ --volname)
+ shift
+ VOL_NAME=$1
+ ;;
+ --gd-workdir)
+ shift
+ GLUSTERD_WORKDIR=$1
+ ;;
+ --version)
+ shift
+ ;;
+ --volume-op)
+ shift
+ ;;
+ *)
+ shift
+ break
+ ;;
+ esac
+ shift
+done
+
+## See if we have any subdirs to be healed before going further
+subdirs=$(grep 'auth.allow' ${GLUSTERD_WORKDIR}/vols/${VOL_NAME}/info | cut -f2 -d'=' | tr ',' '\n' | cut -f1 -d'(');
+
+if [ -z ${subdirs} ]; then
+ rmdir $MOUNT_DIR;
+ exit 0;
+fi
+
+##----------------------------------------
+## Mount the volume in temp directory.
+## -----------------------------------
+glusterfs -s localhost --volfile-id=$VOL_NAME --client-pid=-50 $MOUNT_DIR;
+if [ 0 -ne $? ]
+then
+ exit $?;
+fi
+
+## -----------------------------------
+# Do the 'stat' on all the directories for now. The ideal fix is to look at
+# the subdir list from the 'auth.allow' option and stat only those.
+for subdir in ${subdirs}
+do
+ stat ${MOUNT_DIR}/${subdir} > /dev/null;
+done
+
+## Clean up and exit
+cleanup_mountpoint;
diff --git a/extras/hook-scripts/add-brick/post/disabled-quota-root-xattr-heal.sh b/extras/hook-scripts/add-brick/post/disabled-quota-root-xattr-heal.sh
index bde7249d429..ca17a903549 100755
--- a/extras/hook-scripts/add-brick/post/disabled-quota-root-xattr-heal.sh
+++ b/extras/hook-scripts/add-brick/post/disabled-quota-root-xattr-heal.sh
@@ -13,123 +13,133 @@
QUOTA_LIMIT_XATTR="trusted.glusterfs.quota.limit-set"
QUOTA_OBJECT_LIMIT_XATTR="trusted.glusterfs.quota.limit-objects"
-MOUNT_DIR=`mktemp -d -t ${0##*/}.XXXXXX`;
+MOUNT_DIR=$(mktemp -d -t "${0##*/}.XXXXXX");
OPTSPEC="volname:,version:,gd-workdir:,volume-op:"
PROGNAME="Quota-xattr-heal-add-brick"
VOL_NAME=
VERSION=
VOLUME_OP=
GLUSTERD_WORKDIR=
-ENABLED_NAME="S28Quota-root-xattr-heal.sh"
+ENABLED_NAME_PREFIX="S28"
+ENABLED_NAME="Quota-root-xattr-heal.sh"
+
+THIS_SCRIPT=$(echo "${0}" | awk -F'/' '{print $NF}')
cleanup_mountpoint ()
{
- umount -f $MOUNT_DIR;
- if [ 0 -ne $? ]
- then
- return $?
- fi
-
- rmdir $MOUNT_DIR;
- if [ 0 -ne $? ]
- then
- return $?
- fi
+
+ if umount -f "${MOUNT_DIR}"; then
+ return $?
+ fi
+
+ if rmdir "${MOUNT_DIR}"; then
+ return $?
+ fi
}
disable_and_exit ()
{
- if [ -e "$ENABLED_STATE" ]
- then
- unlink $ENABLED_STATE;
- exit $?
- fi
+ if [ -e "${ENABLED_STATE}" ]
+ then
+ unlink "${ENABLED_STATE}";
+ exit $?
+ fi
- exit 0
+ exit 0
}
get_and_set_xattr ()
{
- XATTR=$1
-
- VALUE=$(getfattr -n $XATTR -e hex --absolute-names $MOUNT_DIR 2>&1)
- RET=$?
- if [ 0 -eq $RET ]; then
- VALUE=$(echo $VALUE | grep $XATTR | awk -F'=' '{print $NF}')
- setfattr -n $XATTR -v $VALUE $MOUNT_DIR;
- RET=$?
- else
- echo $VALUE | grep -iq "No such attribute"
- if [ 0 -eq $? ]; then
- RET=0
- fi
- fi
-
- return $RET;
+ XATTR=$1
+
+ VALUE=$(getfattr -n "${XATTR}" -e hex --absolute-names "${MOUNT_DIR}" 2>&1)
+ RET=$?
+ if [ 0 -eq ${RET} ]; then
+ VALUE=$(echo "${VALUE}" | grep "${XATTR}" | awk -F'=' '{print $NF}')
+ setfattr -n "${XATTR}" -v "${VALUE}" "${MOUNT_DIR}";
+ RET=$?
+ else
+ if echo "${VALUE}" | grep -iq "No such attribute" ; then
+ RET=0
+ fi
+ fi
+
+ return ${RET};
}
##------------------------------------------
## Parse the arguments
##------------------------------------------
-ARGS=$(getopt -l $OPTSPEC -name $PROGNAME $@)
+ARGS=$(getopt -o '' -l ${OPTSPEC} -n ${PROGNAME} -- "$@")
eval set -- "$ARGS"
while true;
do
- case $1 in
- --volname)
- shift
- VOL_NAME=$1
- ;;
- --version)
- shift
- VERSION=$1
- ;;
- --gd-workdir)
- shift
- GLUSTERD_WORKDIR=$1
- ;;
- --volume-op)
- shift
- VOLUME_OP=$1
- ;;
- *)
- shift
- break
- ;;
- esac
- shift
+ case $1 in
+ --volname)
+ shift
+ VOL_NAME=$1
+ ;;
+ --version)
+ shift
+ VERSION=$1
+ ;;
+ --gd-workdir)
+ shift
+ GLUSTERD_WORKDIR=$1
+ ;;
+ --volume-op)
+ shift
+ VOLUME_OP=$1
+ ;;
+ *)
+ shift
+ break
+ ;;
+ esac
+ shift
done
##----------------------------------------
-ENABLED_STATE="$GLUSTERD_WORKDIR/hooks/$VERSION/$VOLUME_OP/post/$ENABLED_NAME"
+# Avoid long lines
+ENABLED_STATE_1="${GLUSTERD_WORKDIR}/hooks/${VERSION}/${VOLUME_OP}/"
+ENABLED_STATE_2="post/${ENABLED_NAME_PREFIX}${VOL_NAME}-${ENABLED_NAME}"
+ENABLED_STATE="${ENABLED_STATE_1}${ENABLED_STATE_2}"
+
+if [ "${THIS_SCRIPT}" != *"${VOL_NAME}"* ]; then
+ exit 0
+fi
## Is quota enabled?
-FLAG=`grep "^features.quota=" $GLUSTERD_WORKDIR/vols/$VOL_NAME/info \
- | awk -F'=' '{print $NF}'`;
-if [ "$FLAG" != "on" ]
+FLAG=$(grep "^features.quota=" "${GLUSTERD_WORKDIR}/vols/${VOL_NAME}/info" \
+| awk -F'=' '{print $NF}');
+if [ "${FLAG}" != "on" ]
then
- disable_and_exit
+ disable_and_exit
fi
## -----------------------------------
## Mount the volume in temp directory.
## -----------------------------------
-glusterfs -s localhost --volfile-id=$VOL_NAME --client-pid=-42 $MOUNT_DIR;
-if [ 0 -ne $? ]
+# Avoid long lines
+CMD_1="glusterfs -s localhost"
+CMD_2="--volfile-id=${VOL_NAME} client-pid=-42 ${MOUNT_DIR}"
+CMD="${CMD_1}${CMD_2}"
+
+if ${CMD}
then
- exit $?;
+ exit $?;
fi
## -----------------------------------
-RET1=$(get_and_set_xattr $QUOTA_LIMIT_XATTR)
-RET2=$(get_and_set_xattr $QUOTA_OBJECT_LIMIT_XATTR)
+RET1=$(get_and_set_xattr "${QUOTA_LIMIT_XATTR}")
+RET2=$(get_and_set_xattr "${QUOTA_OBJECT_LIMIT_XATTR}")
## Clean up and exit
cleanup_mountpoint;
-if [ $RET1 -ne 0 -o $RET2 -ne 0 ]; then
- exit 1
+if [ "${RET1}" -ne 0 ] || [ "${RET2}" -ne 0 ]; then
+ exit 1
fi
disable_and_exit;
diff --git a/extras/hook-scripts/add-brick/pre/Makefile.am b/extras/hook-scripts/add-brick/pre/Makefile.am
index 6329ad1d4bd..3288581aa57 100644
--- a/extras/hook-scripts/add-brick/pre/Makefile.am
+++ b/extras/hook-scripts/add-brick/pre/Makefile.am
@@ -1,4 +1,6 @@
EXTRA_DIST = S28Quota-enable-root-xattr-heal.sh
hookdir = $(GLUSTERD_WORKDIR)/hooks/1/add-brick/pre/
+if WITH_SERVER
hook_SCRIPTS = S28Quota-enable-root-xattr-heal.sh
+endif
diff --git a/extras/hook-scripts/add-brick/pre/S28Quota-enable-root-xattr-heal.sh b/extras/hook-scripts/add-brick/pre/S28Quota-enable-root-xattr-heal.sh
index 348f34ec3db..27e85231f45 100755
--- a/extras/hook-scripts/add-brick/pre/S28Quota-enable-root-xattr-heal.sh
+++ b/extras/hook-scripts/add-brick/pre/S28Quota-enable-root-xattr-heal.sh
@@ -26,10 +26,11 @@ VOL_NAME=
GLUSTERD_WORKDIR=
VOLUME_OP=
VERSION=
-ENABLED_NAME="S28Quota-root-xattr-heal.sh"
+ENABLED_NAME_PREFIX="S28"
+ENABLED_NAME="Quota-root-xattr-heal.sh"
DISABLED_NAME="disabled-quota-root-xattr-heal.sh"
-enable ()
+activate ()
{
ln -sf $DISABLED_STATE $1;
}
@@ -37,7 +38,7 @@ enable ()
##------------------------------------------
## Parse the arguments
##------------------------------------------
-ARGS=$(getopt -l $OPTSPEC -name $PROGNAME $@)
+ARGS=$(getopt -o '' -l $OPTSPEC -n $PROGNAME -- "$@")
eval set -- "$ARGS"
while true;
@@ -69,8 +70,8 @@ done
##----------------------------------------
DISABLED_STATE="$GLUSTERD_WORKDIR/hooks/$VERSION/add-brick/post/$DISABLED_NAME"
-ENABLED_STATE_START="$GLUSTERD_WORKDIR/hooks/$VERSION/start/post/$ENABLED_NAME"
-ENABLED_STATE_ADD_BRICK="$GLUSTERD_WORKDIR/hooks/$VERSION/add-brick/post/$ENABLED_NAME";
+ENABLED_STATE_START="$GLUSTERD_WORKDIR/hooks/$VERSION/start/post/""$ENABLED_NAME_PREFIX$VOL_NAME""-""$ENABLED_NAME"
+ENABLED_STATE_ADD_BRICK="$GLUSTERD_WORKDIR/hooks/$VERSION/add-brick/post/""$ENABLED_NAME_PREFIX""$VOL_NAME""-""$ENABLED_NAME";
## Why to proceed if the required script itself is not present?
ls $DISABLED_STATE;
@@ -92,9 +93,9 @@ FLAG=`cat $GLUSTERD_WORKDIR/vols/$VOL_NAME/info | grep "^status=" \
| awk -F'=' '{print $NF}'`;
if [ "$FLAG" != "1" ]
then
- enable $ENABLED_STATE_START;
+ activate $ENABLED_STATE_START;
exit $?
fi
-enable $ENABLED_STATE_ADD_BRICK;
+activate $ENABLED_STATE_ADD_BRICK;
exit $?
diff --git a/extras/hook-scripts/create/Makefile.am b/extras/hook-scripts/create/Makefile.am
new file mode 100644
index 00000000000..b083a9145d6
--- /dev/null
+++ b/extras/hook-scripts/create/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = post
diff --git a/extras/hook-scripts/create/post/Makefile.am b/extras/hook-scripts/create/post/Makefile.am
new file mode 100644
index 00000000000..fd1892e9589
--- /dev/null
+++ b/extras/hook-scripts/create/post/Makefile.am
@@ -0,0 +1,8 @@
+EXTRA_DIST = S10selinux-label-brick.sh
+
+scriptsdir = $(GLUSTERD_WORKDIR)/hooks/1/create/post/
+if WITH_SERVER
+if USE_SELINUX
+scripts_SCRIPTS = S10selinux-label-brick.sh
+endif
+endif
diff --git a/extras/hook-scripts/create/post/S10selinux-label-brick.sh b/extras/hook-scripts/create/post/S10selinux-label-brick.sh
new file mode 100755
index 00000000000..f9b4b1a57e3
--- /dev/null
+++ b/extras/hook-scripts/create/post/S10selinux-label-brick.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+#
+# Install to hooks/<HOOKS_VER>/create/post
+#
+# Add an SELinux file context for each brick using the glusterd_brick_t type.
+# This ensures that the brick is relabeled correctly on an SELinux restart or
+# restore. Subsequently, run a restore on the brick path to set the selinux
+# labels.
+#
+###
+
+PROGNAME="Sselinux"
+OPTSPEC="volname:"
+VOL=
+
+parse_args () {
+ ARGS=$(getopt -o '' -l ${OPTSPEC} -n ${PROGNAME} -- "$@")
+ eval set -- "${ARGS}"
+
+ while true; do
+ case ${1} in
+ --volname)
+ shift
+ VOL=${1}
+ ;;
+ *)
+ shift
+ break
+ ;;
+ esac
+ shift
+ done
+}
+
+set_brick_labels()
+{
+ volname="${1}"
+
+ # grab the path for each local brick
+ brickpath="/var/lib/glusterd/vols/${volname}/bricks/"
+ brickdirs=$(
+ find "${brickpath}" -type f -exec grep '^path=' {} \; | \
+ cut -d= -f 2 | \
+ sort -u
+ )
+
+ for b in ${brickdirs}; do
+ # Add a file context for each brick path and associate with the
+ # glusterd_brick_t SELinux type.
+ pattern="${b}(/.*)?"
+ semanage fcontext --add -t glusterd_brick_t -r s0 "${pattern}"
+ # Set the labels on the new brick path.
+ restorecon -R "${b}"
+ done
+}
+
+SELINUX_STATE=$(which getenforce && getenforce)
+[ "${SELINUX_STATE}" = 'Disabled' ] && exit 0
+
+parse_args "$@"
+[ -z "${VOL}" ] && exit 1
+
+set_brick_labels "${VOL}"
+
+exit 0
diff --git a/extras/hook-scripts/delete/Makefile.am b/extras/hook-scripts/delete/Makefile.am
new file mode 100644
index 00000000000..c98a05d9205
--- /dev/null
+++ b/extras/hook-scripts/delete/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = pre
diff --git a/extras/hook-scripts/delete/pre/Makefile.am b/extras/hook-scripts/delete/pre/Makefile.am
new file mode 100644
index 00000000000..4fbfbe7311f
--- /dev/null
+++ b/extras/hook-scripts/delete/pre/Makefile.am
@@ -0,0 +1,8 @@
+EXTRA_DIST = S10selinux-del-fcontext.sh
+
+scriptsdir = $(GLUSTERD_WORKDIR)/hooks/1/delete/pre/
+if WITH_SERVER
+if USE_SELINUX
+scripts_SCRIPTS = S10selinux-del-fcontext.sh
+endif
+endif
diff --git a/extras/hook-scripts/delete/pre/S10selinux-del-fcontext.sh b/extras/hook-scripts/delete/pre/S10selinux-del-fcontext.sh
new file mode 100755
index 00000000000..056b52afe76
--- /dev/null
+++ b/extras/hook-scripts/delete/pre/S10selinux-del-fcontext.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+#
+# Install to hooks/<HOOKS_VER>/delete/pre
+#
+# Delete the file context associated with the brick path on volume deletion. The
+# associated file context was added during volume creation.
+#
+# We do not explicitly relabel the brick, as this could be time consuming and
+# unnecessary.
+#
+###
+
+PROGNAME="Sselinux"
+OPTSPEC="volname:"
+VOL=
+
+function parse_args () {
+ ARGS=$(getopt -o '' -l ${OPTSPEC} -n ${PROGNAME} -- "$@")
+ eval set -- "${ARGS}"
+
+ while true; do
+ case ${1} in
+ --volname)
+ shift
+ VOL=${1}
+ ;;
+ *)
+ shift
+ break
+ ;;
+ esac
+ shift
+ done
+}
+
+function delete_brick_fcontext()
+{
+ local volname=$1
+ local fctx
+ local list=()
+
+ fctx="$(semanage fcontext --list -C)"
+ # grab the path for each local brick
+ brickpath="/var/lib/glusterd/vols/${volname}/bricks/"
+ brickdirs=$(find "${brickpath}" -type f -exec grep '^path=' {} \; | \
+ cut -d= -f 2 | sort -u)
+ for b in ${brickdirs}
+ do
+ pattern="${b}(/.*)?"
+ echo "${fctx}" | grep "^${pattern}\s" >/dev/null
+ if [[ $? -eq 0 ]]; then
+ list+=("${pattern}")
+ fi
+ done
+ if [[ ${#list[@]} -gt 0 ]]; then
+ printf 'fcontext --delete %s\n' "${list[@]}" | semanage -i -
+ fi
+ for b in ${brickdirs}
+ do
+ restorecon -R "${b}"
+ done
+}
+
+SELINUX_STATE=$(which getenforce && getenforce)
+[ "${SELINUX_STATE}" = 'Disabled' ] && exit 0
+
+parse_args "$@"
+[ -z "${VOL}" ] && exit 1
+
+delete_brick_fcontext "${VOL}"
+
+# failure to delete the fcontext is not fatal
+exit 0
diff --git a/extras/hook-scripts/set/post/Makefile.am b/extras/hook-scripts/set/post/Makefile.am
index cea579cb2d9..506a25a8666 100644
--- a/extras/hook-scripts/set/post/Makefile.am
+++ b/extras/hook-scripts/set/post/Makefile.am
@@ -1,4 +1,6 @@
EXTRA_DIST = S30samba-set.sh S32gluster_enable_shared_storage.sh
hookdir = $(GLUSTERD_WORKDIR)/hooks/1/set/post/
+if WITH_SERVER
hook_SCRIPTS = $(EXTRA_DIST)
+endif
diff --git a/extras/hook-scripts/set/post/S30samba-set.sh b/extras/hook-scripts/set/post/S30samba-set.sh
index 97d067fc33f..854f131f6c8 100755
--- a/extras/hook-scripts/set/post/S30samba-set.sh
+++ b/extras/hook-scripts/set/post/S30samba-set.sh
@@ -28,7 +28,7 @@ USERSMB_SET=""
USERCIFS_SET=""
function parse_args () {
- ARGS=$(getopt -l $OPTSPEC --name $PROGNAME -o "o:" -- $@)
+ ARGS=$(getopt -o 'o:' -l $OPTSPEC -n $PROGNAME -- "$@")
eval set -- "$ARGS"
while true; do
@@ -89,7 +89,7 @@ function add_samba_share () {
STRING+="glusterfs:loglevel = 7\n"
STRING+="path = /\n"
STRING+="read only = no\n"
- STRING+="guest ok = yes\n"
+ STRING+="kernel share modes = no\n"
printf "$STRING" >> ${CONFIGFILE}
}
@@ -103,9 +103,9 @@ function sighup_samba () {
fi
}
-function del_samba_share () {
+function deactivate_samba_share () {
volname=$1
- sed -i "/\[gluster-$volname\]/,/^$/d" ${CONFIGFILE}
+ sed -i -e '/^\[gluster-'"$volname"'\]/{ :a' -e 'n; /available = no/H; /^$/!{$!ba;}; x; /./!{ s/^/available = no/; $!{G;x}; $H; }; s/.*//; x; };' ${CONFIGFILE}
}
function is_volume_started () {
@@ -123,29 +123,39 @@ function get_smb () {
usersmbvalue=$(grep user.smb $GLUSTERD_WORKDIR/vols/"$volname"/info |\
cut -d"=" -f2)
- if [[ $usercifsvalue = "disable" || $usersmbvalue = "disable" ]]; then
- uservalue="disable"
+ if [ -n "$usercifsvalue" ]; then
+ if [ "$usercifsvalue" = "disable" ] || [ "$usercifsvalue" = "off" ]; then
+ uservalue="disable"
+ fi
fi
+
+ if [ -n "$usersmbvalue" ]; then
+ if [ "$usersmbvalue" = "disable" ] || [ "$usersmbvalue" = "off" ]; then
+ uservalue="disable"
+ fi
+ fi
+
echo "$uservalue"
}
-parse_args $@
-if [ "0" = $(is_volume_started "$VOL") ]; then
+parse_args "$@"
+if [ "0" = "$(is_volume_started "$VOL")" ]; then
exit 0
fi
-if [[ "$USERCIFS_SET" = "YES" || "$USERSMB_SET" = "YES" ]]; then
+if [ "$USERCIFS_SET" = "YES" ] || [ "$USERSMB_SET" = "YES" ]; then
#Find smb.conf, smbd pid directory and smbd logfile path
find_config_info
- if [ $(get_smb "$VOL") = "disable" ]; then
- del_samba_share $VOL
- sighup_samba
+ if [ "$(get_smb "$VOL")" = "disable" ]; then
+ deactivate_samba_share $VOL
else
if ! grep --quiet "\[gluster-$VOL\]" ${CONFIGFILE} ; then
add_samba_share $VOL
- sighup_samba
+ else
+ sed -i '/\[gluster-'"$VOL"'\]/,/^$/!b;/available = no/d' ${CONFIGFILE}
fi
fi
+ sighup_samba
fi
diff --git a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
index ad51babd5f7..1f2564b44ff 100755
--- a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
+++ b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
@@ -2,7 +2,7 @@
key=`echo $3 | cut -d '=' -f 1`
val=`echo $3 | cut -d '=' -f 2`
-if [ ! "$key" -eq "enable-shared-storage" -o "$key" -eq "cluster.enable-shared-storage" ]; then
+if [ "$key" != "cluster.enable-shared-storage" ] && [ "$key" != "enable-shared-storage" ]; then
exit;
fi
if [ "$val" != 'enable' ]; then
@@ -79,9 +79,9 @@ done
if [ "$option" == "disable" ]; then
# Unmount the volume on all the nodes
- umount /var/run/gluster/shared_storage
- cat /etc/fstab | grep -v "gluster_shared_storage /var/run/gluster/shared_storage/" > /var/run/gluster/fstab.tmp
- mv /var/run/gluster/fstab.tmp /etc/fstab
+ umount /run/gluster/shared_storage
+ cat /etc/fstab | grep -v "gluster_shared_storage /run/gluster/shared_storage/" > /run/gluster/fstab.tmp
+ mv /run/gluster/fstab.tmp /etc/fstab
fi
if [ "$is_originator" == 1 ]; then
@@ -104,8 +104,15 @@ function check_volume_status()
echo $status
}
-mount_cmd="mount -t glusterfs "$local_node_hostname":/gluster_shared_storage \
- /var/run/gluster/shared_storage"
+key=`echo $5 | cut -d '=' -f 1`
+val=`echo $5 | cut -d '=' -f 2`
+if [ "$key" == "transport.address-family" ]; then
+ mount_cmd="mount -t glusterfs -o xlator-option=transport.address-family=inet6 \
+ $local_node_hostname:/gluster_shared_storage /run/gluster/shared_storage"
+else
+ mount_cmd="mount -t glusterfs $local_node_hostname:/gluster_shared_storage \
+ /run/gluster/shared_storage"
+fi
if [ "$option" == "enable" ]; then
retry=0;
@@ -117,13 +124,13 @@ if [ "$option" == "enable" ]; then
if [ "$retry" == 3 ]; then
break;
fi
- status = check_volume_status;
+ status=$(check_volume_status)
done
# Mount the volume on all the nodes
- umount /var/run/gluster/shared_storage
- mkdir -p /var/run/gluster/shared_storage
+ umount /run/gluster/shared_storage
+ mkdir -p /run/gluster/shared_storage
$mount_cmd
- cp /etc/fstab /var/run/gluster/fstab.tmp
- echo "$local_node_hostname:/gluster_shared_storage /var/run/gluster/shared_storage/ glusterfs defaults 0 0" >> /var/run/gluster/fstab.tmp
- mv /var/run/gluster/fstab.tmp /etc/fstab
+ cp /etc/fstab /run/gluster/fstab.tmp
+ echo "$local_node_hostname:/gluster_shared_storage /run/gluster/shared_storage/ glusterfs defaults 0 0" >> /run/gluster/fstab.tmp
+ mv /run/gluster/fstab.tmp /etc/fstab
fi
diff --git a/extras/hook-scripts/start/post/Makefile.am b/extras/hook-scripts/start/post/Makefile.am
index 03bb300c5c2..792019d3c9f 100644
--- a/extras/hook-scripts/start/post/Makefile.am
+++ b/extras/hook-scripts/start/post/Makefile.am
@@ -1,4 +1,6 @@
EXTRA_DIST = S29CTDBsetup.sh S30samba-start.sh S31ganesha-start.sh
hookdir = $(GLUSTERD_WORKDIR)/hooks/1/start/post/
+if WITH_SERVER
hook_SCRIPTS = $(EXTRA_DIST)
+endif
diff --git a/extras/hook-scripts/start/post/S29CTDBsetup.sh b/extras/hook-scripts/start/post/S29CTDBsetup.sh
index 4265cba54ee..69a0d89a3eb 100755
--- a/extras/hook-scripts/start/post/S29CTDBsetup.sh
+++ b/extras/hook-scripts/start/post/S29CTDBsetup.sh
@@ -9,10 +9,14 @@ CTDB_MNT=/gluster/lock
# Make sure ping-timeout is not default for CTDB volume
PING_TIMEOUT_SECS=10
PROGNAME="ctdb"
-OPTSPEC="volname:"
+OPTSPEC="volname:,gd-workdir:,version:,volume-op:,first:"
HOSTNAME=`hostname`
MNTOPTS="_netdev,transport=tcp,xlator-option=*client*.ping-timeout=${PING_TIMEOUT_SECS}"
VOL=
+GLUSTERD_WORKDIR=
+VERSION=
+VOLUME_OP=
+FIRST=
# $META is the volume that will be used by CTDB as a shared filesystem.
# It is not desirable to use this volume for storing 'data' as well.
# META is set to 'all' (viz. a keyword and hence not a legal volume name)
@@ -21,7 +25,7 @@ VOL=
META="all"
function parse_args () {
- ARGS=$(getopt -l $OPTSPEC -name $PROGNAME $@)
+ ARGS=$(getopt -o '' -l $OPTSPEC -n $PROGNAME -- "$@")
eval set -- "$ARGS"
while true; do
@@ -29,13 +33,27 @@ function parse_args () {
--volname)
shift
VOL=$1
- ;;
-
+ ;;
+ --gd-workdir)
+ shift
+ GLUSTERD_WORKDIR=$1
+ ;;
+ --version)
+ shift
+ VERSION=$1
+ ;;
+ --volume-op)
+ shift
+ VOLUME_OP=$1
+ ;;
+ --first)
+ shift
+ FIRST=$1
+ ;;
*)
- shift
- break
- ;;
-
+ shift
+ break
+ ;;
esac
shift
@@ -55,7 +73,7 @@ function add_fstab_entry () {
fi
}
-parse_args $@
+parse_args "$@"
if [ "$META" = "$VOL" ]
then
mkdir -p $CTDB_MNT
diff --git a/extras/hook-scripts/start/post/S30samba-start.sh b/extras/hook-scripts/start/post/S30samba-start.sh
index 752eca650d3..cac0cbf1464 100755
--- a/extras/hook-scripts/start/post/S30samba-start.sh
+++ b/extras/hook-scripts/start/post/S30samba-start.sh
@@ -21,15 +21,18 @@
#volume.
PROGNAME="Ssamba-start"
-OPTSPEC="volname:,gd-workdir:"
+OPTSPEC="volname:,gd-workdir:,version:,volume-op:,first:"
VOL=
CONFIGFILE=
LOGFILEBASE=
PIDDIR=
GLUSTERD_WORKDIR=
+VERSION=
+VOLUME_OP=
+FIRST=
function parse_args () {
- ARGS=$(getopt -l $OPTSPEC -name $PROGNAME $@)
+ ARGS=$(getopt -o '' -l $OPTSPEC -n $PROGNAME -- "$@")
eval set -- "$ARGS"
while true; do
@@ -42,24 +45,37 @@ function parse_args () {
shift
GLUSTERD_WORKDIR=$1
;;
+ --version)
+ shift
+ VERSION=$1
+ ;;
+ --volume-op)
+ shift
+ VOLUME_OP=$1
+ ;;
+ --first)
+ shift
+ FIRST=$1
+ ;;
*)
shift
break
;;
esac
+
shift
done
}
function find_config_info () {
- cmdout=`smbd -b | grep smb.conf`
- if [ $? -ne 0 ];then
+ cmdout=$(smbd -b 2> /dev/null)
+ CONFIGFILE=$(echo "$cmdout" | grep CONFIGFILE | awk '{print $2}')
+ if [ -z "$CONFIGFILE" ]; then
echo "Samba is not installed"
exit 1
fi
- CONFIGFILE=`echo $cmdout | awk {'print $2'}`
- PIDDIR=`smbd -b | grep PIDDIR | awk {'print $2'}`
- LOGFILEBASE=`smbd -b | grep 'LOGFILEBASE' | awk '{print $2}'`
+ PIDDIR=$(echo "$cmdout" | grep PIDDIR | awk '{print $2}')
+ LOGFILEBASE=$(echo "$cmdout" | grep 'LOGFILEBASE' | awk '{print $2}')
}
function add_samba_share () {
@@ -72,12 +88,12 @@ function add_samba_share () {
STRING+="glusterfs:loglevel = 7\n"
STRING+="path = /\n"
STRING+="read only = no\n"
- STRING+="guest ok = yes\n"
- printf "$STRING" >> ${CONFIGFILE}
+ STRING+="kernel share modes = no\n"
+ printf "$STRING" >> "${CONFIGFILE}"
}
function sighup_samba () {
- pid=`cat ${PIDDIR}/smbd.pid`
+ pid=$(cat "${PIDDIR}/smbd.pid" 2> /dev/null)
if [ "x$pid" != "x" ]
then
kill -HUP "$pid";
@@ -90,26 +106,40 @@ function get_smb () {
volname=$1
uservalue=
- usercifsvalue=$(grep user.cifs $GLUSTERD_WORKDIR/vols/"$volname"/info |\
+ usercifsvalue=$(grep user.cifs "$GLUSTERD_WORKDIR"/vols/"$volname"/info |\
cut -d"=" -f2)
- usersmbvalue=$(grep user.smb $GLUSTERD_WORKDIR/vols/"$volname"/info |\
+ usersmbvalue=$(grep user.smb "$GLUSTERD_WORKDIR"/vols/"$volname"/info |\
cut -d"=" -f2)
- if [[ $usercifsvalue = "disable" || $usersmbvalue = "disable" ]]; then
- uservalue="disable"
+ if [ -n "$usercifsvalue" ]; then
+ if [ "$usercifsvalue" = "enable" ] || [ "$usercifsvalue" = "on" ]; then
+ uservalue="enable"
+ fi
+ fi
+
+ if [ -n "$usersmbvalue" ]; then
+ if [ "$usersmbvalue" = "enable" ] || [ "$usersmbvalue" = "on" ]; then
+ uservalue="enable"
+ fi
fi
+
echo "$uservalue"
}
-parse_args $@
-if [ $(get_smb "$VOL") = "disable" ]; then
+parse_args "$@"
+
+value=$(get_smb "$VOL")
+
+if [ -z "$value" ] || [ "$value" != "enable" ]; then
exit 0
fi
#Find smb.conf, smbd pid directory and smbd logfile path
find_config_info
-if ! grep --quiet "\[gluster-$VOL\]" ${CONFIGFILE} ; then
- add_samba_share $VOL
- sighup_samba
+if ! grep --quiet "\[gluster-$VOL\]" "${CONFIGFILE}" ; then
+ add_samba_share "$VOL"
+else
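+    # The share already exists; re-activate it by deleting any "available = no" line from its section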
+ sed -i '/\[gluster-'"$VOL"'\]/,/^$/!b;/available = no/d' "${CONFIGFILE}"
fi
+sighup_samba
diff --git a/extras/hook-scripts/start/post/S31ganesha-start.sh b/extras/hook-scripts/start/post/S31ganesha-start.sh
index d0b5101f0ea..7ad6f23ad06 100755
--- a/extras/hook-scripts/start/post/S31ganesha-start.sh
+++ b/extras/hook-scripts/start/post/S31ganesha-start.sh
@@ -4,7 +4,7 @@ OPTSPEC="volname:,gd-workdir:"
VOL=
declare -i EXPORT_ID
ganesha_key="ganesha.enable"
-GANESHA_DIR="/etc/ganesha"
+GANESHA_DIR="/run/gluster/shared_storage/nfs-ganesha"
CONF1="$GANESHA_DIR/ganesha.conf"
GLUSTERD_WORKDIR=
@@ -60,45 +60,15 @@ echo " SecType = \"sys\";"
echo "}"
}
-#This function keeps track of export IDs and increments it with every new entry
-#Also it adds the export dynamically by sending dbus signals
+#It adds the export dynamically by sending dbus signals
function export_add()
{
- count=`ls -l $GANESHA_DIR/exports/*.conf | wc -l`
- if [ "$count" = "1" ] ;
- then
- EXPORT_ID=2
- else
- #if [ -s /var/lib/ganesha/export_removed ];
- # then
- # EXPORT_ID=`head -1 /var/lib/ganesha/export_removed`
- # sed -i -e "1d" /var/lib/ganesha/export_removed
- # else
-
- EXPORT_ID=`cat $GANESHA_DIR/.export_added`
- EXPORT_ID=EXPORT_ID+1
- #fi
- fi
- echo $EXPORT_ID > $GANESHA_DIR/.export_added
- sed -i s/Export_Id.*/"Export_Id=$EXPORT_ID;"/ \
-$GANESHA_DIR/exports/export.$VOL.conf
- echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF1
-
dbus-send --print-reply --system --dest=org.ganesha.nfsd \
/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.AddExport \
string:$GANESHA_DIR/exports/export.$VOL.conf string:"EXPORT(Export_Id=$EXPORT_ID)"
}
-function start_ganesha()
-{
- #Remove export entry from nfs-ganesha.conf
- sed -i /$VOL.conf/d $CONF1
- #Create a new export entry
- export_add $VOL
-
-}
-
# based on src/scripts/ganeshactl/Ganesha/export_mgr.py
function is_exported()
{
@@ -133,9 +103,20 @@ if ganesha_enabled ${VOL} && ! is_exported ${VOL}
then
if [ ! -e ${GANESHA_DIR}/exports/export.${VOL}.conf ]
then
+ #Remove export entry from nfs-ganesha.conf
+ sed -i /$VOL.conf/d $CONF1
write_conf ${VOL} > ${GANESHA_DIR}/exports/export.${VOL}.conf
+ EXPORT_ID=`cat $GANESHA_DIR/.export_added`
+ EXPORT_ID=EXPORT_ID+1
+ echo $EXPORT_ID > $GANESHA_DIR/.export_added
+ sed -i s/Export_Id.*/"Export_Id=$EXPORT_ID;"/ \
+ $GANESHA_DIR/exports/export.$VOL.conf
+ echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF1
+ else
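+        # The export file already exists; re-use the Export_Id recorded in it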
+ EXPORT_ID=$(grep ^[[:space:]]*Export_Id $GANESHA_DIR/exports/export.$VOL.conf |\
+ awk -F"[=,;]" '{print $2}' | tr -d '[[:space:]]')
fi
- start_ganesha ${VOL}
+ export_add $VOL
fi
exit 0
diff --git a/extras/hook-scripts/stop/pre/Makefile.am b/extras/hook-scripts/stop/pre/Makefile.am
index bf63e7393d3..9e8d1565e93 100644
--- a/extras/hook-scripts/stop/pre/Makefile.am
+++ b/extras/hook-scripts/stop/pre/Makefile.am
@@ -1,4 +1,6 @@
EXTRA_DIST = S29CTDB-teardown.sh S30samba-stop.sh
hookdir = $(GLUSTERD_WORKDIR)/hooks/1/stop/pre/
+if WITH_SERVER
hook_SCRIPTS = $(EXTRA_DIST)
+endif
diff --git a/extras/hook-scripts/stop/pre/S29CTDB-teardown.sh b/extras/hook-scripts/stop/pre/S29CTDB-teardown.sh
index 5fb49bd9e97..0975a00f18d 100755
--- a/extras/hook-scripts/stop/pre/S29CTDB-teardown.sh
+++ b/extras/hook-scripts/stop/pre/S29CTDB-teardown.sh
@@ -2,8 +2,9 @@
CTDB_MNT=/gluster/lock
PROGNAME="ctdb"
-OPTSPEC="volname:"
+OPTSPEC="volname:,last:"
VOL=
+LAST=
# $META is the volume that will be used by CTDB as a shared filesystem.
# It is not desirable to use this volume for storing 'data' as well.
# META is set to 'all' (viz. a keyword and hence not a legal volume name)
@@ -12,7 +13,7 @@ VOL=
META="all"
function parse_args () {
- ARGS=$(getopt -l $OPTSPEC -name $PROGNAME $@)
+ ARGS=$(getopt -o '' -l $OPTSPEC -n $PROGNAME -- "$@")
eval set -- "$ARGS"
while true; do
@@ -20,15 +21,16 @@ function parse_args () {
--volname)
shift
VOL=$1
- ;;
-
+ ;;
+ --last)
+ shift
+ LAST=$1
+ ;;
*)
- shift
- break
- ;;
-
+ shift
+ break
+ ;;
esac
-
shift
done
}
@@ -51,7 +53,7 @@ function remove_fstab_entry () {
fi
}
-parse_args $@
+parse_args "$@"
if [ "$META" = "$VOL" ]
then
umount "$CTDB_MNT"
diff --git a/extras/hook-scripts/stop/pre/S30samba-stop.sh b/extras/hook-scripts/stop/pre/S30samba-stop.sh
index 62cf7d1e0d2..ea799381d62 100755
--- a/extras/hook-scripts/stop/pre/S30samba-stop.sh
+++ b/extras/hook-scripts/stop/pre/S30samba-stop.sh
@@ -16,27 +16,33 @@
#event by removing the volume related entries(if any) in smb.conf file.
PROGNAME="Ssamba-stop"
-OPTSPEC="volname:"
+OPTSPEC="volname:,last:"
VOL=
CONFIGFILE=
PIDDIR=
+LAST=
function parse_args () {
- ARGS=$(getopt -l $OPTSPEC -name $PROGNAME $@)
+ ARGS=$(getopt -o '' -l $OPTSPEC -n $PROGNAME -- "$@")
eval set -- "$ARGS"
while true; do
- case $1 in
- --volname)
- shift
- VOL=$1
- ;;
- *)
- shift
- break
- ;;
- esac
- shift
+ case $1 in
+ --volname)
+ shift
+ VOL=$1
+ ;;
+ --last)
+ shift
+ LAST=$1
+ ;;
+ *)
+ shift
+ break
+ ;;
+ esac
+
+ shift
done
}
@@ -46,13 +52,13 @@ function find_config_info () {
echo "Samba is not installed"
exit 1
fi
- CONFIGFILE=`echo $cmdout | awk {'print $2'}`
- PIDDIR=`smbd -b | grep PIDDIR | awk {'print $2'}`
+ CONFIGFILE=`echo $cmdout | awk '{print $2}'`
+ PIDDIR=`smbd -b | grep PIDDIR | awk '{print $2}'`
}
-function del_samba_share () {
+function deactivate_samba_share () {
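+    # Append "available = no" to the [gluster-<volname>] section (if not already present) so the share is marked inactive without removing its configuration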
volname=$1
- sed -i "/\[gluster-$volname\]/,/^$/d" ${CONFIGFILE}
+ sed -i -e '/^\[gluster-'"$volname"'\]/{ :a' -e 'n; /available = no/H; /^$/!{$!ba;}; x; /./!{ s/^/available = no/; $!{G;x}; $H; }; s/.*//; x; };' ${CONFIGFILE}
}
function sighup_samba () {
@@ -65,7 +71,7 @@ function sighup_samba () {
fi
}
-parse_args $@
+parse_args "$@"
find_config_info
-del_samba_share $VOL
+deactivate_samba_share $VOL
sighup_samba
diff --git a/extras/identify-hangs.sh b/extras/identify-hangs.sh
new file mode 100755
index 00000000000..ebc6bf144aa
--- /dev/null
+++ b/extras/identify-hangs.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+function get_statedump_fnames_without_timestamps
+{
+ ls | grep -E "[.]dump[.][0-9][0-9]*" | cut -f1-3 -d'.' | sort -u
+}
+
+function get_non_uniq_fields
+{
+ local statedump_fname_prefix=$1
+ print_stack_lkowner_unique_in_one_line "$statedump_fname_prefix" | sort | uniq -c | grep -vE "^\s*1 " | awk '{$1="repeats="$1; print $0}'
+}
+
+function print_stack_lkowner_unique_in_one_line
+{
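+    # Collapse each statedump stack block into a single line of its unique=, stack=, lk-owner= and pid= fields so identical frames can be matched across statedumps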
+ local statedump_fname_prefix=$1
+ sed -e '/./{H;$!d;}' -e 'x;/unique=/!d;/stack=/!d;/lk-owner=/!d;/pid=/!d;' "${statedump_fname_prefix}"* | grep -E "(stack|lk-owner|unique|pid)=" | paste -d " " - - - -
+}
+
+function get_stacks_that_appear_in_multiple_statedumps
+{
+    #If a stack with the same 'unique/lk-owner/stack' appears in multiple
+    #statedumps, print the stack
+ local statedump_fname_prefix=$1
+ while read -r non_uniq_stack;
+ do
+ if [ -z "$printed" ];
+ then
+ printed="1"
+ fi
+ echo "$statedump_fname_prefix" "$non_uniq_stack"
+ done < <(get_non_uniq_fields "$statedump_fname_prefix")
+}
+
+statedumpdir=${1}
+if [ -z "$statedumpdir" ];
+then
+ echo "Usage: $0 <statedump-dir>"
+ exit 1
+fi
+
+if [ ! -d "$statedumpdir" ];
+then
+    echo "$statedumpdir is not a directory"
+ echo "Usage: $0 <statedump-dir>"
+ exit 1
+fi
+
+cd "$statedumpdir" || exit 1
+for statedump_fname_prefix in $(get_statedump_fnames_without_timestamps);
+do
+ get_stacks_that_appear_in_multiple_statedumps "$statedump_fname_prefix"
+done | column -t
+echo "NOTE: stacks with lk-owner=\"\"/lk-owner=0000000000000000/unique=0 may not be hung frames and need further inspection" >&2
diff --git a/extras/init.d/Makefile.am b/extras/init.d/Makefile.am
index 8c43e513d77..8d8cc69571a 100644
--- a/extras/init.d/Makefile.am
+++ b/extras/init.d/Makefile.am
@@ -1,5 +1,7 @@
-EXTRA_DIST = glusterd-Debian glusterd-FreeBSD glusterd-Redhat glusterd-SuSE glusterd.plist rhel5-load-fuse.modules
+EXTRA_DIST = glusterd-Debian glusterd-FreeBSD glusterd-Redhat \
+ glusterd-SuSE glusterd.plist glustereventsd-FreeBSD \
+ glustereventsd-Redhat glustereventsd-Debian
CLEANFILES =
@@ -8,10 +10,18 @@ SYSTEMD_DIR = @systemddir@
LAUNCHD_DIR = @launchddir@
$(GF_DISTRIBUTION):
+if WITH_SERVER
@if [ ! -d $(SYSTEMD_DIR) ]; then \
$(mkdir_p) $(DESTDIR)$(INIT_DIR); \
$(INSTALL_PROGRAM) glusterd-$(GF_DISTRIBUTION) $(DESTDIR)$(INIT_DIR)/glusterd; \
fi
+endif
+if BUILD_EVENTS
+ @if [ ! -d $(SYSTEMD_DIR) ]; then \
+ $(mkdir_p) $(DESTDIR)$(INIT_DIR); \
+ $(INSTALL_PROGRAM) glustereventsd-$(GF_DISTRIBUTION) $(DESTDIR)$(INIT_DIR)/glustereventsd; \
+ fi
+endif
install-exec-local: $(GF_DISTRIBUTION)
diff --git a/extras/init.d/glustereventsd-Debian.in b/extras/init.d/glustereventsd-Debian.in
new file mode 100644
index 00000000000..6eebdb2b8d8
--- /dev/null
+++ b/extras/init.d/glustereventsd-Debian.in
@@ -0,0 +1,91 @@
+#!/bin/sh
+### BEGIN INIT INFO
+# Provides: glustereventsd
+# Required-Start: $local_fs $network
+# Required-Stop: $local_fs $network
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Gluster Events Server
+# Description: Gluster Events Server
+### END INIT INFO
+
+# Author: Chris AtLee <chris@atlee.ca>
+# Patched by: Matthias Albert < matthias@linux4experts.de>
+
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+NAME=glustereventsd
+SCRIPTNAME=/etc/init.d/$NAME
+DAEMON=@prefix@/sbin/$NAME
+PIDFILE=/var/run/$NAME.pid
+GLUSTEREVENTSD_OPTS=""
+PID=`test -f $PIDFILE && cat $PIDFILE`
+
+
+# Gracefully exit if the package has been removed.
+test -x $DAEMON || exit 0
+
+# Load the VERBOSE setting and other rcS variables
+. /lib/init/vars.sh
+
+# Define LSB log_* functions.
+. /lib/lsb/init-functions
+
+
+do_start()
+{
+ pidofproc -p $PIDFILE $DAEMON >/dev/null
+ status=$?
+ if [ $status -eq 0 ]; then
+ log_success_msg "glustereventsd service is already running with pid $PID"
+ else
+ log_daemon_msg "Starting glustereventsd service" "glustereventsd"
+ start-stop-daemon --start --quiet --oknodo --pidfile $PIDFILE --startas $DAEMON -- -p $PIDFILE $GLUSTEREVENTSD_OPTS
+ log_end_msg $?
+ start_daemon -p $PIDFILE $DAEMON -f $CONFIGFILE
+ return $?
+ fi
+}
+
+do_stop()
+{
+ log_daemon_msg "Stopping glustereventsd service" "glustereventsd"
+ start-stop-daemon --stop --quiet --oknodo --pidfile $PIDFILE
+ log_end_msg $?
+ rm -f $PIDFILE
+ killproc -p $PIDFILE $DAEMON
+ return $?
+}
+
+do_status()
+{
+ pidofproc -p $PIDFILE $DAEMON >/dev/null
+ status=$?
+ if [ $status -eq 0 ]; then
+ log_success_msg "glustereventsd service is running with pid $PID"
+ else
+ log_failure_msg "glustereventsd service is not running."
+ fi
+ exit $status
+}
+
+case "$1" in
+ start)
+ do_start
+ ;;
+ stop)
+ do_stop
+ ;;
+ status)
+ do_status;
+ ;;
+ restart|force-reload)
+ do_stop
+ sleep 2
+ do_start
+ ;;
+ *)
+ echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+ exit 3
+ ;;
+esac
+
diff --git a/extras/init.d/glustereventsd-FreeBSD.in b/extras/init.d/glustereventsd-FreeBSD.in
new file mode 100644
index 00000000000..2e8303ec6c6
--- /dev/null
+++ b/extras/init.d/glustereventsd-FreeBSD.in
@@ -0,0 +1,19 @@
+#!/bin/sh
+#
+# $FreeBSD$
+#
+
+# PROVIDE: glustereventsd
+
+. /etc/rc.subr
+
+name="glustereventsd"
+rcvar=`set_rcvar`
+command=@prefix@/sbin/${name}
+command_interpreter=/usr/local/bin/python
+pidfile="/var/run/${name}.pid"
+glustereventsd_flags="-p /var/run/${name}.pid"
+start_cmd="/usr/sbin/daemon $command ${glustereventsd_flags}"
+
+load_rc_config $name
+run_rc_command "$1"
diff --git a/extras/init.d/glustereventsd-Redhat.in b/extras/init.d/glustereventsd-Redhat.in
new file mode 100644
index 00000000000..d23ce4c244f
--- /dev/null
+++ b/extras/init.d/glustereventsd-Redhat.in
@@ -0,0 +1,129 @@
+#!/bin/bash
+#
+# glustereventsd Startup script for the glusterfs Events server
+#
+# chkconfig: - 20 80
+# description: Gluster Events Server
+
+### BEGIN INIT INFO
+# Provides: glustereventsd
+# Required-Start: $local_fs $network
+# Required-Stop: $local_fs $network
+# Should-Start:
+# Should-Stop:
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: glusterfs Events server
+# Description: GlusterFS Events Server
+### END INIT INFO
+#
+
+# Source function library.
+. /etc/rc.d/init.d/functions
+
+BASE=glustereventsd
+
+# Fedora File System Layout dictates /run
+[ -e /run ] && RUNDIR="/run"
+PIDFILE="${RUNDIR:-/var/run}/${BASE}.pid"
+
+PID=`test -f $PIDFILE && cat $PIDFILE`
+
+GLUSTEREVENTSD_BIN=@prefix@/sbin/$BASE
+GLUSTEREVENTSD_OPTS="--pid-file=$PIDFILE"
+GLUSTEREVENTSD="$GLUSTEREVENTSD_BIN $GLUSTEREVENTSD_OPTS"
+RETVAL=0
+
+LOCKFILE=/var/lock/subsys/${BASE}
+
+# Start the service $BASE
+start()
+{
+ if pidofproc -p $PIDFILE $GLUSTEREVENTSD_BIN &> /dev/null; then
+ echo "glustereventsd service is already running with pid $PID"
+ return 0
+ else
+ echo -n $"Starting $BASE:"
+ daemon $GLUSTEREVENTSD &
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && touch $LOCKFILE
+ return $RETVAL
+ fi
+}
+
+# Stop the service $BASE
+stop()
+{
+ echo -n $"Stopping $BASE:"
+ if pidofproc -p $PIDFILE $GLUSTEREVENTSD_BIN &> /dev/null; then
+ killproc -p $PIDFILE $BASE
+ else
+ killproc $BASE
+ fi
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && rm -f $LOCKFILE
+ return $RETVAL
+}
+
+restart()
+{
+ stop
+ start
+}
+
+reload()
+{
+ restart
+}
+
+force_reload()
+{
+ restart
+}
+
+rh_status()
+{
+ status $BASE
+}
+
+rh_status_q()
+{
+ rh_status &>/dev/null
+}
+
+
+### service arguments ###
+case $1 in
+ start)
+ rh_status_q && exit 0
+ $1
+ ;;
+ stop)
+ rh_status_q || exit 0
+ $1
+ ;;
+ restart)
+ $1
+ ;;
+ reload)
+ rh_status_q || exit 7
+ $1
+ ;;
+ force-reload)
+ force_reload
+ ;;
+ status)
+ rh_status
+ ;;
+ condrestart|try-restart)
+ rh_status_q || exit 0
+ restart
+ ;;
+ *)
+ echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
+ exit 1
+esac
+
+exit $?
diff --git a/extras/init.d/rhel5-load-fuse.modules b/extras/init.d/rhel5-load-fuse.modules
deleted file mode 100755
index ee194db99b8..00000000000
--- a/extras/init.d/rhel5-load-fuse.modules
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/sh
-#
-# fusermount-glusterfs requires the /dev/fuse character device. The fuse module
-# provides this and is loaded on demand in newer Linux distributions.
-#
-
-[ -c /dev/fuse ] || /sbin/modprobe fuse
diff --git a/extras/mount-shared-storage.sh b/extras/mount-shared-storage.sh
new file mode 100755
index 00000000000..cc40e13c3e3
--- /dev/null
+++ b/extras/mount-shared-storage.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+#After a reboot there is a chance that mounting of the shared storage will fail.
+#This would impact starting of features like NFS-Ganesha, so this script
+#retries mounting the shared storage if the mount has failed.
+
+exitStatus=0
+
+while IFS= read -r glm
+do
+ IFS=$' \t' read -r -a arr <<< "$glm"
+
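+    # fstab fields: arr[0]=device, arr[1]=mount point, arr[2]=fs type, arr[3]=mount options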
+ #Validate storage type is glusterfs
+ if [ "${arr[2]}" == "glusterfs" ]
+ then
+
+ #check whether shared storage is mounted
+ #if it is mounted then mountpoint -q will return a 0 success code
+ if mountpoint -q "${arr[1]}"
+ then
+ echo "${arr[1]} is already mounted"
+ continue
+ fi
+
+ mount -t glusterfs -o "${arr[3]}" "${arr[0]}" "${arr[1]}"
+        #wait for a few seconds
+ sleep 10
+
+        #recheck that the mount succeeded
+ if mountpoint -q "${arr[1]}"
+ then
+ echo "${arr[1]} has been mounted"
+ continue
+ else
+ echo "${arr[1]} failed to mount"
+ exitStatus=1
+ fi
+ fi
+done <<< "$(sed '/^#/ d' </etc/fstab | grep 'glusterfs')"
+exit $exitStatus
diff --git a/extras/ocf/volume.in b/extras/ocf/volume.in
index 72fd1213af2..76cc649e55f 100755
--- a/extras/ocf/volume.in
+++ b/extras/ocf/volume.in
@@ -6,6 +6,7 @@
# HA resource
#
# Authors: Florian Haas (hastexo Professional Services GmbH)
+# Jiri Lunacek (Hosting90 Systems s.r.o.)
#
# License: GNU General Public License (GPL)
@@ -54,6 +55,14 @@ must have clone ordering enabled.
<shortdesc lang="en">gluster executable</shortdesc>
<content type="string" default="$OCF_RESKEY_binary_default"/>
</parameter>
+ <parameter name="peer_map">
+ <longdesc lang="en">
+  Mapping of hostname to peer name in the gluster cluster,
+  in the format hostname1:peername1,hostname2:peername2,...
+ </longdesc>
+ <shortdesc lang="en">gluster peer map</shortdesc>
+ <content type="string" default=""/>
+ </parameter>
</parameters>
<actions>
<action name="start" timeout="20" />
@@ -68,9 +77,13 @@ EOF
}
+if [ -n "${OCF_RESKEY_peer_map}" ]; then
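+    # Translate this node's short hostname into its gluster peer name using the peer_map parameter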
+ SHORTHOSTNAME=`echo "${OCF_RESKEY_peer_map}" | egrep -o "$SHORTHOSTNAME\:[^,]+" | awk -F: '{print $2}'`
+fi
+
volume_getdir() {
local voldir
- voldir="@sysconfdir@/glusterd/vols/${OCF_RESKEY_volname}"
+ voldir="@GLUSTERD_WORKDIR@/vols/${OCF_RESKEY_volname}"
[ -d ${voldir} ] || return 1
@@ -78,6 +91,16 @@ volume_getdir() {
return 0
}
+volume_getpid_dir() {
+ local volpid_dir
+ volpid_dir="/var/run/gluster/vols/${OCF_RESKEY_volname}"
+
+ [ -d ${volpid_dir} ] || return 1
+
+ echo "${volpid_dir}"
+ return 0
+}
+
volume_getbricks() {
local infofile
local voldir
@@ -92,17 +115,19 @@ volume_getbricks() {
volume_getpids() {
local bricks
- local piddir
local pidfile
local infofile
- local voldir
+ local volpid_dir
- voldir=`volume_getdir`
+ volpid_dir=`volume_getpid_dir`
bricks=`volume_getbricks`
- piddir="${voldir}/run"
+
+ if [ -z "$bricks" ]; then
+ return 1
+ fi
for brick in ${bricks}; do
- pidfile="${piddir}/${SHORTHOSTNAME}${brick}.pid"
+ pidfile="${volpid_dir}/${SHORTHOSTNAME}${brick}.pid"
[ -e $pidfile ] || return 1
cat $pidfile
done
@@ -206,6 +231,11 @@ volume_validate_all() {
# Test for required binaries
check_binary $OCF_RESKEY_binary
+
+ if [ -z "$SHORTHOSTNAME" ]; then
+ ocf_log err 'Unable to get host in node map'
+ return $OCF_ERR_CONFIGURED
+ fi
return $OCF_SUCCESS
}
diff --git a/extras/profiler/glusterfs-profiler b/extras/profiler/glusterfs-profiler
index 65d445864aa..aaafd088648 100755
--- a/extras/profiler/glusterfs-profiler
+++ b/extras/profiler/glusterfs-profiler
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python3
# Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
# This file is part of GlusterFS.
@@ -291,7 +291,7 @@ class Texttable:
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
- l = string.join([horiz*n for n in self._width], s)
+ l = s.join([horiz*n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
diff --git a/extras/prot_filter.py b/extras/prot_filter.py
deleted file mode 100755
index 7dccacf155e..00000000000
--- a/extras/prot_filter.py
+++ /dev/null
@@ -1,144 +0,0 @@
-#!/usr/bin/python
-
-"""
- Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-"""
-
-"""
- INSTRUCTIONS
- Put this in /usr/lib64/glusterfs/$version/filter to have it run automatically,
- or else you'll have to run it by hand every time you change the volume
- configuration. Give it a list of volume names on which to enable the
- protection functionality; it will deliberately ignore client volfiles for
- other volumes, and all server volfiles. It *will* include internal client
- volfiles such as those used for NFS or rebalance/self-heal; this is a
- deliberate choice so that it will catch deletions from those sources as well.
-"""
-
-volume_list = [ "jdtest" ]
-
-import copy
-import string
-import sys
-import types
-
-class Translator:
- def __init__ (self, name):
- self.name = name
- self.xl_type = ""
- self.opts = {}
- self.subvols = []
- self.dumped = False
- def __repr__ (self):
- return "<Translator %s>" % self.name
-
-def load (path):
- # If it's a string, open it; otherwise, assume it's already a
- # file-like object (most notably from urllib*).
- if type(path) in types.StringTypes:
- fp = file(path,"r")
- else:
- fp = path
- all_xlators = {}
- xlator = None
- last_xlator = None
- while True:
- text = fp.readline()
- if text == "":
- break
- text = text.split()
- if not len(text):
- continue
- if text[0] == "volume":
- if xlator:
- raise RuntimeError, "nested volume definition"
- xlator = Translator(text[1])
- continue
- if not xlator:
- raise RuntimeError, "text outside volume definition"
- if text[0] == "type":
- xlator.xl_type = text[1]
- continue
- if text[0] == "option":
- xlator.opts[text[1]] = string.join(text[2:])
- continue
- if text[0] == "subvolumes":
- for sv in text[1:]:
- xlator.subvols.append(all_xlators[sv])
- continue
- if text[0] == "end-volume":
- all_xlators[xlator.name] = xlator
- last_xlator = xlator
- xlator = None
- continue
- raise RuntimeError, "unrecognized keyword %s" % text[0]
- if xlator:
- raise RuntimeError, "unclosed volume definition"
- return all_xlators, last_xlator
-
-def generate (graph, last, stream=sys.stdout):
- for sv in last.subvols:
- if not sv.dumped:
- generate(graph,sv,stream)
- print >> stream, ""
- sv.dumped = True
- print >> stream, "volume %s" % last.name
- print >> stream, " type %s" % last.xl_type
- for k, v in last.opts.iteritems():
- print >> stream, " option %s %s" % (k, v)
- if last.subvols:
- print >> stream, " subvolumes %s" % string.join(
- [ sv.name for sv in last.subvols ])
- print >> stream, "end-volume"
-
-def push_filter (graph, old_xl, filt_type, opts={}):
- new_type = "-" + filt_type.split("/")[1]
- old_type = "-" + old_xl.xl_type.split("/")[1]
- pos = old_xl.name.find(old_type)
- if pos >= 0:
- new_name = old_xl.name
- old_name = new_name[:pos] + new_type + new_name[len(old_type)+pos:]
- else:
- new_name = old_xl.name + old_type
- old_name = old_xl.name + new_type
- new_xl = Translator(new_name)
- new_xl.xl_type = old_xl.xl_type
- new_xl.opts = old_xl.opts
- new_xl.subvols = old_xl.subvols
- graph[new_xl.name] = new_xl
- old_xl.name = old_name
- old_xl.xl_type = filt_type
- old_xl.opts = opts
- old_xl.subvols = [new_xl]
- graph[old_xl.name] = old_xl
-
-if __name__ == "__main__":
- path = sys.argv[1]
- # Alow an override for debugging.
- for extra in sys.argv[2:]:
- volume_list.append(extra)
- graph, last = load(path)
- for v in volume_list:
- if graph.has_key(v):
- break
- else:
- print "No configured volumes found - aborting."
- sys.exit(0)
- for v in graph.values():
- if v.xl_type == "cluster/distribute":
- push_filter(graph,v,"features/prot_dht")
- elif v.xl_type == "protocol/client":
- push_filter(graph,v,"features/prot_client")
- # We push debug/trace so that every fop gets a real frame, because DHT
- # gets confused if STACK_WIND_TAIL causes certain fops to be invoked
- # from anything other than a direct child.
- for v in graph.values():
- if v.xl_type == "features/prot_client":
- push_filter(graph,v,"debug/trace")
- generate(graph,last,stream=open(path,"w"))
diff --git a/extras/python/Makefile.am b/extras/python/Makefile.am
new file mode 100644
index 00000000000..7d81fa0319b
--- /dev/null
+++ b/extras/python/Makefile.am
@@ -0,0 +1,7 @@
+if HAVE_PYTHON
+# Install __init__.py into the Python site-packages area
+pypkgdir = @BUILD_PYTHON_SITE_PACKAGES@/gluster
+pypkg_PYTHON = __init__.py
+endif
+
+EXTRA_DIST = __init__.py
diff --git a/extras/python/__init__.py b/extras/python/__init__.py
new file mode 100644
index 00000000000..3ad9513f40e
--- /dev/null
+++ b/extras/python/__init__.py
@@ -0,0 +1,2 @@
+from pkgutil import extend_path
+__path__ = extend_path(__path__, __name__)
diff --git a/extras/contri-add.sh b/extras/quota/contri-add.sh
index 7db5edd5d20..7db5edd5d20 100755
--- a/extras/contri-add.sh
+++ b/extras/quota/contri-add.sh
diff --git a/extras/quota/log_accounting.sh b/extras/quota/log_accounting.sh
new file mode 100755
index 00000000000..e2dd87b84d7
--- /dev/null
+++ b/extras/quota/log_accounting.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# The script does an accounting of all directories using the 'du' command and
+# using gluster, so the two can be compared to identify accounting mismatches.
+# There can be minor mismatches because gluster only accounts for the size of
+# files. Directories can take up to 4kB of space on the FS per directory. This
+# size is accounted by du and not by gluster. However, the difference should
+# not be significant.
+
+mountpoint=$1
+volname=$2
+
+usage ()
+{
+ echo >&2 "usage: $0 <mountpoint> <volume name>"
+ exit
+}
+
+[ $# -lt 2 ] && usage
+
+cd $mountpoint
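+# /tmp/gluster_quota_1..5 hold intermediate data: the directory list from du, the quota list output, du sizes per directory, the merged sorted view, and the directory tree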
+du -h | head -n -1 | tr -d '.' |awk '{ for (i = 2; i <= NF; i++) { printf("%s ", $i);} print "" }' > /tmp/gluster_quota_1
+cat /tmp/gluster_quota_1 | sed 's/ $//' | sed 's/ /\\ /g' | sed 's/(/\\(/g' | sed 's/)/\\)/g' |xargs gluster v quota $volname list > /tmp/gluster_quota_2
+du -h | head -n -1 |awk '{ for (i = 2; i <= NF; i++) { printf("%s %s", $i, $1);} print "" }' | tr -d '.' > /tmp/gluster_quota_3
+cat /tmp/gluster_quota_2 /tmp/gluster_quota_3 | sort > /tmp/gluster_quota_4
+find . -type d > /tmp/gluster_quota_5
+tar -cvf /tmp/gluster_quota_files.tar /tmp/gluster_quota_*
diff --git a/extras/quota/quota_fsck.py b/extras/quota/quota_fsck.py
new file mode 100755
index 00000000000..e62f7fc52a3
--- /dev/null
+++ b/extras/quota/quota_fsck.py
@@ -0,0 +1,377 @@
+#!/usr/bin/python3
+# The following script enables detecting, reporting and fixing
+# anomalies in quota accounting. Run this script with the -h option
+# for further details.
+
+'''
+ Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+'''
+from __future__ import print_function
+import os, sys, re
+from stat import *
+import subprocess
+import argparse
+import xattr
+
+aggr_size = {}
+verbose_mode = False
+mnt_path = None
+brick_path = None
+obj_fix_count = 0
+file_count = 0
+dir_count = 0
+
+#CONSTANTS
+KB = 1024
+MB = 1048576
+GB = 1048576 * 1024
+TB = 1048576 * 1048576
+
+QUOTA_VERBOSE = 0
+QUOTA_META_ABSENT = 1
+QUOTA_SIZE_MISMATCH = 2
+
+IS_DIRTY ='0x3100'
+IS_CLEAN ='0x3000'
+
+
+epilog_msg='''
+ The script attempts to find any gluster accounting issues in the
+ filesystem at the given subtree. The script crawls the given
+ subdirectory tree doing a stat for all files and compares the
+ size reported by gluster quota with the size reported by stat
+    calls. Any mismatch is reported. In addition, the integrity of the
+    marker xattrs is verified.
+ '''
+
+def print_msg(log_type, path, xattr_dict = {}, stbuf = "", dir_size = None):
+ if log_type == QUOTA_VERBOSE:
+ print('%-24s %-60s\nxattr_values: %s\n%s\n' % ("Verbose", path, xattr_dict, stbuf))
+ elif log_type == QUOTA_META_ABSENT:
+ print('%-24s %-60s\n%s\n' % ("Quota-Meta Absent", path, xattr_dict))
+ elif log_type == QUOTA_SIZE_MISMATCH:
+ print("mismatch")
+ if dir_size is not None:
+ print('%24s %60s %12s %12s' % ("Size Mismatch", path,
+ xattr_dict, dir_size))
+ else:
+ print('%-24s %-60s %-12s %-12s' % ("Size Mismatch", path, xattr_dict,
+ stbuf.st_size))
+
+def size_differs_lot(s1, s2):
+ '''
+    There could be minor accounting differences between the stat based
+    accounting and gluster accounting. To avoid these throwing a lot of
+    false positives in our logs, a size threshold could be applied here;
+    currently any non-zero difference is reported.
+    TODO: For a deeply nested directory, differences at higher levels in the
+    hierarchy may not be significant, hence this check needs to be improved.
+ '''
+ if abs(s1-s2) > 0:
+ return True
+ else:
+ return False
+
+def fix_hardlink_accounting(curr_dict, accounted_dict, curr_size):
+ '''
+    Hard links are messy: we have to account them for their parent
+    directory, but stop accounting at the most common ancestor.
+    Eg:
+    say we have 3 hardlinks: /d1/d2/h1, /d1/d3/h2 and /d1/h3
+
+    Suppose we encounter the hard links h1 first, then h2 and then h3.
+    While accounting for h1, we account the size up to root (d2->d1->/).
+    While accounting for h2, we need to account only up to d3 (as d1
+    and / are already accounted for this inode).
+    While accounting for h3 we should not account at all, as all
+    its ancestors are already accounted for the same inode.
+
+    curr_dict : dict of hardlinks that were seen and
+                accounted by the current iteration.
+    accounted_dict : dict of hardlinks that have already been
+                accounted for.
+
+    curr_size : size of the object as accounted by the
+                current iteration.
+
+    Return value:
+    curr_size : size reduced by the hardlink sizes of those
+                hardlinks that have already been accounted
+                in the current subtree.
+    Also deletes the duplicate links from curr_dict.
+ '''
+
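+    # Hardlinks present in both dicts were already charged to an ancestor; subtract their sizes and drop them from curr_dict so they are not double-counted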
+ dual_accounted_links = set(curr_dict.keys()) & set(accounted_dict.keys())
+ for link in dual_accounted_links:
+ curr_size = curr_size - curr_dict[link]
+ del curr_dict[link]
+ return curr_size
+
+
+def fix_xattr(file_name, mark_dirty):
+ global obj_fix_count
+ global mnt_path
+
+ if mnt_path is None:
+ return
+ if mark_dirty:
+ print("MARKING DIRTY: " + file_name)
+ out = subprocess.check_output (["/usr/bin/setfattr", "-n",
+ "trusted.glusterfs.quota.dirty",
+ "-v", IS_DIRTY, file_name])
+ rel_path = os.path.relpath(file_name, brick_path)
+ print("stat on " + mnt_path + "/" + rel_path)
+ stbuf = os.lstat(mnt_path + "/" + rel_path)
+
+ obj_fix_count += 1
+
+def get_quota_xattr_brick(dpath):
+ out = subprocess.check_output (["/usr/bin/getfattr", "--no-dereference",
+ "-d", "-m.", "-e", "hex", dpath])
+ pairs = out.splitlines()
+
+ '''
+ Sample output to be parsed:
+ [root@dhcp35-100 mnt]# getfattr -d -m. -e hex /export/b1/B0/d14/d13/
+ # file: export/b1/B0/d14/d13/
+ security.selinux=0x756e636f6e66696e65645f753a6f626a6563745f723a7573725f743a733000
+ trusted.gfid=0xbae5e0d2d05043de9fd851d91ecf63e8
+ trusted.glusterfs.dht=0x000000010000000000000000ffffffff
+ trusted.glusterfs.dht.mds=0x00000000
+ trusted.glusterfs.quota.6a7675a3-b85a-40c5-830b-de9229d702ce.contri.39=0x00000000000000000000000000000000000000000000000e
+ trusted.glusterfs.quota.dirty=0x3000
+ trusted.glusterfs.quota.size.39=0x00000000000000000000000000000000000000000000000e
+ '''
+
+ '''
+    xattr_dict holds the quota related xattrs parsed from output like the
+    above, e.g. (illustrative values):
+    {'version': '39',
+     'parents': {'<parent-gfid>': {'contri_size': 14,
+                                   'contri_file_count': 1,
+                                   'contri_dir_count': 0}},
+     'size': 14, 'file_count': 1, 'dir_count': 0, 'dirty': False}
+ '''
+
+ xattr_dict = {}
+ xattr_dict['parents'] = {}
+
+ for xattr in pairs[1:]:
+ xattr = xattr.decode("utf-8")
+ xattr_key = xattr.split("=")[0]
+ if xattr_key == "":
+ # skip any empty lines
+ continue
+ elif not re.search("quota", xattr_key):
+ # skip all non quota xattr.
+ continue
+
+ xattr_value = xattr.split("=")[1]
+ if re.search("contri", xattr_key):
+
+ xattr_version = xattr_key.split(".")[5]
+ if 'version' not in xattr_dict:
+ xattr_dict['version'] = xattr_version
+ else:
+ if xattr_version != xattr_dict['version']:
+ print("Multiple xattr version found")
+
+
+ cur_parent = xattr_key.split(".")[3]
+ if cur_parent not in xattr_dict['parents']:
+ xattr_dict['parents'][cur_parent] = {}
+
+ contri_dict = xattr_dict['parents'][cur_parent]
+ if len(xattr_value) == 34:
+ # 34 bytes implies file contri xattr
+ # contri format =0x< 16bytes file size><16bytes file count>
+ # size is obtained in iatt, file count = 1, dir count=0
+ contri_dict['contri_size'] = int(xattr_value[2:18], 16)
+ contri_dict['contri_file_count'] = int(xattr_value[18:34], 16)
+ contri_dict['contri_dir_count'] = 0
+ else:
+ # This is a directory contri.
+ contri_dict['contri_size'] = int(xattr_value[2:18], 16)
+ contri_dict['contri_file_count'] = int(xattr_value[18:34], 16)
+ contri_dict['contri_dir_count'] = int(xattr_value[34:], 16)
+
+ elif re.search("size", xattr_key):
+ xattr_dict['size'] = int(xattr_value[2:18], 16)
+ xattr_dict['file_count'] = int(xattr_value[18:34], 16)
+ xattr_dict['dir_count'] = int(xattr_value[34:], 16)
+ elif re.search("dirty", xattr_key):
+ if xattr_value == IS_CLEAN:
+ xattr_dict['dirty'] = False
+ elif xattr_value == IS_DIRTY:
+ xattr_dict['dirty'] = True
+ elif re.search("limit_objects", xattr_key):
+ xattr_dict['limit_objects'] = int(xattr_value[2:18], 16)
+ elif re.search("limit_set", xattr_key):
+ xattr_dict['limit_set'] = int(xattr_value[2:18], 16)
+
+ return xattr_dict
+
+def verify_file_xattr(path, stbuf = None):
+
+ global file_count
+ file_count += 1
+
+ if stbuf is None:
+ stbuf = os.lstat(path)
+
+ xattr_dict = get_quota_xattr_brick(path)
+
+ for parent in xattr_dict['parents']:
+ contri_dict = xattr_dict['parents'][parent]
+
+ if 'contri_size' not in contri_dict or \
+ 'contri_file_count' not in contri_dict or \
+ 'contri_dir_count' not in contri_dict:
+ print_msg(QUOTA_META_ABSENT, path, xattr_dict, stbuf)
+ fix_xattr(path, False)
+ return
+ elif size_differs_lot(contri_dict['contri_size'], stbuf.st_size):
+ print_msg(QUOTA_SIZE_MISMATCH, path, xattr_dict, stbuf)
+ fix_xattr(path, False)
+ return
+
+ if verbose_mode is True:
+ print_msg(QUOTA_VERBOSE, path, xattr_dict, stbuf)
+
+
+def verify_dir_xattr(path, dir_size):
+
+ global dir_count
+ dir_count += 1
+ xattr_dict = get_quota_xattr_brick(path)
+
+ stbuf = os.lstat(path)
+
+ for parent in xattr_dict['parents']:
+ contri_dict = xattr_dict['parents'][parent]
+
+ if 'size' not in xattr_dict or 'contri_size' not in contri_dict:
+ print_msg(QUOTA_META_ABSENT, path)
+ fix_xattr(path, True)
+ return
+ elif size_differs_lot(dir_size, xattr_dict['size']) or \
+ size_differs_lot(contri_dict['contri_size'], xattr_dict['size']):
+ print_msg(QUOTA_SIZE_MISMATCH, path, xattr_dict, stbuf, dir_size)
+ fix_xattr(path, True)
+ return
+
+ if verbose_mode is True:
+ print_msg("VERBOSE", path, xattr_dict, stbuf, dir_size)
+
+
+def walktree(t_dir, hard_link_dict):
+ '''recursively descend the directory tree rooted at dir,
+ aggregating the size
+ t_dir : directory to walk over.
+ hard_link_dict : dict of inodes with multiple hard_links under t_dir
+ '''
+ global aggr_size
+ aggr_size[t_dir] = 0
+
+ for entry in os.listdir(t_dir):
+ pathname = os.path.join(t_dir, entry)
+ stbuf = os.lstat(pathname)
+ if S_ISDIR(stbuf.st_mode):
+ # It's a directory, recurse into it
+ if entry == '.glusterfs':
+ print("skipping " + pathname)
+ continue
+ descendent_hardlinks = {}
+ subtree_size = walktree(pathname, descendent_hardlinks)
+
+ subtree_size = fix_hardlink_accounting(descendent_hardlinks,
+ hard_link_dict,
+ subtree_size)
+
+ aggr_size[t_dir] = aggr_size[t_dir] + subtree_size
+
+ elif S_ISREG(stbuf.st_mode) or S_ISLNK(stbuf.st_mode):
+ # Even a symbolic link file may have multiple hardlinks.
+
+ file_size = stbuf.st_size
+ if stbuf.st_nlink > 2:
+ # send a single element dict to check if file is accounted.
+ file_size = fix_hardlink_accounting({stbuf.st_ino:stbuf.st_size},
+ hard_link_dict,
+ stbuf.st_size)
+
+ if file_size == 0:
+ print_msg("HARD_LINK (skipped)", pathname, "",
+ stbuf)
+ else:
+ print_msg("HARD_LINK (accounted)", pathname, "",
+ stbuf)
+ hard_link_dict[stbuf.st_ino] = stbuf.st_size
+
+ if t_dir in aggr_size:
+ aggr_size[t_dir] = aggr_size[t_dir] + file_size
+ else:
+ aggr_size[t_dir] = file_size
+ verify_file_xattr(pathname, stbuf)
+
+ else:
+ # Unknown file type, print a message
+ print('Skipping %s, due to file mode' % (pathname))
+
+ if t_dir not in aggr_size:
+ aggr_size[t_dir] = 0
+
+ verify_dir_xattr(t_dir, aggr_size[t_dir])
+ # du also accounts for t_directory sizes
+ # aggr_size[t_dir] += 4096
+
+ #cleanup
+ ret = aggr_size[t_dir]
+ del aggr_size[t_dir]
+ return ret
+
+
+if __name__ == '__main__':
+
+ parser = argparse.ArgumentParser(description='Diagnose quota accounting issues.', epilog=epilog_msg)
+ parser.add_argument('brick_path', nargs=1,
+ help='The brick path (or any descendent sub-directory of brick path)',
+ )
+ parser.add_argument('--full-logs', dest='verbose', action='store_true',
+ help='''
+ log all the xattr values and stat values reported
+ for analysis. [CAUTION: This can give lot of output
+                        for analysis. [CAUTION: This can give a lot of output
+                        depending on the FS depth, so make sure enough
+                        disk space exists if redirecting to a file.]
+ )
+ parser.add_argument('--fix-issues', metavar='mount_path', dest='mnt', action='store',
+ help='''
+ fix accounting issues where the xattr values disagree
+ with stat sizes reported by gluster. A mount is also
+ required for this option to be used.
+ [CAUTION: This will directly modify backend xattr]
+ '''
+ )
+ parser.add_argument('--sub-dir', metavar='sub_dir', dest='sub_dir', action='store',
+ help='''
+ limit the crawling and accounting verification/correction
+ to a specific subdirectory.
+ '''
+ )
+
+ args = parser.parse_args()
+ verbose_mode = args.verbose
+ brick_path = args.brick_path[0]
+ sub_dir = args.sub_dir
+ mnt_path = args.mnt
+ hard_link_dict = {}
+ if sub_dir is not None:
+ walktree(os.path.join(brick_path, sub_dir), hard_link_dict)
+ else:
+ walktree(brick_path, hard_link_dict)
+
+ print("Files verified : " + str(file_count))
+ print("Directories verified : " + str(dir_count))
+ if mnt_path is not None:
+ print("Objects Fixed : " + str(obj_fix_count))
diff --git a/extras/quota/xattr_analysis.py b/extras/quota/xattr_analysis.py
new file mode 100755
index 00000000000..7bd7d96374c
--- /dev/null
+++ b/extras/quota/xattr_analysis.py
@@ -0,0 +1,73 @@
+#!/usr/bin/python3
+# The script below has two purposes
+#  1. Display the xattrs of the entire FS tree in a human readable form
+#  2. Display all the directories where contri and size mismatch.
+#     (If there are any directories with a contri and size mismatch that are
+#      not dirty, then that highlights a propagation issue.)
+# The script takes only one input, LOG_FILE, generated with the command:
+# find <brick_path> | xargs getfattr -d -m. -e hex > log_gluster_xattr
+
+from __future__ import print_function
+import re
+import subprocess
+import sys
+from hurry.filesize import size
+
+if len(sys.argv) < 2:
+ sys.exit('Usage: %s log_gluster_xattr \n'
+ 'to generate log_gluster_xattr use: \n'
+ 'find <brick_path> | xargs getfattr -d -m. -e hex > log_gluster_xattr'
+ % sys.argv[0])
+LOG_FILE=sys.argv[1]
+
+def get_quota_xattr_brick():
+ out = subprocess.check_output (["/usr/bin/cat", LOG_FILE])
+ pairs = out.splitlines()
+
+ xdict = {}
+ mismatch_size = [('====contri_size===', '====size====')]
+ for xattr in pairs:
+ k = xattr.split("=")[0]
+ if re.search("# file:", k):
+ print(xdict)
+ filename=k
+ print("=====" + filename + "=======")
+ xdict = {}
+        elif k == "":
+ pass
+ else:
+ print(xattr)
+ v = xattr.split("=")[1]
+ if re.search("contri", k):
+ if len(v) == 34:
+ # for files size is obtained in iatt, file count should be 1, dir count=0
+ xdict['contri_file_count'] = int(v[18:34], 16)
+ xdict['contri_dir_count'] = 0
+ else:
+ xdict['contri_size'] = size(int(v[2:18], 16))
+ xdict['contri_file_count'] = int(v[18:34], 16)
+ xdict['contri_dir_count'] = int(v[34:], 16)
+ elif re.search("size", k):
+ xdict['size'] = size(int(v[2:18], 16))
+ xdict['file_count'] = int(v[18:34], 16)
+ xdict['dir_count'] = int(v[34:], 16)
+ elif re.search("dirty", k):
+ if v == '0x3000':
+ xdict['dirty'] = False
+ elif v == '0x3100':
+ xdict['dirty'] = True
+ elif re.search("limit_objects", k):
+ xdict['limit_objects'] = int(v[2:18], 16)
+ elif re.search("limit_set", k):
+ xdict['limit_set'] = size(int(v[2:18], 16))
+
+ if 'size' in xdict and 'contri_size' in xdict and xdict['size'] != xdict['contri_size']:
+ mismatch_size.append((xdict['contri_size'], xdict['size'], filename))
+
+ for values in mismatch_size:
+ print(values)
+
+
+if __name__ == '__main__':
+ get_quota_xattr_brick()
+
diff --git a/extras/rebalance.py b/extras/rebalance.py
index 80c614c5dfe..37c68ebbb42 100755
--- a/extras/rebalance.py
+++ b/extras/rebalance.py
@@ -1,4 +1,6 @@
-#!/usr/bin/python
+#!/usr/bin/python3
+
+from __future__ import print_function
import atexit
import copy
@@ -11,6 +13,7 @@ import subprocess
import sys
import tempfile
import volfilter
+import platform
# It's just more convenient to have named fields.
class Brick:
@@ -37,20 +40,20 @@ class Brick:
def get_bricks (host, vol):
t = pipes.Template()
- t.prepend("gluster --remote-host=%s system getspec %s"%(host,vol),".-")
- return t.open(None,"r")
+ t.prepend("gluster --remote-host=%s system getspec %s"%(host, vol), ".-")
+ return t.open(None, "r")
def generate_stanza (vf, all_xlators, cur_subvol):
sv_list = []
for sv in cur_subvol.subvols:
- generate_stanza(vf,all_xlators,sv)
+ generate_stanza(vf, all_xlators, sv)
sv_list.append(sv.name)
- vf.write("volume %s\n"%cur_subvol.name)
- vf.write(" type %s\n"%cur_subvol.type)
- for kvpair in cur_subvol.opts.iteritems():
- vf.write(" option %s %s\n"%kvpair)
+ vf.write("volume %s\n" % cur_subvol.name)
+ vf.write(" type %s\n" % cur_subvol.type)
+ for kvpair in cur_subvol.opts.items():
+ vf.write(" option %s %s\n" % kvpair)
if sv_list:
- vf.write(" subvolumes %s\n"%string.join(sv_list))
+        vf.write("    subvolumes %s\n" % ' '.join(sv_list))
vf.write("end-volume\n\n")
@@ -58,14 +61,14 @@ def mount_brick (localpath, all_xlators, dht_subvol):
# Generate a volfile.
vf_name = localpath + ".vol"
- vf = open(vf_name,"w")
- generate_stanza(vf,all_xlators,dht_subvol)
+ vf = open(vf_name, "w")
+ generate_stanza(vf, all_xlators, dht_subvol)
vf.flush()
vf.close()
# Create a brick directory and mount the brick there.
os.mkdir(localpath)
- subprocess.call(["glusterfs","-f",vf_name,localpath])
+ subprocess.call(["glusterfs", "-f", vf_name, localpath])
# We use the command-line tools because there's no getxattr support in the
# Python standard library (which is ridiculous IMO). Adding the xattr package
@@ -79,16 +82,16 @@ def mount_brick (localpath, all_xlators, dht_subvol):
def get_range (brick):
t = pipes.Template()
cmd = "getfattr -e hex -n trusted.glusterfs.dht %s 2> /dev/null"
- t.prepend(cmd%brick,".-")
- t.append("grep ^trusted.glusterfs.dht=","--")
- f = t.open(None,"r")
+ t.prepend(cmd%brick, ".-")
+ t.append("grep ^trusted.glusterfs.dht=", "--")
+ f = t.open(None, "r")
try:
value = f.readline().rstrip().split('=')[1][2:]
except:
- print "could not get layout for %s (might be OK)" % brick
+ print("could not get layout for %s (might be OK)" % brick)
return None
- v_start = int("0x"+value[16:24],16)
- v_end = int("0x"+value[24:32],16)
+ v_start = int("0x"+value[16:24], 16)
+ v_end = int("0x"+value[24:32], 16)
return (v_start, v_end)
def calc_sizes (bricks, total):
@@ -125,7 +128,7 @@ def normalize (in_bricks):
curr_hash = b.r_end + 1
break
else:
- print "gap found at 0x%08x" % curr_hash
+ print("gap found at 0x%08x" % curr_hash)
sys.exit(1)
return out_bricks + in_bricks, used
@@ -153,8 +156,8 @@ def get_score (bricks):
if __name__ == "__main__":
- my_usage = "%prog [options] server volume [directory]"
- parser = optparse.OptionParser(usage=my_usage)
+ my_usage = "%prog [options] server volume [directory]"
+ parser = optparse.OptionParser(usage=my_usage)
parser.add_option("-f", "--free-space", dest="free_space",
default=False, action="store_true",
help="use free space instead of total space")
@@ -164,7 +167,7 @@ if __name__ == "__main__":
parser.add_option("-v", "--verbose", dest="verbose",
default=False, action="store_true",
help="verbose output")
- options, args = parser.parse_args()
+ options, args = parser.parse_args()
if len(args) == 3:
fix_dir = args[2]
@@ -182,9 +185,9 @@ if __name__ == "__main__":
def cleanup_workdir ():
os.chdir(orig_dir)
if options.verbose:
- print "Cleaning up %s" % work_dir
+ print("Cleaning up %s" % work_dir)
for b in bricks:
- subprocess.call(["umount",b.path])
+ subprocess.call(["umount", b.path])
shutil.rmtree(work_dir)
if not options.leave_mounted:
atexit.register(cleanup_workdir)
@@ -192,44 +195,51 @@ if __name__ == "__main__":
# Mount each brick individually, so we can issue brick-specific calls.
if options.verbose:
- print "Mounting subvolumes..."
+ print("Mounting subvolumes...")
index = 0
- volfile_pipe = get_bricks(hostname,volname)
+ volfile_pipe = get_bricks(hostname, volname)
all_xlators, last_xlator = volfilter.load(volfile_pipe)
for dht_vol in all_xlators.itervalues():
if dht_vol.type == "cluster/distribute":
break
else:
- print "no DHT volume found"
+ print("no DHT volume found")
sys.exit(1)
for sv in dht_vol.subvols:
#print "found subvol %s" % sv.name
lpath = "%s/brick%s" % (work_dir, index)
index += 1
- mount_brick(lpath,all_xlators,sv)
- bricks.append(Brick(lpath,sv.name))
+ mount_brick(lpath, all_xlators, sv)
+ bricks.append(Brick(lpath, sv.name))
if index == 0:
- print "no bricks"
+ print("no bricks")
sys.exit(1)
# Collect all of the sizes.
if options.verbose:
- print "Collecting information..."
+ print("Collecting information...")
total = 0
for b in bricks:
info = os.statvfs(b.path)
+ # On FreeBSD f_bsize (info[0]) contains the optimal I/O size,
+ # not the block size as it's found on Linux. In this case we
+ # use f_frsize (info[1]).
+ if platform.system() == 'FreeBSD':
+ bsize = info[1]
+ else:
+ bsize = info[0]
# We want a standard unit even if different bricks use
# different block sizes. The size is chosen to avoid overflows
# for very large bricks with very small block sizes, but also
# accommodate filesystems which use very large block sizes to
# cheat on benchmarks.
- blocksper100mb = 104857600 / info[0]
+ blocksper100mb = 104857600 / bsize
if options.free_space:
size = info[3] / blocksper100mb
else:
size = info[2] / blocksper100mb
if size <= 0:
- print "brick %s has invalid size %d" % (b.path, size)
+ print("brick %s has invalid size %d" % (b.path, size))
sys.exit(1)
b.set_size(size)
total += size
@@ -240,13 +250,13 @@ if __name__ == "__main__":
if hash_range is not None:
rs, re = hash_range
if rs > re:
- print "%s has backwards hash range" % b.path
+ print("%s has backwards hash range" % b.path)
sys.exit(1)
- b.set_range(hash_range[0],hash_range[1])
+ b.set_range(hash_range[0], hash_range[1])
if options.verbose:
- print "Calculating new layouts..."
- calc_sizes(bricks,total)
+ print("Calculating new layouts...")
+ calc_sizes(bricks, total)
bricks, used = normalize(bricks)
# We can't afford O(n!) here, but O(n^2) should be OK and the result
@@ -254,10 +264,10 @@ if __name__ == "__main__":
while used < len(bricks):
best_place = used
best_score = get_score(bricks)
- for i in xrange(used):
+ for i in range(used):
new_bricks = bricks[:]
del new_bricks[used]
- new_bricks.insert(i,bricks[used])
+ new_bricks.insert(i, bricks[used])
new_score = get_score(new_bricks)
if new_score > best_score:
best_place = i
@@ -265,7 +275,7 @@ if __name__ == "__main__":
if best_place != used:
nb = bricks[used]
del bricks[used]
- bricks.insert(best_place,nb)
+ bricks.insert(best_place, nb)
used += 1
# Finalize whatever we decided on.
@@ -275,25 +285,25 @@ if __name__ == "__main__":
curr_hash += b.good_size
b.r_end = curr_hash - 1
- print "Here are the xattr values for your size-weighted layout:"
+ print("Here are the xattr values for your size-weighted layout:")
for b in bricks:
- print " %s: 0x0000000200000000%08x%08x" % (
- b.sv_name, b.r_start, b.r_end)
+ print(" %s: 0x0000000200000000%08x%08x" % (
+ b.sv_name, b.r_start, b.r_end))
if fix_dir:
if options.verbose:
- print "Fixing layout for %s" % fix_dir
+ print("Fixing layout for %s" % fix_dir)
for b in bricks:
value = "0x0000000200000000%08x%08x" % (
b.r_start, b.r_end)
path = "%s/%s" % (b.path, fix_dir)
cmd = "setfattr -n trusted.glusterfs.dht -v %s %s" % (
value, path)
- print cmd
+ print(cmd)
if options.leave_mounted:
- print "The following subvolumes are still mounted:"
+ print("The following subvolumes are still mounted:")
for b in bricks:
- print "%s on %s" % (b.sv_name, b.path)
- print "Don't forget to clean up when you're done."
+ print("%s on %s" % (b.sv_name, b.path))
+ print("Don't forget to clean up when you're done.")
diff --git a/extras/run-gluster.tmpfiles.in b/extras/run-gluster.tmpfiles.in
index 49a2662c4c8..329f2dde6db 100644
--- a/extras/run-gluster.tmpfiles.in
+++ b/extras/run-gluster.tmpfiles.in
@@ -1,2 +1,2 @@
# hardcoding /run for now, should be detected while building from source?
-d /run/gluster 0755 root root -
+d /run/gluster 0775 gluster gluster -
diff --git a/extras/snap_scheduler/Makefile.am b/extras/snap_scheduler/Makefile.am
index 896595f1504..782f139016f 100644
--- a/extras/snap_scheduler/Makefile.am
+++ b/extras/snap_scheduler/Makefile.am
@@ -1,7 +1,9 @@
snap_schedulerdir = $(sbindir)/
-snap_scheduler_SCRIPTS = gcron.py snap_scheduler.py
+if WITH_SERVER
+snap_scheduler_SCRIPTS = gcron.py snap_scheduler.py conf.py
+endif
-EXTRA_DIST = gcron.py snap_scheduler.py
+EXTRA_DIST = gcron.py snap_scheduler.py conf.py
CLEANFILES =
diff --git a/extras/snap_scheduler/conf.py.in b/extras/snap_scheduler/conf.py.in
new file mode 100644
index 00000000000..6dcca0534a7
--- /dev/null
+++ b/extras/snap_scheduler/conf.py.in
@@ -0,0 +1,11 @@
+#
+# Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+# This file is part of GlusterFS.
+
+# This file is licensed to you under your choice of the GNU Lesser
+# General Public License, version 3 or any later version (LGPLv3 or
+# later), or the GNU General Public License, version 2 (GPLv2), in all
+# cases as published by the Free Software Foundation.
+#
+
+GLUSTERFS_LIBEXECDIR = '@GLUSTERFS_LIBEXECDIR@'
diff --git a/extras/snap_scheduler/gcron.py b/extras/snap_scheduler/gcron.py
index d72057861ff..0e4df77d481 100755
--- a/extras/snap_scheduler/gcron.py
+++ b/extras/snap_scheduler/gcron.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python3
#
# Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
# This file is part of GlusterFS.
@@ -19,10 +19,10 @@ import logging.handlers
import fcntl
-GCRON_TASKS = "/var/run/gluster/shared_storage/snaps/glusterfs_snap_cron_tasks"
+GCRON_TASKS = "/run/gluster/shared_storage/snaps/glusterfs_snap_cron_tasks"
GCRON_CROND_TASK = "/etc/cron.d/glusterfs_snap_cron_tasks"
GCRON_RELOAD_FLAG = "/var/run/gluster/crond_task_reload_flag"
-LOCK_FILE_DIR = "/var/run/gluster/shared_storage/snaps/lock_files/"
+LOCK_FILE_DIR = "/run/gluster/shared_storage/snaps/lock_files/"
log = logging.getLogger("gcron-logger")
start_time = 0.0
@@ -38,7 +38,8 @@ def initLogger(script_name):
sh.setFormatter(formatter)
process = subprocess.Popen(["gluster", "--print-logdir"],
- stdout=subprocess.PIPE)
+ stdout=subprocess.PIPE,
+ universal_newlines=True)
out, err = process.communicate()
if process.returncode == 0:
logfile = os.path.join(out.strip(), script_name[:-3]+".log")
@@ -88,7 +89,7 @@ def takeSnap(volname="", snapname=""):
def doJob(name, lockFile, jobFunc, volname):
success = True
try:
- f = os.open(lockFile, os.O_RDWR | os.O_NONBLOCK)
+ f = os.open(lockFile, os.O_CREAT | os.O_RDWR | os.O_NONBLOCK)
try:
fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
mtime = os.path.getmtime(lockFile)
@@ -105,11 +106,11 @@ def doJob(name, lockFile, jobFunc, volname):
else:
log.info("Job %s has been processed already", name)
fcntl.flock(f, fcntl.LOCK_UN)
- except IOError as (errno, strerror):
+ except (OSError, IOError):
log.info("Job %s is being processed by another agent", name)
os.close(f)
- except IOError as (errno, strerror):
- log.debug("Failed to open lock file %s : %s", lockFile, strerror)
+ except (OSError, IOError) as e:
+ log.debug("Failed to open lock file %s : %s", lockFile, e)
log.error("Failed to process job %s", name)
success = False
@@ -122,19 +123,20 @@ def main():
global start_time
if sys.argv[1] == "--update":
if not os.path.exists(GCRON_TASKS):
- # Create a flag in /var/run/gluster which indicates that this nodes
- # doesn't have access to GCRON_TASKS right now, so that
+ # Create a flag in /var/run/gluster which indicates that this
+ # node doesn't have access to GCRON_TASKS right now, so that
# when the mount is available and GCRON_TASKS is available
# the flag will tell this routine to reload GCRON_CROND_TASK
try:
- f = os.open(GCRON_RELOAD_FLAG, os.O_CREAT | os.O_NONBLOCK, 0644)
+ f = os.open(GCRON_RELOAD_FLAG,
+ os.O_CREAT | os.O_NONBLOCK, 0o644)
os.close(f)
- except OSError as (errno, strerror):
+ except OSError as e:
-                if errno != EEXIST:
+                if e.errno != EEXIST:
log.error("Failed to create %s : %s",
- GCRON_RELOAD_FLAG, strerror)
+ GCRON_RELOAD_FLAG, e)
output("Failed to create %s. Error: %s"
- % (GCRON_RELOAD_FLAG, strerror))
+ % (GCRON_RELOAD_FLAG, e))
return
if not os.path.exists(GCRON_CROND_TASK):
@@ -153,9 +155,9 @@ def main():
if process.returncode != 0:
log.error("Failed to touch %s. Error: %s.",
GCRON_CROND_TASK, err)
- except (IOError, OSError) as (errno, strerror):
+ except (IOError, OSError) as e:
log.error("Failed to touch %s. Error: %s.",
- GCRON_CROND_TASK, strerror)
+ GCRON_CROND_TASK, e)
return
if os.lstat(GCRON_TASKS).st_mtime > \
os.lstat(GCRON_CROND_TASK).st_mtime:
@@ -167,9 +169,9 @@ def main():
if process.returncode != 0:
log.error("Failed to touch %s. Error: %s.",
GCRON_CROND_TASK, err)
- except IOError as (errno, strerror):
+ except IOError as e:
log.error("Failed to touch %s. Error: %s.",
- GCRON_CROND_TASK, strerror)
+ GCRON_CROND_TASK, e)
return
volname = sys.argv[1]
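
Note: the doJob() hunk above now opens the lock file with O_CREAT and catches both OSError and IOError. A minimal standalone sketch of that non-blocking flock pattern (the job name in the lock path is illustrative only):

    # Sketch of the non-blocking lock used by doJob(); the job name is illustrative.
    import fcntl
    import os

    lock_file = "/run/gluster/shared_storage/snaps/lock_files/example_job"
    try:
        fd = os.open(lock_file, os.O_CREAT | os.O_RDWR | os.O_NONBLOCK)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            # ... the job runs here while the exclusive lock is held ...
            fcntl.flock(fd, fcntl.LOCK_UN)
        except (OSError, IOError):
            print("job is being processed by another agent")
        os.close(fd)
    except (OSError, IOError) as e:
        print("failed to open lock file %s: %s" % (lock_file, e))
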
diff --git a/extras/snap_scheduler/snap_scheduler.py b/extras/snap_scheduler/snap_scheduler.py
index af092e2c341..e8fcc449a9b 100755
--- a/extras/snap_scheduler/snap_scheduler.py
+++ b/extras/snap_scheduler/snap_scheduler.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python3
#
# Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
# This file is part of GlusterFS.
@@ -19,12 +19,55 @@ import logging.handlers
import sys
import shutil
from errno import EEXIST
-
+from conf import GLUSTERFS_LIBEXECDIR
+sys.path.insert(1, GLUSTERFS_LIBEXECDIR)
+
+EVENTS_ENABLED = True
+try:
+ from events.eventtypes import SNAPSHOT_SCHEDULER_INITIALISED \
+ as EVENT_SNAPSHOT_SCHEDULER_INITIALISED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_INIT_FAILED \
+ as EVENT_SNAPSHOT_SCHEDULER_INIT_FAILED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_DISABLED \
+ as EVENT_SNAPSHOT_SCHEDULER_DISABLED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_DISABLE_FAILED \
+ as EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_ENABLED \
+ as EVENT_SNAPSHOT_SCHEDULER_ENABLED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_ENABLE_FAILED \
+ as EVENT_SNAPSHOT_SCHEDULER_ENABLE_FAILED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_SCHEDULE_ADDED \
+ as EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADDED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED \
+ as EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_SCHEDULE_DELETED \
+ as EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED \
+ as EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_SCHEDULE_EDITED \
+ as EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDITED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED \
+ as EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED
+except ImportError:
+    # Events APIs are not installed; define dummy eventtypes as None
+ EVENTS_ENABLED = False
+ EVENT_SNAPSHOT_SCHEDULER_INITIALISED = None
+ EVENT_SNAPSHOT_SCHEDULER_INIT_FAILED = None
+ EVENT_SNAPSHOT_SCHEDULER_DISABLED = None
+ EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED = None
+ EVENT_SNAPSHOT_SCHEDULER_ENABLED = None
+ EVENT_SNAPSHOT_SCHEDULER_ENABLE_FAILED = None
+ EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADDED = None
+ EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED = None
+ EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETED = None
+ EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED = None
+ EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDITED = None
+ EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED = None
SCRIPT_NAME = "snap_scheduler"
scheduler_enabled = False
log = logging.getLogger(SCRIPT_NAME)
-SHARED_STORAGE_DIR="/var/run/gluster/shared_storage"
+SHARED_STORAGE_DIR="/run/gluster/shared_storage"
GCRON_DISABLED = SHARED_STORAGE_DIR+"/snaps/gcron_disabled"
GCRON_ENABLED = SHARED_STORAGE_DIR+"/snaps/gcron_enabled"
GCRON_TASKS = SHARED_STORAGE_DIR+"/snaps/glusterfs_snap_cron_tasks"
@@ -55,6 +98,42 @@ INVALID_SCHEDULE = 15
INVALID_ARG = 16
VOLUME_DOES_NOT_EXIST = 17
+def print_error (error_num):
+ if error_num == INTERNAL_ERROR:
+ return "Internal Error"
+ elif error_num == SHARED_STORAGE_DIR_DOESNT_EXIST:
+ return "The shared storage directory ("+SHARED_STORAGE_DIR+")" \
+ " does not exist."
+ elif error_num == SHARED_STORAGE_NOT_MOUNTED:
+ return "The shared storage directory ("+SHARED_STORAGE_DIR+")" \
+ " is not mounted."
+ elif error_num == ANOTHER_TRANSACTION_IN_PROGRESS:
+ return "Another transaction is in progress."
+ elif error_num == INIT_FAILED:
+ return "Initialisation failed."
+ elif error_num == SCHEDULING_ALREADY_DISABLED:
+ return "Snapshot scheduler is already disabled."
+ elif error_num == SCHEDULING_ALREADY_ENABLED:
+ return "Snapshot scheduler is already enabled."
+ elif error_num == NODE_NOT_INITIALISED:
+ return "The node is not initialised."
+ elif error_num == ANOTHER_SCHEDULER_ACTIVE:
+ return "Another scheduler is active."
+ elif error_num == JOB_ALREADY_EXISTS:
+ return "The job already exists."
+ elif error_num == JOB_NOT_FOUND:
+ return "The job cannot be found."
+ elif error_num == INVALID_JOBNAME:
+ return "The job name is invalid."
+ elif error_num == INVALID_VOLNAME:
+ return "The volume name is invalid."
+ elif error_num == INVALID_SCHEDULE:
+ return "The schedule is invalid."
+ elif error_num == INVALID_ARG:
+ return "The argument is invalid."
+ elif error_num == VOLUME_DOES_NOT_EXIST:
+ return "The volume does not exist."
+
def output(msg):
print("%s: %s" % (SCRIPT_NAME, msg))
@@ -70,7 +149,7 @@ def initLogger():
sh.setFormatter(formatter)
process = subprocess.Popen(["gluster", "--print-logdir"],
- stdout=subprocess.PIPE)
+ stdout=subprocess.PIPE, universal_newlines=True)
logfile = os.path.join(process.stdout.read()[:-1], SCRIPT_NAME + ".log")
fh = logging.FileHandler(logfile)
@@ -128,11 +207,11 @@ def enable_scheduler():
os.remove(GCRON_TASKS)
try:
f = os.open(GCRON_ENABLED, os.O_CREAT | os.O_NONBLOCK,
- 0644)
+ 0o644)
os.close(f)
- except OSError as (errno, strerror):
+ except OSError as e:
log.error("Failed to open %s. Error: %s.",
- GCRON_ENABLED, strerror)
+ GCRON_ENABLED, e)
ret = INTERNAL_ERROR
return ret
os.symlink(GCRON_ENABLED, GCRON_TASKS)
@@ -140,8 +219,9 @@ def enable_scheduler():
log.info("Snapshot scheduling is enabled")
output("Snapshot scheduling is enabled")
ret = 0
- except OSError as (errno, strerror):
- print_str = "Failed to enable snapshot scheduling. Error: "+strerror
+ except OSError as e:
+ print_str = ("Failed to enable snapshot scheduling."
+ "Error: {{}}" + e)
log.error(print_str)
output(print_str)
ret = INTERNAL_ERROR
@@ -183,14 +263,15 @@ def disable_scheduler():
os.remove(GCRON_DISABLED)
if os.path.lexists(GCRON_TASKS):
os.remove(GCRON_TASKS)
- f = os.open(GCRON_DISABLED, os.O_CREAT, 0644)
+ f = os.open(GCRON_DISABLED, os.O_CREAT, 0o644)
os.close(f)
os.symlink(GCRON_DISABLED, GCRON_TASKS)
log.info("Snapshot scheduling is disabled")
output("Snapshot scheduling is disabled")
ret = 0
- except OSError as (errno, strerror):
- print_str = "Failed to disable snapshot scheduling. Error: "+strerror
+ except OSError as e:
+ print_str = ("Failed to disable snapshot scheduling. Error: "
+ + e)
log.error(print_str)
output(print_str)
ret = INTERNAL_ERROR
@@ -229,8 +310,8 @@ def load_tasks_from_file():
tasks[jobname] = schedule+":"+volname
f.close()
ret = 0
- except IOError as (errno, strerror):
- log.error("Failed to open %s. Error: %s.", GCRON_ENABLED, strerror)
+ except IOError as e:
+ log.error("Failed to open %s. Error: %s.", GCRON_ENABLED, e)
ret = INTERNAL_ERROR
return ret
@@ -243,8 +324,8 @@ def get_current_scheduler():
current_scheduler = f.readline().rstrip('\n')
f.close()
ret = 0
- except IOError as (errno, strerror):
- log.error("Failed to open %s. Error: %s.", CURRENT_SCHEDULER, strerror)
+ except IOError as e:
+ log.error("Failed to open %s. Error: %s.", CURRENT_SCHEDULER, e)
ret = INTERNAL_ERROR
return ret
@@ -284,7 +365,7 @@ def list_schedules():
def write_tasks_to_file():
try:
- with open(TMP_FILE, "w", 0644) as f:
+ with open(TMP_FILE, "w", 0o644) as f:
# If tasks is empty, just create an empty tmp file
if len(tasks) != 0:
for key in sorted(tasks):
@@ -297,8 +378,8 @@ def write_tasks_to_file():
f.flush()
os.fsync(f.fileno())
f.close()
- except IOError as (errno, strerror):
- log.error("Failed to open %s. Error: %s.", TMP_FILE, strerror)
+ except IOError as e:
+ log.error("Failed to open %s. Error: %s.", TMP_FILE, e)
ret = INTERNAL_ERROR
return ret
@@ -309,13 +390,13 @@ def write_tasks_to_file():
def update_current_scheduler(data):
try:
- with open(TMP_FILE, "w", 0644) as f:
+ with open(TMP_FILE, "w", 0o644) as f:
f.write("%s" % data)
f.flush()
os.fsync(f.fileno())
f.close()
- except IOError as (errno, strerror):
- log.error("Failed to open %s. Error: %s.", TMP_FILE, strerror)
+ except IOError as e:
+ log.error("Failed to open %s. Error: %s.", TMP_FILE, e)
ret = INTERNAL_ERROR
return ret
@@ -378,11 +459,11 @@ def add_schedules(jobname, schedule, volname):
job_lockfile = LOCK_FILE_DIR + jobname
try:
f = os.open(job_lockfile, os.O_CREAT | os.O_NONBLOCK,
- 0644)
+ 0o644)
os.close(f)
- except OSError as (errno, strerror):
+ except OSError as e:
log.error("Failed to open %s. Error: %s.",
- job_lockfile, strerror)
+ job_lockfile, e)
ret = INTERNAL_ERROR
return ret
log.info("Successfully added snapshot schedule %s" %
@@ -410,9 +491,9 @@ def delete_schedules(jobname):
job_lockfile = LOCK_FILE_DIR+jobname
try:
os.remove(job_lockfile)
- except OSError as (errno, strerror):
+ except OSError as e:
log.error("Failed to open %s. Error: %s.",
- job_lockfile, strerror)
+ job_lockfile, e)
ret = INTERNAL_ERROR
return ret
log.info("Successfully deleted snapshot schedule %s"
@@ -466,18 +547,113 @@ def edit_schedules(jobname, schedule, volname):
return ret
+def get_bool_val():
+ getsebool_cli = ["getsebool",
+ "-a"]
+ p1 = subprocess.Popen(getsebool_cli, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ grep_cmd = ["grep",
+ "cron_system_cronjob_use_shares"]
+    p2 = subprocess.Popen(grep_cmd, stdin=p1.stdout,
+                          stdout=subprocess.PIPE,
+                          stderr=subprocess.PIPE,
+                          universal_newlines=True)
+
+ p1.stdout.close()
+ output, err = p2.communicate()
+ rv = p2.returncode
+
+ if rv:
+ log.error("Command output:")
+ log.error(err)
+ return -1
+
+ bool_val = output.split()[2]
+ log.debug("Bool value = '%s'", bool_val)
+
+ return bool_val
+
+def get_selinux_status():
+ getenforce_cli = ["getenforce"]
+ log.debug("Running command '%s'", " ".join(getenforce_cli))
+
+ try:
+        p1 = subprocess.Popen(getenforce_cli, stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE,
+                              universal_newlines=True)
+ except OSError as oserr:
+ log.error("Failed to run the command \"getenforce\". Error: %s" %\
+ oserr)
+ return -1
+
+ output, err = p1.communicate()
+ rv = p1.returncode
+
+ if rv:
+ log.error("Command output:")
+ log.error(err)
+ return -1
+ else:
+ selinux_status=output.rstrip()
+ log.debug("selinux status: %s", selinux_status)
+
+ return selinux_status
+
+def set_cronjob_user_share():
+ selinux_status = get_selinux_status()
+ if (selinux_status == -1):
+ log.error("Failed to get selinux status")
+ return -1
+ elif (selinux_status == "Disabled"):
+ return 0
+
+ bool_val = get_bool_val()
+    # In case of a failure (where the boolean value is not
+    # present in the system) we should not proceed further.
+    # We should only proceed when the value is "off".
+ if (bool_val == -1 or bool_val != "off"):
+ return 0
+
+ setsebool_cli = ["setsebool", "-P",
+ "cron_system_cronjob_use_shares",
+ "on"]
+ log.debug("Running command '%s'", " ".join(setsebool_cli))
+
+ p1 = subprocess.Popen(setsebool_cli, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ output, err = p1.communicate()
+ rv = p1.returncode
+
+ if rv:
+ log.error("Command output:")
+ log.error(err)
+ return rv
+
+ bool_val = get_bool_val()
+ if (bool_val == "on"):
+ return 0
+ else:
+        # In case of an error, or if the boolean is not on,
+        # we return a failure here
+ return -1
def initialise_scheduler():
+ ret = set_cronjob_user_share()
+ if ret:
+ log.error("Failed to set selinux boolean "
+ "cron_system_cronjob_use_shares to 'on'")
+ return ret
+
try:
- with open(TMP_FILE, "w+", 0644) as f:
+ with open(TMP_FILE, "w+", 0o644) as f:
updater = ("* * * * * root PATH=$PATH:/usr/local/sbin:"
"/usr/sbin gcron.py --update\n")
f.write("%s\n" % updater)
f.flush()
os.fsync(f.fileno())
f.close()
- except IOError as (errno, strerror):
- log.error("Failed to open %s. Error: %s.", TMP_FILE, strerror)
+ except IOError as e:
+ log.error("Failed to open %s. Error: %s.", TMP_FILE, e)
ret = INIT_FAILED
return ret
@@ -485,10 +661,10 @@ def initialise_scheduler():
if not os.path.lexists(GCRON_TASKS):
try:
- f = open(GCRON_TASKS, "w", 0644)
+ f = open(GCRON_TASKS, "w", 0o644)
f.close()
- except IOError as (errno, strerror):
- log.error("Failed to open %s. Error: %s.", GCRON_TASKS, strerror)
+ except IOError as e:
+ log.error("Failed to open %s. Error: %s.", GCRON_TASKS, e)
ret = INIT_FAILED
return ret
@@ -499,6 +675,7 @@ def initialise_scheduler():
log.info("Successfully initialised snapshot scheduler for this node")
output("Successfully initialised snapshot scheduler for this node")
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_INITIALISED, status="Success")
ret = 0
return ret
@@ -545,6 +722,8 @@ def perform_operation(args):
ret = initialise_scheduler()
if ret != 0:
output("Failed to initialise snapshot scheduling")
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_INIT_FAILED,
+ error=print_error(ret))
return ret
# Disable snapshot scheduler
@@ -552,6 +731,11 @@ def perform_operation(args):
ret = disable_scheduler()
if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLED,
+ status="Successfully Disabled")
+ else:
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED,
+ error=print_error(ret))
return ret
# Check if the symlink to GCRON_TASKS is properly set in the shared storage
@@ -582,6 +766,11 @@ def perform_operation(args):
ret = enable_scheduler()
if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_ENABLED,
+ status="Successfully Enabled")
+ else:
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_ENABLE_FAILED,
+ error=print_error(ret))
return ret
# Disable snapshot scheduler
@@ -589,6 +778,11 @@ def perform_operation(args):
ret = disable_scheduler()
if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLED,
+ status="Successfully Disabled")
+ else:
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED,
+ error=print_error(ret))
return ret
# List snapshot schedules
@@ -604,6 +798,12 @@ def perform_operation(args):
ret = add_schedules(args.jobname, args.schedule, args.volname)
if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADDED,
+ status="Successfully added job "+args.jobname)
+ else:
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED,
+ status="Failed to add job "+args.jobname,
+ error=print_error(ret))
return ret
# Delete snapshot schedules
@@ -614,6 +814,12 @@ def perform_operation(args):
ret = delete_schedules(args.jobname)
if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETED,
+ status="Successfully deleted job "+args.jobname)
+ else:
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED,
+ status="Failed to delete job "+args.jobname,
+ error=print_error(ret))
return ret
# Edit snapshot schedules
@@ -624,11 +830,22 @@ def perform_operation(args):
ret = edit_schedules(args.jobname, args.schedule, args.volname)
if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDITED,
+ status="Successfully edited job "+args.jobname)
+ else:
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED,
+ status="Failed to edit job "+args.jobname,
+ error=print_error(ret))
return ret
ret = INVALID_ARG
return ret
+def gf_event(event_type, **kwargs):
+ if EVENTS_ENABLED:
+ from events.gf_event import gf_event as gfevent
+ gfevent(event_type, **kwargs)
+
def main(argv):
initLogger()
@@ -679,42 +896,42 @@ def main(argv):
if not os.path.exists(SHARED_STORAGE_DIR+"/snaps/"):
try:
os.makedirs(SHARED_STORAGE_DIR+"/snaps/")
- except OSError as (errno, strerror):
+ except OSError as e:
-            if errno != EEXIST:
+            if e.errno != EEXIST:
- log.error("Failed to create %s : %s", SHARED_STORAGE_DIR+"/snaps/", strerror)
+ log.error("Failed to create %s : %s", SHARED_STORAGE_DIR+"/snaps/", e)
output("Failed to create %s. Error: %s"
- % (SHARED_STORAGE_DIR+"/snaps/", strerror))
+ % (SHARED_STORAGE_DIR+"/snaps/", e))
return INTERNAL_ERROR
if not os.path.exists(GCRON_ENABLED):
- f = os.open(GCRON_ENABLED, os.O_CREAT | os.O_NONBLOCK, 0644)
+ f = os.open(GCRON_ENABLED, os.O_CREAT | os.O_NONBLOCK, 0o644)
os.close(f)
if not os.path.exists(LOCK_FILE_DIR):
try:
os.makedirs(LOCK_FILE_DIR)
- except OSError as (errno, strerror):
+ except OSError as e:
-            if errno != EEXIST:
+            if e.errno != EEXIST:
- log.error("Failed to create %s : %s", LOCK_FILE_DIR, strerror)
+ log.error("Failed to create %s : %s", LOCK_FILE_DIR, e)
output("Failed to create %s. Error: %s"
- % (LOCK_FILE_DIR, strerror))
+ % (LOCK_FILE_DIR, e))
return INTERNAL_ERROR
try:
- f = os.open(LOCK_FILE, os.O_CREAT | os.O_RDWR | os.O_NONBLOCK, 0644)
+ f = os.open(LOCK_FILE, os.O_CREAT | os.O_RDWR | os.O_NONBLOCK, 0o644)
try:
fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
ret = perform_operation(args)
fcntl.flock(f, fcntl.LOCK_UN)
- except IOError as (errno, strerror):
+ except IOError:
log.info("%s is being processed by another agent.", LOCK_FILE)
output("Another snap_scheduler command is running. "
"Please try again after some time.")
return ANOTHER_TRANSACTION_IN_PROGRESS
os.close(f)
- except OSError as (errno, strerror):
- log.error("Failed to open %s : %s", LOCK_FILE, strerror)
- output("Failed to open %s. Error: %s" % (LOCK_FILE, strerror))
+ except OSError as e:
+ log.error("Failed to open %s : %s", LOCK_FILE, e)
+ output("Failed to open %s. Error: %s" % (LOCK_FILE, e))
return INTERNAL_ERROR
return ret
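
Note: the snap_scheduler.py changes make event notification optional: the eventtypes constants are imported from the libexec events package when it is available and replaced with None placeholders otherwise, and gf_event() forwards to events.gf_event only when EVENTS_ENABLED is set. A condensed sketch of that pattern (EXAMPLE_EVENT is a stand-in name used here for brevity, not a real event type):

    # Condensed sketch of the optional events wiring added above.
    EVENTS_ENABLED = True
    try:
        from events.eventtypes import SNAPSHOT_SCHEDULER_INITIALISED as EXAMPLE_EVENT
    except ImportError:
        # Events APIs are not installed; degrade to a no-op.
        EVENTS_ENABLED = False
        EXAMPLE_EVENT = None

    def gf_event(event_type, **kwargs):
        if EVENTS_ENABLED:
            from events.gf_event import gf_event as gfevent
            gfevent(event_type, **kwargs)

    # Callers always fire events; the wrapper drops them when events are absent.
    gf_event(EXAMPLE_EVENT, status="Success")
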
diff --git a/extras/statedumpparse.rb b/extras/statedumpparse.rb
new file mode 100755
index 00000000000..1aff43377db
--- /dev/null
+++ b/extras/statedumpparse.rb
@@ -0,0 +1,208 @@
+#!/usr/bin/env ruby
+
+require 'time'
+require 'optparse'
+
+unless Array.instance_methods.include? :to_h
+ class Array
+ def to_h
+ h = {}
+ each { |k,v| h[k]=v }
+ h
+ end
+ end
+end
+
+# statedump.c:gf_proc_dump_mempool_info uses a five-dash record separator,
+# client.c:client_fd_lk_ctx_dump uses a six-dash record separator.
+ARRSEP = /^(-{5,6}=-{5,6})?$/
+HEAD = /^\[(.*)\]$/
+INPUT_FORMATS = %w[statedump json]
+
+format = 'json'
+input_format = 'statedump'
+tz = '+0000'
+memstat_select,memstat_reject = //,/\Z./
+OptionParser.new do |op|
+ op.banner << " [<] <STATEDUMP>"
+ op.on("-f", "--format=F", "json/yaml/memstat(-[plain|human|json])") { |s| format = s }
+ op.on("--input-format=F", INPUT_FORMATS.join(?/)) { |s| input_format = s }
+ op.on("--timezone=T",
+ "time zone to apply to zoneless timestamps [default UTC]") { |s| tz = s }
+ op.on("--memstat-select=RX", "memstat: select memory types matching RX") { |s|
+ memstat_select = Regexp.new s
+ }
+ op.on("--memstat-reject=RX", "memstat: reject memory types matching RX") { |s|
+ memstat_reject = Regexp.new s
+ }
+end.parse!
+
+
+if format =~ /\Amemstat(?:-(.*))?/
+ memstat_type = $1 || 'plain'
+ unless %w[plain human json].include? memstat_type
+ raise "unknown memstat type #{memstat_type.dump}"
+ end
+ format = 'memstat'
+end
+
+repr, logsep = case format
+when 'yaml'
+ require 'yaml'
+
+ [proc { |e| e.to_yaml }, "\n"]
+when 'json', 'memstat'
+ require 'json'
+
+ [proc { |e| e.to_json }, " "]
+else
+ raise "unkonwn format '#{format}'"
+end
+formatter = proc { |e| puts repr.call(e) }
+
+INPUT_FORMATS.include? input_format or raise "unknown input format '#{input_format}'"
+
+dumpinfo = {}
+
+# parse a statedump entry
+elem_cbk = proc { |s,&cbk|
+ arraylike = false
+ s.grep(/\S/).empty? and next
+ head = nil
+ while s.last =~ /^\s*$/
+ s.pop
+ end
+ body = catch { |misc2|
+ s[0] =~ HEAD ? (head = $1) : (throw misc2)
+ body = [[]]
+ s[1..-1].each { |l|
+ if l =~ ARRSEP
+ arraylike = true
+ body << []
+ next
+ end
+ body.last << l
+ }
+
+ body.reject(&:empty?).map { |e|
+ ea = e.map { |l|
+ k,v = l.split("=",2)
+ m = /\A(0|-?[1-9]\d*)(\.\d+)?\Z/.match v
+ [k, m ? (m[2] ? Float(v) : Integer(v)) : v]
+ }
+ begin
+ ea.to_h
+ rescue
+ throw misc2
+ end
+ }
+ }
+
+ if body
+ cbk.call [head, arraylike ? body : (body.empty? ? {} : body[0])]
+ else
+ STDERR.puts ["WARNING: failed to parse record:", repr.call(s)].join(logsep)
+ end
+}
+
+# aggregator routine
+aggr = case format
+when 'memstat'
+ meminfo = {}
+ # commit memory-related entries to meminfo
+ proc { |k,r|
+ case k
+ when /memusage/
+ (meminfo["GF_MALLOC"]||={})[k] ||= r["size"] if k =~ memstat_select and k !~ memstat_reject
+ when "mempool"
+ r.each {|e|
+ kk = "mempool:#{e['pool-name']}"
+ (meminfo["mempool"]||={})[kk] ||= e["size"] if kk =~ memstat_select and kk !~ memstat_reject
+ }
+ end
+ }
+else
+ # just format data, don't actually aggregate anything
+ proc { |pair| formatter.call pair }
+end
+
+# processing the data
+case input_format
+when 'statedump'
+ acc = []
+ $<.each { |l|
+ l = l.strip
+ if l =~ /^(DUMP-(?:START|END)-TIME):\s+(.*)/
+ dumpinfo["_meta"]||={}
+ (dumpinfo["_meta"]["date"]||={})[$1] = Time.parse([$2, tz].join " ")
+ next
+ end
+
+ if l =~ HEAD
+ elem_cbk.call(acc, &aggr)
+ acc = [l]
+ next
+ end
+
+ acc << l
+ }
+ elem_cbk.call(acc, &aggr)
+when 'json'
+ $<.each { |l|
+ r = JSON.load l
+ case r
+ when Array
+ aggr[r]
+ when Hash
+ dumpinfo.merge! r
+ end
+ }
+end
+
+# final actions: output aggregated data
+case format
+when 'memstat'
+ ma = meminfo.values.map(&:to_a).inject(:+)
+ totals = meminfo.map { |coll,h| [coll, h.values.inject(:+)] }.to_h
+ tt = ma.transpose[1].inject(:+)
+
+ summary_sep,showm = case memstat_type
+ when 'json'
+ ["", proc { |k,v| puts({type: k, value: v}.to_json) }]
+ when 'plain', 'human'
+ # human-friendly number representation
+ hr = proc { |n|
+ qa = %w[B kB MB GB]
+ q = ((1...qa.size).find {|i| n < (1 << i*10)} || qa.size) - 1
+ "%.2f%s" % [n.to_f / (1 << q*10), qa[q]]
+ }
+
+ templ = "%{val} %{key}"
+ tft = proc { |t| t }
+ nft = if memstat_type == 'human'
+ nw = [ma.transpose[1], totals.values, tt].flatten.map{|n| hr[n].size}.max
+ proc { |n|
+ hn = hr[n]
+ " " * (nw - hn.size) + hn
+ }
+ else
+ nw = tt.to_s.size
+ proc { |n| "%#{nw}d" % n }
+ end
+ ## Alternative template, key first:
+ # templ = "%{key} %{val}"
+ # tw = ma.transpose[0].map(&:size).max
+ # tft = proc { |t| t + " " * [tw - t.size, 0].max }
+ # nft = (memstat_type == 'human') ? hr : proc { |n| n }
+ ["\n", proc { |k,v| puts templ % {key: tft[k], val: nft[v]} }]
+ else
+ raise 'this should be impossible'
+ end
+
+ ma.sort_by { |k,v| v }.each(&showm)
+ print summary_sep
+ totals.each { |coll,t| showm.call "Total #{coll}", t }
+ showm.call "TOTAL", tt
+else
+ formatter.call dumpinfo
+end
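
Note: statedumpparse.rb above reads a statedump as "[section]" headers followed by "key=value" lines, with the five- or six-dash separators matched by ARRSEP splitting array-like records. A rough Python sketch of the same record parsing, written only to illustrate the input format the script assumes:

    # Rough sketch of the statedump record structure parsed by the script above.
    import re
    import sys

    HEAD_RX = re.compile(r'^\[(.*)\]$')
    SEP_RX = re.compile(r'^-{5,6}=-{5,6}$')

    def parse_statedump(lines):
        sections = {}
        name, entry = None, None
        for raw in lines:
            line = raw.strip()
            m = HEAD_RX.match(line)
            if m:
                name, entry = m.group(1), {}
                sections.setdefault(name, []).append(entry)
            elif SEP_RX.match(line):
                # separator between array-like entries of the current section
                if name is not None and entry:
                    entry = {}
                    sections[name].append(entry)
            elif name is not None and "=" in line:
                key, value = line.split("=", 1)
                entry[key] = value
        return sections

    if __name__ == "__main__":
        print(parse_statedump(sys.stdin))
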
diff --git a/extras/stop-all-gluster-processes.sh b/extras/stop-all-gluster-processes.sh
index 356a2a63059..710aaf5fd3c 100755
--- a/extras/stop-all-gluster-processes.sh
+++ b/extras/stop-all-gluster-processes.sh
@@ -1,4 +1,37 @@
-#!/usr/bin/env bash
+#!/bin/bash
+#
+# Kill all the processes/services except glusterd
+#
+# Usage: ./extras/stop-all-gluster-processes.sh [-g] [-h]
+# options:
+# -g Terminate in graceful mode
+# -h Show this message, then exit
+#
+# eg:
+# 1. ./extras/stop-all-gluster-processes.sh
+# 2. ./extras/stop-all-gluster-processes.sh -g
+#
+# By default, this script executes in force mode, i.e. all brick, gsyncd
+# and other glustershd services/processes are killed without checking for
+# ongoing tasks such as geo-rep, self-heal and rebalance, which may lead
+# to inconsistency after the node is brought back.
+#
+# When the '-g' option is specified this script works in graceful mode: to
+# maintain data consistency it fails with a meaningful exit code if any of
+# the gluster processes are busy doing their jobs.
+#
+# The author of page [1] proposes restricting user-defined exit codes to the
+# range 64 - 113; see the link for the reasoning behind this choice.
+#
+# The exit code returned by stop-all-gluster-processes.sh:
+# 0 No errors/Success
+# 64 Rebalance is in progress
+# 65 Self-Heal is in progress
+# 66 Tier daemon running on this node
+# 127 option not found
+#
+# [1] http://www.tldp.org/LDP/abs/html/exitcodes.html
+
# global
errors=0
@@ -39,7 +72,7 @@ kill_bricks_and_services()
local pidfile
local pid
- for pidfile in $(find /var/lib/glusterd/ -name '*.pid');
+ for pidfile in $(find /var/run/gluster/ -name '*.pid');
do
local pid=$(cat ${pidfile});
echo "sending SIG${signal} to pid: ${pid}";
@@ -64,21 +97,97 @@ kill_georep_gsync()
fi
}
+# check if all processes are ready to die
+check_background_tasks()
+{
+ volumes=$(gluster vol list)
+ quit=0
+ for volname in ${volumes};
+ do
+ # tiering
+ if [[ $(gluster volume tier ${volname} status 2> /dev/null |
+ grep "localhost" | grep -c "in progress") -gt 0 ]]
+ then
+ quit=66
+ break;
+ fi
+
+ # rebalance
+ if [[ $(gluster volume rebalance ${volname} status 2> /dev/null |
+ grep -c "in progress") -gt 0 ]]
+ then
+ quit=64
+ break;
+ fi
+
+ # self heal
+ if [[ $(gluster volume heal ${volname} info | grep "Number of entries" |
+ awk '{ sum+=$4} END {print sum}') -gt 0 ]];
+ then
+ quit=65
+ break;
+ fi
+
+        # geo-rep, snapshot and quota don't need grace checks,
+        # as they ensure consistency on force kills
+ done
+
+ echo ${quit}
+}
+
+usage()
+{
+ cat <<EOM
+Usage: $0 [-g] [-h]
+ options:
+ -g Terminate in graceful mode
+ -h Show this message, then exit
+
+eg:
+ 1. $0
+ 2. $0 -g
+EOM
+}
+
main()
{
+ while getopts "gh" opt; do
+ case $opt in
+ g)
+ # graceful mode
+ quit=$(check_background_tasks)
+ if [[ ${quit} -ne 0 ]]
+ then
+ exit ${quit};
+ fi
+ # else safe to kill
+ ;;
+ h)
+ usage
+ exit 0;
+ ;;
+ *)
+ usage
+ exit 127;
+ ;;
+ esac
+ done
+ # remove all the options that have been parsed by getopts
+ shift $((OPTIND-1))
+
kill_mounts TERM
- kill_bricks_and_services TERM
kill_georep_gsync TERM
+ kill_bricks_and_services TERM
sleep 5;
echo ""
# still not Terminated? let's pass SIGKILL
kill_mounts KILL
- kill_bricks_and_services KILL
kill_georep_gsync KILL
+ kill_bricks_and_services KILL
exit ${errors};
}
-main
+main "$@"
diff --git a/extras/stripe-merge.c b/extras/stripe-merge.c
index 74bd47e303e..e013a6e6e8a 100644
--- a/extras/stripe-merge.c
+++ b/extras/stripe-merge.c
@@ -28,7 +28,7 @@
#include <stdint.h>
#include <errno.h>
#include <string.h>
-#include <attr/xattr.h>
+#include <sys/xattr.h>
#include <fnmatch.h>
#define ATTRNAME_STRIPE_INDEX "trusted.*.stripe-index"
@@ -40,33 +40,33 @@
#define INVALID_MODE UINT32_MAX
struct file_stripe_info {
- int stripe_count;
- int stripe_size;
- int coalesce;
- mode_t mode;
- int fd[0];
+ int stripe_count;
+ int stripe_size;
+ int coalesce;
+ mode_t mode;
+ int fd[0];
};
-static int close_files(struct file_stripe_info *);
+static int
+close_files(struct file_stripe_info *);
-static struct
-file_stripe_info *alloc_file_stripe_info(int count)
+static struct file_stripe_info *
+alloc_file_stripe_info(int count)
{
- int i;
- struct file_stripe_info *finfo;
+ int i;
+ struct file_stripe_info *finfo;
- finfo = calloc(1, sizeof(struct file_stripe_info) +
- (sizeof(int) * count));
- if (!finfo)
- return NULL;
+ finfo = calloc(1, sizeof(struct file_stripe_info) + (sizeof(int) * count));
+ if (!finfo)
+ return NULL;
- for (i = 0; i < count; i++)
- finfo->fd[i] = INVALID_FD;
+ for (i = 0; i < count; i++)
+ finfo->fd[i] = INVALID_FD;
- finfo->mode = INVALID_MODE;
- finfo->coalesce = INVALID_FD;
+ finfo->mode = INVALID_MODE;
+ finfo->coalesce = INVALID_FD;
- return finfo;
+ return finfo;
}
/*
@@ -77,39 +77,39 @@ file_stripe_info *alloc_file_stripe_info(int count)
static int
get_stripe_attr_name(const char *path, const char *pattern, char **attrname)
{
- char attrbuf[4096];
- char *ptr, *match = NULL;
- int len, r, match_count = 0;
-
- if (!path || !pattern || !attrname)
- return -1;
-
- len = listxattr(path, attrbuf, sizeof(attrbuf));
- if (len < 0)
- return len;
-
- ptr = attrbuf;
- while (ptr) {
- r = fnmatch(pattern, ptr, 0);
- if (!r) {
- if (!match)
- match = ptr;
- match_count++;
- } else if (r != FNM_NOMATCH) {
- return -1;
- }
-
- len -= strlen(ptr) + 1;
- if (len > 0)
- ptr += strlen(ptr) + 1;
- else
- ptr = NULL;
- }
-
- if (match)
- *attrname = strdup(match);
-
- return match_count;
+ char attrbuf[4096];
+ char *ptr, *match = NULL;
+ int len, r, match_count = 0;
+
+ if (!path || !pattern || !attrname)
+ return -1;
+
+ len = listxattr(path, attrbuf, sizeof(attrbuf));
+ if (len < 0)
+ return len;
+
+ ptr = attrbuf;
+ while (ptr) {
+ r = fnmatch(pattern, ptr, 0);
+ if (!r) {
+ if (!match)
+ match = ptr;
+ match_count++;
+ } else if (r != FNM_NOMATCH) {
+ return -1;
+ }
+
+ len -= strlen(ptr) + 1;
+ if (len > 0)
+ ptr += strlen(ptr) + 1;
+ else
+ ptr = NULL;
+ }
+
+ if (match)
+ *attrname = strdup(match);
+
+ return match_count;
}
/*
@@ -118,19 +118,19 @@ get_stripe_attr_name(const char *path, const char *pattern, char **attrname)
static int
get_stripe_attr_val(const char *path, const char *attr, int *val)
{
- char attrbuf[4096];
- int len;
+ char attrbuf[4096];
+ int len;
- if (!path || !attr || !val)
- return -1;
+ if (!path || !attr || !val)
+ return -1;
- len = getxattr(path, attr, attrbuf, sizeof(attrbuf));
- if (len < 0)
- return len;
+ len = getxattr(path, attr, attrbuf, sizeof(attrbuf));
+ if (len < 0)
+ return len;
- *val = atoi(attrbuf);
+ *val = atoi(attrbuf);
- return 0;
+ return 0;
}
/*
@@ -145,29 +145,31 @@ get_stripe_attr_val(const char *path, const char *attr, int *val)
static int
get_attr(const char *path, const char *pattern, char **buf, int *val)
{
- int count = 1;
-
- if (!buf)
- return -1;
-
- if (!*buf) {
- count = get_stripe_attr_name(path, pattern, buf);
- if (count > 1) {
- /* pattern isn't good enough */
- fprintf(stderr, "ERROR: duplicate attributes found "
- "matching pattern: %s\n", pattern);
- free(*buf);
- *buf = NULL;
- return count;
- } else if (count < 1) {
- return count;
- }
- }
-
- if (get_stripe_attr_val(path, *buf, val) < 0)
- return -1;
-
- return count;
+ int count = 1;
+
+ if (!buf)
+ return -1;
+
+ if (!*buf) {
+ count = get_stripe_attr_name(path, pattern, buf);
+ if (count > 1) {
+ /* pattern isn't good enough */
+ fprintf(stderr,
+ "ERROR: duplicate attributes found "
+ "matching pattern: %s\n",
+ pattern);
+ free(*buf);
+ *buf = NULL;
+ return count;
+ } else if (count < 1) {
+ return count;
+ }
+ }
+
+ if (get_stripe_attr_val(path, *buf, val) < 0)
+ return -1;
+
+ return count;
}
/*
@@ -178,164 +180,168 @@ get_attr(const char *path, const char *pattern, char **buf, int *val)
* print a warning if any files are missing. We proceed without error in the
* latter case to support partial recovery.
*/
-static struct
-file_stripe_info *validate_and_open_files(char *paths[], int count)
+static struct file_stripe_info *
+validate_and_open_files(char *paths[], int count)
{
- int i, val, tmp;
- struct stat sbuf;
- char *stripe_count_attr = NULL;
- char *stripe_size_attr = NULL;
- char *stripe_index_attr = NULL;
- char *stripe_coalesce_attr = NULL;
- struct file_stripe_info *finfo = NULL;
-
- for (i = 0; i < count; i++) {
- if (!paths[i])
- goto err;
-
- /*
- * Check the stripe count first so we can allocate the info
- * struct with the appropriate number of fds.
- */
- if (get_attr(paths[i], ATTRNAME_STRIPE_COUNT,
- &stripe_count_attr, &val) != 1) {
- fprintf(stderr, "ERROR: %s: attribute: '%s'\n",
- paths[i], ATTRNAME_STRIPE_COUNT);
- goto err;
- }
- if (!finfo) {
- finfo = alloc_file_stripe_info(val);
- if (!finfo)
- goto err;
-
- if (val != count)
- fprintf(stderr, "WARNING: %s: stripe-count "
- "(%d) != file count (%d). Result may "
- "be incomplete.\n", paths[i], val,
- count);
-
- finfo->stripe_count = val;
- } else if (val != finfo->stripe_count) {
- fprintf(stderr, "ERROR %s: invalid stripe count: %d "
- "(expected %d)\n", paths[i], val,
- finfo->stripe_count);
- goto err;
- }
-
- /*
- * Get and validate the chunk size.
- */
- if (get_attr(paths[i], ATTRNAME_STRIPE_SIZE, &stripe_size_attr,
- &val) != 1) {
- fprintf(stderr, "ERROR: %s: attribute: '%s'\n",
- paths[i], ATTRNAME_STRIPE_SIZE);
- goto err;
- }
-
- if (!finfo->stripe_size) {
- finfo->stripe_size = val;
- } else if (val != finfo->stripe_size) {
- fprintf(stderr, "ERROR: %s: invalid stripe size: %d "
- "(expected %d)\n", paths[i], val,
- finfo->stripe_size);
- goto err;
- }
-
- /*
- * stripe-coalesce is a backward compatible attribute. If the
- * attribute does not exist, assume a value of zero for the
- * traditional stripe format.
- */
- tmp = get_attr(paths[i], ATTRNAME_STRIPE_COALESCE,
- &stripe_coalesce_attr, &val);
- if (!tmp) {
- val = 0;
- } else if (tmp != 1) {
- fprintf(stderr, "ERROR: %s: attribute: '%s'\n",
- paths[i], ATTRNAME_STRIPE_COALESCE);
- goto err;
- }
-
- if (finfo->coalesce == INVALID_FD) {
- finfo->coalesce = val;
- } else if (val != finfo->coalesce) {
- fprintf(stderr, "ERROR: %s: invalid coalesce flag\n",
- paths[i]);
- goto err;
- }
-
- /*
- * Get/validate the stripe index and open the file in the
- * appropriate fd slot.
- */
- if (get_attr(paths[i], ATTRNAME_STRIPE_INDEX,
- &stripe_index_attr, &val) != 1) {
- fprintf(stderr, "ERROR: %s: attribute: '%s'\n",
- paths[i], ATTRNAME_STRIPE_INDEX);
- goto err;
- }
- if (finfo->fd[val] != INVALID_FD) {
- fprintf(stderr, "ERROR: %s: duplicate stripe index: "
- "%d\n", paths[i], val);
- goto err;
- }
-
- finfo->fd[val] = open(paths[i], O_RDONLY);
- if (finfo->fd[val] < 0)
- goto err;
-
- /*
- * Get the creation mode for the file.
- */
- if (fstat(finfo->fd[val], &sbuf) < 0)
- goto err;
- if (finfo->mode == INVALID_MODE) {
- finfo->mode = sbuf.st_mode;
- } else if (sbuf.st_mode != finfo->mode) {
- fprintf(stderr, "ERROR: %s: invalid mode\n", paths[i]);
- goto err;
- }
- }
-
- free(stripe_count_attr);
- free(stripe_size_attr);
- free(stripe_index_attr);
- free(stripe_coalesce_attr);
-
- return finfo;
+ int i, val, tmp;
+ struct stat sbuf;
+ char *stripe_count_attr = NULL;
+ char *stripe_size_attr = NULL;
+ char *stripe_index_attr = NULL;
+ char *stripe_coalesce_attr = NULL;
+ struct file_stripe_info *finfo = NULL;
+
+ for (i = 0; i < count; i++) {
+ if (!paths[i])
+ goto err;
+
+ /*
+ * Check the stripe count first so we can allocate the info
+ * struct with the appropriate number of fds.
+ */
+ if (get_attr(paths[i], ATTRNAME_STRIPE_COUNT, &stripe_count_attr,
+ &val) != 1) {
+ fprintf(stderr, "ERROR: %s: attribute: '%s'\n", paths[i],
+ ATTRNAME_STRIPE_COUNT);
+ goto err;
+ }
+ if (!finfo) {
+ finfo = alloc_file_stripe_info(val);
+ if (!finfo)
+ goto err;
+
+ if (val != count)
+ fprintf(stderr,
+ "WARNING: %s: stripe-count "
+ "(%d) != file count (%d). Result may "
+ "be incomplete.\n",
+ paths[i], val, count);
+
+ finfo->stripe_count = val;
+ } else if (val != finfo->stripe_count) {
+ fprintf(stderr,
+ "ERROR %s: invalid stripe count: %d "
+ "(expected %d)\n",
+ paths[i], val, finfo->stripe_count);
+ goto err;
+ }
+
+ /*
+ * Get and validate the chunk size.
+ */
+ if (get_attr(paths[i], ATTRNAME_STRIPE_SIZE, &stripe_size_attr, &val) !=
+ 1) {
+ fprintf(stderr, "ERROR: %s: attribute: '%s'\n", paths[i],
+ ATTRNAME_STRIPE_SIZE);
+ goto err;
+ }
+
+ if (!finfo->stripe_size) {
+ finfo->stripe_size = val;
+ } else if (val != finfo->stripe_size) {
+ fprintf(stderr,
+ "ERROR: %s: invalid stripe size: %d "
+ "(expected %d)\n",
+ paths[i], val, finfo->stripe_size);
+ goto err;
+ }
+
+ /*
+ * stripe-coalesce is a backward compatible attribute. If the
+ * attribute does not exist, assume a value of zero for the
+ * traditional stripe format.
+ */
+ tmp = get_attr(paths[i], ATTRNAME_STRIPE_COALESCE,
+ &stripe_coalesce_attr, &val);
+ if (!tmp) {
+ val = 0;
+ } else if (tmp != 1) {
+ fprintf(stderr, "ERROR: %s: attribute: '%s'\n", paths[i],
+ ATTRNAME_STRIPE_COALESCE);
+ goto err;
+ }
+
+ if (finfo->coalesce == INVALID_FD) {
+ finfo->coalesce = val;
+ } else if (val != finfo->coalesce) {
+ fprintf(stderr, "ERROR: %s: invalid coalesce flag\n", paths[i]);
+ goto err;
+ }
+
+ /*
+ * Get/validate the stripe index and open the file in the
+ * appropriate fd slot.
+ */
+ if (get_attr(paths[i], ATTRNAME_STRIPE_INDEX, &stripe_index_attr,
+ &val) != 1) {
+ fprintf(stderr, "ERROR: %s: attribute: '%s'\n", paths[i],
+ ATTRNAME_STRIPE_INDEX);
+ goto err;
+ }
+ if (finfo->fd[val] != INVALID_FD) {
+ fprintf(stderr,
+ "ERROR: %s: duplicate stripe index: "
+ "%d\n",
+ paths[i], val);
+ goto err;
+ }
+
+ finfo->fd[val] = open(paths[i], O_RDONLY);
+ if (finfo->fd[val] < 0)
+ goto err;
+
+ /*
+ * Get the creation mode for the file.
+ */
+ if (fstat(finfo->fd[val], &sbuf) < 0)
+ goto err;
+ if (finfo->mode == INVALID_MODE) {
+ finfo->mode = sbuf.st_mode;
+ } else if (sbuf.st_mode != finfo->mode) {
+ fprintf(stderr, "ERROR: %s: invalid mode\n", paths[i]);
+ goto err;
+ }
+ }
+
+ free(stripe_count_attr);
+ free(stripe_size_attr);
+ free(stripe_index_attr);
+ free(stripe_coalesce_attr);
+
+ return finfo;
err:
- free(stripe_count_attr);
- free(stripe_size_attr);
- free(stripe_index_attr);
- free(stripe_coalesce_attr);
+ free(stripe_count_attr);
+ free(stripe_size_attr);
+ free(stripe_index_attr);
+ free(stripe_coalesce_attr);
- if (finfo) {
- close_files(finfo);
- free(finfo);
- }
+ if (finfo) {
+ close_files(finfo);
+ free(finfo);
+ }
- return NULL;
+ return NULL;
}
static int
close_files(struct file_stripe_info *finfo)
{
- int i, ret;
+ int i, ret;
- if (!finfo)
- return -1;
+ if (!finfo)
+ return -1;
- for (i = 0; i < finfo->stripe_count; i++) {
- if (finfo->fd[i] == INVALID_FD)
- continue;
+ for (i = 0; i < finfo->stripe_count; i++) {
+ if (finfo->fd[i] == INVALID_FD)
+ continue;
- ret = close(finfo->fd[i]);
- if (ret < 0)
- return ret;
- }
+ ret = close(finfo->fd[i]);
+ if (ret < 0)
+ return ret;
+ }
- return ret;
+ return ret;
}
/*
@@ -351,43 +357,43 @@ close_files(struct file_stripe_info *finfo)
static int
generate_file_coalesce(int target, struct file_stripe_info *finfo)
{
- char *buf;
- int ret = 0;
- int r, w, i;
-
- buf = malloc(finfo->stripe_size);
- if (!buf)
- return -1;
-
- i = 0;
- while (1) {
- if (finfo->fd[i] == INVALID_FD) {
- if (lseek(target, finfo->stripe_size, SEEK_CUR) < 0)
- break;
-
- i = (i + 1) % finfo->stripe_count;
- continue;
- }
-
- r = read(finfo->fd[i], buf, finfo->stripe_size);
- if (r < 0) {
- ret = r;
- break;
- }
- if (!r)
- break;
-
- w = write(target, buf, r);
- if (w < 0) {
- ret = w;
- break;
- }
-
- i = (i + 1) % finfo->stripe_count;
- }
-
- free(buf);
- return ret;
+ char *buf;
+ int ret = 0;
+ int r, w, i;
+
+ buf = malloc(finfo->stripe_size);
+ if (!buf)
+ return -1;
+
+ i = 0;
+ while (1) {
+ if (finfo->fd[i] == INVALID_FD) {
+ if (lseek(target, finfo->stripe_size, SEEK_CUR) < 0)
+ break;
+
+ i = (i + 1) % finfo->stripe_count;
+ continue;
+ }
+
+ r = read(finfo->fd[i], buf, finfo->stripe_size);
+ if (r < 0) {
+ ret = r;
+ break;
+ }
+ if (!r)
+ break;
+
+ w = write(target, buf, r);
+ if (w < 0) {
+ ret = w;
+ break;
+ }
+
+ i = (i + 1) % finfo->stripe_count;
+ }
+
+ free(buf);
+ return ret;
}
/*
@@ -398,97 +404,100 @@ generate_file_coalesce(int target, struct file_stripe_info *finfo)
static int
generate_file_traditional(int target, struct file_stripe_info *finfo)
{
- int i, j, max_ret, ret;
- char buf[finfo->stripe_count][4096];
-
- do {
- char newbuf[4096] = {0, };
-
- max_ret = 0;
- for (i = 0; i < finfo->stripe_count; i++) {
- memset(buf[i], 0, 4096);
- ret = read(finfo->fd[i], buf[i], 4096);
- if (ret > max_ret)
- max_ret = ret;
- }
- for (i = 0; i < max_ret; i++)
- for (j = 0; j < finfo->stripe_count; j++)
- newbuf[i] |= buf[j][i];
- write(target, newbuf, max_ret);
- } while (max_ret);
-
- return 0;
+ int i, j, max_ret, ret;
+ char buf[finfo->stripe_count][4096];
+
+ do {
+ char newbuf[4096] = {
+ 0,
+ };
+
+ max_ret = 0;
+ for (i = 0; i < finfo->stripe_count; i++) {
+ memset(buf[i], 0, 4096);
+ ret = read(finfo->fd[i], buf[i], 4096);
+ if (ret > max_ret)
+ max_ret = ret;
+ }
+ for (i = 0; i < max_ret; i++)
+ for (j = 0; j < finfo->stripe_count; j++)
+ newbuf[i] |= buf[j][i];
+ write(target, newbuf, max_ret);
+ } while (max_ret);
+
+ return 0;
}
static int
generate_file(int target, struct file_stripe_info *finfo)
{
- if (finfo->coalesce)
- return generate_file_coalesce(target, finfo);
+ if (finfo->coalesce)
+ return generate_file_coalesce(target, finfo);
- return generate_file_traditional(target, finfo);
+ return generate_file_traditional(target, finfo);
}
static void
usage(char *name)
{
- fprintf(stderr, "Usage: %s [-o <outputfile>] <inputfile1> "
- "<inputfile2> ...\n", name);
+ fprintf(stderr,
+ "Usage: %s [-o <outputfile>] <inputfile1> "
+ "<inputfile2> ...\n",
+ name);
}
int
main(int argc, char *argv[])
{
- int file_count, opt;
- char *opath = NULL;
- int targetfd;
- struct file_stripe_info *finfo;
-
- while ((opt = getopt(argc, argv, "o:")) != -1) {
- switch (opt) {
- case 'o':
- opath = optarg;
- break;
- default:
- usage(argv[0]);
- return -1;
- }
- }
-
- file_count = argc - optind;
-
- if (!opath || !file_count) {
- usage(argv[0]);
- return -1;
- }
-
- finfo = validate_and_open_files(&argv[optind], file_count);
- if (!finfo)
- goto err;
-
- targetfd = open(opath, O_RDWR|O_CREAT, finfo->mode);
- if (targetfd < 0)
- goto err;
-
- if (generate_file(targetfd, finfo) < 0)
- goto err;
-
- if (fsync(targetfd) < 0)
- fprintf(stderr, "ERROR: %s\n", strerror(errno));
- if (close(targetfd) < 0)
- fprintf(stderr, "ERROR: %s\n", strerror(errno));
-
- close_files(finfo);
- free(finfo);
-
- return 0;
+ int file_count, opt;
+ char *opath = NULL;
+ int targetfd;
+ struct file_stripe_info *finfo;
+
+ while ((opt = getopt(argc, argv, "o:")) != -1) {
+ switch (opt) {
+ case 'o':
+ opath = optarg;
+ break;
+ default:
+ usage(argv[0]);
+ return -1;
+ }
+ }
+
+ file_count = argc - optind;
+
+ if (!opath || !file_count) {
+ usage(argv[0]);
+ return -1;
+ }
+
+ finfo = validate_and_open_files(&argv[optind], file_count);
+ if (!finfo)
+ goto err;
+
+ targetfd = open(opath, O_RDWR | O_CREAT, finfo->mode);
+ if (targetfd < 0)
+ goto err;
+
+ if (generate_file(targetfd, finfo) < 0)
+ goto err;
+
+ if (fsync(targetfd) < 0)
+ fprintf(stderr, "ERROR: %s\n", strerror(errno));
+ if (close(targetfd) < 0)
+ fprintf(stderr, "ERROR: %s\n", strerror(errno));
+
+ close_files(finfo);
+ free(finfo);
+
+ return 0;
err:
- if (finfo) {
- close_files(finfo);
- free(finfo);
- }
+ if (finfo) {
+ close_files(finfo);
+ free(finfo);
+ }
- return -1;
+ return -1;
}
-
diff --git a/extras/systemd/Makefile.am b/extras/systemd/Makefile.am
index 3f0ec89537a..61446a9b84a 100644
--- a/extras/systemd/Makefile.am
+++ b/extras/systemd/Makefile.am
@@ -1,7 +1,17 @@
-CLEANFILES = glusterd.service
-EXTRA_DIST = glusterd.service.in
+CLEANFILES = glusterd.service glustereventsd.service glusterfssharedstorage.service gluster-ta-volume.service
+EXTRA_DIST = glusterd.service.in glustereventsd.service.in glusterfssharedstorage.service.in gluster-ta-volume.service.in
if USE_SYSTEMD
+systemd_DATA = gluster-ta-volume.service
+endif
+
+if WITH_SERVER
+if USE_SYSTEMD
# systemddir is already defined through configure.ac
-systemd_DATA = glusterd.service
+systemd_DATA += glusterd.service glusterfssharedstorage.service
+
+if BUILD_EVENTS
+systemd_DATA += glustereventsd.service
+endif
+endif
endif
diff --git a/extras/systemd/gluster-ta-volume.service.in b/extras/systemd/gluster-ta-volume.service.in
new file mode 100644
index 00000000000..2802bca05bf
--- /dev/null
+++ b/extras/systemd/gluster-ta-volume.service.in
@@ -0,0 +1,13 @@
+[Unit]
+Description=GlusterFS, Thin-arbiter process to maintain quorum for replica volume
+After=network.target
+
+[Service]
+Environment="LOG_LEVEL=WARNING"
+ExecStart=@prefix@/sbin/glusterfsd -N --volfile-id ta -f @GLUSTERD_WORKDIR@/thin-arbiter/thin-arbiter.vol --brick-port 24007 --xlator-option ta-server.transport.socket.listen-port=24007 -LWARNING
+Restart=always
+KillMode=process
+SuccessExitStatus=15
+
+[Install]
+WantedBy=multi-user.target
diff --git a/extras/systemd/glusterd.service.in b/extras/systemd/glusterd.service.in
index 26694cfc8ab..abb0d82911f 100644
--- a/extras/systemd/glusterd.service.in
+++ b/extras/systemd/glusterd.service.in
@@ -1,7 +1,10 @@
[Unit]
Description=GlusterFS, a clustered file-system server
-Requires=rpcbind.service
-After=network.target rpcbind.service
+Documentation=man:glusterd(8)
+StartLimitBurst=6
+StartLimitIntervalSec=3600
+Requires=@RPCBIND_SERVICE@
+After=network.target @RPCBIND_SERVICE@
Before=network-online.target
[Service]
@@ -9,9 +12,15 @@ Type=forking
PIDFile=@localstatedir@/run/glusterd.pid
LimitNOFILE=65536
Environment="LOG_LEVEL=INFO"
-EnvironmentFile=-@sysconfdir@/sysconfig/glusterd
+EnvironmentFile=-@SYSCONF_DIR@/sysconfig/glusterd
ExecStart=@prefix@/sbin/glusterd -p @localstatedir@/run/glusterd.pid --log-level $LOG_LEVEL $GLUSTERD_OPTIONS
KillMode=process
+TimeoutSec=300
+SuccessExitStatus=15
+Restart=on-abnormal
+RestartSec=60
+StartLimitBurst=6
+StartLimitInterval=3600
[Install]
WantedBy=multi-user.target
diff --git a/extras/systemd/glustereventsd.service.in b/extras/systemd/glustereventsd.service.in
new file mode 100644
index 00000000000..f80b78199f6
--- /dev/null
+++ b/extras/systemd/glustereventsd.service.in
@@ -0,0 +1,16 @@
+[Unit]
+Description=Gluster Events Notifier
+After=network.target
+Documentation=man:glustereventsd(8)
+
+
+[Service]
+Environment=PYTHONPATH=@BUILD_PYTHON_SITE_PACKAGES_EXPANDED@:$PYTHONPATH
+Type=simple
+ExecStart=@SBIN_DIR@/glustereventsd --pid-file @localstatedir@/run/glustereventsd.pid
+ExecReload=/bin/kill -SIGUSR2 $MAINPID
+KillMode=control-group
+PIDFile=@localstatedir@/run/glustereventsd.pid
+
+[Install]
+WantedBy=multi-user.target
diff --git a/extras/systemd/glusterfssharedstorage.service.in b/extras/systemd/glusterfssharedstorage.service.in
new file mode 100644
index 00000000000..723ff49afb7
--- /dev/null
+++ b/extras/systemd/glusterfssharedstorage.service.in
@@ -0,0 +1,13 @@
+[Unit]
+Description=Mount glusterfs sharedstorage
+Requires=glusterd.service remote-fs-pre.target local-fs.target
+
+[Service]
+Type=forking
+ExecStart=@GLUSTERFS_LIBEXECDIR@/mount-shared-storage.sh
+Restart=on-failure
+RestartSec=3
+RestartForceExitStatus=1
+
+[Install]
+WantedBy=multi-user.target
diff --git a/extras/test/ld-preload-test/ld-preload-lib.c b/extras/test/ld-preload-test/ld-preload-lib.c
index 8f74a25cf68..d120c053a69 100644
--- a/extras/test/ld-preload-test/ld-preload-lib.c
+++ b/extras/test/ld-preload-test/ld-preload-lib.c
@@ -34,594 +34,582 @@
#include <fcntl.h>
#include <sys/stat.h>
#include <dirent.h>
-#include <attr/xattr.h>
+#include <sys/xattr.h>
#include <sys/sendfile.h>
/* Err number that is assigned to errno so that test application can
* verify that the function was intercepted correctly.
*/
-#define PRELOAD_ERRNO_VERF 6449
-#define set_errno() (errno = PRELOAD_ERRNO_VERF)
+#define PRELOAD_ERRNO_VERF 6449
+#define set_errno() (errno = PRELOAD_ERRNO_VERF)
void
-intercept (char *call, int tabs)
+intercept(char *call, int tabs)
{
- while (tabs > 0) {
- fprintf (stdout, "\t");
- --tabs;
- }
+ while (tabs > 0) {
+ fprintf(stdout, "\t");
+ --tabs;
+ }
- fprintf (stdout, "Intercepted by %s", call);
+ fprintf(stdout, "Intercepted by %s", call);
}
int
-creat64 (const char *pathname, mode_t mode)
+creat64(const char *pathname, mode_t mode)
{
- intercept ("creat64", 2);
- set_errno ();
- return -1;
+ intercept("creat64", 2);
+ set_errno();
+ return -1;
}
int
-creat (const char *pathname, mode_t mode)
+creat(const char *pathname, mode_t mode)
{
- intercept ("creat", 2);
- set_errno ();
- return -1;
+ intercept("creat", 2);
+ set_errno();
+ return -1;
}
-
int
-close (int fd)
+close(int fd)
{
- intercept ("close", 2);
- set_errno ();
- return -1;
+ intercept("close", 2);
+ set_errno();
+ return -1;
}
int
-open64 (const char *pathname, int flags, ...)
+open64(const char *pathname, int flags, ...)
{
- intercept ("open64", 2);
- set_errno ();
- return -1;
+ intercept("open64", 2);
+ set_errno();
+ return -1;
}
-
int
-open (const char *pathname, int flags, ...)
+open(const char *pathname, int flags, ...)
{
- intercept ("open", 2);
- set_errno ();
- return -1;
+ intercept("open", 2);
+ set_errno();
+ return -1;
}
ssize_t
-read (int fd, void *buf, size_t count)
+read(int fd, void *buf, size_t count)
{
- intercept ("read", 2);
- set_errno ();
- return -1;
+ intercept("read", 2);
+ set_errno();
+ return -1;
}
ssize_t
-readv (int fd, const struct iovec *vector, int count)
+readv(int fd, const struct iovec *vector, int count)
{
- intercept ("readv", 2);
- set_errno ();
- return -1;
+ intercept("readv", 2);
+ set_errno();
+ return -1;
}
ssize_t
-pread (int fd, void *buf, size_t count, unsigned long offset)
+pread(int fd, void *buf, size_t count, unsigned long offset)
{
- intercept ("pread", 2);
- set_errno ();
- return -1;
+ intercept("pread", 2);
+ set_errno();
+ return -1;
}
-
ssize_t
-pread64 (int fd, void *buf, size_t count, uint64_t offset)
+pread64(int fd, void *buf, size_t count, uint64_t offset)
{
- intercept ("pread64", 2);
- set_errno ();
- return -1;
+ intercept("pread64", 2);
+ set_errno();
+ return -1;
}
ssize_t
-write (int fd, const void *buf, size_t count)
+write(int fd, const void *buf, size_t count)
{
- intercept ("write", 2);
- set_errno ();
- return -1;
+ intercept("write", 2);
+ set_errno();
+ return -1;
}
ssize_t
-writev (int fd, const struct iovec *vector, int count)
+writev(int fd, const struct iovec *vector, int count)
{
- intercept ("writev", 2);
- set_errno ();
- return -1;
+ intercept("writev", 2);
+ set_errno();
+ return -1;
}
ssize_t
-pwrite (int fd, const void *buf, size_t count, unsigned long offset)
+pwrite(int fd, const void *buf, size_t count, unsigned long offset)
{
- intercept ("pwrite", 2);
- set_errno ();
- return -1;
+ intercept("pwrite", 2);
+ set_errno();
+ return -1;
}
ssize_t
-pwrite64 (int fd, const void *buf, size_t count, uint64_t offset)
+pwrite64(int fd, const void *buf, size_t count, uint64_t offset)
{
- intercept ("pwrite64", 2);
- set_errno ();
- return -1;
+ intercept("pwrite64", 2);
+ set_errno();
+ return -1;
}
-
off_t
-lseek (int fildes, unsigned long offset, int whence)
+lseek(int fildes, unsigned long offset, int whence)
{
- intercept ("lseek", 2);
- set_errno ();
- return -1;
+ intercept("lseek", 2);
+ set_errno();
+ return -1;
}
off_t
-lseek64 (int fildes, uint64_t offset, int whence)
+lseek64(int fildes, uint64_t offset, int whence)
{
- intercept ("lseek64", 2);
- set_errno ();
- return -1;
+ intercept("lseek64", 2);
+ set_errno();
+ return -1;
}
-
int
-dup (int fd)
+dup(int fd)
{
- intercept ("dup", 2);
- set_errno ();
- return -1;
+ intercept("dup", 2);
+ set_errno();
+ return -1;
}
int
-dup2 (int oldfd, int newfd)
+dup2(int oldfd, int newfd)
{
- intercept ("dup2", 2);
- set_errno ();
- return -1;
+ intercept("dup2", 2);
+ set_errno();
+ return -1;
}
int
-mkdir (const char *pathname, mode_t mode)
+mkdir(const char *pathname, mode_t mode)
{
- intercept ("mkdir", 2);
- set_errno ();
- return -1;
+ intercept("mkdir", 2);
+ set_errno();
+ return -1;
}
int
-rmdir (const char *pathname)
+rmdir(const char *pathname)
{
- intercept ("rmdir", 2);
- set_errno ();
- return -1;
+ intercept("rmdir", 2);
+ set_errno();
+ return -1;
}
int
-chmod (const char *pathname, mode_t mode)
+chmod(const char *pathname, mode_t mode)
{
- intercept ("chmod", 2);
- set_errno ();
- return -1;
+ intercept("chmod", 2);
+ set_errno();
+ return -1;
}
int
-chown (const char *pathname, uid_t owner, gid_t group)
+chown(const char *pathname, uid_t owner, gid_t group)
{
- intercept ("chown", 2);
- set_errno ();
- return -1;
+ intercept("chown", 2);
+ set_errno();
+ return -1;
}
int
-fchmod (int fd, mode_t mode)
+fchmod(int fd, mode_t mode)
{
- intercept ("fchmod", 2);
- set_errno ();
- return -1;
+ intercept("fchmod", 2);
+ set_errno();
+ return -1;
}
int
-fchown (int fd, uid_t uid, gid_t gid)
+fchown(int fd, uid_t uid, gid_t gid)
{
- intercept ("fchown", 2);
- set_errno ();
- return -1;
+ intercept("fchown", 2);
+ set_errno();
+ return -1;
}
-int fsync (int fd)
+int
+fsync(int fd)
{
- intercept ("fsync", 2);
- set_errno ();
- return -1;
+ intercept("fsync", 2);
+ set_errno();
+ return -1;
}
-
int
-ftruncate (int fd, off_t length)
+ftruncate(int fd, off_t length)
{
- intercept ("ftruncate", 1);
- set_errno ();
- return -1;
+ intercept("ftruncate", 1);
+ set_errno();
+ return -1;
}
-
int
-ftruncate64 (int fd, off_t length)
+ftruncate64(int fd, off_t length)
{
- intercept ("ftruncate64", 1);
- set_errno ();
- return -1;
+ intercept("ftruncate64", 1);
+ set_errno();
+ return -1;
}
int
-link (const char *oldpath, const char *newname)
+link(const char *oldpath, const char *newname)
{
- intercept ("link", 2);
- set_errno ();
- return -1;
+ intercept("link", 2);
+ set_errno();
+ return -1;
}
int
-rename (const char *oldpath, const char *newpath)
+rename(const char *oldpath, const char *newpath)
{
- intercept ("rename", 2);
- set_errno ();
- return -1;
+ intercept("rename", 2);
+ set_errno();
+ return -1;
}
int
-utimes (const char *path, const struct timeval times[2])
+utimes(const char *path, const struct timeval times[2])
{
- intercept ("utimes", 2);
- set_errno ();
- return -1;
+ intercept("utimes", 2);
+ set_errno();
+ return -1;
}
int
-utime (const char *path, const struct utimbuf *buf)
+futimes(int fd, const struct timeval times[2])
{
- intercept ("utime", 2);
- set_errno ();
- return -1;
+ intercept("futimes", 2);
+ set_errno();
+ return -1;
}
-
int
-mknod (const char *path, mode_t mode, dev_t dev)
+utime(const char *path, const struct utimbuf *buf)
{
- intercept ("mknod", 2);
- set_errno ();
- return -1;
+ intercept("utime", 2);
+ set_errno();
+ return -1;
}
int
-__xmknod (int ver, const char *path, mode_t mode, dev_t *dev)
+mknod(const char *path, mode_t mode, dev_t dev)
{
- intercept ("__xmknod", 2);
- set_errno ();
- return -1;
+ intercept("mknod", 2);
+ set_errno();
+ return -1;
}
int
-mkfifo (const char *path, mode_t mode)
+__xmknod(int ver, const char *path, mode_t mode, dev_t *dev)
{
- intercept ("mkfifo", 2);
- set_errno ();
- return -1;
+ intercept("__xmknod", 2);
+ set_errno();
+ return -1;
}
int
-unlink (const char *path)
+mkfifo(const char *path, mode_t mode)
{
- intercept ("unlink", 2);
- set_errno ();
- return -1;
+ intercept("mkfifo", 2);
+ set_errno();
+ return -1;
}
-
int
-symlink (const char *oldpath, const char *newpath)
+unlink(const char *path)
{
- intercept ("symlink", 2);
- set_errno ();
- return -1;
+ intercept("unlink", 2);
+ set_errno();
+ return -1;
}
int
-readlink (const char *path, char *buf, size_t bufsize)
+symlink(const char *oldpath, const char *newpath)
{
- intercept ("readlink", 1);
- set_errno ();
- return -1;
+ intercept("symlink", 2);
+ set_errno();
+ return -1;
}
+int
+readlink(const char *path, char *buf, size_t bufsize)
+{
+ intercept("readlink", 1);
+ set_errno();
+ return -1;
+}
char *
-realpath (const char *path, char *resolved)
+realpath(const char *path, char *resolved)
{
- intercept ("realpath", 1);
- set_errno ();
- return NULL;
+ intercept("realpath", 1);
+ set_errno();
+ return NULL;
}
-
DIR *
-opendir (const char *path)
+opendir(const char *path)
{
- intercept ("opendir", 2);
- set_errno ();
- return NULL;
+ intercept("opendir", 2);
+ set_errno();
+ return NULL;
}
-
struct dirent *
-readdir (DIR *dir)
+readdir(DIR *dir)
{
- intercept ("readdir\t", 2);
- set_errno ();
- return NULL;
+ intercept("readdir\t", 2);
+ set_errno();
+ return NULL;
}
struct dirent *
-readdir64 (DIR *dir)
+readdir64(DIR *dir)
{
- intercept ("readdir64", 2);
- set_errno ();
- return NULL;
+ intercept("readdir64", 2);
+ set_errno();
+ return NULL;
}
-
int
-readdir_r (DIR *dir, struct dirent *entry, struct dirent **result)
+readdir_r(DIR *dir, struct dirent *entry, struct dirent **result)
{
- intercept ("readdir_r", 1);
- set_errno ();
- return -1;
+ intercept("readdir_r", 1);
+ set_errno();
+ return -1;
}
int
-readdir64_r (DIR *dir, struct dirent *entry, struct dirent **result)
+readdir64_r(DIR *dir, struct dirent *entry, struct dirent **result)
{
- intercept ("readdir64_r", 1);
- set_errno ();
- return -1;
+ intercept("readdir64_r", 1);
+ set_errno();
+ return -1;
}
-
int
-closedir (DIR *dh)
+closedir(DIR *dh)
{
- intercept ("closedir", 1);
- set_errno ();
- return -1;
+ intercept("closedir", 1);
+ set_errno();
+ return -1;
}
int
-__xstat (int ver, const char *path, struct stat *buf)
+__xstat(int ver, const char *path, struct stat *buf)
{
- intercept ("__xstat\t", 2);
- set_errno ();
- return -1;
+ intercept("__xstat\t", 2);
+ set_errno();
+ return -1;
}
-
int
-__xstat64 (int ver, const char *path, struct stat *buf)
+__xstat64(int ver, const char *path, struct stat *buf)
{
- intercept ("__xstat64", 2);
- set_errno ();
- return -1;
+ intercept("__xstat64", 2);
+ set_errno();
+ return -1;
}
int
-stat (const char *path, struct stat *buf)
+stat(const char *path, struct stat *buf)
{
- intercept ("stat", 2);
- set_errno ();
- return -1;
+ intercept("stat", 2);
+ set_errno();
+ return -1;
}
int
-stat64 (const char *path, struct stat *buf)
+stat64(const char *path, struct stat *buf)
{
- intercept ("stat64", 2);
- set_errno ();
- return -1;
+ intercept("stat64", 2);
+ set_errno();
+ return -1;
}
int
-__fxstat (int ver, int fd, struct stat *buf)
+__fxstat(int ver, int fd, struct stat *buf)
{
- intercept ("__fxstat\t", 2);
- set_errno ();
- return -1;
+ intercept("__fxstat\t", 2);
+ set_errno();
+ return -1;
}
-
int
-__fxstat64 (int ver, int fd, struct stat *buf)
+__fxstat64(int ver, int fd, struct stat *buf)
{
- intercept ("__fxstat64", 2);
- set_errno ();
- return -1;
+ intercept("__fxstat64", 2);
+ set_errno();
+ return -1;
}
int
-fstat (int fd, struct stat *buf)
+fstat(int fd, struct stat *buf)
{
- intercept ("fstat", 2);
- set_errno ();
- return -1;
+ intercept("fstat", 2);
+ set_errno();
+ return -1;
}
int
-fstat64 (int fd , struct stat *buf)
+fstat64(int fd, struct stat *buf)
{
- intercept ("fstat64", 2);
- set_errno ();
- return -1;
+ intercept("fstat64", 2);
+ set_errno();
+ return -1;
}
int
-__lxstat (int ver, const char *path, struct stat *buf)
+__lxstat(int ver, const char *path, struct stat *buf)
{
- intercept ("__lxstat\t", 2);
- set_errno ();
- return -1;
+ intercept("__lxstat\t", 2);
+ set_errno();
+ return -1;
}
int
-__lxstat64 (int ver, const char *path, struct stat *buf)
+__lxstat64(int ver, const char *path, struct stat *buf)
{
- intercept ("__lxstat64", 2);
- set_errno ();
- return -1;
+ intercept("__lxstat64", 2);
+ set_errno();
+ return -1;
}
int
-lstat (const char *path, struct stat *buf)
+lstat(const char *path, struct stat *buf)
{
- intercept ("lstat", 2);
- set_errno ();
- return -1;
+ intercept("lstat", 2);
+ set_errno();
+ return -1;
}
int
-lstat64 (const char *path, struct stat *buf)
+lstat64(const char *path, struct stat *buf)
{
- intercept ("lstat64", 2);
- set_errno ();
- return -1;
+ intercept("lstat64", 2);
+ set_errno();
+ return -1;
}
int
-statfs (const char *path, struct statfs *buf)
+statfs(const char *path, struct statfs *buf)
{
- intercept ("statfs", 2);
- set_errno ();
- return -1;
+ intercept("statfs", 2);
+ set_errno();
+ return -1;
}
-
int
-statfs64 (const char *path, struct statfs *buf)
+statfs64(const char *path, struct statfs *buf)
{
- intercept ("statfs64", 2);
- set_errno ();
- return -1;
+ intercept("statfs64", 2);
+ set_errno();
+ return -1;
}
int
-statvfs (const char *path, struct statvfs *buf)
+statvfs(const char *path, struct statvfs *buf)
{
- intercept ("statvfs\t", 2);
- set_errno ();
- return -1;
+ intercept("statvfs\t", 2);
+ set_errno();
+ return -1;
}
-
int
-statvfs64 (const char *path, struct statvfs *buf)
+statvfs64(const char *path, struct statvfs *buf)
{
- intercept ("statvfs64", 2);
- set_errno ();
- return -1;
+ intercept("statvfs64", 2);
+ set_errno();
+ return -1;
}
ssize_t
-getxattr (const char *path, const char *name, void *value, size_t size)
+getxattr(const char *path, const char *name, void *value, size_t size)
{
- intercept ("getxattr", 1);
- set_errno ();
- return -1;
+ intercept("getxattr", 1);
+ set_errno();
+ return -1;
}
ssize_t
-lgetxattr (const char *path, const char *name, void *value, size_t size)
+lgetxattr(const char *path, const char *name, void *value, size_t size)
{
- intercept ("lgetxattr", 1);
- set_errno ();
- return -1;
+ intercept("lgetxattr", 1);
+ set_errno();
+ return -1;
}
-
int
-remove (const char* path)
+remove(const char *path)
{
- intercept ("remove", 2);
- set_errno ();
- return -1;
+ intercept("remove", 2);
+ set_errno();
+ return -1;
}
int
-lchown (const char *path, uid_t owner, gid_t group)
+lchown(const char *path, uid_t owner, gid_t group)
{
- intercept ("lchown", 2);
- set_errno ();
- return -1;
+ intercept("lchown", 2);
+ set_errno();
+ return -1;
}
void
-rewinddir (DIR *dirp)
+rewinddir(DIR *dirp)
{
- intercept ("rewinddir", 1);
- set_errno ();
- return;
+ intercept("rewinddir", 1);
+ set_errno();
+ return;
}
void
-seekdir (DIR *dirp, off_t offset)
+seekdir(DIR *dirp, off_t offset)
{
- intercept ("seekdir", 2);
- set_errno ();
- return;
+ intercept("seekdir", 2);
+ set_errno();
+ return;
}
off_t
-telldir (DIR *dirp)
+telldir(DIR *dirp)
{
- intercept ("telldir", 2);
- set_errno ();
- return -1;
+ intercept("telldir", 2);
+ set_errno();
+ return -1;
}
ssize_t
-sendfile (int out_fd, int in_fd, off_t *offset, size_t count)
+sendfile(int out_fd, int in_fd, off_t *offset, size_t count)
{
- intercept ("sendfile\t", 1);
- set_errno ();
- return -1;
+ intercept("sendfile\t", 1);
+ set_errno();
+ return -1;
}
ssize_t
-sendfile64 (int out_fd, int in_fd, off_t *offset, size_t count)
+sendfile64(int out_fd, int in_fd, off_t *offset, size_t count)
{
- intercept ("sendfile64", 1);
- set_errno ();
- return -1;
+ intercept("sendfile64", 1);
+ set_errno();
+ return -1;
}
-
int
-fcntl (int fd, int cmd, ...)
+fcntl(int fd, int cmd, ...)
{
- intercept ("fcntl", 2);
- set_errno ();
- return -1;
+ intercept("fcntl", 2);
+ set_errno();
+ return -1;
}
-
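(Aside, not part of the patch: every wrapper in this preload library follows the same three-line pattern. The unlink() stub from above is repeated here with explanatory comments; intercept() and set_errno() are the library's own helpers, and set_errno() presumably plants the sentinel errno value that the companion test program checks for.)

int
unlink(const char *path)
{
    intercept("unlink", 2); /* record which libc call was intercepted */
    set_errno();            /* presumably sets errno = PRELOAD_ERRNO_VERF */
    return -1;              /* always fail, so the caller inspects errno */
}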
diff --git a/extras/test/ld-preload-test/ld-preload-test.c b/extras/test/ld-preload-test/ld-preload-test.c
index cf8dd52c3e1..54dde8c7d54 100644
--- a/extras/test/ld-preload-test/ld-preload-test.c
+++ b/extras/test/ld-preload-test/ld-preload-test.c
@@ -46,322 +46,313 @@
#include <sys/uio.h>
#include <utime.h>
#include <sys/time.h>
-#include <attr/xattr.h>
+#include <sys/xattr.h>
#include <sys/sendfile.h>
-
-#define PRELOAD_ERRNO_VERF 6449
+#define PRELOAD_ERRNO_VERF 6449
void
check_err(int ret, char *call, int tabs)
{
- while (tabs > 0) {
- fprintf (stdout, "\t");
- --tabs;
- }
- if (ret != -1) {
- fprintf (stdout, "Not intercepted: %s\n", call);
- return;
- }
-
- if (errno != PRELOAD_ERRNO_VERF) {
- fprintf (stdout, "Not intercepted: %s: err: %s\n", call,
- strerror (errno));
- return;
- }
+ while (tabs > 0) {
+ fprintf(stdout, "\t");
+ --tabs;
+ }
+ if (ret != -1) {
+ fprintf(stdout, "Not intercepted: %s\n", call);
+ return;
+ }
- fprintf (stdout, "Intercept verified: %s\n", call);
+ if (errno != PRELOAD_ERRNO_VERF) {
+ fprintf(stdout, "Not intercepted: %s: err: %s\n", call,
+ strerror(errno));
return;
+ }
+
+ fprintf(stdout, "Intercept verified: %s\n", call);
+ return;
}
void
-usage (FILE *fp)
+usage(FILE *fp)
{
- fprintf (fp, "Usage: ld-preload-test <Options>\n");
- fprintf (fp, "Options\n");
- fprintf (fp, "\t--path\t\tPathname is used as the file/directory"
- " created for the test.\n");
-
+ fprintf(fp, "Usage: ld-preload-test <Options>\n");
+ fprintf(fp, "Options\n");
+ fprintf(fp,
+ "\t--path\t\tPathname is used as the file/directory"
+ " created for the test.\n");
}
-
int
-run_file_tests (char *testfile)
+run_file_tests(char *testfile)
{
- int ret = -1;
- struct stat buf;
+ int ret = -1;
+ struct stat buf;
- assert (testfile);
- fprintf (stdout, "Testing creat");
- ret = creat (testfile, S_IRWXU);
- check_err (ret, "creat", 2);
+ assert(testfile);
+ fprintf(stdout, "Testing creat");
+ ret = creat(testfile, S_IRWXU);
+ check_err(ret, "creat", 2);
- fprintf (stdout, "Testing close");
- ret = close (ret);
- check_err (ret, "close", 2);
+ fprintf(stdout, "Testing close");
+ ret = close(ret);
+ check_err(ret, "close", 2);
- fprintf (stdout, "Testing open");
- ret = open (testfile, O_RDONLY);
- check_err (ret, "open", 2);
+ fprintf(stdout, "Testing open");
+ ret = open(testfile, O_RDONLY);
+ check_err(ret, "open", 2);
- fprintf (stdout, "Testing read");
- ret = read (0, NULL, 0);
- check_err (ret, "read", 2);
+ fprintf(stdout, "Testing read");
+ ret = read(0, NULL, 0);
+ check_err(ret, "read", 2);
- fprintf (stdout, "Testing readv");
- ret = readv (0, NULL, 0);
- check_err (ret, "readv", 2);
+ fprintf(stdout, "Testing readv");
+ ret = readv(0, NULL, 0);
+ check_err(ret, "readv", 2);
- fprintf (stdout, "Testing pread");
- ret = pread (0, NULL, 0, 0);
- check_err (ret, "pread", 2);
+ fprintf(stdout, "Testing pread");
+ ret = pread(0, NULL, 0, 0);
+ check_err(ret, "pread", 2);
- fprintf (stdout, "Testing write");
- ret = write (0, NULL, 0);
- check_err (ret, "write", 2);
+ fprintf(stdout, "Testing write");
+ ret = write(0, NULL, 0);
+ check_err(ret, "write", 2);
- fprintf (stdout, "Testing writev");
- ret = writev (0, NULL, 0);
- check_err (ret, "writev", 2);
+ fprintf(stdout, "Testing writev");
+ ret = writev(0, NULL, 0);
+ check_err(ret, "writev", 2);
- fprintf (stdout, "Testing pwrite");
- ret = pwrite (0, NULL, 0, 0);
- check_err (ret, "pwrite", 2);
+ fprintf(stdout, "Testing pwrite");
+ ret = pwrite(0, NULL, 0, 0);
+ check_err(ret, "pwrite", 2);
- fprintf (stdout, "Testing lseek");
- ret = lseek (0, 0, 0);
- check_err (ret, "lseek", 2);
+ fprintf(stdout, "Testing lseek");
+ ret = lseek(0, 0, 0);
+ check_err(ret, "lseek", 2);
- fprintf (stdout, "Testing dup");
- ret = dup (0);
- check_err (ret, "dup", 2);
+ fprintf(stdout, "Testing dup");
+ ret = dup(0);
+ check_err(ret, "dup", 2);
- fprintf (stdout, "Testing dup2");
- ret = dup2 (0, 0);
- check_err (ret, "dup2", 2);
+ fprintf(stdout, "Testing dup2");
+ ret = dup2(0, 0);
+ check_err(ret, "dup2", 2);
- fprintf (stdout, "Testing fchmod");
- ret = fchmod (0, 0);
- check_err (ret, "fchmod", 2);
+ fprintf(stdout, "Testing fchmod");
+ ret = fchmod(0, 0);
+ check_err(ret, "fchmod", 2);
- fprintf (stdout, "Testing fchown");
- ret = fchown (0, 0, 0);
- check_err (ret, "fchown", 2);
+ fprintf(stdout, "Testing fchown");
+ ret = fchown(0, 0, 0);
+ check_err(ret, "fchown", 2);
- fprintf (stdout, "Testing fsync");
- ret = fsync (0);
- check_err (ret, "fsync", 2);
+ fprintf(stdout, "Testing fsync");
+ ret = fsync(0);
+ check_err(ret, "fsync", 2);
- fprintf (stdout, "Testing ftruncate");
- ret = ftruncate (0, 0);
- check_err (ret, "ftruncate", 1);
+ fprintf(stdout, "Testing ftruncate");
+ ret = ftruncate(0, 0);
+ check_err(ret, "ftruncate", 1);
- fprintf (stdout, "Testing fstat");
- ret = fstat (0, &buf);
- check_err (ret, "fstat", 1);
+ fprintf(stdout, "Testing fstat");
+ ret = fstat(0, &buf);
+ check_err(ret, "fstat", 1);
- fprintf (stdout, "Testing sendfile");
- ret = sendfile (0, 0, NULL, 0);
- check_err (ret, "sendfile", 1);
+ fprintf(stdout, "Testing sendfile");
+ ret = sendfile(0, 0, NULL, 0);
+ check_err(ret, "sendfile", 1);
- fprintf (stdout, "Testing fcntl");
- ret = fcntl (0, 0, NULL);
- check_err (ret, "fcntl", 2);
+ fprintf(stdout, "Testing fcntl");
+ ret = fcntl(0, 0, NULL);
+ check_err(ret, "fcntl", 2);
- fprintf (stdout, "Testing close");
- ret = close (ret);
- check_err (ret, "close", 2);
+ fprintf(stdout, "Testing close");
+ ret = close(ret);
+ check_err(ret, "close", 2);
- fprintf (stdout, "Testing remove");
- ret = remove (testfile);
- check_err (ret, "remove", 2);
+ fprintf(stdout, "Testing remove");
+ ret = remove(testfile);
+ check_err(ret, "remove", 2);
- return ret;
+ return ret;
}
-
int
-run_attr_tests (char *testfile)
+run_attr_tests(char *testfile)
{
- int ret = -1;
- char *res = NULL;
- struct stat buf;
- struct statfs sbuf;
- struct statvfs svbuf;
-
- assert (testfile);
-
- fprintf (stdout, "Testing chmod");
- ret = chmod (testfile, 0);
- check_err (ret, "chmod", 2);
-
- fprintf (stdout, "Testing chown");
- ret = chown (testfile, 0, 0);
- check_err (ret, "chown", 2);
-
- fprintf (stdout, "Testing link");
- ret = link (testfile, testfile);
- check_err (ret, "link", 2);
-
- fprintf (stdout, "Testing rename");
- ret = rename (testfile, testfile);
- check_err (ret, "rename", 2);
-
- fprintf (stdout, "Testing utimes");
- ret = utimes (testfile, NULL);
- check_err (ret, "utimes", 2);
-
- fprintf (stdout, "Testing utime");
- ret = utime (testfile, NULL);
- check_err (ret, "utime", 2);
-
- fprintf (stdout, "Testing unlink");
- ret = unlink (testfile);
- check_err (ret, "unlink", 2);
-
- fprintf (stdout, "Testing symlink");
- ret = symlink (testfile, testfile);
- check_err (ret, "symlink", 2);
-
- fprintf (stdout, "Testing readlink");
- ret = readlink (testfile, testfile, 0);
- check_err (ret, "readlink", 2);
-
- fprintf (stdout, "Testing realpath");
- ret = 0;
- res = realpath ((const char *)testfile, testfile);
- if (!res)
- ret = -1;
- check_err (ret, "realpath", 2);
-
- fprintf (stdout, "Testing stat");
- ret = stat (testfile, &buf);
- check_err (ret, "stat", 1);
-
- fprintf (stdout, "Testing lstat");
- ret = lstat (testfile, &buf);
- check_err (ret, "lstat", 1);
-
- fprintf (stdout, "Testing statfs");
- ret = statfs (testfile, &sbuf);
- check_err (ret, "statfs", 2);
-
- fprintf (stdout, "Testing statvfs");
- ret = statvfs (testfile, &svbuf);
- check_err (ret, "statvfs", 1);
-
- fprintf (stdout, "Testing getxattr");
- ret = getxattr (testfile, NULL, NULL, 0);
- check_err (ret, "getxattr", 2);
-
- fprintf (stdout, "Testing lgetxattr");
- ret = lgetxattr (testfile, NULL, NULL, 0);
- check_err (ret, "lgetxattr", 1);
-
- fprintf (stdout, "Testing lchown");
- ret = lchown (testfile, 0, 0);
- check_err (ret, "lchown", 2);
- return 0;
+ int ret = -1;
+ char *res = NULL;
+ struct stat buf;
+ struct statfs sbuf;
+ struct statvfs svbuf;
+
+ assert(testfile);
+
+ fprintf(stdout, "Testing chmod");
+ ret = chmod(testfile, 0);
+ check_err(ret, "chmod", 2);
+
+ fprintf(stdout, "Testing chown");
+ ret = chown(testfile, 0, 0);
+ check_err(ret, "chown", 2);
+
+ fprintf(stdout, "Testing link");
+ ret = link(testfile, testfile);
+ check_err(ret, "link", 2);
+
+ fprintf(stdout, "Testing rename");
+ ret = rename(testfile, testfile);
+ check_err(ret, "rename", 2);
+
+ fprintf(stdout, "Testing utimes");
+ ret = utimes(testfile, NULL);
+ check_err(ret, "utimes", 2);
+
+ fprintf(stdout, "Testing utime");
+ ret = utime(testfile, NULL);
+ check_err(ret, "utime", 2);
+
+ fprintf(stdout, "Testing unlink");
+ ret = unlink(testfile);
+ check_err(ret, "unlink", 2);
+
+ fprintf(stdout, "Testing symlink");
+ ret = symlink(testfile, testfile);
+ check_err(ret, "symlink", 2);
+
+ fprintf(stdout, "Testing readlink");
+ ret = readlink(testfile, testfile, 0);
+ check_err(ret, "readlink", 2);
+
+ fprintf(stdout, "Testing realpath");
+ ret = 0;
+ res = realpath((const char *)testfile, testfile);
+ if (!res)
+ ret = -1;
+ check_err(ret, "realpath", 2);
+
+ fprintf(stdout, "Testing stat");
+ ret = stat(testfile, &buf);
+ check_err(ret, "stat", 1);
+
+ fprintf(stdout, "Testing lstat");
+ ret = lstat(testfile, &buf);
+ check_err(ret, "lstat", 1);
+
+ fprintf(stdout, "Testing statfs");
+ ret = statfs(testfile, &sbuf);
+ check_err(ret, "statfs", 2);
+
+ fprintf(stdout, "Testing statvfs");
+ ret = statvfs(testfile, &svbuf);
+ check_err(ret, "statvfs", 1);
+
+ fprintf(stdout, "Testing getxattr");
+ ret = getxattr(testfile, NULL, NULL, 0);
+ check_err(ret, "getxattr", 2);
+
+ fprintf(stdout, "Testing lgetxattr");
+ ret = lgetxattr(testfile, NULL, NULL, 0);
+ check_err(ret, "lgetxattr", 1);
+
+ fprintf(stdout, "Testing lchown");
+ ret = lchown(testfile, 0, 0);
+ check_err(ret, "lchown", 2);
+ return 0;
}
-
int
-run_dev_tests (char *testfile)
+run_dev_tests(char *testfile)
{
- int ret = -1;
+ int ret = -1;
- assert (testfile);
+ assert(testfile);
- fprintf (stdout, "Testing mknod");
- ret = mknod (testfile, 0, 0);
- check_err (ret, "mknod", 2);
+ fprintf(stdout, "Testing mknod");
+ ret = mknod(testfile, 0, 0);
+ check_err(ret, "mknod", 2);
- fprintf (stdout, "Testing mkfifo");
- ret = mkfifo (testfile, 0);
- check_err (ret, "mkfifo", 2);
- return 0;
+ fprintf(stdout, "Testing mkfifo");
+ ret = mkfifo(testfile, 0);
+ check_err(ret, "mkfifo", 2);
+ return 0;
}
int
-run_dir_tests (char *testpath)
+run_dir_tests(char *testpath)
{
- int ret = -1;
- DIR *dh = NULL;
- struct dirent *dire = NULL;
-
- assert (testpath);
-
- fprintf (stdout, "Testing mkdir");
- ret = mkdir (testpath, 0);
- check_err (ret, "mkdir", 2);
-
- fprintf (stdout, "Testing rmdir");
- ret = rmdir (testpath);
- check_err (ret, "rmdir", 2);
-
- fprintf (stdout, "Testing opendir");
- ret = 0;
- dh = opendir (testpath);
- if (!dh)
- ret = -1;
- check_err (ret, "opendir", 2);
-
- fprintf (stdout, "Testing readdir");
- ret = 0;
- dire = readdir (dh);
- if (!dire)
- ret = -1;
- check_err (ret, "readdir", 1);
-
- fprintf (stdout, "Testing readdir_r");
- ret = readdir_r (dh, dire, &dire);
- check_err (ret, "readdir_r", 1);
-
- fprintf (stdout, "Testing rewinddir");
- rewinddir (dh);
- check_err (-1, "rewinddir", 1);
-
- fprintf (stdout, "Testing seekdir");
- seekdir (dh, 0);
- check_err (-1, "seekdir", 2);
-
- fprintf (stdout, "Testing telldir");
- ret = telldir (dh);
- check_err (ret, "telldir", 2);
-
- fprintf (stdout, "Testing closedir");
- ret = closedir (dh);
- check_err (ret, "closedir", 2);
- return 0;
+ int ret = -1;
+ DIR *dh = NULL;
+ struct dirent *dire = NULL;
+
+ assert(testpath);
+
+ fprintf(stdout, "Testing mkdir");
+ ret = mkdir(testpath, 0);
+ check_err(ret, "mkdir", 2);
+
+ fprintf(stdout, "Testing rmdir");
+ ret = rmdir(testpath);
+ check_err(ret, "rmdir", 2);
+
+ fprintf(stdout, "Testing opendir");
+ ret = 0;
+ dh = opendir(testpath);
+ if (!dh)
+ ret = -1;
+ check_err(ret, "opendir", 2);
+
+ fprintf(stdout, "Testing readdir");
+ ret = 0;
+ dire = readdir(dh);
+ if (!dire)
+ ret = -1;
+ check_err(ret, "readdir", 1);
+
+ fprintf(stdout, "Testing readdir_r");
+ ret = readdir_r(dh, dire, &dire);
+ check_err(ret, "readdir_r", 1);
+
+ fprintf(stdout, "Testing rewinddir");
+ rewinddir(dh);
+ check_err(-1, "rewinddir", 1);
+
+ fprintf(stdout, "Testing seekdir");
+ seekdir(dh, 0);
+ check_err(-1, "seekdir", 2);
+
+ fprintf(stdout, "Testing telldir");
+ ret = telldir(dh);
+ check_err(ret, "telldir", 2);
+
+ fprintf(stdout, "Testing closedir");
+ ret = closedir(dh);
+ check_err(ret, "closedir", 2);
+ return 0;
}
-
-
int
-main (int argc, char *argv[])
+main(int argc, char *argv[])
{
- char *testpath = NULL;
- int x = 0;
-
- for (;x < argc; ++x) {
- if (strcmp (argv[x], "--path") == 0) {
- testpath = argv[x+1];
- continue;
- }
+ char *testpath = NULL;
+ int x = 0;
+ for (; x < argc; ++x) {
+ if (strcmp(argv[x], "--path") == 0) {
+ testpath = argv[x + 1];
+ continue;
}
+ }
- if (!testpath) {
- fprintf (stderr, "--path not specified\n");
- usage (stderr);
- return -1;
- }
+ if (!testpath) {
+ fprintf(stderr, "--path not specified\n");
+ usage(stderr);
+ return -1;
+ }
- run_file_tests (testpath);
- run_dir_tests (testpath);
- run_attr_tests (testpath);
- run_dev_tests (testpath);
+ run_file_tests(testpath);
+ run_dir_tests(testpath);
+ run_attr_tests(testpath);
+ run_dev_tests(testpath);
- return 0;
+ return 0;
}
-
-
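(Aside, not part of the patch: all of the pass/fail messages above funnel through check_err(), which counts a call as intercepted only when it returned -1 and errno carries the sentinel planted by the preload library. A minimal standalone restatement for reference; the helper name is invented here.)

#include <errno.h>
#include <stdio.h>

#define PRELOAD_ERRNO_VERF 6449 /* sentinel value, as defined above */

/* Hypothetical condensed equivalent of check_err() above. */
static void
check_intercepted(int ret, const char *call)
{
    if (ret == -1 && errno == PRELOAD_ERRNO_VERF)
        fprintf(stdout, "Intercept verified: %s\n", call);
    else
        fprintf(stdout, "Not intercepted: %s\n", call);
}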
diff --git a/extras/test/open-fd-tests.c b/extras/test/open-fd-tests.c
index 4184079d043..509952b4180 100644
--- a/extras/test/open-fd-tests.c
+++ b/extras/test/open-fd-tests.c
@@ -4,61 +4,64 @@
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
-#include <attr/xattr.h>
+#include <sys/xattr.h>
#include <errno.h>
#include <string.h>
int
-main (int argc, char *argv[])
+main(int argc, char *argv[])
{
- int ret = -1;
- int fd = 0;
- char *filename = NULL;
- int loop = 0;
- struct stat stbuf = {0,};
- char string[1024] = {0,};
+ int ret = -1;
+ int fd = 0;
+ char *filename = NULL;
+ int loop = 0;
+ struct stat stbuf = {
+ 0,
+ };
+ char string[1024] = {
+ 0,
+ };
- if (argc > 1)
- filename = argv[1];
+ if (argc > 1)
+ filename = argv[1];
- if (!filename)
- filename = "temp-fd-test-file";
+ if (!filename)
+ filename = "temp-fd-test-file";
- fd = open (filename, O_RDWR|O_CREAT|O_TRUNC);
- if (fd < 0) {
- fd = 0;
- fprintf (stderr, "open failed : %s\n", strerror (errno));
- goto out;
- }
-
- while (loop < 1000) {
- /* Use it as a mechanism to test time delays */
- memset (string, 0, 1024);
- scanf ("%s", string);
+ fd = open(filename, O_RDWR | O_CREAT | O_TRUNC);
+ if (fd < 0) {
+ fd = 0;
+ fprintf(stderr, "open failed : %s\n", strerror(errno));
+ goto out;
+ }
- ret = write (fd, string, strlen (string));
- if (ret != strlen (string)) {
- fprintf (stderr, "write failed : %s (%s %d)\n",
- strerror (errno), string, loop);
- goto out;
- }
+ while (loop < 1000) {
+ /* Use it as a mechanism to test time delays */
+ memset(string, 0, 1024);
+ scanf("%s", string);
- ret = write (fd, "\n", 1);
- if (ret != 1) {
- fprintf (stderr, "write failed : %s (%d)\n",
- strerror (errno), loop);
- goto out;
- }
+ ret = write(fd, string, strlen(string));
+ if (ret != strlen(string)) {
+ fprintf(stderr, "write failed : %s (%s %d)\n", strerror(errno),
+ string, loop);
+ goto out;
+ }
- loop++;
+ ret = write(fd, "\n", 1);
+ if (ret != 1) {
+ fprintf(stderr, "write failed : %s (%d)\n", strerror(errno), loop);
+ goto out;
}
- fprintf (stdout, "finishing the test after %d loops\n", loop);
+ loop++;
+ }
+
+ fprintf(stdout, "finishing the test after %d loops\n", loop);
- ret = 0;
+ ret = 0;
out:
- if (fd)
- close (fd);
+ if (fd)
+ close(fd);
- return ret;
+ return ret;
}
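(Aside, not part of the patch: the open() call above passes O_CREAT without the third mode argument, so the permission bits of a freshly created test file are unspecified. A small illustrative helper showing the three-argument form; the function name is invented here.)

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical helper: open-or-create with an explicit mode, since O_CREAT
 * without a mode leaves the new file's permission bits undefined. */
static int
open_test_file(const char *filename)
{
    int fd = open(filename, O_RDWR | O_CREAT | O_TRUNC, 0644);
    if (fd < 0)
        fprintf(stderr, "open failed : %s\n", strerror(errno));
    return fd;
}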
diff --git a/extras/test/test-ffop.c b/extras/test/test-ffop.c
index 219dd6a2da2..1d9c125db67 100644
--- a/extras/test/test-ffop.c
+++ b/extras/test/test-ffop.c
@@ -3,777 +3,825 @@
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
-#include <attr/xattr.h>
+#include <sys/xattr.h>
#include <errno.h>
#include <string.h>
#include <dirent.h>
-int fd_based_fops_1 (char *filename); //for fd based fops after unlink
-int fd_based_fops_2 (char *filename); //for fd based fops before unlink
-int dup_fd_based_fops (char *filename); // fops based on fd after dup
-int path_based_fops (char *filename); //for fops based on path
-int dir_based_fops (char *filename); // for fops which operate on directory
-int link_based_fops (char *filename); //for fops which operate in link files (symlinks)
-int test_open_modes (char *filename); // to test open syscall with open modes available.
-int generic_open_read_write (char *filename, int flag); // generic function which does open write and read.
+int
+fd_based_fops_1(char *filename); // for fd based fops after unlink
+int
+fd_based_fops_2(char *filename); // for fd based fops before unlink
+int
+dup_fd_based_fops(char *filename); // fops based on fd after dup
+int
+path_based_fops(char *filename); // for fops based on path
+int
+dir_based_fops(char *filename); // for fops which operate on directory
+int
+link_based_fops(
+    char *filename);  // for fops which operate on link files (symlinks)
+int
+test_open_modes(
+ char *filename); // to test open syscall with open modes available.
+int
+generic_open_read_write(
+ char *filename,
+ int flag); // generic function which does open write and read.
int
-main (int argc, char *argv[])
+main(int argc, char *argv[])
{
- int ret = -1;
- char filename[255] = {0,};
-
- if (argc > 1)
- strcpy(filename, argv[1]);
- else
- strcpy(filename, "temp-xattr-test-file");
-
- ret = fd_based_fops_1 (strcat(filename, "_1"));
- if (ret < 0)
- fprintf (stderr, "fd based file operation 1 failed\n");
- else
- fprintf (stdout, "fd based file operation 1 passed\n");
-
- ret = fd_based_fops_2 (strcat(filename, "_2"));
- if (ret < 0)
- fprintf (stderr, "fd based file operation 2 failed\n");
- else
- fprintf (stdout, "fd based file operation 2 passed\n");
-
- ret = dup_fd_based_fops (strcat (filename, "_3"));
- if (ret < 0)
- fprintf (stderr, "dup fd based file operation failed\n");
- else
- fprintf (stdout, "dup fd based file operation passed\n");
-
- ret = path_based_fops (strcat (filename, "_4"));
- if (ret < 0)
- fprintf (stderr, "path based file operation failed\n");
- else
- fprintf (stdout, "path based file operation passed\n");
-
- ret = dir_based_fops (strcat (filename, "_5"));
- if (ret < 0)
- fprintf (stderr, "directory based file operation failed\n");
- else
- fprintf (stdout, "directory based file operation passed\n");
-
- ret = link_based_fops (strcat (filename, "_5"));
- if (ret < 0)
- fprintf (stderr, "link based file operation failed\n");
- else
- fprintf (stdout, "link based file operation passed\n");
-
- ret = test_open_modes (strcat (filename, "_5"));
- if (ret < 0)
- fprintf (stderr, "testing modes of 'open' call failed\n");
- else
- fprintf (stdout, "testing modes of 'open' call passed\n");
+ int ret = -1;
+ char filename[255] = {
+ 0,
+ };
+
+ if (argc > 1)
+ strcpy(filename, argv[1]);
+ else
+ strcpy(filename, "temp-xattr-test-file");
+
+ ret = fd_based_fops_1(strcat(filename, "_1"));
+ if (ret < 0)
+ fprintf(stderr, "fd based file operation 1 failed\n");
+ else
+ fprintf(stdout, "fd based file operation 1 passed\n");
+
+ ret = fd_based_fops_2(strcat(filename, "_2"));
+ if (ret < 0)
+ fprintf(stderr, "fd based file operation 2 failed\n");
+ else
+ fprintf(stdout, "fd based file operation 2 passed\n");
+
+ ret = dup_fd_based_fops(strcat(filename, "_3"));
+ if (ret < 0)
+ fprintf(stderr, "dup fd based file operation failed\n");
+ else
+ fprintf(stdout, "dup fd based file operation passed\n");
+
+ ret = path_based_fops(strcat(filename, "_4"));
+ if (ret < 0)
+ fprintf(stderr, "path based file operation failed\n");
+ else
+ fprintf(stdout, "path based file operation passed\n");
+
+ ret = dir_based_fops(strcat(filename, "_5"));
+ if (ret < 0)
+ fprintf(stderr, "directory based file operation failed\n");
+ else
+ fprintf(stdout, "directory based file operation passed\n");
+
+ ret = link_based_fops(strcat(filename, "_5"));
+ if (ret < 0)
+ fprintf(stderr, "link based file operation failed\n");
+ else
+ fprintf(stdout, "link based file operation passed\n");
+
+ ret = test_open_modes(strcat(filename, "_5"));
+ if (ret < 0)
+ fprintf(stderr, "testing modes of 'open' call failed\n");
+ else
+ fprintf(stdout, "testing modes of 'open' call passed\n");
out:
- return ret;
+ return ret;
}
int
-fd_based_fops_1 (char *filename)
+fd_based_fops_1(char *filename)
{
- int fd = 0;
- int ret = -1;
- struct stat stbuf = {0,};
- char wstr[50] = {0,};
- char rstr[50] = {0,};
-
- fd = open (filename, O_RDWR|O_CREAT);
- if (fd < 0) {
- fd = 0;
- fprintf (stderr, "open failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = unlink (filename);
- if (ret < 0) {
- fprintf (stderr, "unlink failed : %s\n", strerror (errno));
- goto out;
- }
-
- strcpy (wstr, "This is my string\n");
- ret = write (fd, wstr, strlen(wstr));
- if (ret <= 0) {
- ret = -1;
- fprintf (stderr, "write failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = lseek (fd, 0, SEEK_SET);
- if (ret < 0) {
- fprintf (stderr, "lseek failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = read (fd, rstr, strlen(wstr));
- if (ret <= 0) {
- ret = -1;
- fprintf (stderr, "read failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = memcmp (rstr, wstr, strlen (wstr));
- if (ret != 0) {
- ret = -1;
- fprintf (stderr, "read returning junk\n");
- goto out;
- }
-
- ret = ftruncate (fd, 0);
- if (ret < 0) {
- fprintf (stderr, "ftruncate failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fstat (fd, &stbuf);
- if (ret < 0) {
- fprintf (stderr, "fstat failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fchmod (fd, 0640);
- if (ret < 0) {
- fprintf (stderr, "fchmod failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fchown (fd, 10001, 10001);
- if (ret < 0) {
- fprintf (stderr, "fchown failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fsync (fd);
- if (ret < 0) {
- fprintf (stderr, "fsync failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fsetxattr (fd, "trusted.xattr-test", "working", 8, 0);
- if (ret < 0) {
- fprintf (stderr, "fsetxattr failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fdatasync (fd);
- if (ret < 0) {
- fprintf (stderr, "fdatasync failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = flistxattr (fd, NULL, 0);
- if (ret <= 0) {
- ret = -1;
- fprintf (stderr, "flistxattr failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fgetxattr (fd, "trusted.xattr-test", NULL, 0);
- if (ret <= 0) {
- ret = -1;
- fprintf (stderr, "fgetxattr failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fremovexattr (fd, "trusted.xattr-test");
- if (ret < 0) {
- fprintf (stderr, "fremovexattr failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = 0;
+ int fd = 0;
+ int ret = -1;
+ struct stat stbuf = {
+ 0,
+ };
+ char wstr[50] = {
+ 0,
+ };
+ char rstr[50] = {
+ 0,
+ };
+
+ fd = open(filename, O_RDWR | O_CREAT);
+ if (fd < 0) {
+ fd = 0;
+ fprintf(stderr, "open failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = unlink(filename);
+ if (ret < 0) {
+ fprintf(stderr, "unlink failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ strcpy(wstr, "This is my string\n");
+ ret = write(fd, wstr, strlen(wstr));
+ if (ret <= 0) {
+ ret = -1;
+ fprintf(stderr, "write failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = lseek(fd, 0, SEEK_SET);
+ if (ret < 0) {
+ fprintf(stderr, "lseek failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = read(fd, rstr, strlen(wstr));
+ if (ret <= 0) {
+ ret = -1;
+ fprintf(stderr, "read failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = memcmp(rstr, wstr, strlen(wstr));
+ if (ret != 0) {
+ ret = -1;
+ fprintf(stderr, "read returning junk\n");
+ goto out;
+ }
+
+ ret = ftruncate(fd, 0);
+ if (ret < 0) {
+ fprintf(stderr, "ftruncate failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fstat(fd, &stbuf);
+ if (ret < 0) {
+ fprintf(stderr, "fstat failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fchmod(fd, 0640);
+ if (ret < 0) {
+ fprintf(stderr, "fchmod failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fchown(fd, 10001, 10001);
+ if (ret < 0) {
+ fprintf(stderr, "fchown failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fsync(fd);
+ if (ret < 0) {
+ fprintf(stderr, "fsync failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fsetxattr(fd, "trusted.xattr-test", "working", 8, 0);
+ if (ret < 0) {
+ fprintf(stderr, "fsetxattr failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fdatasync(fd);
+ if (ret < 0) {
+ fprintf(stderr, "fdatasync failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = flistxattr(fd, NULL, 0);
+ if (ret <= 0) {
+ ret = -1;
+ fprintf(stderr, "flistxattr failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fgetxattr(fd, "trusted.xattr-test", NULL, 0);
+ if (ret <= 0) {
+ ret = -1;
+ fprintf(stderr, "fgetxattr failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fremovexattr(fd, "trusted.xattr-test");
+ if (ret < 0) {
+ fprintf(stderr, "fremovexattr failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = 0;
out:
- if (fd)
- close (fd);
+ if (fd)
+ close(fd);
- return ret;
+ return ret;
}
-
int
-fd_based_fops_2 (char *filename)
+fd_based_fops_2(char *filename)
{
- int fd = 0;
- int ret = -1;
- struct stat stbuf = {0,};
- char wstr[50] = {0,};
- char rstr[50] = {0,};
-
- fd = open (filename, O_RDWR|O_CREAT);
- if (fd < 0) {
- fd = 0;
- fprintf (stderr, "open failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = ftruncate (fd, 0);
-
- if (ret < 0) {
- fprintf (stderr, "ftruncate failed : %s\n", strerror (errno));
- goto out;
- }
-
- strcpy (wstr, "This is my second string\n");
- ret = write (fd, wstr, strlen (wstr));
- if (ret < 0) {
- ret = -1;
- fprintf (stderr, "write failed: %s\n", strerror (errno));
- goto out;
- }
-
- lseek (fd, 0, SEEK_SET);
- if (ret < 0) {
- fprintf (stderr, "lseek failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = read (fd, rstr, strlen (wstr));
- if (ret <= 0) {
- ret = -1;
- fprintf (stderr, "read failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = memcmp (rstr, wstr, strlen (wstr));
- if (ret != 0) {
- ret = -1;
- fprintf (stderr, "read returning junk\n");
- goto out;
- }
-
- ret = fstat (fd, &stbuf);
- if (ret < 0) {
- fprintf (stderr, "fstat failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fchmod (fd, 0640);
- if (ret < 0) {
- fprintf (stderr, "fchmod failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fchown (fd, 10001, 10001);
- if (ret < 0) {
- fprintf (stderr, "fchown failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fsync (fd);
- if (ret < 0) {
- fprintf (stderr, "fsync failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fsetxattr (fd, "trusted.xattr-test", "working", 8, 0);
- if (ret < 0) {
- fprintf (stderr, "fsetxattr failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fdatasync (fd);
- if (ret < 0) {
- fprintf (stderr, "fdatasync failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = flistxattr (fd, NULL, 0);
- if (ret <= 0) {
- ret = -1;
- fprintf (stderr, "flistxattr failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fgetxattr (fd, "trusted.xattr-test", NULL, 0);
- if (ret <= 0) {
- ret = -1;
- fprintf (stderr, "fgetxattr failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fremovexattr (fd, "trusted.xattr-test");
- if (ret < 0) {
- fprintf (stderr, "fremovexattr failed : %s\n", strerror (errno));
- goto out;
- }
+ int fd = 0;
+ int ret = -1;
+ struct stat stbuf = {
+ 0,
+ };
+ char wstr[50] = {
+ 0,
+ };
+ char rstr[50] = {
+ 0,
+ };
+
+ fd = open(filename, O_RDWR | O_CREAT);
+ if (fd < 0) {
+ fd = 0;
+ fprintf(stderr, "open failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = ftruncate(fd, 0);
+
+ if (ret < 0) {
+ fprintf(stderr, "ftruncate failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ strcpy(wstr, "This is my second string\n");
+ ret = write(fd, wstr, strlen(wstr));
+ if (ret < 0) {
+ ret = -1;
+ fprintf(stderr, "write failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ lseek(fd, 0, SEEK_SET);
+ if (ret < 0) {
+ fprintf(stderr, "lseek failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = read(fd, rstr, strlen(wstr));
+ if (ret <= 0) {
+ ret = -1;
+ fprintf(stderr, "read failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = memcmp(rstr, wstr, strlen(wstr));
+ if (ret != 0) {
+ ret = -1;
+ fprintf(stderr, "read returning junk\n");
+ goto out;
+ }
+
+ ret = fstat(fd, &stbuf);
+ if (ret < 0) {
+ fprintf(stderr, "fstat failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fchmod(fd, 0640);
+ if (ret < 0) {
+ fprintf(stderr, "fchmod failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fchown(fd, 10001, 10001);
+ if (ret < 0) {
+ fprintf(stderr, "fchown failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fsync(fd);
+ if (ret < 0) {
+ fprintf(stderr, "fsync failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fsetxattr(fd, "trusted.xattr-test", "working", 8, 0);
+ if (ret < 0) {
+ fprintf(stderr, "fsetxattr failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fdatasync(fd);
+ if (ret < 0) {
+ fprintf(stderr, "fdatasync failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = flistxattr(fd, NULL, 0);
+ if (ret <= 0) {
+ ret = -1;
+ fprintf(stderr, "flistxattr failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fgetxattr(fd, "trusted.xattr-test", NULL, 0);
+ if (ret <= 0) {
+ ret = -1;
+ fprintf(stderr, "fgetxattr failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fremovexattr(fd, "trusted.xattr-test");
+ if (ret < 0) {
+ fprintf(stderr, "fremovexattr failed : %s\n", strerror(errno));
+ goto out;
+ }
out:
- if (fd)
- close (fd);
- unlink (filename);
+ if (fd)
+ close(fd);
+ unlink(filename);
- return ret;
+ return ret;
}
int
-path_based_fops (char *filename)
+path_based_fops(char *filename)
{
- int ret = -1;
- int fd = 0;
- struct stat stbuf = {0,};
- char newfilename[255] = {0,};
-
- fd = creat (filename, 0644);
- if (fd < 0) {
- fprintf (stderr, "creat failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = truncate (filename, 0);
- if (ret < 0) {
- fprintf (stderr, "truncate failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = stat (filename, &stbuf);
- if (ret < 0) {
- fprintf (stderr, "stat failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = chmod (filename, 0640);
- if (ret < 0) {
- fprintf (stderr, "chmod failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = chown (filename, 10001, 10001);
- if (ret < 0) {
- fprintf (stderr, "chown failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = setxattr (filename, "trusted.xattr-test", "working", 8, 0);
- if (ret < 0) {
- fprintf (stderr, "setxattr failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = listxattr (filename, NULL, 0);
- if (ret <= 0) {
- ret = -1;
- fprintf (stderr, "listxattr failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = getxattr (filename, "trusted.xattr-test", NULL, 0);
- if (ret <= 0) {
- ret = -1;
- fprintf (stderr, "getxattr failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = removexattr (filename, "trusted.xattr-test");
- if (ret < 0) {
- fprintf (stderr, "removexattr failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = access (filename, R_OK|W_OK);
- if (ret < 0) {
- fprintf (stderr, "access failed: %s\n", strerror (errno));
- goto out;
- }
-
- strcpy (newfilename, filename);
- strcat(newfilename, "_new");
- ret = rename (filename, newfilename);
- if (ret < 0) {
- fprintf (stderr, "rename failed: %s\n", strerror (errno));
- goto out;
- }
- unlink (newfilename);
+ int ret = -1;
+ int fd = 0;
+ struct stat stbuf = {
+ 0,
+ };
+ char newfilename[255] = {
+ 0,
+ };
+
+ fd = creat(filename, 0644);
+ if (fd < 0) {
+ fprintf(stderr, "creat failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = truncate(filename, 0);
+ if (ret < 0) {
+ fprintf(stderr, "truncate failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = stat(filename, &stbuf);
+ if (ret < 0) {
+ fprintf(stderr, "stat failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = chmod(filename, 0640);
+ if (ret < 0) {
+ fprintf(stderr, "chmod failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = chown(filename, 10001, 10001);
+ if (ret < 0) {
+ fprintf(stderr, "chown failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = setxattr(filename, "trusted.xattr-test", "working", 8, 0);
+ if (ret < 0) {
+ fprintf(stderr, "setxattr failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = listxattr(filename, NULL, 0);
+ if (ret <= 0) {
+ ret = -1;
+ fprintf(stderr, "listxattr failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = getxattr(filename, "trusted.xattr-test", NULL, 0);
+ if (ret <= 0) {
+ ret = -1;
+ fprintf(stderr, "getxattr failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = removexattr(filename, "trusted.xattr-test");
+ if (ret < 0) {
+ fprintf(stderr, "removexattr failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = access(filename, R_OK | W_OK);
+ if (ret < 0) {
+ fprintf(stderr, "access failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ strcpy(newfilename, filename);
+ strcat(newfilename, "_new");
+ ret = rename(filename, newfilename);
+ if (ret < 0) {
+ fprintf(stderr, "rename failed: %s\n", strerror(errno));
+ goto out;
+ }
+ unlink(newfilename);
out:
- if (fd)
- close (fd);
+ if (fd)
+ close(fd);
- unlink (filename);
- return ret;
+ unlink(filename);
+ return ret;
}
int
-dup_fd_based_fops (char *filename)
+dup_fd_based_fops(char *filename)
{
- int fd = 0;
- int newfd = 0;
- int ret = -1;
- struct stat stbuf = {0,};
- char wstr[50] = {0,};
- char rstr[50] = {0,};
-
- fd = open (filename, O_RDWR|O_CREAT);
- if (fd < 0) {
- fd = 0;
- fprintf (stderr, "open failed : %s\n", strerror (errno));
- goto out;
- }
-
- newfd = dup (fd);
- if (newfd < 0) {
- ret = -1;
- fprintf (stderr, "dup failed: %s\n", strerror (errno));
- goto out;
- }
-
- close (fd);
-
- strcpy (wstr, "This is my string\n");
- ret = write (newfd, wstr, strlen(wstr));
- if (ret <= 0) {
- ret = -1;
- fprintf (stderr, "write failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = lseek (newfd, 0, SEEK_SET);
- if (ret < 0) {
- fprintf (stderr, "lseek failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = read (newfd, rstr, strlen(wstr));
- if (ret <= 0) {
- ret = -1;
- fprintf (stderr, "read failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = memcmp (rstr, wstr, strlen (wstr));
- if (ret != 0) {
- ret = -1;
- fprintf (stderr, "read returning junk\n");
- goto out;
- }
-
- ret = ftruncate (newfd, 0);
- if (ret < 0) {
- fprintf (stderr, "ftruncate failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fstat (newfd, &stbuf);
- if (ret < 0) {
- fprintf (stderr, "fstat failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fchmod (newfd, 0640);
- if (ret < 0) {
- fprintf (stderr, "fchmod failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fchown (newfd, 10001, 10001);
- if (ret < 0) {
- fprintf (stderr, "fchown failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fsync (newfd);
- if (ret < 0) {
- fprintf (stderr, "fsync failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fsetxattr (newfd, "trusted.xattr-test", "working", 8, 0);
- if (ret < 0) {
- fprintf (stderr, "fsetxattr failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fdatasync (newfd);
- if (ret < 0) {
- fprintf (stderr, "fdatasync failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = flistxattr (newfd, NULL, 0);
- if (ret <= 0) {
- ret = -1;
- fprintf (stderr, "flistxattr failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fgetxattr (newfd, "trusted.xattr-test", NULL, 0);
- if (ret <= 0) {
- ret = -1;
- fprintf (stderr, "fgetxattr failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = fremovexattr (newfd, "trusted.xattr-test");
- if (ret < 0) {
- fprintf (stderr, "fremovexattr failed : %s\n", strerror (errno));
- goto out;
- }
-
- ret = 0;
+ int fd = 0;
+ int newfd = 0;
+ int ret = -1;
+ struct stat stbuf = {
+ 0,
+ };
+ char wstr[50] = {
+ 0,
+ };
+ char rstr[50] = {
+ 0,
+ };
+
+ fd = open(filename, O_RDWR | O_CREAT);
+ if (fd < 0) {
+ fd = 0;
+ fprintf(stderr, "open failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ newfd = dup(fd);
+ if (newfd < 0) {
+ ret = -1;
+ fprintf(stderr, "dup failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ close(fd);
+
+ strcpy(wstr, "This is my string\n");
+ ret = write(newfd, wstr, strlen(wstr));
+ if (ret <= 0) {
+ ret = -1;
+ fprintf(stderr, "write failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = lseek(newfd, 0, SEEK_SET);
+ if (ret < 0) {
+ fprintf(stderr, "lseek failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = read(newfd, rstr, strlen(wstr));
+ if (ret <= 0) {
+ ret = -1;
+ fprintf(stderr, "read failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = memcmp(rstr, wstr, strlen(wstr));
+ if (ret != 0) {
+ ret = -1;
+ fprintf(stderr, "read returning junk\n");
+ goto out;
+ }
+
+ ret = ftruncate(newfd, 0);
+ if (ret < 0) {
+ fprintf(stderr, "ftruncate failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fstat(newfd, &stbuf);
+ if (ret < 0) {
+ fprintf(stderr, "fstat failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fchmod(newfd, 0640);
+ if (ret < 0) {
+ fprintf(stderr, "fchmod failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fchown(newfd, 10001, 10001);
+ if (ret < 0) {
+ fprintf(stderr, "fchown failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fsync(newfd);
+ if (ret < 0) {
+ fprintf(stderr, "fsync failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fsetxattr(newfd, "trusted.xattr-test", "working", 8, 0);
+ if (ret < 0) {
+ fprintf(stderr, "fsetxattr failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fdatasync(newfd);
+ if (ret < 0) {
+ fprintf(stderr, "fdatasync failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = flistxattr(newfd, NULL, 0);
+ if (ret <= 0) {
+ ret = -1;
+ fprintf(stderr, "flistxattr failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fgetxattr(newfd, "trusted.xattr-test", NULL, 0);
+ if (ret <= 0) {
+ ret = -1;
+ fprintf(stderr, "fgetxattr failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = fremovexattr(newfd, "trusted.xattr-test");
+ if (ret < 0) {
+ fprintf(stderr, "fremovexattr failed : %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = 0;
out:
- if (newfd)
- close (newfd);
- ret = unlink (filename);
- if (ret < 0)
- fprintf (stderr, "unlink failed : %s\n", strerror (errno));
+ if (newfd)
+ close(newfd);
+ ret = unlink(filename);
+ if (ret < 0)
+ fprintf(stderr, "unlink failed : %s\n", strerror(errno));
- return ret;
+ return ret;
}
int
-dir_based_fops (char *dirname)
+dir_based_fops(char *dirname)
{
- int ret = -1;
- DIR *dp = NULL;
- char buff[255] = {0,};
- struct dirent *dbuff = {0,};
- struct stat stbuff = {0,};
- char newdname[255] = {0,};
- char *cwd = NULL;
-
- ret = mkdir (dirname, 0755);
- if (ret < 0) {
- fprintf (stderr, "mkdir failed: %s\n", strerror (errno));
- goto out;
- }
-
- dp = opendir (dirname);
- if (dp == NULL) {
- fprintf (stderr, "opendir failed: %s\n", strerror (errno));
- goto out;
- }
-
- dbuff = readdir (dp);
- if (NULL == dbuff) {
- fprintf (stderr, "readdir failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = closedir (dp);
- if (ret < 0) {
- fprintf (stderr, "closedir failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = stat (dirname, &stbuff);
- if (ret < 0) {
- fprintf (stderr, "stat failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = chmod (dirname, 0744);
- if (ret < 0) {
- fprintf (stderr, "chmod failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = chown (dirname, 10001, 10001);
- if (ret < 0) {
- fprintf (stderr, "chmod failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = setxattr (dirname, "trusted.xattr-test", "working", 8, 0);
- if (ret < 0) {
- fprintf (stderr, "setxattr failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = listxattr (dirname, NULL, 0);
- if (ret <= 0) {
- ret = -1;
- fprintf (stderr, "listxattr failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = getxattr (dirname, "trusted.xattr-test", NULL, 0);
- if (ret <= 0) {
- ret = -1;
- fprintf (stderr, "getxattr failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = removexattr (dirname, "trusted.xattr-test");
- if (ret < 0) {
- fprintf (stderr, "removexattr failed: %s\n", strerror (errno));
- goto out;
- }
-
- strcpy (newdname, dirname);
- strcat (newdname, "/../");
- ret = chdir (newdname);
- if (ret < 0) {
- fprintf (stderr, "chdir failed: %s\n", strerror (errno));
- goto out;
- }
-
- cwd = getcwd (buff, 255);
- if (NULL == cwd) {
- fprintf (stderr, "getcwd failed: %s\n", strerror (errno));
- goto out;
- }
-
- strcpy (newdname, dirname);
- strcat (newdname, "new");
- ret = rename (dirname, newdname);
- if (ret < 0) {
- fprintf (stderr, "rename failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = rmdir (newdname);
- if (ret < 0) {
- fprintf (stderr, "rmdir failed: %s\n", strerror (errno));
- return ret;
- }
+ int ret = -1;
+ DIR *dp = NULL;
+ char buff[255] = {
+ 0,
+ };
+ struct dirent *dbuff = {
+ 0,
+ };
+ struct stat stbuff = {
+ 0,
+ };
+ char newdname[255] = {
+ 0,
+ };
+ char *cwd = NULL;
+
+ ret = mkdir(dirname, 0755);
+ if (ret < 0) {
+ fprintf(stderr, "mkdir failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ dp = opendir(dirname);
+ if (dp == NULL) {
+ fprintf(stderr, "opendir failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ dbuff = readdir(dp);
+ if (NULL == dbuff) {
+ fprintf(stderr, "readdir failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = closedir(dp);
+ if (ret < 0) {
+ fprintf(stderr, "closedir failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = stat(dirname, &stbuff);
+ if (ret < 0) {
+ fprintf(stderr, "stat failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = chmod(dirname, 0744);
+ if (ret < 0) {
+ fprintf(stderr, "chmod failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = chown(dirname, 10001, 10001);
+ if (ret < 0) {
+ fprintf(stderr, "chmod failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = setxattr(dirname, "trusted.xattr-test", "working", 8, 0);
+ if (ret < 0) {
+ fprintf(stderr, "setxattr failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = listxattr(dirname, NULL, 0);
+ if (ret <= 0) {
+ ret = -1;
+ fprintf(stderr, "listxattr failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = getxattr(dirname, "trusted.xattr-test", NULL, 0);
+ if (ret <= 0) {
+ ret = -1;
+ fprintf(stderr, "getxattr failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = removexattr(dirname, "trusted.xattr-test");
+ if (ret < 0) {
+ fprintf(stderr, "removexattr failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ strcpy(newdname, dirname);
+ strcat(newdname, "/../");
+ ret = chdir(newdname);
+ if (ret < 0) {
+ fprintf(stderr, "chdir failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ cwd = getcwd(buff, 255);
+ if (NULL == cwd) {
+ fprintf(stderr, "getcwd failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ strcpy(newdname, dirname);
+ strcat(newdname, "new");
+ ret = rename(dirname, newdname);
+ if (ret < 0) {
+ fprintf(stderr, "rename failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = rmdir(newdname);
+ if (ret < 0) {
+ fprintf(stderr, "rmdir failed: %s\n", strerror(errno));
+ return ret;
+ }
out:
- rmdir (dirname);
- return ret;
+ rmdir(dirname);
+ return ret;
}
int
-link_based_fops (char *filename)
+link_based_fops(char *filename)
{
- int ret = -1;
- int fd = 0;
- char newname[255] = {0,};
- char linkname[255] = {0,};
- struct stat lstbuf = {0,};
-
- fd = creat (filename, 0644);
- if (fd < 0) {
- fd = 0;
- fprintf (stderr, "creat failed: %s\n", strerror (errno));
- goto out;
- }
-
- strcpy (newname, filename);
- strcat (newname, "_hlink");
- ret = link (filename, newname);
- if (ret < 0) {
- fprintf (stderr, "link failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = unlink (filename);
- if (ret < 0) {
- fprintf (stderr, "unlink failed: %s\n", strerror (errno));
- goto out;
- }
-
- strcpy (linkname, filename);
- strcat (linkname, "_slink");
- ret = symlink (newname, linkname);
- if (ret < 0) {
- fprintf (stderr, "symlink failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = lstat (linkname, &lstbuf);
- if (ret < 0) {
- fprintf (stderr, "lstbuf failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = lchown (linkname, 10001, 10001);
- if (ret < 0) {
- fprintf (stderr, "lchown failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = lsetxattr (linkname, "trusted.lxattr-test", "working", 8, 0);
- if (ret < 0) {
- fprintf (stderr, "lsetxattr failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = llistxattr (linkname, NULL, 0);
- if (ret < 0) {
- ret = -1;
- fprintf (stderr, "llistxattr failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = lgetxattr (linkname, "trusted.lxattr-test", NULL, 0);
- if (ret < 0) {
- ret = -1;
- fprintf (stderr, "lgetxattr failed: %s\n", strerror (errno));
- goto out;
- }
-
- ret = lremovexattr (linkname, "trusted.lxattr-test");
- if (ret < 0) {
- fprintf (stderr, "lremovexattr failed: %s\n", strerror (errno));
- goto out;
- }
-
+ int ret = -1;
+ int fd = 0;
+ char newname[255] = {
+ 0,
+ };
+ char linkname[255] = {
+ 0,
+ };
+ struct stat lstbuf = {
+ 0,
+ };
+
+ fd = creat(filename, 0644);
+ if (fd < 0) {
+ fd = 0;
+ fprintf(stderr, "creat failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ strcpy(newname, filename);
+ strcat(newname, "_hlink");
+ ret = link(filename, newname);
+ if (ret < 0) {
+ fprintf(stderr, "link failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = unlink(filename);
+ if (ret < 0) {
+ fprintf(stderr, "unlink failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ strcpy(linkname, filename);
+ strcat(linkname, "_slink");
+ ret = symlink(newname, linkname);
+ if (ret < 0) {
+ fprintf(stderr, "symlink failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = lstat(linkname, &lstbuf);
+ if (ret < 0) {
+ fprintf(stderr, "lstbuf failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = lchown(linkname, 10001, 10001);
+ if (ret < 0) {
+ fprintf(stderr, "lchown failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = lsetxattr(linkname, "trusted.lxattr-test", "working", 8, 0);
+ if (ret < 0) {
+ fprintf(stderr, "lsetxattr failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = llistxattr(linkname, NULL, 0);
+ if (ret < 0) {
+ ret = -1;
+ fprintf(stderr, "llistxattr failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = lgetxattr(linkname, "trusted.lxattr-test", NULL, 0);
+ if (ret < 0) {
+ ret = -1;
+ fprintf(stderr, "lgetxattr failed: %s\n", strerror(errno));
+ goto out;
+ }
+
+ ret = lremovexattr(linkname, "trusted.lxattr-test");
+ if (ret < 0) {
+ fprintf(stderr, "lremovexattr failed: %s\n", strerror(errno));
+ goto out;
+ }
out:
- if (fd)
- close(fd);
- unlink (linkname);
- unlink (newname);
+ if (fd)
+ close(fd);
+ unlink(linkname);
+ unlink(newname);
}
int
-test_open_modes (char *filename)
+test_open_modes(char *filename)
{
- int ret = -1;
-
- ret = generic_open_read_write (filename, O_CREAT|O_WRONLY);
- if (3 != ret) {
- fprintf (stderr, "flag O_CREAT|O_WRONLY failed: \n");
- goto out;
- }
-
- ret = generic_open_read_write (filename, O_CREAT|O_RDWR);
- if (ret != 0) {
- fprintf (stderr, "flag O_CREAT|O_RDWR failed\n");
- goto out;
- }
-
- ret = generic_open_read_write (filename, O_CREAT|O_RDONLY);
- if (ret != 0) {
- fprintf (stderr, "flag O_CREAT|O_RDONLY failed\n");
- goto out;
- }
-
- ret = creat (filename, 0644);
- close (ret);
- ret = generic_open_read_write (filename, O_WRONLY);
- if (3 != ret) {
- fprintf (stderr, "flag O_WRONLY failed\n");
- goto out;
- }
-
- ret = creat (filename, 0644);
- close (ret);
- ret = generic_open_read_write (filename, O_RDWR);
- if (0 != ret) {
- fprintf (stderr, "flag O_RDWR failed\n");
- goto out;
- }
-
- ret = creat (filename, 0644);
- close (ret);
- ret = generic_open_read_write (filename, O_RDONLY);
- if (0 != ret) {
- fprintf (stderr, "flag O_RDONLY failed\n");
- goto out;
- }
-
- ret = creat (filename, 0644);
- close (ret);
- ret = generic_open_read_write (filename, O_TRUNC|O_WRONLY);
- if (3 != ret) {
- fprintf (stderr, "flag O_TRUNC|O_WRONLY failed\n");
- goto out;
- }
+ int ret = -1;
+
+ ret = generic_open_read_write(filename, O_CREAT | O_WRONLY);
+ if (3 != ret) {
+ fprintf(stderr, "flag O_CREAT|O_WRONLY failed: \n");
+ goto out;
+ }
+
+ ret = generic_open_read_write(filename, O_CREAT | O_RDWR);
+ if (ret != 0) {
+ fprintf(stderr, "flag O_CREAT|O_RDWR failed\n");
+ goto out;
+ }
+
+ ret = generic_open_read_write(filename, O_CREAT | O_RDONLY);
+ if (ret != 0) {
+ fprintf(stderr, "flag O_CREAT|O_RDONLY failed\n");
+ goto out;
+ }
+
+ ret = creat(filename, 0644);
+ close(ret);
+ ret = generic_open_read_write(filename, O_WRONLY);
+ if (3 != ret) {
+ fprintf(stderr, "flag O_WRONLY failed\n");
+ goto out;
+ }
+
+ ret = creat(filename, 0644);
+ close(ret);
+ ret = generic_open_read_write(filename, O_RDWR);
+ if (0 != ret) {
+ fprintf(stderr, "flag O_RDWR failed\n");
+ goto out;
+ }
+
+ ret = creat(filename, 0644);
+ close(ret);
+ ret = generic_open_read_write(filename, O_RDONLY);
+ if (0 != ret) {
+ fprintf(stderr, "flag O_RDONLY failed\n");
+ goto out;
+ }
+
+ ret = creat(filename, 0644);
+ close(ret);
+ ret = generic_open_read_write(filename, O_TRUNC | O_WRONLY);
+ if (3 != ret) {
+ fprintf(stderr, "flag O_TRUNC|O_WRONLY failed\n");
+ goto out;
+ }
#if 0 /* undefined behaviour, unable to reliably test */
ret = creat (filename, 0644);
@@ -785,84 +833,88 @@ test_open_modes (char *filename)
}
#endif
- ret = generic_open_read_write (filename, O_CREAT|O_RDWR|O_SYNC);
- if (0 != ret) {
- fprintf (stderr, "flag O_CREAT|O_RDWR|O_SYNC failed\n");
- goto out;
- }
+ ret = generic_open_read_write(filename, O_CREAT | O_RDWR | O_SYNC);
+ if (0 != ret) {
+ fprintf(stderr, "flag O_CREAT|O_RDWR|O_SYNC failed\n");
+ goto out;
+ }
- ret = creat (filename, 0644);
- close (ret);
- ret = generic_open_read_write (filename, O_CREAT|O_EXCL);
- if (0 != ret) {
- fprintf (stderr, "flag O_CREAT|O_EXCL failed\n");
- goto out;
- }
+ ret = creat(filename, 0644);
+ close(ret);
+ ret = generic_open_read_write(filename, O_CREAT | O_EXCL);
+ if (0 != ret) {
+ fprintf(stderr, "flag O_CREAT|O_EXCL failed\n");
+ goto out;
+ }
out:
- return ret;
+ return ret;
}
-int generic_open_read_write (char *filename, int flag)
+int
+generic_open_read_write(char *filename, int flag)
{
- int fd = 0;
- int ret = -1;
- char wstring[50] = {0,};
- char rstring[50] = {0,};
-
- fd = open (filename, flag);
- if (fd < 0) {
- if (flag == O_CREAT|O_EXCL && errno == EEXIST) {
- unlink (filename);
- return 0;
- }
- else {
- fd = 0;
- fprintf (stderr, "open failed: %s\n", strerror (errno));
- return 1;
- }
- }
-
- strcpy (wstring, "My string to write\n");
- ret = write (fd, wstring, strlen(wstring));
- if (ret <= 0) {
- if (errno != EBADF) {
- fprintf (stderr, "write failed: %s\n", strerror (errno));
- close (fd);
- unlink(filename);
- return 2;
- }
- }
-
- ret = lseek (fd, 0, SEEK_SET);
- if (ret < 0) {
- close (fd);
- unlink(filename);
- return 4;
- }
-
- ret = read (fd, rstring, strlen(wstring));
- if (ret < 0) {
- close (fd);
- unlink (filename);
- return 3;
- }
-
- /* Compare the rstring with wstring. But we do not want to return
- * error when the flag is either O_RDONLY, O_CREAT|O_RDONLY or
- * O_TRUNC|O_RDONLY. Because in that case we are not writing
- * anything to the file.*/
-
- ret = memcmp (wstring, rstring, strlen (wstring));
- if (0 != ret && !(flag == O_CREAT|O_RDONLY || flag == O_RDONLY ||\
- flag == O_TRUNC|O_RDONLY)) {
- fprintf (stderr, "read is returning junk\n");
- close (fd);
- unlink (filename);
- return 4;
- }
-
- close (fd);
- unlink (filename);
- return 0;
+ int fd = 0;
+ int ret = -1;
+ char wstring[50] = {
+ 0,
+ };
+ char rstring[50] = {
+ 0,
+ };
+
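+    /* Return codes: 0 success, 1 open failed, 2 write failed (other than
+     * EBADF), 3 read failed, 4 lseek failed or read-back data mismatch. */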
+ fd = open(filename, flag);
+ if (fd < 0) {
+        if (flag == (O_CREAT | O_EXCL) && errno == EEXIST) {
+ unlink(filename);
+ return 0;
+ } else {
+ fd = 0;
+ fprintf(stderr, "open failed: %s\n", strerror(errno));
+ return 1;
+ }
+ }
+
+ strcpy(wstring, "My string to write\n");
+ ret = write(fd, wstring, strlen(wstring));
+ if (ret <= 0) {
+ if (errno != EBADF) {
+ fprintf(stderr, "write failed: %s\n", strerror(errno));
+ close(fd);
+ unlink(filename);
+ return 2;
+ }
+ }
+
+ ret = lseek(fd, 0, SEEK_SET);
+ if (ret < 0) {
+ close(fd);
+ unlink(filename);
+ return 4;
+ }
+
+ ret = read(fd, rstring, strlen(wstring));
+ if (ret < 0) {
+ close(fd);
+ unlink(filename);
+ return 3;
+ }
+
+    /* Compare rstring with wstring, but do not treat a mismatch as an error
+     * when the flag is O_RDONLY, O_CREAT|O_RDONLY or O_TRUNC|O_RDONLY,
+     * because nothing was written to the file in those cases. */
+
+ ret = memcmp(wstring, rstring, strlen(wstring));
+    if (0 != ret && !(flag == (O_CREAT | O_RDONLY) || flag == O_RDONLY ||
+                      flag == (O_TRUNC | O_RDONLY))) {
+ fprintf(stderr, "read is returning junk\n");
+ close(fd);
+ unlink(filename);
+ return 4;
+ }
+
+ close(fd);
+ unlink(filename);
+ return 0;
}
diff --git a/extras/thin-arbiter/setup-thin-arbiter.sh b/extras/thin-arbiter/setup-thin-arbiter.sh
new file mode 100755
index 00000000000..0681b30ef3f
--- /dev/null
+++ b/extras/thin-arbiter/setup-thin-arbiter.sh
@@ -0,0 +1,184 @@
+#!/bin/bash
+# Copyright (c) 2018-2019 Red Hat, Inc. <http://www.redhat.com>
+# This file is part of GlusterFS.
+#
+# This file is licensed to you under your choice of the GNU Lesser
+# General Public License, version 3 or any later version (LGPLv3 or
+# later), or the GNU General Public License, version 2 (GPLv2), in all
+# cases as published by the Free Software Foundation.
+
+
+# This tool sets up a thin-arbiter process on a node.
+# Setting up a thin-arbiter process involves the following files -
+# 1 - thin-arbiter.vol
+#     The thin-arbiter (TA) process uses the graph in this file to load the
+#     required translators.
+# 2 - gluster-ta-volume.service (generated from gluster-ta-volume.service.in)
+#     The TA process runs as a systemd service.
+#
+# The TA process uses a location to save TA id files for every subvolume.
+# This location is taken as input from the user. Once it has been provided
+# and the TA process has been started on a node, it cannot be changed with
+# this script or by any other means. The same location must be used in
+# the gluster CLI when creating thin-arbiter volumes.
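+#
+# Example (hypothetical volume and host names): after running
+#   ./setup-thin-arbiter.sh -s
+# on the thin-arbiter node and entering e.g. /mnt/thin-arbiter as the path,
+# the same path is given as the thin-arbiter brick when creating a volume:
+#   gluster volume create testvol replica 2 thin-arbiter 1 \
+#       server1:/bricks/brick1 server2:/bricks/brick1 ta-node:/mnt/thin-arbiter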
+
+MYPATH=`dirname $0`
+
+volloc="/var/lib/glusterd/thin-arbiter"
+mkdir -p $volloc
+
+if [ -f /etc/glusterfs/thin-arbiter.vol ]; then
+ volfile=/etc/glusterfs/thin-arbiter.vol
+else
+ volfile=$MYPATH/thin-arbiter.vol
+fi
+
+tafile="$volloc/thin-arbiter.vol"
+
+
+help () {
+ echo " "
+    echo ' This tool helps to set up a thin-arbiter (TA) process on a node.
+    The TA process uses a location to save TA id files for every subvolume.
+    This location is taken as input from the user. Once it has been provided
+    and the TA process has been started on a node, it cannot be changed with
+    this script or by any other means. The same location must be used in the
+    gluster CLI when creating thin-arbiter volumes.
+
+ usage: setup-thin-arbiter.sh [-s] [-h]
+ options:
+ -s - Setup thin-arbiter file path and start process
+ -h - Show this help message and exit
+'
+}
+
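+# Replace the brick path in the copied volfile with the user-supplied path:
+# the current value is read from the "option directory" line and substituted
+# wherever it appears in the file (directory, index-base, unique-id and the
+# auth/export settings).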
+volfile_set_brick_path () {
+ while read -r line
+ do
+ dir=`echo "$line" | cut -d' ' -f 2`
+ if [ "$dir" = "directory" ]; then
+ bpath=`echo "$line" | cut -d' ' -f 3`
+ sed -i -- 's?'$bpath'?'$1'?g' $tafile
+ return
+ fi
+ done < $tafile
+}
+
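+# Abort if a thin-arbiter process is already running on this node: the TA
+# path cannot be changed once the process has been started.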
+check_ta_proc () {
+ pro=`ps aux | grep thin-arbiter.vol | grep "volfile-id"`
+ if [ "${pro}" = '' ]; then
+ echo ""
+ else
+ curr_loc=`cat $volloc/thin-arbiter.vol | grep option | grep directory`
+ loc=`echo "${curr_loc##* }"`
+ echo "******************************************************"
+ echo "Error:"
+ echo "Thin-arbiter process is running with thin-arbiter path = $loc"
+        echo "Cannot change the TA path on this host now."
+ echo "$pro"
+ echo "******************************************************"
+ exit 1
+ fi
+}
+
+getpath () {
+ check_ta_proc
+ echo "******************************************************"
+    echo "You will be asked to enter a path/folder for the thin-arbiter volume."
+    echo "Please note that this path will be used for ALL VOLUMES using this"
+    echo "node to host thin-arbiter. Once a volume has been created using"
+    echo "this host and path, the thin-arbiter path cannot be changed."
+ echo "******************************************************"
+ echo " "
+ while true;
+ do
+ echo -n "Enter brick path for thin arbiter volumes: "
+ echo " "
+ read tapath
+ if [ "${tapath}" = '' ]; then
+            echo "Please enter a valid path"
+            continue
+        else
+            echo "Entered brick path: $tapath"
+            echo "Please note that this brick path will be used for ALL"
+            echo "VOLUMES using this node to host the thin-arbiter brick"
+ echo -n "Want to continue? (y/N): "
+ echo " "
+ read cont
+
+            if [ "${cont}" = 'y' ] || [ "${cont}" = 'Y' ]; then
+                break
+            else
+                exit 0
+            fi
+ fi
+ done
+}
+
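+# Create the .glusterfs/indices directory under the chosen path, install the
+# rewritten volfile and the gluster-ta-volume systemd unit (if not already
+# present), then enable and (re)start the service.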
+setup () {
+ getpath
+ mkdir -p $tapath/.glusterfs/indices
+ if [ -d $tapath/.glusterfs/indices ]; then
+ echo " "
+ else
+        echo "Could not create $tapath/.glusterfs/indices directory, check the provided TA path."
+ exit 1
+ fi
+
+ cp -f --backup --suffix=_old $volfile $volloc/thin-arbiter.vol
+ volfile_set_brick_path "$tapath"
+
+ echo "Directory path to be used for thin-arbiter volume is: $tapath"
+ echo " "
+ echo "========================================================"
+
+ if [ -f /usr/lib/systemd/system/gluster-ta-volume.service ]; then
+ echo "Starting thin-arbiter process"
+ else
+ cp $MYPATH/../systemd/gluster-ta-volume.service /etc/systemd/system/
+ echo "Starting thin-arbiter process"
+ chmod 0644 /etc/systemd/system/gluster-ta-volume.service
+ fi
+
+ systemctl daemon-reload
+ systemctl enable gluster-ta-volume
+ systemctl stop gluster-ta-volume
+ systemctl start gluster-ta-volume
+
+ if [ $? == 0 ]; then
+        echo "thin-arbiter process has been set up and is running"
+    else
+        echo "Failed to set up thin-arbiter"
+ exit 1
+ fi
+
+}
+
+main()
+{
+
+ if [ "$#" -ne 1 ]; then
+ help
+ exit 0
+ fi
+
+ while getopts "sh" opt; do
+ case $opt in
+ h)
+ help
+ exit 0
+ ;;
+ s)
+ setup
+ exit 0
+ ;;
+ *)
+ help
+ exit 0
+ ;;
+ esac
+ done
+}
+
+main "$@"
diff --git a/extras/thin-arbiter/thin-arbiter.vol b/extras/thin-arbiter/thin-arbiter.vol
new file mode 100644
index 00000000000..c76babc7b3c
--- /dev/null
+++ b/extras/thin-arbiter/thin-arbiter.vol
@@ -0,0 +1,57 @@
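+# Default translator graph for the thin-arbiter process: a storage/posix
+# brick at /mnt/thin-arbiter (rewritten to the user-supplied path by
+# setup-thin-arbiter.sh) stacked under thin-arbiter, locks, upcall,
+# io-threads, index and io-stats, and exported via protocol/server.
+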
+volume ta-posix
+ type storage/posix
+ option directory /mnt/thin-arbiter
+end-volume
+
+volume ta-thin-arbiter
+ type features/thin-arbiter
+ subvolumes ta-posix
+end-volume
+
+volume ta-locks
+ type features/locks
+ option notify-contention yes
+ subvolumes ta-thin-arbiter
+end-volume
+
+volume ta-upcall
+ type features/upcall
+ option cache-invalidation off
+ subvolumes ta-locks
+end-volume
+
+volume ta-io-threads
+ type performance/io-threads
+ subvolumes ta-upcall
+end-volume
+
+volume ta-index
+ type features/index
+ option xattrop-pending-watchlist trusted.afr.ta-
+ option xattrop-dirty-watchlist trusted.afr.dirty
+ option index-base /mnt/thin-arbiter/.glusterfs/indices
+ subvolumes ta-io-threads
+end-volume
+
+volume /mnt/thin-arbiter
+ type debug/io-stats
+ option count-fop-hits off
+ option latency-measurement off
+ option unique-id /mnt/thin-arbiter
+ subvolumes ta-index
+end-volume
+
+volume ta-server
+ type protocol/server
+ option transport.listen-backlog 10
+ option transport.socket.keepalive-count 9
+ option transport.socket.keepalive-interval 2
+ option transport.socket.keepalive-time 20
+ option transport.tcp-user-timeout 0
+ option transport.socket.keepalive 1
+ option auth.addr./mnt/thin-arbiter.allow *
+ option auth-path /mnt/thin-arbiter
+ option transport.address-family inet
+ option transport-type tcp
+ subvolumes /mnt/thin-arbiter
+end-volume
diff --git a/extras/volfilter.py b/extras/volfilter.py
index 0ca456a7882..5558a1beff4 100644
--- a/extras/volfilter.py
+++ b/extras/volfilter.py
@@ -13,6 +13,7 @@
# You should have received a copy of the GNU General Public License * along
# with HekaFS. If not, see <http://www.gnu.org/licenses/>.
+from __future__ import print_function
import copy
import string
import sys
@@ -35,7 +36,7 @@ good_xlators = [
"storage/posix",
]
-def copy_stack (old_xl,suffix,recursive=False):
+def copy_stack (old_xl, suffix, recursive=False):
if recursive:
new_name = old_xl.name + "-" + suffix
else:
@@ -45,7 +46,7 @@ def copy_stack (old_xl,suffix,recursive=False):
# The results with normal assignment here are . . . amusing.
new_xl.opts = copy.deepcopy(old_xl.opts)
for sv in old_xl.subvols:
- new_xl.subvols.append(copy_stack(sv,suffix,True))
+ new_xl.subvols.append(copy_stack(sv, suffix, True))
# Patch up the path at the bottom.
if new_xl.type == "storage/posix":
new_xl.opts["directory"] += ("/" + suffix)
@@ -63,10 +64,10 @@ def cleanup (parent, graph):
parent.opts["transport-type"] = "ssl"
sv = []
for child in parent.subvols:
- sv.append(cleanup(child,graph))
+ sv.append(cleanup(child, graph))
parent.subvols = sv
else:
- parent = cleanup(parent.subvols[0],graph)
+ parent = cleanup(parent.subvols[0], graph)
return parent
class Translator:
@@ -82,8 +83,8 @@ class Translator:
def load (path):
# If it's a string, open it; otherwise, assume it's already a
# file-like object (most notably from urllib*).
- if type(path) in types.StringTypes:
- fp = file(path,"r")
+ if type(path) in (str,):
+        fp = open(path, "r")
else:
fp = path
all_xlators = {}
@@ -98,16 +99,16 @@ def load (path):
continue
if text[0] == "volume":
if xlator:
- raise RuntimeError, "nested volume definition"
+ raise RuntimeError("nested volume definition")
xlator = Translator(text[1])
continue
if not xlator:
- raise RuntimeError, "text outside volume definition"
+ raise RuntimeError("text outside volume definition")
if text[0] == "type":
xlator.type = text[1]
continue
if text[0] == "option":
- xlator.opts[text[1]] = string.join(text[2:])
+            xlator.opts[text[1]] = ' '.join(text[2:])
continue
if text[0] == "subvolumes":
for sv in text[1:]:
@@ -118,25 +119,25 @@ def load (path):
last_xlator = xlator
xlator = None
continue
- raise RuntimeError, "unrecognized keyword %s" % text[0]
+ raise RuntimeError("unrecognized keyword %s" % text[0])
if xlator:
- raise RuntimeError, "unclosed volume definition"
+ raise RuntimeError("unclosed volume definition")
return all_xlators, last_xlator
def generate (graph, last, stream=sys.stdout):
for sv in last.subvols:
if not sv.dumped:
- generate(graph,sv,stream)
- print >> stream, ""
+ generate(graph, sv, stream)
+ print("", file=stream)
sv.dumped = True
- print >> stream, "volume %s" % last.name
- print >> stream, " type %s" % last.type
- for k, v in last.opts.iteritems():
- print >> stream, " option %s %s" % (k, v)
+ print("volume %s" % last.name, file=stream)
+ print(" type %s" % last.type, file=stream)
+ for k, v in last.opts.items():
+ print(" option %s %s" % (k, v), file=stream)
if last.subvols:
- print >> stream, " subvolumes %s" % string.join(
- [ sv.name for sv in last.subvols ])
- print >> stream, "end-volume"
+        print(" subvolumes %s" % ' '.join(
+            [ sv.name for sv in last.subvols ]), file=stream)
+ print("end-volume", file=stream)
def push_filter (graph, old_xl, filt_type, opts={}):
suffix = "-" + old_xl.type.split("/")[1]
@@ -156,7 +157,7 @@ def push_filter (graph, old_xl, filt_type, opts={}):
def delete (graph, victim):
if len(victim.subvols) != 1:
- raise RuntimeError, "attempt to delete non-unary translator"
+ raise RuntimeError("attempt to delete non-unary translator")
for xl in graph.itervalues():
while xl.subvols.count(victim):
i = xl.subvols.index(victim)
@@ -164,4 +165,4 @@ def delete (graph, victim):
if __name__ == "__main__":
graph, last = load(sys.argv[1])
- generate(graph,last)
+ generate(graph, last)
diff --git a/extras/who-wrote-glusterfs/gitdm.aliases b/extras/who-wrote-glusterfs/gitdm.aliases
index e19b99c79c8..901c12418e3 100644
--- a/extras/who-wrote-glusterfs/gitdm.aliases
+++ b/extras/who-wrote-glusterfs/gitdm.aliases
@@ -16,11 +16,13 @@ anush@gluster.com ashetty@redhat.com
csaba@gluster.com csaba@redhat.com
csaba@lowlife.hu csaba@redhat.com
csaba@zresearch.com csaba@redhat.com
+gd@samba.org gd@redhat.com
harsha@gluster.com fharshav@redhat.com
harsha@zresearch.com fharshav@redhat.com
harsha@dev.gluster.com fharshav@redhat.com
harsha@harshavardhana.net fharshav@redhat.com
jclift@redhat.com jclift@gluster.org
+kkeithle@linux.keithley.org kkeithle@redhat.com
kkeithle@f16node1.kkeithle.usersys.redhat.com kkeithle@redhat.com
kaushal@gluster.com kaushal@redhat.com
kaushikbv@gluster.com kbudiger@redhat.com
@@ -32,6 +34,9 @@ me@louiszuckerman.com louiszuckerman@gmail.com
msvbhat@gmail.com vbhat@redhat.com
nullpai@gmail.com ppai@redhat.com
vishwanath@gluster.com vbhat@redhat.com
+obnox@samba.org madam@redhat.com
+oleksandr@natalenko.name o.natalenko@lanet.ua
+patrick@puiterwijk.org puiterwijk@fedoraproject.org
pavan@dev.gluster.com pavan@gluster.com
zaitcev@yahoo.com zaitcev@kotori.zaitcev.us
pranithk@gluster.com pkarampu@redhat.com
@@ -41,6 +46,8 @@ raghavendra@zresearch.com rgowdapp@redhat.com
rahulcssjce@gmail.com rahulcs@redhat.com
rajesh@gluster.com rajesh@redhat.com
rajesh.amaravathi@gmail.com rajesh@redhat.com
+root@ravi2.(none) ravishankar@redhat.com
+sabansal@localhost.localdomain sabansal@redhat.com
shehjart@zresearch.com shehjart@gluster.com
venky@gluster.com vshankar@redhat.com
vijay@gluster.com vbellur@redhat.com
@@ -48,3 +55,4 @@ vijay@dev.gluster.com vbellur@redhat.com
vijaykumar.koppad@gmail.com vkoppad@redhat.com
vikas@zresearch.com vikas@gluster.com
shishirng@gluster.com sgowda@redhat.com
+potatogim@potatogim.net potatogim@gluesys.com
diff --git a/extras/who-wrote-glusterfs/gitdm.domain-map b/extras/who-wrote-glusterfs/gitdm.domain-map
index 39526f0f99c..7cd2bbd605b 100644
--- a/extras/who-wrote-glusterfs/gitdm.domain-map
+++ b/extras/who-wrote-glusterfs/gitdm.domain-map
@@ -2,15 +2,28 @@
# Here is a set of mappings of domain names onto employer names.
#
active.by ActiveCloud
+appeartv.com Appear TV
cern.ch CERN
+cmss.chinamobile.com China Mobile(Suzhou) Software Technology
+datalab.es DataLab S.L.
+fb.com Facebook
+fedoraproject.org Fedora Project
gluster.com Red Hat
-gmail.com (unknown)
+gmail.com (personal contributions)
gooddata.com GoodData
hastexo.com hastexo
+horde.com (personal contributions)
ibm.com IBM
+io.com IO
+lanet.ua Lanet Network
linbit.com LINBIT
+nectec.or.th NECTEC
netbsd.org NetBSD
netdirect.ca Net Direct
+nokia.com Nokia
redhat.com Red Hat
stepping-stone.ch stepping stone GmbH
+xtaotech.com XTAO Co.
+yahoo.in (personal contributions)
zresearch.com Red Hat
+gluesys.com Gluesys