summaryrefslogtreecommitdiffstats
path: root/booster
Commit message (Expand)AuthorAgeFilesLines
* fuse: retire the faked-FLUSH-upon-RELEASE hackCsaba Henk2010-10-261-1/+0
* Copyright changesVijay Bellur2010-10-114-4/+4
* Change GNU GPL to GNU AGPLPranith K2010-10-044-12/+12
* booster/fcntl: implement F_GETFD and F_SETFD.v3.0.1rc5Raghavendra G2010-01-233-5/+85
* fixing some warnings on 64bit machine.Amar Tumballi2009-12-031-8/+11
* booster: implement xattr related apis.Raghavendra G2009-11-181-0/+101
* booster: implement creat64.Raghavendra G2009-11-181-0/+47
* booster: implement truncate and truncate64.Raghavendra G2009-11-181-0/+76
* booster: implement ftruncate64.Raghavendra G2009-11-181-1/+33
* booster: implement getcwd.Raghavendra G2009-11-181-0/+14
* booster: implement fchdir.Raghavendra G2009-11-181-0/+43
* booster: implement chdir.Raghavendra G2009-11-181-0/+53
* Revert "booster, libglusterfsclient: Support samba specific relative paths"Raghavendra G2009-11-181-8/+0
* booster: Must check errno for error number not return valueShehjar Tikoo2009-10-301-3/+3
* booster, libglusterfsclient: Support samba specific relative pathsShehjar Tikoo2009-10-301-0/+8
* booster: seperate out the implementations of readdir and readdir64.Raghavendra G2009-10-262-13/+41
* Changed occurrences of Z Research to Gluster.Vijay Bellur2009-10-074-4/+4
* Make changes such that glusterfs builds with Werror and fix LONG_LONG_MAX def...Pavan Sondur2009-10-011-2/+2
* booster: Cleanup booster_cleanup and register for atexitShehjar Tikoo2009-09-241-20/+10
* booster: Reduce logging aggressiveness to TRACEShehjar Tikoo2009-09-242-159/+159
* fuse: emit a flush from release if we didn't get an adjacent FLUSH message fr...Csaba Henk2009-09-231-0/+1
* booster: implement F_DUPFD command in fcntl.Raghavendra G2009-09-221-0/+2
* booster: Fix build warningsShehjar Tikoo2009-09-221-14/+15
* booster: use __REDIRECT macro to prevent creat being renamed to creat64.Raghavendra G2009-09-151-1/+4
* booster: use appropriate conversion specifier during logging in close.Raghavendra G2009-09-151-1/+1
* booster: Fix fd_t leak in pread64Shehjar Tikoo2009-09-091-0/+1
* booster: Enhance booster loggingShehjar Tikoo2009-09-093-71/+547
* booster: Support backward compatible optionsShehjar Tikoo2009-09-021-0/+6
* changed booster fstab to parse the arguments same as mount.glusterfsAmar Tumballi2009-08-201-2/+2
* booster: Fix fd leak due to incorrect NULL checkShehjar Tikoo2009-07-291-1/+1
* booster: automake 1.11 compatibility fixCsaba Henk2009-07-021-1/+1
* booster: Fix build problems with various libtool versionsShehjar Tikoo2009-06-292-12/+20
* booster: Another attempt to fix 32 and 64 bit interoperabilityShehjar Tikoo2009-06-291-17/+27
* booster: Add new booster-specific fd-tableShehjar Tikoo2009-06-294-139/+438
* booster: fix build error in fcntl implementation.Raghavendra G2009-06-151-1/+7
* booster: fix memory corruption in booster_cleanupRaghavendra G2009-06-111-4/+10
* booster: implement fcntl.Raghavendra G2009-06-111-1/+104
* booster: implement readdir_r and readdir64_r.Raghavendra G2009-06-112-0/+70
* booster: call glusterfs_umount_all during cleanupRaghavendra G2009-06-111-0/+4
* booster: implement sendfile.Raghavendra G2009-06-111-0/+58
* booster: remove mount table which maps a path to glusterfs handler.Raghavendra G2009-06-111-236/+86
* booster: attr_timeout: Read timeout from booster fstabShehjar Tikoo2009-06-031-0/+17
* booster: Clean-up handling of log/fstab env variablesShehjar Tikoo2009-06-021-5/+14
* booster: Eliminate gluster context creation raceShehjar Tikoo2009-06-021-92/+133
* booster: Clear up env var usageShehjar Tikoo2009-06-021-1/+14
* add booster_fstab.h in noinst_HEADERS to fix build breakage after 'make dist'Anand Avati2009-05-261-0/+1
* booster: Move fstab parsing into booster from libglusterfsShehjar Tikoo2009-05-204-109/+500
* booster: Fall back to remaining real_* functorsShehjar Tikoo2009-05-191-13/+36
* booster: Dont de-init fd tables on VMP-init failureShehjar Tikoo2009-05-191-17/+5
* booster: Do not read info for non-glusterfs mount pointsShehjar Tikoo2009-05-191-0/+3
c/developer-guide/fuse-interrupt.md97
-rw-r--r--doc/developer-guide/identifying-resource-leaks.md24
-rw-r--r--doc/developer-guide/options-to-contribute.md2
-rw-r--r--doc/developer-guide/thread-naming.md5
-rw-r--r--doc/developer-guide/translator-development.md2
-rw-r--r--doc/developer-guide/xlator-classification.md2
-rw-r--r--doc/gluster.811
-rw-r--r--doc/glusterfs.84
-rw-r--r--doc/mount.glusterfs.811
-rw-r--r--doc/release-notes/7.0.md265
-rw-r--r--events/src/eventsapiconf.py.in2
-rw-r--r--events/src/glustereventsd.py37
-rw-r--r--events/src/peer_eventsapi.py3
-rw-r--r--events/src/utils.py3
-rw-r--r--extras/Makefile.am3
-rw-r--r--extras/cliutils/README.md2
-rwxr-xr-xextras/distributed-testing/distributed-test-runner.py12
-rw-r--r--extras/ec-heal-script/README.md69
-rwxr-xr-xextras/ec-heal-script/correct_pending_heals.sh415
-rwxr-xr-xextras/ec-heal-script/gfid_needing_heal_parallel.sh278
-rw-r--r--extras/ganesha/Makefile.am2
-rw-r--r--extras/ganesha/config/Makefile.am4
-rw-r--r--extras/ganesha/config/ganesha-ha.conf.sample19
-rw-r--r--extras/ganesha/ocf/Makefile.am11
-rw-r--r--extras/ganesha/ocf/ganesha_grace221
-rw-r--r--extras/ganesha/ocf/ganesha_mon234
-rw-r--r--extras/ganesha/ocf/ganesha_nfsd167
-rw-r--r--extras/ganesha/scripts/Makefile.am6
-rwxr-xr-xextras/ganesha/scripts/create-export-ganesha.sh92
-rwxr-xr-xextras/ganesha/scripts/dbus-send.sh70
-rw-r--r--extras/ganesha/scripts/ganesha-ha.sh1199
-rwxr-xr-xextras/ganesha/scripts/generate-epoch.py48
-rw-r--r--extras/geo-rep/Makefile.am2
-rw-r--r--extras/geo-rep/schedule_georep.py.in6
-rw-r--r--extras/glusterd.vol.in3
-rwxr-xr-xextras/glusterfs-georep-upgrade.py77
-rw-r--r--extras/glusterfs-logrotate21
-rw-r--r--extras/group-gluster-block8
-rw-r--r--extras/group-virt.example8
-rwxr-xr-xextras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh27
-rw-r--r--extras/hook-scripts/start/post/Makefile.am2
-rwxr-xr-xextras/hook-scripts/start/post/S31ganesha-start.sh122
-rwxr-xr-xextras/mount-shared-storage.sh2
-rwxr-xr-xextras/ocf/volume.in22
-rwxr-xr-xextras/quota/quota_fsck.py20
-rwxr-xr-xextras/snap_scheduler/gcron.py7
-rwxr-xr-xextras/snap_scheduler/snap_scheduler.py4
-rwxr-xr-xextras/statedumpparse.rb208
-rw-r--r--extras/systemd/gluster-ta-volume.service.in2
-rw-r--r--extras/systemd/glusterd.service.in9
-rw-r--r--extras/thin-arbiter/thin-arbiter.vol5
-rw-r--r--extras/who-wrote-glusterfs/gitdm.domain-map1
-rw-r--r--geo-replication/gsyncd.conf.in6
-rw-r--r--geo-replication/setup.py6
-rw-r--r--geo-replication/src/peer_mountbroker.py.in17
-rw-r--r--geo-replication/syncdaemon/Makefile.am2
-rw-r--r--geo-replication/syncdaemon/README.md1
-rw-r--r--geo-replication/syncdaemon/changelogagent.py78
-rw-r--r--geo-replication/syncdaemon/gsyncd.py25
-rw-r--r--geo-replication/syncdaemon/gsyncdstatus.py4
-rw-r--r--geo-replication/syncdaemon/libcxattr.py4
-rw-r--r--geo-replication/syncdaemon/libgfchangelog.py256
-rw-r--r--geo-replication/syncdaemon/master.py42
-rw-r--r--geo-replication/syncdaemon/monitor.py93
-rw-r--r--geo-replication/syncdaemon/py2py3.py46
-rw-r--r--geo-replication/syncdaemon/resource.py67
-rw-r--r--geo-replication/syncdaemon/subcmds.py11
-rw-r--r--geo-replication/syncdaemon/syncdutils.py90
-rwxr-xr-xgeo-replication/tests/unit/test_gsyncdstatus.py10
-rw-r--r--glusterfs.spec.in656
-rw-r--r--glusterfsd/src/Makefile.am5
-rw-r--r--glusterfsd/src/gf_attach.c55
-rw-r--r--glusterfsd/src/glusterfsd-messages.h56
-rw-r--r--glusterfsd/src/glusterfsd-mgmt.c522
-rw-r--r--glusterfsd/src/glusterfsd.c551
-rw-r--r--glusterfsd/src/glusterfsd.h12
-rw-r--r--heal/src/Makefile.am6
-rw-r--r--heal/src/glfs-heal.c36
-rw-r--r--libgfdb.pc.in12
-rw-r--r--libglusterd/Makefile.am3
-rw-r--r--libglusterd/src/Makefile.am31
-rw-r--r--libglusterd/src/gd-common-utils.c78
-rw-r--r--libglusterd/src/gd-common-utils.h28
-rw-r--r--libglusterd/src/libglusterd.sym2
-rw-r--r--libglusterfs/src/Makefile.am18
-rw-r--r--libglusterfs/src/call-stub.c163
-rw-r--r--libglusterfs/src/client_t.c111
-rw-r--r--libglusterfs/src/common-utils.c488
-rw-r--r--libglusterfs/src/compat.c2
-rw-r--r--libglusterfs/src/ctx.c8
-rw-r--r--libglusterfs/src/defaults-tmpl.c7
-rw-r--r--libglusterfs/src/dict.c432
-rw-r--r--libglusterfs/src/event-epoll.c93
-rw-r--r--libglusterfs/src/event-poll.c47
-rw-r--r--libglusterfs/src/event.c3
-rw-r--r--libglusterfs/src/events.c81
-rw-r--r--libglusterfs/src/fd.c26
-rw-r--r--libglusterfs/src/gf-dirent.c2
-rw-r--r--libglusterfs/src/gfdb/Makefile.am37
-rw-r--r--libglusterfs/src/gfdb/gfdb_data_store.c802
-rw-r--r--libglusterfs/src/gfdb/gfdb_data_store.h331
-rw-r--r--libglusterfs/src/gfdb/gfdb_data_store_helper.c588
-rw-r--r--libglusterfs/src/gfdb/gfdb_data_store_helper.h95
-rw-r--r--libglusterfs/src/gfdb/gfdb_data_store_types.h532
-rw-r--r--libglusterfs/src/gfdb/gfdb_mem-types.h17
-rw-r--r--libglusterfs/src/gfdb/gfdb_sqlite3.c1542
-rw-r--r--libglusterfs/src/gfdb/gfdb_sqlite3.h328
-rw-r--r--libglusterfs/src/gfdb/gfdb_sqlite3_helper.c1260
-rw-r--r--libglusterfs/src/gfdb/gfdb_sqlite3_helper.h51
-rw-r--r--libglusterfs/src/gidcache.c5
-rw-r--r--libglusterfs/src/globals.c13
-rw-r--r--libglusterfs/src/glusterfs/client_t.h24
-rw-r--r--libglusterfs/src/glusterfs/common-utils.h139
-rw-r--r--libglusterfs/src/glusterfs/compat.h3
-rw-r--r--libglusterfs/src/glusterfs/dict.h13
-rw-r--r--libglusterfs/src/glusterfs/fd.h3
-rw-r--r--libglusterfs/src/glusterfs/gf-event.h4
-rw-r--r--libglusterfs/src/glusterfs/globals.h12
-rw-r--r--libglusterfs/src/glusterfs/glusterfs-acl.h2
-rw-r--r--libglusterfs/src/glusterfs/glusterfs-fops.h241
-rw-r--r--libglusterfs/src/glusterfs/glusterfs.h43
-rw-r--r--libglusterfs/src/glusterfs/inode.h5
-rw-r--r--libglusterfs/src/glusterfs/iobuf.h12
-rw-r--r--libglusterfs/src/glusterfs/latency.h22
-rw-r--r--libglusterfs/src/glusterfs/libglusterfs-messages.h130
-rw-r--r--libglusterfs/src/glusterfs/lkowner.h2
-rw-r--r--libglusterfs/src/glusterfs/logging.h11
-rw-r--r--libglusterfs/src/glusterfs/mem-pool.h58
-rw-r--r--libglusterfs/src/glusterfs/mem-types.h6
-rw-r--r--libglusterfs/src/glusterfs/stack.h14
-rw-r--r--libglusterfs/src/glusterfs/statedump.h2
-rw-r--r--libglusterfs/src/glusterfs/store.h6
-rw-r--r--libglusterfs/src/glusterfs/syncop.h80
-rw-r--r--libglusterfs/src/glusterfs/syscall.h18
-rw-r--r--libglusterfs/src/glusterfs/xlator.h25
-rw-r--r--libglusterfs/src/graph.c30
-rw-r--r--libglusterfs/src/inode.c114
-rw-r--r--libglusterfs/src/iobuf.c129
-rw-r--r--libglusterfs/src/latency.c91
-rw-r--r--libglusterfs/src/libglusterfs.sym25
-rw-r--r--libglusterfs/src/logging.c343
-rw-r--r--libglusterfs/src/mem-pool.c328
-rw-r--r--libglusterfs/src/monitoring.c8
-rw-r--r--libglusterfs/src/options.c123
-rw-r--r--libglusterfs/src/rbthash.c46
-rw-r--r--libglusterfs/src/stack.c4
-rw-r--r--libglusterfs/src/statedump.c164
-rw-r--r--libglusterfs/src/store.c85
-rw-r--r--libglusterfs/src/syncop-utils.c37
-rw-r--r--libglusterfs/src/syncop.c359
-rw-r--r--libglusterfs/src/syscall.c48
-rw-r--r--libglusterfs/src/throttle-tbf.c2
-rw-r--r--libglusterfs/src/tier-ctr-interface.h44
-rw-r--r--libglusterfs/src/timer.c3
-rw-r--r--libglusterfs/src/xlator.c179
-rwxr-xr-xrfc.sh200
-rw-r--r--rpc/rpc-lib/src/Makefile.am2
-rw-r--r--rpc/rpc-lib/src/libgfrpc.sym1
-rw-r--r--rpc/rpc-lib/src/protocol-common.h1
-rw-r--r--rpc/rpc-lib/src/rpc-clnt-ping.c2
-rw-r--r--rpc/rpc-lib/src/rpc-clnt.c51
-rw-r--r--rpc/rpc-lib/src/rpc-clnt.h19
-rw-r--r--rpc/rpc-lib/src/rpc-drc.c13
-rw-r--r--rpc/rpc-lib/src/rpc-drc.h2
-rw-r--r--rpc/rpc-lib/src/rpc-transport.c87
-rw-r--r--rpc/rpc-lib/src/rpc-transport.h22
-rw-r--r--rpc/rpc-lib/src/rpcsvc-common.h4
-rw-r--r--rpc/rpc-lib/src/rpcsvc.c164
-rw-r--r--rpc/rpc-lib/src/rpcsvc.h59
-rw-r--r--rpc/rpc-lib/src/xdr-common.h2
-rw-r--r--rpc/rpc-transport/Makefile.am2
-rw-r--r--rpc/rpc-transport/rdma/Makefile.am1
-rw-r--r--rpc/rpc-transport/rdma/src/Makefile.am24
-rw-r--r--rpc/rpc-transport/rdma/src/name.c703
-rw-r--r--rpc/rpc-transport/rdma/src/name.h35
-rw-r--r--rpc/rpc-transport/rdma/src/rdma.c4912
-rw-r--r--rpc/rpc-transport/rdma/src/rdma.h384
-rw-r--r--rpc/rpc-transport/rdma/src/rpc-trans-rdma-messages.h66
-rw-r--r--rpc/rpc-transport/socket/src/name.c86
-rw-r--r--rpc/rpc-transport/socket/src/socket.c506
-rw-r--r--rpc/rpc-transport/socket/src/socket.h25
-rw-r--r--rpc/xdr/gen/Makefile.am49
-rw-r--r--rpc/xdr/src/.gitignore2
-rw-r--r--rpc/xdr/src/Makefile.am70
-rw-r--r--rpc/xdr/src/cli1-xdr.x1
-rw-r--r--rpc/xdr/src/glusterd1-xdr.x15
-rw-r--r--rpc/xdr/src/glusterfs-fops.x250
-rw-r--r--rpc/xdr/src/glusterfs3-xdr.x3
-rw-r--r--rpc/xdr/src/glusterfs4-xdr.x6
-rw-r--r--rpc/xdr/src/libgfxdr.sym2
-rw-r--r--rpc/xdr/src/rpc-common-xdr.x3
-rwxr-xr-xrun-tests.sh79
-rw-r--r--tests/00-geo-rep/00-georep-verify-non-root-setup.t43
-rw-r--r--tests/00-geo-rep/00-georep-verify-setup.t3
-rw-r--r--tests/00-geo-rep/bug-1708603.t63
-rw-r--r--tests/00-geo-rep/georep-basic-dr-rsync.t13
-rw-r--r--tests/00-geo-rep/georep-upgrade.t79
-rw-r--r--tests/000-flaky/basic_afr_split-brain-favorite-child-policy.t (renamed from tests/basic/afr/split-brain-favorite-child-policy.t)4
-rw-r--r--tests/000-flaky/basic_changelog_changelog-snapshot.t (renamed from tests/basic/changelog/changelog-snapshot.t)4
-rw-r--r--tests/000-flaky/basic_distribute_rebal-all-nodes-migrate.t (renamed from tests/basic/distribute/rebal-all-nodes-migrate.t)8
-rw-r--r--tests/000-flaky/basic_ec_ec-quorum-count-partial-failure.t50
-rw-r--r--[-rwxr-xr-x]tests/000-flaky/basic_mount-nfs-auth.t (renamed from tests/basic/mount-nfs-auth.t)0
-rw-r--r--tests/000-flaky/bugs_core_multiplex-limit-issue-151.t (renamed from tests/bugs/core/multiplex-limit-issue-151.t)6
-rw-r--r--[-rwxr-xr-x]tests/000-flaky/bugs_distribute_bug-1117851.t (renamed from tests/bugs/distribute/bug-1117851.t)4
-rw-r--r--tests/000-flaky/bugs_distribute_bug-1122443.t (renamed from tests/bugs/distribute/bug-1122443.t)17
-rw-r--r--tests/000-flaky/bugs_glusterd_bug-857330/common.rc (renamed from tests/bugs/glusterd/bug-857330/common.rc)2
-rwxr-xr-xtests/000-flaky/bugs_glusterd_bug-857330/normal.t (renamed from tests/bugs/glusterd/bug-857330/normal.t)4
-rwxr-xr-xtests/000-flaky/bugs_glusterd_bug-857330/xml.t (renamed from tests/bugs/glusterd/bug-857330/xml.t)4
-rw-r--r--[-rwxr-xr-x]tests/000-flaky/bugs_glusterd_quorum-value-check.t (renamed from tests/bugs/glusterd/quorum-value-check.t)6
-rw-r--r--tests/000-flaky/bugs_nfs_bug-1116503.t (renamed from tests/bugs/nfs/bug-1116503.t)6
-rw-r--r--tests/000-flaky/features_lock-migration_lkmigration-set-option.t (renamed from tests/features/lock-migration/lkmigration-set-option.t)4
-rw-r--r--tests/afr.rc16
-rw-r--r--tests/basic/afr/afr-anon-inode-no-quorum.t63
-rw-r--r--tests/basic/afr/afr-anon-inode.t114
-rw-r--r--tests/basic/afr/afr-seek.t55
-rw-r--r--tests/basic/afr/durability-off.t2
-rw-r--r--tests/basic/afr/entry-self-heal-anon-dir-off.t459
-rw-r--r--tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t9
-rw-r--r--tests/basic/afr/halo.t61
-rw-r--r--tests/basic/afr/rename-data-loss.t72
-rw-r--r--tests/basic/afr/split-brain-favorite-child-policy-client-side-healing.t124
-rw-r--r--tests/basic/changelog/changelog-history.t12
-rw-r--r--tests/basic/ctime/ctime-utimesat.t28
-rw-r--r--tests/basic/distribute/file-rename.t1021
-rw-r--r--tests/basic/distribute/spare_file_rebalance.t51
-rw-r--r--tests/basic/ec/ec-badfd.c124
-rwxr-xr-xtests/basic/ec/ec-badfd.t26
-rw-r--r--tests/basic/ec/ec-quorum-count.t167
-rw-r--r--tests/basic/ec/ec-read-mask.t114
-rw-r--r--tests/basic/ec/ec-reset-brick.t50
-rw-r--r--tests/basic/ec/ec-seek.t3
-rw-r--r--tests/basic/ec/gfapi-ec-open-truncate.c3
-rw-r--r--tests/basic/ec/self-heal.t2
-rw-r--r--tests/basic/fencing/afr-lock-heal-advanced.c227
-rw-r--r--tests/basic/fencing/afr-lock-heal-advanced.t115
-rw-r--r--tests/basic/fencing/afr-lock-heal-basic.c182
-rw-r--r--tests/basic/fencing/afr-lock-heal-basic.t102
-rw-r--r--tests/basic/fuse/active-io-graph-switch.t65
-rw-r--r--tests/basic/gfapi/bug-1507896.c49
-rw-r--r--tests/basic/gfapi/bug-1507896.t33
-rw-r--r--tests/basic/gfapi/gfapi-copy-file-range.t16
-rw-r--r--tests/basic/gfapi/gfapi-graph-switch-open-fd.t44
-rw-r--r--tests/basic/gfapi/gfapi-keep-writing.c129
-rw-r--r--tests/basic/gfapi/gfapi-ssl-load-volfile-test.c127
-rwxr-xr-xtests/basic/gfapi/gfapi-ssl-load-volfile-test.t76
-rw-r--r--tests/basic/gfapi/glfsxmp-coverage.c168
-rw-r--r--tests/basic/gfapi/glfsxmp.t5
-rw-r--r--tests/basic/gfapi/protocol-client-ssl.vol.in15
-rw-r--r--tests/basic/glusterd-restart-shd-mux.t10
-rw-r--r--tests/basic/glusterd/arbiter-volume.t29
-rw-r--r--tests/basic/glusterd/disperse-create.t4
-rw-r--r--tests/basic/glusterd/volfile_server_switch.t2
-rw-r--r--tests/basic/glusterd/volume-brick-count.t61
-rw-r--r--tests/basic/graph-cleanup-brick-down-shd-mux.t64
-rw-r--r--tests/basic/metadisp/fsyncdir.c29
-rw-r--r--tests/basic/metadisp/ftruncate.c34
-rw-r--r--tests/basic/metadisp/fxattr.c107
-rw-r--r--tests/basic/metadisp/gfs-fsetxattr.c141
-rw-r--r--tests/basic/metadisp/metadisp.t316
-rw-r--r--tests/basic/metadisp/metadisp.vol14
-rwxr-xr-xtests/basic/mount.t3
-rw-r--r--tests/basic/multiple-volume-shd-mux.t46
-rw-r--r--tests/basic/open-behind/open-behind.t183
-rw-r--r--tests/basic/open-behind/tester-fd.c99
-rw-r--r--tests/basic/open-behind/tester.c444
-rw-r--r--tests/basic/open-behind/tester.h145
-rw-r--r--tests/basic/posix/shared-statfs.t11
-rw-r--r--tests/basic/posix/zero-fill-enospace.c7
-rw-r--r--tests/basic/quick-read-with-upcall.t16
-rw-r--r--tests/basic/seek.c (renamed from tests/basic/ec/seek.c)0
-rw-r--r--tests/basic/shd-mux-afr.t70
-rw-r--r--tests/basic/shd-mux-ec.t75
-rw-r--r--tests/basic/shd-mux.t149
-rwxr-xr-xtests/basic/trace.t22
-rw-r--r--tests/basic/volume-scale-shd-mux.t33
-rw-r--r--tests/basic/volume-snap-scheduler.t49
-rwxr-xr-xtests/basic/volume-snapshot-xml.t6
-rw-r--r--[-rwxr-xr-x]tests/basic/volume.t36
-rw-r--r--tests/bitrot/br-signer-threads-config-1797869.t73
-rw-r--r--tests/bugs/bitrot/bug-1227996.t1
-rw-r--r--tests/bugs/bitrot/bug-1245981.t4
-rwxr-xr-xtests/bugs/bug-1064147.t72
-rw-r--r--tests/bugs/bug-1371806.t1
-rw-r--r--tests/bugs/bug-1371806_acl.t1
-rw-r--r--tests/bugs/bug-1620580.t67
-rw-r--r--tests/bugs/bug-1694920.t63
-rwxr-xr-xtests/bugs/cli/bug-1320388.t2
-rwxr-xr-xtests/bugs/ctime/issue-832.t32
-rw-r--r--tests/bugs/distribute/bug-1600379.t54
-rwxr-xr-xtests/bugs/distribute/bug-1786679.t69
-rwxr-xr-xtests/bugs/distribute/issue-1327.t33
-rw-r--r--tests/bugs/fuse/bug-985074.t5
-rwxr-xr-xtests/bugs/fuse/many-groups-for-acl.t13
-rw-r--r--tests/bugs/gfapi/bug-1447266/bug-1447266.t2
-rw-r--r--tests/bugs/glusterd/brick-mux-validation-in-cluster.t59
-rw-r--r--tests/bugs/glusterd/brick-mux-validation.t4
-rw-r--r--tests/bugs/glusterd/brick-mux.t2
-rw-r--r--tests/bugs/glusterd/brick-order-check-add-brick.t61
-rw-r--r--tests/bugs/glusterd/bug-1720566.t50
-rw-r--r--tests/bugs/glusterd/check_elastic_server.t63
-rw-r--r--tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t18
-rw-r--r--tests/bugs/glusterd/optimized-basic-testcases.t8
-rw-r--r--tests/bugs/glusterd/quorum-validation.t2
-rw-r--r--tests/bugs/glusterd/rebalance-in-cluster.t9
-rw-r--r--tests/bugs/glusterd/remove-brick-validation.t (renamed from tests/bugs/glusterd/enable-shared-storage-and-remove-brick-validation.t)14
-rw-r--r--tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t1
-rw-r--r--tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t2
-rw-r--r--tests/bugs/glusterd/validating-options-for-replicated-volume.t13
-rwxr-xr-xtests/bugs/glusterfs-server/bug-852147.t2
-rw-r--r--tests/bugs/glusterfs-server/bug-873549.t2
-rwxr-xr-xtests/bugs/glusterfs-server/bug-887145.t14
-rw-r--r--tests/bugs/glusterfs/bug-873962-spb.t1
-rwxr-xr-xtests/bugs/logging/bug-823081.t8
-rw-r--r--tests/bugs/posix/bug-1651445.t30
-rw-r--r--tests/bugs/protocol/bug-1433815-auth-allow.t1
-rw-r--r--tests/bugs/replicate/bug-1101647.t2
-rw-r--r--tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t2
-rw-r--r--tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t18
-rw-r--r--tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t2
-rw-r--r--tests/bugs/replicate/bug-1744548-heal-timeout.t19
-rw-r--r--tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t74
-rw-r--r--tests/bugs/replicate/bug-1801624-entry-heal.t58
-rw-r--r--tests/bugs/replicate/bug-880898.t7
-rw-r--r--tests/bugs/replicate/issue-1254-prioritize-enospc.t80
-rw-r--r--tests/bugs/replicate/mdata-heal-no-xattrs.t59
-rwxr-xr-xtests/bugs/rpc/bug-954057.t10
-rw-r--r--tests/bugs/shard/bug-1272986.t6
-rw-r--r--tests/bugs/shard/bug-1696136.c5
-rw-r--r--tests/bugs/shard/bug-shard-discard.c5
-rw-r--r--tests/bugs/shard/issue-1243.t43
-rw-r--r--tests/bugs/shard/issue-1281.t34
-rw-r--r--tests/bugs/shard/issue-1425.t45
-rw-r--r--tests/bugs/shard/shard-fallocate.c5
-rwxr-xr-xtests/bugs/snapshot/bug-1111041.t10
-rw-r--r--tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t1
-rw-r--r--tests/bugs/snapshot/bug-1597662.t3
-rwxr-xr-xtests/bugs/transport/bug-873367.t2
-rw-r--r--tests/bugs/write-behind/issue-884.c267
-rwxr-xr-xtests/bugs/write-behind/issue-884.t40
-rw-r--r--tests/cluster.rc41
-rw-r--r--tests/ec.rc9
-rw-r--r--tests/env.rc.in2
-rw-r--r--tests/features/flock_interrupt.t1
-rw-r--r--tests/features/fuse-lru-limit.t1
-rw-r--r--tests/features/interrupt.t14
-rwxr-xr-xtests/features/ssl-authz.t26
-rw-r--r--tests/features/ssl-ciphers.t72
-rwxr-xr-xtests/features/trash.t74
-rwxr-xr-xtests/features/worm.t39
-rw-r--r--tests/geo-rep.rc5
-rw-r--r--tests/glusterfind/glusterfind-basic.t84
-rw-r--r--tests/line-coverage/afr-heal-info.t43
-rwxr-xr-xtests/line-coverage/arbiter-coverage.t32
-rw-r--r--tests/line-coverage/cli-peer-and-volume-operations.t48
-rwxr-xr-xtests/line-coverage/errorgen-coverage.t2
-rw-r--r--tests/line-coverage/log-and-brick-ops-negative-case.t82
-rw-r--r--tests/ssl.rc2
-rw-r--r--tests/thin-arbiter.rc3
-rw-r--r--tests/volume.rc15
-rw-r--r--tools/gfind_missing_files/gfind_missing_files.sh2
-rwxr-xr-xtools/glusterfind/S57glusterfind-delete-post.py2
-rw-r--r--tools/glusterfind/src/changelog.py16
-rw-r--r--tools/glusterfind/src/gfind_py2py3.py25
-rw-r--r--tools/glusterfind/src/main.py61
-rw-r--r--tools/glusterfind/src/utils.py6
-rw-r--r--xlators/cluster/afr/src/afr-common.c1205
-rw-r--r--xlators/cluster/afr/src/afr-dir-read.c18
-rw-r--r--xlators/cluster/afr/src/afr-dir-write.c16
-rw-r--r--xlators/cluster/afr/src/afr-inode-read.c38
-rw-r--r--xlators/cluster/afr/src/afr-inode-read.h3
-rw-r--r--xlators/cluster/afr/src/afr-inode-write.c75
-rw-r--r--xlators/cluster/afr/src/afr-mem-types.h2
-rw-r--r--xlators/cluster/afr/src/afr-messages.h160
-rw-r--r--xlators/cluster/afr/src/afr-open.c19
-rw-r--r--xlators/cluster/afr/src/afr-read-txn.c10
-rw-r--r--xlators/cluster/afr/src/afr-self-heal-common.c203
-rw-r--r--xlators/cluster/afr/src/afr-self-heal-data.c22
-rw-r--r--xlators/cluster/afr/src/afr-self-heal-entry.c212
-rw-r--r--xlators/cluster/afr/src/afr-self-heal-metadata.c70
-rw-r--r--xlators/cluster/afr/src/afr-self-heal-name.c41
-rw-r--r--xlators/cluster/afr/src/afr-self-heal.h21
-rw-r--r--xlators/cluster/afr/src/afr-self-heald.c286
-rw-r--r--xlators/cluster/afr/src/afr-self-heald.h20
-rw-r--r--xlators/cluster/afr/src/afr-transaction.c154
-rw-r--r--xlators/cluster/afr/src/afr.c55
-rw-r--r--xlators/cluster/afr/src/afr.h338
-rw-r--r--xlators/cluster/dht/src/dht-common.c1892
-rw-r--r--xlators/cluster/dht/src/dht-common.h405
-rw-r--r--xlators/cluster/dht/src/dht-diskusage.c12
-rw-r--r--xlators/cluster/dht/src/dht-hashfn.c38
-rw-r--r--xlators/cluster/dht/src/dht-helper.c199
-rw-r--r--xlators/cluster/dht/src/dht-inode-read.c95
-rw-r--r--xlators/cluster/dht/src/dht-inode-write.c56
-rw-r--r--xlators/cluster/dht/src/dht-layout.c92
-rw-r--r--xlators/cluster/dht/src/dht-linkfile.c50
-rw-r--r--xlators/cluster/dht/src/dht-lock.c190
-rw-r--r--xlators/cluster/dht/src/dht-mem-types.h3
-rw-r--r--xlators/cluster/dht/src/dht-messages.h320
-rw-r--r--xlators/cluster/dht/src/dht-rebalance.c1130
-rw-r--r--xlators/cluster/dht/src/dht-rename.c4
-rw-r--r--xlators/cluster/dht/src/dht-selfheal.c589
-rw-r--r--xlators/cluster/dht/src/dht-shared.c136
-rw-r--r--xlators/cluster/dht/src/nufa.c1
-rw-r--r--xlators/cluster/ec/src/ec-combine.c40
-rw-r--r--xlators/cluster/ec/src/ec-common.c126
-rw-r--r--xlators/cluster/ec/src/ec-common.h24
-rw-r--r--xlators/cluster/ec/src/ec-dir-read.c23
-rw-r--r--xlators/cluster/ec/src/ec-dir-write.c57
-rw-r--r--xlators/cluster/ec/src/ec-generic.c51
-rw-r--r--xlators/cluster/ec/src/ec-heal.c159
-rw-r--r--xlators/cluster/ec/src/ec-heald.c73
-rw-r--r--xlators/cluster/ec/src/ec-helpers.c9
-rw-r--r--xlators/cluster/ec/src/ec-inode-read.c46
-rw-r--r--xlators/cluster/ec/src/ec-inode-write.c61
-rw-r--r--xlators/cluster/ec/src/ec-locks.c69
-rw-r--r--xlators/cluster/ec/src/ec-messages.h2
-rw-r--r--xlators/cluster/ec/src/ec-types.h21
-rw-r--r--xlators/cluster/ec/src/ec.c125
-rw-r--r--xlators/cluster/ec/src/ec.h1
-rw-r--r--xlators/debug/delay-gen/src/delay-gen.c2
-rw-r--r--xlators/debug/error-gen/src/error-gen.c47
-rw-r--r--xlators/debug/io-stats/src/io-stats.c217
-rw-r--r--xlators/debug/trace/src/trace.c20
-rw-r--r--xlators/features/Makefile.am6
-rw-r--r--xlators/features/barrier/src/barrier.c2
-rw-r--r--xlators/features/barrier/src/barrier.h3
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot-bitd-messages.h51
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot-scrub-status.c12
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot-scrub-status.h20
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot-scrub.c80
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot.c284
-rw-r--r--xlators/features/bit-rot/src/bitd/bit-rot.h20
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-stub-helpers.c97
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-stub-mem-types.h1
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-stub-messages.h73
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-stub.c324
-rw-r--r--xlators/features/bit-rot/src/stub/bit-rot-stub.h22
-rw-r--r--xlators/features/changelog/lib/src/changelog-lib-messages.h30
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog-api.c4
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog-journal-handler.c25
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog-reborp.c32
-rw-r--r--xlators/features/changelog/lib/src/gf-changelog.c5
-rw-r--r--xlators/features/changelog/lib/src/gf-history-changelog.c68
-rw-r--r--xlators/features/changelog/src/changelog-barrier.c15
-rw-r--r--xlators/features/changelog/src/changelog-ev-handle.c9
-rw-r--r--xlators/features/changelog/src/changelog-helpers.c285
-rw-r--r--xlators/features/changelog/src/changelog-helpers.h35
-rw-r--r--xlators/features/changelog/src/changelog-messages.h122
-rw-r--r--xlators/features/changelog/src/changelog-rpc-common.c42
-rw-r--r--xlators/features/changelog/src/changelog-rpc.c19
-rw-r--r--xlators/features/changelog/src/changelog.c346
-rw-r--r--xlators/features/cloudsync/src/Makefile.am4
-rwxr-xr-xxlators/features/cloudsync/src/cloudsync-fops-c.py2
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/libcloudsyncs3.c2
-rw-r--r--xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/libcvlt.c4
-rw-r--r--xlators/features/cloudsync/src/cloudsync.c88
-rw-r--r--xlators/features/cloudsync/src/cloudsync.h2
-rw-r--r--xlators/features/gfid-access/src/gfid-access.c6
-rw-r--r--xlators/features/index/src/index.c4
-rw-r--r--xlators/features/leases/src/leases-internal.c4
-rw-r--r--xlators/features/leases/src/leases.h35
-rw-r--r--xlators/features/locks/src/clear.c4
-rw-r--r--xlators/features/locks/src/common.c326
-rw-r--r--xlators/features/locks/src/common.h41
-rw-r--r--xlators/features/locks/src/entrylk.c35
-rw-r--r--xlators/features/locks/src/inodelk.c179
-rw-r--r--xlators/features/locks/src/locks.h41
-rw-r--r--xlators/features/locks/src/posix.c254
-rw-r--r--xlators/features/locks/src/reservelk.c2
-rw-r--r--xlators/features/marker/src/marker-quota.c26
-rw-r--r--xlators/features/metadisp/Makefile.am3
-rw-r--r--xlators/features/metadisp/src/Makefile.am38
-rw-r--r--xlators/features/metadisp/src/backend.c45
-rw-r--r--xlators/features/metadisp/src/fops-tmpl.c10
-rw-r--r--xlators/features/metadisp/src/gen-fops.py160
-rw-r--r--xlators/features/metadisp/src/metadisp-create.c101
-rw-r--r--xlators/features/metadisp/src/metadisp-fops.h51
-rw-r--r--xlators/features/metadisp/src/metadisp-fsync.c54
-rw-r--r--xlators/features/metadisp/src/metadisp-lookup.c90
-rw-r--r--xlators/features/metadisp/src/metadisp-open.c70
-rw-r--r--xlators/features/metadisp/src/metadisp-readdir.c65
-rw-r--r--xlators/features/metadisp/src/metadisp-setattr.c90
-rw-r--r--xlators/features/metadisp/src/metadisp-stat.c124
-rw-r--r--xlators/features/metadisp/src/metadisp-unlink.c160
-rw-r--r--xlators/features/metadisp/src/metadisp.c46
-rw-r--r--xlators/features/metadisp/src/metadisp.h45
-rw-r--r--xlators/features/quiesce/src/quiesce.c42
-rw-r--r--xlators/features/quota/src/quota-enforcer-client.c20
-rw-r--r--xlators/features/quota/src/quota.c125
-rw-r--r--xlators/features/quota/src/quota.h8
-rw-r--r--xlators/features/quota/src/quotad-aggregator.c16
-rw-r--r--xlators/features/read-only/src/worm-helper.c15
-rw-r--r--xlators/features/read-only/src/worm.c94
-rw-r--r--xlators/features/shard/src/shard.c539
-rw-r--r--xlators/features/shard/src/shard.h7
-rw-r--r--xlators/features/snapview-client/src/snapview-client-messages.h35
-rw-r--r--xlators/features/snapview-client/src/snapview-client.c384
-rw-r--r--xlators/features/snapview-server/src/snapview-server-mgmt.c25
-rw-r--r--xlators/features/trash/src/trash.c4
-rw-r--r--xlators/features/upcall/src/upcall-internal.c10
-rwxr-xr-xxlators/features/utime/src/utime-gen-fops-c.py10
-rw-r--r--xlators/features/utime/src/utime-helpers.h1
-rw-r--r--xlators/features/utime/src/utime.c31
-rw-r--r--xlators/meta/src/meta-helpers.c9
-rw-r--r--xlators/mgmt/glusterd/src/Makefile.am19
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-bitrot.c87
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-brick-ops.c380
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-conn-mgmt.c19
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-conn-mgmt.h4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-errno.h2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-ganesha.c927
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-geo-rep.c146
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-geo-rep.h2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc-helper.c3
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.c11
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-gfproxyd-svc.h2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-handler.c671
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-handshake.c264
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-hooks.c108
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-hooks.h4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-log-ops.c15
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mem-types.h1
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-messages.h153
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c247
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mgmt.c432
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mgmt.h10
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mountbroker.c48
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mountbroker.h6
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-nfs-svc.c3
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-nfs-svc.h2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-op-sm.c834
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-peer-utils.c309
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-peer-utils.h9
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-pmap.c32
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-pmap.h4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-proc-mgmt.c4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-quota.c77
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-quotad-svc.c4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-rebalance.c86
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-replace-brick.c28
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-reset-brick.c14
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-rpc-ops.c92
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-scrub-svc.c4
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-server-quorum.c13
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-shd-svc.c121
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-sm.c211
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapd-svc.c20
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapd-svc.h2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapshot-utils.c337
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapshot-utils.h10
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapshot.c241
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-statedump.c5
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-store.c511
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-store.h17
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-svc-helper.c122
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-svc-mgmt.c38
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-svc-mgmt.h17
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-syncop.c83
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-syncop.h2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-tierd-svc-helper.c3
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.c1212
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.h38
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volgen.c781
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volgen.h20
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volume-ops.c538
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volume-set.c188
-rw-r--r--xlators/mgmt/glusterd/src/glusterd.c306
-rw-r--r--xlators/mgmt/glusterd/src/glusterd.h393
-rw-r--r--xlators/mount/fuse/src/fuse-bridge.c423
-rw-r--r--xlators/mount/fuse/src/fuse-bridge.h58
-rw-r--r--xlators/mount/fuse/src/fuse-helpers.c110
-rwxr-xr-xxlators/mount/fuse/utils/mount.glusterfs.in39
-rwxr-xr-xxlators/mount/fuse/utils/mount_glusterfs.in19
-rw-r--r--xlators/nfs/server/src/acl3.c27
-rw-r--r--xlators/nfs/server/src/acl3.h2
-rw-r--r--xlators/nfs/server/src/auth-cache.c4
-rw-r--r--xlators/nfs/server/src/mount3.c59
-rw-r--r--xlators/nfs/server/src/mount3udp_svc.c2
-rw-r--r--xlators/nfs/server/src/nfs-common.c5
-rw-r--r--xlators/nfs/server/src/nfs-fops.c2
-rw-r--r--xlators/nfs/server/src/nfs.c14
-rw-r--r--xlators/nfs/server/src/nfs3-fh.c6
-rw-r--r--xlators/nfs/server/src/nfs3-helpers.c4
-rw-r--r--xlators/nfs/server/src/nfs3.c54
-rw-r--r--xlators/nfs/server/src/nlm4.c77
-rw-r--r--xlators/nfs/server/src/nlmcbk_svc.c5
-rw-r--r--xlators/performance/io-cache/src/io-cache-messages.h39
-rw-r--r--xlators/performance/io-cache/src/io-cache.c171
-rw-r--r--xlators/performance/io-cache/src/io-cache.h27
-rw-r--r--xlators/performance/io-cache/src/ioc-inode.c14
-rw-r--r--xlators/performance/io-cache/src/page.c38
-rw-r--r--xlators/performance/io-threads/src/io-threads-messages.h14
-rw-r--r--xlators/performance/io-threads/src/io-threads.c53
-rw-r--r--xlators/performance/md-cache/src/md-cache.c430
-rw-r--r--xlators/performance/nl-cache/src/nl-cache-helper.c55
-rw-r--r--xlators/performance/nl-cache/src/nl-cache.c6
-rw-r--r--xlators/performance/nl-cache/src/nl-cache.h2
-rw-r--r--xlators/performance/open-behind/src/open-behind-messages.h6
-rw-r--r--xlators/performance/open-behind/src/open-behind.c1324
-rw-r--r--xlators/performance/quick-read/src/quick-read.c50
-rw-r--r--xlators/performance/quick-read/src/quick-read.h2
-rw-r--r--xlators/performance/read-ahead/src/read-ahead.c9
-rw-r--r--xlators/performance/readdir-ahead/src/readdir-ahead.c4
-rw-r--r--xlators/performance/write-behind/src/write-behind.c8
-rw-r--r--xlators/protocol/client/src/client-callback.c89
-rw-r--r--xlators/protocol/client/src/client-common.c30
-rw-r--r--xlators/protocol/client/src/client-handshake.c712
-rw-r--r--xlators/protocol/client/src/client-helpers.c65
-rw-r--r--xlators/protocol/client/src/client-lk.c31
-rw-r--r--xlators/protocol/client/src/client-messages.h123
-rw-r--r--xlators/protocol/client/src/client-rpc-fops.c700
-rw-r--r--xlators/protocol/client/src/client-rpc-fops_v2.c721
-rw-r--r--xlators/protocol/client/src/client.c1210
-rw-r--r--xlators/protocol/client/src/client.h43
-rw-r--r--xlators/protocol/server/src/Makefile.am4
-rw-r--r--xlators/protocol/server/src/server-common.c32
-rw-r--r--xlators/protocol/server/src/server-handshake.c124
-rw-r--r--xlators/protocol/server/src/server-helpers.c131
-rw-r--r--xlators/protocol/server/src/server-messages.h179
-rw-r--r--xlators/protocol/server/src/server-rpc-fops.c204
-rw-r--r--xlators/protocol/server/src/server-rpc-fops_v2.c906
-rw-r--r--xlators/protocol/server/src/server.c203
-rw-r--r--xlators/protocol/server/src/server.h1
-rw-r--r--xlators/storage/posix/src/posix-common.c241
-rw-r--r--xlators/storage/posix/src/posix-entry-ops.c388
-rw-r--r--xlators/storage/posix/src/posix-gfid-path.c89
-rw-r--r--xlators/storage/posix/src/posix-gfid-path.h4
-rw-r--r--xlators/storage/posix/src/posix-handle.c202
-rw-r--r--xlators/storage/posix/src/posix-handle.h33
-rw-r--r--xlators/storage/posix/src/posix-helpers.c588
-rw-r--r--xlators/storage/posix/src/posix-inode-fd-ops.c364
-rw-r--r--xlators/storage/posix/src/posix-inode-handle.h13
-rw-r--r--xlators/storage/posix/src/posix-messages.h4
-rw-r--r--xlators/storage/posix/src/posix-metadata.c153
-rw-r--r--xlators/storage/posix/src/posix-metadata.h6
-rw-r--r--xlators/storage/posix/src/posix.h139
-rw-r--r--xlators/system/posix-acl/src/posix-acl.c18
682 files changed, 41556 insertions, 34033 deletions
diff --git a/.github/ISSUE_TEMPLATE b/.github/ISSUE_TEMPLATE
index 306b4eebc15..386ed2d8dd5 100644
--- a/.github/ISSUE_TEMPLATE
+++ b/.github/ISSUE_TEMPLATE
@@ -1,13 +1,30 @@
-This repository (glusterfs) uses github issues to track feature requests *only*.
+<!-- Please use this template while reporting an issue, providing as much information as possible. Failure to do so may result in a delayed response. Thank you! -->
-For assistance with bugs, please file a bug in our bugzilla instance [1]
+**Description of problem:**
-For other queries regarding glusterfs, please post to the users [2] or devel [3] lists as appropriate.
-You may further want to subscribe to these lists or view the archives for these lists, see [4] [5].
+**The exact command to reproduce the issue**:
+
+
+**The full output of the command that failed**:
+<details>
+
+
+
+</details>
+
+**Expected results:**
+
+
+**Additional info:**
+
+
+**- The output of the `gluster volume info` command**:
+<details>
+
+
+
+</details>
+
+**- The operating system / glusterfs version**:
-[1] glusterfs Bugzilla: https://bugzilla.redhat.com/enter_bug.cgi?product=GlusterFS
-[2] Gluster users list ID: gluster-users@gluster.org
-[3] Gluster devel list ID: gluster-devel@gluster.org
-[4] Gluster users list subscribe/view archives: https://lists.gluster.org/mailman/listinfo/gluster-users
-[5] Gluster devel list subscribe/view archives: https://lists.gluster.org/mailman/listinfo/gluster-devel
diff --git a/.github/PULL_REQUEST_TEMPLATE b/.github/PULL_REQUEST_TEMPLATE
index 0ec6eb319ee..e69de29bb2d 100644
--- a/.github/PULL_REQUEST_TEMPLATE
+++ b/.github/PULL_REQUEST_TEMPLATE
@@ -1,33 +0,0 @@
-Many thanks for your interest in improving GlusterFS!
-
-GlusterFS does not use GitHub Pull-Requests. Instead, changes are reviewed
-on the Gerrit instance of the Gluster Community at https://review.gluster.org
-
-In order to send your changes for review, follow these steps:
-
-1. login on https://review.gluster.org with your GitHub account
-2. add a public ssh-key to your profile on https://review.gluster.org/#/settings/ssh-keys
-3. add the Gerrit remote to your locally cloned git repository
-
- $ git remote add gerrit ssh://$USER@review.gluster.org/glusterfs.git
-
-4. configure the commit hooks
-
- $ git review --setup
-
-5. post your changes to Gerrit
-
- $ git review
-
-
-You may need to install the 'git-review' package if 'git review' is not
-available. Note that the hooks for the repository make sure to add a ChangeId
-label in the commit messages. Gerrit uses the ChangeId to track single patches
-and its updated versions.
-
-For more details, see the documented development workflow at
- http://gluster.readthedocs.io/en/latest/Developer-guide/Simplified-Development-Workflow/
-
-If there are any troubles or difficulties with these instructions, please
-contact us on gluster-devel@gluster.org or on Freenode IRC in the #gluster-dev
-channel.
diff --git a/.github/RELEASE_TRACKER_TEMPLATE b/.github/RELEASE_TRACKER_TEMPLATE
new file mode 100644
index 00000000000..502bbd5556c
--- /dev/null
+++ b/.github/RELEASE_TRACKER_TEMPLATE
@@ -0,0 +1,12 @@
+<!-- Please use this template while creating a tracker issue -->
+
+**Description of problem:**
+A tracker issue to track the issues that will be fixed as a part of this release
+
+
+**Major or minor release**:
+
+
+**Release version**:
+
+
diff --git a/.github/stale.yml b/.github/stale.yml
new file mode 100644
index 00000000000..460e327c6ea
--- /dev/null
+++ b/.github/stale.yml
@@ -0,0 +1,25 @@
+# Number of days of inactivity before an issue becomes stale
+daysUntilStale: 210
+# Number of days of inactivity before a stale issue is closed
+daysUntilClose: 15
+# Issues with these labels will never be considered stale
+exemptLabels:
+ - pinned
+ - security
+# Label to use when marking an issue as stale
+staleLabel: wontfix
+
+# Comment to post when marking an issue as stale. Set to `false` to disable
+markComment: >
+ Thank you for your contributions.
+
+  We noticed that this issue has not had any activity in the last ~6
+  months. We are marking this issue as stale because it has not had
+  recent activity.
+
+ It will be closed in 2 weeks if no one responds with a comment here.
+
+
+# Comment to post when closing a stale issue. Set to `false` to disable
+closeComment: >
+  Closing this issue as there has been no update since the last reminder.
+  If this issue is still valid, feel free to reopen it.
diff --git a/.gitignore b/.gitignore
index 8e05080c947..fc5ba586f8e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,6 +29,7 @@ test-driver
*.patch
.libs
.deps
+.dirstamp
# Softlinks to test and log
log
@@ -86,17 +87,14 @@ glusterfsd/src/glusterfsd
glusterfsd/src/gf_attach
heal/src/glfsheal
libgfchangelog.pc
-libgfdb.pc
libglusterfs/src/graph.lex.c
libglusterfs/src/y.tab.c
libglusterfs/src/y.tab.h
libglusterfs/src/defaults.c
-libglusterfs/src/glusterfs-fops.h
libglusterfs/src/cli1-xdr.h
libglusterfs/src/protocol-common.h
libtool
# copied XDR for cyclic libglusterfs <-> rpc-header dependency
-rpc/xdr/gen/*.x
run-tests.sh
!tests/basic/fuse/Makefile
!tests/basic/gfapi/Makefile
@@ -110,14 +108,6 @@ extras/peer_add_secret_pub
tools/gfind_missing_files/gcrawler
tools/glusterfind/glusterfind
tools/glusterfind/src/tool.conf
-# Generated by fdl xlator
-xlators/experimental/fdl/src/fdl.c
-xlators/experimental/fdl/src/gf_logdump
-xlators/experimental/fdl/src/gf_recon
-xlators/experimental/fdl/src/libfdl.c
-xlators/experimental/fdl/src/librecon.c
-xlators/experimental/jbr-client/src/jbrc-cg.c
-xlators/experimental/jbr-server/src/jbr-cg.c
# Eventing
events/src/eventsapiconf.py
extras/systemd/glustereventsd.service
@@ -131,3 +121,5 @@ xlators/features/cloudsync/src/cloudsync-autogen-fops.c
xlators/features/cloudsync/src/cloudsync-autogen-fops.h
xlators/features/utime/src/utime-autogen-fops.c
xlators/features/utime/src/utime-autogen-fops.h
+tests/basic/metadisp/ftruncate
+xlators/features/metadisp/src/fops.c
diff --git a/.testignore b/.testignore
index 190573191e1..fe8f838bf2b 100644
--- a/.testignore
+++ b/.testignore
@@ -1,4 +1,6 @@
.github/ISSUE_TEMPLATE
+.github/PULL_REQUEST_TEMPLATE
+.github/stale.yml
.gitignore
.mailmap
.testignore
@@ -6,7 +8,7 @@
rfc.sh
submit-for-review.sh
AUTHORS
-CONTRIBUTING
+CONTRIBUTING.md
COPYING-GPLV2
COPYING-LGPLV3
ChangeLog
diff --git a/CONTRIBUTING b/CONTRIBUTING
deleted file mode 100644
index 7bccd88d7e5..00000000000
--- a/CONTRIBUTING
+++ /dev/null
@@ -1,25 +0,0 @@
- Developer's Certificate of Origin 1.1
-
- By making a contribution to this project, I certify that:
-
- (a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
- (b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
- (c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
- (d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000000..65fc3497104
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,114 @@
+# GlusterFS project Contribution guidelines
+
+## Development Workflow
+
+We follow most of the details as per the [document here](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests). If you are not aware of the GitHub workflow, it is recommended to go through it before continuing here.
+
+
+#### Get the Repository setup
+
+0. Fork Repository
+ - Fork [GlusterFS repository](https://github.com/gluster/glusterfs/fork).
+
+1. Clone Repository
+ - Clone the glusterfs repo freshly from github using below steps.
+
+```
+ git clone git@github.com:${username}/glusterfs.git
+ cd glusterfs/
+ git remote add upstream git@github.com:gluster/glusterfs.git
+```
+
+The above two tasks are one-time setup steps for the lifetime of the repository. You can continue to use the same repository for all your future work.
+
+#### Development & Other flows
+
+0. Issue:
+ - Make sure there is an issue filed for the task you are working on.
+ - If it is not filed, open the issue with all the description.
+ - If it is a bug fix, add label "Type:Bug".
+ - If it is an RFC, provide all the documentation, and request for "DocApproved", and "SpecApproved" label.
+
+1. Code:
+ - Start coding
+ - Build and test locally
+ - Make sure clang-format is installed and is run on the patch.
+
+2. Keep up-to-date
+   - GlusterFS is a large project with many developers, so new patches land almost every day.
+ - It is critical for developer to be up-to-date with `devel` repo to be Conflict-Free when PR is opened.
+ - Git provides many options to keep up-to-date, below is one of them
+```
+ git fetch upstream
+ git rebase upstream/devel
+```
+   - It is recommended you keep pushing to your repo every day, so you don't lose any work.
+ - It can be done by `./rfc.sh` (or `git push origin HEAD:issueNNN`)
+
+2. Commit Message / PR description:
+ - The name of the branch on your personal fork can start with issueNNNN, followed by anything of your choice.
+ - PRs continue to have the title of format "component: \<title\>", like it is practiced now.
+ - When you open a PR, having a reference Issue for the commit is mandatory in GlusterFS.
+ - Commit message can have, either `Fixes: #NNNN` or `Updates: #NNNN` in a separate line in the commit message.
+ - Here, NNNN is the Issue ID in glusterfs repository.
+ - Each commit needs the author to have the "Signed-off-by: Name \<email\>" line.
+ - Can do this by `-s` option for `git commit`.
+ - If the PR is not ready for review, apply the label `work-in-progress`.
+ - Check the availability of "Draft PR" is present for you, if yes, use that instead.
+
+3. Tests:
+ - All the required smoke tests would be auto-triggered.
+ - Developers get a chance to retrigger the smoke tests using **"/recheck smoke"** as comment.
+ - The "regression" tests would be triggered by a comment **"/run regression"** from developers in the [@gluster-maintainers](https://github.com/orgs/gluster/teams/gluster-maintainers) group.
+ - Ask for help as comment in PR if you have any questions about the process!
+
+4. Review Process:
+ - `+2` : is equivalent to "Approve" from the people in the maintainer's group.
+ - `+1` : can be given by a maintainer/reviewer by explicitly stating that in the comment.
+ - `-1` : provide details on required changes and pick "Request Changes" while submitting your review.
+ - `-2` : done by adding the `DO-NOT-MERGE` label.
+
+ - Any further discussions can happen as comments in the PR.
+
+5. Making changes:
+ - There are 2 approaches to submit changes done after addressing review comments.
+ - Commit changes as a new commit on top of the original commits in the branch, and push the changes to same branch (issueNNNN)
+ - Commit changes into the same commit with `--amend` option, and do a push to the same branch with `--force` option.
+
+6. Merging:
+ - GlusterFS project follows 'Squash and Merge' method
+ - This is mainly to preserve the historic Gerrit method of one patch in `git log` for one URL link.
+ - This also makes every merge a complete patch, which has passed all tests.
+ - The merging of the patch is expected to be done by the maintainers.
+ - It can be done when all the tests (smoke and regression) pass.
+ - When the PR has 'Approved' flag from corresponding maintainer.
+ - If you feel there is delay, feel free to add a comment, discuss the same in Slack channel, or send email.
+
+## By contributing to this project, the contributor would need to agree to below.
+
+### Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+
diff --git a/MAINTAINERS b/MAINTAINERS
index 7ff1142f735..953e8755fd9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -51,15 +51,10 @@ Descriptions of section entries:
General Project Architects
--------------------------
-M: Jeff Darcy <jeff@pl.atyp.us>
-M: Vijay Bellur <vbellur@redhat.com>
-P: Amar Tumballi <amarts@redhat.com>
-P: Pranith Karampuri <pkarampu@redhat.com>
-P: Raghavendra Gowdappa <rgowdapp@redhat.com>
-P: Shyamsundar Ranganathan <srangana@redhat.com>
-P: Niels de Vos <ndevos@redhat.com>
-P: Xavier Hernandez <xhernandez@datalab.es>
-
+M: Amar Tumballi <amarts@gmail.com>
+M: Xavier Hernandez <xhernandez@redhat.com>
+P: Pranith Karampuri <pranith.karampuri@phonepe.com>
+P: Atin Mukherjee <amukherj@redhat.com>
xlators:
--------
@@ -71,13 +66,14 @@ F: xlators/system/posix-acl/
Arbiter
M: Ravishankar N <ravishankar@redhat.com>
-P: Pranith Karampuri <pkarampu@redhat.com>
+P: Pranith Karampuri <pranith.karampuri@phonepe.com>
S: Maintained
F: xlators/features/arbiter/
Automatic File Replication (AFR)
-M: Pranith Karampuri <pkarampu@redhat.com>
-P: Ravishankar N <ravishankar@redhat.com>
+M: Pranith Karampuri <pranith.karampuri@phonepe.com>
+M: Ravishankar N <ravishankar@redhat.com>
+P: Karthik US <ksubrahm@redhat.com>
S: Maintained
F: xlators/cluster/afr/
@@ -87,10 +83,6 @@ P: Atin Mukherjee <amukherj@redhat.com>
S: Maintained
F: xlators/features/barrier
-Block Device
-S: Orphan
-F: xlators/storage/bd/
-
BitRot
M: Kotresh HR <khiremat@redhat.com>
P: Raghavendra Bhat <rabhat@redhat.com>
@@ -104,17 +96,14 @@ S: Maintained
F: xlators/features/changelog/
Distributed Hashing Table (DHT)
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-M: Nithya Balachandran <nbalacha@redhat.com>
P: Susant Palai <spalai@redhat.com>
S: Maintained
F: xlators/cluster/dht/
Erasure Coding
-M: Pranith Karampuri <pkarampu@redhat.com>
-M: Xavier Hernandez <xhernandez@datalab.es>
+M: Pranith Karampuri <pranith.karampuri@phonepe.com>
+M: Xavier Hernandez <xhernandez@redhat.com>
P: Ashish Pandey <aspandey@redhat.com>
-P: Sunil Kumar Acharya <sheggodu@redhat.com>
S: Maintained
F: xlators/cluster/ec/
@@ -124,21 +113,19 @@ S: Maintained
F: xlators/debug/error-gen/
FUSE Bridge
-M: Niels de Vos <ndevos@redhat.com>
-P: Csaba Henk <chenk@redhat.com>
+M: Csaba Henk <chenk@redhat.com>
+P: Niels de Vos <ndevos@redhat.com>
S: Maintained
F: xlators/mount/
Index
-M: Pranith Karampuri <pkarampu@redhat.com>
+M: Pranith Karampuri <pranith.karampuri@phonepe.com>
P: Ravishankar N <ravishankar@redhat.com>
S: Maintained
F: xlators/features/index/
IO Cache
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-P: Mohammed Rafi KC <rkavunga@redhat.com>
-P: Nithya Balachandran <nbalacha@redhat.com>
+P: Mohammed Rafi KC <rafi.kavungal@iternity.com>
S: Maintained
F: xlators/performance/io-cache/
@@ -149,7 +136,7 @@ S: Maintained
F: xlators/debug/io-stats/
IO threads
-M: Pranith Karampuri <pkarampu@redhat.com>
+M: Pranith Karampuri <pranith.karampuri@phonepe.com>
P: Ravishankar N <ravishankar@redhat.com>
S: Maintained
F: xlators/performance/io-threads/
@@ -163,18 +150,17 @@ F: xlators/features/leases/
Locks
M: Krutika Dhananjay <kdhananj@redhat.com>
+P: Xavier Hernandez <xhernandez@redhat.com>
S: Maintained
F: xlators/features/locks/
Marker
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
M: Kotresh HR <khiremat@redhat.com>
-P: Sanoj Unnikrishnan <sunnikri@redhat.com>
S: Maintained
F: xlators/features/marker/
Meta
-M: Mohammed Rafi KC <rkavunga@redhat.com>
+M: Mohammed Rafi KC <rafi.kavungal@iternity.com>
S: Maintained
F: xlators/features/meta/
@@ -186,53 +172,44 @@ F: xlators/performance/md-cache/
Negative-lookup Cache
M: Poornima G <pgurusid@redhat.com>
-P: Pranith Karampuri <pkarampu@redhat.com>
+P: Pranith Karampuri <pranith.karampuri@phonepe.com>
S: Maintained
F: xlators/performance/nl-cache/
-NFS
-M: Shreyas Siravara <sshreyas@fb.com>
-M: Jeff Darcy <jeff@pl.atyp.us>
-P: Jiffin Tony Thottan <jthottan@redhat.com>
-P: Soumya Koduri <skoduri@redhat.com>
-S: Maintained
+gNFS
+M: Jiffin Tony Thottan <jthottan@redhat.com>
+P: Xie Changlong <xiechanglong@cmss.chinamobile.com>
+P: Amar Tumballi <amarts@gmail.com>
+S: Odd Fixes
F: xlators/nfs/server/
Open-behind
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-P: Milind Changire <mchangir@redhat.com>
S: Maintained
F: xlators/performance/open-behind/
Posix:
M: Raghavendra Bhat <raghavendra@redhat.com>
+P: Kotresh HR <khiremat@redhat.com>
P: Krutika Dhananjay <kdhananj@redhat.com>
-P: Jiffin Tony Thottan <jthottan@redhat.com>
S: Maintained
F: xlators/storage/posix/
Quick-read
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-P: Milind Changire <mchangir@redhat.com>
S: Maintained
F: xlators/performance/quick-read/
Quota
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-P: Sanoj Unnikrishnan <sunnikri@redhat.com>
M: Shyamsundar Ranganathan <srangana@redhat.com>
+P: Hari Gowtham <hgowtham@redhat.com>
S: Maintained
F: xlators/features/quota/
Read-ahead
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
P: Csaba Henk <chenk@redhat.com>
S: Maintained
F: xlators/performance/read-ahead/
Readdir-ahead
-M: Poornima G <pgurusid@redhat.com>
-P: Krutika Dhananjay <kdhananj@redhat.com>
S: Maintained
F: xlators/performance/readdir-ahead/
@@ -256,7 +233,6 @@ S: Maintained
F: xlators/features/upcall/
Write-behind
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
P: Csaba Henk <chenk@redhat.com>
S: Maintained
F: xlators/performance/write-behind/
@@ -271,45 +247,24 @@ M: Susant Kumar Palai <spalai@redhat.com>
S: Maintained
F: xlators/features/cloudsync/
-Experimental Features:
-----------------------
-
-RIO-Distribution
-M: Shyamsundar Ranganathan <srangana@redhat.com>
-P: Kotresh HR <khiremat@redhat.com>
-P: Susant Palai <spalai@redhat.com>
-S: Maintained
-F: xlators/experimental/dht2/
-
-Journal Based Replication
-M: Jeff Darcy <jeff@pl.atyp.us>
-P: Mohammed Rafi KC <rkavunga@redhat.com>
-S: Maintained
-F: xlators/experimental/fdl/
-F: xlators/experimenta/jbr-client/
-F: xlators/experimental/jbr-server/
-
-
Other bits of code:
-------------------
Doc
M: Humble Chirammal <hchiramm@redhat.com>
M: Raghavendra Talur <rtalur@redhat.com>
-M: Prashanth Pai <ppai@redhat.com>
S: Maintained
F: doc/
Geo Replication
M: Aravinda V K <avishwan@redhat.com>
M: Kotresh HR <khiremat@redhat.com>
-P: Mohammed Rafi KC <rkavunga@redhat.com>
+M: Sunny Kumar <sunkumar@redhat.com>
S: Maintained
F: geo-replication/
Glusterfind
-M: Milind Changire <mchangir@redhat.com>
-P: Aravinda VK <avishwan@redhat.com>
+M: Aravinda VK <avishwan@redhat.com>
S: Maintained
F: tools/glusterfind/
@@ -322,12 +277,12 @@ S: Maintained
F: api/
libglusterfs
-M: Amar Tumballi <amarts@redhat.com>
+M: Amar Tumballi <amarts@gmail.com>
+M: Xavier Hernandez <xhernandez@redhat.com>
M: Jeff Darcy <jeff@pl.atyp.us>
P: Kaleb Keithley <kkeithle@redhat.com>
P: Niels de Vos <ndevos@redhat.com>
-P: Pranith Karampuri <pkarampu@redhat.com>
-P: Raghavendra Gowdappa <rgowdapp@redhat.com>
+P: Pranith Karampuri <pranith.karampuri@phonepe.com>
P: Shyamsundar Ranganathan <srangana@redhat.com>
S: Maintained
F: libglusterfs/
@@ -335,43 +290,26 @@ F: libglusterfs/
xxhash
M: Aravinda VK <avishwan@redhat.com>
M: Kotresh HR <khiremat@redhat.com>
-P: Amar Tumballi <amarts@redhat.com>
+P: Yaniv Kaul <ykaul@redhat.com>
S: Maintained
F: contrib/xxhash/
T: https://github.com/Cyan4973/xxHash.git
-Management Daemon - glusterd1
+Management Daemon - glusterd
M: Atin Mukherjee <amukherj@redhat.com>
-M: Samikshan Bairagya <samikshan@gmail.com>
+M: Mohit Agrawal <moagrawa@redhat.com>
+M: Sanju Rakonde <srakonde@redhat.com>
S: Maintained
F: cli/
F: xlators/mgmt/glusterd/
-Management Daemon - glusterd2
-M: Kaushal M <kaushal@redhat.com>
-M: Prashanth Pai <ppai@redhat.com>
-P: Aravinda VK <avishwan@redhat.com>
-S: Maintained
-T: https://github.com/gluster/glusterd2.git
-
Protocol
-M: Kaleb Keithley <kkeithle@redhat.com>
M: Niels de Vos <ndevos@redhat.com>
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-P: Mohammed Rafi KC <rkavunga@redhat.com>
+P: Mohammed Rafi KC <rafi.kavungal@iternity.com>
S: Maintained
F: xlators/protocol/
-RDMA subsystem
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-M: Amar Tumballi <amarts@redhat.com>
-P: Mohammed Rafi KC <rkavunga@redhat.com>
-S: Maintained
-F: rpc/rpc-transport/rdma/
-
Remote Procedure Call subsystem
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
-P: Milind Changire <mchangir@redhat.com>
P: Mohit Agrawal <moagrawa@redhat.com>
S: Maintained
F: rpc/rpc-lib/
@@ -379,17 +317,16 @@ F: rpc/xdr/
Snapshot
M: Raghavendra Bhat <raghavendra@redhat.com>
-P: Mohammed Rafi KC <rkavunga@redhat.com>
+P: Mohammed Rafi KC <rafi.kavungal@iternity.com>
P: Sunny Kumar <sunkumar@redhat.com>
S: Maintained
F: xlators/mgmt/glusterd/src/glusterd-snap*
F: extras/snap-scheduler.py
Socket subsystem
-M: Raghavendra Gowdappa <rgowdapp@redhat.com>
P: Krutika Dhananjay <kdhananj@redhat.com>
P: Milind Changire <mchangir@redhat.com>
-P: Mohammed Rafi KC <rkavunga@redhat.com>
+P: Mohammed Rafi KC <rafi.kavungal@iternity.com>
P: Mohit Agrawal <moagrawa@redhat.com>
S: Maintained
F: rpc/rpc-transport/socket/
@@ -418,16 +355,18 @@ F: extras/systemd/glustereventsd*
Distribution Specific:
----------------------
Build:
-M: Kaleb Keithley <kkeithle@redhat.com>
M: Niels de Vos <ndevos@redhat.com>
+M: Hari Gowtham <hgowtham@redhat.com>
P: Anoop C S <anoopcs@redhat.com>
-P: Kaushal M <kaushal@redhat.com>
P: Raghavendra Talur <rtalur@redhat.com>
+P: Rinku Kothiya <rkothiya@redhat.com>
S: Maintained
Debian packages on download.gluster.org
M: packaging@gluster.org
M: Kaleb Keithley <kkeithle@redhat.com>
+P: Sheetal Pamecha <spamecha@redhat.com>
+P: Shwetha Acharya <sacharya@redhat.com>
S: Maintained
W: http://download.gluster.org/pub/gluster/glusterfs/LATEST/Debian/Debian.README
T: https://github.com/gluster/glusterfs-debian.git
@@ -435,6 +374,8 @@ T: https://github.com/gluster/glusterfs-debian.git
OpenSuSE
M: packaging@gluster.org
M: Kaleb Keithley <kkeithle@redhat.com>
+P: Sheetal Pamecha <spamecha@redhat.com>
+P: Shwetha Acharya <sacharya@redhat.com>
S: Maintained
W: https://build.opensuse.org/repositories/home:glusterfs
W: https://download.gluster.org/pub/gluster/glusterfs/LATEST/SuSE/SuSE.README
@@ -451,6 +392,8 @@ T: https://github.com/CentOS-Storage-SIG/glusterfs.git
Ubuntu PPA
M: packaging@gluster.org
M: Kaleb Keithley <kkeithle@redhat.com>
+P: Sheetal Pamecha <spamecha@redhat.com>
+P: Shwetha Acharya <sacharya@redhat.com>
S: Maintained
W: https://launchpad.net/~gluster
W: http://download.gluster.org/pub/gluster/glusterfs/LATEST/Ubuntu/Ubuntu.README
@@ -460,22 +403,10 @@ Related projects
----------------
Gluster Block
M: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
-P: Pranith Karampuri <pkarampu@redhat.com>
-P: Niels de Vos <ndevos@redhat.com>
+M: Xiubo Li <xiubli@redhat.com>
S: Maintained
T: https://github.com/gluster/gluster-block.git
-Gluster Object
-P: Ram Edara <redara@redhat.com>
-P: Saravanakumar Arumugam <sarumuga@redhat.com>
-S: Maintained
-T: https://github.com/gluster/gluster-swift.git
-
-GlusterFS Hadoop HCFS plugin
-S: Orphan
-W: https://github.com/gluster/glusterfs-hadoop/wiki
-T: https://github.com/gluster/glusterfs-hadoop.git
-
GlusterFS core-utils
M: Anoop C S <anoopcs@redhat.com>
S: Maintained
@@ -489,11 +420,6 @@ S: Maintained
T: git://github.com/nfs-ganesha/nfs-ganesha.git
F: src/nfs-ganesha~/src/FSAL/FSAL_GLUSTER/
-Nagios Monitoring
-M: Sahina Bose <sabose@redhat.com>
-S: Maintained
-T: https://github.com/gluster/nagios-plugins-gluster.git
-
QEMU integration
M: Niels de Vos <ndevos@redhat.com>
M: Prasanna Kumar Kalever <prasanna.kalever@redhat.com>
@@ -536,12 +462,11 @@ Infrastructure
Platform
M: Michael Scherer <misc@redhat.com>
P: Shyamsundar Ranganathan <srangana@redhat.com>
-P: Amar Tumballi <amarts@redhat.com>
+P: Amar Tumballi <amarts@gmail.com>
Continuous Integration
M: Michael Scherer <misc@redhat.com>
M: Deepshikha Khandelwal <dkhandel@redhat.com>
-P: Kaushal M <kaushal@redhat.com>
P: Niels de Vos <ndevos@redhat.com>
Special Thanks
@@ -549,6 +474,17 @@ Special Thanks
GlusterFS would not be possible without the contributions of:
+
+M: Vijay Bellur <vbellur@redhat.com>
+M: Jeff Darcy <jeff@pl.atyp.us>
+M: Shreyas Siravara <sshreyas@fb.com>
+M: Kaushal M <kaushal@redhat.com>
+M: Nigel Babu
+M: Prashanth Pai
+P: Sanoj Unnikrishnan
+P: Milind Changire <mchangir@redhat.com>
+P: Sunil Kumar Acharya <sheggodu@redhat.com>
+M: Samikshan Bairagya <samikshan@gmail.com>
M: Chris Hertel
M: M. Mohan Kumar <mohan@in.ibm.com>
M: Shishir Gowda <gowda.shishir@gmail.com>
@@ -570,3 +506,5 @@ M: Jay Vyas
M: Luis Pabon
M: Ira Cooper
M: Shwetha Panduranga
+M: Nithya Balachandran
+M: Raghavendra Gowdappa
diff --git a/Makefile.am b/Makefile.am
index e0c795f418f..98ea5c1038d 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -9,8 +9,9 @@ EXTRA_DIST = autogen.sh \
contrib/umountd \
$(shell find $(top_srcdir)/tests -type f -print)
-SUBDIRS = $(ARGP_STANDALONE_DIR) rpc/xdr/gen libglusterfs rpc api xlators \
- glusterfsd $(FUSERMOUNT_SUBDIR) doc extras cli heal \
+
+SUBDIRS = $(ARGP_STANDALONE_DIR) libglusterfs rpc libglusterd api \
+ glusterfsd xlators $(FUSERMOUNT_SUBDIR) doc extras cli heal \
@SYNCDAEMON_SUBDIR@ @UMOUNTD_SUBDIR@ tools events
pkgconfigdir = @pkgconfigdir@
@@ -18,9 +19,13 @@ pkgconfig_DATA = glusterfs-api.pc libgfchangelog.pc
CLEANFILES = glusterfs-api.pc libgfchangelog.pc contrib/umountd/Makefile
+clean-local:
+ find . -name '*.o' -o -name '*.lo' -o -name '.Po' | xargs rm -f
+
gitclean: distclean
find . -name Makefile.in -exec rm -f {} \;
find . -name mount.glusterfs -exec rm -f {} \;
+ find . -name .deps -o -name .libs | xargs rm -rf
rm -fr autom4te.cache
rm -f missing aclocal.m4 config.h.in config.guess config.sub ltmain.sh install-sh configure depcomp
@@ -48,4 +53,3 @@ gen-VERSION:
./build-aux/pkg-version --full \
> $(abs_top_builddir)/$(distdir)/VERSION; \
fi
-
diff --git a/README.md b/README.md
index 92f829e431e..9d68e033782 100644
--- a/README.md
+++ b/README.md
@@ -3,8 +3,7 @@
petabytes. It provides interfaces for object, block and file storage.
## Development
- Contributions to gluster in the form of patches and new feature additions can
- be made by following steps outlined at [Developers Guide](http://docs.gluster.org/en/latest/Developer-guide/Developers-Index/#contributing-to-the-gluster-community).
+ The development workflow is documented in [Contributors guide](CONTRIBUTING.md)
## Documentation
The Gluster documentation can be found at [Gluster Docs](http://docs.gluster.org).
diff --git a/api/src/README.Symbol_Versions b/api/src/README.Symbol_Versions
index d5cdedd826b..b6ec95f9311 100644
--- a/api/src/README.Symbol_Versions
+++ b/api/src/README.Symbol_Versions
@@ -1,3 +1,3 @@
-See .../doc/gfapi-symbol-versions/gfapi-symbol-versions.md
+See ../../doc/developer-guide/gfapi-symbol-versions.md
diff --git a/api/src/gfapi-messages.h b/api/src/gfapi-messages.h
index 68d12427aea..b9223940416 100644
--- a/api/src/gfapi-messages.h
+++ b/api/src/gfapi-messages.h
@@ -49,6 +49,99 @@ GLFS_MSGID(API, API_MSG_MEM_ACCT_INIT_FAILED, API_MSG_MASTER_XLATOR_INIT_FAILED,
API_MSG_INODE_LINK_FAILED, API_MSG_STATEDUMP_FAILED,
API_MSG_XREADDIRP_R_FAILED, API_MSG_LOCK_INSERT_MERGE_FAILED,
API_MSG_SETTING_LOCK_TYPE_FAILED, API_MSG_INODE_FIND_FAILED,
- API_MSG_FDCTX_SET_FAILED, API_MSG_UPCALL_SYNCOP_FAILED);
+ API_MSG_FDCTX_SET_FAILED, API_MSG_UPCALL_SYNCOP_FAILED,
+ API_MSG_INVALID_ARG, API_MSG_UPCALL_EVENT_NULL_RECEIVED,
+ API_MSG_FLAGS_HANDLE, API_MSG_FDCREATE_FAILED_ON_GRAPH,
+ API_MSG_TRANS_RDMA_DEP, API_MSG_TRANS_NOT_SUPPORTED,
+ API_MSG_FS_NOT_INIT, API_MSG_INVALID_SYSRQ,
+ API_MSG_DECODE_XDR_FAILED, API_MSG_NULL, API_MSG_CALL_NOT_SUCCESSFUL,
+ API_MSG_CALL_NOT_VALID, API_MSG_UNABLE_TO_DEL,
+ API_MSG_REMOTE_HOST_DISCONN, API_MSG_HANDLE_NOT_SET);
+#define API_MSG_ALLOC_FAILED_STR "Upcall allocation failed"
+#define API_MSG_LOCK_INSERT_MERGE_FAILED_STR \
+ "Lock insertion and splitting/merging failed"
+#define API_MSG_SETTING_LOCK_TYPE_FAILED_STR "Setting lock type failed"
+
+#define API_MSG_INVALID_ARG_STR "Invalid"
+#define API_MSG_INVALID_ENTRY_STR "Upcall entry validation failed"
+#define API_MSG_INODE_FIND_FAILED_STR "Unable to find inode entry"
+#define API_MSG_CREATE_HANDLE_FAILED_STR "handle creation failed"
+#define API_MSG_UPCALL_EVENT_NULL_RECEIVED_STR \
+ "Upcall_EVENT_NULL received. Skipping it"
+#define API_MSG_UPCALL_SYNCOP_FAILED_STR "Synctask for upcall failed"
+#define API_MSG_FDCREATE_FAILED_STR "Allocating anonymous fd failed"
+#define API_MSG_XREADDIRP_R_FAILED_STR "glfs_x_readdirp_r failed"
+#define API_MSG_FDCTX_SET_FAILED_STR "Setting fd ctx failed"
+#define API_MSG_FLAGS_HANDLE_STR "arg not set. Flags handled are"
+#define API_MSG_INODE_REFRESH_FAILED_STR "inode refresh failed"
+#define API_MSG_INODE_LINK_FAILED_STR "inode linking failed"
+#define API_MSG_GET_CWD_FAILED_STR "Failed to get cwd"
+#define API_MSG_FGETXATTR_FAILED_STR "fgetxattr failed"
+#define API_MSG_LOCKINFO_KEY_MISSING_STR "missing lockinfo key"
+#define API_MSG_FSYNC_FAILED_STR "fsync() failed"
+#define API_MSG_FDCREATE_FAILED_ON_GRAPH_STR "fd_create failed on graph"
+#define API_MSG_INODE_PATH_FAILED_STR "inode_path failed"
+#define API_MSG_SYNCOP_OPEN_FAILED_STR "syncop_open failed"
+#define API_MSG_LOCK_MIGRATE_FAILED_STR "lock migration failed on graph"
+#define API_MSG_OPENFD_SKIPPED_STR "skipping openfd in graph"
+#define API_MSG_FIRST_LOOKUP_GRAPH_FAILED_STR "first lookup on graph failed"
+#define API_MSG_CWD_GRAPH_REF_FAILED_STR "cwd refresh of graph failed"
+#define API_MSG_SWITCHED_GRAPH_STR "switched to graph"
+#define API_MSG_FSETXATTR_FAILED_STR "fsetxattr failed"
+#define API_MSG_MEM_ACCT_INIT_FAILED_STR "Memory accounting init failed"
+#define API_MSG_MASTER_XLATOR_INIT_FAILED_STR \
+ "master xlator for initialization failed"
+#define API_MSG_GFAPI_XLATOR_INIT_FAILED_STR \
+ "failed to initialize gfapi translator"
+#define API_MSG_VOLFILE_OPEN_FAILED_STR "volume file open failed"
+#define API_MSG_VOL_SPEC_FILE_ERROR_STR "Cannot reach volume specification file"
+#define API_MSG_TRANS_RDMA_DEP_STR \
+ "transport RDMA is deprecated, falling back to tcp"
+#define API_MSG_TRANS_NOT_SUPPORTED_STR \
+ "transport is not supported, possible values tcp|unix"
+#define API_MSG_GLFS_FSOBJ_NULL_STR "fs is NULL"
+#define API_MSG_FS_NOT_INIT_STR "fs is not properly initialized"
+#define API_MSG_FSMUTEX_LOCK_FAILED_STR \
+ "pthread lock on glfs mutex, returned error"
+#define API_MSG_FSMUTEX_UNLOCK_FAILED_STR \
+ "pthread unlock on glfs mutex, returned error"
+#define API_MSG_COND_WAIT_FAILED_STR "cond wait failed"
+#define API_MSG_INVALID_SYSRQ_STR "not a valid sysrq"
+#define API_MSG_GRAPH_CONSTRUCT_FAILED_STR "failed to construct the graph"
+#define API_MSG_API_XLATOR_ERROR_STR \
+ "api master xlator cannot be specified in volume file"
+#define API_MSG_STATEDUMP_FAILED_STR "statedump failed"
+#define API_MSG_DECODE_XDR_FAILED_STR \
+ "Failed to decode xdr response for GF_CBK_STATEDUMP"
+#define API_MSG_NULL_STR "NULL"
+#define API_MSG_XDR_PAYLOAD_FAILED_STR "failed to create XDR payload"
+#define API_MSG_CALL_NOT_SUCCESSFUL_STR \
+ "GET_VOLUME_INFO RPC call is not successful"
+#define API_MSG_XDR_RESPONSE_DECODE_FAILED_STR \
+ "Failed to decode xdr response for GET_VOLUME_INFO"
+#define API_MSG_CALL_NOT_VALID_STR \
+ "Response received for GET_VOLUME_INFO RPC is not valid"
+#define API_MSG_GET_VOLINFO_CBK_FAILED_STR \
+ "In GET_VOLUME_INFO cbk, received error"
+#define API_MSG_FETCH_VOLUUID_FAILED_STR "Unable to fetch volume UUID"
+#define API_MSG_INSUFF_SIZE_STR "Insufficient size passed"
+#define API_MSG_FRAME_CREAT_FAILED_STR "failed to create the frame"
+#define API_MSG_DICT_SET_FAILED_STR "failed to set"
+#define API_MSG_XDR_DECODE_FAILED_STR "XDR decoding error"
+#define API_MSG_GET_VOLFILE_FAILED_STR "failed to get the volume file"
+#define API_MSG_VOLFILE_INFO_STR "No change in volfile, continuing"
+#define API_MSG_UNABLE_TO_DEL_STR "unable to delete file"
+#define API_MSG_WRONG_OPVERSION_STR \
+ "Server is operating at an op-version which is not supported"
+#define API_MSG_DICT_SERIALIZE_FAILED_STR "Failed to serialize dictionary"
+#define API_MSG_REMOTE_HOST_CONN_FAILED_STR "Failed to connect to remote-host"
+#define API_MSG_REMOTE_HOST_DISCONN_STR "disconnected from remote-host"
+#define API_MSG_VOLFILE_SERVER_EXHAUST_STR "Exhausted all volfile servers"
+#define API_MSG_VOLFILE_CONNECTING_STR "connecting to next volfile server"
+#define API_MSG_CREATE_RPC_CLIENT_FAILED_STR "failed to create rpc clnt"
+#define API_MSG_REG_NOTIFY_FUNC_FAILED_STR "failed to register notify function"
+#define API_MSG_REG_CBK_FUNC_FAILED_STR "failed to register callback function"
+#define API_MSG_NEW_GRAPH_STR "New graph coming up"
+#define API_MSG_HANDLE_NOT_SET_STR "handle not set. Flags handled for xstat are"
#endif /* !_GFAPI_MESSAGES_H__ */
diff --git a/api/src/gfapi.aliases b/api/src/gfapi.aliases
index c77d72b0534..bc639e6b99f 100644
--- a/api/src/gfapi.aliases
+++ b/api/src/gfapi.aliases
@@ -196,6 +196,6 @@ _pub_glfs_copy_file_range _glfs_copy_file_range$GFAPI_6.0
_pub_glfs_fsetattr _glfs_fsetattr$GFAPI_6.0
_pub_glfs_setattr _glfs_setattr$GFAPI_6.0
-_pub_glfs_h_creat_open _glfs_h_creat_open@GFAPI_6.6
-
_pub_glfs_set_statedump_path _glfs_set_statedump_path@GFAPI_7.0
+
+_pub_glfs_h_creat_open _glfs_h_creat_open@GFAPI_6.6
diff --git a/api/src/glfs-fops.c b/api/src/glfs-fops.c
index e6adea5ea9f..6aa3c5602d1 100644
--- a/api/src/glfs-fops.c
+++ b/api/src/glfs-fops.c
@@ -119,8 +119,8 @@ glfs_get_upcall_cache_invalidation(struct gf_upcall *to_up_data,
ca_data = GF_CALLOC(1, sizeof(*ca_data), glfs_mt_upcall_entry_t);
if (!ca_data) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_ALLOC_FAILED,
- "Upcall entry allocation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_ALLOC_FAILED, "entry",
+ NULL);
goto out;
}
@@ -154,8 +154,8 @@ glfs_get_upcall_lease(struct gf_upcall *to_up_data,
ca_data = GF_CALLOC(1, sizeof(*ca_data), glfs_mt_upcall_entry_t);
if (!ca_data) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_ALLOC_FAILED,
- "Upcall entry allocation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_ALLOC_FAILED, "entry",
+ NULL);
goto out;
}
@@ -292,6 +292,7 @@ glfs_iatt_to_statx(struct glfs *fs, const struct iatt *iatt,
statx->glfs_st_attributes_mask = 0;
}
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_iatt_from_statx, 6.0)
void
priv_glfs_iatt_from_statx(struct iatt *iatt, const struct glfs_stat *statx)
{
@@ -371,7 +372,6 @@ priv_glfs_iatt_from_statx(struct iatt *iatt, const struct glfs_stat *statx)
iatt->ia_attributes = statx->glfs_st_attributes;
iatt->ia_attributes_mask = statx->glfs_st_attributes_mask;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_iatt_from_statx, 6.0);
void
glfsflags_from_gfapiflags(struct glfs_stat *stat, int *glvalid)
@@ -415,6 +415,7 @@ glfs_loc_unlink(loc_t *loc)
return 0;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_open, 3.4.0)
struct glfs_fd *
pub_glfs_open(struct glfs *fs, const char *path, int flags)
{
@@ -509,8 +510,7 @@ invalid_fs:
return glfd;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_open, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_close, 3.4.0)
int
pub_glfs_close(struct glfs_fd *glfd)
{
@@ -565,8 +565,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_close, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lstat, 3.4.0)
int
pub_glfs_lstat(struct glfs *fs, const char *path, struct stat *stat)
{
@@ -607,8 +606,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lstat, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_stat, 3.4.0)
int
pub_glfs_stat(struct glfs *fs, const char *path, struct stat *stat)
{
@@ -649,8 +647,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_stat, 3.4.0);
-
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_statx, 6.0)
int
priv_glfs_statx(struct glfs *fs, const char *path, const unsigned int mask,
struct glfs_stat *statxbuf)
@@ -704,8 +701,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_statx, 6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fstat, 3.4.0)
int
pub_glfs_fstat(struct glfs_fd *glfd, struct stat *stat)
{
@@ -754,8 +750,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fstat, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_creat, 3.4.0)
struct glfs_fd *
pub_glfs_creat(struct glfs *fs, const char *path, int flags, mode_t mode)
{
@@ -902,8 +897,6 @@ invalid_fs:
return glfd;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_creat, 3.4.0);
-
#ifdef HAVE_SEEK_HOLE
static int
glfs_seek(struct glfs_fd *glfd, off_t offset, int whence)
@@ -957,6 +950,7 @@ out:
}
#endif
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lseek, 3.4.0)
off_t
pub_glfs_lseek(struct glfs_fd *glfd, off_t offset, int whence)
{
@@ -1012,8 +1006,6 @@ invalid_fs:
return -1;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lseek, 3.4.0);
-
static ssize_t
glfs_preadv_common(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
off_t offset, int flags, struct glfs_stat *poststat)
@@ -1091,6 +1083,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_preadv, 3.4.0)
ssize_t
pub_glfs_preadv(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
off_t offset, int flags)
@@ -1098,8 +1091,7 @@ pub_glfs_preadv(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
return glfs_preadv_common(glfd, iovec, iovcnt, offset, flags, NULL);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_preadv, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_read, 3.4.0)
ssize_t
pub_glfs_read(struct glfs_fd *glfd, void *buf, size_t count, int flags)
{
@@ -1108,6 +1100,11 @@ pub_glfs_read(struct glfs_fd *glfd, void *buf, size_t count, int flags)
};
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
iov.iov_base = buf;
iov.iov_len = count;
@@ -1116,8 +1113,7 @@ pub_glfs_read(struct glfs_fd *glfd, void *buf, size_t count, int flags)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_read, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_pread34, glfs_pread, 3.4.0)
ssize_t
pub_glfs_pread34(struct glfs_fd *glfd, void *buf, size_t count, off_t offset,
int flags)
@@ -1135,8 +1131,7 @@ pub_glfs_pread34(struct glfs_fd *glfd, void *buf, size_t count, off_t offset,
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_pread34, glfs_pread, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pread, 6.0)
ssize_t
pub_glfs_pread(struct glfs_fd *glfd, void *buf, size_t count, off_t offset,
int flags, struct glfs_stat *poststat)
@@ -1154,21 +1149,23 @@ pub_glfs_pread(struct glfs_fd *glfd, void *buf, size_t count, off_t offset,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pread, 6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readv, 3.4.0)
ssize_t
pub_glfs_readv(struct glfs_fd *glfd, const struct iovec *iov, int count,
int flags)
{
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
ret = pub_glfs_preadv(glfd, iov, count, glfd->offset, flags);
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readv, 3.4.0);
-
struct glfs_io {
struct glfs_fd *glfd;
int op;
@@ -1370,6 +1367,7 @@ invalid_fs:
return -1;
}
+GFAPI_SYMVER_PUBLIC(glfs_preadv_async34, glfs_preadv_async, 3.4.0)
int
pub_glfs_preadv_async34(struct glfs_fd *glfd, const struct iovec *iovec,
int count, off_t offset, int flags, glfs_io_cbk34 fn,
@@ -1379,8 +1377,7 @@ pub_glfs_preadv_async34(struct glfs_fd *glfd, const struct iovec *iovec,
(void *)fn, data);
}
-GFAPI_SYMVER_PUBLIC(glfs_preadv_async34, glfs_preadv_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_preadv_async, 6.0)
int
pub_glfs_preadv_async(struct glfs_fd *glfd, const struct iovec *iovec,
int count, off_t offset, int flags, glfs_io_cbk fn,
@@ -1390,8 +1387,7 @@ pub_glfs_preadv_async(struct glfs_fd *glfd, const struct iovec *iovec,
_gf_false, fn, data);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_preadv_async, 6.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_read_async34, glfs_read_async, 3.4.0)
int
pub_glfs_read_async34(struct glfs_fd *glfd, void *buf, size_t count, int flags,
glfs_io_cbk34 fn, void *data)
@@ -1401,6 +1397,11 @@ pub_glfs_read_async34(struct glfs_fd *glfd, void *buf, size_t count, int flags,
};
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
iov.iov_base = buf;
iov.iov_len = count;
@@ -1410,8 +1411,7 @@ pub_glfs_read_async34(struct glfs_fd *glfd, void *buf, size_t count, int flags,
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_read_async34, glfs_read_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_read_async, 6.0)
int
pub_glfs_read_async(struct glfs_fd *glfd, void *buf, size_t count, int flags,
glfs_io_cbk fn, void *data)
@@ -1421,6 +1421,11 @@ pub_glfs_read_async(struct glfs_fd *glfd, void *buf, size_t count, int flags,
};
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
iov.iov_base = buf;
iov.iov_len = count;
@@ -1430,8 +1435,7 @@ pub_glfs_read_async(struct glfs_fd *glfd, void *buf, size_t count, int flags,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_read_async, 6.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_pread_async34, glfs_pread_async, 3.4.0)
int
pub_glfs_pread_async34(struct glfs_fd *glfd, void *buf, size_t count,
off_t offset, int flags, glfs_io_cbk34 fn, void *data)
@@ -1450,8 +1454,7 @@ pub_glfs_pread_async34(struct glfs_fd *glfd, void *buf, size_t count,
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_pread_async34, glfs_pread_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pread_async, 6.0)
int
pub_glfs_pread_async(struct glfs_fd *glfd, void *buf, size_t count,
off_t offset, int flags, glfs_io_cbk fn, void *data)
@@ -1470,34 +1473,40 @@ pub_glfs_pread_async(struct glfs_fd *glfd, void *buf, size_t count,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pread_async, 6.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_readv_async34, glfs_readv_async, 3.4.0)
int
pub_glfs_readv_async34(struct glfs_fd *glfd, const struct iovec *iov, int count,
int flags, glfs_io_cbk34 fn, void *data)
{
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
ret = glfs_preadv_async_common(glfd, iov, count, glfd->offset, flags,
_gf_true, (void *)fn, data);
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_readv_async34, glfs_readv_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readv_async, 6.0)
int
pub_glfs_readv_async(struct glfs_fd *glfd, const struct iovec *iov, int count,
int flags, glfs_io_cbk fn, void *data)
{
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
ret = glfs_preadv_async_common(glfd, iov, count, glfd->offset, flags,
_gf_false, fn, data);
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readv_async, 6.0);
-
static ssize_t
glfs_pwritev_common(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
off_t offset, int flags, struct glfs_stat *prestat,
@@ -1583,6 +1592,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_copy_file_range, 6.0)
ssize_t
pub_glfs_copy_file_range(struct glfs_fd *glfd_in, off64_t *off_in,
struct glfs_fd *glfd_out, off64_t *off_out, size_t len,
@@ -1736,8 +1746,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_copy_file_range, 6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwritev, 3.4.0)
ssize_t
pub_glfs_pwritev(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
off_t offset, int flags)
@@ -1745,8 +1754,7 @@ pub_glfs_pwritev(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
return glfs_pwritev_common(glfd, iovec, iovcnt, offset, flags, NULL, NULL);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwritev, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_write, 3.4.0)
ssize_t
pub_glfs_write(struct glfs_fd *glfd, const void *buf, size_t count, int flags)
{
@@ -1755,6 +1763,11 @@ pub_glfs_write(struct glfs_fd *glfd, const void *buf, size_t count, int flags)
};
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
iov.iov_base = (void *)buf;
iov.iov_len = count;
@@ -1763,21 +1776,24 @@ pub_glfs_write(struct glfs_fd *glfd, const void *buf, size_t count, int flags)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_write, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_writev, 3.4.0)
ssize_t
pub_glfs_writev(struct glfs_fd *glfd, const struct iovec *iov, int count,
int flags)
{
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
ret = pub_glfs_pwritev(glfd, iov, count, glfd->offset, flags);
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_writev, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_pwrite34, glfs_pwrite, 3.4.0)
ssize_t
pub_glfs_pwrite34(struct glfs_fd *glfd, const void *buf, size_t count,
off_t offset, int flags)
@@ -1795,8 +1811,7 @@ pub_glfs_pwrite34(struct glfs_fd *glfd, const void *buf, size_t count,
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_pwrite34, glfs_pwrite, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwrite, 6.0)
ssize_t
pub_glfs_pwrite(struct glfs_fd *glfd, const void *buf, size_t count,
off_t offset, int flags, struct glfs_stat *prestat,
@@ -1815,8 +1830,6 @@ pub_glfs_pwrite(struct glfs_fd *glfd, const void *buf, size_t count,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwrite, 6.0);
-
extern glfs_t *
pub_glfs_from_glfd(glfs_fd_t *);
@@ -1935,6 +1948,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_pwritev_async34, glfs_pwritev_async, 3.4.0)
int
pub_glfs_pwritev_async34(struct glfs_fd *glfd, const struct iovec *iovec,
int count, off_t offset, int flags, glfs_io_cbk34 fn,
@@ -1944,8 +1958,7 @@ pub_glfs_pwritev_async34(struct glfs_fd *glfd, const struct iovec *iovec,
_gf_true, (void *)fn, data);
}
-GFAPI_SYMVER_PUBLIC(glfs_pwritev_async34, glfs_pwritev_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwritev_async, 6.0)
int
pub_glfs_pwritev_async(struct glfs_fd *glfd, const struct iovec *iovec,
int count, off_t offset, int flags, glfs_io_cbk fn,
@@ -1955,8 +1968,7 @@ pub_glfs_pwritev_async(struct glfs_fd *glfd, const struct iovec *iovec,
_gf_false, fn, data);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwritev_async, 6.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_write_async34, glfs_write_async, 3.4.0)
int
pub_glfs_write_async34(struct glfs_fd *glfd, const void *buf, size_t count,
int flags, glfs_io_cbk34 fn, void *data)
@@ -1966,6 +1978,11 @@ pub_glfs_write_async34(struct glfs_fd *glfd, const void *buf, size_t count,
};
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
iov.iov_base = (void *)buf;
iov.iov_len = count;
@@ -1975,8 +1992,7 @@ pub_glfs_write_async34(struct glfs_fd *glfd, const void *buf, size_t count,
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_write_async34, glfs_write_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_write_async, 6.0)
int
pub_glfs_write_async(struct glfs_fd *glfd, const void *buf, size_t count,
int flags, glfs_io_cbk fn, void *data)
@@ -1986,6 +2002,11 @@ pub_glfs_write_async(struct glfs_fd *glfd, const void *buf, size_t count,
};
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
iov.iov_base = (void *)buf;
iov.iov_len = count;
@@ -1995,8 +2016,7 @@ pub_glfs_write_async(struct glfs_fd *glfd, const void *buf, size_t count,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_write_async, 6.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_pwrite_async34, glfs_pwrite_async, 3.4.0)
int
pub_glfs_pwrite_async34(struct glfs_fd *glfd, const void *buf, int count,
off_t offset, int flags, glfs_io_cbk34 fn, void *data)
@@ -2015,8 +2035,7 @@ pub_glfs_pwrite_async34(struct glfs_fd *glfd, const void *buf, int count,
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_pwrite_async34, glfs_pwrite_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwrite_async, 6.0)
int
pub_glfs_pwrite_async(struct glfs_fd *glfd, const void *buf, int count,
off_t offset, int flags, glfs_io_cbk fn, void *data)
@@ -2035,34 +2054,40 @@ pub_glfs_pwrite_async(struct glfs_fd *glfd, const void *buf, int count,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwrite_async, 6.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_writev_async34, glfs_writev_async, 3.4.0)
int
pub_glfs_writev_async34(struct glfs_fd *glfd, const struct iovec *iov,
int count, int flags, glfs_io_cbk34 fn, void *data)
{
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
ret = glfs_pwritev_async_common(glfd, iov, count, glfd->offset, flags,
_gf_true, (void *)fn, data);
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_writev_async34, glfs_writev_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_writev_async, 6.0)
int
pub_glfs_writev_async(struct glfs_fd *glfd, const struct iovec *iov, int count,
int flags, glfs_io_cbk fn, void *data)
{
ssize_t ret = 0;
+ if (glfd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
ret = glfs_pwritev_async_common(glfd, iov, count, glfd->offset, flags,
_gf_false, fn, data);
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_writev_async, 6.0);
-
static int
glfs_fsync_common(struct glfs_fd *glfd, struct glfs_stat *prestat,
struct glfs_stat *poststat)
@@ -2127,14 +2152,14 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_fsync34, glfs_fsync, 3.4.0)
int
pub_glfs_fsync34(struct glfs_fd *glfd)
{
return glfs_fsync_common(glfd, NULL, NULL);
}
-GFAPI_SYMVER_PUBLIC(glfs_fsync34, glfs_fsync, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsync, 6.0)
int
pub_glfs_fsync(struct glfs_fd *glfd, struct glfs_stat *prestat,
struct glfs_stat *poststat)
@@ -2142,8 +2167,6 @@ pub_glfs_fsync(struct glfs_fd *glfd, struct glfs_stat *prestat,
return glfs_fsync_common(glfd, prestat, poststat);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsync, 6.0);
-
static int
glfs_fsync_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
@@ -2224,6 +2247,7 @@ out:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_fsync_async34, glfs_fsync_async, 3.4.0)
int
pub_glfs_fsync_async34(struct glfs_fd *glfd, glfs_io_cbk34 fn, void *data)
{
@@ -2240,8 +2264,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_fsync_async34, glfs_fsync_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsync_async, 6.0)
int
pub_glfs_fsync_async(struct glfs_fd *glfd, glfs_io_cbk fn, void *data)
{
@@ -2258,8 +2281,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsync_async, 6.0);
-
static int
glfs_fdatasync_common(struct glfs_fd *glfd, struct glfs_stat *prestat,
struct glfs_stat *poststat)
@@ -2324,14 +2345,14 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_fdatasync34, glfs_fdatasync, 3.4.0)
int
pub_glfs_fdatasync34(struct glfs_fd *glfd)
{
return glfs_fdatasync_common(glfd, NULL, NULL);
}
-GFAPI_SYMVER_PUBLIC(glfs_fdatasync34, glfs_fdatasync, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fdatasync, 6.0)
int
pub_glfs_fdatasync(struct glfs_fd *glfd, struct glfs_stat *prestat,
struct glfs_stat *poststat)
@@ -2339,8 +2360,7 @@ pub_glfs_fdatasync(struct glfs_fd *glfd, struct glfs_stat *prestat,
return glfs_fdatasync_common(glfd, prestat, poststat);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fdatasync, 6.0);
-
+GFAPI_SYMVER_PUBLIC(glfs_fdatasync_async34, glfs_fdatasync_async, 3.4.0)
int
pub_glfs_fdatasync_async34(struct glfs_fd *glfd, glfs_io_cbk34 fn, void *data)
{
@@ -2357,8 +2377,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_fdatasync_async34, glfs_fdatasync_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fdatasync_async, 6.0)
int
pub_glfs_fdatasync_async(struct glfs_fd *glfd, glfs_io_cbk fn, void *data)
{
@@ -2375,8 +2394,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fdatasync_async, 6.0);
-
static int
glfs_ftruncate_common(struct glfs_fd *glfd, off_t offset,
struct glfs_stat *prestat, struct glfs_stat *poststat)
@@ -2442,14 +2459,14 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_ftruncate34, glfs_ftruncate, 3.4.0)
int
pub_glfs_ftruncate34(struct glfs_fd *glfd, off_t offset)
{
return glfs_ftruncate_common(glfd, offset, NULL, NULL);
}
-GFAPI_SYMVER_PUBLIC(glfs_ftruncate34, glfs_ftruncate, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_ftruncate, 6.0)
int
pub_glfs_ftruncate(struct glfs_fd *glfd, off_t offset,
struct glfs_stat *prestat, struct glfs_stat *poststat)
@@ -2457,8 +2474,7 @@ pub_glfs_ftruncate(struct glfs_fd *glfd, off_t offset,
return glfs_ftruncate_common(glfd, offset, prestat, poststat);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_ftruncate, 6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_truncate, 3.7.15)
int
pub_glfs_truncate(struct glfs *fs, const char *path, off_t length)
{
@@ -2504,8 +2520,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_truncate, 3.7.15);
-
static int
glfs_ftruncate_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
@@ -2598,6 +2612,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_ftruncate_async34, glfs_ftruncate_async, 3.4.0)
int
pub_glfs_ftruncate_async34(struct glfs_fd *glfd, off_t offset, glfs_io_cbk34 fn,
void *data)
@@ -2606,8 +2621,7 @@ pub_glfs_ftruncate_async34(struct glfs_fd *glfd, off_t offset, glfs_io_cbk34 fn,
data);
}
-GFAPI_SYMVER_PUBLIC(glfs_ftruncate_async34, glfs_ftruncate_async, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_ftruncate_async, 6.0)
int
pub_glfs_ftruncate_async(struct glfs_fd *glfd, off_t offset, glfs_io_cbk fn,
void *data)
@@ -2615,8 +2629,7 @@ pub_glfs_ftruncate_async(struct glfs_fd *glfd, off_t offset, glfs_io_cbk fn,
return glfs_ftruncate_async_common(glfd, offset, _gf_false, fn, data);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_ftruncate_async, 6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_access, 3.4.0)
int
pub_glfs_access(struct glfs *fs, const char *path, int mode)
{
@@ -2662,8 +2675,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_access, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_symlink, 3.4.0)
int
pub_glfs_symlink(struct glfs *fs, const char *data, const char *path)
{
@@ -2753,8 +2765,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_symlink, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readlink, 3.4.0)
int
pub_glfs_readlink(struct glfs *fs, const char *path, char *buf, size_t bufsiz)
{
@@ -2811,8 +2822,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readlink, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mknod, 3.4.0)
int
pub_glfs_mknod(struct glfs *fs, const char *path, mode_t mode, dev_t dev)
{
@@ -2902,8 +2912,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mknod, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mkdir, 3.4.0)
int
pub_glfs_mkdir(struct glfs *fs, const char *path, mode_t mode)
{
@@ -2993,8 +3002,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mkdir, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_unlink, 3.4.0)
int
pub_glfs_unlink(struct glfs *fs, const char *path)
{
@@ -3050,8 +3058,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_unlink, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_rmdir, 3.4.0)
int
pub_glfs_rmdir(struct glfs *fs, const char *path)
{
@@ -3106,8 +3113,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_rmdir, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_rename, 3.4.0)
int
pub_glfs_rename(struct glfs *fs, const char *oldpath, const char *newpath)
{
@@ -3196,8 +3202,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_rename, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_link, 3.4.0)
int
pub_glfs_link(struct glfs *fs, const char *oldpath, const char *newpath)
{
@@ -3283,8 +3288,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_link, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_opendir, 3.4.0)
struct glfs_fd *
pub_glfs_opendir(struct glfs *fs, const char *path)
{
@@ -3365,8 +3369,7 @@ invalid_fs:
return glfd;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_opendir, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_closedir, 3.4.0)
int
pub_glfs_closedir(struct glfs_fd *glfd)
{
@@ -3387,22 +3390,30 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_closedir, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_telldir, 3.4.0)
long
pub_glfs_telldir(struct glfs_fd *fd)
{
+ if (fd == NULL) {
+ errno = EBADF;
+ return -1;
+ }
+
return fd->offset;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_telldir, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_seekdir, 3.4.0)
void
pub_glfs_seekdir(struct glfs_fd *fd, long offset)
{
gf_dirent_t *entry = NULL;
gf_dirent_t *tmp = NULL;
+ if (fd == NULL) {
+ errno = EBADF;
+ return;
+ }
+
if (fd->offset == offset)
return;
@@ -3425,8 +3436,6 @@ pub_glfs_seekdir(struct glfs_fd *fd, long offset)
*/
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_seekdir, 3.4.0);
-
static int
glfs_discard_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno,
@@ -3517,6 +3526,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_discard_async35, glfs_discard_async, 3.5.0)
int
pub_glfs_discard_async35(struct glfs_fd *glfd, off_t offset, size_t len,
glfs_io_cbk34 fn, void *data)
@@ -3525,8 +3535,7 @@ pub_glfs_discard_async35(struct glfs_fd *glfd, off_t offset, size_t len,
data);
}
-GFAPI_SYMVER_PUBLIC(glfs_discard_async35, glfs_discard_async, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_discard_async, 6.0)
int
pub_glfs_discard_async(struct glfs_fd *glfd, off_t offset, size_t len,
glfs_io_cbk fn, void *data)
@@ -3534,8 +3543,6 @@ pub_glfs_discard_async(struct glfs_fd *glfd, off_t offset, size_t len,
return glfs_discard_async_common(glfd, offset, len, _gf_false, fn, data);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_discard_async, 6.0);
-
static int
glfs_zerofill_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
int32_t op_ret, int32_t op_errno,
@@ -3628,6 +3635,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC(glfs_zerofill_async35, glfs_zerofill_async, 3.5.0)
int
pub_glfs_zerofill_async35(struct glfs_fd *glfd, off_t offset, off_t len,
glfs_io_cbk34 fn, void *data)
@@ -3636,8 +3644,7 @@ pub_glfs_zerofill_async35(struct glfs_fd *glfd, off_t offset, off_t len,
data);
}
-GFAPI_SYMVER_PUBLIC(glfs_zerofill_async35, glfs_zerofill_async, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_zerofill_async, 6.0)
int
pub_glfs_zerofill_async(struct glfs_fd *glfd, off_t offset, off_t len,
glfs_io_cbk fn, void *data)
@@ -3645,8 +3652,6 @@ pub_glfs_zerofill_async(struct glfs_fd *glfd, off_t offset, off_t len,
return glfs_zerofill_async_common(glfd, offset, len, _gf_false, fn, data);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_zerofill_async, 6.0);
-
void
gf_dirent_to_dirent(gf_dirent_t *gf_dirent, struct dirent *dirent)
{
@@ -3806,6 +3811,7 @@ unlock:
return buf;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdirplus_r, 3.4.0)
int
pub_glfs_readdirplus_r(struct glfs_fd *glfd, struct stat *stat,
struct dirent *ext, struct dirent **res)
@@ -3861,8 +3867,7 @@ invalid_fs:
return -1;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdirplus_r, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdir_r, 3.4.0)
int
pub_glfs_readdir_r(struct glfs_fd *glfd, struct dirent *buf,
struct dirent **res)
@@ -3870,8 +3875,7 @@ pub_glfs_readdir_r(struct glfs_fd *glfd, struct dirent *buf,
return pub_glfs_readdirplus_r(glfd, 0, buf, res);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdir_r, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdirplus, 3.5.0)
struct dirent *
pub_glfs_readdirplus(struct glfs_fd *glfd, struct stat *stat)
{
@@ -3885,16 +3889,14 @@ pub_glfs_readdirplus(struct glfs_fd *glfd, struct stat *stat)
return res;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdirplus, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdir, 3.5.0)
struct dirent *
pub_glfs_readdir(struct glfs_fd *glfd)
{
return pub_glfs_readdirplus(glfd, NULL);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdir, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_statvfs, 3.4.0)
int
pub_glfs_statvfs(struct glfs *fs, const char *path, struct statvfs *buf)
{
@@ -3940,8 +3942,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_statvfs, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setattr, 6.0)
int
pub_glfs_setattr(struct glfs *fs, const char *path, struct glfs_stat *stat,
int follow)
@@ -4001,8 +4002,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setattr, 6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsetattr, 6.0)
int
pub_glfs_fsetattr(struct glfs_fd *glfd, struct glfs_stat *stat)
{
@@ -4055,8 +4055,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsetattr, 6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chmod, 3.4.0)
int
pub_glfs_chmod(struct glfs *fs, const char *path, mode_t mode)
{
@@ -4073,8 +4072,7 @@ pub_glfs_chmod(struct glfs *fs, const char *path, mode_t mode)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chmod, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchmod, 3.4.0)
int
pub_glfs_fchmod(struct glfs_fd *glfd, mode_t mode)
{
@@ -4091,8 +4089,7 @@ pub_glfs_fchmod(struct glfs_fd *glfd, mode_t mode)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchmod, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chown, 3.4.0)
int
pub_glfs_chown(struct glfs *fs, const char *path, uid_t uid, gid_t gid)
{
@@ -4117,8 +4114,7 @@ pub_glfs_chown(struct glfs *fs, const char *path, uid_t uid, gid_t gid)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chown, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lchown, 3.4.0)
int
pub_glfs_lchown(struct glfs *fs, const char *path, uid_t uid, gid_t gid)
{
@@ -4143,8 +4139,7 @@ pub_glfs_lchown(struct glfs *fs, const char *path, uid_t uid, gid_t gid)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lchown, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchown, 3.4.0)
int
pub_glfs_fchown(struct glfs_fd *glfd, uid_t uid, gid_t gid)
{
@@ -4169,8 +4164,7 @@ pub_glfs_fchown(struct glfs_fd *glfd, uid_t uid, gid_t gid)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchown, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_utimens, 3.4.0)
int
pub_glfs_utimens(struct glfs *fs, const char *path,
const struct timespec times[2])
@@ -4190,8 +4184,7 @@ pub_glfs_utimens(struct glfs *fs, const char *path,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_utimens, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lutimens, 3.4.0)
int
pub_glfs_lutimens(struct glfs *fs, const char *path,
const struct timespec times[2])
@@ -4211,8 +4204,7 @@ pub_glfs_lutimens(struct glfs *fs, const char *path,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lutimens, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_futimens, 3.4.0)
int
pub_glfs_futimens(struct glfs_fd *glfd, const struct timespec times[2])
{
@@ -4231,8 +4223,6 @@ pub_glfs_futimens(struct glfs_fd *glfd, const struct timespec times[2])
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_futimens, 3.4.0);
-
int
glfs_getxattr_process(void *value, size_t size, dict_t *xattr, const char *name)
{
@@ -4332,6 +4322,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_getxattr, 3.4.0)
ssize_t
pub_glfs_getxattr(struct glfs *fs, const char *path, const char *name,
void *value, size_t size)
@@ -4339,8 +4330,7 @@ pub_glfs_getxattr(struct glfs *fs, const char *path, const char *name,
return glfs_getxattr_common(fs, path, name, value, size, 1);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_getxattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lgetxattr, 3.4.0)
ssize_t
pub_glfs_lgetxattr(struct glfs *fs, const char *path, const char *name,
void *value, size_t size)
@@ -4348,8 +4338,7 @@ pub_glfs_lgetxattr(struct glfs *fs, const char *path, const char *name,
return glfs_getxattr_common(fs, path, name, value, size, 0);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lgetxattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fgetxattr, 3.4.0)
ssize_t
pub_glfs_fgetxattr(struct glfs_fd *glfd, const char *name, void *value,
size_t size)
@@ -4412,8 +4401,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fgetxattr, 3.4.0);
-
int
glfs_listxattr_process(void *value, size_t size, dict_t *xattr)
{
@@ -4497,22 +4484,21 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_listxattr, 3.4.0)
ssize_t
pub_glfs_listxattr(struct glfs *fs, const char *path, void *value, size_t size)
{
return glfs_listxattr_common(fs, path, value, size, 1);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_listxattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_llistxattr, 3.4.0)
ssize_t
pub_glfs_llistxattr(struct glfs *fs, const char *path, void *value, size_t size)
{
return glfs_listxattr_common(fs, path, value, size, 0);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_llistxattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_flistxattr, 3.4.0)
ssize_t
pub_glfs_flistxattr(struct glfs_fd *glfd, void *value, size_t size)
{
@@ -4562,8 +4548,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_flistxattr, 3.4.0);
-
int
glfs_setxattr_common(struct glfs *fs, const char *path, const char *name,
const void *value, size_t size, int flags, int follow)
@@ -4643,6 +4627,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setxattr, 3.4.0)
int
pub_glfs_setxattr(struct glfs *fs, const char *path, const char *name,
const void *value, size_t size, int flags)
@@ -4650,8 +4635,7 @@ pub_glfs_setxattr(struct glfs *fs, const char *path, const char *name,
return glfs_setxattr_common(fs, path, name, value, size, flags, 1);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setxattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lsetxattr, 3.4.0)
int
pub_glfs_lsetxattr(struct glfs *fs, const char *path, const char *name,
const void *value, size_t size, int flags)
@@ -4659,8 +4643,7 @@ pub_glfs_lsetxattr(struct glfs *fs, const char *path, const char *name,
return glfs_setxattr_common(fs, path, name, value, size, flags, 0);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lsetxattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsetxattr, 3.4.0)
int
pub_glfs_fsetxattr(struct glfs_fd *glfd, const char *name, const void *value,
size_t size, int flags)
@@ -4735,8 +4718,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsetxattr, 3.4.0);
-
int
glfs_removexattr_common(struct glfs *fs, const char *path, const char *name,
int follow)
@@ -4787,22 +4768,21 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_removexattr, 3.4.0)
int
pub_glfs_removexattr(struct glfs *fs, const char *path, const char *name)
{
return glfs_removexattr_common(fs, path, name, 1);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_removexattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lremovexattr, 3.4.0)
int
pub_glfs_lremovexattr(struct glfs *fs, const char *path, const char *name)
{
return glfs_removexattr_common(fs, path, name, 0);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lremovexattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fremovexattr, 3.4.0)
int
pub_glfs_fremovexattr(struct glfs_fd *glfd, const char *name)
{
@@ -4845,8 +4825,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fremovexattr, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fallocate, 3.5.0)
int
pub_glfs_fallocate(struct glfs_fd *glfd, int keep_size, off_t offset,
size_t len)
@@ -4897,8 +4876,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fallocate, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_discard, 3.5.0)
int
pub_glfs_discard(struct glfs_fd *glfd, off_t offset, size_t len)
{
@@ -4948,8 +4926,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_discard, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_zerofill, 3.5.0)
int
pub_glfs_zerofill(struct glfs_fd *glfd, off_t offset, off_t len)
{
@@ -4997,8 +4974,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_zerofill, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chdir, 3.4.0)
int
pub_glfs_chdir(struct glfs *fs, const char *path)
{
@@ -5048,8 +5024,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chdir, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchdir, 3.4.0)
int
pub_glfs_fchdir(struct glfs_fd *glfd)
{
@@ -5101,8 +5076,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchdir, 3.4.0);
-
static gf_boolean_t warn_realpath = _gf_true; /* log once */
static char *
@@ -5185,22 +5158,21 @@ invalid_fs:
return retpath;
}
+GFAPI_SYMVER_PUBLIC(glfs_realpath34, glfs_realpath, 3.4.0)
char *
pub_glfs_realpath34(struct glfs *fs, const char *path, char *resolved_path)
{
return glfs_realpath_common(fs, path, resolved_path, _gf_true);
}
-GFAPI_SYMVER_PUBLIC(glfs_realpath34, glfs_realpath, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_realpath, 3.7.17)
char *
pub_glfs_realpath(struct glfs *fs, const char *path, char *resolved_path)
{
return glfs_realpath_common(fs, path, resolved_path, _gf_false);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_realpath, 3.7.17);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_getcwd, 3.4.0)
char *
pub_glfs_getcwd(struct glfs *fs, char *buf, size_t n)
{
@@ -5249,8 +5221,6 @@ invalid_fs:
return buf;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_getcwd, 3.4.0);
-
static void
gf_flock_to_flock(struct gf_flock *gf_flock, struct flock *flock)
{
@@ -5336,11 +5306,9 @@ glfs_lock_common(struct glfs_fd *glfd, int cmd, struct flock *flock,
if (ret == 0 && (cmd == F_SETLK || cmd == F_SETLKW)) {
ret = fd_lk_insert_and_merge(fd, cmd, &saved_flock);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0,
- API_MSG_LOCK_INSERT_MERGE_FAILED,
- "Lock insertion and splitting/merging failed "
- "on gfid %s",
- uuid_utoa(fd->inode->gfid));
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0,
+ API_MSG_LOCK_INSERT_MERGE_FAILED, "gfid=%s",
+ uuid_utoa(fd->inode->gfid), NULL);
ret = 0;
}
}
@@ -5359,6 +5327,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_file_lock, 4.0.0)
int
pub_glfs_file_lock(struct glfs_fd *glfd, int cmd, struct flock *flock,
glfs_lock_mode_t lk_mode)
@@ -5379,9 +5348,8 @@ pub_glfs_file_lock(struct glfs_fd *glfd, int cmd, struct flock *flock,
* GLFS_LK_MANDATORY */
ret = dict_set_uint32(xdata_in, GF_LOCK_MODE, GF_LK_MANDATORY);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0,
- API_MSG_SETTING_LOCK_TYPE_FAILED,
- "Setting lock type failed");
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0,
+ API_MSG_SETTING_LOCK_TYPE_FAILED, NULL);
ret = -1;
errno = ENOMEM;
goto out;
@@ -5396,16 +5364,14 @@ out:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_file_lock, 4.0.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_posix_lock, 3.4.0)
int
pub_glfs_posix_lock(struct glfs_fd *glfd, int cmd, struct flock *flock)
{
return glfs_lock_common(glfd, cmd, flock, NULL);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_posix_lock, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fd_set_lkowner, 3.10.7)
int
pub_glfs_fd_set_lkowner(struct glfs_fd *glfd, void *data, int len)
{
@@ -5422,8 +5388,8 @@ pub_glfs_fd_set_lkowner(struct glfs_fd *glfd, void *data, int len)
if ((len <= 0) || (len > GFAPI_MAX_LOCK_OWNER_LEN)) {
errno = EINVAL;
- gf_msg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_INVALID_ARG,
- "Invalid lk_owner len (%d)", len);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ARG,
+ "lk_owner len=%d", len, NULL);
goto out;
}
@@ -5441,8 +5407,8 @@ out:
invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fd_set_lkowner, 3.10.7);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_dup, 3.4.0)
struct glfs_fd *
pub_glfs_dup(struct glfs_fd *glfd)
{
@@ -5493,8 +5459,6 @@ invalid_fs:
return dupfd;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_dup, 3.4.0);
-
static void
glfs_enqueue_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
{
@@ -5507,8 +5471,8 @@ glfs_enqueue_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
u_list = GF_CALLOC(1, sizeof(*u_list), glfs_mt_upcall_entry_t);
if (!u_list) {
- gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
- "Upcall entry allocation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
+ NULL);
goto out;
}
@@ -5530,8 +5494,7 @@ glfs_enqueue_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
}
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ENTRY,
- "Upcall entry validation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ENTRY, NULL);
goto out;
}
@@ -5601,9 +5564,9 @@ glfs_recall_lease_fd(struct glfs *fs, struct gf_upcall *up_data)
inode = inode_find(subvol->itable, up_data->gfid);
if (!inode) {
ret = -1;
- gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INODE_FIND_FAILED,
- "Unable to find inode entry for gfid:%s graph id:%d",
- uuid_utoa(up_data->gfid), subvol->graph->id);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INODE_FIND_FAILED,
+ "gfid=%s", uuid_utoa(up_data->gfid), "graph_id=%d",
+ subvol->graph->id, NULL);
goto out;
}
@@ -5681,8 +5644,8 @@ glfs_recall_lease_upcall(struct glfs *fs, struct glfs_upcall *up_arg,
* the handle and hence will no more be interested in
* the upcall for this particular gfid.
*/
- gf_msg(THIS->name, GF_LOG_DEBUG, errno, API_MSG_CREATE_HANDLE_FAILED,
- "handle creation of %s failed", uuid_utoa(up_data->gfid));
+ gf_smsg(THIS->name, GF_LOG_DEBUG, errno, API_MSG_CREATE_HANDLE_FAILED,
+ "gfid=%s", uuid_utoa(up_data->gfid), NULL);
errno = ESTALE;
goto out;
}
@@ -5771,8 +5734,8 @@ glfs_cbk_upcall_syncop(void *opaque)
up_arg = GLFS_CALLOC(1, sizeof(struct gf_upcall), glfs_release_upcall,
glfs_mt_upcall_entry_t);
if (!up_arg) {
- gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
- "Upcall entry allocation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
+ NULL);
goto out;
}
@@ -5794,14 +5757,14 @@ glfs_cbk_upcall_syncop(void *opaque)
* send upcall then
*/
if (up_arg->reason == GLFS_UPCALL_EVENT_NULL) {
- gf_msg(THIS->name, GF_LOG_DEBUG, errno, API_MSG_INVALID_ENTRY,
- "Upcall_EVENT_NULL received. Skipping it.");
+ gf_smsg(THIS->name, GF_LOG_DEBUG, errno,
+ API_MSG_UPCALL_EVENT_NULL_RECEIVED, NULL);
ret = 0;
GLFS_FREE(up_arg);
goto out;
} else if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ENTRY,
- "Upcall entry validation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ENTRY, NULL);
+ GLFS_FREE(up_arg);
goto out;
}
@@ -5827,8 +5790,8 @@ gf_copy_cache_invalidation(struct gf_upcall_cache_invalidation *src)
glfs_mt_upcall_entry_t);
if (!dst) {
- gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
- "Upcall entry allocation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
+ NULL);
goto out;
}
@@ -5858,8 +5821,8 @@ gf_copy_recall_lease(struct gf_upcall_recall_lease *src)
glfs_mt_upcall_entry_t);
if (!dst) {
- gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
- "Upcall entry allocation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
+ NULL);
goto out;
}
@@ -5887,8 +5850,8 @@ upcall_syncop_args_init(struct glfs *fs, struct gf_upcall *upcall_data)
args = GF_CALLOC(1, sizeof(struct upcall_syncop_args),
glfs_mt_upcall_entry_t);
if (!args) {
- gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
- "Upcall syncop args allocation failed.");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
+ "syncop args", NULL);
goto out;
}
@@ -5924,8 +5887,10 @@ upcall_syncop_args_init(struct glfs *fs, struct gf_upcall *upcall_data)
return args;
out:
if (ret) {
- GF_FREE(args->upcall_data.client_uid);
- GF_FREE(args);
+ if (args) {
+ GF_FREE(args->upcall_data.client_uid);
+ GF_FREE(args);
+ }
}
return NULL;
@@ -5954,9 +5919,9 @@ glfs_cbk_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
glfs_upcall_syncop_cbk, NULL, args);
/* should we retry incase of failure? */
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, API_MSG_UPCALL_SYNCOP_FAILED,
- "Synctak for Upcall event_type(%d) and gfid(%s) failed",
- upcall_data->event_type, (char *)(upcall_data->gfid));
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_UPCALL_SYNCOP_FAILED,
+ "event_type=%d", upcall_data->event_type, "gfid=%s",
+ (char *)(upcall_data->gfid), NULL);
upcall_syncop_args_free(args);
}
@@ -5978,6 +5943,7 @@ out:
* Otherwise all the upcall events are queued up in a list
* to be read/polled by the applications.
*/
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_process_upcall_event, 3.7.0)
void
priv_glfs_process_upcall_event(struct glfs *fs, void *data)
{
@@ -6045,7 +6011,6 @@ out:
err:
return;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_process_upcall_event, 3.7.0);
ssize_t
glfs_anonymous_pwritev(struct glfs *fs, struct glfs_object *object,
@@ -6084,8 +6049,7 @@ glfs_anonymous_pwritev(struct glfs *fs, struct glfs_object *object,
fd = fd_anonymous(inode);
if (!fd) {
ret = -1;
- gf_msg("gfapi", GF_LOG_ERROR, ENOMEM, API_MSG_FDCREATE_FAILED,
- "Allocating anonymous fd failed");
+ gf_smsg("gfapi", GF_LOG_ERROR, ENOMEM, API_MSG_FDCREATE_FAILED, NULL);
errno = ENOMEM;
goto out;
}
@@ -6183,8 +6147,7 @@ glfs_anonymous_preadv(struct glfs *fs, struct glfs_object *object,
fd = fd_anonymous(inode);
if (!fd) {
ret = -1;
- gf_msg("gfapi", GF_LOG_ERROR, ENOMEM, API_MSG_FDCREATE_FAILED,
- "Allocating anonymous fd failed");
+ gf_smsg("gfapi", GF_LOG_ERROR, ENOMEM, API_MSG_FDCREATE_FAILED, NULL);
errno = ENOMEM;
goto out;
}
@@ -6233,6 +6196,7 @@ glfs_release_xreaddirp_stat(void *ptr)
* Given glfd of a directory, this function does readdirp and returns
* xstat along with dirents.
*/
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_r, 3.11.0)
int
pub_glfs_xreaddirplus_r(struct glfs_fd *glfd, uint32_t flags,
struct glfs_xreaddirp_stat **xstat_p,
@@ -6327,8 +6291,8 @@ out:
GF_REF_PUT(glfd);
if (ret < 0) {
- gf_msg(THIS->name, GF_LOG_WARNING, errno, API_MSG_XREADDIRP_R_FAILED,
- "glfs_x_readdirp_r failed - reason (%s)", strerror(errno));
+ gf_smsg(THIS->name, GF_LOG_WARNING, errno, API_MSG_XREADDIRP_R_FAILED,
+ "reason=%s", strerror(errno), NULL);
if (xstat)
GLFS_FREE(xstat);
@@ -6341,24 +6305,23 @@ out:
invalid_fs:
return -1;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_r, 3.11.0);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_get_stat, 3.11.0)
struct stat *
pub_glfs_xreaddirplus_get_stat(struct glfs_xreaddirp_stat *xstat)
{
GF_VALIDATE_OR_GOTO("glfs_xreaddirplus_get_stat", xstat, out);
if (!xstat->flags_handled & GFAPI_XREADDIRP_STAT)
- gf_msg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_INVALID_ARG,
- "GFAPI_XREADDIRP_STAT is not set. Flags"
- "handled for xstat(%p) are (%x)",
- xstat, xstat->flags_handled);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_FLAGS_HANDLE,
+ "GFAPI_XREADDIRP_STAT"
+ "xstat=%p",
+ xstat, "handles=%x", xstat->flags_handled, NULL);
return &xstat->st;
out:
return NULL;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_get_stat, 3.11.0);
void
gf_lease_to_glfs_lease(struct gf_lease *gf_lease, struct glfs_lease *lease)
@@ -6378,6 +6341,7 @@ glfs_lease_to_gf_lease(struct glfs_lease *lease, struct gf_lease *gf_lease)
memcpy(gf_lease->lease_id, lease->lease_id, LEASE_ID_SIZE);
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lease, 4.0.0)
int
pub_glfs_lease(struct glfs_fd *glfd, struct glfs_lease *lease,
glfs_recall_cbk fn, void *data)
@@ -6458,8 +6422,8 @@ pub_glfs_lease(struct glfs_fd *glfd, struct glfs_lease *lease,
if (ret == 0) {
ret = fd_ctx_set(glfd->fd, subvol, (uint64_t)(long)glfd);
if (ret) {
- gf_msg(subvol->name, GF_LOG_ERROR, ENOMEM, API_MSG_FDCTX_SET_FAILED,
- "Setting fd ctx failed for fd(%p)", glfd->fd);
+ gf_smsg(subvol->name, GF_LOG_ERROR, ENOMEM,
+ API_MSG_FDCTX_SET_FAILED, "fd=%p", glfd->fd, NULL);
goto out;
}
glfd->cbk = fn;
@@ -6479,5 +6443,3 @@ out:
invalid_fs:
return ret;
}
-
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lease, 4.0.0);
diff --git a/api/src/glfs-handleops.c b/api/src/glfs-handleops.c
index cdf368379d6..53c2ee896f9 100644
--- a/api/src/glfs-handleops.c
+++ b/api/src/glfs-handleops.c
@@ -60,6 +60,7 @@ glfs_iatt_from_stat(struct stat *stat, int valid, struct iatt *iatt,
return;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_lookupat, 3.7.4)
struct glfs_object *
pub_glfs_h_lookupat(struct glfs *fs, struct glfs_object *parent,
const char *path, struct stat *stat, int follow)
@@ -126,8 +127,7 @@ invalid_fs:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_lookupat, 3.7.4);
-
+GFAPI_SYMVER_PUBLIC(glfs_h_lookupat34, glfs_h_lookupat, 3.4.2)
struct glfs_object *
pub_glfs_h_lookupat34(struct glfs *fs, struct glfs_object *parent,
const char *path, struct stat *stat)
@@ -135,8 +135,7 @@ pub_glfs_h_lookupat34(struct glfs *fs, struct glfs_object *parent,
return pub_glfs_h_lookupat(fs, parent, path, stat, 0);
}
-GFAPI_SYMVER_PUBLIC(glfs_h_lookupat34, glfs_h_lookupat, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_statfs, 3.7.0)
int
pub_glfs_h_statfs(struct glfs *fs, struct glfs_object *object,
struct statvfs *statvfs)
@@ -194,8 +193,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_statfs, 3.7.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_stat, 3.4.2)
int
pub_glfs_h_stat(struct glfs *fs, struct glfs_object *object, struct stat *stat)
{
@@ -259,8 +257,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_stat, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_getattrs, 3.4.2)
int
pub_glfs_h_getattrs(struct glfs *fs, struct glfs_object *object,
struct stat *stat)
@@ -317,8 +314,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_getattrs, 3.4.2);
-
int
glfs_h_getxattrs_common(struct glfs *fs, struct glfs_object *object,
dict_t **xattr, const char *name,
@@ -380,6 +375,7 @@ out:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_getxattrs, 3.5.1)
int
pub_glfs_h_getxattrs(struct glfs *fs, struct glfs_object *object,
const char *name, void *value, size_t size)
@@ -416,8 +412,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_getxattrs, 3.5.1);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_setattrs, 3.4.2)
int
pub_glfs_h_setattrs(struct glfs *fs, struct glfs_object *object,
struct stat *stat, int valid)
@@ -480,8 +475,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_setattrs, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_setxattrs, 3.5.0)
int
pub_glfs_h_setxattrs(struct glfs *fs, struct glfs_object *object,
const char *name, const void *value, size_t size,
@@ -568,8 +562,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_setxattrs, 3.5.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_removexattrs, 3.5.1)
int
pub_glfs_h_removexattrs(struct glfs *fs, struct glfs_object *object,
const char *name)
@@ -626,8 +619,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_removexattrs, 3.5.1);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_open, 3.4.2)
struct glfs_fd *
pub_glfs_h_open(struct glfs *fs, struct glfs_object *object, int flags)
{
@@ -727,8 +719,7 @@ invalid_fs:
return glfd;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_open, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_creat, 3.4.2)
struct glfs_object *
pub_glfs_h_creat(struct glfs *fs, struct glfs_object *parent, const char *path,
int flags, mode_t mode, struct stat *stat)
@@ -840,8 +831,7 @@ invalid_fs:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_creat, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_creat_open, 6.6)
struct glfs_object *
pub_glfs_h_creat_open(struct glfs *fs, struct glfs_object *parent,
const char *path, int flags, mode_t mode,
@@ -975,8 +965,7 @@ invalid_fs:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_creat_open, 6.6);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_mkdir, 3.4.2)
struct glfs_object *
pub_glfs_h_mkdir(struct glfs *fs, struct glfs_object *parent, const char *path,
mode_t mode, struct stat *stat)
@@ -1074,8 +1063,7 @@ invalid_fs:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_mkdir, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_mknod, 3.4.2)
struct glfs_object *
pub_glfs_h_mknod(struct glfs *fs, struct glfs_object *parent, const char *path,
mode_t mode, dev_t dev, struct stat *stat)
@@ -1172,8 +1160,7 @@ invalid_fs:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_mknod, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_unlink, 3.4.2)
int
pub_glfs_h_unlink(struct glfs *fs, struct glfs_object *parent, const char *path)
{
@@ -1244,8 +1231,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_unlink, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_opendir, 3.4.2)
struct glfs_fd *
pub_glfs_h_opendir(struct glfs *fs, struct glfs_object *object)
{
@@ -1327,8 +1313,7 @@ invalid_fs:
return glfd;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_opendir, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_access, 3.6.0)
int
pub_glfs_h_access(struct glfs *fs, struct glfs_object *object, int mask)
{
@@ -1385,8 +1370,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_access, 3.6.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_extract_handle, 3.4.2)
ssize_t
pub_glfs_h_extract_handle(struct glfs_object *object, unsigned char *handle,
int len)
@@ -1417,8 +1401,7 @@ out:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_extract_handle, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_create_from_handle, 3.4.2)
struct glfs_object *
pub_glfs_h_create_from_handle(struct glfs *fs, unsigned char *handle, int len,
struct stat *stat)
@@ -1495,9 +1478,9 @@ pub_glfs_h_create_from_handle(struct glfs *fs, unsigned char *handle, int len,
ret = syncop_lookup(subvol, &loc, &iatt, 0, 0, 0);
DECODE_SYNCOP_ERR(ret);
if (ret) {
- gf_msg(subvol->name, GF_LOG_WARNING, errno,
- API_MSG_INODE_REFRESH_FAILED, "inode refresh of %s failed: %s",
- uuid_utoa(loc.gfid), strerror(errno));
+ gf_smsg(subvol->name, GF_LOG_WARNING, errno,
+ API_MSG_INODE_REFRESH_FAILED, "gfid=%s", uuid_utoa(loc.gfid),
+ "error=%s", strerror(errno), NULL);
goto out;
}
@@ -1508,8 +1491,8 @@ pub_glfs_h_create_from_handle(struct glfs *fs, unsigned char *handle, int len,
}
inode_lookup(newinode);
} else {
- gf_msg(subvol->name, GF_LOG_WARNING, errno, API_MSG_INODE_LINK_FAILED,
- "inode linking of %s failed", uuid_utoa(loc.gfid));
+ gf_smsg(subvol->name, GF_LOG_WARNING, errno, API_MSG_INODE_LINK_FAILED,
+ "gfid=%s", uuid_utoa(loc.gfid), NULL);
goto out;
}
@@ -1541,8 +1524,7 @@ invalid_fs:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_create_from_handle, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_close, 3.4.2)
int
pub_glfs_h_close(struct glfs_object *object)
{
@@ -1555,8 +1537,7 @@ pub_glfs_h_close(struct glfs_object *object)
return 0;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_close, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_truncate, 3.4.2)
int
pub_glfs_h_truncate(struct glfs *fs, struct glfs_object *object, off_t offset)
{
@@ -1616,8 +1597,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_truncate, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_symlink, 3.4.2)
struct glfs_object *
pub_glfs_h_symlink(struct glfs *fs, struct glfs_object *parent,
const char *name, const char *data, struct stat *stat)
@@ -1716,8 +1696,7 @@ invalid_fs:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_symlink, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_readlink, 3.4.2)
int
pub_glfs_h_readlink(struct glfs *fs, struct glfs_object *object, char *buf,
size_t bufsiz)
@@ -1782,8 +1761,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_readlink, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_link, 3.4.2)
int
pub_glfs_h_link(struct glfs *fs, struct glfs_object *linksrc,
struct glfs_object *parent, const char *name)
@@ -1880,8 +1858,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_link, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_rename, 3.4.2)
int
pub_glfs_h_rename(struct glfs *fs, struct glfs_object *olddir,
const char *oldname, struct glfs_object *newdir,
@@ -1991,8 +1968,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_rename, 3.4.2);
-
/*
* Given a handle/gfid, find if the corresponding inode is present in
* the inode table. If yes create and return the corresponding glfs_object.
@@ -2097,8 +2072,8 @@ glfs_h_poll_cache_invalidation(struct glfs *fs, struct glfs_upcall *up_arg,
* the handle and hence will no more be interested in
* the upcall for this particular gfid.
*/
- gf_msg(THIS->name, GF_LOG_DEBUG, errno, API_MSG_CREATE_HANDLE_FAILED,
- "handle creation of %s failed", uuid_utoa(upcall_data->gfid));
+ gf_smsg(THIS->name, GF_LOG_DEBUG, errno, API_MSG_CREATE_HANDLE_FAILED,
+ "gfid=%s", uuid_utoa(upcall_data->gfid), NULL);
errno = ESTALE;
goto out;
}
@@ -2121,9 +2096,9 @@ glfs_h_poll_cache_invalidation(struct glfs *fs, struct glfs_upcall *up_arg,
p_object = glfs_h_find_handle(fs, ca_data->p_stat.ia_gfid,
GFAPI_HANDLE_LENGTH);
if (!p_object) {
- gf_msg(THIS->name, GF_LOG_DEBUG, errno,
- API_MSG_CREATE_HANDLE_FAILED, "handle creation of %s failed",
- uuid_utoa(ca_data->p_stat.ia_gfid));
+ gf_smsg(THIS->name, GF_LOG_DEBUG, errno,
+ API_MSG_CREATE_HANDLE_FAILED, "gfid=%s",
+ uuid_utoa(ca_data->p_stat.ia_gfid), NULL);
errno = ESTALE;
goto out;
}
@@ -2137,9 +2112,9 @@ glfs_h_poll_cache_invalidation(struct glfs *fs, struct glfs_upcall *up_arg,
oldp_object = glfs_h_find_handle(fs, ca_data->oldp_stat.ia_gfid,
GFAPI_HANDLE_LENGTH);
if (!oldp_object) {
- gf_msg(THIS->name, GF_LOG_DEBUG, errno,
- API_MSG_CREATE_HANDLE_FAILED, "handle creation of %s failed",
- uuid_utoa(ca_data->oldp_stat.ia_gfid));
+ gf_smsg(THIS->name, GF_LOG_DEBUG, errno,
+ API_MSG_CREATE_HANDLE_FAILED, "gfid=%s",
+ uuid_utoa(ca_data->oldp_stat.ia_gfid), NULL);
errno = ESTALE;
/* By the time we receive upcall old parent_dir may
* have got removed. We still need to send upcall
@@ -2200,6 +2175,7 @@ glfs_release_upcall(void *ptr)
* calling glfs_fini(..). Hence making an assumption that 'fs' & ctx structures
* cannot be freed while in this routine.
*/
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_poll_upcall, 3.7.16)
int
pub_glfs_h_poll_upcall(struct glfs *fs, struct glfs_upcall **up_arg)
{
@@ -2317,8 +2293,6 @@ err:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_poll_upcall, 3.7.16);
-
static gf_boolean_t log_upcall370 = _gf_true; /* log once */
/* The old glfs_h_poll_upcall interface requires intimate knowledge of the
@@ -2332,6 +2306,7 @@ static gf_boolean_t log_upcall370 = _gf_true; /* log once */
*
* WARNING: this function will be removed in the future.
*/
+GFAPI_SYMVER_PUBLIC(glfs_h_poll_upcall370, glfs_h_poll_upcall, 3.7.0)
int
pub_glfs_h_poll_upcall370(struct glfs *fs, struct glfs_callback_arg *up_arg)
{
@@ -2399,12 +2374,11 @@ out:
return ret;
}
-GFAPI_SYMVER_PUBLIC(glfs_h_poll_upcall370, glfs_h_poll_upcall, 3.7.0);
-
#ifdef HAVE_ACL_LIBACL_H
#include <glusterfs/glusterfs-acl.h>
#include <acl/libacl.h>
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_acl_set, 3.7.0)
int
pub_glfs_h_acl_set(struct glfs *fs, struct glfs_object *object,
const acl_type_t type, const acl_t acl)
@@ -2453,6 +2427,7 @@ invalid_fs:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_acl_get, 3.7.0)
acl_t
pub_glfs_h_acl_get(struct glfs *fs, struct glfs_object *object,
const acl_type_t type)
@@ -2507,6 +2482,7 @@ invalid_fs:
return acl;
}
#else /* !HAVE_ACL_LIBACL_H */
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_acl_get, 3.7.0)
acl_t
pub_glfs_h_acl_get(struct glfs *fs, struct glfs_object *object,
const acl_type_t type)
@@ -2515,6 +2491,7 @@ pub_glfs_h_acl_get(struct glfs *fs, struct glfs_object *object,
return NULL;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_acl_set, 3.7.0)
int
pub_glfs_h_acl_set(struct glfs *fs, struct glfs_object *object,
const acl_type_t type, const acl_t acl)
@@ -2523,10 +2500,9 @@ pub_glfs_h_acl_set(struct glfs *fs, struct glfs_object *object,
return -1;
}
#endif
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_acl_set, 3.7.0);
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_acl_get, 3.7.0);
/* The API to perform read using anonymous fd */
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_anonymous_read, 3.7.0)
ssize_t
pub_glfs_h_anonymous_read(struct glfs *fs, struct glfs_object *object,
const void *buf, size_t count, off_t offset)
@@ -2550,9 +2526,8 @@ pub_glfs_h_anonymous_read(struct glfs *fs, struct glfs_object *object,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_anonymous_read, 3.7.0);
-
/* The API to perform write using anonymous fd */
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_anonymous_write, 3.7.0)
ssize_t
pub_glfs_h_anonymous_write(struct glfs *fs, struct glfs_object *object,
const void *buf, size_t count, off_t offset)
@@ -2576,8 +2551,7 @@ pub_glfs_h_anonymous_write(struct glfs *fs, struct glfs_object *object,
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_anonymous_write, 3.7.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_object_copy, 3.11.0)
struct glfs_object *
pub_glfs_object_copy(struct glfs_object *src)
{
@@ -2588,9 +2562,8 @@ pub_glfs_object_copy(struct glfs_object *src)
object = GF_CALLOC(1, sizeof(struct glfs_object), glfs_mt_glfs_object_t);
if (object == NULL) {
errno = ENOMEM;
- gf_msg(THIS->name, GF_LOG_WARNING, errno, API_MSG_CREATE_HANDLE_FAILED,
- "glfs_dup_object for gfid-%s failed",
- uuid_utoa(src->inode->gfid));
+ gf_smsg(THIS->name, GF_LOG_WARNING, errno, API_MSG_CREATE_HANDLE_FAILED,
+ "glfs_dup_object gfid=%s", uuid_utoa(src->inode->gfid), NULL);
return NULL;
}
@@ -2600,26 +2573,25 @@ pub_glfs_object_copy(struct glfs_object *src)
out:
return object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_object_copy, 3.11.0);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_get_object, 3.11.0)
struct glfs_object *
pub_glfs_xreaddirplus_get_object(struct glfs_xreaddirp_stat *xstat)
{
GF_VALIDATE_OR_GOTO("glfs_xreaddirplus_get_object", xstat, out);
if (!(xstat->flags_handled & GFAPI_XREADDIRP_HANDLE))
- gf_msg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_INVALID_ARG,
- "GFAPI_XREADDIRP_HANDLE is not set. Flags"
- "handled for xstat(%p) are (%x)",
- xstat, xstat->flags_handled);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_HANDLE_NOT_SET,
+ "GFAPI_XREADDIRP_HANDLE xstat=%p", xstat, "handle=%x",
+ xstat->flags_handled, NULL);
return xstat->object;
out:
return NULL;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_get_object, 3.11.0);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_lease, 4.0.0)
int
pub_glfs_h_lease(struct glfs *fs, struct glfs_object *object,
struct glfs_lease *lease)
@@ -2681,5 +2653,3 @@ out:
invalid_fs:
return ret;
}
-
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_h_lease, 4.0.0);
diff --git a/api/src/glfs-internal.h b/api/src/glfs-internal.h
index 55401b2910e..7cc3b18a104 100644
--- a/api/src/glfs-internal.h
+++ b/api/src/glfs-internal.h
@@ -16,6 +16,7 @@
#include <glusterfs/upcall-utils.h>
#include "glfs-handles.h"
#include <glusterfs/refcount.h>
+#include <glusterfs/syncop.h>
#define GLFS_SYMLINK_MAX_FOLLOW 2048
@@ -80,25 +81,40 @@
#ifndef GFAPI_PRIVATE
#define GFAPI_PRIVATE(sym, ver) /**/
#endif
+#if __GNUC__ >= 10
#define GFAPI_SYMVER_PUBLIC_DEFAULT(fn, ver) \
- asm(".symver pub_" STR(fn) ", " STR(fn) "@@GFAPI_" STR(ver))
+ __attribute__((__symver__(STR(fn) "@@GFAPI_" STR(ver))))
#define GFAPI_SYMVER_PRIVATE_DEFAULT(fn, ver) \
- asm(".symver priv_" STR(fn) ", " STR(fn) "@@GFAPI_PRIVATE_" STR(ver))
+ __attribute__((__symver__(STR(fn) "@@GFAPI_PRIVATE_" STR(ver))))
#define GFAPI_SYMVER_PUBLIC(fn1, fn2, ver) \
- asm(".symver pub_" STR(fn1) ", " STR(fn2) "@GFAPI_" STR(ver))
+ __attribute__((__symver__(STR(fn2) "@GFAPI_" STR(ver))))
#define GFAPI_SYMVER_PRIVATE(fn1, fn2, ver) \
- asm(".symver priv_" STR(fn1) ", " STR(fn2) "@GFAPI_PRIVATE_" STR(ver))
+ __attribute__((__symver__(STR(fn2) "@GFAPI_PRIVATE_" STR(ver))))
+
+#else
+#define GFAPI_SYMVER_PUBLIC_DEFAULT(fn, ver) \
+ asm(".symver pub_" STR(fn) ", " STR(fn) "@@GFAPI_" STR(ver));
+
+#define GFAPI_SYMVER_PRIVATE_DEFAULT(fn, ver) \
+ asm(".symver priv_" STR(fn) ", " STR(fn) "@@GFAPI_PRIVATE_" STR(ver));
+
+#define GFAPI_SYMVER_PUBLIC(fn1, fn2, ver) \
+ asm(".symver pub_" STR(fn1) ", " STR(fn2) "@GFAPI_" STR(ver));
+
+#define GFAPI_SYMVER_PRIVATE(fn1, fn2, ver) \
+ asm(".symver priv_" STR(fn1) ", " STR(fn2) "@GFAPI_PRIVATE_" STR(ver));
+#endif
#define STR(str) #str
#else
#ifndef GFAPI_PUBLIC
-#define GFAPI_PUBLIC(sym, ver) __asm("_" __STRING(sym) "$GFAPI_" __STRING(ver))
+#define GFAPI_PUBLIC(sym, ver) __asm("_" __STRING(sym) "$GFAPI_" __STRING(ver));
#endif
#ifndef GFAPI_PRIVATE
#define GFAPI_PRIVATE(sym, ver) \
- __asm("_" __STRING(sym) "$GFAPI_PRIVATE_" __STRING(ver))
+ __asm("_" __STRING(sym) "$GFAPI_PRIVATE_" __STRING(ver));
#endif
#define GFAPI_SYMVER_PUBLIC_DEFAULT(fn, dotver) /**/
#define GFAPI_SYMVER_PRIVATE_DEFAULT(fn, dotver) /**/
@@ -207,6 +223,7 @@ struct glfs {
glfs_upcall_cbk up_cbk; /* upcall cbk function to be registered */
void *up_data; /* Opaque data provided by application
* during upcall registration */
+ struct list_head waitq; /* waiting synctasks */
};
/* This enum is used to maintain the state of glfd. In case of async fops
@@ -442,6 +459,34 @@ glfs_process_upcall_event(struct glfs *fs, void *data)
THIS = glfd->fd->inode->table->xl->ctx->master; \
} while (0)
+#define __GLFS_LOCK_WAIT(fs) \
+ do { \
+ struct synctask *task = NULL; \
+ \
+ task = synctask_get(); \
+ \
+ if (task) { \
+ list_add_tail(&task->waitq, &fs->waitq); \
+ pthread_mutex_unlock(&fs->mutex); \
+ synctask_yield(task, NULL); \
+ pthread_mutex_lock(&fs->mutex); \
+ } else { \
+ /* non-synctask */ \
+ pthread_cond_wait(&fs->cond, &fs->mutex); \
+ } \
+ } while (0)
+
+#define __GLFS_SYNCTASK_WAKE(fs) \
+ do { \
+ struct synctask *waittask = NULL; \
+ \
+ while (!list_empty(&fs->waitq)) { \
+ waittask = list_entry(fs->waitq.next, struct synctask, waitq); \
+ list_del_init(&waittask->waitq); \
+ synctask_wake(waittask); \
+ } \
+ } while (0)
+
/*
By default all lock attempts from user context must
use glfs_lock() and glfs_unlock(). This allows
@@ -466,10 +511,10 @@ glfs_lock(struct glfs *fs, gf_boolean_t wait_for_migration)
pthread_mutex_lock(&fs->mutex);
while (!fs->init)
- pthread_cond_wait(&fs->cond, &fs->mutex);
+ __GLFS_LOCK_WAIT(fs);
while (wait_for_migration && fs->migration_in_progress)
- pthread_cond_wait(&fs->cond, &fs->mutex);
+ __GLFS_LOCK_WAIT(fs);
return 0;
}
diff --git a/api/src/glfs-master.c b/api/src/glfs-master.c
index 4bc69d1f3d8..100dcc16cc0 100644
--- a/api/src/glfs-master.c
+++ b/api/src/glfs-master.c
@@ -75,9 +75,10 @@ notify(xlator_t *this, int event, void *data, ...)
switch (event) {
case GF_EVENT_GRAPH_NEW:
- gf_msg(this->name, GF_LOG_INFO, 0, API_MSG_NEW_GRAPH,
- "New graph %s (%d) coming up",
- uuid_utoa((unsigned char *)graph->graph_uuid), graph->id);
+ gf_smsg(this->name, GF_LOG_INFO, 0, API_MSG_NEW_GRAPH,
+ "graph-uuid=%s",
+ uuid_utoa((unsigned char *)graph->graph_uuid), "id=%d",
+ graph->id, NULL);
break;
case GF_EVENT_CHILD_UP:
pthread_mutex_lock(&fs->mutex);
@@ -120,9 +121,8 @@ mem_acct_init(xlator_t *this)
ret = xlator_mem_acct_init(this, glfs_mt_end + 1);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, ENOMEM, API_MSG_MEM_ACCT_INIT_FAILED,
- "Failed to initialise "
- "memory accounting");
+ gf_smsg(this->name, GF_LOG_ERROR, ENOMEM, API_MSG_MEM_ACCT_INIT_FAILED,
+ NULL);
return ret;
}
diff --git a/api/src/glfs-mgmt.c b/api/src/glfs-mgmt.c
index 66a13432f38..7c82b8cd162 100644
--- a/api/src/glfs-mgmt.c
+++ b/api/src/glfs-mgmt.c
@@ -46,16 +46,15 @@ glfs_process_volfp(struct glfs *fs, FILE *fp)
ctx = fs->ctx;
graph = glusterfs_graph_construct(fp);
if (!graph) {
- gf_msg("glfs", GF_LOG_ERROR, errno, API_MSG_GRAPH_CONSTRUCT_FAILED,
- "failed to construct the graph");
+ gf_smsg("glfs", GF_LOG_ERROR, errno, API_MSG_GRAPH_CONSTRUCT_FAILED,
+ NULL);
goto out;
}
for (trav = graph->first; trav; trav = trav->next) {
if (strcmp(trav->type, "mount/api") == 0) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_API_XLATOR_ERROR,
- "api master xlator cannot be specified "
- "in volume file");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_API_XLATOR_ERROR,
+ NULL);
goto out;
}
}
@@ -120,32 +119,28 @@ mgmt_cbk_statedump(struct rpc_clnt *rpc, void *mydata, void *data)
this = mydata;
if (!this) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_STATEDUMP_FAILED,
- "NULL mydata");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_NULL, "mydata", NULL);
errno = EINVAL;
goto out;
}
fs = this->private;
if (!fs) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_STATEDUMP_FAILED,
- "NULL glfs");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_NULL, "glfs", NULL);
errno = EINVAL;
goto out;
}
iov = (struct iovec *)data;
if (!iov) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_STATEDUMP_FAILED,
- "NULL iovec data");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_NULL, "iovec data", NULL);
errno = EINVAL;
goto out;
}
ret = xdr_to_generic(*iov, &target_pid, (xdrproc_t)xdr_gf_statedump);
if (ret < 0) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_STATEDUMP_FAILED,
- "Failed to decode xdr response for GF_CBK_STATEDUMP");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_DECODE_XDR_FAILED, NULL);
goto out;
}
@@ -156,22 +151,21 @@ mgmt_cbk_statedump(struct rpc_clnt *rpc, void *mydata, void *data)
ret = glfs_sysrq(fs, GLFS_SYSRQ_STATEDUMP);
if (ret < 0) {
- gf_msg("glfs", GF_LOG_INFO, 0, API_MSG_STATEDUMP_FAILED,
- "statedump failed");
+ gf_smsg("glfs", GF_LOG_INFO, 0, API_MSG_STATEDUMP_FAILED, NULL);
}
}
out:
return ret;
}
-rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
- [GF_CBK_FETCHSPEC] = {"FETCHSPEC", GF_CBK_FETCHSPEC, mgmt_cbk_spec},
- [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_CBK_EVENT_NOTIFY,
- mgmt_cbk_event},
- [GF_CBK_STATEDUMP] = {"STATEDUMP", GF_CBK_STATEDUMP, mgmt_cbk_statedump},
+static rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
+ [GF_CBK_FETCHSPEC] = {"FETCHSPEC", mgmt_cbk_spec, GF_CBK_FETCHSPEC},
+ [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", mgmt_cbk_event,
+ GF_CBK_EVENT_NOTIFY},
+ [GF_CBK_STATEDUMP] = {"STATEDUMP", mgmt_cbk_statedump, GF_CBK_STATEDUMP},
};
-struct rpcclnt_cb_program mgmt_cbk_prog = {
+static struct rpcclnt_cb_program mgmt_cbk_prog = {
.progname = "GlusterFS Callback",
.prognum = GLUSTER_CBK_PROGRAM,
.progver = GLUSTER_CBK_VERSION,
@@ -179,7 +173,7 @@ struct rpcclnt_cb_program mgmt_cbk_prog = {
.numactors = GF_CBK_MAXVALUE,
};
-char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
+static char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
[GF_HNDSK_NULL] = "NULL",
[GF_HNDSK_SETVOLUME] = "SETVOLUME",
[GF_HNDSK_GETSPEC] = "GETSPEC",
@@ -188,7 +182,7 @@ char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
[GF_HNDSK_GET_VOLUME_INFO] = "GETVOLUMEINFO",
};
-rpc_clnt_prog_t clnt_handshake_prog = {
+static rpc_clnt_prog_t clnt_handshake_prog = {
.progname = "GlusterFS Handshake",
.prognum = GLUSTER_HNDSK_PROGRAM,
.progver = GLUSTER_HNDSK_VERSION,
@@ -230,8 +224,8 @@ mgmt_submit_request(void *req, call_frame_t *frame, glusterfs_ctx_t *ctx,
/* Create the xdr payload */
ret = xdr_serialize_generic(iov, req, xdrproc);
if (ret == -1) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, API_MSG_XDR_PAYLOAD_FAILED,
- "failed to create XDR payload");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, API_MSG_XDR_PAYLOAD_FAILED,
+ NULL);
goto out;
}
iov.iov_len = ret;
@@ -261,7 +255,6 @@ mgmt_get_volinfo_cbk(struct rpc_req *req, struct iovec *iov, int count,
int ret = 0;
char *volume_id_str = NULL;
dict_t *dict = NULL;
- char key[1024] = {0};
gf_get_volume_info_rsp rsp = {
0,
};
@@ -275,8 +268,8 @@ mgmt_get_volinfo_cbk(struct rpc_req *req, struct iovec *iov, int count,
args = frame->local;
if (!ctx) {
- gf_msg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "NULL context");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_NULL,
+ "context", NULL);
errno = EINVAL;
ret = -1;
goto out;
@@ -285,8 +278,8 @@ mgmt_get_volinfo_cbk(struct rpc_req *req, struct iovec *iov, int count,
fs = ((xlator_t *)ctx->master)->private;
if (-1 == req->rpc_status) {
- gf_msg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "GET_VOLUME_INFO RPC call is not successful");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, EINVAL,
+ API_MSG_CALL_NOT_SUCCESSFUL, NULL);
errno = EINVAL;
ret = -1;
goto out;
@@ -295,9 +288,8 @@ mgmt_get_volinfo_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_get_volume_info_rsp);
if (ret < 0) {
- gf_msg(frame->this->name, GF_LOG_ERROR, 0,
- API_MSG_XDR_RESPONSE_DECODE_FAILED,
- "Failed to decode xdr response for GET_VOLUME_INFO");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, 0,
+ API_MSG_XDR_RESPONSE_DECODE_FAILED, NULL);
goto out;
}
@@ -313,9 +305,8 @@ mgmt_get_volinfo_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
if (!rsp.dict.dict_len) {
- gf_msg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "Response received for "
- "GET_VOLUME_INFO RPC call is not valid");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_CALL_NOT_VALID,
+ NULL);
ret = -1;
errno = EINVAL;
goto out;
@@ -336,8 +327,7 @@ mgmt_get_volinfo_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- snprintf(key, sizeof(key), "volume_id");
- ret = dict_get_str(dict, key, &volume_id_str);
+ ret = dict_get_str_sizen(dict, "volume_id", &volume_id_str);
if (ret) {
errno = EINVAL;
goto out;
@@ -353,11 +343,9 @@ out:
}
if (ret) {
- gf_msg(frame->this->name, GF_LOG_ERROR, errno,
- API_MSG_GET_VOLINFO_CBK_FAILED,
- "In GET_VOLUME_INFO "
- "cbk, received error: %s",
- strerror(errno));
+ gf_smsg(frame->this->name, GF_LOG_ERROR, errno,
+ API_MSG_GET_VOLINFO_CBK_FAILED, "error=%s", strerror(errno),
+ NULL);
}
if (dict)
@@ -376,6 +364,7 @@ out:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_get_volumeid, 3.5.0)
int
pub_glfs_get_volumeid(struct glfs *fs, char *volid, size_t size)
{
@@ -399,9 +388,8 @@ pub_glfs_get_volumeid(struct glfs *fs, char *volid, size_t size)
glfs_get_volume_info(fs);
if (gf_uuid_is_null(fs->vol_uuid)) {
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, API_MSG_FETCH_VOLUUID_FAILED,
- "Unable to fetch "
- "volume UUID");
+ gf_smsg(THIS->name, GF_LOG_ERROR, EINVAL, API_MSG_FETCH_VOLUUID_FAILED,
+ NULL);
goto out;
}
@@ -413,8 +401,7 @@ done:
}
if (size < uuid_size) {
- gf_msg(THIS->name, GF_LOG_ERROR, ERANGE, API_MSG_INSUFF_SIZE,
- "Insufficient size passed");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ERANGE, API_MSG_INSUFF_SIZE, NULL);
errno = ERANGE;
goto out;
}
@@ -432,8 +419,6 @@ invalid_fs:
return -1;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_get_volumeid, 3.5.0);
-
int
glfs_get_volume_info(struct glfs *fs)
{
@@ -447,8 +432,7 @@ glfs_get_volume_info(struct glfs *fs)
ctx = fs->ctx;
frame = create_frame(THIS, ctx->pool);
if (!frame) {
- gf_msg("glfs", GF_LOG_ERROR, ENOMEM, API_MSG_FRAME_CREAT_FAILED,
- "failed to create the frame");
+ gf_smsg("glfs", GF_LOG_ERROR, ENOMEM, API_MSG_FRAME_CREAT_FAILED, NULL);
ret = -1;
goto out;
}
@@ -504,8 +488,8 @@ glfs_get_volume_info_rpc(call_frame_t *frame, xlator_t *this, struct glfs *fs)
flags = (int32_t)GF_GET_VOLUME_UUID; // ctx->flags;
ret = dict_set_int32(dict, "flags", flags);
if (ret) {
- gf_msg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_DICT_SET_FAILED,
- "failed to set flags");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, EINVAL,
+ API_MSG_DICT_SET_FAILED, "flags", NULL);
goto out;
}
@@ -575,8 +559,8 @@ glfs_mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
ctx = frame->this->ctx;
if (!ctx) {
- gf_msg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "NULL context");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, EINVAL, API_MSG_NULL,
+ "context", NULL);
errno = EINVAL;
ret = -1;
goto out;
@@ -592,16 +576,15 @@ glfs_mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp);
if (ret < 0) {
- gf_msg(frame->this->name, GF_LOG_ERROR, 0, API_MSG_XDR_DECODE_FAILED,
- "XDR decoding error");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, 0, API_MSG_XDR_DECODE_FAILED,
+ NULL);
ret = -1;
goto out;
}
if (-1 == rsp.op_ret) {
- gf_msg(frame->this->name, GF_LOG_ERROR, rsp.op_errno,
- API_MSG_GET_VOLFILE_FAILED,
- "failed to get the 'volume file' from server");
+ gf_smsg(frame->this->name, GF_LOG_ERROR, rsp.op_errno,
+ API_MSG_GET_VOLFILE_FAILED, "from server", NULL);
ret = -1;
errno = rsp.op_errno;
goto out;
@@ -650,8 +633,7 @@ volfile:
if ((size == fs->oldvollen) &&
(memcmp(fs->oldvolfile, rsp.spec, size) == 0)) {
pthread_mutex_unlock(&fs->mutex);
- gf_msg(frame->this->name, GF_LOG_INFO, 0, API_MSG_VOLFILE_INFO,
- "No change in volfile, continuing");
+ gf_smsg(frame->this->name, GF_LOG_INFO, 0, API_MSG_VOLFILE_INFO, NULL);
goto out;
}
pthread_mutex_unlock(&fs->mutex);
@@ -668,8 +650,8 @@ volfile:
*/
ret = sys_unlink(template);
if (ret < 0) {
- gf_msg(frame->this->name, GF_LOG_INFO, 0, API_MSG_VOLFILE_INFO,
- "Unable to delete file: %s", template);
+ gf_smsg(frame->this->name, GF_LOG_INFO, 0, API_MSG_UNABLE_TO_DEL,
+ "template=%s", template, NULL);
ret = 0;
}
@@ -731,9 +713,7 @@ out:
// Stop if server is running at an unsupported op-version
if (ENOTSUP == ret) {
- gf_msg("mgmt", GF_LOG_ERROR, ENOTSUP, API_MSG_WRONG_OPVERSION,
- "Server is operating at an op-version which is not "
- "supported");
+ gf_smsg("mgmt", GF_LOG_ERROR, ENOTSUP, API_MSG_WRONG_OPVERSION, NULL);
errno = ENOTSUP;
glfs_init_done(fs, -1);
}
@@ -742,9 +722,8 @@ out:
/* Do it only for the first time */
/* Failed to get the volume file, something wrong,
restart the process */
- gf_msg("glfs-mgmt", GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "failed to fetch volume file (key:%s)",
- ctx->cmd_args.volfile_id);
+ gf_smsg("glfs-mgmt", GF_LOG_ERROR, EINVAL, API_MSG_GET_VOLFILE_FAILED,
+ "key=%s", ctx->cmd_args.volfile_id, NULL);
if (!need_retry) {
if (!errno)
errno = EINVAL;
@@ -767,7 +746,7 @@ glfs_volfile_fetch(struct glfs *fs)
gf_getspec_req req = {
0,
};
- int ret = 0;
+ int ret = -1;
call_frame_t *frame = NULL;
glusterfs_ctx_t *ctx = NULL;
dict_t *dict = NULL;
@@ -775,14 +754,11 @@ glfs_volfile_fetch(struct glfs *fs)
ctx = fs->ctx;
cmd_args = &ctx->cmd_args;
- frame = create_frame(THIS, ctx->pool);
-
req.key = cmd_args->volfile_id;
req.flags = 0;
dict = dict_new();
if (!dict) {
- ret = -1;
goto out;
}
@@ -790,15 +766,15 @@ glfs_volfile_fetch(struct glfs *fs)
// decision
ret = dict_set_int32(dict, "min-op-version", GD_OP_VERSION_MIN);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, API_MSG_DICT_SET_FAILED,
- "Failed to set min-op-version in request dict");
+ gf_smsg(THIS->name, GF_LOG_ERROR, EINVAL, API_MSG_DICT_SET_FAILED,
+ "min-op-version", NULL);
goto out;
}
ret = dict_set_int32(dict, "max-op-version", GD_OP_VERSION_MAX);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, EINVAL, API_MSG_DICT_SET_FAILED,
- "Failed to set max-op-version in request dict");
+ gf_smsg(THIS->name, GF_LOG_ERROR, EINVAL, API_MSG_DICT_SET_FAILED,
+ "max-op-version", NULL);
goto out;
}
@@ -810,8 +786,14 @@ glfs_volfile_fetch(struct glfs *fs)
ret = dict_allocate_and_serialize(dict, &req.xdata.xdata_val,
&req.xdata.xdata_len);
if (ret < 0) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, API_MSG_DICT_SERIALIZE_FAILED,
- "Failed to serialize dictionary");
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, API_MSG_DICT_SERIALIZE_FAILED,
+ NULL);
+ goto out;
+ }
+
+ frame = create_frame(THIS, ctx->pool);
+ if (!frame) {
+ ret = -1;
goto out;
}
@@ -852,15 +834,13 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
case RPC_CLNT_DISCONNECT:
if (!ctx->active) {
if (rpc_trans->connect_failed)
- gf_msg("glfs-mgmt", GF_LOG_ERROR, 0,
- API_MSG_REMOTE_HOST_CONN_FAILED,
- "failed to connect to remote-host: %s",
- ctx->cmd_args.volfile_server);
+ gf_smsg("glfs-mgmt", GF_LOG_ERROR, 0,
+ API_MSG_REMOTE_HOST_CONN_FAILED, "server=%s",
+ ctx->cmd_args.volfile_server, NULL);
else
- gf_msg("glfs-mgmt", GF_LOG_INFO, 0,
- API_MSG_REMOTE_HOST_CONN_FAILED,
- "disconnected from remote-host: %s",
- ctx->cmd_args.volfile_server);
+ gf_smsg("glfs-mgmt", GF_LOG_INFO, 0,
+ API_MSG_REMOTE_HOST_CONN_FAILED, "server=%s",
+ ctx->cmd_args.volfile_server, NULL);
if (!rpc->disabled) {
/*
@@ -875,9 +855,8 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
server = ctx->cmd_args.curr_server;
if (server->list.next == &ctx->cmd_args.volfile_servers) {
errno = ENOTCONN;
- gf_msg("glfs-mgmt", GF_LOG_INFO, ENOTCONN,
- API_MSG_VOLFILE_SERVER_EXHAUST,
- "Exhausted all volfile servers");
+ gf_smsg("glfs-mgmt", GF_LOG_INFO, ENOTCONN,
+ API_MSG_VOLFILE_SERVER_EXHAUST, NULL);
glfs_init_done(fs, -1);
break;
}
@@ -890,10 +869,9 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
ret = dict_set_str(rpc_trans->options, "transport-type",
server->transport);
if (ret != 0) {
- gf_msg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
- API_MSG_DICT_SET_FAILED,
- "failed to set transport-type: %s",
- server->transport);
+ gf_smsg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
+ API_MSG_DICT_SET_FAILED, "transport-type=%s",
+ server->transport, NULL);
errno = ENOTCONN;
glfs_init_done(fs, -1);
break;
@@ -904,10 +882,10 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
"transport.socket.connect-path",
server->volfile_server);
if (ret != 0) {
- gf_msg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
- API_MSG_DICT_SET_FAILED,
- "failed to set socket.connect-path: %s",
- server->volfile_server);
+ gf_smsg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
+ API_MSG_DICT_SET_FAILED,
+ "socket.connect-path=%s",
+ server->volfile_server, NULL);
errno = ENOTCONN;
glfs_init_done(fs, -1);
break;
@@ -922,9 +900,9 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
ret = dict_set_int32(rpc_trans->options, "remote-port",
server->port);
if (ret != 0) {
- gf_msg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
- API_MSG_DICT_SET_FAILED,
- "failed to set remote-port: %d", server->port);
+ gf_smsg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
+ API_MSG_DICT_SET_FAILED, "remote-port=%d",
+ server->port, NULL);
errno = ENOTCONN;
glfs_init_done(fs, -1);
break;
@@ -933,10 +911,9 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
ret = dict_set_str(rpc_trans->options, "remote-host",
server->volfile_server);
if (ret != 0) {
- gf_msg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
- API_MSG_DICT_SET_FAILED,
- "failed to set remote-host: %s",
- server->volfile_server);
+ gf_smsg("glfs-mgmt", GF_LOG_ERROR, ENOTCONN,
+ API_MSG_DICT_SET_FAILED, "remote-host=%s",
+ server->volfile_server, NULL);
errno = ENOTCONN;
glfs_init_done(fs, -1);
break;
@@ -949,10 +926,9 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
"transport.socket.connect-path");
}
- gf_msg("glfs-mgmt", GF_LOG_INFO, 0, API_MSG_VOLFILE_CONNECTING,
- "connecting to next volfile server %s"
- " at port %d with transport: %s",
- server->volfile_server, server->port, server->transport);
+ gf_smsg("glfs-mgmt", GF_LOG_INFO, 0, API_MSG_VOLFILE_CONNECTING,
+ "server=%s", server->volfile_server, "port=%d",
+ server->port, "transport=%s", server->transport, NULL);
}
break;
case RPC_CLNT_CONNECT:
@@ -960,9 +936,9 @@ mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
if (ret && (ctx->active == NULL)) {
/* Do it only for the first time */
/* Exit the process.. there are some wrong options */
- gf_msg("glfs-mgmt", GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "failed to fetch volume file (key:%s)",
- ctx->cmd_args.volfile_id);
+ gf_smsg("glfs-mgmt", GF_LOG_ERROR, EINVAL,
+ API_MSG_GET_VOLFILE_FAILED, "key=%s",
+ ctx->cmd_args.volfile_id, NULL);
errno = EINVAL;
glfs_init_done(fs, -1);
}
@@ -1037,30 +1013,25 @@ glfs_mgmt_init(struct glfs *fs)
if (ret)
goto out;
- if (sys_access(SECURE_ACCESS_FILE, F_OK) == 0) {
- ctx->secure_mgmt = 1;
- ctx->ssl_cert_depth = glusterfs_read_secure_access_file();
- }
-
rpc = rpc_clnt_new(options, THIS, THIS->name, 8);
if (!rpc) {
ret = -1;
- gf_msg(THIS->name, GF_LOG_WARNING, 0, API_MSG_CREATE_RPC_CLIENT_FAILED,
- "failed to create rpc clnt");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, API_MSG_CREATE_RPC_CLIENT_FAILED,
+ NULL);
goto out;
}
ret = rpc_clnt_register_notify(rpc, mgmt_rpc_notify, THIS);
if (ret) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, API_MSG_REG_NOTIFY_FUNC_FAILED,
- "failed to register notify function");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, API_MSG_REG_NOTIFY_FUNC_FAILED,
+ NULL);
goto out;
}
ret = rpcclnt_cbk_program_register(rpc, &mgmt_cbk_prog, THIS);
if (ret) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, API_MSG_REG_CBK_FUNC_FAILED,
- "failed to register callback function");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, API_MSG_REG_CBK_FUNC_FAILED,
+ NULL);
goto out;
}
diff --git a/api/src/glfs-resolve.c b/api/src/glfs-resolve.c
index a79f4905749..8a393ecb464 100644
--- a/api/src/glfs-resolve.c
+++ b/api/src/glfs-resolve.c
@@ -65,6 +65,9 @@ __glfs_first_lookup(struct glfs *fs, xlator_t *subvol)
fs->migration_in_progress = 0;
pthread_cond_broadcast(&fs->cond);
+ /* wake up other waiting tasks */
+ __GLFS_SYNCTASK_WAKE(fs);
+
return ret;
}
@@ -116,9 +119,9 @@ glfs_refresh_inode_safe(xlator_t *subvol, inode_t *oldinode,
DECODE_SYNCOP_ERR(ret);
if (ret) {
- gf_msg(subvol->name, GF_LOG_WARNING, errno,
- API_MSG_INODE_REFRESH_FAILED, "inode refresh of %s failed: %s",
- uuid_utoa(oldinode->gfid), strerror(errno));
+ gf_smsg(subvol->name, GF_LOG_WARNING, errno,
+ API_MSG_INODE_REFRESH_FAILED, "gfid=%s",
+ uuid_utoa(oldinode->gfid), "err=%s", strerror(errno), NULL);
loc_wipe(&loc);
return NULL;
}
@@ -129,9 +132,8 @@ glfs_refresh_inode_safe(xlator_t *subvol, inode_t *oldinode,
inode_ctx_set(newinode, THIS, &ctx_value);
inode_lookup(newinode);
} else {
- gf_msg(subvol->name, GF_LOG_WARNING, errno, API_MSG_INODE_LINK_FAILED,
- "inode linking of %s failed",
- uuid_utoa((unsigned char *)&iatt.ia_gfid));
+ gf_smsg(subvol->name, GF_LOG_WARNING, errno, API_MSG_INODE_LINK_FAILED,
+ "gfid=%s", uuid_utoa((unsigned char *)&iatt.ia_gfid), NULL);
}
loc_wipe(&loc);
@@ -154,9 +156,13 @@ __glfs_refresh_inode(struct glfs *fs, xlator_t *subvol, inode_t *inode,
fs->migration_in_progress = 0;
pthread_cond_broadcast(&fs->cond);
+ /* wake up other waiting tasks */
+ __GLFS_SYNCTASK_WAKE(fs);
+
return newinode;
}
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_loc_touchup, 3.4.0)
int
priv_glfs_loc_touchup(loc_t *loc)
{
@@ -171,8 +177,6 @@ priv_glfs_loc_touchup(loc_t *loc)
return ret;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_loc_touchup, 3.4.0);
-
int
glfs_resolve_symlink(struct glfs *fs, xlator_t *subvol, inode_t *inode,
char **lpath)
@@ -335,6 +339,7 @@ glfs_resolve_component(struct glfs *fs, xlator_t *subvol, inode_t *parent,
if (temp_parent) {
inode_unref(loc.parent);
loc.parent = temp_parent;
+ gf_uuid_copy(loc.pargfid, temp_parent->gfid);
inode_find_directory_name(loc.inode, &loc.name);
}
@@ -345,10 +350,12 @@ glfs_resolve_component(struct glfs *fs, xlator_t *subvol, inode_t *parent,
if (temp_parent) {
inode_unref(loc.parent);
loc.parent = temp_parent;
+ gf_uuid_copy(loc.pargfid, temp_parent->gfid);
inode_find_directory_name(loc.inode, &loc.name);
} else if (__is_root_gfid(loc.inode->gfid)) {
inode_unref(loc.parent);
loc.parent = inode_ref(loc.inode);
+ gf_uuid_copy(loc.pargfid, loc.inode->gfid);
loc.name = ".";
} else {
inode_unref(loc.inode);
@@ -439,9 +446,8 @@ glfs_resolve_component(struct glfs *fs, xlator_t *subvol, inode_t *parent,
inode = inode_link(loc.inode, loc.parent, component, &ciatt);
if (!inode) {
- gf_msg(subvol->name, GF_LOG_WARNING, errno, API_MSG_INODE_LINK_FAILED,
- "inode linking of %s failed",
- uuid_utoa((unsigned char *)&ciatt.ia_gfid));
+ gf_smsg(subvol->name, GF_LOG_WARNING, errno, API_MSG_INODE_LINK_FAILED,
+ "gfid=%s", uuid_utoa((unsigned char *)&ciatt.ia_gfid), NULL);
goto out;
} else if (inode == loc.inode)
inode_ctx_set(inode, THIS, &ctx_value);
@@ -460,6 +466,7 @@ out:
return inode;
}
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_resolve_at, 3.4.0)
int
priv_glfs_resolve_at(struct glfs *fs, xlator_t *subvol, inode_t *at,
const char *origpath, loc_t *loc, struct iatt *iatt,
@@ -610,8 +617,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_resolve_at, 3.4.0);
-
int
glfs_resolve_path(struct glfs *fs, xlator_t *subvol, const char *origpath,
loc_t *loc, struct iatt *iatt, int follow, int reval)
@@ -625,8 +630,8 @@ glfs_resolve_path(struct glfs *fs, xlator_t *subvol, const char *origpath,
cwd = glfs_cwd_get(fs);
if (NULL == cwd) {
- gf_msg(subvol->name, GF_LOG_WARNING, EIO, API_MSG_GET_CWD_FAILED,
- "Failed to get cwd");
+ gf_smsg(subvol->name, GF_LOG_WARNING, EIO, API_MSG_GET_CWD_FAILED,
+ NULL);
errno = EIO;
goto out;
}
@@ -640,6 +645,7 @@ out:
return ret;
}
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_resolve, 3.7.0)
int
priv_glfs_resolve(struct glfs *fs, xlator_t *subvol, const char *origpath,
loc_t *loc, struct iatt *iatt, int reval)
@@ -650,7 +656,6 @@ priv_glfs_resolve(struct glfs *fs, xlator_t *subvol, const char *origpath,
return ret;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_resolve, 3.7.0);
int
glfs_lresolve(struct glfs *fs, xlator_t *subvol, const char *origpath,
@@ -680,28 +685,27 @@ glfs_migrate_fd_locks_safe(struct glfs *fs, xlator_t *oldsubvol, fd_t *oldfd,
NULL, NULL);
DECODE_SYNCOP_ERR(ret);
if (ret < 0) {
- gf_msg(fs->volname, GF_LOG_WARNING, errno, API_MSG_FGETXATTR_FAILED,
- "fgetxattr (%s) failed (%s) on graph %s (%d)",
- uuid_utoa_r(oldfd->inode->gfid, uuid1), strerror(errno),
- graphid_str(oldsubvol), oldsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, errno, API_MSG_FGETXATTR_FAILED,
+ "gfid=%s", uuid_utoa_r(oldfd->inode->gfid, uuid1), "err=%s",
+ strerror(errno), "subvol=%s", graphid_str(oldsubvol), "id=%d",
+ oldsubvol->graph->id, NULL);
goto out;
}
if (!dict_get(lockinfo, GF_XATTR_LOCKINFO_KEY)) {
- gf_msg(fs->volname, GF_LOG_WARNING, 0, API_MSG_LOCKINFO_KEY_MISSING,
- "missing lockinfo key (%s) on graph %s (%d)",
- uuid_utoa_r(oldfd->inode->gfid, uuid1), graphid_str(oldsubvol),
- oldsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, 0, API_MSG_LOCKINFO_KEY_MISSING,
+ "gfid=%s", uuid_utoa_r(oldfd->inode->gfid, uuid1), "subvol=%s",
+ graphid_str(oldsubvol), "id=%d", oldsubvol->graph->id, NULL);
goto out;
}
ret = syncop_fsetxattr(newsubvol, newfd, lockinfo, 0, NULL, NULL);
DECODE_SYNCOP_ERR(ret);
if (ret < 0) {
- gf_msg(fs->volname, GF_LOG_WARNING, 0, API_MSG_FSETXATTR_FAILED,
- "fsetxattr (%s) failed (%s) on graph %s (%d)",
- uuid_utoa_r(newfd->inode->gfid, uuid1), strerror(errno),
- graphid_str(newsubvol), newsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, 0, API_MSG_FSETXATTR_FAILED,
+ "gfid=%s", uuid_utoa_r(newfd->inode->gfid, uuid1), "err=%s",
+ strerror(errno), "subvol=%s", graphid_str(newsubvol), "id=%d",
+ newsubvol->graph->id, NULL);
goto out;
}
out:
@@ -722,6 +726,7 @@ glfs_migrate_fd_safe(struct glfs *fs, xlator_t *newsubvol, fd_t *oldfd)
0,
};
char uuid1[64];
+ dict_t *xdata = NULL;
oldinode = oldfd->inode;
oldsubvol = oldinode->table->xl;
@@ -730,32 +735,43 @@ glfs_migrate_fd_safe(struct glfs *fs, xlator_t *newsubvol, fd_t *oldfd)
return fd_ref(oldfd);
if (!oldsubvol->switched) {
- ret = syncop_fsync(oldsubvol, oldfd, 0, NULL, NULL, NULL, NULL);
+ xdata = dict_new();
+ if (!xdata || dict_set_int8(xdata, "last-fsync", 1)) {
+ gf_smsg(fs->volname, GF_LOG_WARNING, ENOMEM, API_MSG_FSYNC_FAILED,
+ "err=%s", "last-fsync set failed", "gfid=%s",
+ uuid_utoa_r(oldfd->inode->gfid, uuid1), "subvol=%s",
+ graphid_str(oldsubvol), "id=%d", oldsubvol->graph->id,
+ NULL);
+ }
+
+ ret = syncop_fsync(oldsubvol, oldfd, 0, NULL, NULL, xdata, NULL);
DECODE_SYNCOP_ERR(ret);
if (ret) {
- gf_msg(fs->volname, GF_LOG_WARNING, errno, API_MSG_FSYNC_FAILED,
- "fsync() failed "
- "(%s) on %s graph %s (%d)",
- strerror(errno), uuid_utoa_r(oldfd->inode->gfid, uuid1),
- graphid_str(oldsubvol), oldsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, errno, API_MSG_FSYNC_FAILED,
+ "err=%s", strerror(errno), "gfid=%s",
+ uuid_utoa_r(oldfd->inode->gfid, uuid1), "subvol=%s",
+ graphid_str(oldsubvol), "id=%d", oldsubvol->graph->id,
+ NULL);
}
}
newinode = glfs_refresh_inode_safe(newsubvol, oldinode, _gf_false);
if (!newinode) {
- gf_msg(fs->volname, GF_LOG_WARNING, errno, API_MSG_INODE_REFRESH_FAILED,
- "inode (%s) refresh failed (%s) on graph %s (%d)",
- uuid_utoa_r(oldinode->gfid, uuid1), strerror(errno),
- graphid_str(newsubvol), newsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, errno,
+ API_MSG_INODE_REFRESH_FAILED, "gfid=%s",
+ uuid_utoa_r(oldinode->gfid, uuid1), "err=%s", strerror(errno),
+ "subvol=%s", graphid_str(newsubvol), "id=%d",
+ newsubvol->graph->id, NULL);
goto out;
}
newfd = fd_create(newinode, getpid());
if (!newfd) {
- gf_msg(fs->volname, GF_LOG_WARNING, errno, API_MSG_FDCREATE_FAILED,
- "fd_create (%s) failed (%s) on graph %s (%d)",
- uuid_utoa_r(newinode->gfid, uuid1), strerror(errno),
- graphid_str(newsubvol), newsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, errno,
+ API_MSG_FDCREATE_FAILED_ON_GRAPH, "gfid=%s",
+ uuid_utoa_r(newinode->gfid, uuid1), "err=%s", strerror(errno),
+ "subvol=%s", graphid_str(newsubvol), "id=%d",
+ newsubvol->graph->id, NULL);
goto out;
}
@@ -763,8 +779,7 @@ glfs_migrate_fd_safe(struct glfs *fs, xlator_t *newsubvol, fd_t *oldfd)
ret = inode_path(oldfd->inode, NULL, (char **)&loc.path);
if (ret < 0) {
- gf_msg(fs->volname, GF_LOG_INFO, 0, API_MSG_INODE_PATH_FAILED,
- "inode_path failed");
+ gf_smsg(fs->volname, GF_LOG_INFO, 0, API_MSG_INODE_PATH_FAILED, NULL);
goto out;
}
@@ -780,21 +795,21 @@ glfs_migrate_fd_safe(struct glfs *fs, xlator_t *newsubvol, fd_t *oldfd)
loc_wipe(&loc);
if (ret) {
- gf_msg(fs->volname, GF_LOG_WARNING, errno, API_MSG_SYNCOP_OPEN_FAILED,
- "syncop_open%s (%s) failed (%s) on graph %s (%d)",
- IA_ISDIR(oldinode->ia_type) ? "dir" : "",
- uuid_utoa_r(newinode->gfid, uuid1), strerror(errno),
- graphid_str(newsubvol), newsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, errno, API_MSG_SYNCOP_OPEN_FAILED,
+ "type=%s", IA_ISDIR(oldinode->ia_type) ? "dir" : "", "gfid=%s",
+ uuid_utoa_r(newinode->gfid, uuid1), "err=%s", strerror(errno),
+ "subvol=%s", graphid_str(newsubvol), "id=%d",
+ newsubvol->graph->id, NULL);
goto out;
}
ret = glfs_migrate_fd_locks_safe(fs, oldsubvol, oldfd, newsubvol, newfd);
if (ret) {
- gf_msg(fs->volname, GF_LOG_WARNING, errno, API_MSG_LOCK_MIGRATE_FAILED,
- "lock migration (%s) failed (%s) on graph %s (%d)",
- uuid_utoa_r(newinode->gfid, uuid1), strerror(errno),
- graphid_str(newsubvol), newsubvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_WARNING, errno, API_MSG_LOCK_MIGRATE_FAILED,
+ "gfid=%s", uuid_utoa_r(newinode->gfid, uuid1), "err=%s",
+ strerror(errno), "subvol=%s", graphid_str(newsubvol), "id=%d",
+ newsubvol->graph->id, NULL);
goto out;
}
@@ -809,6 +824,9 @@ out:
newfd = NULL;
}
+ if (xdata)
+ dict_unref(xdata);
+
return newfd;
}
@@ -829,6 +847,9 @@ __glfs_migrate_fd(struct glfs *fs, xlator_t *newsubvol, struct glfs_fd *glfd)
fs->migration_in_progress = 0;
pthread_cond_broadcast(&fs->cond);
+ /* wake up other waiting tasks */
+ __GLFS_SYNCTASK_WAKE(fs);
+
return newfd;
}
@@ -875,9 +896,9 @@ __glfs_migrate_openfds(struct glfs *fs, xlator_t *subvol)
list_for_each_entry(glfd, &fs->openfds, openfds)
{
if (gf_uuid_is_null(glfd->fd->inode->gfid)) {
- gf_msg(fs->volname, GF_LOG_INFO, 0, API_MSG_OPENFD_SKIPPED,
- "skipping openfd %p/%p in graph %s (%d)", glfd, glfd->fd,
- graphid_str(subvol), subvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_INFO, 0, API_MSG_OPENFD_SKIPPED,
+ "glfd=%p", glfd, "glfd->fd=%p", glfd->fd, "subvol=%s",
+ graphid_str(subvol), "id=%d", subvol->graph->id, NULL);
/* create in progress, defer */
continue;
}
@@ -912,10 +933,10 @@ __glfs_active_subvol(struct glfs *fs)
ret = __glfs_first_lookup(fs, new_subvol);
if (ret) {
- gf_msg(fs->volname, GF_LOG_INFO, errno,
- API_MSG_FIRST_LOOKUP_GRAPH_FAILED,
- "first lookup on graph %s (%d) failed (%s)",
- graphid_str(new_subvol), new_subvol->graph->id, strerror(errno));
+ gf_smsg(fs->volname, GF_LOG_INFO, errno,
+ API_MSG_FIRST_LOOKUP_GRAPH_FAILED, "subvol=%s",
+ graphid_str(new_subvol), "id=%d", new_subvol->graph->id,
+ "err=%s", strerror(errno), NULL);
return NULL;
}
@@ -924,11 +945,11 @@ __glfs_active_subvol(struct glfs *fs)
if (!new_cwd) {
char buf1[64];
- gf_msg(fs->volname, GF_LOG_INFO, errno,
- API_MSG_CWD_GRAPH_REF_FAILED,
- "cwd refresh of %s graph %s (%d) failed (%s)",
- uuid_utoa_r(fs->cwd->gfid, buf1), graphid_str(new_subvol),
- new_subvol->graph->id, strerror(errno));
+ gf_smsg(fs->volname, GF_LOG_INFO, errno,
+ API_MSG_CWD_GRAPH_REF_FAILED, "buf=%s",
+ uuid_utoa_r(fs->cwd->gfid, buf1), "subvol=%s",
+ graphid_str(new_subvol), "id=%d", new_subvol->graph->id,
+ "err=%s", strerror(errno), NULL);
return NULL;
}
}
@@ -949,13 +970,13 @@ __glfs_active_subvol(struct glfs *fs)
inode_unref(new_cwd);
}
- gf_msg(fs->volname, GF_LOG_INFO, 0, API_MSG_SWITCHED_GRAPH,
- "switched to graph %s (%d)", graphid_str(new_subvol),
- new_subvol->graph->id);
+ gf_smsg(fs->volname, GF_LOG_INFO, 0, API_MSG_SWITCHED_GRAPH, "subvol=%s",
+ graphid_str(new_subvol), "id=%d", new_subvol->graph->id, NULL);
return new_subvol;
}
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_subvol_done, 3.4.0)
void
priv_glfs_subvol_done(struct glfs *fs, xlator_t *subvol)
{
@@ -983,8 +1004,7 @@ priv_glfs_subvol_done(struct glfs *fs, xlator_t *subvol)
}
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_subvol_done, 3.4.0);
-
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_active_subvol, 3.4.0)
xlator_t *
priv_glfs_active_subvol(struct glfs *fs)
{
@@ -1012,8 +1032,6 @@ priv_glfs_active_subvol(struct glfs *fs)
return subvol;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_active_subvol, 3.4.0);
-
int
__glfs_cwd_set(struct glfs *fs, inode_t *inode)
{
diff --git a/api/src/glfs.c b/api/src/glfs.c
index 98054b6bdb6..b4bf1423f6d 100644
--- a/api/src/glfs.c
+++ b/api/src/glfs.c
@@ -69,8 +69,8 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
ret = xlator_mem_acct_init(THIS, glfs_mt_end + 1);
if (ret != 0) {
- gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_MEM_ACCT_INIT_FAILED,
- "Memory accounting init failed");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_MEM_ACCT_INIT_FAILED,
+ NULL);
return ret;
}
@@ -189,10 +189,8 @@ create_master(struct glfs *fs)
goto err;
if (xlator_set_type(master, "mount/api") == -1) {
- gf_msg("glfs", GF_LOG_ERROR, 0, API_MSG_MASTER_XLATOR_INIT_FAILED,
- "master xlator "
- "for %s initialization failed",
- fs->volname);
+ gf_smsg("glfs", GF_LOG_ERROR, 0, API_MSG_MASTER_XLATOR_INIT_FAILED,
+ "name=%s", fs->volname, NULL);
goto err;
}
@@ -204,8 +202,8 @@ create_master(struct glfs *fs)
ret = xlator_init(master);
if (ret) {
- gf_msg("glfs", GF_LOG_ERROR, 0, API_MSG_GFAPI_XLATOR_INIT_FAILED,
- "failed to initialize gfapi translator");
+ gf_smsg("glfs", GF_LOG_ERROR, 0, API_MSG_GFAPI_XLATOR_INIT_FAILED,
+ NULL);
goto err;
}
@@ -231,9 +229,8 @@ get_volfp(struct glfs *fs)
cmd_args = &fs->ctx->cmd_args;
if ((specfp = fopen(cmd_args->volfile, "r")) == NULL) {
- gf_msg("glfs", GF_LOG_ERROR, errno, API_MSG_VOLFILE_OPEN_FAILED,
- "volume file %s open failed: %s", cmd_args->volfile,
- strerror(errno));
+ gf_smsg("glfs", GF_LOG_ERROR, errno, API_MSG_VOLFILE_OPEN_FAILED,
+ "file=%s", cmd_args->volfile, "err=%s", strerror(errno), NULL);
return NULL;
}
@@ -254,6 +251,11 @@ glfs_volumes_init(struct glfs *fs)
if (!vol_assigned(cmd_args))
return -1;
+ if (sys_access(SECURE_ACCESS_FILE, F_OK) == 0) {
+ fs->ctx->secure_mgmt = 1;
+ fs->ctx->ssl_cert_depth = glusterfs_read_secure_access_file();
+ }
+
if (cmd_args->volfile_server) {
ret = glfs_mgmt_init(fs);
goto out;
@@ -262,8 +264,8 @@ glfs_volumes_init(struct glfs *fs)
fp = get_volfp(fs);
if (!fp) {
- gf_msg("glfs", GF_LOG_ERROR, ENOENT, API_MSG_VOL_SPEC_FILE_ERROR,
- "Cannot reach volume specification file");
+ gf_smsg("glfs", GF_LOG_ERROR, ENOENT, API_MSG_VOL_SPEC_FILE_ERROR,
+ NULL);
ret = -1;
goto out;
}
@@ -278,6 +280,7 @@ out:
///////////////////////////////////////////////////////////////////////////////
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_xlator_option, 3.4.0)
int
pub_glfs_set_xlator_option(struct glfs *fs, const char *xlator, const char *key,
const char *value)
@@ -327,8 +330,7 @@ invalid_fs:
return -1;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_xlator_option, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_unset_volfile_server, 3.5.1)
int
pub_glfs_unset_volfile_server(struct glfs *fs, const char *transport,
const char *host, const int port)
@@ -369,6 +371,8 @@ pub_glfs_unset_volfile_server(struct glfs *fs, const char *transport,
list_for_each_entry_safe(server, tmp, &cmd_args->curr_server->list, list)
{
+ if (!server->volfile_server || !server->transport)
+ continue;
if ((!strcmp(server->volfile_server, host) &&
!strcmp(server->transport, transport_val) &&
(server->port == port_val))) {
@@ -386,8 +390,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_unset_volfile_server, 3.5.1);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_volfile_server, 3.4.0)
int
pub_glfs_set_volfile_server(struct glfs *fs, const char *transport,
const char *host, int port)
@@ -420,14 +423,11 @@ pub_glfs_set_volfile_server(struct glfs *fs, const char *transport,
server_transport = gf_strdup(transport);
} else if (!strcmp(transport, "rdma")) {
server_transport = gf_strdup(GF_DEFAULT_VOLFILE_TRANSPORT);
- gf_msg("glfs", GF_LOG_WARNING, EINVAL, API_MSG_INVALID_ENTRY,
- "transport RDMA is deprecated, "
- "falling back to tcp");
+ gf_smsg("glfs", GF_LOG_WARNING, EINVAL, API_MSG_TRANS_RDMA_DEP,
+ NULL);
} else {
- gf_msg("glfs", GF_LOG_TRACE, EINVAL, API_MSG_INVALID_ENTRY,
- "transport %s is not supported, "
- "possible values tcp|unix",
- transport);
+ gf_smsg("glfs", GF_LOG_TRACE, EINVAL, API_MSG_TRANS_NOT_SUPPORTED,
+ "transport=%s", transport, NULL);
goto out;
}
} else {
@@ -469,8 +469,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_volfile_server, 3.4.0);
-
/* *
* Used to free the arguments allocated by glfs_set_volfile_server()
*/
@@ -513,6 +511,7 @@ glfs_free_xlator_options(cmd_args_t *cmd_args)
}
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsuid, 3.4.2)
int
pub_glfs_setfsuid(uid_t fsuid)
{
@@ -522,8 +521,7 @@ pub_glfs_setfsuid(uid_t fsuid)
return syncopctx_setfsuid(&fsuid);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsuid, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsgid, 3.4.2)
int
pub_glfs_setfsgid(gid_t fsgid)
{
@@ -533,8 +531,7 @@ pub_glfs_setfsgid(gid_t fsgid)
return syncopctx_setfsgid(&fsgid);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsgid, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsgroups, 3.4.2)
int
pub_glfs_setfsgroups(size_t size, const gid_t *list)
{
@@ -544,8 +541,7 @@ pub_glfs_setfsgroups(size_t size, const gid_t *list)
return syncopctx_setfsgroups(size, list);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsgroups, 3.4.2);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsleaseid, 4.0.0)
int
pub_glfs_setfsleaseid(glfs_leaseid_t leaseid)
{
@@ -567,8 +563,6 @@ pub_glfs_setfsleaseid(glfs_leaseid_t leaseid)
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setfsleaseid, 4.0.0);
-
int
get_fop_attr_glfd(dict_t **fop_attr, struct glfs_fd *glfd)
{
@@ -656,14 +650,19 @@ unset_fop_attr(dict_t **fop_attr)
*fop_attr = NULL;
}
}
+
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_from_glfd, 3.4.0)
struct glfs *
pub_glfs_from_glfd(struct glfs_fd *glfd)
{
+ if (glfd == NULL) {
+ errno = EBADF;
+ return NULL;
+ }
+
return glfd->fs;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_from_glfd, 3.4.0);
-
static void
glfs_fd_destroy(struct glfs_fd *glfd)
{
@@ -741,6 +740,7 @@ glfs_new_fs(const char *volname)
INIT_LIST_HEAD(&fs->openfds);
INIT_LIST_HEAD(&fs->upcall_list);
+ INIT_LIST_HEAD(&fs->waitq);
PTHREAD_MUTEX_INIT(&fs->mutex, NULL, fs->pthread_flags, GLFS_INIT_MUTEX,
err);
@@ -811,21 +811,40 @@ unlock:
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_new, 3.4.0)
struct glfs *
pub_glfs_new(const char *volname)
{
+ if (!volname) {
+ errno = EINVAL;
+ return NULL;
+ }
+
struct glfs *fs = NULL;
+ int i = 0;
int ret = -1;
glusterfs_ctx_t *ctx = NULL;
xlator_t *old_THIS = NULL;
char pname[16] = "";
char msg[32] = "";
- if (!volname) {
+ if (volname[0] == '/' || volname[0] == '-') {
+ if (strncmp(volname, "/snaps/", 7) == 0) {
+ goto label;
+ }
errno = EINVAL;
return NULL;
}
+ for (i = 0; i < strlen(volname); i++) {
+ if (!isalnum(volname[i]) && (volname[i] != '_') &&
+ (volname[i] != '-')) {
+ errno = EINVAL;
+ return NULL;
+ }
+ }
+
+label:
/*
* Do this as soon as possible in case something else depends on
* pool allocations.
@@ -899,8 +918,7 @@ out:
return fs;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_new, 3.4.0);
-
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_new_from_ctx, 3.7.0)
struct glfs *
priv_glfs_new_from_ctx(glusterfs_ctx_t *ctx)
{
@@ -919,8 +937,7 @@ out:
return fs;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_new_from_ctx, 3.7.0);
-
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_free_from_ctx, 3.7.0)
void
priv_glfs_free_from_ctx(struct glfs *fs)
{
@@ -956,8 +973,7 @@ priv_glfs_free_from_ctx(struct glfs *fs)
FREE(fs);
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_free_from_ctx, 3.7.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_volfile, 3.4.0)
int
pub_glfs_set_volfile(struct glfs *fs, const char *volfile)
{
@@ -974,8 +990,7 @@ pub_glfs_set_volfile(struct glfs *fs, const char *volfile)
return 0;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_volfile, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_logging, 3.4.0)
int
pub_glfs_set_logging(struct glfs *fs, const char *logfile, int loglevel)
{
@@ -1013,8 +1028,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_logging, 3.4.0);
-
int
glfs_init_wait(struct glfs *fs)
{
@@ -1033,14 +1046,14 @@ glfs_init_wait(struct glfs *fs)
return ret;
}
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_init_done, 3.4.0)
void
priv_glfs_init_done(struct glfs *fs, int ret)
{
glfs_init_cbk init_cbk;
if (!fs) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_GLFS_FSOBJ_NULL,
- "fs is NULL");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_GLFS_FSOBJ_NULL, NULL);
goto out;
}
@@ -1064,8 +1077,6 @@ out:
return;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_init_done, 3.4.0);
-
int
glfs_init_common(struct glfs *fs)
{
@@ -1093,8 +1104,7 @@ glfs_init_async(struct glfs *fs, glfs_init_cbk cbk)
int ret = -1;
if (!fs || !fs->ctx) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "fs is not properly initialized.");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_FS_NOT_INIT, NULL);
errno = EINVAL;
return ret;
}
@@ -1106,6 +1116,7 @@ glfs_init_async(struct glfs *fs, glfs_init_cbk cbk)
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_init, 3.4.0)
int
pub_glfs_init(struct glfs *fs)
{
@@ -1114,8 +1125,7 @@ pub_glfs_init(struct glfs *fs)
DECLARE_OLD_THIS;
if (!fs || !fs->ctx) {
- gf_msg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_INVALID_ENTRY,
- "fs is not properly initialized.");
+ gf_smsg("glfs", GF_LOG_ERROR, EINVAL, API_MSG_FS_NOT_INIT, NULL);
errno = EINVAL;
return ret;
}
@@ -1139,8 +1149,6 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_init, 3.4.0);
-
static int
glusterfs_ctx_destroy(glusterfs_ctx_t *ctx)
{
@@ -1218,6 +1226,7 @@ glusterfs_ctx_destroy(glusterfs_ctx_t *ctx)
return ret;
}
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fini, 3.4.0)
int
pub_glfs_fini(struct glfs *fs)
{
@@ -1229,6 +1238,7 @@ pub_glfs_fini(struct glfs *fs)
call_pool_t *call_pool = NULL;
int fs_init = 0;
int err = -1;
+ struct synctask *waittask = NULL;
DECLARE_OLD_THIS;
@@ -1250,6 +1260,13 @@ pub_glfs_fini(struct glfs *fs)
call_pool = fs->ctx->pool;
+ /* Wake up any suspended synctasks */
+ while (!list_empty(&fs->waitq)) {
+ waittask = list_entry(fs->waitq.next, struct synctask, waitq);
+ list_del_init(&waittask->waitq);
+ synctask_wake(waittask);
+ }
+
while (countdown--) {
/* give some time for background frames to finish */
pthread_mutex_lock(&fs->mutex);
@@ -1268,7 +1285,7 @@ pub_glfs_fini(struct glfs *fs)
}
}
pthread_mutex_unlock(&fs->mutex);
- usleep(100000);
+ gf_nanosleep(100000 * GF_US_IN_NS);
}
/* leaked frames may exist, we ignore */
@@ -1301,10 +1318,8 @@ pub_glfs_fini(struct glfs *fs)
graph = subvol->graph;
err = pthread_mutex_lock(&fs->mutex);
if (err != 0) {
- gf_msg("glfs", GF_LOG_ERROR, err, API_MSG_FSMUTEX_LOCK_FAILED,
- "pthread lock on glfs mutex, "
- "returned error: (%s)",
- strerror(err));
+ gf_smsg("glfs", GF_LOG_ERROR, err, API_MSG_FSMUTEX_LOCK_FAILED,
+ "error=%s", strerror(err), NULL);
goto fail;
}
/* check and wait for CHILD_DOWN for active subvol*/
@@ -1312,19 +1327,17 @@ pub_glfs_fini(struct glfs *fs)
while (graph->used) {
err = pthread_cond_wait(&fs->child_down_cond, &fs->mutex);
if (err != 0)
- gf_msg("glfs", GF_LOG_INFO, err,
- API_MSG_COND_WAIT_FAILED,
- "%s cond wait failed %s", subvol->name,
- strerror(err));
+ gf_smsg("glfs", GF_LOG_INFO, err,
+ API_MSG_COND_WAIT_FAILED, "name=%s",
+ subvol->name, "err=%s", strerror(err), NULL);
}
}
err = pthread_mutex_unlock(&fs->mutex);
if (err != 0) {
- gf_msg("glfs", GF_LOG_ERROR, err, API_MSG_FSMUTEX_UNLOCK_FAILED,
- "pthread unlock on glfs mutex, "
- "returned error: (%s)",
- strerror(err));
+ gf_smsg("glfs", GF_LOG_ERROR, err,
+ API_MSG_FSMUTEX_UNLOCK_FAILED, "error=%s",
+ strerror(err), NULL);
goto fail;
}
}
@@ -1404,8 +1417,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fini, 3.4.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_get_volfile, 3.6.0)
ssize_t
pub_glfs_get_volfile(struct glfs *fs, void *buf, size_t len)
{
@@ -1431,8 +1443,7 @@ invalid_fs:
return res;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_get_volfile, 3.6.0);
-
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_ipc, 3.12.0)
int
priv_glfs_ipc(struct glfs *fs, int opcode, void *xd_in, void **xd_out)
{
@@ -1460,8 +1471,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_ipc, 3.12.0);
-
+GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_setfspid, 6.1)
int
priv_glfs_setfspid(struct glfs *fs, pid_t pid)
{
@@ -1475,107 +1485,104 @@ priv_glfs_setfspid(struct glfs *fs, pid_t pid)
return ret;
}
-GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_setfspid, 6.1);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_free, 3.7.16)
void
pub_glfs_free(void *ptr)
{
GLFS_FREE(ptr);
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_free, 3.7.16);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_get_fs, 3.7.16)
struct glfs *
pub_glfs_upcall_get_fs(struct glfs_upcall *arg)
{
return arg->fs;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_get_fs, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_get_reason, 3.7.16)
enum glfs_upcall_reason
pub_glfs_upcall_get_reason(struct glfs_upcall *arg)
{
return arg->reason;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_get_reason, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_get_event, 3.7.16)
void *
pub_glfs_upcall_get_event(struct glfs_upcall *arg)
{
return arg->event;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_get_event, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_object, 3.7.16)
struct glfs_object *
pub_glfs_upcall_inode_get_object(struct glfs_upcall_inode *arg)
{
return arg->object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_object, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_flags, 3.7.16)
uint64_t
pub_glfs_upcall_inode_get_flags(struct glfs_upcall_inode *arg)
{
return arg->flags;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_flags, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_stat, 3.7.16)
struct stat *
pub_glfs_upcall_inode_get_stat(struct glfs_upcall_inode *arg)
{
return &arg->buf;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_stat, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_expire, 3.7.16)
uint64_t
pub_glfs_upcall_inode_get_expire(struct glfs_upcall_inode *arg)
{
return arg->expire_time_attr;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_expire, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_pobject, 3.7.16)
struct glfs_object *
pub_glfs_upcall_inode_get_pobject(struct glfs_upcall_inode *arg)
{
return arg->p_object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_pobject, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_pstat, 3.7.16)
struct stat *
pub_glfs_upcall_inode_get_pstat(struct glfs_upcall_inode *arg)
{
return &arg->p_buf;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_pstat, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_oldpobject, 3.7.16)
struct glfs_object *
pub_glfs_upcall_inode_get_oldpobject(struct glfs_upcall_inode *arg)
{
return arg->oldp_object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_oldpobject, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_oldpstat, 3.7.16)
struct stat *
pub_glfs_upcall_inode_get_oldpstat(struct glfs_upcall_inode *arg)
{
return &arg->oldp_buf;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_inode_get_oldpstat, 3.7.16);
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_lease_get_object, 4.1.6)
struct glfs_object *
pub_glfs_upcall_lease_get_object(struct glfs_upcall_lease *arg)
{
return arg->object;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_lease_get_object, 4.1.6);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_lease_get_lease_type, 4.1.6)
uint32_t
pub_glfs_upcall_lease_get_lease_type(struct glfs_upcall_lease *arg)
{
return arg->lease_type;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_lease_get_lease_type, 4.1.6);
/* definitions of the GLFS_SYSRQ_* chars are in glfs.h */
static struct glfs_sysrq_help {
@@ -1585,6 +1592,7 @@ static struct glfs_sysrq_help {
{GLFS_SYSRQ_STATEDUMP, "(S)tatedump"},
{0, NULL}};
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_sysrq, 3.10.0)
int
pub_glfs_sysrq(struct glfs *fs, char sysrq)
{
@@ -1624,8 +1632,8 @@ pub_glfs_sysrq(struct glfs *fs, char sysrq)
gf_proc_dump_info(SIGUSR1, ctx);
break;
default:
- gf_msg("glfs", GF_LOG_ERROR, ENOTSUP, API_MSG_INVALID_ENTRY,
- "'%c' is not a valid sysrq", sysrq);
+ gf_smsg("glfs", GF_LOG_ERROR, ENOTSUP, API_MSG_INVALID_SYSRQ,
+ "sysrq=%c", sysrq, NULL);
errno = ENOTSUP;
ret = -1;
}
@@ -1633,8 +1641,7 @@ out:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_sysrq, 3.10.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_register, 3.13.0)
int
pub_glfs_upcall_register(struct glfs *fs, uint32_t event_list,
glfs_upcall_cbk cbk, void *data)
@@ -1656,8 +1663,8 @@ pub_glfs_upcall_register(struct glfs *fs, uint32_t event_list,
if ((event_list != GLFS_EVENT_ANY) && (event_list & ~up_events)) {
errno = EINVAL;
ret = -1;
- gf_msg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_INVALID_ARG,
- "invalid event_list (0x%08x)", event_list);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ARG,
+ "event_list=(0x%08x)", event_list, NULL);
goto out;
}
@@ -1690,8 +1697,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_register, 3.13.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_unregister, 3.13.0)
int
pub_glfs_upcall_unregister(struct glfs *fs, uint32_t event_list)
{
@@ -1709,8 +1715,8 @@ pub_glfs_upcall_unregister(struct glfs *fs, uint32_t event_list)
if ((event_list != GLFS_EVENT_ANY) && (event_list & ~up_events)) {
errno = EINVAL;
ret = -1;
- gf_msg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_INVALID_ARG,
- "invalid event_list (0x%08x)", event_list);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ARG,
+ "event_list=(0x%08x)", event_list, NULL);
goto out;
}
@@ -1738,8 +1744,7 @@ invalid_fs:
return ret;
}
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_upcall_unregister, 3.13.0);
-
+GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_statedump_path, 7.0)
int
pub_glfs_set_statedump_path(struct glfs *fs, const char *path)
{
@@ -1799,5 +1804,3 @@ err:
invalid_fs:
return -1;
}
-
-GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_set_statedump_path, 7.0);
diff --git a/build-aux/pkg-version b/build-aux/pkg-version
index 7c57c639a5c..17ceab70c03 100755
--- a/build-aux/pkg-version
+++ b/build-aux/pkg-version
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# To override version/release from git,
# create VERSION file containing text with version/release
diff --git a/cli/src/Makefile.am b/cli/src/Makefile.am
index 3e7511fe1b7..16063f27c7f 100644
--- a/cli/src/Makefile.am
+++ b/cli/src/Makefile.am
@@ -5,6 +5,7 @@ gluster_SOURCES = cli.c registry.c input.c cli-cmd.c cli-rl.c cli-cmd-global.c \
cli-cmd-system.c cli-cmd-misc.c cli-xml-output.c cli-quotad-client.c cli-cmd-snapshot.c
gluster_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la $(GF_LDADD) \
+ $(top_builddir)/libglusterd/src/libglusterd.la \
$(RLLIBS) $(top_builddir)/rpc/xdr/src/libgfxdr.la \
$(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
$(XML_LIBS)
@@ -13,13 +14,14 @@ gluster_LDFLAGS = $(GF_LDFLAGS)
noinst_HEADERS = cli.h cli-mem-types.h cli-cmd.h cli-quotad-client.h
AM_CPPFLAGS = $(GF_CPPFLAGS) \
- -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/rpc-lib/src\
- -I$(top_srcdir)/rpc/xdr/src\
- -I$(top_builddir)/rpc/xdr/src\
+ -I$(top_srcdir)/libglusterfs/src -I$(top_srcdir)/rpc/rpc-lib/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_srcdir)/libglusterd/src \
+ -I$(top_builddir)/rpc/xdr/src \
-DDATADIR=\"$(localstatedir)\" \
-DCONFDIR=\"$(sysconfdir)/glusterfs\" \
-DGSYNCD_PREFIX=\"$(GLUSTERFS_LIBEXECDIR)\"\
- -DSYNCDAEMON_COMPILE=$(SYNCDAEMON_COMPILE) -DSBIN_DIR=\"$(sbindir)\"
+ -DGLFSHEAL_PREFIX=\"$(GLUSTERFS_LIBEXECDIR)\"\
+ -DSYNCDAEMON_COMPILE=$(SYNCDAEMON_COMPILE)
AM_CFLAGS = -Wall $(GF_CFLAGS) $(XML_CFLAGS)
@@ -28,5 +30,8 @@ CLEANFILES =
$(top_builddir)/libglusterfs/src/libglusterfs.la:
$(MAKE) -C $(top_builddir)/libglusterfs/src/ all
+$(top_builddir)/libglusterd/src/libglusterd.la:
+ $(MAKE) -C $(top_builddir)/libglusterd/src/ all
+
install-data-hook:
$(mkdir_p) $(DESTDIR)$(localstatedir)/run/gluster
diff --git a/cli/src/cli-cmd-global.c b/cli/src/cli-cmd-global.c
index d0729ac1f0a..2c9a5f01bb1 100644
--- a/cli/src/cli-cmd-global.c
+++ b/cli/src/cli-cmd-global.c
@@ -27,8 +27,6 @@
#include <glusterfs/syscall.h>
#include <glusterfs/common-utils.h>
-extern rpc_clnt_prog_t *cli_rpc_prog;
-
int
cli_cmd_global_help_cbk(struct cli_state *state, struct cli_cmd_word *in_word,
const char **words, int wordcount);
@@ -36,6 +34,10 @@ int
cli_cmd_get_state_cbk(struct cli_state *state, struct cli_cmd_word *word,
const char **words, int wordcount);
+int
+cli_cmd_ganesha_cbk(struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount);
+
struct cli_cmd global_cmds[] = {
{
"global help",
@@ -48,6 +50,11 @@ struct cli_cmd global_cmds[] = {
cli_cmd_get_state_cbk,
"Get local state representation of mentioned daemon",
},
+ {
+ "nfs-ganesha {enable| disable} ",
+ cli_cmd_ganesha_cbk,
+ "Enable/disable NFS-Ganesha support",
+ },
{NULL, NULL, NULL}};
int
@@ -89,8 +96,9 @@ out:
}
int
-cli_cmd_get_state_cbk(struct cli_state *state, struct cli_cmd_word *word,
- const char **words, int wordcount)
+cli_cmd_ganesha_cbk(struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount)
+
{
int sent = 0;
int parse_error = 0;
@@ -101,10 +109,53 @@ cli_cmd_get_state_cbk(struct cli_state *state, struct cli_cmd_word *word,
cli_local_t *local = NULL;
char *op_errstr = NULL;
+ proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GANESHA];
+
frame = create_frame(THIS, THIS->ctx->pool);
if (!frame)
goto out;
+ ret = cli_cmd_ganesha_parse(state, words, wordcount, &options, &op_errstr);
+ if (ret) {
+ if (op_errstr) {
+ cli_err("%s", op_errstr);
+ GF_FREE(op_errstr);
+ } else
+ cli_usage_out(word->pattern);
+ parse_error = 1;
+ goto out;
+ }
+
+ CLI_LOCAL_INIT(local, words, frame, options);
+
+ if (proc->fn) {
+ ret = proc->fn(frame, THIS, options);
+ }
+
+out:
+ if (ret) {
+ cli_cmd_sent_status_get(&sent);
+ if ((sent == 0) && (parse_error == 0))
+ cli_out("Setting global option failed");
+ }
+
+ CLI_STACK_DESTROY(frame);
+ return ret;
+}
+
+int
+cli_cmd_get_state_cbk(struct cli_state *state, struct cli_cmd_word *word,
+ const char **words, int wordcount)
+{
+ int sent = 0;
+ int parse_error = 0;
+ int ret = -1;
+ rpc_clnt_procedure_t *proc = NULL;
+ call_frame_t *frame = NULL;
+ dict_t *options = NULL;
+ cli_local_t *local = NULL;
+ char *op_errstr = NULL;
+
ret = cli_cmd_get_state_parse(state, words, wordcount, &options,
&op_errstr);
@@ -120,6 +171,12 @@ cli_cmd_get_state_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_STATE];
diff --git a/cli/src/cli-cmd-misc.c b/cli/src/cli-cmd-misc.c
index 120d4ab69b5..e961d88da86 100644
--- a/cli/src/cli-cmd-misc.c
+++ b/cli/src/cli-cmd-misc.c
@@ -18,10 +18,6 @@
#include "cli-mem-types.h"
#include "protocol-common.h"
-extern struct rpc_clnt *global_rpc;
-
-extern rpc_clnt_prog_t *cli_rpc_prog;
-
extern struct cli_cmd volume_cmds[];
extern struct cli_cmd bitrot_cmds[];
extern struct cli_cmd quota_cmds[];
@@ -57,7 +53,7 @@ int
cli_cmd_display_help(struct cli_state *state, struct cli_cmd_word *in_word,
const char **words, int wordcount)
{
- struct cli_cmd *cmd[] = {
+ static struct cli_cmd *cmd[] = {
cli_misc_cmds, cli_probe_cmds, volume_cmds, bitrot_cmds,
quota_cmds, snapshot_cmds, global_cmds, NULL};
struct cli_cmd *cmd_ind = NULL;
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index decdd10cb50..34620b4a31b 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -26,7 +26,7 @@
#define MAX_SNAP_DESCRIPTION_LEN 1024
-struct snap_config_opt_vals_ snap_confopt_vals[] = {
+static struct snap_config_opt_vals_ snap_confopt_vals[] = {
{.op_name = "snap-max-hard-limit",
.question = "Changing snapshot-max-hard-limit "
"will limit the creation of new snapshots "
@@ -567,9 +567,9 @@ cli_cmd_volume_create_parse(struct cli_state *state, const char **words,
char *bricks = NULL;
char *ta_brick = NULL;
int32_t brick_count = 0;
- char *opwords[] = {"replica", "stripe", "transport",
- "disperse", "redundancy", "disperse-data",
- "arbiter", "thin-arbiter", NULL};
+ static char *opwords[] = {"replica", "stripe", "transport",
+ "disperse", "redundancy", "disperse-data",
+ "arbiter", "thin-arbiter", NULL};
char *w = NULL;
int op_count = 0;
@@ -675,7 +675,8 @@ cli_cmd_volume_create_parse(struct cli_state *state, const char **words,
index += 2;
} else if (!strcmp(words[index], "thin-arbiter")) {
ret = gf_string2int(words[index + 1], &thin_arbiter_count);
- if ((ret == -1) || (thin_arbiter_count != 1)) {
+ if ((ret == -1) || (thin_arbiter_count != 1) ||
+ (replica_count != 2)) {
cli_err(
"For thin-arbiter "
"configuration, "
@@ -1194,19 +1195,19 @@ cli_cmd_quota_parse(const char **words, int wordcount, dict_t **options)
};
int64_t value = 0;
gf_quota_type type = GF_QUOTA_OPTION_TYPE_NONE;
- char *opwords[] = {"enable",
- "disable",
- "limit-usage",
- "remove",
- "list",
- "alert-time",
- "soft-timeout",
- "hard-timeout",
- "default-soft-limit",
- "limit-objects",
- "list-objects",
- "remove-objects",
- NULL};
+ static char *opwords[] = {"enable",
+ "disable",
+ "limit-usage",
+ "remove",
+ "list",
+ "alert-time",
+ "soft-timeout",
+ "hard-timeout",
+ "default-soft-limit",
+ "limit-objects",
+ "list-objects",
+ "remove-objects",
+ NULL};
char *w = NULL;
uint32_t time = 0;
double percent = 0;
@@ -1622,6 +1623,11 @@ cli_add_key_group(dict_t *dict, char *key, char *value, char **op_errstr)
}
goto out;
}
+
+ /* Treat line that start with "#" as comments */
+ if ('#' == line[0])
+ continue;
+
opt_count++;
tok_key = strtok_r(line, "=", &saveptr);
tok_val = strtok_r(NULL, "\r\n", &saveptr);
@@ -1847,7 +1853,7 @@ cli_cmd_volume_add_brick_parse(struct cli_state *state, const char **words,
int ret = -1;
int brick_count = 0, brick_index = 0;
char *bricks = NULL;
- char *opwords_cl[] = {"replica", "stripe", NULL};
+ static char *opwords_cl[] = {"replica", "stripe", NULL};
gf1_cluster_type type = GF_CLUSTER_TYPE_NONE;
int count = 1;
int arbiter_count = 0;
@@ -2007,8 +2013,9 @@ cli_cmd_volume_remove_brick_parse(struct cli_state *state, const char **words,
int32_t j = 0;
char *tmp_brick = NULL;
char *tmp_brick1 = NULL;
- char *type_opword[] = {"replica", NULL};
- char *opwords[] = {"start", "commit", "stop", "status", "force", NULL};
+ static char *type_opword[] = {"replica", NULL};
+ static char *opwords[] = {"start", "commit", "stop",
+ "status", "force", NULL};
char *w = NULL;
int32_t command = GF_OP_CMD_NONE;
long count = 0;
@@ -2579,8 +2586,6 @@ cli_cmd_log_rotate_parse(const char **words, int wordcount, dict_t **options)
if (strcmp("rotate", words[3]) == 0)
volname = (char *)words[2];
- else if (strcmp("rotate", words[2]) == 0)
- volname = (char *)words[3];
GF_ASSERT(volname);
ret = dict_set_str(dict, "volname", volname);
@@ -2856,8 +2861,8 @@ out:
}
int32_t
-cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **options,
- char **errstr)
+cli_cmd_gsync_set_parse(struct cli_state *state, const char **words,
+ int wordcount, dict_t **options, char **errstr)
{
int32_t ret = -1;
dict_t *dict = NULL;
@@ -2867,13 +2872,16 @@ cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **options,
unsigned slavei = 0;
unsigned glob = 0;
unsigned cmdi = 0;
- char *opwords[] = {"create", "status", "start", "stop", "config",
- "force", "delete", "ssh-port", "no-verify", "push-pem",
- "detail", "pause", "resume", NULL};
+ static char *opwords[] = {"create", "status", "start", "stop",
+ "config", "force", "delete", "ssh-port",
+ "no-verify", "push-pem", "detail", "pause",
+ "resume", NULL};
char *w = NULL;
char *save_ptr = NULL;
char *slave_temp = NULL;
char *token = NULL;
+ gf_answer_t answer = GF_ANSWER_NO;
+ const char *question = NULL;
GF_ASSERT(words);
GF_ASSERT(options);
@@ -3060,16 +3068,36 @@ cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **options,
}
if (!ret)
ret = dict_set_int32(dict, "type", type);
- if (!ret && type == GF_GSYNC_OPTION_TYPE_CONFIG)
+ if (!ret && type == GF_GSYNC_OPTION_TYPE_CONFIG) {
+ if (!strcmp((char *)words[wordcount - 2], "ignore-deletes") &&
+ !strcmp((char *)words[wordcount - 1], "true")) {
+ question =
+ "There exists ~15 seconds delay for the option to take"
+ " effect from stime of the corresponding brick. Please"
+ " check the log for the time, the option is effective."
+ " Proceed";
+
+ answer = cli_cmd_get_confirmation(state, question);
+
+ if (GF_ANSWER_NO == answer) {
+ gf_log("cli", GF_LOG_INFO,
+ "Operation "
+ "cancelled, exiting");
+ *errstr = gf_strdup("Aborted by user.");
+ ret = -1;
+ goto out;
+ }
+ }
+
ret = config_parse(words, wordcount, dict, cmdi, glob);
+ }
out:
if (slave_temp)
GF_FREE(slave_temp);
- if (ret) {
- if (dict)
- dict_unref(dict);
- } else
+ if (ret && dict)
+ dict_unref(dict);
+ else
*options = dict;
return ret;
@@ -3086,7 +3114,7 @@ cli_cmd_volume_profile_parse(const char **words, int wordcount,
gf1_cli_info_op info_op = GF_CLI_INFO_NONE;
gf_boolean_t is_peek = _gf_false;
- char *opwords[] = {"start", "stop", "info", NULL};
+ static char *opwords[] = {"start", "stop", "info", NULL};
char *w = NULL;
GF_ASSERT(words);
@@ -3187,8 +3215,9 @@ cli_cmd_volume_top_parse(const char **words, int wordcount, dict_t **options)
int count = 0;
gf_boolean_t nfs = _gf_false;
char *delimiter = NULL;
- char *opwords[] = {"open", "read", "write", "opendir", "readdir",
- "read-perf", "write-perf", "clear", NULL};
+ static char *opwords[] = {"open", "read", "write",
+ "opendir", "readdir", "read-perf",
+ "write-perf", "clear", NULL};
char *w = NULL;
GF_ASSERT(words);
@@ -3367,9 +3396,9 @@ cli_cmd_get_statusop(const char *arg)
int i = 0;
uint32_t ret = GF_CLI_STATUS_NONE;
char *w = NULL;
- char *opwords[] = {"detail", "mem", "clients", "fd", "inode",
- "callpool", "tasks", "client-list", NULL};
- struct {
+ static char *opwords[] = {"detail", "mem", "clients", "fd", "inode",
+ "callpool", "tasks", "client-list", NULL};
+ static struct {
char *opname;
uint32_t opcode;
} optable[] = {{"detail", GF_CLI_STATUS_DETAIL},
@@ -3567,9 +3596,9 @@ out:
gf_boolean_t
cli_cmd_validate_dumpoption(const char *arg, char **option)
{
- char *opwords[] = {"all", "nfs", "mem", "iobuf", "callpool",
- "priv", "fd", "inode", "history", "inodectx",
- "fdctx", "quotad", NULL};
+ static char *opwords[] = {"all", "nfs", "mem", "iobuf", "callpool",
+ "priv", "fd", "inode", "history", "inodectx",
+ "fdctx", "quotad", NULL};
char *w = NULL;
w = str_getunamb(arg, opwords);
@@ -3877,8 +3906,6 @@ heal_command_type_get(const char *command)
[GF_SHD_OP_HEAL_INDEX] = NULL,
[GF_SHD_OP_HEAL_FULL] = "full",
[GF_SHD_OP_INDEX_SUMMARY] = "info",
- [GF_SHD_OP_HEALED_FILES] = NULL,
- [GF_SHD_OP_HEAL_FAILED_FILES] = NULL,
[GF_SHD_OP_SPLIT_BRAIN_FILES] = NULL,
[GF_SHD_OP_STATISTICS] = "statistics",
[GF_SHD_OP_STATISTICS_HEAL_COUNT] = NULL,
@@ -5220,24 +5247,25 @@ cli_cmd_snapshot_parse(const char **words, int wordcount, dict_t **options,
dict_t *dict = NULL;
gf1_cli_snapshot type = GF_SNAP_OPTION_TYPE_NONE;
char *w = NULL;
- char *opwords[] = {"create", "delete", "restore", "activate",
- "deactivate", "list", "status", "config",
- "info", "clone", NULL};
- char *invalid_snapnames[] = {"description", "force", "volume", "all", NULL};
- char *invalid_volnames[] = {"volume",
- "type",
- "subvolumes",
- "option",
- "end-volume",
- "all",
- "volume_not_in_ring",
- "description",
- "force",
- "snap-max-hard-limit",
- "snap-max-soft-limit",
- "auto-delete",
- "activate-on-create",
- NULL};
+ static char *opwords[] = {"create", "delete", "restore", "activate",
+ "deactivate", "list", "status", "config",
+ "info", "clone", NULL};
+ static char *invalid_snapnames[] = {"description", "force", "volume", "all",
+ NULL};
+ static char *invalid_volnames[] = {"volume",
+ "type",
+ "subvolumes",
+ "option",
+ "end-volume",
+ "all",
+ "volume_not_in_ring",
+ "description",
+ "force",
+ "snap-max-hard-limit",
+ "snap-max-soft-limit",
+ "auto-delete",
+ "activate-on-create",
+ NULL};
GF_ASSERT(words);
GF_ASSERT(options);
@@ -5557,16 +5585,18 @@ cli_cmd_bitrot_parse(const char **words, int wordcount, dict_t **options)
int32_t ret = -1;
char *w = NULL;
char *volname = NULL;
- char *opwords[] = {
- "enable", "disable", "scrub-throttle", "scrub-frequency", "scrub",
- "signing-time", NULL};
- char *scrub_throt_values[] = {"lazy", "normal", "aggressive", NULL};
- char *scrub_freq_values[] = {"hourly", "daily", "weekly", "biweekly",
- "monthly", "minute", NULL};
- char *scrub_values[] = {"pause", "resume", "status", "ondemand", NULL};
+ static char *opwords[] = {"enable", "disable", "scrub-throttle",
+ "scrub-frequency", "scrub", "signing-time",
+ "signer-threads", NULL};
+ static char *scrub_throt_values[] = {"lazy", "normal", "aggressive", NULL};
+ static char *scrub_freq_values[] = {
+ "hourly", "daily", "weekly", "biweekly", "monthly", "minute", NULL};
+ static char *scrub_values[] = {"pause", "resume", "status", "ondemand",
+ NULL};
dict_t *dict = NULL;
gf_bitrot_type type = GF_BITROT_OPTION_TYPE_NONE;
int32_t expiry_time = 0;
+ int32_t signer_th_count = 0;
GF_ASSERT(words);
GF_ASSERT(options);
@@ -5747,6 +5777,31 @@ cli_cmd_bitrot_parse(const char **words, int wordcount, dict_t **options)
}
goto set_type;
}
+ } else if (!strcmp(words[3], "signer-threads")) {
+ if (!words[4]) {
+ cli_err(
+ "Missing signer-thread value for bitrot "
+ "option");
+ ret = -1;
+ goto out;
+ } else {
+ type = GF_BITROT_OPTION_TYPE_SIGNER_THREADS;
+
+ signer_th_count = strtol(words[4], NULL, 0);
+ if (signer_th_count < 1) {
+ cli_err("signer-thread count should not be less than 1");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_uint32(dict, "signer-threads",
+ (unsigned int)signer_th_count);
+ if (ret) {
+ cli_out("Failed to set dict for bitrot");
+ goto out;
+ }
+ goto set_type;
+ }
} else {
cli_err(
"Invalid option %s for bitrot. Please enter valid "
@@ -5755,7 +5810,6 @@ cli_cmd_bitrot_parse(const char **words, int wordcount, dict_t **options)
ret = -1;
goto out;
}
-
set_type:
ret = dict_set_int32(dict, "type", type);
if (ret < 0)
@@ -5772,3 +5826,121 @@ out:
return ret;
}
+
+/* Parsing global option for NFS-Ganesha config
+ * gluster nfs-ganesha enable/disable */
+
+int32_t
+cli_cmd_ganesha_parse(struct cli_state *state, const char **words,
+ int wordcount, dict_t **options, char **op_errstr)
+{
+ dict_t *dict = NULL;
+ int ret = -1;
+ char *key = NULL;
+ char *value = NULL;
+ char *w = NULL;
+ static char *opwords[] = {"enable", "disable", NULL};
+ const char *question = NULL;
+ gf_answer_t answer = GF_ANSWER_NO;
+
+ GF_ASSERT(words);
+ GF_ASSERT(options);
+
+ dict = dict_new();
+
+ if (!dict)
+ goto out;
+
+ if (wordcount != 2)
+ goto out;
+
+ key = (char *)words[0];
+ value = (char *)words[1];
+
+ if (!key || !value) {
+ cli_out("Usage : nfs-ganesha <enable/disable>");
+ ret = -1;
+ goto out;
+ }
+
+ ret = gf_strip_whitespace(value, strlen(value));
+ if (ret == -1)
+ goto out;
+
+ if (strcmp(key, "nfs-ganesha")) {
+ gf_asprintf(op_errstr,
+ "Global option: error: ' %s '"
+ "is not a valid global option.",
+ key);
+ ret = -1;
+ goto out;
+ }
+
+ w = str_getunamb(value, opwords);
+ if (!w) {
+ cli_out(
+ "Invalid global option \n"
+ "Usage : nfs-ganesha <enable/disable>");
+ ret = -1;
+ goto out;
+ }
+
+ if (strcmp(value, "enable") == 0) {
+ question =
+ "Enabling NFS-Ganesha requires Gluster-NFS to be "
+ "disabled across the trusted pool. Do you "
+ "still want to continue?\n";
+ } else if (strcmp(value, "disable") == 0) {
+ question =
+ "Disabling NFS-Ganesha will tear down the entire "
+ "ganesha cluster across the trusted pool. Do you "
+ "still want to continue?\n";
+ } else {
+ ret = -1;
+ goto out;
+ }
+ answer = cli_cmd_get_confirmation(state, question);
+ if (GF_ANSWER_NO == answer) {
+ gf_log("cli", GF_LOG_ERROR,
+ "Global operation "
+ "cancelled, exiting");
+ ret = -1;
+ goto out;
+ }
+ cli_out("This will take a few minutes to complete. Please wait ..");
+
+ ret = dict_set_str(dict, "key", key);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR, "dict set on key failed");
+ goto out;
+ }
+
+ ret = dict_set_str(dict, "value", value);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR, "dict set on value failed");
+ goto out;
+ }
+
+ ret = dict_set_str(dict, "globalname", "All");
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "dict set on global"
+ " key failed.");
+ goto out;
+ }
+
+ ret = dict_set_int32(dict, "hold_global_locks", _gf_true);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR,
+ "dict set on global key "
+ "failed.");
+ goto out;
+ }
+
+ *options = dict;
+out:
+ if (ret)
+ dict_unref(dict);
+
+ return ret;
+}
diff --git a/cli/src/cli-cmd-peer.c b/cli/src/cli-cmd-peer.c
index e42a1139b87..084998701d8 100644
--- a/cli/src/cli-cmd-peer.c
+++ b/cli/src/cli-cmd-peer.c
@@ -20,10 +20,6 @@
#include "protocol-common.h"
#include <glusterfs/events.h>
-extern struct rpc_clnt *global_rpc;
-
-extern rpc_clnt_prog_t *cli_rpc_prog;
-
int
cli_cmd_peer_help_cbk(struct cli_state *state, struct cli_cmd_word *in_word,
const char **words, int wordcount);
@@ -48,10 +44,6 @@ cli_cmd_peer_probe_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_PROBE];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
dict = dict_new();
if (!dict)
goto out;
@@ -77,6 +69,12 @@ cli_cmd_peer_probe_cbk(struct cli_state *state, struct cli_cmd_word *word,
}
*/
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, dict);
if (proc->fn) {
@@ -127,10 +125,6 @@ cli_cmd_peer_deprobe_cbk(struct cli_state *state, struct cli_cmd_word *word,
"want to proceed?";
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DEPROBE];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
dict = dict_new();
ret = dict_set_str(dict, "hostname", (char *)words[2]);
@@ -162,6 +156,12 @@ cli_cmd_peer_deprobe_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, dict);
if (proc->fn) {
diff --git a/cli/src/cli-cmd-snapshot.c b/cli/src/cli-cmd-snapshot.c
index 814ab82f6eb..859d6b2e40d 100644
--- a/cli/src/cli-cmd-snapshot.c
+++ b/cli/src/cli-cmd-snapshot.c
@@ -17,8 +17,6 @@
#include "cli-cmd.h"
#include "cli-mem-types.h"
-extern rpc_clnt_prog_t *cli_rpc_prog;
-
int
cli_cmd_snapshot_help_cbk(struct cli_state *state, struct cli_cmd_word *in_word,
const char **words, int wordcount);
@@ -36,12 +34,6 @@ cli_cmd_snapshot_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_SNAP];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (frame == NULL) {
- ret = -1;
- goto out;
- }
-
/* Parses the command entered by the user */
ret = cli_cmd_snapshot_parse(words, wordcount, &options, state);
if (ret) {
@@ -55,6 +47,12 @@ cli_cmd_snapshot_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (frame == NULL) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn)
diff --git a/cli/src/cli-cmd-system.c b/cli/src/cli-cmd-system.c
index cb3a9ea7484..801e8f4efed 100644
--- a/cli/src/cli-cmd-system.c
+++ b/cli/src/cli-cmd-system.c
@@ -18,10 +18,6 @@
#include "cli-mem-types.h"
#include "protocol-common.h"
-extern struct rpc_clnt *global_rpc;
-
-extern rpc_clnt_prog_t *cli_rpc_prog;
-
int
cli_cmd_system_help_cbk(struct cli_state *state, struct cli_cmd_word *in_word,
const char **words, int wordcount);
@@ -43,25 +39,26 @@ cli_cmd_getspec_cbk(struct cli_state *state, struct cli_cmd_word *word,
call_frame_t *frame = NULL;
dict_t *dict = NULL;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
+ if (wordcount != 3) {
+ cli_usage_out(word->pattern);
goto out;
+ }
dict = dict_new();
if (!dict)
goto out;
- if (wordcount != 3) {
- cli_usage_out(word->pattern);
- goto out;
- }
-
ret = dict_set_str(dict, "volid", (char *)words[2]);
if (ret)
goto out;
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GETSPEC];
if (proc->fn) {
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
ret = proc->fn(frame, THIS, dict);
}
@@ -74,6 +71,7 @@ out:
if (dict)
dict_unref(dict);
+ CLI_STACK_DESTROY(frame);
return ret;
}
@@ -86,25 +84,26 @@ cli_cmd_pmap_b2p_cbk(struct cli_state *state, struct cli_cmd_word *word,
call_frame_t *frame = NULL;
dict_t *dict = NULL;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
+ if (wordcount != 4) {
+ cli_usage_out(word->pattern);
goto out;
+ }
dict = dict_new();
if (!dict)
goto out;
- if (wordcount != 4) {
- cli_usage_out(word->pattern);
- goto out;
- }
-
ret = dict_set_str(dict, "brick", (char *)words[3]);
if (ret)
goto out;
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_PMAP_PORTBYBRICK];
if (proc->fn) {
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
ret = proc->fn(frame, THIS, dict);
}
@@ -116,6 +115,8 @@ out:
if (dict)
dict_unref(dict);
+
+ CLI_STACK_DESTROY(frame);
return ret;
}
@@ -175,6 +176,7 @@ make_seq_dict(int argc, char **argv)
{
char index[] = "4294967296"; // 1<<32
int i = 0;
+ int len;
int ret = 0;
dict_t *dict = dict_new();
@@ -182,8 +184,8 @@ make_seq_dict(int argc, char **argv)
return NULL;
for (i = 0; i < argc; i++) {
- snprintf(index, sizeof(index), "%d", i);
- ret = dict_set_str(dict, index, argv[i]);
+ len = snprintf(index, sizeof(index), "%d", i);
+ ret = dict_set_strn(dict, index, len, argv[i]);
if (ret == -1)
break;
}
@@ -395,7 +397,7 @@ out:
return ret;
}
-struct cli_cmd cli_system_cmds[] = {
+static struct cli_cmd cli_system_cmds[] = {
{"system:: getspec <VOLNAME>", cli_cmd_getspec_cbk,
"fetch the volume file for the volume <VOLNAME>"},
@@ -439,6 +441,7 @@ cli_cmd_sys_exec_cbk(struct cli_state *state, struct cli_cmd_word *word,
char *tmp = NULL;
int ret = -1;
int i = -1;
+ int len;
int cmd_args_count = 0;
int in_cmd_args_count = 0;
rpc_clnt_procedure_t *proc = NULL;
@@ -451,15 +454,16 @@ cli_cmd_sys_exec_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
- dict = dict_new();
- if (!dict)
- goto out;
-
command = strtok_r((char *)words[2], " ", &saveptr);
if (command == NULL) {
gf_log("cli", GF_LOG_ERROR, "Failed to parse command");
goto out;
}
+
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
do {
tmp = strtok_r(NULL, " ", &saveptr);
if (tmp) {
@@ -487,9 +491,9 @@ cli_cmd_sys_exec_cbk(struct cli_state *state, struct cli_cmd_word *word,
for (i = 1; i <= cmd_args_count; i++) {
in_cmd_args_count++;
- snprintf(cmd_arg_name, sizeof(cmd_arg_name), "cmd_arg_%d",
- in_cmd_args_count);
- ret = dict_set_str(dict, cmd_arg_name, (char *)words[2 + i]);
+ len = snprintf(cmd_arg_name, sizeof(cmd_arg_name), "cmd_arg_%d",
+ in_cmd_args_count);
+ ret = dict_set_strn(dict, cmd_arg_name, len, (char *)words[2 + i]);
if (ret) {
gf_log("", GF_LOG_ERROR, "Unable to set %s in dict", cmd_arg_name);
goto out;
diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
index f454b097aa7..f238851586e 100644
--- a/cli/src/cli-cmd-volume.c
+++ b/cli/src/cli-cmd-volume.c
@@ -28,10 +28,6 @@
#include <glusterfs/common-utils.h>
#include <glusterfs/events.h>
-extern struct rpc_clnt *global_rpc;
-extern struct rpc_clnt *global_quotad_rpc;
-
-extern rpc_clnt_prog_t *cli_rpc_prog;
extern rpc_clnt_prog_t cli_quotad_clnt;
static int
@@ -65,10 +61,6 @@ cli_cmd_volume_info_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_VOLUME];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
if ((wordcount == 2) || (wordcount == 3 && !strcmp(words[2], "all"))) {
ctx.flags = GF_CLI_GET_NEXT_VOLUME;
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_NEXT_VOLUME];
@@ -91,6 +83,10 @@ cli_cmd_volume_info_cbk(struct cli_state *state, struct cli_cmd_word *word,
if (!local)
goto out;
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
+
local->get_vol.flags = ctx.flags;
if (ctx.volname)
local->get_vol.volname = gf_strdup(ctx.volname);
@@ -216,10 +212,6 @@ cli_cmd_volume_create_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_CREATE_VOLUME];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_create_parse(state, words, wordcount, &options,
&bricks);
@@ -245,6 +237,12 @@ cli_cmd_volume_create_cbk(struct cli_state *state, struct cli_cmd_word *word,
}
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -287,14 +285,6 @@ cli_cmd_volume_delete_cbk(struct cli_state *state, struct cli_cmd_word *word,
"Do you want to continue?";
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DELETE_VOLUME];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
- dict = dict_new();
- if (!dict)
- goto out;
-
if (wordcount != 3) {
cli_usage_out(word->pattern);
parse_error = 1;
@@ -303,6 +293,10 @@ cli_cmd_volume_delete_cbk(struct cli_state *state, struct cli_cmd_word *word,
volname = (char *)words[2];
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
ret = dict_set_str(dict, "volname", volname);
if (ret) {
gf_log(THIS->name, GF_LOG_WARNING, "dict set failed");
@@ -324,6 +318,12 @@ cli_cmd_volume_delete_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, dict);
if (proc->fn) {
@@ -359,30 +359,15 @@ cli_cmd_volume_start_cbk(struct cli_state *state, struct cli_cmd_word *word,
int flags = 0;
cli_local_t *local = NULL;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
if (wordcount < 3 || wordcount > 4) {
cli_usage_out(word->pattern);
parse_error = 1;
goto out;
}
- dict = dict_new();
- if (!dict) {
- goto out;
- }
-
if (!words[2])
goto out;
- ret = dict_set_str(dict, "volname", (char *)words[2]);
- if (ret) {
- gf_log(THIS->name, GF_LOG_ERROR, "dict set failed");
- goto out;
- }
-
if (wordcount == 4) {
if (!strcmp("force", words[3])) {
flags |= GF_CLI_FLAG_OP_FORCE;
@@ -393,6 +378,18 @@ cli_cmd_volume_start_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
}
+
+ dict = dict_new();
+ if (!dict) {
+ goto out;
+ }
+
+ ret = dict_set_str(dict, "volname", (char *)words[2]);
+ if (ret) {
+ gf_log(THIS->name, GF_LOG_ERROR, "dict set failed");
+ goto out;
+ }
+
ret = dict_set_int32(dict, "flags", flags);
if (ret) {
gf_log(THIS->name, GF_LOG_ERROR, "dict set failed");
@@ -401,6 +398,12 @@ cli_cmd_volume_start_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_START_VOLUME];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, dict);
if (proc->fn) {
@@ -487,10 +490,6 @@ cli_cmd_volume_stop_cbk(struct cli_state *state, struct cli_cmd_word *word,
"Stopping volume will make its data inaccessible. "
"Do you want to continue?";
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
if (wordcount < 3 || wordcount > 4) {
cli_usage_out(word->pattern);
parse_error = 1;
@@ -541,6 +540,12 @@ cli_cmd_volume_stop_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_STOP_VOLUME];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, dict);
if (proc->fn) {
@@ -577,20 +582,16 @@ cli_cmd_volume_rename_cbk(struct cli_state *state, struct cli_cmd_word *word,
int sent = 0;
int parse_error = 0;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
- dict = dict_new();
- if (!dict)
- goto out;
-
if (wordcount != 4) {
cli_usage_out(word->pattern);
parse_error = 1;
goto out;
}
+ dict = dict_new();
+ if (!dict)
+ goto out;
+
ret = dict_set_str(dict, "old-volname", (char *)words[2]);
if (ret)
@@ -604,6 +605,11 @@ cli_cmd_volume_rename_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_RENAME_VOLUME];
if (proc->fn) {
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
ret = proc->fn(frame, THIS, dict);
}
@@ -642,10 +648,6 @@ cli_cmd_volume_defrag_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
#endif
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_defrag_parse(words, wordcount, &dict);
if (ret) {
@@ -655,6 +657,12 @@ cli_cmd_volume_defrag_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_DEFRAG_VOLUME];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, dict);
if (proc->fn) {
@@ -703,10 +711,6 @@ cli_cmd_volume_reset_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_RESET_VOLUME];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_reset_parse(words, wordcount, &options);
if (ret) {
cli_usage_out(word->pattern);
@@ -714,6 +718,12 @@ cli_cmd_volume_reset_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -818,10 +828,6 @@ cli_cmd_volume_set_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_SET_VOLUME];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_set_parse(state, words, wordcount, &options,
&op_errstr);
if (ret) {
@@ -835,6 +841,12 @@ cli_cmd_volume_set_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -1042,10 +1054,6 @@ cli_cmd_volume_add_brick_cbk(struct cli_state *state, struct cli_cmd_word *word,
"filesystem operations on the volume after the change. Do you "
"really want to continue with 'stripe' count option ? ";
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_add_brick_parse(state, words, wordcount, &options, 0);
if (ret) {
cli_usage_out(word->pattern);
@@ -1090,6 +1098,12 @@ cli_cmd_volume_add_brick_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_ADD_BRICK];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -1291,12 +1305,6 @@ cli_cmd_quota_handle_list_all(const char **words, dict_t *options)
goto out;
}
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame) {
- ret = -1;
- goto out;
- }
-
volname_dup = gf_strdup(volname);
if (!volname_dup) {
ret = -1;
@@ -1328,6 +1336,12 @@ cli_cmd_quota_handle_list_all(const char **words, dict_t *options)
if (ret)
goto out;
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, xdata);
proc = &cli_quotad_clnt.proctable[GF_AGGREGATOR_GETLIMIT];
@@ -1740,10 +1754,6 @@ cli_cmd_volume_remove_brick_cbk(struct cli_state *state,
int32_t command = GF_OP_CMD_NONE;
char *question = NULL;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_remove_brick_parse(state, words, wordcount, &options,
&need_question, &brick_count,
&command);
@@ -1809,6 +1819,12 @@ cli_cmd_volume_remove_brick_cbk(struct cli_state *state,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_REMOVE_BRICK];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -1855,10 +1871,6 @@ cli_cmd_volume_reset_brick_cbk(struct cli_state *state,
#endif
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_RESET_BRICK];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_reset_brick_parse(words, wordcount, &options);
if (ret) {
@@ -1877,6 +1889,12 @@ cli_cmd_volume_reset_brick_cbk(struct cli_state *state,
}
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -1923,10 +1941,6 @@ cli_cmd_volume_replace_brick_cbk(struct cli_state *state,
#endif
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_REPLACE_BRICK];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
ret = cli_cmd_volume_replace_brick_parse(words, wordcount, &options);
if (ret) {
@@ -1935,6 +1949,12 @@ cli_cmd_volume_replace_brick_cbk(struct cli_state *state,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -2030,8 +2050,7 @@ cli_cmd_log_rotate_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
- if (!((strcmp("rotate", words[2]) == 0) ||
- (strcmp("rotate", words[3]) == 0))) {
+ if (!(strcmp("rotate", words[3]) == 0)) {
cli_usage_out(word->pattern);
parse_error = 1;
goto out;
@@ -2039,6 +2058,10 @@ cli_cmd_log_rotate_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_LOG_ROTATE];
+ ret = cli_cmd_log_rotate_parse(words, wordcount, &options);
+ if (ret)
+ goto out;
+
frame = create_frame(THIS, THIS->ctx->pool);
if (!frame) {
gf_log(THIS->name, GF_LOG_ERROR, "failed to create frame");
@@ -2046,10 +2069,6 @@ cli_cmd_log_rotate_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
- ret = cli_cmd_log_rotate_parse(words, wordcount, &options);
- if (ret)
- goto out;
-
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -2151,13 +2170,7 @@ cli_cmd_volume_gsync_set_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GSYNC_SET];
- frame = create_frame(THIS, THIS->ctx->pool);
- if (frame == NULL) {
- ret = -1;
- goto out;
- }
-
- ret = cli_cmd_gsync_set_parse(words, wordcount, &options, &errstr);
+ ret = cli_cmd_gsync_set_parse(state, words, wordcount, &options, &errstr);
if (ret) {
if (errstr) {
cli_err("%s", errstr);
@@ -2169,6 +2182,12 @@ cli_cmd_volume_gsync_set_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (frame == NULL) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn)
@@ -2540,7 +2559,7 @@ cli_launch_glfs_heal(int heal_op, dict_t *options)
runinit(&runner);
ret = dict_get_str(options, "volname", &volname);
- runner_add_args(&runner, SBIN_DIR "/glfsheal", volname, NULL);
+ runner_add_args(&runner, GLFSHEAL_PREFIX "/glfsheal", volname, NULL);
runner_redir(&runner, STDOUT_FILENO, RUN_PIPE);
switch (heal_op) {
@@ -2615,9 +2634,6 @@ cli_cmd_volume_heal_cbk(struct cli_state *state, struct cli_cmd_word *word,
int heal_op = 0;
this = THIS;
- frame = create_frame(this, this->ctx->pool);
- if (!frame)
- goto out;
if (wordcount < 3) {
cli_usage_out(word->pattern);
@@ -2644,6 +2660,12 @@ cli_cmd_volume_heal_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_HEAL_VOLUME];
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -2678,10 +2700,6 @@ cli_cmd_volume_statedump_cbk(struct cli_state *state, struct cli_cmd_word *word,
int parse_error = 0;
cli_local_t *local = NULL;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
if (wordcount < 3) {
cli_usage_out(word->pattern);
parse_error = 1;
@@ -2707,6 +2725,12 @@ cli_cmd_volume_statedump_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_STATEDUMP_VOLUME];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -2734,12 +2758,11 @@ cli_cmd_volume_list_cbk(struct cli_state *state, struct cli_cmd_word *word,
rpc_clnt_procedure_t *proc = NULL;
int sent = 0;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_LIST_VOLUME];
if (proc->fn) {
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame)
+ goto out;
ret = proc->fn(frame, THIS, NULL);
}
@@ -2768,10 +2791,6 @@ cli_cmd_volume_clearlocks_cbk(struct cli_state *state,
int parse_error = 0;
cli_local_t *local = NULL;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
if (wordcount < 7 || wordcount > 8) {
cli_usage_out(word->pattern);
parse_error = 1;
@@ -2798,6 +2817,12 @@ cli_cmd_volume_clearlocks_cbk(struct cli_state *state,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_CLRLOCKS_VOLUME];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn) {
@@ -2828,10 +2853,6 @@ cli_cmd_volume_barrier_cbk(struct cli_state *state, struct cli_cmd_word *word,
int parse_error = 0;
cli_local_t *local = NULL;
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
if (wordcount != 4) {
cli_usage_out(word->pattern);
parse_error = 1;
@@ -2853,6 +2874,12 @@ cli_cmd_volume_barrier_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_BARRIER_VOLUME];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn)
@@ -2887,10 +2914,6 @@ cli_cmd_volume_getopt_cbk(struct cli_state *state, struct cli_cmd_word *word,
goto out;
}
- frame = create_frame(THIS, THIS->ctx->pool);
- if (!frame)
- goto out;
-
options = dict_new();
if (!options)
goto out;
@@ -2905,6 +2928,12 @@ cli_cmd_volume_getopt_cbk(struct cli_state *state, struct cli_cmd_word *word,
proc = &cli_rpc_prog->proctable[GLUSTER_CLI_GET_VOL_OPT];
+ frame = create_frame(THIS, THIS->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
+ }
+
CLI_LOCAL_INIT(local, words, frame, options);
if (proc->fn)
@@ -2934,6 +2963,16 @@ struct cli_cmd bitrot_cmds[] = {
{"volume bitrot <VOLNAME> {enable|disable}", NULL, /*cli_cmd_bitrot_cbk,*/
"Enable/disable bitrot for volume <VOLNAME>"},
+ {"volume bitrot <VOLNAME> signing-time <time-in-secs>",
+ NULL, /*cli_cmd_bitrot_cbk,*/
+ "Waiting time for an object after last fd is closed to start signing "
+ "process"},
+
+ {"volume bitrot <VOLNAME> signer-threads <count>",
+ NULL, /*cli_cmd_bitrot_cbk,*/
+ "Number of signing process threads. Usually set to number of available "
+ "cores"},
+
{"volume bitrot <VOLNAME> scrub-throttle {lazy|normal|aggressive}",
NULL, /*cli_cmd_bitrot_cbk,*/
"Set the speed of the scrubber for volume <VOLNAME>"},
@@ -2949,6 +2988,8 @@ struct cli_cmd bitrot_cmds[] = {
"the scrubber. ondemand starts the scrubber immediately."},
{"volume bitrot <VOLNAME> {enable|disable}\n"
+ "volume bitrot <VOLNAME> signing-time <time-in-secs>\n"
+ "volume bitrot <VOLNAME> signer-threads <count>\n"
"volume bitrot <volname> scrub-throttle {lazy|normal|aggressive}\n"
"volume bitrot <volname> scrub-frequency {hourly|daily|weekly|biweekly"
"|monthly}\n"
@@ -3043,18 +3084,14 @@ struct cli_cmd volume_cmds[] = {
{"volume set <VOLNAME> <KEY> <VALUE>", cli_cmd_volume_set_cbk,
"set options for volume <VOLNAME>"},
- {"volume set <VOLNAME> group <GROUP>", cli_cmd_volume_set_cbk,
- "This option can be used for setting multiple pre-defined volume options"
- "where group_name is a file under /var/lib/glusterd/groups containing one"
- "key, value pair per line"},
+ {"volume set <VOLNAME> group <GROUP>", cli_cmd_volume_set_cbk,
+ "This option can be used for setting multiple pre-defined volume options "
+ "where group_name is a file under /var/lib/glusterd/groups containing one "
+ "key value pair per line"},
{"volume log <VOLNAME> rotate [BRICK]", cli_cmd_log_rotate_cbk,
"rotate the log file for corresponding volume/brick"},
- {"volume log rotate <VOLNAME> [BRICK]", cli_cmd_log_rotate_cbk,
- "rotate the log file for corresponding volume/brick"
- " NOTE: This is an old syntax, will be deprecated from next release."},
-
{"volume sync <HOSTNAME> [all|<VOLNAME>]", cli_cmd_sync_volume_cbk,
"sync the volume information from a peer"},
@@ -3077,8 +3114,8 @@ struct cli_cmd volume_cmds[] = {
cli_cmd_volume_profile_cbk, "volume profile operations"},
{"volume top <VOLNAME> {open|read|write|opendir|readdir|clear} [nfs|brick "
- "<brick>] [list-cnt <value>] |\n"
- "volume top <VOLNAME> {read-perf|write-perf} [bs <size> count <count>] "
+ "<brick>] [list-cnt <value>] | "
+ "{read-perf|write-perf} [bs <size> count <count>] "
"[brick <brick>] [list-cnt <value>]",
cli_cmd_volume_top_cbk, "volume top operations"},
diff --git a/cli/src/cli-cmd.c b/cli/src/cli-cmd.c
index 2ee8b1b4968..2d458b16a56 100644
--- a/cli/src/cli-cmd.c
+++ b/cli/src/cli-cmd.c
@@ -28,11 +28,7 @@ static pthread_cond_t conn = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t conn_mutex = PTHREAD_MUTEX_INITIALIZER;
int cli_op_ret = 0;
-int connected = 0;
-
-int
-cli_cmd_log_help_cbk(struct cli_state *state, struct cli_cmd_word *in_word,
- const char **words, int wordcount);
+static gf_boolean_t connected = _gf_false;
static unsigned
cli_cmd_needs_connection(struct cli_cmd_word *word)
@@ -236,18 +232,6 @@ out:
}
int
-cli_cmd_cond_init()
-{
- pthread_mutex_init(&cond_mutex, NULL);
- pthread_cond_init(&cond, NULL);
-
- pthread_mutex_init(&conn_mutex, NULL);
- pthread_cond_init(&conn, NULL);
-
- return 0;
-}
-
-int
cli_cmd_lock()
{
pthread_mutex_lock(&cond_mutex);
@@ -344,19 +328,32 @@ cli_cmd_await_connected(unsigned conn_timo)
}
int32_t
-cli_cmd_broadcast_connected()
+cli_cmd_broadcast_connected(gf_boolean_t status)
{
pthread_mutex_lock(&conn_mutex);
{
- connected = 1;
+ connected = status;
pthread_cond_broadcast(&conn);
}
-
pthread_mutex_unlock(&conn_mutex);
return 0;
}
+gf_boolean_t
+cli_cmd_connected(void)
+{
+ gf_boolean_t status;
+
+ pthread_mutex_lock(&conn_mutex);
+ {
+ status = connected;
+ }
+ pthread_mutex_unlock(&conn_mutex);
+
+ return status;
+}
+
int
cli_cmd_submit(struct rpc_clnt *rpc, void *req, call_frame_t *frame,
rpc_clnt_prog_t *prog, int procnum, struct iobref *iobref,
@@ -366,7 +363,8 @@ cli_cmd_submit(struct rpc_clnt *rpc, void *req, call_frame_t *frame,
unsigned timeout = 0;
if ((GLUSTER_CLI_PROFILE_VOLUME == procnum) ||
- (GLUSTER_CLI_HEAL_VOLUME == procnum))
+ (GLUSTER_CLI_HEAL_VOLUME == procnum) ||
+ (GLUSTER_CLI_GANESHA == procnum))
timeout = cli_ten_minutes_timeout;
else
timeout = cli_default_conn_timeout;
diff --git a/cli/src/cli-cmd.h b/cli/src/cli-cmd.h
index b6bc5659936..c1c068c7085 100644
--- a/cli/src/cli-cmd.h
+++ b/cli/src/cli-cmd.h
@@ -102,9 +102,6 @@ int
cli_cmd_broadcast_response(int32_t status);
int
-cli_cmd_cond_init();
-
-int
cli_cmd_lock();
int
diff --git a/cli/src/cli-quotad-client.c b/cli/src/cli-quotad-client.c
index 52ab97ee815..772b8f75bd9 100644
--- a/cli/src/cli-quotad-client.c
+++ b/cli/src/cli-quotad-client.c
@@ -10,9 +10,6 @@
#include "cli-quotad-client.h"
-extern struct rpc_clnt global_quotad_rpc;
-extern struct rpc_clnt_program cli_quotad_clnt;
-
int
cli_quotad_submit_request(void *req, call_frame_t *frame, rpc_clnt_prog_t *prog,
int procnum, struct iobref *iobref, xlator_t *this,
@@ -60,7 +57,7 @@ cli_quotad_submit_request(void *req, call_frame_t *frame, rpc_clnt_prog_t *prog,
}
/* Send the msg */
- ret = rpc_clnt_submit(&global_quotad_rpc, prog, procnum, cbkfn, &iov, count,
+ ret = rpc_clnt_submit(global_quotad_rpc, prog, procnum, cbkfn, &iov, count,
NULL, 0, iobref, frame, NULL, 0, NULL, 0, NULL);
ret = 0;
diff --git a/cli/src/cli-rpc-ops.c b/cli/src/cli-rpc-ops.c
index 35985ab44c6..9b6b0c7fa50 100644
--- a/cli/src/cli-rpc-ops.c
+++ b/cli/src/cli-rpc-ops.c
@@ -19,6 +19,12 @@
#define INDENT_MAIN_HEAD "%-25s %s "
+#define RETURNING "Returning %d"
+#define XML_ERROR "Error outputting to xml"
+#define XDR_DECODE_FAIL "Failed to decode xdr response"
+#define DICT_SERIALIZE_FAIL "Failed to serialize to data to dictionary"
+#define DICT_UNSERIALIZE_FAIL "Failed to unserialize the dictionary"
+
/* Do not show estimates if greater than this number */
#define REBAL_ESTIMATE_SEC_UPPER_LIMIT (60 * 24 * 3600)
#define REBAL_ESTIMATE_START_TIME 600
@@ -29,32 +35,21 @@
#include <sys/uio.h>
#include <stdlib.h>
#include <sys/mount.h>
-#include "cli1-xdr.h"
-#include "xdr-generic.h"
-#include "protocol-common.h"
-#include "cli-mem-types.h"
#include <glusterfs/compat.h>
-#include <glusterfs/upcall-utils.h>
-
+#include "cli-mem-types.h"
#include <glusterfs/syscall.h>
#include "glusterfs3.h"
#include "portmap-xdr.h"
#include <glusterfs/byte-order.h>
-#include "cli-quotad-client.h"
#include <glusterfs/run.h>
-#include <glusterfs/quota-common-utils.h>
#include <glusterfs/events.h>
enum gf_task_types { GF_TASK_TYPE_REBALANCE, GF_TASK_TYPE_REMOVE_BRICK };
-extern struct rpc_clnt *global_quotad_rpc;
-extern rpc_clnt_prog_t cli_quotad_clnt;
-extern rpc_clnt_prog_t *cli_rpc_prog;
-extern int cli_op_ret;
-extern int connected;
+rpc_clnt_prog_t cli_quotad_clnt;
-int32_t
+static int32_t
gf_cli_remove_brick(call_frame_t *frame, xlator_t *this, void *data);
char *cli_vol_status_str[] = {
@@ -74,27 +69,27 @@ char *cli_vol_task_status_str[] = {"not started",
"fix-layout failed",
"unknown"};
-int32_t
+static int32_t
gf_cli_snapshot(call_frame_t *frame, xlator_t *this, void *data);
-int32_t
+static int32_t
gf_cli_get_volume(call_frame_t *frame, xlator_t *this, void *data);
-int
+static int
cli_to_glusterd(gf_cli_req *req, call_frame_t *frame, fop_cbk_fn_t cbkfn,
xdrproc_t xdrproc, dict_t *dict, int procnum, xlator_t *this,
rpc_clnt_prog_t *prog, struct iobref *iobref);
-int
+static int
add_cli_cmd_timeout_to_dict(dict_t *dict);
-rpc_clnt_prog_t cli_handshake_prog = {
+static rpc_clnt_prog_t cli_handshake_prog = {
.progname = "cli handshake",
.prognum = GLUSTER_HNDSK_PROGRAM,
.progver = GLUSTER_HNDSK_VERSION,
};
-rpc_clnt_prog_t cli_pmap_prog = {
+static rpc_clnt_prog_t cli_pmap_prog = {
.progname = "cli portmap",
.prognum = GLUSTER_PMAP_PROGRAM,
.progver = GLUSTER_PMAP_VERSION,
@@ -133,7 +128,7 @@ gf_free_xdr_fsm_log_rsp(gf1_cli_fsm_log_rsp rsp)
}
}
-int
+static int
gf_cli_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -141,9 +136,7 @@ gf_cli_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
0,
};
int ret = -1;
- char msg[1024] = {
- 0,
- };
+ char msg[1024] = "success";
GF_ASSERT(myframe);
@@ -154,7 +147,7 @@ gf_cli_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
// rsp.op_ret = -1;
// rsp.op_errno = EINVAL;
goto out;
@@ -162,22 +155,23 @@ gf_cli_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
gf_log("cli", GF_LOG_INFO, "Received resp to probe");
- if (rsp.op_errstr && (strlen(rsp.op_errstr) > 0)) {
+ if (rsp.op_errstr && rsp.op_errstr[0] != '\0') {
snprintf(msg, sizeof(msg), "%s", rsp.op_errstr);
- if (rsp.op_ret)
+ if (rsp.op_ret) {
gf_log("cli", GF_LOG_ERROR, "%s", msg);
+ }
}
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_str(NULL, (rsp.op_ret) ? NULL : msg, rsp.op_ret,
rsp.op_errno, (rsp.op_ret) ? msg : NULL);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
if (!rsp.op_ret)
- cli_out("peer probe: success. %s", msg);
+ cli_out("peer probe: %s", msg);
else
cli_err("peer probe: failed: %s", msg);
@@ -189,7 +183,7 @@ out:
return ret;
}
-int
+static int
gf_cli_deprobe_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -197,9 +191,7 @@ gf_cli_deprobe_cbk(struct rpc_req *req, struct iovec *iov, int count,
0,
};
int ret = -1;
- char msg[1024] = {
- 0,
- };
+ char msg[1024] = "success";
GF_ASSERT(myframe);
@@ -210,7 +202,7 @@ gf_cli_deprobe_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
// rsp.op_ret = -1;
// rsp.op_errno = EINVAL;
goto out;
@@ -219,19 +211,17 @@ gf_cli_deprobe_cbk(struct rpc_req *req, struct iovec *iov, int count,
gf_log("cli", GF_LOG_INFO, "Received resp to deprobe");
if (rsp.op_ret) {
- if (strlen(rsp.op_errstr) > 0) {
+ if (rsp.op_errstr[0] != '\0') {
snprintf(msg, sizeof(msg), "%s", rsp.op_errstr);
gf_log("cli", GF_LOG_ERROR, "%s", rsp.op_errstr);
}
- } else {
- snprintf(msg, sizeof(msg), "success");
}
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_str(NULL, (rsp.op_ret) ? NULL : msg, rsp.op_ret,
rsp.op_errno, (rsp.op_ret) ? msg : NULL);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -248,11 +238,11 @@ out:
return ret;
}
-int
-gf_cli_output_peer_hostnames(dict_t *dict, int count, char *prefix)
+static int
+gf_cli_output_peer_hostnames(dict_t *dict, int count, const char *prefix)
{
int ret = -1;
- char key[256] = {
+ char key[512] = {
0,
};
int i = 0;
@@ -263,8 +253,8 @@ gf_cli_output_peer_hostnames(dict_t *dict, int count, char *prefix)
* as friend.hostname
*/
for (i = 1; i < count; i++) {
- snprintf(key, sizeof(key), "%s.hostname%d", prefix, i);
- ret = dict_get_str(dict, key, &hostname);
+ ret = snprintf(key, sizeof(key), "%s.hostname%d", prefix, i);
+ ret = dict_get_strn(dict, key, ret, &hostname);
if (ret)
break;
cli_out("%s", hostname);
@@ -274,7 +264,7 @@ gf_cli_output_peer_hostnames(dict_t *dict, int count, char *prefix)
return ret;
}
-int
+static int
gf_cli_output_peer_status(dict_t *dict, int count)
{
int ret = -1;
@@ -284,26 +274,27 @@ gf_cli_output_peer_status(dict_t *dict, int count)
char key[256] = {
0,
};
+ int keylen;
char *state = NULL;
int32_t connected = 0;
- char *connected_str = NULL;
+ const char *connected_str = NULL;
int hostname_count = 0;
cli_out("Number of Peers: %d", count);
i = 1;
while (i <= count) {
- snprintf(key, 256, "friend%d.uuid", i);
- ret = dict_get_str(dict, key, &uuid_buf);
+ keylen = snprintf(key, sizeof(key), "friend%d.uuid", i);
+ ret = dict_get_strn(dict, key, keylen, &uuid_buf);
if (ret)
goto out;
- snprintf(key, 256, "friend%d.hostname", i);
- ret = dict_get_str(dict, key, &hostname_buf);
+ keylen = snprintf(key, sizeof(key), "friend%d.hostname", i);
+ ret = dict_get_strn(dict, key, keylen, &hostname_buf);
if (ret)
goto out;
- snprintf(key, 256, "friend%d.connected", i);
- ret = dict_get_int32(dict, key, &connected);
+ keylen = snprintf(key, sizeof(key), "friend%d.connected", i);
+ ret = dict_get_int32n(dict, key, keylen, &connected);
if (ret)
goto out;
if (connected)
@@ -311,16 +302,16 @@ gf_cli_output_peer_status(dict_t *dict, int count)
else
connected_str = "Disconnected";
- snprintf(key, 256, "friend%d.state", i);
- ret = dict_get_str(dict, key, &state);
+ keylen = snprintf(key, sizeof(key), "friend%d.state", i);
+ ret = dict_get_strn(dict, key, keylen, &state);
if (ret)
goto out;
cli_out("\nHostname: %s\nUuid: %s\nState: %s (%s)", hostname_buf,
uuid_buf, state, connected_str);
- snprintf(key, sizeof(key), "friend%d.hostname_count", i);
- ret = dict_get_int32(dict, key, &hostname_count);
+ keylen = snprintf(key, sizeof(key), "friend%d.hostname_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &hostname_count);
/* Print other addresses only if there are more than 1.
*/
if ((ret == 0) && (hostname_count > 1)) {
@@ -340,7 +331,7 @@ out:
return ret;
}
-int
+static int
gf_cli_output_pool_list(dict_t *dict, int count)
{
int ret = -1;
@@ -348,18 +339,19 @@ gf_cli_output_pool_list(dict_t *dict, int count)
char *hostname_buf = NULL;
int32_t hostname_len = 8; /*min len 8 chars*/
int32_t i = 1;
- char key[256] = {
+ char key[64] = {
0,
};
+ int keylen;
int32_t connected = 0;
- char *connected_str = NULL;
+ const char *connected_str = NULL;
if (count <= 0)
goto out;
while (i <= count) {
- snprintf(key, 256, "friend%d.hostname", i);
- ret = dict_get_str(dict, key, &hostname_buf);
+ keylen = snprintf(key, sizeof(key), "friend%d.hostname", i);
+ ret = dict_get_strn(dict, key, keylen, &hostname_buf);
if (ret)
goto out;
@@ -374,18 +366,18 @@ gf_cli_output_pool_list(dict_t *dict, int count)
i = 1;
while (i <= count) {
- snprintf(key, 256, "friend%d.uuid", i);
- ret = dict_get_str(dict, key, &uuid_buf);
+ keylen = snprintf(key, sizeof(key), "friend%d.uuid", i);
+ ret = dict_get_strn(dict, key, keylen, &uuid_buf);
if (ret)
goto out;
- snprintf(key, 256, "friend%d.hostname", i);
- ret = dict_get_str(dict, key, &hostname_buf);
+ keylen = snprintf(key, sizeof(key), "friend%d.hostname", i);
+ ret = dict_get_strn(dict, key, keylen, &hostname_buf);
if (ret)
goto out;
- snprintf(key, 256, "friend%d.connected", i);
- ret = dict_get_int32(dict, key, &connected);
+ keylen = snprintf(key, sizeof(key), "friend%d.connected", i);
+ ret = dict_get_int32n(dict, key, keylen, &connected);
if (ret)
goto out;
if (connected)
@@ -406,7 +398,7 @@ out:
/* function pointer for gf_cli_output_{pool_list,peer_status} */
typedef int (*cli_friend_output_fn)(dict_t *, int);
-int
+static int
gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -418,11 +410,15 @@ gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count,
char msg[1024] = {
0,
};
- char *cmd = NULL;
+ const char *cmd = NULL;
cli_friend_output_fn friend_output_fn;
call_frame_t *frame = NULL;
unsigned long flags = 0;
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
GF_ASSERT(myframe);
frame = myframe;
@@ -440,14 +436,9 @@ gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count,
/* 'free' the flags set by gf_cli_list_friends */
frame->local = NULL;
- if (-1 == req->rpc_status) {
- goto out;
- }
-
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_peer_list_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
// rsp.op_ret = -1;
// rsp.op_errno = EINVAL;
goto out;
@@ -462,7 +453,7 @@ gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_peer_status(dict, rsp.op_ret, rsp.op_errno,
msg);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
cli_err("%s", msg);
@@ -481,7 +472,7 @@ gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count,
&dict);
if (ret) {
- gf_log("", GF_LOG_ERROR, "Unable to allocate memory");
+ gf_log("", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
@@ -489,11 +480,11 @@ gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_peer_status(dict, rsp.op_ret, rsp.op_errno,
msg);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
- ret = dict_get_int32(dict, "count", &count);
+ ret = dict_get_int32_sizen(dict, "count", &count);
if (ret) {
goto out;
}
@@ -507,7 +498,7 @@ gf_cli_list_friends_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_peer_status(dict, rsp.op_ret, rsp.op_errno,
NULL);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
} else {
ret = -1;
}
@@ -531,7 +522,7 @@ out:
return ret;
}
-int
+static int
gf_cli_get_state_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -551,7 +542,7 @@ gf_cli_get_state_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -574,11 +565,11 @@ gf_cli_get_state_cbk(struct rpc_req *req, struct iovec *iov, int count,
"Failed to get daemon state. Check glusterd"
" log file for more details");
} else {
- ret = dict_get_str(dict, "daemon", &daemon_name);
+ ret = dict_get_str_sizen(dict, "daemon", &daemon_name);
if (ret)
gf_log("cli", GF_LOG_ERROR, "Couldn't get daemon name");
- ret = dict_get_str(dict, "ofilepath", &ofilepath);
+ ret = dict_get_str_sizen(dict, "ofilepath", &ofilepath);
if (ret)
gf_log("cli", GF_LOG_ERROR, "Couldn't get filepath");
@@ -598,7 +589,7 @@ out:
return ret;
}
-void
+static void
cli_out_options(char *substr, char *optstr, char *valstr)
{
char *ptr1 = NULL;
@@ -649,22 +640,24 @@ static int
print_brick_details(dict_t *dict, int volcount, int start_index, int end_index,
int replica_count)
{
- char key[1024] = {
+ char key[64] = {
0,
};
+ int keylen;
int index = start_index;
int isArbiter = 0;
int ret = -1;
char *brick = NULL;
while (index <= end_index) {
- snprintf(key, 1024, "volume%d.brick%d", volcount, index);
- ret = dict_get_str(dict, key, &brick);
+ keylen = snprintf(key, sizeof(key), "volume%d.brick%d", volcount,
+ index);
+ ret = dict_get_strn(dict, key, keylen, &brick);
if (ret)
goto out;
- snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter", volcount,
- index);
- if (dict_get(dict, key))
+ keylen = snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter",
+ volcount, index);
+ if (dict_getn(dict, key, keylen))
isArbiter = 1;
else
isArbiter = 0;
@@ -679,7 +672,8 @@ print_brick_details(dict_t *dict, int volcount, int start_index, int end_index,
out:
return ret;
}
-void
+
+static void
gf_cli_print_number_of_bricks(int type, int brick_count, int dist_count,
int stripe_count, int replica_count,
int disperse_count, int redundancy_count,
@@ -705,7 +699,7 @@ gf_cli_print_number_of_bricks(int type, int brick_count, int dist_count,
}
}
-int
+static int
gf_cli_get_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -731,7 +725,8 @@ gf_cli_get_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
char *ta_brick = NULL;
dict_t *dict = NULL;
cli_local_t *local = NULL;
- char key[1024] = {0};
+ char key[64] = {0};
+ int keylen;
char err_str[2048] = {0};
gf_cli_rsp rsp = {0};
char *caps __attribute__((unused)) = NULL;
@@ -751,8 +746,7 @@ gf_cli_get_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -776,11 +770,11 @@ gf_cli_get_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Unable to allocate memory");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
- ret = dict_get_int32(dict, "count", &count);
+ ret = dict_get_int32_sizen(dict, "count", &count);
if (ret)
goto out;
@@ -817,7 +811,7 @@ xml_output:
ret = cli_xml_output_vol_info_begin(local, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
}
@@ -825,7 +819,7 @@ xml_output:
if (dict) {
ret = cli_xml_output_vol_info(local, dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
}
@@ -833,80 +827,80 @@ xml_output:
if (local->get_vol.flags == GF_CLI_GET_VOLUME) {
ret = cli_xml_output_vol_info_end(local);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
}
goto out;
}
while (i < count) {
cli_out(" ");
- snprintf(key, 256, "volume%d.name", i);
- ret = dict_get_str(dict, key, &volname);
+ keylen = snprintf(key, sizeof(key), "volume%d.name", i);
+ ret = dict_get_strn(dict, key, keylen, &volname);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.type", i);
- ret = dict_get_int32(dict, key, &type);
+ keylen = snprintf(key, sizeof(key), "volume%d.type", i);
+ ret = dict_get_int32n(dict, key, keylen, &type);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.status", i);
- ret = dict_get_int32(dict, key, &status);
+ keylen = snprintf(key, sizeof(key), "volume%d.status", i);
+ ret = dict_get_int32n(dict, key, keylen, &status);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.brick_count", i);
- ret = dict_get_int32(dict, key, &brick_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.brick_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &brick_count);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.dist_count", i);
- ret = dict_get_int32(dict, key, &dist_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.dist_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &dist_count);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.stripe_count", i);
- ret = dict_get_int32(dict, key, &stripe_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.stripe_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &stripe_count);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.replica_count", i);
- ret = dict_get_int32(dict, key, &replica_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.replica_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &replica_count);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.disperse_count", i);
- ret = dict_get_int32(dict, key, &disperse_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.disperse_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &disperse_count);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.redundancy_count", i);
- ret = dict_get_int32(dict, key, &redundancy_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.redundancy_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &redundancy_count);
if (ret)
goto out;
- snprintf(key, sizeof(key), "volume%d.arbiter_count", i);
- ret = dict_get_int32(dict, key, &arbiter_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.arbiter_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &arbiter_count);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.transport", i);
- ret = dict_get_int32(dict, key, &transport);
+ keylen = snprintf(key, sizeof(key), "volume%d.transport", i);
+ ret = dict_get_int32n(dict, key, keylen, &transport);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.volume_id", i);
- ret = dict_get_str(dict, key, &volume_id_str);
+ keylen = snprintf(key, sizeof(key), "volume%d.volume_id", i);
+ ret = dict_get_strn(dict, key, keylen, &volume_id_str);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.snap_count", i);
- ret = dict_get_int32(dict, key, &snap_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.snap_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &snap_count);
if (ret)
goto out;
- snprintf(key, 256, "volume%d.thin_arbiter_count", i);
- ret = dict_get_int32(dict, key, &thin_arbiter_count);
+ keylen = snprintf(key, sizeof(key), "volume%d.thin_arbiter_count", i);
+ ret = dict_get_int32n(dict, key, keylen, &thin_arbiter_count);
if (ret)
goto out;
@@ -937,14 +931,14 @@ xml_output:
goto out;
if (thin_arbiter_count) {
- snprintf(key, 1024, "volume%d.thin_arbiter_brick", i);
+ snprintf(key, sizeof(key), "volume%d.thin_arbiter_brick", i);
ret = dict_get_str(dict, key, &ta_brick);
if (ret)
goto out;
cli_out("Thin-arbiter-path: %s", ta_brick);
}
- snprintf(key, 256, "volume%d.opt_count", i);
+ snprintf(key, sizeof(key), "volume%d.opt_count", i);
ret = dict_get_int32(dict, key, &opt_count);
if (ret)
goto out;
@@ -954,7 +948,7 @@ xml_output:
cli_out("Options Reconfigured:");
- snprintf(key, 256, "volume%d.option.", i);
+ snprintf(key, sizeof(key), "volume%d.option.", i);
ret = dict_foreach(dict, _gf_cli_output_volinfo_opts, key);
if (ret)
@@ -975,11 +969,11 @@ out:
gf_free_xdr_cli_rsp(rsp);
- gf_log("cli", GF_LOG_DEBUG, "Returning: %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int
+static int
gf_cli_create_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1006,24 +1000,19 @@ gf_cli_create_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
gf_log("cli", GF_LOG_INFO, "Received resp to create volume");
- ret = dict_get_str(local->dict, "volname", &volname);
- if (ret)
- goto out;
-
if (global_state->mode & GLUSTER_MODE_XML) {
if (rsp.op_ret == 0) {
rsp_dict = dict_new();
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len,
&rsp_dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
@@ -1031,10 +1020,14 @@ gf_cli_create_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_create(rsp_dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
+ if (ret)
+ goto out;
+
if (rsp.op_ret && strcmp(rsp.op_errstr, ""))
cli_err("volume create: %s: failed: %s", volname, rsp.op_errstr);
else if (rsp.op_ret)
@@ -1056,7 +1049,7 @@ out:
return ret;
}
-int
+static int
gf_cli_delete_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1069,12 +1062,11 @@ gf_cli_delete_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
call_frame_t *frame = NULL;
dict_t *rsp_dict = NULL;
- GF_ASSERT(myframe);
-
if (-1 == req->rpc_status) {
goto out;
}
+ GF_ASSERT(myframe);
frame = myframe;
GF_ASSERT(frame->local);
@@ -1083,14 +1075,7 @@ gf_cli_delete_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
- goto out;
- }
-
- ret = dict_get_str(local->dict, "volname", &volname);
- if (ret) {
- gf_log(frame->this->name, GF_LOG_ERROR, "dict get failed");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -1102,7 +1087,7 @@ gf_cli_delete_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len,
&rsp_dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
@@ -1110,7 +1095,13 @@ gf_cli_delete_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_generic_volume("volDelete", rsp_dict, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
+ goto out;
+ }
+
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
+ if (ret) {
+ gf_log(frame->this->name, GF_LOG_ERROR, "dict get failed");
goto out;
}
@@ -1129,11 +1120,11 @@ out:
if (rsp_dict)
dict_unref(rsp_dict);
- gf_log("", GF_LOG_DEBUG, "Returning with %d", ret);
+ gf_log("", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int
+static int
gf_cli3_1_uuid_get_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1159,8 +1150,7 @@ gf_cli3_1_uuid_get_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -1176,17 +1166,7 @@ gf_cli3_1_uuid_get_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Failed to unserialize "
- "response for uuid get");
- goto out;
- }
-
- ret = dict_get_str(dict, "uuid", &uuid_str);
- if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Failed to get uuid "
- "from dictionary");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
@@ -1194,7 +1174,7 @@ gf_cli3_1_uuid_get_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_dict("uuidGenerate", dict, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -1205,6 +1185,11 @@ gf_cli3_1_uuid_get_cbk(struct rpc_req *req, struct iovec *iov, int count,
cli_err("%s", rsp.op_errstr);
} else {
+ ret = dict_get_str_sizen(dict, "uuid", &uuid_str);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR, "Failed to get uuid from dictionary");
+ goto out;
+ }
cli_out("UUID: %s", uuid_str);
}
ret = rsp.op_ret;
@@ -1217,11 +1202,11 @@ out:
if (dict)
dict_unref(dict);
- gf_log("", GF_LOG_DEBUG, "Returning with %d", ret);
+ gf_log("", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int
+static int
gf_cli3_1_uuid_reset_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1246,8 +1231,7 @@ gf_cli3_1_uuid_reset_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -1259,7 +1243,7 @@ gf_cli3_1_uuid_reset_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_dict("uuidReset", NULL, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -1275,11 +1259,11 @@ out:
cli_local_wipe(local);
gf_free_xdr_cli_rsp(rsp);
- gf_log("", GF_LOG_DEBUG, "Returning with %d", ret);
+ gf_log("", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int
+static int
gf_cli_start_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1306,14 +1290,7 @@ gf_cli_start_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
- goto out;
- }
-
- ret = dict_get_str(local->dict, "volname", &volname);
- if (ret) {
- gf_log("cli", GF_LOG_ERROR, "dict get failed");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -1325,7 +1302,7 @@ gf_cli_start_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len,
&rsp_dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
@@ -1333,7 +1310,13 @@ gf_cli_start_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_generic_volume("volStart", rsp_dict, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
+ goto out;
+ }
+
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR, "dict get failed");
goto out;
}
@@ -1355,7 +1338,7 @@ out:
return ret;
}
-int
+static int
gf_cli_stop_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1382,15 +1365,7 @@ gf_cli_stop_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
- goto out;
- }
-
- ret = dict_get_str(local->dict, "volname", &volname);
- if (ret) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Unable to get volname from dict");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -1402,7 +1377,7 @@ gf_cli_stop_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len,
&rsp_dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
@@ -1410,7 +1385,14 @@ gf_cli_stop_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_generic_volume("volStop", rsp_dict, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
+ goto out;
+ }
+
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
+ if (ret) {
+ gf_log(frame->this->name, GF_LOG_ERROR,
+ "Unable to get volname from dict");
goto out;
}
@@ -1433,15 +1415,16 @@ out:
return ret;
}
-int
+static int
gf_cli_print_rebalance_status(dict_t *dict, enum gf_task_types task_type)
{
int ret = -1;
int count = 0;
int i = 1;
- char key[256] = {
+ char key[64] = {
0,
};
+ int keylen;
gf_defrag_status_t status_rcd = GF_DEFRAG_STATUS_NOT_STARTED;
uint64_t files = 0;
uint64_t size = 0;
@@ -1461,19 +1444,26 @@ gf_cli_print_rebalance_status(dict_t *dict, enum gf_task_types task_type)
uint64_t time_left = 0;
gf_boolean_t show_estimates = _gf_false;
- ret = dict_get_int32(dict, "count", &count);
+ ret = dict_get_int32_sizen(dict, "count", &count);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "count not set");
goto out;
}
- snprintf(key, sizeof(key), "status-1");
-
- ret = dict_get_int32(dict, key, (int32_t *)&status_rcd);
- if (ret) {
- gf_log("cli", GF_LOG_TRACE, "count %d %d", count, 1);
- gf_log("cli", GF_LOG_TRACE, "failed to get status");
- goto out;
+ for (i = 1; i <= count; i++) {
+ keylen = snprintf(key, sizeof(key), "status-%d", i);
+ ret = dict_get_int32n(dict, key, keylen, (int32_t *)&status_rcd);
+ /* If information from a node is missing we should skip
+ * the node and try to fetch information of other nodes.
+ * If information is not found for all nodes, we should
+ * error out.
+ */
+ if (!ret)
+ break;
+ if (ret && i == count) {
+ gf_log("cli", GF_LOG_TRACE, "failed to get status");
+ goto out;
+ }
}
/* Fix layout will be sent to all nodes for the volume
@@ -1512,15 +1502,13 @@ gf_cli_print_rebalance_status(dict_t *dict, enum gf_task_types task_type)
time_left = 0;
/* Check if status is NOT_STARTED, and continue early */
- snprintf(key, sizeof(key), "status-%d", i);
+ keylen = snprintf(key, sizeof(key), "status-%d", i);
- ret = dict_get_int32(dict, key, (int32_t *)&status_rcd);
+ ret = dict_get_int32n(dict, key, keylen, (int32_t *)&status_rcd);
if (ret == -ENOENT) {
gf_log("cli", GF_LOG_TRACE, "count %d %d", count, i);
gf_log("cli", GF_LOG_TRACE, "failed to get status");
- gf_log("cli", GF_LOG_ERROR,
- "node down and has failed"
- " to set dict");
+ gf_log("cli", GF_LOG_ERROR, "node down and has failed to set dict");
continue;
/* skip this node if value not available*/
} else if (ret) {
@@ -1536,8 +1524,8 @@ gf_cli_print_rebalance_status(dict_t *dict, enum gf_task_types task_type)
if (GF_DEFRAG_STATUS_STARTED == status_rcd)
show_estimates = _gf_true;
- snprintf(key, 256, "node-name-%d", i);
- ret = dict_get_str(dict, key, &node_name);
+ keylen = snprintf(key, sizeof(key), "node-name-%d", i);
+ ret = dict_get_strn(dict, key, keylen, &node_name);
if (ret)
gf_log("cli", GF_LOG_TRACE, "failed to get node-name");
@@ -1661,7 +1649,7 @@ out:
return ret;
}
-int
+static int
gf_cli_defrag_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1693,18 +1681,18 @@ gf_cli_defrag_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
- ret = dict_get_str(local->dict, "volname", &volname);
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
if (ret) {
gf_log(frame->this->name, GF_LOG_ERROR, "Failed to get volname");
goto out;
}
- ret = dict_get_int32(local->dict, "rebalance-command", (int32_t *)&cmd);
+ ret = dict_get_int32_sizen(local->dict, "rebalance-command",
+ (int32_t *)&cmd);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to get command");
goto out;
@@ -1716,16 +1704,14 @@ gf_cli_defrag_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret < 0) {
- gf_log("glusterd", GF_LOG_ERROR,
- "failed to "
- "unserialize req-buffer to dictionary");
+ gf_log("glusterd", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
if (!((cmd == GF_DEFRAG_CMD_STOP) || (cmd == GF_DEFRAG_CMD_STATUS)) &&
!(global_state->mode & GLUSTER_MODE_XML)) {
- ret = dict_get_str(dict, GF_REBALANCE_TID_KEY, &task_id_str);
+ ret = dict_get_str_sizen(dict, GF_REBALANCE_TID_KEY, &task_id_str);
if (ret) {
gf_log("cli", GF_LOG_WARNING, "failed to get %s from dict",
GF_REBALANCE_TID_KEY);
@@ -1783,8 +1769,7 @@ gf_cli_defrag_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
snprintf(msg, sizeof(msg), "%s", rsp.op_errstr);
else
snprintf(msg, sizeof(msg),
- "Failed to get the status of "
- "rebalance process");
+ "Failed to get the status of rebalance process");
goto done;
} else {
snprintf(msg, sizeof(msg), "%s", rsp.op_errstr);
@@ -1823,7 +1808,7 @@ out:
return ret;
}
-int
+static int
gf_cli_rename_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1844,7 +1829,7 @@ gf_cli_rename_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -1856,7 +1841,7 @@ gf_cli_rename_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_str("volRename", msg, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -1873,7 +1858,7 @@ out:
return ret;
}
-int
+static int
gf_cli_reset_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1894,7 +1879,7 @@ gf_cli_reset_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -1910,7 +1895,7 @@ gf_cli_reset_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_str("volReset", msg, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -1927,7 +1912,63 @@ out:
return ret;
}
-char *
+static int
+gf_cli_ganesha_cbk(struct rpc_req *req, struct iovec *iov, int count,
+ void *myframe)
+{
+ gf_cli_rsp rsp = {
+ 0,
+ };
+ int ret = -1;
+ dict_t *dict = NULL;
+
+ GF_ASSERT(myframe);
+
+ if (-1 == req->rpc_status) {
+ goto out;
+ }
+
+ ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
+ if (ret < 0) {
+ gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
+ XDR_DECODE_FAIL);
+ goto out;
+ }
+
+ gf_log("cli", GF_LOG_DEBUG, "Received resp to ganesha");
+
+ dict = dict_new();
+
+ if (!dict) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
+ if (ret)
+ goto out;
+
+ if (rsp.op_ret) {
+ if (strcmp(rsp.op_errstr, ""))
+ cli_err("nfs-ganesha: failed: %s", rsp.op_errstr);
+ else
+ cli_err("nfs-ganesha: failed");
+ }
+
+ else {
+ cli_out("nfs-ganesha : success ");
+ }
+
+ ret = rsp.op_ret;
+
+out:
+ if (dict)
+ dict_unref(dict);
+ cli_cmd_broadcast_response(ret);
+ return ret;
+}
+
+static char *
is_server_debug_xlator(void *myframe)
{
call_frame_t *frame = NULL;
@@ -1972,7 +2013,7 @@ is_server_debug_xlator(void *myframe)
return debug_xlator;
}
-int
+static int
gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -1999,7 +2040,7 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -2014,8 +2055,7 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "failed to unserialize volume set respone dict");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
@@ -2026,7 +2066,7 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
*/
debug_xlator = is_server_debug_xlator(myframe);
- if (dict_get_str(dict, "help-str", &help_str) && !msg[0])
+ if (dict_get_str_sizen(dict, "help-str", &help_str) && !msg[0])
snprintf(msg, sizeof(msg), "Set volume %s",
(rsp.op_ret) ? "unsuccessful" : "successful");
if (rsp.op_ret == 0 && debug_xlator) {
@@ -2041,7 +2081,7 @@ gf_cli_set_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_str("volSet", msg, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -2093,7 +2133,7 @@ gf_cli_add_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -2109,7 +2149,7 @@ gf_cli_add_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_str("volAddBrick", msg, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -2125,7 +2165,7 @@ out:
return ret;
}
-int
+static int
gf_cli3_remove_brick_status_cbk(struct rpc_req *req, struct iovec *iov,
int count, void *myframe)
{
@@ -2141,7 +2181,7 @@ gf_cli3_remove_brick_status_cbk(struct rpc_req *req, struct iovec *iov,
gf1_op_commands cmd = GF_OP_CMD_NONE;
cli_local_t *local = NULL;
call_frame_t *frame = NULL;
- char *cmd_str = "unknown";
+ const char *cmd_str;
GF_ASSERT(myframe);
@@ -2157,12 +2197,11 @@ gf_cli3_remove_brick_status_cbk(struct rpc_req *req, struct iovec *iov,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
- ret = dict_get_int32(local->dict, "command", &command);
+ ret = dict_get_int32_sizen(local->dict, "command", &command);
if (ret)
goto out;
@@ -2176,20 +2215,17 @@ gf_cli3_remove_brick_status_cbk(struct rpc_req *req, struct iovec *iov,
cmd_str = "status";
break;
default:
+ cmd_str = "unknown";
break;
}
ret = rsp.op_ret;
if (rsp.op_ret == -1) {
if (strcmp(rsp.op_errstr, ""))
- snprintf(msg, sizeof(msg),
- "volume remove-brick %s: "
- "failed: %s",
+ snprintf(msg, sizeof(msg), "volume remove-brick %s: failed: %s",
cmd_str, rsp.op_errstr);
else
- snprintf(msg, sizeof(msg),
- "volume remove-brick %s: "
- "failed",
+ snprintf(msg, sizeof(msg), "volume remove-brick %s: failed",
cmd_str);
if (global_state->mode & GLUSTER_MODE_XML)
@@ -2205,10 +2241,7 @@ gf_cli3_remove_brick_status_cbk(struct rpc_req *req, struct iovec *iov,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret < 0) {
- strncpy(msg,
- "failed to unserialize req-buffer to "
- "dictionary",
- sizeof(msg));
+ strncpy(msg, DICT_UNSERIALIZE_FAIL, sizeof(msg));
if (global_state->mode & GLUSTER_MODE_XML) {
rsp.op_ret = -1;
@@ -2237,8 +2270,7 @@ xml_output:
ret = gf_cli_print_rebalance_status(dict, GF_TASK_TYPE_REMOVE_BRICK);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to print remove-brick "
- "rebalance status");
+ "Failed to print remove-brick rebalance status");
goto out;
}
@@ -2260,7 +2292,7 @@ out:
return ret;
}
-int
+static int
gf_cli_remove_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -2292,12 +2324,11 @@ gf_cli_remove_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
- ret = dict_get_int32(local->dict, "command", (int32_t *)&cmd);
+ ret = dict_get_int32_sizen(local->dict, "command", (int32_t *)&cmd);
if (ret) {
gf_log("", GF_LOG_ERROR, "failed to get command");
goto out;
@@ -2312,7 +2343,7 @@ gf_cli_remove_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Failed to unserialize rsp_dict");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
@@ -2322,7 +2353,8 @@ gf_cli_remove_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
case GF_OP_CMD_START:
cmd_str = "start";
- ret = dict_get_str(rsp_dict, GF_REMOVE_BRICK_TID_KEY, &task_id_str);
+ ret = dict_get_str_sizen(rsp_dict, GF_REMOVE_BRICK_TID_KEY,
+ &task_id_str);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
"remove-brick-id is not present in dict");
@@ -2352,7 +2384,7 @@ gf_cli_remove_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
rsp.op_errno, msg,
"volRemoveBrick");
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -2383,7 +2415,7 @@ out:
return ret;
}
-int
+static int
gf_cli_reset_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -2393,7 +2425,7 @@ gf_cli_reset_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
int ret = -1;
cli_local_t *local = NULL;
call_frame_t *frame = NULL;
- char *rb_operation_str = NULL;
+ const char *rb_operation_str = NULL;
dict_t *rsp_dict = NULL;
char msg[1024] = {
0,
@@ -2414,80 +2446,53 @@ gf_cli_reset_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
- ret = dict_get_str(local->dict, "operation", &reset_op);
+ ret = dict_get_str_sizen(local->dict, "operation", &reset_op);
if (ret) {
gf_log(frame->this->name, GF_LOG_ERROR, "dict_get on operation failed");
goto out;
}
+ if (strcmp(reset_op, "GF_RESET_OP_START") &&
+ strcmp(reset_op, "GF_RESET_OP_COMMIT") &&
+ strcmp(reset_op, "GF_RESET_OP_COMMIT_FORCE")) {
+ ret = -1;
+ goto out;
+ }
+
if (rsp.dict.dict_len) {
/* Unserialize the dictionary */
rsp_dict = dict_new();
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "failed to "
- "unserialize rsp buffer to dictionary");
+ gf_log(frame->this->name, GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
- if (strcmp(reset_op, "GF_RESET_OP_START") &&
- strcmp(reset_op, "GF_RESET_OP_COMMIT") &&
- strcmp(reset_op, "GF_RESET_OP_COMMIT_FORCE")) {
- rb_operation_str = gf_strdup("Unknown operation");
- ret = -1;
- goto out;
- }
-
if (rsp.op_ret && (strcmp(rsp.op_errstr, ""))) {
- rb_operation_str = gf_strdup(rsp.op_errstr);
+ rb_operation_str = rsp.op_errstr;
} else {
if (!strcmp(reset_op, "GF_RESET_OP_START")) {
if (rsp.op_ret)
- rb_operation_str = gf_strdup(
- "reset-brick "
- "start "
- "operation "
- "failed");
+ rb_operation_str = "reset-brick start operation failed";
else
- rb_operation_str = gf_strdup(
- "reset-brick "
- "start "
- "operation "
- "successful");
+ rb_operation_str = "reset-brick start operation successful";
} else if (!strcmp(reset_op, "GF_RESET_OP_COMMIT")) {
if (rsp.op_ret)
- rb_operation_str = gf_strdup(
- "reset-brick "
- "commit "
- "operation "
- "failed");
+ rb_operation_str = "reset-brick commit operation failed";
else
- rb_operation_str = gf_strdup(
- "reset-brick "
- "commit "
- "operation "
- "successful");
+ rb_operation_str = "reset-brick commit operation successful";
} else if (!strcmp(reset_op, "GF_RESET_OP_COMMIT_FORCE")) {
if (rsp.op_ret)
- rb_operation_str = gf_strdup(
- "reset-brick "
- "commit "
- "force operation "
- "failed");
+ rb_operation_str = "reset-brick commit force operation failed";
else
- rb_operation_str = gf_strdup(
- "reset-brick "
- "commit "
- "force operation "
- "successful");
+ rb_operation_str =
+ "reset-brick commit force operation successful";
}
}
@@ -2499,7 +2504,7 @@ gf_cli_reset_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_replace_brick(rsp_dict, rsp.op_ret,
rsp.op_errno, msg);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -2516,9 +2521,6 @@ out:
if (local)
cli_local_wipe(local);
- if (rb_operation_str)
- GF_FREE(rb_operation_str);
-
cli_cmd_broadcast_response(ret);
gf_free_xdr_cli_rsp(rsp);
if (rsp_dict)
@@ -2526,7 +2528,7 @@ out:
return ret;
}
-int
+static int
gf_cli_replace_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -2536,7 +2538,7 @@ gf_cli_replace_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
int ret = -1;
cli_local_t *local = NULL;
call_frame_t *frame = NULL;
- char *rb_operation_str = NULL;
+ const char *rb_operation_str = NULL;
dict_t *rsp_dict = NULL;
char msg[1024] = {
0,
@@ -2557,12 +2559,11 @@ gf_cli_replace_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
- ret = dict_get_str(local->dict, "operation", &replace_op);
+ ret = dict_get_str_sizen(local->dict, "operation", &replace_op);
if (ret) {
gf_log(frame->this->name, GF_LOG_ERROR, "dict_get on operation failed");
goto out;
@@ -2574,29 +2575,23 @@ gf_cli_replace_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &rsp_dict);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "failed to "
- "unserialize rsp buffer to dictionary");
+ gf_log(frame->this->name, GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
if (!strcmp(replace_op, "GF_REPLACE_OP_COMMIT_FORCE")) {
if (rsp.op_ret || ret)
- rb_operation_str = gf_strdup(
- "replace-brick commit "
- "force operation failed");
+ rb_operation_str = "replace-brick commit force operation failed";
else
- rb_operation_str = gf_strdup(
- "replace-brick commit "
- "force operation "
- "successful");
+ rb_operation_str =
+ "replace-brick commit force operation successful";
} else {
gf_log(frame->this->name, GF_LOG_DEBUG, "Unknown operation");
}
if (rsp.op_ret && (strcmp(rsp.op_errstr, ""))) {
- rb_operation_str = gf_strdup(rsp.op_errstr);
+ rb_operation_str = rsp.op_errstr;
}
gf_log("cli", GF_LOG_INFO, "Received resp to replace brick");
@@ -2607,7 +2602,7 @@ gf_cli_replace_brick_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_replace_brick(rsp_dict, rsp.op_ret,
rsp.op_errno, msg);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -2624,9 +2619,6 @@ out:
if (local)
cli_local_wipe(local);
- if (rb_operation_str)
- GF_FREE(rb_operation_str);
-
cli_cmd_broadcast_response(ret);
gf_free_xdr_cli_rsp(rsp);
if (rsp_dict)
@@ -2656,7 +2648,7 @@ gf_cli_log_rotate_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -2672,7 +2664,7 @@ gf_cli_log_rotate_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_str("volLogRotate", msg, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -2710,7 +2702,7 @@ gf_cli_sync_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -2726,7 +2718,7 @@ gf_cli_sync_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_str("volSync", msg, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -2755,29 +2747,23 @@ print_quota_list_usage_output(cli_local_t *local, char *path, int64_t avail,
char *hl_str = NULL;
char *sl_val = NULL;
- used_str = gf_uint64_2human_readable(used_space->size);
-
- if (limit_set) {
- hl_str = gf_uint64_2human_readable(limits->hl);
- avail_str = gf_uint64_2human_readable(avail);
-
- sl_val = gf_uint64_2human_readable(sl_num);
- }
-
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_quota_xml_output(local, path, limits->hl, sl_str, sl_num,
used_space->size, avail, sl ? "Yes" : "No",
hl ? "Yes" : "No", limit_set);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to "
- "output in xml format for quota "
- "list command");
+ "Failed to output in xml format for quota list command");
}
goto out;
}
+ used_str = gf_uint64_2human_readable(used_space->size);
+
if (limit_set) {
+ hl_str = gf_uint64_2human_readable(limits->hl);
+ sl_val = gf_uint64_2human_readable(sl_num);
+
if (!used_str) {
cli_out("%-40s %7s %7s(%s) %8" PRIu64 "%9" PRIu64
""
@@ -2785,6 +2771,7 @@ print_quota_list_usage_output(cli_local_t *local, char *path, int64_t avail,
path, hl_str, sl_str, sl_val, used_space->size, avail,
sl ? "Yes" : "No", hl ? "Yes" : "No");
} else {
+ avail_str = gf_uint64_2human_readable(avail);
cli_out("%-40s %7s %7s(%s) %8s %7s %15s %20s", path, hl_str, sl_str,
sl_val, used_str, avail_str, sl ? "Yes" : "No",
hl ? "Yes" : "No");
@@ -2820,9 +2807,7 @@ print_quota_list_object_output(cli_local_t *local, char *path, int64_t avail,
hl ? "Yes" : "No", limit_set);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to "
- "output in xml format for quota "
- "list command");
+ "Failed to output in xml format for quota list command");
}
goto out;
}
@@ -2868,16 +2853,17 @@ print_quota_list_output(cli_local_t *local, char *path, char *default_sl,
ret = gf_string2percent(default_sl, &sl_num);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "could not convert default soft limit"
- " to percent");
+ "could not convert default soft limit to percent");
goto out;
}
sl_num = (sl_num * limits->hl) / 100;
sl_final = default_sl;
} else {
sl_num = (limits->sl * limits->hl) / 100;
- snprintf(percent_str, sizeof(percent_str), "%" PRIu64 "%%",
- limits->sl);
+ ret = snprintf(percent_str, sizeof(percent_str), "%" PRIu64 "%%",
+ limits->sl);
+ if (ret < 0)
+ goto out;
sl_final = percent_str;
}
if (type == GF_QUOTA_OPTION_TYPE_LIST)
@@ -2937,9 +2923,8 @@ print_quota_list_from_mountdir(cli_local_t *local, char *mountdir,
ret = sys_lgetxattr(mountdir, key, (void *)&limits, sizeof(limits));
if (ret < 0) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to get the xattr %s "
- "on %s. Reason : %s",
- key, mountdir, strerror(errno));
+ "Failed to get the xattr %s on %s. Reason : %s", key, mountdir,
+ strerror(errno));
switch (errno) {
#if defined(ENODATA)
@@ -2998,9 +2983,7 @@ enoattr:
}
if (ret < 0) {
- gf_log("cli", GF_LOG_ERROR,
- "Failed to get quota size "
- "on path %s: %s",
+ gf_log("cli", GF_LOG_ERROR, "Failed to get quota size on path %s: %s",
mountdir, strerror(errno));
print_quota_list_empty(path, type);
goto out;
@@ -3016,7 +2999,7 @@ out:
return ret;
}
-int
+static int
gluster_remove_auxiliary_mount(char *volname)
{
int ret = -1;
@@ -3031,25 +3014,24 @@ gluster_remove_auxiliary_mount(char *volname)
GLUSTERD_GET_QUOTA_LIST_MOUNT_PATH(mountdir, volname, "/");
ret = gf_umount_lazy(this->name, mountdir, 1);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "umount on %s failed, "
- "reason : %s",
+ gf_log("cli", GF_LOG_ERROR, "umount on %s failed, reason : %s",
mountdir, strerror(errno));
}
return ret;
}
-int
+static int
gf_cli_print_limit_list_from_dict(cli_local_t *local, char *volname,
dict_t *dict, char *default_sl, int count,
int op_ret, int op_errno, char *op_errstr)
{
int ret = -1;
int i = 0;
- char key[1024] = {
+ char key[32] = {
0,
};
+ int keylen;
char mountdir[PATH_MAX] = {
0,
};
@@ -3059,7 +3041,7 @@ gf_cli_print_limit_list_from_dict(cli_local_t *local, char *volname,
if (!dict || count <= 0)
goto out;
- ret = dict_get_int32(dict, "type", &type);
+ ret = dict_get_int32_sizen(dict, "type", &type);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to get quota type");
goto out;
@@ -3069,7 +3051,7 @@ gf_cli_print_limit_list_from_dict(cli_local_t *local, char *volname,
ret = cli_xml_output_vol_quota_limit_list_begin(local, op_ret, op_errno,
op_errstr);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting xml begin");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
} else {
@@ -3077,13 +3059,11 @@ gf_cli_print_limit_list_from_dict(cli_local_t *local, char *volname,
}
while (count--) {
- snprintf(key, sizeof(key), "path%d", i++);
+ keylen = snprintf(key, sizeof(key), "path%d", i++);
- ret = dict_get_str(dict, key, &path);
+ ret = dict_get_strn(dict, key, keylen, &path);
if (ret < 0) {
- gf_log("cli", GF_LOG_DEBUG,
- "Path not present in limit"
- " list");
+ gf_log("cli", GF_LOG_DEBUG, "Path not present in limit list");
continue;
}
@@ -3099,7 +3079,7 @@ out:
return ret;
}
-int
+static int
print_quota_list_from_quotad(call_frame_t *frame, dict_t *rsp_dict)
{
char *path = NULL;
@@ -3122,25 +3102,22 @@ print_quota_list_from_quotad(call_frame_t *frame, dict_t *rsp_dict)
local = frame->local;
gd_rsp_dict = local->dict;
- ret = dict_get_int32(rsp_dict, "type", &type);
+ ret = dict_get_int32_sizen(rsp_dict, "type", &type);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to get type");
goto out;
}
- ret = dict_get_str(rsp_dict, GET_ANCESTRY_PATH_KEY, &path);
+ ret = dict_get_str_sizen(rsp_dict, GET_ANCESTRY_PATH_KEY, &path);
if (ret) {
- gf_log("cli", GF_LOG_WARNING,
- "path key is not present "
- "in dict");
+ gf_log("cli", GF_LOG_WARNING, "path key is not present in dict");
goto out;
}
- ret = dict_get_str(gd_rsp_dict, "default-soft-limit", &default_sl);
+ ret = dict_get_str_sizen(gd_rsp_dict, "default-soft-limit", &default_sl);
if (ret) {
gf_log(frame->this->name, GF_LOG_ERROR,
- "failed to "
- "get default soft limit");
+ "failed to get default soft limit");
goto out;
}
@@ -3179,19 +3156,18 @@ print_quota_list_from_quotad(call_frame_t *frame, dict_t *rsp_dict)
LOCK(&local->lock);
{
- ret = dict_get_int32(gd_rsp_dict, "quota-list-success-count",
- &success_count);
+ ret = dict_get_int32_sizen(gd_rsp_dict, "quota-list-success-count",
+ &success_count);
if (ret)
success_count = 0;
- ret = dict_set_int32(gd_rsp_dict, "quota-list-success-count",
- success_count + 1);
+ ret = dict_set_int32_sizen(gd_rsp_dict, "quota-list-success-count",
+ success_count + 1);
}
UNLOCK(&local->lock);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to set "
- "quota-list-success-count in dict");
+ "Failed to set quota-list-success-count in dict");
goto out;
}
@@ -3201,9 +3177,7 @@ print_quota_list_from_quotad(call_frame_t *frame, dict_t *rsp_dict)
} else {
ret = cli_xml_output_vol_quota_limit_list_begin(local, 0, 0, NULL);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Error in "
- "printing xml output");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
}
@@ -3215,7 +3189,7 @@ out:
return ret;
}
-void *
+static void *
cli_cmd_broadcast_response_detached(void *opaque)
{
int32_t ret = 0;
@@ -3226,7 +3200,7 @@ cli_cmd_broadcast_response_detached(void *opaque)
return NULL;
}
-int32_t
+static int32_t
cli_quota_compare_path(struct list_head *list1, struct list_head *list2)
{
struct list_node *node1 = NULL;
@@ -3243,18 +3217,18 @@ cli_quota_compare_path(struct list_head *list1, struct list_head *list2)
dict1 = node1->ptr;
dict2 = node2->ptr;
- ret = dict_get_str(dict1, GET_ANCESTRY_PATH_KEY, &path1);
+ ret = dict_get_str_sizen(dict1, GET_ANCESTRY_PATH_KEY, &path1);
if (ret < 0)
return 0;
- ret = dict_get_str(dict2, GET_ANCESTRY_PATH_KEY, &path2);
+ ret = dict_get_str_sizen(dict2, GET_ANCESTRY_PATH_KEY, &path2);
if (ret < 0)
return 0;
return strcmp(path1, path2);
}
-int
+static int
cli_quotad_getlimit_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -3284,19 +3258,18 @@ cli_quotad_getlimit_cbk(struct rpc_req *req, struct iovec *iov, int count,
LOCK(&local->lock);
{
- ret = dict_get_int32(local->dict, "quota-list-count", &list_count);
+ ret = dict_get_int32_sizen(local->dict, "quota-list-count",
+ &list_count);
if (ret)
list_count = 0;
list_count++;
- ret = dict_set_int32(local->dict, "quota-list-count", list_count);
+ ret = dict_set_int32_sizen(local->dict, "quota-list-count", list_count);
}
UNLOCK(&local->lock);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Failed to set "
- "quota-list-count in dict");
+ gf_log("cli", GF_LOG_ERROR, "Failed to set quota-list-count in dict");
goto out;
}
@@ -3311,8 +3284,7 @@ cli_quotad_getlimit_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -3331,9 +3303,7 @@ cli_quotad_getlimit_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret < 0) {
- gf_log("cli", GF_LOG_ERROR,
- "failed to "
- "unserialize req-buffer to dictionary");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
@@ -3347,7 +3317,7 @@ cli_quotad_getlimit_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
}
- ret = dict_get_int32(local->dict, "max_count", &max_count);
+ ret = dict_get_int32_sizen(local->dict, "max_count", &max_count);
if (ret < 0) {
gf_log("cli", GF_LOG_ERROR, "failed to get max_count");
goto out;
@@ -3383,9 +3353,7 @@ out:
ret = pthread_create(&th_id, NULL, cli_cmd_broadcast_response_detached,
(void *)-1);
if (ret)
- gf_log("cli", GF_LOG_ERROR,
- "pthread_create failed: "
- "%s",
+ gf_log("cli", GF_LOG_ERROR, "pthread_create failed: %s",
strerror(errno));
} else {
cli_cmd_broadcast_response(ret);
@@ -3395,7 +3363,7 @@ out:
return ret;
}
-int
+static int
cli_quotad_getlimit(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -3415,7 +3383,7 @@ cli_quotad_getlimit(call_frame_t *frame, xlator_t *this, void *data)
ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
&req.dict.dict_len);
if (ret < 0) {
- gf_log(this->name, GF_LOG_ERROR, "failed to serialize the data");
+ gf_log(this->name, GF_LOG_ERROR, DICT_SERIALIZE_FAIL);
goto out;
}
@@ -3426,28 +3394,29 @@ cli_quotad_getlimit(call_frame_t *frame, xlator_t *this, void *data)
out:
GF_FREE(req.dict.dict_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-void
+static void
gf_cli_quota_list(cli_local_t *local, char *volname, dict_t *dict,
char *default_sl, int count, int op_ret, int op_errno,
char *op_errstr)
{
- GF_VALIDATE_OR_GOTO("cli", volname, out);
-
- if (!connected)
+ if (!cli_cmd_connected())
goto out;
- if (count > 0)
+ if (count > 0) {
+ GF_VALIDATE_OR_GOTO("cli", volname, out);
+
gf_cli_print_limit_list_from_dict(local, volname, dict, default_sl,
count, op_ret, op_errno, op_errstr);
+ }
out:
return;
}
-int
+static int
gf_cli_quota_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -3478,8 +3447,7 @@ gf_cli_quota_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -3491,9 +3459,7 @@ gf_cli_quota_cbk(struct rpc_req *req, struct iovec *iov, int count,
if (strcmp(rsp.op_errstr, "")) {
cli_err("quota command failed : %s", rsp.op_errstr);
if (rsp.op_ret == -ENOENT)
- cli_err(
- "please enter the path relative to "
- "the volume");
+ cli_err("please enter the path relative to the volume");
} else {
cli_err("quota command : failed");
}
@@ -3507,24 +3473,17 @@ gf_cli_quota_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret < 0) {
- gf_log("cli", GF_LOG_ERROR,
- "failed to "
- "unserialize req-buffer to dictionary");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
gf_log("cli", GF_LOG_DEBUG, "Received resp to quota command");
- ret = dict_get_str(dict, "volname", &volname);
- if (ret)
- gf_log(frame->this->name, GF_LOG_ERROR, "failed to get volname");
-
- ret = dict_get_str(dict, "default-soft-limit", &default_sl);
+ ret = dict_get_str_sizen(dict, "default-soft-limit", &default_sl);
if (ret)
gf_log(frame->this->name, GF_LOG_TRACE,
- "failed to get "
- "default soft limit");
+ "failed to get default soft limit");
// default-soft-limit is part of rsp_dict only iff we sent
// GLUSTER_CLI_QUOTA with type being GF_QUOTA_OPTION_TYPE_LIST
@@ -3534,8 +3493,8 @@ gf_cli_quota_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = -1;
goto out;
}
- ret = dict_set_dynstr(local->dict, "default-soft-limit",
- default_sl_dup);
+ ret = dict_set_dynstr_sizen(local->dict, "default-soft-limit",
+ default_sl_dup);
if (ret) {
gf_log(frame->this->name, GF_LOG_TRACE,
"failed to set default soft limit");
@@ -3543,11 +3502,15 @@ gf_cli_quota_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
}
- ret = dict_get_int32(dict, "type", &type);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
+ if (ret)
+ gf_log(frame->this->name, GF_LOG_ERROR, "failed to get volname");
+
+ ret = dict_get_int32_sizen(dict, "type", &type);
if (ret)
gf_log(frame->this->name, GF_LOG_TRACE, "failed to get type");
- ret = dict_get_int32(dict, "count", &entry_count);
+ ret = dict_get_int32_sizen(dict, "count", &entry_count);
if (ret)
gf_log(frame->this->name, GF_LOG_TRACE, "failed to get count");
@@ -3560,9 +3523,7 @@ gf_cli_quota_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_quota_limit_list_end(local);
if (ret < 0) {
ret = -1;
- gf_log("cli", GF_LOG_ERROR,
- "Error in printing"
- " xml output");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
}
goto out;
}
@@ -3574,7 +3535,7 @@ xml_output:
ret = cli_xml_output_str("volQuota", NULL, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -3599,7 +3560,7 @@ out:
return ret;
}
-int
+static int
gf_cli_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -3618,7 +3579,7 @@ gf_cli_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -3650,7 +3611,7 @@ out:
return ret;
}
-int
+static int
gf_cli_pmap_b2p_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -3669,7 +3630,7 @@ gf_cli_pmap_b2p_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_pmap_port_by_brick_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -3692,7 +3653,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_probe(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {
@@ -3711,9 +3672,9 @@ gf_cli_probe(call_frame_t *frame, xlator_t *this, void *data)
dict = data;
- ret = dict_get_int32(dict, "port", &port);
+ ret = dict_get_int32_sizen(dict, "port", &port);
if (ret) {
- ret = dict_set_int32(dict, "port", CLI_GLUSTERD_PORT);
+ ret = dict_set_int32_sizen(dict, "port", CLI_GLUSTERD_PORT);
if (ret)
goto out;
}
@@ -3724,12 +3685,12 @@ gf_cli_probe(call_frame_t *frame, xlator_t *this, void *data)
out:
GF_FREE(req.dict.dict_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_deprobe(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {
@@ -3748,16 +3709,16 @@ gf_cli_deprobe(call_frame_t *frame, xlator_t *this, void *data)
}
dict = data;
- ret = dict_get_int32(dict, "port", &port);
+ ret = dict_get_int32_sizen(dict, "port", &port);
if (ret) {
- ret = dict_set_int32(dict, "port", CLI_GLUSTERD_PORT);
+ ret = dict_set_int32_sizen(dict, "port", CLI_GLUSTERD_PORT);
if (ret)
goto out;
}
- ret = dict_get_int32(dict, "flags", &flags);
+ ret = dict_get_int32_sizen(dict, "flags", &flags);
if (ret) {
- ret = dict_set_int32(dict, "flags", 0);
+ ret = dict_set_int32_sizen(dict, "flags", 0);
if (ret)
goto out;
}
@@ -3768,12 +3729,12 @@ gf_cli_deprobe(call_frame_t *frame, xlator_t *this, void *data)
out:
GF_FREE(req.dict.dict_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_list_friends(call_frame_t *frame, xlator_t *this, void *data)
{
gf1_cli_peer_list_req req = {
@@ -3806,11 +3767,11 @@ out:
*/
frame->local = NULL;
}
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_get_state(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {
@@ -3821,24 +3782,18 @@ gf_cli_get_state(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_get_state_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_GET_STATE, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_get_next_volume(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = 0;
@@ -3856,7 +3811,7 @@ gf_cli_get_next_volume(call_frame_t *frame, xlator_t *this, void *data)
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_vol_info_begin(local, 0, 0, "");
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
}
@@ -3884,15 +3839,15 @@ end_xml:
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_vol_info_end(local);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
}
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_get_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -3903,7 +3858,7 @@ gf_cli_get_volume(call_frame_t *frame, xlator_t *this, void *data)
dict_t *dict = NULL;
int32_t flags = 0;
- if (!frame || !this || !data) {
+ if (!this || !data) {
ret = -1;
goto out;
}
@@ -3918,13 +3873,13 @@ gf_cli_get_volume(call_frame_t *frame, xlator_t *this, void *data)
}
if (ctx->volname) {
- ret = dict_set_str(dict, "volname", ctx->volname);
+ ret = dict_set_str_sizen(dict, "volname", ctx->volname);
if (ret)
goto out;
}
flags = ctx->flags;
- ret = dict_set_int32(dict, "flags", flags);
+ ret = dict_set_int32_sizen(dict, "flags", flags);
if (ret) {
gf_log(frame->this->name, GF_LOG_ERROR, "failed to set flags");
goto out;
@@ -3933,7 +3888,7 @@ gf_cli_get_volume(call_frame_t *frame, xlator_t *this, void *data)
ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
&req.dict.dict_len);
if (ret) {
- gf_log(frame->this->name, GF_LOG_ERROR, "failed to serialize dict");
+ gf_log(frame->this->name, GF_LOG_ERROR, DICT_SERIALIZE_FAIL);
goto out;
}
@@ -3947,11 +3902,11 @@ out:
GF_FREE(req.dict.dict_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli3_1_uuid_get(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -3960,22 +3915,16 @@ gf_cli3_1_uuid_get(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli3_1_uuid_get_cbk,
(xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_UUID_GET,
this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli3_1_uuid_reset(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -3984,22 +3933,16 @@ gf_cli3_1_uuid_reset(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli3_1_uuid_reset_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_UUID_RESET, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_create_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4008,26 +3951,19 @@ gf_cli_create_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_create_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_CREATE_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_delete_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4036,25 +3972,18 @@ gf_cli_delete_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_delete_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_DELETE_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
GF_FREE(req.dict.dict_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_start_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4063,25 +3992,19 @@ gf_cli_start_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_start_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_START_VOLUME, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_stop_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4090,25 +4013,18 @@ gf_cli_stop_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = data;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_stop_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_STOP_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_defrag_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4117,25 +4033,18 @@ gf_cli_defrag_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_defrag_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_DEFRAG_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_rename_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4154,7 +4063,7 @@ gf_cli_rename_volume(call_frame_t *frame, xlator_t *this, void *data)
ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
&req.dict.dict_len);
if (ret < 0) {
- gf_log(this->name, GF_LOG_ERROR, "failed to serialize the data");
+ gf_log(this->name, GF_LOG_ERROR, DICT_SERIALIZE_FAIL);
goto out;
}
@@ -4165,12 +4074,12 @@ gf_cli_rename_volume(call_frame_t *frame, xlator_t *this, void *data)
out:
GF_FREE(req.dict.dict_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_reset_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4179,25 +4088,18 @@ gf_cli_reset_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_reset_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_RESET_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
-gf_cli_set_volume(call_frame_t *frame, xlator_t *this, void *data)
+static int32_t
+gf_cli_ganesha(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
0,
@@ -4205,19 +4107,31 @@ gf_cli_set_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
+ dict = data;
+
+ ret = cli_to_glusterd(&req, frame, gf_cli_ganesha_cbk,
+ (xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_GANESHA,
+ this, cli_rpc_prog, NULL);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
+
+ return ret;
+}
+
+static int32_t
+gf_cli_set_volume(call_frame_t *frame, xlator_t *this, void *data)
+{
+ gf_cli_req req = {{
+ 0,
+ }};
+ int ret = 0;
+ dict_t *dict = NULL;
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_set_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_SET_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
@@ -4231,38 +4145,21 @@ gf_cli_add_brick(call_frame_t *frame, xlator_t *this, void *data)
}};
int ret = 0;
dict_t *dict = NULL;
- char *volname = NULL;
- int32_t count = 0;
-
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
dict = data;
- ret = dict_get_str(dict, "volname", &volname);
-
- if (ret)
- goto out;
-
- ret = dict_get_int32(dict, "count", &count);
- if (ret)
- goto out;
-
ret = cli_to_glusterd(&req, frame, gf_cli_add_brick_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_ADD_BRICK, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_remove_brick(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4276,21 +4173,16 @@ gf_cli_remove_brick(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
int32_t command = 0;
- char *volname = NULL;
int32_t cmd = 0;
- if (!frame || !this || !data) {
+ if (!frame || !this) {
ret = -1;
goto out;
}
dict = data;
- ret = dict_get_str(dict, "volname", &volname);
- if (ret)
- goto out;
-
- ret = dict_get_int32(dict, "command", &command);
+ ret = dict_get_int32_sizen(dict, "command", &command);
if (ret)
goto out;
@@ -4305,7 +4197,7 @@ gf_cli_remove_brick(call_frame_t *frame, xlator_t *this, void *data)
else
cmd |= GF_DEFRAG_CMD_STOP;
- ret = dict_set_int32(dict, "rebalance-command", (int32_t)cmd);
+ ret = dict_set_int32_sizen(dict, "rebalance-command", (int32_t)cmd);
if (ret) {
gf_log(this->name, GF_LOG_ERROR, "Failed to set dict");
goto out;
@@ -4318,7 +4210,7 @@ gf_cli_remove_brick(call_frame_t *frame, xlator_t *this, void *data)
}
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
@@ -4327,7 +4219,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_reset_brick(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4336,8 +4228,6 @@ gf_cli_reset_brick(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
char *dst_brick = NULL;
- char *src_brick = NULL;
- char *volname = NULL;
char *op = NULL;
if (!frame || !this || !data) {
@@ -4347,49 +4237,34 @@ gf_cli_reset_brick(call_frame_t *frame, xlator_t *this, void *data)
dict = data;
- ret = dict_get_str(dict, "operation", &op);
+ ret = dict_get_str_sizen(dict, "operation", &op);
if (ret) {
gf_log(this->name, GF_LOG_DEBUG, "dict_get on operation failed");
goto out;
}
- ret = dict_get_str(dict, "volname", &volname);
- if (ret) {
- gf_log(this->name, GF_LOG_DEBUG, "dict_get on volname failed");
- goto out;
- }
-
- ret = dict_get_str(dict, "src-brick", &src_brick);
- if (ret) {
- gf_log(this->name, GF_LOG_DEBUG, "dict_get on src-brick failed");
- goto out;
- }
-
if (!strcmp(op, "GF_RESET_OP_COMMIT") ||
!strcmp(op, "GF_RESET_OP_COMMIT_FORCE")) {
- ret = dict_get_str(dict, "dst-brick", &dst_brick);
+ ret = dict_get_str_sizen(dict, "dst-brick", &dst_brick);
if (ret) {
gf_log(this->name, GF_LOG_DEBUG, "dict_get on dst-brick failed");
goto out;
}
}
- gf_log(this->name, GF_LOG_DEBUG, "Received command reset-brick %s on %s.",
- op, src_brick);
-
ret = cli_to_glusterd(&req, frame, gf_cli_reset_brick_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_RESET_BRICK, this, cli_rpc_prog, NULL);
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_replace_brick(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4397,9 +4272,6 @@ gf_cli_replace_brick(call_frame_t *frame, xlator_t *this, void *data)
}};
int ret = 0;
dict_t *dict = NULL;
- char *src_brick = NULL;
- char *dst_brick = NULL;
- char *volname = NULL;
int32_t op = 0;
if (!frame || !this || !data) {
@@ -4409,47 +4281,25 @@ gf_cli_replace_brick(call_frame_t *frame, xlator_t *this, void *data)
dict = data;
- ret = dict_get_int32(dict, "operation", &op);
+ ret = dict_get_int32_sizen(dict, "operation", &op);
if (ret) {
gf_log(this->name, GF_LOG_DEBUG, "dict_get on operation failed");
goto out;
}
- ret = dict_get_str(dict, "volname", &volname);
- if (ret) {
- gf_log(this->name, GF_LOG_DEBUG, "dict_get on volname failed");
- goto out;
- }
-
- ret = dict_get_str(dict, "src-brick", &src_brick);
- if (ret) {
- gf_log(this->name, GF_LOG_DEBUG, "dict_get on src-brick failed");
- goto out;
- }
-
- ret = dict_get_str(dict, "dst-brick", &dst_brick);
- if (ret) {
- gf_log(this->name, GF_LOG_DEBUG, "dict_get on dst-brick failed");
- goto out;
- }
-
- gf_log(this->name, GF_LOG_DEBUG,
- "Received command replace-brick %s with "
- "%s with operation=%d",
- src_brick, dst_brick, op);
ret = cli_to_glusterd(&req, frame, gf_cli_replace_brick_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_REPLACE_BRICK, this, cli_rpc_prog, NULL);
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_log_rotate(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4458,25 +4308,18 @@ gf_cli_log_rotate(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_log_rotate_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_LOG_ROTATE, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_sync_volume(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = 0;
@@ -4485,25 +4328,18 @@ gf_cli_sync_volume(call_frame_t *frame, xlator_t *this, void *data)
}};
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_sync_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_SYNC_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_getspec(call_frame_t *frame, xlator_t *this, void *data)
{
gf_getspec_req req = {
@@ -4513,14 +4349,14 @@ gf_cli_getspec(call_frame_t *frame, xlator_t *this, void *data)
dict_t *dict = NULL;
dict_t *op_dict = NULL;
- if (!frame || !this || !data) {
+ if (!frame || !this) {
ret = -1;
goto out;
}
dict = data;
- ret = dict_get_str(dict, "volid", &req.key);
+ ret = dict_get_str_sizen(dict, "volid", &req.key);
if (ret)
goto out;
@@ -4532,26 +4368,24 @@ gf_cli_getspec(call_frame_t *frame, xlator_t *this, void *data)
// Set the supported min and max op-versions, so glusterd can make a
// decision
- ret = dict_set_int32(op_dict, "min-op-version", GD_OP_VERSION_MIN);
+ ret = dict_set_int32_sizen(op_dict, "min-op-version", GD_OP_VERSION_MIN);
if (ret) {
gf_log(THIS->name, GF_LOG_ERROR,
- "Failed to set min-op-version"
- " in request dict");
+ "Failed to set min-op-version in request dict");
goto out;
}
- ret = dict_set_int32(op_dict, "max-op-version", GD_OP_VERSION_MAX);
+ ret = dict_set_int32_sizen(op_dict, "max-op-version", GD_OP_VERSION_MAX);
if (ret) {
gf_log(THIS->name, GF_LOG_ERROR,
- "Failed to set max-op-version"
- " in request dict");
+ "Failed to set max-op-version in request dict");
goto out;
}
ret = dict_allocate_and_serialize(op_dict, &req.xdata.xdata_val,
&req.xdata.xdata_len);
if (ret < 0) {
- gf_log(THIS->name, GF_LOG_ERROR, "Failed to serialize dictionary");
+ gf_log(THIS->name, GF_LOG_ERROR, DICT_SERIALIZE_FAIL);
goto out;
}
@@ -4564,12 +4398,12 @@ out:
dict_unref(op_dict);
}
GF_FREE(req.xdata.xdata_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_quota(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -4578,24 +4412,17 @@ gf_cli_quota(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_quota_cbk,
(xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_QUOTA,
this, cli_rpc_prog, NULL);
-
-out:
GF_FREE(req.dict.dict_val);
-
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_pmap_b2p(call_frame_t *frame, xlator_t *this, void *data)
{
pmap_port_by_brick_req req = {
@@ -4604,14 +4431,14 @@ gf_cli_pmap_b2p(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
+ if (!frame || !this) {
ret = -1;
goto out;
}
dict = data;
- ret = dict_get_str(dict, "brick", &req.brick);
+ ret = dict_get_str_sizen(dict, "brick", &req.brick);
if (ret)
goto out;
@@ -4620,7 +4447,7 @@ gf_cli_pmap_b2p(call_frame_t *frame, xlator_t *this, void *data)
(xdrproc_t)xdr_pmap_port_by_brick_req);
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
@@ -4635,7 +4462,8 @@ gf_cli_fsm_log_cbk(struct rpc_req *req, struct iovec *iov, int count,
int ret = -1;
dict_t *dict = NULL;
int tr_count = 0;
- char key[256] = {0};
+ char key[64] = {0};
+ int keylen;
int i = 0;
char *old_state = NULL;
char *new_state = NULL;
@@ -4651,7 +4479,7 @@ gf_cli_fsm_log_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_fsm_log_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -4673,33 +4501,33 @@ gf_cli_fsm_log_cbk(struct rpc_req *req, struct iovec *iov, int count,
&dict);
if (ret) {
- cli_err("bad response");
+ cli_err(DICT_UNSERIALIZE_FAIL);
goto out;
}
- ret = dict_get_int32(dict, "count", &tr_count);
- if (tr_count)
+ ret = dict_get_int32_sizen(dict, "count", &tr_count);
+ if (!ret && tr_count)
cli_out("number of transitions: %d", tr_count);
else
cli_err("No transitions");
for (i = 0; i < tr_count; i++) {
- snprintf(key, sizeof(key), "log%d-old-state", i);
- ret = dict_get_str(dict, key, &old_state);
+ keylen = snprintf(key, sizeof(key), "log%d-old-state", i);
+ ret = dict_get_strn(dict, key, keylen, &old_state);
if (ret)
goto out;
- snprintf(key, sizeof(key), "log%d-event", i);
- ret = dict_get_str(dict, key, &event);
+ keylen = snprintf(key, sizeof(key), "log%d-event", i);
+ ret = dict_get_strn(dict, key, keylen, &event);
if (ret)
goto out;
- snprintf(key, sizeof(key), "log%d-new-state", i);
- ret = dict_get_str(dict, key, &new_state);
+ keylen = snprintf(key, sizeof(key), "log%d-new-state", i);
+ ret = dict_get_strn(dict, key, keylen, &new_state);
if (ret)
goto out;
- snprintf(key, sizeof(key), "log%d-time", i);
- ret = dict_get_str(dict, key, &time);
+ keylen = snprintf(key, sizeof(key), "log%d-time", i);
+ ret = dict_get_strn(dict, key, keylen, &time);
if (ret)
goto out;
cli_out(
@@ -4722,7 +4550,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_fsm_log(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = -1;
@@ -4742,12 +4570,12 @@ gf_cli_fsm_log(call_frame_t *frame, xlator_t *this, void *data)
(xdrproc_t)xdr_gf1_cli_fsm_log_req);
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int
+static int
gf_cli_gsync_config_command(dict_t *dict)
{
runner_t runner = {
@@ -4762,7 +4590,7 @@ gf_cli_gsync_config_command(dict_t *dict)
int ret = -1;
char conf_path[PATH_MAX] = "";
- if (dict_get_str(dict, "subop", &subop) != 0)
+ if (dict_get_str_sizen(dict, "subop", &subop) != 0)
return -1;
if (strcmp(subop, "get") != 0 && strcmp(subop, "get-all") != 0) {
@@ -4770,16 +4598,16 @@ gf_cli_gsync_config_command(dict_t *dict)
return 0;
}
- if (dict_get_str(dict, "glusterd_workdir", &gwd) != 0 ||
- dict_get_str(dict, "slave", &slave) != 0)
+ if (dict_get_str_sizen(dict, "glusterd_workdir", &gwd) != 0 ||
+ dict_get_str_sizen(dict, "slave", &slave) != 0)
return -1;
- if (dict_get_str(dict, "master", &master) != 0)
+ if (dict_get_str_sizen(dict, "master", &master) != 0)
master = NULL;
- if (dict_get_str(dict, "op_name", &op_name) != 0)
+ if (dict_get_str_sizen(dict, "op_name", &op_name) != 0)
op_name = NULL;
- ret = dict_get_str(dict, "conf_path", &confpath);
+ ret = dict_get_str_sizen(dict, "conf_path", &confpath);
if (ret || !confpath) {
ret = snprintf(conf_path, sizeof(conf_path) - 1,
"%s/" GEOREP "/gsyncd_template.conf", gwd);
@@ -4801,7 +4629,7 @@ gf_cli_gsync_config_command(dict_t *dict)
return runner_run(&runner);
}
-int
+static int
gf_cli_print_status(char **title_values, gf_gsync_status_t **sts_vals,
int *spacing, int gsync_count, int number_of_fields,
int is_detail)
@@ -4830,7 +4658,7 @@ gf_cli_print_status(char **title_values, gf_gsync_status_t **sts_vals,
total_spacing += 4; /* For the spacing between the fields */
/* char pointers for each field */
- output_values = GF_CALLOC(number_of_fields, sizeof(char *),
+ output_values = GF_MALLOC(number_of_fields * sizeof(char *),
gf_common_mt_char);
if (!output_values) {
ret = -1;
@@ -4845,12 +4673,6 @@ gf_cli_print_status(char **title_values, gf_gsync_status_t **sts_vals,
}
}
- hyphens = GF_CALLOC(total_spacing + 1, sizeof(char), gf_common_mt_char);
- if (!hyphens) {
- ret = -1;
- goto out;
- }
-
cli_out(" ");
/* setting the title "NODE", "MASTER", etc. from title_values[]
@@ -4873,6 +4695,12 @@ gf_cli_print_status(char **title_values, gf_gsync_status_t **sts_vals,
output_values[10], output_values[11], output_values[12],
output_values[13], output_values[14], output_values[15]);
+ hyphens = GF_MALLOC((total_spacing + 1) * sizeof(char), gf_common_mt_char);
+ if (!hyphens) {
+ ret = -1;
+ goto out;
+ }
+
/* setting and printing the hyphens */
memset(hyphens, '-', total_spacing);
hyphens[total_spacing] = '\0';
@@ -4937,7 +4765,7 @@ gf_gsync_status_t_comparator(const void *p, const void *q)
return strcmp(slavekey1, slavekey2);
}
-int
+static int
gf_cli_read_status_data(dict_t *dict, gf_gsync_status_t **sts_vals,
int *spacing, int gsync_count, int number_of_fields)
{
@@ -4977,7 +4805,7 @@ out:
return ret;
}
-int
+static int
gf_cli_gsync_status_output(dict_t *dict, gf_boolean_t is_detail)
{
int gsync_count = 0;
@@ -4988,47 +4816,43 @@ gf_cli_gsync_status_output(dict_t *dict, gf_boolean_t is_detail)
char errmsg[1024] = "";
char *master = NULL;
char *slave = NULL;
- char *title_values[] = {"MASTER NODE",
- "MASTER VOL",
- "MASTER BRICK",
- "SLAVE USER",
- "SLAVE",
- "SLAVE NODE",
- "STATUS",
- "CRAWL STATUS",
- "LAST_SYNCED",
- "ENTRY",
- "DATA",
- "META",
- "FAILURES",
- "CHECKPOINT TIME",
- "CHECKPOINT COMPLETED",
- "CHECKPOINT COMPLETION TIME"};
+ static char *title_values[] = {"MASTER NODE",
+ "MASTER VOL",
+ "MASTER BRICK",
+ "SLAVE USER",
+ "SLAVE",
+ "SLAVE NODE",
+ "STATUS",
+ "CRAWL STATUS",
+ "LAST_SYNCED",
+ "ENTRY",
+ "DATA",
+ "META",
+ "FAILURES",
+ "CHECKPOINT TIME",
+ "CHECKPOINT COMPLETED",
+ "CHECKPOINT COMPLETION TIME"};
gf_gsync_status_t **sts_vals = NULL;
/* Checks if any session is active or not */
- ret = dict_get_int32(dict, "gsync-count", &gsync_count);
+ ret = dict_get_int32_sizen(dict, "gsync-count", &gsync_count);
if (ret) {
- ret = dict_get_str(dict, "master", &master);
+ ret = dict_get_str_sizen(dict, "master", &master);
- ret = dict_get_str(dict, "slave", &slave);
+ ret = dict_get_str_sizen(dict, "slave", &slave);
if (master) {
if (slave)
snprintf(errmsg, sizeof(errmsg),
- "No active "
- "geo-replication sessions between %s"
+ "No active geo-replication sessions between %s"
" and %s",
master, slave);
else
snprintf(errmsg, sizeof(errmsg),
- "No active "
- "geo-replication sessions for %s",
- master);
+ "No active geo-replication sessions for %s", master);
} else
snprintf(errmsg, sizeof(errmsg),
- "No active "
- "geo-replication sessions");
+ "No active geo-replication sessions");
gf_log("cli", GF_LOG_INFO, "%s", errmsg);
cli_out("%s", errmsg);
@@ -5048,14 +4872,6 @@ gf_cli_gsync_status_output(dict_t *dict, gf_boolean_t is_detail)
ret = -1;
goto out;
}
- for (i = 0; i < gsync_count; i++) {
- sts_vals[i] = GF_CALLOC(1, sizeof(gf_gsync_status_t),
- gf_common_mt_char);
- if (!sts_vals[i]) {
- ret = -1;
- goto out;
- }
- }
ret = gf_cli_read_status_data(dict, sts_vals, spacing, gsync_count,
num_of_fields);
@@ -5084,13 +4900,13 @@ write_contents_to_common_pem_file(dict_t *dict, int output_count)
char *workdir = NULL;
char common_pem_file[PATH_MAX] = "";
char *output = NULL;
- char output_name[PATH_MAX] = "";
+ char output_name[32] = "";
int bytes_written = 0;
int fd = -1;
int ret = -1;
int i = -1;
- ret = dict_get_str(dict, "glusterd_workdir", &workdir);
+ ret = dict_get_str_sizen(dict, "glusterd_workdir", &workdir);
if (ret || !workdir) {
gf_log("", GF_LOG_ERROR, "Unable to fetch workdir");
ret = -1;
@@ -5104,17 +4920,15 @@ write_contents_to_common_pem_file(dict_t *dict, int output_count)
fd = open(common_pem_file, O_WRONLY | O_CREAT, 0600);
if (fd == -1) {
- gf_log("", GF_LOG_ERROR,
- "Failed to open %s"
- " Error : %s",
+ gf_log("", GF_LOG_ERROR, "Failed to open %s Error : %s",
common_pem_file, strerror(errno));
ret = -1;
goto out;
}
for (i = 1; i <= output_count; i++) {
- snprintf(output_name, sizeof(output_name), "output_%d", i);
- ret = dict_get_str(dict, output_name, &output);
+ ret = snprintf(output_name, sizeof(output_name), "output_%d", i);
+ ret = dict_get_strn(dict, output_name, ret, &output);
if (ret) {
gf_log("", GF_LOG_ERROR, "Failed to get %s.", output_name);
cli_out("Unable to fetch output.");
@@ -5122,9 +4936,7 @@ write_contents_to_common_pem_file(dict_t *dict, int output_count)
if (output) {
bytes_written = sys_write(fd, output, strlen(output));
if (bytes_written != strlen(output)) {
- gf_log("", GF_LOG_ERROR,
- "Failed to write "
- "to %s",
+ gf_log("", GF_LOG_ERROR, "Failed to write to %s",
common_pem_file);
ret = -1;
goto out;
@@ -5146,11 +4958,11 @@ out:
if (fd >= 0)
sys_close(fd);
- gf_log("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int
+static int
gf_cli_sys_exec_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -5159,7 +4971,7 @@ gf_cli_sys_exec_cbk(struct rpc_req *req, struct iovec *iov, int count,
int i = -1;
char *output = NULL;
char *command = NULL;
- char output_name[PATH_MAX] = "";
+ char output_name[32] = "";
gf_cli_rsp rsp = {
0,
};
@@ -5174,7 +4986,7 @@ gf_cli_sys_exec_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -5196,14 +5008,14 @@ gf_cli_sys_exec_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- ret = dict_get_int32(dict, "output_count", &output_count);
+ ret = dict_get_int32_sizen(dict, "output_count", &output_count);
if (ret) {
cli_out("Command executed successfully.");
ret = 0;
goto out;
}
- ret = dict_get_str(dict, "command", &command);
+ ret = dict_get_str_sizen(dict, "command", &command);
if (ret) {
gf_log("", GF_LOG_ERROR, "Unable to get command from dict");
goto out;
@@ -5216,8 +5028,8 @@ gf_cli_sys_exec_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
for (i = 1; i <= output_count; i++) {
- snprintf(output_name, sizeof(output_name), "output_%d", i);
- ret = dict_get_str(dict, output_name, &output);
+ ret = snprintf(output_name, sizeof(output_name), "output_%d", i);
+ ret = dict_get_strn(dict, output_name, ret, &output);
if (ret) {
gf_log("", GF_LOG_ERROR, "Failed to get %s.", output_name);
cli_out("Unable to fetch output.");
@@ -5237,7 +5049,7 @@ out:
return ret;
}
-int
+static int
gf_cli_copy_file_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -5256,7 +5068,7 @@ gf_cli_copy_file_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -5288,7 +5100,7 @@ out:
return ret;
}
-int
+static int
gf_cli_gsync_set_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -5312,7 +5124,7 @@ gf_cli_gsync_set_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -5332,7 +5144,7 @@ gf_cli_gsync_set_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_gsync(dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -5343,11 +5155,11 @@ gf_cli_gsync_set_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- ret = dict_get_str(dict, "gsync-status", &gsync_status);
+ ret = dict_get_str_sizen(dict, "gsync-status", &gsync_status);
if (!ret)
cli_out("%s", gsync_status);
- ret = dict_get_int32(dict, "type", &type);
+ ret = dict_get_int32_sizen(dict, "type", &type);
if (ret) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
"failed to get type");
@@ -5357,29 +5169,25 @@ gf_cli_gsync_set_cbk(struct rpc_req *req, struct iovec *iov, int count,
switch (type) {
case GF_GSYNC_OPTION_TYPE_START:
case GF_GSYNC_OPTION_TYPE_STOP:
- if (dict_get_str(dict, "master", &master) != 0)
+ if (dict_get_str_sizen(dict, "master", &master) != 0)
master = "???";
- if (dict_get_str(dict, "slave", &slave) != 0)
+ if (dict_get_str_sizen(dict, "slave", &slave) != 0)
slave = "???";
cli_out(
- "%s " GEOREP
- " session between %s & %s"
- " has been successful",
+ "%s " GEOREP " session between %s & %s has been successful",
type == GF_GSYNC_OPTION_TYPE_START ? "Starting" : "Stopping",
master, slave);
break;
case GF_GSYNC_OPTION_TYPE_PAUSE:
case GF_GSYNC_OPTION_TYPE_RESUME:
- if (dict_get_str(dict, "master", &master) != 0)
+ if (dict_get_str_sizen(dict, "master", &master) != 0)
master = "???";
- if (dict_get_str(dict, "slave", &slave) != 0)
+ if (dict_get_str_sizen(dict, "slave", &slave) != 0)
slave = "???";
- cli_out("%s " GEOREP
- " session between %s & %s"
- " has been successful",
+ cli_out("%s " GEOREP " session between %s & %s has been successful",
type == GF_GSYNC_OPTION_TYPE_PAUSE ? "Pausing" : "Resuming",
master, slave);
break;
@@ -5395,24 +5203,22 @@ gf_cli_gsync_set_cbk(struct rpc_req *req, struct iovec *iov, int count,
break;
case GF_GSYNC_OPTION_TYPE_DELETE:
- if (dict_get_str(dict, "master", &master) != 0)
+ if (dict_get_str_sizen(dict, "master", &master) != 0)
master = "???";
- if (dict_get_str(dict, "slave", &slave) != 0)
+ if (dict_get_str_sizen(dict, "slave", &slave) != 0)
slave = "???";
cli_out("Deleting " GEOREP
- " session between %s & %s"
- " has been successful",
+ " session between %s & %s has been successful",
master, slave);
break;
case GF_GSYNC_OPTION_TYPE_CREATE:
- if (dict_get_str(dict, "master", &master) != 0)
+ if (dict_get_str_sizen(dict, "master", &master) != 0)
master = "???";
- if (dict_get_str(dict, "slave", &slave) != 0)
+ if (dict_get_str_sizen(dict, "slave", &slave) != 0)
slave = "???";
cli_out("Creating " GEOREP
- " session between %s & %s"
- " has been successful",
+ " session between %s & %s has been successful",
master, slave);
break;
@@ -5428,7 +5234,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_sys_exec(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = 0;
@@ -5437,23 +5243,22 @@ gf_cli_sys_exec(call_frame_t *frame, xlator_t *this, void *data)
0,
}};
- if (!frame || !this || !data) {
- ret = -1;
- gf_log("cli", GF_LOG_ERROR, "Invalid data");
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_sys_exec_cbk,
(xdrproc_t)xdr_gf_cli_req, dict, GLUSTER_CLI_SYS_EXEC,
this, cli_rpc_prog, NULL);
-out:
+ if (ret)
+ if (!frame || !this || !data) {
+ ret = -1;
+ gf_log("cli", GF_LOG_ERROR, "Invalid data");
+ }
+
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_copy_file(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = 0;
@@ -5462,23 +5267,22 @@ gf_cli_copy_file(call_frame_t *frame, xlator_t *this, void *data)
0,
}};
- if (!frame || !this || !data) {
- ret = -1;
- gf_log("cli", GF_LOG_ERROR, "Invalid data");
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_copy_file_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_COPY_FILE, this, cli_rpc_prog, NULL);
-out:
+ if (ret)
+ if (!frame || !this || !data) {
+ ret = -1;
+ gf_log("cli", GF_LOG_ERROR, "Invalid data");
+ }
+
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_gsync_set(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = 0;
@@ -5487,18 +5291,11 @@ gf_cli_gsync_set(call_frame_t *frame, xlator_t *this, void *data)
0,
}};
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_gsync_set_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_GSYNC_SET, this, cli_rpc_prog, NULL);
-
-out:
GF_FREE(req.dict.dict_val);
return ret;
@@ -5509,20 +5306,18 @@ cli_profile_info_percentage_cmp(void *a, void *b)
{
cli_profile_info_t *ia = NULL;
cli_profile_info_t *ib = NULL;
- int ret = 0;
ia = a;
ib = b;
if (ia->percentage_avg_latency < ib->percentage_avg_latency)
- ret = -1;
+ return -1;
else if (ia->percentage_avg_latency > ib->percentage_avg_latency)
- ret = 1;
- else
- ret = 0;
- return ret;
+ return 1;
+
+ return 0;
}
-void
+static void
cmd_profile_volume_brick_out(dict_t *dict, int count, int interval)
{
char key[256] = {0};
@@ -5546,7 +5341,8 @@ cmd_profile_volume_brick_out(dict_t *dict, int count, int interval)
double total_percentage_latency = 0;
for (i = 0; i < 32; i++) {
- snprintf(key, sizeof(key), "%d-%d-read-%d", count, interval, (1 << i));
+ snprintf(key, sizeof(key), "%d-%d-read-%" PRIu32, count, interval,
+ (1U << i));
ret = dict_get_uint64(dict, key, &rb_counts[i]);
if (ret) {
gf_log("cli", GF_LOG_DEBUG, "failed to get %s from dict", key);
@@ -5554,7 +5350,8 @@ cmd_profile_volume_brick_out(dict_t *dict, int count, int interval)
}
for (i = 0; i < 32; i++) {
- snprintf(key, sizeof(key), "%d-%d-write-%d", count, interval, (1 << i));
+ snprintf(key, sizeof(key), "%d-%d-write-%" PRIu32, count, interval,
+ (1U << i));
ret = dict_get_uint64(dict, key, &wb_counts[i]);
if (ret) {
gf_log("cli", GF_LOG_DEBUG, "failed to get %s from dict", key);
@@ -5640,7 +5437,8 @@ cmd_profile_volume_brick_out(dict_t *dict, int count, int interval)
if ((rb_counts[i] == 0) && (wb_counts[i] == 0))
continue;
per_line++;
- snprintf(output + index, sizeof(output) - index, "%19db+ ", (1 << i));
+ snprintf(output + index, sizeof(output) - index, "%19" PRIu32 "b+ ",
+ (1U << i));
if (rb_counts[i]) {
snprintf(read_blocks + index, sizeof(read_blocks) - index,
"%21" PRId64 " ", rb_counts[i]);
@@ -5717,7 +5515,7 @@ cmd_profile_volume_brick_out(dict_t *dict, int count, int interval)
cli_out(" ");
}
-int32_t
+static int32_t
gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -5727,7 +5525,8 @@ gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
int ret = -1;
dict_t *dict = NULL;
gf1_cli_stats_op op = GF_CLI_STATS_NONE;
- char key[256] = {0};
+ char key[64] = {0};
+ int len;
int interval = 0;
int i = 1;
int32_t brick_count = 0;
@@ -5749,7 +5548,7 @@ gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -5763,7 +5562,7 @@ gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("", GF_LOG_ERROR, "Unable to allocate memory");
+ gf_log("", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
@@ -5771,15 +5570,15 @@ gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_profile(dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_int32_sizen(dict, "op", (int32_t *)&op);
if (ret)
goto out;
- ret = dict_get_int32(dict, "op", (int32_t *)&op);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
@@ -5814,11 +5613,7 @@ gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- ret = dict_get_int32(dict, "info-op", (int32_t *)&info_op);
- if (ret)
- goto out;
-
- ret = dict_get_int32(dict, "count", &brick_count);
+ ret = dict_get_int32_sizen(dict, "count", &brick_count);
if (ret)
goto out;
@@ -5827,9 +5622,13 @@ gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
+ ret = dict_get_int32_sizen(dict, "info-op", (int32_t *)&info_op);
+ if (ret)
+ goto out;
+
while (i <= brick_count) {
- snprintf(key, sizeof(key), "%d-brick", i);
- ret = dict_get_str(dict, key, &brick);
+ len = snprintf(key, sizeof(key), "%d-brick", i);
+ ret = dict_get_strn(dict, key, len, &brick);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Couldn't get brick name");
goto out;
@@ -5838,28 +5637,28 @@ gf_cli_profile_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_get_str_boolean(dict, "nfs", _gf_false);
if (ret)
- snprintf(str, sizeof(str), "NFS Server : %s", brick);
+ len = snprintf(str, sizeof(str), "NFS Server : %s", brick);
else
- snprintf(str, sizeof(str), "Brick: %s", brick);
+ len = snprintf(str, sizeof(str), "Brick: %s", brick);
cli_out("%s", str);
- memset(str, '-', strlen(str));
+ memset(str, '-', len);
cli_out("%s", str);
if (GF_CLI_INFO_CLEAR == info_op) {
- snprintf(key, sizeof(key), "%d-stats-cleared", i);
- ret = dict_get_int32(dict, key, &stats_cleared);
+ len = snprintf(key, sizeof(key), "%d-stats-cleared", i);
+ ret = dict_get_int32n(dict, key, len, &stats_cleared);
if (ret)
goto out;
cli_out(stats_cleared ? "Cleared stats."
: "Failed to clear stats.");
} else {
- snprintf(key, sizeof(key), "%d-cumulative", i);
- ret = dict_get_int32(dict, key, &interval);
+ len = snprintf(key, sizeof(key), "%d-cumulative", i);
+ ret = dict_get_int32n(dict, key, len, &interval);
if (ret == 0)
cmd_profile_volume_brick_out(dict, i, interval);
- snprintf(key, sizeof(key), "%d-interval", i);
- ret = dict_get_int32(dict, key, &interval);
+ len = snprintf(key, sizeof(key), "%d-interval", i);
+ ret = dict_get_int32n(dict, key, len, &interval);
if (ret == 0)
cmd_profile_volume_brick_out(dict, i, interval);
}
@@ -5875,7 +5674,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_profile_volume(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = -1;
@@ -5888,22 +5687,18 @@ gf_cli_profile_volume(call_frame_t *frame, xlator_t *this, void *data)
GF_ASSERT(this);
GF_ASSERT(data);
- if (!frame || !this || !data)
- goto out;
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_profile_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_PROFILE_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -5914,6 +5709,7 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
dict_t *dict = NULL;
gf1_cli_stats_op op = GF_CLI_STATS_NONE;
char key[256] = {0};
+ int keylen;
int i = 0;
int32_t brick_count = 0;
char brick[1024];
@@ -5929,7 +5725,7 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
double time = 0;
int32_t time_sec = 0;
long int time_usec = 0;
- char timestr[256] = {
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
char *openfd_str = NULL;
@@ -5947,7 +5743,7 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -5969,11 +5765,11 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("", GF_LOG_ERROR, "Unable to allocate memory");
+ gf_log("", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
- ret = dict_get_int32(dict, "op", (int32_t *)&op);
+ ret = dict_get_int32_sizen(dict, "op", (int32_t *)&op);
if (op != GF_CLI_STATS_TOP) {
ret = 0;
@@ -5984,16 +5780,16 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_top(dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
}
goto out;
}
- ret = dict_get_int32(dict, "count", &brick_count);
+ ret = dict_get_int32_sizen(dict, "count", &brick_count);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%d-top-op", 1);
- ret = dict_get_int32(dict, key, (int32_t *)&top_op);
+ keylen = snprintf(key, sizeof(key), "%d-top-op", 1);
+ ret = dict_get_int32n(dict, key, keylen, (int32_t *)&top_op);
if (ret)
goto out;
@@ -6001,16 +5797,16 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
while (i < brick_count) {
i++;
- snprintf(brick, sizeof(brick), "%d-brick", i);
- ret = dict_get_str(dict, brick, &bricks);
+ keylen = snprintf(brick, sizeof(brick), "%d-brick", i);
+ ret = dict_get_strn(dict, brick, keylen, &bricks);
if (ret)
goto out;
nfs = dict_get_str_boolean(dict, "nfs", _gf_false);
if (clear_stats) {
- snprintf(key, sizeof(key), "%d-stats-cleared", i);
- ret = dict_get_int32(dict, key, &stats_cleared);
+ keylen = snprintf(key, sizeof(key), "%d-stats-cleared", i);
+ ret = dict_get_int32n(dict, key, keylen, &stats_cleared);
if (ret)
goto out;
cli_out(stats_cleared ? "Cleared stats for %s %s"
@@ -6024,8 +5820,8 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
else
cli_out("Brick: %s", bricks);
- snprintf(key, sizeof(key), "%d-members", i);
- ret = dict_get_int32(dict, key, &members);
+ keylen = snprintf(key, sizeof(key), "%d-members", i);
+ ret = dict_get_int32n(dict, key, keylen, &members);
switch (top_op) {
case GF_CLI_TOP_OPEN:
@@ -6037,13 +5833,12 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_get_uint64(dict, key, &max_nr_open);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%d-max-openfd-time", i);
- ret = dict_get_str(dict, key, &openfd_str);
+ keylen = snprintf(key, sizeof(key), "%d-max-openfd-time", i);
+ ret = dict_get_strn(dict, key, keylen, &openfd_str);
if (ret)
goto out;
- cli_out("Current open fds: %" PRIu64
- ", Max open"
- " fds: %" PRIu64 ", Max openfd time: %s",
+ cli_out("Current open fds: %" PRIu64 ", Max open fds: %" PRIu64
+ ", Max openfd time: %s",
nr_open, max_nr_open, openfd_str);
case GF_CLI_TOP_READ:
case GF_CLI_TOP_WRITE:
@@ -6081,8 +5876,8 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
for (j = 1; j <= members; j++) {
- snprintf(key, sizeof(key), "%d-filename-%d", i, j);
- ret = dict_get_str(dict, key, &filename);
+ keylen = snprintf(key, sizeof(key), "%d-filename-%d", i, j);
+ ret = dict_get_strn(dict, key, keylen, &filename);
if (ret)
break;
snprintf(key, sizeof(key), "%d-value-%d", i, j);
@@ -6091,12 +5886,12 @@ gf_cli_top_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
if (top_op == GF_CLI_TOP_READ_PERF ||
top_op == GF_CLI_TOP_WRITE_PERF) {
- snprintf(key, sizeof(key), "%d-time-sec-%d", i, j);
- ret = dict_get_int32(dict, key, (int32_t *)&time_sec);
+ keylen = snprintf(key, sizeof(key), "%d-time-sec-%d", i, j);
+ ret = dict_get_int32n(dict, key, keylen, (int32_t *)&time_sec);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%d-time-usec-%d", i, j);
- ret = dict_get_int32(dict, key, (int32_t *)&time_usec);
+ keylen = snprintf(key, sizeof(key), "%d-time-usec-%d", i, j);
+ ret = dict_get_int32n(dict, key, keylen, (int32_t *)&time_usec);
if (ret)
goto out;
gf_time_fmt(timestr, sizeof timestr, time_sec, gf_timefmt_FT);
@@ -6130,7 +5925,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_top_volume(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = -1;
@@ -6143,21 +5938,17 @@ gf_cli_top_volume(call_frame_t *frame, xlator_t *this, void *data)
GF_ASSERT(this);
GF_ASSERT(data);
- if (!frame || !this || !data)
- goto out;
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_top_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_PROFILE_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int
+static int
gf_cli_getwd_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -6175,7 +5966,7 @@ gf_cli_getwd_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_getwd_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -6200,7 +5991,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_getwd(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = -1;
@@ -6219,12 +6010,12 @@ gf_cli_getwd(call_frame_t *frame, xlator_t *this, void *data)
(xdrproc_t)xdr_gf1_cli_getwd_req);
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-void
+static void
cli_print_volume_status_mempool(dict_t *dict, char *prefix)
{
int ret = -1;
@@ -6237,16 +6028,18 @@ cli_print_volume_status_mempool(dict_t *dict, char *prefix)
int32_t maxalloc = 0;
uint64_t pool_misses = 0;
int32_t maxstdalloc = 0;
- char key[1024] = {
+ char key[128] = {
+ /* prefix is really small 'brick%d' really */
0,
};
+ int keylen;
int i = 0;
GF_ASSERT(dict);
GF_ASSERT(prefix);
- snprintf(key, sizeof(key), "%s.mempool-count", prefix);
- ret = dict_get_int32(dict, key, &mempool_count);
+ keylen = snprintf(key, sizeof(key), "%s.mempool-count", prefix);
+ ret = dict_get_int32n(dict, key, keylen, &mempool_count);
if (ret)
goto out;
@@ -6259,18 +6052,18 @@ cli_print_volume_status_mempool(dict_t *dict, char *prefix)
"------------");
for (i = 0; i < mempool_count; i++) {
- snprintf(key, sizeof(key), "%s.pool%d.name", prefix, i);
- ret = dict_get_str(dict, key, &name);
+ keylen = snprintf(key, sizeof(key), "%s.pool%d.name", prefix, i);
+ ret = dict_get_strn(dict, key, keylen, &name);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%s.pool%d.hotcount", prefix, i);
- ret = dict_get_int32(dict, key, &hotcount);
+ keylen = snprintf(key, sizeof(key), "%s.pool%d.hotcount", prefix, i);
+ ret = dict_get_int32n(dict, key, keylen, &hotcount);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%s.pool%d.coldcount", prefix, i);
- ret = dict_get_int32(dict, key, &coldcount);
+ keylen = snprintf(key, sizeof(key), "%s.pool%d.coldcount", prefix, i);
+ ret = dict_get_int32n(dict, key, keylen, &coldcount);
if (ret)
goto out;
@@ -6284,13 +6077,14 @@ cli_print_volume_status_mempool(dict_t *dict, char *prefix)
if (ret)
goto out;
- snprintf(key, sizeof(key), "%s.pool%d.max_alloc", prefix, i);
- ret = dict_get_int32(dict, key, &maxalloc);
+ keylen = snprintf(key, sizeof(key), "%s.pool%d.max_alloc", prefix, i);
+ ret = dict_get_int32n(dict, key, keylen, &maxalloc);
if (ret)
goto out;
- snprintf(key, sizeof(key), "%s.pool%d.max-stdalloc", prefix, i);
- ret = dict_get_int32(dict, key, &maxstdalloc);
+ keylen = snprintf(key, sizeof(key), "%s.pool%d.max-stdalloc", prefix,
+ i);
+ ret = dict_get_int32n(dict, key, keylen, &maxstdalloc);
if (ret)
goto out;
@@ -6309,7 +6103,7 @@ out:
return;
}
-void
+static void
cli_print_volume_status_mem(dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
@@ -6317,7 +6111,7 @@ cli_print_volume_status_mem(dict_t *dict, gf_boolean_t notbrick)
char *hostname = NULL;
char *path = NULL;
int online = -1;
- char key[1024] = {
+ char key[64] = {
0,
};
int brick_index_max = -1;
@@ -6328,15 +6122,15 @@ cli_print_volume_status_mem(dict_t *dict, gf_boolean_t notbrick)
GF_ASSERT(dict);
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
cli_out("Memory status for volume : %s", volname);
- ret = dict_get_int32(dict, "brick-index-max", &brick_index_max);
+ ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max);
if (ret)
goto out;
- ret = dict_get_int32(dict, "other-count", &other_count);
+ ret = dict_get_int32_sizen(dict, "other-count", &other_count);
if (ret)
goto out;
@@ -6441,14 +6235,14 @@ out:
return;
}
-void
+static void
cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
char *volname = NULL;
int client_count = 0;
int current_count = 0;
- char key[1024] = {
+ char key[64] = {
0,
};
int i = 0;
@@ -6463,12 +6257,12 @@ cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
GF_ASSERT(dict);
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
cli_out("Client connections for volume %s", volname);
- ret = dict_get_int32(dict, "client-count", &client_count);
+ ret = dict_get_int32_sizen(dict, "client-count", &client_count);
if (ret)
goto out;
@@ -6482,7 +6276,7 @@ cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
if (!strncmp(name, "fuse", 4)) {
if (!is_fuse_done) {
is_fuse_done = _gf_true;
- ret = dict_get_int32(dict, "fuse-count", &current_count);
+ ret = dict_get_int32_sizen(dict, "fuse-count", &current_count);
if (ret)
goto out;
total = total + current_count;
@@ -6492,7 +6286,7 @@ cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
} else if (!strncmp(name, "gfapi", 5)) {
if (!is_gfapi_done) {
is_gfapi_done = _gf_true;
- ret = dict_get_int32(dict, "gfapi-count", &current_count);
+ ret = dict_get_int32_sizen(dict, "gfapi-count", &current_count);
if (ret)
goto out;
total = total + current_count;
@@ -6502,7 +6296,8 @@ cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
} else if (!strcmp(name, "rebalance")) {
if (!is_rebalance_done) {
is_rebalance_done = _gf_true;
- ret = dict_get_int32(dict, "rebalance-count", &current_count);
+ ret = dict_get_int32_sizen(dict, "rebalance-count",
+ &current_count);
if (ret)
goto out;
total = total + current_count;
@@ -6512,7 +6307,8 @@ cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
} else if (!strcmp(name, "glustershd")) {
if (!is_glustershd_done) {
is_glustershd_done = _gf_true;
- ret = dict_get_int32(dict, "glustershd-count", &current_count);
+ ret = dict_get_int32_sizen(dict, "glustershd-count",
+ &current_count);
if (ret)
goto out;
total = total + current_count;
@@ -6522,7 +6318,8 @@ cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
} else if (!strcmp(name, "quotad")) {
if (!is_quotad_done) {
is_quotad_done = _gf_true;
- ret = dict_get_int32(dict, "quotad-count", &current_count);
+ ret = dict_get_int32_sizen(dict, "quotad-count",
+ &current_count);
if (ret)
goto out;
total = total + current_count;
@@ -6532,7 +6329,7 @@ cli_print_volume_status_client_list(dict_t *dict, gf_boolean_t notbrick)
} else if (!strcmp(name, "snapd")) {
if (!is_snapd_done) {
is_snapd_done = _gf_true;
- ret = dict_get_int32(dict, "snapd-count", &current_count);
+ ret = dict_get_int32_sizen(dict, "snapd-count", &current_count);
if (ret)
goto out;
total = total + current_count;
@@ -6551,7 +6348,7 @@ out:
return;
}
-void
+static void
cli_print_volume_status_clients(dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
@@ -6567,7 +6364,7 @@ cli_print_volume_status_clients(dict_t *dict, gf_boolean_t notbrick)
uint64_t bytesread = 0;
uint64_t byteswrite = 0;
uint32_t opversion = 0;
- char key[1024] = {
+ char key[128] = {
0,
};
int i = 0;
@@ -6575,15 +6372,15 @@ cli_print_volume_status_clients(dict_t *dict, gf_boolean_t notbrick)
GF_ASSERT(dict);
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
cli_out("Client connections for volume %s", volname);
- ret = dict_get_int32(dict, "brick-index-max", &brick_index_max);
+ ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max);
if (ret)
goto out;
- ret = dict_get_int32(dict, "other-count", &other_count);
+ ret = dict_get_int32_sizen(dict, "other-count", &other_count);
if (ret)
goto out;
@@ -6656,7 +6453,8 @@ out:
return;
}
-void
+#ifdef DEBUG /* this function is only used in debug */
+static void
cli_print_volume_status_inode_entry(dict_t *dict, char *prefix)
{
int ret = -1;
@@ -6725,8 +6523,9 @@ cli_print_volume_status_inode_entry(dict_t *dict, char *prefix)
out:
return;
}
+#endif
-void
+static void
cli_print_volume_status_itables(dict_t *dict, char *prefix)
{
int ret = -1;
@@ -6812,7 +6611,7 @@ out:
return;
}
-void
+static void
cli_print_volume_status_inode(dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
@@ -6824,7 +6623,7 @@ cli_print_volume_status_inode(dict_t *dict, gf_boolean_t notbrick)
char *path = NULL;
int online = -1;
int conn_count = 0;
- char key[1024] = {
+ char key[64] = {
0,
};
int i = 0;
@@ -6832,15 +6631,15 @@ cli_print_volume_status_inode(dict_t *dict, gf_boolean_t notbrick)
GF_ASSERT(dict);
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
cli_out("Inode tables for volume %s", volname);
- ret = dict_get_int32(dict, "brick-index-max", &brick_index_max);
+ ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max);
if (ret)
goto out;
- ret = dict_get_int32(dict, "other-count", &other_count);
+ ret = dict_get_int32_sizen(dict, "other-count", &other_count);
if (ret)
goto out;
@@ -6897,7 +6696,7 @@ void
cli_print_volume_status_fdtable(dict_t *dict, char *prefix)
{
int ret = -1;
- char key[1024] = {
+ char key[256] = {
0,
};
int refcount = 0;
@@ -6965,7 +6764,7 @@ out:
return;
}
-void
+static void
cli_print_volume_status_fd(dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
@@ -6977,7 +6776,7 @@ cli_print_volume_status_fd(dict_t *dict, gf_boolean_t notbrick)
char *path = NULL;
int online = -1;
int conn_count = 0;
- char key[1024] = {
+ char key[64] = {
0,
};
int i = 0;
@@ -6985,15 +6784,15 @@ cli_print_volume_status_fd(dict_t *dict, gf_boolean_t notbrick)
GF_ASSERT(dict);
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
cli_out("FD tables for volume %s", volname);
- ret = dict_get_int32(dict, "brick-index-max", &brick_index_max);
+ ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max);
if (ret)
goto out;
- ret = dict_get_int32(dict, "other-count", &other_count);
+ ret = dict_get_int32_sizen(dict, "other-count", &other_count);
if (ret)
goto out;
@@ -7046,7 +6845,7 @@ out:
return;
}
-void
+static void
cli_print_volume_status_call_frame(dict_t *dict, char *prefix)
{
int ret = -1;
@@ -7110,11 +6909,11 @@ cli_print_volume_status_call_frame(dict_t *dict, char *prefix)
cli_out(" Unwind To = %s", unwind_to);
}
-void
+static void
cli_print_volume_status_call_stack(dict_t *dict, char *prefix)
{
int ret = -1;
- char key[1024] = {
+ char key[256] = {
0,
};
int uid = 0;
@@ -7125,7 +6924,7 @@ cli_print_volume_status_call_stack(dict_t *dict, char *prefix)
int count = 0;
int i = 0;
- if (!dict || !prefix)
+ if (!prefix)
return;
snprintf(key, sizeof(key), "%s.uid", prefix);
@@ -7177,7 +6976,7 @@ cli_print_volume_status_call_stack(dict_t *dict, char *prefix)
cli_out(" ");
}
-void
+static void
cli_print_volume_status_callpool(dict_t *dict, gf_boolean_t notbrick)
{
int ret = -1;
@@ -7189,7 +6988,7 @@ cli_print_volume_status_callpool(dict_t *dict, gf_boolean_t notbrick)
char *path = NULL;
int online = -1;
int call_count = 0;
- char key[1024] = {
+ char key[64] = {
0,
};
int i = 0;
@@ -7197,15 +6996,15 @@ cli_print_volume_status_callpool(dict_t *dict, gf_boolean_t notbrick)
GF_ASSERT(dict);
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
cli_out("Pending calls for volume %s", volname);
- ret = dict_get_int32(dict, "brick-index-max", &brick_index_max);
+ ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max);
if (ret)
goto out;
- ret = dict_get_int32(dict, "other-count", &other_count);
+ ret = dict_get_int32_sizen(dict, "other-count", &other_count);
if (ret)
goto out;
@@ -7282,16 +7081,16 @@ cli_print_volume_status_tasks(dict_t *dict)
};
char *brick = NULL;
- ret = dict_get_str(dict, "volname", &volname);
- if (ret)
- goto out;
-
- ret = dict_get_int32(dict, "tasks", &task_count);
+ ret = dict_get_int32_sizen(dict, "tasks", &task_count);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to get tasks count");
return;
}
+ ret = dict_get_str_sizen(dict, "volname", &volname);
+ if (ret)
+ goto out;
+
cli_out("Task Status of Volume %s", volname);
cli_print_line(CLI_BRICK_STATUS_LINE_LEN);
@@ -7360,7 +7159,7 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
int pid = -1;
uint32_t cmd = 0;
gf_boolean_t notbrick = _gf_false;
- char key[1024] = {
+ char key[64] = {
0,
};
char *hostname = NULL;
@@ -7385,7 +7184,7 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -7407,8 +7206,7 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
snprintf(msg, sizeof(msg), "%s", rsp.op_errstr);
else
snprintf(msg, sizeof(msg),
- "Unable to obtain volume "
- "status information.");
+ "Unable to obtain volume status information.");
if (global_state->mode & GLUSTER_MODE_XML) {
if (!local->all)
@@ -7455,32 +7253,25 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- if ((cmd & GF_CLI_STATUS_NFS) || (cmd & GF_CLI_STATUS_SHD) ||
- (cmd & GF_CLI_STATUS_QUOTAD) || (cmd & GF_CLI_STATUS_SNAPD) ||
- (cmd & GF_CLI_STATUS_BITD) || (cmd & GF_CLI_STATUS_SCRUB))
- notbrick = _gf_true;
-
if (global_state->mode & GLUSTER_MODE_XML) {
if (!local->all) {
ret = cli_xml_output_vol_status_begin(local, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto xml_end;
}
}
if (cmd & GF_CLI_STATUS_TASKS) {
ret = cli_xml_output_vol_status_tasks_detail(local, dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Error outputting "
- "to xml");
+ gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
goto xml_end;
}
} else {
ret = cli_xml_output_vol_status(local, dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto xml_end;
}
}
@@ -7489,18 +7280,17 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
if (!local->all) {
ret = cli_xml_output_vol_status_end(local);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
}
}
goto out;
}
- status.brick = GF_MALLOC(PATH_MAX + 256, gf_common_mt_strdup);
- if (!status.brick) {
- errno = ENOMEM;
- ret = -1;
- goto out;
- }
+ if ((cmd & GF_CLI_STATUS_NFS) || (cmd & GF_CLI_STATUS_SHD) ||
+ (cmd & GF_CLI_STATUS_QUOTAD) || (cmd & GF_CLI_STATUS_SNAPD) ||
+ (cmd & GF_CLI_STATUS_BITD) || (cmd & GF_CLI_STATUS_SCRUB))
+ notbrick = _gf_true;
+
switch (cmd & GF_CLI_STATUS_MASK) {
case GF_CLI_STATUS_MEM:
cli_print_volume_status_mem(dict, notbrick);
@@ -7534,25 +7324,25 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
break;
}
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
goto out;
- ret = dict_get_int32(dict, "brick-index-max", &brick_index_max);
+ ret = dict_get_int32_sizen(dict, "brick-index-max", &brick_index_max);
if (ret)
goto out;
- ret = dict_get_int32(dict, "other-count", &other_count);
+ ret = dict_get_int32_sizen(dict, "other-count", &other_count);
if (ret)
goto out;
index_max = brick_index_max + other_count;
- ret = dict_get_int32(dict, "type", &type);
+ ret = dict_get_int32_sizen(dict, "type", &type);
if (ret)
goto out;
- ret = dict_get_int32(dict, "hot_brick_count", &hot_brick_count);
+ ret = dict_get_int32_sizen(dict, "hot_brick_count", &hot_brick_count);
if (ret)
goto out;
@@ -7563,6 +7353,14 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
"Gluster process", "TCP Port", "RDMA Port", "Online", "Pid");
cli_print_line(CLI_BRICK_STATUS_LINE_LEN);
}
+
+ status.brick = GF_MALLOC(PATH_MAX + 256, gf_common_mt_strdup);
+ if (!status.brick) {
+ errno = ENOMEM;
+ ret = -1;
+ goto out;
+ }
+
for (i = 0; i <= index_max; i++) {
status.rdma_port = 0;
@@ -7579,7 +7377,7 @@ gf_cli_status_cbk(struct rpc_req *req, struct iovec *iov, int count,
/* Brick/not-brick is handled separately here as all
* types of nodes are contained in the default output
*/
- memset(status.brick, 0, PATH_MAX + 255);
+ status.brick[0] = '\0';
if (!strcmp(hostname, "NFS Server") ||
!strcmp(hostname, "Self-heal Daemon") ||
!strcmp(hostname, "Quota Daemon") ||
@@ -7651,7 +7449,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_status_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -7660,21 +7458,17 @@ gf_cli_status_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = -1;
dict_t *dict = NULL;
- if (!frame || !this || !data)
- goto out;
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_status_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_STATUS_VOLUME, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning: %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int
+static int
gf_cli_status_volume_all(call_frame_t *frame, xlator_t *this, void *data)
{
int i = 0;
@@ -7710,7 +7504,7 @@ gf_cli_status_volume_all(call_frame_t *frame, xlator_t *this, void *data)
if (ret)
goto out;
- ret = dict_get_int32((dict_t *)vol_dict, "vol_count", &vol_count);
+ ret = dict_get_int32_sizen((dict_t *)vol_dict, "vol_count", &vol_count);
if (ret) {
cli_err("Failed to get names of volumes");
goto out;
@@ -7724,7 +7518,7 @@ gf_cli_status_volume_all(call_frame_t *frame, xlator_t *this, void *data)
// TODO: Pass proper op_* values
ret = cli_xml_output_vol_status_begin(local, 0, 0, NULL);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto xml_end;
}
}
@@ -7740,12 +7534,12 @@ gf_cli_status_volume_all(call_frame_t *frame, xlator_t *this, void *data)
if (!dict)
goto out;
- snprintf(key, sizeof(key), "vol%d", i);
- ret = dict_get_str(vol_dict, key, &volname);
+ ret = snprintf(key, sizeof(key), "vol%d", i);
+ ret = dict_get_strn(vol_dict, key, ret, &volname);
if (ret)
goto out;
- ret = dict_set_str(dict, "volname", volname);
+ ret = dict_set_str_sizen(dict, "volname", volname);
if (ret)
goto out;
@@ -7802,7 +7596,7 @@ gf_cli_mount_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_mount_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -7826,7 +7620,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_mount(call_frame_t *frame, xlator_t *this, void *data)
{
gf1_cli_mount_req req = {
@@ -7857,7 +7651,7 @@ gf_cli_mount(call_frame_t *frame, xlator_t *this, void *data)
out:
GF_FREE(req.dict.dict_val);
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
@@ -7879,7 +7673,7 @@ gf_cli_umount_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf1_cli_umount_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -7897,7 +7691,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_umount(call_frame_t *frame, xlator_t *this, void *data)
{
gf1_cli_umount_req req = {
@@ -7911,9 +7705,9 @@ gf_cli_umount(call_frame_t *frame, xlator_t *this, void *data)
dict = data;
- ret = dict_get_str(dict, "path", &req.path);
+ ret = dict_get_str_sizen(dict, "path", &req.path);
if (ret == 0)
- ret = dict_get_int32(dict, "lazy", &req.lazy);
+ ret = dict_get_int32_sizen(dict, "lazy", &req.lazy);
if (ret) {
ret = -1;
@@ -7925,11 +7719,11 @@ gf_cli_umount(call_frame_t *frame, xlator_t *this, void *data)
(xdrproc_t)xdr_gf1_cli_umount_req);
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-void
+static void
cmd_heal_volume_statistics_out(dict_t *dict, int brick)
{
uint64_t num_entries = 0;
@@ -8010,18 +7804,18 @@ out:
return;
}
-void
+static void
cmd_heal_volume_brick_out(dict_t *dict, int brick)
{
uint64_t num_entries = 0;
int ret = 0;
- char key[256] = {0};
+ char key[64] = {0};
char *hostname = NULL;
char *path = NULL;
char *status = NULL;
uint64_t i = 0;
uint32_t time = 0;
- char timestr[32] = {0};
+ char timestr[GF_TIMESTR_SIZE] = {0};
char *shd_status = NULL;
snprintf(key, sizeof key, "%d-hostname", brick);
@@ -8036,7 +7830,7 @@ cmd_heal_volume_brick_out(dict_t *dict, int brick)
snprintf(key, sizeof key, "%d-status", brick);
ret = dict_get_str(dict, key, &status);
- if (status && strlen(status))
+ if (status && status[0] != '\0')
cli_out("Status: %s", status);
snprintf(key, sizeof key, "%d-shd-status", brick);
@@ -8072,12 +7866,12 @@ out:
return;
}
-void
+static void
cmd_heal_volume_statistics_heal_count_out(dict_t *dict, int brick)
{
uint64_t num_entries = 0;
int ret = 0;
- char key[256] = {0};
+ char key[64] = {0};
char *hostname = NULL;
char *path = NULL;
char *status = NULL;
@@ -8114,18 +7908,16 @@ out:
return;
}
-int
+static int
gf_is_cli_heal_get_command(gf_xl_afr_op_t heal_op)
{
/* If the command is get command value is 1 otherwise 0, for
invalid commands -1 */
- int get_cmds[GF_SHD_OP_HEAL_DISABLE + 1] = {
+ static int get_cmds[GF_SHD_OP_HEAL_DISABLE + 1] = {
[GF_SHD_OP_INVALID] = -1,
[GF_SHD_OP_HEAL_INDEX] = 0,
[GF_SHD_OP_HEAL_FULL] = 0,
[GF_SHD_OP_INDEX_SUMMARY] = 1,
- [GF_SHD_OP_HEALED_FILES] = 1,
- [GF_SHD_OP_HEAL_FAILED_FILES] = 1,
[GF_SHD_OP_SPLIT_BRAIN_FILES] = 1,
[GF_SHD_OP_STATISTICS] = 1,
[GF_SHD_OP_STATISTICS_HEAL_COUNT] = 1,
@@ -8154,9 +7946,9 @@ gf_cli_heal_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
int brick_count = 0;
int i = 0;
gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
- char *operation = NULL;
- char *substr = NULL;
- char *heal_op_str = NULL;
+ const char *operation = NULL;
+ const char *substr = NULL;
+ const char *heal_op_str = NULL;
GF_ASSERT(myframe);
@@ -8172,25 +7964,23 @@ gf_cli_heal_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
- ret = dict_get_int32(local->dict, "heal-op", (int32_t *)&heal_op);
+ ret = dict_get_int32_sizen(local->dict, "heal-op", (int32_t *)&heal_op);
// TODO: Proper XML output
//#if (HAVE_LIB_XML)
// if (global_state->mode & GLUSTER_MODE_XML) {
// ret = cli_xml_output_dict ("volHeal", dict, rsp.op_ret,
// rsp.op_errno, rsp.op_errstr);
// if (ret)
- // gf_log ("cli", GF_LOG_ERROR,
- // "Error outputting to xml");
+ // gf_log ("cli", GF_LOG_ERROR, XML_ERROR);
// goto out;
// }
//#endif
- ret = dict_get_str(local->dict, "volname", &volname);
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
if (ret) {
gf_log(frame->this->name, GF_LOG_ERROR, "failed to get volname");
goto out;
@@ -8204,26 +7994,16 @@ gf_cli_heal_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
case GF_SHD_OP_HEAL_INDEX:
operation = "Launching heal operation ";
heal_op_str = "to perform index self heal";
- substr =
- "\nUse heal info commands to check"
- " status.";
+ substr = "\nUse heal info commands to check status.";
break;
case GF_SHD_OP_HEAL_FULL:
operation = "Launching heal operation ";
heal_op_str = "to perform full self heal";
- substr =
- "\nUse heal info commands to check"
- " status.";
+ substr = "\nUse heal info commands to check status.";
break;
case GF_SHD_OP_INDEX_SUMMARY:
heal_op_str = "list of entries to be healed";
break;
- case GF_SHD_OP_HEALED_FILES:
- heal_op_str = "list of healed entries";
- break;
- case GF_SHD_OP_HEAL_FAILED_FILES:
- heal_op_str = "list of heal failed entries";
- break;
case GF_SHD_OP_SPLIT_BRAIN_FILES:
heal_op_str = "list of split brain entries";
break;
@@ -8236,12 +8016,14 @@ gf_cli_heal_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
heal_op_str = "count of entries to be healed per replica";
break;
- /* The below 4 cases are never hit; they're coded only to make
- * compiler warnings go away.*/
case GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE:
case GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME:
case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK:
case GF_SHD_OP_HEAL_SUMMARY:
+ case GF_SHD_OP_HEALED_FILES:
+ case GF_SHD_OP_HEAL_FAILED_FILES:
+ /* These cases are never hit; they're coded just to silence the
+ * compiler warnings.*/
break;
case GF_SHD_OP_INVALID:
@@ -8291,11 +8073,11 @@ gf_cli_heal_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("", GF_LOG_ERROR, "Unable to allocate memory");
+ gf_log("", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
- ret = dict_get_int32(dict, "count", &brick_count);
+ ret = dict_get_int32_sizen(dict, "count", &brick_count);
if (ret)
goto out;
@@ -8316,8 +8098,6 @@ gf_cli_heal_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
cmd_heal_volume_statistics_heal_count_out(dict, i);
break;
case GF_SHD_OP_INDEX_SUMMARY:
- case GF_SHD_OP_HEALED_FILES:
- case GF_SHD_OP_HEAL_FAILED_FILES:
case GF_SHD_OP_SPLIT_BRAIN_FILES:
for (i = 0; i < brick_count; i++)
cmd_heal_volume_brick_out(dict, i);
@@ -8336,7 +8116,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_heal_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -8345,26 +8125,20 @@ gf_cli_heal_volume(call_frame_t *frame, xlator_t *this, void *data)
int ret = 0;
dict_t *dict = NULL;
- if (!frame || !this || !data) {
- ret = -1;
- goto out;
- }
-
dict = data;
ret = cli_to_glusterd(&req, frame, gf_cli_heal_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, dict,
GLUSTER_CLI_HEAL_VOLUME, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_statedump_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -8372,9 +8146,7 @@ gf_cli_statedump_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
0,
};
int ret = -1;
- char msg[1024] = {
- 0,
- };
+ char msg[1024] = "Volume statedump successful";
GF_ASSERT(myframe);
@@ -8384,20 +8156,18 @@ gf_cli_statedump_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
gf_log("cli", GF_LOG_DEBUG, "Received response to statedump");
if (rsp.op_ret)
snprintf(msg, sizeof(msg), "%s", rsp.op_errstr);
- else
- snprintf(msg, sizeof(msg), "Volume statedump successful");
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_str("volStatedump", msg, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -8413,7 +8183,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_statedump_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -8422,23 +8192,18 @@ gf_cli_statedump_volume(call_frame_t *frame, xlator_t *this, void *data)
dict_t *options = NULL;
int ret = -1;
- if (!frame || !this || !data)
- goto out;
-
options = data;
ret = cli_to_glusterd(
&req, frame, gf_cli_statedump_volume_cbk, (xdrproc_t)xdr_gf_cli_req,
options, GLUSTER_CLI_STATEDUMP_VOLUME, this, cli_rpc_prog, NULL);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_list_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -8463,7 +8228,7 @@ gf_cli_list_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -8475,7 +8240,7 @@ gf_cli_list_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Unable to allocate memory");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
@@ -8483,14 +8248,14 @@ gf_cli_list_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_list(dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
if (rsp.op_ret)
cli_err("%s", rsp.op_errstr);
else {
- ret = dict_get_int32(dict, "count", &vol_count);
+ ret = dict_get_int32_sizen(dict, "count", &vol_count);
if (ret)
goto out;
@@ -8499,8 +8264,8 @@ gf_cli_list_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
for (i = 0; i < vol_count; i++) {
- snprintf(key, sizeof(key), "volume%d", i);
- ret = dict_get_str(dict, key, &volname);
+ ret = snprintf(key, sizeof(key), "volume%d", i);
+ ret = dict_get_strn(dict, key, ret, &volname);
if (ret)
goto out;
cli_out("%s", volname);
@@ -8518,7 +8283,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_list_volume(call_frame_t *frame, xlator_t *this, void *data)
{
int ret = -1;
@@ -8526,19 +8291,14 @@ gf_cli_list_volume(call_frame_t *frame, xlator_t *this, void *data)
0,
}};
- if (!frame || !this)
- goto out;
-
ret = cli_cmd_submit(NULL, &req, frame, cli_rpc_prog,
GLUSTER_CLI_LIST_VOLUME, NULL, this,
gf_cli_list_volume_cbk, (xdrproc_t)xdr_gf_cli_req);
-
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
return ret;
}
-int32_t
+static int32_t
gf_cli_clearlocks_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -8547,7 +8307,6 @@ gf_cli_clearlocks_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
};
int ret = -1;
char *lk_summary = NULL;
- char *volname = NULL;
dict_t *dict = NULL;
GF_ASSERT(myframe);
@@ -8558,7 +8317,7 @@ gf_cli_clearlocks_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
gf_log("cli", GF_LOG_DEBUG, "Received response to clear-locks");
@@ -8584,24 +8343,14 @@ gf_cli_clearlocks_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Unable to serialize response dictionary");
- goto out;
- }
-
- ret = dict_get_str(dict, "volname", &volname);
- if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Unable to get volname "
- "from dictionary");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
ret = dict_get_str(dict, "lk-summary", &lk_summary);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Unable to get lock "
- "summary from dictionary");
+ "Unable to get lock summary from dictionary");
goto out;
}
cli_out("Volume clear-locks successful");
@@ -8618,7 +8367,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_clearlocks_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -8627,22 +8376,18 @@ gf_cli_clearlocks_volume(call_frame_t *frame, xlator_t *this, void *data)
dict_t *options = NULL;
int ret = -1;
- if (!frame || !this || !data)
- goto out;
-
options = data;
ret = cli_to_glusterd(
&req, frame, gf_cli_clearlocks_volume_cbk, (xdrproc_t)xdr_gf_cli_req,
options, GLUSTER_CLI_CLRLOCKS_VOLUME, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
cli_snapshot_remove_reply(gf_cli_rsp *rsp, dict_t *dict, call_frame_t *frame)
{
int32_t ret = -1;
@@ -8656,7 +8401,7 @@ cli_snapshot_remove_reply(gf_cli_rsp *rsp, dict_t *dict, call_frame_t *frame)
local = frame->local;
- ret = dict_get_int32(dict, "sub-cmd", &delete_cmd);
+ ret = dict_get_int32_sizen(dict, "sub-cmd", &delete_cmd);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not get sub-cmd");
goto end;
@@ -8668,8 +8413,7 @@ cli_snapshot_remove_reply(gf_cli_rsp *rsp, dict_t *dict, call_frame_t *frame)
rsp->op_errno, rsp->op_errstr);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to create "
- "xml output for delete");
+ "Failed to create xml output for delete");
goto end;
}
}
@@ -8705,8 +8449,7 @@ cli_snapshot_remove_reply(gf_cli_rsp *rsp, dict_t *dict, call_frame_t *frame)
ret = cli_xml_snapshot_delete(local, dict, rsp);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to create "
- "xml output for snapshot delete command");
+ "Failed to create xml output for snapshot delete command");
goto out;
}
/* Error out in case of the op already failed */
@@ -8715,7 +8458,7 @@ cli_snapshot_remove_reply(gf_cli_rsp *rsp, dict_t *dict, call_frame_t *frame)
goto out;
}
} else {
- ret = dict_get_str(dict, "snapname", &snap_name);
+ ret = dict_get_str_sizen(dict, "snapname", &snap_name);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to get snapname");
goto out;
@@ -8734,7 +8477,7 @@ end:
return ret;
}
-int
+static int
cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
{
char buf[PATH_MAX] = "";
@@ -8760,26 +8503,19 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
goto out;
}
- ret = dict_get_int32(dict, "config-command", &config_command);
+ ret = dict_get_int32_sizen(dict, "config-command", &config_command);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not fetch config type");
goto out;
}
- ret = dict_get_str(dict, "volname", &volname);
- /* Ignore the error, as volname is optional */
-
- if (!volname) {
- volname = "System";
- }
-
ret = dict_get_uint64(dict, "snap-max-hard-limit", &hard_limit);
/* Ignore the error, as the key specified is optional */
ret = dict_get_uint64(dict, "snap-max-soft-limit", &soft_limit);
- ret = dict_get_str(dict, "auto-delete", &auto_delete);
+ ret = dict_get_str_sizen(dict, "auto-delete", &auto_delete);
- ret = dict_get_str(dict, "snap-activate-on-create", &snap_activate);
+ ret = dict_get_str_sizen(dict, "snap-activate-on-create", &snap_activate);
if (!hard_limit && !soft_limit &&
config_command != GF_SNAP_CONFIG_DISPLAY && !auto_delete &&
@@ -8789,13 +8525,19 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
goto out;
}
+ ret = dict_get_str_sizen(dict, "volname", &volname);
+ /* Ignore the error, as volname is optional */
+
+ if (!volname) {
+ volname = "System";
+ }
+
switch (config_command) {
case GF_SNAP_CONFIG_TYPE_SET:
if (hard_limit && soft_limit) {
cli_out(
"snapshot config: snap-max-hard-limit "
- "& snap-max-soft-limit for system set "
- "successfully");
+ "& snap-max-soft-limit for system set successfully");
} else if (hard_limit) {
cli_out(
"snapshot config: snap-max-hard-limit "
@@ -8807,13 +8549,9 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
"for %s set successfully",
volname);
} else if (auto_delete) {
- cli_out(
- "snapshot config: auto-delete "
- "successfully set");
+ cli_out("snapshot config: auto-delete successfully set");
} else if (snap_activate) {
- cli_out(
- "snapshot config: activate-on-create "
- "successfully set");
+ cli_out("snapshot config: activate-on-create successfully set");
}
break;
@@ -8822,9 +8560,7 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
ret = dict_get_uint64(dict, "snap-max-hard-limit", &value);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Could not fetch "
- "snap_max_hard_limit for %s",
- volname);
+ "Could not fetch snap_max_hard_limit for %s", volname);
ret = -1;
goto out;
}
@@ -8833,9 +8569,7 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
ret = dict_get_uint64(dict, "snap-max-soft-limit", &soft_limit);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Could not fetch "
- "snap-max-soft-limit for %s",
- volname);
+ "Could not fetch snap-max-soft-limit for %s", volname);
ret = -1;
goto out;
}
@@ -8855,13 +8589,11 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
}
for (i = 0; i < voldisplaycount; i++) {
- snprintf(buf, sizeof(buf), "volume%" PRIu64 "-volname", i);
- ret = dict_get_str(dict, buf, &volname);
+ ret = snprintf(buf, sizeof(buf), "volume%" PRIu64 "-volname",
+ i);
+ ret = dict_get_strn(dict, buf, ret, &volname);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Could not fetch "
- " %s",
- buf);
+ gf_log("cli", GF_LOG_ERROR, "Could not fetch %s", buf);
ret = -1;
goto out;
}
@@ -8871,10 +8603,7 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
"volume%" PRIu64 "-snap-max-hard-limit", i);
ret = dict_get_uint64(dict, buf, &value);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Could not fetch "
- " %s",
- buf);
+ gf_log("cli", GF_LOG_ERROR, "Could not fetch %s", buf);
ret = -1;
goto out;
}
@@ -8886,8 +8615,7 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
if (ret) {
gf_log("cli", GF_LOG_ERROR,
"Could not fetch"
- " effective snap_max_hard_limit for "
- "%s",
+ " effective snap_max_hard_limit for %s",
volname);
ret = -1;
goto out;
@@ -8898,10 +8626,7 @@ cli_snapshot_config_display(dict_t *dict, gf_cli_rsp *rsp)
"volume%" PRIu64 "-snap-max-soft-limit", i);
ret = dict_get_uint64(dict, buf, &value);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Could not fetch "
- " %s",
- buf);
+ gf_log("cli", GF_LOG_ERROR, "Could not fetch %s", buf);
ret = -1;
goto out;
}
@@ -8926,7 +8651,7 @@ out:
* arg - 0, dict : Response Dictionary.
* arg - 1, prefix str : snaplist.snap{0..}.vol{0..}.*
*/
-int
+static int
cli_get_each_volinfo_in_snap(dict_t *dict, char *keyprefix,
gf_boolean_t snap_driven)
{
@@ -9013,7 +8738,7 @@ out:
* arg - 0, dict : Response dictionary.
* arg - 1, prefix_str : snaplist.snap{0..}.*
*/
-int
+static int
cli_get_volinfo_in_snap(dict_t *dict, char *keyprefix)
{
char key[PATH_MAX] = "";
@@ -9038,8 +8763,7 @@ cli_get_volinfo_in_snap(dict_t *dict, char *keyprefix)
ret = cli_get_each_volinfo_in_snap(dict, key, _gf_true);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Could not list "
- "details of volume in a snap");
+ "Could not list details of volume in a snap");
goto out;
}
cli_out(" ");
@@ -9049,7 +8773,7 @@ out:
return ret;
}
-int
+static int
cli_get_each_snap_info(dict_t *dict, char *prefix_str, gf_boolean_t snap_driven)
{
char key_buffer[PATH_MAX] = "";
@@ -9116,9 +8840,7 @@ cli_get_each_snap_info(dict_t *dict, char *prefix_str, gf_boolean_t snap_driven)
cli_out("%-12s", "Snap Volumes:\n");
ret = cli_get_volinfo_in_snap(dict, prefix_str);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Unable to list details "
- "of the snaps");
+ gf_log("cli", GF_LOG_ERROR, "Unable to list details of the snaps");
goto out;
}
}
@@ -9129,17 +8851,17 @@ out:
/* This is a generic function to print snap related information.
* arg - 0, dict : Response Dictionary
*/
-int
+static int
cli_call_snapshot_info(dict_t *dict, gf_boolean_t bool_snap_driven)
{
int snap_count = 0;
- char key[PATH_MAX] = "";
+ char key[32] = "";
int ret = -1;
int i = 0;
GF_ASSERT(dict);
- ret = dict_get_int32(dict, "snapcount", &snap_count);
+ ret = dict_get_int32_sizen(dict, "snapcount", &snap_count);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Unable to get snapcount");
goto out;
@@ -9164,33 +8886,33 @@ out:
return ret;
}
-int
+static int
cli_get_snaps_in_volume(dict_t *dict)
{
int ret = -1;
int i = 0;
int count = 0;
int avail = 0;
- char key[PATH_MAX] = "";
+ char key[32] = "";
char *get_buffer = NULL;
GF_ASSERT(dict);
- ret = dict_get_str(dict, "origin-volname", &get_buffer);
+ ret = dict_get_str_sizen(dict, "origin-volname", &get_buffer);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not fetch origin-volname");
goto out;
}
cli_out(INDENT_MAIN_HEAD "%s", "Volume Name", ":", get_buffer);
- ret = dict_get_int32(dict, "snapcount", &avail);
+ ret = dict_get_int32_sizen(dict, "snapcount", &avail);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not fetch snapcount");
goto out;
}
cli_out(INDENT_MAIN_HEAD "%d", "Snaps Taken", ":", avail);
- ret = dict_get_int32(dict, "snaps-available", &count);
+ ret = dict_get_int32_sizen(dict, "snaps-available", &count);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not fetch snaps-available");
goto out;
@@ -9212,8 +8934,7 @@ cli_get_snaps_in_volume(dict_t *dict)
ret = cli_get_each_volinfo_in_snap(dict, key, _gf_false);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Could not get volume "
- "related information");
+ "Could not get volume related information");
goto out;
}
@@ -9223,18 +8944,18 @@ out:
return ret;
}
-int
+static int
cli_snapshot_list(dict_t *dict)
{
int snapcount = 0;
- char key[PATH_MAX] = "";
+ char key[32] = "";
int ret = -1;
int i = 0;
char *get_buffer = NULL;
GF_ASSERT(dict);
- ret = dict_get_int32(dict, "snapcount", &snapcount);
+ ret = dict_get_int32_sizen(dict, "snapcount", &snapcount);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not fetch snap count");
goto out;
@@ -9250,7 +8971,7 @@ cli_snapshot_list(dict_t *dict)
goto out;
}
- ret = dict_get_str(dict, key, &get_buffer);
+ ret = dict_get_strn(dict, key, ret, &get_buffer);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not get %s ", key);
goto out;
@@ -9262,7 +8983,7 @@ out:
return ret;
}
-int
+static int
cli_get_snap_volume_status(dict_t *dict, char *key_prefix)
{
int ret = -1;
@@ -9363,11 +9084,11 @@ out:
return ret;
}
-int
+static int
cli_get_single_snap_status(dict_t *dict, char *keyprefix)
{
int ret = -1;
- char key[PATH_MAX] = "";
+ char key[64] = ""; /* keyprefix is "status.snap0" */
int i = 0;
int volcount = 0;
char *get_buffer = NULL;
@@ -9426,7 +9147,7 @@ out:
return ret;
}
-int32_t
+static int32_t
cli_populate_req_dict_for_delete(dict_t *snap_dict, dict_t *dict, size_t index)
{
int32_t ret = -1;
@@ -9436,11 +9157,10 @@ cli_populate_req_dict_for_delete(dict_t *snap_dict, dict_t *dict, size_t index)
GF_ASSERT(snap_dict);
GF_ASSERT(dict);
- ret = dict_set_int32(snap_dict, "sub-cmd", GF_SNAP_DELETE_TYPE_ITER);
+ ret = dict_set_int32_sizen(snap_dict, "sub-cmd", GF_SNAP_DELETE_TYPE_ITER);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Could not save command "
- "type in snap dictionary");
+ "Could not save command type in snap dictionary");
goto out;
}
@@ -9461,7 +9181,7 @@ cli_populate_req_dict_for_delete(dict_t *snap_dict, dict_t *dict, size_t index)
goto out;
}
- ret = dict_set_int32(snap_dict, "type", GF_SNAP_OPTION_TYPE_DELETE);
+ ret = dict_set_int32_sizen(snap_dict, "type", GF_SNAP_OPTION_TYPE_DELETE);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to save command type");
goto out;
@@ -9476,7 +9196,7 @@ out:
return ret;
}
-int
+static int
cli_populate_req_dict_for_status(dict_t *snap_dict, dict_t *dict, int index)
{
int ret = -1;
@@ -9488,9 +9208,7 @@ cli_populate_req_dict_for_status(dict_t *snap_dict, dict_t *dict, int index)
ret = dict_set_uint32(snap_dict, "sub-cmd", GF_SNAP_STATUS_TYPE_ITER);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Could not save command "
- "type in snap dict");
+ gf_log("cli", GF_LOG_ERROR, "Could not save command type in snap dict");
goto out;
}
@@ -9499,21 +9217,19 @@ cli_populate_req_dict_for_status(dict_t *snap_dict, dict_t *dict, int index)
goto out;
}
- ret = dict_get_str(dict, key, &buffer);
+ ret = dict_get_strn(dict, key, ret, &buffer);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not get snapname");
goto out;
}
- ret = dict_set_str(snap_dict, "snapname", buffer);
+ ret = dict_set_str_sizen(snap_dict, "snapname", buffer);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Could not save snapname "
- "in snap dict");
+ gf_log("cli", GF_LOG_ERROR, "Could not save snapname in snap dict");
goto out;
}
- ret = dict_set_int32(snap_dict, "type", GF_SNAP_OPTION_TYPE_STATUS);
+ ret = dict_set_int32_sizen(snap_dict, "type", GF_SNAP_OPTION_TYPE_STATUS);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not save command type");
goto out;
@@ -9525,7 +9241,7 @@ cli_populate_req_dict_for_status(dict_t *snap_dict, dict_t *dict, int index)
goto out;
}
- ret = dict_set_int32(snap_dict, "hold_vol_locks", _gf_false);
+ ret = dict_set_int32_sizen(snap_dict, "hold_vol_locks", _gf_false);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Setting volume lock flag failed");
goto out;
@@ -9535,10 +9251,9 @@ out:
return ret;
}
-int
+static int
cli_snapshot_status(dict_t *dict, gf_cli_rsp *rsp, call_frame_t *frame)
{
- char key[PATH_MAX] = "";
int ret = -1;
int status_cmd = -1;
cli_local_t *local = NULL;
@@ -9559,8 +9274,7 @@ cli_snapshot_status(dict_t *dict, gf_cli_rsp *rsp, call_frame_t *frame)
rsp->op_errstr);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to set "
- "op_errstr in local dictionary");
+ "Failed to set op_errstr in local dictionary");
goto out;
}
}
@@ -9568,7 +9282,7 @@ cli_snapshot_status(dict_t *dict, gf_cli_rsp *rsp, call_frame_t *frame)
goto out;
}
- ret = dict_get_int32(dict, "sub-cmd", &status_cmd);
+ ret = dict_get_int32_sizen(dict, "sub-cmd", &status_cmd);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not fetch status type");
goto out;
@@ -9580,35 +9294,24 @@ cli_snapshot_status(dict_t *dict, gf_cli_rsp *rsp, call_frame_t *frame)
goto out;
}
- ret = snprintf(key, sizeof(key), "status.snap0");
- if (ret < 0) {
- goto out;
- }
-
if (global_state->mode & GLUSTER_MODE_XML) {
- ret = cli_xml_snapshot_status_single_snap(local, dict, key);
+ ret = cli_xml_snapshot_status_single_snap(local, dict, "status.snap0");
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to create "
- "xml output for snapshot status");
- goto out;
+ "Failed to create xml output for snapshot status");
}
} else {
- ret = cli_get_single_snap_status(dict, key);
+ ret = cli_get_single_snap_status(dict, "status.snap0");
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Could not fetch "
- "status of snap");
- goto out;
+ gf_log("cli", GF_LOG_ERROR, "Could not fetch status of snap");
}
}
- ret = 0;
out:
return ret;
}
-int
+static int
gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
char *snap_name, char *volname, char *snap_uuid,
char *clone_name)
@@ -9658,8 +9361,7 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
}
gf_event(EVENT_SNAPSHOT_CREATED,
- "snapshot_name=%s;"
- "volume_name=%s;snapshot_uuid=%s",
+ "snapshot_name=%s;volume_name=%s;snapshot_uuid=%s",
snap_name, volname, snap_uuid);
ret = 0;
@@ -9686,9 +9388,7 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
}
gf_event(EVENT_SNAPSHOT_ACTIVATED,
- "snapshot_name=%s;"
- "snapshot_uuid=%s",
- snap_name, snap_uuid);
+ "snapshot_name=%s;snapshot_uuid=%s", snap_name, snap_uuid);
ret = 0;
break;
@@ -9714,9 +9414,7 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
}
gf_event(EVENT_SNAPSHOT_DEACTIVATED,
- "snapshot_name=%s;"
- "snapshot_uuid=%s",
- snap_name, snap_uuid);
+ "snapshot_name=%s;snapshot_uuid=%s", snap_name, snap_uuid);
ret = 0;
break;
@@ -9749,15 +9447,14 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
}
gf_event(EVENT_SNAPSHOT_RESTORED,
- "snapshot_name=%s;"
- "snapshot_uuid=%s;volume_name=%s",
+ "snapshot_name=%s;snapshot_uuid=%s;volume_name=%s",
snap_name, snap_uuid, volname);
ret = 0;
break;
case GF_SNAP_OPTION_TYPE_DELETE:
- ret = dict_get_int32(dict, "sub-cmd", &delete_cmd);
+ ret = dict_get_int32_sizen(dict, "sub-cmd", &delete_cmd);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not get sub-cmd");
goto out;
@@ -9793,9 +9490,7 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
}
gf_event(EVENT_SNAPSHOT_DELETED,
- "snapshot_name=%s;"
- "snapshot_uuid=%s",
- snap_name, snap_uuid);
+ "snapshot_name=%s;snapshot_uuid=%s", snap_name, snap_uuid);
ret = 0;
break;
@@ -9813,9 +9508,8 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
if (rsp->op_ret != 0) {
gf_event(EVENT_SNAPSHOT_CLONE_FAILED,
- "snapshot_name=%s;clone_name=%s;"
- "error=%s",
- snap_name, clone_name,
+ "snapshot_name=%s;clone_name=%s;error=%s", snap_name,
+ clone_name,
rsp->op_errstr ? rsp->op_errstr
: "Please check log file for details");
ret = 0;
@@ -9829,9 +9523,8 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
}
gf_event(EVENT_SNAPSHOT_CLONED,
- "snapshot_name=%s;"
- "clone_name=%s;clone_uuid=%s",
- snap_name, clone_name, snap_uuid);
+ "snapshot_name=%s;clone_name=%s;clone_uuid=%s", snap_name,
+ clone_name, snap_uuid);
ret = 0;
break;
@@ -9845,7 +9538,7 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
break;
}
- ret = dict_get_int32(dict, "config-command", &config_command);
+ ret = dict_get_int32_sizen(dict, "config-command", &config_command);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not fetch config type");
goto out;
@@ -9859,8 +9552,9 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
/* These are optional parameters therefore ignore the error */
ret = dict_get_uint64(dict, "snap-max-hard-limit", &hard_limit);
ret = dict_get_uint64(dict, "snap-max-soft-limit", &soft_limit);
- ret = dict_get_str(dict, "auto-delete", &auto_delete);
- ret = dict_get_str(dict, "snap-activate-on-create", &snap_activate);
+ ret = dict_get_str_sizen(dict, "auto-delete", &auto_delete);
+ ret = dict_get_str_sizen(dict, "snap-activate-on-create",
+ &snap_activate);
if (!hard_limit && !soft_limit && !auto_delete && !snap_activate) {
ret = -1;
@@ -9872,9 +9566,6 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
goto out;
}
- volname = NULL;
- ret = dict_get_str(dict, "volname", &volname);
-
if (hard_limit || soft_limit) {
snprintf(option, sizeof(option), "%s=%" PRIu64,
hard_limit ? "hard_limit" : "soft_limit",
@@ -9885,6 +9576,9 @@ gf_cli_generate_snapshot_event(gf_cli_rsp *rsp, dict_t *dict, int32_t type,
auto_delete ? auto_delete : snap_activate);
}
+ volname = NULL;
+ ret = dict_get_str_sizen(dict, "volname", &volname);
+
snprintf(msg, sizeof(msg), "config_type=%s;%s",
volname ? "volume_config" : "system_config", option);
@@ -9908,7 +9602,7 @@ out:
* Fetch necessary data from dict at one place instead of *
* repeating the same code again and again. *
*/
-int
+static int
gf_cli_snapshot_get_data_from_dict(dict_t *dict, char **snap_name,
char **volname, char **snap_uuid,
int8_t *soft_limit_flag, char **clone_name)
@@ -9918,21 +9612,21 @@ gf_cli_snapshot_get_data_from_dict(dict_t *dict, char **snap_name,
GF_VALIDATE_OR_GOTO("cli", dict, out);
if (snap_name) {
- ret = dict_get_str(dict, "snapname", snap_name);
+ ret = dict_get_str_sizen(dict, "snapname", snap_name);
if (ret) {
gf_log("cli", GF_LOG_DEBUG, "failed to get snapname from dict");
}
}
if (volname) {
- ret = dict_get_str(dict, "volname1", volname);
+ ret = dict_get_str_sizen(dict, "volname1", volname);
if (ret) {
gf_log("cli", GF_LOG_DEBUG, "failed to get volname1 from dict");
}
}
if (snap_uuid) {
- ret = dict_get_str(dict, "snapuuid", snap_uuid);
+ ret = dict_get_str_sizen(dict, "snapuuid", snap_uuid);
if (ret) {
gf_log("cli", GF_LOG_DEBUG, "failed to get snapuuid from dict");
}
@@ -9947,7 +9641,7 @@ gf_cli_snapshot_get_data_from_dict(dict_t *dict, char **snap_name,
}
if (clone_name) {
- ret = dict_get_str(dict, "clonename", clone_name);
+ ret = dict_get_str_sizen(dict, "clonename", clone_name);
if (ret) {
gf_log("cli", GF_LOG_DEBUG, "failed to get clonename from dict");
}
@@ -9958,7 +9652,7 @@ out:
return ret;
}
-int
+static int
gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -9986,8 +9680,7 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
- gf_log(frame->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ gf_log(frame->this->name, GF_LOG_ERROR, XDR_DECODE_FAIL);
goto out;
}
@@ -10003,7 +9696,7 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
if (ret)
goto out;
- ret = dict_get_int32(dict, "type", &type);
+ ret = dict_get_int32_sizen(dict, "type", &type);
if (ret) {
gf_log(frame->this->name, GF_LOG_ERROR, "failed to get type");
goto out;
@@ -10032,7 +9725,7 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_snapshot(type, dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
}
goto out;
@@ -10060,10 +9753,8 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- cli_out(
- "snapshot create: success: Snap %s created "
- "successfully",
- snap_name);
+ cli_out("snapshot create: success: Snap %s created successfully",
+ snap_name);
if (soft_limit_flag == 1) {
cli_out(
@@ -10094,10 +9785,8 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- cli_out(
- "snapshot clone: success: Clone %s created "
- "successfully",
- clone_name);
+ cli_out("snapshot clone: success: Clone %s created successfully",
+ clone_name);
ret = 0;
break;
@@ -10116,10 +9805,8 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- cli_out(
- "Snapshot restore: %s: Snap restored "
- "successfully",
- snap_name);
+ cli_out("Snapshot restore: %s: Snap restored successfully",
+ snap_name);
ret = 0;
break;
@@ -10137,10 +9824,8 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- cli_out(
- "Snapshot activate: %s: Snap activated "
- "successfully",
- snap_name);
+ cli_out("Snapshot activate: %s: Snap activated successfully",
+ snap_name);
ret = 0;
break;
@@ -10159,10 +9844,8 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
goto out;
}
- cli_out(
- "Snapshot deactivate: %s: Snap deactivated "
- "successfully",
- snap_name);
+ cli_out("Snapshot deactivate: %s: Snap deactivated successfully",
+ snap_name);
ret = 0;
break;
@@ -10195,8 +9878,7 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_snapshot_config_display(dict, &rsp);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to display "
- "snapshot config output.");
+ "Failed to display snapshot config output.");
goto out;
}
break;
@@ -10212,9 +9894,7 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_snapshot_list(dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Failed to display "
- "snapshot list");
+ gf_log("cli", GF_LOG_ERROR, "Failed to display snapshot list");
goto out;
}
break;
@@ -10231,8 +9911,7 @@ gf_cli_snapshot_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_snapshot_status(dict, &rsp, frame);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to display "
- "snapshot status output.");
+ "Failed to display snapshot status output.");
goto out;
}
break;
@@ -10276,11 +9955,9 @@ gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
local = frame->local;
- ret = dict_get_int32(local->dict, "sub-cmd", &cmd);
+ ret = dict_get_int32_sizen(local->dict, "sub-cmd", &cmd);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Failed to get "
- "sub-cmd");
+ gf_log("cli", GF_LOG_ERROR, "Failed to get sub-cmd");
goto out;
}
@@ -10290,11 +9967,9 @@ gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
goto out;
}
- ret = dict_get_int32(local->dict, "snapcount", &snapcount);
+ ret = dict_get_int32_sizen(local->dict, "snapcount", &snapcount);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Could not get "
- "snapcount");
+ gf_log("cli", GF_LOG_ERROR, "Could not get snapcount");
goto out;
}
@@ -10304,8 +9979,7 @@ gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
local->writer, (xmlChar *)"snapCount", "%d", snapcount);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to write "
- "xml element \"snapCount\"");
+ "Failed to write xml element \"snapCount\"");
goto out;
}
#endif /* HAVE_LIB_XML */
@@ -10316,22 +9990,19 @@ gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
if (cmd == GF_SNAP_DELETE_TYPE_ALL) {
snprintf(question, sizeof(question),
- "System contains %d "
- "snapshot(s).\nDo you still "
+ "System contains %d snapshot(s).\nDo you still "
"want to continue and delete them? ",
snapcount);
} else {
- ret = dict_get_str(local->dict, "volname", &volname);
+ ret = dict_get_str_sizen(local->dict, "volname", &volname);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to fetch "
- "volname from local dictionary");
+ "Failed to fetch volname from local dictionary");
goto out;
}
snprintf(question, sizeof(question),
- "Volume (%s) contains "
- "%d snapshot(s).\nDo you still want to "
+ "Volume (%s) contains %d snapshot(s).\nDo you still want to "
"continue and delete them? ",
volname, snapcount);
}
@@ -10340,8 +10011,7 @@ gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
if (GF_ANSWER_NO == answer) {
ret = 0;
gf_log("cli", GF_LOG_DEBUG,
- "User cancelled "
- "snapshot delete operation for snap delete");
+ "User cancelled snapshot delete operation for snap delete");
goto out;
}
@@ -10355,8 +10025,7 @@ gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
ret = cli_populate_req_dict_for_delete(snap_dict, local->dict, i);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Could not "
- "populate snap request dictionary");
+ "Could not populate snap request dictionary");
goto out;
}
@@ -10368,8 +10037,7 @@ gf_cli_snapshot_for_delete(call_frame_t *frame, xlator_t *this, void *data)
* snapshots is failed
*/
gf_log("cli", GF_LOG_ERROR,
- "cli_to_glusterd "
- "for snapshot delete failed");
+ "cli_to_glusterd for snapshot delete failed");
goto out;
}
dict_unref(snap_dict);
@@ -10384,7 +10052,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_snapshot_for_status(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -10404,7 +10072,7 @@ gf_cli_snapshot_for_status(call_frame_t *frame, xlator_t *this, void *data)
local = frame->local;
- ret = dict_get_int32(local->dict, "sub-cmd", &cmd);
+ ret = dict_get_int32_sizen(local->dict, "sub-cmd", &cmd);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to get sub-cmd");
goto out;
@@ -10420,7 +10088,7 @@ gf_cli_snapshot_for_status(call_frame_t *frame, xlator_t *this, void *data)
goto out;
}
- ret = dict_get_int32(local->dict, "status.snapcount", &snapcount);
+ ret = dict_get_int32_sizen(local->dict, "status.snapcount", &snapcount);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Could not get snapcount");
goto out;
@@ -10440,8 +10108,7 @@ gf_cli_snapshot_for_status(call_frame_t *frame, xlator_t *this, void *data)
ret = cli_populate_req_dict_for_status(snap_dict, local->dict, i);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Could not "
- "populate snap request dictionary");
+ "Could not populate snap request dictionary");
goto out;
}
@@ -10478,7 +10145,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_snapshot(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -10491,24 +10158,18 @@ gf_cli_snapshot(call_frame_t *frame, xlator_t *this, void *data)
char *err_str = NULL;
int type = -1;
- if (!frame || !this || !data)
- goto out;
-
- if (!frame->local)
+ if (!frame || !frame->local || !this || !data)
goto out;
local = frame->local;
options = data;
- ret = dict_get_int32(local->dict, "type", &type);
-
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_snapshot_begin_composite_op(local);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to begin "
- "snapshot xml composite op");
+ "Failed to begin snapshot xml composite op");
goto out;
}
}
@@ -10517,18 +10178,17 @@ gf_cli_snapshot(call_frame_t *frame, xlator_t *this, void *data)
(xdrproc_t)xdr_gf_cli_req, options, GLUSTER_CLI_SNAP,
this, cli_rpc_prog, NULL);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "cli_to_glusterd for "
- "snapshot failed");
+ gf_log("cli", GF_LOG_ERROR, "cli_to_glusterd for snapshot failed");
goto xmlend;
}
+ ret = dict_get_int32_sizen(local->dict, "type", &type);
+
if (GF_SNAP_OPTION_TYPE_STATUS == type) {
ret = gf_cli_snapshot_for_status(frame, this, data);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "cli to glusterd "
- "for snapshot status command failed");
+ "cli to glusterd for snapshot status command failed");
}
goto xmlend;
@@ -10538,8 +10198,7 @@ gf_cli_snapshot(call_frame_t *frame, xlator_t *this, void *data)
ret = gf_cli_snapshot_for_delete(frame, this, data);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "cli to glusterd "
- "for snapshot delete command failed");
+ "cli to glusterd for snapshot delete command failed");
}
goto xmlend;
@@ -10552,25 +10211,23 @@ xmlend:
ret = cli_xml_snapshot_end_composite_op(local);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to end "
- "snapshot xml composite op");
+ "Failed to end snapshot xml composite op");
goto out;
}
}
out:
if (ret && local && GF_SNAP_OPTION_TYPE_STATUS == type) {
- tmp_ret = dict_get_str(local->dict, "op_err_str", &err_str);
+ tmp_ret = dict_get_str_sizen(local->dict, "op_err_str", &err_str);
if (tmp_ret || !err_str) {
cli_err("Snapshot Status : failed: %s",
- "Please "
- "check log file for details");
+ "Please check log file for details");
} else {
cli_err("Snapshot Status : failed: %s", err_str);
- dict_del(local->dict, "op_err_str");
+ dict_del_sizen(local->dict, "op_err_str");
}
}
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
@@ -10581,7 +10238,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_barrier_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -10598,7 +10255,7 @@ gf_cli_barrier_volume_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
gf_log("cli", GF_LOG_DEBUG, "Received response to barrier");
@@ -10620,7 +10277,7 @@ out:
return ret;
}
-int
+static int
gf_cli_barrier_volume(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -10629,22 +10286,18 @@ gf_cli_barrier_volume(call_frame_t *frame, xlator_t *this, void *data)
dict_t *options = NULL;
int ret = -1;
- if (!frame || !this || !data)
- goto out;
-
options = data;
ret = cli_to_glusterd(&req, frame, gf_cli_barrier_volume_cbk,
(xdrproc_t)xdr_gf_cli_req, options,
GLUSTER_CLI_BARRIER_VOLUME, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int32_t
+static int32_t
gf_cli_get_vol_opt_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -10671,27 +10324,23 @@ gf_cli_get_vol_opt_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
gf_log("cli", GF_LOG_DEBUG, "Received response to get volume option");
if (rsp.op_ret) {
if (strcmp(rsp.op_errstr, ""))
- snprintf(msg, sizeof(msg),
- "volume get option: "
- "failed: %s",
+ snprintf(msg, sizeof(msg), "volume get option: failed: %s",
rsp.op_errstr);
else
- snprintf(msg, sizeof(msg),
- "volume get option: "
- "failed");
+ snprintf(msg, sizeof(msg), "volume get option: failed");
if (global_state->mode & GLUSTER_MODE_XML) {
ret = cli_xml_output_str("volGetopts", msg, rsp.op_ret,
rsp.op_errno, rsp.op_errstr);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
}
} else {
cli_err("%s", msg);
@@ -10708,7 +10357,7 @@ gf_cli_get_vol_opt_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR, "Failed rsp_dict unserialization");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
@@ -10716,32 +10365,26 @@ gf_cli_get_vol_opt_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = cli_xml_output_vol_getopts(dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "xml output generation "
- "failed");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
ret = 0;
}
goto out;
}
- ret = dict_get_str(dict, "warning", &value);
+ ret = dict_get_str_sizen(dict, "warning", &value);
if (!ret) {
cli_out("%s", value);
}
- ret = dict_get_int32(dict, "count", &count);
+ ret = dict_get_int32_sizen(dict, "count", &count);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to retrieve count "
- "from the dictionary");
+ "Failed to retrieve count from the dictionary");
goto out;
}
if (count <= 0) {
- gf_log("cli", GF_LOG_ERROR,
- "Value of count :%d is "
- "invalid",
- count);
+ gf_log("cli", GF_LOG_ERROR, "Value of count :%d is invalid", count);
ret = -1;
goto out;
}
@@ -10749,23 +10392,18 @@ gf_cli_get_vol_opt_cbk(struct rpc_req *req, struct iovec *iov, int count,
cli_out("%-40s%-40s", "Option", "Value");
cli_out("%-40s%-40s", "------", "-----");
for (i = 1; i <= count; i++) {
- snprintf(dict_key, sizeof dict_key, "key%d", i);
- ret = dict_get_str(dict, dict_key, &key);
+ ret = snprintf(dict_key, sizeof dict_key, "key%d", i);
+ ret = dict_get_strn(dict, dict_key, ret, &key);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to"
- " retrieve %s from the "
- "dictionary",
- dict_key);
+ "Failed to retrieve %s from the dictionary", dict_key);
goto out;
}
- snprintf(dict_key, sizeof dict_key, "value%d", i);
- ret = dict_get_str(dict, dict_key, &value);
+ ret = snprintf(dict_key, sizeof dict_key, "value%d", i);
+ ret = dict_get_strn(dict, dict_key, ret, &value);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
- "Failed to "
- "retrieve key value for %s from"
- "the dictionary",
+ "Failed to retrieve key value for %s from the dictionary",
dict_key);
goto out;
}
@@ -10787,7 +10425,7 @@ out_nolog:
return ret;
}
-int
+static int
gf_cli_get_vol_opt(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -10796,22 +10434,18 @@ gf_cli_get_vol_opt(call_frame_t *frame, xlator_t *this, void *data)
dict_t *options = NULL;
int ret = -1;
- if (!frame || !this || !data)
- goto out;
-
options = data;
ret = cli_to_glusterd(&req, frame, gf_cli_get_vol_opt_cbk,
(xdrproc_t)xdr_gf_cli_req, options,
GLUSTER_CLI_GET_VOL_OPT, this, cli_rpc_prog, NULL);
-out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-int
+static int
add_cli_cmd_timeout_to_dict(dict_t *dict)
{
int ret = 0;
@@ -10819,15 +10453,13 @@ add_cli_cmd_timeout_to_dict(dict_t *dict)
if (cli_default_conn_timeout > 120) {
ret = dict_set_uint32(dict, "timeout", cli_default_conn_timeout);
if (ret) {
- gf_log("cli", GF_LOG_INFO,
- "Failed to save"
- "timeout to dict");
+ gf_log("cli", GF_LOG_INFO, "Failed to save timeout to dict");
}
}
return ret;
}
-int
+static int
cli_to_glusterd(gf_cli_req *req, call_frame_t *frame, fop_cbk_fn_t cbkfn,
xdrproc_t xdrproc, dict_t *dict, int procnum, xlator_t *this,
rpc_clnt_prog_t *prog, struct iobref *iobref)
@@ -10839,12 +10471,7 @@ cli_to_glusterd(gf_cli_req *req, call_frame_t *frame, fop_cbk_fn_t cbkfn,
const char **words = NULL;
cli_local_t *local = NULL;
- if (!this || !frame || !dict) {
- ret = -1;
- goto out;
- }
-
- if (!frame->local) {
+ if (!this || !frame || !frame->local || !dict) {
ret = -1;
goto out;
}
@@ -10861,12 +10488,12 @@ cli_to_glusterd(gf_cli_req *req, call_frame_t *frame, fop_cbk_fn_t cbkfn,
while (words[i])
len += strlen(words[i++]) + 1;
- cmd = GF_CALLOC(1, len, gf_common_mt_char);
-
+ cmd = GF_MALLOC(len + 1, gf_common_mt_char);
if (!cmd) {
ret = -1;
goto out;
}
+ cmd[0] = '\0';
for (i = 0; words[i]; i++) {
strncat(cmd, words[i], len - 1);
@@ -10874,9 +10501,7 @@ cli_to_glusterd(gf_cli_req *req, call_frame_t *frame, fop_cbk_fn_t cbkfn,
strncat(cmd, " ", len - 1);
}
- cmd[len - 1] = '\0';
-
- ret = dict_set_dynstr(dict, "cmd-str", cmd);
+ ret = dict_set_dynstr_sizen(dict, "cmd-str", cmd);
if (ret)
goto out;
@@ -10897,14 +10522,14 @@ out:
return ret;
}
-int
+static int
gf_cli_print_bitrot_scrub_status(dict_t *dict)
{
int i = 1;
int j = 0;
int ret = -1;
int count = 0;
- char key[256] = {
+ char key[64] = {
0,
};
char *volname = NULL;
@@ -10927,51 +10552,42 @@ gf_cli_print_bitrot_scrub_status(dict_t *dict)
int8_t scrub_running = 0;
char *scrub_state_op = NULL;
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_int32_sizen(dict, "count", &count);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR,
+ "failed to get count value from dictionary");
+ goto out;
+ }
+
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
gf_log("cli", GF_LOG_TRACE, "failed to get volume name");
- ret = dict_get_str(dict, "features.scrub", &state_scrub);
+ ret = dict_get_str_sizen(dict, "features.scrub", &state_scrub);
if (ret)
gf_log("cli", GF_LOG_TRACE, "failed to get scrub state value");
- ret = dict_get_str(dict, "features.scrub-throttle", &scrub_impact);
+ ret = dict_get_str_sizen(dict, "features.scrub-throttle", &scrub_impact);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get scrub impact "
- "value");
+ gf_log("cli", GF_LOG_TRACE, "failed to get scrub impact value");
- ret = dict_get_str(dict, "features.scrub-freq", &scrub_freq);
+ ret = dict_get_str_sizen(dict, "features.scrub-freq", &scrub_freq);
if (ret)
gf_log("cli", GF_LOG_TRACE, "failed to get scrub -freq value");
- ret = dict_get_str(dict, "bitrot_log_file", &bitrot_log_file);
+ ret = dict_get_str_sizen(dict, "bitrot_log_file", &bitrot_log_file);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get bitrot log file "
- "location");
+ gf_log("cli", GF_LOG_TRACE, "failed to get bitrot log file location");
- ret = dict_get_str(dict, "scrub_log_file", &scrub_log_file);
+ ret = dict_get_str_sizen(dict, "scrub_log_file", &scrub_log_file);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get scrubber log file "
- "location");
-
- ret = dict_get_int32(dict, "count", &count);
- if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "count not get count value from"
- " dictionary");
- goto out;
- }
+ gf_log("cli", GF_LOG_TRACE, "failed to get scrubber log file location");
for (i = 1; i <= count; i++) {
- snprintf(key, 256, "scrub-running-%d", i);
+ snprintf(key, sizeof(key), "scrub-running-%d", i);
ret = dict_get_int8(dict, key, &scrub_running);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get scrubbed "
- "files");
+ gf_log("cli", GF_LOG_TRACE, "failed to get scrubbed files");
if (scrub_running)
break;
}
@@ -10998,56 +10614,41 @@ gf_cli_print_bitrot_scrub_status(dict_t *dict)
node_name = NULL;
last_scrub = NULL;
scrub_time = 0;
- days = 0;
- hours = 0;
- minutes = 0;
- seconds = 0;
error_count = 0;
scrub_files = 0;
unsigned_files = 0;
- snprintf(key, 256, "node-name-%d", i);
+ snprintf(key, sizeof(key), "node-name-%d", i);
ret = dict_get_str(dict, key, &node_name);
if (ret)
gf_log("cli", GF_LOG_TRACE, "failed to get node-name");
- snprintf(key, 256, "scrubbed-files-%d", i);
+ snprintf(key, sizeof(key), "scrubbed-files-%d", i);
ret = dict_get_uint64(dict, key, &scrub_files);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get scrubbed "
- "files");
+ gf_log("cli", GF_LOG_TRACE, "failed to get scrubbed files");
- snprintf(key, 256, "unsigned-files-%d", i);
+ snprintf(key, sizeof(key), "unsigned-files-%d", i);
ret = dict_get_uint64(dict, key, &unsigned_files);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get unsigned "
- "files");
+ gf_log("cli", GF_LOG_TRACE, "failed to get unsigned files");
- snprintf(key, 256, "scrub-duration-%d", i);
+ snprintf(key, sizeof(key), "scrub-duration-%d", i);
ret = dict_get_uint64(dict, key, &scrub_time);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get last scrub "
- "duration");
+ gf_log("cli", GF_LOG_TRACE, "failed to get last scrub duration");
- snprintf(key, 256, "last-scrub-time-%d", i);
+ snprintf(key, sizeof(key), "last-scrub-time-%d", i);
ret = dict_get_str(dict, key, &last_scrub);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get last scrub"
- " time");
- snprintf(key, 256, "error-count-%d", i);
+ gf_log("cli", GF_LOG_TRACE, "failed to get last scrub time");
+ snprintf(key, sizeof(key), "error-count-%d", i);
ret = dict_get_uint64(dict, key, &error_count);
if (ret)
- gf_log("cli", GF_LOG_TRACE,
- "failed to get error "
- "count");
+ gf_log("cli", GF_LOG_TRACE, "failed to get error count");
cli_out("\n%s\n",
- "=========================================="
- "===============");
+ "=========================================================");
cli_out("%s: %s\n", "Node", node_name);
@@ -11076,7 +10677,7 @@ gf_cli_print_bitrot_scrub_status(dict_t *dict)
cli_out("%s:\n", "Corrupted object's [GFID]");
/* Printing list of bad file's (Corrupted object's)*/
for (j = 0; j < error_count; j++) {
- snprintf(key, 256, "quarantine-%d-%d", j, i);
+ snprintf(key, sizeof(key), "quarantine-%d-%d", j, i);
ret = dict_get_str(dict, key, &bad_file_str);
if (!ret) {
cli_out("%s\n", bad_file_str);
@@ -11085,15 +10686,14 @@ gf_cli_print_bitrot_scrub_status(dict_t *dict)
}
}
cli_out("%s\n",
- "=========================================="
- "===============");
+ "=========================================================");
out:
GF_FREE(scrub_state_op);
return 0;
}
-int
+static int
gf_cli_bitrot_cbk(struct rpc_req *req, struct iovec *iov, int count,
void *myframe)
{
@@ -11117,7 +10717,7 @@ gf_cli_bitrot_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_cli_rsp);
if (ret < 0) {
gf_log(((call_frame_t *)myframe)->this->name, GF_LOG_ERROR,
- "Failed to decode xdr response");
+ XDR_DECODE_FAIL);
goto out;
}
@@ -11146,81 +10746,67 @@ gf_cli_bitrot_cbk(struct rpc_req *req, struct iovec *iov, int count,
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "failed to unserialize "
- "req-buffer to dictionary");
+ gf_log("cli", GF_LOG_ERROR, DICT_UNSERIALIZE_FAIL);
goto out;
}
}
gf_log("cli", GF_LOG_DEBUG, "Received resp to bit rot command");
- ret = dict_get_int32(dict, "type", &type);
+ ret = dict_get_int32_sizen(dict, "type", &type);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Failed to get command type");
goto out;
}
+ if ((type == GF_BITROT_CMD_SCRUB_STATUS) &&
+ !(global_state->mode & GLUSTER_MODE_XML)) {
+ ret = gf_cli_print_bitrot_scrub_status(dict);
+ if (ret) {
+ gf_log("cli", GF_LOG_ERROR, "Failed to print bitrot scrub status");
+ }
+ goto out;
+ }
+
/* Ignoring the error, as using dict val for cli output only */
- ret = dict_get_str(dict, "scrub-value", &scrub_cmd);
+ ret = dict_get_str_sizen(dict, "volname", &volname);
if (ret)
- gf_log("cli", GF_LOG_TRACE, "Failed to get scrub command");
+ gf_log("cli", GF_LOG_TRACE, "failed to get volume name");
- ret = dict_get_str(dict, "volname", &volname);
+ ret = dict_get_str_sizen(dict, "scrub-value", &scrub_cmd);
if (ret)
- gf_log("cli", GF_LOG_TRACE, "failed to get volume name");
+ gf_log("cli", GF_LOG_TRACE, "Failed to get scrub command");
- ret = dict_get_str(dict, "cmd-str", &cmd_str);
+ ret = dict_get_str_sizen(dict, "cmd-str", &cmd_str);
if (ret)
gf_log("cli", GF_LOG_TRACE, "failed to get command string");
if (cmd_str)
cmd_op = strrchr(cmd_str, ' ') + 1;
- if ((type == GF_BITROT_CMD_SCRUB_STATUS) &&
- !(global_state->mode & GLUSTER_MODE_XML)) {
- ret = gf_cli_print_bitrot_scrub_status(dict);
- if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "Failed to print bitrot "
- "scrub status");
- }
- goto out;
- }
-
switch (type) {
case GF_BITROT_OPTION_TYPE_ENABLE:
- cli_out(
- "volume bitrot: success bitrot enabled "
- "for volume %s",
- volname);
+ cli_out("volume bitrot: success bitrot enabled for volume %s",
+ volname);
ret = 0;
goto out;
case GF_BITROT_OPTION_TYPE_DISABLE:
- cli_out(
- "volume bitrot: success bitrot disabled "
- "for volume %s",
- volname);
+ cli_out("volume bitrot: success bitrot disabled for volume %s",
+ volname);
ret = 0;
goto out;
case GF_BITROT_CMD_SCRUB_ONDEMAND:
- cli_out(
- "volume bitrot: scrubber started ondemand "
- "for volume %s",
- volname);
+ cli_out("volume bitrot: scrubber started ondemand for volume %s",
+ volname);
ret = 0;
goto out;
case GF_BITROT_OPTION_TYPE_SCRUB:
if (!strncmp("pause", scrub_cmd, sizeof("pause")))
- cli_out(
- "volume bitrot: scrubber paused "
- "for volume %s",
- volname);
+ cli_out("volume bitrot: scrubber paused for volume %s",
+ volname);
if (!strncmp("resume", scrub_cmd, sizeof("resume")))
- cli_out(
- "volume bitrot: scrubber resumed "
- "for volume %s",
- volname);
+ cli_out("volume bitrot: scrubber resumed for volume %s",
+ volname);
ret = 0;
goto out;
case GF_BITROT_OPTION_TYPE_SCRUB_FREQ:
@@ -11244,7 +10830,7 @@ xml_output:
ret = cli_xml_output_vol_profile(dict, rsp.op_ret, rsp.op_errno,
rsp.op_errstr);
if (ret)
- gf_log("cli", GF_LOG_ERROR, "Error outputting to xml");
+ gf_log("cli", GF_LOG_ERROR, XML_ERROR);
goto out;
}
@@ -11262,7 +10848,7 @@ out:
return ret;
}
-int32_t
+static int32_t
gf_cli_bitrot(call_frame_t *frame, xlator_t *this, void *data)
{
gf_cli_req req = {{
@@ -11271,30 +10857,25 @@ gf_cli_bitrot(call_frame_t *frame, xlator_t *this, void *data)
dict_t *options = NULL;
int ret = -1;
- if (!frame || !this || !data)
- goto out;
-
options = data;
ret = cli_to_glusterd(&req, frame, gf_cli_bitrot_cbk,
(xdrproc_t)xdr_gf_cli_req, options,
GLUSTER_CLI_BITROT, this, cli_rpc_prog, NULL);
if (ret) {
- gf_log("cli", GF_LOG_ERROR,
- "cli_to_glusterd for "
- "bitrot failed");
+ gf_log("cli", GF_LOG_ERROR, "cli_to_glusterd for bitrot failed");
goto out;
}
out:
- gf_log("cli", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log("cli", GF_LOG_DEBUG, RETURNING, ret);
GF_FREE(req.dict.dict_val);
return ret;
}
-struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
+static struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
[GLUSTER_CLI_NULL] = {"NULL", NULL},
[GLUSTER_CLI_PROBE] = {"PROBE_QUERY", gf_cli_probe},
[GLUSTER_CLI_DEPROBE] = {"DEPROBE_QUERY", gf_cli_deprobe},
@@ -11342,6 +10923,7 @@ struct rpc_clnt_procedure gluster_cli_actors[GLUSTER_CLI_MAXVALUE] = {
[GLUSTER_CLI_BITROT] = {"BITROT", gf_cli_bitrot},
[GLUSTER_CLI_GET_STATE] = {"GET_STATE", gf_cli_get_state},
[GLUSTER_CLI_RESET_BRICK] = {"RESET_BRICK", gf_cli_reset_brick},
+ [GLUSTER_CLI_GANESHA] = {"GANESHA", gf_cli_ganesha},
};
struct rpc_clnt_program cli_prog = {
@@ -11352,7 +10934,7 @@ struct rpc_clnt_program cli_prog = {
.proctable = gluster_cli_actors,
};
-struct rpc_clnt_procedure cli_quotad_procs[GF_AGGREGATOR_MAXVALUE] = {
+static struct rpc_clnt_procedure cli_quotad_procs[GF_AGGREGATOR_MAXVALUE] = {
[GF_AGGREGATOR_NULL] = {"NULL", NULL},
[GF_AGGREGATOR_LOOKUP] = {"LOOKUP", NULL},
[GF_AGGREGATOR_GETLIMIT] = {"GETLIMIT", cli_quotad_getlimit},
diff --git a/cli/src/cli-xml-output.c b/cli/src/cli-xml-output.c
index 3accd9ce4bf..069de75801c 100644
--- a/cli/src/cli-xml-output.c
+++ b/cli/src/cli-xml-output.c
@@ -1661,15 +1661,15 @@ cli_xml_output_vol_top_rw_perf(xmlTextWriterPtr writer, dict_t *dict,
int ret = -1;
char *filename = NULL;
uint64_t throughput = 0;
- long int time_sec = 0;
- long int time_usec = 0;
- char timestr[256] = {
+ struct timeval tv = {
+ 0,
+ };
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
char key[1024] = {
0,
};
- int len;
/* <file> */
ret = xmlTextWriterStartElement(writer, (xmlChar *)"file");
@@ -1692,19 +1692,16 @@ cli_xml_output_vol_top_rw_perf(xmlTextWriterPtr writer, dict_t *dict,
XML_RET_CHECK_AND_GOTO(ret, out);
snprintf(key, sizeof(key), "%d-time-sec-%d", brick_index, member_index);
- ret = dict_get_int32(dict, key, (int32_t *)&time_sec);
+ ret = dict_get_int32(dict, key, (int32_t *)&tv.tv_sec);
if (ret)
goto out;
snprintf(key, sizeof(key), "%d-time-usec-%d", brick_index, member_index);
- ret = dict_get_int32(dict, key, (int32_t *)&time_usec);
+ ret = dict_get_int32(dict, key, (int32_t *)&tv.tv_usec);
if (ret)
goto out;
- gf_time_fmt(timestr, sizeof timestr, time_sec, gf_timefmt_FT);
- len = strlen(timestr);
- snprintf(timestr + len, sizeof(timestr) - len, ".%" GF_PRI_SUSECONDS,
- time_usec);
+ gf_time_fmt_tv(timestr, sizeof timestr, &tv, gf_timefmt_FT);
ret = xmlTextWriterWriteFormatElement(writer, (xmlChar *)"time", "%s",
timestr);
XML_RET_CHECK_AND_GOTO(ret, out);
@@ -1953,11 +1950,11 @@ cli_xml_output_vol_profile_stats(xmlTextWriterPtr writer, dict_t *dict,
XML_RET_CHECK_AND_GOTO(ret, out);
ret = xmlTextWriterWriteFormatElement(writer, (xmlChar *)"size",
- "%" PRIu32, (1 << i));
+ "%" PRIu32, (1U << i));
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%d-%d-read-%d", brick_index, interval,
- (1 << i));
+ snprintf(key, sizeof(key), "%d-%d-read-%" PRIu32, brick_index, interval,
+ (1U << i));
ret = dict_get_uint64(dict, key, &read_count);
if (ret)
read_count = 0;
@@ -1965,8 +1962,8 @@ cli_xml_output_vol_profile_stats(xmlTextWriterPtr writer, dict_t *dict,
"%" PRIu64, read_count);
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%d-%d-write-%d", brick_index, interval,
- (1 << i));
+ snprintf(key, sizeof(key), "%d-%d-write-%" PRIu32, brick_index,
+ interval, (1U << i));
ret = dict_get_uint64(dict, key, &write_count);
if (ret)
write_count = 0;
@@ -2507,8 +2504,9 @@ cli_xml_output_vol_info(cli_local_t *local, dict_t *dict)
ret = dict_get_int32(dict, key, &dist_count);
if (ret)
goto out;
- ret = xmlTextWriterWriteFormatElement(
- local->writer, (xmlChar *)"distCount", "%d", dist_count);
+ ret = xmlTextWriterWriteFormatElement(local->writer,
+ (xmlChar *)"distCount", "%d",
+ (brick_count / dist_count));
XML_RET_CHECK_AND_GOTO(ret, out);
snprintf(key, sizeof(key), "volume%d.stripe_count", i);
@@ -2779,7 +2777,9 @@ cli_xml_output_peer_hostnames(xmlTextWriterPtr writer, dict_t *dict,
XML_RET_CHECK_AND_GOTO(ret, out);
for (i = 0; i < count; i++) {
- snprintf(key, sizeof(key), "%s.hostname%d", prefix, i);
+ ret = snprintf(key, sizeof(key), "%s.hostname%d", prefix, i);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key, &hostname);
if (ret)
goto out;
@@ -3577,20 +3577,21 @@ cli_xml_output_vol_gsync_status(dict_t *dict, xmlTextWriterPtr writer)
char *volume_next = NULL;
char *slave = NULL;
char *slave_next = NULL;
- char *title_values[] = {"master_node", "", "master_brick", "slave_user",
- "slave", "slave_node", "status", "crawl_status",
- /* last_synced */
- "", "entry", "data", "meta", "failures",
- /* checkpoint_time */
- "", "checkpoint_completed",
- /* checkpoint_completion_time */
- "", "master_node_uuid",
- /* last_synced_utc */
- "last_synced",
- /* checkpoint_time_utc */
- "checkpoint_time",
- /* checkpoint_completion_time_utc */
- "checkpoint_completion_time"};
+ static const char *title_values[] = {
+ "master_node", "", "master_brick", "slave_user", "slave", "slave_node",
+ "status", "crawl_status",
+ /* last_synced */
+ "", "entry", "data", "meta", "failures",
+ /* checkpoint_time */
+ "", "checkpoint_completed",
+ /* checkpoint_completion_time */
+ "", "master_node_uuid",
+ /* last_synced_utc */
+ "last_synced",
+ /* checkpoint_time_utc */
+ "checkpoint_time",
+ /* checkpoint_completion_time_utc */
+ "checkpoint_completion_time"};
GF_ASSERT(dict);
@@ -4201,7 +4202,9 @@ cli_xml_snapshot_info_snap_vol(xmlTextWriterPtr writer, xmlDocPtr doc,
ret = xmlTextWriterStartElement(writer, (xmlChar *)"snapVolume");
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.volname", keyprefix);
+ ret = snprintf(key, sizeof(key), "%s.volname", keyprefix);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key, &buffer);
if (ret) {
@@ -4213,7 +4216,9 @@ cli_xml_snapshot_info_snap_vol(xmlTextWriterPtr writer, xmlDocPtr doc,
buffer);
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.vol-status", keyprefix);
+ ret = snprintf(key, sizeof(key), "%s.vol-status", keyprefix);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key, &buffer);
if (ret) {
@@ -4228,7 +4233,10 @@ cli_xml_snapshot_info_snap_vol(xmlTextWriterPtr writer, xmlDocPtr doc,
/* If the command is snap_driven then we need to show origin volume
* info. Else this is shown in the start of info display.*/
if (snap_driven) {
- snprintf(key, sizeof(key), "%s.", keyprefix);
+ ret = snprintf(key, sizeof(key), "%s.", keyprefix);
+ if (ret < 0)
+ goto out;
+
ret = cli_xml_snapshot_info_orig_vol(writer, doc, dict, key);
if (ret) {
gf_log("cli", GF_LOG_ERROR,
@@ -4279,7 +4287,9 @@ cli_xml_snapshot_info_per_snap(xmlTextWriterPtr writer, xmlDocPtr doc,
ret = xmlTextWriterStartElement(writer, (xmlChar *)"snapshot");
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key_buffer, sizeof(key_buffer), "%s.snapname", keyprefix);
+ ret = snprintf(key_buffer, sizeof(key_buffer), "%s.snapname", keyprefix);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key_buffer, &buffer);
if (ret) {
@@ -4291,7 +4301,9 @@ cli_xml_snapshot_info_per_snap(xmlTextWriterPtr writer, xmlDocPtr doc,
buffer);
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key_buffer, sizeof(key_buffer), "%s.snap-id", keyprefix);
+ ret = snprintf(key_buffer, sizeof(key_buffer), "%s.snap-id", keyprefix);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key_buffer, &buffer);
if (ret) {
@@ -4303,7 +4315,9 @@ cli_xml_snapshot_info_per_snap(xmlTextWriterPtr writer, xmlDocPtr doc,
buffer);
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key_buffer, sizeof(key_buffer), "%s.snap-desc", keyprefix);
+ ret = snprintf(key_buffer, sizeof(key_buffer), "%s.snap-desc", keyprefix);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key_buffer, &buffer);
if (!ret) {
@@ -4315,7 +4329,9 @@ cli_xml_snapshot_info_per_snap(xmlTextWriterPtr writer, xmlDocPtr doc,
}
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key_buffer, sizeof(key_buffer), "%s.snap-time", keyprefix);
+ ret = snprintf(key_buffer, sizeof(key_buffer), "%s.snap-time", keyprefix);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key_buffer, &buffer);
if (ret) {
@@ -4327,7 +4343,10 @@ cli_xml_snapshot_info_per_snap(xmlTextWriterPtr writer, xmlDocPtr doc,
buffer);
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key_buffer, sizeof(key_buffer), "%s.vol-count", keyprefix);
+ ret = snprintf(key_buffer, sizeof(key_buffer), "%s.vol-count", keyprefix);
+ if (ret < 0)
+ goto out;
+
ret = dict_get_int32(dict, key_buffer, &volcount);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Fail to get snap vol count");
@@ -4341,7 +4360,10 @@ cli_xml_snapshot_info_per_snap(xmlTextWriterPtr writer, xmlDocPtr doc,
ret = dict_get_int32(dict, key_buffer, &volcount);
/* Display info of each snapshot volume */
for (i = 1; i <= volcount; i++) {
- snprintf(key_buffer, sizeof(key_buffer), "%s.vol%d", keyprefix, i);
+ ret = snprintf(key_buffer, sizeof(key_buffer), "%s.vol%d", keyprefix,
+ i);
+ if (ret < 0)
+ goto out;
ret = cli_xml_snapshot_info_snap_vol(writer, doc, dict, key_buffer,
snap_driven);
@@ -4465,7 +4487,9 @@ cli_xml_snapshot_volume_status(xmlTextWriterPtr writer, xmlDocPtr doc,
GF_ASSERT(dict);
GF_ASSERT(keyprefix);
- snprintf(key, sizeof(key), "%s.brickcount", keyprefix);
+ ret = snprintf(key, sizeof(key), "%s.brickcount", keyprefix);
+ if (ret < 0)
+ goto out;
ret = dict_get_int32(dict, key, &brickcount);
if (ret) {
@@ -4483,7 +4507,9 @@ cli_xml_snapshot_volume_status(xmlTextWriterPtr writer, xmlDocPtr doc,
ret = xmlTextWriterStartElement(writer, (xmlChar *)"brick");
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.brick%d.path", keyprefix, i);
+ ret = snprintf(key, sizeof(key), "%s.brick%d.path", keyprefix, i);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key, &buffer);
if (ret) {
@@ -4502,7 +4528,9 @@ cli_xml_snapshot_volume_status(xmlTextWriterPtr writer, xmlDocPtr doc,
buffer);
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.brick%d.vgname", keyprefix, i);
+ ret = snprintf(key, sizeof(key), "%s.brick%d.vgname", keyprefix, i);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key, &buffer);
if (ret) {
@@ -4515,7 +4543,9 @@ cli_xml_snapshot_volume_status(xmlTextWriterPtr writer, xmlDocPtr doc,
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.brick%d.status", keyprefix, i);
+ ret = snprintf(key, sizeof(key), "%s.brick%d.status", keyprefix, i);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key, &buffer);
if (ret) {
@@ -4528,7 +4558,9 @@ cli_xml_snapshot_volume_status(xmlTextWriterPtr writer, xmlDocPtr doc,
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.brick%d.pid", keyprefix, i);
+ ret = snprintf(key, sizeof(key), "%s.brick%d.pid", keyprefix, i);
+ if (ret < 0)
+ goto out;
ret = dict_get_int32(dict, key, &pid);
if (ret) {
@@ -4541,7 +4573,9 @@ cli_xml_snapshot_volume_status(xmlTextWriterPtr writer, xmlDocPtr doc,
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.brick%d.data", keyprefix, i);
+ ret = snprintf(key, sizeof(key), "%s.brick%d.data", keyprefix, i);
+ if (ret < 0)
+ goto out;
ret = dict_get_str(dict, key, &buffer);
if (ret) {
@@ -4554,7 +4588,10 @@ cli_xml_snapshot_volume_status(xmlTextWriterPtr writer, xmlDocPtr doc,
XML_RET_CHECK_AND_GOTO(ret, out);
- snprintf(key, sizeof(key), "%s.brick%d.lvsize", keyprefix, i);
+ ret = snprintf(key, sizeof(key), "%s.brick%d.lvsize", keyprefix, i);
+ if (ret < 0)
+ goto out;
+
ret = dict_get_str(dict, key, &buffer);
if (ret) {
gf_log("cli", GF_LOG_ERROR, "Unable to get LV Size");
diff --git a/cli/src/cli.c b/cli/src/cli.c
index fac32d3e9ca..a52b39c5fb8 100644
--- a/cli/src/cli.c
+++ b/cli/src/cli.c
@@ -33,12 +33,6 @@
#include <malloc.h>
#endif
-#ifdef HAVE_MALLOC_STATS
-#ifdef DEBUG
-#include <mcheck.h>
-#endif
-#endif
-
#include "cli.h"
#include "cli-quotad-client.h"
#include "cli-cmd.h"
@@ -61,7 +55,6 @@
#include "xdr-generic.h"
-extern int connected;
/* using argp for command line parsing */
const char *argp_program_version =
@@ -78,12 +71,16 @@ const char *argp_program_version =
const char *argp_program_bug_address = "<" PACKAGE_BUGREPORT ">";
struct rpc_clnt *global_quotad_rpc;
+
struct rpc_clnt *global_rpc;
rpc_clnt_prog_t *cli_rpc_prog;
extern struct rpc_clnt_program cli_prog;
+int cli_default_conn_timeout = 120;
+int cli_ten_minutes_timeout = 600;
+
static int
glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
{
@@ -306,14 +303,14 @@ cli_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
switch (event) {
case RPC_CLNT_CONNECT: {
- cli_cmd_broadcast_connected();
+ cli_cmd_broadcast_connected(_gf_true);
gf_log(this->name, GF_LOG_TRACE, "got RPC_CLNT_CONNECT");
break;
}
case RPC_CLNT_DISCONNECT: {
+ cli_cmd_broadcast_connected(_gf_false);
gf_log(this->name, GF_LOG_TRACE, "got RPC_CLNT_DISCONNECT");
- connected = 0;
if (!global_state->prompt && global_state->await_connected) {
ret = 1;
cli_out(
@@ -854,10 +851,6 @@ main(int argc, char *argv[])
if (ret)
goto out;
- ret = cli_cmd_cond_init();
- if (ret)
- goto out;
-
ret = cli_input_init(&state);
if (ret)
goto out;
diff --git a/cli/src/cli.h b/cli/src/cli.h
index 7166991a7ff..c0d933e8f8a 100644
--- a/cli/src/cli.h
+++ b/cli/src/cli.h
@@ -17,6 +17,7 @@
#include <glusterfs/quota-common-utils.h>
#include "cli1-xdr.h"
+#include "gd-common-utils.h"
#if (HAVE_LIB_XML)
#include <libxml/encoding.h>
@@ -39,8 +40,8 @@ enum argp_option_keys {
ARGP_PORT_KEY = 'p',
};
-int cli_default_conn_timeout;
-int cli_ten_minutes_timeout;
+extern int cli_default_conn_timeout;
+extern int cli_ten_minutes_timeout;
typedef enum {
COLD_BRICK_COUNT,
@@ -188,6 +189,12 @@ typedef ssize_t (*cli_serialize_t)(struct iovec outmsg, void *args);
extern struct cli_state *global_state; /* use only in readline callback */
+extern struct rpc_clnt *global_quotad_rpc;
+
+extern struct rpc_clnt *global_rpc;
+
+extern rpc_clnt_prog_t *cli_rpc_prog;
+
typedef const char *(*cli_selector_t)(void *wcon);
char *
@@ -266,8 +273,8 @@ int32_t
cli_cmd_volume_reset_parse(const char **words, int wordcount, dict_t **opt);
int32_t
-cli_cmd_gsync_set_parse(const char **words, int wordcount, dict_t **opt,
- char **errstr);
+cli_cmd_gsync_set_parse(struct cli_state *state, const char **words,
+ int wordcount, dict_t **opt, char **errstr);
int32_t
cli_cmd_quota_parse(const char **words, int wordcount, dict_t **opt);
@@ -283,6 +290,10 @@ cli_cmd_volume_set_parse(struct cli_state *state, const char **words,
int wordcount, dict_t **options, char **op_errstr);
int32_t
+cli_cmd_ganesha_parse(struct cli_state *state, const char **words,
+ int wordcount, dict_t **options, char **op_errstr);
+
+int32_t
cli_cmd_get_state_parse(struct cli_state *state, const char **words,
int wordcount, dict_t **options, char **op_errstr);
@@ -324,11 +335,14 @@ cli_local_get();
void
cli_local_wipe(cli_local_t *local);
+gf_boolean_t
+cli_cmd_connected();
+
int32_t
-cli_cmd_await_connected();
+cli_cmd_await_connected(unsigned timeout);
int32_t
-cli_cmd_broadcast_connected();
+cli_cmd_broadcast_connected(gf_boolean_t status);
int
cli_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
@@ -483,9 +497,6 @@ cli_xml_output_snapshot(int cmd_type, dict_t *dict, int op_ret, int op_errno,
int
cli_xml_snapshot_status_single_snap(cli_local_t *local, dict_t *dict,
char *key);
-char *
-is_server_debug_xlator(void *myframe);
-
int32_t
cli_cmd_snapshot_parse(const char **words, int wordcount, dict_t **options,
struct cli_state *state);
diff --git a/configure.ac b/configure.ac
index bec743bf084..e2d6fd66cec 100644
--- a/configure.ac
+++ b/configure.ac
@@ -21,15 +21,13 @@ AM_INIT_AUTOMAKE([tar-pax foreign])
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES(yes)])
-if make --help 2>&1 | grep -q no-print-directory; then
- AM_MAKEFLAGS="$AM_MAKEFLAGS --no-print-directory";
-fi
-
AC_CONFIG_HEADERS([config.h site.h])
AC_CONFIG_FILES([Makefile
libglusterfs/Makefile
libglusterfs/src/Makefile
+ libglusterd/Makefile
+ libglusterd/src/Makefile
geo-replication/src/peer_gsec_create
geo-replication/src/peer_mountbroker
geo-replication/src/peer_mountbroker.py
@@ -46,11 +44,8 @@ AC_CONFIG_FILES([Makefile
rpc/rpc-transport/Makefile
rpc/rpc-transport/socket/Makefile
rpc/rpc-transport/socket/src/Makefile
- rpc/rpc-transport/rdma/Makefile
- rpc/rpc-transport/rdma/src/Makefile
rpc/xdr/Makefile
rpc/xdr/src/Makefile
- rpc/xdr/gen/Makefile
xlators/Makefile
xlators/meta/Makefile
xlators/meta/src/Makefile
@@ -169,6 +164,8 @@ AC_CONFIG_FILES([Makefile
xlators/features/cloudsync/src/cloudsync-plugins/src/cloudsyncs3/src/Makefile
xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/Makefile
xlators/features/cloudsync/src/cloudsync-plugins/src/cvlt/src/Makefile
+ xlators/features/metadisp/Makefile
+ xlators/features/metadisp/src/Makefile
xlators/playground/Makefile
xlators/playground/template/Makefile
xlators/playground/template/src/Makefile
@@ -196,6 +193,10 @@ AC_CONFIG_FILES([Makefile
extras/init.d/glustereventsd-Debian
extras/init.d/glustereventsd-Redhat
extras/init.d/glustereventsd-FreeBSD
+ extras/ganesha/Makefile
+ extras/ganesha/config/Makefile
+ extras/ganesha/scripts/Makefile
+ extras/ganesha/ocf/Makefile
extras/systemd/Makefile
extras/systemd/glusterd.service
extras/systemd/glustereventsd.service
@@ -274,36 +275,78 @@ AC_ARG_ENABLE([debug],
[Enable debug build options.]))
if test "x$enable_debug" = "xyes"; then
BUILD_DEBUG=yes
- GF_CFLAGS="${GF_CFLAGS} -g -rdynamic -O0 -DDEBUG"
+ GF_CFLAGS="${GF_CFLAGS} -g -O0 -DDEBUG"
else
BUILD_DEBUG=no
fi
+SANITIZER=none
+
AC_ARG_ENABLE([asan],
AC_HELP_STRING([--enable-asan],
[Enable Address Sanitizer support]))
if test "x$enable_asan" = "xyes"; then
- BUILD_ASAN=yes
- AC_CHECK_LIB([asan], [__asan_report_error], ,
- [AC_MSG_ERROR([libasan.so not found, this is required for --enable-asan])])
- GF_CFLAGS="${GF_CFLAGS} -O1 -g -fsanitize=address -fno-omit-frame-pointer"
- dnl -lasan always need to be the first library, otherwise libxml complains
- GF_LDFLAGS="-lasan ${GF_LDFLAGS}"
-else
- BUILD_ASAN=no
+ SANITIZER=asan
+ AC_CHECK_LIB([asan], [__asan_init], ,
+ [AC_MSG_ERROR([--enable-asan requires libasan.so, exiting])])
+ GF_CFLAGS="${GF_CFLAGS} -O2 -g -fsanitize=address -fno-omit-frame-pointer"
+ GF_LDFLAGS="${GF_LDFLAGS} -lasan"
fi
-AC_ARG_ENABLE([atan],
+AC_ARG_ENABLE([tsan],
AC_HELP_STRING([--enable-tsan],
- [Enable ThreadSanitizer support]))
+ [Enable Thread Sanitizer support]))
if test "x$enable_tsan" = "xyes"; then
- BUILD_TSAN=yes
+ if test "x$SANITIZER" != "xnone"; then
+ AC_MSG_ERROR([only one sanitizer can be enabled at once])
+ fi
+ SANITIZER=tsan
AC_CHECK_LIB([tsan], [__tsan_init], ,
- [AC_MSG_ERROR([libtsan.so not found, this is required for --enable-tsan])])
- GF_CFLAGS="${GF_CFLAGS} -O2 -g -fsanitize=thread"
+ [AC_MSG_ERROR([--enable-tsan requires libtsan.so, exiting])])
+ if test "x$ac_cv_lib_tsan___tsan_init" = xyes; then
+ AC_MSG_CHECKING([whether tsan API can be used])
+ saved_CFLAGS=${CFLAGS}
+ CFLAGS="${CFLAGS} -fsanitize=thread"
+ AC_COMPILE_IFELSE(
+ [AC_LANG_PROGRAM([
+ [#include <sanitizer/tsan_interface.h>]],
+ [[__tsan_create_fiber(0)]])],
+ [TSAN_API=yes], [TSAN_API=no])
+ AC_MSG_RESULT([$TSAN_API])
+ if test x$TSAN_API = "xyes"; then
+ AC_DEFINE(HAVE_TSAN_API, 1, [Define if tsan API can be used.])
+ fi
+ CFLAGS=${saved_CFLAGS}
+ fi
+ GF_CFLAGS="${GF_CFLAGS} -O2 -g -fsanitize=thread -fno-omit-frame-pointer"
GF_LDFLAGS="${GF_LDFLAGS} -ltsan"
-else
- BUILD_TSAN=no
+fi
+
+AC_ARG_ENABLE([ubsan],
+ AC_HELP_STRING([--enable-ubsan],
+ [Enable Undefined Behavior Sanitizer support]))
+if test "x$enable_ubsan" = "xyes"; then
+ if test "x$SANITIZER" != "xnone"; then
+ AC_MSG_ERROR([only one sanitizer can be enabled at once])
+ fi
+ SANITIZER=ubsan
+ AC_CHECK_LIB([ubsan], [__ubsan_default_options], ,
+ [AC_MSG_ERROR([--enable-ubsan requires libubsan.so, exiting])])
+ GF_CFLAGS="${GF_CFLAGS} -O2 -g -fsanitize=undefined -fno-omit-frame-pointer"
+ GF_LDFLAGS="${GF_LDFLAGS} -lubsan"
+fi
+
+# Initialize CFLAGS before usage
+BUILD_TCMALLOC=no
+AC_ARG_ENABLE([tcmalloc],
+ AC_HELP_STRING([--enable-tcmalloc],
+ [Enable linking with tcmalloc library.]))
+if test "x$enable_tcmalloc" = "xyes"; then
+ BUILD_TCMALLOC=yes
+ GF_CFLAGS="${GF_CFLAGS} -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-free"
+ AC_CHECK_LIB([tcmalloc], [malloc], [],
+ [AC_MSG_ERROR([when --enable-tcmalloc is used, tcmalloc library needs to be present])])
+ GF_LDFLAGS="-ltcmalloc ${GF_LDFLAGS}"
fi
@@ -366,12 +409,27 @@ esac
# --enable-valgrind prevents calling dlclose(), this leaks memory
AC_ARG_ENABLE([valgrind],
- AC_HELP_STRING([--enable-valgrind],
- [Enable valgrind for resource leak debugging.]))
-if test "x$enable_valgrind" = "xyes"; then
- AC_DEFINE(RUN_WITH_VALGRIND, 1, [define if all processes should run under valgrind])
-fi
-
+ AC_HELP_STRING([--enable-valgrind@<:@=memcheck,drd@:>@],
+ [Enable valgrind for resource leak (memcheck, which is
+ the default) or thread synchronization (drd) debugging.]))
+case x$enable_valgrind in
+ xmemcheck|xyes)
+ AC_DEFINE(RUN_WITH_MEMCHECK, 1,
+ [Define if all processes should run under 'valgrind --tool=memcheck'.])
+ VALGRIND_TOOL=memcheck
+ ;;
+ xdrd)
+ AC_DEFINE(RUN_WITH_DRD, 1,
+ [Define if all processes should run under 'valgrind --tool=drd'.])
+ VALGRIND_TOOL=drd
+ ;;
+ x|xno)
+ VALGRIND_TOOL=no
+ ;;
+ *)
+ AC_MSG_ERROR([Please specify --enable-valgrind@<:@=memcheck,drd@:>@])
+ ;;
+esac
AC_ARG_WITH([previous-options],
[AS_HELP_STRING([--with-previous-options],
@@ -672,6 +730,14 @@ if test "x$enable_fuse_client" != "xno"; then
fi
AC_SUBST(FUSE_CLIENT_SUBDIR)
+
+AC_ARG_ENABLE([fuse-notifications],
+ AS_HELP_STRING([--disable-fuse-notifications], [Disable FUSE notifications]))
+
+AS_IF([test "x$enable_fuse_notifications" != "xno"], [
+ AC_DEFINE([HAVE_FUSE_NOTIFICATIONS], [1], [Use FUSE notifications])
+])
+
# end FUSE section
@@ -712,53 +778,6 @@ if test "x$enable_epoll" != "xno"; then
fi
# end EPOLL section
-
-# IBVERBS section
-AC_ARG_ENABLE([ibverbs],
- AC_HELP_STRING([--disable-ibverbs],
- [Do not build the ibverbs transport]))
-
-if test "x$enable_ibverbs" != "xno"; then
- AC_CHECK_LIB([ibverbs],
- [ibv_get_device_list],
- [HAVE_LIBIBVERBS="yes"],
- [HAVE_LIBIBVERBS="no"])
- AC_CHECK_LIB([rdmacm], [rdma_create_id], [HAVE_RDMACM="yes"], [HAVE_RDMACM="no"])
- if test "x$HAVE_RDMACM" = "xyes" ; then
- AC_CHECK_DECLS(
- [RDMA_OPTION_ID_REUSEADDR],
- [],
- [AC_ERROR([Need at least version 1.0.15 of librdmacm])],
- [[#include <rdma/rdma_cma.h>]])
- fi
-fi
-
-if test "x$enable_ibverbs" = "xyes"; then
- if test "x$HAVE_LIBIBVERBS" = "xno"; then
- echo "ibverbs-transport requested, but libibverbs is not present."
- exit 1
- fi
-
- if test "x$HAVE_RDMACM" = "xno"; then
- echo "ibverbs-transport requested, but librdmacm is not present."
- exit 1
- fi
-fi
-
-BUILD_RDMA=no
-BUILD_IBVERBS=no
-if test "x$enable_ibverbs" != "xno" -a "x$HAVE_LIBIBVERBS" = "xyes" -a "x$HAVE_RDMACM" = "xyes"; then
- IBVERBS_SUBDIR=ib-verbs
- BUILD_IBVERBS=yes
- RDMA_SUBDIR=rdma
- BUILD_RDMA=yes
-fi
-
-AC_SUBST(IBVERBS_SUBDIR)
-AC_SUBST(RDMA_SUBDIR)
-# end IBVERBS section
-
-
# SYNCDAEMON section
AC_ARG_ENABLE([georeplication],
AC_HELP_STRING([--disable-georeplication],
@@ -766,6 +785,9 @@ AC_ARG_ENABLE([georeplication],
BUILD_SYNCDAEMON=no
case $host_os in
+ freebsd*)
+#do nothing
+ ;;
linux*)
#do nothing
;;
@@ -805,6 +827,17 @@ fi
AC_SUBST(GEOREP_EXTRAS_SUBDIR)
AM_CONDITIONAL(USE_GEOREP, test "x$enable_georeplication" != "xno")
+# METADISP section
+AC_ARG_ENABLE([metadisp],
+ AC_HELP_STRING([--enable-metadisp],
+ [Enable the metadata dispersal xlator]))
+BUILD_METADISP=no
+if test "x${enable_metadisp}" = "xyes"; then
+ BUILD_METADISP=yes
+fi
+AM_CONDITIONAL([BUILD_METADISP], [test "x$BUILD_METADISP" = "xyes"])
+# end METADISP section
+
# Events section
AC_ARG_ENABLE([events],
AC_HELP_STRING([--disable-events],
@@ -943,6 +976,25 @@ if test "x${have_backtrace}" = "xyes"; then
fi
AC_SUBST(HAVE_BACKTRACE)
+dnl Old (before C11) compiler can compile (but not link) this:
+dnl
+dnl int main () {
+dnl _Static_assert(1, "True");
+dnl return 0;
+dnl }
+dnl
+dnl assuming that _Static_assert is an implicitly declared function. So
+dnl we're trying to link just to make sure that this is not the case.
+
+AC_MSG_CHECKING([whether $CC supports C11 _Static_assert])
+AC_TRY_LINK([], [_Static_assert(1, "True");],
+ [STATIC_ASSERT=yes], [STATIC_ASSERT=no])
+
+AC_MSG_RESULT([$STATIC_ASSERT])
+if test x$STATIC_ASSERT = "xyes"; then
+ AC_DEFINE(HAVE_STATIC_ASSERT, 1, [Define if C11 _Static_assert is supported.])
+fi
+
if test "x${have_backtrace}" != "xyes"; then
AC_TRY_COMPILE([#include <math.h>], [double x=0.0; x=ceil(0.0);],
[],
@@ -950,11 +1002,11 @@ AC_TRY_COMPILE([#include <math.h>], [double x=0.0; x=ceil(0.0);],
fi
dnl glusterfs prints memory usage to stderr by sending it SIGUSR1
-AC_CHECK_FUNC([malloc_stats], [have_malloc_stats=yes])
-if test "x${have_malloc_stats}" = "xyes"; then
- AC_DEFINE(HAVE_MALLOC_STATS, 1, [define if found malloc_stats])
+AC_CHECK_FUNC([mallinfo], [have_mallinfo=yes])
+if test "x${have_mallinfo}" = "xyes"; then
+ AC_DEFINE(HAVE_MALLINFO, 1, [define if found mallinfo])
fi
-AC_SUBST(HAVE_MALLOC_STATS)
+AC_SUBST(HAVE_MALLINFO)
dnl Linux, Solaris, Cygwin
AC_CHECK_MEMBERS([struct stat.st_atim.tv_nsec])
@@ -981,6 +1033,9 @@ case $host_os in
CFLAGS="${CFLAGS} -isystem /usr/local/include"
ARGP_LDADD=-largp
;;
+ *netbsd*)
+ ARGP_LDADD=-largp
+ ;;
esac
dnl argp-standalone does not provide a pkg-config file
AC_CHECK_HEADER([argp.h], AC_DEFINE(HAVE_ARGP, 1, [have argp]))
@@ -1050,6 +1105,19 @@ else
CFLAGS=${OLD_CFLAGS}
fi
+AC_CHECK_FUNC([syncfs], [have_syncfs=yes])
+if test "x${have_syncfs}" = "xyes"; then
+ AC_DEFINE(HAVE_SYNCFS, 1, [define if syncfs exists])
+else
+ OLD_CFLAGS=${CFLAGS}
+ CFLAGS="-D_GNU_SOURCE"
+ AC_CHECK_DECL([SYS_syncfs], , , [#include <sys/syscall.h>])
+ if test "x${ac_cv_have_decl_SYS_syncfs}" = "xyes"; then
+ AC_DEFINE(HAVE_SYNCFS_SYS, 1, [define if SYS_syncfs is available])
+ fi
+ CFLAGS=${OLD_CFLAGS}
+fi
+
BUILD_NANOSECOND_TIMESTAMPS=no
AC_CHECK_FUNC([utimensat], [have_utimensat=yes])
if test "x${have_utimensat}" = "xyes"; then
@@ -1188,7 +1256,6 @@ if test "x$exec_prefix" = xNONE; then
exec_prefix="$(eval echo $prefix)"
fi
GLUSTERFS_LIBEXECDIR="$(eval echo $libexecdir)/glusterfs"
-GLUSTERFSD_MISCDIR="$(eval echo $prefix)/var/lib/misc/glusterfsd"
prefix=$old_prefix
exec_prefix=$old_exec_prefix
@@ -1200,6 +1267,8 @@ fi
localstatedir="$(eval echo ${localstatedir})"
LOCALSTATEDIR=$localstatedir
+GLUSTERFSD_MISCDIR="$(eval echo ${localstatedir})/lib/misc/glusterfsd"
+
old_prefix=$prefix
if test "x$prefix" = xNONE; then
prefix=$ac_default_prefix
@@ -1241,10 +1310,6 @@ case $host_os in
;;
*freebsd*)
GF_HOST_OS="GF_BSD_HOST_OS"
- GF_CFLAGS="${GF_CFLAGS} -O0"
- GF_CFLAGS="${GF_CFLAGS} -DTHREAD_UNSAFE_BASENAME"
- GF_CFLAGS="${GF_CFLAGS} -DTHREAD_UNSAFE_DIRNAME"
- GF_CFLAGS="${GF_CFLAGS} -D_LIBGEN_H_"
GF_CFLAGS="${GF_CFLAGS} -DO_DSYNC=0"
GF_CFLAGS="${GF_CFLAGS} -Dxdr_quad_t=xdr_longlong_t"
GF_CFLAGS="${GF_CFLAGS} -Dxdr_u_quad_t=xdr_u_longlong_t"
@@ -1364,6 +1429,7 @@ AC_ARG_ENABLE([gnfs],
[Enable legacy gnfs server xlator.]))
if test "x${with_server}" = "xyes" -a "x$enable_gnfs" = "xyes"; then
BUILD_GNFS="yes"
+ GF_CFLAGS="$GF_CFLAGS -DBUILD_GNFS"
RPCBIND_SERVICE="rpcbind.service"
fi
AM_CONDITIONAL([BUILD_GNFS], [test x$BUILD_GNFS = xyes])
@@ -1524,9 +1590,9 @@ case $host_os in
;;
esac
dnl GF_XLATOR_DEFAULT_LDFLAGS is for most xlators that expose a common set of symbols
-GF_XLATOR_DEFAULT_LDFLAGS='-avoid-version -export-symbols $(top_srcdir)/xlators/xlator.sym $(UUID_LIBS) $(GF_NO_UNDEFINED)'
+GF_XLATOR_DEFAULT_LDFLAGS='-avoid-version -export-symbols $(top_srcdir)/xlators/xlator.sym $(UUID_LIBS) $(GF_NO_UNDEFINED) $(TIRPC_LIBS)'
dnl GF_XLATOR_LDFLAGS is for xlators that expose extra symbols, e.g. dht
-GF_XLATOR_LDFLAGS='-avoid-version $(UUID_LIBS) $(GF_NO_UNDEFINED)'
+GF_XLATOR_LDFLAGS='-avoid-version $(UUID_LIBS) $(GF_NO_UNDEFINED) $(TIRPC_LIBS)'
AC_SUBST(GF_HOST_OS)
AC_SUBST(GF_CFLAGS)
@@ -1541,6 +1607,13 @@ AC_SUBST(AM_LIBTOOLFLAGS)
AC_SUBST(GF_NO_UNDEFINED)
AC_SUBST(GF_XLATOR_DEFAULT_LDFLAGS)
AC_SUBST(GF_XLATOR_LDFLAGS)
+AC_SUBST(GF_XLATOR_MGNT_LIBADD)
+
+case $host_os in
+ *freebsd*)
+ GF_XLATOR_MGNT_LIBADD="-lutil -lprocstat"
+ ;;
+esac
CONTRIBDIR='$(top_srcdir)/contrib'
AC_SUBST(CONTRIBDIR)
@@ -1606,15 +1679,14 @@ echo
echo "GlusterFS configure summary"
echo "==========================="
echo "FUSE client : $BUILD_FUSE_CLIENT"
-echo "Infiniband verbs : $BUILD_IBVERBS"
echo "epoll IO multiplex : $BUILD_EPOLL"
echo "fusermount : $BUILD_FUSERMOUNT"
echo "readline : $BUILD_READLINE"
echo "georeplication : $BUILD_SYNCDAEMON"
echo "Linux-AIO : $BUILD_LIBAIO"
echo "Enable Debug : $BUILD_DEBUG"
-echo "Enable ASAN : $BUILD_ASAN"
-echo "Enable TSAN : $BUILD_TSAN"
+echo "Run with Valgrind : $VALGRIND_TOOL"
+echo "Sanitizer enabled : $SANITIZER"
echo "Use syslog : $USE_SYSLOG"
echo "XML output : $BUILD_XML_OUTPUT"
echo "Unit Tests : $BUILD_UNITTEST"
@@ -1632,16 +1704,14 @@ echo "IPV6 default : $with_ipv6_default"
echo "Use TIRPC : $with_libtirpc"
echo "With Python : ${PYTHON_VERSION}"
echo "Cloudsync : $BUILD_CLOUDSYNC"
+echo "Metadata dispersal : $BUILD_METADISP"
+echo "Link with TCMALLOC : $BUILD_TCMALLOC"
echo
-if test "x$BUILD_ASAN" = "xyes"; then
- echo "### Run below command before executing your tests if your system"
- echo "### has 'gcc --version' above 7.x (works on Fedora 27 and Above)"
- echo "export ASAN_OPTIONS=log_path=/var/log/glusterfs/asan-output.log"
- echo ""
- echo "### Above is required to get details of asan run, as glusterfs"
- echo "### processes are daemon processes. Further details and more"
- echo "### options can be found under 'Run-time flags' at"
- echo "### https://github.com/google/sanitizers/wiki/AddressSanitizerFlags"
- echo
+# dnl Note: ${X^^} capitalization assumes bash >= 4.x
+if test "x$SANITIZER" != "xnone"; then
+ echo "Note: since glusterfs processes are daemon processes, use"
+ echo "'export ${SANITIZER^^}_OPTIONS=log_path=/path/to/xxx.log' to collect"
+ echo "sanitizer output. Further details and more options can be"
+ echo "found at https://github.com/google/sanitizers."
fi
diff --git a/contrib/fuse-lib/mount.c b/contrib/fuse-lib/mount.c
index ffa0a4b6316..06ff191f542 100644
--- a/contrib/fuse-lib/mount.c
+++ b/contrib/fuse-lib/mount.c
@@ -52,12 +52,16 @@ gf_fuse_unmount (const char *mountpoint, int fd)
if (geteuid () == 0) {
fuse_mnt_umount ("fuse", mountpoint, mountpoint, 1);
return;
+ } else {
+ GFFUSE_LOGERR ("fuse: Effective-uid: %d", geteuid());
}
res = umount2 (mountpoint, 2);
if (res == 0)
return;
+ GFFUSE_LOGERR ("fuse: failed to unmount %s: %s",
+ mountpoint, strerror (errno));
pid = fork ();
if (pid == -1)
return;
@@ -67,6 +71,8 @@ gf_fuse_unmount (const char *mountpoint, int fd)
"--", mountpoint, NULL };
execvp (FUSERMOUNT_PROG, (char **)argv);
+ GFFUSE_LOGERR ("fuse: failed to execute fuserumount: %s",
+ strerror (errno));
_exit (1);
}
waitpid (pid, NULL, 0);
@@ -384,6 +390,7 @@ fuse_mount_sys (const char *mountpoint, char *fsname,
build_iovec (&iov, &iovlen, "from", "/dev/fuse", -1);
build_iovec (&iov, &iovlen, "volname", source, -1);
build_iovec (&iov, &iovlen, "fd", fdstr, -1);
+ build_iovec (&iov, &iovlen, "allow_other", NULL, -1);
ret = nmount (iov, iovlen, mountflags);
#else
ret = mount (source, mountpoint, fstype, mountflags,
diff --git a/contrib/sunrpc/xdr_sizeof.c b/contrib/sunrpc/xdr_sizeof.c
deleted file mode 100644
index ca1f7bf0a5e..00000000000
--- a/contrib/sunrpc/xdr_sizeof.c
+++ /dev/null
@@ -1,204 +0,0 @@
-/*
- * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
- *
- * @APPLE_LICENSE_HEADER_START@
- *
- * Portions Copyright (c) 1999 Apple Computer, Inc. All Rights
- * Reserved. This file contains Original Code and/or Modifications of
- * Original Code as defined in and that are subject to the Apple Public
- * Source License Version 1.1 (the "License"). You may not use this file
- * except in compliance with the License. Please obtain a copy of the
- * License at http://www.apple.com/publicsource and read it before using
- * this file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON- INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-
-/*
- * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
- * unrestricted use provided that this legend is included on all tape
- * media and as a part of the software program in whole or part. Users
- * may copy or modify Sun RPC without charge, but are not authorized
- * to license or distribute it to anyone else except as part of a product or
- * program developed by the user.
- *
- * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
- * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
- *
- * Sun RPC is provided with no support and without any obligation on the
- * part of Sun Microsystems, Inc. to assist in its use, correction,
- * modification or enhancement.
- *
- * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
- * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
- * OR ANY PART THEREOF.
- *
- * In no event will Sun Microsystems, Inc. be liable for any lost revenue
- * or profits or other special, indirect and consequential damages, even if
- * Sun has been advised of the possibility of such damages.
- *
- * Sun Microsystems, Inc.
- * 2550 Garcia Avenue
- * Mountain View, California 94043
- */
-
-/*
- * xdr_sizeof.c
- *
- * Copyright 1990 Sun Microsystems, Inc.
- *
- * General purpose routine to see how much space something will use
- * when serialized using XDR.
- */
-
-#ifdef GF_DARWIN_HOST_OS
-
-#include <rpc/types.h>
-#include <rpc/xdr.h>
-#include <sys/types.h>
-#include <sys/cdefs.h>
-
-#include <stdlib.h>
-
-/* ARGSUSED */
-#ifdef GF_DARWIN_HOST_OS
-static bool_t
-x_putlong (XDR *xdrs, const int *longp)
-{
- xdrs->x_handy += BYTES_PER_XDR_UNIT;
- return TRUE;
-}
-
-#else
-static bool_t
-x_putlong (XDR *xdrs, const long *longp)
-{
- xdrs->x_handy += BYTES_PER_XDR_UNIT;
- return TRUE;
-}
-#endif
-
-/* ARGSUSED */
-static bool_t
-x_putbytes (XDR *xdrs, const char *bp, u_int len)
-{
- xdrs->x_handy += len;
- return TRUE;
-}
-
-#ifdef GF_DARWIN_HOST_OS
-static u_int
-x_getpostn (XDR *xdrs)
-{
- return xdrs->x_handy;
-}
-#else
-static u_int
-x_getpostn (const XDR *xdrs)
-{
- return xdrs->x_handy;
-}
-#endif
-
-/* ARGSUSED */
-static bool_t
-x_setpostn (XDR *xdrs, u_int len)
-{
- /* This is not allowed */
- return FALSE;
-}
-
-static int32_t *
-x_inline (XDR *xdrs, u_int len)
-{
- if (len == 0)
- return NULL;
- if (xdrs->x_op != XDR_ENCODE)
- return NULL;
- if (len < (u_int) (long int) xdrs->x_base)
- {
- /* x_private was already allocated */
- xdrs->x_handy += len;
- return (int32_t *) xdrs->x_private;
- }
- else
- {
- /* Free the earlier space and allocate new area */
- free (xdrs->x_private);
- if ((xdrs->x_private = (caddr_t) malloc (len)) == NULL)
- {
- xdrs->x_base = 0;
- return NULL;
- }
- xdrs->x_base = (void *) (long) len;
- xdrs->x_handy += len;
- return (int32_t *) xdrs->x_private;
- }
-}
-
-static int
-harmless (void)
-{
- /* Always return FALSE/NULL, as the case may be */
- return 0;
-}
-
-static void
-x_destroy (XDR *xdrs)
-{
- xdrs->x_handy = 0;
- xdrs->x_base = 0;
- if (xdrs->x_private)
- {
- free (xdrs->x_private);
- xdrs->x_private = NULL;
- }
- return;
-}
-
-unsigned long
-xdr_sizeof (xdrproc_t func, void *data)
-{
- XDR x;
- struct xdr_ops ops;
- bool_t stat;
-
-#ifdef GF_DARWIN_HOST_OS
- typedef bool_t (*dummyfunc1) (XDR *, int *);
-#else
- typedef bool_t (*dummyfunc1) (XDR *, long *);
-#endif
- typedef bool_t (*dummyfunc2) (XDR *, caddr_t, u_int);
-
- ops.x_putlong = x_putlong;
- ops.x_putbytes = x_putbytes;
- ops.x_inline = x_inline;
- ops.x_getpostn = x_getpostn;
- ops.x_setpostn = x_setpostn;
- ops.x_destroy = x_destroy;
-
- /* the other harmless ones */
- ops.x_getlong = (dummyfunc1) harmless;
- ops.x_getbytes = (dummyfunc2) harmless;
-
- x.x_op = XDR_ENCODE;
- x.x_ops = &ops;
- x.x_handy = 0;
- x.x_private = (caddr_t) NULL;
- x.x_base = (caddr_t) 0;
-
- stat = func (&x, data, 0);
- if (x.x_private)
- free (x.x_private);
- return (stat == TRUE ? (unsigned) x.x_handy : 0);
-}
-#endif /* GF_DARWIN_HOST_OS */
diff --git a/doc/README.md b/doc/README.md
index 8a92bd990d0..6aa28642ef4 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -18,7 +18,7 @@ The Gluster features which are 'in progress' or implemented can be found at [git
## Upgrade Guide ##
-The gluster upgrade guide is maintained at [github](https://github.com/gluster/glusterdocs). The browsable upgrade guide can be found [here](http://docs.gluster.org/en/latest/Upgrade-Guide/README/)
+The gluster upgrade guide is maintained at [github](https://github.com/gluster/glusterdocs). The browsable upgrade guide can be found [here](http://docs.gluster.org/en/latest/Upgrade-Guide)
The doc patch has to be sent against the above mentioned repository.
diff --git a/doc/debugging/gfid-to-path.md b/doc/debugging/gfid-to-path.md
index 49e9aa09a3f..1917bf2cca1 100644
--- a/doc/debugging/gfid-to-path.md
+++ b/doc/debugging/gfid-to-path.md
@@ -64,9 +64,5 @@ trusted.glusterfs.pathinfo="(<DISTRIBUTE:test-dht> <POSIX(/mnt/brick-test/b):vm1
```
---
-### Get file path from GFID (Method 3):
-https://gist.github.com/semiosis/4392640
-
----
#### References and links:
[posix: placeholders for GFID to path conversion](http://review.gluster.org/5951)
diff --git a/doc/debugging/statedump.md b/doc/debugging/statedump.md
index 9d594320ddc..9dfdce15fad 100644
--- a/doc/debugging/statedump.md
+++ b/doc/debugging/statedump.md
@@ -328,7 +328,7 @@ cur-stdalloc=214
max-stdalloc=220
```
-Here, with cold count being 0 by default, `cur-stdalloc` indicated the number of `dict_t` objects that were allocated in heap using `mem_get()`, and yet to be freed using `mem_put()` (refer to this [page](https://github.com/gluster/glusterfs/blob/master/doc/data-structures/mem-pool.md) for more details on how mempool works). After the test case (name selfheal of 100 files), there was a rise in the cur-stdalloc value (from 14 to 214) for `dict_t`.
+Here, with cold count being 0 by default, `cur-stdalloc` indicated the number of `dict_t` objects that were allocated in heap using `mem_get()`, and yet to be freed using `mem_put()` (refer to this [page](../developer-guide/datastructure-mem-pool.md) for more details on how mempool works). After the test case (name selfheal of 100 files), there was a rise in the cur-stdalloc value (from 14 to 214) for `dict_t`.
After these leaks were fixed, glusterfs was again compiled with -DDEBUG flags, and the same steps were performed again and statedump was taken before and after executing the test case, of the mount. This was done to ascertain the validity of the fix. And the following are the results:
diff --git a/doc/developer-guide/README.md b/doc/developer-guide/README.md
index 1501eeb9207..aaf9c7476b0 100644
--- a/doc/developer-guide/README.md
+++ b/doc/developer-guide/README.md
@@ -51,11 +51,10 @@ Daemon Management Framework
Translators
-----------
-- [Block Device Tanslator](./bd-xlator.md)
- [Performance/write-Behind Translator](./write-behind.md)
- [Translator Development](./translator-development.md)
- [Storage/posix Translator](./posix.md)
-- [Compression translator](./network_compression.md)
+
Brick multiplex
---------------
@@ -74,7 +73,7 @@ Testing/Debugging
- [Using the Gluster Test
Framework](./Using-Gluster-Test-Framework.md) - Step by
step instructions for running the Gluster Test Framework
-- [Coredump Analysis](./coredump-analysis.md) - Steps to analize coredumps generated by regression machines.
+- [Coredump Analysis](../debugging/analyzing-regression-cores.md) - Steps to analyze coredumps generated by regression machines.
- [Identifying Resource Leaks](./identifying-resource-leaks.md)
Release Process
diff --git a/doc/developer-guide/commit-guidelines.md b/doc/developer-guide/commit-guidelines.md
index 9b191dac178..38bbe525cbd 100644
--- a/doc/developer-guide/commit-guidelines.md
+++ b/doc/developer-guide/commit-guidelines.md
@@ -118,7 +118,7 @@ The 'bug' line can reference a bug in a few ways. Gerrit creates a link to the b
**Updates: bz#1193929** -- use 'Updates: bz#NNNN' if the commit is only a partial fix and more work is needed.
**Updates: #175** -- use 'Updates: #NNNN' if the commit is only a partial fix and more work is needed for the feature completion.
-We encourage the use of Co-Authored-By: name <name@example.com> in commit messages to indicate people who worked on a particular patch. It's a convention for recognizing multiple authors, and our projects would encourage the stats tools to observe it when collecting statistics.
+We encourage the use of `Co-Authored-By: name <name@example.com>` in commit messages to indicate people who worked on a particular patch. It's a convention for recognizing multiple authors, and our projects would encourage the stats tools to observe it when collecting statistics.
### Summary of Git commit message structure
diff --git a/doc/developer-guide/fuse-interrupt.md b/doc/developer-guide/fuse-interrupt.md
index f92b5532eaf..ec991b81ec5 100644
--- a/doc/developer-guide/fuse-interrupt.md
+++ b/doc/developer-guide/fuse-interrupt.md
@@ -23,9 +23,10 @@ not exported to a header file).
```
enum fuse_interrupt_state {
- INTERRUPT_NONE,
+ /* ... */
INTERRUPT_SQUELCHED,
INTERRUPT_HANDLED,
+ /* ... */
};
typedef enum fuse_interrupt_state fuse_interrupt_state_t;
struct fuse_interrupt_record;
@@ -62,8 +63,58 @@ dummy implementation only for demonstration purposes.) Flush is chosen
because a `FLUSH` interrupt is easy to trigger (see
*tests/features/interrupt.t*). Interrupt handling for flush is switched on
by `--fuse-flush-handle-interrupt` (a hidden glusterfs command line flag).
-The flush interrupt handling code is guarded by the
-`flush_handle_interrupt` Boolean member of `fuse_private_t`.
+The implementation of flush interrupt is contained in the
+`fuse_flush_interrupt_handler()` function and blocks guarded by the
+
+```
+if (priv->flush_handle_interrupt) { ...
+```
+
+conditional (where `priv` is a `*fuse_private_t`).
+
+### Overview
+
+"Regular" fuse fops and interrupt handlers interact via a list containing
+interrupt records.
+
+If a fop wishes to have its interrupts handled, it needs to set up an
+interrupt record and insert it into the list; also when it's to finish
+(ie. in its "cbk" stage) it needs to delete the record from the list.
+
+If no interrupt happens, basically that's all to it - a list insertion
+and deletion.
+
+However, if an interrupt comes for the fop, the interrupt FUSE request
+will carry the data identifying an ongoing fop (that is, its `unique`),
+and based on that, the interrupt record will be looked up in the list, and
+the specific interrupt handler (a member of the interrupt record) will be
+called.
+
+Usually the fop needs to share some data with the interrupt handler to
+enable it to perform its task (also shared via the interrupt record).
+The interrupt API offers two approaches to manage shared data:
+- _Async or reference-counting strategy_: from the point on when the interrupt
+ record is inserted to the list, it's owned jointly by the regular fop and
+ the prospective interrupt handler. Both of them need to check before they
+ return if the other is still holding a reference; if not, then they are
+ responsible for reclaiming the shared data.
+- _Sync or borrow strategy_: the interrupt handler is considered a borrower
+ of the shared data. The interrupt handler should not reclaim the shared
+ data. The fop will wait for the interrupt handler to finish (ie., the borrow
+ to be returned), then it has to reclaim the shared data.
+
+The user of the interrupt API need to call the following functions to
+instrument this control flow:
+- `fuse_interrupt_record_insert()` in the fop to insert the interrupt record to
+ the list;
+- `fuse_interrupt_finish_fop()`in the fop (cbk) and
+- `fuse_interrupt_finish_interrupt()`in the interrupt handler
+
+to perform needed synchronization at the end their tenure. The data management
+strategies are implemented by the `fuse_interrupt_finish_*()` functions (which
+have an argument to specify which strategy to use); these routines take care
+of freeing the interrupt record itself, while the reclamation of the shared data
+is left to the API user.
### Usage
@@ -75,12 +126,15 @@ steps:
call (directly or as async callback) `fuse_interrupt_finish_interrupt()`.
The `intstat` argument to `fuse_interrupt_finish_interrupt` should be
either `INTERRUPT_SQUELCHED` or `INTERRUPT_HANDLED`.
- - `INTERRUPT_SQUELCHED` means that we choose not to handle the interrupt
+ - `INTERRUPT_SQUELCHED` means that the interrupt could not be delivered
and the fop is going on uninterrupted.
- `INTERRUPT_HANDLED` means that the interrupt was actually handled. In
this case the fop will be answered from interrupt context with errno
`EINTR` (that is, the fop should not send a response to the kernel).
+ (the enum `fuse_interrupt_state` includes further members, which are reserved
+ for internal use).
+
We return to the `sync` and `datap` arguments later.
- In the `fuse_<FOP>` function create an interrupt record using
`fuse_interrupt_record_new()`, passing the incoming `fuse_in_header` and
@@ -92,10 +146,10 @@ steps:
`fuse_interrupt_record_insert()`.
- In `fuse_<FOP>_cbk` call `fuse_interrupt_finish_fop()`.
- `fuse_interrupt_finish_fop()` returns a Boolean according to whether the
- interrupt was handled. If it was, then the fuse request is already
+ interrupt was handled. If it was, then the FUSE request is already
answered and the stack gets destroyed in `fuse_interrupt_finish_fop` so
- `fuse_<FOP>_cbk` can just return (zero). Otherwise follow the standard
- cbk logic (answer the fuse request and destroy the stack -- these are
+ `fuse_<FOP>_cbk()` can just return (zero). Otherwise follow the standard
+ cbk logic (answer the FUSE request and destroy the stack -- these are
typically accomplished by `fuse_err_cbk()`).
- The last two argument of `fuse_interrupt_finish_fop()` and
`fuse_interrupt_finish_interrupt()` are `gf_boolean_t sync` and
@@ -124,7 +178,34 @@ steps:
then that pointer will be directed to the `data` member of the interrupt
record and it's up to the caller what it's doing with it.
- If `sync` is true, interrupt handler can use `datap = NULL`, and
- fop handler will have `datap` set.
+ fop handler will have `datap` point to a valid pointer.
- If `sync` is false, and handlers pass a pointer to a pointer for
`datap`, they should check if the pointed pointer is NULL before
attempting to deal with the data.
+
+### FUSE answer for the interrupted fop
+
+The kernel acknowledges a successful interruption for a given FUSE request
+if the filesystem daemon answers it with errno EINTR; upon that, the syscall
+which induced the request will be abruptly terminated with an interrupt, rather
+than returning a value.
+
+In glusterfs, this can be arranged in two ways.
+
+- If the interrupt handler wins the race for the interrupt record, ie.
+ `fuse_interrupt_finish_fop()` returns true to `fuse_<FOP>_cbk()`, then, as
+ said above, `fuse_<FOP>_cbk()` does not need to answer the FUSE request.
+ That's because then the interrupt handler will take care about answering
+ it (with errno EINTR).
+- If `fuse_interrupt_finish_fop()` returns false to `fuse_<FOP>_cbk()`, then
+ this return value does not inform the fop handler whether there was an interrupt
+ or not. This return value occurs both when fop handler won the race for the
+ interrupt record against the interrupt handler, and when there was no interrupt
+ at all.
+
+ However, the internal logic of the fop handler might detect from other
+ circumstances that an interrupt was delivered. For example, the fop handler
+ might be sleeping, waiting for some data to arrive, so that a premature
+ wakeup (with no data present) occurs if the interrupt handler intervenes. In
+ such cases it's the responsibility of the fop handler to reply the FUSE
+ request with errno EINTR.
diff --git a/doc/developer-guide/identifying-resource-leaks.md b/doc/developer-guide/identifying-resource-leaks.md
index 851fc4424bc..950cae79b0a 100644
--- a/doc/developer-guide/identifying-resource-leaks.md
+++ b/doc/developer-guide/identifying-resource-leaks.md
@@ -174,3 +174,27 @@ In this case, the resource leak can be addressed by adding a single line to the
Running the same Valgrind command and comparing the output will show that the
memory leak in `xlators/meta/src/meta.c:init` is not reported anymore.
+
+### Running DRD, the Valgrind thread error detector
+
+When configuring GlusterFS with:
+
+```shell
+./configure --enable-valgrind
+```
+
+the default Valgrind tool (Memcheck) is enabled. But it's also possible to select
+one of Memcheck or DRD by using:
+
+```shell
+./configure --enable-valgrind=memcheck
+```
+
+or:
+
+```shell
+./configure --enable-valgrind=drd
+```
+
+respectively. When using DRD, it's recommended to consult
+https://valgrind.org/docs/manual/drd-manual.html before running.
diff --git a/doc/developer-guide/options-to-contribute.md b/doc/developer-guide/options-to-contribute.md
index 5dd1895cb1c..3f0d84e7645 100644
--- a/doc/developer-guide/options-to-contribute.md
+++ b/doc/developer-guide/options-to-contribute.md
@@ -60,7 +60,7 @@ Reference: https://review.gluster.org/20925/
5. Now, pick a `.h` file, and see if a structure is very large, and see
-if re-aligning them as per [coding-standard](./conding-standard.md) gives any size benefit,
+if re-aligning them as per [coding-standard](./coding-standard.md) gives any size benefit,
if yes, go ahead and change it. Make sure you check all the structures
in the file for similar pattern.
diff --git a/doc/developer-guide/thread-naming.md b/doc/developer-guide/thread-naming.md
index 74efba28e45..513140d4437 100644
--- a/doc/developer-guide/thread-naming.md
+++ b/doc/developer-guide/thread-naming.md
@@ -29,10 +29,10 @@ gf_thread_create_detached (pthread_t *thread,
As max name length for a thread in POSIX is only 16 characters including the
'\0' character, you have to be a little creative with naming. Also, it is
important that all Gluster threads have common prefix. Considering these
-conditions, we have "gluster" as prefix for all the threads created by these
+conditions, we have "glfs_" as prefix for all the threads created by these
wrapper functions. It is responsibility of the owner of thread to provide the
suffix part of the name. It does not have to be a descriptive name, as it has
-only 8 letters to work with. However, it should be unique enough such that it
+only 10 letters to work with. However, it should be unique enough such that it
can be matched with a table which describes it.
If n number of threads are spwaned to perform same function, it is must that the
@@ -87,6 +87,7 @@ such that it can be matched with a table below without ambiguity.
- posixfsy - posix fsync
- posixhc - posix heal
- posixjan - posix janitor
+- posixrsv - posix reserve
- quiesce - quiesce dequeue
- rdmaAsyn - rdma async event handler
- rdmaehan - rdma completion handler
diff --git a/doc/developer-guide/translator-development.md b/doc/developer-guide/translator-development.md
index 8f67bb1cae9..f75935519f6 100644
--- a/doc/developer-guide/translator-development.md
+++ b/doc/developer-guide/translator-development.md
@@ -680,4 +680,4 @@ Original author's site:
Gluster community site:
- * [Translators](http://www.gluster.org/community/documentation/index.php/Translators)
+ * [Translators](https://docs.gluster.org/en/latest/Quick-Start-Guide/Architecture/#translators)
diff --git a/doc/developer-guide/xlator-classification.md b/doc/developer-guide/xlator-classification.md
index 36d924d0934..6073df9375f 100644
--- a/doc/developer-guide/xlator-classification.md
+++ b/doc/developer-guide/xlator-classification.md
@@ -23,7 +23,7 @@ This document is intended for the following community participants,
- Existing xlator maintainers
- Packaging and gluster management stack maintainers
-For a more user facing understanding it is recommended to read section [tbd](TBD)
+For a more user facing understanding it is recommended to read section (TBD)
in the gluster documentation.
## Categories
diff --git a/doc/gluster.8 b/doc/gluster.8
index 99a8d5e5048..ba595edca15 100644
--- a/doc/gluster.8
+++ b/doc/gluster.8
@@ -41,7 +41,7 @@ List all volumes in cluster
\fB\ volume status [all | <VOLNAME> [nfs|shd|<BRICK>|quotad]] [detail|clients|mem|inode|fd|callpool|tasks|client-list] \fR
Display status of all or specified volume(s)/brick
.TP
-\fB\ volume create <NEW-VOLNAME> [stripe <COUNT>] [[replica <COUNT> [arbiter <COUNT>]]|[replica 2 thin-arbiter 1]] [disperse [<COUNT>]] [redundancy <COUNT>] [transport <tcp|rdma|tcp,rdma>] <NEW-BRICK> ... <TA-BRICK> \fR
+\fB\ volume create <NEW-VOLNAME> [stripe <COUNT>] [[replica <COUNT> [arbiter <COUNT>]]|[replica 2 thin-arbiter 1]] [disperse [<COUNT>]] [disperse-data <COUNT>] [redundancy <COUNT>] [transport <tcp|rdma|tcp,rdma>] <NEW-BRICK> ... <TA-BRICK> \fR
Create a new volume of the specified type using the specified bricks and transport type (the default transport type is tcp).
To create a volume with both transports (tcp and rdma), give 'transport tcp,rdma' as an option.
.TP
@@ -113,6 +113,9 @@ Rotate the log file for corresponding volume/brick.
\fB\ volume profile <VOLNAME> {start|info [peek|incremental [peek]|cumulative|clear]|stop} [nfs] \fR
Profile operations on the volume. Once started, volume profile <volname> info provides cumulative statistics of the FOPs performed.
.TP
+\fB\ volume top <VOLNAME> {open|read|write|opendir|readdir|clear} [nfs|brick <brick>] [list-cnt <value>] | {read-perf|write-perf} [bs <size> count <count>] [brick <brick>] [list-cnt <value>] \fR
+Generates a profile of a volume representing the performance and bottlenecks/hotspots of each brick.
+.TP
\fB\ volume statedump <VOLNAME> [[nfs|quotad] [all|mem|iobuf|callpool|priv|fd|inode|history]... | [client <hostname:process-id>]] \fR
Dumps the in memory state of the specified process or the bricks of the volume.
.TP
@@ -215,6 +218,12 @@ Use "!<OPTION>" to reset option <OPTION> to default value.
\fB\ volume bitrot <VOLNAME> {enable|disable} \fR
Enable/disable bitrot for volume <VOLNAME>
.TP
+\fB\ volume bitrot <VOLNAME> signing-time <time-in-secs> \fR
+Waiting time for an object after last fd is closed to start signing process.
+.TP
+\fB\ volume bitrot <VOLNAME> signer-threads <count> \fR
+Number of signing process threads. Usually set to number of available cores.
+.TP
\fB\ volume bitrot <VOLNAME> scrub-throttle {lazy|normal|aggressive} \fR
Scrub-throttle value is a measure of how fast or slow the scrubber scrubs the filesystem for volume <VOLNAME>
.TP
diff --git a/doc/glusterfs.8 b/doc/glusterfs.8
index e36bd6fbcfe..3d359ea85e4 100644
--- a/doc/glusterfs.8
+++ b/doc/glusterfs.8
@@ -63,8 +63,8 @@ and \fB\-\-log\-file\fR to console.
\fB\-\-enable\-ino32=BOOL\fR
Use 32-bit inodes when mounting to workaround application that doesn't support 64-bit inodes.
.TP
-\fB\-\-fopen\-keep\-cache\fR
-Do not purge the cache on file open.
+\fB\-\-fopen\-keep\-cache[=BOOL]\fR
+Do not purge the cache on file open (default: false).
.TP
\fB\-\-mac\-compat=BOOL\fR
Provide stubs for attributes needed for seamless operation on Macs (the default is off).
diff --git a/doc/mount.glusterfs.8 b/doc/mount.glusterfs.8
index 286631b9c5c..ce16e9e40b7 100644
--- a/doc/mount.glusterfs.8
+++ b/doc/mount.glusterfs.8
@@ -44,8 +44,8 @@ INFO and NONE [default: INFO]
\fBacl
Mount the filesystem with POSIX ACL support
.TP
-\fBfopen\-keep\-cache
-Do not purge the cache on file open
+\fBfopen\-keep\-cache[=BOOL]
+Do not purge the cache on file open (default: false)
.TP
\fBworm
Mount the filesystem in 'worm' mode
@@ -123,7 +123,12 @@ Provide list of backup volfile servers in the following format [default: None]
.TP
.TP
\fBlru-limit=\fRN
-Set fuse module's limit for number of inodes kept in LRU list to N [default: 131072]
+Set fuse module's limit for number of inodes kept in LRU list to N [default: 65536]
+.TP
+.TP
+\fBinvalidate-limit=\fRN
+Suspend fuse invalidations implied by 'lru-limit' if number of outstanding
+invalidations reaches N
.TP
.TP
\fBbackground-qlen=\fRN
diff --git a/doc/release-notes/7.0.md b/doc/release-notes/7.0.md
deleted file mode 100644
index 2bcf4b28396..00000000000
--- a/doc/release-notes/7.0.md
+++ /dev/null
@@ -1,265 +0,0 @@
-# Release notes for Gluster 7.0
-
-This is a major release that includes a range of code improvements and stability
-fixes along with a few features as noted below.
-
-A selection of the key features and changes are documented in this page.
-A full list of bugs that have been addressed is included further below.
-
-- [Announcements](#announcements)
-- [Major changes and features](#major-changes-and-features)
-- [Major issues](#major-issues)
-- [Bugs addressed in the release](#bugs-addressed)
-
-## Announcements
-
-1. Releases that receive maintenance updates post release 7 are, 6 and 7
-([reference](https://www.gluster.org/release-schedule/))
-
-2. Release 7 will receive maintenance updates around the 10th of every month
-for the first 3 months post release (i.e Nov'19, Dec'19, Jan'20). Post the
-initial 3 months, it will receive maintenance updates every 2 months till EOL.
-
-
-
-## Major changes and features
-
-### Highlights
-
-- Several stability fixes addressing,
- - coverity, clang-scan, address sanitizer and valgrind reported issues
- - removal of unused and hence, deprecated code and features
-- Performance Improvements
-
-
-Features
-
-#### 1. Rpcbind not required in glusterd.service when gnfs isn't built.
-
-#### 2. Latency based read child to improve read workload latency in a cluster, especially in a cloud setup. Also provides a load balancing with the outstanding pending request.
-
-#### 3. Glusterfind: integrate with gfid2path, to improve performance.
-
-#### 4. Issue #532: Work towards implementing global thread pooling has started
-
-#### 5. This release includes extra coverage for glfs public APIs in our regression tests, so we don't break anything.
-
-
-
-## Major issues
-
-**None**
-
-## Note
-
-Any new volumes created with the release will have the `fips-mode-rchecksum` volume option set to `on` by default.
-
-If a client older than glusterfs-4.x (i.e. 3.x clients) accesses a volume which has the `fips-mode-rchecksum` volume option enabled, it can cause erroneous checksum computation/ unwanted behaviour during afr self-heal. This option is to be enabled only when all clients are also >=4.x. So if you are using these older clients, please explicitly turn this option `off`.
-
-
-## Bugs addressed
-
-Bugs addressed since release-6 are listed below.
-
-- [#789278](https://bugzilla.redhat.com/789278): Issues reported by Coverity static analysis tool
-- [#1098991](https://bugzilla.redhat.com/1098991): Dist-geo-rep: Invalid slave url (::: three or more colons) error out with unclear error message.
-- [#1193929](https://bugzilla.redhat.com/1193929): GlusterFS can be improved
-- [#1241494](https://bugzilla.redhat.com/1241494): [Backup]: Glusterfind CLI commands need to verify the accepted names for session/volume, before failing with error(s)
-- [#1512093](https://bugzilla.redhat.com/1512093): Value of pending entry operations in detail status output is going up after each synchronization.
-- [#1535511](https://bugzilla.redhat.com/1535511): Gluster CLI shouldn't stop if log file couldn't be opened
-- [#1542072](https://bugzilla.redhat.com/1542072): Syntactical errors in hook scripts for managing SELinux context on bricks #2 (S10selinux-label-brick.sh + S10selinux-del-fcontext.sh)
-- [#1573226](https://bugzilla.redhat.com/1573226): eventsapi: ABRT report for package glusterfs has reached 10 occurrences
-- [#1580315](https://bugzilla.redhat.com/1580315): gluster volume status inode getting timed out after 30 minutes with no output/error
-- [#1590385](https://bugzilla.redhat.com/1590385): Refactor dht lookup code
-- [#1593224](https://bugzilla.redhat.com/1593224): [Disperse] : Client side heal is not removing dirty flag for some of the files.
-- [#1596787](https://bugzilla.redhat.com/1596787): glusterfs rpc-clnt.c: error returned while attempting to connect to host: (null), port 0
-- [#1622665](https://bugzilla.redhat.com/1622665): clang-scan report: glusterfs issues
-- [#1624701](https://bugzilla.redhat.com/1624701): error-out {inode,entry}lk fops with all-zero lk-owner
-- [#1628194](https://bugzilla.redhat.com/1628194): tests/dht: Additional tests for dht operations
-- [#1633930](https://bugzilla.redhat.com/1633930): ASan (address sanitizer) fixes - Blanket bug
-- [#1634664](https://bugzilla.redhat.com/1634664): Inconsistent quorum checks during open and fd based operations
-- [#1635688](https://bugzilla.redhat.com/1635688): Keep only the valid (maintained/supported) components in the build
-- [#1642168](https://bugzilla.redhat.com/1642168): changes to cloudsync xlator
-- [#1642810](https://bugzilla.redhat.com/1642810): remove glupy from code and build
-- [#1648169](https://bugzilla.redhat.com/1648169): Fuse mount would crash if features.encryption is on in the version from 3.13.0 to 4.1.5
-- [#1648768](https://bugzilla.redhat.com/1648768): Tracker bug for all leases related issues
-- [#1650095](https://bugzilla.redhat.com/1650095): Regression tests for geo-replication on EC volume is not available. It should be added.
-- [#1651246](https://bugzilla.redhat.com/1651246): Failed to dispatch handler
-- [#1651439](https://bugzilla.redhat.com/1651439): gluster-NFS crash while expanding volume
-- [#1651445](https://bugzilla.redhat.com/1651445): [RFE] storage.reserve option should take size of disk as input instead of percentage
-- [#1652887](https://bugzilla.redhat.com/1652887): Geo-rep help looks to have a typo.
-- [#1654021](https://bugzilla.redhat.com/1654021): Gluster volume heal causes continuous info logging of "invalid argument"
-- [#1654270](https://bugzilla.redhat.com/1654270): glusterd crashed with seg fault possibly during node reboot while volume creates and deletes were happening
-- [#1659334](https://bugzilla.redhat.com/1659334): FUSE mount seems to be hung and not accessible
-- [#1659708](https://bugzilla.redhat.com/1659708): Optimize by not stopping (restart) selfheal deamon (shd) when a volume is stopped unless it is the last volume
-- [#1664934](https://bugzilla.redhat.com/1664934): glusterfs-fuse client not benefiting from page cache on read after write
-- [#1670031](https://bugzilla.redhat.com/1670031): performance regression seen with smallfile workload tests
-- [#1672480](https://bugzilla.redhat.com/1672480): Bugs Test Module tests failing on s390x
-- [#1672711](https://bugzilla.redhat.com/1672711): Upgrade from glusterfs 3.12 to gluster 4/5 broken
-- [#1672727](https://bugzilla.redhat.com/1672727): Fix timeouts so the tests pass on AWS
-- [#1672851](https://bugzilla.redhat.com/1672851): With parallel-readdir enabled, deleting a directory containing stale linkto files fails with "Directory not empty"
-- [#1674389](https://bugzilla.redhat.com/1674389): [thin arbiter] : rpm - add thin-arbiter package
-- [#1674406](https://bugzilla.redhat.com/1674406): glusterfs FUSE client crashing every few days with 'Failed to dispatch handler'
-- [#1674412](https://bugzilla.redhat.com/1674412): listing a file while writing to it causes deadlock
-- [#1675076](https://bugzilla.redhat.com/1675076): [posix]: log the actual path wherever possible
-- [#1676400](https://bugzilla.redhat.com/1676400): rm -rf fails with "Directory not empty"
-- [#1676430](https://bugzilla.redhat.com/1676430): distribute: Perf regression in mkdir path
-- [#1676736](https://bugzilla.redhat.com/1676736): tests: ./tests/bugs/distribute/bug-1161311.t times out
-- [#1676797](https://bugzilla.redhat.com/1676797): server xlator doesn't handle dict unserialization failures correctly
-- [#1677559](https://bugzilla.redhat.com/1677559): gNFS crashed when processing "gluster v profile [vol] info nfs"
-- [#1678726](https://bugzilla.redhat.com/1678726): Integer Overflow possible in md-cache.c due to data type inconsistency
-- [#1679401](https://bugzilla.redhat.com/1679401): Geo-rep setup creates an incorrectly formatted authorized_keys file
-- [#1679406](https://bugzilla.redhat.com/1679406): glustereventsd does not start on Ubuntu 16.04 LTS
-- [#1680587](https://bugzilla.redhat.com/1680587): Building RPM packages with _for_fedora_koji_builds enabled fails on el6
-- [#1683352](https://bugzilla.redhat.com/1683352): remove experimental xlators informations from glusterd-volume-set.c
-- [#1683594](https://bugzilla.redhat.com/1683594): nfs ltp ftest* fstat gets mismatch size as except after turn on md-cache
-- [#1683816](https://bugzilla.redhat.com/1683816): Memory leak when peer detach fails
-- [#1684385](https://bugzilla.redhat.com/1684385): [ovirt-gluster] Rolling gluster upgrade from 3.12.5 to 5.3 led to shard on-disk xattrs disappearing
-- [#1684404](https://bugzilla.redhat.com/1684404): Multiple shd processes are running on brick_mux environmet
-- [#1685027](https://bugzilla.redhat.com/1685027): Error handling in /usr/sbin/gluster-eventsapi produces IndexError: tuple index out of range
-- [#1685120](https://bugzilla.redhat.com/1685120): upgrade from 3.12, 4.1 and 5 to 6 broken
-- [#1685414](https://bugzilla.redhat.com/1685414): glusterd memory usage grows at 98 MB/h while running "gluster v profile" in a loop
-- [#1685944](https://bugzilla.redhat.com/1685944): WORM-XLator: Maybe integer overflow when computing new atime
-- [#1686371](https://bugzilla.redhat.com/1686371): Cleanup nigel access and document it
-- [#1686398](https://bugzilla.redhat.com/1686398): Thin-arbiter minor fixes
-- [#1686568](https://bugzilla.redhat.com/1686568): [geo-rep]: Checksum mismatch when 2x2 vols are converted to arbiter
-- [#1686711](https://bugzilla.redhat.com/1686711): [Thin-arbiter] : send correct error code in case of failure
-- [#1687326](https://bugzilla.redhat.com/1687326): [RFE] Revoke access from nodes using Certificate Revoke List in SSL
-- [#1687705](https://bugzilla.redhat.com/1687705): Brick process has coredumped, when starting glusterd
-- [#1687811](https://bugzilla.redhat.com/1687811): core dump generated while running the test ./tests/00-geo-rep/georep-basic-dr-rsync-arbiter.t
-- [#1688068](https://bugzilla.redhat.com/1688068): Proper error message needed for FUSE mount failure when /var is filled.
-- [#1688106](https://bugzilla.redhat.com/1688106): Remove implementation of number of files opened in posix xlator
-- [#1688116](https://bugzilla.redhat.com/1688116): Spurious failure in test ./tests/bugs/glusterfs/bug-844688.t
-- [#1688287](https://bugzilla.redhat.com/1688287): ganesha crash on glusterfs with shard volume
-- [#1689097](https://bugzilla.redhat.com/1689097): gfapi: provide an option for changing statedump path in glfs-api.
-- [#1689799](https://bugzilla.redhat.com/1689799): [cluster/ec] : Fix handling of heal info cases without locks
-- [#1689920](https://bugzilla.redhat.com/1689920): lots of "Matching lock not found for unlock xxx" when using disperse (ec) xlator
-- [#1690753](https://bugzilla.redhat.com/1690753): Volume stop when quorum not met is successful
-- [#1691164](https://bugzilla.redhat.com/1691164): glusterd leaking memory when issued gluster vol status all tasks continuosly
-- [#1691616](https://bugzilla.redhat.com/1691616): client log flooding with intentional socket shutdown message when a brick is down
-- [#1692093](https://bugzilla.redhat.com/1692093): Network throughput usage increased x5
-- [#1692612](https://bugzilla.redhat.com/1692612): Locking issue when restarting bricks
-- [#1692666](https://bugzilla.redhat.com/1692666): ssh-port config set is failing
-- [#1693575](https://bugzilla.redhat.com/1693575): gfapi: do not block epoll thread for upcall notifications
-- [#1693648](https://bugzilla.redhat.com/1693648): Geo-re: Geo replication failing in "cannot allocate memory"
-- [#1693692](https://bugzilla.redhat.com/1693692): Increase code coverage from regression tests
-- [#1694820](https://bugzilla.redhat.com/1694820): Geo-rep: Data inconsistency while syncing heavy renames with constant destination name
-- [#1694925](https://bugzilla.redhat.com/1694925): GF_LOG_OCCASSIONALLY API doesn't log at first instance
-- [#1695327](https://bugzilla.redhat.com/1695327): regression test fails with brick mux enabled.
-- [#1696046](https://bugzilla.redhat.com/1696046): Log level changes do not take effect until the process is restarted
-- [#1696077](https://bugzilla.redhat.com/1696077): Add pause and resume test case for geo-rep
-- [#1696136](https://bugzilla.redhat.com/1696136): gluster fuse mount crashed, when deleting 2T image file from oVirt Manager UI
-- [#1696512](https://bugzilla.redhat.com/1696512): glusterfs build is failing on rhel-6
-- [#1696599](https://bugzilla.redhat.com/1696599): Fops hang when inodelk fails on the first fop
-- [#1697316](https://bugzilla.redhat.com/1697316): Getting SEEK-2 and SEEK7 errors with [Invalid argument] in the bricks' logs
-- [#1697486](https://bugzilla.redhat.com/1697486): bug-1650403.t && bug-858215.t are throwing error "No such file" at the time of access glustershd pidfile
-- [#1697866](https://bugzilla.redhat.com/1697866): Provide a way to detach a failed node
-- [#1697907](https://bugzilla.redhat.com/1697907): ctime feature breaks old client to connect to new server
-- [#1697930](https://bugzilla.redhat.com/1697930): Thin-Arbiter SHD minor fixes
-- [#1698078](https://bugzilla.redhat.com/1698078): ctime: Creation of tar file on gluster mount throws warning "file changed as we read it"
-- [#1698449](https://bugzilla.redhat.com/1698449): thin-arbiter lock release fixes
-- [#1699025](https://bugzilla.redhat.com/1699025): Brick is not able to detach successfully in brick_mux environment
-- [#1699176](https://bugzilla.redhat.com/1699176): rebalance start command doesn't throw up error message if the command fails
-- [#1699189](https://bugzilla.redhat.com/1699189): fix truncate lock to cover the write in tuncate clean
-- [#1699339](https://bugzilla.redhat.com/1699339): With 1800+ vol and simultaneous 2 gluster pod restarts, running gluster commands gives issues once all pods are up
-- [#1699394](https://bugzilla.redhat.com/1699394): [geo-rep]: Geo-rep goes FAULTY with OSError
-- [#1699866](https://bugzilla.redhat.com/1699866): I/O error on writes to a disperse volume when replace-brick is executed
-- [#1700078](https://bugzilla.redhat.com/1700078): disablle + reenable of bitrot leads to files marked as bad
-- [#1700865](https://bugzilla.redhat.com/1700865): FUSE mount seems to be hung and not accessible
-- [#1701337](https://bugzilla.redhat.com/1701337): issues with 'building' glusterfs packages if we do 'git clone --depth 1'
-- [#1701457](https://bugzilla.redhat.com/1701457): ctime: Logs are flooded with "posix set mdata failed, No ctime" error during open
-- [#1702131](https://bugzilla.redhat.com/1702131): The source file is left in EC volume after rename when glusterfsd out of service
-- [#1702185](https://bugzilla.redhat.com/1702185): coredump reported by test ./tests/bugs/glusterd/bug-1699339.t
-- [#1702299](https://bugzilla.redhat.com/1702299): Custom xattrs are not healed on newly added brick
-- [#1702303](https://bugzilla.redhat.com/1702303): Enable enable fips-mode-rchecksum for new volumes by default
-- [#1702952](https://bugzilla.redhat.com/1702952): remove tier related information from manual pages
-- [#1703020](https://bugzilla.redhat.com/1703020): The cluster.heal-timeout option is unavailable for ec volume
-- [#1703629](https://bugzilla.redhat.com/1703629): statedump is not capturing info related to glusterd
-- [#1703948](https://bugzilla.redhat.com/1703948): Self-heal daemon resources are not cleaned properly after a ec fini
-- [#1704252](https://bugzilla.redhat.com/1704252): Creation of bulkvoldict thread logic is not correct while brick_mux is enabled for single volume
-- [#1704888](https://bugzilla.redhat.com/1704888): delete the snapshots and volume at the end of uss.t
-- [#1705865](https://bugzilla.redhat.com/1705865): VM stuck in a shutdown because of a pending fuse request
-- [#1705884](https://bugzilla.redhat.com/1705884): Image size as reported from the fuse mount is incorrect
-- [#1706603](https://bugzilla.redhat.com/1706603): Glusterfsd crashing in ec-inode-write.c, in GF_ASSERT
-- [#1707081](https://bugzilla.redhat.com/1707081): Self heal daemon not coming up after upgrade to glusterfs-6.0-2 (intermittently) on a brick mux setup
-- [#1707700](https://bugzilla.redhat.com/1707700): maintain consistent values across for options when fetched at cluster level or volume level
-- [#1707728](https://bugzilla.redhat.com/1707728): geo-rep: Sync hangs with tarssh as sync-engine
-- [#1707742](https://bugzilla.redhat.com/1707742): tests/geo-rep: arequal checksum comparison always succeeds
-- [#1707746](https://bugzilla.redhat.com/1707746): AFR-v2 does not log before attempting data self-heal
-- [#1708051](https://bugzilla.redhat.com/1708051): Capture memory consumption for gluster process at the time of throwing no memory available message
-- [#1708156](https://bugzilla.redhat.com/1708156): ec ignores lock contention notifications for partially acquired locks
-- [#1708163](https://bugzilla.redhat.com/1708163): tests: fix bug-1319374.c compile warnings.
-- [#1708926](https://bugzilla.redhat.com/1708926): Invalid memory access while executing cleanup_and_exit
-- [#1708929](https://bugzilla.redhat.com/1708929): Add more test coverage for shd mux
-- [#1709248](https://bugzilla.redhat.com/1709248): [geo-rep]: Non-root - Unable to set up mountbroker root directory and group
-- [#1709653](https://bugzilla.redhat.com/1709653): geo-rep: With heavy rename workload geo-rep log if flooded
-- [#1710054](https://bugzilla.redhat.com/1710054): Optimize the glustershd manager to send reconfigure
-- [#1710159](https://bugzilla.redhat.com/1710159): glusterd: While upgrading (3-node cluster) 'gluster v status' times out on node to be upgraded
-- [#1711240](https://bugzilla.redhat.com/1711240): [GNFS] gf_nfs_mt_inode_ctx serious memory leak
-- [#1711250](https://bugzilla.redhat.com/1711250): bulkvoldict thread is not handling all volumes while brick multiplex is enabled
-- [#1711297](https://bugzilla.redhat.com/1711297): Optimize glusterd code to copy dictionary in handshake code path
-- [#1711764](https://bugzilla.redhat.com/1711764): Files inaccessible if one rebalance process is killed in a multinode volume
-- [#1711820](https://bugzilla.redhat.com/1711820): Typo in cli return string.
-- [#1711827](https://bugzilla.redhat.com/1711827): test case bug-1399598-uss-with-ssl.t is generating crash
-- [#1712322](https://bugzilla.redhat.com/1712322): Brick logs inundated with [2019-04-27 22:14:53.378047] I [dict.c:541:dict_get] (-->/usr/lib64/glusterfs/6.0/xlator/features/worm.so(+0x7241) [0x7fe857bb3241] -->/usr/lib64/glusterfs/6.0/xlator/features/locks.so(+0x1c219) [0x7fe857dda219] [Invalid argumen
-- [#1712668](https://bugzilla.redhat.com/1712668): Remove-brick shows warning cluster.force-migration enabled where as cluster.force-migration is disabled on the volume
-- [#1712741](https://bugzilla.redhat.com/1712741): glusterd_svcs_stop should call individual wrapper function to stop rather than calling the glusterd_svc_stop
-- [#1713730](https://bugzilla.redhat.com/1713730): Failure when glusterd is configured to bind specific IPv6 address. If bind-address is IPv6, *addr_len will be non-zero and it goes to ret = -1 branch, which will cause listen failure eventually
-- [#1714098](https://bugzilla.redhat.com/1714098): Make debugging hung frames easier
-- [#1714415](https://bugzilla.redhat.com/1714415): Script to make it easier to find hung frames
-- [#1714973](https://bugzilla.redhat.com/1714973): upgrade after tier code removal results in peer rejection.
-- [#1715921](https://bugzilla.redhat.com/1715921): uss.t tests times out with brick-mux regression
-- [#1716695](https://bugzilla.redhat.com/1716695): Fix memory leaks that are present even after an xlator fini [client side xlator]
-- [#1716766](https://bugzilla.redhat.com/1716766): [Thin-arbiter] TA process is not picking 24007 as port while starting up
-- [#1716812](https://bugzilla.redhat.com/1716812): Failed to create volume which transport_type is "tcp,rdma"
-- [#1716830](https://bugzilla.redhat.com/1716830): DHT: directory permissions are wiped out
-- [#1717757](https://bugzilla.redhat.com/1717757): WORM: Segmentation Fault if bitrot stub do signature
-- [#1717782](https://bugzilla.redhat.com/1717782): gluster v get <VolumeName> all still showing storage.fips-mode-rchecksum off
-- [#1717819](https://bugzilla.redhat.com/1717819): Changes to self-heal logic w.r.t. detecting metadata split-brains
-- [#1717953](https://bugzilla.redhat.com/1717953): SELinux context labels are missing for newly added bricks using add-brick command
-- [#1718191](https://bugzilla.redhat.com/1718191): Regression: Intermittent test failure for quick-read-with-upcall.t
-- [#1718273](https://bugzilla.redhat.com/1718273): markdown formatting errors in files present under /doc directory of the project
-- [#1718316](https://bugzilla.redhat.com/1718316): Ganesha-gfapi logs are flooded with error messages related to "gf_uuid_is_null(gfid)) [Invalid argument]" when lookups are running from multiple clients
-- [#1718338](https://bugzilla.redhat.com/1718338): Upcall: Avoid sending upcalls for invalid Inode
-- [#1718848](https://bugzilla.redhat.com/1718848): False positive logging of mount failure
-- [#1718998](https://bugzilla.redhat.com/1718998): Fix test case "tests/basic/afr/split-brain-favorite-child-policy.t" failure
-- [#1720201](https://bugzilla.redhat.com/1720201): Healing not proceeding during in-service upgrade on a disperse volume
-- [#1720290](https://bugzilla.redhat.com/1720290): ctime changes: tar still complains file changed as we read it if uss is enabled
-- [#1720615](https://bugzilla.redhat.com/1720615): [RHEL-8.1] yum update fails for rhel-8 glusterfs client packages 6.0-5.el8
-- [#1720993](https://bugzilla.redhat.com/1720993): tests/features/subdir-mount.t is failing for brick_mux regrssion
-- [#1721385](https://bugzilla.redhat.com/1721385): glusterfs-libs: usage of inet_addr() may impact IPv6
-- [#1721435](https://bugzilla.redhat.com/1721435): DHT: Internal xattrs visible on the mount
-- [#1721441](https://bugzilla.redhat.com/1721441): geo-rep: Fix permissions for GEOREP_DIR in non-root setup
-- [#1721601](https://bugzilla.redhat.com/1721601): [SHD] : logs of one volume are going to log file of other volume
-- [#1722541](https://bugzilla.redhat.com/1722541): stale shd process files leading to heal timing out and heal deamon not coming up for all volumes
-- [#1703322](https://bugzilla.redhat.com/1703322): Need to document about fips-mode-rchecksum in gluster-7 release notes.
-- [#1722802](https://bugzilla.redhat.com/1722802): Incorrect power of two calculation in mem_pool_get_fn
-- [#1723890](https://bugzilla.redhat.com/1723890): Crash in glusterd when running test script bug-1699339.t
-- [#1728770](https://bugzilla.redhat.com/1728770): Failures in remove-brick due to [Input/output error] errors
-- [#1736481](https://bugzilla.redhat.com/1736481): capture stat failure error while setting the gfid
-- [#1739424](https://bugzilla.redhat.com/1739424): Disperse volume : data corruption with ftruncate data in 4+2 config
-- [#1739426](https://bugzilla.redhat.com/1739426): Open fd heal should filter O_APPEND/O_EXCL
-- [#1739427](https://bugzilla.redhat.com/1739427): An Input/Output error happens on a disperse volume when doing unaligned writes to a sparse file
-- [#1741041](https://bugzilla.redhat.com/1741041): atime/mtime is not restored after healing for entry self heals
-- [#1743200](https://bugzilla.redhat.com/1743200): ./tests/bugs/glusterd/bug-1595320.t is failing
-- [#1744874](https://bugzilla.redhat.com/1744874): interrupts leak memory
-- [#1745422](https://bugzilla.redhat.com/1745422): ./tests/bugs/glusterd/bug-1595320.t is failing
-- [#1745914](https://bugzilla.redhat.com/1745914): ESTALE change in fuse breaks get_real_filename implementation
-- [#1746142](https://bugzilla.redhat.com/1746142): ctime: If atime is updated via utimensat syscall ctime is not getting updated
-- [#1746145](https://bugzilla.redhat.com/1746145): CentOs 6 GlusterFS client creates files with time 01/01/1970
-- [#1747301](https://bugzilla.redhat.com/1747301): Setting cluster.heal-timeout requires volume restart
-- [#1747746](https://bugzilla.redhat.com/1747746): The result (hostname) of getnameinfo for all bricks (ipv6 addresses) are the same, while they are not.
-- [#1748448](https://bugzilla.redhat.com/1748448): syncop: Bail out if frame creation fails
-- [#1748774](https://bugzilla.redhat.com/1748774): Incorrect power of two calculation in mem_pool_get_fn
-- [#1749155](https://bugzilla.redhat.com/1749155): bug-1402841.t-mt-dir-scan-race.t fails spuriously
-- [#1749305](https://bugzilla.redhat.com/1749305): Failures in remove-brick due to [Input/output error] errors
-- [#1749664](https://bugzilla.redhat.com/1749664): The result (hostname) of getnameinfo for all bricks (ipv6 addresses) are the same, while they are not.
-- [#1751556](https://bugzilla.redhat.com/1751556): syncop: Bail out if frame creation fails
-- [#1752245](https://bugzilla.redhat.com/1752245): Crash in glusterd when running test script bug-1699339.t
-- [#1752429](https://bugzilla.redhat.com/1752429): Ctime: Cannot see the "trusted.glusterfs.mdata" xattr for directory on a new brick after rebalance
-- [#1755212](https://bugzilla.redhat.com/1755212): geo-rep: performance improvement while syncing heavy renames with existing destination
-- [#1755213](https://bugzilla.redhat.com/1755213): geo-rep: non-root session going fault due improper sub-command
-- [#1755678](https://bugzilla.redhat.com/1755678): Segmentation fault occurs while truncate file
-- [#1756002](https://bugzilla.redhat.com/1756002): git clone fails on gluster volumes exported via nfs-ganesha
-
-
diff --git a/events/src/eventsapiconf.py.in b/events/src/eventsapiconf.py.in
index 76b5954d325..700093bee60 100644
--- a/events/src/eventsapiconf.py.in
+++ b/events/src/eventsapiconf.py.in
@@ -28,6 +28,8 @@ def get_glusterd_workdir():
return glusterd_workdir
SERVER_ADDRESS = "0.0.0.0"
+SERVER_ADDRESSv4 = "0.0.0.0"
+SERVER_ADDRESSv6 = "::1"
DEFAULT_CONFIG_FILE = "@SYSCONF_DIR@/glusterfs/eventsconfig.json"
CUSTOM_CONFIG_FILE_TO_SYNC = "/events/config.json"
CUSTOM_CONFIG_FILE = get_glusterd_workdir() + CUSTOM_CONFIG_FILE_TO_SYNC
diff --git a/events/src/glustereventsd.py b/events/src/glustereventsd.py
index c4c7b65e332..341a3b60947 100644
--- a/events/src/glustereventsd.py
+++ b/events/src/glustereventsd.py
@@ -13,6 +13,7 @@
from __future__ import print_function
import sys
import signal
+import threading
try:
import socketserver
except ImportError:
@@ -23,10 +24,17 @@ from argparse import ArgumentParser, RawDescriptionHelpFormatter
from eventtypes import all_events
import handlers
import utils
-from eventsapiconf import SERVER_ADDRESS, PID_FILE
+from eventsapiconf import SERVER_ADDRESSv4, SERVER_ADDRESSv6, PID_FILE
from eventsapiconf import AUTO_BOOL_ATTRIBUTES, AUTO_INT_ATTRIBUTES
from utils import logger, PidFile, PidFileLockFailed, boolify
+# Subclass so that specifically IPv4 packets are captured
+class UDPServerv4(socketserver.ThreadingUDPServer):
+ address_family = socket.AF_INET
+
+# Subclass so that specifically IPv6 packets are captured
+class UDPServerv6(socketserver.ThreadingUDPServer):
+ address_family = socket.AF_INET6
class GlusterEventsRequestHandler(socketserver.BaseRequestHandler):
@@ -89,6 +97,10 @@ def signal_handler_sigusr2(sig, frame):
utils.restart_webhook_pool()
+def UDP_server_thread(sock):
+ sock.serve_forever()
+
+
def init_event_server():
utils.setup_logger()
utils.load_all()
@@ -99,15 +111,26 @@ def init_event_server():
sys.stderr.write("Unable to get Port details from Config\n")
sys.exit(1)
- # Start the Eventing Server, UDP Server
+ # Creating the Eventing Server, UDP Server for IPv4 packets
+ try:
+ serverv4 = UDPServerv4((SERVER_ADDRESSv4, port),
+ GlusterEventsRequestHandler)
+ except socket.error as e:
+ sys.stderr.write("Failed to start Eventsd for IPv4: {0}\n".format(e))
+ sys.exit(1)
+ # Creating the Eventing Server, UDP Server for IPv6 packets
try:
- server = socketserver.ThreadingUDPServer(
- (SERVER_ADDRESS, port),
- GlusterEventsRequestHandler)
+ serverv6 = UDPServerv6((SERVER_ADDRESSv6, port),
+ GlusterEventsRequestHandler)
except socket.error as e:
- sys.stderr.write("Failed to start Eventsd: {0}\n".format(e))
+ sys.stderr.write("Failed to start Eventsd for IPv6: {0}\n".format(e))
sys.exit(1)
- server.serve_forever()
+ server_thread1 = threading.Thread(target=UDP_server_thread,
+ args=(serverv4,))
+ server_thread2 = threading.Thread(target=UDP_server_thread,
+ args=(serverv6,))
+ server_thread1.start()
+ server_thread2.start()
def get_args():
diff --git a/events/src/peer_eventsapi.py b/events/src/peer_eventsapi.py
index 26b77a09179..4d2e5f35b1c 100644
--- a/events/src/peer_eventsapi.py
+++ b/events/src/peer_eventsapi.py
@@ -353,8 +353,7 @@ class WebhookModCmd(Cmd):
errcode=ERROR_WEBHOOK_NOT_EXISTS,
json_output=args.json)
- if isinstance(data[args.url], str) or \
- isinstance(data[args.url], unicode):
+ if isinstance(data[args.url], str):
data[args.url]["token"] = data[args.url]
if args.bearer_token != "":
diff --git a/events/src/utils.py b/events/src/utils.py
index 38b707a1b28..6d4e0791a2b 100644
--- a/events/src/utils.py
+++ b/events/src/utils.py
@@ -13,6 +13,7 @@ import sys
import json
import os
import logging
+import logging.handlers
import fcntl
from errno import EBADF
from threading import Thread
@@ -98,7 +99,7 @@ def setup_logger():
logger.setLevel(logging.INFO)
# create the logging file handler
- fh = logging.FileHandler(LOG_FILE)
+ fh = logging.handlers.WatchedFileHandler(LOG_FILE)
formatter = logging.Formatter("[%(asctime)s] %(levelname)s "
"[%(module)s - %(lineno)s:%(funcName)s] "
diff --git a/extras/Makefile.am b/extras/Makefile.am
index ff5ca9bdb83..983f014cca6 100644
--- a/extras/Makefile.am
+++ b/extras/Makefile.am
@@ -11,7 +11,8 @@ EditorModedir = $(docdir)
EditorMode_DATA = glusterfs-mode.el glusterfs.vim
SUBDIRS = init.d systemd benchmarking hook-scripts $(OCF_SUBDIR) LinuxRPM \
- $(GEOREP_EXTRAS_SUBDIR) snap_scheduler firewalld cliutils python
+ $(GEOREP_EXTRAS_SUBDIR) snap_scheduler firewalld cliutils python \
+ ganesha
confdir = $(sysconfdir)/glusterfs
if WITH_SERVER
diff --git a/extras/cliutils/README.md b/extras/cliutils/README.md
index e11166774e3..309beb1ca25 100644
--- a/extras/cliutils/README.md
+++ b/extras/cliutils/README.md
@@ -221,7 +221,7 @@ required.(Under `%files` section)
- gluster-mountbroker http://review.gluster.org/14544
- gluster-eventsapi http://review.gluster.org/14248
- gluster-georep-sshkey http://review.gluster.org/14732
-- gluster-restapi https://github.com/aravindavk/glusterfs-restapi
+- gluster-restapi https://github.com/gluster/restapi
## Limitations/TODOs
- Not yet possible to create CLI without any subcommand, For example
diff --git a/extras/distributed-testing/distributed-test-runner.py b/extras/distributed-testing/distributed-test-runner.py
index 7bfb6c9652a..5a07e2feab1 100755
--- a/extras/distributed-testing/distributed-test-runner.py
+++ b/extras/distributed-testing/distributed-test-runner.py
@@ -383,14 +383,17 @@ class Handlers:
return self.shell.call("make install") == 0
@synchronized
- def prove(self, id, test, timeout, valgrind=False, asan_noleaks=True):
+ def prove(self, id, test, timeout, valgrind="no", asan_noleaks=True):
assert id == self.client_id
self.shell.cd(self.gluster_root)
env = "DEBUG=1 "
- if valgrind:
+ if valgrind == "memcheck" or valgrind == "yes":
cmd = "valgrind"
cmd += " --tool=memcheck --leak-check=full --track-origins=yes"
cmd += " --show-leak-kinds=all -v prove -v"
+ elif valgrind == "drd":
+ cmd = "valgrind"
+ cmd += " --tool=drd -v prove -v"
elif asan_noleaks:
cmd = "prove -v"
env += "ASAN_OPTIONS=detect_leaks=0 "
@@ -827,8 +830,9 @@ parser.add_argument("--port", help="server port to listen",
type=int, default=DEFAULT_PORT)
# test role
parser.add_argument("--tester", help="start tester", action="store_true")
-parser.add_argument("--valgrind", help="run tests under valgrind",
- action="store_true")
+parser.add_argument("--valgrind", choices=["no", "yes", "memcheck", "drd"],
+                    help="run tests with valgrind tool 'memcheck' or 'drd'",
+                    default="no")
parser.add_argument("--asan", help="test with asan enabled",
action="store_true")
parser.add_argument("--asan-noleaks", help="test with asan but no mem leaks",
diff --git a/extras/ec-heal-script/README.md b/extras/ec-heal-script/README.md
new file mode 100644
index 00000000000..aaefd6681f6
--- /dev/null
+++ b/extras/ec-heal-script/README.md
@@ -0,0 +1,69 @@
+# gluster-heal-scripts
+Scripts to correct extended attributes of fragments of files to make them healable.
+
+Following are the guidelines/suggestions to use these scripts.
+
+1 - Passwordless ssh should be setup for all the nodes of the cluster.
+
+2 - Scripts should be executed from one of these nodes.
+
+3 - Make sure NO "IO" is going on for the files for which we are running
+these two scripts.
+
+4 - There should be no heal going on for the file for which xattrs are being
+set by correct_pending_heals.sh. Disable the self heal while running this script.
+
+5 - All the bricks of the volume should be UP to identify good and bad fragments
+and to decide if an entry is healable or not.
+
+6 - If correct_pending_heals.sh is stopped in the middle while it was processing
+healable entries, it is suggested to re-run gfid_needing_heal_parallel.sh to create
+the latest list of healable and non-healable entries and the "potential_heal" and "can_not_heal" files.
+
+7 - Based on the number of entries, these files might take time to get and set the
+stats and xattrs of entries.
+
+8 - A backup of the fragments will be taken on <brick path>/.glusterfs/correct_pending_heals
+ directory with a file name same as gfid.
+
+9 - Once the correctness of the file gets verified by user, these backup should be removed.
+
+10 - Make sure we have enough space on bricks to take these backups.
+
+11 - At the end this will create two files -
+ 1 - modified_and_backedup_files - Contains list of files which have been modified and should be healed.
+ 2 - can_not_heal - Contains list of files which can not be healed.
+
+12 - It is suggested that the integrity of the data of files, which were modified and healed,
+ should be checked by the user.
+
+
+Usage:
+
+Following are the sequence of steps to use these scripts -
+
+1 - ./gfid_needing_heal_parallel.sh <volume name>
+
+ Execute gfid_needing_heal_parallel.sh with volume name to create a list of files which could
+ be healed and can not be healed. It creates "potential_heal" and "can_not_heal" files.
+ During execution, it also displays the list of files on the console with the verdict.
+
+2 - ./correct_pending_heals.sh
+
+ Execute correct_pending_heals.sh without any argument. This script processes entries present
+ in the "potential_heal" file. It asks the user to enter how many files we want to process in one attempt.
+ Once the count is provided, this script will fetch the entries one by one from "potential_heal" file and takes necessary action.
+ If at this point also a file can not be healed, it will be pushed to "can_not_heal" file.
+ If a file can be healed, this script will modify the xattrs of that file fragments and create an entry in "modified_and_backedup_files" file
+
+3 - At the end, all the entries of "potential_heal" will be processed and based on the processing only two files will be left.
+
+ 1 - modified_and_backedup_files - Contains list of files which have been modified and should be healed.
+ 2 - can_not_heal - Contains list of files which can not be healed.
+
+Logs and other files -
+
+1 - modified_and_backedup_files - It contains all the files which could be healed and the location of backup of each fragments.
+2 - can_not_heal - It contains all the files which can not be healed.
+3 - potential_heal - List of files which could be healed and should be processed by "correct_pending_heals.sh"
+4 - /var/log/glusterfs/ec-heal-script.log - It contains logs of both the files.
diff --git a/extras/ec-heal-script/correct_pending_heals.sh b/extras/ec-heal-script/correct_pending_heals.sh
new file mode 100755
index 00000000000..c9f19dd7c89
--- /dev/null
+++ b/extras/ec-heal-script/correct_pending_heals.sh
@@ -0,0 +1,415 @@
+#!/bin/bash
+# Copyright (c) 2019-2020 Red Hat, Inc. <http://www.redhat.com>
+# This file is part of GlusterFS.
+#
+# This file is licensed to you under your choice of the GNU Lesser
+# General Public License, version 3 or any later version (LGPLv3 or
+# later), or the GNU General Public License, version 2 (GPLv2), in all
+# cases as published by the Free Software Foundation.
+
+# This script finally resets the xattrs of all the fragments of a file
+# which can be healed as per gfid_needing_heal_parallel.sh.
+# gfid_needing_heal_parallel.sh will produce two files, potential_heal and can_not_heal.
+# This script takes potential_heal as input and resets xattrs of all the fragments
+# of those files present in this file and which could be healed as per
+# the trusted.ec.size xattr of the file, else it will place the entry in the can_not_heal
+# file. Those entries which must be healed will be placed in the must_heal file
+# after setting xattrs so that user can track those files.
+
+
+MOD_BACKUP_FILES="modified_and_backedup_files"
+CAN_NOT_HEAL="can_not_heal"
+LOG_DIR="/var/log/glusterfs"
+LOG_FILE="$LOG_DIR/ec-heal-script.log"
+LINE_SEP="==================================================="
+
+function heal_log()
+{
+ echo "$1" >> "$LOG_FILE"
+}
+
+function desc ()
+{
+ echo ""
+ echo "This script finally resets the xattrs of all the fragments of a file
+which can be healed as per gfid_needing_heal_parallel.sh.
+gfid_needing_heal_parallel.sh will produce two files, potential_heal and can_not_heal.
+This script takes potential_heal as input and resets xattrs of all the fragments
+of those files present in this file and which could be healed as per
+trusted.ec.size xattar of the file else it will place the entry in can_not_heal
+file. Those entries which must be healed will be place in must_heal file
+after setting xattrs so that user can track those files."
+}
+
+function _init ()
+{
+ if [ $# -ne 0 ]
+ then
+ echo "usage: $0"
+ desc
+ exit 2
+ fi
+
+ if [ ! -f "potential_heal" ]
+ then
+ echo "Nothing to correct. File "potential_heal" does not exist"
+ echo ""
+ desc
+ exit 2
+ fi
+}
+
+function total_file_size_in_hex()
+{
+ local frag_size=$1
+ local size=0
+ local hex_size=""
+
+ size=$((frag_size * 4))
+ hex_size=$(printf '0x%016x' $size)
+ echo "$hex_size"
+}
+
+function backup_file_fragment()
+{
+ local file_host=$1
+ local file_entry=$2
+ local gfid_actual_paths=$3
+ local brick_root=""
+ local temp=""
+ local backup_dir=""
+ local cmd=""
+ local gfid=""
+
+ brick_root=$(echo "$file_entry" | cut -d "#" -f 1)
+ temp=$(echo "$(basename "$BASH_SOURCE")" | cut -d '.' -f 1)
+ backup_dir=$(echo "${brick_root}/.glusterfs/${temp}")
+ file_entry=${file_entry//#}
+
+ gfid=$(echo "${gfid_actual_paths}" | cut -d '|' -f 1 | cut -d '/' -f 5)
+ echo "${file_host}:${backup_dir}/${gfid}" >> "$MOD_BACKUP_FILES"
+
+ cmd="mkdir -p ${backup_dir} && yes | cp -af ${file_entry} ${backup_dir}/${gfid} 2>/dev/null"
+ ssh -n "${file_host}" "${cmd}"
+}
+
+function set_frag_xattr ()
+{
+ local file_host=$1
+ local file_entry=$2
+ local good=$3
+ local cmd1=""
+ local cmd2=""
+ local cmd=""
+ local version="0x00000000000000010000000000000001"
+ local dirty="0x00000000000000010000000000000001"
+
+ if [[ $good -eq 0 ]]
+ then
+ version="0x00000000000000000000000000000000"
+ fi
+
+ cmd1=" setfattr -n trusted.ec.version -v ${version} ${file_entry} &&"
+ cmd2=" setfattr -n trusted.ec.dirty -v ${dirty} ${file_entry}"
+ cmd=${cmd1}${cmd2}
+ ssh -n "${file_host}" "${cmd}"
+}
+
+function set_version_dirty_xattr ()
+{
+ local file_paths=$1
+ local good=$2
+ local gfid_actual_paths=$3
+ local file_entry=""
+ local file_host=""
+ local bpath=""
+
+ for bpath in ${file_paths//,/ }
+ do
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+ backup_file_fragment "$file_host" "$file_entry" "$gfid_actual_paths"
+ file_entry=${file_entry//#}
+ set_frag_xattr "$file_host" "$file_entry" "$good"
+ done
+}
+
+function match_size_xattr_quorum ()
+{
+ local file_paths=$1
+ local file_entry=""
+ local file_host=""
+ local cmd=""
+ local size_xattr=""
+ local bpath=""
+ declare -A xattr_count
+
+ for bpath in ${file_paths//,/ }
+ do
+ size_xattr=""
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+ file_entry=${file_entry//#}
+
+ cmd="getfattr -n trusted.ec.size -d -e hex ${file_entry} 2>/dev/null | grep -w "trusted.ec.size" | cut -d '=' -f 2"
+ size_xattr=$(ssh -n "${file_host}" "${cmd}")
+ if [[ -n $size_xattr ]]
+ then
+ count=$((xattr_count["$size_xattr"] + 1))
+ xattr_count["$size_xattr"]=${count}
+ if [[ $count -ge 4 ]]
+ then
+ echo "${size_xattr}"
+ return
+ fi
+ fi
+ done
+ echo "False"
+}
+
+function match_version_xattr ()
+{
+ local file_paths=$1
+ local file_entry=""
+ local file_host=""
+ local cmd=""
+ local version=""
+ local bpath=""
+ declare -A ver_count
+
+ for bpath in ${file_paths//,/ }
+ do
+ version=""
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+ file_entry=${file_entry//#}
+
+ cmd="getfattr -n trusted.ec.version -d -e hex ${file_entry} 2>/dev/null | grep -w "trusted.ec.version" | cut -d '=' -f 2"
+ version=$(ssh -n "${file_host}" "${cmd}")
+ ver_count["$version"]=$((ver_count["$version"] + 1))
+ done
+ for key in "${ver_count[@]}"
+ do
+ if [[ $key -ge 4 ]]
+ then
+ echo "True"
+ return
+ else
+ echo "False"
+ return
+ fi
+ done
+}
+
+function match_stat_size_with_xattr ()
+{
+ local bpath=$1
+ local size=$2
+ local file_stat=$3
+ local xattr=$4
+ local file_entry=""
+ local file_host=""
+ local cmd=""
+ local stat_output=""
+ local hex_size=""
+
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+
+ file_entry=${file_entry//#}
+ cmd="stat --format=%F:%B:%s $file_entry 2>/dev/null"
+ stat_output=$(ssh -n "${file_host}" "${cmd}")
+ echo "$stat_output" | grep -w "${file_stat}" > /dev/null
+
+ if [[ $? -eq 0 ]]
+ then
+ cmd="getfattr -n trusted.ec.size -d -e hex ${file_entry} 2>/dev/null | grep -w "trusted.ec.size" | cut -d '=' -f 2"
+ hex_size=$(ssh -n "${file_host}" "${cmd}")
+
+ if [[ -z $hex_size || "$hex_size" != "$xattr" ]]
+ then
+ echo "False"
+ return
+ fi
+ size_diff=$(printf '%d' $(( size - hex_size )))
+ if [[ $size_diff -gt 2047 ]]
+ then
+ echo "False"
+ return
+ else
+ echo "True"
+ return
+ fi
+ else
+ echo "False"
+ return
+ fi
+}
+
+function find_file_paths ()
+{
+ local bpath=$1
+ local file_entry=""
+ local file_host=""
+ local cmd=""
+ local brick_root=""
+ local gfid=""
+ local actual_path=""
+ local gfid_path=""
+
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+ brick_root=$(echo "$file_entry" | cut -d "#" -f 1)
+
+ gfid=$(echo "${file_entry}" | grep ".glusterfs")
+ if [[ -n "$gfid" ]]
+ then
+ gfid_path=$(echo "$file_entry" | cut -d "#" -f 2)
+ file_entry=${file_entry//#}
+ cmd="find -L '$brick_root' -samefile '$file_entry' 2>/dev/null | grep -v '.glusterfs' "
+ actual_path=$(ssh -n "${file_host}" "${cmd}")
+ #removing absolute path so that user can refer this from mount point
+ actual_path=${actual_path#"$brick_root"}
+ else
+ actual_path=$(echo "$file_entry" | cut -d "#" -f 2)
+ file_entry=${file_entry//#}
+ cmd="find -L '$brick_root' -samefile '$file_entry' 2>/dev/null | grep '.glusterfs' "
+ gfid_path=$(ssh -n "${file_host}" "${cmd}")
+ gfid_path=${gfid_path#"$brick_root"}
+ fi
+
+ echo "${gfid_path}|${actual_path}"
+}
+
+function log_can_not_heal ()
+{
+ local gfid_actual_paths=$1
+ local file_paths=$2
+ file_paths=${file_paths//#}
+
+ echo "${LINE_SEP}" >> "$CAN_NOT_HEAL"
+ echo "Can Not Heal : $(echo "$gfid_actual_paths" | cut -d '|' -f 2)" >> "$CAN_NOT_HEAL"
+ for bpath in ${file_paths//,/ }
+ do
+ echo "${bpath}" >> "$CAN_NOT_HEAL"
+ done
+}
+
+function check_all_frag_and_set_xattr ()
+{
+ local file_paths=$1
+ local total_size=$2
+ local file_stat=$3
+ local bpath=""
+ local healthy_count=0
+ local match="False"
+ local matching_bricks=""
+ local bad_bricks=""
+ local gfid_actual_paths=""
+
+ for bpath in ${file_paths//,/ }
+ do
+ if [[ -n "$gfid_actual_paths" ]]
+ then
+ break
+ fi
+ gfid_actual_paths=$(find_file_paths "$bpath")
+ done
+
+ match=$(match_size_xattr_quorum "$file_paths")
+
+# echo "${match} : $bpath" >> "$MOD_BACKUP_FILES"
+
+ if [[ "$match" != "False" ]]
+ then
+ xattr="$match"
+ for bpath in ${file_paths//,/ }
+ do
+ match="False"
+ match=$(match_stat_size_with_xattr "$bpath" "$total_size" "$file_stat" "$xattr")
+ if [[ "$match" == "True" ]]
+ then
+ matching_bricks="${bpath},${matching_bricks}"
+ healthy_count=$((healthy_count + 1))
+ else
+ bad_bricks="${bpath},${bad_bricks}"
+ fi
+ done
+ fi
+
+ if [[ $healthy_count -ge 4 ]]
+ then
+ match="True"
+ echo "${LINE_SEP}" >> "$MOD_BACKUP_FILES"
+ echo "Modified : $(echo "$gfid_actual_paths" | cut -d '|' -f 2)" >> "$MOD_BACKUP_FILES"
+ set_version_dirty_xattr "$matching_bricks" 1 "$gfid_actual_paths"
+ set_version_dirty_xattr "$bad_bricks" 0 "$gfid_actual_paths"
+ else
+ log_can_not_heal "$gfid_actual_paths" "${file_paths}"
+ fi
+
+ echo "$match"
+}
+function set_xattr()
+{
+ local count=$1
+ local heal_entry=""
+ local file_stat=""
+ local frag_size=""
+ local total_size=""
+ local file_paths=""
+ local num=""
+ local can_heal_count=0
+
+ heal_log "Started $(basename $BASH_SOURCE) on $(date) "
+
+ while read -r heal_entry
+ do
+ heal_log "$LINE_SEP"
+ heal_log "${heal_entry}"
+
+ file_stat=$(echo "$heal_entry" | cut -d "|" -f 1)
+ frag_size=$(echo "$file_stat" | rev | cut -d ":" -f 1 | rev)
+ total_size="$(total_file_size_in_hex "$frag_size")"
+ file_paths=$(echo "$heal_entry" | cut -d "|" -f 2)
+ match=$(check_all_frag_and_set_xattr "$file_paths" "$total_size" "$file_stat")
+ if [[ "$match" == "True" ]]
+ then
+ can_heal_count=$((can_heal_count + 1))
+ fi
+
+ sed -i '1d' potential_heal
+ count=$((count - 1))
+ if [ $count == 0 ]
+ then
+ num=$(cat potential_heal | wc -l)
+ heal_log "$LINE_SEP"
+ heal_log "${1} : Processed"
+ heal_log "${can_heal_count} : Modified to Heal"
+ heal_log "$((${1} - can_heal_count)) : Moved to can_not_heal."
+ heal_log "${num} : Pending as Potential Heal"
+ exit 0
+ fi
+
+ done < potential_heal
+}
+
+function main ()
+{
+ local count=0
+
+ read -p "Number of files to correct: [choose between 1-1000] (0 for All):" count
+ if [[ $count -lt 0 || $count -gt 1000 ]]
+ then
+ echo "Provide correct value:"
+ exit 2
+ fi
+
+ if [[ $count -eq 0 ]]
+ then
+ count=$(cat potential_heal | wc -l)
+ fi
+ set_xattr "$count"
+}
+
+_init "$@" && main "$@"
diff --git a/extras/ec-heal-script/gfid_needing_heal_parallel.sh b/extras/ec-heal-script/gfid_needing_heal_parallel.sh
new file mode 100755
index 00000000000..d7f53c97c33
--- /dev/null
+++ b/extras/ec-heal-script/gfid_needing_heal_parallel.sh
@@ -0,0 +1,278 @@
+#!/bin/bash
+# Copyright (c) 2019-2020 Red Hat, Inc. <http://www.redhat.com>
+# This file is part of GlusterFS.
+#
+# This file is licensed to you under your choice of the GNU Lesser
+# General Public License, version 3 or any later version (LGPLv3 or
+# later), or the GNU General Public License, version 2 (GPLv2), in all
+# cases as published by the Free Software Foundation.
+
+# This script provides a list of all the files which can be healed or not healed.
+# It also generates two files, potential_heal and can_not_heal, which contain the information
+# of all those files. These files could be used by correct_pending_heals.sh to correct
+# the fragments so that files could be healed by shd.
+
+CAN_NOT_HEAL="can_not_heal"
+CAN_HEAL="potential_heal"
+LINE_SEP="==================================================="
+LOG_DIR="/var/log/glusterfs"
+LOG_FILE="$LOG_DIR/ec-heal-script.log"
+
+function heal_log()
+{
+ echo "$1" >> "$LOG_FILE"
+}
+
+function _init ()
+{
+ if [ $# -ne 1 ]; then
+ echo "usage: $0 <gluster volume name>";
+ echo "This script provides a list of all the files which can be healed or not healed.
+It also generates two files, potential_heal and can_not_heal, which contains the information
+of all theose files. These files could be used by correct_pending_heals.sh to correct
+the fragmnets so that files could be healed by shd."
+ exit 2;
+ fi
+
+ volume=$1;
+}
+
+function get_pending_entries ()
+{
+ local volume_name=$1
+
+ gluster volume heal "$volume_name" info | grep -v ":/" | grep -v "Number of entries" | grep -v "Status:" | sort -u | sed '/^$/d'
+}
+
+function get_entry_path_on_brick()
+{
+ local path="$1"
+ local gfid_string=""
+ if [[ "${path:0:1}" == "/" ]];
+ then
+ echo "$path"
+ else
+ gfid_string="$(echo "$path" | cut -f2 -d':' | cut -f1 -d '>')"
+ echo "/.glusterfs/${gfid_string:0:2}/${gfid_string:2:2}/$gfid_string"
+ fi
+}
+
+function run_command_on_server()
+{
+ local subvolume="$1"
+ local host="$2"
+ local cmd="$3"
+ local output
+ output=$(ssh -n "${host}" "${cmd}")
+ if [ -n "$output" ]
+ then
+ echo "$subvolume:$output"
+ fi
+}
+
+function get_entry_path_all_bricks ()
+{
+ local entry="$1"
+ local bricks="$2"
+ local cmd=""
+ for brick in $bricks
+ do
+ echo "${brick}#$(get_entry_path_on_brick "$entry")"
+ done | tr '\n' ','
+}
+
+function get_stat_for_entry_from_all_bricks ()
+{
+ local entry="$1"
+ local bricks="$2"
+ local subvolume=0
+ local host=""
+ local bpath=""
+ local cmd=""
+
+ for brick in $bricks
+ do
+ if [[ "$((subvolume % 6))" == "0" ]]
+ then
+ subvolume=$((subvolume+1))
+ fi
+ host=$(echo "$brick" | cut -f1 -d':')
+ bpath=$(echo "$brick" | cut -f2 -d':')
+
+ cmd="stat --format=%F:%B:%s $bpath$(get_entry_path_on_brick "$entry") 2>/dev/null"
+ run_command_on_server "$subvolume" "${host}" "${cmd}" &
+ done | sort | uniq -c | sort -rnk1
+}
+
+function get_bricks_from_volume()
+{
+ local v=$1
+ gluster volume info "$v" | grep -E "^Brick[0-9][0-9]*:" | cut -f2- -d':'
+}
+
+function print_entry_gfid()
+{
+ local host="$1"
+ local dirpath="$2"
+ local entry="$3"
+ local gfid
+ gfid="$(ssh -n "${host}" "getfattr -d -m. -e hex $dirpath/$entry 2>/dev/null | grep trusted.gfid=|cut -f2 -d'='")"
+ echo "$entry" - "$gfid"
+}
+
+function print_brick_directory_info()
+{
+ local h="$1"
+ local dirpath="$2"
+ while read -r e
+ do
+ print_entry_gfid "${h}" "${dirpath}" "${e}"
+ done < <(ssh -n "${h}" "ls $dirpath 2>/dev/null")
+}
+
+function print_directory_info()
+{
+ local entry="$1"
+ local bricks="$2"
+ local h
+ local b
+ local gfid
+ for brick in $bricks;
+ do
+ h="$(echo "$brick" | cut -f1 -d':')"
+ b="$(echo "$brick" | cut -f2 -d':')"
+ dirpath="$b$(get_entry_path_on_brick "$entry")"
+ print_brick_directory_info "${h}" "${dirpath}" &
+ done | sort | uniq -c
+}
+
+function print_entries_needing_heal()
+{
+ local quorum=0
+ local entry="$1"
+ local bricks="$2"
+ while read -r line
+ do
+ quorum=$(echo "$line" | awk '{print $1}')
+ if [[ "$quorum" -lt 4 ]]
+ then
+ echo "$line - Not in Quorum"
+ else
+ echo "$line - In Quorum"
+ fi
+ done < <(print_directory_info "$entry" "$bricks")
+}
+
+function find_file_paths ()
+{
+ local bpath=$1
+ local file_entry=""
+ local file_host=""
+ local cmd=""
+ local brick_root=""
+ local gfid=""
+ local actual_path=""
+ local gfid_path=""
+
+ file_host=$(echo "$bpath" | cut -d ":" -f 1)
+ file_entry=$(echo "$bpath" | cut -d ":" -f 2)
+ brick_root=$(echo "$file_entry" | cut -d "#" -f 1)
+
+ gfid=$(echo "${file_entry}" | grep ".glusterfs")
+
+ if [[ -n "$gfid" ]]
+ then
+ gfid_path=$(echo "$file_entry" | cut -d "#" -f 2)
+ file_entry=${file_entry//#}
+ cmd="find -L '$brick_root' -samefile '$file_entry' 2>/dev/null | grep -v '.glusterfs' "
+ actual_path=$(ssh -n "${file_host}" "${cmd}")
+ #removing absolute path so that user can refer this from mount point
+ actual_path=${actual_path#"$brick_root"}
+ else
+ actual_path=$(echo "$file_entry" | cut -d "#" -f 2)
+ file_entry=${file_entry//#}
+ cmd="find -L '$brick_root' -samefile '$file_entry' 2>/dev/null | grep '.glusterfs' "
+ gfid_path=$(ssh -n "${file_host}" "${cmd}")
+ gfid_path=${gfid_path#"$brick_root"}
+ fi
+
+ echo "${gfid_path}|${actual_path}"
+}
+
+function log_can_not_heal ()
+{
+ local gfid_actual_paths=$1
+ local file_paths=$2
+ file_paths=${file_paths//#}
+
+ echo "${LINE_SEP}" >> "$CAN_NOT_HEAL"
+ echo "Can Not Heal : $(echo "$gfid_actual_paths" | cut -d '|' -f 2)" >> "$CAN_NOT_HEAL"
+ for bpath in ${file_paths//,/ }
+ do
+ echo "${bpath}" >> "$CAN_NOT_HEAL"
+ done
+}
+
+function main ()
+{
+ local bricks=""
+ local quorum=0
+ local stat_info=""
+ local file_type=""
+ local gfid_actual_paths=""
+ local bpath=""
+ local file_paths=""
+ local good=0
+ local bad=0
+ bricks=$(get_bricks_from_volume "$volume")
+ rm -f "$CAN_HEAL"
+ rm -f "$CAN_NOT_HEAL"
+ mkdir "$LOG_DIR" -p
+
+ heal_log "Started $(basename "$BASH_SOURCE") on $(date) "
+ while read -r heal_entry
+ do
+ heal_log "------------------------------------------------------------------"
+ heal_log "$heal_entry"
+
+ gfid_actual_paths=""
+ file_paths="$(get_entry_path_all_bricks "$heal_entry" "$bricks")"
+ stat_info="$(get_stat_for_entry_from_all_bricks "$heal_entry" "$bricks")"
+ heal_log "$stat_info"
+
+ quorum=$(echo "$stat_info" | head -1 | awk '{print $1}')
+ good_stat=$(echo "$stat_info" | head -1 | awk '{print $3}')
+ file_type="$(echo "$stat_info" | head -1 | cut -f2 -d':')"
+ if [[ "$file_type" == "directory" ]]
+ then
+ print_entries_needing_heal "$heal_entry" "$bricks"
+ else
+ if [[ "$quorum" -ge 4 ]]
+ then
+ good=$((good + 1))
+ heal_log "Verdict: Healable"
+
+ echo "${good_stat}|$file_paths" >> "$CAN_HEAL"
+ else
+ bad=$((bad + 1))
+ heal_log "Verdict: Not Healable"
+ for bpath in ${file_paths//,/ }
+ do
+ if [[ -z "$gfid_actual_paths" ]]
+ then
+ gfid_actual_paths=$(find_file_paths "$bpath")
+ else
+ break
+ fi
+ done
+ log_can_not_heal "$gfid_actual_paths" "${file_paths}"
+ fi
+ fi
+ done < <(get_pending_entries "$volume")
+ heal_log "========================================="
+ heal_log "Total number of potential heal : ${good}"
+ heal_log "Total number of can not heal : ${bad}"
+ heal_log "========================================="
+}
+
+_init "$@" && main "$@"
diff --git a/extras/ganesha/Makefile.am b/extras/ganesha/Makefile.am
new file mode 100644
index 00000000000..9eaa401b6c8
--- /dev/null
+++ b/extras/ganesha/Makefile.am
@@ -0,0 +1,2 @@
+SUBDIRS = scripts config ocf
+CLEANFILES =
diff --git a/extras/ganesha/config/Makefile.am b/extras/ganesha/config/Makefile.am
new file mode 100644
index 00000000000..c729273096e
--- /dev/null
+++ b/extras/ganesha/config/Makefile.am
@@ -0,0 +1,4 @@
+EXTRA_DIST= ganesha-ha.conf.sample
+
+confdir = $(sysconfdir)/ganesha
+conf_DATA = ganesha-ha.conf.sample
diff --git a/extras/ganesha/config/ganesha-ha.conf.sample b/extras/ganesha/config/ganesha-ha.conf.sample
new file mode 100644
index 00000000000..c22892bde56
--- /dev/null
+++ b/extras/ganesha/config/ganesha-ha.conf.sample
@@ -0,0 +1,19 @@
+# Name of the HA cluster created.
+# must be unique within the subnet
+HA_NAME="ganesha-ha-360"
+#
+# N.B. you may use short names or long names; you may not use IP addrs.
+# Once you select one, stay with it as it will be mildly unpleasant to
+# clean up if you switch later on. Ensure that all names - short and/or
+# long - are in DNS or /etc/hosts on all machines in the cluster.
+#
+# The subset of nodes of the Gluster Trusted Pool that form the ganesha
+# HA cluster. Hostname is specified.
+HA_CLUSTER_NODES="server1,server2,..."
+#HA_CLUSTER_NODES="server1.lab.redhat.com,server2.lab.redhat.com,..."
+#
+# Virtual IPs for each of the nodes specified above.
+VIP_server1="10.0.2.1"
+VIP_server2="10.0.2.2"
+#VIP_server1_lab_redhat_com="10.0.2.1"
+#VIP_server2_lab_redhat_com="10.0.2.2"
diff --git a/extras/ganesha/ocf/Makefile.am b/extras/ganesha/ocf/Makefile.am
new file mode 100644
index 00000000000..990a609f254
--- /dev/null
+++ b/extras/ganesha/ocf/Makefile.am
@@ -0,0 +1,11 @@
+EXTRA_DIST= ganesha_grace ganesha_mon ganesha_nfsd
+
+# The root of the OCF resource agent hierarchy
+# Per the OCF standard, it's always "lib",
+# not "lib64" (even on 64-bit platforms).
+ocfdir = $(prefix)/lib/ocf
+
+# The provider directory
+radir = $(ocfdir)/resource.d/heartbeat
+
+ra_SCRIPTS = ganesha_grace ganesha_mon ganesha_nfsd
diff --git a/extras/ganesha/ocf/ganesha_grace b/extras/ganesha/ocf/ganesha_grace
new file mode 100644
index 00000000000..825f7164597
--- /dev/null
+++ b/extras/ganesha/ocf/ganesha_grace
@@ -0,0 +1,221 @@
+#!/bin/bash
+#
+# Copyright (c) 2014 Anand Subramanian anands@redhat.com
+# Copyright (c) 2015 Red Hat Inc.
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like. Any license provided herein, whether implied or
+# otherwise, applies only to this software file. Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+#
+
+# Initialization:
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+if [ -n "$OCF_DEBUG_LIBRARY" ]; then
+ . $OCF_DEBUG_LIBRARY
+else
+ : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+fi
+
+OCF_RESKEY_grace_active_default="grace-active"
+: ${OCF_RESKEY_grace_active=${OCF_RESKEY_grace_active_default}}
+
+ganesha_meta_data() {
+ cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="ganesha_grace">
+<version>1.0</version>
+
+<longdesc lang="en">
+This Linux-specific resource agent acts as a dummy
+resource agent for nfs-ganesha.
+</longdesc>
+
+<shortdesc lang="en">Manages the user-space nfs-ganesha NFS server</shortdesc>
+
+<parameters>
+<parameter name="grace_active">
+<longdesc lang="en">NFS-Ganesha grace active attribute</longdesc>
+<shortdesc lang="en">NFS-Ganesha grace active attribute</shortdesc>
+<content type="string" default="grace-active" />
+</parameter>
+</parameters>
+
+<actions>
+<action name="start" timeout="40s" />
+<action name="stop" timeout="40s" />
+<action name="status" timeout="20s" interval="60s" />
+<action name="monitor" depth="0" timeout="10s" interval="5s" />
+<action name="notify" timeout="10s" />
+<action name="meta-data" timeout="20s" />
+</actions>
+</resource-agent>
+END
+
+return ${OCF_SUCCESS}
+}
+
+ganesha_grace_usage() {
+ echo "ganesha.nfsd USAGE"
+}
+
+# Make sure meta-data and usage always succeed
+case $__OCF_ACTION in
+ meta-data) ganesha_meta_data
+ exit ${OCF_SUCCESS}
+ ;;
+ usage|help) ganesha_usage
+ exit ${OCF_SUCCESS}
+ ;;
+ *)
+ ;;
+esac
+
+ganesha_grace_start()
+{
+ local rc=${OCF_ERR_GENERIC}
+ local host=$(hostname -s)
+
+ ocf_log debug "ganesha_grace_start()"
+ # give ganesha_mon RA a chance to set the crm_attr first
+ # I mislike the sleep, but it's not clear that looping
+ # with a small sleep is necessarily better
+ # start has a 40sec timeout, so a 5sec sleep here is okay
+ sleep 5
+ attr=$(crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} 2> /dev/null)
+ if [ $? -ne 0 ]; then
+ host=$(hostname)
+ attr=$(crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} 2> /dev/null )
+ if [ $? -ne 0 ]; then
+ ocf_log info "grace start: crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} failed"
+ fi
+ fi
+
+ # Three possibilities:
+ # 1. There is no attribute at all and attr_updater returns
+ # a zero length string. This happens when
+ # ganesha_mon::monitor hasn't run at least once to set
+ # the attribute. The assumption here is that the system
+ # is coming up. We pretend, for now, that the node is
+ # healthy, to allow the system to continue coming up.
+ # It will cure itself in a few seconds
+ # 2. There is an attribute, and it has the value "1"; this
+ # node is healthy.
+ # 3. There is an attribute, but it has no value or the value
+ # "0"; this node is not healthy.
+
+ # case 1
+ if [[ -z "${attr}" ]]; then
+ return ${OCF_SUCCESS}
+ fi
+
+ # case 2
+ if [[ "${attr}" = *"value=1" ]]; then
+ return ${OCF_SUCCESS}
+ fi
+
+ # case 3
+ return ${OCF_NOT_RUNNING}
+}
+
+ganesha_grace_stop()
+{
+
+ ocf_log debug "ganesha_grace_stop()"
+ return ${OCF_SUCCESS}
+}
+
+ganesha_grace_notify()
+{
+ # since this is a clone RA we should only ever see pre-start
+ # or post-stop
+ mode="${OCF_RESKEY_CRM_meta_notify_type}-${OCF_RESKEY_CRM_meta_notify_operation}"
+ case "${mode}" in
+ pre-start | post-stop)
+ dbus-send --print-reply --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.grace string:${OCF_RESKEY_CRM_meta_notify_stop_uname}
+ if [ $? -ne 0 ]; then
+ ocf_log info "dbus-send --print-reply --system --dest=org.ganesha.nfsd /org/ganesha/nfsd/admin org.ganesha.nfsd.admin.grace string:${OCF_RESKEY_CRM_meta_notify_stop_uname} failed"
+ fi
+ ;;
+ esac
+
+ return ${OCF_SUCCESS}
+}
+
+ganesha_grace_monitor()
+{
+ local host=$(hostname -s)
+
+ ocf_log debug "monitor"
+
+ attr=$(crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} 2> /dev/null)
+ if [ $? -ne 0 ]; then
+ host=$(hostname)
+ attr=$(crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} 2> /dev/null)
+ if [ $? -ne 0 ]; then
+ ocf_log info "crm_attribute --query --node=${host} --name=${OCF_RESKEY_grace_active} failed"
+ fi
+ fi
+
+ # if there is no attribute (yet), maybe it's because
+ # this RA started before ganesha_mon (nfs-mon) has had
+ # chance to create it. In which case we'll pretend
+ # everything is okay this time around
+ if [[ -z "${attr}" ]]; then
+ return ${OCF_SUCCESS}
+ fi
+
+ if [[ "${attr}" = *"value=1" ]]; then
+ return ${OCF_SUCCESS}
+ fi
+
+ return ${OCF_NOT_RUNNING}
+}
+
+ganesha_grace_validate()
+{
+ return ${OCF_SUCCESS}
+}
+
+ganesha_grace_validate
+
+# Translate each action into the appropriate function call
+case $__OCF_ACTION in
+start) ganesha_grace_start
+ ;;
+stop) ganesha_grace_stop
+ ;;
+status|monitor) ganesha_grace_monitor
+ ;;
+notify) ganesha_grace_notify
+ ;;
+*) ganesha_grace_usage
+ exit ${OCF_ERR_UNIMPLEMENTED}
+ ;;
+esac
+
+rc=$?
+
+# The resource agent may optionally log a debug message
+ocf_log debug "${OCF_RESOURCE_INSTANCE} ${__OCF_ACTION} returned $rc"
+exit $rc
diff --git a/extras/ganesha/ocf/ganesha_mon b/extras/ganesha/ocf/ganesha_mon
new file mode 100644
index 00000000000..2b4a9d6da84
--- /dev/null
+++ b/extras/ganesha/ocf/ganesha_mon
@@ -0,0 +1,234 @@
+#!/bin/bash
+#
+# Copyright (c) 2014 Anand Subramanian anands@redhat.com
+# Copyright (c) 2015 Red Hat Inc.
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like. Any license provided herein, whether implied or
+# otherwise, applies only to this software file. Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+#
+
+# Initialization:
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+if [ -n "${OCF_DEBUG_LIBRARY}" ]; then
+ . ${OCF_DEBUG_LIBRARY}
+else
+ : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+fi
+
+# Defaults
+OCF_RESKEY_ganesha_active_default="ganesha-active"
+OCF_RESKEY_grace_active_default="grace-active"
+OCF_RESKEY_grace_delay_default="5"
+
+: ${OCF_RESKEY_ganesha_active=${OCF_RESKEY_ganesha_active_default}}
+: ${OCF_RESKEY_grace_active=${OCF_RESKEY_grace_active_default}}
+: ${OCF_RESKEY_grace_delay=${OCF_RESKEY_grace_delay_default}}
+
+ganesha_meta_data() {
+ cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="ganesha_mon">
+<version>1.0</version>
+
+<longdesc lang="en">
+This Linux-specific resource agent acts as a dummy
+resource agent for nfs-ganesha.
+</longdesc>
+
+<shortdesc lang="en">Manages the user-space nfs-ganesha NFS server</shortdesc>
+
+<parameters>
+<parameter name="ganesha_active">
+<longdesc lang="en">NFS-Ganesha daemon active attribute</longdesc>
+<shortdesc lang="en">NFS-Ganesha daemon active attribute</shortdesc>
+<content type="string" default="ganesha-active" />
+</parameter>
+<parameter name="grace_active">
+<longdesc lang="en">NFS-Ganesha grace active attribute</longdesc>
+<shortdesc lang="en">NFS-Ganesha grace active attribute</shortdesc>
+<content type="string" default="grace-active" />
+</parameter>
+<parameter name="grace_delay">
+<longdesc lang="en">
+NFS-Ganesha grace delay.
+When changing this, adjust the ganesha_grace RA's monitor interval to match.
+</longdesc>
+<shortdesc lang="en">NFS-Ganesha grace delay</shortdesc>
+<content type="string" default="5" />
+</parameter>
+</parameters>
+
+<actions>
+<action name="start" timeout="40s" />
+<action name="stop" timeout="40s" />
+<action name="status" timeout="20s" interval="60s" />
+<action name="monitor" depth="0" timeout="10s" interval="10s" />
+<action name="meta-data" timeout="20s" />
+</actions>
+</resource-agent>
+END
+
+return ${OCF_SUCCESS}
+}
+
+ganesha_mon_usage() {
+ echo "ganesha.nfsd USAGE"
+}
+
+# Make sure meta-data and usage always succeed
+case ${__OCF_ACTION} in
+    meta-data)   ganesha_meta_data
+                 exit ${OCF_SUCCESS}
+                 ;;
+    # fix: this script defines ganesha_mon_usage, not ganesha_usage
+    usage|help)  ganesha_mon_usage
+                 exit ${OCF_SUCCESS}
+                 ;;
+    *)
+                 ;;
+
+ganesha_mon_start()
+{
+ ocf_log debug "ganesha_mon_start"
+ ganesha_mon_monitor
+ return $OCF_SUCCESS
+}
+
+ganesha_mon_stop()
+{
+ ocf_log debug "ganesha_mon_stop"
+ return $OCF_SUCCESS
+}
+
+ganesha_mon_monitor()
+{
+ local host=$(hostname -s)
+ local pid_file="/var/run/ganesha.pid"
+ local rhel6_pid_file="/var/run/ganesha.nfsd.pid"
+ local proc_pid="/proc/"
+
+ # RHEL6 /etc/init.d/nfs-ganesha adds -p /var/run/ganesha.nfsd.pid
+ # RHEL7 systemd does not. Would be nice if all distros used the
+ # same pid file.
+ if [ -e ${rhel6_pid_file} ]; then
+ pid_file=${rhel6_pid_file}
+ fi
+ if [ -e ${pid_file} ]; then
+ proc_pid="${proc_pid}$(cat ${pid_file})"
+ fi
+
+ if [ "x${proc_pid}" != "x/proc/" -a -d ${proc_pid} ]; then
+
+ attrd_updater -n ${OCF_RESKEY_ganesha_active} -v 1
+ if [ $? -ne 0 ]; then
+ ocf_log info "warning: attrd_updater -n ${OCF_RESKEY_ganesha_active} -v 1 failed"
+ fi
+
+ # ganesha_grace (nfs-grace) RA follows grace-active attr
+ # w/ constraint location
+ attrd_updater -n ${OCF_RESKEY_grace_active} -v 1
+ if [ $? -ne 0 ]; then
+ ocf_log info "warning: attrd_updater -n ${OCF_RESKEY_grace_active} -v 1 failed"
+ fi
+
+ # ganesha_mon (nfs-mon) and ganesha_grace (nfs-grace)
+ # track grace-active crm_attr (attr != crm_attr)
+ # we can't just use the attr as there's no way to query
+ # its value in RHEL6 pacemaker
+
+ crm_attribute --node=${host} --lifetime=forever --name=${OCF_RESKEY_grace_active} --update=1 2> /dev/null
+ if [ $? -ne 0 ]; then
+ host=$(hostname)
+ crm_attribute --node=${host} --lifetime=forever --name=${OCF_RESKEY_grace_active} --update=1 2> /dev/null
+ if [ $? -ne 0 ]; then
+ ocf_log info "mon monitor warning: crm_attribute --node=${host} --lifetime=forever --name=${OCF_RESKEY_grace_active} --update=1 failed"
+ fi
+ fi
+
+ return ${OCF_SUCCESS}
+ fi
+
+ # VIP fail-over is triggered by clearing the
+ # ganesha-active node attribute on this node.
+ #
+ # Meanwhile the ganesha_grace notify() runs when its
+ # nfs-grace resource is disabled on a node; which
+ # is triggered by clearing the grace-active attribute
+ # on this node.
+ #
+ # We need to allow time for it to run and put
+ # the remaining ganesha.nfsds into grace before
+ # initiating the VIP fail-over.
+
+ attrd_updater -D -n ${OCF_RESKEY_grace_active}
+ if [ $? -ne 0 ]; then
+ ocf_log info "warning: attrd_updater -D -n ${OCF_RESKEY_grace_active} failed"
+ fi
+
+ host=$(hostname -s)
+ crm_attribute --node=${host} --name=${OCF_RESKEY_grace_active} --update=0 2> /dev/null
+ if [ $? -ne 0 ]; then
+ host=$(hostname)
+ crm_attribute --node=${host} --name=${OCF_RESKEY_grace_active} --update=0 2> /dev/null
+ if [ $? -ne 0 ]; then
+ ocf_log info "mon monitor warning: crm_attribute --node=${host} --name=${OCF_RESKEY_grace_active} --update=0 failed"
+ fi
+ fi
+
+ sleep ${OCF_RESKEY_grace_delay}
+
+ attrd_updater -D -n ${OCF_RESKEY_ganesha_active}
+ if [ $? -ne 0 ]; then
+ ocf_log info "warning: attrd_updater -D -n ${OCF_RESKEY_ganesha_active} failed"
+ fi
+
+ return ${OCF_SUCCESS}
+}
+
+ganesha_mon_validate()
+{
+ return ${OCF_SUCCESS}
+}
+
+ganesha_mon_validate
+
+# Translate each action into the appropriate function call
+case ${__OCF_ACTION} in
+start) ganesha_mon_start
+ ;;
+stop) ganesha_mon_stop
+ ;;
+status|monitor) ganesha_mon_monitor
+ ;;
+*) ganesha_mon_usage
+ exit ${OCF_ERR_UNIMPLEMENTED}
+ ;;
+esac
+
+rc=$?
+
+# The resource agent may optionally log a debug message
+ocf_log debug "${OCF_RESOURCE_INSTANCE} ${__OCF_ACTION} returned $rc"
+exit $rc
diff --git a/extras/ganesha/ocf/ganesha_nfsd b/extras/ganesha/ocf/ganesha_nfsd
new file mode 100644
index 00000000000..f91e8b6b8f7
--- /dev/null
+++ b/extras/ganesha/ocf/ganesha_nfsd
@@ -0,0 +1,167 @@
+#!/bin/bash
+#
+# Copyright (c) 2014 Anand Subramanian anands@redhat.com
+# Copyright (c) 2015 Red Hat Inc.
+# All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of version 2 of the GNU General Public License as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it would be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#
+# Further, this software is distributed without any warranty that it is
+# free of the rightful claim of any third person regarding infringement
+# or the like. Any license provided herein, whether implied or
+# otherwise, applies only to this software file. Patent licenses, if
+# any, provided herein do not apply to combinations of this program with
+# other software, or any other product whatsoever.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write the Free Software Foundation,
+# Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+#
+#
+
+# Initialization:
+: ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+
+if [ -n "${OCF_DEBUG_LIBRARY}" ]; then
+ . ${OCF_DEBUG_LIBRARY}
+else
+ : ${OCF_FUNCTIONS_DIR=${OCF_ROOT}/lib/heartbeat}
+ . ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
+fi
+
+OCF_RESKEY_ha_vol_mnt_default="/run/gluster/shared_storage"
+: ${OCF_RESKEY_ha_vol_mnt=${OCF_RESKEY_ha_vol_mnt_default}}
+
+ganesha_meta_data() {
+ cat <<END
+<?xml version="1.0"?>
+<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
+<resource-agent name="ganesha_nfsd">
+<version>1.0</version>
+
+<longdesc lang="en">
+This Linux-specific resource agent acts as a dummy
+resource agent for nfs-ganesha.
+</longdesc>
+
+<shortdesc lang="en">Manages the user-space nfs-ganesha NFS server</shortdesc>
+
+<parameters>
+<parameter name="ha_vol_mnt">
+<longdesc lang="en">HA State Volume Mount Point</longdesc>
+<shortdesc lang="en">HA_State Volume Mount Point</shortdesc>
+<content type="string" default="" />
+</parameter>
+</parameters>
+
+<actions>
+<action name="start" timeout="5s" />
+<action name="stop" timeout="5s" />
+<action name="status" depth="0" timeout="5s" interval="0" />
+<action name="monitor" depth="0" timeout="5s" interval="0" />
+<action name="meta-data" timeout="20s" />
+</actions>
+</resource-agent>
+END
+
+return ${OCF_SUCCESS}
+}
+
+ganesha_nfsd_usage() {
+ echo "ganesha.nfsd USAGE"
+}
+
+# Make sure meta-data and usage always succeed
+case $__OCF_ACTION in
+    meta-data)   ganesha_meta_data
+                 exit ${OCF_SUCCESS}
+                 ;;
+    # fix: this script defines ganesha_nfsd_usage, not ganesha_usage
+    usage|help)  ganesha_nfsd_usage
+                 exit ${OCF_SUCCESS}
+                 ;;
+    *)
+                 ;;
+
+ganesha_nfsd_start()
+{
+ local long_host=$(hostname)
+
+ if [[ -d /var/lib/nfs ]]; then
+ mv /var/lib/nfs /var/lib/nfs.backup
+ if [ $? -ne 0 ]; then
+ ocf_log notice "mv /var/lib/nfs /var/lib/nfs.backup failed"
+ fi
+ ln -s ${OCF_RESKEY_ha_vol_mnt}/nfs-ganesha/${long_host}/nfs /var/lib/nfs
+ if [ $? -ne 0 ]; then
+ ocf_log notice "ln -s ${OCF_RESKEY_ha_vol_mnt}/nfs-ganesha/${long_host}/nfs /var/lib/nfs failed"
+ fi
+ fi
+
+ return ${OCF_SUCCESS}
+}
+
+ganesha_nfsd_stop()
+{
+
+ if [ -L /var/lib/nfs -a -d /var/lib/nfs.backup ]; then
+ rm -f /var/lib/nfs
+ if [ $? -ne 0 ]; then
+ ocf_log notice "rm -f /var/lib/nfs failed"
+ fi
+ mv /var/lib/nfs.backup /var/lib/nfs
+ if [ $? -ne 0 ]; then
+ ocf_log notice "mv /var/lib/nfs.backup /var/lib/nfs failed"
+ fi
+ fi
+
+ return ${OCF_SUCCESS}
+}
+
+ganesha_nfsd_monitor()
+{
+ # pacemaker checks to see if RA is already running before starting it.
+ # if we return success, then it's presumed it's already running and
+ # doesn't need to be started, i.e. invoke the start action.
+ # return something other than success to make pacemaker invoke the
+ # start action
+ if [[ -L /var/lib/nfs ]]; then
+ return ${OCF_SUCCESS}
+ fi
+ return ${OCF_NOT_RUNNING}
+}
+
+ganesha_nfsd_validate()
+{
+ return ${OCF_SUCCESS}
+}
+
+ganesha_nfsd_validate
+
+# ocf_log notice "ganesha_nfsd ${OCF_RESOURCE_INSTANCE} $__OCF_ACTION"
+
+# Translate each action into the appropriate function call
+case $__OCF_ACTION in
+start) ganesha_nfsd_start
+ ;;
+stop) ganesha_nfsd_stop
+ ;;
+status|monitor) ganesha_nfsd_monitor
+ ;;
+*) ganesha_nfsd_usage
+ exit ${OCF_ERR_UNIMPLEMENTED}
+ ;;
+esac
+
+rc=$?
+
+# The resource agent may optionally log a debug message
+ocf_log debug "${OCF_RESOURCE_INSTANCE} ${__OCF_ACTION} returned $rc"
+exit $rc
diff --git a/extras/ganesha/scripts/Makefile.am b/extras/ganesha/scripts/Makefile.am
new file mode 100644
index 00000000000..7e345fd5f19
--- /dev/null
+++ b/extras/ganesha/scripts/Makefile.am
@@ -0,0 +1,6 @@
+EXTRA_DIST= create-export-ganesha.sh generate-epoch.py dbus-send.sh \
+ ganesha-ha.sh
+
+scriptsdir = $(libexecdir)/ganesha
+scripts_SCRIPTS = create-export-ganesha.sh dbus-send.sh generate-epoch.py \
+ ganesha-ha.sh
diff --git a/extras/ganesha/scripts/create-export-ganesha.sh b/extras/ganesha/scripts/create-export-ganesha.sh
new file mode 100755
index 00000000000..3040e8138b0
--- /dev/null
+++ b/extras/ganesha/scripts/create-export-ganesha.sh
@@ -0,0 +1,92 @@
+#!/bin/bash
+
+#This script is called by glusterd when the user
+#tries to export a volume via NFS-Ganesha.
+#An export file specific to a volume
+#is created in GANESHA_DIR/exports.
+
+# Try loading the config from any of the distro
+# specific configuration locations
+if [ -f /etc/sysconfig/ganesha ]
+ then
+ . /etc/sysconfig/ganesha
+fi
+if [ -f /etc/conf.d/ganesha ]
+ then
+ . /etc/conf.d/ganesha
+fi
+if [ -f /etc/default/ganesha ]
+ then
+ . /etc/default/ganesha
+fi
+
+GANESHA_DIR=${1%/}
+OPTION=$2
+VOL=$3
+CONF=$GANESHA_DIR"/ganesha.conf"
+declare -i EXPORT_ID
+
+function check_cmd_status()
+{
+ if [ "$1" != "0" ]
+ then
+ rm -rf $GANESHA_DIR/exports/export.$VOL.conf
+ sed -i /$VOL.conf/d $CONF
+ exit 1
+ fi
+}
+
+
+if [ ! -d "$GANESHA_DIR/exports" ];
+ then
+ mkdir $GANESHA_DIR/exports
+ check_cmd_status `echo $?`
+fi
+
+function write_conf()
+{
+echo -e "# WARNING : Using Gluster CLI will overwrite manual
+# changes made to this file. To avoid it, edit the
+# file and run ganesha-ha.sh --refresh-config."
+
+echo "EXPORT{"
+echo " Export_Id = 2;"
+echo " Path = \"/$VOL\";"
+echo " FSAL {"
+echo " name = "GLUSTER";"
+echo " hostname=\"localhost\";"
+echo " volume=\"$VOL\";"
+echo " }"
+echo " Access_type = RW;"
+echo " Disable_ACL = true;"
+echo ' Squash="No_root_squash";'
+echo " Pseudo=\"/$VOL\";"
+echo ' Protocols = "3", "4" ;'
+echo ' Transports = "UDP","TCP";'
+echo ' SecType = "sys";'
+echo ' Security_Label = False;'
+echo " }"
+}
+if [ "$OPTION" = "on" ];
+then
+ if ! (cat $CONF | grep $VOL.conf\"$ )
+ then
+ write_conf $@ > $GANESHA_DIR/exports/export.$VOL.conf
+ echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF
+ count=`ls -l $GANESHA_DIR/exports/*.conf | wc -l`
+ if [ "$count" = "1" ] ; then
+ EXPORT_ID=2
+ else
+ EXPORT_ID=`cat $GANESHA_DIR/.export_added`
+ check_cmd_status `echo $?`
+ EXPORT_ID=EXPORT_ID+1
+ sed -i s/Export_Id.*/"Export_Id= $EXPORT_ID ;"/ \
+ $GANESHA_DIR/exports/export.$VOL.conf
+ check_cmd_status `echo $?`
+ fi
+ echo $EXPORT_ID > $GANESHA_DIR/.export_added
+ fi
+else
+ rm -rf $GANESHA_DIR/exports/export.$VOL.conf
+ sed -i /$VOL.conf/d $CONF
+fi
diff --git a/extras/ganesha/scripts/dbus-send.sh b/extras/ganesha/scripts/dbus-send.sh
new file mode 100755
index 00000000000..9d613a0e7ad
--- /dev/null
+++ b/extras/ganesha/scripts/dbus-send.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+# Try loading the config from any of the distro
+# specific configuration locations
+if [ -f /etc/sysconfig/ganesha ]
+ then
+ . /etc/sysconfig/ganesha
+fi
+if [ -f /etc/conf.d/ganesha ]
+ then
+ . /etc/conf.d/ganesha
+fi
+if [ -f /etc/default/ganesha ]
+ then
+ . /etc/default/ganesha
+fi
+
+GANESHA_DIR=${1%/}
+OPTION=$2
+VOL=$3
+CONF=$GANESHA_DIR"/ganesha.conf"
+
+function check_cmd_status()
+{
+ if [ "$1" != "0" ]
+ then
+ logger "dynamic export failed on node :${hostname -s}"
+ fi
+}
+
+#This function keeps track of export IDs and increments it with every new entry
+function dynamic_export_add()
+{
+ dbus-send --system \
+--dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
+org.ganesha.nfsd.exportmgr.AddExport string:$GANESHA_DIR/exports/export.$VOL.conf \
+string:"EXPORT(Path=/$VOL)"
+ check_cmd_status `echo $?`
+}
+
+#This function removes an export dynamically(uses the export_id of the export)
+function dynamic_export_remove()
+{
+    # The code below fetches all exports via the ShowExports command and
+    # searches for the export entry based on its path. There are two
+    # possibilities for the path: either the entire volume is exported,
+    # or a subdir. Both cases are handled, but only the first entry in
+    # the list is removed, based on the assumption that the entry
+    # exported via the CLI has the lowest export id value
+ removed_id=$(dbus-send --type=method_call --print-reply --system \
+ --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
+ org.ganesha.nfsd.exportmgr.ShowExports | grep -B 1 -we \
+ "/"$VOL -e "/"$VOL"/" | grep uint16 | awk '{print $2}' \
+ | head -1)
+
+ dbus-send --print-reply --system \
+--dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
+org.ganesha.nfsd.exportmgr.RemoveExport uint16:$removed_id
+ check_cmd_status `echo $?`
+}
+
+if [ "$OPTION" = "on" ];
+then
+ dynamic_export_add $@
+fi
+
+if [ "$OPTION" = "off" ];
+then
+ dynamic_export_remove $@
+fi
diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
new file mode 100644
index 00000000000..9790a719e10
--- /dev/null
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -0,0 +1,1199 @@
+#!/bin/bash
+
+# Copyright 2015-2016 Red Hat Inc. All Rights Reserved
+#
+# Pacemaker+Corosync High Availability for NFS-Ganesha
+#
+# setup, teardown, add, delete, refresh-config, and status
+#
+# Each participating node in the cluster is assigned a virtual IP (VIP)
+# which fails over to another node when its associated ganesha.nfsd dies
+# for any reason. After the VIP is moved to another node all the
+# ganesha.nfsds are sent a signal using DBUS to put them into NFS GRACE.
+#
+# There are six resource agent types used: ganesha_mon, ganesha_grace,
+# ganesha_nfsd, portblock, IPaddr, and Dummy. ganesha_mon is used to
+# monitor the ganesha.nfsd. ganesha_grace is used to send the DBUS signal
+# to put the remaining ganesha.nfsds into grace. ganesha_nfsd is used to
+# start and stop the ganesha.nfsd during setup and teardown. IPaddr
+# manages the VIP. A Dummy resource named $hostname-trigger_ip-1 is used
+# to ensure that the NFS GRACE DBUS signal is sent after the VIP moves to
+# the new host.
+
+GANESHA_HA_SH=$(realpath $0)
+HA_NUM_SERVERS=0
+HA_SERVERS=""
+HA_VOL_NAME="gluster_shared_storage"
+HA_VOL_MNT="/run/gluster/shared_storage"
+HA_CONFDIR=$HA_VOL_MNT"/nfs-ganesha"
+SERVICE_MAN="DISTRO_NOT_FOUND"
+
+# rhel, fedora id, version
+ID=""
+VERSION_ID=""
+
+PCS9OR10_PCS_CNAME_OPTION=""
+PCS9OR10_PCS_CLONE_OPTION="clone"
+SECRET_PEM="/var/lib/glusterd/nfs/secret.pem"
+
+# UNBLOCK RA uses shared_storage which may become unavailable
+# during any of the nodes reboot. Hence increase timeout value.
+PORTBLOCK_UNBLOCK_TIMEOUT="60s"
+
+# Try loading the config from any of the distro
+# specific configuration locations
+if [ -f /etc/sysconfig/ganesha ]
+ then
+ . /etc/sysconfig/ganesha
+fi
+if [ -f /etc/conf.d/ganesha ]
+ then
+ . /etc/conf.d/ganesha
+fi
+if [ -f /etc/default/ganesha ]
+ then
+ . /etc/default/ganesha
+fi
+
+GANESHA_CONF=
+
+function find_rhel7_conf
+{
+ while [[ $# > 0 ]]
+ do
+ key="$1"
+ case $key in
+ -f)
+ CONFFILE="$2"
+ break;
+ ;;
+ *)
+ ;;
+ esac
+ shift
+ done
+}
+
+if [ -z ${CONFFILE} ]
+ then
+ find_rhel7_conf ${OPTIONS}
+
+fi
+
+GANESHA_CONF=${CONFFILE:-/etc/ganesha/ganesha.conf}
+
+usage() {
+
+ echo "Usage : add|delete|refresh-config|status"
+ echo "Add-node : ganesha-ha.sh --add <HA_CONF_DIR> \
+<NODE-HOSTNAME> <NODE-VIP>"
+ echo "Delete-node: ganesha-ha.sh --delete <HA_CONF_DIR> \
+<NODE-HOSTNAME>"
+ echo "Refresh-config : ganesha-ha.sh --refresh-config <HA_CONFDIR> \
+<volume>"
+ echo "Status : ganesha-ha.sh --status <HA_CONFDIR>"
+}
+
+determine_service_manager () {
+
+ if [ -e "/bin/systemctl" ];
+ then
+ SERVICE_MAN="/bin/systemctl"
+ elif [ -e "/sbin/invoke-rc.d" ];
+ then
+ SERVICE_MAN="/sbin/invoke-rc.d"
+ elif [ -e "/sbin/service" ];
+ then
+ SERVICE_MAN="/sbin/service"
+ fi
+ if [[ "${SERVICE_MAN}X" == "DISTRO_NOT_FOUNDX" ]]
+ then
+ logger "Service manager not recognized, exiting"
+ exit 1
+ fi
+}
+
+manage_service ()
+{
+ local action=${1}
+ local new_node=${2}
+ local option=
+
+ if [[ "${action}" == "start" ]]; then
+ option="yes"
+ else
+ option="no"
+ fi
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${new_node} "${GANESHA_HA_SH} --setup-ganesha-conf-files $HA_CONFDIR $option"
+
+ if [[ "${SERVICE_MAN}" == "/bin/systemctl" ]]
+ then
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${new_node} "${SERVICE_MAN} ${action} nfs-ganesha"
+ else
+ ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${new_node} "${SERVICE_MAN} nfs-ganesha ${action}"
+ fi
+}
+
+
+check_cluster_exists()
+{
+ local name=${1}
+ local cluster_name=""
+
+ if [ -e /var/run/corosync.pid ]; then
+ cluster_name=$(pcs status | grep "Cluster name:" | cut -d ' ' -f 3)
+ if [[ "${cluster_name}X" == "${name}X" ]]; then
+ logger "$name already exists, exiting"
+ exit 0
+ fi
+ fi
+}
+
+
+determine_servers()
+{
+ local cmd=${1}
+ local num_servers=0
+ local tmp_ifs=${IFS}
+ local ha_servers=""
+
+ if [ "${cmd}X" != "setupX" -a "${cmd}X" != "statusX" ]; then
+ ha_servers=$(pcs status | grep "Online:" | grep -o '\[.*\]' | sed -e 's/\[//' | sed -e 's/\]//')
+ IFS=$' '
+ for server in ${ha_servers} ; do
+ num_servers=$(expr ${num_servers} + 1)
+ done
+ IFS=${tmp_ifs}
+ HA_NUM_SERVERS=${num_servers}
+ HA_SERVERS="${ha_servers}"
+ else
+ IFS=$','
+ for server in ${HA_CLUSTER_NODES} ; do
+ num_servers=$(expr ${num_servers} + 1)
+ done
+ IFS=${tmp_ifs}
+ HA_NUM_SERVERS=${num_servers}
+ HA_SERVERS="${HA_CLUSTER_NODES//,/ }"
+ fi
+}
+
+stop_ganesha_all()
+{
+ local serverlist=${1}
+ for node in ${serverlist} ; do
+ manage_service "stop" ${node}
+ done
+}
+
+setup_cluster()
+{
+ local name=${1}
+ local num_servers=${2}
+ local servers=${3}
+ local unclean=""
+ local quorum_policy="stop"
+
+ logger "setting up cluster ${name} with the following ${servers}"
+
+ # pcs cluster setup --force ${PCS9OR10_PCS_CNAME_OPTION} ${name} ${servers}
+ pcs cluster setup --force ${PCS9OR10_PCS_CNAME_OPTION} ${name} --enable ${servers}
+ if [ $? -ne 0 ]; then
+ logger "pcs cluster setup ${PCS9OR10_PCS_CNAME_OPTION} ${name} --enable ${servers} failed, shutting down ganesha and bailing out"
+ #set up failed stop all ganesha process and clean up symlinks in cluster
+ stop_ganesha_all "${servers}"
+ exit 1;
+ fi
+
+ # pcs cluster auth ${servers}
+ pcs cluster auth
+ if [ $? -ne 0 ]; then
+ logger "pcs cluster auth failed"
+ fi
+
+ pcs cluster start --all
+ if [ $? -ne 0 ]; then
+ logger "pcs cluster start failed"
+ exit 1;
+ fi
+
+ sleep 1
+ # wait for the cluster to elect a DC before querying or writing
+ # to the CIB. BZ 1334092
+ crmadmin --dc_lookup --timeout=5000 > /dev/null 2>&1
+ while [ $? -ne 0 ]; do
+ crmadmin --dc_lookup --timeout=5000 > /dev/null 2>&1
+ done
+
+ unclean=$(pcs status | grep -u "UNCLEAN")
+ while [[ "${unclean}X" == "UNCLEANX" ]]; do
+ sleep 1
+ unclean=$(pcs status | grep -u "UNCLEAN")
+ done
+ sleep 1
+
+ if [ ${num_servers} -lt 3 ]; then
+ quorum_policy="ignore"
+ fi
+ pcs property set no-quorum-policy=${quorum_policy}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs property set no-quorum-policy=${quorum_policy} failed"
+ fi
+
+ pcs property set stonith-enabled=false
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs property set stonith-enabled=false failed"
+ fi
+}
+
+
+setup_finalize_ha()
+{
+ local cibfile=${1}
+ local stopped=""
+
+ stopped=$(pcs status | grep -u "Stopped")
+ while [[ "${stopped}X" == "StoppedX" ]]; do
+ sleep 1
+ stopped=$(pcs status | grep -u "Stopped")
+ done
+}
+
+
+refresh_config ()
+{
+ local short_host=$(hostname -s)
+ local VOL=${1}
+ local HA_CONFDIR=${2}
+ local short_host=$(hostname -s)
+
+ local export_id=$(grep ^[[:space:]]*Export_Id $HA_CONFDIR/exports/export.$VOL.conf |\
+ awk -F"[=,;]" '{print $2}' | tr -d '[[:space:]]')
+
+
+ if [ -e ${SECRET_PEM} ]; then
+ while [[ ${3} ]]; do
+ current_host=`echo ${3} | cut -d "." -f 1`
+ if [[ ${short_host} != ${current_host} ]]; then
+ output=$(ssh -oPasswordAuthentication=no \
+-oStrictHostKeyChecking=no -i ${SECRET_PEM} root@${current_host} \
+"dbus-send --print-reply --system --dest=org.ganesha.nfsd \
+/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.UpdateExport \
+string:$HA_CONFDIR/exports/export.$VOL.conf \
+string:\"EXPORT(Export_Id=$export_id)\" 2>&1")
+ ret=$?
+ logger <<< "${output}"
+ if [ ${ret} -ne 0 ]; then
+ echo "Refresh-config failed on ${current_host}. Please check logs on ${current_host}"
+ else
+ echo "Refresh-config completed on ${current_host}."
+ fi
+
+ fi
+ shift
+ done
+ else
+ echo "Error: refresh-config failed. Passwordless ssh is not enabled."
+ exit 1
+ fi
+
+ # Run the same command on the localhost,
+ output=$(dbus-send --print-reply --system --dest=org.ganesha.nfsd \
+/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.UpdateExport \
+string:$HA_CONFDIR/exports/export.$VOL.conf \
+string:"EXPORT(Export_Id=$export_id)" 2>&1)
+ ret=$?
+ logger <<< "${output}"
+ if [ ${ret} -ne 0 ] ; then
+ echo "Refresh-config failed on localhost."
+ else
+ echo "Success: refresh-config completed."
+ fi
+}
+
+
+teardown_cluster()
+{
+ local name=${1}
+
+ for server in ${HA_SERVERS} ; do
+ if [[ ${HA_CLUSTER_NODES} != *${server}* ]]; then
+ logger "info: ${server} is not in config, removing"
+
+ pcs cluster stop ${server} --force
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs cluster stop ${server} failed"
+ fi
+
+ pcs cluster node remove ${server}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs cluster node remove ${server} failed"
+ fi
+ fi
+ done
+
+ # BZ 1193433 - pcs doesn't reload cluster.conf after modification
+ # after teardown completes, a subsequent setup will appear to have
+ # 'remembered' the deleted node. You can work around this by
+ # issuing another `pcs cluster node remove $node`,
+ # `crm_node -f -R $server`, or
+ # `cibadmin --delete --xml-text '<node id="$server"
+ # uname="$server"/>'
+
+ pcs cluster stop --all
+ if [ $? -ne 0 ]; then
+ logger "warning pcs cluster stop --all failed"
+ fi
+
+ pcs cluster destroy
+ if [ $? -ne 0 ]; then
+ logger "error pcs cluster destroy failed"
+ exit 1
+ fi
+}
+
+
+cleanup_ganesha_config ()
+{
+ rm -f /etc/corosync/corosync.conf
+ rm -rf /etc/cluster/cluster.conf*
+ rm -rf /var/lib/pacemaker/cib/*
+ sed -r -i -e '/^%include[[:space:]]+".+\.conf"$/d' $HA_CONFDIR/ganesha.conf
+}
+
+do_create_virt_ip_constraints()
+{
+ local cibfile=${1}; shift
+ local primary=${1}; shift
+ local weight="1000"
+
+ # first a constraint location rule that says the VIP must be where
+ # there's a ganesha.nfsd running
+ pcs -f ${cibfile} constraint location ${primary}-group rule score=-INFINITY ganesha-active ne 1
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs constraint location ${primary}-group rule score=-INFINITY ganesha-active ne 1 failed"
+ fi
+
+ # then a set of constraint location prefers to set the prefered order
+ # for where a VIP should move
+ while [[ ${1} ]]; do
+ pcs -f ${cibfile} constraint location ${primary}-group prefers ${1}=${weight}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs constraint location ${primary}-group prefers ${1}=${weight} failed"
+ fi
+ weight=$(expr ${weight} + 1000)
+ shift
+ done
+ # and finally set the highest preference for the VIP to its home node
+ # default weight when created is/was 100.
+ # on Fedora setting appears to be additive, so to get the desired
+ # value we adjust the weight
+ # weight=$(expr ${weight} - 100)
+ pcs -f ${cibfile} constraint location ${primary}-group prefers ${primary}=${weight}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs constraint location ${primary}-group prefers ${primary}=${weight} failed"
+ fi
+}
+
+
+wrap_create_virt_ip_constraints()
+{
+ local cibfile=${1}; shift
+ local primary=${1}; shift
+ local head=""
+ local tail=""
+
+ # build a list of peers, e.g. for a four node cluster, for node1,
+ # the result is "node2 node3 node4"; for node2, "node3 node4 node1"
+ # and so on.
+ while [[ ${1} ]]; do
+ if [[ ${1} == ${primary} ]]; then
+ shift
+ while [[ ${1} ]]; do
+ tail=${tail}" "${1}
+ shift
+ done
+ else
+ head=${head}" "${1}
+ fi
+ shift
+ done
+ do_create_virt_ip_constraints ${cibfile} ${primary} ${tail} ${head}
+}
+
+
+create_virt_ip_constraints()
+{
+ local cibfile=${1}; shift
+
+ while [[ ${1} ]]; do
+ wrap_create_virt_ip_constraints ${cibfile} ${1} ${HA_SERVERS}
+ shift
+ done
+}
+
+
+setup_create_resources()
+{
+ local cibfile=$(mktemp -u)
+
+ # fixup /var/lib/nfs
+ logger "pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION}"
+ pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION} failed"
+ fi
+
+ pcs resource create nfs-mon ocf:heartbeat:ganesha_mon ${PCS9OR10_PCS_CLONE_OPTION}
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs resource create nfs-mon ocf:heartbeat:ganesha_mon ${PCS9OR10_PCS_CLONE_OPTION} failed"
+ fi
+
+ # see comment in (/usr/lib/ocf/resource.d/heartbeat/ganesha_grace
+ # start method. Allow time for ganesha_mon to start and set the
+ # ganesha-active crm_attribute
+ sleep 5
+
+ pcs resource create nfs-grace ocf:heartbeat:ganesha_grace ${PCS9OR10_PCS_CLONE_OPTION} notify=true
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs resource create nfs-grace ocf:heartbeat:ganesha_grace ${PCS9OR10_PCS_CLONE_OPTION} failed"
+ fi
+
+ pcs constraint location nfs-grace-clone rule score=-INFINITY grace-active ne 1
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs constraint location nfs-grace-clone rule score=-INFINITY grace-active ne 1"
+ fi
+
+ pcs cluster cib ${cibfile}
+
+ while [[ ${1} ]]; do
+
+ # this is variable indirection
+ # from a nvs like 'VIP_host1=10.7.6.5' or 'VIP_host1="10.7.6.5"'
+ # (or VIP_host-1=..., or VIP_host-1.my.domain.name=...)
+ # a variable 'clean_name' is created (e.g. w/ value 'VIP_host_1')
+ # and a clean nvs (e.g. w/ value 'VIP_host_1="10_7_6_5"')
+ # after the `eval ${clean_nvs}` there is a variable VIP_host_1
+ # with the value '10_7_6_5', and the following \$$ magic to
+ # reference it, i.e. `eval tmp_ipaddr=\$${clean_name}` gives us
+ # ${tmp_ipaddr} with 10_7_6_5 and then convert the _s back to .s
+ # to give us ipaddr="10.7.6.5". whew!
+ name="VIP_${1}"
+ clean_name=${name//[-.]/_}
+ nvs=$(grep "^${name}=" ${HA_CONFDIR}/ganesha-ha.conf)
+ clean_nvs=${nvs//[-.]/_}
+ eval ${clean_nvs}
+ eval tmp_ipaddr=\$${clean_name}
+ ipaddr=${tmp_ipaddr//_/.}
+
+ pcs -f ${cibfile} resource create ${1}-nfs_block ocf:heartbeat:portblock protocol=tcp \
+ portno=2049 action=block ip=${ipaddr} --group ${1}-group
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-nfs_block failed"
+ fi
+ pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+ cidr_netmask=32 op monitor interval=15s --group ${1}-group --after ${1}-nfs_block
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+ cidr_netmask=32 op monitor interval=15s failed"
+ fi
+
+ pcs -f ${cibfile} constraint order nfs-grace-clone then ${1}-cluster_ip-1
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs constraint order nfs-grace-clone then ${1}-cluster_ip-1 failed"
+ fi
+
+ pcs -f ${cibfile} resource create ${1}-nfs_unblock ocf:heartbeat:portblock protocol=tcp \
+ portno=2049 action=unblock ip=${ipaddr} reset_local_on_unblock_stop=true \
+ tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${1}-group --after ${1}-cluster_ip-1 \
+ op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} \
+ op monitor interval=10s timeout=${PORTBLOCK_UNBLOCK_TIMEOUT}
+ if [ $? -ne 0 ]; then
+ logger "warning pcs resource create ${1}-nfs_unblock failed"
+ fi
+
+
+ shift
+ done
+
+ create_virt_ip_constraints ${cibfile} ${HA_SERVERS}
+
+ pcs cluster cib-push ${cibfile}
+ if [ $? -ne 0 ]; then
+ logger "warning pcs cluster cib-push ${cibfile} failed"
+ fi
+ rm -f ${cibfile}
+}
+
+
+# teardown_resources -- delete every pacemaker resource created at setup
+# time: the shared -clone resource agents, then each node's resource
+# group.  Arguments are the HA server (node) names.  Failures are only
+# logged (not fatal) so teardown makes as much progress as possible.
+teardown_resources()
+{
+    # local mntpt=$(grep ha-vol-mnt ${HA_CONFIG_FILE} | cut -d = -f 2)
+
+    # restore /var/lib/nfs
+    logger "notice: pcs resource delete nfs_setup-clone"
+    pcs resource delete nfs_setup-clone
+    if [ $? -ne 0 ]; then
+        logger "warning: pcs resource delete nfs_setup-clone failed"
+    fi
+
+    # delete -clone resource agents
+    # in particular delete the ganesha monitor so we don't try to
+    # trigger anything when we shut down ganesha next.
+    pcs resource delete nfs-mon-clone
+    if [ $? -ne 0 ]; then
+        logger "warning: pcs resource delete nfs-mon-clone failed"
+    fi
+
+    pcs resource delete nfs-grace-clone
+    if [ $? -ne 0 ]; then
+        logger "warning: pcs resource delete nfs-grace-clone failed"
+    fi
+
+    # one <node>-group (VIP plus port block/unblock RAs) exists per node
+    while [[ ${1} ]]; do
+        pcs resource delete ${1}-group
+        if [ $? -ne 0 ]; then
+            logger "warning: pcs resource delete ${1}-group failed"
+        fi
+        shift
+    done
+
+}
+
+
+# recreate_resources -- re-create, in an offline CIB file, each node's
+# resource chain: <node>-nfs_block -> <node>-cluster_ip-1 ->
+# <node>-nfs_unblock, ordered after nfs-grace-clone.
+#   ${1}    : cibfile -- scratch CIB edited with `pcs -f`
+#   ${2}... : HA server (node) names
+recreate_resources()
+{
+    local cibfile=${1}; shift
+
+    while [[ ${1} ]]; do
+        # this is variable indirection
+        # see the comment on the same a few lines up
+        # (VIP_<node>=<addr> lines in ganesha-ha.conf may contain '-'
+        # and '.', which are illegal in shell variable names, so both
+        # are mapped to '_' before the eval; the '_'s in the value are
+        # then mapped back to '.'s to recover the dotted-quad address)
+        name="VIP_${1}"
+        clean_name=${name//[-.]/_}
+        nvs=$(grep "^${name}=" ${HA_CONFDIR}/ganesha-ha.conf)
+        clean_nvs=${nvs//[-.]/_}
+        eval ${clean_nvs}
+        eval tmp_ipaddr=\$${clean_name}
+        ipaddr=${tmp_ipaddr//_/.}
+
+        # block port 2049 before the VIP comes up ...
+        pcs -f ${cibfile} resource create ${1}-nfs_block ocf:heartbeat:portblock protocol=tcp \
+        portno=2049 action=block ip=${ipaddr} --group ${1}-group
+        if [ $? -ne 0 ]; then
+            logger "warning pcs resource create ${1}-nfs_block failed"
+        fi
+        pcs -f ${cibfile} resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+        cidr_netmask=32 op monitor interval=15s --group ${1}-group --after ${1}-nfs_block
+        if [ $? -ne 0 ]; then
+            logger "warning pcs resource create ${1}-cluster_ip-1 ocf:heartbeat:IPaddr ip=${ipaddr} \
+            cidr_netmask=32 op monitor interval=15s failed"
+        fi
+
+        # VIPs must only start once the NFS grace period is handled
+        pcs -f ${cibfile} constraint order nfs-grace-clone then ${1}-cluster_ip-1
+        if [ $? -ne 0 ]; then
+            logger "warning: pcs constraint order nfs-grace-clone then ${1}-cluster_ip-1 failed"
+        fi
+
+        # ... and unblock it last, tickling live client connections
+        pcs -f ${cibfile} resource create ${1}-nfs_unblock ocf:heartbeat:portblock protocol=tcp \
+        portno=2049 action=unblock ip=${ipaddr} reset_local_on_unblock_stop=true \
+        tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${1}-group --after ${1}-cluster_ip-1 \
+        op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} \
+        op monitor interval=10s timeout=${PORTBLOCK_UNBLOCK_TIMEOUT}
+        if [ $? -ne 0 ]; then
+            logger "warning pcs resource create ${1}-nfs_unblock failed"
+        fi
+
+        shift
+    done
+}
+
+
+# addnode_recreate_resources -- rebuild the resource chains for all
+# existing nodes, then create the chain for the newly added node with
+# its explicitly supplied VIP (the new node has no VIP_<node> line in
+# ganesha-ha.conf yet, so recreate_resources cannot cover it).
+#   ${1} : cibfile  -- scratch CIB edited with `pcs -f`
+#   ${2} : add_node -- name of the node being added
+#   ${3} : add_vip  -- virtual IP for the new node
+addnode_recreate_resources()
+{
+    local cibfile=${1}; shift
+    local add_node=${1}; shift
+    local add_vip=${1}; shift
+
+    # existing nodes first (reads their VIPs from ganesha-ha.conf)
+    recreate_resources ${cibfile} ${HA_SERVERS}
+
+    pcs -f ${cibfile} resource create ${add_node}-nfs_block ocf:heartbeat:portblock \
+    protocol=tcp portno=2049 action=block ip=${add_vip} --group ${add_node}-group
+    if [ $? -ne 0 ]; then
+        logger "warning pcs resource create ${add_node}-nfs_block failed"
+    fi
+    pcs -f ${cibfile} resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr \
+    ip=${add_vip} cidr_netmask=32 op monitor interval=15s --group ${add_node}-group \
+    --after ${add_node}-nfs_block
+    if [ $? -ne 0 ]; then
+        logger "warning pcs resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr \
+        ip=${add_vip} cidr_netmask=32 op monitor interval=15s failed"
+    fi
+
+    pcs -f ${cibfile} constraint order nfs-grace-clone then ${add_node}-cluster_ip-1
+    if [ $? -ne 0 ]; then
+        logger "warning: pcs constraint order nfs-grace-clone then ${add_node}-cluster_ip-1 failed"
+    fi
+    pcs -f ${cibfile} resource create ${add_node}-nfs_unblock ocf:heartbeat:portblock \
+    protocol=tcp portno=2049 action=unblock ip=${add_vip} reset_local_on_unblock_stop=true \
+    tickle_dir=${HA_VOL_MNT}/nfs-ganesha/tickle_dir/ --group ${add_node}-group --after \
+    ${add_node}-cluster_ip-1 op stop timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op start \
+    timeout=${PORTBLOCK_UNBLOCK_TIMEOUT} op monitor interval=10s \
+    timeout=${PORTBLOCK_UNBLOCK_TIMEOUT}
+    if [ $? -ne 0 ]; then
+        logger "warning pcs resource create ${add_node}-nfs_unblock failed"
+    fi
+}
+
+
+clear_resources()
+{
+ local cibfile=${1}; shift
+
+ while [[ ${1} ]]; do
+ pcs -f ${cibfile} resource delete ${1}-group
+ if [ $? -ne 0 ]; then
+ logger "warning: pcs -f ${cibfile} resource delete ${1}-group"
+ fi
+
+ shift
+ done
+}
+
+
+# addnode_create_resources -- bring a new node into the running cluster:
+# start pacemaker on it, snapshot the CIB, rebuild all VIP resource
+# groups including the new node's, recompute location constraints, and
+# push the CIB back.  Appends the node to the global HA_SERVERS.
+#   ${1} : add_node -- name of the node being added
+#   ${2} : add_vip  -- virtual IP for the new node
+addnode_create_resources()
+{
+    local add_node=${1}; shift
+    local add_vip=${1}; shift
+    # mktemp -u: name only; `pcs cluster cib` creates the file itself
+    local cibfile=$(mktemp -u)
+
+    # start HA on the new node
+    pcs cluster start ${add_node}
+    if [ $? -ne 0 ]; then
+        logger "warning: pcs cluster start ${add_node} failed"
+    fi
+
+    pcs cluster cib ${cibfile}
+    if [ $? -ne 0 ]; then
+        logger "warning: pcs cluster cib ${cibfile} failed"
+    fi
+
+    # delete all the -cluster_ip-1 resources, clearing
+    # their constraints, then create them again so we can
+    # recompute their constraints
+    clear_resources ${cibfile} ${HA_SERVERS}
+    addnode_recreate_resources ${cibfile} ${add_node} ${add_vip}
+
+    HA_SERVERS="${HA_SERVERS} ${add_node}"
+    create_virt_ip_constraints ${cibfile} ${HA_SERVERS}
+
+    pcs cluster cib-push ${cibfile}
+    if [ $? -ne 0 ]; then
+        logger "warning: pcs cluster cib-push ${cibfile} failed"
+    fi
+    rm -f ${cibfile}
+}
+
+
+# deletenode_delete_resources -- remove a node's resources from the
+# cluster: snapshot the CIB, drop every VIP group, recreate the groups
+# for the surviving nodes only, recompute constraints, push the CIB.
+# Updates the global HA_SERVERS to the surviving server list.
+#   ${1} : name of the node being removed
+deletenode_delete_resources()
+{
+    local node=${1}; shift
+    # strip the node from the server list (leaves a double space behind)
+    local ha_servers=$(echo "${HA_SERVERS}" | sed s/${node}//)
+    local cibfile=$(mktemp -u)
+
+    pcs cluster cib ${cibfile}
+    if [ $? -ne 0 ]; then
+        logger "warning: pcs cluster cib ${cibfile} failed"
+    fi
+
+    # delete all the -cluster_ip-1 and -trigger_ip-1 resources,
+    # clearing their constraints, then create them again so we can
+    # recompute their constraints
+    clear_resources ${cibfile} ${HA_SERVERS}
+    recreate_resources ${cibfile} ${ha_servers}
+    # NOTE(review): this sed replaces "  " with " " only once per line;
+    # it is intended to squeeze the double space left above -- confirm
+    HA_SERVERS=$(echo "${ha_servers}" | sed -e "s/  / /")
+
+    create_virt_ip_constraints ${cibfile} ${HA_SERVERS}
+
+    pcs cluster cib-push ${cibfile}
+    if [ $? -ne 0 ]; then
+        logger "warning: pcs cluster cib-push ${cibfile} failed"
+    fi
+    rm -f ${cibfile}
+
+}
+
+
+# deletenode_update_haconfig -- rewrite ganesha-ha.conf after a node is
+# removed: refresh HA_CLUSTER_NODES from the (already updated) global
+# HA_SERVERS, delete the node's VIP_<node>= line, and drop blank lines.
+#   ${1} : name of the node being removed
+deletenode_update_haconfig()
+{
+    local name="VIP_${1}"
+    # NOTE(review): clean_name is computed but never used in this function
+    local clean_name=${name//[-.]/_}
+
+    # this sed replaces only the first space; the ${ha_servers// /,}
+    # expansion in the command below converts any remaining ones
+    ha_servers=$(echo ${HA_SERVERS} | sed -e "s/ /,/")
+    sed -i -e "s/^HA_CLUSTER_NODES=.*$/HA_CLUSTER_NODES=\"${ha_servers// /,}\"/" -e "s/^${name}=.*$//" -e "/^$/d" ${HA_CONFDIR}/ganesha-ha.conf
+}
+
+
+# setup_state_volume -- create the per-node NFS state directory tree on
+# the shared HA volume (${HA_VOL_MNT}/nfs-ganesha/<fqdn>/...) for every
+# node given as an argument, plus cross-node symlinks so each node can
+# reach every other node's ganesha/statd state after a failover.
+# Each mkdir/touch is guarded by an existence test so re-running is safe.
+setup_state_volume()
+{
+    local mnt=${HA_VOL_MNT}
+    local longname=""
+    local shortname=""
+    local dname=""
+    local dirname=""
+
+    # dname is the domain suffix of this host's FQDN, i.e. the FQDN with
+    # the short hostname prefix stripped (e.g. ".example.com")
+    longname=$(hostname)
+    dname=${longname#$(hostname -s)}
+
+    while [[ ${1} ]]; do
+
+        # qualify the node name with the domain unless it already is
+        if [[ ${1} == *${dname} ]]; then
+            dirname=${1}
+        else
+            dirname=${1}${dname}
+        fi
+
+        if [ ! -d ${mnt}/nfs-ganesha/tickle_dir ]; then
+            mkdir ${mnt}/nfs-ganesha/tickle_dir
+        fi
+        if [ ! -d ${mnt}/nfs-ganesha/${dirname} ]; then
+            mkdir ${mnt}/nfs-ganesha/${dirname}
+        fi
+        if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs ]; then
+            mkdir ${mnt}/nfs-ganesha/${dirname}/nfs
+        fi
+        if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha ]; then
+            mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha
+        fi
+        # statd state must be owned by rpcuser for rpc.statd to use it
+        if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd ]; then
+            mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd
+            chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd
+        fi
+        if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/state ]; then
+            touch ${mnt}/nfs-ganesha/${dirname}/nfs/state
+            chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/state
+        fi
+        # NFSv4 client recovery databases
+        if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov ]; then
+            mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov
+        fi
+        if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4old ]; then
+            mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4old
+        fi
+        if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm ]; then
+            mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
+            chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
+        fi
+        if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak ]; then
+            mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
+            chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
+        fi
+        if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state ]; then
+            touch ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state
+        fi
+        # let this node reach every peer's state directories by name
+        for server in ${HA_SERVERS} ; do
+            if [[ ${server} != ${dirname} ]]; then
+                ln -s ${mnt}/nfs-ganesha/${server}/nfs/ganesha ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/${server}
+                ln -s ${mnt}/nfs-ganesha/${server}/nfs/statd ${mnt}/nfs-ganesha/${dirname}/nfs/statd/${server}
+            fi
+        done
+        shift
+    done
+
+}
+
+
+# enable_pacemaker -- enable the pacemaker service at boot on every node
+# given as an argument, over passwordless ssh (key ${SECRET_PEM}).
+# systemd wants "systemctl enable pacemaker"; SysV-style service
+# managers want "<service-man> pacemaker enable", hence the branch.
+enable_pacemaker()
+{
+    while [[ ${1} ]]; do
+        if [[ "${SERVICE_MAN}" == "/bin/systemctl" ]]; then
+            ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${1} "${SERVICE_MAN} enable pacemaker"
+        else
+            ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
+${SECRET_PEM} root@${1} "${SERVICE_MAN} pacemaker enable"
+        fi
+        shift
+    done
+}
+
+
+# addnode_state_volume -- create the shared-volume state directory tree
+# for one newly added node (same layout as setup_state_volume) and wire
+# up symlinks in BOTH directions: the new node links to every existing
+# server's state, and every existing server links back to the new node.
+#   ${1} : name of the node being added
+addnode_state_volume()
+{
+    local newnode=${1}; shift
+    local mnt=${HA_VOL_MNT}
+    local longname=""
+    local dname=""
+    local dirname=""
+
+    # dname: domain suffix of this host's FQDN (FQDN minus short name)
+    longname=$(hostname)
+    dname=${longname#$(hostname -s)}
+
+    if [[ ${newnode} == *${dname} ]]; then
+        dirname=${newnode}
+    else
+        dirname=${newnode}${dname}
+    fi
+
+    if [ ! -d ${mnt}/nfs-ganesha/${dirname} ]; then
+        mkdir ${mnt}/nfs-ganesha/${dirname}
+    fi
+    if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs ]; then
+        mkdir ${mnt}/nfs-ganesha/${dirname}/nfs
+    fi
+    if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha ]; then
+        mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha
+    fi
+    # statd state must be owned by rpcuser for rpc.statd to use it
+    if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd ]; then
+        mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd
+        chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd
+    fi
+    if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/state ]; then
+        touch ${mnt}/nfs-ganesha/${dirname}/nfs/state
+        chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/state
+    fi
+    # NFSv4 client recovery databases
+    if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov ]; then
+        mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4recov
+    fi
+    if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4old ]; then
+        mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/v4old
+    fi
+    if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm ]; then
+        mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
+        chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm
+    fi
+    if [ ! -d ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak ]; then
+        mkdir ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
+        chown rpcuser:rpcuser ${mnt}/nfs-ganesha/${dirname}/nfs/statd/sm.bak
+    fi
+    if [ ! -e ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state ]; then
+        touch ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state
+    fi
+
+    for server in ${HA_SERVERS} ; do
+
+        if [[ ${server} != ${dirname} ]]; then
+            # new node -> existing server
+            ln -s ${mnt}/nfs-ganesha/${server}/nfs/ganesha ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/${server}
+            ln -s ${mnt}/nfs-ganesha/${server}/nfs/statd ${mnt}/nfs-ganesha/${dirname}/nfs/statd/${server}
+
+            # existing server -> new node
+            ln -s ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha ${mnt}/nfs-ganesha/${server}/nfs/ganesha/${dirname}
+            ln -s ${mnt}/nfs-ganesha/${dirname}/nfs/statd ${mnt}/nfs-ganesha/${server}/nfs/statd/${dirname}
+        fi
+    done
+
+}
+
+
+# delnode_state_volume -- remove a deleted node's state tree from the
+# shared volume and drop the symlinks that every surviving server held
+# pointing at it.
+#   ${1} : name of the node being removed
+delnode_state_volume()
+{
+    local delnode=${1}; shift
+    local mnt=${HA_VOL_MNT}
+    local longname=""
+    local dname=""
+    local dirname=""
+
+    # dname: domain suffix of this host's FQDN (FQDN minus short name)
+    longname=$(hostname)
+    dname=${longname#$(hostname -s)}
+
+    if [[ ${delnode} == *${dname} ]]; then
+        dirname=${delnode}
+    else
+        dirname=${delnode}${dname}
+    fi
+
+    rm -rf ${mnt}/nfs-ganesha/${dirname}
+
+    # remove each surviving server's links to the deleted node's state
+    for server in ${HA_SERVERS} ; do
+        if [[ ${server} != ${dirname} ]]; then
+            rm -f ${mnt}/nfs-ganesha/${server}/nfs/ganesha/${dirname}
+            rm -f ${mnt}/nfs-ganesha/${server}/nfs/statd/${dirname}
+        fi
+    done
+}
+
+
+# status -- print a summary of cluster health from `pcs status`:
+# which nodes are online, where each VIP lives, and an overall verdict.
+# Arguments are the HA server (node) names.
+# Verdict logic: grep returns 0 when a pattern is found, so `healthy`
+# stays 0 only if every node's three RAs are Started on that node;
+# any Stopped/FAILED resource forces BAD, otherwise a non-zero
+# `healthy` means at least one RA has failed over to another node.
+status()
+{
+    local scratch=$(mktemp)
+    local regex_str="^${1}-cluster_ip-1"
+    local healthy=0
+    local index=1
+    local nodes
+
+    # change tabs to spaces, strip leading spaces, including any
+    # new '*' at the beginning of a line introduced in pcs-0.10.x
+    pcs status | sed -e "s/\t/ /g" -e "s/^[ ]*\*//" -e "s/^[ ]*//" > ${scratch}
+
+    nodes[0]=${1}; shift
+
+    # make a regex of the configured nodes
+    # and initialize the nodes array for later
+    while [[ ${1} ]]; do
+
+        regex_str="${regex_str}|^${1}-cluster_ip-1"
+        nodes[${index}]=${1}
+        ((index++))
+        shift
+    done
+
+    # print the nodes that are expected to be online
+    grep -E "Online:" ${scratch}
+
+    echo
+
+    # print the VIPs and which node they are on
+    grep -E "${regex_str}" < ${scratch} | cut -d ' ' -f 1,4
+
+    echo
+
+    # check if the VIP and port block/unblock RAs are on the expected nodes
+    for n in ${nodes[*]}; do
+
+        grep -E -x "${n}-nfs_block \(ocf::heartbeat:portblock\): Started ${n}" > /dev/null 2>&1 ${scratch}
+        result=$?
+        ((healthy+=${result}))
+        grep -E -x "${n}-cluster_ip-1 \(ocf::heartbeat:IPaddr\): Started ${n}" > /dev/null 2>&1 ${scratch}
+        result=$?
+        ((healthy+=${result}))
+        grep -E -x "${n}-nfs_unblock \(ocf::heartbeat:portblock\): Started ${n}" > /dev/null 2>&1 ${scratch}
+        result=$?
+        ((healthy+=${result}))
+    done
+
+    # any resource reported Stopped or FAILED means the cluster is BAD
+    grep -E "\):\ Stopped|FAILED" > /dev/null 2>&1 ${scratch}
+    result=$?
+
+    if [ ${result} -eq 0 ]; then
+        echo "Cluster HA Status: BAD"
+    elif [ ${healthy} -eq 0 ]; then
+        echo "Cluster HA Status: HEALTHY"
+    else
+        echo "Cluster HA Status: FAILOVER"
+    fi
+
+    rm -f ${scratch}
+}
+
+# create_ganesha_conf_file -- manage /etc/ganesha/ganesha.conf ($GANESHA_CONF).
+#   ${1} == "yes" : replace it with a symlink to the copy on shared
+#                   storage so edits are consistent cluster-wide
+#   otherwise     : restore a real local file from the shared copy,
+#                   stripping the per-volume %include lines
+create_ganesha_conf_file()
+{
+        if [[ "$1" == "yes" ]];
+        then
+                if [ -e $GANESHA_CONF ];
+                then
+                        rm -rf $GANESHA_CONF
+                fi
+        # The symlink /etc/ganesha/ganesha.conf needs to be
+        # created using the ganesha conf file kept on the
+        # shared storage. Every node holds only this link,
+        # while the actual file lives on shared storage, so
+        # editing ganesha.conf is easy and stays consistent
+        # across the cluster.
+
+                ln -s $HA_CONFDIR/ganesha.conf $GANESHA_CONF
+        else
+                # Restoring previous file
+                rm -rf $GANESHA_CONF
+                cp $HA_CONFDIR/ganesha.conf $GANESHA_CONF
+                sed -r -i -e '/^%include[[:space:]]+".+\.conf"$/d' $GANESHA_CONF
+        fi
+}
+
+# set_quorum_policy -- with fewer than 3 servers quorum cannot be
+# meaningfully computed, so tell pacemaker to ignore quorum loss;
+# otherwise stop resources when quorum is lost (the safe default).
+#   ${1} : current number of servers in the cluster
+set_quorum_policy()
+{
+    local quorum_policy="stop"
+    local num_servers=${1}
+
+    if [ ${num_servers} -lt 3 ]; then
+        quorum_policy="ignore"
+    fi
+    pcs property set no-quorum-policy=${quorum_policy}
+    if [ $? -ne 0 ]; then
+        logger "warning: pcs property set no-quorum-policy=${quorum_policy} failed"
+    fi
+}
+
+# main -- command dispatcher.  Usage:
+#   ganesha-ha.sh <setup|teardown|cleanup|add|delete|status|
+#                  refresh-config|setup-ganesha-conf-files> <confdir> [args...]
+# Reads HA_NAME and HA_CLUSTER_NODES from <confdir>/ganesha-ha.conf and
+# temporarily enables the gluster_use_execmem selinux boolean while running.
+main()
+{
+
+    local cmd=${1}; shift
+    if [[ ${cmd} == *help ]]; then
+        usage
+        exit 0
+    fi
+
+    # pacemaker/pcs need execmem while we drive them under selinux
+    if (selinuxenabled) ;then
+        semanage boolean -m gluster_use_execmem --on
+    fi
+
+    local osid=""
+
+    # import ID and VERSION_ID from os-release to pick pcs syntax below
+    osid=$(grep ^ID= /etc/os-release)
+    eval $(echo ${osid} | grep -F ID=)
+    osid=$(grep ^VERSION_ID= /etc/os-release)
+    eval $(echo ${osid} | grep -F VERSION_ID=)
+
+    HA_CONFDIR=${1%/}; shift
+    local ha_conf=${HA_CONFDIR}/ganesha-ha.conf
+    local node=""
+    local vip=""
+
+    # ignore any comment lines
+    cfgline=$(grep ^HA_NAME= ${ha_conf})
+    eval $(echo ${cfgline} | grep -F HA_NAME=)
+    cfgline=$(grep ^HA_CLUSTER_NODES= ${ha_conf})
+    eval $(echo ${cfgline} | grep -F HA_CLUSTER_NODES=)
+
+    case "${cmd}" in
+
+    setup | --setup)
+        logger "setting up ${HA_NAME}"
+
+        check_cluster_exists ${HA_NAME}
+
+        determine_servers "setup"
+
+        # Fedora 29+ and rhel/centos 8 has PCS-0.10.x
+        # default is pcs-0.10.x options but check for
+        # rhel/centos 7 (pcs-0.9.x) and adjust accordingly
+        # NOTE(review): '{rhel,centos}' inside [[ =~ ]] is a literal
+        # string, not alternation, so the negated test is effectively
+        # always true and only VERSION_ID gates the branch; this still
+        # selects pcs-0.9 options on EL7 because only EL7 reports
+        # VERSION_ID 7.* -- confirm intent ( (rhel|centos) was likely meant )
+        if [[ ! ${ID} =~ {rhel,centos} ]]; then
+            if [[ ${VERSION_ID} == 7.* ]]; then
+                PCS9OR10_PCS_CNAME_OPTION="--name"
+                PCS9OR10_PCS_CLONE_OPTION="--clone"
+            fi
+        fi
+
+        if [[ "${HA_NUM_SERVERS}X" != "1X" ]]; then
+
+            determine_service_manager
+
+            setup_cluster ${HA_NAME} ${HA_NUM_SERVERS} "${HA_SERVERS}"
+
+            setup_create_resources ${HA_SERVERS}
+
+            setup_finalize_ha
+
+            setup_state_volume ${HA_SERVERS}
+
+            enable_pacemaker ${HA_SERVERS}
+
+        else
+
+            logger "insufficient servers for HA, aborting"
+        fi
+        ;;
+
+    teardown | --teardown)
+        logger "tearing down ${HA_NAME}"
+
+        determine_servers "teardown"
+
+        teardown_resources ${HA_SERVERS}
+
+        teardown_cluster ${HA_NAME}
+
+        cleanup_ganesha_config ${HA_CONFDIR}
+        ;;
+
+    cleanup | --cleanup)
+        cleanup_ganesha_config ${HA_CONFDIR}
+        ;;
+
+    add | --add)
+        node=${1}; shift
+        vip=${1}; shift
+
+        logger "adding ${node} with ${vip} to ${HA_NAME}"
+
+        determine_service_manager
+
+        manage_service "start" ${node}
+
+        determine_servers "add"
+
+        pcs cluster node add ${node}
+        if [ $? -ne 0 ]; then
+            logger "warning: pcs cluster node add ${node} failed"
+        fi
+
+        addnode_create_resources ${node} ${vip}
+        # Subsequent add-node recreates resources for all the nodes
+        # that already exist in the cluster. The nodes are picked up
+        # from the entries in the ganesha-ha.conf file. Adding the
+        # newly added node to the file so that the resources specific
+        # to this node is correctly recreated in the future.
+        clean_node=${node//[-.]/_}
+        echo "VIP_${node}=\"${vip}\"" >> ${HA_CONFDIR}/ganesha-ha.conf
+
+        NEW_NODES="$HA_CLUSTER_NODES,${node}"
+
+        sed -i s/HA_CLUSTER_NODES.*/"HA_CLUSTER_NODES=\"$NEW_NODES\""/ \
+$HA_CONFDIR/ganesha-ha.conf
+
+        addnode_state_volume ${node}
+
+        # addnode_create_resources() already appended ${node} to
+        # HA_SERVERS, so only need to increment HA_NUM_SERVERS
+        # and set quorum policy
+        HA_NUM_SERVERS=$(expr ${HA_NUM_SERVERS} + 1)
+        set_quorum_policy ${HA_NUM_SERVERS}
+        ;;
+
+    delete | --delete)
+        node=${1}; shift
+
+        logger "deleting ${node} from ${HA_NAME}"
+
+        determine_servers "delete"
+
+        deletenode_delete_resources ${node}
+
+        pcs cluster node remove ${node}
+        if [ $? -ne 0 ]; then
+            logger "warning: pcs cluster node remove ${node} failed"
+        fi
+
+        deletenode_update_haconfig ${node}
+
+        delnode_state_volume ${node}
+
+        determine_service_manager
+
+        manage_service "stop" ${node}
+
+        HA_NUM_SERVERS=$(expr ${HA_NUM_SERVERS} - 1)
+        set_quorum_policy ${HA_NUM_SERVERS}
+        ;;
+
+    status | --status)
+        determine_servers "status"
+
+        status ${HA_SERVERS}
+        ;;
+
+    refresh-config | --refresh-config)
+        VOL=${1}
+
+        determine_servers "refresh-config"
+
+        refresh_config ${VOL} ${HA_CONFDIR} ${HA_SERVERS}
+        ;;
+
+    setup-ganesha-conf-files | --setup-ganesha-conf-files)
+
+        create_ganesha_conf_file ${1}
+        ;;
+
+    *)
+        # setup and teardown are not intended to be used by a
+        # casual user
+        usage
+        logger "Usage: ganesha-ha.sh add|delete|status"
+        ;;
+
+    esac
+
+    # restore the selinux boolean we flipped on entry
+    if (selinuxenabled) ;then
+        semanage boolean -m gluster_use_execmem --off
+    fi
+}
+
+main $*
diff --git a/extras/ganesha/scripts/generate-epoch.py b/extras/ganesha/scripts/generate-epoch.py
new file mode 100755
index 00000000000..77af014bab9
--- /dev/null
+++ b/extras/ganesha/scripts/generate-epoch.py
@@ -0,0 +1,48 @@
+#!/usr/bin/python3
+#
+# Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com>
+# This file is part of GlusterFS.
+#
+# This file is licensed to you under your choice of the GNU Lesser
+# General Public License, version 3 or any later version (LGPLv3 or
+# later), or the GNU General Public License, version 2 (GPLv2), in all
+# cases as published by the Free Software Foundation.
+#
+# Generates unique epoch value on each gluster node to be used by
+# nfs-ganesha service on that node.
+#
+# Configure 'EPOCH_EXEC' option to this script path in
+# '/etc/sysconfig/ganesha' file used by nfs-ganesha service.
+#
+# Construct epoch as follows -
+# first 32-bit contains the now() time
+# rest 32-bit value contains the local glusterd node uuid
+
+import time
+import binascii
+
+# Calculate the now() time into a 64-bit integer value
+def epoch_now():
+    # current local epoch seconds shifted into the upper 32 bits
+    epoch_time = int(time.mktime(time.localtime())) << 32
+    return epoch_time
+
+# Read glusterd UUID and extract first 32-bit of it
+def epoch_uuid():
+ file_name = '/var/lib/glusterd/glusterd.info'
+
+ for line in open(file_name):
+ if "UUID" in line:
+ glusterd_uuid = line.split('=')[1].strip()
+
+ uuid_bin = binascii.unhexlify(glusterd_uuid.replace("-",""))
+
+ epoch_uuid = int(binascii.hexlify(uuid_bin), 32) & 0xFFFF0000
+ return epoch_uuid
+
+# Construct epoch as follows -
+# first 32-bit contains the now() time
+# rest 32-bit value contains the local glusterd node uuid
+# (printed as a decimal string for consumption by the ganesha sysconfig)
+epoch = (epoch_now() | epoch_uuid())
+print((str(epoch)))
+
+exit(0)
diff --git a/extras/geo-rep/Makefile.am b/extras/geo-rep/Makefile.am
index e4603ae80b8..09eff308ac4 100644
--- a/extras/geo-rep/Makefile.am
+++ b/extras/geo-rep/Makefile.am
@@ -1,4 +1,4 @@
-scriptsdir = $(datadir)/glusterfs/scripts
+scriptsdir = $(libexecdir)/glusterfs/scripts
scripts_SCRIPTS = gsync-upgrade.sh generate-gfid-file.sh get-gfid.sh \
slave-upgrade.sh schedule_georep.py
diff --git a/extras/geo-rep/schedule_georep.py.in b/extras/geo-rep/schedule_georep.py.in
index f29ae020b8f..48b2b507060 100644
--- a/extras/geo-rep/schedule_georep.py.in
+++ b/extras/geo-rep/schedule_georep.py.in
@@ -352,7 +352,7 @@ def get_summary(mastervol, slave_url):
def touch_mount_root(mastervol):
# Create a Mount and Touch the Mount point root,
# Hack to make sure some event available after
- # setting Checkpoint. Without this their is a chance of
+ # setting Checkpoint. Without this there is a chance of
# Checkpoint never completes.
with glustermount("localhost", mastervol) as mnt:
execute(["touch", mnt])
@@ -459,8 +459,8 @@ if __name__ == "__main__":
description=__doc__)
parser.add_argument("mastervol", help="Master Volume Name")
parser.add_argument("slave",
- help="SLAVEHOST or root@SLAVEHOST "
- "or user@SLAVEHOST",
+ help="Slave hostname "
+ "(<username>@SLAVEHOST or SLAVEHOST)",
metavar="SLAVE")
parser.add_argument("slavevol", help="Slave Volume Name")
parser.add_argument("--interval", help="Interval in Seconds. "
diff --git a/extras/glusterd.vol.in b/extras/glusterd.vol.in
index 6141d8a736e..5d7bad0e4c8 100644
--- a/extras/glusterd.vol.in
+++ b/extras/glusterd.vol.in
@@ -1,12 +1,11 @@
volume management
type mgmt/glusterd
option working-directory @GLUSTERD_WORKDIR@
- option transport-type socket,rdma
+ option transport-type socket
option transport.socket.keepalive-time 10
option transport.socket.keepalive-interval 2
option transport.socket.read-fail-log off
option transport.socket.listen-port 24007
- option transport.rdma.listen-port 24008
option ping-timeout 0
option event-threads 1
# option lock-timer 180
diff --git a/extras/glusterfs-georep-upgrade.py b/extras/glusterfs-georep-upgrade.py
new file mode 100755
index 00000000000..634576058d6
--- /dev/null
+++ b/extras/glusterfs-georep-upgrade.py
@@ -0,0 +1,77 @@
+#!/usr/bin/python3
+"""
+
+Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
+This file is part of GlusterFS.
+
+This file is licensed to you under your choice of the GNU Lesser
+General Public License, version 3 or any later version (LGPLv3 or
+later), or the GNU General Public License, version 2 (GPLv2), in all
+cases as published by the Free Software Foundation.
+
+"""
+
+import argparse
+import errno
+import os, sys
+import shutil
+from datetime import datetime
+
+def find_htime_path(brick_path):
+    """Return the list of HTIME.* index files under the brick's
+    .glusterfs/changelogs/htime directory.
+
+    NOTE(review): any directory entry that is not a regular file
+    starting with "HTIME" makes this raise FileNotFoundError, aborting
+    the scan entirely -- confirm that stray files (e.g. leftovers of an
+    interrupted run) cannot appear here.
+    """
+    dirs = []
+    htime_dir = os.path.join(brick_path, '.glusterfs/changelogs/htime')
+    for file in os.listdir(htime_dir):
+        if os.path.isfile(os.path.join(htime_dir,file)) and file.startswith("HTIME"):
+            dirs.append(os.path.join(htime_dir, file))
+        else:
+            raise FileNotFoundError("%s unavailable" % (os.path.join(htime_dir, file)))
+    return dirs
+
+def modify_htime_file(brick_path):
+ htime_file_path_list = find_htime_path(brick_path)
+
+ for htime_file_path in htime_file_path_list:
+ changelog_path = os.path.join(brick_path, '.glusterfs/changelogs')
+ temp_htime_path = os.path.join(changelog_path, 'htime/temp_htime_file')
+ with open(htime_file_path, 'r') as htime_file, open(temp_htime_path, 'w') as temp_htime_file:
+ #extract epoch times from htime file
+ paths = htime_file.read().split("\x00")
+
+ for pth in paths:
+ epoch_no = pth.split(".")[-1]
+ changelog = os.path.basename(pth)
+ #convert epoch time to year, month and day
+ if epoch_no != '':
+ date=(datetime.fromtimestamp(float(int(epoch_no))).strftime("%Y/%m/%d"))
+ #update paths in temp htime file
+ temp_htime_file.write("%s/%s/%s\x00" % (changelog_path, date, changelog))
+ #create directory in the format year/month/days
+ path = os.path.join(changelog_path, date)
+
+ if changelog.startswith("CHANGELOG."):
+ try:
+ os.makedirs(path, mode = 0o600);
+ except OSError as exc:
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise
+
+ #copy existing changelogs to new directory structure, delete old changelog files
+ shutil.copyfile(pth, os.path.join(path, changelog))
+ os.remove(pth)
+
+ #rename temp_htime_file with htime file
+ os.rename(htime_file_path, os.path.join('%s.bak'%htime_file_path))
+ os.rename(temp_htime_path, htime_file_path)
+
+if __name__ == "__main__":
+    # single positional argument: the brick root whose changelog layout
+    # should be migrated to the year/month/day structure
+    parser = argparse.ArgumentParser()
+    parser.add_argument('brick_path', help="This upgrade script, which is to be run on\
+                        server side, takes brick path as the argument, \
+                        updates paths inside htime file and alters the directory structure \
+                        above the changelog files inorder to support new optimised format \
+                        of the directory structure as per \
+                        https://review.gluster.org/#/c/glusterfs/+/23733/")
+    args = parser.parse_args()
+    modify_htime_file(args.brick_path)
diff --git a/extras/glusterfs-logrotate b/extras/glusterfs-logrotate
index 75f700e6459..6ba6ef18e9f 100644
--- a/extras/glusterfs-logrotate
+++ b/extras/glusterfs-logrotate
@@ -45,3 +45,24 @@
compress
delaycompress
}
+
+# Rotate snapd log
+# Weekly rotation, kept for 26 weeks; after rotating, HUP every
+# running snapd-tagged glusterfs process so it reopens its log file.
+/var/log/glusterfs/snaps/*/*.log {
+  sharedscripts
+  weekly
+  maxsize 10M
+  minsize 100k
+
+# 6 months of logs are good enough
+  rotate 26
+
+  missingok
+  compress
+  delaycompress
+  notifempty
+  postrotate
+  for pid in `ps -aef | grep glusterfs | egrep "snapd" | awk '{print $2}'`; do
+      /usr/bin/kill -HUP $pid > /dev/null 2>&1 || true;
+  done
+  endscript
+}
diff --git a/extras/group-gluster-block b/extras/group-gluster-block
index 56b406e3641..1e398019e6b 100644
--- a/extras/group-gluster-block
+++ b/extras/group-gluster-block
@@ -5,6 +5,14 @@ performance.stat-prefetch=off
performance.open-behind=off
performance.readdir-ahead=off
performance.strict-o-direct=on
+performance.client-io-threads=on
+performance.io-thread-count=32
+performance.high-prio-threads=32
+performance.normal-prio-threads=32
+performance.low-prio-threads=32
+performance.least-prio-threads=4
+client.event-threads=8
+server.event-threads=8
network.remote-dio=disable
cluster.eager-lock=enable
cluster.quorum-type=auto
diff --git a/extras/group-virt.example b/extras/group-virt.example
index c2ce89d7b9c..cc37c98a25c 100644
--- a/extras/group-virt.example
+++ b/extras/group-virt.example
@@ -2,7 +2,8 @@ performance.quick-read=off
performance.read-ahead=off
performance.io-cache=off
performance.low-prio-threads=32
-network.remote-dio=enable
+network.remote-dio=disable
+performance.strict-o-direct=on
cluster.eager-lock=enable
cluster.quorum-type=auto
cluster.server-quorum-type=server
@@ -16,3 +17,8 @@ cluster.choose-local=off
client.event-threads=4
server.event-threads=4
performance.client-io-threads=on
+network.ping-timeout=20
+server.tcp-user-timeout=20
+server.keepalive-time=10
+server.keepalive-interval=2
+server.keepalive-count=5
diff --git a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
index 885ed03ad5b..1f2564b44ff 100755
--- a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
+++ b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
@@ -79,9 +79,9 @@ done
if [ "$option" == "disable" ]; then
# Unmount the volume on all the nodes
- umount /var/run/gluster/shared_storage
- cat /etc/fstab | grep -v "gluster_shared_storage /var/run/gluster/shared_storage/" > /var/run/gluster/fstab.tmp
- mv /var/run/gluster/fstab.tmp /etc/fstab
+ umount /run/gluster/shared_storage
+ cat /etc/fstab | grep -v "gluster_shared_storage /run/gluster/shared_storage/" > /run/gluster/fstab.tmp
+ mv /run/gluster/fstab.tmp /etc/fstab
fi
if [ "$is_originator" == 1 ]; then
@@ -104,8 +104,15 @@ function check_volume_status()
echo $status
}
-mount_cmd="mount -t glusterfs $local_node_hostname:/gluster_shared_storage \
- /var/run/gluster/shared_storage"
+key=`echo $5 | cut -d '=' -f 1`
+val=`echo $5 | cut -d '=' -f 2`
+if [ "$key" == "transport.address-family" ]; then
+ mount_cmd="mount -t glusterfs -o xlator-option=transport.address-family=inet6 \
+ $local_node_hostname:/gluster_shared_storage /run/gluster/shared_storage"
+else
+ mount_cmd="mount -t glusterfs $local_node_hostname:/gluster_shared_storage \
+ /run/gluster/shared_storage"
+fi
if [ "$option" == "enable" ]; then
retry=0;
@@ -120,10 +127,10 @@ if [ "$option" == "enable" ]; then
status=$(check_volume_status)
done
# Mount the volume on all the nodes
- umount /var/run/gluster/shared_storage
- mkdir -p /var/run/gluster/shared_storage
+ umount /run/gluster/shared_storage
+ mkdir -p /run/gluster/shared_storage
$mount_cmd
- cp /etc/fstab /var/run/gluster/fstab.tmp
- echo "$local_node_hostname:/gluster_shared_storage /var/run/gluster/shared_storage/ glusterfs defaults 0 0" >> /var/run/gluster/fstab.tmp
- mv /var/run/gluster/fstab.tmp /etc/fstab
+ cp /etc/fstab /run/gluster/fstab.tmp
+ echo "$local_node_hostname:/gluster_shared_storage /run/gluster/shared_storage/ glusterfs defaults 0 0" >> /run/gluster/fstab.tmp
+ mv /run/gluster/fstab.tmp /etc/fstab
fi
diff --git a/extras/hook-scripts/start/post/Makefile.am b/extras/hook-scripts/start/post/Makefile.am
index e32546dc999..792019d3c9f 100644
--- a/extras/hook-scripts/start/post/Makefile.am
+++ b/extras/hook-scripts/start/post/Makefile.am
@@ -1,4 +1,4 @@
-EXTRA_DIST = S29CTDBsetup.sh S30samba-start.sh
+EXTRA_DIST = S29CTDBsetup.sh S30samba-start.sh S31ganesha-start.sh
hookdir = $(GLUSTERD_WORKDIR)/hooks/1/start/post/
if WITH_SERVER
diff --git a/extras/hook-scripts/start/post/S31ganesha-start.sh b/extras/hook-scripts/start/post/S31ganesha-start.sh
new file mode 100755
index 00000000000..7ad6f23ad06
--- /dev/null
+++ b/extras/hook-scripts/start/post/S31ganesha-start.sh
@@ -0,0 +1,122 @@
+#!/bin/bash
+# glusterd "start volume" post hook: dynamically export the volume via
+# NFS-Ganesha's dbus interface when ganesha.enable is on for the volume.
+PROGNAME="Sganesha-start"
+OPTSPEC="volname:,gd-workdir:"
+VOL=
+# integer attribute so EXPORT_ID arithmetic below works without $(( ))
+declare -i EXPORT_ID
+ganesha_key="ganesha.enable"
+GANESHA_DIR="/run/gluster/shared_storage/nfs-ganesha"
+CONF1="$GANESHA_DIR/ganesha.conf"
+GLUSTERD_WORKDIR=
+
+# parse_args -- pull --volname and --gd-workdir out of the arguments
+# glusterd passes to the hook, into VOL and GLUSTERD_WORKDIR.
+function parse_args ()
+{
+    # use --name (getopt reads "-name" as "-n ame"), quote "$PROGNAME",
+    # and terminate option parsing with "--" before the quoted "$@" so
+    # the hook arguments survive word splitting intact
+    ARGS=$(getopt -l $OPTSPEC -o "o" --name "$PROGNAME" -- "$@")
+    eval set -- "$ARGS"
+
+    while true; do
+        case $1 in
+        --volname)
+            shift
+            VOL=$1
+            ;;
+        --gd-workdir)
+            shift
+            GLUSTERD_WORKDIR=$1
+            ;;
+        *)
+            shift
+            break
+            ;;
+        esac
+        shift
+    done
+}
+
+
+
+#This function generates a new export entry as export.volume_name.conf
+#on stdout; the Export_Id placeholder (2) is patched by the caller.
+function write_conf()
+{
+echo -e "# WARNING : Using Gluster CLI will overwrite manual
+# changes made to this file. To avoid it, edit the
+# file, copy it over to all the NFS-Ganesha nodes
+# and run ganesha-ha.sh --refresh-config."
+
+echo "EXPORT{"
+echo "      Export_Id = 2;"
+echo "      Path = \"/$VOL\";"
+echo "      FSAL {"
+echo "           name = \"GLUSTER\";"
+echo "           hostname=\"localhost\";"
+echo "           volume=\"$VOL\";"
+echo "           }"
+echo "      Access_type = RW;"
+echo "      Disable_ACL = true;"
+echo "      Squash=\"No_root_squash\";"
+echo "      Pseudo=\"/$VOL\";"
+echo "      Protocols = \"3\", \"4\" ;"
+echo "      Transports = \"UDP\",\"TCP\";"
+echo "      SecType = \"sys\";"
+echo "}"
+}
+
+#It adds the export dynamically by sending dbus signals
+#NOTE(review): the function ignores its argument and reads the globals
+#VOL and EXPORT_ID set by the caller -- confirm before refactoring
+function export_add()
+{
+    dbus-send --print-reply --system --dest=org.ganesha.nfsd \
+/org/ganesha/nfsd/ExportMgr org.ganesha.nfsd.exportmgr.AddExport \
+string:$GANESHA_DIR/exports/export.$VOL.conf string:"EXPORT(Export_Id=$EXPORT_ID)"
+
+}
+
+# based on src/scripts/ganeshactl/Ganesha/export_mgr.py
+# is_exported -- return 0 (true) when the running ganesha daemon already
+# lists /<volume> among its exports, non-zero otherwise.
+function is_exported()
+{
+    local volume="${1}"
+
+    dbus-send --type=method_call --print-reply --system \
+              --dest=org.ganesha.nfsd /org/ganesha/nfsd/ExportMgr \
+              org.ganesha.nfsd.exportmgr.ShowExports \
+        | grep -w -q "/${volume}"
+
+    return $?
+}
+
+# Check the info file (contains the volume options) to see if Ganesha is
+# enabled for this volume.  Returns 0 (true) when ganesha.enable=on.
+#   ${1} : volume name
+function ganesha_enabled()
+{
+    local volume="${1}"
+    # use the local parameter, not the global VOL, so the function
+    # answers for whichever volume it was asked about
+    local info_file="${GLUSTERD_WORKDIR}/vols/${volume}/info"
+    local enabled="off"
+
+    enabled=$(grep -w ${ganesha_key} ${info_file} | cut -d"=" -f2)
+
+    [ "${enabled}" == "on" ]
+
+    return $?
+}
+
+parse_args $@
+
+# export the volume only if ganesha.enable is on and it is not already
+# exported by the running daemon
+if ganesha_enabled ${VOL} && ! is_exported ${VOL}
+then
+    if [ ! -e ${GANESHA_DIR}/exports/export.${VOL}.conf ]
+    then
+        #Remove export entry from nfs-ganesha.conf
+        sed -i /$VOL.conf/d $CONF1
+        write_conf ${VOL} > ${GANESHA_DIR}/exports/export.${VOL}.conf
+        # .export_added tracks the last Export_Id handed out cluster-wide;
+        # plain "EXPORT_ID+1" works because EXPORT_ID is declare -i
+        EXPORT_ID=`cat $GANESHA_DIR/.export_added`
+        EXPORT_ID=EXPORT_ID+1
+        echo $EXPORT_ID > $GANESHA_DIR/.export_added
+        sed -i s/Export_Id.*/"Export_Id=$EXPORT_ID;"/ \
+            $GANESHA_DIR/exports/export.$VOL.conf
+        echo "%include \"$GANESHA_DIR/exports/export.$VOL.conf\"" >> $CONF1
+    else
+        # config already exists; reuse its recorded Export_Id
+        EXPORT_ID=$(grep ^[[:space:]]*Export_Id $GANESHA_DIR/exports/export.$VOL.conf |\
+                    awk -F"[=,;]" '{print $2}' | tr -d '[[:space:]]')
+    fi
+    export_add $VOL
+fi
+
+exit 0
diff --git a/extras/mount-shared-storage.sh b/extras/mount-shared-storage.sh
index e99233f7e1e..cc40e13c3e3 100755
--- a/extras/mount-shared-storage.sh
+++ b/extras/mount-shared-storage.sh
@@ -21,7 +21,7 @@ do
continue
fi
- mount -t glusterfs "${arr[0]}" "${arr[1]}"
+ mount -t glusterfs -o "${arr[3]}" "${arr[0]}" "${arr[1]}"
#wait for few seconds
sleep 10
diff --git a/extras/ocf/volume.in b/extras/ocf/volume.in
index 46dd20b8ced..76cc649e55f 100755
--- a/extras/ocf/volume.in
+++ b/extras/ocf/volume.in
@@ -6,6 +6,7 @@
# HA resource
#
# Authors: Florian Haas (hastexo Professional Services GmbH)
+# Jiri Lunacek (Hosting90 Systems s.r.o.)
#
# License: GNU General Public License (GPL)
@@ -54,6 +55,14 @@ must have clone ordering enabled.
<shortdesc lang="en">gluster executable</shortdesc>
<content type="string" default="$OCF_RESKEY_binary_default"/>
</parameter>
+ <parameter name="peer_map">
+ <longdesc lang="en">
+ Mapping of hostname - peer name in the gluster cluster
+ in format hostname1:peername1,hostname2:peername2,...
+ </longdesc>
+ <shortdesc lang="en">gluster peer map</shortdesc>
+ <content type="string" default=""/>
+ </parameter>
</parameters>
<actions>
<action name="start" timeout="20" />
@@ -68,6 +77,10 @@ EOF
}
+if [ -n "${OCF_RESKEY_peer_map}" ]; then
+ SHORTHOSTNAME=`echo "${OCF_RESKEY_peer_map}" | egrep -o "$SHORTHOSTNAME\:[^,]+" | awk -F: '{print $2}'`
+fi
+
volume_getdir() {
local voldir
voldir="@GLUSTERD_WORKDIR@/vols/${OCF_RESKEY_volname}"
@@ -108,6 +121,10 @@ volume_getpids() {
volpid_dir=`volume_getpid_dir`
bricks=`volume_getbricks`
+
+ if [ -z "$bricks" ]; then
+ return 1
+ fi
for brick in ${bricks}; do
pidfile="${volpid_dir}/${SHORTHOSTNAME}${brick}.pid"
@@ -214,6 +231,11 @@ volume_validate_all() {
# Test for required binaries
check_binary $OCF_RESKEY_binary
+
+ if [ -z "$SHORTHOSTNAME" ]; then
+ ocf_log err 'Unable to get host in node map'
+ return $OCF_ERR_CONFIGURED
+ fi
return $OCF_SUCCESS
}
diff --git a/extras/quota/quota_fsck.py b/extras/quota/quota_fsck.py
index f03895de114..e62f7fc52a3 100755
--- a/extras/quota/quota_fsck.py
+++ b/extras/quota/quota_fsck.py
@@ -52,17 +52,17 @@ epilog_msg='''
def print_msg(log_type, path, xattr_dict = {}, stbuf = "", dir_size = None):
if log_type == QUOTA_VERBOSE:
- print('%-24s %-60s\nxattr_values: %s\n%s\n' % {"Verbose", path, xattr_dict, stbuf})
+ print('%-24s %-60s\nxattr_values: %s\n%s\n' % ("Verbose", path, xattr_dict, stbuf))
elif log_type == QUOTA_META_ABSENT:
- print('%-24s %-60s\n%s\n' % {"Quota-Meta Absent", path, xattr_dict})
+ print('%-24s %-60s\n%s\n' % ("Quota-Meta Absent", path, xattr_dict))
elif log_type == QUOTA_SIZE_MISMATCH:
print("mismatch")
if dir_size is not None:
- print('%24s %60s %12s %12s' % {"Size Mismatch", path, xattr_dict['contri_size'],
- dir_size})
+ print('%24s %60s %12s %12s' % ("Size Mismatch", path,
+ xattr_dict, dir_size))
else:
- print('%-24s %-60s %-12i %-12i' % {"Size Mismatch", path, xattr_dict['contri_size'],
- stbuf.st_size})
+ print('%-24s %-60s %-12s %-12s' % ("Size Mismatch", path, xattr_dict,
+ stbuf.st_size))
def size_differs_lot(s1, s2):
'''
@@ -156,12 +156,10 @@ def get_quota_xattr_brick(dpath):
xattr_dict = {}
xattr_dict['parents'] = {}
- for xattr in pairs:
+ for xattr in pairs[1:]:
+ xattr = xattr.decode("utf-8")
xattr_key = xattr.split("=")[0]
- if re.search("# file:", xattr_key):
- # skip the file comment
- continue
- elif xattr_key is "":
+ if xattr_key == "":
# skip any empty lines
continue
elif not re.search("quota", xattr_key):
diff --git a/extras/snap_scheduler/gcron.py b/extras/snap_scheduler/gcron.py
index 1127be0e976..0e4df77d481 100755
--- a/extras/snap_scheduler/gcron.py
+++ b/extras/snap_scheduler/gcron.py
@@ -19,10 +19,10 @@ import logging.handlers
import fcntl
-GCRON_TASKS = "/var/run/gluster/shared_storage/snaps/glusterfs_snap_cron_tasks"
+GCRON_TASKS = "/run/gluster/shared_storage/snaps/glusterfs_snap_cron_tasks"
GCRON_CROND_TASK = "/etc/cron.d/glusterfs_snap_cron_tasks"
GCRON_RELOAD_FLAG = "/var/run/gluster/crond_task_reload_flag"
-LOCK_FILE_DIR = "/var/run/gluster/shared_storage/snaps/lock_files/"
+LOCK_FILE_DIR = "/run/gluster/shared_storage/snaps/lock_files/"
log = logging.getLogger("gcron-logger")
start_time = 0.0
@@ -38,7 +38,8 @@ def initLogger(script_name):
sh.setFormatter(formatter)
process = subprocess.Popen(["gluster", "--print-logdir"],
- stdout=subprocess.PIPE)
+ stdout=subprocess.PIPE,
+ universal_newlines=True)
out, err = process.communicate()
if process.returncode == 0:
logfile = os.path.join(out.strip(), script_name[:-3]+".log")
diff --git a/extras/snap_scheduler/snap_scheduler.py b/extras/snap_scheduler/snap_scheduler.py
index a66c5e3d5ce..e8fcc449a9b 100755
--- a/extras/snap_scheduler/snap_scheduler.py
+++ b/extras/snap_scheduler/snap_scheduler.py
@@ -67,7 +67,7 @@ except ImportError:
SCRIPT_NAME = "snap_scheduler"
scheduler_enabled = False
log = logging.getLogger(SCRIPT_NAME)
-SHARED_STORAGE_DIR="/var/run/gluster/shared_storage"
+SHARED_STORAGE_DIR="/run/gluster/shared_storage"
GCRON_DISABLED = SHARED_STORAGE_DIR+"/snaps/gcron_disabled"
GCRON_ENABLED = SHARED_STORAGE_DIR+"/snaps/gcron_enabled"
GCRON_TASKS = SHARED_STORAGE_DIR+"/snaps/glusterfs_snap_cron_tasks"
@@ -149,7 +149,7 @@ def initLogger():
sh.setFormatter(formatter)
process = subprocess.Popen(["gluster", "--print-logdir"],
- stdout=subprocess.PIPE)
+ stdout=subprocess.PIPE, universal_newlines=True)
logfile = os.path.join(process.stdout.read()[:-1], SCRIPT_NAME + ".log")
fh = logging.FileHandler(logfile)
diff --git a/extras/statedumpparse.rb b/extras/statedumpparse.rb
new file mode 100755
index 00000000000..1aff43377db
--- /dev/null
+++ b/extras/statedumpparse.rb
@@ -0,0 +1,208 @@
+#!/usr/bin/env ruby
+
+require 'time'
+require 'optparse'
+
+unless Array.instance_methods.include? :to_h
+ class Array
+ def to_h
+ h = {}
+ each { |k,v| h[k]=v }
+ h
+ end
+ end
+end
+
+# statedump.c:gf_proc_dump_mempool_info uses a five-dash record separator,
+# client.c:client_fd_lk_ctx_dump uses a six-dash record separator.
+ARRSEP = /^(-{5,6}=-{5,6})?$/
+HEAD = /^\[(.*)\]$/
+INPUT_FORMATS = %w[statedump json]
+
+format = 'json'
+input_format = 'statedump'
+tz = '+0000'
+memstat_select,memstat_reject = //,/\Z./
+OptionParser.new do |op|
+ op.banner << " [<] <STATEDUMP>"
+ op.on("-f", "--format=F", "json/yaml/memstat(-[plain|human|json])") { |s| format = s }
+ op.on("--input-format=F", INPUT_FORMATS.join(?/)) { |s| input_format = s }
+ op.on("--timezone=T",
+ "time zone to apply to zoneless timestamps [default UTC]") { |s| tz = s }
+ op.on("--memstat-select=RX", "memstat: select memory types matching RX") { |s|
+ memstat_select = Regexp.new s
+ }
+ op.on("--memstat-reject=RX", "memstat: reject memory types matching RX") { |s|
+ memstat_reject = Regexp.new s
+ }
+end.parse!
+
+
+if format =~ /\Amemstat(?:-(.*))?/
+ memstat_type = $1 || 'plain'
+ unless %w[plain human json].include? memstat_type
+ raise "unknown memstat type #{memstat_type.dump}"
+ end
+ format = 'memstat'
+end
+
+repr, logsep = case format
+when 'yaml'
+ require 'yaml'
+
+ [proc { |e| e.to_yaml }, "\n"]
+when 'json', 'memstat'
+ require 'json'
+
+ [proc { |e| e.to_json }, " "]
+else
+ raise "unkonwn format '#{format}'"
+end
+formatter = proc { |e| puts repr.call(e) }
+
+INPUT_FORMATS.include? input_format or raise "unkwown input format '#{input_format}'"
+
+dumpinfo = {}
+
+# parse a statedump entry
+elem_cbk = proc { |s,&cbk|
+ arraylike = false
+ s.grep(/\S/).empty? and next
+ head = nil
+ while s.last =~ /^\s*$/
+ s.pop
+ end
+ body = catch { |misc2|
+ s[0] =~ HEAD ? (head = $1) : (throw misc2)
+ body = [[]]
+ s[1..-1].each { |l|
+ if l =~ ARRSEP
+ arraylike = true
+ body << []
+ next
+ end
+ body.last << l
+ }
+
+ body.reject(&:empty?).map { |e|
+ ea = e.map { |l|
+ k,v = l.split("=",2)
+ m = /\A(0|-?[1-9]\d*)(\.\d+)?\Z/.match v
+ [k, m ? (m[2] ? Float(v) : Integer(v)) : v]
+ }
+ begin
+ ea.to_h
+ rescue
+ throw misc2
+ end
+ }
+ }
+
+ if body
+ cbk.call [head, arraylike ? body : (body.empty? ? {} : body[0])]
+ else
+ STDERR.puts ["WARNING: failed to parse record:", repr.call(s)].join(logsep)
+ end
+}
+
+# aggregator routine
+aggr = case format
+when 'memstat'
+ meminfo = {}
+ # commit memory-related entries to meminfo
+ proc { |k,r|
+ case k
+ when /memusage/
+ (meminfo["GF_MALLOC"]||={})[k] ||= r["size"] if k =~ memstat_select and k !~ memstat_reject
+ when "mempool"
+ r.each {|e|
+ kk = "mempool:#{e['pool-name']}"
+ (meminfo["mempool"]||={})[kk] ||= e["size"] if kk =~ memstat_select and kk !~ memstat_reject
+ }
+ end
+ }
+else
+ # just format data, don't actually aggregate anything
+ proc { |pair| formatter.call pair }
+end
+
+# processing the data
+case input_format
+when 'statedump'
+ acc = []
+ $<.each { |l|
+ l = l.strip
+ if l =~ /^(DUMP-(?:START|END)-TIME):\s+(.*)/
+ dumpinfo["_meta"]||={}
+ (dumpinfo["_meta"]["date"]||={})[$1] = Time.parse([$2, tz].join " ")
+ next
+ end
+
+ if l =~ HEAD
+ elem_cbk.call(acc, &aggr)
+ acc = [l]
+ next
+ end
+
+ acc << l
+ }
+ elem_cbk.call(acc, &aggr)
+when 'json'
+ $<.each { |l|
+ r = JSON.load l
+ case r
+ when Array
+ aggr[r]
+ when Hash
+ dumpinfo.merge! r
+ end
+ }
+end
+
+# final actions: output aggregated data
+case format
+when 'memstat'
+ ma = meminfo.values.map(&:to_a).inject(:+)
+ totals = meminfo.map { |coll,h| [coll, h.values.inject(:+)] }.to_h
+ tt = ma.transpose[1].inject(:+)
+
+ summary_sep,showm = case memstat_type
+ when 'json'
+ ["", proc { |k,v| puts({type: k, value: v}.to_json) }]
+ when 'plain', 'human'
+ # human-friendly number representation
+ hr = proc { |n|
+ qa = %w[B kB MB GB]
+ q = ((1...qa.size).find {|i| n < (1 << i*10)} || qa.size) - 1
+ "%.2f%s" % [n.to_f / (1 << q*10), qa[q]]
+ }
+
+ templ = "%{val} %{key}"
+ tft = proc { |t| t }
+ nft = if memstat_type == 'human'
+ nw = [ma.transpose[1], totals.values, tt].flatten.map{|n| hr[n].size}.max
+ proc { |n|
+ hn = hr[n]
+ " " * (nw - hn.size) + hn
+ }
+ else
+ nw = tt.to_s.size
+ proc { |n| "%#{nw}d" % n }
+ end
+ ## Alternative template, key first:
+ # templ = "%{key} %{val}"
+ # tw = ma.transpose[0].map(&:size).max
+ # tft = proc { |t| t + " " * [tw - t.size, 0].max }
+ # nft = (memstat_type == 'human') ? hr : proc { |n| n }
+ ["\n", proc { |k,v| puts templ % {key: tft[k], val: nft[v]} }]
+ else
+ raise 'this should be impossible'
+ end
+
+ ma.sort_by { |k,v| v }.each(&showm)
+ print summary_sep
+ totals.each { |coll,t| showm.call "Total #{coll}", t }
+ showm.call "TOTAL", tt
+else
+ formatter.call dumpinfo
+end
diff --git a/extras/systemd/gluster-ta-volume.service.in b/extras/systemd/gluster-ta-volume.service.in
index 452c01c419f..2802bca05bf 100644
--- a/extras/systemd/gluster-ta-volume.service.in
+++ b/extras/systemd/gluster-ta-volume.service.in
@@ -4,7 +4,7 @@ After=network.target
[Service]
Environment="LOG_LEVEL=WARNING"
-ExecStart=@prefix@/sbin/glusterfsd -N --volfile-id ta -f @GLUSTERD_WORKDIR@/thin-arbiter/thin-arbiter.vol --brick-port 24007 --xlator-option ta-server.transport.socket.listen-port=24007
+ExecStart=@prefix@/sbin/glusterfsd -N --volfile-id ta -f @GLUSTERD_WORKDIR@/thin-arbiter/thin-arbiter.vol --brick-port 24007 --xlator-option ta-server.transport.socket.listen-port=24007 -LWARNING
Restart=always
KillMode=process
SuccessExitStatus=15
diff --git a/extras/systemd/glusterd.service.in b/extras/systemd/glusterd.service.in
index 89ef402ac83..abb0d82911f 100644
--- a/extras/systemd/glusterd.service.in
+++ b/extras/systemd/glusterd.service.in
@@ -1,6 +1,8 @@
[Unit]
Description=GlusterFS, a clustered file-system server
Documentation=man:glusterd(8)
+StartLimitBurst=6
+StartLimitIntervalSec=3600
Requires=@RPCBIND_SERVICE@
After=network.target @RPCBIND_SERVICE@
Before=network-online.target
@@ -10,10 +12,15 @@ Type=forking
PIDFile=@localstatedir@/run/glusterd.pid
LimitNOFILE=65536
Environment="LOG_LEVEL=INFO"
-EnvironmentFile=-@sysconfdir@/sysconfig/glusterd
+EnvironmentFile=-@SYSCONF_DIR@/sysconfig/glusterd
ExecStart=@prefix@/sbin/glusterd -p @localstatedir@/run/glusterd.pid --log-level $LOG_LEVEL $GLUSTERD_OPTIONS
KillMode=process
+TimeoutSec=300
SuccessExitStatus=15
+Restart=on-abnormal
+RestartSec=60
+StartLimitBurst=6
+StartLimitInterval=3600
[Install]
WantedBy=multi-user.target
diff --git a/extras/thin-arbiter/thin-arbiter.vol b/extras/thin-arbiter/thin-arbiter.vol
index 244a4caf485..c76babc7b3c 100644
--- a/extras/thin-arbiter/thin-arbiter.vol
+++ b/extras/thin-arbiter/thin-arbiter.vol
@@ -33,11 +33,10 @@ volume ta-index
subvolumes ta-io-threads
end-volume
-volume ta-io-stats
+volume /mnt/thin-arbiter
type debug/io-stats
option count-fop-hits off
option latency-measurement off
- option log-level WARNING
option unique-id /mnt/thin-arbiter
subvolumes ta-index
end-volume
@@ -54,5 +53,5 @@ volume ta-server
option auth-path /mnt/thin-arbiter
option transport.address-family inet
option transport-type tcp
- subvolumes ta-io-stats
+ subvolumes /mnt/thin-arbiter
end-volume
diff --git a/extras/who-wrote-glusterfs/gitdm.domain-map b/extras/who-wrote-glusterfs/gitdm.domain-map
index 315355b08b8..7cd2bbd605b 100644
--- a/extras/who-wrote-glusterfs/gitdm.domain-map
+++ b/extras/who-wrote-glusterfs/gitdm.domain-map
@@ -4,6 +4,7 @@
active.by ActiveCloud
appeartv.com Appear TV
cern.ch CERN
+cmss.chinamobile.com China Mobile(Suzhou) Software Technology
datalab.es DataLab S.L.
fb.com Facebook
fedoraproject.org Fedora Project
diff --git a/geo-replication/gsyncd.conf.in b/geo-replication/gsyncd.conf.in
index 9155cd87bbe..9688c79fab7 100644
--- a/geo-replication/gsyncd.conf.in
+++ b/geo-replication/gsyncd.conf.in
@@ -123,7 +123,7 @@ type=bool
help=Use this to set Active Passive mode to meta-volume.
[meta-volume-mnt]
-value=/var/run/gluster/shared_storage
+value=/run/gluster/shared_storage
help=Meta Volume or Shared Volume mount path
[allow-network]
@@ -266,7 +266,9 @@ allowed_values=ERROR,INFO,WARNING,DEBUG
[ssh-port]
value=22
-validation=int
+validation=minmax
+min=1
+max=65535
help=Set SSH port
type=int
diff --git a/geo-replication/setup.py b/geo-replication/setup.py
index 6d678baa2f7..0eae469d2d6 100644
--- a/geo-replication/setup.py
+++ b/geo-replication/setup.py
@@ -1,7 +1,7 @@
#
# Copyright (c) 2011-2014 Red Hat, Inc. <http://www.redhat.com>
# This file is part of GlusterFS.
-
+#
# This file is licensed to you under your choice of the GNU Lesser
# General Public License, version 3 or any later version (LGPLv3 or
# later), or the GNU General Public License, version 2 (GPLv2), in all
@@ -20,11 +20,11 @@ setup(
name=name,
version="",
description='GlusterFS Geo Replication',
- license='',
+ license='GPLV2 and LGPLV3+',
author='Red Hat, Inc.',
author_email='gluster-devel@gluster.org',
url='http://www.gluster.org',
- packages=['syncdaemon', ],
+ packages=[name, ],
test_suite='nose.collector',
install_requires=[],
scripts=[],
diff --git a/geo-replication/src/peer_mountbroker.py.in b/geo-replication/src/peer_mountbroker.py.in
index 96a72643ac3..40b90ffc560 100644
--- a/geo-replication/src/peer_mountbroker.py.in
+++ b/geo-replication/src/peer_mountbroker.py.in
@@ -222,8 +222,10 @@ class CliSetup(Cmd):
name = "setup"
def args(self, parser):
- parser.add_argument("mount_root")
- parser.add_argument("group")
+ parser.add_argument("mount_root",
+ help="Path to the mountbroker-root directory.")
+ parser.add_argument("group",
+ help="Group to be used for setup.")
def run(self, args):
out = execute_in_peers("node-setup", [args.mount_root,
@@ -333,8 +335,10 @@ class CliAdd(Cmd):
name = "add"
def args(self, parser):
- parser.add_argument("volume")
- parser.add_argument("user")
+ parser.add_argument("volume",
+ help="Volume to be added.")
+ parser.add_argument("user",
+ help="User for which volume is to be added.")
def run(self, args):
out = execute_in_peers("node-add", [args.volume,
@@ -374,8 +378,9 @@ class CliRemove(Cmd):
name = "remove"
def args(self, parser):
- parser.add_argument("--volume", default=".")
- parser.add_argument("--user", default=".")
+ parser.add_argument("--volume", default=".", help="Volume to be removed.")
+ parser.add_argument("--user", default=".",
+ help="User for which volume has to be removed.")
def run(self, args):
out = execute_in_peers("node-remove", [args.volume,
diff --git a/geo-replication/syncdaemon/Makefile.am b/geo-replication/syncdaemon/Makefile.am
index 62c5ce7fe30..d70e3368faf 100644
--- a/geo-replication/syncdaemon/Makefile.am
+++ b/geo-replication/syncdaemon/Makefile.am
@@ -2,7 +2,7 @@ syncdaemondir = $(GLUSTERFS_LIBEXECDIR)/python/syncdaemon
syncdaemon_PYTHON = rconf.py gsyncd.py __init__.py master.py README.md repce.py \
resource.py syncdutils.py monitor.py libcxattr.py gsyncdconfig.py \
- libgfchangelog.py changelogagent.py gsyncdstatus.py conf.py logutils.py \
+ libgfchangelog.py gsyncdstatus.py conf.py logutils.py \
subcmds.py argsupgrade.py py2py3.py
CLEANFILES =
diff --git a/geo-replication/syncdaemon/README.md b/geo-replication/syncdaemon/README.md
index 2a202e3f99e..5ab785ae669 100644
--- a/geo-replication/syncdaemon/README.md
+++ b/geo-replication/syncdaemon/README.md
@@ -19,7 +19,6 @@ INSTALLATION
As of now, the supported way of operation is running from the source directory or using the RPMs given.
-If you use Python 2.4.x, you need to install the [Ctypes module](http://python.net/crew/theller/ctypes/).
CONFIGURATION
-------------
diff --git a/geo-replication/syncdaemon/changelogagent.py b/geo-replication/syncdaemon/changelogagent.py
deleted file mode 100644
index c5fdbc3a74f..00000000000
--- a/geo-replication/syncdaemon/changelogagent.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/python3
-#
-# Copyright (c) 2011-2014 Red Hat, Inc. <http://www.redhat.com>
-# This file is part of GlusterFS.
-
-# This file is licensed to you under your choice of the GNU Lesser
-# General Public License, version 3 or any later version (LGPLv3 or
-# later), or the GNU General Public License, version 2 (GPLv2), in all
-# cases as published by the Free Software Foundation.
-#
-
-import logging
-import syncdutils
-from syncdutils import select, CHANGELOG_AGENT_SERVER_VERSION
-from repce import RepceServer
-
-
-class _MetaChangelog(object):
-
- def __getattr__(self, meth):
- from libgfchangelog import Changes as LChanges
- xmeth = [m for m in dir(LChanges) if m[0] != '_']
- if meth not in xmeth:
- return
- for m in xmeth:
- setattr(self, m, getattr(LChanges, m))
- return getattr(self, meth)
-
-Changes = _MetaChangelog()
-
-
-class Changelog(object):
- def version(self):
- return CHANGELOG_AGENT_SERVER_VERSION
-
- def init(self):
- return Changes.cl_init()
-
- def register(self, cl_brick, cl_dir, cl_log, cl_level, retries=0):
- return Changes.cl_register(cl_brick, cl_dir, cl_log, cl_level, retries)
-
- def scan(self):
- return Changes.cl_scan()
-
- def getchanges(self):
- return Changes.cl_getchanges()
-
- def done(self, clfile):
- return Changes.cl_done(clfile)
-
- def history(self, changelog_path, start, end, num_parallel):
- return Changes.cl_history_changelog(changelog_path, start, end,
- num_parallel)
-
- def history_scan(self):
- return Changes.cl_history_scan()
-
- def history_getchanges(self):
- return Changes.cl_history_getchanges()
-
- def history_done(self, clfile):
- return Changes.cl_history_done(clfile)
-
-
-class ChangelogAgent(object):
- def __init__(self, obj, fd_tup):
- (inf, ouf, rw, ww) = fd_tup.split(',')
- repce = RepceServer(obj, int(inf), int(ouf), 1)
- t = syncdutils.Thread(target=lambda: (repce.service_loop(),
- syncdutils.finalize()))
- t.start()
- logging.info('Agent listining...')
-
- select((), (), ())
-
-
-def agent(obj, fd_tup):
- return ChangelogAgent(obj, fd_tup)
diff --git a/geo-replication/syncdaemon/gsyncd.py b/geo-replication/syncdaemon/gsyncd.py
index 8940384616a..257ed72c6ae 100644
--- a/geo-replication/syncdaemon/gsyncd.py
+++ b/geo-replication/syncdaemon/gsyncd.py
@@ -22,8 +22,8 @@ import gsyncdconfig as gconf
from rconf import rconf
import subcmds
from conf import GLUSTERD_WORKDIR, GLUSTERFS_CONFDIR, GCONF_VERSION
-from syncdutils import set_term_handler, finalize, lf
-from syncdutils import log_raise_exception, FreeObject, escape
+from syncdutils import (set_term_handler, finalize, lf,
+ log_raise_exception, FreeObject, escape)
import argsupgrade
@@ -79,8 +79,6 @@ def main():
help="feedback fd between monitor and worker")
p.add_argument("--local-node", help="Local master node")
p.add_argument("--local-node-id", help="Local Node ID")
- p.add_argument("--rpc-fd",
- help="Read and Write fds for worker-agent communication")
p.add_argument("--subvol-num", type=int, help="Subvolume number")
p.add_argument("--is-hottier", action="store_true",
help="Is this brick part of hot tier")
@@ -92,19 +90,6 @@ def main():
p.add_argument("-c", "--config-file", help="Config File")
p.add_argument("--debug", action="store_true")
- # Agent
- p = sp.add_parser("agent")
- p.add_argument("master", help="Master Volume Name")
- p.add_argument("slave", help="Slave details user@host::vol format")
- p.add_argument("--local-path", help="Local brick path")
- p.add_argument("--local-node", help="Local master node")
- p.add_argument("--local-node-id", help="Local Node ID")
- p.add_argument("--slave-id", help="Slave Volume ID")
- p.add_argument("--rpc-fd",
- help="Read and Write fds for worker-agent communication")
- p.add_argument("-c", "--config-file", help="Config File")
- p.add_argument("--debug", action="store_true")
-
# Slave
p = sp.add_parser("slave")
p.add_argument("master", help="Master Volume Name")
@@ -271,8 +256,8 @@ def main():
# Default label to print in log file
label = args.subcmd
- if args.subcmd in ("worker", "agent"):
- # If Worker or agent, then add brick path also to label
+ if args.subcmd in ("worker"):
+ # If Worker, then add brick path also to label
label = "%s %s" % (args.subcmd, args.local_path)
elif args.subcmd == "slave":
# If Slave add Master node and Brick details
@@ -315,7 +300,7 @@ def main():
# Log message for loaded config file
if config_file is not None:
- logging.info(lf("Using session config file", path=config_file))
+ logging.debug(lf("Using session config file", path=config_file))
set_term_handler()
excont = FreeObject(exval=0)
diff --git a/geo-replication/syncdaemon/gsyncdstatus.py b/geo-replication/syncdaemon/gsyncdstatus.py
index 72bcb092f01..1a655ff8887 100644
--- a/geo-replication/syncdaemon/gsyncdstatus.py
+++ b/geo-replication/syncdaemon/gsyncdstatus.py
@@ -23,8 +23,8 @@ from datetime import datetime
from errno import EACCES, EAGAIN, ENOENT
import logging
-from syncdutils import EVENT_GEOREP_ACTIVE, EVENT_GEOREP_PASSIVE, gf_event
-from syncdutils import EVENT_GEOREP_CHECKPOINT_COMPLETED, lf
+from syncdutils import (EVENT_GEOREP_ACTIVE, EVENT_GEOREP_PASSIVE, gf_event,
+ EVENT_GEOREP_CHECKPOINT_COMPLETED, lf)
DEFAULT_STATUS = "N/A"
MONITOR_STATUS = ("Created", "Started", "Paused", "Stopped")
diff --git a/geo-replication/syncdaemon/libcxattr.py b/geo-replication/syncdaemon/libcxattr.py
index c7d69d7eb2e..e6406c36bd7 100644
--- a/geo-replication/syncdaemon/libcxattr.py
+++ b/geo-replication/syncdaemon/libcxattr.py
@@ -10,8 +10,8 @@
import os
from ctypes import CDLL, get_errno
-from py2py3 import bytearray_to_str, gr_create_string_buffer
-from py2py3 import gr_query_xattr, gr_lsetxattr, gr_lremovexattr
+from py2py3 import (bytearray_to_str, gr_create_string_buffer,
+ gr_query_xattr, gr_lsetxattr, gr_lremovexattr)
class Xattr(object):
diff --git a/geo-replication/syncdaemon/libgfchangelog.py b/geo-replication/syncdaemon/libgfchangelog.py
index 8d129567075..a3bda7282c0 100644
--- a/geo-replication/syncdaemon/libgfchangelog.py
+++ b/geo-replication/syncdaemon/libgfchangelog.py
@@ -12,130 +12,132 @@ import os
from ctypes import CDLL, RTLD_GLOBAL, get_errno, byref, c_ulong
from ctypes.util import find_library
from syncdutils import ChangelogException, ChangelogHistoryNotAvailable
-from py2py3 import gr_cl_history_changelog, gr_cl_done, gr_create_string_buffer
-from py2py3 import gr_cl_register, gr_cl_history_done, bytearray_to_str
-
-
-class Changes(object):
- libgfc = CDLL(find_library("gfchangelog"), mode=RTLD_GLOBAL,
- use_errno=True)
-
- @classmethod
- def geterrno(cls):
- return get_errno()
-
- @classmethod
- def raise_changelog_err(cls):
- errn = cls.geterrno()
- raise ChangelogException(errn, os.strerror(errn))
-
- @classmethod
- def _get_api(cls, call):
- return getattr(cls.libgfc, call)
-
- @classmethod
- def cl_init(cls):
- ret = cls._get_api('gf_changelog_init')(None)
- if ret == -1:
- cls.raise_changelog_err()
-
- @classmethod
- def cl_register(cls, brick, path, log_file, log_level, retries=0):
- ret = gr_cl_register(cls, brick, path, log_file, log_level, retries)
- if ret == -1:
- cls.raise_changelog_err()
-
- @classmethod
- def cl_scan(cls):
- ret = cls._get_api('gf_changelog_scan')()
- if ret == -1:
- cls.raise_changelog_err()
-
- @classmethod
- def cl_startfresh(cls):
- ret = cls._get_api('gf_changelog_start_fresh')()
- if ret == -1:
- cls.raise_changelog_err()
-
- @classmethod
- def cl_getchanges(cls):
- """ remove hardcoding for path name length """
- def clsort(f):
- return f.split('.')[-1]
- changes = []
- buf = gr_create_string_buffer(4096)
- call = cls._get_api('gf_changelog_next_change')
-
- while True:
- ret = call(buf, 4096)
- if ret in (0, -1):
- break
- # py2 and py3 compatibility
- result = bytearray_to_str(buf.raw[:ret - 1])
- changes.append(result)
- if ret == -1:
- cls.raise_changelog_err()
- # cleanup tracker
- cls.cl_startfresh()
- return sorted(changes, key=clsort)
-
- @classmethod
- def cl_done(cls, clfile):
- ret = gr_cl_done(cls, clfile)
- if ret == -1:
- cls.raise_changelog_err()
-
- @classmethod
- def cl_history_scan(cls):
- ret = cls._get_api('gf_history_changelog_scan')()
- if ret == -1:
- cls.raise_changelog_err()
-
- return ret
-
- @classmethod
- def cl_history_changelog(cls, changelog_path, start, end, num_parallel):
- actual_end = c_ulong()
- ret = gr_cl_history_changelog(cls, changelog_path, start, end,
- num_parallel, byref(actual_end))
- if ret == -1:
- cls.raise_changelog_err()
-
- if ret == -2:
- raise ChangelogHistoryNotAvailable()
-
- return (ret, actual_end.value)
-
- @classmethod
- def cl_history_startfresh(cls):
- ret = cls._get_api('gf_history_changelog_start_fresh')()
- if ret == -1:
- cls.raise_changelog_err()
-
- @classmethod
- def cl_history_getchanges(cls):
- """ remove hardcoding for path name length """
- def clsort(f):
- return f.split('.')[-1]
-
- changes = []
- buf = gr_create_string_buffer(4096)
- call = cls._get_api('gf_history_changelog_next_change')
-
- while True:
- ret = call(buf, 4096)
- if ret in (0, -1):
- break
- # py2 and py3 compatibility
- result = bytearray_to_str(buf.raw[:ret - 1])
- changes.append(result)
- if ret == -1:
- cls.raise_changelog_err()
-
- return sorted(changes, key=clsort)
-
- @classmethod
- def cl_history_done(cls, clfile):
- ret = gr_cl_history_done(cls, clfile)
- if ret == -1:
- cls.raise_changelog_err()
+from py2py3 import (gr_cl_history_changelog, gr_cl_done,
+ gr_create_string_buffer, gr_cl_register,
+ gr_cl_history_done, bytearray_to_str)
+
+
+libgfc = CDLL(
+ find_library("gfchangelog"),
+ mode=RTLD_GLOBAL,
+ use_errno=True
+)
+
+
+def _raise_changelog_err():
+ errn = get_errno()
+ raise ChangelogException(errn, os.strerror(errn))
+
+
+def _init():
+ if libgfc.gf_changelog_init(None) == -1:
+ _raise_changelog_err()
+
+
+def register(brick, path, log_file, log_level, retries=0):
+ _init()
+
+ ret = gr_cl_register(libgfc, brick, path, log_file, log_level, retries)
+
+ if ret == -1:
+ _raise_changelog_err()
+
+
+def scan():
+ ret = libgfc.gf_changelog_scan()
+ if ret == -1:
+ _raise_changelog_err()
+
+
+def startfresh():
+ ret = libgfc.gf_changelog_start_fresh()
+ if ret == -1:
+ _raise_changelog_err()
+
+
+def getchanges():
+ def clsort(cfile):
+ return cfile.split('.')[-1]
+
+ changes = []
+ buf = gr_create_string_buffer(4096)
+ call = libgfc.gf_changelog_next_change
+
+ while True:
+ ret = call(buf, 4096)
+ if ret in (0, -1):
+ break
+
+ # py2 and py3 compatibility
+ result = bytearray_to_str(buf.raw[:ret - 1])
+ changes.append(result)
+
+ if ret == -1:
+ _raise_changelog_err()
+
+ # cleanup tracker
+ startfresh()
+
+ return sorted(changes, key=clsort)
+
+
+def done(clfile):
+ ret = gr_cl_done(libgfc, clfile)
+ if ret == -1:
+ _raise_changelog_err()
+
+
+def history_scan():
+ ret = libgfc.gf_history_changelog_scan()
+ if ret == -1:
+ _raise_changelog_err()
+
+ return ret
+
+
+def history_changelog(changelog_path, start, end, num_parallel):
+ actual_end = c_ulong()
+ ret = gr_cl_history_changelog(libgfc, changelog_path, start, end,
+ num_parallel, byref(actual_end))
+ if ret == -1:
+ _raise_changelog_err()
+
+ if ret == -2:
+ raise ChangelogHistoryNotAvailable()
+
+ return (ret, actual_end.value)
+
+
+def history_startfresh():
+ ret = libgfc.gf_history_changelog_start_fresh()
+ if ret == -1:
+ _raise_changelog_err()
+
+
+def history_getchanges():
+ def clsort(cfile):
+ return cfile.split('.')[-1]
+
+ changes = []
+ buf = gr_create_string_buffer(4096)
+ call = libgfc.gf_history_changelog_next_change
+
+ while True:
+ ret = call(buf, 4096)
+ if ret in (0, -1):
+ break
+
+ # py2 and py3 compatibility
+ result = bytearray_to_str(buf.raw[:ret - 1])
+ changes.append(result)
+
+ if ret == -1:
+ _raise_changelog_err()
+
+ return sorted(changes, key=clsort)
+
+
+def history_done(clfile):
+ ret = gr_cl_history_done(libgfc, clfile)
+ if ret == -1:
+ _raise_changelog_err()
diff --git a/geo-replication/syncdaemon/master.py b/geo-replication/syncdaemon/master.py
index f02cdb4c7f8..9501aeae6b5 100644
--- a/geo-replication/syncdaemon/master.py
+++ b/geo-replication/syncdaemon/master.py
@@ -22,11 +22,13 @@ from threading import Condition, Lock
from datetime import datetime
import gsyncdconfig as gconf
+import libgfchangelog
from rconf import rconf
-from syncdutils import Thread, GsyncdError, escape_space_newline
-from syncdutils import unescape_space_newline, gauxpfx, escape
-from syncdutils import lstat, errno_wrap, FreeObject, lf, matching_disk_gfid
-from syncdutils import NoStimeAvailable, PartialHistoryAvailable
+from syncdutils import (Thread, GsyncdError, escape_space_newline,
+ unescape_space_newline, gauxpfx, escape,
+ lstat, errno_wrap, FreeObject, lf, matching_disk_gfid,
+ NoStimeAvailable, PartialHistoryAvailable,
+ host_brick_split)
URXTIME = (-1, 0)
@@ -517,7 +519,7 @@ class GMasterCommon(object):
# If crawlwrap is called when partial history available,
# then it sets register_time which is the time when geo-rep
# worker registered to changelog consumption. Since nsec is
- # not considered in register time, their are chances of skipping
+ # not considered in register time, there are chances of skipping
# changes detection in xsync crawl. This limit will be reset when
# crawlwrap is called again.
self.live_changelog_start_time = None
@@ -1465,7 +1467,7 @@ class GMasterChangelogMixin(GMasterCommon):
node = rconf.args.resource_remote
node_data = node.split("@")
node = node_data[-1]
- remote_node_ip = node.split(":")[0]
+ remote_node_ip, _ = host_brick_split(node)
self.status.set_slave_node(remote_node_ip)
def changelogs_batch_process(self, changes):
@@ -1498,9 +1500,9 @@ class GMasterChangelogMixin(GMasterCommon):
# that are _historical_ to that time.
data_stime = self.get_data_stime()
- self.changelog_agent.scan()
+ libgfchangelog.scan()
self.crawls += 1
- changes = self.changelog_agent.getchanges()
+ changes = libgfchangelog.getchanges()
if changes:
if data_stime:
logging.info(lf("slave's time",
@@ -1517,10 +1519,9 @@ class GMasterChangelogMixin(GMasterCommon):
self.changelogs_batch_process(changes)
- def register(self, register_time, changelog_agent, status):
- self.changelog_agent = changelog_agent
+ def register(self, register_time, status):
self.sleep_interval = gconf.get("change-interval")
- self.changelog_done_func = self.changelog_agent.done
+ self.changelog_done_func = libgfchangelog.done
self.tempdir = self.setup_working_dir()
self.processed_changelogs_dir = os.path.join(self.tempdir,
".processed")
@@ -1529,11 +1530,10 @@ class GMasterChangelogMixin(GMasterCommon):
class GMasterChangeloghistoryMixin(GMasterChangelogMixin):
- def register(self, register_time, changelog_agent, status):
- self.changelog_agent = changelog_agent
+ def register(self, register_time, status):
self.changelog_register_time = register_time
self.history_crawl_start_time = register_time
- self.changelog_done_func = self.changelog_agent.history_done
+ self.changelog_done_func = libgfchangelog.history_done
self.history_turns = 0
self.tempdir = self.setup_working_dir()
self.processed_changelogs_dir = os.path.join(self.tempdir,
@@ -1547,6 +1547,12 @@ class GMasterChangeloghistoryMixin(GMasterChangelogMixin):
data_stime = self.get_data_stime()
end_time = int(time.time())
+
+    # Log ignore-deletes here, as the start of a historical crawl marks a Geo-rep worker restart
+ if gconf.get("ignore-deletes"):
+ logging.info(lf('ignore-deletes config option is set',
+ stime=data_stime))
+
logging.info(lf('starting history crawl',
turns=self.history_turns,
stime=data_stime,
@@ -1561,7 +1567,7 @@ class GMasterChangeloghistoryMixin(GMasterChangelogMixin):
# location then consuming history will not work(Known issue as of now)
changelog_path = os.path.join(rconf.args.local_path,
".glusterfs/changelogs")
- ret, actual_end = self.changelog_agent.history(
+ ret, actual_end = libgfchangelog.history_changelog(
changelog_path,
data_stime[0],
end_time,
@@ -1573,10 +1579,10 @@ class GMasterChangeloghistoryMixin(GMasterChangelogMixin):
# to be processed. returns positive value as number of changelogs
# to be processed, which will be fetched using
# history_getchanges()
- while self.changelog_agent.history_scan() > 0:
+ while libgfchangelog.history_scan() > 0:
self.crawls += 1
- changes = self.changelog_agent.history_getchanges()
+ changes = libgfchangelog.history_getchanges()
if changes:
if data_stime:
logging.info(lf("slave's time",
@@ -1629,7 +1635,7 @@ class GMasterXsyncMixin(GMasterChangelogMixin):
XSYNC_MAX_ENTRIES = 1 << 13
- def register(self, register_time=None, changelog_agent=None, status=None):
+ def register(self, register_time=None, status=None):
self.status = status
self.counter = 0
self.comlist = []
diff --git a/geo-replication/syncdaemon/monitor.py b/geo-replication/syncdaemon/monitor.py
index 236afe70d11..6aa7b9dfc99 100644
--- a/geo-replication/syncdaemon/monitor.py
+++ b/geo-replication/syncdaemon/monitor.py
@@ -20,13 +20,14 @@ import random
from resource import SSH
import gsyncdconfig as gconf
+import libgfchangelog
from rconf import rconf
-from syncdutils import select, waitpid, errno_wrap, lf, grabpidfile
-from syncdutils import set_term_handler, GsyncdError
-from syncdutils import Thread, finalize, Volinfo, VolinfoFromGconf
-from syncdutils import gf_event, EVENT_GEOREP_FAULTY, get_up_nodes
+from syncdutils import (select, waitpid, errno_wrap, lf, grabpidfile,
+ set_term_handler, GsyncdError,
+ Thread, finalize, Volinfo, VolinfoFromGconf,
+ gf_event, EVENT_GEOREP_FAULTY, get_up_nodes,
+ unshare_propagation_supported)
from gsyncdstatus import GeorepStatus, set_monitor_status
-from syncdutils import unshare_propagation_supported
import py2py3
from py2py3 import pipe
@@ -81,7 +82,7 @@ class Monitor(object):
# give a chance to graceful exit
errno_wrap(os.kill, [-os.getpid(), signal.SIGTERM], [ESRCH])
- def monitor(self, w, argv, cpids, agents, slave_vol, slave_host, master,
+ def monitor(self, w, argv, cpids, slave_vol, slave_host, master,
suuid, slavenodes):
"""the monitor loop
@@ -150,7 +151,7 @@ class Monitor(object):
remote_host = "%s@%s" % (remote_user, remote_new[0])
remote_id = remote_new[1]
- # Spawn the worker and agent in lock to avoid fd leak
+ # Spawn the worker in lock to avoid fd leak
self.lock.acquire()
self.status[w[0]['dir']].set_worker_status(self.ST_INIT)
@@ -158,44 +159,10 @@ class Monitor(object):
brick=w[0]['dir'],
slave_node=remote_host))
- # Couple of pipe pairs for RPC communication b/w
- # worker and changelog agent.
-
- # read/write end for agent
- (ra, ww) = pipe()
- # read/write end for worker
- (rw, wa) = pipe()
-
- # spawn the agent process
- apid = os.fork()
- if apid == 0:
- os.close(rw)
- os.close(ww)
- args_to_agent = argv + [
- 'agent',
- rconf.args.master,
- rconf.args.slave,
- '--local-path', w[0]['dir'],
- '--local-node', w[0]['host'],
- '--local-node-id', w[0]['uuid'],
- '--slave-id', suuid,
- '--rpc-fd', ','.join([str(ra), str(wa), str(rw), str(ww)])
- ]
-
- if rconf.args.config_file is not None:
- args_to_agent += ['-c', rconf.args.config_file]
-
- if rconf.args.debug:
- args_to_agent.append("--debug")
-
- os.execv(sys.executable, args_to_agent)
-
pr, pw = pipe()
cpid = os.fork()
if cpid == 0:
os.close(pr)
- os.close(ra)
- os.close(wa)
args_to_worker = argv + [
'worker',
@@ -206,8 +173,6 @@ class Monitor(object):
'--local-node', w[0]['host'],
'--local-node-id', w[0]['uuid'],
'--slave-id', suuid,
- '--rpc-fd',
- ','.join([str(rw), str(ww), str(ra), str(wa)]),
'--subvol-num', str(w[2]),
'--resource-remote', remote_host,
'--resource-remote-id', remote_id
@@ -238,14 +203,8 @@ class Monitor(object):
os.execv(sys.executable, args_to_worker)
cpids.add(cpid)
- agents.add(apid)
os.close(pw)
- # close all RPC pipes in monitor
- os.close(ra)
- os.close(wa)
- os.close(rw)
- os.close(ww)
self.lock.release()
t0 = time.time()
@@ -254,42 +213,19 @@ class Monitor(object):
if so:
ret = nwait(cpid, os.WNOHANG)
- ret_agent = nwait(apid, os.WNOHANG)
-
- if ret_agent is not None:
- # Agent is died Kill Worker
- logging.info(lf("Changelog Agent died, Aborting Worker",
- brick=w[0]['dir']))
- errno_wrap(os.kill, [cpid, signal.SIGKILL], [ESRCH])
- nwait(cpid)
- nwait(apid)
if ret is not None:
logging.info(lf("worker died before establishing "
"connection",
brick=w[0]['dir']))
- nwait(apid) # wait for agent
else:
logging.debug("worker(%s) connected" % w[0]['dir'])
while time.time() < t0 + conn_timeout:
ret = nwait(cpid, os.WNOHANG)
- ret_agent = nwait(apid, os.WNOHANG)
if ret is not None:
logging.info(lf("worker died in startup phase",
brick=w[0]['dir']))
- nwait(apid) # wait for agent
- break
-
- if ret_agent is not None:
- # Agent is died Kill Worker
- logging.info(lf("Changelog Agent died, Aborting "
- "Worker",
- brick=w[0]['dir']))
- errno_wrap(os.kill, [cpid, signal.SIGKILL],
- [ESRCH])
- nwait(cpid)
- nwait(apid)
break
time.sleep(1)
@@ -304,12 +240,8 @@ class Monitor(object):
brick=w[0]['dir'],
timeout=conn_timeout))
errno_wrap(os.kill, [cpid, signal.SIGKILL], [ESRCH])
- nwait(apid) # wait for agent
ret = nwait(cpid)
if ret is None:
- # If worker dies, agent terminates on EOF.
- # So lets wait for agent first.
- nwait(apid)
ret = nwait(cpid)
if exit_signalled(ret):
ret = 0
@@ -333,18 +265,15 @@ class Monitor(object):
argv = [os.path.basename(sys.executable), sys.argv[0]]
cpids = set()
- agents = set()
ta = []
for wx in wspx:
def wmon(w):
- cpid, _ = self.monitor(w, argv, cpids, agents, slave_vol,
+ cpid, _ = self.monitor(w, argv, cpids, slave_vol,
slave_host, master, suuid, slavenodes)
time.sleep(1)
self.lock.acquire()
for cpid in cpids:
errno_wrap(os.kill, [cpid, signal.SIGKILL], [ESRCH])
- for apid in agents:
- errno_wrap(os.kill, [apid, signal.SIGKILL], [ESRCH])
self.lock.release()
finalize(exval=1)
t = Thread(target=wmon, args=[wx])
@@ -354,8 +283,8 @@ class Monitor(object):
# monitor status was being updated in each monitor thread. It
# should not be done as it can cause deadlock for a worker start.
# set_monitor_status uses flock to synchronize multple instances
- # updating the file. Since each monitor thread forks worker and
- # agent, these processes can hold the reference to fd of status
+ # updating the file. Since each monitor thread forks worker,
+ # these processes can hold the reference to fd of status
# file causing deadlock to workers which starts later as flock
# will not be release until all references to same fd is closed.
# It will also cause fd leaks.
diff --git a/geo-replication/syncdaemon/py2py3.py b/geo-replication/syncdaemon/py2py3.py
index faad750059c..f9c76e1b50a 100644
--- a/geo-replication/syncdaemon/py2py3.py
+++ b/geo-replication/syncdaemon/py2py3.py
@@ -55,23 +55,23 @@ if sys.version_info >= (3,):
def gr_lremovexattr(cls, path, attr):
return cls.libc.lremovexattr(path.encode(), attr.encode())
- def gr_cl_register(cls, brick, path, log_file, log_level, retries):
- return cls._get_api('gf_changelog_register')(brick.encode(),
- path.encode(),
- log_file.encode(),
- log_level, retries)
+ def gr_cl_register(libgfapi, brick, path, log_file, log_level, retries):
+ return libgfapi.gf_changelog_register(brick.encode(),
+ path.encode(),
+ log_file.encode(),
+ log_level, retries)
- def gr_cl_done(cls, clfile):
- return cls._get_api('gf_changelog_done')(clfile.encode())
+ def gr_cl_done(libgfapi, clfile):
+ return libgfapi.gf_changelog_done(clfile.encode())
- def gr_cl_history_changelog(cls, changelog_path, start, end, num_parallel,
+ def gr_cl_history_changelog(libgfapi, changelog_path, start, end, num_parallel,
actual_end):
- return cls._get_api('gf_history_changelog')(changelog_path.encode(),
- start, end, num_parallel,
- actual_end)
+ return libgfapi.gf_history_changelog(changelog_path.encode(),
+ start, end, num_parallel,
+ actual_end)
- def gr_cl_history_done(cls, clfile):
- return cls._get_api('gf_history_changelog_done')(clfile.encode())
+ def gr_cl_history_done(libgfapi, clfile):
+ return libgfapi.gf_history_changelog_done(clfile.encode())
# regular file
@@ -137,20 +137,20 @@ else:
def gr_lremovexattr(cls, path, attr):
return cls.libc.lremovexattr(path, attr)
- def gr_cl_register(cls, brick, path, log_file, log_level, retries):
- return cls._get_api('gf_changelog_register')(brick, path, log_file,
- log_level, retries)
+ def gr_cl_register(libgfapi, brick, path, log_file, log_level, retries):
+ return libgfapi.gf_changelog_register(brick, path, log_file,
+ log_level, retries)
- def gr_cl_done(cls, clfile):
- return cls._get_api('gf_changelog_done')(clfile)
+ def gr_cl_done(libgfapi, clfile):
+ return libgfapi.gf_changelog_done(clfile)
- def gr_cl_history_changelog(cls, changelog_path, start, end, num_parallel,
+ def gr_cl_history_changelog(libgfapi, changelog_path, start, end, num_parallel,
actual_end):
- return cls._get_api('gf_history_changelog')(changelog_path, start, end,
- num_parallel, actual_end)
+ return libgfapi.gf_history_changelog(changelog_path, start, end,
+ num_parallel, actual_end)
- def gr_cl_history_done(cls, clfile):
- return cls._get_api('gf_history_changelog_done')(clfile)
+ def gr_cl_history_done(libgfapi, clfile):
+ return libgfapi.gf_history_changelog_done(clfile)
# regular file
diff --git a/geo-replication/syncdaemon/resource.py b/geo-replication/syncdaemon/resource.py
index 189d8a101fd..f12c7ceaa36 100644
--- a/geo-replication/syncdaemon/resource.py
+++ b/geo-replication/syncdaemon/resource.py
@@ -19,30 +19,31 @@ import struct
import logging
import tempfile
import subprocess
-from errno import EEXIST, ENOENT, ENODATA, ENOTDIR, ELOOP, EACCES
-from errno import EISDIR, ENOTEMPTY, ESTALE, EINVAL, EBUSY, EPERM
+from errno import (EEXIST, ENOENT, ENODATA, ENOTDIR, ELOOP, EACCES,
+ EISDIR, ENOTEMPTY, ESTALE, EINVAL, EBUSY, EPERM)
import errno
from rconf import rconf
import gsyncdconfig as gconf
+import libgfchangelog
import repce
from repce import RepceServer, RepceClient
from master import gmaster_builder
import syncdutils
-from syncdutils import GsyncdError, select, privileged, funcode
-from syncdutils import entry2pb, gauxpfx, errno_wrap, lstat
-from syncdutils import NoStimeAvailable, PartialHistoryAvailable
-from syncdutils import ChangelogException, ChangelogHistoryNotAvailable
-from syncdutils import get_changelog_log_level, get_rsync_version
-from syncdutils import CHANGELOG_AGENT_CLIENT_VERSION
-from syncdutils import GX_GFID_CANONICAL_LEN
+from syncdutils import (GsyncdError, select, privileged, funcode,
+ entry2pb, gauxpfx, errno_wrap, lstat,
+ NoStimeAvailable, PartialHistoryAvailable,
+ ChangelogException, ChangelogHistoryNotAvailable,
+ get_changelog_log_level, get_rsync_version,
+ GX_GFID_CANONICAL_LEN,
+ gf_mount_ready, lf, Popen, sup,
+ Xattr, matching_disk_gfid, get_gfid_from_mnt,
+ unshare_propagation_supported, get_slv_dir_path)
from gsyncdstatus import GeorepStatus
-from syncdutils import lf, Popen, sup
-from syncdutils import Xattr, matching_disk_gfid, get_gfid_from_mnt
-from syncdutils import unshare_propagation_supported, get_slv_dir_path
-from py2py3 import pipe, str_to_bytearray, entry_pack_reg
-from py2py3 import entry_pack_reg_stat, entry_pack_mkdir, entry_pack_symlink
+from py2py3 import (pipe, str_to_bytearray, entry_pack_reg,
+ entry_pack_reg_stat, entry_pack_mkdir,
+ entry_pack_symlink)
ENOTSUP = getattr(errno, 'ENOTSUP', 'EOPNOTSUPP')
@@ -950,6 +951,16 @@ class Mounter(object):
logging.exception('mount cleanup failure:')
rv = 200
os._exit(rv)
+
+    # Poll the dht.subvol.status xattr value.
+ RETRIES = 10
+ while not gf_mount_ready():
+ if RETRIES < 0:
+ logging.error('Subvols are not up')
+ break
+ RETRIES -= 1
+ time.sleep(0.2)
+
logging.debug('auxiliary glusterfs mount prepared')
@@ -1245,9 +1256,6 @@ class GLUSTER(object):
# register the crawlers and start crawling
# g1 ==> Xsync, g2 ==> config.change_detector(changelog by default)
# g3 ==> changelog History
- (inf, ouf, ra, wa) = rconf.args.rpc_fd.split(',')
- changelog_agent = RepceClient(int(inf), int(ouf))
-
status = GeorepStatus(gconf.get("state-file"),
rconf.args.local_node,
rconf.args.local_path,
@@ -1255,12 +1263,6 @@ class GLUSTER(object):
rconf.args.master,
rconf.args.slave)
status.reset_on_worker_start()
- rv = changelog_agent.version()
- if int(rv) != CHANGELOG_AGENT_CLIENT_VERSION:
- raise GsyncdError(
- "RePCe major version mismatch(changelog agent): "
- "local %s, remote %s" %
- (CHANGELOG_AGENT_CLIENT_VERSION, rv))
try:
workdir = g2.setup_working_dir()
@@ -1271,17 +1273,16 @@ class GLUSTER(object):
# register with the changelog library
# 9 == log level (DEBUG)
# 5 == connection retries
- changelog_agent.init()
- changelog_agent.register(rconf.args.local_path,
- workdir,
- gconf.get("changelog-log-file"),
- get_changelog_log_level(
- gconf.get("changelog-log-level")),
- g2.CHANGELOG_CONN_RETRIES)
+ libgfchangelog.register(rconf.args.local_path,
+ workdir,
+ gconf.get("changelog-log-file"),
+ get_changelog_log_level(
+ gconf.get("changelog-log-level")),
+ g2.CHANGELOG_CONN_RETRIES)
register_time = int(time.time())
- g2.register(register_time, changelog_agent, status)
- g3.register(register_time, changelog_agent, status)
+ g2.register(register_time, status)
+ g3.register(register_time, status)
except ChangelogException as e:
logging.error(lf("Changelog register failed", error=e))
sys.exit(1)
@@ -1483,7 +1484,7 @@ class SSH(object):
if log_rsync_performance:
# use stdout=PIPE only when log_rsync_performance enabled
- # Else rsync will write to stdout and nobody is their
+ # Else rsync will write to stdout and nobody is there
# to consume. If PIPE is full rsync hangs.
po = Popen(argv, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True)
diff --git a/geo-replication/syncdaemon/subcmds.py b/geo-replication/syncdaemon/subcmds.py
index f8515f2607b..b8508532e30 100644
--- a/geo-replication/syncdaemon/subcmds.py
+++ b/geo-replication/syncdaemon/subcmds.py
@@ -97,17 +97,6 @@ def subcmd_slave(args):
local.service_loop()
-def subcmd_agent(args):
- import os
- from changelogagent import agent, Changelog
- from syncdutils import lf
-
- os.setsid()
- logging.debug(lf("RPC FD",
- rpc_fd=repr(args.rpc_fd)))
- return agent(Changelog(), args.rpc_fd)
-
-
def subcmd_voluuidget(args):
from subprocess import Popen, PIPE
import xml.etree.ElementTree as XET
diff --git a/geo-replication/syncdaemon/syncdutils.py b/geo-replication/syncdaemon/syncdutils.py
index b08098ef007..a3df103e76c 100644
--- a/geo-replication/syncdaemon/syncdutils.py
+++ b/geo-replication/syncdaemon/syncdutils.py
@@ -21,8 +21,8 @@ import subprocess
import socket
from subprocess import PIPE
from threading import Lock, Thread as baseThread
-from errno import EACCES, EAGAIN, EPIPE, ENOTCONN, ECONNABORTED
-from errno import EINTR, ENOENT, ESTALE, EBUSY, errorcode
+from errno import (EACCES, EAGAIN, EPIPE, ENOTCONN, ENOMEM, ECONNABORTED,
+ EINTR, ENOENT, ESTALE, EBUSY, ENODATA, errorcode, EIO)
from signal import signal, SIGTERM
import select as oselect
from os import waitpid as owaitpid
@@ -55,6 +55,8 @@ from rconf import rconf
from hashlib import sha256 as sha256
+ENOTSUP = getattr(errno, 'ENOTSUP', 'EOPNOTSUPP')
+
# auxiliary gfid based access prefix
_CL_AUX_GFID_PFX = ".gfid/"
ROOT_GFID = "00000000-0000-0000-0000-000000000001"
@@ -62,8 +64,6 @@ GF_OP_RETRIES = 10
GX_GFID_CANONICAL_LEN = 37 # canonical gfid len + '\0'
-CHANGELOG_AGENT_SERVER_VERSION = 1.0
-CHANGELOG_AGENT_CLIENT_VERSION = 1.0
NodeID = None
rsync_version = None
unshare_mnt_propagation = None
@@ -100,6 +100,19 @@ def unescape_space_newline(s):
.replace(NEWLINE_ESCAPE_CHAR, "\n")\
.replace(PERCENTAGE_ESCAPE_CHAR, "%")
+# gf_mount_ready() returns 1 if all subvols are up, else 0
+def gf_mount_ready():
+ ret = errno_wrap(Xattr.lgetxattr,
+ ['.', 'dht.subvol.status', 16],
+ [ENOENT, ENOTSUP, ENODATA], [ENOMEM])
+
+ if isinstance(ret, int):
+ logging.error("failed to get the xattr value")
+ return 1
+ ret = ret.rstrip('\x00')
+ if ret == "1":
+ return 1
+ return 0
def norm(s):
if s:
@@ -331,13 +344,24 @@ def log_raise_exception(excont):
ECONNABORTED):
logging.error(lf('Gluster Mount process exited',
error=errorcode[exc.errno]))
+ elif isinstance(exc, OSError) and exc.errno == EIO:
+ logging.error("Getting \"Input/Output error\" "
+ "is most likely due to "
+ "a. Brick is down or "
+ "b. Split brain issue.")
+ logging.error("This is expected as per design to "
+ "keep the consistency of the file system. "
+ "Once the above issue is resolved "
+ "geo-replication would automatically "
+ "proceed further.")
+ logtag = "FAIL"
else:
logtag = "FAIL"
if not logtag and logging.getLogger().isEnabledFor(logging.DEBUG):
logtag = "FULL EXCEPTION TRACE"
if logtag:
logging.exception(logtag + ": ")
- sys.stderr.write("failed with %s.\n" % type(exc).__name__)
+ sys.stderr.write("failed with %s: %s.\n" % (type(exc).__name__, exc))
excont.exval = 1
sys.exit(excont.exval)
@@ -564,7 +588,6 @@ def errno_wrap(call, arg=[], errnos=[], retry_errnos=[]):
def lstat(e):
return errno_wrap(os.lstat, [e], [ENOENT], [ESTALE, EBUSY])
-
def get_gfid_from_mnt(gfidpath):
return errno_wrap(Xattr.lgetxattr,
[gfidpath, 'glusterfs.gfid.string',
@@ -702,11 +725,13 @@ def get_slv_dir_path(slv_host, slv_volume, gfid):
if not isinstance(realpath, int):
basename = os.path.basename(realpath).rstrip('\x00')
dirpath = os.path.dirname(realpath)
- if dirpath is "/":
+ if dirpath == "/":
pargfid = ROOT_GFID
else:
dirpath = dirpath.strip("/")
pargfid = get_gfid_from_mnt(dirpath)
+ if isinstance(pargfid, int):
+ return None
dir_entry = os.path.join(pfx, pargfid, basename)
return dir_entry
@@ -718,12 +743,12 @@ def lf(event, **kwargs):
Log Format helper function, log messages can be
easily modified to structured log format.
lf("Config Change", sync_jobs=4, brick=/bricks/b1) will be
- converted as "Config Change<TAB>brick=/bricks/b1<TAB>sync_jobs=4"
+ converted as "Config Change [{brick=/bricks/b1}, {sync_jobs=4}]"
"""
- msg = event
+ msgparts = []
for k, v in kwargs.items():
- msg += "\t{0}={1}".format(k, v)
- return msg
+ msgparts.append("{%s=%s}" % (k, v))
+ return "%s [%s]" % (event, ", ".join(msgparts))
class Popen(subprocess.Popen):
@@ -869,6 +894,19 @@ class Popen(subprocess.Popen):
self.errfail()
+def host_brick_split(value):
+ """
+ IPv6 compatible way to split and get the host
+ and brick information. Example inputs:
+ node1.example.com:/exports/bricks/brick1/brick
+ fe80::af0f:df82:844f:ef66%utun0:/exports/bricks/brick1/brick
+ """
+ parts = value.split(":")
+ brick = parts[-1]
+ hostparts = parts[0:-1]
+ return (":".join(hostparts), brick)
+
+
class Volinfo(object):
def __init__(self, vol, host='localhost', prelude=[], master=True):
@@ -911,7 +949,7 @@ class Volinfo(object):
@memoize
def bricks(self):
def bparse(b):
- host, dirp = b.find("name").text.split(':', 2)
+ host, dirp = host_brick_split(b.find("name").text)
return {'host': host, 'dir': dirp, 'uuid': b.find("hostUuid").text}
return [bparse(b) for b in self.get('brick')]
@@ -987,6 +1025,16 @@ class VolinfoFromGconf(object):
def is_hot(self, brickpath):
return False
+ def is_uuid(self, value):
+ try:
+ uuid.UUID(value)
+ return True
+ except ValueError:
+ return False
+
+ def possible_path(self, value):
+ return "/" in value
+
@property
@memoize
def bricks(self):
@@ -1000,8 +1048,22 @@ class VolinfoFromGconf(object):
out = []
for b in bricks_data:
parts = b.split(":")
- bpath = parts[2] if len(parts) == 3 else ""
- out.append({"host": parts[1], "dir": bpath, "uuid": parts[0]})
+ b_uuid = None
+ if self.is_uuid(parts[0]):
+ b_uuid = parts[0]
+ # Set all parts except first
+ parts = parts[1:]
+
+ if self.possible_path(parts[-1]):
+ bpath = parts[-1]
+ # Set all parts except last
+ parts = parts[0:-1]
+
+ out.append({
+ "host": ":".join(parts), # if remaining parts are IPv6 name
+ "dir": bpath,
+ "uuid": b_uuid
+ })
return out
diff --git a/geo-replication/tests/unit/test_gsyncdstatus.py b/geo-replication/tests/unit/test_gsyncdstatus.py
index 483023dbfe9..9c1aa2ad4ad 100755
--- a/geo-replication/tests/unit/test_gsyncdstatus.py
+++ b/geo-replication/tests/unit/test_gsyncdstatus.py
@@ -13,11 +13,11 @@ import unittest
import os
import urllib
-from syncdaemon.gstatus import GeorepStatus, set_monitor_status
-from syncdaemon.gstatus import get_default_values
-from syncdaemon.gstatus import MONITOR_STATUS, DEFAULT_STATUS
-from syncdaemon.gstatus import STATUS_VALUES, CRAWL_STATUS_VALUES
-from syncdaemon.gstatus import human_time, human_time_utc
+from syncdaemon.gstatus import (GeorepStatus, set_monitor_status,
+ get_default_values,
+ MONITOR_STATUS, DEFAULT_STATUS,
+ STATUS_VALUES, CRAWL_STATUS_VALUES,
+ human_time, human_time_utc)
class GeorepStatusTestCase(unittest.TestCase):
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 2bd5c77e723..b6d63146e14 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -70,16 +70,6 @@
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without ocf
%{?_without_ocf:%global _without_ocf --without-ocf}
-# rdma
-# if you wish to compile an rpm without rdma support, compile like this...
-# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without rdma
-%{?_without_rdma:%global _without_rdma --disable-ibverbs}
-
-# No RDMA Support on 32-bit ARM
-%ifarch armv7hl
-%global _without_rdma --disable-ibverbs
-%endif
-
# server
# if you wish to build rpms without server components, compile like this
# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without server
@@ -121,6 +111,12 @@
## All %%global definitions should be placed here and keep them sorted
##
+# selinux booleans whose default value needs modification
+# these booleans will be consumed by "%%selinux_set_booleans" macro.
+%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
+%global selinuxbooleans rsync_full_access=1 rsync_client=1
+%endif
+
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
%global _with_systemd true
%endif
@@ -241,7 +237,9 @@ Requires(pre): shadow-utils
BuildRequires: systemd
%endif
-Requires: %{name}-libs%{?_isa} = %{version}-%{release}
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Requires: libgfrpc0%{?_isa} = %{version}-%{release}
+Requires: libgfxdr0%{?_isa} = %{version}-%{release}
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%endif
@@ -282,75 +280,40 @@ BuildRequires: libattr-devel
BuildRequires: firewalld
%endif
-Obsoletes: hekafs
Obsoletes: %{name}-common < %{version}-%{release}
Obsoletes: %{name}-core < %{version}-%{release}
-Obsoletes: %{name}-ufo
Obsoletes: %{name}-ganesha
+Obsoletes: %{name}-rdma < %{version}-%{release}
%if ( 0%{!?_with_gnfs:1} )
-Obsoletes: %{name}-gnfs
+Obsoletes: %{name}-gnfs < %{version}-%{release}
%endif
Provides: %{name}-common = %{version}-%{release}
Provides: %{name}-core = %{version}-%{release}
%description
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package includes the glusterfs binary, the glusterfsd daemon and the
libglusterfs and glusterfs translator modules common to both GlusterFS server
and client framework.
-%package api
-Summary: GlusterFS api library
-Requires: %{name}%{?_isa} = %{version}-%{release}
-Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
-
-%description api
-GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
-
-This package provides the glusterfs libgfapi library.
-
-%package api-devel
-Summary: Development Libraries
-Requires: %{name}%{?_isa} = %{version}-%{release}
-Requires: %{name}-devel%{?_isa} = %{version}-%{release}
-Requires: libacl-devel
-
-%description api-devel
-GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
-
-This package provides the api include files.
-
%package cli
Summary: GlusterFS CLI
-Requires: %{name}-libs%{?_isa} = %{version}-%{release}
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Requires: libglusterd0%{?_isa} = %{version}-%{release}
%description cli
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides the GlusterFS CLI application and its man page
@@ -360,32 +323,14 @@ BuildRequires: libcurl-devel
%description cloudsync-plugins
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides cloudsync plugins for archival feature.
-%package devel
-Summary: Development Libraries
-Requires: %{name}%{?_isa} = %{version}-%{release}
-# Needed for the Glupy examples to work
-Requires: %{name}-extra-xlators%{?_isa} = %{version}-%{release}
-
-%description devel
-GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
-
-This package provides the development libraries and include files.
-
%package extra-xlators
Summary: Extra Gluster filesystem Translators
# We need python-gluster rpm for gluster module's __init__.py in Python
@@ -395,12 +340,11 @@ Requires: python%{_pythonver}
%description extra-xlators
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides extra filesystem Translators, such as Glupy,
for GlusterFS.
@@ -419,6 +363,49 @@ Provides: %{name}-client = %{version}-%{release}
%description fuse
GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides support to FUSE based clients and includes the
+glusterfs(d) binary.
+
+%if ( 0%{!?_without_server:1} )
+%package ganesha
+Summary: NFS-Ganesha configuration
+Group: Applications/File
+
+Requires: %{name}-server%{?_isa} = %{version}-%{release}
+Requires: nfs-ganesha-selinux >= 2.7.6
+Requires: nfs-ganesha-gluster >= 2.7.6
+Requires: pcs >= 0.10.0
+Requires: resource-agents >= 4.2.0
+Requires: dbus
+
+%if ( 0%{?rhel} && 0%{?rhel} == 6 )
+Requires: cman, pacemaker, corosync
+%endif
+
+%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 5 )
+# we need portblock resource-agent in 3.9.5 and later.
+Requires: net-tools
+%endif
+
+%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
+%if ( 0%{?rhel} && 0%{?rhel} < 8 )
+Requires: selinux-policy >= 3.13.1-160
+Requires(post): policycoreutils-python
+Requires(postun): policycoreutils-python
+%else
+Requires(post): policycoreutils-python-utils
+Requires(postun): policycoreutils-python-utils
+%endif
+%endif
+
+%description ganesha
+GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
@@ -426,8 +413,9 @@ terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.
-This package provides support to FUSE based clients and inlcudes the
-glusterfs(d) binary.
+This package provides the configuration and related files for using
+NFS-Ganesha as the NFS server using GlusterFS.
+%endif
%if ( 0%{!?_without_georeplication:1} )
%package geo-replication
@@ -440,15 +428,22 @@ Requires: python%{_pythonver}-gluster = %{version}-%{release}
Requires: rsync
Requires: util-linux
+# required for setting selinux bools
+%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
+Requires(post): policycoreutils-python-utils
+Requires(postun): policycoreutils-python-utils
+Requires: selinux-policy-targeted
+Requires(post): selinux-policy-targeted
+BuildRequires: selinux-policy-devel
+%endif
%description geo-replication
GlusterFS is a distributed file-system capable of scaling to several
-peta-bytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file system in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in userspace and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides support to geo-replication.
%endif
@@ -462,29 +457,187 @@ Requires: nfs-utils
%description gnfs
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides the glusterfs legacy gNFS server xlator
%endif
-%package libs
-Summary: GlusterFS common libraries
+%package -n libglusterfs0
+Summary: GlusterFS libglusterfs library
+Requires: libgfrpc0%{?_isa} = %{version}-%{release}
+Requires: libgfxdr0%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-libs <= %{version}-%{release}
+Provides: %{name}-libs = %{version}-%{release}
-%description libs
+%description -n libglusterfs0
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides the base libglusterfs library
+
+%package -n libglusterfs-devel
+Summary: GlusterFS libglusterfs library
+Requires: libgfrpc-devel%{?_isa} = %{version}-%{release}
+Requires: libgfxdr-devel%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-devel <= %{version}-%{release}
+Provides: %{name}-devel = %{version}-%{release}
+
+%description -n libglusterfs-devel
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides libglusterfs.so and the gluster C header files.
+
+%package -n libgfapi0
+Summary: GlusterFS api library
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-api <= %{version}-%{release}
+Provides: %{name}-api = %{version}-%{release}
+
+%description -n libgfapi0
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides the glusterfs libgfapi library.
+
+%package -n libgfapi-devel
+Summary: Development Libraries
+Requires: libglusterfs-devel%{?_isa} = %{version}-%{release}
+Requires: libacl-devel
+Obsoletes: %{name}-api-devel <= %{version}-%{release}
+Provides: %{name}-api-devel = %{version}-%{release}
+
+%description -n libgfapi-devel
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides libgfapi.so and the api C header files.
+
+%package -n libgfchangelog0
+Summary: GlusterFS libchangelog library
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-libs <= %{version}-%{release}
+
+%description -n libgfchangelog0
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides the libgfchangelog library
+
+%package -n libgfchangelog-devel
+Summary: GlusterFS libchangelog library
+Requires: libglusterfs-devel%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-devel <= %{version}-%{release}
+
+%description -n libgfchangelog-devel
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides libgfchangelog.so and changelog C header files.
+
+%package -n libgfrpc0
+Summary: GlusterFS libgfrpc0 library
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-libs <= %{version}-%{release}
+
+%description -n libgfrpc0
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides the libgfrpc library
+
+%package -n libgfrpc-devel
+Summary: GlusterFS libgfrpc library
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-devel <= %{version}-%{release}
-This package provides the base GlusterFS libraries
+%description -n libgfrpc-devel
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides libgfrpc.so and rpc C header files.
+
+%package -n libgfxdr0
+Summary: GlusterFS libgfxdr0 library
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-libs <= %{version}-%{release}
+
+%description -n libgfxdr0
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides the libgfxdr library
+
+%package -n libgfxdr-devel
+Summary: GlusterFS libgfxdr library
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-devel <= %{version}-%{release}
+
+%description -n libgfxdr-devel
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides libgfxdr.so.
+
+%package -n libglusterd0
+Summary: GlusterFS libglusterd library
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Obsoletes: %{name}-libs <= %{version}-%{release}
+
+%description -n libglusterd0
+GlusterFS is a distributed file-system capable of scaling to several
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
+
+This package provides the libglusterd library
%package -n python%{_pythonver}-gluster
Summary: GlusterFS python library
@@ -497,39 +650,15 @@ Obsoletes: python-gluster < 3.10
%description -n python%{_pythonver}-gluster
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package contains the python modules of GlusterFS and own gluster
namespace.
-%if ( 0%{!?_without_rdma:1} )
-%package rdma
-Summary: GlusterFS rdma support for ib-verbs
-%if ( 0%{?fedora} && 0%{?fedora} > 26 )
-BuildRequires: rdma-core-devel
-%else
-BuildRequires: libibverbs-devel
-BuildRequires: librdmacm-devel >= 1.0.15
-%endif
-Requires: %{name}%{?_isa} = %{version}-%{release}
-
-%description rdma
-GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
-
-This package provides support to ib-verbs library.
-%endif
-
%package regression-tests
Summary: Development Tools
Requires: %{name}%{?_isa} = %{version}-%{release}
@@ -559,12 +688,11 @@ Requires: %{_prefix}/lib/ocf/resource.d
%description resource-agents
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides the resource agents which plug glusterd into
Open Cluster Framework (OCF) compliant cluster resource managers,
@@ -576,11 +704,15 @@ like Pacemaker.
Summary: Clustered file-system server
Requires: %{name}%{?_isa} = %{version}-%{release}
Requires: %{name}-cli%{?_isa} = %{version}-%{release}
-Requires: %{name}-libs%{?_isa} = %{version}-%{release}
+Requires: libglusterfs0%{?_isa} = %{version}-%{release}
+Requires: libgfchangelog0%{?_isa} = %{version}-%{release}
+%if ( 0%{?fedora} && 0%{?fedora} >= 30 || ( 0%{?rhel} && 0%{?rhel} >= 8 ) )
+Requires: glusterfs-selinux >= 0.1.0-2
+%endif
# some daemons (like quota) use a fuse-mount, glusterfsd is part of -fuse
Requires: %{name}-fuse%{?_isa} = %{version}-%{release}
# self-heal daemon, rebalance, nfs-server etc. are actually clients
-Requires: %{name}-api%{?_isa} = %{version}-%{release}
+Requires: libgfapi0%{?_isa} = %{version}-%{release}
Requires: %{name}-client-xlators%{?_isa} = %{version}-%{release}
# lvm2 for snapshot, and nfs-utils and rpcbind/portmap for gnfs server
Requires: lvm2
@@ -619,12 +751,11 @@ Requires: valgrind
%description server
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides the glusterfs server daemon.
%endif
@@ -646,12 +777,11 @@ Summary: GlusterFS client-side translators
%description client-xlators
GlusterFS is a distributed file-system capable of scaling to several
-petabytes. It aggregates various storage bricks over Infiniband RDMA
-or TCP/IP interconnect into one large parallel network file
-system. GlusterFS is one of the most sophisticated file systems in
-terms of features and extensibility. It borrows a powerful concept
-called Translators from GNU Hurd kernel. Much of the code in GlusterFS
-is in user space and easily manageable.
+petabytes. It aggregates various storage bricks over TCP/IP interconnect
+into one large parallel network filesystem. GlusterFS is one of the
+most sophisticated file systems in terms of features and extensibility.
+It borrows a powerful concept called Translators from GNU Hurd kernel.
+Much of the code in GlusterFS is in user space and easily manageable.
This package provides the translators needed on any GlusterFS client.
@@ -708,7 +838,6 @@ done
%{?_without_fusermount} \
%{?_without_georeplication} \
%{?_without_ocf} \
- %{?_without_rdma} \
%{?_without_server} \
%{?_without_syslog} \
%{?_with_ipv6default} \
@@ -792,6 +921,15 @@ sed -i 's|option working-directory /etc/glusterd|option working-directory %{_sha
install -D -p -m 0644 extras/glusterfs-logrotate \
%{buildroot}%{_sysconfdir}/logrotate.d/glusterfs
+# ganesha ghosts
+%if ( 0%{!?_without_server:1} )
+mkdir -p %{buildroot}%{_sysconfdir}/ganesha
+touch %{buildroot}%{_sysconfdir}/ganesha/ganesha-ha.conf
+mkdir -p %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/
+touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
+touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
+%endif
+
%if ( 0%{!?_without_georeplication:1} )
# geo-rep ghosts
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication
@@ -842,23 +980,46 @@ rm -rf %{buildroot}
%endif
exit 0
-%post api
-/sbin/ldconfig
-
%if ( 0%{!?_without_events:1} )
%post events
%systemd_post glustereventsd
%endif
+%if ( 0%{!?_without_server:1} )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
+%post ganesha
+semanage boolean -m ganesha_use_fusefs --on
+exit 0
+%endif
+%endif
+
%if ( 0%{!?_without_georeplication:1} )
%post geo-replication
+%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
+%selinux_set_booleans %{selinuxbooleans}
+%endif
if [ $1 -ge 1 ]; then
%systemd_postun_with_restart glusterd
fi
exit 0
%endif
-%post libs
+%post -n libglusterfs0
+/sbin/ldconfig
+
+%post -n libgfapi0
+/sbin/ldconfig
+
+%post -n libgfchangelog0
+/sbin/ldconfig
+
+%post -n libgfrpc0
+/sbin/ldconfig
+
+%post -n libgfxdr0
+/sbin/ldconfig
+
+%post -n libglusterd0
/sbin/ldconfig
%if ( 0%{!?_without_server:1} )
@@ -1004,6 +1165,36 @@ fi
exit 0
%endif
+%if ( 0%{!?_without_server:1} )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
+%postun ganesha
+semanage boolean -m ganesha_use_fusefs --off
+exit 0
+%endif
+%endif
+
+##-----------------------------------------------------------------------------
+## All %%trigger should be placed here and keep them sorted
+##
+%if ( 0%{!?_without_server:1} )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
+%trigger ganesha -- selinux-policy-targeted
+semanage boolean -m ganesha_use_fusefs --on
+exit 0
+%endif
+%endif
+
+##-----------------------------------------------------------------------------
+## All %%triggerun should be placed here and keep them sorted
+##
+%if ( 0%{!?_without_server:1} )
+%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
+%triggerun ganesha -- selinux-policy-targeted
+semanage boolean -m ganesha_use_fusefs --off
+exit 0
+%endif
+%endif
+
##-----------------------------------------------------------------------------
## All %%files should be placed here and keep them grouped
##
@@ -1014,9 +1205,6 @@ exit 0
%exclude %{_mandir}/man8/gluster.8*
%endif
%dir %{_localstatedir}/log/glusterfs
-%if ( 0%{!?_without_rdma:1} )
-%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
-%endif
%if 0%{?!_without_server:1}
%dir %{_datadir}/glusterfs
%dir %{_datadir}/glusterfs/scripts
@@ -1070,20 +1258,12 @@ exit 0
%{_tmpfilesdir}/gluster.conf
%endif
-%files api
-%exclude %{_libdir}/*.so
-# libgfapi files
-%{_libdir}/libgfapi.*
-%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
-%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
- %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/api.so
-
-%files api-devel
-%{_libdir}/pkgconfig/glusterfs-api.pc
-%{_libdir}/libgfapi.so
-%dir %{_includedir}/glusterfs
-%dir %{_includedir}/glusterfs/api
- %{_includedir}/glusterfs/api/*
+%if ( 0%{?_without_server:1} )
+#exclude ganesha related files
+%exclude %{_sysconfdir}/ganesha/ganesha-ha.conf.sample
+%exclude %{_libexecdir}/ganesha/*
+%exclude %{_prefix}/lib/ocf/resource.d/heartbeat/*
+%endif
%files cli
%{_sbindir}/gluster
@@ -1095,14 +1275,33 @@ exit 0
%{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins/cloudsyncs3.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins/cloudsynccvlt.so
-%files devel
+%files -n libglusterfs-devel
%dir %{_includedir}/glusterfs
- %{_includedir}/glusterfs/*
-%exclude %{_includedir}/glusterfs/api
-%exclude %{_libdir}/libgfapi.so
-%{_libdir}/*.so
+ %{_includedir}/glusterfs/*.h
+ %{_includedir}/glusterfs/server/*.h
+%{_libdir}/libglusterfs.so
+
+%files -n libgfapi-devel
+%dir %{_includedir}/glusterfs/api
+ %{_includedir}/glusterfs/api/*.h
+%{_libdir}/libgfapi.so
+%{_libdir}/pkgconfig/glusterfs-api.pc
+
+
+%files -n libgfchangelog-devel
+%dir %{_includedir}/glusterfs/gfchangelog
+ %{_includedir}/glusterfs/gfchangelog/*.h
+%{_libdir}/libgfchangelog.so
%{_libdir}/pkgconfig/libgfchangelog.pc
+%files -n libgfrpc-devel
+%dir %{_includedir}/glusterfs/rpc
+ %{_includedir}/glusterfs/rpc/*.h
+%{_libdir}/libgfrpc.so
+
+%files -n libgfxdr-devel
+%{_libdir}/libgfxdr.so
+
%files client-xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster
@@ -1153,7 +1352,6 @@ exit 0
%{_unitdir}/gluster-ta-volume.service
%endif
-
%if ( 0%{!?_without_georeplication:1} )
%files geo-replication
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs-georep
@@ -1165,6 +1363,13 @@ exit 0
%dir %{_libexecdir}/glusterfs/python/syncdaemon
%{_libexecdir}/glusterfs/gsyncd
%{_libexecdir}/glusterfs/python/syncdaemon/*
+%dir %{_libexecdir}/glusterfs/scripts
+ %{_libexecdir}/glusterfs/scripts/get-gfid.sh
+ %{_libexecdir}/glusterfs/scripts/slave-upgrade.sh
+ %{_libexecdir}/glusterfs/scripts/gsync-upgrade.sh
+ %{_libexecdir}/glusterfs/scripts/generate-gfid-file.sh
+ %{_libexecdir}/glusterfs/scripts/gsync-sync-gfid
+ %{_libexecdir}/glusterfs/scripts/schedule_georep.py*
%{_libexecdir}/glusterfs/gverify.sh
%{_libexecdir}/glusterfs/set_geo_rep_pem_keys.sh
%{_libexecdir}/glusterfs/peer_gsec_create
@@ -1181,19 +1386,28 @@ exit 0
%attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post/S56glusterd-geo-rep-create-post.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/pre
-%dir %{_datadir}/glusterfs
-%dir %{_datadir}/glusterfs/scripts
- %{_datadir}/glusterfs/scripts/get-gfid.sh
- %{_datadir}/glusterfs/scripts/slave-upgrade.sh
- %{_datadir}/glusterfs/scripts/gsync-upgrade.sh
- %{_datadir}/glusterfs/scripts/generate-gfid-file.sh
- %{_datadir}/glusterfs/scripts/gsync-sync-gfid
- %{_datadir}/glusterfs/scripts/schedule_georep.py*
%endif
-%files libs
-%{_libdir}/*.so.*
-%exclude %{_libdir}/libgfapi.*
+%files -n libglusterfs0
+%{_libdir}/libglusterfs.so.*
+
+%files -n libgfapi0
+%{_libdir}/libgfapi.so.*
+%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
+ %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/api.so
+
+%files -n libgfchangelog0
+%{_libdir}/libgfchangelog.so.*
+
+%files -n libgfrpc0
+%{_libdir}/libgfrpc.so.*
+
+%files -n libgfxdr0
+%{_libdir}/libgfxdr.so.*
+
+%files -n libglusterd0
+%{_libdir}/libglusterd.so.*
+%exclude %{_libdir}/libglusterd.so
%files -n python%{_pythonver}-gluster
# introducing glusterfs module in site packages.
@@ -1209,18 +1423,25 @@ exit 0
%{python2_sitelib}/gluster/cliutils
%endif
-%if ( 0%{!?_without_rdma:1} )
-%files rdma
-%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
- %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
-%endif
-
%files regression-tests
%dir %{_datadir}/glusterfs
%{_datadir}/glusterfs/run-tests.sh
%{_datadir}/glusterfs/tests
%exclude %{_datadir}/glusterfs/tests/vagrant
+%if ( 0%{!?_without_server:1} )
+%files ganesha
+%dir %{_libexecdir}/ganesha
+%{_sysconfdir}/ganesha/ganesha-ha.conf.sample
+%{_libexecdir}/ganesha/*
+%{_prefix}/lib/ocf/resource.d/heartbeat/*
+%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
+%ghost %attr(0644,-,-) %config(noreplace) %{_sysconfdir}/ganesha/ganesha-ha.conf
+%ghost %dir %attr(0755,-,-) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha
+%ghost %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
+%ghost %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
+%endif
+
%if ( 0%{!?_without_ocf:1} )
%files resource-agents
# /usr/lib is the standard for OCF, also on x86_64
@@ -1255,7 +1476,7 @@ exit 0
# binaries
%{_sbindir}/glusterd
-%{_sbindir}/glfsheal
+%{_libexecdir}/glusterfs/glfsheal
%{_sbindir}/gf_attach
%{_sbindir}/gluster-setgfid2path
# {_sbindir}/glusterfsd is the actual binary, but glusterfs (client) is a
@@ -1400,9 +1621,25 @@ exit 0
%endif
%changelog
+* Thu May 14 2020 Kaleb S. KEITHLEY <kkeithle@redhat.com>
+- refactor, common practice, Issue #1126
+
+* Mon May 11 2020 Sunny Kumar <sunkumar@redhat.com>
+- added requires policycoreutils-python-utils on rhel8 for geo-replication
+
* Wed Oct 9 2019 Kaleb S. KEITHLEY <kkeithle@redhat.com>
- remove leftover bd xlator cruft
+* Fri Aug 23 2019 Shwetha K Acharya <sacharya@redhat.com>
+- removed {name}-ufs from Obsoletes
+- added "< version" for obsoletes {name}-gnfs and {name}-rdma
+
+* Mon Jul 15 2019 Jiffin Tony Thottan <jthottan@redhat.com>
+- Adding ganesha ha bits back in gluster repository
+
+* Fri Jul 12 2019 Amar Tumballi <amarts@redhat.com>
+- Remove rdma package, and mark older rdma package as 'Obsoletes'
+
* Fri Jun 14 2019 Niels de Vos <ndevos@redhat.com>
- always build glusterfs-cli to allow monitoring/managing from clients
@@ -1493,9 +1730,6 @@ exit 0
* Thu Feb 16 2017 Niels de Vos <ndevos@redhat.com>
- Obsolete and Provide python-gluster for upgrading from glusterfs < 3.10
-* Tue Feb 7 2017 Kaleb S. KEITHLEY <kkeithle@redhat.com>
-- remove ganesha (#1418417)
-
* Wed Feb 1 2017 Poornima G <pgurusid@redhat.com>
- Install /var/lib/glusterd/groups/metadata-cache by default
diff --git a/glusterfsd/src/Makefile.am b/glusterfsd/src/Makefile.am
index 7b8d1dbf1fb..a0a778158d8 100644
--- a/glusterfsd/src/Makefile.am
+++ b/glusterfsd/src/Makefile.am
@@ -6,14 +6,15 @@ endif
glusterfsd_SOURCES = glusterfsd.c glusterfsd-mgmt.c
glusterfsd_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
$(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
- $(top_builddir)/rpc/xdr/src/libgfxdr.la ${GF_LDADD}
-glusterfsd_LDFLAGS = $(GF_LDFLAGS) $(LIB_DL)
+ $(top_builddir)/rpc/xdr/src/libgfxdr.la $(GF_LDADD) $(LIB_DL)
+glusterfsd_LDFLAGS = $(GF_LDFLAGS)
gf_attach_SOURCES = gf_attach.c
gf_attach_LDADD = $(top_builddir)/libglusterfs/src/libglusterfs.la \
$(top_builddir)/api/src/libgfapi.la \
$(top_builddir)/rpc/rpc-lib/src/libgfrpc.la \
$(top_builddir)/rpc/xdr/src/libgfxdr.la
+gf_attach_LDFLAGS = $(GF_LDFLAGS)
noinst_HEADERS = glusterfsd.h glusterfsd-mem-types.h glusterfsd-messages.h
diff --git a/glusterfsd/src/gf_attach.c b/glusterfsd/src/gf_attach.c
index e688c3c9eb4..c553b0b1f61 100644
--- a/glusterfsd/src/gf_attach.c
+++ b/glusterfsd/src/gf_attach.c
@@ -19,9 +19,16 @@
#include "xdr-generic.h"
#include "glusterd1-xdr.h"
+/* In seconds */
+#define CONNECT_TIMEOUT 60
+#define REPLY_TIMEOUT 120
+
int done = 0;
int rpc_status;
+pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+
struct rpc_clnt_procedure gf_attach_actors[GLUSTERD_BRICK_MAXVALUE] = {
[GLUSTERD_BRICK_NULL] = {"NULL", NULL},
[GLUSTERD_BRICK_OP] = {"BRICK_OP", NULL},
@@ -38,8 +45,12 @@ struct rpc_clnt_program gf_attach_prog = {
int32_t
my_callback(struct rpc_req *req, struct iovec *iov, int count, void *frame)
{
+ pthread_mutex_lock(&mutex);
rpc_status = req->rpc_status;
done = 1;
+ /* Signal main thread which is the only waiter */
+ pthread_cond_signal(&cond);
+ pthread_mutex_unlock(&mutex);
return 0;
}
@@ -48,6 +59,7 @@ int
send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
{
int ret = -1;
+ struct timespec ts;
struct iobuf *iobuf = NULL;
struct iobref *iobref = NULL;
struct iovec iov = {
@@ -57,7 +69,6 @@ send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
call_frame_t *frame = NULL;
gd1_mgmt_brick_op_req brick_req;
void *req = &brick_req;
- int i;
brick_req.op = op;
brick_req.name = path;
@@ -75,10 +86,6 @@ send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
if (!iobref)
goto out;
- frame = create_frame(this, this->ctx->pool);
- if (!frame)
- goto out;
-
iobref_add(iobref, iobuf);
iov.iov_base = iobuf->ptr;
@@ -91,20 +98,44 @@ send_brick_req(xlator_t *this, struct rpc_clnt *rpc, char *path, int op)
iov.iov_len = ret;
- for (i = 0; i < 60; ++i) {
- if (rpc->conn.connected) {
- break;
- }
- sleep(1);
+ /* Wait for connection */
+ timespec_now_realtime(&ts);
+ ts.tv_sec += CONNECT_TIMEOUT;
+ pthread_mutex_lock(&rpc->conn.lock);
+ {
+ while (!rpc->conn.connected)
+ if (pthread_cond_timedwait(&rpc->conn.cond, &rpc->conn.lock, &ts) ==
+ ETIMEDOUT) {
+ fprintf(stderr, "timeout waiting for RPC connection\n");
+ pthread_mutex_unlock(&rpc->conn.lock);
+ return EXIT_FAILURE;
+ }
+ }
+ pthread_mutex_unlock(&rpc->conn.lock);
+
+ frame = create_frame(this, this->ctx->pool);
+ if (!frame) {
+ ret = -1;
+ goto out;
}
/* Send the msg */
ret = rpc_clnt_submit(rpc, &gf_attach_prog, op, my_callback, &iov, 1, NULL,
0, iobref, frame, NULL, 0, NULL, 0, NULL);
if (!ret) {
- for (i = 0; !done && (i < 120); ++i) {
- sleep(1);
+ /* OK, wait for callback */
+ timespec_now_realtime(&ts);
+ ts.tv_sec += REPLY_TIMEOUT;
+ pthread_mutex_lock(&mutex);
+ {
+ while (!done)
+ if (pthread_cond_timedwait(&cond, &mutex, &ts) == ETIMEDOUT) {
+ fprintf(stderr, "timeout waiting for RPC reply\n");
+ pthread_mutex_unlock(&mutex);
+ return EXIT_FAILURE;
+ }
}
+ pthread_mutex_unlock(&mutex);
}
out:
diff --git a/glusterfsd/src/glusterfsd-messages.h b/glusterfsd/src/glusterfsd-messages.h
index 209279d051b..0cdbffa71ea 100644
--- a/glusterfsd/src/glusterfsd-messages.h
+++ b/glusterfsd/src/glusterfsd-messages.h
@@ -34,6 +34,60 @@ GLFS_MSGID(
glusterfsd_msg_28, glusterfsd_msg_29, glusterfsd_msg_30, glusterfsd_msg_31,
glusterfsd_msg_32, glusterfsd_msg_33, glusterfsd_msg_34, glusterfsd_msg_35,
glusterfsd_msg_36, glusterfsd_msg_37, glusterfsd_msg_38, glusterfsd_msg_39,
- glusterfsd_msg_40, glusterfsd_msg_41, glusterfsd_msg_42, glusterfsd_msg_43);
+ glusterfsd_msg_40, glusterfsd_msg_41, glusterfsd_msg_42, glusterfsd_msg_43,
+ glusterfsd_msg_029, glusterfsd_msg_041, glusterfsd_msg_042);
+
+#define glusterfsd_msg_1_STR "Could not create absolute mountpoint path"
+#define glusterfsd_msg_2_STR "Could not get current working directory"
+#define glusterfsd_msg_4_STR "failed to set mount-point to options dictionary"
+#define glusterfsd_msg_3_STR "failed to set dict value for key"
+#define glusterfsd_msg_5_STR "failed to set disable for key"
+#define glusterfsd_msg_6_STR "failed to set enable for key"
+#define glusterfsd_msg_7_STR \
+ "Not a client process, not performing mount operation"
+#define glusterfsd_msg_8_STR "MOUNT_POINT initialization failed"
+#define glusterfsd_msg_9_STR "loading volume file failed"
+#define glusterfsd_msg_10_STR "xlator option is invalid"
+#define glusterfsd_msg_11_STR "Fetching the volume file from server..."
+#define glusterfsd_msg_12_STR "volume initialization failed"
+#define glusterfsd_msg_34_STR "memory init failed"
+#define glusterfsd_msg_13_STR "ERROR: glusterfs uuid generation failed"
+#define glusterfsd_msg_14_STR "ERROR: glusterfs pool creation failed"
+#define glusterfsd_msg_15_STR \
+ "ERROR: '--volfile-id' is mandatory if '-s' OR '--volfile-server' option " \
+ "is given"
+#define glusterfsd_msg_16_STR "ERROR: parsing the volfile failed"
+#define glusterfsd_msg_33_STR \
+    "obsolete option '--volfile-max-fetch-attempts or fetch-attempts' was " \
+ "provided"
+#define glusterfsd_msg_17_STR "pidfile open failed"
+#define glusterfsd_msg_18_STR "pidfile lock failed"
+#define glusterfsd_msg_20_STR "pidfile truncation failed"
+#define glusterfsd_msg_21_STR "pidfile write failed"
+#define glusterfsd_msg_22_STR "failed to execute pthread_sigmask"
+#define glusterfsd_msg_23_STR "failed to create pthread"
+#define glusterfsd_msg_24_STR "daemonization failed"
+#define glusterfsd_msg_25_STR "mount failed"
+#define glusterfsd_msg_26_STR "failed to construct the graph"
+#define glusterfsd_msg_27_STR "fuse xlator cannot be specified in volume file"
+#define glusterfsd_msg_28_STR "Cannot reach volume specification file"
+#define glusterfsd_msg_29_STR "ERROR: glusterfsd context not initialized"
+#define glusterfsd_msg_43_STR \
+ "command line argument --brick-mux is valid only for brick process"
+#define glusterfsd_msg_029_STR "failed to create command line string"
+#define glusterfsd_msg_30_STR "Started running version"
+#define glusterfsd_msg_31_STR "Could not create new sync-environment"
+#define glusterfsd_msg_40_STR "No change in volfile, continuing"
+#define glusterfsd_msg_39_STR "Unable to create/delete temporary file"
+#define glusterfsd_msg_38_STR \
+ "Not processing brick-op since volume graph is not yet active"
+#define glusterfsd_msg_35_STR "rpc req buffer unserialization failed"
+#define glusterfsd_msg_36_STR "problem in xlator loading"
+#define glusterfsd_msg_37_STR "failed to get dict value"
+#define glusterfsd_msg_41_STR "received attach request for volfile"
+#define glusterfsd_msg_42_STR "failed to unserialize xdata to dictionary"
+#define glusterfsd_msg_041_STR "can't detach. file not found"
+#define glusterfsd_msg_042_STR \
+    "could not detach old graph. Aborting the reconfiguration operation"
#endif /* !_GLUSTERFSD_MESSAGES_H_ */
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 027ff618992..eaf6796e4c3 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -45,8 +45,6 @@ glusterfs_volfile_fetch(glusterfs_ctx_t *ctx);
int
glusterfs_process_volfp(glusterfs_ctx_t *ctx, FILE *fp);
int
-glusterfs_graph_unknown_options(glusterfs_graph_t *graph);
-int
emancipate(glusterfs_ctx_t *ctx, int ret);
int
glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
@@ -65,6 +63,11 @@ glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj);
gf_boolean_t
mgmt_is_multiplexed_daemon(char *name);
+
+static int
+glusterfs_volume_top_perf(const char *brick_path, dict_t *dict,
+ gf_boolean_t write_test);
+
int
mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data)
{
@@ -102,8 +105,8 @@ mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id,
if (!memcmp(sha256_hash, volfile_obj->volfile_checksum,
sizeof(volfile_obj->volfile_checksum))) {
UNLOCK(&ctx->volfile_lock);
- gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_40,
- "No change in volfile, continuing");
+ gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_40,
+ NULL);
goto out;
}
volfile_tmp = volfile_obj;
@@ -115,8 +118,8 @@ mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id,
tmp_fd = mkstemp(template);
if (-1 == tmp_fd) {
UNLOCK(&ctx->volfile_lock);
- gf_msg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
- "Unable to create temporary file: %s", template);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
+ "create template=%s", template, NULL);
ret = -1;
goto out;
}
@@ -126,8 +129,8 @@ mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id,
*/
ret = sys_unlink(template);
if (ret < 0) {
- gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
- "Unable to delete temporary file: %s", template);
+ gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
+ "delete template=%s", template, NULL);
ret = 0;
}
@@ -202,6 +205,7 @@ glusterfs_serialize_reply(rpcsvc_request_t *req, void *arg,
retlen = xdr_serialize_generic(*outmsg, arg, xdrproc);
if (retlen == -1) {
gf_log(THIS->name, GF_LOG_ERROR, "Failed to encode message");
+ GF_FREE(iob);
goto ret;
}
@@ -475,10 +479,6 @@ glusterfs_handle_translator_info_get(rpcsvc_request_t *req)
dict_t *dict = NULL;
xlator_t *this = NULL;
gf1_cli_top_op top_op = 0;
- uint32_t blk_size = 0;
- uint32_t blk_count = 0;
- double time = 0;
- double throughput = 0;
xlator_t *any = NULL;
xlator_t *xlator = NULL;
glusterfs_graph_t *active = NULL;
@@ -511,35 +511,23 @@ glusterfs_handle_translator_info_get(rpcsvc_request_t *req)
}
ret = dict_get_int32(dict, "top-op", (int32_t *)&top_op);
- if ((!ret) &&
- (GF_CLI_TOP_READ_PERF == top_op || GF_CLI_TOP_WRITE_PERF == top_op)) {
- ret = dict_get_uint32(dict, "blk-size", &blk_size);
- if (ret)
- goto cont;
- ret = dict_get_uint32(dict, "blk-cnt", &blk_count);
- if (ret)
- goto cont;
-
- if (GF_CLI_TOP_READ_PERF == top_op) {
- ret = glusterfs_volume_top_read_perf(
- blk_size, blk_count, xlator_req.name, &throughput, &time);
- } else if (GF_CLI_TOP_WRITE_PERF == top_op) {
- ret = glusterfs_volume_top_write_perf(
- blk_size, blk_count, xlator_req.name, &throughput, &time);
- }
- if (ret)
- goto cont;
- ret = dict_set_double(dict, "time", time);
- if (ret)
- goto cont;
- ret = dict_set_double(dict, "throughput", throughput);
- if (ret)
- goto cont;
+ if (ret)
+ goto cont;
+ if (GF_CLI_TOP_READ_PERF == top_op) {
+ ret = glusterfs_volume_top_perf(xlator_req.name, dict, _gf_false);
+ } else if (GF_CLI_TOP_WRITE_PERF == top_op) {
+ ret = glusterfs_volume_top_perf(xlator_req.name, dict, _gf_true);
}
+
cont:
ctx = glusterfsd_ctx;
GF_ASSERT(ctx);
active = ctx->active;
+ if (active == NULL) {
+ gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
+ ret = -1;
+ goto out;
+ }
any = active->first;
xlator = get_xlator_by_name(any, xlator_req.name);
@@ -576,13 +564,12 @@ out:
return ret;
}
-int
-glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time)
+static int
+glusterfs_volume_top_perf(const char *brick_path, dict_t *dict,
+ gf_boolean_t write_test)
{
int32_t fd = -1;
- int32_t input_fd = -1;
+ int32_t output_fd = -1;
char export_path[PATH_MAX] = {
0,
};
@@ -590,46 +577,44 @@ glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
int32_t iter = 0;
int32_t ret = -1;
uint64_t total_blks = 0;
+ uint32_t blk_size;
+ uint32_t blk_count;
+ double throughput = 0;
+ double time = 0;
struct timeval begin, end = {
0,
};
GF_ASSERT(brick_path);
- GF_ASSERT(throughput);
- GF_ASSERT(time);
- if (!(blk_size > 0) || !(blk_count > 0))
- goto out;
- snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
- ".gf-tmp-stats-perf");
+ ret = dict_get_uint32(dict, "blk-size", &blk_size);
+ if (ret)
+ goto out;
+ ret = dict_get_uint32(dict, "blk-cnt", &blk_count);
+ if (ret)
+ goto out;
- fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
- if (-1 == fd) {
- ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
+ if (!(blk_size > 0) || !(blk_count > 0))
goto out;
- }
- buf = GF_MALLOC(blk_size * sizeof(*buf), gf_common_mt_char);
+ buf = GF_CALLOC(1, blk_size * sizeof(*buf), gf_common_mt_char);
if (!buf) {
ret = -1;
+ gf_log("glusterd", GF_LOG_ERROR, "Could not allocate memory");
goto out;
}
- input_fd = open("/dev/zero", O_RDONLY);
- if (-1 == input_fd) {
+ snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
+ ".gf-tmp-stats-perf");
+ fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
+ if (-1 == fd) {
ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Unable to open input file");
+ gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
goto out;
}
gettimeofday(&begin, NULL);
for (iter = 0; iter < blk_count; iter++) {
- ret = sys_read(input_fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
ret = sys_write(fd, buf, blk_size);
if (ret != blk_size) {
ret = -1;
@@ -637,77 +622,36 @@ glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
}
total_blks += ret;
}
- ret = 0;
+ gettimeofday(&end, NULL);
if (total_blks != ((uint64_t)blk_size * blk_count)) {
gf_log("glusterd", GF_LOG_WARNING, "Error in write");
ret = -1;
goto out;
}
- gettimeofday(&end, NULL);
- *time = (end.tv_sec - begin.tv_sec) * 1e6 + (end.tv_usec - begin.tv_usec);
- *throughput = total_blks / *time;
+ time = gf_tvdiff(&begin, &end);
+ throughput = total_blks / time;
gf_log("glusterd", GF_LOG_INFO,
"Throughput %.2f Mbps time %.2f secs "
"bytes written %" PRId64,
- *throughput, *time, total_blks);
-
-out:
- if (fd >= 0)
- sys_close(fd);
- if (input_fd >= 0)
- sys_close(input_fd);
- GF_FREE(buf);
- sys_unlink(export_path);
-
- return ret;
-}
-
-int
-glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time)
-{
- int32_t fd = -1;
- int32_t input_fd = -1;
- int32_t output_fd = -1;
- char export_path[PATH_MAX] = {
- 0,
- };
- char *buf = NULL;
- int32_t iter = 0;
- int32_t ret = -1;
- uint64_t total_blks = 0;
- struct timeval begin, end = {
- 0,
- };
+ throughput, time, total_blks);
- GF_ASSERT(brick_path);
- GF_ASSERT(throughput);
- GF_ASSERT(time);
- if (!(blk_size > 0) || !(blk_count > 0))
- goto out;
-
- snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
- ".gf-tmp-stats-perf");
- fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
- if (-1 == fd) {
- ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
+ /* if it's a write test, we are done. Otherwise, we continue to the read
+ * part */
+ if (write_test == _gf_true) {
+ ret = 0;
goto out;
}
- buf = GF_MALLOC(blk_size * sizeof(*buf), gf_common_mt_char);
- if (!buf) {
- ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not allocate memory");
+ ret = sys_fsync(fd);
+ if (ret) {
+ gf_log("glusterd", GF_LOG_ERROR, "could not flush cache");
goto out;
}
-
- input_fd = open("/dev/zero", O_RDONLY);
- if (-1 == input_fd) {
+ ret = sys_lseek(fd, 0L, 0);
+ if (ret != 0) {
+ gf_log("glusterd", GF_LOG_ERROR, "could not seek back to start");
ret = -1;
- gf_log("glusterd", GF_LOG_ERROR, "Could not open input file");
goto out;
}
@@ -718,30 +662,8 @@ glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
goto out;
}
- for (iter = 0; iter < blk_count; iter++) {
- ret = sys_read(input_fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
- ret = sys_write(fd, buf, blk_size);
- if (ret != blk_size) {
- ret = -1;
- goto out;
- }
- }
+ total_blks = 0;
- ret = sys_fsync(fd);
- if (ret) {
- gf_log("glusterd", GF_LOG_ERROR, "could not flush cache");
- goto out;
- }
- ret = sys_lseek(fd, 0L, 0);
- if (ret != 0) {
- gf_log("glusterd", GF_LOG_ERROR, "could not seek back to start");
- ret = -1;
- goto out;
- }
gettimeofday(&begin, NULL);
for (iter = 0; iter < blk_count; iter++) {
ret = sys_read(fd, buf, blk_size);
@@ -756,31 +678,36 @@ glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
}
total_blks += ret;
}
- ret = 0;
+ gettimeofday(&end, NULL);
if (total_blks != ((uint64_t)blk_size * blk_count)) {
ret = -1;
gf_log("glusterd", GF_LOG_WARNING, "Error in read");
goto out;
}
- gettimeofday(&end, NULL);
- *time = (end.tv_sec - begin.tv_sec) * 1e6 + (end.tv_usec - begin.tv_usec);
- *throughput = total_blks / *time;
+ time = gf_tvdiff(&begin, &end);
+ throughput = total_blks / time;
gf_log("glusterd", GF_LOG_INFO,
"Throughput %.2f Mbps time %.2f secs "
"bytes read %" PRId64,
- *throughput, *time, total_blks);
-
+ throughput, time, total_blks);
+ ret = 0;
out:
if (fd >= 0)
sys_close(fd);
- if (input_fd >= 0)
- sys_close(input_fd);
if (output_fd >= 0)
sys_close(output_fd);
GF_FREE(buf);
sys_unlink(export_path);
-
+ if (ret == 0) {
+ ret = dict_set_double(dict, "time", time);
+ if (ret)
+ goto end;
+ ret = dict_set_double(dict, "throughput", throughput);
+ if (ret)
+ goto end;
+ }
+end:
return ret;
}
@@ -796,7 +723,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
xlator_t *xlator = NULL;
xlator_t *any = NULL;
dict_t *output = NULL;
- char key[2048] = {0};
+ char key[32] = {0};
+ int len;
char *xname = NULL;
glusterfs_ctx_t *ctx = NULL;
glusterfs_graph_t *active = NULL;
@@ -820,10 +748,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
active = ctx->active;
if (!active) {
ret = -1;
- gf_msg(this->name, GF_LOG_ERROR, EAGAIN, glusterfsd_msg_38,
- "Not processing brick-op no. %d since volume graph is "
- "not yet active.",
- xlator_req.op);
+ gf_smsg(this->name, GF_LOG_ERROR, EAGAIN, glusterfsd_msg_38,
+ "brick-op_no.=%d", xlator_req.op, NULL);
goto out;
}
any = active->first;
@@ -848,8 +774,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
}
for (i = 0; i < count; i++) {
- snprintf(key, sizeof(key), "xl-%d", i);
- ret = dict_get_str(input, key, &xname);
+ len = snprintf(key, sizeof(key), "xl-%d", i);
+ ret = dict_get_strn(input, key, len, &xname);
if (ret) {
gf_log(this->name, GF_LOG_ERROR,
"Couldn't get "
@@ -867,8 +793,8 @@ glusterfs_handle_translator_op(rpcsvc_request_t *req)
}
}
for (i = 0; i < count; i++) {
- snprintf(key, sizeof(key), "xl-%d", i);
- ret = dict_get_str(input, key, &xname);
+ len = snprintf(key, sizeof(key), "xl-%d", i);
+ ret = dict_get_strn(input, key, len, &xname);
xlator = xlator_search_by_name(any, xname);
XLATOR_NOTIFY(ret, xlator, GF_EVENT_TRANSLATOR_OP, input, output);
/* If notify fails for an xlator we need to capture it but
@@ -942,8 +868,7 @@ glusterfs_handle_bitrot(rpcsvc_request_t *req)
xlator_req.input.input_len, &input);
if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_35,
- "rpc req buffer unserialization failed.");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_35, NULL);
goto out;
}
@@ -952,8 +877,7 @@ glusterfs_handle_bitrot(rpcsvc_request_t *req)
xlator = xlator_search_by_name(any, xname);
if (!xlator) {
snprintf(msg, sizeof(msg), "xlator %s is not loaded", xname);
- gf_msg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_36,
- "problem in xlator loading.");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_36, NULL);
goto out;
}
@@ -966,8 +890,7 @@ glusterfs_handle_bitrot(rpcsvc_request_t *req)
ret = dict_get_str(input, "scrub-value", &scrub_opt);
if (ret) {
snprintf(msg, sizeof(msg), "Failed to get scrub value");
- gf_msg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_37,
- "failed to get dict value");
+ gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_37, NULL);
ret = -1;
goto out;
}
@@ -1034,44 +957,49 @@ glusterfs_handle_attach(rpcsvc_request_t *req)
}
ret = 0;
+ if (!this->ctx->active) {
+ gf_log(this->name, GF_LOG_WARNING,
+ "got attach for %s but no active graph", xlator_req.name);
+ goto post_unlock;
+ }
+
+ gf_log(this->name, GF_LOG_INFO, "got attach for %s", xlator_req.name);
+
LOCK(&ctx->volfile_lock);
{
- if (this->ctx->active) {
- gf_log(this->name, GF_LOG_INFO, "got attach for %s",
- xlator_req.name);
- ret = glusterfs_graph_attach(this->ctx->active, xlator_req.name,
- &newgraph);
- if (!ret && (newgraph && newgraph->first)) {
- nextchild = newgraph->first;
- ret = xlator_notify(nextchild, GF_EVENT_PARENT_UP, nextchild);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- LG_MSG_EVENT_NOTIFY_FAILED,
- "Parent up notification "
- "failed for %s ",
- nextchild->name);
- goto out;
- }
- /* we need a protocol/server xlator as
- * nextchild
- */
- srv_xl = this->ctx->active->first;
- srv_conf = (server_conf_t *)srv_xl->private;
- rpcsvc_autoscale_threads(this->ctx, srv_conf->rpc, 1);
+ ret = glusterfs_graph_attach(this->ctx->active, xlator_req.name,
+ &newgraph);
+ if (!ret && (newgraph && newgraph->first)) {
+ nextchild = newgraph->first;
+ ret = xlator_notify(nextchild, GF_EVENT_PARENT_UP, nextchild);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, 0, LG_MSG_EVENT_NOTIFY_FAILED,
+ "event=ParentUp", "name=%s", nextchild->name, NULL);
+ goto unlock;
}
- } else {
- gf_log(this->name, GF_LOG_WARNING,
- "got attach for %s but no active graph", xlator_req.name);
+ /* we need a protocol/server xlator as
+ * nextchild
+ */
+ srv_xl = this->ctx->active->first;
+ srv_conf = (server_conf_t *)srv_xl->private;
+ rpcsvc_autoscale_threads(this->ctx, srv_conf->rpc, 1);
}
if (ret) {
ret = -1;
}
-
- glusterfs_translator_info_response_send(req, ret, NULL, NULL);
-
- out:
+ ret = glusterfs_translator_info_response_send(req, ret, NULL, NULL);
+ if (ret) {
+ /* Response sent back to glusterd, req is already destroyed. So
+ * resetting the ret to 0. Otherwise another response will be
+ * send from rpcsvc_check_and_reply_error. Which will lead to
+ * double resource leak.
+ */
+ ret = 0;
+ }
+ unlock:
UNLOCK(&ctx->volfile_lock);
}
+post_unlock:
if (xlator_req.dict.dict_val)
free(xlator_req.dict.dict_val);
free(xlator_req.input.input_val);
@@ -1088,14 +1016,12 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req)
0,
};
xlator_t *this = NULL;
- glusterfs_ctx_t *ctx = NULL;
dict_t *dict = NULL;
GF_ASSERT(req);
this = THIS;
GF_ASSERT(this);
- ctx = this->ctx;
ret = xdr_to_generic(req->msg[0], &xlator_req,
(xdrproc_t)xdr_gd1_mgmt_brick_op_req);
@@ -1105,10 +1031,8 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req)
goto out;
}
- gf_msg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41,
- "received attach "
- "request for volfile-id=%s",
- xlator_req.name);
+ gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41, "volfile-id=%s",
+ xlator_req.name, NULL);
dict = dict_new();
if (!dict) {
@@ -1120,22 +1044,16 @@ glusterfs_handle_svc_attach(rpcsvc_request_t *req)
ret = dict_unserialize(xlator_req.dict.dict_val, xlator_req.dict.dict_len,
&dict);
if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42,
- "failed to unserialize xdata to dictionary");
+ gf_smsg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42, NULL);
goto out;
}
dict->extra_stdfree = xlator_req.dict.dict_val;
ret = 0;
- if (ctx->active) {
- ret = mgmt_process_volfile(xlator_req.input.input_val,
- xlator_req.input.input_len, xlator_req.name,
- dict);
- } else {
- gf_msg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42,
- "got attach for %s but no active graph", xlator_req.name);
- }
+ ret = mgmt_process_volfile(xlator_req.input.input_val,
+ xlator_req.input.input_len, xlator_req.name,
+ dict);
out:
if (dict)
dict_unref(dict);
@@ -1154,8 +1072,8 @@ glusterfs_handle_svc_detach(rpcsvc_request_t *req)
0,
};
ssize_t ret;
- glusterfs_ctx_t *ctx = NULL;
gf_volfile_t *volfile_obj = NULL;
+ glusterfs_ctx_t *ctx = NULL;
gf_volfile_t *volfile_tmp = NULL;
ret = xdr_to_generic(req->msg[0], &xlator_req,
@@ -1178,8 +1096,8 @@ glusterfs_handle_svc_detach(rpcsvc_request_t *req)
if (!volfile_tmp) {
UNLOCK(&ctx->volfile_lock);
- gf_msg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_41,
- "can't detach %s - not found", xlator_req.name);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_041, "name=%s",
+ xlator_req.name, NULL);
/*
* Used to be -ENOENT. However, the caller asked us to
* make sure it's down and if it's already down that's
@@ -1188,12 +1106,12 @@ glusterfs_handle_svc_detach(rpcsvc_request_t *req)
ret = 0;
goto out;
}
+ /* coverity[ORDER_REVERSAL] */
ret = glusterfs_process_svc_detach(ctx, volfile_tmp);
if (ret) {
UNLOCK(&ctx->volfile_lock);
- gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL, glusterfsd_msg_41,
- "Could not detach "
- "old graph. Aborting the reconfiguration operation");
+ gf_smsg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL, glusterfsd_msg_042,
+ NULL);
goto out;
}
}
@@ -1250,10 +1168,8 @@ glusterfs_handle_dump_metrics(rpcsvc_request_t *req)
goto out;
if (statbuf.st_size > GF_UNIT_MB) {
- gf_msg(this->name, GF_LOG_WARNING, ENOMEM, LG_MSG_NO_MEMORY,
- "Allocated size exceeds expectation: "
- "reconsider logic (%" PRId64 ")",
- statbuf.st_size);
+ gf_smsg(this->name, GF_LOG_WARNING, ENOMEM, LG_MSG_NO_MEMORY,
+ "reconsider logic (%" PRId64 ")", statbuf.st_size, NULL);
}
msg = GF_CALLOC(1, (statbuf.st_size + 1), gf_common_mt_char);
if (!msg)
@@ -1571,10 +1487,12 @@ glusterfs_handle_node_status(rpcsvc_request_t *req)
}
any = active->first;
- if ((cmd & GF_CLI_STATUS_NFS) != 0)
- ret = gf_asprintf(&node_name, "%s", "nfs-server");
- else if ((cmd & GF_CLI_STATUS_SHD) != 0)
+ if ((cmd & GF_CLI_STATUS_SHD) != 0)
ret = gf_asprintf(&node_name, "%s", "glustershd");
+#ifdef BUILD_GNFS
+ else if ((cmd & GF_CLI_STATUS_NFS) != 0)
+ ret = gf_asprintf(&node_name, "%s", "nfs-server");
+#endif
else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
ret = gf_asprintf(&node_name, "%s", "quotad");
else if ((cmd & GF_CLI_STATUS_BITD) != 0)
@@ -1634,7 +1552,7 @@ glusterfs_handle_node_status(rpcsvc_request_t *req)
break;
case GF_CLI_STATUS_CLIENTS:
- // clients not availbale for SHD
+ // clients not available for SHD
if ((cmd & GF_CLI_STATUS_SHD) != 0)
break;
@@ -1917,6 +1835,11 @@ glusterfs_handle_barrier(rpcsvc_request_t *req)
ctx = glusterfsd_ctx;
GF_ASSERT(ctx);
active = ctx->active;
+ if (active == NULL) {
+ gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
+ ret = -1;
+ goto out;
+ }
top = active->first;
for (trav = top->children; trav; trav = trav->next) {
@@ -2021,14 +1944,14 @@ glusterfs_handle_rpc_msg(rpcsvc_request_t *req)
return ret;
}
-rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
- [GF_CBK_FETCHSPEC] = {"FETCHSPEC", GF_CBK_FETCHSPEC, mgmt_cbk_spec},
- [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", GF_CBK_EVENT_NOTIFY,
- mgmt_cbk_event},
- [GF_CBK_STATEDUMP] = {"STATEDUMP", GF_CBK_STATEDUMP, mgmt_cbk_event},
+static rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
+ [GF_CBK_FETCHSPEC] = {"FETCHSPEC", mgmt_cbk_spec, GF_CBK_FETCHSPEC},
+ [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", mgmt_cbk_event,
+ GF_CBK_EVENT_NOTIFY},
+ [GF_CBK_STATEDUMP] = {"STATEDUMP", mgmt_cbk_event, GF_CBK_STATEDUMP},
};
-struct rpcclnt_cb_program mgmt_cbk_prog = {
+static struct rpcclnt_cb_program mgmt_cbk_prog = {
.progname = "GlusterFS Callback",
.prognum = GLUSTER_CBK_PROGRAM,
.progver = GLUSTER_CBK_VERSION,
@@ -2036,7 +1959,7 @@ struct rpcclnt_cb_program mgmt_cbk_prog = {
.numactors = GF_CBK_MAXVALUE,
};
-char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
+static char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
[GF_PMAP_NULL] = "NULL",
[GF_PMAP_PORTBYBRICK] = "PORTBYBRICK",
[GF_PMAP_BRICKBYPORT] = "BRICKBYPORT",
@@ -2045,14 +1968,14 @@ char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
[GF_PMAP_SIGNUP] = "SIGNUP", /* DEPRECATED - DON'T USE! */
};
-rpc_clnt_prog_t clnt_pmap_prog = {
+static rpc_clnt_prog_t clnt_pmap_prog = {
.progname = "Gluster Portmap",
.prognum = GLUSTER_PMAP_PROGRAM,
.progver = GLUSTER_PMAP_VERSION,
.procnames = clnt_pmap_procs,
};
-char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
+static char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
[GF_HNDSK_NULL] = "NULL",
[GF_HNDSK_SETVOLUME] = "SETVOLUME",
[GF_HNDSK_GETSPEC] = "GETSPEC",
@@ -2060,57 +1983,55 @@ char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
[GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY",
};
-rpc_clnt_prog_t clnt_handshake_prog = {
+static rpc_clnt_prog_t clnt_handshake_prog = {
.progname = "GlusterFS Handshake",
.prognum = GLUSTER_HNDSK_PROGRAM,
.progver = GLUSTER_HNDSK_VERSION,
.procnames = clnt_handshake_procs,
};
-rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = {
- [GLUSTERD_BRICK_NULL] = {"NULL", GLUSTERD_BRICK_NULL,
- glusterfs_handle_rpc_msg, NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", GLUSTERD_BRICK_TERMINATE,
- glusterfs_handle_terminate, NULL, 0, DRC_NA},
+static rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = {
+ [GLUSTERD_BRICK_NULL] = {"NULL", glusterfs_handle_rpc_msg, NULL,
+ GLUSTERD_BRICK_NULL, DRC_NA, 0},
+ [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", glusterfs_handle_terminate, NULL,
+ GLUSTERD_BRICK_TERMINATE, DRC_NA, 0},
[GLUSTERD_BRICK_XLATOR_INFO] = {"TRANSLATOR INFO",
- GLUSTERD_BRICK_XLATOR_INFO,
glusterfs_handle_translator_info_get, NULL,
- 0, DRC_NA},
- [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP", GLUSTERD_BRICK_XLATOR_OP,
- glusterfs_handle_translator_op, NULL, 0,
- DRC_NA},
- [GLUSTERD_BRICK_STATUS] = {"STATUS", GLUSTERD_BRICK_STATUS,
- glusterfs_handle_brick_status, NULL, 0, DRC_NA},
+ GLUSTERD_BRICK_XLATOR_INFO, DRC_NA, 0},
+ [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP",
+ glusterfs_handle_translator_op, NULL,
+ GLUSTERD_BRICK_XLATOR_OP, DRC_NA, 0},
+ [GLUSTERD_BRICK_STATUS] = {"STATUS", glusterfs_handle_brick_status, NULL,
+ GLUSTERD_BRICK_STATUS, DRC_NA, 0},
[GLUSTERD_BRICK_XLATOR_DEFRAG] = {"TRANSLATOR DEFRAG",
- GLUSTERD_BRICK_XLATOR_DEFRAG,
- glusterfs_handle_defrag, NULL, 0, DRC_NA},
- [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", GLUSTERD_NODE_PROFILE,
- glusterfs_handle_nfs_profile, NULL, 0, DRC_NA},
- [GLUSTERD_NODE_STATUS] = {"NFS STATUS", GLUSTERD_NODE_STATUS,
- glusterfs_handle_node_status, NULL, 0, DRC_NA},
+ glusterfs_handle_defrag, NULL,
+ GLUSTERD_BRICK_XLATOR_DEFRAG, DRC_NA, 0},
+ [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", glusterfs_handle_nfs_profile,
+ NULL, GLUSTERD_NODE_PROFILE, DRC_NA, 0},
+ [GLUSTERD_NODE_STATUS] = {"NFS STATUS", glusterfs_handle_node_status, NULL,
+ GLUSTERD_NODE_STATUS, DRC_NA, 0},
[GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP",
- GLUSTERD_VOLUME_BARRIER_OP,
- glusterfs_handle_volume_barrier_op, NULL, 0,
- DRC_NA},
- [GLUSTERD_BRICK_BARRIER] = {"BARRIER", GLUSTERD_BRICK_BARRIER,
- glusterfs_handle_barrier, NULL, 0, DRC_NA},
- [GLUSTERD_NODE_BITROT] = {"BITROT", GLUSTERD_NODE_BITROT,
- glusterfs_handle_bitrot, NULL, 0, DRC_NA},
- [GLUSTERD_BRICK_ATTACH] = {"ATTACH", GLUSTERD_BRICK_ATTACH,
- glusterfs_handle_attach, NULL, 0, DRC_NA},
+ glusterfs_handle_volume_barrier_op, NULL,
+ GLUSTERD_VOLUME_BARRIER_OP, DRC_NA, 0},
+ [GLUSTERD_BRICK_BARRIER] = {"BARRIER", glusterfs_handle_barrier, NULL,
+ GLUSTERD_BRICK_BARRIER, DRC_NA, 0},
+ [GLUSTERD_NODE_BITROT] = {"BITROT", glusterfs_handle_bitrot, NULL,
+ GLUSTERD_NODE_BITROT, DRC_NA, 0},
+ [GLUSTERD_BRICK_ATTACH] = {"ATTACH", glusterfs_handle_attach, NULL,
+ GLUSTERD_BRICK_ATTACH, DRC_NA, 0},
- [GLUSTERD_DUMP_METRICS] = {"DUMP METRICS", GLUSTERD_DUMP_METRICS,
- glusterfs_handle_dump_metrics, NULL, 0, DRC_NA},
+ [GLUSTERD_DUMP_METRICS] = {"DUMP METRICS", glusterfs_handle_dump_metrics,
+ NULL, GLUSTERD_DUMP_METRICS, DRC_NA, 0},
- [GLUSTERD_SVC_ATTACH] = {"ATTACH CLIENT", GLUSTERD_SVC_ATTACH,
- glusterfs_handle_svc_attach, NULL, 0, DRC_NA},
+ [GLUSTERD_SVC_ATTACH] = {"ATTACH CLIENT", glusterfs_handle_svc_attach, NULL,
+ GLUSTERD_SVC_ATTACH, DRC_NA, 0},
- [GLUSTERD_SVC_DETACH] = {"DETACH CLIENT", GLUSTERD_SVC_DETACH,
- glusterfs_handle_svc_detach, NULL, 0, DRC_NA},
+ [GLUSTERD_SVC_DETACH] = {"DETACH CLIENT", glusterfs_handle_svc_detach, NULL,
+ GLUSTERD_SVC_DETACH, DRC_NA, 0},
};
-struct rpcsvc_program glusterfs_mop_prog = {
+static struct rpcsvc_program glusterfs_mop_prog = {
.progname = "Gluster Brick operations",
.prognum = GD_BRICK_PROGRAM,
.progver = GD_BRICK_VERSION,
@@ -2238,10 +2159,12 @@ mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
}
dict->extra_stdfree = rsp.xdata.xdata_val;
- /* glusterd2 only */
ret = dict_get_str(dict, "servers-list", &servers_list);
if (ret) {
- goto volfile;
+ /* Server list is set by glusterd at the time of getspec */
+ ret = dict_get_str(dict, GLUSTERD_BRICK_SERVERS, &servers_list);
+ if (ret)
+ goto volfile;
}
gf_log(frame->this->name, GF_LOG_INFO,
@@ -2291,8 +2214,8 @@ volfile:
tmp_fd = mkstemp(template);
if (-1 == tmp_fd) {
UNLOCK(&ctx->volfile_lock);
- gf_msg(frame->this->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
- "Unable to create temporary file: %s", template);
+ gf_smsg(frame->this->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
+ "create template=%s", template, NULL);
ret = -1;
goto post_unlock;
}
@@ -2302,8 +2225,8 @@ volfile:
*/
ret = sys_unlink(template);
if (ret < 0) {
- gf_msg(frame->this->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
- "Unable to delete temporary file: %s", template);
+ gf_smsg(frame->this->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
+ "delete template=%s", template, NULL);
ret = 0;
}
@@ -2864,50 +2787,6 @@ out:
}
int
-glusterfs_listener_stop(glusterfs_ctx_t *ctx)
-{
- cmd_args_t *cmd_args = NULL;
- rpcsvc_t *rpc = NULL;
- rpcsvc_listener_t *listener = NULL;
- rpcsvc_listener_t *next = NULL;
- int ret = 0;
- xlator_t *this = NULL;
-
- GF_ASSERT(ctx);
-
- rpc = ctx->listener;
- ctx->listener = NULL;
-
- (void)rpcsvc_program_unregister(rpc, &glusterfs_mop_prog);
-
- list_for_each_entry_safe(listener, next, &rpc->listeners, list)
- {
- rpcsvc_listener_destroy(listener);
- }
-
- (void)rpcsvc_unregister_notify(rpc, glusterfs_rpcsvc_notify, THIS);
-
- GF_FREE(rpc);
-
- cmd_args = &ctx->cmd_args;
- if (cmd_args->sock_file) {
- ret = sys_unlink(cmd_args->sock_file);
- if (ret && (ENOENT == errno)) {
- ret = 0;
- }
- }
-
- if (ret) {
- this = THIS;
- gf_log(this->name, GF_LOG_ERROR,
- "Failed to unlink listener "
- "socket %s, error: %s",
- cmd_args->sock_file, strerror(errno));
- }
- return ret;
-}
-
-int
glusterfs_mgmt_notify(int32_t op, void *data, ...)
{
int ret = 0;
@@ -3139,9 +3018,6 @@ glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx)
int ret = -1;
int emancipate_ret = -1;
cmd_args_t *cmd_args = NULL;
- char brick_name[PATH_MAX] = {
- 0,
- };
cmd_args = &ctx->cmd_args;
@@ -3152,14 +3028,6 @@ glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx)
goto out;
}
- if (cmd_args->volfile_server_transport &&
- !strcmp(cmd_args->volfile_server_transport, "rdma")) {
- snprintf(brick_name, sizeof(brick_name), "%s.rdma",
- cmd_args->brick_name);
- req.brick = brick_name;
- } else
- req.brick = cmd_args->brick_name;
-
req.port = cmd_args->brick_port;
req.pid = (int)getpid(); /* only glusterd2 consumes this */
diff --git a/glusterfsd/src/glusterfsd.c b/glusterfsd/src/glusterfsd.c
index cf6d9a7215c..dae41f33fef 100644
--- a/glusterfsd/src/glusterfsd.c
+++ b/glusterfsd/src/glusterfsd.c
@@ -47,12 +47,6 @@
#include <malloc.h>
#endif
-#ifdef HAVE_MALLOC_STATS
-#ifdef DEBUG
-#include <mcheck.h>
-#endif
-#endif
-
#include <glusterfs/xlator.h>
#include <glusterfs/glusterfs.h>
#include <glusterfs/compat.h>
@@ -198,7 +192,7 @@ static struct argp_option gf_options[] = {
{"brick-port", ARGP_BRICK_PORT_KEY, "BRICK-PORT", OPTION_HIDDEN,
"Brick Port to be registered with Gluster portmapper"},
{"fopen-keep-cache", ARGP_FOPEN_KEEP_CACHE_KEY, "BOOL", OPTION_ARG_OPTIONAL,
- "Do not purge the cache on file open"},
+ "Do not purge the cache on file open [default: false]"},
{"global-timer-wheel", ARGP_GLOBAL_TIMER_WHEEL, "BOOL", OPTION_ARG_OPTIONAL,
"Instantiate process global timer-wheel"},
{"thin-client", ARGP_THIN_CLIENT_KEY, 0, 0,
@@ -222,7 +216,10 @@ static struct argp_option gf_options[] = {
"Resolve all auxiliary groups in fuse translator (max 32 otherwise)"},
{"lru-limit", ARGP_FUSE_LRU_LIMIT_KEY, "N", 0,
"Set fuse module's limit for number of inodes kept in LRU list to N "
- "[default: 131072]"},
+ "[default: 65536]"},
+ {"invalidate-limit", ARGP_FUSE_INVALIDATE_LIMIT_KEY, "N", 0,
+ "Suspend inode invalidations implied by 'lru-limit' if the number of "
+ "outstanding invalidations reaches N"},
{"background-qlen", ARGP_FUSE_BACKGROUND_QLEN_KEY, "N", 0,
"Set fuse module's background queue length to N "
"[default: 64]"},
@@ -276,6 +273,9 @@ static struct argp_option gf_options[] = {
"attribute, dentry and page-cache. "
"Disable this only if same files/directories are not accessed across "
"two different mounts concurrently [default: \"on\"]"},
+ {"fuse-dev-eperm-ratelimit-ns", ARGP_FUSE_DEV_EPERM_RATELIMIT_NS_KEY,
+ "OPTIONS", OPTION_HIDDEN,
+ "rate limit reading from fuse device upon EPERM failure"},
{"brick-mux", ARGP_BRICK_MUX_KEY, 0, 0, "Enable brick mux. "},
{0, 0, 0, 0, "Miscellaneous Options:"},
{
@@ -292,8 +292,12 @@ int
glusterfs_mgmt_init(glusterfs_ctx_t *ctx);
int
glusterfs_listener_init(glusterfs_ctx_t *ctx);
-int
-glusterfs_listener_stop(glusterfs_ctx_t *ctx);
+
+#define DICT_SET_VAL(method, dict, key, val, msgid) \
+ if (method(dict, key, val)) { \
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, msgid, "key=%s", key); \
+ goto err; \
+ }
static int
set_fuse_mount_options(glusterfs_ctx_t *ctx, dict_t *options)
@@ -315,172 +319,97 @@ set_fuse_mount_options(glusterfs_ctx_t *ctx, dict_t *options)
ret = gf_asprintf(&mount_point, "%s/%s", cwd,
cmd_args->mount_point);
if (ret == -1) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_1,
- "Could not create absolute mountpoint "
- "path");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_1,
+ "gf_asprintf failed", NULL);
goto err;
}
} else {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_2,
- "Could not get current working directory");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_2,
+ "getcwd failed", NULL);
goto err;
}
- } else
- mount_point = gf_strdup(cmd_args->mount_point);
- ret = dict_set_dynstr(options, ZR_MOUNTPOINT_OPT, mount_point);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_3,
- "failed to set mount-point to options dictionary");
- goto err;
+ } else {
+ mount_point = gf_strdup(cmd_args->mount_point);
}
+ DICT_SET_VAL(dict_set_dynstr_sizen, options, ZR_MOUNTPOINT_OPT, mount_point,
+ glusterfsd_msg_3);
if (cmd_args->fuse_attribute_timeout >= 0) {
- ret = dict_set_double(options, ZR_ATTR_TIMEOUT_OPT,
- cmd_args->fuse_attribute_timeout);
-
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_4,
- "failed to set dict value "
- "for key " ZR_ATTR_TIMEOUT_OPT);
- goto err;
- }
+ DICT_SET_VAL(dict_set_double, options, ZR_ATTR_TIMEOUT_OPT,
+ cmd_args->fuse_attribute_timeout, glusterfsd_msg_3);
}
if (cmd_args->fuse_entry_timeout >= 0) {
- ret = dict_set_double(options, ZR_ENTRY_TIMEOUT_OPT,
- cmd_args->fuse_entry_timeout);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key " ZR_ENTRY_TIMEOUT_OPT);
- goto err;
- }
+ DICT_SET_VAL(dict_set_double, options, ZR_ENTRY_TIMEOUT_OPT,
+ cmd_args->fuse_entry_timeout, glusterfsd_msg_3);
}
if (cmd_args->fuse_negative_timeout >= 0) {
- ret = dict_set_double(options, ZR_NEGATIVE_TIMEOUT_OPT,
- cmd_args->fuse_negative_timeout);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key " ZR_NEGATIVE_TIMEOUT_OPT);
- goto err;
- }
+ DICT_SET_VAL(dict_set_double, options, ZR_NEGATIVE_TIMEOUT_OPT,
+ cmd_args->fuse_negative_timeout, glusterfsd_msg_3);
}
if (cmd_args->client_pid_set) {
- ret = dict_set_int32(options, "client-pid", cmd_args->client_pid);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key client-pid");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "client-pid",
+ cmd_args->client_pid, glusterfsd_msg_3);
}
if (cmd_args->uid_map_root) {
- ret = dict_set_int32(options, "uid-map-root", cmd_args->uid_map_root);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "uid-map-root");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "uid-map-root",
+ cmd_args->uid_map_root, glusterfsd_msg_3);
}
if (cmd_args->volfile_check) {
- ret = dict_set_int32(options, ZR_STRICT_VOLFILE_CHECK,
- cmd_args->volfile_check);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key " ZR_STRICT_VOLFILE_CHECK);
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, ZR_STRICT_VOLFILE_CHECK,
+ cmd_args->volfile_check, glusterfsd_msg_3);
}
if (cmd_args->dump_fuse) {
- ret = dict_set_static_ptr(options, ZR_DUMP_FUSE, cmd_args->dump_fuse);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key " ZR_DUMP_FUSE);
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, ZR_DUMP_FUSE,
+ cmd_args->dump_fuse, glusterfsd_msg_3);
}
if (cmd_args->acl) {
- ret = dict_set_static_ptr(options, "acl", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key acl");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "acl", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->selinux) {
- ret = dict_set_static_ptr(options, "selinux", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key selinux");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "selinux", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->capability) {
- ret = dict_set_static_ptr(options, "capability", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key capability");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "capability", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->aux_gfid_mount) {
- ret = dict_set_static_ptr(options, "virtual-gfid-access", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "aux-gfid-mount");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "virtual-gfid-access", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->enable_ino32) {
- ret = dict_set_static_ptr(options, "enable-ino32", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "enable-ino32");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "enable-ino32", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->read_only) {
- ret = dict_set_static_ptr(options, "read-only", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key read-only");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "read-only", "on",
+ glusterfsd_msg_3);
}
switch (cmd_args->fopen_keep_cache) {
case GF_OPTION_ENABLE:
- ret = dict_set_static_ptr(options, "fopen-keep-cache", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "fopen-keep-cache");
- goto err;
- }
+
+ DICT_SET_VAL(dict_set_static_ptr, options, "fopen-keep-cache", "on",
+ glusterfsd_msg_3);
break;
case GF_OPTION_DISABLE:
- ret = dict_set_static_ptr(options, "fopen-keep-cache", "off");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "fopen-keep-cache");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "fopen-keep-cache",
+ "off", glusterfsd_msg_3);
break;
- case GF_OPTION_DEFERRED: /* default */
default:
gf_msg_debug("glusterfsd", 0, "fopen-keep-cache mode %d",
cmd_args->fopen_keep_cache);
@@ -488,72 +417,43 @@ set_fuse_mount_options(glusterfs_ctx_t *ctx, dict_t *options)
}
if (cmd_args->gid_timeout_set) {
- ret = dict_set_int32(options, "gid-timeout", cmd_args->gid_timeout);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key gid-timeout");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "gid-timeout",
+ cmd_args->gid_timeout, glusterfsd_msg_3);
}
if (cmd_args->resolve_gids) {
- ret = dict_set_static_ptr(options, "resolve-gids", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "resolve-gids");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "resolve-gids", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->lru_limit >= 0) {
- ret = dict_set_int32(options, "lru-limit", cmd_args->lru_limit);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "lru-limit");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "lru-limit",
+ cmd_args->lru_limit, glusterfsd_msg_3);
+ }
+
+ if (cmd_args->invalidate_limit >= 0) {
+ DICT_SET_VAL(dict_set_int32_sizen, options, "invalidate-limit",
+ cmd_args->invalidate_limit, glusterfsd_msg_3);
}
if (cmd_args->background_qlen) {
- ret = dict_set_int32(options, "background-qlen",
- cmd_args->background_qlen);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "background-qlen");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "background-qlen",
+ cmd_args->background_qlen, glusterfsd_msg_3);
}
if (cmd_args->congestion_threshold) {
- ret = dict_set_int32(options, "congestion-threshold",
- cmd_args->congestion_threshold);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "congestion-threshold");
- goto err;
- }
+ DICT_SET_VAL(dict_set_int32_sizen, options, "congestion-threshold",
+ cmd_args->congestion_threshold, glusterfsd_msg_3);
}
switch (cmd_args->fuse_direct_io_mode) {
case GF_OPTION_DISABLE: /* disable */
- ret = dict_set_static_ptr(options, ZR_DIRECT_IO_OPT, "disable");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_5,
- "failed to set 'disable' for key " ZR_DIRECT_IO_OPT);
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, ZR_DIRECT_IO_OPT,
+ "disable", glusterfsd_msg_3);
break;
case GF_OPTION_ENABLE: /* enable */
- ret = dict_set_static_ptr(options, ZR_DIRECT_IO_OPT, "enable");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_6,
- "failed to set 'enable' for key " ZR_DIRECT_IO_OPT);
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, ZR_DIRECT_IO_OPT,
+ "enable", glusterfsd_msg_3);
break;
- case GF_OPTION_DEFERRED: /* auto */
default:
gf_msg_debug("glusterfsd", 0, "fuse direct io type %d",
cmd_args->fuse_direct_io_mode);
@@ -562,150 +462,82 @@ set_fuse_mount_options(glusterfs_ctx_t *ctx, dict_t *options)
switch (cmd_args->no_root_squash) {
case GF_OPTION_ENABLE: /* enable */
- ret = dict_set_static_ptr(options, "no-root-squash", "enable");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_6,
- "failed to set 'enable' for key "
- "no-root-squash");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "no-root-squash",
+ "enable", glusterfsd_msg_3);
break;
- case GF_OPTION_DISABLE: /* disable/default */
default:
- ret = dict_set_static_ptr(options, "no-root-squash", "disable");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_5,
- "failed to set 'disable' for key "
- "no-root-squash");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "no-root-squash",
+ "disable", glusterfsd_msg_3);
gf_msg_debug("glusterfsd", 0, "fuse no-root-squash mode %d",
cmd_args->no_root_squash);
break;
}
if (!cmd_args->no_daemon_mode) {
- ret = dict_set_static_ptr(options, "sync-to-mount", "enable");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key sync-mtab");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "sync-to-mount", "enable",
+ glusterfsd_msg_3);
}
if (cmd_args->use_readdirp) {
- ret = dict_set_str(options, "use-readdirp", cmd_args->use_readdirp);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "use-readdirp");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "use-readdirp",
+ cmd_args->use_readdirp, glusterfsd_msg_3);
}
if (cmd_args->event_history) {
ret = dict_set_str(options, "event-history", cmd_args->event_history);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "event-history");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "event-history",
+ cmd_args->event_history, glusterfsd_msg_3);
}
if (cmd_args->thin_client) {
- ret = dict_set_static_ptr(options, "thin-client", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "thin-client");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "thin-client", "on",
+ glusterfsd_msg_3);
}
if (cmd_args->reader_thread_count) {
- ret = dict_set_uint32(options, "reader-thread-count",
- cmd_args->reader_thread_count);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "reader-thread-count");
- goto err;
- }
+ DICT_SET_VAL(dict_set_uint32, options, "reader-thread-count",
+ cmd_args->reader_thread_count, glusterfsd_msg_3);
}
- ret = dict_set_uint32(options, "auto-invalidation",
- cmd_args->fuse_auto_inval);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key auto-invalidation");
- goto err;
- }
+ DICT_SET_VAL(dict_set_uint32, options, "auto-invalidation",
+ cmd_args->fuse_auto_inval, glusterfsd_msg_3);
switch (cmd_args->kernel_writeback_cache) {
case GF_OPTION_ENABLE:
- ret = dict_set_static_ptr(options, "kernel-writeback-cache", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "kernel-writeback-cache");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "kernel-writeback-cache",
+ "on", glusterfsd_msg_3);
break;
case GF_OPTION_DISABLE:
- ret = dict_set_static_ptr(options, "kernel-writeback-cache", "off");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "kernel-writeback-cache");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "kernel-writeback-cache",
+ "off", glusterfsd_msg_3);
break;
- case GF_OPTION_DEFERRED: /* default */
default:
gf_msg_debug("glusterfsd", 0, "kernel-writeback-cache mode %d",
cmd_args->kernel_writeback_cache);
break;
}
if (cmd_args->attr_times_granularity) {
- ret = dict_set_uint32(options, "attr-times-granularity",
- cmd_args->attr_times_granularity);
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "attr-times-granularity");
- goto err;
- }
+ DICT_SET_VAL(dict_set_uint32, options, "attr-times-granularity",
+ cmd_args->attr_times_granularity, glusterfsd_msg_3);
}
switch (cmd_args->fuse_flush_handle_interrupt) {
case GF_OPTION_ENABLE:
- ret = dict_set_static_ptr(options, "flush-handle-interrupt", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "flush-handle-interrupt");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "flush-handle-interrupt",
+ "on", glusterfsd_msg_3);
break;
case GF_OPTION_DISABLE:
- ret = dict_set_static_ptr(options, "flush-handle-interrupt", "off");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key "
- "flush-handle-interrupt");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "flush-handle-interrupt",
+ "off", glusterfsd_msg_3);
break;
- case GF_OPTION_DEFERRED: /* default */
default:
gf_msg_debug("glusterfsd", 0, "fuse-flush-handle-interrupt mode %d",
cmd_args->fuse_flush_handle_interrupt);
break;
}
if (cmd_args->global_threading) {
- ret = dict_set_static_ptr(options, "global-threading", "on");
- if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key global-threading");
- goto err;
- }
+ DICT_SET_VAL(dict_set_static_ptr, options, "global-threading", "on",
+ glusterfsd_msg_3);
+ }
+ if (cmd_args->fuse_dev_eperm_ratelimit_ns) {
+ DICT_SET_VAL(dict_set_uint32, options, "fuse-dev-eperm-ratelimit-ns",
+ cmd_args->fuse_dev_eperm_ratelimit_ns, glusterfsd_msg_3);
}
ret = 0;
@@ -728,8 +560,7 @@ create_fuse_mount(glusterfs_ctx_t *ctx)
}
if (ctx->process_mode != GF_CLIENT_PROCESS) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_7,
- "Not a client process, not performing mount operation");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_7, NULL);
return -1;
}
@@ -742,8 +573,8 @@ create_fuse_mount(glusterfs_ctx_t *ctx)
goto err;
if (xlator_set_type(master, "mount/fuse") == -1) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_8,
- "MOUNT-POINT %s initialization failed", cmd_args->mount_point);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_8,
+ "MOUNT-POINT=%s", cmd_args->mount_point, NULL);
goto err;
}
@@ -760,8 +591,8 @@ create_fuse_mount(glusterfs_ctx_t *ctx)
ret = dict_set_static_ptr(master->options, ZR_FUSE_MOUNTOPTS,
cmd_args->fuse_mountopts);
if (ret < 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_4,
- "failed to set dict value for key " ZR_FUSE_MOUNTOPTS);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_3,
+ ZR_FUSE_MOUNTOPTS, NULL);
goto err;
}
}
@@ -787,23 +618,14 @@ err:
static FILE *
get_volfp(glusterfs_ctx_t *ctx)
{
- int ret = 0;
cmd_args_t *cmd_args = NULL;
FILE *specfp = NULL;
- struct stat statbuf;
cmd_args = &ctx->cmd_args;
- ret = sys_lstat(cmd_args->volfile, &statbuf);
- if (ret == -1) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9,
- "loading volume file %s failed", cmd_args->volfile);
- return NULL;
- }
-
if ((specfp = fopen(cmd_args->volfile, "r")) == NULL) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9,
- "loading volume file %s failed", cmd_args->volfile);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_9,
+ "volume_file=%s", cmd_args->volfile, NULL);
return NULL;
}
@@ -859,8 +681,7 @@ gf_remember_xlator_option(char *arg)
dot = strchr(arg, '.');
if (!dot) {
- gf_msg("", GF_LOG_WARNING, 0, glusterfsd_msg_10,
- "xlator option %s is invalid", arg);
+ gf_smsg("", GF_LOG_WARNING, 0, glusterfsd_msg_10, "arg=%s", arg, NULL);
goto out;
}
@@ -873,8 +694,7 @@ gf_remember_xlator_option(char *arg)
equals = strchr(arg, '=');
if (!equals) {
- gf_msg("", GF_LOG_WARNING, 0, glusterfsd_msg_10,
- "xlator option %s is invalid", arg);
+ gf_smsg("", GF_LOG_WARNING, 0, glusterfsd_msg_10, "arg=%s", arg, NULL);
goto out;
}
@@ -886,8 +706,7 @@ gf_remember_xlator_option(char *arg)
option->key[(equals - dot - 1)] = '\0';
if (!*(equals + 1)) {
- gf_msg("", GF_LOG_WARNING, 0, glusterfsd_msg_10,
- "xlator option %s is invalid", arg);
+ gf_smsg("", GF_LOG_WARNING, 0, glusterfsd_msg_10, "arg=%s", arg, NULL);
goto out;
}
@@ -1304,6 +1123,14 @@ parse_opts(int key, char *arg, struct argp_state *state)
argp_failure(state, -1, 0, "unknown LRU limit option %s", arg);
break;
+ case ARGP_FUSE_INVALIDATE_LIMIT_KEY:
+ if (!gf_string2int32(arg, &cmd_args->invalidate_limit))
+ break;
+
+ argp_failure(state, -1, 0, "unknown invalidate limit option %s",
+ arg);
+ break;
+
case ARGP_FUSE_BACKGROUND_QLEN_KEY:
if (!gf_string2int(arg, &cmd_args->background_qlen))
break;
@@ -1536,6 +1363,21 @@ parse_opts(int key, char *arg, struct argp_state *state)
argp_failure(state, -1, 0,
"Invalid value for global threading \"%s\"", arg);
break;
+
+ case ARGP_FUSE_DEV_EPERM_RATELIMIT_NS_KEY:
+ if (gf_string2uint32(arg, &cmd_args->fuse_dev_eperm_ratelimit_ns)) {
+ argp_failure(state, -1, 0,
+ "Non-numerical value for "
+ "'fuse-dev-eperm-ratelimit-ns' option %s",
+ arg);
+ } else if (cmd_args->fuse_dev_eperm_ratelimit_ns > 1000000000) {
+ argp_failure(state, -1, 0,
+ "Invalid 'fuse-dev-eperm-ratelimit-ns' value %s. "
+ "Valid range: [\"0, 1000000000\"]",
+ arg);
+ }
+
+ break;
}
return 0;
}
@@ -1553,11 +1395,6 @@ should_call_fini(glusterfs_ctx_t *ctx, xlator_t *trav)
return _gf_true;
}
- /* This is the only one known to be safe in glusterfsd. */
- if (!strcmp(trav->type, "experimental/fdl")) {
- return _gf_true;
- }
-
return _gf_false;
}
@@ -1675,8 +1512,7 @@ reincarnate(int signum)
gf_msg_trace("gluster", 0, "received reincarnate request (sig:HUP)");
if (cmd_args->volfile_server) {
- gf_msg("glusterfsd", GF_LOG_INFO, 0, glusterfsd_msg_11,
- "Fetching the volume file from server...");
+ gf_smsg("glusterfsd", GF_LOG_INFO, 0, glusterfsd_msg_11, NULL);
ret = glusterfs_volfile_fetch(ctx);
}
@@ -1684,8 +1520,7 @@ reincarnate(int signum)
gf_log_logrotate(1);
if (ret < 0)
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_12,
- "volume initialization failed.");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_12, NULL);
return;
}
@@ -1737,8 +1572,7 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
ret = xlator_mem_acct_init(THIS, gfd_mt_end);
if (ret != 0) {
- gf_msg(THIS->name, GF_LOG_CRITICAL, 0, glusterfsd_msg_34,
- "memory accounting init failed.");
+ gf_smsg(THIS->name, GF_LOG_CRITICAL, 0, glusterfsd_msg_34, NULL);
return ret;
}
@@ -1752,8 +1586,7 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
ctx->process_uuid = generate_glusterfs_ctx_id();
if (!ctx->process_uuid) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_13,
- "ERROR: glusterfs uuid generation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_13, NULL);
goto out;
}
@@ -1761,23 +1594,20 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
ctx->iobuf_pool = iobuf_pool_new();
if (!ctx->iobuf_pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs iobuf pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "iobuf", NULL);
goto out;
}
ctx->event_pool = gf_event_pool_new(DEFAULT_EVENT_POOL_SIZE,
STARTING_EVENT_THREADS);
if (!ctx->event_pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs event pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "event", NULL);
goto out;
}
ctx->pool = GF_CALLOC(1, sizeof(call_pool_t), gfd_mt_call_pool_t);
if (!ctx->pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs call pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "call", NULL);
goto out;
}
@@ -1787,22 +1617,19 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
/* frame_mem_pool size 112 * 4k */
ctx->pool->frame_mem_pool = mem_pool_new(call_frame_t, 4096);
if (!ctx->pool->frame_mem_pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs frame pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "frame", NULL);
goto out;
}
/* stack_mem_pool size 256 * 1024 */
ctx->pool->stack_mem_pool = mem_pool_new(call_stack_t, 1024);
if (!ctx->pool->stack_mem_pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs stack pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "stack", NULL);
goto out;
}
ctx->stub_mem_pool = mem_pool_new(call_stub_t, 1024);
if (!ctx->stub_mem_pool) {
- gf_msg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14,
- "ERROR: glusterfs stub pool creation failed");
+ gf_smsg("", GF_LOG_CRITICAL, 0, glusterfsd_msg_14, "stub", NULL);
goto out;
}
@@ -1860,6 +1687,10 @@ glusterfs_ctx_defaults_init(glusterfs_ctx_t *ctx)
INIT_LIST_HEAD(&cmd_args->xlator_options);
INIT_LIST_HEAD(&cmd_args->volfile_servers);
+ ctx->pxl_count = 0;
+ pthread_mutex_init(&ctx->fd_lock, NULL);
+ pthread_cond_init(&ctx->fd_cond, NULL);
+ INIT_LIST_HEAD(&ctx->janitor_fds);
lim.rlim_cur = RLIM_INFINITY;
lim.rlim_max = RLIM_INFINITY;
@@ -2148,7 +1979,7 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
struct stat stbuf = {
0,
};
- char timestr[32];
+ char timestr[GF_TIMESTR_SIZE];
char tmp_logfile[1024] = {0};
char *tmp_logfile_dyn = NULL;
char *tmp_logfilebase = NULL;
@@ -2210,9 +2041,7 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
/* Make sure after the parsing cli, if '--volfile-server' option is
given, then '--volfile-id' is mandatory */
if (cmd_args->volfile_server && !cmd_args->volfile_id) {
- gf_msg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_15,
- "ERROR: '--volfile-id' is mandatory if '-s' OR "
- "'--volfile-server' option is given");
+ gf_smsg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_15, NULL);
ret = -1;
goto out;
}
@@ -2229,8 +2058,8 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
and exit */
ret = sys_stat(cmd_args->volfile, &stbuf);
if (ret) {
- gf_msg("glusterfs", GF_LOG_CRITICAL, errno, glusterfsd_msg_16,
- "ERROR: parsing the volfile failed");
+ gf_smsg("glusterfs", GF_LOG_CRITICAL, errno, glusterfsd_msg_16,
+ NULL);
/* argp_usage (argp.) */
fprintf(stderr, "USAGE: %s [options] [mountpoint]\n", argv[0]);
goto out;
@@ -2254,8 +2083,8 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
if (((ret == 0) &&
(S_ISREG(stbuf.st_mode) || S_ISLNK(stbuf.st_mode))) ||
(ret == -1)) {
- /* Have separate logfile per run */
- gf_time_fmt(timestr, sizeof timestr, time(NULL), gf_timefmt_FT);
+ /* Have separate logfile per run. */
+ gf_time_fmt(timestr, sizeof timestr, gf_time(), gf_timefmt_FT);
sprintf(tmp_logfile, "%s.%s.%d", cmd_args->log_file, timestr,
getpid());
@@ -2282,9 +2111,7 @@ parse_cmdline(int argc, char *argv[], glusterfs_ctx_t *ctx)
compatibility with third party applications
*/
if (cmd_args->max_connect_attempts) {
- gf_msg("glusterfs", GF_LOG_WARNING, 0, glusterfsd_msg_33,
- "obsolete option '--volfile-max-fecth-attempts or "
- "fetch-attempts' was provided");
+ gf_smsg("glusterfs", GF_LOG_WARNING, 0, glusterfsd_msg_33, NULL);
}
#ifdef GF_DARWIN_HOST_OS
@@ -2311,8 +2138,8 @@ glusterfs_pidfile_setup(glusterfs_ctx_t *ctx)
pidfp = fopen(cmd_args->pid_file, "a+");
if (!pidfp) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_17,
- "pidfile %s open failed", cmd_args->pid_file);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_17,
+ "pidfile=%s", cmd_args->pid_file, NULL);
goto out;
}
@@ -2363,29 +2190,29 @@ glusterfs_pidfile_update(glusterfs_ctx_t *ctx, pid_t pid)
ret = lockf(fileno(pidfp), F_TLOCK, 0);
if (ret) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_18,
- "pidfile %s lock failed", cmd_args->pid_file);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_18,
+ "pidfile=%s", cmd_args->pid_file, NULL);
return ret;
}
ret = sys_ftruncate(fileno(pidfp), 0);
if (ret) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_20,
- "pidfile %s truncation failed", cmd_args->pid_file);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_20,
+ "pidfile=%s", cmd_args->pid_file, NULL);
return ret;
}
ret = fprintf(pidfp, "%d\n", pid);
if (ret <= 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
- "pidfile %s write failed", cmd_args->pid_file);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
+ "pidfile=%s", cmd_args->pid_file, NULL);
return ret;
}
ret = fflush(pidfp);
if (ret) {
- gf_msg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
- "pidfile %s write failed", cmd_args->pid_file);
+ gf_smsg("glusterfsd", GF_LOG_ERROR, errno, glusterfsd_msg_21,
+ "pidfile=%s", cmd_args->pid_file, NULL);
return ret;
}
@@ -2478,8 +2305,7 @@ glusterfs_signals_setup(glusterfs_ctx_t *ctx)
ret = pthread_sigmask(SIG_BLOCK, &set, NULL);
if (ret) {
- gf_msg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_22,
- "failed to execute pthread_sigmask");
+ gf_smsg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_22, NULL);
return ret;
}
@@ -2491,8 +2317,7 @@ glusterfs_signals_setup(glusterfs_ctx_t *ctx)
fallback to signals getting handled by other threads.
setup the signal handlers
*/
- gf_msg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_23,
- "failed to create pthread");
+ gf_smsg("glusterfsd", GF_LOG_WARNING, errno, glusterfsd_msg_23, NULL);
return ret;
}
@@ -2538,8 +2363,7 @@ daemonize(glusterfs_ctx_t *ctx)
sys_close(ctx->daemon_pipe[1]);
}
- gf_msg("daemonize", GF_LOG_ERROR, errno, glusterfsd_msg_24,
- "daemonization failed");
+ gf_smsg("daemonize", GF_LOG_ERROR, errno, glusterfsd_msg_24, NULL);
goto out;
case 0:
/* child */
@@ -2560,8 +2384,8 @@ daemonize(glusterfs_ctx_t *ctx)
} else {
err = cstatus;
}
- gf_msg("daemonize", GF_LOG_ERROR, 0, glusterfsd_msg_25,
- "mount failed");
+ gf_smsg("daemonize", GF_LOG_ERROR, 0, glusterfsd_msg_25,
+ NULL);
exit(err);
}
}
@@ -2652,16 +2476,13 @@ glusterfs_process_volfp(glusterfs_ctx_t *ctx, FILE *fp)
graph = glusterfs_graph_construct(fp);
if (!graph) {
- gf_msg("", GF_LOG_ERROR, 0, glusterfsd_msg_26,
- "failed to construct the graph");
+ gf_smsg("", GF_LOG_ERROR, 0, glusterfsd_msg_26, NULL);
goto out;
}
for (trav = graph->first; trav; trav = trav->next) {
if (strcmp(trav->type, "mount/fuse") == 0) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_27,
- "fuse xlator cannot be specified in volume "
- "file");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_27, NULL);
goto out;
}
}
@@ -2742,8 +2563,7 @@ glusterfs_volumes_init(glusterfs_ctx_t *ctx)
fp = get_volfp(ctx);
if (!fp) {
- gf_msg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_28,
- "Cannot reach volume specification file");
+ gf_smsg("glusterfsd", GF_LOG_ERROR, 0, glusterfsd_msg_28, NULL);
ret = -1;
goto out;
}
@@ -2774,8 +2594,7 @@ main(int argc, char *argv[])
ctx = glusterfs_ctx_new();
if (!ctx) {
- gf_msg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_29,
- "ERROR: glusterfs context not initialized");
+ gf_smsg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_29, NULL);
return ENOMEM;
}
glusterfsd_ctx = ctx;
@@ -2839,9 +2658,7 @@ main(int argc, char *argv[])
/* set brick_mux mode only for server process */
if ((ctx->process_mode != GF_SERVER_PROCESS) && cmd->brick_mux) {
- gf_msg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_43,
- "command line argument --brick-mux is valid only for brick "
- "process");
+ gf_smsg("glusterfs", GF_LOG_CRITICAL, 0, glusterfsd_msg_43, NULL);
goto out;
}
@@ -2856,15 +2673,14 @@ main(int argc, char *argv[])
len = snprintf(cmdlinestr + pos, sizeof(cmdlinestr) - pos, " %s",
argv[i]);
if ((len <= 0) || (len >= (sizeof(cmdlinestr) - pos))) {
- gf_msg("glusterfs", GF_LOG_ERROR, 0, glusterfsd_msg_29,
- "failed to create command line string");
+ gf_smsg("glusterfs", GF_LOG_ERROR, 0, glusterfsd_msg_029, NULL);
ret = -1;
goto out;
}
}
- gf_msg(argv[0], GF_LOG_INFO, 0, glusterfsd_msg_30,
- "Started running %s version %s (args: %s)", argv[0],
- PACKAGE_VERSION, cmdlinestr);
+ gf_smsg(argv[0], GF_LOG_INFO, 0, glusterfsd_msg_30, "arg=%s", argv[0],
+ "version=%s", PACKAGE_VERSION, "cmdlinestr=%s", cmdlinestr,
+ NULL);
ctx->cmdlinestr = gf_strdup(cmdlinestr);
}
@@ -2899,8 +2715,7 @@ main(int argc, char *argv[])
ctx->env = syncenv_new(0, 0, 0);
if (!ctx->env) {
- gf_msg("", GF_LOG_ERROR, 0, glusterfsd_msg_31,
- "Could not create new sync-environment");
+ gf_smsg("", GF_LOG_ERROR, 0, glusterfsd_msg_31, NULL);
goto out;
}
diff --git a/glusterfsd/src/glusterfsd.h b/glusterfsd/src/glusterfsd.h
index dc7d995e778..4e1413caa70 100644
--- a/glusterfsd/src/glusterfsd.h
+++ b/glusterfsd/src/glusterfsd.h
@@ -112,7 +112,9 @@ enum argp_option_keys {
ARGP_FUSE_LRU_LIMIT_KEY = 190,
ARGP_FUSE_AUTO_INVAL_KEY = 191,
ARGP_GLOBAL_THREADING_KEY = 192,
- ARGP_BRICK_MUX_KEY = 193
+ ARGP_BRICK_MUX_KEY = 193,
+ ARGP_FUSE_DEV_EPERM_RATELIMIT_NS_KEY = 194,
+ ARGP_FUSE_INVALIDATE_LIMIT_KEY = 195,
};
struct _gfd_vol_top_priv {
@@ -133,14 +135,6 @@ glusterfs_volfile_fetch(glusterfs_ctx_t *ctx);
void
cleanup_and_exit(int signum);
-int
-glusterfs_volume_top_write_perf(uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time);
-int
-glusterfs_volume_top_read_perf(uint32_t blk_size, uint32_t blk_count,
- char *brick_path, double *throughput,
- double *time);
void
xlator_mem_cleanup(xlator_t *this);
diff --git a/heal/src/Makefile.am b/heal/src/Makefile.am
index f04a294cc56..aa18d3eff88 100644
--- a/heal/src/Makefile.am
+++ b/heal/src/Makefile.am
@@ -1,5 +1,6 @@
if WITH_SERVER
-sbin_PROGRAMS = glfsheal
+scriptdir = $(GLUSTERFS_LIBEXECDIR)
+script_PROGRAMS = glfsheal
endif
glfsheal_SOURCES = glfs-heal.c
@@ -18,8 +19,7 @@ AM_CPPFLAGS = $(GF_CPPFLAGS) \
-I$(top_srcdir)/rpc/xdr/src\
-I$(top_builddir)/rpc/xdr/src\
-I$(top_srcdir)/api/src\
- -DDATADIR=\"$(localstatedir)\" \
- -DSBIN_DIR=\"$(sbindir)\"
+ -DDATADIR=\"$(localstatedir)\"
AM_CFLAGS = -Wall $(GF_CFLAGS) $(XML_CFLAGS)
diff --git a/heal/src/glfs-heal.c b/heal/src/glfs-heal.c
index 20372316edd..bf4b47f8760 100644
--- a/heal/src/glfs-heal.c
+++ b/heal/src/glfs-heal.c
@@ -773,8 +773,7 @@ static int
glfsh_process_entries(xlator_t *xl, fd_t *fd, gf_dirent_t *entries,
uint64_t *offset, num_entries_t *num_entries,
print_status glfsh_print_status,
- gf_boolean_t ignore_dirty, glfsh_fail_mode_t mode,
- dict_t *xattr_req)
+ gf_boolean_t ignore_dirty, glfsh_fail_mode_t mode)
{
gf_dirent_t *entry = NULL;
gf_dirent_t *tmp = NULL;
@@ -806,7 +805,7 @@ glfsh_process_entries(xlator_t *xl, fd_t *fd, gf_dirent_t *entries,
gf_uuid_parse(entry->d_name, gfid);
gf_uuid_copy(loc.gfid, gfid);
- ret = syncop_getxattr(this, &loc, &dict, GF_HEAL_INFO, xattr_req, NULL);
+ ret = syncop_getxattr(this, &loc, &dict, GF_HEAL_INFO, NULL, NULL);
if (ret) {
if ((mode != GLFSH_MODE_CONTINUE_ON_ERROR) && (ret == -ENOTCONN))
goto out;
@@ -875,19 +874,19 @@ glfsh_crawl_directory(glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
if (heal_op == GF_SHD_OP_INDEX_SUMMARY) {
ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
num_entries, glfsh_print_heal_status,
- ignore, mode, xattr_req);
+ ignore, mode);
if (ret < 0)
goto out;
} else if (heal_op == GF_SHD_OP_SPLIT_BRAIN_FILES) {
ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
num_entries, glfsh_print_spb_status,
- ignore, mode, xattr_req);
+ ignore, mode);
if (ret < 0)
goto out;
} else if (heal_op == GF_SHD_OP_HEAL_SUMMARY) {
ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
num_entries, glfsh_print_summary_status,
- ignore, mode, xattr_req);
+ ignore, mode);
if (ret < 0)
goto out;
} else if (heal_op == GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK) {
@@ -896,7 +895,7 @@ glfsh_crawl_directory(glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
} else if (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) {
ret = glfsh_process_entries(readdir_xl, fd, &entries, &offset,
num_entries, glfsh_heal_status_boolean,
- ignore, mode, xattr_req);
+ ignore, mode);
if (ret < 0)
goto out;
}
@@ -950,10 +949,6 @@ glfsh_print_pending_heals_type(glfs_t *fs, xlator_t *top_subvol, loc_t *rootloc,
int32_t op_errno = 0;
gf_boolean_t ignore = _gf_false;
- ret = dict_set_str(xattr_req, "index-vgfid", vgfid);
- if (ret)
- return ret;
-
if (!strcmp(vgfid, GF_XATTROP_DIRTY_GFID))
ignore = _gf_true;
@@ -1060,6 +1055,10 @@ glfsh_set_heal_options(glfs_t *fs, gf_xl_afr_op_t heal_op)
if (ret)
goto out;
+ ret = glfs_set_xlator_option(fs, "*-replicate-*", "halo-enabled", "off");
+ if (ret)
+ goto out;
+
if ((heal_op != GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE) &&
(heal_op != GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK) &&
(heal_op != GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME))
@@ -1730,14 +1729,19 @@ main(int argc, char **argv)
goto out;
}
+ char *var_str = (heal_op == GF_SHD_OP_INDEX_SUMMARY ||
+ heal_op == GF_SHD_OP_HEAL_SUMMARY)
+ ? "replicate/disperse"
+ : "replicate";
+
ret = glfsh_validate_volume(top_subvol, heal_op);
if (ret < 0) {
ret = -EINVAL;
- gf_asprintf(&op_errstr, "Volume %s is not of type %s", volname,
- (heal_op == GF_SHD_OP_INDEX_SUMMARY ||
- heal_op == GF_SHD_OP_HEAL_SUMMARY)
- ? "replicate/disperse"
- : "replicate");
+ gf_asprintf(&op_errstr,
+ "This command is supported "
+ "for only volumes of %s type. Volume %s "
+ "is not of type %s",
+ var_str, volname, var_str);
goto out;
}
rootloc.inode = inode_ref(top_subvol->itable->root);
diff --git a/libgfdb.pc.in b/libgfdb.pc.in
deleted file mode 100644
index 463e8becd3a..00000000000
--- a/libgfdb.pc.in
+++ /dev/null
@@ -1,12 +0,0 @@
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
-
-
-Name: libgfdb
-Description: GlusterFS Database Library
-Version: @LIBGFDB_VERSION@
-Libs: -L${libdir} -lgfchangedb -lglusterfs
-Cflags: -I${includedir}
-Requires: sqlite3 @PKGCONFIG_UUID@
diff --git a/libglusterd/Makefile.am b/libglusterd/Makefile.am
new file mode 100644
index 00000000000..a985f42a877
--- /dev/null
+++ b/libglusterd/Makefile.am
@@ -0,0 +1,3 @@
+SUBDIRS = src
+
+CLEANFILES =
diff --git a/libglusterd/src/Makefile.am b/libglusterd/src/Makefile.am
new file mode 100644
index 00000000000..684d2bac96b
--- /dev/null
+++ b/libglusterd/src/Makefile.am
@@ -0,0 +1,31 @@
+libglusterd_la_CFLAGS = $(GF_CFLAGS) $(GF_DARWIN_LIBGLUSTERFS_CFLAGS) \
+ -DDATADIR=\"$(localstatedir)\"
+
+libglusterd_la_CPPFLAGS = $(GF_CPPFLAGS) -D__USE_FILE_OFFSET64 \
+ -DXLATORDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator\" \
+ -DXLATORPARENTDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)\" \
+ -DXXH_NAMESPACE=GF_ -D__USE_LARGEFILE64 \
+ -I$(CONTRIBDIR)/rbtree \
+ -I$(CONTRIBDIR)/libexecinfo ${ARGP_STANDALONE_CPPFLAGS} \
+ -DSBIN_DIR=\"$(sbindir)\" -I$(CONTRIBDIR)/timer-wheel \
+ -I$(CONTRIBDIR)/xxhash \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \
+ -I$(top_srcdir)/rpc/rpc-lib/src/
+
+libglusterd_la_LIBADD = $(ZLIB_LIBS) $(MATH_LIB) $(UUID_LIBS)
+libglusterd_la_LDFLAGS = -version-info $(LIBGLUSTERFS_LT_VERSION) $(GF_LDFLAGS) \
+ -export-symbols $(top_srcdir)/libglusterd/src/libglusterd.sym
+
+lib_LTLIBRARIES = libglusterd.la
+
+libglusterd_la_SOURCES = gd-common-utils.c
+
+libglusterd_la_HEADERS = gd-common-utils.h
+
+libglusterd_ladir = $(includedir)/glusterfs
+
+noinst_HEADERS = gd-common-utils.h
+
+EXTRA_DIST = libglusterd.sym
+
+CLEANFILES =
diff --git a/libglusterd/src/gd-common-utils.c b/libglusterd/src/gd-common-utils.c
new file mode 100644
index 00000000000..243fab215e6
--- /dev/null
+++ b/libglusterd/src/gd-common-utils.c
@@ -0,0 +1,78 @@
+/*
+ Copyright (c) 2019 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "gd-common-utils.h"
+#include "cli1-xdr.h"
+
+int
+get_vol_type(int type, int dist_count, int brick_count)
+{
+ if ((type != GF_CLUSTER_TYPE_TIER) && (type > 0) &&
+ (dist_count < brick_count))
+ type = type + GF_CLUSTER_TYPE_MAX - 1;
+
+ return type;
+}
+
+char *
+get_struct_variable(int mem_num, gf_gsync_status_t *sts_val)
+{
+ switch (mem_num) {
+ case 0:
+ return (sts_val->node);
+ case 1:
+ return (sts_val->master);
+ case 2:
+ return (sts_val->brick);
+ case 3:
+ return (sts_val->slave_user);
+ case 4:
+ return (sts_val->slave);
+ case 5:
+ return (sts_val->slave_node);
+ case 6:
+ return (sts_val->worker_status);
+ case 7:
+ return (sts_val->crawl_status);
+ case 8:
+ return (sts_val->last_synced);
+ case 9:
+ return (sts_val->entry);
+ case 10:
+ return (sts_val->data);
+ case 11:
+ return (sts_val->meta);
+ case 12:
+ return (sts_val->failures);
+ case 13:
+ return (sts_val->checkpoint_time);
+ case 14:
+ return (sts_val->checkpoint_completed);
+ case 15:
+ return (sts_val->checkpoint_completion_time);
+ case 16:
+ return (sts_val->brick_host_uuid);
+ case 17:
+ return (sts_val->last_synced_utc);
+ case 18:
+ return (sts_val->checkpoint_time_utc);
+ case 19:
+ return (sts_val->checkpoint_completion_time_utc);
+ case 20:
+ return (sts_val->slavekey);
+ case 21:
+ return (sts_val->session_slave);
+ default:
+ goto out;
+ }
+
+out:
+ return NULL;
+}
diff --git a/libglusterd/src/gd-common-utils.h b/libglusterd/src/gd-common-utils.h
new file mode 100644
index 00000000000..b9bb4f956fe
--- /dev/null
+++ b/libglusterd/src/gd-common-utils.h
@@ -0,0 +1,28 @@
+/*
+ Copyright (c) 2019 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef _GD_COMMON_UTILS_H
+#define _GD_COMMON_UTILS_H
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <limits.h>
+#include <stddef.h>
+
+#include "protocol-common.h"
+#include "rpcsvc.h"
+
+int
+get_vol_type(int type, int dist_count, int brick_count);
+
+char *
+get_struct_variable(int mem_num, gf_gsync_status_t *sts_val);
+
+#endif /* _GD_COMMON_UTILS_H */
diff --git a/libglusterd/src/libglusterd.sym b/libglusterd/src/libglusterd.sym
new file mode 100644
index 00000000000..45969a87c12
--- /dev/null
+++ b/libglusterd/src/libglusterd.sym
@@ -0,0 +1,2 @@
+get_vol_type
+get_struct_variable
diff --git a/libglusterfs/src/Makefile.am b/libglusterfs/src/Makefile.am
index 79ab7ee93f2..385e8ef4600 100644
--- a/libglusterfs/src/Makefile.am
+++ b/libglusterfs/src/Makefile.am
@@ -7,13 +7,13 @@ libglusterfs_la_CPPFLAGS = $(GF_CPPFLAGS) -D__USE_FILE_OFFSET64 \
-DXLATORDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator\" \
-DXLATORPARENTDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)\" \
-DXXH_NAMESPACE=GF_ -D__USE_LARGEFILE64 \
- -I$(top_srcdir)/rpc/xdr/src/ -I$(top_builddir)/rpc/xdr/src/ \
- -I$(top_srcdir)/rpc/rpc-lib/src/ -I$(CONTRIBDIR)/rbtree \
+ -I$(CONTRIBDIR)/rbtree \
-I$(CONTRIBDIR)/libexecinfo ${ARGP_STANDALONE_CPPFLAGS} \
-DSBIN_DIR=\"$(sbindir)\" -I$(CONTRIBDIR)/timer-wheel \
-I$(CONTRIBDIR)/xxhash
-libglusterfs_la_LIBADD = $(ZLIB_LIBS) $(MATH_LIB) $(UUID_LIBS)
+libglusterfs_la_LIBADD = $(ZLIB_LIBS) $(MATH_LIB) $(UUID_LIBS) $(LIB_DL) \
+ $(URCU_LIBS) $(URCU_CDS_LIBS)
libglusterfs_la_LDFLAGS = -version-info $(LIBGLUSTERFS_LT_VERSION) $(GF_LDFLAGS) \
-export-symbols $(top_srcdir)/libglusterfs/src/libglusterfs.sym
@@ -40,12 +40,9 @@ libglusterfs_la_SOURCES = dict.c xlator.c logging.c \
throttle-tbf.c monitoring.c async.c
nodist_libglusterfs_la_SOURCES = y.tab.c graph.lex.c defaults.c
-nodist_libglusterfs_la_HEADERS = y.tab.h protocol-common.h
+nodist_libglusterfs_la_HEADERS = y.tab.h
-BUILT_SOURCES = graph.lex.c defaults.c eventtypes.h protocol-common.h
-
-protocol-common.h: $(top_srcdir)/rpc/rpc-lib/src/protocol-common.h
- cp $(top_srcdir)/rpc/rpc-lib/src/protocol-common.h .
+BUILT_SOURCES = graph.lex.c defaults.c eventtypes.h
libglusterfs_la_HEADERS = glusterfs/common-utils.h glusterfs/defaults.h \
glusterfs/default-args.h glusterfs/dict.h glusterfs/glusterfs.h \
@@ -69,7 +66,7 @@ libglusterfs_la_HEADERS = glusterfs/common-utils.h glusterfs/defaults.h \
glusterfs/quota-common-utils.h glusterfs/rot-buffs.h \
glusterfs/compat-uuid.h glusterfs/upcall-utils.h glusterfs/throttle-tbf.h \
glusterfs/events.h glusterfs/atomic.h glusterfs/monitoring.h \
- glusterfs/async.h
+ glusterfs/async.h glusterfs/glusterfs-fops.h
libglusterfs_ladir = $(includedir)/glusterfs
@@ -82,8 +79,7 @@ noinst_HEADERS = unittest/unittest.h \
$(CONTRIBDIR)/userspace-rcu/wfcqueue.h \
$(CONTRIBDIR)/userspace-rcu/wfstack.h \
$(CONTRIBDIR)/userspace-rcu/static-wfcqueue.h \
- $(CONTRIBDIR)/userspace-rcu/static-wfstack.h \
- tier-ctr-interface.h
+ $(CONTRIBDIR)/userspace-rcu/static-wfstack.h
eventtypes.h: $(top_srcdir)/events/eventskeygen.py
$(PYTHON) $(top_srcdir)/events/eventskeygen.py C_HEADER
diff --git a/libglusterfs/src/call-stub.c b/libglusterfs/src/call-stub.c
index 3c7c7748c99..ee84f08acd4 100644
--- a/libglusterfs/src/call-stub.c
+++ b/libglusterfs/src/call-stub.c
@@ -41,7 +41,6 @@ fop_lookup_stub(call_frame_t *frame, fop_lookup_t fn, loc_t *loc, dict_t *xdata)
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_LOOKUP);
@@ -60,8 +59,6 @@ fop_lookup_cbk_stub(call_frame_t *frame, fop_lookup_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_LOOKUP);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -77,7 +74,6 @@ fop_stat_stub(call_frame_t *frame, fop_stat_t fn, loc_t *loc, dict_t *xdata)
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_STAT);
@@ -95,8 +91,6 @@ fop_stat_cbk_stub(call_frame_t *frame, fop_stat_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_STAT);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -111,8 +105,6 @@ fop_fstat_stub(call_frame_t *frame, fop_fstat_t fn, fd_t *fd, dict_t *xdata)
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_FSTAT);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -128,8 +120,6 @@ fop_fstat_cbk_stub(call_frame_t *frame, fop_fstat_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FSTAT);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -145,7 +135,6 @@ fop_truncate_stub(call_frame_t *frame, fop_truncate_t fn, loc_t *loc, off_t off,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_TRUNCATE);
@@ -164,8 +153,6 @@ fop_truncate_cbk_stub(call_frame_t *frame, fop_truncate_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_TRUNCATE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -182,8 +169,6 @@ fop_ftruncate_stub(call_frame_t *frame, fop_ftruncate_t fn, fd_t *fd, off_t off,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_FTRUNCATE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -201,8 +186,6 @@ fop_ftruncate_cbk_stub(call_frame_t *frame, fop_ftruncate_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FTRUNCATE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -220,7 +203,6 @@ fop_access_stub(call_frame_t *frame, fop_access_t fn, loc_t *loc, int32_t mask,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_ACCESS);
@@ -238,8 +220,6 @@ fop_access_cbk_stub(call_frame_t *frame, fop_access_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_ACCESS);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -255,7 +235,6 @@ fop_readlink_stub(call_frame_t *frame, fop_readlink_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_READLINK);
@@ -274,8 +253,6 @@ fop_readlink_cbk_stub(call_frame_t *frame, fop_readlink_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_READLINK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -292,7 +269,6 @@ fop_mknod_stub(call_frame_t *frame, fop_mknod_t fn, loc_t *loc, mode_t mode,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_MKNOD);
@@ -312,8 +288,6 @@ fop_mknod_cbk_stub(call_frame_t *frame, fop_mknod_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_MKNOD);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -330,7 +304,6 @@ fop_mkdir_stub(call_frame_t *frame, fop_mkdir_t fn, loc_t *loc, mode_t mode,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_MKDIR);
@@ -350,8 +323,6 @@ fop_mkdir_cbk_stub(call_frame_t *frame, fop_mkdir_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_MKDIR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -368,7 +339,6 @@ fop_unlink_stub(call_frame_t *frame, fop_unlink_t fn, loc_t *loc, int xflag,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_UNLINK);
@@ -388,8 +358,6 @@ fop_unlink_cbk_stub(call_frame_t *frame, fop_unlink_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_UNLINK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -406,7 +374,6 @@ fop_rmdir_stub(call_frame_t *frame, fop_rmdir_t fn, loc_t *loc, int flags,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_RMDIR);
@@ -426,8 +393,6 @@ fop_rmdir_cbk_stub(call_frame_t *frame, fop_rmdir_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_RMDIR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -444,7 +409,6 @@ fop_symlink_stub(call_frame_t *frame, fop_symlink_t fn, const char *linkname,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
GF_VALIDATE_OR_GOTO("call-stub", linkname, out);
@@ -465,8 +429,6 @@ fop_symlink_cbk_stub(call_frame_t *frame, fop_symlink_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_SYMLINK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -483,7 +445,6 @@ fop_rename_stub(call_frame_t *frame, fop_rename_t fn, loc_t *oldloc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", oldloc, out);
GF_VALIDATE_OR_GOTO("call-stub", newloc, out);
@@ -505,8 +466,6 @@ fop_rename_cbk_stub(call_frame_t *frame, fop_rename_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_RENAME);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -523,7 +482,6 @@ fop_link_stub(call_frame_t *frame, fop_link_t fn, loc_t *oldloc, loc_t *newloc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", oldloc, out);
GF_VALIDATE_OR_GOTO("call-stub", newloc, out);
@@ -544,8 +502,6 @@ fop_link_cbk_stub(call_frame_t *frame, fop_link_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_LINK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -562,7 +518,6 @@ fop_create_stub(call_frame_t *frame, fop_create_t fn, loc_t *loc, int32_t flags,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_CREATE);
@@ -582,8 +537,6 @@ fop_create_cbk_stub(call_frame_t *frame, fop_create_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_CREATE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -600,7 +553,6 @@ fop_open_stub(call_frame_t *frame, fop_open_t fn, loc_t *loc, int32_t flags,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_OPEN);
@@ -618,8 +570,6 @@ fop_open_cbk_stub(call_frame_t *frame, fop_open_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_OPEN);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -635,8 +585,6 @@ fop_readv_stub(call_frame_t *frame, fop_readv_t fn, fd_t *fd, size_t size,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_READ);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -653,8 +601,6 @@ fop_readv_cbk_stub(call_frame_t *frame, fop_readv_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_READ);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -672,7 +618,6 @@ fop_writev_stub(call_frame_t *frame, fop_writev_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", vector, out);
stub = stub_new(frame, 1, GF_FOP_WRITE);
@@ -692,8 +637,6 @@ fop_writev_cbk_stub(call_frame_t *frame, fop_writev_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_WRITE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -709,8 +652,6 @@ fop_flush_stub(call_frame_t *frame, fop_flush_t fn, fd_t *fd, dict_t *xdata)
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_FLUSH);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -726,8 +667,6 @@ fop_flush_cbk_stub(call_frame_t *frame, fop_flush_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FLUSH);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -743,8 +682,6 @@ fop_fsync_stub(call_frame_t *frame, fop_fsync_t fn, fd_t *fd, int32_t datasync,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_FSYNC);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -761,8 +698,6 @@ fop_fsync_cbk_stub(call_frame_t *frame, fop_fsync_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FSYNC);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -779,7 +714,6 @@ fop_opendir_stub(call_frame_t *frame, fop_opendir_t fn, loc_t *loc, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_OPENDIR);
@@ -797,8 +731,6 @@ fop_opendir_cbk_stub(call_frame_t *frame, fop_opendir_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_OPENDIR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -814,8 +746,6 @@ fop_fsyncdir_stub(call_frame_t *frame, fop_fsyncdir_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_FSYNCDIR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -831,8 +761,6 @@ fop_fsyncdir_cbk_stub(call_frame_t *frame, fop_fsyncdir_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FSYNCDIR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -847,7 +775,6 @@ fop_statfs_stub(call_frame_t *frame, fop_statfs_t fn, loc_t *loc, dict_t *xdata)
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_STATFS);
@@ -865,8 +792,6 @@ fop_statfs_cbk_stub(call_frame_t *frame, fop_statfs_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_STATFS);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -882,7 +807,6 @@ fop_setxattr_stub(call_frame_t *frame, fop_setxattr_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_SETXATTR);
@@ -900,8 +824,6 @@ fop_setxattr_cbk_stub(call_frame_t *frame, fop_setxattr_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_SETXATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -917,7 +839,6 @@ fop_getxattr_stub(call_frame_t *frame, fop_getxattr_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
stub = stub_new(frame, 1, GF_FOP_GETXATTR);
@@ -936,8 +857,6 @@ fop_getxattr_cbk_stub(call_frame_t *frame, fop_getxattr_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_GETXATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -953,7 +872,6 @@ fop_fsetxattr_stub(call_frame_t *frame, fop_fsetxattr_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fd, out);
stub = stub_new(frame, 1, GF_FOP_FSETXATTR);
@@ -971,8 +889,6 @@ fop_fsetxattr_cbk_stub(call_frame_t *frame, fop_fsetxattr_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FSETXATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -988,7 +904,6 @@ fop_fgetxattr_stub(call_frame_t *frame, fop_fgetxattr_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fd, out);
stub = stub_new(frame, 1, GF_FOP_FGETXATTR);
@@ -1007,8 +922,6 @@ fop_fgetxattr_cbk_stub(call_frame_t *frame, fop_fgetxattr_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_GETXATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1024,7 +937,6 @@ fop_removexattr_stub(call_frame_t *frame, fop_removexattr_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", loc, out);
GF_VALIDATE_OR_GOTO("call-stub", name, out);
@@ -1043,8 +955,6 @@ fop_removexattr_cbk_stub(call_frame_t *frame, fop_removexattr_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_REMOVEXATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1060,7 +970,6 @@ fop_fremovexattr_stub(call_frame_t *frame, fop_fremovexattr_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fd, out);
GF_VALIDATE_OR_GOTO("call-stub", name, out);
@@ -1079,8 +988,6 @@ fop_fremovexattr_cbk_stub(call_frame_t *frame, fop_fremovexattr_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FREMOVEXATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1096,7 +1003,6 @@ fop_lk_stub(call_frame_t *frame, fop_lk_t fn, fd_t *fd, int32_t cmd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", lock, out);
stub = stub_new(frame, 1, GF_FOP_LK);
@@ -1114,8 +1020,6 @@ fop_lk_cbk_stub(call_frame_t *frame, fop_lk_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_LK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1131,7 +1035,6 @@ fop_inodelk_stub(call_frame_t *frame, fop_inodelk_t fn, const char *volume,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", lock, out);
stub = stub_new(frame, 1, GF_FOP_INODELK);
@@ -1149,8 +1052,6 @@ fop_inodelk_cbk_stub(call_frame_t *frame, fop_inodelk_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_INODELK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1166,7 +1067,6 @@ fop_finodelk_stub(call_frame_t *frame, fop_finodelk_t fn, const char *volume,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", lock, out);
stub = stub_new(frame, 1, GF_FOP_FINODELK);
@@ -1185,8 +1085,6 @@ fop_finodelk_cbk_stub(call_frame_t *frame, fop_inodelk_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FINODELK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1203,8 +1101,6 @@ fop_entrylk_stub(call_frame_t *frame, fop_entrylk_t fn, const char *volume,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_ENTRYLK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1221,8 +1117,6 @@ fop_entrylk_cbk_stub(call_frame_t *frame, fop_entrylk_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_ENTRYLK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1239,8 +1133,6 @@ fop_fentrylk_stub(call_frame_t *frame, fop_fentrylk_t fn, const char *volume,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 1, GF_FOP_FENTRYLK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1256,8 +1148,6 @@ fop_fentrylk_cbk_stub(call_frame_t *frame, fop_fentrylk_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FENTRYLK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1274,8 +1164,6 @@ fop_readdirp_cbk_stub(call_frame_t *frame, fop_readdirp_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_READDIRP);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1291,8 +1179,6 @@ fop_readdir_cbk_stub(call_frame_t *frame, fop_readdir_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_READDIR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1338,7 +1224,6 @@ fop_rchecksum_stub(call_frame_t *frame, fop_rchecksum_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fd, out);
stub = stub_new(frame, 1, GF_FOP_RCHECKSUM);
@@ -1357,8 +1242,6 @@ fop_rchecksum_cbk_stub(call_frame_t *frame, fop_rchecksum_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_RCHECKSUM);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1375,8 +1258,6 @@ fop_xattrop_cbk_stub(call_frame_t *frame, fop_xattrop_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_XATTROP);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1392,7 +1273,6 @@ fop_fxattrop_cbk_stub(call_frame_t *frame, fop_fxattrop_cbk_t fn,
dict_t *xdata)
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
stub = stub_new(frame, 0, GF_FOP_FXATTROP);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1409,7 +1289,6 @@ fop_xattrop_stub(call_frame_t *frame, fop_xattrop_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", xattr, out);
stub = stub_new(frame, 1, GF_FOP_XATTROP);
@@ -1427,7 +1306,6 @@ fop_fxattrop_stub(call_frame_t *frame, fop_fxattrop_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", xattr, out);
stub = stub_new(frame, 1, GF_FOP_FXATTROP);
@@ -1446,8 +1324,6 @@ fop_setattr_cbk_stub(call_frame_t *frame, fop_setattr_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_SETATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1465,8 +1341,6 @@ fop_fsetattr_cbk_stub(call_frame_t *frame, fop_setattr_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FSETATTR);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1483,7 +1357,6 @@ fop_setattr_stub(call_frame_t *frame, fop_setattr_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_SETATTR);
@@ -1501,7 +1374,6 @@ fop_fsetattr_stub(call_frame_t *frame, fop_fsetattr_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_FSETATTR);
@@ -1520,8 +1392,6 @@ fop_fallocate_cbk_stub(call_frame_t *frame, fop_fallocate_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_FALLOCATE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1539,7 +1409,6 @@ fop_fallocate_stub(call_frame_t *frame, fop_fallocate_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_FALLOCATE);
@@ -1558,8 +1427,6 @@ fop_discard_cbk_stub(call_frame_t *frame, fop_discard_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_DISCARD);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1577,7 +1444,6 @@ fop_discard_stub(call_frame_t *frame, fop_discard_t fn, fd_t *fd, off_t offset,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_DISCARD);
@@ -1596,8 +1462,6 @@ fop_zerofill_cbk_stub(call_frame_t *frame, fop_zerofill_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_ZEROFILL);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1615,7 +1479,6 @@ fop_zerofill_stub(call_frame_t *frame, fop_zerofill_t fn, fd_t *fd,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_ZEROFILL);
@@ -1633,8 +1496,6 @@ fop_ipc_cbk_stub(call_frame_t *frame, fop_ipc_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_IPC);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1650,7 +1511,6 @@ fop_ipc_stub(call_frame_t *frame, fop_ipc_t fn, int32_t op, dict_t *xdata)
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_IPC);
@@ -1668,8 +1528,6 @@ fop_lease_cbk_stub(call_frame_t *frame, fop_lease_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_LEASE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1685,7 +1543,6 @@ fop_lease_stub(call_frame_t *frame, fop_lease_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
GF_VALIDATE_OR_GOTO("call-stub", lease, out);
@@ -1704,8 +1561,6 @@ fop_seek_cbk_stub(call_frame_t *frame, fop_seek_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_SEEK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1722,7 +1577,6 @@ fop_seek_stub(call_frame_t *frame, fop_seek_t fn, fd_t *fd, off_t offset,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_SEEK);
@@ -1741,8 +1595,6 @@ fop_getactivelk_cbk_stub(call_frame_t *frame, fop_getactivelk_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_GETACTIVELK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1759,7 +1611,6 @@ fop_getactivelk_stub(call_frame_t *frame, fop_getactivelk_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_GETACTIVELK);
@@ -1781,8 +1632,6 @@ fop_setactivelk_cbk_stub(call_frame_t *frame, fop_setactivelk_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_SETACTIVELK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1803,7 +1652,6 @@ fop_setactivelk_stub(call_frame_t *frame, fop_setactivelk_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_SETACTIVELK);
@@ -1825,7 +1673,6 @@ fop_copy_file_range_stub(call_frame_t *frame, fop_copy_file_range_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_COPY_FILE_RANGE);
@@ -1848,7 +1695,6 @@ fop_copy_file_range_cbk_stub(call_frame_t *frame, fop_copy_file_range_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 0, GF_FOP_COPY_FILE_RANGE);
@@ -1869,7 +1715,6 @@ fop_put_stub(call_frame_t *frame, fop_put_t fn, loc_t *loc, mode_t mode,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", vector, out);
stub = stub_new(frame, 1, GF_FOP_PUT);
@@ -1889,8 +1734,6 @@ fop_put_cbk_stub(call_frame_t *frame, fop_put_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_PUT);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1907,7 +1750,6 @@ fop_icreate_stub(call_frame_t *frame, fop_icreate_t fn, loc_t *loc, mode_t mode,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_ICREATE);
@@ -1947,8 +1789,6 @@ fop_icreate_cbk_stub(call_frame_t *frame, fop_icreate_cbk_t fn, int32_t op_ret,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_ICREATE);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
@@ -1966,7 +1806,6 @@ fop_namelink_stub(call_frame_t *frame, fop_namelink_t fn, loc_t *loc,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
GF_VALIDATE_OR_GOTO("call-stub", fn, out);
stub = stub_new(frame, 1, GF_FOP_NAMELINK);
@@ -2006,8 +1845,6 @@ fop_namelink_cbk_stub(call_frame_t *frame, fop_namelink_cbk_t fn,
{
call_stub_t *stub = NULL;
- GF_VALIDATE_OR_GOTO("call-stub", frame, out);
-
stub = stub_new(frame, 0, GF_FOP_NAMELINK);
GF_VALIDATE_OR_GOTO("call-stub", stub, out);
diff --git a/libglusterfs/src/client_t.c b/libglusterfs/src/client_t.c
index e875c8b6b69..9d377c3c2e1 100644
--- a/libglusterfs/src/client_t.c
+++ b/libglusterfs/src/client_t.c
@@ -13,7 +13,6 @@
#include "glusterfs/statedump.h"
#include "glusterfs/client_t.h"
#include "glusterfs/list.h"
-#include "rpcsvc.h"
#include "glusterfs/libglusterfs-messages.h"
static int
@@ -110,50 +109,13 @@ gf_clienttable_alloc(void)
return clienttable;
}
-void
-gf_client_clienttable_destroy(clienttable_t *clienttable)
-{
- client_t *client = NULL;
- cliententry_t *cliententries = NULL;
- uint32_t client_count = 0;
- int32_t i = 0;
-
- if (!clienttable) {
- gf_msg_callingfn("client_t", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
- "!clienttable");
- return;
- }
-
- LOCK(&clienttable->lock);
- {
- client_count = clienttable->max_clients;
- clienttable->max_clients = 0;
- cliententries = clienttable->cliententries;
- clienttable->cliententries = NULL;
- }
- UNLOCK(&clienttable->lock);
-
- if (cliententries != NULL) {
- for (i = 0; i < client_count; i++) {
- client = cliententries[i].client;
- if (client != NULL) {
- gf_client_unref(client);
- }
- }
-
- GF_FREE(cliententries);
- LOCK_DESTROY(&clienttable->lock);
- GF_FREE(clienttable);
- }
-}
-
/*
* Increments ref.bind if the client is already present or creates a new
* client with ref.bind = 1,ref.count = 1 it signifies that
* as long as ref.bind is > 0 client should be alive.
*/
client_t *
-gf_client_get(xlator_t *this, struct rpcsvc_auth_data *cred, char *client_uid,
+gf_client_get(xlator_t *this, client_auth_data_t *cred, char *client_uid,
char *subdir_mount)
{
client_t *client = NULL;
@@ -181,11 +143,10 @@ gf_client_get(xlator_t *this, struct rpcsvc_auth_data *cred, char *client_uid,
* if auth was used, matching auth flavour and data
*/
if (strcmp(client_uid, client->client_uid) == 0 &&
- (cred->flavour != AUTH_NONE &&
- (cred->flavour == client->auth.flavour &&
- (size_t)cred->datalen == client->auth.len &&
- memcmp(cred->authdata, client->auth.data, client->auth.len) ==
- 0))) {
+ (cred->flavour && (cred->flavour == client->auth.flavour &&
+ (size_t)cred->datalen == client->auth.len &&
+ memcmp(cred->authdata, client->auth.data,
+ client->auth.len) == 0))) {
GF_ATOMIC_INC(client->bind);
goto unlock;
}
@@ -227,7 +188,7 @@ gf_client_get(xlator_t *this, struct rpcsvc_auth_data *cred, char *client_uid,
GF_ATOMIC_INIT(client->fd_cnt, 0);
client->auth.flavour = cred->flavour;
- if (cred->flavour != AUTH_NONE) {
+ if (cred->flavour) {
client->auth.data = GF_MALLOC(cred->datalen, gf_common_mt_client_t);
if (client->auth.data == NULL) {
GF_FREE(client->scratch_ctx.ctx);
@@ -353,8 +314,6 @@ client_destroy(client_t *client)
clienttable = client->this->ctx->clienttable;
- LOCK_DESTROY(&client->scratch_ctx.lock);
-
LOCK(&clienttable->lock);
{
clienttable->cliententries[client->tbl_index].client = NULL;
@@ -372,6 +331,8 @@ client_destroy(client_t *client)
if (client->subdir_inode)
inode_unref(client->subdir_inode);
+ LOCK_DESTROY(&client->scratch_ctx.lock);
+
GF_FREE(client->auth.data);
GF_FREE(client->auth.username);
GF_FREE(client->auth.passwd);
@@ -581,62 +542,6 @@ client_ctx_del(client_t *client, void *key, void **value)
}
void
-client_dump(client_t *client, char *prefix)
-{
- if (!client)
- return;
-
- gf_proc_dump_write("refcount", "%" GF_PRI_ATOMIC,
- GF_ATOMIC_GET(client->count));
-}
-
-void
-cliententry_dump(cliententry_t *cliententry, char *prefix)
-{
- if (!cliententry)
- return;
-
- if (GF_CLIENTENTRY_ALLOCATED != cliententry->next_free)
- return;
-
- if (cliententry->client)
- client_dump(cliententry->client, prefix);
-}
-
-void
-clienttable_dump(clienttable_t *clienttable, char *prefix)
-{
- int i = 0;
- int ret = -1;
- char key[GF_DUMP_MAX_BUF_LEN] = {0};
-
- if (!clienttable)
- return;
-
- ret = TRY_LOCK(&clienttable->lock);
- {
- if (ret) {
- gf_msg("client_t", GF_LOG_WARNING, 0, LG_MSG_LOCK_FAILED,
- "Unable to acquire lock");
- return;
- }
- gf_proc_dump_build_key(key, prefix, "maxclients");
- gf_proc_dump_write(key, "%d", clienttable->max_clients);
- gf_proc_dump_build_key(key, prefix, "first_free");
- gf_proc_dump_write(key, "%d", clienttable->first_free);
- for (i = 0; i < clienttable->max_clients; i++) {
- if (GF_CLIENTENTRY_ALLOCATED ==
- clienttable->cliententries[i].next_free) {
- gf_proc_dump_build_key(key, prefix, "cliententry[%d]", i);
- gf_proc_dump_add_section("%s", key);
- cliententry_dump(&clienttable->cliententries[i], key);
- }
- }
- }
- UNLOCK(&clienttable->lock);
-}
-
-void
client_ctx_dump(client_t *client, char *prefix)
{
#if 0 /* TBD, FIXME */
diff --git a/libglusterfs/src/common-utils.c b/libglusterfs/src/common-utils.c
index 8e2ffa3accd..682cbf28055 100644
--- a/libglusterfs/src/common-utils.c
+++ b/libglusterfs/src/common-utils.c
@@ -37,6 +37,9 @@
#ifndef GF_LINUX_HOST_OS
#include <sys/resource.h>
#endif
+#ifdef HAVE_SYNCFS_SYS
+#include <sys/syscall.h>
+#endif
#include "glusterfs/compat-errno.h"
#include "glusterfs/common-utils.h"
@@ -45,13 +48,12 @@
#include "glusterfs/stack.h"
#include "glusterfs/lkowner.h"
#include "glusterfs/syscall.h"
-#include "cli1-xdr.h"
#include "glusterfs/globals.h"
#define XXH_INLINE_ALL
#include "xxhash.h"
#include <ifaddrs.h>
#include "glusterfs/libglusterfs-messages.h"
-#include "protocol-common.h"
+#include "glusterfs/glusterfs-acl.h"
#ifdef __FreeBSD__
#include <pthread_np.h>
#undef BIT_SET
@@ -77,6 +79,15 @@ char *vol_type_str[] = {
typedef int32_t (*rw_op_t)(int32_t fd, char *buf, int32_t size);
typedef int32_t (*rwv_op_t)(int32_t fd, const struct iovec *buf, int32_t size);
+char *xattrs_to_heal[] = {"user.",
+ POSIX_ACL_ACCESS_XATTR,
+ POSIX_ACL_DEFAULT_XATTR,
+ QUOTA_LIMIT_KEY,
+ QUOTA_LIMIT_OBJECTS_KEY,
+ GF_SELINUX_XATTR_KEY,
+ GF_XATTR_MDATA_KEY,
+ NULL};
+
void
gf_xxh64_wrapper(const unsigned char *data, size_t const len,
unsigned long long const seed, char *xxh64)
@@ -304,8 +315,7 @@ mkdir_p(char *path, mode_t mode, gf_boolean_t allow_symlinks)
dir[i] = '\0';
ret = sys_mkdir(dir, mode);
if (ret && errno != EEXIST) {
- gf_msg("", GF_LOG_ERROR, errno, LG_MSG_DIR_OP_FAILED,
- "Failed due to reason");
+ gf_smsg("", GF_LOG_ERROR, errno, LG_MSG_DIR_OP_FAILED, NULL);
goto out;
}
@@ -316,10 +326,8 @@ mkdir_p(char *path, mode_t mode, gf_boolean_t allow_symlinks)
if (S_ISLNK(stbuf.st_mode)) {
ret = -1;
- gf_msg("", GF_LOG_ERROR, 0, LG_MSG_DIR_IS_SYMLINK,
- "%s is a "
- "symlink",
- dir);
+ gf_smsg("", GF_LOG_ERROR, 0, LG_MSG_DIR_IS_SYMLINK, "dir=%s",
+ dir, NULL);
goto out;
}
}
@@ -332,10 +340,10 @@ mkdir_p(char *path, mode_t mode, gf_boolean_t allow_symlinks)
if (ret == 0)
errno = 0;
ret = -1;
- gf_msg("", GF_LOG_ERROR, errno, LG_MSG_DIR_OP_FAILED,
- "Failed"
- " to create directory, possibly some of the components"
- " were not directories");
+ gf_smsg("", GF_LOG_ERROR, errno, LG_MSG_DIR_OP_FAILED,
+ "possibly some of the components"
+ " were not directories",
+ NULL);
goto out;
}
@@ -408,10 +416,8 @@ gf_rev_dns_lookup(const char *ip)
/* Get the FQDN */
ret = gf_get_hostname_from_ip((char *)ip, &fqdn);
if (ret != 0) {
- gf_msg("resolver", GF_LOG_INFO, errno, LG_MSG_RESOLVE_HOSTNAME_FAILED,
- "could not resolve "
- "hostname for %s",
- ip);
+ gf_smsg("resolver", GF_LOG_INFO, errno, LG_MSG_RESOLVE_HOSTNAME_FAILED,
+ "hostname=%s", ip, NULL);
}
out:
return fqdn;
@@ -432,7 +438,7 @@ gf_resolve_path_parent(const char *path)
GF_VALIDATE_OR_GOTO(THIS->name, path, out);
- if (strlen(path) <= 0) {
+ if (0 == strlen(path)) {
gf_msg_callingfn(THIS->name, GF_LOG_DEBUG, 0, LG_MSG_INVALID_STRING,
"invalid string for 'path'");
goto out;
@@ -500,9 +506,8 @@ gf_resolve_ip6(const char *hostname, uint16_t port, int family, void **dnscache,
}
if ((ret = getaddrinfo(hostname, port_str, &hints, &cache->first)) !=
0) {
- gf_msg("resolver", GF_LOG_ERROR, 0, LG_MSG_GETADDRINFO_FAILED,
- "getaddrinfo failed (family:%d) (%s)", family,
- gai_strerror(ret));
+ gf_smsg("resolver", GF_LOG_ERROR, 0, LG_MSG_GETADDRINFO_FAILED,
+ "family=%d", family, "ret=%s", gai_strerror(ret), NULL);
GF_FREE(*dnscache);
*dnscache = NULL;
@@ -519,10 +524,8 @@ gf_resolve_ip6(const char *hostname, uint16_t port, int family, void **dnscache,
cache->next->ai_addrlen, host, sizeof(host), service,
sizeof(service), NI_NUMERICHOST);
if (ret != 0) {
- gf_msg("resolver", GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
- "getnameinfo failed"
- " (%s)",
- gai_strerror(ret));
+ gf_smsg("resolver", GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
+ "ret=%s", gai_strerror(ret), NULL);
goto err;
}
@@ -541,10 +544,8 @@ gf_resolve_ip6(const char *hostname, uint16_t port, int family, void **dnscache,
cache->next->ai_addrlen, host, sizeof(host), service,
sizeof(service), NI_NUMERICHOST);
if (ret != 0) {
- gf_msg("resolver", GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
- "getnameinfo failed"
- " (%s)",
- gai_strerror(ret));
+ gf_smsg("resolver", GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
+ "ret=%s", gai_strerror(ret), NULL);
goto err;
}
@@ -576,8 +577,14 @@ struct dnscache *
gf_dnscache_init(time_t ttl)
{
struct dnscache *cache = GF_MALLOC(sizeof(*cache), gf_common_mt_dnscache);
- if (cache) {
- cache->cache_dict = NULL;
+ if (!cache)
+ return NULL;
+
+ cache->cache_dict = dict_new();
+ if (!cache->cache_dict) {
+ GF_FREE(cache);
+ cache = NULL;
+ } else {
cache->ttl = ttl;
}
@@ -585,6 +592,20 @@ gf_dnscache_init(time_t ttl)
}
/**
+ * gf_dnscache_deinit -- cleanup resources used by struct dnscache
+ */
+void
+gf_dnscache_deinit(struct dnscache *cache)
+{
+ if (!cache) {
+ gf_msg_plain(GF_LOG_WARNING, "dnscache is NULL");
+ return;
+ }
+ dict_unref(cache->cache_dict);
+ GF_FREE(cache);
+}
+
+/**
* gf_dnscache_entry_init -- Initialize a dnscache entry
*
* @return: SUCCESS: Pointer to an allocated dnscache entry struct
@@ -632,12 +653,6 @@ gf_rev_dns_lookup_cached(const char *ip, struct dnscache *dnscache)
if (!dnscache)
goto out;
- if (!dnscache->cache_dict) {
- dnscache->cache_dict = dict_new();
- if (!dnscache->cache_dict) {
- goto out;
- }
- }
cache = dnscache->cache_dict;
/* Quick cache lookup to see if we already hold it */
@@ -645,7 +660,7 @@ gf_rev_dns_lookup_cached(const char *ip, struct dnscache *dnscache)
if (entrydata) {
dnsentry = (struct dnscache_entry *)entrydata->data;
/* First check the TTL & timestamp */
- if (time(NULL) - dnsentry->timestamp > dnscache->ttl) {
+ if (gf_time() - dnsentry->timestamp > dnscache->ttl) {
gf_dnscache_entry_deinit(dnsentry);
entrydata->data = NULL; /* Mark this as 'null' so
* dict_del () doesn't try free
@@ -676,23 +691,16 @@ gf_rev_dns_lookup_cached(const char *ip, struct dnscache *dnscache)
from_cache = _gf_false;
out:
/* Insert into the cache */
- if (fqdn && !from_cache) {
+ if (fqdn && !from_cache && ip) {
struct dnscache_entry *entry = gf_dnscache_entry_init();
- if (!entry) {
- goto out;
+ if (entry) {
+ entry->fqdn = fqdn;
+ entry->ip = gf_strdup(ip);
+ entry->timestamp = gf_time();
+ entrydata = bin_to_data(entry, sizeof(*entry));
+ dict_set(cache, (char *)ip, entrydata);
}
- entry->fqdn = fqdn;
- if (!ip) {
- gf_dnscache_entry_deinit(entry);
- goto out;
- }
-
- entry->ip = gf_strdup(ip);
- entry->timestamp = time(NULL);
-
- entrydata = bin_to_data(entry, sizeof(*entry));
- dict_set(cache, (char *)ip, entrydata);
}
return fqdn;
}
@@ -901,7 +909,7 @@ gf_print_trace(int32_t signum, glusterfs_ctx_t *ctx)
char msg[1024] = {
0,
};
- char timestr[64] = {
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
call_stack_t *stack = NULL;
@@ -941,7 +949,7 @@ gf_print_trace(int32_t signum, glusterfs_ctx_t *ctx)
{
/* Dump the timestamp of the crash too, so the previous logs
can be related */
- gf_time_fmt(timestr, sizeof timestr, time(NULL), gf_timefmt_FT);
+ gf_time_fmt(timestr, sizeof timestr, gf_time(), gf_timefmt_FT);
gf_msg_plain_nomem(GF_LOG_ALERT, "time of crash: ");
gf_msg_plain_nomem(GF_LOG_ALERT, timestr);
}
@@ -1943,6 +1951,74 @@ gf_string2boolean(const char *str, gf_boolean_t *b)
}
int
+gf_strn2boolean(const char *str, const int len, gf_boolean_t *b)
+{
+ if (str == NULL) {
+ gf_msg_callingfn(THIS->name, GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
+ "argument invalid");
+ return -1;
+ }
+
+ switch (len) {
+ case 1:
+ if (strcasecmp(str, "1") == 0) {
+ *b = _gf_true;
+ return 0;
+ } else if (strcasecmp(str, "0") == 0) {
+ *b = _gf_false;
+ return 0;
+ }
+ break;
+ case 2:
+ if (strcasecmp(str, "on") == 0) {
+ *b = _gf_true;
+ return 0;
+ } else if (strcasecmp(str, "no") == 0) {
+ *b = _gf_false;
+ return 0;
+ }
+ break;
+ case 3:
+ if (strcasecmp(str, "yes") == 0) {
+ *b = _gf_true;
+ return 0;
+ } else if (strcasecmp(str, "off") == 0) {
+ *b = _gf_false;
+ return 0;
+ }
+ break;
+ case 4:
+ if (strcasecmp(str, "true") == 0) {
+ *b = _gf_true;
+ return 0;
+ }
+ break;
+ case 5:
+ if (strcasecmp(str, "false") == 0) {
+ *b = _gf_false;
+ return 0;
+ }
+ break;
+ case 6:
+ if (strcasecmp(str, "enable") == 0) {
+ *b = _gf_true;
+ return 0;
+ }
+ break;
+ case 7:
+ if (strcasecmp(str, "disable") == 0) {
+ *b = _gf_false;
+ return 0;
+ }
+ break;
+ default:
+ return -1;
+ break;
+ }
+ return -1;
+}
+
+int
gf_lockfd(int fd)
{
struct gf_flock fl;
@@ -2037,8 +2113,8 @@ get_checksum_for_path(char *path, uint32_t *checksum, int op_version)
fd = open(path, O_RDWR);
if (fd == -1) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_PATH_ERROR,
- "Unable to open %s", path);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_PATH_OPEN_FAILED,
+ "path=%s", path, NULL);
goto out;
}
@@ -2071,8 +2147,8 @@ get_file_mtime(const char *path, time_t *stamp)
ret = sys_stat(path, &f_stat);
if (ret < 0) {
- gf_msg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_FILE_STAT_FAILED,
- "failed to stat %s", path);
+ gf_smsg(THIS->name, GF_LOG_ERROR, errno, LG_MSG_FILE_STAT_FAILED,
+ "path=%s", path, NULL);
goto out;
}
@@ -2131,14 +2207,14 @@ gf_is_ip_in_net(const char *network, const char *ip_str)
/* Convert IP address to a long */
ret = inet_pton(family, ip_str, &ip_buf);
if (ret < 0)
- gf_msg("common-utils", GF_LOG_ERROR, errno, LG_MSG_INET_PTON_FAILED,
- "inet_pton() failed");
+ gf_smsg("common-utils", GF_LOG_ERROR, errno, LG_MSG_INET_PTON_FAILED,
+ NULL);
/* Convert network IP address to a long */
ret = inet_pton(family, net_ip, &net_ip_buf);
if (ret < 0) {
- gf_msg("common-utils", GF_LOG_ERROR, errno, LG_MSG_INET_PTON_FAILED,
- "inet_pton() failed");
+ gf_smsg("common-utils", GF_LOG_ERROR, errno, LG_MSG_INET_PTON_FAILED,
+ NULL);
goto out;
}
@@ -2718,8 +2794,8 @@ gf_boolean_t
gf_sock_union_equal_addr(union gf_sock_union *a, union gf_sock_union *b)
{
if (!a || !b) {
- gf_msg("common-utils", GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY,
- "Invalid arguments to gf_sock_union_equal_addr");
+ gf_smsg("common-utils", GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY,
+ "gf_sock_union_equal_addr", NULL);
return _gf_false;
}
@@ -2947,8 +3023,8 @@ gf_roundup_power_of_two(int32_t nr)
int32_t result = 1;
if (nr < 0) {
- gf_msg("common-utils", GF_LOG_WARNING, 0, LG_MSG_NEGATIVE_NUM_PASSED,
- "negative number passed");
+ gf_smsg("common-utils", GF_LOG_WARNING, 0, LG_MSG_NEGATIVE_NUM_PASSED,
+ NULL);
result = -1;
goto out;
}
@@ -2971,8 +3047,8 @@ gf_roundup_next_power_of_two(int32_t nr)
int32_t result = 1;
if (nr < 0) {
- gf_msg("common-utils", GF_LOG_WARNING, 0, LG_MSG_NEGATIVE_NUM_PASSED,
- "negative number passed");
+ gf_smsg("common-utils", GF_LOG_WARNING, 0, LG_MSG_NEGATIVE_NUM_PASSED,
+ NULL);
result = -1;
goto out;
}
@@ -2985,16 +3061,6 @@ out:
}
int
-get_vol_type(int type, int dist_count, int brick_count)
-{
- if ((type != GF_CLUSTER_TYPE_TIER) && (type > 0) &&
- (dist_count < brick_count))
- type = type + GF_CLUSTER_TYPE_MAX - 1;
-
- return type;
-}
-
-int
validate_brick_name(char *brick)
{
char *delimiter = NULL;
@@ -3067,7 +3133,7 @@ get_mem_size()
memsize = page_size * num_pages;
#endif
-#if defined GF_DARWIN_HOST_OS
+#if defined GF_DARWIN_HOST_OS || defined __FreeBSD__
size_t len = sizeof(memsize);
int name[] = {CTL_HW, HW_PHYSMEM};
@@ -3156,8 +3222,7 @@ gf_canonicalize_path(char *path)
out:
if (ret)
- gf_msg("common-utils", GF_LOG_ERROR, 0, LG_MSG_PATH_ERROR,
- "Path manipulation failed");
+ gf_smsg("common-utils", GF_LOG_ERROR, 0, LG_MSG_PATH_ERROR, NULL);
GF_FREE(tmppath);
@@ -3211,19 +3276,15 @@ gf_get_reserved_ports()
* continue with older method of using any of the available
* port? For now 2nd option is considered.
*/
- gf_msg("glusterfs", GF_LOG_WARNING, errno, LG_MSG_FILE_OP_FAILED,
- "could not open the file "
- "/proc/sys/net/ipv4/ip_local_reserved_ports for "
- "getting reserved ports info");
+ gf_smsg("glusterfs", GF_LOG_WARNING, errno, LG_MSG_FILE_OP_FAILED,
+ " /proc/sys/net/ipv4/ip_local_reserved_ports", NULL);
goto out;
}
ret = sys_read(proc_fd, buffer, sizeof(buffer) - 1);
if (ret < 0) {
- gf_msg("glusterfs", GF_LOG_WARNING, errno, LG_MSG_FILE_OP_FAILED,
- "could not read the file %s for"
- " getting reserved ports info",
- proc_file);
+ gf_smsg("glusterfs", GF_LOG_WARNING, errno, LG_MSG_FILE_OP_FAILED,
+ "file=%s", proc_file, NULL);
goto out;
}
@@ -3251,10 +3312,8 @@ gf_process_reserved_ports(unsigned char *ports, uint32_t ceiling)
ports_info = gf_get_reserved_ports();
if (!ports_info) {
- gf_msg("glusterfs", GF_LOG_WARNING, 0, LG_MSG_RESERVED_PORTS_ERROR,
- "Not able to get reserved"
- " ports, hence there is a possibility that glusterfs "
- "may consume reserved port");
+ gf_smsg("glusterfs", GF_LOG_WARNING, 0, LG_MSG_RESERVED_PORTS_ERROR,
+ NULL);
goto out;
}
@@ -3291,8 +3350,8 @@ gf_ports_reserved(char *blocked_port, unsigned char *ports, uint32_t ceiling)
blocked_port[strlen(blocked_port) - 1] = '\0';
if (gf_string2int32(blocked_port, &tmp_port1) == 0) {
if (tmp_port1 > GF_PORT_MAX || tmp_port1 < 0) {
- gf_msg("glusterfs-socket", GF_LOG_WARNING, 0,
- LG_MSG_INVALID_PORT, "invalid port %d", tmp_port1);
+ gf_smsg("glusterfs-socket", GF_LOG_WARNING, 0,
+ LG_MSG_INVALID_PORT, "port=%d", tmp_port1, NULL);
result = _gf_true;
goto out;
} else {
@@ -3303,10 +3362,8 @@ gf_ports_reserved(char *blocked_port, unsigned char *ports, uint32_t ceiling)
BIT_SET(ports, tmp_port1);
}
} else {
- gf_msg("glusterfs-socket", GF_LOG_WARNING, 0, LG_MSG_INVALID_PORT,
- "%s is not a valid port "
- "identifier",
- blocked_port);
+ gf_smsg("glusterfs-socket", GF_LOG_WARNING, 0, LG_MSG_INVALID_PORT,
+ "port=%s", blocked_port, NULL);
result = _gf_true;
goto out;
}
@@ -3408,10 +3465,8 @@ gf_get_hostname_from_ip(char *client_ip, char **hostname)
ret = getnameinfo(client_sockaddr, addr_sz, client_hostname,
sizeof(client_hostname), NULL, 0, 0);
if (ret) {
- gf_msg("common-utils", GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
- "Could not lookup hostname "
- "of %s : %s",
- client_ip, gai_strerror(ret));
+ gf_smsg("common-utils", GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
+ "ip=%s", client_ip, "ret=%s", gai_strerror(ret), NULL);
ret = -1;
goto out;
}
@@ -3440,8 +3495,8 @@ gf_interface_search(char *ip)
ret = getifaddrs(&ifaddr);
if (ret != 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, LG_MSG_GETIFADDRS_FAILED,
- "getifaddrs() failed: %s\n", gai_strerror(ret));
+ gf_smsg(this->name, GF_LOG_ERROR, 0, LG_MSG_GETIFADDRS_FAILED, "ret=%s",
+ gai_strerror(ret), NULL);
goto out;
}
@@ -3465,10 +3520,8 @@ gf_interface_search(char *ip)
host, NI_MAXHOST, NULL, 0, NI_NUMERICHOST);
if (ret != 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
- "getnameinfo() "
- "failed: %s\n",
- gai_strerror(ret));
+ gf_smsg(this->name, GF_LOG_ERROR, 0, LG_MSG_GETNAMEINFO_FAILED,
+ "ret=%s", gai_strerror(ret), NULL);
goto out;
}
@@ -3518,14 +3571,12 @@ get_ip_from_addrinfo(struct addrinfo *addr, char **ip)
break;
default:
- gf_msg("glusterd", GF_LOG_ERROR, 0, LG_MSG_INVALID_FAMILY,
- "Invalid family");
+ gf_smsg("glusterd", GF_LOG_ERROR, 0, LG_MSG_INVALID_FAMILY, NULL);
return NULL;
}
if (!inet_ntop(addr->ai_family, in_addr, buf, sizeof(buf))) {
- gf_msg("glusterd", GF_LOG_ERROR, 0, LG_MSG_CONVERSION_FAILED,
- "String conversion failed");
+ gf_smsg("glusterd", GF_LOG_ERROR, 0, LG_MSG_CONVERSION_FAILED, NULL);
return NULL;
}
@@ -3560,10 +3611,9 @@ gf_is_loopback_localhost(const struct sockaddr *sa, char *hostname)
default:
if (hostname)
- gf_msg("glusterd", GF_LOG_ERROR, 0, LG_MSG_INVALID_FAMILY,
- "unknown "
- "address family %d for %s",
- sa->sa_family, hostname);
+ gf_smsg("glusterd", GF_LOG_ERROR, 0, LG_MSG_INVALID_FAMILY,
+ "family=%d", sa->sa_family, "hostname=%s", hostname,
+ NULL);
break;
}
@@ -3593,8 +3643,8 @@ gf_is_local_addr(char *hostname)
ret = getaddrinfo(hostname, NULL, &hints, &result);
if (ret != 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, LG_MSG_GETADDRINFO_FAILED,
- "error in getaddrinfo: %s\n", gai_strerror(ret));
+ gf_smsg(this->name, GF_LOG_ERROR, 0, LG_MSG_GETADDRINFO_FAILED,
+ "ret=%s", gai_strerror(ret), NULL);
goto out;
}
@@ -3642,15 +3692,15 @@ gf_is_same_address(char *name1, char *name2)
gai_err = getaddrinfo(name1, NULL, &hints, &addr1);
if (gai_err != 0) {
- gf_msg(name1, GF_LOG_WARNING, 0, LG_MSG_GETADDRINFO_FAILED,
- "error in getaddrinfo: %s\n", gai_strerror(gai_err));
+ gf_smsg(name1, GF_LOG_WARNING, 0, LG_MSG_GETADDRINFO_FAILED, "error=%s",
+ gai_strerror(gai_err), NULL);
goto out;
}
gai_err = getaddrinfo(name2, NULL, &hints, &addr2);
if (gai_err != 0) {
- gf_msg(name2, GF_LOG_WARNING, 0, LG_MSG_GETADDRINFO_FAILED,
- "error in getaddrinfo: %s\n", gai_strerror(gai_err));
+ gf_smsg(name2, GF_LOG_WARNING, 0, LG_MSG_GETADDRINFO_FAILED, "error=%s",
+ gai_strerror(gai_err), NULL);
goto out;
}
@@ -3787,8 +3837,10 @@ gf_set_volfile_server_common(cmd_args_t *cmd_args, const char *host,
if ((!strcmp(tmp->volfile_server, server->volfile_server) &&
!strcmp(tmp->transport, server->transport) &&
(tmp->port == server->port))) {
- errno = EEXIST;
- ret = -1;
+ /* Duplicate option given, log and ignore */
+ gf_smsg("gluster", GF_LOG_INFO, EEXIST, LG_MSG_DUPLICATE_ENTRY,
+ NULL);
+ ret = 0;
goto out;
}
}
@@ -3965,15 +4017,14 @@ gf_thread_set_vname(pthread_t thread, const char *name, va_list args)
sizeof(thread_name) - sizeof(GF_THREAD_NAME_PREFIX) + 1,
name, args);
if (ret < 0) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_PTHREAD_NAMING_FAILED,
- "Failed to compose thread name ('%s')", name);
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_PTHREAD_NAMING_FAILED,
+ "name=%s", name, NULL);
return;
}
if (ret >= sizeof(thread_name)) {
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_PTHREAD_NAMING_FAILED,
- "Thread name is too long. It has been truncated ('%s')",
- thread_name);
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_THREAD_NAME_TOO_LONG,
+ "name=%s", thread_name, NULL);
}
#ifdef GF_LINUX_HOST_OS
@@ -3987,8 +4038,8 @@ gf_thread_set_vname(pthread_t thread, const char *name, va_list args)
ret = ENOSYS;
#endif
if (ret != 0) {
- gf_msg(THIS->name, GF_LOG_WARNING, ret, LG_MSG_PTHREAD_NAMING_FAILED,
- "Could not set thread name: %s", thread_name);
+ gf_smsg(THIS->name, GF_LOG_WARNING, ret, LG_MSG_SET_THREAD_FAILED,
+ "name=%s", thread_name, NULL);
}
}
@@ -4023,8 +4074,8 @@ gf_thread_vcreate(pthread_t *thread, const pthread_attr_t *attr,
ret = pthread_create(thread, attr, start_routine, arg);
if (ret != 0) {
- gf_msg(THIS->name, GF_LOG_ERROR, ret, LG_MSG_PTHREAD_FAILED,
- "Thread creation failed");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ret, LG_MSG_THREAD_CREATE_FAILED,
+ NULL);
ret = -1;
} else if (name != NULL) {
gf_thread_set_vname(*thread, name, args);
@@ -4060,8 +4111,8 @@ gf_thread_create_detached(pthread_t *thread, void *(*start_routine)(void *),
ret = pthread_attr_init(&attr);
if (ret) {
- gf_msg(THIS->name, GF_LOG_ERROR, ret, LG_MSG_PTHREAD_ATTR_INIT_FAILED,
- "Thread attribute initialization failed");
+ gf_smsg(THIS->name, GF_LOG_ERROR, ret, LG_MSG_PTHREAD_ATTR_INIT_FAILED,
+ NULL);
return -1;
}
@@ -4083,8 +4134,7 @@ gf_skip_header_section(int fd, int header_len)
ret = sys_lseek(fd, header_len, SEEK_SET);
if (ret == (off_t)-1) {
- gf_msg("", GF_LOG_ERROR, 0, LG_MSG_SKIP_HEADER_FAILED,
- "Failed to skip header section");
+ gf_smsg("", GF_LOG_ERROR, 0, LG_MSG_SKIP_HEADER_FAILED, NULL);
} else {
ret = 0;
}
@@ -4097,6 +4147,14 @@ gf_skip_header_section(int fd, int header_len)
gf_boolean_t
gf_is_pid_running(int pid)
{
+#ifdef __FreeBSD__
+ int ret = -1;
+
+ ret = sys_kill(pid, 0);
+ if (ret < 0) {
+ return _gf_false;
+ }
+#else
char fname[32] = {
0,
};
@@ -4110,6 +4168,7 @@ gf_is_pid_running(int pid)
}
sys_close(fd);
+#endif
return _gf_true;
}
@@ -4134,8 +4193,8 @@ gf_is_service_running(char *pidfile, int *pid)
ret = fscanf(file, "%d", pid);
if (ret <= 0) {
- gf_msg("", GF_LOG_ERROR, errno, LG_MSG_FILE_OP_FAILED,
- "Unable to read pidfile: %s", pidfile);
+ gf_smsg("", GF_LOG_ERROR, errno, LG_MSG_FILE_OP_FAILED, "pidfile=%s",
+ pidfile, NULL);
*pid = -1;
running = _gf_false;
goto out;
@@ -4209,10 +4268,10 @@ gf_check_log_format(const char *value)
log_format = gf_logformat_withmsgid;
if (log_format == -1)
- gf_msg(
- THIS->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_LOG,
- "Invalid log-format. possible values are " GF_LOG_FORMAT_NO_MSG_ID
- "|" GF_LOG_FORMAT_WITH_MSG_ID);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_LOG,
+ "possible_values=" GF_LOG_FORMAT_NO_MSG_ID
+ "|" GF_LOG_FORMAT_WITH_MSG_ID,
+ NULL);
return log_format;
}
@@ -4228,9 +4287,9 @@ gf_check_logger(const char *value)
logger = gf_logger_syslog;
if (logger == -1)
- gf_msg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_LOG,
- "Invalid logger. possible values are " GF_LOGGER_GLUSTER_LOG
- "|" GF_LOGGER_SYSLOG);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_LOG,
+ "possible_values=" GF_LOGGER_GLUSTER_LOG "|" GF_LOGGER_SYSLOG,
+ NULL);
return logger;
}
@@ -4302,8 +4361,8 @@ gf_set_timestamp(const char *src, const char *dest)
ret = sys_stat(src, &sb);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, LG_MSG_FILE_STAT_FAILED,
- "stat on %s", src);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, LG_MSG_FILE_STAT_FAILED,
+ "stat=%s", src, NULL);
goto out;
}
/* The granularity is nano seconds if `utimensat()` is available,
@@ -4319,8 +4378,8 @@ gf_set_timestamp(const char *src, const char *dest)
/* dirfd = 0 is ignored because `dest` is an absolute path. */
ret = sys_utimensat(AT_FDCWD, dest, new_time, AT_SYMLINK_NOFOLLOW);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, LG_MSG_UTIMENSAT_FAILED,
- "utimensat on %s", dest);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, LG_MSG_UTIMENSAT_FAILED,
+ "dest=%s", dest, NULL);
}
#else
new_time[0].tv_sec = sb.st_atime;
@@ -4331,8 +4390,8 @@ gf_set_timestamp(const char *src, const char *dest)
ret = sys_utimes(dest, new_time);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, errno, LG_MSG_UTIMES_FAILED,
- "utimes on %s", dest);
+ gf_smsg(this->name, GF_LOG_ERROR, errno, LG_MSG_UTIMES_FAILED,
+ "dest=%s", dest, NULL);
}
#endif
out:
@@ -4351,7 +4410,7 @@ gf_backtrace_end(char *buf, size_t frames)
frames = min(frames, GF_BACKTRACE_LEN - pos - 1);
- if (frames <= 0)
+ if (0 == frames)
return;
memset(buf + pos, ')', frames);
@@ -4397,8 +4456,8 @@ gf_backtrace_fillframes(char *buf)
*/
ret = sys_unlink(tmpl);
if (ret < 0) {
- gf_msg(THIS->name, GF_LOG_INFO, 0, LG_MSG_FILE_OP_FAILED,
- "Unable to delete temporary file: %s", tmpl);
+ gf_smsg(THIS->name, GF_LOG_INFO, 0, LG_MSG_FILE_DELETE_FAILED,
+ "temporary_file=%s", tmpl, NULL);
}
/*The most recent two frames are the calling function and
@@ -4458,8 +4517,7 @@ gf_backtrace_save(char *buf)
if ((0 == gf_backtrace_fillframes(bt)))
return bt;
- gf_msg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_BACKTRACE_SAVE_FAILED,
- "Failed to save the backtrace.");
+ gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_BACKTRACE_SAVE_FAILED, NULL);
return NULL;
}
@@ -4550,16 +4608,16 @@ gf_build_absolute_path(char *current_path, char *relative_path, char **path)
*/
currentpath_len = strlen(current_path);
if (current_path[0] != '/' || (currentpath_len > PATH_MAX)) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY,
- "Wrong value for current path %s", current_path);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_WRONG_VALUE,
+ "current-path=%s", current_path, NULL);
ret = -EINVAL;
goto err;
}
relativepath_len = strlen(relative_path);
if (relative_path[0] == '/' || (relativepath_len > PATH_MAX)) {
- gf_msg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_INVALID_ENTRY,
- "Wrong value for relative path %s", relative_path);
+ gf_smsg(THIS->name, GF_LOG_ERROR, 0, LG_MSG_WRONG_VALUE,
+ "relative-path=%s", relative_path, NULL);
ret = -EINVAL;
goto err;
}
@@ -4675,8 +4733,9 @@ recursive_rmdir(const char *delete_path)
goto out;
}
- GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch);
- while (entry) {
+ while ((entry = sys_readdir(dir, scratch))) {
+ if (gf_irrelevant_entry(entry))
+ continue;
snprintf(path, PATH_MAX, "%s/%s", delete_path, entry->d_name);
ret = sys_lstat(path, &st);
if (ret == -1) {
@@ -4702,8 +4761,6 @@ recursive_rmdir(const char *delete_path)
gf_msg_debug(this->name, 0, "%s %s",
ret ? "Failed to remove" : "Removed", entry->d_name);
-
- GF_SKIP_IRRELEVANT_ENTRIES(entry, dir, scratch);
}
ret = sys_closedir(dir);
@@ -5027,8 +5084,8 @@ gf_zero_fill_stat(struct iatt *buf)
gf_boolean_t
gf_is_valid_xattr_namespace(char *key)
{
- static char *xattr_namespaces[] = {"trusted.", "security.", "system.",
- "user.", NULL};
+ static char *xattr_namespaces[] = {"trusted.", "system.", "user.",
+ "security.", NULL};
int i = 0;
for (i = 0; xattr_namespaces[i]; i++) {
@@ -5251,62 +5308,6 @@ glusterfs_compute_sha256(const unsigned char *content, size_t size,
return 0;
}
-char *
-get_struct_variable(int mem_num, gf_gsync_status_t *sts_val)
-{
- switch (mem_num) {
- case 0:
- return (sts_val->node);
- case 1:
- return (sts_val->master);
- case 2:
- return (sts_val->brick);
- case 3:
- return (sts_val->slave_user);
- case 4:
- return (sts_val->slave);
- case 5:
- return (sts_val->slave_node);
- case 6:
- return (sts_val->worker_status);
- case 7:
- return (sts_val->crawl_status);
- case 8:
- return (sts_val->last_synced);
- case 9:
- return (sts_val->entry);
- case 10:
- return (sts_val->data);
- case 11:
- return (sts_val->meta);
- case 12:
- return (sts_val->failures);
- case 13:
- return (sts_val->checkpoint_time);
- case 14:
- return (sts_val->checkpoint_completed);
- case 15:
- return (sts_val->checkpoint_completion_time);
- case 16:
- return (sts_val->brick_host_uuid);
- case 17:
- return (sts_val->last_synced_utc);
- case 18:
- return (sts_val->checkpoint_time_utc);
- case 19:
- return (sts_val->checkpoint_completion_time_utc);
- case 20:
- return (sts_val->slavekey);
- case 21:
- return (sts_val->session_slave);
- default:
- goto out;
- }
-
-out:
- return NULL;
-}
-
/* * Safe wrapper function for strncpy.
* This wrapper makes sure that when there is no null byte among the first n in
* source srting for strncpy function call, the string placed in dest will be
@@ -5421,3 +5422,44 @@ gf_d_type_from_ia_type(ia_type_t type)
return DT_UNKNOWN;
}
}
+
+int
+gf_nanosleep(uint64_t nsec)
+{
+ struct timespec req;
+ struct timespec rem;
+ int ret = -1;
+
+ req.tv_sec = nsec / GF_SEC_IN_NS;
+ req.tv_nsec = nsec % GF_SEC_IN_NS;
+
+ do {
+ ret = nanosleep(&req, &rem);
+ req = rem;
+ } while (ret == -1 && errno == EINTR);
+
+ return ret;
+}
+
+int
+gf_syncfs(int fd)
+{
+ int ret = 0;
+#if defined(HAVE_SYNCFS)
+ /* Linux with glibc recent enough. */
+ ret = syncfs(fd);
+#elif defined(HAVE_SYNCFS_SYS)
+ /* Linux with no library function. */
+ ret = syscall(SYS_syncfs, fd);
+#else
+ /* Fallback to generic UNIX stuff. */
+ sync();
+#endif
+ return ret;
+}
+
+char **
+get_xattrs_to_heal()
+{
+ return xattrs_to_heal;
+}
diff --git a/libglusterfs/src/compat.c b/libglusterfs/src/compat.c
index 877cda282de..8a05a30a8fe 100644
--- a/libglusterfs/src/compat.c
+++ b/libglusterfs/src/compat.c
@@ -176,7 +176,7 @@ solaris_xattr_resolve_path(const char *real_path, char **path)
if (!ret && export_path) {
strcat(export_path, "/" GF_SOLARIS_XATTR_DIR);
if (lstat(export_path, &statbuf)) {
- ret = mkdir(export_path, 0777);
+ ret = mkdir(export_path, 0755);
if (ret && (errno != EEXIST)) {
gf_msg_debug(THIS->name, 0,
"mkdir failed,"
diff --git a/libglusterfs/src/ctx.c b/libglusterfs/src/ctx.c
index 4a001c29209..3d890b04ec9 100644
--- a/libglusterfs/src/ctx.c
+++ b/libglusterfs/src/ctx.c
@@ -37,8 +37,12 @@ glusterfs_ctx_new()
ctx->log.loglevel = DEFAULT_LOG_LEVEL;
-#ifdef RUN_WITH_VALGRIND
- ctx->cmd_args.valgrind = _gf_true;
+#if defined(RUN_WITH_MEMCHECK)
+ ctx->cmd_args.vgtool = _gf_memcheck;
+#elif defined(RUN_WITH_DRD)
+ ctx->cmd_args.vgtool = _gf_drd;
+#else
+ ctx->cmd_args.vgtool = _gf_none;
#endif
/* lock is never destroyed! */
diff --git a/libglusterfs/src/defaults-tmpl.c b/libglusterfs/src/defaults-tmpl.c
index 82e7f78d7f3..3cf707f42aa 100644
--- a/libglusterfs/src/defaults-tmpl.c
+++ b/libglusterfs/src/defaults-tmpl.c
@@ -171,8 +171,11 @@ default_notify(xlator_t *this, int32_t event, void *data, ...)
/* Make sure this is not a daemon with master xlator */
pthread_mutex_lock(&graph->mutex);
{
- graph->used = 0;
- pthread_cond_broadcast(&graph->child_down_cond);
+ if (graph->parent_down ==
+ graph_total_client_xlator(graph)) {
+ graph->used = 0;
+ pthread_cond_broadcast(&graph->child_down_cond);
+ }
}
pthread_mutex_unlock(&graph->mutex);
}
diff --git a/libglusterfs/src/dict.c b/libglusterfs/src/dict.c
index b44dda33f00..1d9be9217a6 100644
--- a/libglusterfs/src/dict.c
+++ b/libglusterfs/src/dict.c
@@ -57,7 +57,6 @@ get_new_data()
GF_ATOMIC_INIT(data->refcount, 0);
data->is_static = _gf_false;
- LOCK_INIT(&data->lock);
return data;
}
@@ -98,6 +97,8 @@ get_new_dict_full(int size_hint)
}
}
+ dict->free_pair.key = NULL;
+ dict->totkvlen = 0;
LOCK_INIT(&dict->lock);
return dict;
@@ -293,8 +294,6 @@ void
data_destroy(data_t *data)
{
if (data) {
- LOCK_DESTROY(&data->lock);
-
if (!data->is_static)
GF_FREE(data->data);
@@ -325,7 +324,6 @@ data_copy(data_t *old)
}
newdata->data_type = old->data_type;
- LOCK_INIT(&newdata->lock);
return newdata;
err_out:
@@ -339,7 +337,7 @@ err_out:
* checked by callers.
*/
static data_pair_t *
-dict_lookup_common(dict_t *this, char *key, uint32_t hash)
+dict_lookup_common(const dict_t *this, const char *key, const uint32_t hash)
{
int hashval = 0;
data_pair_t *pair;
@@ -386,8 +384,8 @@ dict_lookup(dict_t *this, char *key, data_t **data)
}
static int32_t
-dict_set_lk(dict_t *this, char *key, data_t *value, const uint32_t hash,
- gf_boolean_t replace)
+dict_set_lk(dict_t *this, char *key, const int key_len, data_t *value,
+ const uint32_t hash, gf_boolean_t replace)
{
int hashval = 0;
data_pair_t *pair;
@@ -403,7 +401,7 @@ dict_set_lk(dict_t *this, char *key, data_t *value, const uint32_t hash,
key_free = 1;
key_hash = (uint32_t)XXH64(key, keylen, 0);
} else {
- keylen = strlen(key);
+ keylen = key_len;
key_hash = hash;
}
@@ -413,6 +411,7 @@ dict_set_lk(dict_t *this, char *key, data_t *value, const uint32_t hash,
if (pair) {
data_t *unref_data = pair->value;
pair->value = data_ref(value);
+ this->totkvlen += (value->len - unref_data->len);
data_unref(unref_data);
if (key_free)
GF_FREE(key);
@@ -421,16 +420,15 @@ dict_set_lk(dict_t *this, char *key, data_t *value, const uint32_t hash,
}
}
- if (this->free_pair_in_use) {
+ if (this->free_pair.key) { /* the free_pair is used */
pair = mem_get(THIS->ctx->dict_pair_pool);
if (!pair) {
if (key_free)
GF_FREE(key);
return -1;
}
- } else {
+ } else { /* assign the pair to the free pair */
pair = &this->free_pair;
- this->free_pair_in_use = _gf_true;
}
if (key_free) {
@@ -440,9 +438,7 @@ dict_set_lk(dict_t *this, char *key, data_t *value, const uint32_t hash,
} else {
pair->key = (char *)GF_MALLOC(keylen + 1, gf_common_mt_char);
if (!pair->key) {
- if (pair == &this->free_pair) {
- this->free_pair_in_use = _gf_false;
- } else {
+ if (pair != &this->free_pair) {
mem_put(pair);
}
return -1;
@@ -451,6 +447,7 @@ dict_set_lk(dict_t *this, char *key, data_t *value, const uint32_t hash,
}
pair->key_hash = key_hash;
pair->value = data_ref(value);
+ this->totkvlen += (keylen + 1 + value->len);
/* If the divisor is 1, the modulo is always 0,
* in such case avoid hash calculation.
@@ -500,12 +497,12 @@ dict_setn(dict_t *this, char *key, const int keylen, data_t *value)
}
if (key) {
- key_hash = (int32_t)XXH64(key, keylen, 0);
+ key_hash = (uint32_t)XXH64(key, keylen, 0);
}
LOCK(&this->lock);
- ret = dict_set_lk(this, key, value, key_hash, 1);
+ ret = dict_set_lk(this, key, keylen, value, key_hash, 1);
UNLOCK(&this->lock);
@@ -539,7 +536,7 @@ dict_addn(dict_t *this, char *key, const int keylen, data_t *value)
LOCK(&this->lock);
- ret = dict_set_lk(this, key, value, key_hash, 0);
+ ret = dict_set_lk(this, key, keylen, value, key_hash, 0);
UNLOCK(&this->lock);
@@ -648,6 +645,7 @@ dict_deln(dict_t *this, char *key, const int keylen)
else
this->members[hashval] = pair->hash_next;
+ this->totkvlen -= pair->value->len;
data_unref(pair->value);
if (pair->prev)
@@ -658,9 +656,10 @@ dict_deln(dict_t *this, char *key, const int keylen)
if (pair->next)
pair->next->prev = pair->prev;
+ this->totkvlen -= (strlen(pair->key) + 1);
GF_FREE(pair->key);
if (pair == &this->free_pair) {
- this->free_pair_in_use = _gf_false;
+ this->free_pair.key = NULL;
} else {
mem_put(pair);
}
@@ -700,16 +699,18 @@ dict_destroy(dict_t *this)
GF_FREE(prev->key);
if (prev != &this->free_pair) {
mem_put(prev);
+ } else {
+ this->free_pair.key = NULL;
}
total_pairs++;
prev = pair;
}
+ this->totkvlen = 0;
if (this->members != &this->members_internal) {
mem_put(this->members);
}
- GF_FREE(this->extra_free);
free(this->extra_stdfree);
/* update 'ctx->stats.dict.details' using max_count */
@@ -808,6 +809,7 @@ int_to_data(int64_t value)
data->len = gf_asprintf(&data->data, "%" PRId64, value);
if (-1 == data->len) {
gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
data->len++; /* account for terminating NULL */
@@ -827,6 +829,7 @@ data_from_int64(int64_t value)
data->len = gf_asprintf(&data->data, "%" PRId64, value);
if (-1 == data->len) {
gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
data->len++; /* account for terminating NULL */
@@ -846,6 +849,7 @@ data_from_int32(int32_t value)
data->len = gf_asprintf(&data->data, "%" PRId32, value);
if (-1 == data->len) {
gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
@@ -866,6 +870,7 @@ data_from_int16(int16_t value)
data->len = gf_asprintf(&data->data, "%" PRId16, value);
if (-1 == data->len) {
gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
@@ -886,6 +891,7 @@ data_from_int8(int8_t value)
data->len = gf_asprintf(&data->data, "%d", value);
if (-1 == data->len) {
gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
@@ -906,6 +912,7 @@ data_from_uint64(uint64_t value)
data->len = gf_asprintf(&data->data, "%" PRIu64, value);
if (-1 == data->len) {
gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
@@ -926,6 +933,8 @@ data_from_double(double value)
data->len = gf_asprintf(&data->data, "%f", value);
if (data->len == -1) {
+ gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
data->len++; /* account for terminating NULL */
@@ -945,6 +954,7 @@ data_from_uint32(uint32_t value)
data->len = gf_asprintf(&data->data, "%" PRIu32, value);
if (-1 == data->len) {
gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
@@ -964,6 +974,8 @@ data_from_uint16(uint16_t value)
}
data->len = gf_asprintf(&data->data, "%" PRIu16, value);
if (-1 == data->len) {
+ gf_msg_debug("dict", 0, "asprintf failed");
+ data_destroy(data);
return NULL;
}
@@ -1099,117 +1111,146 @@ data_to_int64(data_t *data)
{
VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_INT, "null", -1);
- return (int64_t)strtoull(data->data, NULL, 0);
+ char *endptr = NULL;
+ int64_t value = 0;
+
+ errno = 0;
+ value = strtoll(data->data, &endptr, 0);
+
+ if (endptr && *endptr != '\0')
+ /* Unrecognized characters at the end of string. */
+ errno = EINVAL;
+ if (errno) {
+ gf_msg_callingfn("dict", GF_LOG_WARNING, errno,
+ LG_MSG_DATA_CONVERSION_ERROR,
+ "Error in data conversion: '%s' can't "
+ "be represented as int64_t",
+ data->data);
+ return -1;
+ }
+ return value;
}
+/* Like above but implies signed range check. */
+
+#define DATA_TO_RANGED_SIGNED(endptr, value, data, type, min, max) \
+ do { \
+ errno = 0; \
+ value = strtoll(data->data, &endptr, 0); \
+ if (endptr && *endptr != '\0') \
+ errno = EINVAL; \
+ if (errno || value > max || value < min) { \
+ gf_msg_callingfn("dict", GF_LOG_WARNING, errno, \
+ LG_MSG_DATA_CONVERSION_ERROR, \
+ "Error in data conversion: '%s' can't " \
+ "be represented as " #type, \
+ data->data); \
+ return -1; \
+ } \
+ return (type)value; \
+ } while (0)
+
int32_t
data_to_int32(data_t *data)
{
- VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_INT, "null", -1);
+ char *endptr = NULL;
+ int64_t value = 0;
- return strtoul(data->data, NULL, 0);
+ VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_INT, "null", -1);
+ DATA_TO_RANGED_SIGNED(endptr, value, data, int32_t, INT_MIN, INT_MAX);
}
int16_t
data_to_int16(data_t *data)
{
- VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_INT, "null", -1);
-
- int16_t value = 0;
-
- errno = 0;
- value = strtol(data->data, NULL, 0);
+ char *endptr = NULL;
+ int64_t value = 0;
- if ((value > SHRT_MAX) || (value < SHRT_MIN)) {
- errno = ERANGE;
- gf_msg_callingfn("dict", GF_LOG_WARNING, errno,
- LG_MSG_DATA_CONVERSION_ERROR,
- "Error in data"
- " conversion: detected overflow");
- return -1;
- }
-
- return (int16_t)value;
+ VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_INT, "null", -1);
+ DATA_TO_RANGED_SIGNED(endptr, value, data, int16_t, SHRT_MIN, SHRT_MAX);
}
int8_t
data_to_int8(data_t *data)
{
+ char *endptr = NULL;
+ int64_t value = 0;
+
VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_INT, "null", -1);
+ DATA_TO_RANGED_SIGNED(endptr, value, data, int8_t, CHAR_MIN, CHAR_MAX);
+}
+
+uint64_t
+data_to_uint64(data_t *data)
+{
+ VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
- int8_t value = 0;
+ char *endptr = NULL;
+ uint64_t value = 0;
errno = 0;
- value = strtol(data->data, NULL, 0);
+ value = strtoull(data->data, &endptr, 0);
- if ((value > SCHAR_MAX) || (value < SCHAR_MIN)) {
- errno = ERANGE;
+ if (endptr && *endptr != '\0')
+ errno = EINVAL;
+ if (errno) {
gf_msg_callingfn("dict", GF_LOG_WARNING, errno,
LG_MSG_DATA_CONVERSION_ERROR,
- "Error in data"
- " conversion: detected overflow");
+ "Error in data conversion: '%s' can't "
+ "be represented as uint64_t",
+ data->data);
return -1;
}
-
- return (int8_t)value;
+ return value;
}
-uint64_t
-data_to_uint64(data_t *data)
-{
- VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
+/* Like above but implies unsigned range check. */
- return strtoll(data->data, NULL, 0);
-}
+#define DATA_TO_RANGED_UNSIGNED(endptr, value, data, type, max) \
+ do { \
+ errno = 0; \
+ value = strtoull(data->data, &endptr, 0); \
+ if (endptr && *endptr != '\0') \
+ errno = EINVAL; \
+ if (errno || value > max) { \
+ gf_msg_callingfn("dict", GF_LOG_WARNING, errno, \
+ LG_MSG_DATA_CONVERSION_ERROR, \
+ "Error in data conversion: '%s' can't " \
+ "be represented as " #type, \
+ data->data); \
+ return -1; \
+ } \
+ return (type)value; \
+ } while (0)
uint32_t
data_to_uint32(data_t *data)
{
- VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
+ char *endptr = NULL;
+ uint64_t value = 0;
- return strtol(data->data, NULL, 0);
+ VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
+ DATA_TO_RANGED_UNSIGNED(endptr, value, data, uint32_t, UINT_MAX);
}
uint16_t
data_to_uint16(data_t *data)
{
- VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
-
- uint16_t value = 0;
-
- errno = 0;
- value = strtol(data->data, NULL, 0);
-
- if ((USHRT_MAX - value) < 0) {
- errno = ERANGE;
- gf_msg_callingfn("dict", GF_LOG_WARNING, errno,
- LG_MSG_DATA_CONVERSION_ERROR,
- "Error in data conversion: "
- "overflow detected");
- return -1;
- }
+ char *endptr = NULL;
+ uint64_t value = 0;
- return (uint16_t)value;
+ VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
+ DATA_TO_RANGED_UNSIGNED(endptr, value, data, uint16_t, USHRT_MAX);
}
uint8_t
data_to_uint8(data_t *data)
{
- VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
-
- errno = 0;
- uint32_t value = strtol(data->data, NULL, 0);
-
- if ((UCHAR_MAX - (uint8_t)value) < 0) {
- errno = ERANGE;
- gf_msg_callingfn("dict", GF_LOG_WARNING, errno,
- LG_MSG_DATA_CONVERSION_ERROR,
- "data "
- "conversion overflow detected");
- return -1;
- }
+ char *endptr = NULL;
+ uint64_t value = 0;
- return (uint8_t)value;
+ VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_UINT, "null", -1);
+ DATA_TO_RANGED_UNSIGNED(endptr, value, data, uint8_t, UCHAR_MAX);
}
char *
@@ -1243,8 +1284,8 @@ data_to_iatt(data_t *data, char *key)
* pass more data but are backward compatible (if the initial contents
* of the struct are maintained, of course). */
if (data->len < sizeof(struct iatt)) {
- gf_msg("glusterfs", GF_LOG_ERROR, ENOBUFS, LG_MSG_UNDERSIZED_BUF,
- "data value for '%s' is smaller than expected", key);
+ gf_smsg("glusterfs", GF_LOG_ERROR, ENOBUFS, LG_MSG_UNDERSIZED_BUF,
+ "key=%s", key, NULL);
return NULL;
}
@@ -1261,8 +1302,8 @@ int
dict_remove_foreach_fn(dict_t *d, char *k, data_t *v, void *_tmp)
{
if (!d || !k) {
- gf_msg("glusterfs", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ENTRY,
- "%s is NULL", d ? "key" : "dictionary");
+ gf_smsg("glusterfs", GF_LOG_WARNING, EINVAL, LG_MSG_KEY_OR_VALUE_NULL,
+ "d=%s", d ? "key" : "dictionary", NULL);
return -1;
}
@@ -1454,32 +1495,13 @@ fail:
* -val error, val = errno
*/
-int
-dict_get_with_ref(dict_t *this, char *key, data_t **data)
-{
- if (!this || !key || !data) {
- gf_msg_callingfn("dict", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
- "dict OR key (%s) is NULL", key);
- return -EINVAL;
- }
-
- return dict_get_with_refn(this, key, strlen(key), data);
-}
-
-int
+static int
dict_get_with_refn(dict_t *this, char *key, const int keylen, data_t **data)
{
data_pair_t *pair = NULL;
int ret = -ENOENT;
uint32_t hash;
- if (!this || !key || !data) {
- gf_msg_callingfn("dict", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
- "dict OR key (%s) is NULL", key);
- ret = -EINVAL;
- goto err;
- }
-
hash = (uint32_t)XXH64(key, keylen, 0);
LOCK(&this->lock);
@@ -1492,10 +1514,22 @@ dict_get_with_refn(dict_t *this, char *key, const int keylen, data_t **data)
}
}
UNLOCK(&this->lock);
-err:
+
return ret;
}
+int
+dict_get_with_ref(dict_t *this, char *key, data_t **data)
+{
+ if (!this || !key || !data) {
+ gf_msg_callingfn("dict", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
+ "dict OR key (%s) is NULL", key);
+ return -EINVAL;
+ }
+
+ return dict_get_with_refn(this, key, strlen(key), data);
+}
+
static int
data_to_ptr_common(data_t *data, void **val)
{
@@ -1669,7 +1703,7 @@ dict_get_int8(dict_t *this, char *key, int8_t *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -1715,7 +1749,7 @@ dict_get_int16(dict_t *this, char *key, int16_t *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -1787,7 +1821,7 @@ dict_get_int32(dict_t *this, char *key, int32_t *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -1852,7 +1886,7 @@ dict_get_int64(dict_t *this, char *key, int64_t *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -1897,7 +1931,7 @@ dict_get_uint16(dict_t *this, char *key, uint16_t *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -1942,7 +1976,7 @@ dict_get_uint32(dict_t *this, char *key, uint32_t *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -1987,7 +2021,7 @@ dict_get_uint64(dict_t *this, char *key, uint64_t *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -2083,7 +2117,7 @@ _dict_modify_flag(dict_t *this, char *key, int flag, int op)
*/
GF_ASSERT(flag >= 0 && flag < DICT_MAX_FLAGS);
- hash = (int32_t)XXH64(key, strlen(key), 0);
+ hash = (uint32_t)XXH64(key, strlen(key), 0);
LOCK(&this->lock);
{
pair = dict_lookup_common(this, key, hash);
@@ -2097,8 +2131,8 @@ _dict_modify_flag(dict_t *this, char *key, int flag, int op)
} else {
ptr = GF_CALLOC(1, DICT_MAX_FLAGS / 8, gf_common_mt_char);
if (!ptr) {
- gf_msg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
- "unable to allocate flag bit array");
+ gf_smsg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
+ "flag bit array", NULL);
ret = -ENOMEM;
goto err;
}
@@ -2106,8 +2140,8 @@ _dict_modify_flag(dict_t *this, char *key, int flag, int op)
data = data_from_dynptr(ptr, DICT_MAX_FLAGS / 8);
if (!data) {
- gf_msg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
- "unable to allocate data");
+ gf_smsg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY, "data",
+ NULL);
GF_FREE(ptr);
ret = -ENOMEM;
goto err;
@@ -2118,30 +2152,29 @@ _dict_modify_flag(dict_t *this, char *key, int flag, int op)
else
BIT_CLEAR((unsigned char *)(data->data), flag);
- if (this->free_pair_in_use) {
+ if (this->free_pair.key) { /* the free pair is in use */
pair = mem_get0(THIS->ctx->dict_pair_pool);
if (!pair) {
- gf_msg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
- "unable to allocate dict pair");
+ gf_smsg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
+ "dict pair", NULL);
ret = -ENOMEM;
goto err;
}
- } else {
+ } else { /* use the free pair */
pair = &this->free_pair;
- this->free_pair_in_use = _gf_true;
}
pair->key = (char *)GF_MALLOC(strlen(key) + 1, gf_common_mt_char);
if (!pair->key) {
- gf_msg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
- "unable to allocate dict pair");
+ gf_smsg("dict", GF_LOG_ERROR, ENOMEM, LG_MSG_NO_MEMORY,
+ "dict pair", NULL);
ret = -ENOMEM;
goto err;
}
strcpy(pair->key, key);
pair->key_hash = hash;
pair->value = data_ref(data);
-
+ this->totkvlen += (strlen(key) + 1 + data->len);
hashval = hash % this->hash_size;
pair->hash_next = this->members[hashval];
this->members[hashval] = pair;
@@ -2166,12 +2199,11 @@ err:
UNLOCK(&this->lock);
if (pair) {
- if (pair->key)
- free(pair->key);
-
- if (pair == &this->free_pair) {
- this->free_pair_in_use = _gf_false;
- } else {
+ if (pair->key) {
+ GF_FREE(pair->key);
+ pair->key = NULL;
+ }
+ if (pair != &this->free_pair) {
mem_put(pair);
}
}
@@ -2179,8 +2211,8 @@ err:
if (data)
data_destroy(data);
- gf_msg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_DICT_SET_FAILED,
- "unable to set key (%s) in dict ", key);
+ gf_smsg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_DICT_SET_FAILED, "key=%s", key,
+ NULL);
return ret;
}
@@ -2212,7 +2244,7 @@ dict_get_double(dict_t *this, char *key, double *val)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !val) {
+ if (!val) {
ret = -EINVAL;
goto err;
}
@@ -2295,7 +2327,7 @@ dict_get_ptr(dict_t *this, char *key, void **ptr)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !ptr) {
+ if (!ptr) {
ret = -EINVAL;
goto err;
}
@@ -2325,7 +2357,7 @@ dict_get_ptr_and_len(dict_t *this, char *key, void **ptr, int *len)
data_t *data = NULL;
int ret = 0;
- if (!this || !key || !ptr) {
+ if (!ptr) {
ret = -EINVAL;
goto err;
}
@@ -2383,7 +2415,7 @@ dict_get_str(dict_t *this, char *key, char **str)
data_t *data = NULL;
int ret = -EINVAL;
- if (!this || !key || !str) {
+ if (!str) {
goto err;
}
ret = dict_get_with_ref(this, key, &data);
@@ -2557,7 +2589,7 @@ dict_get_bin(dict_t *this, char *key, void **bin)
data_t *data = NULL;
int ret = -EINVAL;
- if (!this || !key || !bin) {
+ if (!bin) {
goto err;
}
@@ -2660,7 +2692,7 @@ dict_get_gfuuid(dict_t *this, char *key, uuid_t *gfid)
data_t *data = NULL;
int ret = -EINVAL;
- if (!this || !key || !gfid) {
+ if (!gfid) {
goto err;
}
ret = dict_get_with_ref(this, key, &data);
@@ -2693,7 +2725,7 @@ dict_get_mdata(dict_t *this, char *key, struct mdata_iatt *mdata)
data_t *data = NULL;
int ret = -EINVAL;
- if (!this || !key || !mdata) {
+ if (!mdata) {
goto err;
}
ret = dict_get_with_ref(this, key, &data);
@@ -2703,8 +2735,8 @@ dict_get_mdata(dict_t *this, char *key, struct mdata_iatt *mdata)
VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_MDATA, key, -EINVAL);
if (data->len < sizeof(struct mdata_iatt)) {
- gf_msg("glusterfs", GF_LOG_ERROR, ENOBUFS, LG_MSG_UNDERSIZED_BUF,
- "data value for '%s' is smaller than expected", key);
+ gf_smsg("glusterfs", GF_LOG_ERROR, ENOBUFS, LG_MSG_UNDERSIZED_BUF,
+ "key=%s", key, NULL);
ret = -ENOBUFS;
goto err;
}
@@ -2731,7 +2763,7 @@ dict_get_iatt(dict_t *this, char *key, struct iatt *iatt)
data_t *data = NULL;
int ret = -EINVAL;
- if (!this || !key || !iatt) {
+ if (!iatt) {
goto err;
}
ret = dict_get_with_ref(this, key, &data);
@@ -2791,7 +2823,7 @@ dict_get_str_boolean(dict_t *this, char *key, int default_val)
VALIDATE_DATA_AND_LOG(data, GF_DATA_TYPE_INT, key, -EINVAL);
- ret = gf_string2boolean(data->data, &boo);
+ ret = gf_strn2boolean(data->data, data->len - 1, &boo);
if (ret == -1)
goto err;
@@ -2811,6 +2843,7 @@ dict_rename_key(dict_t *this, char *key, char *replace_key)
int ret = -EINVAL;
uint32_t hash;
uint32_t replacekey_hash;
+ int replacekey_len;
/* replacing a key by itself is a NO-OP */
if (strcmp(key, replace_key) == 0)
@@ -2823,7 +2856,8 @@ dict_rename_key(dict_t *this, char *key, char *replace_key)
}
hash = (uint32_t)XXH64(key, strlen(key), 0);
- replacekey_hash = (uint32_t)XXH64(replace_key, strlen(replace_key), 0);
+ replacekey_len = strlen(replace_key);
+ replacekey_hash = (uint32_t)XXH64(replace_key, replacekey_len, 0);
LOCK(&this->lock);
{
@@ -2832,8 +2866,8 @@ dict_rename_key(dict_t *this, char *key, char *replace_key)
if (!pair)
ret = -ENODATA;
else
- ret = dict_set_lk(this, replace_key, pair->value, replacekey_hash,
- 1);
+ ret = dict_set_lk(this, replace_key, replacekey_len, pair->value,
+ replacekey_hash, 1);
}
UNLOCK(&this->lock);
@@ -2866,53 +2900,15 @@ dict_serialized_length_lk(dict_t *this)
{
int ret = -EINVAL;
int count = this->count;
- int len = DICT_HDR_LEN;
- data_pair_t *pair = this->members_list;
+ const int keyhdrlen = DICT_DATA_HDR_KEY_LEN + DICT_DATA_HDR_VAL_LEN;
if (count < 0) {
- gf_msg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_COUNT_LESS_THAN_ZERO,
- "count (%d) < 0!", count);
+ gf_smsg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_COUNT_LESS_THAN_ZERO,
+ "count=%d", count, NULL);
goto out;
}
- while (count) {
- if (!pair) {
- gf_msg("dict", GF_LOG_ERROR, EINVAL,
- LG_MSG_COUNT_LESS_THAN_DATA_PAIRS,
- "less than count data pairs found!");
- goto out;
- }
-
- len += DICT_DATA_HDR_KEY_LEN + DICT_DATA_HDR_VAL_LEN;
-
- if (!pair->key) {
- gf_msg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_NULL_PTR,
- "pair->key is null!");
- goto out;
- }
-
- len += strlen(pair->key) + 1 /* for '\0' */;
-
- if (!pair->value) {
- gf_msg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_NULL_PTR,
- "pair->value is null!");
- goto out;
- }
-
- if (pair->value->len < 0) {
- gf_msg("dict", GF_LOG_ERROR, EINVAL,
- LG_MSG_VALUE_LENGTH_LESS_THAN_ZERO, "value->len (%d) < 0",
- pair->value->len);
- goto out;
- }
-
- len += pair->value->len;
-
- pair = pair->next;
- count--;
- }
-
- ret = len;
+ ret = DICT_HDR_LEN + this->totkvlen + (count * keyhdrlen);
out:
return ret;
}
@@ -2939,14 +2935,13 @@ dict_serialize_lk(dict_t *this, char *buf)
int32_t netword = 0;
if (!buf) {
- gf_msg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
- "buf is null!");
+ gf_smsg("dict", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG, NULL);
goto out;
}
if (count < 0) {
- gf_msg("dict", GF_LOG_ERROR, 0, LG_MSG_COUNT_LESS_THAN_ZERO,
- "count (%d) < 0!", count);
+ gf_smsg("dict", GF_LOG_ERROR, 0, LG_MSG_COUNT_LESS_THAN_ZERO,
+ "count=%d", count, NULL);
goto out;
}
@@ -2956,14 +2951,13 @@ dict_serialize_lk(dict_t *this, char *buf)
while (count) {
if (!pair) {
- gf_msg("dict", GF_LOG_ERROR, 0, LG_MSG_PAIRS_LESS_THA