path: root/swift/1.4.8/plugins/Glusterfs.py
author     Peter Portante <peter.portante@redhat.com>    2012-10-27 00:10:47 -0400
committer  Vijay Bellur <vbellur@redhat.com>             2012-11-06 13:50:16 -0800
commit     b0cb7aaf04eff033a329e017a8628c84a62e33cd (patch)
tree       022233e16b489304f547ebeaaa7b43378aef3c91 /swift/1.4.8/plugins/Glusterfs.py
parent     04fc3fdb5825fbfacaf610c6d86c5a4766f16ee3 (diff)
object-storage: remove glusterfs filter requirement
Fixes https://bugzilla.redhat.com/show_bug.cgi?id=870589

Remove the Glusterfs object, transforming it into a module providing
module data fields (like swift.common.constraints) and module methods
for mounting/unmounting and accessing the gluster volume information.
As a result, we can then remove the glusterfs filter from the pipeline
since we no longer need to provide the Glusterfs object through all the
plugin code paths. This is one more step closer to removing our
dependency on modifying the Swift code directly with these changes. It
is also the first step to acknowledging that we are not a plugin, but a
layering on top of Swift.

The major piece of work here is based on a recognition that the
plugins/Glusterfs.py module provided a Glusterfs class that instantiated
instances of an object that always contained the same data from the
configuration file. The fields of such an object were not being changed
and were treated as read-only in all cases. Since the object's data was
the same for all instantiations, there was no need to pass the data from
the glusterfs filter all the way down into the bowels of the
Gluster_DiskFile and DiskDir objects. Taking advantage of the nature of
that data, we now just have those fields read into module variables, and
change the Glusterfs object methods into module level functions. Much of
the changes result from the consequence of making that switch from
object to module.

Here are a few other changes made along the way:

* Bump the release numbers in the spec files in recognition of these
  changes
* Create the plugins/fs_utils.py module so that the methods in the
  plugins/Glusterfs.py module don't have to include plugins/utils.py,
  which would create a circular dependency
  * Note that this dependency comes from methods in plugins/utils.py
    depending on the module level constructs in plugins/Glusterfs.py so
    that we only store those values in one place
* Changed plugins/DiskDir.py:DiskDir class to not check for, and/or
  optionally create, the /etc/swift/db_file.db at run time, just create
  it at module init time
* Removed the duplicate strip_obj_storage_path() from plugins/DiskDir.py
  and utils.py and moved it to the Glusterfs module
* Used os.path.join in plugins/DiskDir.py where possible
* Renamed the .conf files to .conf-gluster so that we don't clobber
  existing config files
  * This is not a complete change, as the spec file also needs to be
    modified to avoid the clobbering
  * See also https://bugzilla.redhat.com/show_bug.cgi?id=865867
* Removed the redundant DIR_TYPE definition in plugins/utils.py
* Removed MOUNT_PATH from plugins/utils.py, replacing references with
  that from Glusterfs
  * This actually fixes a bug if a user ever used a different mount path
    from the default in fs.conf
* Added ASYNCDIR definition to plugins/utils.py until such time as
  another refactoring can rely on the one from swift.obj.server
* Renamed plugins/utils.py's plugin_enabled() function to
  Gluster_enabled()
* The diffs we carry for Swift are now a bit smaller in that we no
  longer have to add the plugin() method, we don't have to keep a
  fs_object field in these objects, and we can reference the Glusterfs
  module directly
* Unit tests were modified appropriately, but now need to be run in the
  context of a Swift tree; this is unfortunate, but further refactoring
  will address this

Change-Id: Id5d2510d56364761c03b3979bc71187dbe2f82fe
BUG: 870589
Signed-off-by: Peter Portante <peter.portante@redhat.com>
Reviewed-on: http://review.gluster.org/4141
Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com>
Reviewed-by: Mohammed Junaid <junaid@redhat.com>
Tested-by: Kaleb KEITHLEY <kkeithle@redhat.com>
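The core pattern of the change, replacing a configuration-holding class
with module-level fields read once at import, can be sketched as
follows. This is a minimal, hypothetical illustration (the mount_point
helper is invented for the example, and only one option is shown); the
real module in the diff below reads several more options:

    import os
    from ConfigParser import ConfigParser, NoSectionError, NoOptionError

    # Read the config once, at module load; every importer then shares
    # these read-only fields, so no object has to be passed around.
    _conf = ConfigParser()
    MOUNT_PATH = '/mnt/gluster-object'          # fallback default
    if _conf.read('/etc/swift/fs.conf'):
        try:
            MOUNT_PATH = _conf.get('DEFAULT', 'mount_path')
        except (NoSectionError, NoOptionError):
            pass                                # keep the fallback

    def mount_point(account):
        # A module-level function replaces the old instance method.
        return os.path.join(MOUNT_PATH, account)

Callers simply import the module and reference MOUNT_PATH or call the
functions directly, which is why the glusterfs filter no longer needs to
thread a Glusterfs object through the pipeline.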
Diffstat (limited to 'swift/1.4.8/plugins/Glusterfs.py')
-rw-r--r--    swift/1.4.8/plugins/Glusterfs.py    217
1 file changed, 113 insertions(+), 104 deletions(-)
diff --git a/swift/1.4.8/plugins/Glusterfs.py b/swift/1.4.8/plugins/Glusterfs.py
index 5e191e1bd8f..69902d85d48 100644
--- a/swift/1.4.8/plugins/Glusterfs.py
+++ b/swift/1.4.8/plugins/Glusterfs.py
@@ -17,115 +17,124 @@ import os, fcntl, time
-from ConfigParser import ConfigParser
+from ConfigParser import ConfigParser, NoSectionError, NoOptionError
 from swift.common.utils import TRUE_VALUES
 from hashlib import md5
-from swift.plugins.utils import mkdirs
-
-class Glusterfs(object):
-    def __init__(self):
-        self.name = 'glusterfs'
-        self.fs_conf = ConfigParser()
-        self.fs_conf.read(os.path.join('/etc/swift', 'fs.conf'))
-        self.mount_path = self.fs_conf.get('DEFAULT', 'mount_path', '/mnt/gluster-object')
-        self.auth_account = self.fs_conf.get('DEFAULT', 'auth_account', 'auth')
-        self.mount_ip = self.fs_conf.get('DEFAULT', 'mount_ip', 'localhost')
-        self.remote_cluster = self.fs_conf.get('DEFAULT', 'remote_cluster', False) in TRUE_VALUES
-        self.object_only = self.fs_conf.get('DEFAULT', 'object_only', "no") in TRUE_VALUES
-
-    def busy_wait(self, mount_path):
-        # Iterate for definite number of time over a given
-        # interval for successful mount
-        for i in range(0, 5):
-            if os.path.ismount(os.path.join(mount_path)):
-                return True
-            time.sleep(2)
-        return False
-
-    def mount(self, account):
-        mount_path = os.path.join(self.mount_path, account)
-        export = self.get_export_from_account_id(account)
-
-        pid_dir = "/var/lib/glusterd/vols/%s/run/" % export
-        pid_file = os.path.join(pid_dir, 'swift.pid');
-
-        if not os.path.exists(pid_dir):
-            mkdirs(pid_dir)
-
-        fd = os.open(pid_file, os.O_CREAT|os.O_RDWR)
-        with os.fdopen(fd, 'r+b') as f:
-            try:
-                fcntl.lockf(f, fcntl.LOCK_EX|fcntl.LOCK_NB)
-            except:
-                ex = sys.exc_info()[1]
-                if isinstance(ex, IOError) and ex.errno in (EACCES, EAGAIN):
-                    # This means that some other process is mounting the
-                    # filesystem, so wait for the mount process to complete
-                    return self.busy_wait(mount_path)
-
-            mnt_cmd = 'mount -t glusterfs %s:%s %s' % (self.mount_ip, export, \
-                                                       mount_path)
-            if os.system(mnt_cmd) or not self.busy_wait(mount_path):
-                raise Exception('Mount failed %s: %s' % (self.name, mnt_cmd))
-                return False
-        return True
-
-    def unmount(self, mount_path):
-        umnt_cmd = 'umount %s 2>> /dev/null' % mount_path
-        if os.system(umnt_cmd):
-            logging.error('Unable to unmount %s %s' % (mount_path, self.name))
-
-    def get_export_list_local(self):
-        export_list = []
-        cmnd = 'gluster volume info'
-
-        if os.system(cmnd + ' >> /dev/null'):
-            raise Exception('Getting volume failed %s', self.name)
-            return export_list
-
-        fp = os.popen(cmnd)
-        while True:
-            item = fp.readline()
-            if not item:
-                break
-            item = item.strip('\n').strip(' ')
-            if item.lower().startswith('volume name:'):
-                export_list.append(item.split(':')[1].strip(' '))
-        return export_list
+from swift.plugins.fs_utils import mkdirs
+#
+# Read the fs.conf file once at startup (module load)
+#
+_fs_conf = ConfigParser()
+MOUNT_PATH = '/mnt/gluster-object'
+AUTH_ACCOUNT = 'auth'
+MOUNT_IP = 'localhost'
+REMOTE_CLUSTER = False
+OBJECT_ONLY = False
+if _fs_conf.read(os.path.join('/etc/swift', 'fs.conf')):
+    try:
+        MOUNT_PATH = _fs_conf.get('DEFAULT', 'mount_path', '/mnt/gluster-object')
+    except (NoSectionError, NoOptionError):
+        pass
+    try:
+        AUTH_ACCOUNT = _fs_conf.get('DEFAULT', 'auth_account', 'auth')
+    except (NoSectionError, NoOptionError):
+        pass
+    try:
+        MOUNT_IP = _fs_conf.get('DEFAULT', 'mount_ip', 'localhost')
+    except (NoSectionError, NoOptionError):
+        pass
+    try:
+        REMOTE_CLUSTER = _fs_conf.get('DEFAULT', 'remote_cluster', False) in TRUE_VALUES
+    except (NoSectionError, NoOptionError):
+        pass
+    try:
+        OBJECT_ONLY = _fs_conf.get('DEFAULT', 'object_only', "no") in TRUE_VALUES
+    except (NoSectionError, NoOptionError):
+        pass
+NAME = 'glusterfs'
+
+
+def strip_obj_storage_path(path, mp=MOUNT_PATH):
+    """
+    strip the mount path off, also stripping the leading and trailing slashes
+    """
+    return path.replace(mp, '').strip(os.path.sep)
+
+def _busy_wait(full_mount_path):
+    # Check for a successful mount a fixed number of times, waiting
+    # a given interval between attempts
+    for i in range(0, 5):
+        if os.path.ismount(full_mount_path):
+            return True
+        time.sleep(2)
+    return False
+
+def mount(account):
+    full_mount_path = os.path.join(MOUNT_PATH, account)
+    export = get_export_from_account_id(account)
+
+    pid_dir = "/var/lib/glusterd/vols/%s/run/" % export
+    pid_file = os.path.join(pid_dir, 'swift.pid')
+
+    if not os.path.exists(pid_dir):
+        mkdirs(pid_dir)
+
+    fd = os.open(pid_file, os.O_CREAT|os.O_RDWR)
+    with os.fdopen(fd, 'r+b') as f:
+        try:
+            fcntl.lockf(f, fcntl.LOCK_EX|fcntl.LOCK_NB)
+        except:
+            ex = sys.exc_info()[1]
+            if isinstance(ex, IOError) and ex.errno in (EACCES, EAGAIN):
+                # This means that some other process is mounting the
+                # filesystem, so wait for the mount process to complete
+                return _busy_wait(full_mount_path)
+
+        mnt_cmd = 'mount -t glusterfs %s:%s %s' % (MOUNT_IP, export, \
+                                                   full_mount_path)
+        if os.system(mnt_cmd) or not _busy_wait(full_mount_path):
+            raise Exception('Mount failed %s: %s' % (NAME, mnt_cmd))
+    return True
+
+def unmount(full_mount_path):
+    umnt_cmd = 'umount %s 2>> /dev/null' % full_mount_path
+    if os.system(umnt_cmd):
+        logging.error('Unable to unmount %s %s' % (full_mount_path, NAME))
+
+def get_export_list():
+    if REMOTE_CLUSTER:
+        cmnd = 'ssh %s gluster volume info' % MOUNT_IP
+    else:
+        cmnd = 'gluster volume info'
-    def get_export_list_remote(self):
-        export_list = []
-        cmnd = 'ssh %s gluster volume info' % self.mount_ip
-
-        if os.system(cmnd + ' >> /dev/null'):
+    if os.system(cmnd + ' >> /dev/null'):
+        if REMOTE_CLUSTER:
             raise Exception('Getting volume info failed %s, make sure to have \
-passwordless ssh on %s', self.name, self.mount_ip)
-            return export_list
-
-        fp = os.popen(cmnd)
-        while True:
-            item = fp.readline()
-            if not item:
-                break
-            item = item.strip('\n').strip(' ')
-            if item.lower().startswith('volume name:'):
-                export_list.append(item.split(':')[1].strip(' '))
-
-        return export_list
-
-    def get_export_list(self):
-        if self.remote_cluster:
-            return self.get_export_list_remote()
+passwordless ssh on %s', NAME, MOUNT_IP)
         else:
-            return self.get_export_list_local()
+            raise Exception('Getting volume failed %s', NAME)
+
+    export_list = []
+    fp = os.popen(cmnd)
+    while True:
+        item = fp.readline()
+        if not item:
+            break
+        item = item.strip('\n').strip(' ')
+        if item.lower().startswith('volume name:'):
+            export_list.append(item.split(':')[1].strip(' '))
+
+    return export_list
 
-    def get_export_from_account_id(self, account):
-        if not account:
-            print 'account is none, returning'
-            raise AttributeError
+def get_export_from_account_id(account):
+    if not account:
+        raise ValueError('No account given')
 
-        for export in self.get_export_list():
-            if account == 'AUTH_' + export:
-                return export
+    for export in get_export_list():
+        if account == 'AUTH_' + export:
+            return export
 
-        raise Exception('No export found %s %s' % (account, self.name))
-        return None
+    raise Exception('No export found %s %s' % (account, NAME))
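With the Glusterfs class gone, consumers such as DiskDir and
Gluster_DiskFile can call the module-level API directly. A hypothetical
caller might look like this (the account name 'AUTH_myvolume' is
illustrative only and assumes a gluster volume named 'myvolume' exists):

    from swift.plugins import Glusterfs

    account = 'AUTH_myvolume'              # maps to gluster volume 'myvolume'
    if Glusterfs.mount(account):           # mounts MOUNT_PATH/<account>
        obj_path = '%s/%s/c/o' % (Glusterfs.MOUNT_PATH, account)
        # strip_obj_storage_path() undoes the mount-path prefix:
        print Glusterfs.strip_obj_storage_path(obj_path)   # 'AUTH_myvolume/c/o'

Because the module reads fs.conf exactly once at import, every such
caller observes the same MOUNT_PATH, MOUNT_IP, and REMOTE_CLUSTER values
without any per-object setup.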