Diffstat (limited to 'extras/snap_scheduler/snap_scheduler.py')
-rwxr-xr-x  extras/snap_scheduler/snap_scheduler.py  539
1 file changed, 411 insertions(+), 128 deletions(-)
diff --git a/extras/snap_scheduler/snap_scheduler.py b/extras/snap_scheduler/snap_scheduler.py
index e7c1791a15e..e8fcc449a9b 100755
--- a/extras/snap_scheduler/snap_scheduler.py
+++ b/extras/snap_scheduler/snap_scheduler.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python3
#
# Copyright (c) 2015 Red Hat, Inc. <http://www.redhat.com>
# This file is part of GlusterFS.
@@ -19,12 +19,55 @@ import logging.handlers
import sys
import shutil
from errno import EEXIST
-
+from conf import GLUSTERFS_LIBEXECDIR
+sys.path.insert(1, GLUSTERFS_LIBEXECDIR)
+
+EVENTS_ENABLED = True
+try:
+ from events.eventtypes import SNAPSHOT_SCHEDULER_INITIALISED \
+ as EVENT_SNAPSHOT_SCHEDULER_INITIALISED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_INIT_FAILED \
+ as EVENT_SNAPSHOT_SCHEDULER_INIT_FAILED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_DISABLED \
+ as EVENT_SNAPSHOT_SCHEDULER_DISABLED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_DISABLE_FAILED \
+ as EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_ENABLED \
+ as EVENT_SNAPSHOT_SCHEDULER_ENABLED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_ENABLE_FAILED \
+ as EVENT_SNAPSHOT_SCHEDULER_ENABLE_FAILED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_SCHEDULE_ADDED \
+ as EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADDED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED \
+ as EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_SCHEDULE_DELETED \
+ as EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED \
+ as EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_SCHEDULE_EDITED \
+ as EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDITED
+ from events.eventtypes import SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED \
+ as EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED
+except ImportError:
+    # Events APIs are not installed; fall back to dummy event types set to None
+ EVENTS_ENABLED = False
+ EVENT_SNAPSHOT_SCHEDULER_INITIALISED = None
+ EVENT_SNAPSHOT_SCHEDULER_INIT_FAILED = None
+ EVENT_SNAPSHOT_SCHEDULER_DISABLED = None
+ EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED = None
+ EVENT_SNAPSHOT_SCHEDULER_ENABLED = None
+ EVENT_SNAPSHOT_SCHEDULER_ENABLE_FAILED = None
+ EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADDED = None
+ EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED = None
+ EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETED = None
+ EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED = None
+ EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDITED = None
+ EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED = None
SCRIPT_NAME = "snap_scheduler"
scheduler_enabled = False
log = logging.getLogger(SCRIPT_NAME)
-SHARED_STORAGE_DIR="/var/run/gluster/shared_storage"
+SHARED_STORAGE_DIR="/run/gluster/shared_storage"
GCRON_DISABLED = SHARED_STORAGE_DIR+"/snaps/gcron_disabled"
GCRON_ENABLED = SHARED_STORAGE_DIR+"/snaps/gcron_enabled"
GCRON_TASKS = SHARED_STORAGE_DIR+"/snaps/glusterfs_snap_cron_tasks"
@@ -38,6 +81,58 @@ tasks = {}
longest_field = 12
current_scheduler = ""
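+# Return codes reported by the scheduler; print_error() maps them to messages.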
+INTERNAL_ERROR = 2
+SHARED_STORAGE_DIR_DOESNT_EXIST = 3
+SHARED_STORAGE_NOT_MOUNTED = 4
+ANOTHER_TRANSACTION_IN_PROGRESS = 5
+INIT_FAILED = 6
+SCHEDULING_ALREADY_DISABLED = 7
+SCHEDULING_ALREADY_ENABLED = 8
+NODE_NOT_INITIALISED = 9
+ANOTHER_SCHEDULER_ACTIVE = 10
+JOB_ALREADY_EXISTS = 11
+JOB_NOT_FOUND = 12
+INVALID_JOBNAME = 13
+INVALID_VOLNAME = 14
+INVALID_SCHEDULE = 15
+INVALID_ARG = 16
+VOLUME_DOES_NOT_EXIST = 17
+
+def print_error (error_num):
+ if error_num == INTERNAL_ERROR:
+ return "Internal Error"
+ elif error_num == SHARED_STORAGE_DIR_DOESNT_EXIST:
+ return "The shared storage directory ("+SHARED_STORAGE_DIR+")" \
+ " does not exist."
+ elif error_num == SHARED_STORAGE_NOT_MOUNTED:
+ return "The shared storage directory ("+SHARED_STORAGE_DIR+")" \
+ " is not mounted."
+ elif error_num == ANOTHER_TRANSACTION_IN_PROGRESS:
+ return "Another transaction is in progress."
+ elif error_num == INIT_FAILED:
+ return "Initialisation failed."
+ elif error_num == SCHEDULING_ALREADY_DISABLED:
+ return "Snapshot scheduler is already disabled."
+ elif error_num == SCHEDULING_ALREADY_ENABLED:
+ return "Snapshot scheduler is already enabled."
+ elif error_num == NODE_NOT_INITIALISED:
+ return "The node is not initialised."
+ elif error_num == ANOTHER_SCHEDULER_ACTIVE:
+ return "Another scheduler is active."
+ elif error_num == JOB_ALREADY_EXISTS:
+ return "The job already exists."
+ elif error_num == JOB_NOT_FOUND:
+ return "The job cannot be found."
+ elif error_num == INVALID_JOBNAME:
+ return "The job name is invalid."
+ elif error_num == INVALID_VOLNAME:
+ return "The volume name is invalid."
+ elif error_num == INVALID_SCHEDULE:
+ return "The schedule is invalid."
+ elif error_num == INVALID_ARG:
+ return "The argument is invalid."
+ elif error_num == VOLUME_DOES_NOT_EXIST:
+ return "The volume does not exist."
def output(msg):
print("%s: %s" % (SCRIPT_NAME, msg))
@@ -54,7 +149,7 @@ def initLogger():
sh.setFormatter(formatter)
process = subprocess.Popen(["gluster", "--print-logdir"],
- stdout=subprocess.PIPE)
+ stdout=subprocess.PIPE, universal_newlines=True)
logfile = os.path.join(process.stdout.read()[:-1], SCRIPT_NAME + ".log")
fh = logging.FileHandler(logfile)
@@ -66,7 +161,7 @@ def initLogger():
def scheduler_status():
- success = False
+ ret = INTERNAL_ERROR
global scheduler_enabled
try:
f = os.path.realpath(GCRON_TASKS)
@@ -76,33 +171,32 @@ def scheduler_status():
else:
log.info("Snapshot scheduler is currently enabled.")
scheduler_enabled = True
- success = True
+ ret = 0
except:
log.error("Failed to enable snapshot scheduling. Error: "
"Failed to check the status of %s.", GCRON_DISABLED)
- return success
+ return ret
def enable_scheduler():
ret = scheduler_status()
- if ret:
+ if ret == 0:
if not scheduler_enabled:
# Check if another scheduler is active.
ret = get_current_scheduler()
- if ret:
+ if ret == 0:
if (current_scheduler != "none"):
print_str = "Failed to enable snapshot scheduling. " \
"Error: Another scheduler is active."
log.error(print_str)
output(print_str)
- ret = False
+ ret = ANOTHER_SCHEDULER_ACTIVE
return ret
else:
print_str = "Failed to get current scheduler info."
log.error(print_str)
output(print_str)
- ret = False
return ret
log.info("Enabling snapshot scheduler.")
@@ -113,28 +207,30 @@ def enable_scheduler():
os.remove(GCRON_TASKS)
try:
f = os.open(GCRON_ENABLED, os.O_CREAT | os.O_NONBLOCK,
- 0644)
+ 0o644)
os.close(f)
- except IOError as (errno, strerror):
+ except OSError as e:
log.error("Failed to open %s. Error: %s.",
- GCRON_ENABLED, strerror)
- ret = False
+ GCRON_ENABLED, e)
+ ret = INTERNAL_ERROR
return ret
os.symlink(GCRON_ENABLED, GCRON_TASKS)
update_current_scheduler("cli")
log.info("Snapshot scheduling is enabled")
output("Snapshot scheduling is enabled")
- except IOError as (errno, strerror):
- print_str = "Failed to enable snapshot scheduling. Error: "+strerror
+ ret = 0
+ except OSError as e:
+ print_str = ("Failed to enable snapshot scheduling."
+ "Error: {{}}" + e)
log.error(print_str)
output(print_str)
- ret = False
+ ret = INTERNAL_ERROR
else:
print_str = "Failed to enable snapshot scheduling. " \
"Error: Snapshot scheduling is already enabled."
log.error(print_str)
output(print_str)
- ret = False
+ ret = SCHEDULING_ALREADY_ENABLED
else:
print_str = "Failed to enable snapshot scheduling. " \
"Error: Failed to check scheduler status."
@@ -146,14 +242,14 @@ def enable_scheduler():
def disable_scheduler():
ret = scheduler_status()
- if ret:
+ if ret == 0:
if scheduler_enabled:
log.info("Disabling snapshot scheduler.")
try:
# Check if another scheduler is active. If not, then
# update current scheduler to "none". Else do nothing.
ret = get_current_scheduler()
- if ret:
+ if ret == 0:
if (current_scheduler == "cli"):
update_current_scheduler("none")
else:
@@ -161,34 +257,36 @@ def disable_scheduler():
"Error: Failed to get current scheduler info."
log.error(print_str)
output(print_str)
- ret = False
return ret
if os.path.exists(GCRON_DISABLED):
os.remove(GCRON_DISABLED)
if os.path.lexists(GCRON_TASKS):
os.remove(GCRON_TASKS)
- f = os.open(GCRON_DISABLED, os.O_CREAT, 0644)
+ f = os.open(GCRON_DISABLED, os.O_CREAT, 0o644)
os.close(f)
os.symlink(GCRON_DISABLED, GCRON_TASKS)
log.info("Snapshot scheduling is disabled")
output("Snapshot scheduling is disabled")
- except IOError as (errno, strerror):
- print_str = "Failed to disable snapshot scheduling. Error: "+strerror
+ ret = 0
+ except OSError as e:
+ print_str = ("Failed to disable snapshot scheduling. Error: "
+                         + str(e))
log.error(print_str)
output(print_str)
- ret = False
+ ret = INTERNAL_ERROR
else:
print_str = "Failed to disable scheduling. " \
"Error: Snapshot scheduling is already disabled."
log.error(print_str)
output(print_str)
- ret = False
+ ret = SCHEDULING_ALREADY_DISABLED
else:
print_str = "Failed to disable snapshot scheduling. " \
"Error: Failed to check scheduler status."
log.error(print_str)
output(print_str)
+ ret = INTERNAL_ERROR
return ret
@@ -211,10 +309,10 @@ def load_tasks_from_file():
len(schedule))
tasks[jobname] = schedule+":"+volname
f.close()
- ret = True
- except IOError as (errno, strerror):
- log.error("Failed to open %s. Error: %s.", GCRON_ENABLED, strerror)
- ret = False
+ ret = 0
+ except IOError as e:
+ log.error("Failed to open %s. Error: %s.", GCRON_ENABLED, e)
+ ret = INTERNAL_ERROR
return ret
@@ -225,10 +323,10 @@ def get_current_scheduler():
with open(CURRENT_SCHEDULER, 'r') as f:
current_scheduler = f.readline().rstrip('\n')
f.close()
- ret = True
- except IOError as (errno, strerror):
- log.error("Failed to open %s. Error: %s.", CURRENT_SCHEDULER, strerror)
- ret = False
+ ret = 0
+ except IOError as e:
+ log.error("Failed to open %s. Error: %s.", CURRENT_SCHEDULER, e)
+ ret = INTERNAL_ERROR
return ret
@@ -236,7 +334,7 @@ def get_current_scheduler():
def list_schedules():
log.info("Listing snapshot schedules.")
ret = load_tasks_from_file()
- if ret:
+ if ret == 0:
if len(tasks) == 0:
output("No snapshots scheduled")
else:
@@ -255,6 +353,7 @@ def list_schedules():
longest_field + 5)
operation = "Snapshot Create".ljust(longest_field+5)
print(jobname+schedule+operation+volname)
+ ret = 0
else:
print_str = "Failed to list snapshot schedules. " \
"Error: Failed to load tasks from "+GCRON_ENABLED
@@ -265,9 +364,8 @@ def list_schedules():
def write_tasks_to_file():
- ret = False
try:
- with open(TMP_FILE, "w", 0644) as f:
+ with open(TMP_FILE, "w", 0o644) as f:
# If tasks is empty, just create an empty tmp file
if len(tasks) != 0:
for key in sorted(tasks):
@@ -280,61 +378,98 @@ def write_tasks_to_file():
f.flush()
os.fsync(f.fileno())
f.close()
- except IOError as (errno, strerror):
- log.error("Failed to open %s. Error: %s.", TMP_FILE, strerror)
- ret = False
+ except IOError as e:
+ log.error("Failed to open %s. Error: %s.", TMP_FILE, e)
+ ret = INTERNAL_ERROR
return ret
shutil.move(TMP_FILE, GCRON_ENABLED)
- ret = True
+ ret = 0
return ret
def update_current_scheduler(data):
- ret = False
try:
- with open(TMP_FILE, "w", 0644) as f:
+ with open(TMP_FILE, "w", 0o644) as f:
f.write("%s" % data)
f.flush()
os.fsync(f.fileno())
f.close()
- except IOError as (errno, strerror):
- log.error("Failed to open %s. Error: %s.", TMP_FILE, strerror)
- ret = False
+ except IOError as e:
+ log.error("Failed to open %s. Error: %s.", TMP_FILE, e)
+ ret = INTERNAL_ERROR
return ret
shutil.move(TMP_FILE, CURRENT_SCHEDULER)
- ret = True
+ ret = 0
return ret
+def isVolumePresent(volname):
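+    # Check that the volume exists by running 'gluster volume info <volname>'
+    # and examining its exit status.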
+ success = False
+ if volname == "":
+ log.debug("No volname given")
+ return success
+
+ cli = ["gluster",
+ "volume",
+ "info",
+ volname]
+ log.debug("Running command '%s'", " ".join(cli))
+
+ p = subprocess.Popen(cli, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ rv = p.returncode
+
+ log.debug("Command '%s' returned '%d'", " ".join(cli), rv)
+
+ if rv:
+ log.error("Command output:")
+ log.error(err)
+ else:
+        success = True
+
+ return success
+
+
def add_schedules(jobname, schedule, volname):
log.info("Adding snapshot schedules.")
ret = load_tasks_from_file()
- if ret:
+ if ret == 0:
if jobname in tasks:
print_str = ("%s already exists in schedule. Use "
"'edit' to modify %s" % (jobname, jobname))
log.error(print_str)
output(print_str)
- ret = False
+ ret = JOB_ALREADY_EXISTS
else:
- tasks[jobname] = schedule + ":" + volname
- ret = write_tasks_to_file()
- if ret:
- # Create a LOCK_FILE for the job
- job_lockfile = LOCK_FILE_DIR + jobname
- try:
- f = os.open(job_lockfile, os.O_CREAT | os.O_NONBLOCK, 0644)
- os.close(f)
- except IOError as (errno, strerror):
- log.error("Failed to open %s. Error: %s.",
- job_lockfile, strerror)
- ret = False
- return ret
- log.info("Successfully added snapshot schedule %s" % jobname)
- output("Successfully added snapshot schedule")
+ if not isVolumePresent(volname):
+ print_str = ("Volume %s does not exist. Create %s and retry." %
+ (volname, volname))
+ log.error(print_str)
+ output(print_str)
+ ret = VOLUME_DOES_NOT_EXIST
+ else:
+ tasks[jobname] = schedule + ":" + volname
+ ret = write_tasks_to_file()
+ if ret == 0:
+ # Create a LOCK_FILE for the job
+ job_lockfile = LOCK_FILE_DIR + jobname
+ try:
+ f = os.open(job_lockfile, os.O_CREAT | os.O_NONBLOCK,
+ 0o644)
+ os.close(f)
+ except OSError as e:
+ log.error("Failed to open %s. Error: %s.",
+ job_lockfile, e)
+ ret = INTERNAL_ERROR
+ return ret
+ log.info("Successfully added snapshot schedule %s" %
+ jobname)
+ output("Successfully added snapshot schedule")
+ ret = 0
else:
print_str = "Failed to add snapshot schedule. " \
"Error: Failed to load tasks from "+GCRON_ENABLED
@@ -347,27 +482,30 @@ def add_schedules(jobname, schedule, volname):
def delete_schedules(jobname):
log.info("Delete snapshot schedules.")
ret = load_tasks_from_file()
- if ret:
+ if ret == 0:
if jobname in tasks:
del tasks[jobname]
ret = write_tasks_to_file()
- if ret:
+ if ret == 0:
# Delete the LOCK_FILE for the job
job_lockfile = LOCK_FILE_DIR+jobname
try:
os.remove(job_lockfile)
- except IOError as (errno, strerror):
+ except OSError as e:
log.error("Failed to open %s. Error: %s.",
- job_lockfile, strerror)
+ job_lockfile, e)
+ ret = INTERNAL_ERROR
+ return ret
log.info("Successfully deleted snapshot schedule %s"
% jobname)
output("Successfully deleted snapshot schedule")
+ ret = 0
else:
print_str = ("Failed to delete %s. Error: No such "
"job scheduled" % jobname)
log.error(print_str)
output(print_str)
- ret = False
+ ret = JOB_NOT_FOUND
else:
print_str = "Failed to delete snapshot schedule. " \
"Error: Failed to load tasks from "+GCRON_ENABLED
@@ -380,19 +518,27 @@ def delete_schedules(jobname):
def edit_schedules(jobname, schedule, volname):
log.info("Editing snapshot schedules.")
ret = load_tasks_from_file()
- if ret:
+ if ret == 0:
if jobname in tasks:
- tasks[jobname] = schedule+":"+volname
- ret = write_tasks_to_file()
- if ret:
- log.info("Successfully edited snapshot schedule %s" % jobname)
- output("Successfully edited snapshot schedule")
+ if not isVolumePresent(volname):
+ print_str = ("Volume %s does not exist. Create %s and retry." %
+ (volname, volname))
+ log.error(print_str)
+ output(print_str)
+ ret = VOLUME_DOES_NOT_EXIST
+ else:
+ tasks[jobname] = schedule+":"+volname
+ ret = write_tasks_to_file()
+ if ret == 0:
+ log.info("Successfully edited snapshot schedule %s" %
+ jobname)
+ output("Successfully edited snapshot schedule")
else:
print_str = ("Failed to edit %s. Error: No such "
"job scheduled" % jobname)
log.error(print_str)
output(print_str)
- ret = False
+ ret = JOB_NOT_FOUND
else:
print_str = "Failed to edit snapshot schedule. " \
"Error: Failed to load tasks from "+GCRON_ENABLED
@@ -401,30 +547,125 @@ def edit_schedules(jobname, schedule, volname):
return ret
+def get_bool_val():
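+    # Fetch the current value of the cron_system_cronjob_use_shares SELinux
+    # boolean by piping 'getsebool -a' through grep.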
+ getsebool_cli = ["getsebool",
+ "-a"]
+ p1 = subprocess.Popen(getsebool_cli, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ grep_cmd = ["grep",
+ "cron_system_cronjob_use_shares"]
+    p2 = subprocess.Popen(grep_cmd, stdin=p1.stdout,
+                          stdout=subprocess.PIPE,
+                          stderr=subprocess.PIPE,
+                          universal_newlines=True)
+
+ p1.stdout.close()
+ output, err = p2.communicate()
+ rv = p2.returncode
+
+ if rv:
+ log.error("Command output:")
+ log.error(err)
+ return -1
+
+ bool_val = output.split()[2]
+ log.debug("Bool value = '%s'", bool_val)
+
+ return bool_val
+
+def get_selinux_status():
+ getenforce_cli = ["getenforce"]
+ log.debug("Running command '%s'", " ".join(getenforce_cli))
+
+ try:
+        p1 = subprocess.Popen(getenforce_cli, stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE,
+                              universal_newlines=True)
+ except OSError as oserr:
+ log.error("Failed to run the command \"getenforce\". Error: %s" %\
+ oserr)
+ return -1
+
+ output, err = p1.communicate()
+ rv = p1.returncode
+
+ if rv:
+ log.error("Command output:")
+ log.error(err)
+ return -1
+ else:
+ selinux_status=output.rstrip()
+ log.debug("selinux status: %s", selinux_status)
+
+ return selinux_status
+
+def set_cronjob_user_share():
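+    # Enable the cron_system_cronjob_use_shares SELinux boolean, unless SELinux
+    # is disabled or the boolean is already set, so that system cron jobs can
+    # access the shared storage mount.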
+ selinux_status = get_selinux_status()
+ if (selinux_status == -1):
+ log.error("Failed to get selinux status")
+ return -1
+ elif (selinux_status == "Disabled"):
+ return 0
+
+ bool_val = get_bool_val()
+    # In case of a failure (where the boolean value is not
+    # present in the system), we should not proceed further.
+    # We should only proceed when the value is "off".
+ if (bool_val == -1 or bool_val != "off"):
+ return 0
+
+ setsebool_cli = ["setsebool", "-P",
+ "cron_system_cronjob_use_shares",
+ "on"]
+ log.debug("Running command '%s'", " ".join(setsebool_cli))
+
+ p1 = subprocess.Popen(setsebool_cli, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+
+ output, err = p1.communicate()
+ rv = p1.returncode
+
+ if rv:
+ log.error("Command output:")
+ log.error(err)
+ return rv
+
+ bool_val = get_bool_val()
+ if (bool_val == "on"):
+ return 0
+ else:
+ # In case of an error or if boolean is not on
+ # we return a failure here
+ return -1
def initialise_scheduler():
+ ret = set_cronjob_user_share()
+ if ret:
+ log.error("Failed to set selinux boolean "
+ "cron_system_cronjob_use_shares to 'on'")
+ return ret
+
try:
- with open("/tmp/crontab", "w+", 0644) as f:
+ with open(TMP_FILE, "w+", 0o644) as f:
updater = ("* * * * * root PATH=$PATH:/usr/local/sbin:"
"/usr/sbin gcron.py --update\n")
f.write("%s\n" % updater)
f.flush()
os.fsync(f.fileno())
f.close()
- except IOError as (errno, strerror):
- log.error("Failed to open /tmp/crontab. Error: %s.", strerror)
- ret = False
+ except IOError as e:
+ log.error("Failed to open %s. Error: %s.", TMP_FILE, e)
+ ret = INIT_FAILED
return ret
- shutil.move("/tmp/crontab", GCRON_UPDATE_TASK)
+ shutil.move(TMP_FILE, GCRON_UPDATE_TASK)
if not os.path.lexists(GCRON_TASKS):
try:
- f = open(GCRON_TASKS, "w", 0644)
+ f = open(GCRON_TASKS, "w", 0o644)
f.close()
- except IOError as (errno, strerror):
- log.error("Failed to open %s. Error: %s.", GCRON_TASKS, strerror)
- ret = False
+ except IOError as e:
+ log.error("Failed to open %s. Error: %s.", GCRON_TASKS, e)
+ ret = INIT_FAILED
return ret
if os.path.lexists(GCRON_CROND_TASK):
@@ -432,25 +673,26 @@ def initialise_scheduler():
os.symlink(GCRON_TASKS, GCRON_CROND_TASK)
- log.info("Successfully inited snapshot scheduler for this node")
- output("Successfully inited snapshot scheduler for this node")
+ log.info("Successfully initialised snapshot scheduler for this node")
+ output("Successfully initialised snapshot scheduler for this node")
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_INITIALISED, status="Success")
- ret = True
+ ret = 0
return ret
def syntax_checker(args):
- ret = False
-
if hasattr(args, 'jobname'):
if (len(args.jobname.split()) != 1):
output("Invalid Jobname. Jobname should not be empty and should not contain \" \" character.")
+ ret = INVALID_JOBNAME
return ret
args.jobname=args.jobname.strip()
if hasattr(args, 'volname'):
if (len(args.volname.split()) != 1):
output("Invalid Volname. Volname should not be empty and should not contain \" \" character.")
+ ret = INVALID_VOLNAME
return ret
args.volname=args.volname.strip()
@@ -464,27 +706,36 @@ def syntax_checker(args):
print ("| | +-------- Day of the Month (range: 1-31)")
print ("| +---------- Hour (range: 0-23)")
print ("+------------ Minute (range: 0-59)")
+ ret = INVALID_SCHEDULE
return ret
- ret = True
+ ret = 0
return ret
def perform_operation(args):
- ret = False
+ if not os.path.exists(CURRENT_SCHEDULER):
+ update_current_scheduler("none")
# Initialise snapshot scheduler on local node
if args.action == "init":
ret = initialise_scheduler()
- if not ret:
+ if ret != 0:
output("Failed to initialise snapshot scheduling")
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_INIT_FAILED,
+ error=print_error(ret))
return ret
# Disable snapshot scheduler
if args.action == "disable_force":
ret = disable_scheduler()
- if ret:
+ if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLED,
+ status="Successfully Disabled")
+ else:
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED,
+ error=print_error(ret))
return ret
# Check if the symlink to GCRON_TASKS is properly set in the shared storage
@@ -495,12 +746,13 @@ def perform_operation(args):
"the snap scheduler for the local node.")
log.error(print_str)
output(print_str)
+ ret = NODE_NOT_INITIALISED
return ret
# Check status of snapshot scheduler.
if args.action == "status":
ret = scheduler_status()
- if ret:
+ if ret == 0:
if scheduler_enabled:
output("Snapshot scheduling status: Enabled")
else:
@@ -512,15 +764,25 @@ def perform_operation(args):
# Enable snapshot scheduler
if args.action == "enable":
ret = enable_scheduler()
- if ret:
+ if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_ENABLED,
+ status="Successfully Enabled")
+ else:
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_ENABLE_FAILED,
+ error=print_error(ret))
return ret
# Disable snapshot scheduler
if args.action == "disable":
ret = disable_scheduler()
- if ret:
+ if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLED,
+ status="Successfully Disabled")
+ else:
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED,
+ error=print_error(ret))
return ret
# List snapshot schedules
@@ -531,37 +793,61 @@ def perform_operation(args):
# Add snapshot schedules
if args.action == "add":
ret = syntax_checker(args)
- if not ret:
+ if ret != 0:
return ret
ret = add_schedules(args.jobname, args.schedule, args.volname)
- if ret:
+ if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADDED,
+ status="Successfully added job "+args.jobname)
+ else:
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED,
+ status="Failed to add job "+args.jobname,
+ error=print_error(ret))
return ret
# Delete snapshot schedules
if args.action == "delete":
ret = syntax_checker(args)
- if not ret:
+ if ret != 0:
return ret
ret = delete_schedules(args.jobname)
- if ret:
+ if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETED,
+ status="Successfully deleted job "+args.jobname)
+ else:
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED,
+ status="Failed to delete job "+args.jobname,
+ error=print_error(ret))
return ret
# Edit snapshot schedules
if args.action == "edit":
ret = syntax_checker(args)
- if not ret:
+ if ret != 0:
return ret
ret = edit_schedules(args.jobname, args.schedule, args.volname)
- if ret:
+ if ret == 0:
subprocess.Popen(["touch", "-h", GCRON_TASKS])
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDITED,
+ status="Successfully edited job "+args.jobname)
+ else:
+ gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED,
+ status="Failed to edit job "+args.jobname,
+ error=print_error(ret))
return ret
+ ret = INVALID_ARG
return ret
+def gf_event(event_type, **kwargs):
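+    # Send a gluster event; the import is deferred so the scheduler keeps
+    # working when the events package is unavailable.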
+ if EVENTS_ENABLED:
+ from events.gf_event import gf_event as gfevent
+ gfevent(event_type, **kwargs)
+
-def main():
+def main(argv):
initLogger()
ret = -1
parser = argparse.ArgumentParser()
@@ -597,62 +883,59 @@ def main():
parser_edit.add_argument("schedule", help="Schedule")
parser_edit.add_argument("volname", help="Volume Name")
- args = parser.parse_args()
+ args = parser.parse_args(argv)
if not os.path.exists(SHARED_STORAGE_DIR):
output("Failed: "+SHARED_STORAGE_DIR+" does not exist.")
- return ret
+ return SHARED_STORAGE_DIR_DOESNT_EXIST
if not os.path.ismount(SHARED_STORAGE_DIR):
output("Failed: Shared storage is not mounted at "+SHARED_STORAGE_DIR)
- return ret
+ return SHARED_STORAGE_NOT_MOUNTED
if not os.path.exists(SHARED_STORAGE_DIR+"/snaps/"):
try:
os.makedirs(SHARED_STORAGE_DIR+"/snaps/")
- except IOError as (errno, strerror):
+ except OSError as e:
-            if errno != EEXIST:
+            if e.errno != EEXIST:
- log.error("Failed to create %s : %s", SHARED_STORAGE_DIR+"/snaps/", strerror)
+ log.error("Failed to create %s : %s", SHARED_STORAGE_DIR+"/snaps/", e)
output("Failed to create %s. Error: %s"
- % (SHARED_STORAGE_DIR+"/snaps/", strerror))
+ % (SHARED_STORAGE_DIR+"/snaps/", e))
+ return INTERNAL_ERROR
if not os.path.exists(GCRON_ENABLED):
- f = os.open(GCRON_ENABLED, os.O_CREAT | os.O_NONBLOCK, 0644)
+ f = os.open(GCRON_ENABLED, os.O_CREAT | os.O_NONBLOCK, 0o644)
os.close(f)
if not os.path.exists(LOCK_FILE_DIR):
try:
os.makedirs(LOCK_FILE_DIR)
- except IOError as (errno, strerror):
+ except OSError as e:
-            if errno != EEXIST:
+            if e.errno != EEXIST:
- log.error("Failed to create %s : %s", LOCK_FILE_DIR, strerror)
+ log.error("Failed to create %s : %s", LOCK_FILE_DIR, e)
output("Failed to create %s. Error: %s"
- % (LOCK_FILE_DIR, strerror))
-
- if not os.path.exists(CURRENT_SCHEDULER):
- update_current_scheduler("none")
+ % (LOCK_FILE_DIR, e))
+ return INTERNAL_ERROR
try:
- f = os.open(LOCK_FILE, os.O_CREAT | os.O_RDWR | os.O_NONBLOCK, 0644)
+ f = os.open(LOCK_FILE, os.O_CREAT | os.O_RDWR | os.O_NONBLOCK, 0o644)
try:
fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
ret = perform_operation(args)
- if not ret:
- ret = 1
- else:
- ret = 0
fcntl.flock(f, fcntl.LOCK_UN)
- except IOError as (errno, strerror):
+ except IOError:
log.info("%s is being processed by another agent.", LOCK_FILE)
output("Another snap_scheduler command is running. "
"Please try again after some time.")
+ return ANOTHER_TRANSACTION_IN_PROGRESS
os.close(f)
- except IOError as (errno, strerror):
- log.error("Failed to open %s : %s", LOCK_FILE, strerror)
- output("Failed to open %s. Error: %s" % (LOCK_FILE, strerror))
+ except OSError as e:
+ log.error("Failed to open %s : %s", LOCK_FILE, e)
+ output("Failed to open %s. Error: %s" % (LOCK_FILE, e))
+ return INTERNAL_ERROR
return ret
if __name__ == "__main__":
- sys.exit(main())
+ sys.exit(main(sys.argv[1:]))