Diffstat (limited to 'xlators/features/locks/src/common.c')
-rw-r--r--  xlators/features/locks/src/common.c | 1835
1 file changed, 1433 insertions, 402 deletions
diff --git a/xlators/features/locks/src/common.c b/xlators/features/locks/src/common.c index 675fb0235be..a2c6be93e03 100644 --- a/xlators/features/locks/src/common.c +++ b/xlators/features/locks/src/common.c @@ -1,560 +1,1591 @@ /* - Copyright (c) 2006, 2007, 2008 Z RESEARCH, Inc. <http://www.zresearch.com> - This file is part of GlusterFS. - - GlusterFS is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published - by the Free Software Foundation; either version 3 of the License, - or (at your option) any later version. - - GlusterFS is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see - <http://www.gnu.org/licenses/>. -*/ + Copyright (c) 2006-2012, 2015-2016 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. +*/ #include <unistd.h> #include <fcntl.h> #include <limits.h> #include <pthread.h> -#ifndef _CONFIG_H -#define _CONFIG_H -#include "config.h" -#endif - -#include "glusterfs.h" -#include "compat.h" -#include "xlator.h" -#include "inode.h" -#include "logging.h" -#include "common-utils.h" +#include <glusterfs/glusterfs.h> +#include <glusterfs/compat.h> +#include <glusterfs/logging.h> +#include <glusterfs/syncop.h> #include "locks.h" - +#include "common.h" static int -__is_lock_grantable (pl_inode_t *pl_inode, posix_lock_t *lock, - gf_lk_domain_t dom); +__is_lock_grantable(pl_inode_t *pl_inode, posix_lock_t *lock); static void -__insert_and_merge (pl_inode_t *pl_inode, posix_lock_t *lock, - gf_lk_domain_t dom); +__insert_and_merge(pl_inode_t *pl_inode, posix_lock_t *lock); +static int +pl_send_prelock_unlock(xlator_t *this, pl_inode_t *pl_inode, + posix_lock_t *old_lock); + +static pl_dom_list_t * +__allocate_domain(const char *volume) +{ + pl_dom_list_t *dom = NULL; -#define DOMAIN_HEAD(pl_inode, dom) (dom == GF_LOCK_POSIX \ - ? &pl_inode->ext_list \ - : &pl_inode->int_list) + dom = GF_CALLOC(1, sizeof(*dom), gf_locks_mt_pl_dom_list_t); + if (!dom) + goto out; -pl_inode_t * -pl_inode_get (xlator_t *this, inode_t *inode) + dom->domain = gf_strdup(volume); + if (!dom->domain) + goto out; + + gf_log("posix-locks", GF_LOG_TRACE, "New domain allocated: %s", + dom->domain); + + INIT_LIST_HEAD(&dom->inode_list); + INIT_LIST_HEAD(&dom->entrylk_list); + INIT_LIST_HEAD(&dom->blocked_entrylks); + INIT_LIST_HEAD(&dom->inodelk_list); + INIT_LIST_HEAD(&dom->blocked_inodelks); + +out: + if (dom && (NULL == dom->domain)) { + GF_FREE(dom); + dom = NULL; + } + + return dom; +} + +/* Returns domain for the lock. 
If domain is not present, + * allocates a domain and returns it + */ +pl_dom_list_t * +get_domain(pl_inode_t *pl_inode, const char *volume) { - uint64_t tmp_pl_inode = 0; - pl_inode_t *pl_inode = NULL; - mode_t st_mode = 0; - int ret = 0; + pl_dom_list_t *dom = NULL; - ret = inode_ctx_get (inode, this,&tmp_pl_inode); - if (ret == 0) { - pl_inode = (pl_inode_t *)(long)tmp_pl_inode; - goto out; + GF_VALIDATE_OR_GOTO("posix-locks", pl_inode, out); + GF_VALIDATE_OR_GOTO("posix-locks", volume, out); + + pthread_mutex_lock(&pl_inode->mutex); + { + list_for_each_entry(dom, &pl_inode->dom_list, inode_list) + { + if (strcmp(dom->domain, volume) == 0) + goto unlock; } - pl_inode = CALLOC (1, sizeof (*pl_inode)); - if (!pl_inode) { - gf_log (this->name, GF_LOG_ERROR, - "out of memory :("); - goto out; - } - st_mode = inode->st_mode; - if ((st_mode & S_ISGID) && !(st_mode & S_IXGRP)) - pl_inode->mandatory = 1; + dom = __allocate_domain(volume); + if (dom) + list_add(&dom->inode_list, &pl_inode->dom_list); + } +unlock: + pthread_mutex_unlock(&pl_inode->mutex); + if (dom) { + gf_log("posix-locks", GF_LOG_TRACE, "Domain %s found", volume); + } else { + gf_log("posix-locks", GF_LOG_TRACE, "Domain %s not found", volume); + } +out: + return dom; +} +unsigned long +fd_to_fdnum(fd_t *fd) +{ + return ((unsigned long)fd); +} - pthread_mutex_init (&pl_inode->mutex, NULL); +fd_t * +fd_from_fdnum(posix_lock_t *lock) +{ + return ((fd_t *)lock->fd_num); +} - INIT_LIST_HEAD (&pl_inode->dir_list); - INIT_LIST_HEAD (&pl_inode->ext_list); - INIT_LIST_HEAD (&pl_inode->int_list); - INIT_LIST_HEAD (&pl_inode->rw_list); +int +__pl_inode_is_empty(pl_inode_t *pl_inode) +{ + return (list_empty(&pl_inode->ext_list)); +} - ret = inode_ctx_put (inode, this, (uint64_t)(long)(pl_inode)); +void +pl_print_locker(char *str, int size, xlator_t *this, call_frame_t *frame) +{ + snprintf(str, size, "Pid=%llu, lk-owner=%s, Client=%p, Frame=%llu", + (unsigned long long)frame->root->pid, + lkowner_utoa(&frame->root->lk_owner), frame->root->client, + (unsigned long long)frame->root->unique); +} -out: - return pl_inode; +void +pl_print_lockee(char *str, int size, fd_t *fd, loc_t *loc) +{ + inode_t *inode = NULL; + char *ipath = NULL; + int ret = 0; + + if (fd) + inode = fd->inode; + if (loc) + inode = loc->inode; + + if (!inode) { + snprintf(str, size, "<nul>"); + return; + } + + if (loc && loc->path) { + ipath = gf_strdup(loc->path); + } else { + ret = inode_path(inode, NULL, &ipath); + if (ret <= 0) + ipath = NULL; + } + + snprintf(str, size, "gfid=%s, fd=%p, path=%s", uuid_utoa(inode->gfid), fd, + ipath ? 
ipath : "<nul>"); + + GF_FREE(ipath); } +void +pl_print_lock(char *str, int size, int cmd, struct gf_flock *flock, + gf_lkowner_t *owner) +{ + char *cmd_str = NULL; + char *type_str = NULL; -/* Create a new posix_lock_t */ -posix_lock_t * -new_posix_lock (struct flock *flock, transport_t *transport, pid_t client_pid) + switch (cmd) { +#if F_GETLK != F_GETLK64 + case F_GETLK64: +#endif + case F_GETLK: + cmd_str = "GETLK"; + break; + +#if F_SETLK != F_SETLK64 + case F_SETLK64: +#endif + case F_SETLK: + cmd_str = "SETLK"; + break; + +#if F_SETLKW != F_SETLKW64 + case F_SETLKW64: +#endif + case F_SETLKW: + cmd_str = "SETLKW"; + break; + + default: + cmd_str = "UNKNOWN"; + break; + } + + switch (flock->l_type) { + case F_RDLCK: + type_str = "READ"; + break; + case F_WRLCK: + type_str = "WRITE"; + break; + case F_UNLCK: + type_str = "UNLOCK"; + break; + default: + type_str = "UNKNOWN"; + break; + } + + snprintf(str, size, + "lock=FCNTL, cmd=%s, type=%s, " + "start=%llu, len=%llu, pid=%llu, lk-owner=%s", + cmd_str, type_str, (unsigned long long)flock->l_start, + (unsigned long long)flock->l_len, (unsigned long long)flock->l_pid, + lkowner_utoa(owner)); +} + +void +pl_trace_in(xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc, int cmd, + struct gf_flock *flock, const char *domain) { - posix_lock_t *lock = NULL; + posix_locks_private_t *priv = this->private; + char pl_locker[256]; + char pl_lockee[256]; + char pl_lock[256]; + + if (!priv->trace) + return; + + pl_print_locker(pl_locker, 256, this, frame); + pl_print_lockee(pl_lockee, 256, fd, loc); + if (domain) + pl_print_inodelk(pl_lock, 256, cmd, flock, domain); + else + pl_print_lock(pl_lock, 256, cmd, flock, &frame->root->lk_owner); + + gf_log(this->name, GF_LOG_INFO, + "[REQUEST] Locker = {%s} Lockee = {%s} Lock = {%s}", pl_locker, + pl_lockee, pl_lock); +} - lock = CALLOC (1, sizeof (posix_lock_t)); - if (!lock) { - return NULL; - } +void +pl_print_verdict(char *str, int size, int op_ret, int op_errno) +{ + char *verdict = NULL; + + if (op_ret == 0) { + verdict = "GRANTED"; + } else { + switch (op_errno) { + case EAGAIN: + verdict = "TRYAGAIN"; + break; + default: + verdict = strerror(op_errno); + } + } - lock->fl_start = flock->l_start; - lock->fl_type = flock->l_type; + snprintf(str, size, "%s", verdict); +} + +void +pl_trace_out(xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc, int cmd, + struct gf_flock *flock, int op_ret, int op_errno, + const char *domain) + +{ + posix_locks_private_t *priv = NULL; + char pl_locker[256]; + char pl_lockee[256]; + char pl_lock[256]; + char verdict[32]; - if (flock->l_len == 0) - lock->fl_end = LLONG_MAX; - else - lock->fl_end = flock->l_start + flock->l_len - 1; + priv = this->private; - lock->transport = transport; - lock->client_pid = client_pid; + if (!priv->trace) + return; - INIT_LIST_HEAD (&lock->list); + pl_print_locker(pl_locker, 256, this, frame); + pl_print_lockee(pl_lockee, 256, fd, loc); + if (domain) + pl_print_inodelk(pl_lock, 256, cmd, flock, domain); + else + pl_print_lock(pl_lock, 256, cmd, flock, &frame->root->lk_owner); - return lock; + pl_print_verdict(verdict, 32, op_ret, op_errno); + + gf_log(this->name, GF_LOG_INFO, + "[%s] Locker = {%s} Lockee = {%s} Lock = {%s}", verdict, pl_locker, + pl_lockee, pl_lock); } +void +pl_trace_block(xlator_t *this, call_frame_t *frame, fd_t *fd, loc_t *loc, + int cmd, struct gf_flock *flock, const char *domain) + +{ + posix_locks_private_t *priv = this->private; + char pl_locker[256]; + char pl_lockee[256]; + char pl_lock[256]; + + if 
(!priv->trace) + return; + + pl_print_locker(pl_locker, 256, this, frame); + pl_print_lockee(pl_lockee, 256, fd, loc); + if (domain) + pl_print_inodelk(pl_lock, 256, cmd, flock, domain); + else + pl_print_lock(pl_lock, 256, cmd, flock, &frame->root->lk_owner); + + gf_log(this->name, GF_LOG_INFO, + "[BLOCKED] Locker = {%s} Lockee = {%s} Lock = {%s}", pl_locker, + pl_lockee, pl_lock); +} -/* Delete a lock from the inode's lock list */ void -__delete_lock (pl_inode_t *pl_inode, posix_lock_t *lock) +pl_trace_flush(xlator_t *this, call_frame_t *frame, fd_t *fd) { - list_del_init (&lock->list); + posix_locks_private_t *priv = NULL; + char pl_locker[256]; + char pl_lockee[256]; + pl_inode_t *pl_inode = NULL; + + priv = this->private; + + if (!priv->trace) + return; + + pl_inode = pl_inode_get(this, fd->inode, NULL); + + if (pl_inode && __pl_inode_is_empty(pl_inode)) + return; + + pl_print_locker(pl_locker, 256, this, frame); + pl_print_lockee(pl_lockee, 256, fd, NULL); + + gf_log(this->name, GF_LOG_INFO, "[FLUSH] Locker = {%s} Lockee = {%s}", + pl_locker, pl_lockee); } +void +pl_trace_release(xlator_t *this, fd_t *fd) +{ + posix_locks_private_t *priv = NULL; + char pl_lockee[256]; + + priv = this->private; + + if (!priv->trace) + return; + + pl_print_lockee(pl_lockee, 256, fd, NULL); + + gf_log(this->name, GF_LOG_INFO, "[RELEASE] Lockee = {%s}", pl_lockee); +} -/* Destroy a posix_lock */ void -__destroy_lock (posix_lock_t *lock) +pl_update_refkeeper(xlator_t *this, inode_t *inode) +{ + pl_inode_t *pl_inode = NULL; + int is_empty = 0; + int need_unref = 0; + int need_ref = 0; + + pl_inode = pl_inode_get(this, inode, NULL); + if (!pl_inode) + return; + + pthread_mutex_lock(&pl_inode->mutex); + { + is_empty = __pl_inode_is_empty(pl_inode); + + if (is_empty && pl_inode->refkeeper) { + need_unref = 1; + pl_inode->refkeeper = NULL; + } + + if (!is_empty && !pl_inode->refkeeper) { + need_ref = 1; + pl_inode->refkeeper = inode; + } + } + pthread_mutex_unlock(&pl_inode->mutex); + + if (need_unref) + inode_unref(inode); + + if (need_ref) + inode_ref(inode); +} + +/* Get lock enforcement info from disk */ +int +pl_fetch_mlock_info_from_disk(xlator_t *this, pl_inode_t *pl_inode, + pl_local_t *local) +{ + dict_t *xdata_rsp = NULL; + int ret = 0; + int op_ret = 0; + + if (!local) { + return -1; + } + + if (local->fd) { + op_ret = syncop_fgetxattr(this, local->fd, &xdata_rsp, + GF_ENFORCE_MANDATORY_LOCK, NULL, NULL); + } else { + op_ret = syncop_getxattr(this, &local->loc[0], &xdata_rsp, + GF_ENFORCE_MANDATORY_LOCK, NULL, NULL); + } + + pthread_mutex_lock(&pl_inode->mutex); + { + if (op_ret >= 0) { + pl_inode->mlock_enforced = _gf_true; + pl_inode->check_mlock_info = _gf_false; + } else { + gf_msg(this->name, GF_LOG_WARNING, -op_ret, 0, + "getxattr failed with %d", op_ret); + pl_inode->mlock_enforced = _gf_false; + + if (-op_ret == ENODATA) { + pl_inode->check_mlock_info = _gf_false; + } else { + pl_inode->check_mlock_info = _gf_true; + } + } + } + pthread_mutex_unlock(&pl_inode->mutex); + + return ret; +} + +pl_inode_t * +pl_inode_get(xlator_t *this, inode_t *inode, pl_local_t *local) { - free (lock); + uint64_t tmp_pl_inode = 0; + pl_inode_t *pl_inode = NULL; + int ret = 0; + + LOCK(&inode->lock); + { + ret = __inode_ctx_get(inode, this, &tmp_pl_inode); + if (ret == 0) { + pl_inode = (pl_inode_t *)(long)tmp_pl_inode; + goto unlock; + } + + pl_inode = GF_CALLOC(1, sizeof(*pl_inode), gf_locks_mt_pl_inode_t); + if (!pl_inode) { + goto unlock; + } + + gf_log(this->name, GF_LOG_TRACE, "Allocating new pl inode"); + 
+ pthread_mutex_init(&pl_inode->mutex, NULL); + pthread_cond_init(&pl_inode->check_fop_wind_count, 0); + + INIT_LIST_HEAD(&pl_inode->dom_list); + INIT_LIST_HEAD(&pl_inode->ext_list); + INIT_LIST_HEAD(&pl_inode->rw_list); + INIT_LIST_HEAD(&pl_inode->reservelk_list); + INIT_LIST_HEAD(&pl_inode->blocked_reservelks); + INIT_LIST_HEAD(&pl_inode->blocked_calls); + INIT_LIST_HEAD(&pl_inode->metalk_list); + INIT_LIST_HEAD(&pl_inode->queued_locks); + INIT_LIST_HEAD(&pl_inode->waiting); + gf_uuid_copy(pl_inode->gfid, inode->gfid); + + pl_inode->check_mlock_info = _gf_true; + pl_inode->mlock_enforced = _gf_false; + + /* -2 means never looked up. -1 means something went wrong and link + * tracking is disabled. */ + pl_inode->links = -2; + + ret = __inode_ctx_put(inode, this, (uint64_t)(long)(pl_inode)); + if (ret) { + pthread_mutex_destroy(&pl_inode->mutex); + GF_FREE(pl_inode); + pl_inode = NULL; + goto unlock; + } + } +unlock: + UNLOCK(&inode->lock); + + if ((pl_inode != NULL) && pl_is_mandatory_locking_enabled(pl_inode) && + pl_inode->check_mlock_info && local) { + /* Note: The lock enforcement information per file can be stored in the + attribute flag of stat(x) in posix. With that there won't be a need + for doing getxattr post a reboot + */ + pl_fetch_mlock_info_from_disk(this, pl_inode, local); + } + + return pl_inode; +} + +/* Create a new posix_lock_t */ +posix_lock_t * +new_posix_lock(struct gf_flock *flock, client_t *client, pid_t client_pid, + gf_lkowner_t *owner, fd_t *fd, uint32_t lk_flags, int blocking, + int32_t *op_errno) +{ + posix_lock_t *lock = NULL; + + GF_VALIDATE_OR_GOTO("posix-locks", flock, out); + GF_VALIDATE_OR_GOTO("posix-locks", client, out); + GF_VALIDATE_OR_GOTO("posix-locks", fd, out); + + if (!pl_is_lk_owner_valid(owner, client)) { + *op_errno = EINVAL; + goto out; + } + + lock = GF_CALLOC(1, sizeof(posix_lock_t), gf_locks_mt_posix_lock_t); + if (!lock) { + *op_errno = ENOMEM; + goto out; + } + + lock->fl_start = flock->l_start; + lock->fl_type = flock->l_type; + + if (flock->l_len == 0) + lock->fl_end = LLONG_MAX; + else + lock->fl_end = flock->l_start + flock->l_len - 1; + + lock->client = client; + + lock->client_uid = gf_strdup(client->client_uid); + if (lock->client_uid == NULL) { + GF_FREE(lock); + lock = NULL; + *op_errno = ENOMEM; + goto out; + } + + lock->fd_num = fd_to_fdnum(fd); + lock->fd = fd; + lock->client_pid = client_pid; + lock->owner = *owner; + lock->lk_flags = lk_flags; + + lock->blocking = blocking; + memcpy(&lock->user_flock, flock, sizeof(lock->user_flock)); + + INIT_LIST_HEAD(&lock->list); + +out: + return lock; } +/* Delete a lock from the inode's lock list */ +void +__delete_lock(posix_lock_t *lock) +{ + list_del_init(&lock->list); +} -/* Convert a posix_lock to a struct flock */ +/* Destroy a posix_lock */ void -posix_lock_to_flock (posix_lock_t *lock, struct flock *flock) +__destroy_lock(posix_lock_t *lock) { - flock->l_pid = lock->client_pid; - flock->l_type = lock->fl_type; - flock->l_start = lock->fl_start; + GF_FREE(lock->client_uid); + GF_FREE(lock); +} - if (lock->fl_end == 0) - flock->l_len = LLONG_MAX; - else - flock->l_len = lock->fl_end - lock->fl_start + 1; +static posix_lock_t * +__copy_lock(posix_lock_t *src) +{ + posix_lock_t *dst; + + dst = GF_MALLOC(sizeof(posix_lock_t), gf_locks_mt_posix_lock_t); + if (dst != NULL) { + memcpy(dst, src, sizeof(posix_lock_t)); + dst->client_uid = gf_strdup(src->client_uid); + if (dst->client_uid == NULL) { + GF_FREE(dst); + dst = NULL; + } + + if (dst != NULL) + INIT_LIST_HEAD(&dst->list); + 
} + + return dst; } +/* Convert a posix_lock to a struct gf_flock */ +void +posix_lock_to_flock(posix_lock_t *lock, struct gf_flock *flock) +{ + flock->l_pid = lock->user_flock.l_pid; + flock->l_type = lock->fl_type; + flock->l_start = lock->fl_start; + flock->l_owner = lock->owner; + + if (lock->fl_end == LLONG_MAX) + flock->l_len = 0; + else + flock->l_len = lock->fl_end - lock->fl_start + 1; +} /* Insert the lock into the inode's lock list */ static void -__insert_lock (pl_inode_t *pl_inode, posix_lock_t *lock, gf_lk_domain_t dom) +__insert_lock(pl_inode_t *pl_inode, posix_lock_t *lock) { - list_add_tail (&lock->list, DOMAIN_HEAD (pl_inode, dom)); + if (lock->blocked) + lock->blkd_time = gf_time(); + else + lock->granted_time = gf_time(); - return; + list_add_tail(&lock->list, &pl_inode->ext_list); } - /* Return true if the locks overlap, false otherwise */ int -locks_overlap (posix_lock_t *l1, posix_lock_t *l2) +locks_overlap(posix_lock_t *l1, posix_lock_t *l2) { - /* - Note: - FUSE always gives us absolute offsets, so no need to worry - about SEEK_CUR or SEEK_END - */ + /* + Note: + FUSE always gives us absolute offsets, so no need to worry + about SEEK_CUR or SEEK_END + */ - return ((l1->fl_end >= l2->fl_start) && - (l2->fl_end >= l1->fl_start)); + return ((l1->fl_end >= l2->fl_start) && (l2->fl_end >= l1->fl_start)); } - /* Return true if the locks have the same owner */ int -same_owner (posix_lock_t *l1, posix_lock_t *l2) +same_owner(posix_lock_t *l1, posix_lock_t *l2) { - return ((l1->client_pid == l2->client_pid) && - (l1->transport == l2->transport)); + return (is_same_lkowner(&l1->owner, &l2->owner) && + (l1->client == l2->client)); } - /* Delete all F_UNLCK locks */ void -__delete_unlck_locks (pl_inode_t *pl_inode, gf_lk_domain_t dom) +__delete_unlck_locks(pl_inode_t *pl_inode) { - posix_lock_t *l = NULL; - posix_lock_t *tmp = NULL; - - list_for_each_entry_safe (l, tmp, DOMAIN_HEAD (pl_inode, dom), list) { - if (l->fl_type == F_UNLCK) { - __delete_lock (pl_inode, l); - __destroy_lock (l); - } - } + posix_lock_t *l = NULL; + posix_lock_t *tmp = NULL; + + list_for_each_entry_safe(l, tmp, &pl_inode->ext_list, list) + { + if (l->fl_type == F_UNLCK) { + __delete_lock(l); + __destroy_lock(l); + } + } } - /* Add two locks */ static posix_lock_t * -add_locks (posix_lock_t *l1, posix_lock_t *l2) +add_locks(posix_lock_t *l1, posix_lock_t *l2, posix_lock_t *dst) { - posix_lock_t *sum = NULL; + posix_lock_t *sum = NULL; - sum = CALLOC (1, sizeof (posix_lock_t)); - if (!sum) - return NULL; + sum = __copy_lock(dst); + if (!sum) + return NULL; - sum->fl_start = min (l1->fl_start, l2->fl_start); - sum->fl_end = max (l1->fl_end, l2->fl_end); + sum->fl_start = min(l1->fl_start, l2->fl_start); + sum->fl_end = max(l1->fl_end, l2->fl_end); - return sum; + posix_lock_to_flock(sum, &sum->user_flock); + + return sum; } /* Subtract two locks */ struct _values { - posix_lock_t *locks[3]; + posix_lock_t *locks[3]; }; /* {big} must always be contained inside {small} */ static struct _values -subtract_locks (posix_lock_t *big, posix_lock_t *small) -{ - struct _values v = { .locks = {0, 0, 0} }; - - if ((big->fl_start == small->fl_start) && - (big->fl_end == small->fl_end)) { - /* both edges coincide with big */ - v.locks[0] = CALLOC (1, sizeof (posix_lock_t)); - ERR_ABORT (v.locks[0]); - memcpy (v.locks[0], big, sizeof (posix_lock_t)); - v.locks[0]->fl_type = small->fl_type; - } - else if ((small->fl_start > big->fl_start) && - (small->fl_end < big->fl_end)) { - /* both edges lie inside big */ - 
v.locks[0] = CALLOC (1, sizeof (posix_lock_t)); - ERR_ABORT (v.locks[0]); - v.locks[1] = CALLOC (1, sizeof (posix_lock_t)); - ERR_ABORT (v.locks[1]); - v.locks[2] = CALLOC (1, sizeof (posix_lock_t)); - ERR_ABORT (v.locks[2]); - - memcpy (v.locks[0], big, sizeof (posix_lock_t)); - v.locks[0]->fl_end = small->fl_start - 1; - - memcpy (v.locks[1], small, sizeof (posix_lock_t)); - memcpy (v.locks[2], big, sizeof (posix_lock_t)); - v.locks[2]->fl_start = small->fl_end + 1; - } - /* one edge coincides with big */ - else if (small->fl_start == big->fl_start) { - v.locks[0] = CALLOC (1, sizeof (posix_lock_t)); - ERR_ABORT (v.locks[0]); - v.locks[1] = CALLOC (1, sizeof (posix_lock_t)); - ERR_ABORT (v.locks[1]); - - memcpy (v.locks[0], big, sizeof (posix_lock_t)); - v.locks[0]->fl_start = small->fl_end + 1; - - memcpy (v.locks[1], small, sizeof (posix_lock_t)); - } - else if (small->fl_end == big->fl_end) { - v.locks[0] = CALLOC (1, sizeof (posix_lock_t)); - ERR_ABORT (v.locks[0]); - v.locks[1] = CALLOC (1, sizeof (posix_lock_t)); - ERR_ABORT (v.locks[1]); - - memcpy (v.locks[0], big, sizeof (posix_lock_t)); - v.locks[0]->fl_end = small->fl_start - 1; - - memcpy (v.locks[1], small, sizeof (posix_lock_t)); - } - else { - gf_log ("posix-locks", GF_LOG_DEBUG, - "unexpected case in subtract_locks"); - } - - return v; -} - -/* - Start searching from {begin}, and return the first lock that - conflicts, NULL if no conflict - If {begin} is NULL, then start from the beginning of the list -*/ -static posix_lock_t * -first_overlap (pl_inode_t *pl_inode, posix_lock_t *lock, - gf_lk_domain_t dom) +subtract_locks(posix_lock_t *big, posix_lock_t *small) { - posix_lock_t *l = NULL; + struct _values v = {.locks = {0, 0, 0}}; + + if ((big->fl_start == small->fl_start) && (big->fl_end == small->fl_end)) { + /* both edges coincide with big */ + v.locks[0] = __copy_lock(big); + if (!v.locks[0]) { + goto out; + } + + v.locks[0]->fl_type = small->fl_type; + v.locks[0]->user_flock.l_type = small->fl_type; + goto done; + } + + if ((small->fl_start > big->fl_start) && (small->fl_end < big->fl_end)) { + /* both edges lie inside big */ + v.locks[0] = __copy_lock(big); + v.locks[1] = __copy_lock(small); + v.locks[2] = __copy_lock(big); + if ((v.locks[0] == NULL) || (v.locks[1] == NULL) || + (v.locks[2] == NULL)) { + goto out; + } + + v.locks[0]->fl_end = small->fl_start - 1; + v.locks[2]->fl_start = small->fl_end + 1; + posix_lock_to_flock(v.locks[0], &v.locks[0]->user_flock); + posix_lock_to_flock(v.locks[2], &v.locks[2]->user_flock); + goto done; + } + + /* one edge coincides with big */ + if (small->fl_start == big->fl_start) { + v.locks[0] = __copy_lock(big); + v.locks[1] = __copy_lock(small); + if ((v.locks[0] == NULL) || (v.locks[1] == NULL)) { + goto out; + } + + v.locks[0]->fl_start = small->fl_end + 1; + posix_lock_to_flock(v.locks[0], &v.locks[0]->user_flock); + goto done; + } - list_for_each_entry (l, DOMAIN_HEAD (pl_inode, dom), list) { - if (l->blocked) - continue; + if (small->fl_end == big->fl_end) { + v.locks[0] = __copy_lock(big); + v.locks[1] = __copy_lock(small); + if ((v.locks[0] == NULL) || (v.locks[1] == NULL)) { + goto out; + } + + v.locks[0]->fl_end = small->fl_start - 1; + posix_lock_to_flock(v.locks[0], &v.locks[0]->user_flock); + goto done; + } + + GF_ASSERT(0); + gf_log("posix-locks", GF_LOG_ERROR, "Unexpected case in subtract_locks"); - if (locks_overlap (l, lock)) - return l; - } +out: + if (v.locks[0]) { + __destroy_lock(v.locks[0]); + v.locks[0] = NULL; + } + if (v.locks[1]) { + 
__destroy_lock(v.locks[1]); + v.locks[1] = NULL; + } + if (v.locks[2]) { + __destroy_lock(v.locks[2]); + v.locks[2] = NULL; + } + +done: + return v; +} + +static posix_lock_t * +first_conflicting_overlap(pl_inode_t *pl_inode, posix_lock_t *lock) +{ + posix_lock_t *l = NULL; + posix_lock_t *conf = NULL; + + pthread_mutex_lock(&pl_inode->mutex); + { + list_for_each_entry(l, &pl_inode->ext_list, list) + { + if (l->blocked) + continue; + + if (locks_overlap(l, lock)) { + if (same_owner(l, lock)) + continue; + + if ((l->fl_type == F_WRLCK) || (lock->fl_type == F_WRLCK)) { + conf = l; + goto unlock; + } + } + } + } +unlock: + pthread_mutex_unlock(&pl_inode->mutex); - return NULL; + return conf; } +/* + Start searching from {begin}, and return the first lock that + conflicts, NULL if no conflict + If {begin} is NULL, then start from the beginning of the list +*/ +static posix_lock_t * +first_overlap(pl_inode_t *pl_inode, posix_lock_t *lock) +{ + posix_lock_t *l = NULL; + + list_for_each_entry(l, &pl_inode->ext_list, list) + { + if (l->blocked) + continue; + if (locks_overlap(l, lock)) + return l; + } + + return NULL; +} /* Return true if lock is grantable */ static int -__is_lock_grantable (pl_inode_t *pl_inode, posix_lock_t *lock, - gf_lk_domain_t dom) +__is_lock_grantable(pl_inode_t *pl_inode, posix_lock_t *lock) { - posix_lock_t *l = NULL; - int ret = 1; + posix_lock_t *l = NULL; + int ret = 1; + + list_for_each_entry(l, &pl_inode->ext_list, list) + { + if (!l->blocked && locks_overlap(lock, l)) { + if (((l->fl_type == F_WRLCK) || (lock->fl_type == F_WRLCK)) && + (lock->fl_type != F_UNLCK) && !same_owner(l, lock)) { + ret = 0; + break; + } + } + } + return ret; +} + +extern void +do_blocked_rw(pl_inode_t *); + +static void +__insert_and_merge(pl_inode_t *pl_inode, posix_lock_t *lock) +{ + posix_lock_t *conf = NULL; + posix_lock_t *t = NULL; + posix_lock_t *sum = NULL; + int i = 0; + struct _values v = {.locks = {0, 0, 0}}; + + list_for_each_entry_safe(conf, t, &pl_inode->ext_list, list) + { + if (conf->blocked) + continue; + if (!locks_overlap(conf, lock)) + continue; + + if (same_owner(conf, lock)) { + if (conf->fl_type == lock->fl_type && + conf->lk_flags == lock->lk_flags) { + sum = add_locks(lock, conf, lock); + + __delete_lock(conf); + __destroy_lock(conf); + + __destroy_lock(lock); + INIT_LIST_HEAD(&sum->list); + posix_lock_to_flock(sum, &sum->user_flock); + __insert_and_merge(pl_inode, sum); + + return; + } else { + sum = add_locks(lock, conf, conf); + + v = subtract_locks(sum, lock); + + __delete_lock(conf); + __destroy_lock(conf); + + __delete_lock(lock); + __destroy_lock(lock); + + __destroy_lock(sum); + + for (i = 0; i < 3; i++) { + if (!v.locks[i]) + continue; + + __insert_and_merge(pl_inode, v.locks[i]); + } + + __delete_unlck_locks(pl_inode); + return; + } + } - list_for_each_entry (l, DOMAIN_HEAD (pl_inode, dom), list) { - if (!l->blocked && locks_overlap (lock, l)) { - if (((l->fl_type == F_WRLCK) - || (lock->fl_type == F_WRLCK)) - && (lock->fl_type != F_UNLCK) - && !same_owner (l, lock)) { - ret = 0; - break; - } - } - } - return ret; + if (lock->fl_type == F_UNLCK) { + continue; + } + + if ((conf->fl_type == F_RDLCK) && (lock->fl_type == F_RDLCK)) { + __insert_lock(pl_inode, lock); + return; + } + } + + /* no conflicts, so just insert */ + if (lock->fl_type != F_UNLCK) { + __insert_lock(pl_inode, lock); + } else { + __destroy_lock(lock); + } } +void +__grant_blocked_locks(xlator_t *this, pl_inode_t *pl_inode, + struct list_head *granted) +{ + struct list_head tmp_list; + 
posix_lock_t *l = NULL; + posix_lock_t *tmp = NULL; + posix_lock_t *conf = NULL; + + INIT_LIST_HEAD(&tmp_list); + + list_for_each_entry_safe(l, tmp, &pl_inode->ext_list, list) + { + if (l->blocked) { + conf = first_overlap(pl_inode, l); + if (conf) + continue; + + l->blocked = 0; + list_move_tail(&l->list, &tmp_list); + } + } -extern void do_blocked_rw (pl_inode_t *); + list_for_each_entry_safe(l, tmp, &tmp_list, list) + { + list_del_init(&l->list); + if (__is_lock_grantable(pl_inode, l)) { + conf = GF_CALLOC(1, sizeof(*conf), gf_locks_mt_posix_lock_t); -static void -__insert_and_merge (pl_inode_t *pl_inode, posix_lock_t *lock, - gf_lk_domain_t dom) + if (!conf) { + l->blocked = 1; + __insert_lock(pl_inode, l); + continue; + } + + conf->frame = l->frame; + l->frame = NULL; + + posix_lock_to_flock(l, &conf->user_flock); + + gf_log(this->name, GF_LOG_TRACE, + "%s (pid=%d) lk-owner:%s %" PRId64 " - %" PRId64 + " => Granted", + l->fl_type == F_UNLCK ? "Unlock" : "Lock", l->client_pid, + lkowner_utoa(&l->owner), l->user_flock.l_start, + l->user_flock.l_len); + + __insert_and_merge(pl_inode, l); + + list_add(&conf->list, granted); + } else { + l->blocked = 1; + __insert_lock(pl_inode, l); + } + } +} + +void +grant_blocked_locks(xlator_t *this, pl_inode_t *pl_inode) { - posix_lock_t *conf = NULL; - posix_lock_t *t = NULL; - posix_lock_t *sum = NULL; - int i = 0; - struct _values v = { .locks = {0, 0, 0} }; + struct list_head granted_list; + posix_lock_t *tmp = NULL; + posix_lock_t *lock = NULL; + pl_local_t *local = NULL; + INIT_LIST_HEAD(&granted_list); + + pthread_mutex_lock(&pl_inode->mutex); + { + __grant_blocked_locks(this, pl_inode, &granted_list); + } + pthread_mutex_unlock(&pl_inode->mutex); + + list_for_each_entry_safe(lock, tmp, &granted_list, list) + { + list_del_init(&lock->list); + + pl_trace_out(this, lock->frame, NULL, NULL, F_SETLKW, &lock->user_flock, + 0, 0, NULL); + local = lock->frame->local; + PL_STACK_UNWIND_AND_FREE(local, lk, lock->frame, 0, 0, + &lock->user_flock, NULL); + __destroy_lock(lock); + } + + return; +} - list_for_each_entry_safe (conf, t, DOMAIN_HEAD (pl_inode, dom), list) { - if (!locks_overlap (conf, lock)) - continue; +static int +pl_send_prelock_unlock(xlator_t *this, pl_inode_t *pl_inode, + posix_lock_t *old_lock) +{ + struct gf_flock flock = { + 0, + }; + posix_lock_t *unlock_lock = NULL; + int32_t op_errno = 0; - if (same_owner (conf, lock)) { - if (conf->fl_type == lock->fl_type) { - sum = add_locks (lock, conf); + struct list_head granted_list; + posix_lock_t *tmp = NULL; + posix_lock_t *lock = NULL; + pl_local_t *local = NULL; - sum->fl_type = lock->fl_type; - sum->transport = lock->transport; - sum->client_pid = lock->client_pid; + int ret = -1; - __delete_lock (pl_inode, conf); - __destroy_lock (conf); + INIT_LIST_HEAD(&granted_list); - __destroy_lock (lock); - __insert_and_merge (pl_inode, sum, dom); + flock.l_type = F_UNLCK; + flock.l_whence = old_lock->user_flock.l_whence; + flock.l_start = old_lock->user_flock.l_start; + flock.l_len = old_lock->user_flock.l_len; + flock.l_pid = old_lock->user_flock.l_pid; - return; - } else { - sum = add_locks (lock, conf); + unlock_lock = new_posix_lock(&flock, old_lock->client, old_lock->client_pid, + &old_lock->owner, old_lock->fd, + old_lock->lk_flags, 0, &op_errno); + GF_VALIDATE_OR_GOTO(this->name, unlock_lock, out); + ret = 0; - sum->fl_type = conf->fl_type; - sum->transport = conf->transport; - sum->client_pid = conf->client_pid; + __insert_and_merge(pl_inode, unlock_lock); - v = subtract_locks (sum, 
lock); - - __delete_lock (pl_inode, conf); - __destroy_lock (conf); + __grant_blocked_locks(this, pl_inode, &granted_list); - __delete_lock (pl_inode, lock); - __destroy_lock (lock); + list_for_each_entry_safe(lock, tmp, &granted_list, list) + { + list_del_init(&lock->list); - __destroy_lock (sum); + pl_trace_out(this, lock->frame, NULL, NULL, F_SETLKW, &lock->user_flock, + 0, 0, NULL); + local = lock->frame->local; + PL_STACK_UNWIND_AND_FREE(local, lk, lock->frame, 0, 0, + &lock->user_flock, NULL); + __destroy_lock(lock); + } - for (i = 0; i < 3; i++) { - if (!v.locks[i]) - continue; +out: + return ret; +} - if (v.locks[i]->fl_type == F_UNLCK) { - __destroy_lock (v.locks[i]); - continue; - } - __insert_and_merge (pl_inode, - v.locks[i], dom); - } +int +pl_setlk(xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock, + int can_block) +{ + int ret = 0; + + errno = 0; + + pthread_mutex_lock(&pl_inode->mutex); + { + /* Send unlock before the actual lock to + prevent lock upgrade / downgrade + problems only if: + - it is a blocking call + - it has other conflicting locks + */ + + if (can_block && !(__is_lock_grantable(pl_inode, lock))) { + ret = pl_send_prelock_unlock(this, pl_inode, lock); + if (ret) + gf_log(this->name, GF_LOG_DEBUG, + "Could not send pre-lock " + "unlock"); + } - __delete_unlck_locks (pl_inode, dom); - return; - } - } + if (__is_lock_grantable(pl_inode, lock)) { + if (pl_metalock_is_active(pl_inode)) { + __pl_queue_lock(pl_inode, lock); + pthread_mutex_unlock(&pl_inode->mutex); + ret = -2; + goto out; + } + gf_log(this->name, GF_LOG_TRACE, + "%s (pid=%d) lk-owner:%s %" PRId64 " - %" PRId64 " => OK", + lock->fl_type == F_UNLCK ? "Unlock" : "Lock", + lock->client_pid, lkowner_utoa(&lock->owner), + lock->user_flock.l_start, lock->user_flock.l_len); + __insert_and_merge(pl_inode, lock); + } else if (can_block) { + if (pl_metalock_is_active(pl_inode)) { + __pl_queue_lock(pl_inode, lock); + pthread_mutex_unlock(&pl_inode->mutex); + ret = -2; + goto out; + } + gf_log(this->name, GF_LOG_TRACE, + "%s (pid=%d) lk-owner:%s %" PRId64 " - %" PRId64 + " => Blocked", + lock->fl_type == F_UNLCK ? "Unlock" : "Lock", + lock->client_pid, lkowner_utoa(&lock->owner), + lock->user_flock.l_start, lock->user_flock.l_len); + + pl_trace_block(this, lock->frame, NULL, NULL, F_SETLKW, + &lock->user_flock, NULL); + + lock->blocked = 1; + __insert_lock(pl_inode, lock); + ret = -1; + } else { + gf_log(this->name, GF_LOG_TRACE, + "%s (pid=%d) lk-owner:%s %" PRId64 " - %" PRId64 " => NOK", + lock->fl_type == F_UNLCK ? 
"Unlock" : "Lock", + lock->client_pid, lkowner_utoa(&lock->owner), + lock->user_flock.l_start, lock->user_flock.l_len); + errno = EAGAIN; + ret = -1; + } + } + pthread_mutex_unlock(&pl_inode->mutex); - if (lock->fl_type == F_UNLCK) { - continue; - } + grant_blocked_locks(this, pl_inode); - if ((conf->fl_type == F_RDLCK) && (lock->fl_type == F_RDLCK)) { - __insert_lock (pl_inode, lock, dom); - return; - } - } + do_blocked_rw(pl_inode); - /* no conflicts, so just insert */ - if (lock->fl_type != F_UNLCK) { - __insert_lock (pl_inode, lock, dom); - } else { - __destroy_lock (lock); - } +out: + return ret; } +posix_lock_t * +pl_getlk(pl_inode_t *pl_inode, posix_lock_t *lock) +{ + posix_lock_t *conf = first_conflicting_overlap(pl_inode, lock); + if (conf == NULL) { + lock->fl_type = F_UNLCK; + return lock; + } -void -__grant_blocked_locks (xlator_t *this, pl_inode_t *pl_inode, - gf_lk_domain_t dom, struct list_head *granted) + return conf; +} + +gf_boolean_t +pl_does_monkey_want_stuck_lock() +{ + long int monkey_unlock_rand = 0; + long int monkey_unlock_rand_rem = 0; + + /* coverity[DC.WEAK_CRYPTO] */ + monkey_unlock_rand = random(); + monkey_unlock_rand_rem = monkey_unlock_rand % 100; + if (monkey_unlock_rand_rem == 0) + return _gf_true; + return _gf_false; +} + +int +pl_lock_preempt(pl_inode_t *pl_inode, posix_lock_t *reqlock) { - struct list_head tmp_list; - posix_lock_t *l = NULL; - posix_lock_t *tmp = NULL; - posix_lock_t *conf = NULL; + posix_lock_t *lock = NULL; + posix_lock_t *i = NULL; + pl_rw_req_t *rw = NULL; + pl_rw_req_t *itr = NULL; + struct list_head unwind_blist = { + 0, + }; + struct list_head unwind_rw_list = { + 0, + }; + int ret = 0; + + INIT_LIST_HEAD(&unwind_blist); + INIT_LIST_HEAD(&unwind_rw_list); + + pthread_mutex_lock(&pl_inode->mutex); + { + /* + - go through the lock list + - remove all locks from different owners + - same owner locks will be added or substracted based on + the new request + - add the new lock + */ + list_for_each_entry_safe(lock, i, &pl_inode->ext_list, list) + { + if (lock->blocked) { + list_del_init(&lock->list); + list_add(&lock->list, &unwind_blist); + continue; + } + + if (locks_overlap(lock, reqlock)) { + if (same_owner(lock, reqlock)) + continue; + + /* remove conflicting locks */ + list_del_init(&lock->list); + __delete_lock(lock); + __destroy_lock(lock); + } + } - INIT_LIST_HEAD (&tmp_list); + __insert_and_merge(pl_inode, reqlock); - list_for_each_entry_safe (l, tmp, DOMAIN_HEAD (pl_inode, dom), list) { - if (l->blocked) { - conf = first_overlap (pl_inode, l, dom); - if (conf) - continue; + list_for_each_entry_safe(rw, itr, &pl_inode->rw_list, list) + { + list_del_init(&rw->list); + list_add(&rw->list, &unwind_rw_list); + } + } + pthread_mutex_unlock(&pl_inode->mutex); + + /* unwind blocked locks */ + list_for_each_entry_safe(lock, i, &unwind_blist, list) + { + PL_STACK_UNWIND_AND_FREE(((pl_local_t *)lock->frame->local), lk, + lock->frame, -1, EBUSY, &lock->user_flock, + NULL); + __destroy_lock(lock); + } + + /* unwind blocked IOs */ + list_for_each_entry_safe(rw, itr, &unwind_rw_list, list) + { + pl_clean_local(rw->stub->frame->local); + call_unwind_error(rw->stub, -1, EBUSY); + } + + return ret; +} - l->blocked = 0; - list_move_tail (&l->list, &tmp_list); - } - } +/* Return true in case we need to ensure mandatory-locking + * semantics under different modes. 
+ */ +gf_boolean_t +pl_is_mandatory_locking_enabled(pl_inode_t *pl_inode) +{ + posix_locks_private_t *priv = THIS->private; - list_for_each_entry_safe (l, tmp, &tmp_list, list) { - list_del_init (&l->list); + if (priv->mandatory_mode == MLK_FILE_BASED && pl_inode->mandatory) + return _gf_true; + else if (priv->mandatory_mode == MLK_FORCED || + priv->mandatory_mode == MLK_OPTIMAL) + return _gf_true; - if (__is_lock_grantable (pl_inode, l, dom)) { - conf = CALLOC (1, sizeof (*conf)); + return _gf_false; +} - if (!conf) { - l->blocked = 1; - __insert_lock (pl_inode, l, dom); - continue; - } +void +pl_clean_local(pl_local_t *local) +{ + if (!local) + return; + + if (local->inodelk_dom_count_req) + data_unref(local->inodelk_dom_count_req); + loc_wipe(&local->loc[0]); + loc_wipe(&local->loc[1]); + if (local->fd) + fd_unref(local->fd); + if (local->inode) + inode_unref(local->inode); + mem_put(local); +} - conf->frame = l->frame; - l->frame = NULL; +/* +TODO: detach local initialization from PL_LOCAL_GET_REQUESTS and add it here +*/ +int +pl_local_init(call_frame_t *frame, xlator_t *this, loc_t *loc, fd_t *fd) +{ + pl_local_t *local = NULL; + + if (!loc && !fd) { + return -1; + } + + if (!frame->local) { + local = mem_get0(this->local_pool); + if (!local) { + gf_msg(this->name, GF_LOG_ERROR, ENOMEM, 0, + "mem allocation failed"); + return -1; + } - posix_lock_to_flock (l, &conf->user_flock); + local->inode = (loc ? inode_ref(loc->inode) : inode_ref(fd->inode)); - gf_log (this->name, GF_LOG_DEBUG, - "%s (pid=%d) %"PRId64" - %"PRId64" => Granted", - l->fl_type == F_UNLCK ? "Unlock" : "Lock", - l->client_pid, - l->user_flock.l_start, - l->user_flock.l_len); + frame->local = local; + } - __insert_and_merge (pl_inode, l, dom); + return 0; +} - list_add (&conf->list, granted); - } else { - l->blocked = 1; - __insert_lock (pl_inode, l, dom); - } - } +gf_boolean_t +pl_is_lk_owner_valid(gf_lkowner_t *owner, client_t *client) +{ + if (client && (client->opversion < GD_OP_VERSION_7_0)) { + return _gf_true; + } + + if (is_lk_owner_null(owner)) { + return _gf_false; + } + return _gf_true; } +static int32_t +pl_inode_from_loc(loc_t *loc, inode_t **pinode) +{ + inode_t *inode = NULL; + int32_t error = 0; + + if (loc->inode != NULL) { + inode = inode_ref(loc->inode); + goto done; + } + + if (loc->parent == NULL) { + error = EINVAL; + goto done; + } + + if (!gf_uuid_is_null(loc->gfid)) { + inode = inode_find(loc->parent->table, loc->gfid); + if (inode != NULL) { + goto done; + } + } -void -grant_blocked_locks (xlator_t *this, pl_inode_t *pl_inode, gf_lk_domain_t dom) + if (loc->name == NULL) { + error = EINVAL; + goto done; + } + + inode = inode_grep(loc->parent->table, loc->parent, loc->name); + if (inode == NULL) { + /* We haven't found any inode. This means that the file doesn't exist + * or that even if it exists, we don't have any knowledge about it, so + * we don't have locks on it either, which is fine for our purposes. 
*/ + goto done; + } + +done: + *pinode = inode; + + return error; +} + +static gf_boolean_t +pl_inode_has_owners(xlator_t *xl, client_t *client, pl_inode_t *pl_inode, + struct timespec *now, struct list_head *contend) { - struct list_head granted_list; - posix_lock_t *tmp = NULL; - posix_lock_t *lock = NULL; + pl_dom_list_t *dom; + pl_inode_lock_t *lock; + gf_boolean_t has_owners = _gf_false; + + list_for_each_entry(dom, &pl_inode->dom_list, inode_list) + { + list_for_each_entry(lock, &dom->inodelk_list, list) + { + /* If the lock belongs to the same client, we assume it's related + * to the same operation, so we allow the removal to continue. */ + if (lock->client == client) { + continue; + } + /* If the lock belongs to an internal process, we don't block the + * removal. */ + if (lock->client_pid < 0) { + continue; + } + if (contend == NULL) { + return _gf_true; + } + has_owners = _gf_true; + inodelk_contention_notify_check(xl, lock, now, contend); + } + } + + return has_owners; +} + +int32_t +pl_inode_remove_prepare(xlator_t *xl, call_frame_t *frame, loc_t *loc, + pl_inode_t **ppl_inode, struct list_head *contend) +{ + struct timespec now; + inode_t *inode; + pl_inode_t *pl_inode; + int32_t error; + + pl_inode = NULL; + + error = pl_inode_from_loc(loc, &inode); + if ((error != 0) || (inode == NULL)) { + goto done; + } + + pl_inode = pl_inode_get(xl, inode, NULL); + if (pl_inode == NULL) { + inode_unref(inode); + error = ENOMEM; + goto done; + } + + /* pl_inode_from_loc() already increments ref count for inode, so + * we only assign here our reference. */ + pl_inode->inode = inode; + + timespec_now(&now); + + pthread_mutex_lock(&pl_inode->mutex); + + if (pl_inode->removed) { + error = ESTALE; + goto unlock; + } - INIT_LIST_HEAD (&granted_list); + if (pl_inode_has_owners(xl, frame->root->client, pl_inode, &now, contend)) { + error = -1; + /* We skip the unlock here because the caller must create a stub when + * we return -1 and do a call to pl_inode_remove_complete(), which + * assumes the lock is still acquired and will release it once + * everything else is prepared. 
*/ + goto done; + } - pthread_mutex_lock (&pl_inode->mutex); - { - __grant_blocked_locks (this, pl_inode, dom, &granted_list); - } - pthread_mutex_unlock (&pl_inode->mutex); + pl_inode->is_locked = _gf_true; + pl_inode->remove_running++; - list_for_each_entry_safe (lock, tmp, &granted_list, list) { - list_del_init (&lock->list); +unlock: + pthread_mutex_unlock(&pl_inode->mutex); - STACK_UNWIND (lock->frame, 0, 0, &lock->user_flock); +done: + *ppl_inode = pl_inode; - FREE (lock); - } + return error; +} + +int32_t +pl_inode_remove_complete(xlator_t *xl, pl_inode_t *pl_inode, call_stub_t *stub, + struct list_head *contend) +{ + pl_inode_lock_t *lock; + int32_t error = -1; + + if (stub != NULL) { + list_add_tail(&stub->list, &pl_inode->waiting); + pl_inode->is_locked = _gf_true; + } else { + error = ENOMEM; + + while (!list_empty(contend)) { + lock = list_first_entry(contend, pl_inode_lock_t, list); + list_del_init(&lock->list); + __pl_inodelk_unref(lock); + } + } + + pthread_mutex_unlock(&pl_inode->mutex); - return; + if (error < 0) { + inodelk_contention_notify(xl, contend); + } + + inode_unref(pl_inode->inode); + + return error; } +void +pl_inode_remove_wake(struct list_head *list) +{ + call_stub_t *stub; + + while (!list_empty(list)) { + stub = list_first_entry(list, call_stub_t, list); + list_del_init(&stub->list); -int -pl_setlk (xlator_t *this, pl_inode_t *pl_inode, posix_lock_t *lock, - int can_block, gf_lk_domain_t dom) -{ - int ret = 0; - - errno = 0; - - pthread_mutex_lock (&pl_inode->mutex); - { - if (__is_lock_grantable (pl_inode, lock, dom)) { - gf_log (this->name, GF_LOG_DEBUG, - "%s (pid=%d) %"PRId64" - %"PRId64" => OK", - lock->fl_type == F_UNLCK ? "Unlock" : "Lock", - lock->client_pid, - lock->user_flock.l_start, - lock->user_flock.l_len); - __insert_and_merge (pl_inode, lock, dom); - } else if (can_block) { - gf_log (this->name, GF_LOG_DEBUG, - "%s (pid=%d) %"PRId64" - %"PRId64" => Blocked", - lock->fl_type == F_UNLCK ? "Unlock" : "Lock", - lock->client_pid, - lock->user_flock.l_start, - lock->user_flock.l_len); - lock->blocked = 1; - __insert_lock (pl_inode, lock, dom); - ret = -1; - } else { - gf_log (this->name, GF_LOG_DEBUG, - "%s (pid=%d) %"PRId64" - %"PRId64" => NOK", - lock->fl_type == F_UNLCK ? 
"Unlock" : "Lock", - lock->client_pid, - lock->user_flock.l_start, - lock->user_flock.l_len); - errno = EAGAIN; - ret = -1; - } - } - pthread_mutex_unlock (&pl_inode->mutex); - - grant_blocked_locks (this, pl_inode, dom); - - do_blocked_rw (pl_inode); - - return ret; + call_resume(stub); + } } +void +pl_inode_remove_cbk(xlator_t *xl, pl_inode_t *pl_inode, int32_t error) +{ + struct list_head contend, granted; + struct timespec now; + pl_dom_list_t *dom; -posix_lock_t * -pl_getlk (pl_inode_t *pl_inode, posix_lock_t *lock, gf_lk_domain_t dom) + if (pl_inode == NULL) { + return; + } + + INIT_LIST_HEAD(&contend); + INIT_LIST_HEAD(&granted); + timespec_now(&now); + + pthread_mutex_lock(&pl_inode->mutex); + + if (error == 0) { + if (pl_inode->links >= 0) { + pl_inode->links--; + } + if (pl_inode->links == 0) { + pl_inode->removed = _gf_true; + } + } + + pl_inode->remove_running--; + + if ((pl_inode->remove_running == 0) && list_empty(&pl_inode->waiting)) { + pl_inode->is_locked = _gf_false; + + list_for_each_entry(dom, &pl_inode->dom_list, inode_list) + { + __grant_blocked_inode_locks(xl, pl_inode, &granted, dom, &now, + &contend); + } + } + + pthread_mutex_unlock(&pl_inode->mutex); + + unwind_granted_inodes(xl, pl_inode, &granted); + + inodelk_contention_notify(xl, &contend); + + inode_unref(pl_inode->inode); +} + +void +pl_inode_remove_unlocked(xlator_t *xl, pl_inode_t *pl_inode, + struct list_head *list) { - posix_lock_t *conf = NULL; + call_stub_t *stub, *tmp; + + if (!pl_inode->is_locked) { + return; + } - conf = first_overlap (pl_inode, lock, dom); + list_for_each_entry_safe(stub, tmp, &pl_inode->waiting, list) + { + if (!pl_inode_has_owners(xl, stub->frame->root->client, pl_inode, NULL, + NULL)) { + list_move_tail(&stub->list, list); + } + } +} - if (conf == NULL) { - lock->fl_type = F_UNLCK; - return lock; - } +/* This function determines if an inodelk attempt can be done now or it needs + * to wait. + * + * Possible return values: + * < 0: An error occurred. Currently only -ESTALE can be returned if the + * inode has been deleted previously by unlink/rmdir/rename + * = 0: The lock can be attempted. + * > 0: The lock needs to wait because a conflicting remove operation is + * ongoing. + */ +int32_t +pl_inode_remove_inodelk(pl_inode_t *pl_inode, pl_inode_lock_t *lock) +{ + pl_dom_list_t *dom; + pl_inode_lock_t *ilock; + + /* If the inode has been deleted, we won't allow any lock. */ + if (pl_inode->removed) { + return -ESTALE; + } + + /* We only synchronize with locks made for regular operations coming from + * the user. Locks done for internal purposes are hard to control and could + * lead to long delays or deadlocks quite easily. */ + if (lock->client_pid < 0) { + return 0; + } + if (!pl_inode->is_locked) { + return 0; + } + if (pl_inode->remove_running > 0) { + return 1; + } + + list_for_each_entry(dom, &pl_inode->dom_list, inode_list) + { + list_for_each_entry(ilock, &dom->inodelk_list, list) + { + /* If a lock from the same client is already granted, we allow this + * one to continue. This is necessary to prevent deadlocks when + * multiple locks are taken for the same operation. + * + * On the other side it's unlikely that the same client sends + * completely unrelated locks for the same inode. + */ + if (ilock->client == lock->client) { + return 0; + } + } + } - return conf; + return 1; } |

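Similarly, the end-of-file length convention shared by new_posix_lock() and the corrected posix_lock_to_flock() (l_len == 0 stored internally as fl_end == LLONG_MAX) can be checked with a small standalone round trip; the variables below are illustrative, not translator code.

#include <limits.h>
#include <stdio.h>

int
main(void)
{
    long long l_start = 100, l_len = 0;   /* POSIX request: lock to EOF */
    long long fl_start, fl_end, back_len;

    /* store, as in new_posix_lock() */
    fl_start = l_start;
    fl_end = (l_len == 0) ? LLONG_MAX : l_start + l_len - 1;

    /* read back, as in the fixed posix_lock_to_flock() */
    back_len = (fl_end == LLONG_MAX) ? 0 : fl_end - fl_start + 1;

    printf("fl_end=%lld, l_len round-trips to %lld\n", fl_end, back_len);
    return 0;
}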