From 3f8d118e48f11f448f35aca0c48ad40e0fd34f5b Mon Sep 17 00:00:00 2001
From: Xavier Hernandez
Date: Tue, 7 Nov 2017 13:45:03 +0100
Subject: libglusterfs/atomic: Improved atomic support

This patch fixes a detection problem in configure.ac that prevented the
configure script from detecting the builtin __atomic and __sync
functions.

It also adds more atomic types and support for other atomic functions.

A special case has been added to support 64-bit atomics on 32-bit
systems: only 64-bit atomics fall back to the mutex-based solution,
while smaller atomic types still take advantage of the builtins if
available.

Change-Id: I6b9afc7cd6e66b28a33278715583552872278801
BUG: 1510397
Signed-off-by: Xavier Hernandez
---
 configure.ac                                       |  12 +-
 libglusterfs/src/atomic.h                          | 497 ++++++++++++++++++---
 xlators/debug/io-stats/src/io-stats.c              |   4 +-
 xlators/performance/md-cache/src/md-cache.c        |  16 +-
 xlators/performance/nl-cache/src/nl-cache-helper.c |   4 +-
 xlators/performance/nl-cache/src/nl-cache.c        |  20 +-
 6 files changed, 456 insertions(+), 97 deletions(-)

diff --git a/configure.ac b/configure.ac
index cc1410f9e8d..45e892741f0 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1013,18 +1013,24 @@ AC_SUBST(ARGP_STANDALONE_LDADD)
 AC_SUBST(ARGP_STANDALONE_DIR)
 
 dnl Check for atomic operation support
-AC_CHECK_FUNC([__atomic_load], [have_atomic_builtins])
+AC_MSG_CHECKING([for gcc __atomic builtins])
+AC_TRY_LINK([], [int v; __atomic_load_n(&v, __ATOMIC_ACQUIRE);],
+            [have_atomic_builtins=yes], [have_atomic_builtins=no])
 if test "x${have_atomic_builtins}" = "xyes"; then
    AC_DEFINE(HAVE_ATOMIC_BUILTINS, 1, [define if __atomic_*() builtins are available])
 fi
 AC_SUBST(HAVE_ATOMIC_BUILTINS)
+AC_MSG_RESULT([$have_atomic_builtins])
 
 dnl __sync_*() will not be needed if __atomic_*() is available
-AC_CHECK_FUNC([__sync_fetch_and_add], [have_sync_builtins])
-if test "x${have_sync_builtind}" = "xyes"; then
+AC_MSG_CHECKING([for gcc __sync builtins])
+AC_TRY_LINK([], [__sync_synchronize();],
+            [have_sync_builtins=yes], [have_sync_builtins=no])
+if test "x${have_sync_builtins}" = "xyes"; then
    AC_DEFINE(HAVE_SYNC_BUILTINS, 1, [define if __sync_*() builtins are available])
 fi
 AC_SUBST(HAVE_SYNC_BUILTINS)
+AC_MSG_RESULT([$have_sync_builtins])
 
 AC_CHECK_HEADER([malloc.h], AC_DEFINE(HAVE_MALLOC_H, 1, [have malloc.h]))
 
diff --git a/libglusterfs/src/atomic.h b/libglusterfs/src/atomic.h
index 71fcb1ee972..ca7f919f0d1 100644
--- a/libglusterfs/src/atomic.h
+++ b/libglusterfs/src/atomic.h
@@ -12,98 +12,451 @@
 #define _ATOMIC_H
 
 #include <stdint.h>
+#include <stdbool.h>
+
+#include "locking.h"
+
+/* Macros used to join two arguments and generate a new macro name. */
+#define GF_ATOMIC_MACRO_1(_macro) _macro
+#define GF_ATOMIC_MACRO(_base, _name) GF_ATOMIC_MACRO_1(_base##_name)
+
+/* There's a problem on 32-bit architectures when we try to use atomic
+ * builtins with 64-bit types. The only way to solve the problem is to
+ * use a mutex to protect access to the atomic variable, but we don't
+ * want to use mutexes for the smaller types that can work with the
+ * atomic builtins.
+ *
+ * So for each atomic type we add a mutex field if the atomic operation
+ * is not supported, and a dummy zero-size field if it is. This way we
+ * can have different atomic types, some with a mutex and some without.
+ *
+ * To define these types, we use two macros:
+ *
+ *     GF_ATOMIC_MUTEX_FIELD_0 = char lk[0]
+ *     GF_ATOMIC_MUTEX_FIELD_1 = gf_lock_t lk
+ *
+ * Both macros define the 'lk' field that will be used in the atomic
+ * structure: one when the atomic is supported by the architecture and
+ * another when it is not. We need to define the field even if it won't
+ * be used; otherwise the compiler will report an error.
+ *
+ * Now we need to take the mutex or not depending on the existence of
+ * the mutex field in the structure. To do so we check the size of the
+ * structure: if it's bigger than uint64_t (all structures with a mutex
+ * will be bigger), we use the mutex-based version, otherwise we use the
+ * atomic builtin. This check is easily optimized out by the compiler,
+ * leaving clean and efficient compiled code. */
+
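/* [Editor's illustration, not part of the patch] A minimal sketch of the
 * zero-size-field trick described above, using pthread_mutex_t as a
 * stand-in for gf_lock_t (an assumption; the real lock type depends on
 * the build). The builtin-capable layout stays within sizeof(uint64_t),
 * while the mutex-carrying layout grows past it, which is exactly what
 * the GF_ATOMIC_CHOOSE dispatch defined below tests. */
#include <stdint.h>
#include <pthread.h>

struct atomic_native { char lk[0]; int64_t value; };         /* sizeof == 8 */
struct atomic_locked { pthread_mutex_t lk; int64_t value; }; /* sizeof  > 8 */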
+#define GF_ATOMIC_MUTEX_FIELD_0 char lk[0]
+#define GF_ATOMIC_MUTEX_FIELD_1 gf_lock_t lk
+
+/* We'll use SIZEOF_LONG to determine the architecture. 32-bit machines
+ * will have 4 here, while 64-bit machines will have 8. If additional
+ * needs or restrictions appear on other platforms, these tests can be
+ * extended to handle them. */
+
+/* GF_ATOMIC_SIZE_X macros map each type size to one of the
+ * GF_ATOMIC_MUTEX_FIELD_X macros, depending on detected conditions. */
+
 #if defined(HAVE_ATOMIC_BUILTINS) || defined(HAVE_SYNC_BUILTINS)
-/* optimized implementation, macros only */
-typedef struct gf_atomic_t {
-        int64_t cnt;
-} gf_atomic_t;
+#define GF_ATOMIC_SIZE_1 GF_ATOMIC_MUTEX_FIELD_0
+#define GF_ATOMIC_SIZE_2 GF_ATOMIC_MUTEX_FIELD_0
+#define GF_ATOMIC_SIZE_4 GF_ATOMIC_MUTEX_FIELD_0
+
+#if SIZEOF_LONG >= 8
+#define GF_ATOMIC_SIZE_8 GF_ATOMIC_MUTEX_FIELD_0
+#endif
+
+#endif /* HAVE_(ATOMIC|SYNC)_BUILTINS */
+
+/* Any GF_ATOMIC_SIZE_X macro not yet defined will use the mutex version */
+#ifndef GF_ATOMIC_SIZE_1
+#define GF_ATOMIC_SIZE_1 GF_ATOMIC_MUTEX_FIELD_1
+#endif
+
+#ifndef GF_ATOMIC_SIZE_2
+#define GF_ATOMIC_SIZE_2 GF_ATOMIC_MUTEX_FIELD_1
+#endif
+
+#ifndef GF_ATOMIC_SIZE_4
+#define GF_ATOMIC_SIZE_4 GF_ATOMIC_MUTEX_FIELD_1
+#endif
+
+#ifndef GF_ATOMIC_SIZE_8
+#define GF_ATOMIC_SIZE_8 GF_ATOMIC_MUTEX_FIELD_1
+#endif
+
+/* This macro is used to define all supported atomic types. The first
+ * field is the size of the type in bytes, and the second one is the
+ * name. */
+#define GF_ATOMIC_TYPE(_size, _name) \
+        typedef struct _gf_atomic_##_name##_t { \
+                GF_ATOMIC_MACRO(GF_ATOMIC_SIZE_, _size); \
+                _name##_t value; \
+        } gf_atomic_##_name##_t
+
+/* The atomic types we support */
+GF_ATOMIC_TYPE(1, int8);              /* gf_atomic_int8_t */
+GF_ATOMIC_TYPE(2, int16);             /* gf_atomic_int16_t */
+GF_ATOMIC_TYPE(4, int32);             /* gf_atomic_int32_t */
+GF_ATOMIC_TYPE(8, int64);             /* gf_atomic_int64_t */
+GF_ATOMIC_TYPE(SIZEOF_LONG, intptr);  /* gf_atomic_intptr_t */
+GF_ATOMIC_TYPE(1, uint8);             /* gf_atomic_uint8_t */
+GF_ATOMIC_TYPE(2, uint16);            /* gf_atomic_uint16_t */
+GF_ATOMIC_TYPE(4, uint32);            /* gf_atomic_uint32_t */
+GF_ATOMIC_TYPE(8, uint64);            /* gf_atomic_uint64_t */
+GF_ATOMIC_TYPE(SIZEOF_LONG, uintptr); /* gf_atomic_uintptr_t */
+
+/* Define the default atomic type as int64_t */
+#define gf_atomic_t gf_atomic_int64_t
+
+/* This macro chooses between the mutex-based version and the atomic
+ * builtin version, depending on the size of the atomic structure. */
+#define GF_ATOMIC_CHOOSE(_atomic, _op, _args...) \
+        ((sizeof(_atomic) > sizeof(uint64_t)) \
+         ? ({ GF_ATOMIC_MACRO(GF_ATOMIC_LOCK_, _op)(_atomic, ## _args); }) \
+         : ({ GF_ATOMIC_MACRO(GF_ATOMIC_BASE_, _op)(_atomic, ## _args); }))
+
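/* [Editor's illustration, not part of the patch] For a hypothetical
 * counter 'v', GF_ATOMIC_ADD(v, 1) expands to the constant condition
 *
 *     (sizeof(v) > sizeof(uint64_t)) ? GF_ATOMIC_LOCK_ADD(v, 1)
 *                                    : GF_ATOMIC_BASE_ADD(v, 1)
 *
 * so the compiler drops the unused branch entirely; callers never need
 * to know which path their type takes: */
gf_atomic_int32_t hit_count;  /* no mutex field, always the builtin path */
gf_atomic_int64_t byte_count; /* may embed a mutex on 32-bit: lock path  */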
+/* Macros to implement the mutex-based atomics. */
+#define GF_ATOMIC_OP_PREPARE(_atomic, _name) \
+        typeof(_atomic) *__atomic = &(_atomic); \
+        gf_lock_t *__lock = (gf_lock_t *)&__atomic->lk; \
+        LOCK(__lock); \
+        typeof(__atomic->value) _name = __atomic->value
+
+#define GF_ATOMIC_OP_STORE(_value) \
+        (__atomic->value = (_value))
+
+#define GF_ATOMIC_OP_RETURN(_value) \
+        ({ \
+                UNLOCK(__lock); \
+                _value; \
+        })
+
+#define GF_ATOMIC_LOCK_INIT(_atomic, _value) \
+        do { \
+                typeof(_atomic) *__atomic = &(_atomic); \
+                LOCK_INIT((gf_lock_t *)&__atomic->lk); \
+                __atomic->value = (_value); \
+        } while (0)
+
+#define GF_ATOMIC_LOCK_GET(_atomic) \
+        ({ \
+                GF_ATOMIC_OP_PREPARE(_atomic, __value); \
+                GF_ATOMIC_OP_RETURN(__value); \
+        })
+
+#define GF_ATOMIC_LOCK_ADD(_atomic, _value) \
+        ({ \
+                GF_ATOMIC_OP_PREPARE(_atomic, __value); \
+                GF_ATOMIC_OP_STORE(__value += (_value)); \
+                GF_ATOMIC_OP_RETURN(__value); \
+        })
+
+#define GF_ATOMIC_LOCK_SUB(_atomic, _value) \
+        ({ \
+                GF_ATOMIC_OP_PREPARE(_atomic, __value); \
+                GF_ATOMIC_OP_STORE(__value -= (_value)); \
+                GF_ATOMIC_OP_RETURN(__value); \
+        })
+
+#define GF_ATOMIC_LOCK_AND(_atomic, _value) \
+        ({ \
+                GF_ATOMIC_OP_PREPARE(_atomic, __value); \
+                GF_ATOMIC_OP_STORE(__value &= (_value)); \
+                GF_ATOMIC_OP_RETURN(__value); \
+        })
+
+#define GF_ATOMIC_LOCK_OR(_atomic, _value) \
+        ({ \
+                GF_ATOMIC_OP_PREPARE(_atomic, __value); \
+                GF_ATOMIC_OP_STORE(__value |= (_value)); \
+                GF_ATOMIC_OP_RETURN(__value); \
+        })
+
+#define GF_ATOMIC_LOCK_XOR(_atomic, _value) \
+        ({ \
+                GF_ATOMIC_OP_PREPARE(_atomic, __value); \
+                GF_ATOMIC_OP_STORE(__value ^= (_value)); \
+                GF_ATOMIC_OP_RETURN(__value); \
+        })
+
+#define GF_ATOMIC_LOCK_NAND(_atomic, _value) \
+        ({ \
+                GF_ATOMIC_OP_PREPARE(_atomic, __value); \
+                GF_ATOMIC_OP_STORE(__value = ~(__value & (_value))); \
+                GF_ATOMIC_OP_RETURN(__value); \
+        })
+
+#define GF_ATOMIC_LOCK_FETCH_ADD(_atomic, _value) \
+        ({ \
+                GF_ATOMIC_OP_PREPARE(_atomic, __value); \
+                GF_ATOMIC_OP_STORE(__value + (_value)); \
+                GF_ATOMIC_OP_RETURN(__value); \
+        })
+
+#define GF_ATOMIC_LOCK_FETCH_SUB(_atomic, _value) \
+        ({ \
+                GF_ATOMIC_OP_PREPARE(_atomic, __value); \
+                GF_ATOMIC_OP_STORE(__value - (_value)); \
+                GF_ATOMIC_OP_RETURN(__value); \
+        })
+
+#define GF_ATOMIC_LOCK_FETCH_AND(_atomic, _value) \
+        ({ \
+                GF_ATOMIC_OP_PREPARE(_atomic, __value); \
+                GF_ATOMIC_OP_STORE(__value & (_value)); \
+                GF_ATOMIC_OP_RETURN(__value); \
+        })
+
+#define GF_ATOMIC_LOCK_FETCH_OR(_atomic, _value) \
+        ({ \
+                GF_ATOMIC_OP_PREPARE(_atomic, __value); \
+                GF_ATOMIC_OP_STORE(__value | (_value)); \
+                GF_ATOMIC_OP_RETURN(__value); \
+        })
+
+#define GF_ATOMIC_LOCK_FETCH_XOR(_atomic, _value) \
+        ({ \
+                GF_ATOMIC_OP_PREPARE(_atomic, __value); \
+                GF_ATOMIC_OP_STORE(__value ^ (_value)); \
+                GF_ATOMIC_OP_RETURN(__value); \
+        })
+
+#define GF_ATOMIC_LOCK_FETCH_NAND(_atomic, _value) \
+        ({ \
+                GF_ATOMIC_OP_PREPARE(_atomic, __value); \
+                GF_ATOMIC_OP_STORE(~(__value & (_value))); \
+                GF_ATOMIC_OP_RETURN(__value); \
+        })
+
+#define GF_ATOMIC_LOCK_SWAP(_atomic, _value) \
+        ({ \
+                GF_ATOMIC_OP_PREPARE(_atomic, __value); \
+                GF_ATOMIC_OP_STORE(_value); \
+                GF_ATOMIC_OP_RETURN(__value); \
+        })
+
+#define GF_ATOMIC_LOCK_CMP_SWAP(_atomic, _expected, _value) \
+        ({ \
+                GF_ATOMIC_OP_PREPARE(_atomic, __value); \
+                bool __ret = (__value == (_expected)); \
+                if (__ret) { \
+                        GF_ATOMIC_OP_STORE(_value); \
+                } \
+                GF_ATOMIC_OP_RETURN(__ret); \
+        })
+
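/* [Editor's illustration, not part of the patch] What the lock-based path
 * does once expanded, written out as a plain function for one type. This
 * assumes a build where gf_atomic_int64_t carries the gf_lock_t field. */
static inline int64_t
gf_atomic_lock_add_sketch (gf_atomic_int64_t *a, int64_t n)
{
        gf_lock_t *lock = (gf_lock_t *)&a->lk;
        int64_t    v;

        LOCK (lock);
        v = (a->value += n);  /* GF_ATOMIC_OP_STORE(__value += (_value)) */
        UNLOCK (lock);        /* released by GF_ATOMIC_OP_RETURN()       */

        return v;
}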
 #if defined(HAVE_ATOMIC_BUILTINS)
-/* all macros have a 'gf_atomic_t' as 1st argument */
-#define GF_ATOMIC_INIT(op, n)  __atomic_store (&(op.cnt), __ATOMIC_RELEASE)
-#define GF_ATOMIC_GET(op)      __atomic_load (&(op.cnt), __ATOMIC_ACQUIRE)
-#define GF_ATOMIC_INC(op)      __atomic_add_and_fetch (&(op.cnt), 1, \
-                                                       __ATOMIC_ACQ_REL)
-#define GF_ATOMIC_DEC(op)      __atomic_sub_and_fetch (&(op.cnt), 1, \
-                                                       __ATOMIC_ACQ_REL)
-#define GF_ATOMIC_ADD(op, n)   __atomic_add_and_fetch (&(op.cnt), n, \
-                                                       __ATOMIC_ACQ_REL)
-#define GF_ATOMIC_SUB(op, n)   __atomic_sub_and_fetch (&(op.cnt), n, \
-                                                       __ATOMIC_ACQ_REL)
-
-#else /* !HAVE_ATOMIC_BUILTINS, but HAVE_SYNC_BUILTINS */
-
-/* all macros have a 'gf_atomic_t' as 1st argument */
-#define GF_ATOMIC_INIT(op, n)  ({ op.cnt = n; __sync_synchronize (); })
-#define GF_ATOMIC_GET(op)      __sync_add_and_fetch (&(op.cnt), 0)
-#define GF_ATOMIC_INC(op)      __sync_add_and_fetch (&(op.cnt), 1)
-#define GF_ATOMIC_DEC(op)      __sync_sub_and_fetch (&(op.cnt), 1)
-#define GF_ATOMIC_ADD(op, n)   __sync_add_and_fetch (&(op.cnt), n)
-#define GF_ATOMIC_SUB(op, n)   __sync_sub_and_fetch (&(op.cnt), n)
-
-#endif /* HAVE_ATOMIC_BUILTINS || HAVE_SYNC_BUILTINS */
-
-#else /* no HAVE_(ATOMIC|SYNC)_BUILTINS */
-
-/* fallback implementation, using small inline functions to improve type
- * checking while compiling */
+/* If the compiler supports __atomic builtins, we use them. */
 
-#include "locking.h"
+#define GF_ATOMIC_BASE_INIT(_atomic, _value) \
+        __atomic_store_n(&(_atomic).value, (_value), __ATOMIC_RELEASE)
+
+#define GF_ATOMIC_BASE_GET(_atomic) \
+        __atomic_load_n(&(_atomic).value, __ATOMIC_ACQUIRE)
+
+#define GF_ATOMIC_BASE_ADD(_atomic, _value) \
+        __atomic_add_fetch(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
+
+#define GF_ATOMIC_BASE_SUB(_atomic, _value) \
+        __atomic_sub_fetch(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
+
+#define GF_ATOMIC_BASE_AND(_atomic, _value) \
+        __atomic_and_fetch(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
+
+#define GF_ATOMIC_BASE_OR(_atomic, _value) \
+        __atomic_or_fetch(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
+
+#define GF_ATOMIC_BASE_XOR(_atomic, _value) \
+        __atomic_xor_fetch(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
+
+#define GF_ATOMIC_BASE_NAND(_atomic, _value) \
+        __atomic_nand_fetch(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
+
+#define GF_ATOMIC_BASE_FETCH_ADD(_atomic, _value) \
+        __atomic_fetch_add(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
+
+#define GF_ATOMIC_BASE_FETCH_SUB(_atomic, _value) \
+        __atomic_fetch_sub(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
+
+#define GF_ATOMIC_BASE_FETCH_AND(_atomic, _value) \
+        __atomic_fetch_and(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
+
+#define GF_ATOMIC_BASE_FETCH_OR(_atomic, _value) \
+        __atomic_fetch_or(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
+
+#define GF_ATOMIC_BASE_FETCH_XOR(_atomic, _value) \
+        __atomic_fetch_xor(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
+
+#define GF_ATOMIC_BASE_FETCH_NAND(_atomic, _value) \
+        __atomic_fetch_nand(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
+
+#define GF_ATOMIC_BASE_SWAP(_atomic, _value) \
+        __atomic_exchange_n(&(_atomic).value, (_value), __ATOMIC_ACQ_REL)
+
+#define GF_ATOMIC_BASE_CMP_SWAP(_atomic, _expected, _value) \
+        ({ \
+                typeof((_atomic).value) __expected = (_expected); \
+                __atomic_compare_exchange_n(&(_atomic).value, &__expected, \
+                                            (_value), 0, __ATOMIC_ACQ_REL, \
+                                            __ATOMIC_ACQUIRE); \
+        })
+
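/* [Editor's illustration, not part of the patch] The local __expected copy
 * above keeps the builtin's write-back of the observed value away from the
 * caller's expression, so the macro simply returns true on success. A
 * hypothetical refcount release: */
static inline bool
try_release_sketch (gf_atomic_int64_t *refs)
{
        /* true only for the caller that moves the count from 1 to 0 */
        return GF_ATOMIC_BASE_CMP_SWAP (*refs, 1, 0);
}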
+#elif defined(HAVE_SYNC_BUILTINS)
+
+/* If the compiler doesn't support __atomic builtins but supports the
+ * __sync builtins, we use them. */
+
+#define GF_ATOMIC_BASE_INIT(_atomic, _value) \
+        do { \
+                (_atomic).value = (_value); \
+                __sync_synchronize(); \
+        } while (0)
+
+#define GF_ATOMIC_BASE_ADD(_atomic, _value) \
+        __sync_add_and_fetch(&(_atomic).value, (_value))
+
+#define GF_ATOMIC_BASE_SUB(_atomic, _value) \
+        __sync_sub_and_fetch(&(_atomic).value, (_value))
+
+#define GF_ATOMIC_BASE_AND(_atomic, _value) \
+        __sync_and_and_fetch(&(_atomic).value, (_value))
+
+#define GF_ATOMIC_BASE_OR(_atomic, _value) \
+        __sync_or_and_fetch(&(_atomic).value, (_value))
+
+#define GF_ATOMIC_BASE_XOR(_atomic, _value) \
+        __sync_xor_and_fetch(&(_atomic).value, (_value))
+
+#define GF_ATOMIC_BASE_NAND(_atomic, _value) \
+        __sync_nand_and_fetch(&(_atomic).value, (_value))
+
+#define GF_ATOMIC_BASE_FETCH_ADD(_atomic, _value) \
+        __sync_fetch_and_add(&(_atomic).value, (_value))
+
+#define GF_ATOMIC_BASE_FETCH_SUB(_atomic, _value) \
+        __sync_fetch_and_sub(&(_atomic).value, (_value))
+
+#define GF_ATOMIC_BASE_FETCH_AND(_atomic, _value) \
+        __sync_fetch_and_and(&(_atomic).value, (_value))
+
+#define GF_ATOMIC_BASE_FETCH_OR(_atomic, _value) \
+        __sync_fetch_and_or(&(_atomic).value, (_value))
+
+#define GF_ATOMIC_BASE_FETCH_XOR(_atomic, _value) \
+        __sync_fetch_and_xor(&(_atomic).value, (_value))
+
+#define GF_ATOMIC_BASE_FETCH_NAND(_atomic, _value) \
+        __sync_fetch_and_nand(&(_atomic).value, (_value))
+
+#define GF_ATOMIC_BASE_SWAP(_atomic, _value) \
+        ({ \
+                __sync_synchronize(); \
+                __sync_lock_test_and_set(&(_atomic).value, (_value)); \
+        })
+
+#define GF_ATOMIC_BASE_CMP_SWAP(_atomic, _expected, _value) \
+        __sync_bool_compare_and_swap(&(_atomic).value, (_expected), (_value))
+
+#define GF_ATOMIC_BASE_GET(_atomic) GF_ATOMIC_BASE_ADD(_atomic, 0)
+
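/* [Editor's note, not part of the patch] The __sync family has no plain
 * atomic load, so GET above is emulated as an add of zero; and because
 * __sync_lock_test_and_set is documented as only an acquire barrier, SWAP
 * issues an explicit __sync_synchronize() first. The GET emulation,
 * written out: */
static inline int64_t
sync_get_sketch (gf_atomic_int64_t *a)
{
        return __sync_add_and_fetch (&a->value, 0); /* full-barrier load */
}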
+#else /* !HAVE_ATOMIC_BUILTINS && !HAVE_SYNC_BUILTINS */
+
+/* The compiler doesn't support any atomic builtin. We fall back to the
+ * mutex-based implementation. */
+
+#define GF_ATOMIC_BASE_INIT(_atomic, _value) \
+        GF_ATOMIC_LOCK_INIT(_atomic, _value)
+
+#define GF_ATOMIC_BASE_GET(_atomic) \
+        GF_ATOMIC_LOCK_GET(_atomic)
+
+#define GF_ATOMIC_BASE_ADD(_atomic, _value) \
+        GF_ATOMIC_LOCK_ADD(_atomic, _value)
+
+#define GF_ATOMIC_BASE_SUB(_atomic, _value) \
+        GF_ATOMIC_LOCK_SUB(_atomic, _value)
+
+#define GF_ATOMIC_BASE_AND(_atomic, _value) \
+        GF_ATOMIC_LOCK_AND(_atomic, _value)
+
+#define GF_ATOMIC_BASE_OR(_atomic, _value) \
+        GF_ATOMIC_LOCK_OR(_atomic, _value)
+
+#define GF_ATOMIC_BASE_XOR(_atomic, _value) \
+        GF_ATOMIC_LOCK_XOR(_atomic, _value)
+
+#define GF_ATOMIC_BASE_NAND(_atomic, _value) \
+        GF_ATOMIC_LOCK_NAND(_atomic, _value)
+
+#define GF_ATOMIC_BASE_FETCH_ADD(_atomic, _value) \
+        GF_ATOMIC_LOCK_FETCH_ADD(_atomic, _value)
+
+#define GF_ATOMIC_BASE_FETCH_SUB(_atomic, _value) \
+        GF_ATOMIC_LOCK_FETCH_SUB(_atomic, _value)
+
+#define GF_ATOMIC_BASE_FETCH_AND(_atomic, _value) \
+        GF_ATOMIC_LOCK_FETCH_AND(_atomic, _value)
+
+#define GF_ATOMIC_BASE_FETCH_OR(_atomic, _value) \
+        GF_ATOMIC_LOCK_FETCH_OR(_atomic, _value)
+
+#define GF_ATOMIC_BASE_FETCH_XOR(_atomic, _value) \
+        GF_ATOMIC_LOCK_FETCH_XOR(_atomic, _value)
+
+#define GF_ATOMIC_BASE_FETCH_NAND(_atomic, _value) \
+        GF_ATOMIC_LOCK_FETCH_NAND(_atomic, _value)
+
+#define GF_ATOMIC_BASE_SWAP(_atomic, _value) \
+        GF_ATOMIC_LOCK_SWAP(_atomic, _value)
 
-typedef struct gf_atomic_t {
-        int64_t cnt;
-        gf_lock_t lk;
-} gf_atomic_t;
+#define GF_ATOMIC_BASE_CMP_SWAP(_atomic, _expected, _value) \
+        GF_ATOMIC_LOCK_CMP_SWAP(_atomic, _expected, _value)
 
+#endif /* HAVE_(ATOMIC|SYNC)_BUILTINS */
 
-static inline void
-gf_atomic_init (gf_atomic_t *op, int64_t cnt)
-{
-        LOCK_INIT (&op->lk);
-        op->cnt = cnt;
-}
+/* Here we declare the real atomic macros available to the user. */
+/* All macros have a 'gf_atomic_xxx' as 1st argument */
 
-static inline uint64_t
-gf_atomic_get (gf_atomic_t *op)
-{
-        uint64_t ret;
+#define GF_ATOMIC_INIT(_atomic, _value) GF_ATOMIC_CHOOSE(_atomic, INIT, _value)
+#define GF_ATOMIC_GET(_atomic)          GF_ATOMIC_CHOOSE(_atomic, GET)
+#define GF_ATOMIC_ADD(_atomic, _value)  GF_ATOMIC_CHOOSE(_atomic, ADD, _value)
+#define GF_ATOMIC_SUB(_atomic, _value)  GF_ATOMIC_CHOOSE(_atomic, SUB, _value)
+#define GF_ATOMIC_AND(_atomic, _value)  GF_ATOMIC_CHOOSE(_atomic, AND, _value)
+#define GF_ATOMIC_OR(_atomic, _value)   GF_ATOMIC_CHOOSE(_atomic, OR, _value)
+#define GF_ATOMIC_XOR(_atomic, _value)  GF_ATOMIC_CHOOSE(_atomic, XOR, _value)
+#define GF_ATOMIC_NAND(_atomic, _value) GF_ATOMIC_CHOOSE(_atomic, NAND, _value)
 
-        LOCK (&op->lk);
-        {
-                ret = op->cnt;
-        }
-        UNLOCK (&op->lk);
+#define GF_ATOMIC_FETCH_ADD(_atomic, _value) \
+        GF_ATOMIC_CHOOSE(_atomic, FETCH_ADD, _value)
 
-        return ret;
-}
+#define GF_ATOMIC_FETCH_SUB(_atomic, _value) \
+        GF_ATOMIC_CHOOSE(_atomic, FETCH_SUB, _value)
 
+#define GF_ATOMIC_FETCH_AND(_atomic, _value) \
+        GF_ATOMIC_CHOOSE(_atomic, FETCH_AND, _value)
 
-static inline int64_t
-gf_atomic_add (gf_atomic_t *op, int64_t n)
-{
-        uint64_t ret;
+#define GF_ATOMIC_FETCH_OR(_atomic, _value) \
+        GF_ATOMIC_CHOOSE(_atomic, FETCH_OR, _value)
 
-        LOCK (&op->lk);
-        {
-                op->cnt += n;
-                ret = op->cnt;
-        }
-        UNLOCK (&op->lk);
+#define GF_ATOMIC_FETCH_XOR(_atomic, _value) \
+        GF_ATOMIC_CHOOSE(_atomic, FETCH_XOR, _value)
 
-        return ret;
-}
+#define GF_ATOMIC_FETCH_NAND(_atomic, _value) \
+        GF_ATOMIC_CHOOSE(_atomic, FETCH_NAND, _value)
 
+#define GF_ATOMIC_SWAP(_atomic, _value) \
+        GF_ATOMIC_CHOOSE(_atomic, SWAP, _value)
 
-#define GF_ATOMIC_INIT(op, cnt)  gf_atomic_init (&op, cnt)
-#define GF_ATOMIC_GET(op)        gf_atomic_get (&op)
-#define GF_ATOMIC_INC(op)        gf_atomic_add (&op, 1)
-#define GF_ATOMIC_DEC(op)        gf_atomic_add (&op, -1)
-#define GF_ATOMIC_ADD(op, n)     gf_atomic_add (&op, n)
-#define GF_ATOMIC_SUB(op, n)     gf_atomic_add (&op, -n)
+#define GF_ATOMIC_CMP_SWAP(_atomic, _expected, _value) \
+        GF_ATOMIC_CHOOSE(_atomic, CMP_SWAP, _expected, _value)
 
-#endif /* HAVE_ATOMIC_SYNC_OPS */
+#define GF_ATOMIC_INC(_atomic)       GF_ATOMIC_ADD(_atomic, 1)
+#define GF_ATOMIC_DEC(_atomic)       GF_ATOMIC_SUB(_atomic, 1)
+#define GF_ATOMIC_FETCH_INC(_atomic) GF_ATOMIC_FETCH_ADD(_atomic, 1)
+#define GF_ATOMIC_FETCH_DEC(_atomic) GF_ATOMIC_FETCH_SUB(_atomic, 1)
 
 #endif /* _ATOMIC_H */
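Editor's illustration (not part of the patch): typical use of the public
API defined above, with a hypothetical counter; note that the plain
operations return the new value while the FETCH_ variants return the old
one:

    #include "atomic.h"

    static gf_atomic_uint32_t requests;

    static void
    requests_example (void)
    {
            uint32_t old;

            GF_ATOMIC_INIT (requests, 0);
            GF_ATOMIC_INC (requests);                /* returns 1 (new value) */
            old = GF_ATOMIC_FETCH_ADD (requests, 4); /* returns 1 (old value) */
            (void)old;
    }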
diff --git a/xlators/debug/io-stats/src/io-stats.c b/xlators/debug/io-stats/src/io-stats.c
index 892746c959a..5656e32a4f5 100644
--- a/xlators/debug/io-stats/src/io-stats.c
+++ b/xlators/debug/io-stats/src/io-stats.c
@@ -1578,8 +1578,8 @@ io_stats_dump (xlator_t *this, struct ios_dump_args *args,
               gf1_cli_info_op op, gf_boolean_t is_peek)
 {
         struct ios_conf         *conf = NULL;
-        struct ios_global_stats  cumulative = {{0,}, };
-        struct ios_global_stats  incremental = {{0,}, };
+        struct ios_global_stats  cumulative = { };
+        struct ios_global_stats  incremental = { };
         int                      increment = 0;
         struct timeval           now;
 
diff --git a/xlators/performance/md-cache/src/md-cache.c b/xlators/performance/md-cache/src/md-cache.c
index 64a2867f5d9..6938b3150a6 100644
--- a/xlators/performance/md-cache/src/md-cache.c
+++ b/xlators/performance/md-cache/src/md-cache.c
@@ -2628,21 +2628,21 @@ mdc_priv_dump (xlator_t *this)
 
         gf_proc_dump_add_section(key_prefix);
 
         gf_proc_dump_write("stat_hit_count", "%"PRId64,
-                           conf->mdc_counter.stat_hit.cnt);
+                           GF_ATOMIC_GET(conf->mdc_counter.stat_hit));
         gf_proc_dump_write("stat_miss_count", "%"PRId64,
-                           conf->mdc_counter.stat_miss.cnt);
+                           GF_ATOMIC_GET(conf->mdc_counter.stat_miss));
         gf_proc_dump_write("xattr_hit_count", "%"PRId64,
-                           conf->mdc_counter.xattr_hit.cnt);
+                           GF_ATOMIC_GET(conf->mdc_counter.xattr_hit));
         gf_proc_dump_write("xattr_miss_count", "%"PRId64,
-                           conf->mdc_counter.xattr_miss.cnt);
+                           GF_ATOMIC_GET(conf->mdc_counter.xattr_miss));
         gf_proc_dump_write("nameless_lookup_count", "%"PRId64,
-                           conf->mdc_counter.nameless_lookup.cnt);
+                           GF_ATOMIC_GET(conf->mdc_counter.nameless_lookup));
         gf_proc_dump_write("negative_lookup_count", "%"PRId64,
-                           conf->mdc_counter.negative_lookup.cnt);
+                           GF_ATOMIC_GET(conf->mdc_counter.negative_lookup));
         gf_proc_dump_write("stat_invalidations_received", "%"PRId64,
-                           conf->mdc_counter.stat_invals.cnt);
+                           GF_ATOMIC_GET(conf->mdc_counter.stat_invals));
         gf_proc_dump_write("xattr_invalidations_received", "%"PRId64,
-                           conf->mdc_counter.xattr_invals.cnt);
+                           GF_ATOMIC_GET(conf->mdc_counter.xattr_invals));
 
         return 0;
 }
diff --git a/xlators/performance/nl-cache/src/nl-cache-helper.c b/xlators/performance/nl-cache/src/nl-cache-helper.c
index 0b6c884b0de..1556f9ec952 100644
--- a/xlators/performance/nl-cache/src/nl-cache-helper.c
+++ b/xlators/performance/nl-cache/src/nl-cache-helper.c
@@ -600,8 +600,8 @@ nlc_lru_prune (xlator_t *this, inode_t *inode)
 
         LOCK (&conf->lock);
         {
-                if ((conf->current_cache_size.cnt < conf->cache_size) &&
-                    (conf->refd_inodes.cnt < conf->inode_limit))
+                if ((GF_ATOMIC_GET(conf->refd_inodes) < conf->inode_limit) &&
+                    (GF_ATOMIC_GET(conf->current_cache_size) < conf->cache_size))
                         goto unlock;
 
                 list_for_each_entry_safe (lru_node, tmp, &conf->lru, list) {
diff --git a/xlators/performance/nl-cache/src/nl-cache.c b/xlators/performance/nl-cache/src/nl-cache.c
index 7dad8d95a53..9fa7ec87616 100644
--- a/xlators/performance/nl-cache/src/nl-cache.c
+++ b/xlators/performance/nl-cache/src/nl-cache.c
@@ -618,29 +618,29 @@ nlc_priv_dump (xlator_t *this)
 
         gf_proc_dump_add_section(key_prefix);
 
         gf_proc_dump_write("negative_lookup_hit_count", "%"PRId64,
-                           conf->nlc_counter.nlc_hit.cnt);
+                           GF_ATOMIC_GET(conf->nlc_counter.nlc_hit));
         gf_proc_dump_write("negative_lookup_miss_count", "%"PRId64,
-                           conf->nlc_counter.nlc_miss.cnt);
+                           GF_ATOMIC_GET(conf->nlc_counter.nlc_miss));
         gf_proc_dump_write("get_real_filename_hit_count", "%"PRId64,
-                           conf->nlc_counter.getrealfilename_hit.cnt);
+                           GF_ATOMIC_GET(conf->nlc_counter.getrealfilename_hit));
         gf_proc_dump_write("get_real_filename_miss_count", "%"PRId64,
-                           conf->nlc_counter.getrealfilename_miss.cnt);
+                           GF_ATOMIC_GET(conf->nlc_counter.getrealfilename_miss));
         gf_proc_dump_write("nameless_lookup_count", "%"PRId64,
-                           conf->nlc_counter.nameless_lookup.cnt);
+                           GF_ATOMIC_GET(conf->nlc_counter.nameless_lookup));
         gf_proc_dump_write("inodes_with_positive_dentry_cache", "%"PRId64,
-                           conf->nlc_counter.pe_inode_cnt.cnt);
+                           GF_ATOMIC_GET(conf->nlc_counter.pe_inode_cnt));
         gf_proc_dump_write("inodes_with_negative_dentry_cache", "%"PRId64,
-                           conf->nlc_counter.ne_inode_cnt.cnt);
+                           GF_ATOMIC_GET(conf->nlc_counter.ne_inode_cnt));
         gf_proc_dump_write("dentry_invalidations_recieved", "%"PRId64,
-                           conf->nlc_counter.nlc_invals.cnt);
+                           GF_ATOMIC_GET(conf->nlc_counter.nlc_invals));
         gf_proc_dump_write("cache_limit", "%"PRIu64,
                            conf->cache_size);
         gf_proc_dump_write("consumed_cache_size", "%"PRId64,
-                           conf->current_cache_size.cnt);
+                           GF_ATOMIC_GET(conf->current_cache_size));
         gf_proc_dump_write("inode_limit", "%"PRIu64,
                            conf->inode_limit);
         gf_proc_dump_write("consumed_inodes", "%"PRId64,
-                           conf->refd_inodes.cnt);
+                           GF_ATOMIC_GET(conf->refd_inodes));
 
         return 0;
 }
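Editor's note (illustrative, not part of the patch): the xlator hunks above
all apply the same mechanical conversion, sketched here with a hypothetical
'hits' counter:

    gf_atomic_t hits;             /* was: a struct exposing a raw 'cnt' field */

    GF_ATOMIC_INIT (hits, 0);     /* was: LOCK_INIT() plus direct assignment  */
    GF_ATOMIC_INC (hits);         /* was: cnt++ under the embedded lock       */
    gf_proc_dump_write ("hits", "%"PRId64, GF_ATOMIC_GET (hits));
                                  /* was: reading hits.cnt directly           */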