From mboxrd@z Thu Jan  1 00:00:00 1970
Return-Path:
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1753899AbbIKSjm (ORCPT );
	Fri, 11 Sep 2015 14:39:42 -0400
Received: from g1t6214.austin.hp.com ([15.73.96.122]:36480 "EHLO
	g1t6214.austin.hp.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1751388AbbIKSh6 (ORCPT );
	Fri, 11 Sep 2015 14:37:58 -0400
From: Waiman Long
To: Peter Zijlstra, Ingo Molnar, Thomas Gleixner, "H. Peter Anvin"
Cc: x86@kernel.org, linux-kernel@vger.kernel.org, Scott J Norton,
	Douglas Hatch, Davidlohr Bueso, Waiman Long
Subject: [PATCH v6 1/6] locking/qspinlock: relaxes cmpxchg & xchg ops in
	native code
Date: Fri, 11 Sep 2015 14:37:33 -0400
Message-Id: <1441996658-62854-2-git-send-email-Waiman.Long@hpe.com>
X-Mailer: git-send-email 1.7.1
In-Reply-To: <1441996658-62854-1-git-send-email-Waiman.Long@hpe.com>
References: <1441996658-62854-1-git-send-email-Waiman.Long@hpe.com>
Sender: linux-kernel-owner@vger.kernel.org
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org

This patch replaces the cmpxchg() and xchg() calls in the native
qspinlock code with more relaxed versions of those calls to enable
other architectures to adopt queued spinlocks with less performance
overhead.

Signed-off-by: Waiman Long
---
 arch/x86/include/asm/qspinlock.h |    2 +-
 include/asm-generic/qspinlock.h  |    6 +++---
 kernel/locking/qspinlock.c       |   21 +++++++++++++++++----
 3 files changed, 21 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index 9d51fae..053e70d 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -46,7 +46,7 @@ static inline bool virt_queued_spin_lock(struct qspinlock *lock)
 	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 		return false;
 
-	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
+	while (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) != 0)
 		cpu_relax();
 
 	return true;
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 83bfb87..efbd1fd 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -62,7 +62,7 @@ static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
 	if (!atomic_read(&lock->val) &&
-	   (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
+	   (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
 		return 1;
 	return 0;
 }
@@ -77,7 +77,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
 	u32 val;
 
-	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
+	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
 	if (likely(val == 0))
 		return;
 	queued_spin_lock_slowpath(lock, val);
@@ -93,7 +93,7 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 	/*
 	 * smp_mb__before_atomic() in order to guarantee release semantics
 	 */
-	smp_mb__before_atomic_dec();
+	smp_mb__before_atomic();
 	atomic_sub(_Q_LOCKED_VAL, &lock->val);
 }
 #endif
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 337c881..28a15c7 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -176,7 +176,12 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 {
 	struct __qspinlock *l = (void *)lock;
 
-	return (u32)xchg(&l->tail, tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
+	/*
+	 * Use release semantics to make sure that the MCS node is properly
+	 * initialized before changing the tail code.
+	 */
+	return (u32)xchg_release(&l->tail,
+				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
 }
 
 #else /* _Q_PENDING_BITS == 8 */
@@ -208,7 +213,11 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 
 	for (;;) {
 		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
-		old = atomic_cmpxchg(&lock->val, val, new);
+		/*
+		 * Use release semantics to make sure that the MCS node is
+		 * properly initialized before changing the tail code.
+		 */
+		old = atomic_cmpxchg_release(&lock->val, val, new);
 		if (old == val)
 			break;
 
@@ -319,7 +328,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 		if (val == new)
 			new |= _Q_PENDING_VAL;
 
-		old = atomic_cmpxchg(&lock->val, val, new);
+		old = atomic_cmpxchg_acquire(&lock->val, val, new);
 		if (old == val)
 			break;
 
@@ -426,7 +435,11 @@ queue:
 			set_locked(lock);
 			break;
 		}
-		old = atomic_cmpxchg(&lock->val, val, _Q_LOCKED_VAL);
+		/*
+		 * The smp_load_acquire() call above has provided the necessary
+		 * acquire semantics required for locking.
+		 */
+		old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
 		if (old == val)
 			goto release;	/* No contention */
 
-- 
1.7.1
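
A note on the ordering variants for readers less familiar with them:
the _acquire() forms order the critical section after the lock is
taken, the _release() form publishes prior stores (here, the MCS node
initialization) before the tail code changes, and the _relaxed() form
imposes no ordering at all, which is safe in the slowpath only because
the preceding smp_load_acquire() already provides the needed acquire
semantics. Below is a minimal user-space sketch of the same
acquire/release choices, written with C11 atomics rather than the
kernel's atomic_t API; the toy_lock type and the toy_trylock()/
toy_unlock() helpers are hypothetical names used only for this
illustration and are not part of the series.

	/*
	 * Illustrative sketch only: acquire on lock, release on unlock,
	 * expressed with C11 <stdatomic.h> instead of kernel atomics.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_lock {
		atomic_uint val;	/* 0 = unlocked, 1 = locked */
	};

	/*
	 * Acquire on success: accesses inside the critical section cannot
	 * be reordered before the lock word is observed as taken
	 * (analogous to the atomic_cmpxchg_acquire() fast paths above).
	 */
	static bool toy_trylock(struct toy_lock *l)
	{
		unsigned int expected = 0;

		return atomic_compare_exchange_strong_explicit(&l->val,
					&expected, 1,
					memory_order_acquire,
					memory_order_relaxed);
	}

	/*
	 * Release on unlock: everything written while holding the lock is
	 * visible to the next acquirer before the lock word is cleared.
	 */
	static void toy_unlock(struct toy_lock *l)
	{
		atomic_store_explicit(&l->val, 0, memory_order_release);
	}

	int main(void)
	{
		struct toy_lock lock = { .val = 0 };

		if (toy_trylock(&lock)) {
			printf("lock acquired\n");
			toy_unlock(&lock);
		}
		return 0;
	}

On x86 the acquire/release/relaxed variants fall back to the fully
ordered operations, so the generated code there should not change; the
intended benefit is for weakly ordered architectures, which can use
cheaper barriers when adopting queued spinlocks.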