From: Vineet Gupta <Vineet.Gupta1@synopsys.com> To: <linux-arch@vger.kernel.org>, <linux-kernel@vger.kernel.org> Cc: <arnd@arndb.de>, <arc-linux-dev@synopsys.com>, Vineet Gupta <Vineet.Gupta1@synopsys.com>, "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>, "Peter Zijlstra (Intel)" <peterz@infradead.org> Subject: [PATCH 18/28] ARC: add smp barriers around atomics per memory-barrriers.txt Date: Tue, 9 Jun 2015 17:18:18 +0530 [thread overview] Message-ID: <1433850508-26317-19-git-send-email-vgupta@synopsys.com> (raw) In-Reply-To: <1433850508-26317-1-git-send-email-vgupta@synopsys.com> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Vineet Gupta <vgupta@synopsys.com> --- arch/arc/include/asm/atomic.h | 10 ++++++++++ arch/arc/include/asm/bitops.h | 12 ++++++++++++ arch/arc/include/asm/cmpxchg.h | 10 ++++++++++ arch/arc/include/asm/spinlock.h | 10 ++++++++++ 4 files changed, 42 insertions(+) diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 9917a45fc430..6fc968f78500 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -43,6 +43,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ unsigned int temp; \ \ + smp_mb(); \ + \ __asm__ __volatile__( \ "1: llock %0, [%1] \n" \ " " #asm_op " %0, %0, %2 \n" \ @@ -52,6 +54,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ : "r"(&v->counter), "ir"(i) \ : "cc"); \ \ + smp_mb(); \ + \ return temp; \ } @@ -142,9 +146,15 @@ ATOMIC_OP(and, &=, and) #define __atomic_add_unless(v, a, u) \ ({ \ int c, old; \ + \ + smp_mb(); \ + \ c = atomic_read(v); \ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\ c = old; \ + \ + smp_mb(); \ + \ c; \ }) diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h index 829a8a2e9704..47878d85e3a3 100644 --- a/arch/arc/include/asm/bitops.h +++ b/arch/arc/include/asm/bitops.h @@ -117,6 +117,8 @@ static inline int 
test_and_set_bit(unsigned long nr, volatile unsigned long *m) if (__builtin_constant_p(nr)) nr &= 0x1f; + smp_mb(); + __asm__ __volatile__( "1: llock %0, [%2] \n" " bset %1, %0, %3 \n" @@ -126,6 +128,8 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m) : "r"(m), "ir"(nr) : "cc"); + smp_mb(); + return (old & (1 << nr)) != 0; } @@ -139,6 +143,8 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *m) if (__builtin_constant_p(nr)) nr &= 0x1f; + smp_mb(); + __asm__ __volatile__( "1: llock %0, [%2] \n" " bclr %1, %0, %3 \n" @@ -148,6 +154,8 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *m) : "r"(m), "ir"(nr) : "cc"); + smp_mb(); + return (old & (1 << nr)) != 0; } @@ -161,6 +169,8 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m) if (__builtin_constant_p(nr)) nr &= 0x1f; + smp_mb(); + __asm__ __volatile__( "1: llock %0, [%2] \n" " bxor %1, %0, %3 \n" @@ -170,6 +180,8 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m) : "r"(m), "ir"(nr) : "cc"); + smp_mb(); + return (old & (1 << nr)) != 0; } diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h index 90de5c528da2..96a3dd8fe4bf 100644 --- a/arch/arc/include/asm/cmpxchg.h +++ b/arch/arc/include/asm/cmpxchg.h @@ -10,6 +10,8 @@ #define __ASM_ARC_CMPXCHG_H #include <linux/types.h> + +#include <asm/barrier.h> #include <asm/smp.h> #ifdef CONFIG_ARC_HAS_LLSC @@ -19,6 +21,8 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) { unsigned long prev; + smp_mb(); + __asm__ __volatile__( "1: llock %0, [%1] \n" " brne %0, %2, 2f \n" @@ -31,6 +35,8 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) "r"(new) /* can't be "ir". 
scond can't take LIMM for "b" */ : "cc", "memory"); /* so that gcc knows memory is being written here */ + smp_mb(); + return prev; } @@ -78,12 +84,16 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr, switch (size) { case 4: + smp_mb(); + __asm__ __volatile__( " ex %0, [%1] \n" : "+r"(val) : "r"(ptr) : "memory"); + smp_mb(); + return val; } return __xchg_bad_pointer(); diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h index b6a8c2dfbe6e..8af8eaad4999 100644 --- a/arch/arc/include/asm/spinlock.h +++ b/arch/arc/include/asm/spinlock.h @@ -22,24 +22,32 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; + smp_mb(); + __asm__ __volatile__( "1: ex %0, [%1] \n" " breq %0, %2, 1b \n" : "+&r" (tmp) : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__) : "memory"); + + smp_mb(); } static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; + smp_mb(); + __asm__ __volatile__( "1: ex %0, [%1] \n" : "+r" (tmp) : "r"(&(lock->slock)) : "memory"); + smp_mb(); + return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__); } @@ -47,6 +55,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) { unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__; + smp_mb(); + __asm__ __volatile__( " ex %0, [%1] \n" : "+r" (tmp) -- 1.9.1
WARNING: multiple messages have this Message-ID (diff)
From: Vineet Gupta <Vineet.Gupta1@synopsys.com> To: linux-arch@vger.kernel.org, linux-kernel@vger.kernel.org Cc: arnd@arndb.de, arc-linux-dev@synopsys.com, Vineet Gupta <Vineet.Gupta1@synopsys.com>, "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>, "Peter Zijlstra (Intel)" <peterz@infradead.org> Subject: [PATCH 18/28] ARC: add smp barriers around atomics per memory-barrriers.txt Date: Tue, 9 Jun 2015 17:18:18 +0530 [thread overview] Message-ID: <1433850508-26317-19-git-send-email-vgupta@synopsys.com> (raw) In-Reply-To: <1433850508-26317-1-git-send-email-vgupta@synopsys.com> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Vineet Gupta <vgupta@synopsys.com> --- arch/arc/include/asm/atomic.h | 10 ++++++++++ arch/arc/include/asm/bitops.h | 12 ++++++++++++ arch/arc/include/asm/cmpxchg.h | 10 ++++++++++ arch/arc/include/asm/spinlock.h | 10 ++++++++++ 4 files changed, 42 insertions(+) diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 9917a45fc430..6fc968f78500 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h @@ -43,6 +43,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ { \ unsigned int temp; \ \ + smp_mb(); \ + \ __asm__ __volatile__( \ "1: llock %0, [%1] \n" \ " " #asm_op " %0, %0, %2 \n" \ @@ -52,6 +54,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ : "r"(&v->counter), "ir"(i) \ : "cc"); \ \ + smp_mb(); \ + \ return temp; \ } @@ -142,9 +146,15 @@ ATOMIC_OP(and, &=, and) #define __atomic_add_unless(v, a, u) \ ({ \ int c, old; \ + \ + smp_mb(); \ + \ c = atomic_read(v); \ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\ c = old; \ + \ + smp_mb(); \ + \ c; \ }) diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h index 829a8a2e9704..47878d85e3a3 100644 --- a/arch/arc/include/asm/bitops.h +++ b/arch/arc/include/asm/bitops.h @@ -117,6 +117,8 @@ static inline int 
test_and_set_bit(unsigned long nr, volatile unsigned long *m) if (__builtin_constant_p(nr)) nr &= 0x1f; + smp_mb(); + __asm__ __volatile__( "1: llock %0, [%2] \n" " bset %1, %0, %3 \n" @@ -126,6 +128,8 @@ static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m) : "r"(m), "ir"(nr) : "cc"); + smp_mb(); + return (old & (1 << nr)) != 0; } @@ -139,6 +143,8 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *m) if (__builtin_constant_p(nr)) nr &= 0x1f; + smp_mb(); + __asm__ __volatile__( "1: llock %0, [%2] \n" " bclr %1, %0, %3 \n" @@ -148,6 +154,8 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *m) : "r"(m), "ir"(nr) : "cc"); + smp_mb(); + return (old & (1 << nr)) != 0; } @@ -161,6 +169,8 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m) if (__builtin_constant_p(nr)) nr &= 0x1f; + smp_mb(); + __asm__ __volatile__( "1: llock %0, [%2] \n" " bxor %1, %0, %3 \n" @@ -170,6 +180,8 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m) : "r"(m), "ir"(nr) : "cc"); + smp_mb(); + return (old & (1 << nr)) != 0; } diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h index 90de5c528da2..96a3dd8fe4bf 100644 --- a/arch/arc/include/asm/cmpxchg.h +++ b/arch/arc/include/asm/cmpxchg.h @@ -10,6 +10,8 @@ #define __ASM_ARC_CMPXCHG_H #include <linux/types.h> + +#include <asm/barrier.h> #include <asm/smp.h> #ifdef CONFIG_ARC_HAS_LLSC @@ -19,6 +21,8 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) { unsigned long prev; + smp_mb(); + __asm__ __volatile__( "1: llock %0, [%1] \n" " brne %0, %2, 2f \n" @@ -31,6 +35,8 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new) "r"(new) /* can't be "ir". 
scond can't take LIMM for "b" */ : "cc", "memory"); /* so that gcc knows memory is being written here */ + smp_mb(); + return prev; } @@ -78,12 +84,16 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr, switch (size) { case 4: + smp_mb(); + __asm__ __volatile__( " ex %0, [%1] \n" : "+r"(val) : "r"(ptr) : "memory"); + smp_mb(); + return val; } return __xchg_bad_pointer(); diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h index b6a8c2dfbe6e..8af8eaad4999 100644 --- a/arch/arc/include/asm/spinlock.h +++ b/arch/arc/include/asm/spinlock.h @@ -22,24 +22,32 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; + smp_mb(); + __asm__ __volatile__( "1: ex %0, [%1] \n" " breq %0, %2, 1b \n" : "+&r" (tmp) : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__) : "memory"); + + smp_mb(); } static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; + smp_mb(); + __asm__ __volatile__( "1: ex %0, [%1] \n" : "+r" (tmp) : "r"(&(lock->slock)) : "memory"); + smp_mb(); + return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__); } @@ -47,6 +55,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock) { unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__; + smp_mb(); + __asm__ __volatile__( " ex %0, [%1] \n" : "+r" (tmp) -- 1.9.1
next prev parent reply other threads:[~2015-06-09 11:50 UTC|newest] Thread overview: 109+ messages / expand[flat|nested] mbox.gz Atom feed top 2015-06-09 11:48 [PATCH 00/28] ARCv2 port to Linux - (B) ISA / Core / platform support Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 01/28] ARCv2: [intc] HS38 core interrupt controller Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 02/28] ARCv2: Support for ARCv2 ISA and HS38x cores Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 03/28] ARCv2: STAR 9000793984: Handle return from intr to Delay Slot Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 04/28] ARCv2: STAR 9000808988: signals involving " Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 05/28] ARCv2: STAR 9000814690: Really Re-enable interrupts to avoid deadlocks Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 06/28] ARCv2: MMUv4: TLB programming Model changes Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 07/28] ARCv2: MMUv4: cache programming model changes Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 08/28] ARCv2: MMUv4: support aliasing icache config Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 09/28] ARCv2: optimised string/mem lib routines Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 10/28] ARCv2: Adhere to Zero Delay loop restriction Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 11/28] ARCv2: extable: Enable sorting at build time Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-24 5:51 ` Vineet Gupta 2015-06-24 5:51 ` Vineet Gupta 2015-06-29 20:38 ` David Daney 2015-06-30 4:41 ` Vineet Gupta 2015-06-30 4:41 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 12/28] ARCv2: clocksource: Introduce 64bit local RTC counter Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 13/28] ARC: make 
plat_smp_ops weak to allow over-rides Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 14/28] ARCv2: SMP: ARConnect debug/robustness Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 15/28] ARCv2: SMP: clocksource: Enable Global Real Time counter Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 16/28] ARCv2: SMP: intc: IDU 2nd level intc for dynamic IRQ distribution Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 17/28] ARC: add compiler barrier to LLSC based cmpxchg Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 12:23 ` Peter Zijlstra 2015-06-09 11:48 ` Vineet Gupta [this message] 2015-06-09 11:48 ` [PATCH 18/28] ARC: add smp barriers around atomics per memory-barrriers.txt Vineet Gupta 2015-06-09 12:30 ` Peter Zijlstra 2015-06-10 9:17 ` Vineet Gupta 2015-06-10 10:53 ` Peter Zijlstra 2015-06-11 13:03 ` Vineet Gupta 2015-06-12 12:15 ` [PATCH v2] ARC: add smp barriers around atomics per Documentation/atomic_ops.txt Vineet Gupta 2015-06-12 12:15 ` Vineet Gupta 2015-06-12 13:04 ` Peter Zijlstra 2015-06-12 13:16 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 19/28] arch: conditionally define smp_{mb,rmb,wmb} Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 12:32 ` Peter Zijlstra 2015-06-09 11:48 ` [PATCH 20/28] ARCv2: barriers Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 12:40 ` Peter Zijlstra 2015-06-10 9:34 ` Vineet Gupta 2015-06-10 10:58 ` Peter Zijlstra 2015-06-10 13:01 ` Will Deacon 2015-06-11 12:13 ` Vineet Gupta 2015-06-11 13:39 ` Will Deacon 2015-06-19 13:13 ` Vineet Gupta 2015-06-19 13:13 ` Vineet Gupta 2015-06-19 13:13 ` Vineet Gupta 2015-06-22 13:36 ` Will Deacon 2015-06-22 13:36 ` Will Deacon 2015-06-22 13:36 ` Will Deacon 2015-06-23 7:58 ` [PATCH v2 " Vineet Gupta 2015-06-23 7:58 ` Vineet Gupta 2015-06-23 8:49 ` Will Deacon 2015-06-23 9:03 ` Vineet Gupta 2015-06-23 9:26 ` Will Deacon 2015-06-23 9:52 ` [PATCH v3 22/28] " Vineet Gupta 2015-06-23 9:52 
` Vineet Gupta 2015-06-23 16:28 ` Will Deacon 2015-06-23 9:25 ` [PATCH v2 20/28] " Peter Zijlstra 2015-06-23 8:02 ` [PATCH " Vineet Gupta 2015-06-09 11:48 ` [PATCH 21/28] ARC: Reduce bitops lines of code using macros Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-12 12:20 ` [PATCH v2] " Vineet Gupta 2015-06-12 12:20 ` Vineet Gupta 2015-06-12 13:05 ` Peter Zijlstra 2015-06-09 11:48 ` [PATCH 22/28] ARCv2: STAR 9000837815 workaround hardware exclusive transactions livelock Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 12:35 ` Peter Zijlstra 2015-06-10 10:01 ` Vineet Gupta 2015-06-10 11:02 ` Peter Zijlstra 2015-06-19 9:55 ` [PATCH v2 " Vineet Gupta 2015-06-19 9:55 ` Vineet Gupta 2015-06-19 9:59 ` Will Deacon 2015-06-19 10:09 ` Vineet Gupta 2015-06-23 7:59 ` Vineet Gupta 2015-06-23 7:59 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 23/28] ARCv2: SLC: Handle explcit flush for DMA ops (w/o IO-coherency) Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 24/28] ARCv2: All bits in place, allow ARCv2 builds Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 25/28] ARCv2: [nsim*hs*] Support simulation platforms for HS38x cores Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 26/28] ARC: [axs101] Prepare for AXS103 Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 27/28] ARCv2: [axs103] Support ARC SDP FPGA platform for HS38x cores Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta 2015-06-09 11:48 ` [PATCH 28/28] ARCv2: [vdk] dts files and defconfig for HS38 VDK Vineet Gupta 2015-06-09 11:48 ` Vineet Gupta
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=1433850508-26317-19-git-send-email-vgupta@synopsys.com \ --to=vineet.gupta1@synopsys.com \ --cc=arc-linux-dev@synopsys.com \ --cc=arnd@arndb.de \ --cc=linux-arch@vger.kernel.org \ --cc=linux-kernel@vger.kernel.org \ --cc=paulmck@linux.vnet.ibm.com \ --cc=peterz@infradead.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.