From: "tip-bot2 for Peter Zijlstra" <tip-bot2@linutronix.de>
To: linux-tip-commits@vger.kernel.org
Cc: "Peter Zijlstra (Intel)" <peterz@infradead.org>,
	Arnd Bergmann <arnd@arndb.de>,
	Mark Rutland <mark.rutland@arm.com>,
	Heiko Carstens <hca@linux.ibm.com>,
	x86@kernel.org, linux-kernel@vger.kernel.org
Subject: [tip: locking/core] arch: Introduce arch_{,try_}cmpxchg128{,_local}()
Date: Mon, 05 Jun 2023 07:42:51 -0000
Message-ID: <168595097198.404.7071635060895699267.tip-bot2@tip-bot2>
In-Reply-To: <20230531132323.452120708@infradead.org>

The following commit has been merged into the locking/core branch of tip:

Commit-ID:     b23e139d0b66c0216e7e9361a5021290395f504c
Gitweb:        https://git.kernel.org/tip/b23e139d0b66c0216e7e9361a5021290395f504c
Author:        Peter Zijlstra <peterz@infradead.org>
AuthorDate:    Wed, 31 May 2023 15:08:36 +02:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Mon, 05 Jun 2023 09:36:35 +02:00

arch: Introduce arch_{,try_}cmpxchg128{,_local}()

For all architectures that currently support cmpxchg_double(),
implement the cmpxchg128() family of functions, which is basically
the same but with a saner interface.
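
To make the interface change concrete, a small caller sketch (the
function below is invented for illustration and is not part of this
patch): where cmpxchg_double() took the 64-bit halves separately and
returned success, cmpxchg128() takes and returns plain u128 values,
and the try_ variant returns bool while updating the expected value
on failure.

	/* Atomically swap in zero, return the low half of what was there.
	 * Purely illustrative; not part of this patch. */
	static u64 read_low_and_clear(u128 *pair)
	{
		u128 old = *pair;	/* racy snapshot; the loop repairs it */

		/* arch_try_cmpxchg128() rewrites 'old' on failure, no re-read */
		while (!arch_try_cmpxchg128(pair, &old, 0))
			;

		return (u64)old;	/* truncation keeps the low 64 bits */
	}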

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Heiko Carstens <hca@linux.ibm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Link: https://lore.kernel.org/r/20230531132323.452120708@infradead.org
---
 arch/arm64/include/asm/atomic_ll_sc.h | 41 +++++++++++++++++-
 arch/arm64/include/asm/atomic_lse.h   | 31 +++++++++++++-
 arch/arm64/include/asm/cmpxchg.h      | 26 +++++++++++-
 arch/s390/include/asm/cmpxchg.h       | 14 ++++++-
 arch/x86/include/asm/cmpxchg_32.h     |  3 +-
 arch/x86/include/asm/cmpxchg_64.h     | 64 +++++++++++++++++++++++++-
 6 files changed, 177 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
index cbb3d96..b100a49 100644
--- a/arch/arm64/include/asm/atomic_ll_sc.h
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -326,6 +326,47 @@ __CMPXCHG_DBL(   ,        ,  ,         )
 __CMPXCHG_DBL(_mb, dmb ish, l, "memory")
 
 #undef __CMPXCHG_DBL
+
+union __u128_halves {
+	u128 full;
+	struct {
+		u64 low, high;
+	};
+};
+
+#define __CMPXCHG128(name, mb, rel, cl...)				\
+static __always_inline u128						\
+__ll_sc__cmpxchg128##name(volatile u128 *ptr, u128 old, u128 new)	\
+{									\
+	union __u128_halves r, o = { .full = (old) },			\
+			       n = { .full = (new) };			\
+	unsigned int tmp;						\
+									\
+	asm volatile("// __cmpxchg128" #name "\n"			\
+	"	prfm	pstl1strm, %[v]\n"				\
+	"1:	ldxp	%[rl], %[rh], %[v]\n"				\
+	"	cmp	%[rl], %[ol]\n"					\
+	"	ccmp	%[rh], %[oh], 0, eq\n"				\
+	"	b.ne	2f\n"						\
+	"	st" #rel "xp	%w[tmp], %[nl], %[nh], %[v]\n"		\
+	"	cbnz	%w[tmp], 1b\n"					\
+	"	" #mb "\n"						\
+	"2:"								\
+	: [v] "+Q" (*(u128 *)ptr),					\
+	  [rl] "=&r" (r.low), [rh] "=&r" (r.high),			\
+	  [tmp] "=&r" (tmp)						\
+	: [ol] "r" (o.low), [oh] "r" (o.high),				\
+	  [nl] "r" (n.low), [nh] "r" (n.high)				\
+	: "cc", ##cl);							\
+									\
+	return r.full;							\
+}
+
+__CMPXCHG128(   ,        ,  )
+__CMPXCHG128(_mb, dmb ish, l, "memory")
+
+#undef __CMPXCHG128
+
 #undef K
 
 #endif	/* __ASM_ATOMIC_LL_SC_H */
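
Roughly, the LDXP/STXP sequence generated above behaves like the
following C (a sketch only: the atomicity comes from the exclusive
monitor, which plain C cannot express, and ldxp128()/stxp128_failed()
are invented names for the paired exclusive accesses):

	do {
		r.full = ldxp128(ptr);			/* 1: ldxp, load-exclusive pair */
		if (r.low != o.low || r.high != o.high)	/*    cmp + ccmp                */
			break;				/*    b.ne 2f, mismatch         */
	} while (stxp128_failed(ptr, n.full));		/*    st[l]xp + cbnz, retry     */
	return r.full;					/* 2: previous value either way */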
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 319958b..c781281 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -317,4 +317,35 @@ __CMPXCHG_DBL(_mb, al, "memory")
 
 #undef __CMPXCHG_DBL
 
+#define __CMPXCHG128(name, mb, cl...)					\
+static __always_inline u128						\
+__lse__cmpxchg128##name(volatile u128 *ptr, u128 old, u128 new)		\
+{									\
+	union __u128_halves r, o = { .full = (old) },			\
+			       n = { .full = (new) };			\
+	register unsigned long x0 asm ("x0") = o.low;			\
+	register unsigned long x1 asm ("x1") = o.high;			\
+	register unsigned long x2 asm ("x2") = n.low;			\
+	register unsigned long x3 asm ("x3") = n.high;			\
+	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
+									\
+	asm volatile(							\
+	__LSE_PREAMBLE							\
+	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
+	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
+	  [v] "+Q" (*(u128 *)ptr)					\
+	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
+	  [oldval1] "r" (o.low), [oldval2] "r" (o.high)			\
+	: cl);								\
+									\
+	r.low = x0; r.high = x1;					\
+									\
+	return r.full;							\
+}
+
+__CMPXCHG128(   ,   )
+__CMPXCHG128(_mb, al, "memory")
+
+#undef __CMPXCHG128
+
 #endif	/* __ASM_ATOMIC_LSE_H */
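
Unlike the LL/SC loop, LSE does the whole operation with one CASP
instruction, which requires each 128-bit operand to sit in an even/odd
pair of consecutive registers; that is what the explicit x0..x4
register variables pin down. A sketch of reaching the generated helper
directly (real callers only go through the arch_cmpxchg128() wrappers
added to cmpxchg.h below):

	/* Illustrative only; not part of this patch. */
	static u128 sketch_lse_swap(volatile u128 *p, u128 old, u128 new)
	{
		return __lse__cmpxchg128_mb(p, old, new);	/* caspal */
	}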
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index c6bc5d8..097b832 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -146,6 +146,19 @@ __CMPXCHG_DBL(_mb)
 
 #undef __CMPXCHG_DBL
 
+#define __CMPXCHG128(name)						\
+static inline u128 __cmpxchg128##name(volatile u128 *ptr,		\
+				      u128 old, u128 new)		\
+{									\
+	return __lse_ll_sc_body(_cmpxchg128##name,			\
+				ptr, old, new);				\
+}
+
+__CMPXCHG128(   )
+__CMPXCHG128(_mb)
+
+#undef __CMPXCHG128
+
 #define __CMPXCHG_GEN(sfx)						\
 static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr,	\
 					   unsigned long old,		\
@@ -228,6 +241,19 @@ __CMPXCHG_GEN(_mb)
 	__ret;									\
 })
 
+/* cmpxchg128 */
+#define system_has_cmpxchg128()		1
+
+#define arch_cmpxchg128(ptr, o, n)						\
+({										\
+	__cmpxchg128_mb((ptr), (o), (n));					\
+})
+
+#define arch_cmpxchg128_local(ptr, o, n)					\
+({										\
+	__cmpxchg128((ptr), (o), (n));						\
+})
+
 #define __CMPWAIT_CASE(w, sfx, sz)					\
 static inline void __cmpwait_case_##sz(volatile void *ptr,		\
 				       unsigned long val)		\
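
The two arch_ wrappers in this hunk split along ordering:
arch_cmpxchg128() resolves to the fully ordered __cmpxchg128_mb(),
while arch_cmpxchg128_local() resolves to the unordered __cmpxchg128()
and is meant for data that is not concurrently modified from other
CPUs. A sketch of the intended usage split (the caller is invented):

	static void sketch_ordering(u128 *shared, u128 *cpu_local, u128 o, u128 n)
	{
		arch_cmpxchg128(shared, o, n);		/* full ordering */
		arch_cmpxchg128_local(cpu_local, o, n);	/* no ordering   */
	}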
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 06e0e42..7517376 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -224,4 +224,18 @@ static __always_inline int __cmpxchg_double(unsigned long p1, unsigned long p2,
 			 (unsigned long)(n1), (unsigned long)(n2));	\
 })
 
+#define system_has_cmpxchg128()		1
+
+static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new)
+{
+	asm volatile(
+		"	cdsg	%[old],%[new],%[ptr]\n"
+		: [old] "+d" (old), [ptr] "+QS" (*ptr)
+		: [new] "d" (new)
+		: "memory", "cc");
+	return old;
+}
+
+#define arch_cmpxchg128		arch_cmpxchg128
+
 #endif /* __ASM_CMPXCHG_H */
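
CDSG compares the quadword-aligned 128 bits at ptr with the register
pair holding 'old' and, on a match, stores 'new'; on a mismatch the
pair is loaded with the memory value instead, so returning 'old' after
the asm yields the previous contents either way. A try_ form then
falls out generically, roughly like this (a sketch; the real fallback
comes from the generated atomic headers):

	static inline bool sketch_try_cmpxchg128(volatile u128 *ptr, u128 *oldp, u128 new)
	{
		u128 seen = arch_cmpxchg128(ptr, *oldp, new);

		if (seen == *oldp)
			return true;
		*oldp = seen;	/* report the value actually found */
		return false;
	}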
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index 6ba80ce..d12f9ed 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -103,6 +103,7 @@ static inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *pold, u64 new)
 
 #endif
 
-#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)
+#define system_has_cmpxchg_double()	boot_cpu_has(X86_FEATURE_CX8)
+#define system_has_cmpxchg64()		boot_cpu_has(X86_FEATURE_CX8)
 
 #endif /* _ASM_X86_CMPXCHG_32_H */
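
32-bit x86 tops out at the 8-byte CMPXCHG8B, which the CX8 CPUID
feature advertises, hence system_has_cmpxchg64() here and no
cmpxchg128() for this architecture. A guard sketch (the caller is
invented):

	static bool sketch_cmpxchg64(u64 *p, u64 *oldp, u64 new)
	{
		if (!system_has_cmpxchg64())
			return false;	/* CPU lacks CMPXCHG8B */
		return arch_try_cmpxchg64(p, oldp, new);
	}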
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 0d3beb2..fe29107 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -20,6 +20,68 @@
 	arch_try_cmpxchg((ptr), (po), (n));				\
 })
 
-#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)
+union __u128_halves {
+	u128 full;
+	struct {
+		u64 low, high;
+	};
+};
+
+#define __arch_cmpxchg128(_ptr, _old, _new, _lock)			\
+({									\
+	union __u128_halves o = { .full = (_old), },			\
+			    n = { .full = (_new), };			\
+									\
+	asm volatile(_lock "cmpxchg16b %[ptr]"				\
+		     : [ptr] "+m" (*(_ptr)),				\
+		       "+a" (o.low), "+d" (o.high)			\
+		     : "b" (n.low), "c" (n.high)			\
+		     : "memory");					\
+									\
+	o.full;								\
+})
+
+static __always_inline u128 arch_cmpxchg128(volatile u128 *ptr, u128 old, u128 new)
+{
+	return __arch_cmpxchg128(ptr, old, new, LOCK_PREFIX);
+}
+
+static __always_inline u128 arch_cmpxchg128_local(volatile u128 *ptr, u128 old, u128 new)
+{
+	return __arch_cmpxchg128(ptr, old, new,);
+}
+
+#define __arch_try_cmpxchg128(_ptr, _oldp, _new, _lock)			\
+({									\
+	union __u128_halves o = { .full = *(_oldp), },			\
+			    n = { .full = (_new), };			\
+	bool ret;							\
+									\
+	asm volatile(_lock "cmpxchg16b %[ptr]"				\
+		     CC_SET(e)						\
+		     : CC_OUT(e) (ret),					\
+		       [ptr] "+m" (*(_ptr)),				\
+		       "+a" (o.low), "+d" (o.high)			\
+		     : "b" (n.low), "c" (n.high)			\
+		     : "memory");					\
+									\
+	if (unlikely(!ret))						\
+		*(_oldp) = o.full;					\
+									\
+	likely(ret);							\
+})
+
+static __always_inline bool arch_try_cmpxchg128(volatile u128 *ptr, u128 *oldp, u128 new)
+{
+	return __arch_try_cmpxchg128(ptr, oldp, new, LOCK_PREFIX);
+}
+
+static __always_inline bool arch_try_cmpxchg128_local(volatile u128 *ptr, u128 *oldp, u128 new)
+{
+	return __arch_try_cmpxchg128(ptr, oldp, new,);
+}
+
+#define system_has_cmpxchg_double()	boot_cpu_has(X86_FEATURE_CX16)
+#define system_has_cmpxchg128()		boot_cpu_has(X86_FEATURE_CX16)
 
 #endif /* _ASM_X86_CMPXCHG_64_H */
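
CMPXCHG16B implicitly compares RDX:RAX with the memory operand and, on
a match, stores RCX:RBX, which the "+a"/"+d" and "b"/"c" constraints
encode; success lands in ZF and is captured via CC_SET(e)/CC_OUT(e).
The canonical retry loop on top of the try_ variant looks roughly like
this (compute_next() is an invented stand-in for whatever derives the
new value from the old one):

	static void sketch_update(u128 *slot)
	{
		u128 old = *slot;	/* racy snapshot; the loop repairs it */
		u128 new;

		do {
			new = compute_next(old);	/* invented helper */
		} while (!arch_try_cmpxchg128(slot, &old, new));
	}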
