From: Alexander Aring <aahringo@redhat.com>
To: teigland@redhat.com
Cc: gfs2@lists.linux.dev, aahringo@redhat.com
Subject: [PATCH dlm/next 05/10] dlm: convert ls_waiters_mutex to spinlock
Date: Tue, 24 Oct 2023 20:53:48 -0400 [thread overview]
Message-ID: <20231025005353.855904-5-aahringo@redhat.com> (raw)
In-Reply-To: <20231025005353.855904-1-aahringo@redhat.com>
This patch converts the per-lockspace waiters lock from a mutex
(ls_waiters_mutex) to a spinlock (ls_waiters_lock).
Signed-off-by: Alexander Aring <aahringo@redhat.com>
---
fs/dlm/debug_fs.c | 4 ++--
fs/dlm/dlm_internal.h | 2 +-
fs/dlm/lock.c | 20 ++++++++++----------
fs/dlm/lockspace.c | 2 +-
4 files changed, 14 insertions(+), 14 deletions(-)
diff --git a/fs/dlm/debug_fs.c b/fs/dlm/debug_fs.c
index 1ef59f7223a6..26abeff5b602 100644
--- a/fs/dlm/debug_fs.c
+++ b/fs/dlm/debug_fs.c
@@ -824,7 +824,7 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf,
mutex_lock(&debug_buf_lock);
dlm_lock_recovery(ls);
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock(&ls->ls_waiters_lock);
memset(debug_buf, 0, sizeof(debug_buf));
list_for_each_entry(lkb, &ls->ls_waiters, lkb_wait_reply) {
@@ -835,7 +835,7 @@ static ssize_t waiters_read(struct file *file, char __user *userbuf,
break;
pos += ret;
}
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock(&ls->ls_waiters_lock);
dlm_unlock_recovery(ls);
rv = simple_read_from_buffer(userbuf, count, ppos, debug_buf, pos);
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 66c67b17d273..0f37436be075 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -564,7 +564,7 @@ struct dlm_ls {
struct dlm_rsbtable *ls_rsbtbl;
uint32_t ls_rsbtbl_size;
- struct mutex ls_waiters_mutex;
+ spinlock_t ls_waiters_lock;
struct list_head ls_waiters; /* lkbs needing a reply */
struct mutex ls_orphans_mutex;
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 79f1f741af13..005e2d167cbd 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1407,7 +1407,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
int error = 0;
int wc;
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock(&ls->ls_waiters_lock);
if (is_overlap_unlock(lkb) ||
(is_overlap_cancel(lkb) && (mstype == DLM_MSG_CANCEL))) {
@@ -1447,7 +1447,7 @@ static int add_to_waiters(struct dlm_lkb *lkb, int mstype, int to_nodeid)
log_error(ls, "addwait error %x %d flags %x %d %d %s",
lkb->lkb_id, error, dlm_iflags_val(lkb), mstype,
lkb->lkb_wait_type, lkb->lkb_resource->res_name);
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock(&ls->ls_waiters_lock);
return error;
}
@@ -1546,9 +1546,9 @@ static int remove_from_waiters(struct dlm_lkb *lkb, int mstype)
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
int error;
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock(&ls->ls_waiters_lock);
error = _remove_from_waiters(lkb, mstype, NULL);
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock(&ls->ls_waiters_lock);
return error;
}
@@ -1566,13 +1566,13 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb,
int error;
if (!local)
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock(&ls->ls_waiters_lock);
else
WARN_ON_ONCE(!rwsem_is_locked(&ls->ls_in_recovery) ||
!dlm_locking_stopped(ls));
error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms);
if (!local)
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock(&ls->ls_waiters_lock);
return error;
}
@@ -4990,7 +4990,7 @@ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
{
struct dlm_lkb *lkb = NULL, *iter;
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock(&ls->ls_waiters_lock);
list_for_each_entry(iter, &ls->ls_waiters, lkb_wait_reply) {
if (test_bit(DLM_IFL_RESEND_BIT, &iter->lkb_iflags)) {
hold_lkb(iter);
@@ -4998,7 +4998,7 @@ static struct dlm_lkb *find_resend_waiter(struct dlm_ls *ls)
break;
}
}
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock(&ls->ls_waiters_lock);
return lkb;
}
@@ -5065,9 +5065,9 @@ int dlm_recover_waiters_post(struct dlm_ls *ls)
while (!atomic_dec_and_test(&lkb->lkb_wait_count))
unhold_lkb(lkb);
- mutex_lock(&ls->ls_waiters_mutex);
+ spin_lock(&ls->ls_waiters_lock);
list_del_init(&lkb->lkb_wait_reply);
- mutex_unlock(&ls->ls_waiters_mutex);
+ spin_unlock(&ls->ls_waiters_lock);
if (oc || ou) {
/* do an unlock or cancel instead of resending */
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index cf7528144827..79001cc646d8 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -515,7 +515,7 @@ static int new_lockspace(const char *name, const char *cluster,
spin_lock_init(&ls->ls_lkbidr_spin);
INIT_LIST_HEAD(&ls->ls_waiters);
- mutex_init(&ls->ls_waiters_mutex);
+ spin_lock_init(&ls->ls_waiters_lock);
INIT_LIST_HEAD(&ls->ls_orphans);
mutex_init(&ls->ls_orphans_mutex);
--
2.39.3
next prev parent reply other threads:[~2023-10-25 0:54 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-10-25 0:53 [PATCH dlm/next 01/10] dlm: remove allocation parameter in msg allocation Alexander Aring
2023-10-25 0:53 ` [PATCH dlm/next 02/10] dlm: switch to GFP_ATOMIC in dlm allocations Alexander Aring
2023-10-25 0:53 ` [PATCH dlm/next 03/10] dlm: move root_list to ls_recover() stack Alexander Aring
2023-10-25 1:34 ` Alexander Aring
2023-10-25 0:53 ` [PATCH dlm/next 04/10] dlm: drop holding waiters mutex in waiters recovery Alexander Aring
2023-10-25 0:53 ` Alexander Aring [this message]
2023-10-25 0:53 ` [PATCH dlm/next 06/10] dlm: convert res_lock to spinlock Alexander Aring
2023-10-25 0:53 ` [PATCH dlm/next 07/10] dlm: make requestqueue handling non sleepable Alexander Aring
2023-10-25 0:53 ` [PATCH dlm/next 08/10] dlm: ls_recv_active semaphore to rwlock Alexander Aring
2023-10-25 0:53 ` [PATCH dlm/next 09/10] dlm: convert message parsing locks to disable bh Alexander Aring
2023-10-25 0:53 ` [PATCH dlm/next 10/10] dlm: do dlm message processing in softirq context Alexander Aring
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20231025005353.855904-5-aahringo@redhat.com \
--to=aahringo@redhat.com \
--cc=gfs2@lists.linux.dev \
--cc=teigland@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).