gfs2.lists.linux.dev archive mirror
 help / color / mirror / Atom feed
From: Andreas Gruenbacher <agruenba@redhat.com>
To: gfs2@lists.linux.dev
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Subject: [PATCH 10/13] gfs2: Add some missing quota locking
Date: Tue, 18 Jun 2024 18:05:14 +0200	[thread overview]
Message-ID: <20240618160517.901589-11-agruenba@redhat.com> (raw)
In-Reply-To: <20240618160517.901589-1-agruenba@redhat.com>

The quota code is missing some locking between local quota changes and
syncing those quota changes to the global quota file (gfs2_quota_sync);
in particular, qd->qd_change needs to be kept in sync with the
QDF_CHANGE flag and the number of references held.  Use the
qd->qd_lockref.lock spinlock for that.

With the qd->qd_lockref.lock spinlock held, we can no longer call
lockref_get(), so turn qd_hold() into a variant that assumes that the
lock is held.  This function is really supposed to take an additional
reference when one or more references are already held, so check for
that instead of checking if the lockref is dead.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
---
 fs/gfs2/quota.c | 82 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 53 insertions(+), 29 deletions(-)

diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index ae02403407aa..283c6ff21911 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -316,11 +316,11 @@ static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
 }
 
 
-static void qd_hold(struct gfs2_quota_data *qd)
+static void __qd_hold(struct gfs2_quota_data *qd)
 {
 	struct gfs2_sbd *sdp = qd->qd_sbd;
-	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
-	lockref_get(&qd->qd_lockref);
+	gfs2_assert(sdp, qd->qd_lockref.count > 0);
+	qd->qd_lockref.count++;
 }
 
 static void qd_put(struct gfs2_quota_data *qd)
@@ -462,19 +462,27 @@ static void bh_put(struct gfs2_quota_data *qd)
 static bool qd_grab_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
 			 u64 sync_gen)
 {
+	bool ret = false;
+
+	spin_lock(&qd->qd_lockref.lock);
 	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
 	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
 	    qd->qd_sync_gen >= sync_gen)
-		return false;
+		goto out;
 
-	if (!lockref_get_not_dead(&qd->qd_lockref))
-		return false;
+	if (__lockref_is_dead(&qd->qd_lockref))
+		goto out;
+	qd->qd_lockref.count++;
 
 	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
 	set_bit(QDF_LOCKED, &qd->qd_flags);
 	qd->qd_change_sync = qd->qd_change;
 	slot_hold(qd);
-	return true;
+	ret = true;
+
+out:
+	spin_unlock(&qd->qd_lockref.lock);
+	return ret;
 }
 
 static void qd_ungrab_sync(struct gfs2_quota_data *qd)
@@ -493,8 +501,10 @@ static void qdsb_put(struct gfs2_quota_data *qd)
 
 static void qd_unlock(struct gfs2_quota_data *qd)
 {
+	spin_lock(&qd->qd_lockref.lock);
 	gfs2_assert_warn(qd->qd_sbd, test_bit(QDF_LOCKED, &qd->qd_flags));
 	clear_bit(QDF_LOCKED, &qd->qd_flags);
+	spin_unlock(&qd->qd_lockref.lock);
 	qdsb_put(qd);
 }
 
@@ -663,6 +673,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 	struct gfs2_sbd *sdp = qd->qd_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
 	struct gfs2_quota_change *qc = qd->qd_bh_qc;
+	bool needs_put = false;
 	s64 x;
 
 	mutex_lock(&sdp->sd_quota_mutex);
@@ -674,26 +685,24 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 	 * used, and we assume a value of 0 otherwise.
 	 */
 
+	spin_lock(&qd->qd_lockref.lock);
+
 	x = 0;
 	if (test_bit(QDF_CHANGE, &qd->qd_flags))
 		x = be64_to_cpu(qc->qc_change);
 	x += change;
-
-	spin_lock(&qd_lock);
 	qd->qd_change += change;
-	spin_unlock(&qd_lock);
 
 	if (!x && test_bit(QDF_CHANGE, &qd->qd_flags)) {
 		/* The slot in the quota change file becomes unused. */
 		clear_bit(QDF_CHANGE, &qd->qd_flags);
 		qc->qc_flags = 0;
 		qc->qc_id = 0;
-		slot_put(qd);
-		qd_put(qd);
+		needs_put = true;
 	} else if (x && !test_bit(QDF_CHANGE, &qd->qd_flags)) {
 		/* The slot in the quota change file becomes used. */
 		set_bit(QDF_CHANGE, &qd->qd_flags);
-		qd_hold(qd);
+		__qd_hold(qd);
 		slot_hold(qd);
 
 		qc->qc_flags = 0;
@@ -703,6 +712,12 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 	}
 	qc->qc_change = cpu_to_be64(x);
 
+	spin_unlock(&qd->qd_lockref.lock);
+
+	if (needs_put) {
+		slot_put(qd);
+		qd_put(qd);
+	}
 	if (change < 0) /* Reset quiet flag if we freed some blocks */
 		clear_bit(QDF_QMSG_QUIET, &qd->qd_flags);
 	mutex_unlock(&sdp->sd_quota_mutex);
@@ -844,6 +859,7 @@ static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
 	be64_add_cpu(&q.qu_value, change);
 	if (((s64)be64_to_cpu(q.qu_value)) < 0)
 		q.qu_value = 0; /* Never go negative on quota usage */
+	spin_lock(&qd->qd_lockref.lock);
 	qd->qd_qb.qb_value = q.qu_value;
 	if (fdq) {
 		if (fdq->d_fieldmask & QC_SPC_SOFT) {
@@ -859,6 +875,7 @@ static int gfs2_adjust_quota(struct gfs2_sbd *sdp, loff_t loc,
 			qd->qd_qb.qb_value = q.qu_value;
 		}
 	}
+	spin_unlock(&qd->qd_lockref.lock);
 
 	err = gfs2_write_disk_quota(sdp, &q, loc);
 	if (!err) {
@@ -990,7 +1007,9 @@ static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
 	qlvb->qb_limit = q.qu_limit;
 	qlvb->qb_warn = q.qu_warn;
 	qlvb->qb_value = q.qu_value;
+	spin_lock(&qd->qd_lockref.lock);
 	qd->qd_qb = *qlvb;
+	spin_unlock(&qd->qd_lockref.lock);
 
 	return 0;
 }
@@ -1012,7 +1031,9 @@ static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
 	if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
 		force_refresh = FORCE;
 
+	spin_lock(&qd->qd_lockref.lock);
 	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
+	spin_unlock(&qd->qd_lockref.lock);
 
 	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
 		gfs2_glock_dq_uninit(q_gh);
@@ -1085,20 +1106,19 @@ static bool need_sync(struct gfs2_quota_data *qd)
 	struct gfs2_tune *gt = &sdp->sd_tune;
 	s64 value, change, limit;
 	unsigned int num, den;
+	int ret = false;
 
+	spin_lock(&qd->qd_lockref.lock);
 	if (!qd->qd_qb.qb_limit)
-		return false;
+		goto out;
 
-	spin_lock(&qd_lock);
 	change = qd->qd_change;
-	spin_unlock(&qd_lock);
-
 	if (change <= 0)
-		return false;
+		goto out;
 	value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
 	limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
 	if (value >= limit)
-		return false;
+		goto out;
 
 	spin_lock(&gt->gt_spin);
 	num = gt->gt_quota_scale_num;
@@ -1108,8 +1128,12 @@ static bool need_sync(struct gfs2_quota_data *qd)
 	change *= gfs2_jindex_size(sdp) * num;
 	change = div_s64(change, den);
 	if (value + change < limit)
-		return false;
-	return true;
+		goto out;
+
+	ret = true;
+out:
+	spin_unlock(&qd->qd_lockref.lock);
+	return ret;
 }
 
 void gfs2_quota_unlock(struct gfs2_inode *ip)
@@ -1211,12 +1235,12 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
 		      qid_eq(qd->qd_id, make_kqid_gid(gid))))
 			continue;
 
+		spin_lock(&qd->qd_lockref.lock);
 		warn = (s64)be64_to_cpu(qd->qd_qb.qb_warn);
 		limit = (s64)be64_to_cpu(qd->qd_qb.qb_limit);
 		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
-		spin_lock(&qd_lock);
 		value += qd->qd_change;
-		spin_unlock(&qd_lock);
+		spin_unlock(&qd->qd_lockref.lock);
 
 		if (limit > 0 && (limit - value) < ap->allowed)
 			ap->allowed = limit - value;
@@ -1282,12 +1306,12 @@ static bool qd_changed(struct gfs2_sbd *sdp)
 
 	spin_lock(&qd_lock);
 	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
-		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
-		    !test_bit(QDF_CHANGE, &qd->qd_flags))
-			continue;
-
-		changed = true;
-		break;
+		spin_lock(&qd->qd_lockref.lock);
+		changed = !test_bit(QDF_LOCKED, &qd->qd_flags) &&
+			  test_bit(QDF_CHANGE, &qd->qd_flags);
+		spin_unlock(&qd->qd_lockref.lock);
+		if (changed)
+			break;
 	}
 	spin_unlock(&qd_lock);
 	return changed;
-- 
2.45.1


  parent reply	other threads:[~2024-06-18 16:05 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-06-18 16:05 [PATCH 00/13] gfs2: quota changes Andreas Gruenbacher
2024-06-18 16:05 ` [PATCH 01/13] gfs2: Minor gfs2_quota_init error path cleanup Andreas Gruenbacher
2024-06-18 16:05 ` [PATCH 02/13] gfs2: Check quota consistency on mount Andreas Gruenbacher
2024-06-18 16:05 ` [PATCH 03/13] gfs2: Revert "introduce qd_bh_get_or_undo" Andreas Gruenbacher
2024-06-18 16:05 ` [PATCH 04/13] gfs2: qd_check_sync cleanups Andreas Gruenbacher
2024-06-18 16:05 ` [PATCH 05/13] gfs2: Revert "ignore negated quota changes" Andreas Gruenbacher
2024-06-18 16:05 ` [PATCH 06/13] gfs2: Revert "Add quota_change type" Andreas Gruenbacher
2024-06-18 16:05 ` [PATCH 07/13] gfs2: Fix and clean up function do_qc Andreas Gruenbacher
2024-06-18 16:05 ` [PATCH 08/13] gfs2: quota need_sync cleanup Andreas Gruenbacher
2024-06-18 16:05 ` [PATCH 09/13] gfs2: Fold qd_fish into gfs2_quota_sync Andreas Gruenbacher
2024-06-18 16:05 ` Andreas Gruenbacher [this message]
2024-06-18 16:05 ` [PATCH 11/13] gfs2: Get rid of some unnecessary quota locking Andreas Gruenbacher
2024-06-20 14:22   ` Alexander Aring
2024-06-20 14:45     ` Andreas Gruenbacher
2024-06-18 16:05 ` [PATCH 12/13] gfs2: Be more careful with the quota sync generation Andreas Gruenbacher
2024-06-18 16:05 ` [PATCH 13/13] gfs2: Revert "check for no eligible quota changes" Andreas Gruenbacher

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240618160517.901589-11-agruenba@redhat.com \
    --to=agruenba@redhat.com \
    --cc=gfs2@lists.linux.dev \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).