Linux-mm Archive mirror
 help / color / mirror / Atom feed
From: Byungchul Park <byungchul@sk.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: kernel_team@skhynix.com, akpm@linux-foundation.org,
	ying.huang@intel.com, vernhao@tencent.com,
	mgorman@techsingularity.net, hughd@google.com,
	willy@infradead.org, david@redhat.com, peterz@infradead.org,
	luto@kernel.org, tglx@linutronix.de, mingo@redhat.com,
	bp@alien8.de, dave.hansen@linux.intel.com, rjgolo@gmail.com
Subject: [PATCH v10 05/12] mm: buddy: make room for a new variable, ugen, in struct page
Date: Fri, 10 May 2024 15:51:59 +0900	[thread overview]
Message-ID: <20240510065206.76078-6-byungchul@sk.com> (raw)
In-Reply-To: <20240510065206.76078-1-byungchul@sk.com>

Functionally, no change.  This is a preparation for the luf mechanism,
which tracks the need for a tlb flush for each page residing in buddy,
using a generation number in struct page.

Fortunately, since the private field in struct page is used only to
store page order in buddy, ranging from 0 to MAX_PAGE_ORDER, it can be
covered with an unsigned short int.  So split it into two smaller ones,
order and ugen, so that both can be used in buddy at the same time.

Signed-off-by: Byungchul Park <byungchul@sk.com>
---
 include/linux/mm_types.h | 40 +++++++++++++++++++++++++++++++++-------
 mm/internal.h            |  4 ++--
 mm/page_alloc.c          | 13 ++++++++-----
 3 files changed, 43 insertions(+), 14 deletions(-)

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index db0adf5721cc..cd4ec0d10ffb 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -108,13 +108,25 @@ struct page {
 				pgoff_t index;		/* Our offset within mapping. */
 				unsigned long share;	/* share count for fsdax */
 			};
-			/**
-			 * @private: Mapping-private opaque data.
-			 * Usually used for buffer_heads if PagePrivate.
-			 * Used for swp_entry_t if PageSwapCache.
-			 * Indicates order in the buddy system if PageBuddy.
-			 */
-			unsigned long private;
+			union {
+				/**
+				 * @private: Mapping-private opaque data.
+				 * Usually used for buffer_heads if PagePrivate.
+				 * Used for swp_entry_t if PageSwapCache.
+				 */
+				unsigned long private;
+				struct {
+					/*
+					 * Indicates order in the buddy system if PageBuddy.
+					 */
+					unsigned short int order;
+					/*
+					 * Tracks need of tlb flush used by luf,
+					 * which stands for lazy unmap flush.
+					 */
+					unsigned short int ugen;
+				};
+			};
 		};
 		struct {	/* page_pool used by netstack */
 			/**
@@ -521,6 +533,20 @@ static inline void set_page_private(struct page *page, unsigned long private)
 	page->private = private;
 }
 
+#define page_buddy_order(page)		((page)->order)
+
+static inline void set_page_buddy_order(struct page *page, unsigned int order)
+{
+	page->order = (unsigned short int)order;
+}
+
+#define page_buddy_ugen(page)		((page)->ugen)
+
+static inline void set_page_buddy_ugen(struct page *page, unsigned short int ugen)
+{
+	page->ugen = ugen;
+}
+
 static inline void *folio_get_private(struct folio *folio)
 {
 	return folio->private;
diff --git a/mm/internal.h b/mm/internal.h
index c6483f73ec13..eb9c7d8650fc 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -453,7 +453,7 @@ struct alloc_context {
 static inline unsigned int buddy_order(struct page *page)
 {
 	/* PageBuddy() must be checked by the caller */
-	return page_private(page);
+	return page_buddy_order(page);
 }
 
 /*
@@ -467,7 +467,7 @@ static inline unsigned int buddy_order(struct page *page)
  * times, potentially observing different values in the tests and the actual
  * use of the result.
  */
-#define buddy_order_unsafe(page)	READ_ONCE(page_private(page))
+#define buddy_order_unsafe(page)	READ_ONCE(page_buddy_order(page))
 
 /*
  * This function checks whether a page is free && is the buddy
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 33d4a1be927b..917b22b429d1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -565,9 +565,12 @@ void prep_compound_page(struct page *page, unsigned int order)
 	prep_compound_head(page, order);
 }
 
-static inline void set_buddy_order(struct page *page, unsigned int order)
+static inline void set_buddy_order_ugen(struct page *page,
+					unsigned int order,
+					unsigned short int ugen)
 {
-	set_page_private(page, order);
+	set_page_buddy_order(page, order);
+	set_page_buddy_ugen(page, ugen);
 	__SetPageBuddy(page);
 }
 
@@ -834,7 +837,7 @@ static inline void __free_one_page(struct page *page,
 	}
 
 done_merging:
-	set_buddy_order(page, order);
+	set_buddy_order_ugen(page, order, 0);
 
 	if (fpi_flags & FPI_TO_TAIL)
 		to_tail = true;
@@ -1344,7 +1347,7 @@ static inline void expand(struct zone *zone, struct page *page,
 			continue;
 
 		__add_to_free_list(&page[size], zone, high, migratetype, false);
-		set_buddy_order(&page[size], high);
+		set_buddy_order_ugen(&page[size], high, 0);
 		nr_added += size;
 	}
 	account_freepages(zone, nr_added, migratetype);
@@ -6802,7 +6805,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
 			continue;
 
 		add_to_free_list(current_buddy, zone, high, migratetype, false);
-		set_buddy_order(current_buddy, high);
+		set_buddy_order_ugen(current_buddy, high, 0);
 	}
 }
 
-- 
2.17.1



  parent reply	other threads:[~2024-05-10  6:52 UTC|newest]

Thread overview: 49+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-05-10  6:51 [PATCH v10 00/12] LUF(Lazy Unmap Flush) reducing tlb numbers over 90% Byungchul Park
2024-05-10  6:51 ` [PATCH v10 01/12] x86/tlb: add APIs manipulating tlb batch's arch data Byungchul Park
2024-05-10  6:51 ` [PATCH v10 02/12] arm64: tlbflush: " Byungchul Park
2024-05-10  6:51 ` [PATCH v10 03/12] riscv, tlb: " Byungchul Park
2024-05-10  6:51 ` [PATCH v10 04/12] x86/tlb, riscv/tlb, mm/rmap: separate arch_tlbbatch_clear() out of arch_tlbbatch_flush() Byungchul Park
2024-05-10  6:51 ` Byungchul Park [this message]
2024-05-10  6:52 ` [PATCH v10 06/12] mm: add folio_put_ugen() to deliver unmap generation number to pcp or buddy Byungchul Park
2024-05-10  6:52 ` [PATCH v10 07/12] mm: add a parameter, unmap generation number, to free_unref_folios() Byungchul Park
2024-05-10  6:52 ` [PATCH v10 08/12] mm/rmap: recognize read-only tlb entries during batched tlb flush Byungchul Park
2024-05-10  6:52 ` [PATCH v10 09/12] mm: implement LUF(Lazy Unmap Flush) defering tlb flush when folios get unmapped Byungchul Park
2024-05-10  6:52 ` [PATCH v10 10/12] mm: separate move/undo parts from migrate_pages_batch() Byungchul Park
2024-05-10  6:52 ` [PATCH v10 11/12] mm, migrate: apply luf mechanism to unmapping during migration Byungchul Park
2024-05-10  6:52 ` [PATCH v10 12/12] mm, vmscan: apply luf mechanism to unmapping during folio reclaim Byungchul Park
2024-05-11  6:54 ` [PATCH v10 00/12] LUF(Lazy Unmap Flush) reducing tlb numbers over 90% Huang, Ying
2024-05-13  1:41   ` Byungchul Park
2024-05-11  7:15 ` Huang, Ying
2024-05-13  1:44   ` Byungchul Park
2024-05-22  2:16     ` Byungchul Park
2024-05-22  7:38       ` Huang, Ying
2024-05-22 10:27         ` Byungchul Park
2024-05-22 14:15           ` Byungchul Park
2024-05-24 17:16 ` Dave Hansen
2024-05-27  1:57   ` Byungchul Park
2024-05-27  2:43     ` Dave Hansen
2024-05-27  3:46       ` Byungchul Park
2024-05-27  4:19         ` Byungchul Park
2024-05-27  4:25           ` Byungchul Park
2024-05-27 22:58       ` Byungchul Park
2024-05-29  2:16         ` Huang, Ying
2024-05-30  1:02           ` Byungchul Park
2024-05-27  3:10     ` Huang, Ying
2024-05-27  3:56       ` Byungchul Park
2024-05-28 15:14       ` Dave Hansen
2024-05-29  5:00         ` Byungchul Park
2024-05-29 16:41           ` Dave Hansen
2024-05-30  0:50             ` Byungchul Park
2024-05-30  0:59               ` Byungchul Park
2024-05-30  1:11               ` Huang, Ying
2024-05-30  1:33                 ` Byungchul Park
2024-05-30  7:18                 ` Byungchul Park
2024-05-30  8:24                   ` Huang, Ying
2024-05-30  8:41                     ` Byungchul Park
2024-05-30 13:50                       ` Dave Hansen
2024-05-31  2:06                         ` Byungchul Park
2024-05-30  9:33                     ` Byungchul Park
2024-05-31  1:45                       ` Huang, Ying
2024-05-31  2:20                         ` Byungchul Park
2024-05-28  8:41 ` David Hildenbrand
2024-05-29  4:39   ` Byungchul Park

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20240510065206.76078-6-byungchul@sk.com \
    --to=byungchul@sk.com \
    --cc=akpm@linux-foundation.org \
    --cc=bp@alien8.de \
    --cc=dave.hansen@linux.intel.com \
    --cc=david@redhat.com \
    --cc=hughd@google.com \
    --cc=kernel_team@skhynix.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=luto@kernel.org \
    --cc=mgorman@techsingularity.net \
    --cc=mingo@redhat.com \
    --cc=peterz@infradead.org \
    --cc=rjgolo@gmail.com \
    --cc=tglx@linutronix.de \
    --cc=vernhao@tencent.com \
    --cc=willy@infradead.org \
    --cc=ying.huang@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).