* [PATCH v2] mm: add more readable thp_vma_allowable_order_foo()
@ 2024-04-25 3:51 Kefeng Wang
2024-04-25 4:00 ` Matthew Wilcox
0 siblings, 1 reply; 6+ messages in thread
From: Kefeng Wang @ 2024-04-25 3:51 UTC
To: Andrew Morton; +Cc: Ryan Roberts, linux-mm, David Hildenbrand, Kefeng Wang
There are too many bool arguments in thp_vma_allowable_orders(), so add
some more readable thp_vma_allowable_order_foo() wrappers:
  thp_vma_allowable_orders_smaps() is used in the smaps code
  thp_vma_allowable_order[s]_pf() is used in the page fault path
  thp_vma_allowable_order_khugepaged() is used in the khugepaged scan and
  madvise_collapse() paths
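A minimal sketch of how a page-fault call site changes with these
wrappers (both forms are taken from the mm/memory.c hunk below):

  /* Before: the reader must recall what each bool position means. */
  if (pmd_none(*vmf.pmd) &&
      thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER))
          ret = create_huge_pmd(&vmf);

  /* After: the _pf suffix makes the page-fault context explicit. */
  if (pmd_none(*vmf.pmd) &&
      thp_vma_allowable_order_pf(vma, vm_flags, PMD_ORDER))
          ret = create_huge_pmd(&vmf);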
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
v2:
- use the new thp_vma_allowable_order_khugepaged() naming, as suggested
  by Ryan and David
fs/proc/task_mmu.c | 3 +--
include/linux/huge_mm.h | 14 ++++++++++++--
mm/khugepaged.c | 24 ++++++++++++------------
mm/memory.c | 8 ++++----
4 files changed, 29 insertions(+), 20 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f4259b7edfde..e95ec49bf190 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -871,8 +871,7 @@ static int show_smap(struct seq_file *m, void *v)
__show_smap(m, &mss, false);
seq_printf(m, "THPeligible: %8u\n",
- !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
- true, THP_ORDERS_ALL));
+ thp_vma_allowable_orders_smaps(vma, vma->vm_flags));
if (arch_pkeys_enabled())
seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 56c7ea73090b..87409e87c241 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -83,8 +83,18 @@ extern struct kobj_attribute shmem_enabled_attr;
*/
#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
-#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
- (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
+#define thp_vma_allowable_orders_smaps(vma, vm_flags) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, true, false, true, THP_ORDERS_ALL))
+
+#define thp_vma_allowable_orders_pf(vma, vm_flags, orders) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, false, true, true, orders))
+
+#define thp_vma_allowable_order_pf(vma, vm_flags, order) \
+ (!!thp_vma_allowable_orders_pf(vma, vm_flags, BIT(order)))
+
+#define thp_vma_allowable_order_khugepaged(vma, vm_flags, enforce_sysfs, order) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, false, false, enforce_sysfs, BIT(order)))
+
#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 2f73d2aa9ae8..006c8c9a5b68 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -453,8 +453,8 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
hugepage_flags_enabled()) {
- if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
- PMD_ORDER))
+ if (thp_vma_allowable_order_khugepaged(vma, vm_flags, true,
+ PMD_ORDER))
__khugepaged_enter(vma->vm_mm);
}
}
@@ -909,15 +909,15 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
return SCAN_ADDRESS_RANGE;
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
- cc->is_khugepaged, PMD_ORDER))
+ if (!thp_vma_allowable_order_khugepaged(vma, vma->vm_flags,
+ cc->is_khugepaged, PMD_ORDER))
return SCAN_VMA_CHECK;
/*
* Anon VMA expected, the address may be unmapped then
* remapped to file after khugepaged reaquired the mmap_lock.
*
- * thp_vma_allowable_order may return true for qualified file
- * vmas.
+ * thp_vma_allowable_order_khugepaged may return true for
+ * qualified file vmas.
*/
if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
return SCAN_PAGE_ANON;
@@ -1493,8 +1493,8 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
* and map it by a PMD, regardless of sysfs THP settings. As such, let's
* analogously elide sysfs THP settings here.
*/
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
- PMD_ORDER))
+ if (!thp_vma_allowable_order_khugepaged(vma, vma->vm_flags, false,
+ PMD_ORDER))
return SCAN_VMA_CHECK;
/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -2355,8 +2355,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
progress++;
break;
}
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
- true, PMD_ORDER)) {
+ if (!thp_vma_allowable_order_khugepaged(vma, vma->vm_flags, true,
+ PMD_ORDER)) {
skip:
progress++;
continue;
@@ -2693,8 +2693,8 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
*prev = vma;
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
- PMD_ORDER))
+ if (!thp_vma_allowable_order_khugepaged(vma, vma->vm_flags, false,
+ PMD_ORDER))
return -EINVAL;
cc = kmalloc(sizeof(*cc), GFP_KERNEL);
diff --git a/mm/memory.c b/mm/memory.c
index 09ed76e5b8c0..a1255fb2c709 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4329,8 +4329,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
* for this vma. Then filter out the orders that can't be allocated over
* the faulting address and still be fully contained in the vma.
*/
- orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
- BIT(PMD_ORDER) - 1);
+ orders = thp_vma_allowable_orders_pf(vma, vma->vm_flags,
+ BIT(PMD_ORDER) - 1);
orders = thp_vma_suitable_orders(vma, vmf->address, orders);
if (!orders)
@@ -5433,7 +5433,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
return VM_FAULT_OOM;
retry_pud:
if (pud_none(*vmf.pud) &&
- thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
+ thp_vma_allowable_order_pf(vma, vm_flags, PUD_ORDER)) {
ret = create_huge_pud(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
@@ -5467,7 +5467,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
goto retry_pud;
if (pmd_none(*vmf.pmd) &&
- thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
+ thp_vma_allowable_order_pf(vma, vm_flags, PMD_ORDER)) {
ret = create_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
--
2.41.0
* Re: [PATCH v2] mm: add more readable thp_vma_allowable_order_foo()
2024-04-25 3:51 [PATCH v2] mm: add more readable thp_vma_allowable_order_foo() Kefeng Wang
@ 2024-04-25 4:00 ` Matthew Wilcox
2024-04-25 7:18 ` Kefeng Wang
0 siblings, 1 reply; 6+ messages in thread
From: Matthew Wilcox @ 2024-04-25 4:00 UTC
To: Kefeng Wang; +Cc: Andrew Morton, Ryan Roberts, linux-mm, David Hildenbrand
On Thu, Apr 25, 2024 at 11:51:08AM +0800, Kefeng Wang wrote:
> There are too many bool arguments in thp_vma_allowable_orders(), so add
> some more readable thp_vma_allowable_order_foo() wrappers:
Here's an alternative approach I came up with and forgot to send out.
I take no position on which is better.
commit a761d4b9cf14
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: Tue Apr 16 00:25:09 2024 -0400
mm: Simplify thp_vma_allowable_order
Combine the three boolean arguments into one flags argument for
readability.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
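For comparison, a minimal sketch of the same mm/memory.c page-fault call
site using the flags variant (TVA_* names as defined in this patch):

  if (pmd_none(*vmf.pmd) &&
      thp_vma_allowable_order(vma, vm_flags,
                              TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER))
          ret = create_huge_pmd(&vmf);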
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 23fbab954c20..0ffa8902f973 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -866,8 +866,8 @@ static int show_smap(struct seq_file *m, void *v)
__show_smap(m, &mss, false);
seq_printf(m, "THPeligible: %8u\n",
- !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
- true, THP_ORDERS_ALL));
+ !!thp_vma_allowable_orders(vma, vma->vm_flags,
+ TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL));
if (arch_pkeys_enabled())
seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index de0c89105076..0d0ba39b86ae 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -84,8 +84,12 @@ extern struct kobj_attribute shmem_enabled_attr;
*/
#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
-#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
- (!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
+#define TVA_SMAPS (1 << 0) /* Will be used for procfs */
+#define TVA_IN_PF (1 << 1) /* Page fault handler */
+#define TVA_ENFORCE_SYSFS (1 << 2) /* Obey sysfs configuration */
+
+#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
+ (!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
@@ -210,17 +214,15 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
}
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
- unsigned long vm_flags, bool smaps,
- bool in_pf, bool enforce_sysfs,
+ unsigned long vm_flags,
+ unsigned long tva_flags,
unsigned long orders);
/**
* thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
* @vma: the vm area to check
* @vm_flags: use these vm_flags instead of vma->vm_flags
- * @smaps: whether answer will be used for smaps file
- * @in_pf: whether answer will be used by page fault handler
- * @enforce_sysfs: whether sysfs config should be taken into account
+ * @tva_flags: Which TVA flags to honour
* @orders: bitfield of all orders to consider
*
* Calculates the intersection of the requested hugepage orders and the allowed
@@ -233,12 +235,12 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
*/
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
- unsigned long vm_flags, bool smaps,
- bool in_pf, bool enforce_sysfs,
+ unsigned long vm_flags,
+ unsigned long tva_flags,
unsigned long orders)
{
/* Optimization to check if required orders are enabled early. */
- if (enforce_sysfs && vma_is_anonymous(vma)) {
+ if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
unsigned long mask = READ_ONCE(huge_anon_orders_always);
if (vm_flags & VM_HUGEPAGE)
@@ -252,8 +254,7 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
return 0;
}
- return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf,
- enforce_sysfs, orders);
+ return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
}
#define transparent_hugepage_use_zero_page() \
@@ -404,8 +405,8 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
}
static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
- unsigned long vm_flags, bool smaps,
- bool in_pf, bool enforce_sysfs,
+ unsigned long vm_flags,
+ unsigned long tva_flags,
unsigned long orders)
{
return 0;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8bc4ffd4725e..5d3d9c0c4153 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -80,10 +80,13 @@ unsigned long huge_anon_orders_madvise __read_mostly;
unsigned long huge_anon_orders_inherit __read_mostly;
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
- unsigned long vm_flags, bool smaps,
- bool in_pf, bool enforce_sysfs,
+ unsigned long vm_flags,
+ unsigned long tva_flags,
unsigned long orders)
{
+ bool smaps = tva_flags & TVA_SMAPS;
+ bool in_pf = tva_flags & TVA_IN_PF;
+ bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;
/* Check the intersection of requested and supported orders. */
orders &= vma_is_anonymous(vma) ?
THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 38830174608f..9642d3c6ee7e 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -453,7 +453,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
hugepage_flags_enabled()) {
- if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
+ if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
PMD_ORDER))
__khugepaged_enter(vma->vm_mm);
}
@@ -917,6 +917,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
struct collapse_control *cc)
{
struct vm_area_struct *vma;
+ unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
return SCAN_ANY_PROCESS;
@@ -927,8 +928,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
return SCAN_ADDRESS_RANGE;
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
- cc->is_khugepaged, PMD_ORDER))
+ if (!thp_vma_allowable_order(vma, vma->vm_flags, tva_flags, PMD_ORDER))
return SCAN_VMA_CHECK;
/*
* Anon VMA expected, the address may be unmapped then
@@ -1510,8 +1510,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
* and map it by a PMD, regardless of sysfs THP settings. As such, let's
* analogously elide sysfs THP settings here.
*/
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
- PMD_ORDER))
+ if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
return SCAN_VMA_CHECK;
/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -2376,8 +2375,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
progress++;
break;
}
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
- true, PMD_ORDER)) {
+ if (!thp_vma_allowable_order(vma, vma->vm_flags,
+ TVA_ENFORCE_SYSFS, PMD_ORDER)) {
skip:
progress++;
continue;
@@ -2714,8 +2713,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
*prev = vma;
- if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
- PMD_ORDER))
+ if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
return -EINVAL;
cc = kmalloc(sizeof(*cc), GFP_KERNEL);
diff --git a/mm/memory.c b/mm/memory.c
index 5624b881b662..287f7d6eb9ed 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4346,8 +4346,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
* for this vma. Then filter out the orders that can't be allocated over
* the faulting address and still be fully contained in the vma.
*/
- orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
- BIT(PMD_ORDER) - 1);
+ orders = thp_vma_allowable_orders(vma, vma->vm_flags,
+ TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
orders = thp_vma_suitable_orders(vma, vmf->address, orders);
if (!orders)
@@ -5395,7 +5395,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
return VM_FAULT_OOM;
retry_pud:
if (pud_none(*vmf.pud) &&
- thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
+ thp_vma_allowable_order(vma, vm_flags,
+ TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER)) {
ret = create_huge_pud(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
@@ -5429,7 +5430,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
goto retry_pud;
if (pmd_none(*vmf.pmd) &&
- thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
+ thp_vma_allowable_order(vma, vm_flags,
+ TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER)) {
ret = create_huge_pmd(&vmf);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
* Re: [PATCH v2] mm: add more readable thp_vma_allowable_order_foo()
2024-04-25 4:00 ` Matthew Wilcox
@ 2024-04-25 7:18 ` Kefeng Wang
2024-04-25 7:22 ` David Hildenbrand
0 siblings, 1 reply; 6+ messages in thread
From: Kefeng Wang @ 2024-04-25 7:18 UTC
To: Matthew Wilcox; +Cc: Andrew Morton, Ryan Roberts, linux-mm, David Hildenbrand
On 2024/4/25 12:00, Matthew Wilcox wrote:
> On Thu, Apr 25, 2024 at 11:51:08AM +0800, Kefeng Wang wrote:
>> There are too many bool arguments in thp_vma_allowable_orders(), so add
>> some more readable thp_vma_allowable_order_foo() wrappers:
>
> Here's an alternative approach I came up with and forgot to send out.
> I take no position on which is better.
I'm always torn; either way is fine, and we could even combine the two
approaches. Let's see Ryan's and David's opinion.
> [...]
* Re: [PATCH v2] mm: add more readable thp_vma_allowable_order_foo()
2024-04-25 7:18 ` Kefeng Wang
@ 2024-04-25 7:22 ` David Hildenbrand
2024-04-25 7:37 ` Kefeng Wang
0 siblings, 1 reply; 6+ messages in thread
From: David Hildenbrand @ 2024-04-25 7:22 UTC
To: Kefeng Wang, Matthew Wilcox; +Cc: Andrew Morton, Ryan Roberts, linux-mm
On 25.04.24 09:18, Kefeng Wang wrote:
>
>
> On 2024/4/25 12:00, Matthew Wilcox wrote:
>> On Thu, Apr 25, 2024 at 11:51:08AM +0800, Kefeng Wang wrote:
>>> There are too many bool arguments in thp_vma_allowable_orders(), so add
>>> some more readable thp_vma_allowable_order_foo() wrappers:
>>
>> Here's an alternative approach I came up with and forgot to send out.
>> I take no position on which is better.
>
> I'm always torn; either way is fine, and we could even combine the two
> approaches. Let's see Ryan's and David's opinion.
It's shocking how Willy and I sometimes create almost identical patches ;)
I played with the same flags idea during mTHP bringup.
And I think I prefer the flags version.
--
Cheers,
David / dhildenb
* Re: [PATCH v2] mm: add more readable thp_vma_allowable_order_foo()
2024-04-25 7:22 ` David Hildenbrand
@ 2024-04-25 7:37 ` Kefeng Wang
2024-04-25 7:58 ` Ryan Roberts
0 siblings, 1 reply; 6+ messages in thread
From: Kefeng Wang @ 2024-04-25 7:37 UTC
To: David Hildenbrand, Matthew Wilcox; +Cc: Andrew Morton, Ryan Roberts, linux-mm
On 2024/4/25 15:22, David Hildenbrand wrote:
> On 25.04.24 09:18, Kefeng Wang wrote:
>>
>>
>> On 2024/4/25 12:00, Matthew Wilcox wrote:
>>> On Thu, Apr 25, 2024 at 11:51:08AM +0800, Kefeng Wang wrote:
>>>> There are too many bool arguments in thp_vma_allowable_orders(), so add
>>>> some more readable thp_vma_allowable_order_foo() wrappers:
>>>
>>> Here's an alternative approach I came up with and forgot to send out.
>>> I take no position on which is better.
>>
>> I'm always torn; either way is fine, and we could even combine the two
>> approaches. Let's see Ryan's and David's opinion.
>
> It's shocking how Willy and I sometimes create almost identical patches ;)
>
:)
> I played with the same flags idea during mTHP bringup.
>
> And I think I prefer the flags version.
>
OK, let's wait for Andrew to pick it up.
* Re: [PATCH v2] mm: add more readable thp_vma_allowable_order_foo()
2024-04-25 7:37 ` Kefeng Wang
@ 2024-04-25 7:58 ` Ryan Roberts
0 siblings, 0 replies; 6+ messages in thread
From: Ryan Roberts @ 2024-04-25 7:58 UTC
To: Kefeng Wang, David Hildenbrand, Matthew Wilcox; +Cc: Andrew Morton, linux-mm
On 25/04/2024 08:37, Kefeng Wang wrote:
>
>
> On 2024/4/25 15:22, David Hildenbrand wrote:
>> On 25.04.24 09:18, Kefeng Wang wrote:
>>>
>>>
>>> On 2024/4/25 12:00, Matthew Wilcox wrote:
>>>> On Thu, Apr 25, 2024 at 11:51:08AM +0800, Kefeng Wang wrote:
>>>>> There are too many bool arguments in thp_vma_allowable_orders(), so add
>>>>> some more readable thp_vma_allowable_order_foo() wrappers:
>>>>
>>>> Here's an alternative approach I came up with and forgot to send out.
>>>> I take no position on which is better.
>>>
>>> I'm always torn; either way is fine, and we could even combine the two
>>> approaches. Let's see Ryan's and David's opinion.
>>
>> It's shocking how Willy and I sometimes create almost identical patches ;)
>>
> :)
>> I played with the same flags idea during mTHP bringup.
>>
>> And I think I prefer the flags version.
Yeah, FWIW, I prefer the flags approach too.
>>
> OK, let's wait for Andrew to pick it up.
end of thread
Thread overview: 6+ messages
2024-04-25 3:51 [PATCH v2] mm: add more readable thp_vma_allowable_order_foo() Kefeng Wang
2024-04-25 4:00 ` Matthew Wilcox
2024-04-25 7:18 ` Kefeng Wang
2024-04-25 7:22 ` David Hildenbrand
2024-04-25 7:37 ` Kefeng Wang
2024-04-25 7:58 ` Ryan Roberts