* [PATCH 0/2] mm: batch mm counter updating in filemap_map_pages()
@ 2024-04-11 13:09 Kefeng Wang
  2024-04-11 13:09 ` [PATCH 1/2] mm: move mm counter updating out of set_pte_range() Kefeng Wang
  2024-04-11 13:09 ` [PATCH 2/2] mm: filemap: batch mm counter updating in filemap_map_pages() Kefeng Wang
  0 siblings, 2 replies; 7+ messages in thread

From: Kefeng Wang @ 2024-04-11 13:09 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Matthew Wilcox (Oracle), linux-mm, linux-fsdevel, Kefeng Wang

Let's batch mm counter updating to accelerate filemap_map_pages().

Kefeng Wang (2):
  mm: move mm counter updating out of set_pte_range()
  mm: filemap: batch mm counter updating in filemap_map_pages()

 include/linux/mm.h | 18 ++++++++++++++++--
 mm/filemap.c       | 21 ++++++++++++++-------
 mm/memory.c        | 30 +++++++++++-------------------
 3 files changed, 41 insertions(+), 28 deletions(-)

-- 
2.41.0

^ permalink raw reply	[flat|nested] 7+ messages in thread
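[The core idea of the series, as a minimal sketch simplified from patch 2/2
below -- not the literal kernel code: the per-range mm counter updates done
while mapping pages are accumulated in a local array and flushed to the mm
counters once per fault, so the percpu counter machinery is hit once per
fault rather than once per mapped range.

	int rss[NR_MM_COUNTERS] = { 0 };	/* local, per-fault accumulation */

	/* for each page range mapped while holding the PTL: */
	rss[mm_counter_file(folio)] += count;	/* no add_mm_counter() here */

	/* one flush once the fault is done: */
	for (i = 0; i < NR_MM_COUNTERS; i++)
		if (rss[i])
			add_mm_counter(mm, i, rss[i]);
]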
* [PATCH 1/2] mm: move mm counter updating out of set_pte_range()
  2024-04-11 13:09 [PATCH 0/2] mm: batch mm counter updating in filemap_map_pages() Kefeng Wang
@ 2024-04-11 13:09 ` Kefeng Wang
  2024-04-11 12:53   ` Matthew Wilcox
  2024-04-11 13:09 ` [PATCH 2/2] mm: filemap: batch mm counter updating in filemap_map_pages() Kefeng Wang
  1 sibling, 1 reply; 7+ messages in thread

From: Kefeng Wang @ 2024-04-11 13:09 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Matthew Wilcox (Oracle), linux-mm, linux-fsdevel, Kefeng Wang

In order to support batch mm counter updating in filemap_map_pages(),
make set_pte_range() return the type of MM_COUNTERS and move mm counter
updating out of set_pte_range().

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 include/linux/mm.h |  4 ++--
 mm/filemap.c       | 10 +++++++---
 mm/memory.c        | 16 +++++++++++-----
 3 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0b4046b1e63d..6ad440ac3706 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1366,8 +1366,8 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 }
 
 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
-void set_pte_range(struct vm_fault *vmf, struct folio *folio,
-		struct page *page, unsigned int nr, unsigned long addr);
+int set_pte_range(struct vm_fault *vmf, struct folio *folio,
+		struct page *page, unsigned int nr, unsigned long addr);
 vm_fault_t finish_fault(struct vm_fault *vmf);
 #endif
 
diff --git a/mm/filemap.c b/mm/filemap.c
index 92e2d43e4c9d..2274e590bab4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3512,6 +3512,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 	struct page *page = folio_page(folio, start);
 	unsigned int count = 0;
 	pte_t *old_ptep = vmf->pte;
+	int type;
 
 	do {
 		if (PageHWPoison(page + count))
@@ -3539,7 +3540,8 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			continue;
 skip:
 		if (count) {
-			set_pte_range(vmf, folio, page, count, addr);
+			type = set_pte_range(vmf, folio, page, count, addr);
+			add_mm_counter(vmf->vma->vm_mm, type, count);
 			folio_ref_add(folio, count);
 			if (in_range(vmf->address, addr, count * PAGE_SIZE))
 				ret = VM_FAULT_NOPAGE;
@@ -3553,7 +3555,8 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 	} while (--nr_pages > 0);
 
 	if (count) {
-		set_pte_range(vmf, folio, page, count, addr);
+		type = set_pte_range(vmf, folio, page, count, addr);
+		add_mm_counter(vmf->vma->vm_mm, type, count);
 		folio_ref_add(folio, count);
 		if (in_range(vmf->address, addr, count * PAGE_SIZE))
 			ret = VM_FAULT_NOPAGE;
@@ -3589,7 +3592,8 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
 	if (vmf->address == addr)
 		ret = VM_FAULT_NOPAGE;
 
-	set_pte_range(vmf, folio, page, 1, addr);
+	add_mm_counter(vmf->vma->vm_mm,
+		       set_pte_range(vmf, folio, page, 1, addr), 1);
 	folio_ref_inc(folio);
 
 	return ret;
diff --git a/mm/memory.c b/mm/memory.c
index 78422d1c7381..485ffec9d4c7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4661,15 +4661,18 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
  * @page: The first page to create a PTE for.
  * @nr: The number of PTEs to create.
  * @addr: The first address to create a PTE for.
+ *
+ * Return: type of MM_COUNTERS to be updated
  */
-void set_pte_range(struct vm_fault *vmf, struct folio *folio,
-		struct page *page, unsigned int nr, unsigned long addr)
+int set_pte_range(struct vm_fault *vmf, struct folio *folio,
+		struct page *page, unsigned int nr, unsigned long addr)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
 	bool write = vmf->flags & FAULT_FLAG_WRITE;
 	bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
 	pte_t entry;
+	int type;
 
 	flush_icache_pages(vma, page, nr);
 	entry = mk_pte(page, vma->vm_page_prot);
@@ -4685,18 +4688,20 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
 		entry = pte_mkuffd_wp(entry);
 	/* copy-on-write page */
 	if (write && !(vma->vm_flags & VM_SHARED)) {
-		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
+		type = MM_ANONPAGES;
 		VM_BUG_ON_FOLIO(nr != 1, folio);
 		folio_add_new_anon_rmap(folio, vma, addr);
 		folio_add_lru_vma(folio, vma);
 	} else {
-		add_mm_counter(vma->vm_mm, mm_counter_file(folio), nr);
+		type = mm_counter_file(folio);
 		folio_add_file_rmap_ptes(folio, page, nr, vma);
 	}
 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
 
 	/* no need to invalidate: a not-present page won't be cached */
 	update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
+
+	return type;
 }
 
 static bool vmf_pte_changed(struct vm_fault *vmf)
@@ -4765,8 +4770,9 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 	/* Re-check under ptl */
 	if (likely(!vmf_pte_changed(vmf))) {
 		struct folio *folio = page_folio(page);
+		int type = set_pte_range(vmf, folio, page, 1, vmf->address);
 
-		set_pte_range(vmf, folio, page, 1, vmf->address);
+		add_mm_counter(vmf->vma->vm_mm, type, 1);
 		ret = 0;
 	} else {
 		update_mmu_tlb(vma, vmf->address, vmf->pte);
-- 
2.41.0

^ permalink raw reply related	[flat|nested] 7+ messages in thread
* Re: [PATCH 1/2] mm: move mm counter updating out of set_pte_range()
  2024-04-11 13:09 ` [PATCH 1/2] mm: move mm counter updating out of set_pte_range() Kefeng Wang
@ 2024-04-11 12:53   ` Matthew Wilcox
  2024-04-11 14:08     ` Kefeng Wang
  0 siblings, 1 reply; 7+ messages in thread

From: Matthew Wilcox @ 2024-04-11 12:53 UTC (permalink / raw)
  To: Kefeng Wang; +Cc: Andrew Morton, linux-mm, linux-fsdevel

On Thu, Apr 11, 2024 at 09:09:49PM +0800, Kefeng Wang wrote:
> In order to support batch mm counter updating in filemap_map_pages(),
> make set_pte_range() return the type of MM_COUNTERS and move mm counter
> updating out of set_pte_range().

I don't like this.  You're making set_pte_range() harder to use.
It's also rather overengineered; if you're calling set_pte_range()
from filemap.c, you already know the folios are MM_FILEPAGES.

^ permalink raw reply	[flat|nested] 7+ messages in thread
* Re: [PATCH 1/2] mm: move mm counter updating out of set_pte_range()
  2024-04-11 12:53   ` Matthew Wilcox
@ 2024-04-11 14:08     ` Kefeng Wang
  0 siblings, 0 replies; 7+ messages in thread

From: Kefeng Wang @ 2024-04-11 14:08 UTC (permalink / raw)
  To: Matthew Wilcox; +Cc: Andrew Morton, linux-mm, linux-fsdevel

On 2024/4/11 20:53, Matthew Wilcox wrote:
> On Thu, Apr 11, 2024 at 09:09:49PM +0800, Kefeng Wang wrote:
>> In order to support batch mm counter updating in filemap_map_pages(),
>> make set_pte_range() return the type of MM_COUNTERS and move mm counter
>> updating out of set_pte_range().
> 
> I don't like this.  You're making set_pte_range() harder to use.
> It's also rather overengineered; if you're calling set_pte_range()
> from filemap.c, you already know the folios are MM_FILEPAGES.

Or MM_SHMEMS; and the other caller, finish_fault(), already checks
vmf->flags and vma->vm_flags, so we could use those to distinguish anon
from file folios. I will try this way, thanks.

> 
> 

^ permalink raw reply	[flat|nested] 7+ messages in thread
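[What this ends up looking like, as a minimal sketch distilled from the v2
patch further down in this thread (assume the usual vmf/vma/folio/page
variables are in scope as in those functions; this is an illustration, not
the applied patch):

	/* filemap.c callers: the folio is always a page-cache folio, so
	 * mm_counter_file() picks MM_FILEPAGES or MM_SHMEMS as needed. */
	set_pte_range(vmf, folio, page, count, addr);
	add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), count);

	/* finish_fault(): anon vs. file is already known from the fault --
	 * a write fault on a private mapping means the COW (anon) page. */
	bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
		      !(vma->vm_flags & VM_SHARED);
	int type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);

	set_pte_range(vmf, folio, page, 1, vmf->address);
	add_mm_counter(vma->vm_mm, type, 1);
]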
* [PATCH 2/2] mm: filemap: batch mm counter updating in filemap_map_pages()
  2024-04-11 13:09 [PATCH 0/2] mm: batch mm counter updating in filemap_map_pages() Kefeng Wang
  2024-04-11 13:09 ` [PATCH 1/2] mm: move mm counter updating out of set_pte_range() Kefeng Wang
@ 2024-04-11 13:09 ` Kefeng Wang
  1 sibling, 0 replies; 7+ messages in thread

From: Kefeng Wang @ 2024-04-11 13:09 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Matthew Wilcox (Oracle), linux-mm, linux-fsdevel, Kefeng Wang

Like copy_pte_range()/zap_pte_range(), batch the mm counter updating in
filemap_map_pages(). The 'lat_pagefault -P 1 file' test from lmbench
shows a 12% improvement, and percpu_counter_add_batch() is gone from
the perf flame graph.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 include/linux/mm.h | 14 ++++++++++++++
 mm/filemap.c       | 19 +++++++++++--------
 mm/memory.c        | 14 --------------
 3 files changed, 25 insertions(+), 22 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6ad440ac3706..c7dffd358088 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2655,6 +2655,20 @@ static inline int mm_counter(struct folio *folio)
 	return mm_counter_file(folio);
 }
 
+static inline void init_rss_vec(int *rss)
+{
+	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
+}
+
+static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
+{
+	int i;
+
+	for (i = 0; i < NR_MM_COUNTERS; i++)
+		if (rss[i])
+			add_mm_counter(mm, i, rss[i]);
+}
+
 static inline unsigned long get_mm_rss(struct mm_struct *mm)
 {
 	return get_mm_counter(mm, MM_FILEPAGES) +
diff --git a/mm/filemap.c b/mm/filemap.c
index 2274e590bab4..d8b23e976a43 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3506,7 +3506,7 @@ static struct folio *next_uptodate_folio(struct xa_state *xas,
 static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 			struct folio *folio, unsigned long start,
 			unsigned long addr, unsigned int nr_pages,
-			unsigned int *mmap_miss)
+			int *rss, unsigned int *mmap_miss)
 {
 	vm_fault_t ret = 0;
 	struct page *page = folio_page(folio, start);
@@ -3541,7 +3541,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 skip:
 		if (count) {
 			type = set_pte_range(vmf, folio, page, count, addr);
-			add_mm_counter(vmf->vma->vm_mm, type, count);
+			rss[type] += count;
 			folio_ref_add(folio, count);
 			if (in_range(vmf->address, addr, count * PAGE_SIZE))
 				ret = VM_FAULT_NOPAGE;
@@ -3556,7 +3556,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 
 	if (count) {
 		type = set_pte_range(vmf, folio, page, count, addr);
-		add_mm_counter(vmf->vma->vm_mm, type, count);
+		rss[type] += count;
 		folio_ref_add(folio, count);
 		if (in_range(vmf->address, addr, count * PAGE_SIZE))
 			ret = VM_FAULT_NOPAGE;
@@ -3569,7 +3569,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 
 static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
 		struct folio *folio, unsigned long addr,
-		unsigned int *mmap_miss)
+		int *rss, unsigned int *mmap_miss)
 {
 	vm_fault_t ret = 0;
 	struct page *page = &folio->page;
@@ -3592,8 +3592,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
 	if (vmf->address == addr)
 		ret = VM_FAULT_NOPAGE;
 
-	add_mm_counter(vmf->vma->vm_mm,
-		       set_pte_range(vmf, folio, page, 1, addr), 1);
+	rss[set_pte_range(vmf, folio, page, 1, addr)]++;
 	folio_ref_inc(folio);
 
 	return ret;
@@ -3610,6 +3609,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
 	struct folio *folio;
 	vm_fault_t ret = 0;
+	int rss[NR_MM_COUNTERS];
 	unsigned int nr_pages = 0, mmap_miss = 0, mmap_miss_saved;
 
 	rcu_read_lock();
@@ -3629,6 +3629,8 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		folio_put(folio);
 		goto out;
 	}
+
+	init_rss_vec(rss);
 	do {
 		unsigned long end;
 
@@ -3640,15 +3642,16 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 
 		if (!folio_test_large(folio))
 			ret |= filemap_map_order0_folio(vmf,
-					folio, addr, &mmap_miss);
+					folio, addr, rss, &mmap_miss);
 		else
 			ret |= filemap_map_folio_range(vmf, folio,
 					xas.xa_index - folio->index, addr,
-					nr_pages, &mmap_miss);
+					nr_pages, rss, &mmap_miss);
 
 		folio_unlock(folio);
 		folio_put(folio);
 	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
+	add_mm_rss_vec(vma->vm_mm, rss);
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
 out:
 	rcu_read_unlock();
diff --git a/mm/memory.c b/mm/memory.c
index 485ffec9d4c7..149208da1652 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -465,20 +465,6 @@ int __pte_alloc_kernel(pmd_t *pmd)
 	return 0;
 }
 
-static inline void init_rss_vec(int *rss)
-{
-	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
-}
-
-static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
-{
-	int i;
-
-	for (i = 0; i < NR_MM_COUNTERS; i++)
-		if (rss[i])
-			add_mm_counter(mm, i, rss[i]);
-}
-
 /*
  * This function is called to print an error when a bad pte
  * is found. For example, we might have a PFN-mapped pte in
-- 
2.41.0

^ permalink raw reply related	[flat|nested] 7+ messages in thread
* [PATCH v2 0/2] mm: batch mm counter updating in filemap_map_pages()
@ 2024-04-12  2:57 Kefeng Wang
  2024-04-12  2:57 ` [PATCH 1/2] mm: move mm counter updating out of set_pte_range() Kefeng Wang
  0 siblings, 1 reply; 7+ messages in thread

From: Kefeng Wang @ 2024-04-12  2:57 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Matthew Wilcox (Oracle), linux-mm, linux-fsdevel, Kefeng Wang

Let's batch mm counter updating to accelerate filemap_map_pages().

v2:
- determine the folio type from the caller instead of returning it from
  set_pte_range()
- use unsigned long for rss

Kefeng Wang (2):
  mm: move mm counter updating out of set_pte_range()
  mm: filemap: batch mm counter updating in filemap_map_pages()

 mm/filemap.c | 14 ++++++++++----
 mm/memory.c  |  8 +++++---
 2 files changed, 15 insertions(+), 7 deletions(-)

-- 
2.41.0

^ permalink raw reply	[flat|nested] 7+ messages in thread
* [PATCH 1/2] mm: move mm counter updating out of set_pte_range()
  2024-04-12  2:57 [PATCH v2 0/2] mm: " Kefeng Wang
@ 2024-04-12  2:57 ` Kefeng Wang
  2024-04-12  2:33   ` Kefeng Wang
  0 siblings, 1 reply; 7+ messages in thread

From: Kefeng Wang @ 2024-04-12  2:57 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Matthew Wilcox (Oracle), linux-mm, linux-fsdevel, Kefeng Wang

In order to support batch mm counter updating in filemap_map_pages(),
move mm counter updating out of set_pte_range(). The folios mapped from
filemap are file folios, and the other caller, finish_fault(), can
distinguish the folio type from vmf->flags and vma->vm_flags.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/filemap.c | 4 ++++
 mm/memory.c  | 8 +++++---
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 92e2d43e4c9d..04b813f0146c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3540,6 +3540,8 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 skip:
 		if (count) {
 			set_pte_range(vmf, folio, page, count, addr);
+			add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio),
+				       count);
 			folio_ref_add(folio, count);
 			if (in_range(vmf->address, addr, count * PAGE_SIZE))
 				ret = VM_FAULT_NOPAGE;
@@ -3554,6 +3556,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 
 	if (count) {
 		set_pte_range(vmf, folio, page, count, addr);
+		add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), count);
 		folio_ref_add(folio, count);
 		if (in_range(vmf->address, addr, count * PAGE_SIZE))
 			ret = VM_FAULT_NOPAGE;
@@ -3590,6 +3593,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
 		ret = VM_FAULT_NOPAGE;
 
 	set_pte_range(vmf, folio, page, 1, addr);
+	add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), 1);
 	folio_ref_inc(folio);
 
 	return ret;
diff --git a/mm/memory.c b/mm/memory.c
index 78422d1c7381..69bc63a5d6c8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4685,12 +4685,10 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
 		entry = pte_mkuffd_wp(entry);
 	/* copy-on-write page */
 	if (write && !(vma->vm_flags & VM_SHARED)) {
-		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
 		VM_BUG_ON_FOLIO(nr != 1, folio);
 		folio_add_new_anon_rmap(folio, vma, addr);
 		folio_add_lru_vma(folio, vma);
 	} else {
-		add_mm_counter(vma->vm_mm, mm_counter_file(folio), nr);
 		folio_add_file_rmap_ptes(folio, page, nr, vma);
 	}
 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
@@ -4727,9 +4725,11 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	struct page *page;
 	vm_fault_t ret;
+	int is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
+		     !(vma->vm_flags & VM_SHARED);
 
 	/* Did we COW the page? */
-	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
+	if (is_cow)
 		page = vmf->cow_page;
 	else
 		page = vmf->page;
@@ -4765,8 +4765,10 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 	/* Re-check under ptl */
 	if (likely(!vmf_pte_changed(vmf))) {
 		struct folio *folio = page_folio(page);
+		int type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
 
 		set_pte_range(vmf, folio, page, 1, vmf->address);
+		add_mm_counter(vma->vm_mm, type, 1);
 		ret = 0;
 	} else {
 		update_mmu_tlb(vma, vmf->address, vmf->pte);
-- 
2.41.0

^ permalink raw reply related	[flat|nested] 7+ messages in thread
* Re: [PATCH 1/2] mm: move mm counter updating out of set_pte_range()
  2024-04-12  2:57 ` [PATCH 1/2] mm: move mm counter updating out of set_pte_range() Kefeng Wang
@ 2024-04-12  2:33   ` Kefeng Wang
  0 siblings, 0 replies; 7+ messages in thread

From: Kefeng Wang @ 2024-04-12  2:33 UTC (permalink / raw)
  To: Andrew Morton; +Cc: Matthew Wilcox (Oracle), linux-mm, linux-fsdevel

On 2024/4/12 10:57, Kefeng Wang wrote:
> In order to support batch mm counter updating in filemap_map_pages(),
> move mm counter updating out of set_pte_range(). The folios mapped from
> filemap are file folios, and the other caller, finish_fault(), can
> distinguish the folio type from vmf->flags and vma->vm_flags.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  mm/filemap.c | 4 ++++
>  mm/memory.c  | 8 +++++---
>  2 files changed, 9 insertions(+), 3 deletions(-)
> 
> diff --git a/mm/filemap.c b/mm/filemap.c
> index 92e2d43e4c9d..04b813f0146c 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -3540,6 +3540,8 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
>  skip:
>  		if (count) {
>  			set_pte_range(vmf, folio, page, count, addr);
> +			add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio),
> +				       count);
>  			folio_ref_add(folio, count);
>  			if (in_range(vmf->address, addr, count * PAGE_SIZE))
>  				ret = VM_FAULT_NOPAGE;
> @@ -3554,6 +3556,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
>  
>  	if (count) {
>  		set_pte_range(vmf, folio, page, count, addr);
> +		add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), count);
>  		folio_ref_add(folio, count);
>  		if (in_range(vmf->address, addr, count * PAGE_SIZE))
>  			ret = VM_FAULT_NOPAGE;
> @@ -3590,6 +3593,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
>  		ret = VM_FAULT_NOPAGE;
>  
>  	set_pte_range(vmf, folio, page, 1, addr);
> +	add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), 1);
>  	folio_ref_inc(folio);
>  
>  	return ret;
> diff --git a/mm/memory.c b/mm/memory.c
> index 78422d1c7381..69bc63a5d6c8 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4685,12 +4685,10 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
>  		entry = pte_mkuffd_wp(entry);
>  	/* copy-on-write page */
>  	if (write && !(vma->vm_flags & VM_SHARED)) {
> -		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
>  		VM_BUG_ON_FOLIO(nr != 1, folio);
>  		folio_add_new_anon_rmap(folio, vma, addr);
>  		folio_add_lru_vma(folio, vma);
>  	} else {
> -		add_mm_counter(vma->vm_mm, mm_counter_file(folio), nr);
>  		folio_add_file_rmap_ptes(folio, page, nr, vma);
>  	}
>  	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
> @@ -4727,9 +4725,11 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
>  	struct vm_area_struct *vma = vmf->vma;
>  	struct page *page;
>  	vm_fault_t ret;
> +	int is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
> +		     !(vma->vm_flags & VM_SHARED);

oops, bool is enough.

> 
>  	/* Did we COW the page? */
> -	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
> +	if (is_cow)
>  		page = vmf->cow_page;
>  	else
>  		page = vmf->page;
> @@ -4765,8 +4765,10 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
>  	/* Re-check under ptl */
>  	if (likely(!vmf_pte_changed(vmf))) {
>  		struct folio *folio = page_folio(page);
> +		int type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
> 
>  		set_pte_range(vmf, folio, page, 1, vmf->address);
> +		add_mm_counter(vma->vm_mm, type, 1);
>  		ret = 0;
>  	} else {
>  		update_mmu_tlb(vma, vmf->address, vmf->pte);

^ permalink raw reply	[flat|nested] 7+ messages in thread
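[A sketch of the trivial follow-up fix mentioned above -- the same
expression, just declared as a bool (illustrative only):

	bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
		      !(vma->vm_flags & VM_SHARED);
]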
end of thread, other threads:[~2024-04-12  2:33 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-04-11 13:09 [PATCH 0/2] mm: batch mm counter updating in filemap_map_pages() Kefeng Wang
2024-04-11 13:09 ` [PATCH 1/2] mm: move mm counter updating out of set_pte_range() Kefeng Wang
2024-04-11 12:53   ` Matthew Wilcox
2024-04-11 14:08     ` Kefeng Wang
2024-04-11 13:09 ` [PATCH 2/2] mm: filemap: batch mm counter updating in filemap_map_pages() Kefeng Wang

-- strict thread matches above, loose matches on Subject: below --
2024-04-12  2:57 [PATCH v2 0/2] mm: " Kefeng Wang
2024-04-12  2:57 ` [PATCH 1/2] mm: move mm counter updating out of set_pte_range() Kefeng Wang
2024-04-12  2:33   ` Kefeng Wang