From: Mike Kravetz <mike.kravetz@oracle.com>
To: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
akpm@linux-foundation.org, songmuchun@bytedance.com,
willy@infradead.org, david@redhat.com, nphamcs@gmail.com,
jthoughton@google.com
Subject: Re: [PATCH 1/2] mm/filemap: remove hugetlb special casing in filemap.c
Date: Thu, 15 Jun 2023 15:13:46 -0700 [thread overview]
Message-ID: <20230615221346.GA29046@monkey> (raw)
In-Reply-To: <20230609194947.37196-2-sidhartha.kumar@oracle.com>
On 06/09/23 12:49, Sidhartha Kumar wrote:
> This patch aims to remove special cased hugetlb handling code within the
> page cache by changing the granularity of each index to the base page size
> rather than the huge page size.
>
> Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
> ---
> include/linux/pagemap.h | 6 ------
> mm/filemap.c | 36 +++++++++++-------------------------
> 2 files changed, 11 insertions(+), 31 deletions(-)
I agree with Matthew that this patch cannot be sent independently of, or prior
to, the patch with the hugetlb changes.
Code changes to remove hugetlb special casing below look fine.
It does not matter for your code changes, but I think some of the routines where
you are removing hugetlb checks cannot be passed hugetlb folios/vmas today.
Specifically: folio_more_pages, filemap_get_folios_contig and
filemap_get_folios_tag.
--
Mike Kravetz
>
> diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
> index 716953ee1ebdb..17c414fc2136e 100644
> --- a/include/linux/pagemap.h
> +++ b/include/linux/pagemap.h
> @@ -723,9 +723,6 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
> */
> static inline bool folio_contains(struct folio *folio, pgoff_t index)
> {
> - /* HugeTLBfs indexes the page cache in units of hpage_size */
> - if (folio_test_hugetlb(folio))
> - return folio->index == index;
> return index - folio_index(folio) < folio_nr_pages(folio);
> }
>
> @@ -850,12 +847,9 @@ static inline loff_t folio_file_pos(struct folio *folio)
>
> /*
> * Get the offset in PAGE_SIZE (even for hugetlb folios).
> - * (TODO: hugetlb folios should have ->index in PAGE_SIZE)
> */
> static inline pgoff_t folio_pgoff(struct folio *folio)
> {
> - if (unlikely(folio_test_hugetlb(folio)))
> - return hugetlb_basepage_index(&folio->page);
> return folio->index;
> }
>
> diff --git a/mm/filemap.c b/mm/filemap.c
> index 60f6f63cfacba..7462d33f70e2f 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -134,11 +134,8 @@ static void page_cache_delete(struct address_space *mapping,
>
> mapping_set_update(&xas, mapping);
>
> - /* hugetlb pages are represented by a single entry in the xarray */
> - if (!folio_test_hugetlb(folio)) {
> - xas_set_order(&xas, folio->index, folio_order(folio));
> - nr = folio_nr_pages(folio);
> - }
> + xas_set_order(&xas, folio->index, folio_order(folio));
> + nr = folio_nr_pages(folio);
>
> VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
>
> @@ -237,7 +234,7 @@ void filemap_free_folio(struct address_space *mapping, struct folio *folio)
> if (free_folio)
> free_folio(folio);
>
> - if (folio_test_large(folio) && !folio_test_hugetlb(folio))
> + if (folio_test_large(folio))
> refs = folio_nr_pages(folio);
> folio_put_refs(folio, refs);
> }
> @@ -858,14 +855,15 @@ noinline int __filemap_add_folio(struct address_space *mapping,
>
> if (!huge) {
> int error = mem_cgroup_charge(folio, NULL, gfp);
> - VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
> if (error)
> return error;
> charged = true;
> - xas_set_order(&xas, index, folio_order(folio));
> - nr = folio_nr_pages(folio);
> }
>
> + VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
> + xas_set_order(&xas, index, folio_order(folio));
> + nr = folio_nr_pages(folio);
> +
> gfp &= GFP_RECLAIM_MASK;
> folio_ref_add(folio, nr);
> folio->mapping = mapping;
> @@ -2048,7 +2046,7 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
> int idx = folio_batch_count(fbatch) - 1;
>
> folio = fbatch->folios[idx];
> - if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
> + if (!xa_is_value(folio))
> nr = folio_nr_pages(folio);
> *start = indices[idx] + nr;
> }
> @@ -2112,7 +2110,7 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
> int idx = folio_batch_count(fbatch) - 1;
>
> folio = fbatch->folios[idx];
> - if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
> + if (!xa_is_value(folio))
> nr = folio_nr_pages(folio);
> *start = indices[idx] + nr;
> }
> @@ -2153,9 +2151,6 @@ unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
> continue;
> if (!folio_batch_add(fbatch, folio)) {
> unsigned long nr = folio_nr_pages(folio);
> -
> - if (folio_test_hugetlb(folio))
> - nr = 1;
> *start = folio->index + nr;
> goto out;
> }
> @@ -2181,7 +2176,7 @@ EXPORT_SYMBOL(filemap_get_folios);
> static inline
> bool folio_more_pages(struct folio *folio, pgoff_t index, pgoff_t max)
> {
> - if (!folio_test_large(folio) || folio_test_hugetlb(folio))
> + if (!folio_test_large(folio))
> return false;
> if (index >= max)
> return false;
> @@ -2231,9 +2226,6 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
>
> if (!folio_batch_add(fbatch, folio)) {
> nr = folio_nr_pages(folio);
> -
> - if (folio_test_hugetlb(folio))
> - nr = 1;
> *start = folio->index + nr;
> goto out;
> }
> @@ -2250,10 +2242,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
>
> if (nr) {
> folio = fbatch->folios[nr - 1];
> - if (folio_test_hugetlb(folio))
> - *start = folio->index + 1;
> - else
> - *start = folio->index + folio_nr_pages(folio);
> + *start = folio->index + folio_nr_pages(folio);
> }
> out:
> rcu_read_unlock();
> @@ -2291,9 +2280,6 @@ unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
> continue;
> if (!folio_batch_add(fbatch, folio)) {
> unsigned long nr = folio_nr_pages(folio);
> -
> - if (folio_test_hugetlb(folio))
> - nr = 1;
> *start = folio->index + nr;
> goto out;
> }
> --
> 2.40.1
next prev parent reply other threads:[~2023-06-15 22:14 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-06-09 19:49 [PATCH 0/2] change ->index to PAGE_SIZE for hugetlb pages Sidhartha Kumar
2023-06-09 19:49 ` [PATCH 1/2] mm/filemap: remove hugetlb special casing in filemap.c Sidhartha Kumar
2023-06-09 20:05 ` Matthew Wilcox
2023-06-09 20:18 ` Sidhartha Kumar
2023-06-15 22:13 ` Mike Kravetz [this message]
2023-06-09 19:49 ` [PATCH 2/2] mm/hugetlb: add wrapper functions for interactions with page cache Sidhartha Kumar
2023-06-09 19:52 ` Sidhartha Kumar
2023-06-15 23:36 ` Mike Kravetz
2023-06-16 18:52 ` Sidhartha Kumar
2023-06-16 23:41 ` Mike Kravetz
2023-06-09 20:10 ` Matthew Wilcox
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230615221346.GA29046@monkey \
--to=mike.kravetz@oracle.com \
--cc=akpm@linux-foundation.org \
--cc=david@redhat.com \
--cc=jthoughton@google.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=nphamcs@gmail.com \
--cc=sidhartha.kumar@oracle.com \
--cc=songmuchun@bytedance.com \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).