From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: Xiubo Li <xiubli@redhat.com>, Ilya Dryomov <idryomov@gmail.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	Jeff Layton <jlayton@kernel.org>,
	ceph-devel@vger.kernel.org, David Howells <dhowells@redhat.com>,
	linux-fsdevel@vger.kernel.org
Subject: [PATCH 07/15] ceph: Convert writepage_nounlock() to take a folio
Date: Fri, 25 Aug 2023 21:12:17 +0100
Message-ID: <20230825201225.348148-8-willy@infradead.org>
In-Reply-To: <20230825201225.348148-1-willy@infradead.org>

Replace a number of old page-based APIs (page_offset(), thp_size(),
set_page_writeback(), end_page_writeback(), redirty_page_for_writepage(),
detach_page_private()) with their folio equivalents, and use folio->index
to identify folios in debug output.
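
For background, the page-based calls removed here map one-to-one onto
folio equivalents. The following is a minimal sketch of that mapping,
not code from this patch: my_writepage() and my_submit_write() are
hypothetical stand-ins for writepage_nounlock() and the OSD request
submission.

	#include <linux/mm.h>
	#include <linux/pagemap.h>
	#include <linux/writeback.h>

	/* Hypothetical I/O helper -- stands in for the ceph OSD request. */
	static int my_submit_write(struct folio *folio, loff_t pos, size_t len)
	{
		return 0;
	}

	static int my_writepage(struct folio *folio, struct writeback_control *wbc)
	{
		loff_t pos = folio_pos(folio);	/* was page_offset(page) */
		size_t len = folio_size(folio);	/* was thp_size(page) */

		/* folio->index identifies the folio in debug output */
		pr_debug("writepage %lu pos %lld len %zu\n",
			 folio->index, pos, len);

		folio_start_writeback(folio);	/* was set_page_writeback(page) */
		if (my_submit_write(folio, pos, len) < 0) {
			/* was redirty_page_for_writepage(wbc, page) */
			folio_redirty_for_writepage(wbc, folio);
			folio_end_writeback(folio);	/* was end_page_writeback(page) */
			return -EIO;
		}
		folio_end_writeback(folio);
		return 0;
	}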

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/ceph/addr.c | 66 +++++++++++++++++++++++++-------------------------
 1 file changed, 33 insertions(+), 33 deletions(-)

diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 0027906a9257..02caf10d43ed 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -648,52 +648,52 @@ static u64 get_writepages_data_length(struct inode *inode,
 }
 
 /*
- * Write a single page, but leave the page locked.
+ * Write a single folio, but leave the folio locked.
  *
  * If we get a write error, mark the mapping for error, but still adjust the
- * dirty page accounting (i.e., page is no longer dirty).
+ * dirty page accounting (i.e., folio is no longer dirty).
  */
-static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
+static int writepage_nounlock(struct folio *folio, struct writeback_control *wbc)
 {
-	struct folio *folio = page_folio(page);
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 	struct ceph_snap_context *snapc, *oldest;
-	loff_t page_off = page_offset(page);
+	loff_t page_off = folio_pos(folio);
 	int err;
-	loff_t len = thp_size(page);
+	loff_t len = folio_size(folio);
 	loff_t wlen;
 	struct ceph_writeback_ctl ceph_wbc;
 	struct ceph_osd_client *osdc = &fsc->client->osdc;
 	struct ceph_osd_request *req;
 	bool caching = ceph_is_cache_enabled(inode);
+	struct page *page = &folio->page;
 	struct page *bounce_page = NULL;
 
-	dout("writepage %p idx %lu\n", page, page->index);
+	dout("writepage %lu\n", folio->index);
 
 	if (ceph_inode_is_shutdown(inode))
 		return -EIO;
 
 	/* verify this is a writeable snap context */
-	snapc = page_snap_context(page);
+	snapc = folio->private;
 	if (!snapc) {
-		dout("writepage %p page %p not dirty?\n", inode, page);
+		dout("writepage %p folio %lu not dirty?\n", inode, folio->index);
 		return 0;
 	}
 	oldest = get_oldest_context(inode, &ceph_wbc, snapc);
 	if (snapc->seq > oldest->seq) {
-		dout("writepage %p page %p snapc %p not writeable - noop\n",
-		     inode, page, snapc);
+		dout("writepage %p folio %lu snapc %p not writeable - noop\n",
+		     inode, folio->index, snapc);
 		/* we should only noop if called by kswapd */
 		WARN_ON(!(current->flags & PF_MEMALLOC));
 		ceph_put_snap_context(oldest);
-		redirty_page_for_writepage(wbc, page);
+		folio_redirty_for_writepage(wbc, folio);
 		return 0;
 	}
 	ceph_put_snap_context(oldest);
 
-	/* is this a partial page at end of file? */
+	/* is this a partial folio at end of file? */
 	if (page_off >= ceph_wbc.i_size) {
 		dout("folio at %lu beyond eof %llu\n", folio->index,
 				ceph_wbc.i_size);
@@ -705,8 +705,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 		len = ceph_wbc.i_size - page_off;
 
 	wlen = IS_ENCRYPTED(inode) ? round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len;
-	dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
-	     inode, page, page->index, page_off, wlen, snapc, snapc->seq);
+	dout("writepage %p folio %lu on %llu~%llu snapc %p seq %lld\n",
+	     inode, folio->index, page_off, wlen, snapc, snapc->seq);
 
 	if (atomic_long_inc_return(&fsc->writeback_count) >
 	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
@@ -718,32 +718,32 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 				    ceph_wbc.truncate_seq,
 				    ceph_wbc.truncate_size, true);
 	if (IS_ERR(req)) {
-		redirty_page_for_writepage(wbc, page);
+		folio_redirty_for_writepage(wbc, folio);
 		return PTR_ERR(req);
 	}
 
 	if (wlen < len)
 		len = wlen;
 
-	set_page_writeback(page);
+	folio_start_writeback(folio);
 	if (caching)
-		ceph_set_page_fscache(page);
+		ceph_set_page_fscache(&folio->page);
 	ceph_fscache_write_to_cache(inode, page_off, len, caching);
 
 	if (IS_ENCRYPTED(inode)) {
-		bounce_page = fscrypt_encrypt_pagecache_blocks(page,
+		bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
 						    CEPH_FSCRYPT_BLOCK_SIZE, 0,
 						    GFP_NOFS);
 		if (IS_ERR(bounce_page)) {
-			redirty_page_for_writepage(wbc, page);
-			end_page_writeback(page);
+			folio_redirty_for_writepage(wbc, folio);
+			folio_end_writeback(folio);
 			ceph_osdc_put_request(req);
 			return PTR_ERR(bounce_page);
 		}
 	}
 
 	/* it may be a short write due to an object boundary */
-	WARN_ON_ONCE(len > thp_size(page));
+	WARN_ON_ONCE(len > folio_size(folio));
 	osd_req_op_extent_osd_data_pages(req, 0,
 			bounce_page ? &bounce_page : &page, wlen, 0,
 			false, false);
@@ -767,26 +767,26 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 			wbc = &tmp_wbc;
 		if (err == -ERESTARTSYS) {
 			/* killed by SIGKILL */
-			dout("writepage interrupted page %p\n", page);
-			redirty_page_for_writepage(wbc, page);
-			end_page_writeback(page);
+			dout("writepage interrupted folio %lu\n", folio->index);
+			folio_redirty_for_writepage(wbc, folio);
+			folio_end_writeback(folio);
 			return err;
 		}
 		if (err == -EBLOCKLISTED)
 			fsc->blocklisted = true;
-		dout("writepage setting page/mapping error %d %p\n",
-		     err, page);
+		dout("writepage setting folio/mapping error %d %lu\n",
+		     err, folio->index);
 		mapping_set_error(&inode->i_data, err);
 		wbc->pages_skipped++;
 	} else {
-		dout("writepage cleaned page %p\n", page);
+		dout("writepage cleaned folio %lu\n", folio->index);
 		err = 0;  /* vfs expects us to return 0 */
 	}
-	oldest = detach_page_private(page);
+	oldest = folio_detach_private(folio);
 	WARN_ON_ONCE(oldest != snapc);
-	end_page_writeback(page);
+	folio_end_writeback(folio);
 	ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
-	ceph_put_snap_context(snapc);  /* page's reference */
+	ceph_put_snap_context(snapc);  /* folio's reference */
 
 	if (atomic_long_dec_return(&fsc->writeback_count) <
 	    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
@@ -1432,7 +1432,7 @@ static struct ceph_snap_context *ceph_find_incompatible(struct folio *folio)
 		dout(" folio %ld snapc %p not current, but oldest\n",
 				folio->index, snapc);
 		if (folio_clear_dirty_for_io(folio)) {
-			int r = writepage_nounlock(&folio->page, NULL);
+			int r = writepage_nounlock(folio, NULL);
 			if (r < 0)
 				return ERR_PTR(r);
 		}
-- 
2.40.1


Thread overview: 27+ messages
2023-08-25 20:12 [PATCH 00/15] Many folio conversions for ceph Matthew Wilcox (Oracle)
2023-08-25 20:12 ` [PATCH 01/15] ceph: Convert ceph_writepages_start() to use folios a little more Matthew Wilcox (Oracle)
2023-08-28  1:18   ` Xiubo Li
2023-11-20  0:30     ` Xiubo Li
2023-08-25 20:12 ` [PATCH 02/15] ceph: Convert ceph_page_mkwrite() to use a folio Matthew Wilcox (Oracle)
2023-08-28  1:21   ` Xiubo Li
2023-08-25 20:12 ` [PATCH 03/15] mm: Delete page_mkwrite_check_truncate() Matthew Wilcox (Oracle)
2023-08-25 20:12 ` [PATCH 04/15] ceph: Add a migrate_folio method Matthew Wilcox (Oracle)
2023-08-25 20:12 ` [PATCH 05/15] ceph: Remove ceph_writepage() Matthew Wilcox (Oracle)
2023-08-25 20:12 ` [PATCH 06/15] ceph: Convert ceph_find_incompatible() to take a folio Matthew Wilcox (Oracle)
2023-08-25 20:12 ` [PATCH 07/15] ceph: Convert writepage_nounlock() to take a folio Matthew Wilcox (Oracle) [this message]
2023-08-25 20:12 ` [PATCH 08/15] ceph: Convert writepages_finish() to use a folio Matthew Wilcox (Oracle)
2023-08-25 20:12 ` [PATCH 09/15] ceph: Use a folio in ceph_filemap_fault() Matthew Wilcox (Oracle)
2023-08-26  3:00   ` Matthew Wilcox
2023-08-28  1:19     ` Xiubo Li
2023-08-29 11:55       ` Jeff Layton
2023-08-29 13:30         ` Matthew Wilcox
2023-08-30 10:44           ` Ilya Dryomov
2023-08-31  3:52             ` Xiubo Li
2023-08-25 20:12 ` [PATCH 10/15] ceph: Convert ceph_read_iter() to use a folio to read inline data Matthew Wilcox (Oracle)
2023-08-25 20:12 ` [PATCH 11/15] ceph: Convert __ceph_do_getattr() to take a folio Matthew Wilcox (Oracle)
2023-08-25 20:12 ` [PATCH 12/15] ceph: Convert ceph_fill_inode() " Matthew Wilcox (Oracle)
2023-08-25 20:12 ` [PATCH 13/15] ceph: Convert ceph_fill_inline_data() " Matthew Wilcox (Oracle)
2023-08-25 20:12 ` [PATCH 14/15] ceph: Convert ceph_set_page_fscache() to ceph_folio_start_fscache() Matthew Wilcox (Oracle)
2023-08-25 20:12 ` [PATCH 15/15] netfs: Remove unused functions Matthew Wilcox (Oracle)
2023-11-17 15:37 ` [PATCH 00/15] Many folio conversions for ceph Matthew Wilcox
2023-11-20  0:32   ` Xiubo Li
