From: John Groves <John@Groves.net>
To: John Groves <John@Groves.net>, Jonathan Corbet <corbet@lwn.net>,
Jonathan Cameron <Jonathan.Cameron@huawei.com>,
Dan Williams <dan.j.williams@intel.com>,
Vishal Verma <vishal.l.verma@intel.com>,
Dave Jiang <dave.jiang@intel.com>,
Alexander Viro <viro@zeniv.linux.org.uk>,
Christian Brauner <brauner@kernel.org>, Jan Kara <jack@suse.cz>,
Matthew Wilcox <willy@infradead.org>,
linux-cxl@vger.kernel.org, linux-fsdevel@vger.kernel.org,
nvdimm@lists.linux.dev
Cc: John Groves <jgroves@micron.com>,
john@jagalactic.com, Dave Chinner <david@fromorbit.com>,
Christoph Hellwig <hch@infradead.org>,
dave.hansen@linux.intel.com, gregory.price@memverge.com,
Randy Dunlap <rdunlap@infradead.org>,
Jerome Glisse <jglisse@google.com>,
Aravind Ramesh <arramesh@micron.com>,
Ajay Joshi <ajayjoshi@micron.com>,
Eishan Mirakhur <emirakhur@micron.com>,
Ravi Shankar <venkataravis@micron.com>,
Srinivasulu Thanneeru <sthanneeru@micron.com>,
Luis Chamberlain <mcgrof@kernel.org>,
Amir Goldstein <amir73il@gmail.com>,
Chandan Babu R <chandanbabu@kernel.org>,
Bagas Sanjaya <bagasdotme@gmail.com>,
"Darrick J . Wong" <djwong@kernel.org>,
Kent Overstreet <kent.overstreet@linux.dev>,
Steve French <stfrench@microsoft.com>,
Nathan Lynch <nathanl@linux.ibm.com>,
Michael Ellerman <mpe@ellerman.id.au>,
Thomas Zimmermann <tzimmermann@suse.de>,
Julien Panis <jpanis@baylibre.com>,
Stanislav Fomichev <sdf@google.com>,
Dongsheng Yang <dongsheng.yang@easystack.cn>,
John Groves <john@groves.net>
Subject: [RFC PATCH v2 05/12] dev_dax_iomap: Add dax_operations for use by fs-dax on devdax
Date: Mon, 29 Apr 2024 12:04:21 -0500 [thread overview]
Message-ID: <2a8b926ce25a9ef242c933fa451b29401e62bb37.1714409084.git.john@groves.net> (raw)
In-Reply-To: <cover.1714409084.git.john@groves.net>
Notes about this commit:
* These methods are based on pmem_dax_ops from drivers/nvdimm/pmem.c
* dev_dax_direct_access() returns the hpa, pfn and kva. The kva was
newly stored as dev_dax->virt_addr by dev_dax_probe().
* The hpa/pfn are used for mmap (dax_iomap_fault()), and the kva is used
for read/write (dax_iomap_rw())
* dev_dax_recovery_write() and dev_dax_zero_page_range() have not been
tested yet. I'm looking for suggestions as to how to test those.
Signed-off-by: John Groves <john@groves.net>
---
drivers/dax/bus.c | 120 ++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 115 insertions(+), 5 deletions(-)
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index f894272beab8..9c57d4139b74 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -7,6 +7,10 @@
#include <linux/slab.h>
#include <linux/dax.h>
#include <linux/io.h>
+#include <linux/backing-dev.h>
+#include <linux/pfn_t.h>
+#include <linux/range.h>
+#include <linux/uio.h>
#include "dax-private.h"
#include "bus.h"
@@ -1471,6 +1475,105 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
}
EXPORT_SYMBOL_GPL(dax_pgoff_to_phys);
+#if IS_ENABLED(CONFIG_DEV_DAX_IOMAP)
+
+static void write_dax(void *pmem_addr, struct page *page,
+ unsigned int off, unsigned int len)
+{
+ unsigned int chunk;
+ void *mem;
+
+ while (len) {
+ mem = kmap_local_page(page);
+ chunk = min_t(unsigned int, len, PAGE_SIZE - off);
+ memcpy_flushcache(pmem_addr, mem + off, chunk);
+ kunmap_local(mem);
+ len -= chunk;
+ off = 0;
+ page++;
+ pmem_addr += chunk;
+ }
+}
+
+static long __dev_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
+ long nr_pages, enum dax_access_mode mode, void **kaddr,
+ pfn_t *pfn)
+{
+ struct dev_dax *dev_dax = dax_get_private(dax_dev);
+ size_t size = nr_pages << PAGE_SHIFT;
+ size_t offset = pgoff << PAGE_SHIFT;
+ void *virt_addr = dev_dax->virt_addr + offset;
+ u64 flags = PFN_DEV|PFN_MAP;
+ phys_addr_t phys;
+ pfn_t local_pfn;
+ size_t dax_size;
+
+ WARN_ON(!dev_dax->virt_addr);
+
+ if (down_read_interruptible(&dax_dev_rwsem))
+ return 0; /* no valid data since we were killed */
+ dax_size = dev_dax_size(dev_dax);
+ up_read(&dax_dev_rwsem);
+
+ phys = dax_pgoff_to_phys(dev_dax, pgoff, nr_pages << PAGE_SHIFT);
+
+ if (kaddr)
+ *kaddr = virt_addr;
+
+ local_pfn = phys_to_pfn_t(phys, flags); /* are flags correct? */
+ if (pfn)
+ *pfn = local_pfn;
+
+ /* This is the valid size at the specified address */
+ return PHYS_PFN(min_t(size_t, size, dax_size - offset));
+}
+
+static int dev_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
+ size_t nr_pages)
+{
+ long resid = nr_pages << PAGE_SHIFT;
+ long offset = pgoff << PAGE_SHIFT;
+
+ /* Break into one write per dax region */
+ while (resid > 0) {
+ void *kaddr;
+ pgoff_t poff = offset >> PAGE_SHIFT;
+ long len = __dev_dax_direct_access(dax_dev, poff,
+ nr_pages, DAX_ACCESS, &kaddr, NULL);
+ len = min_t(long, len, PAGE_SIZE);
+ write_dax(kaddr, ZERO_PAGE(0), offset, len);
+
+ offset += len;
+ resid -= len;
+ }
+ return 0;
+}
+
+static long dev_dax_direct_access(struct dax_device *dax_dev,
+ pgoff_t pgoff, long nr_pages, enum dax_access_mode mode,
+ void **kaddr, pfn_t *pfn)
+{
+ return __dev_dax_direct_access(dax_dev, pgoff, nr_pages, mode, kaddr, pfn);
+}
+
+static size_t dev_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i)
+{
+ size_t off;
+
+ off = offset_in_page(addr);
+
+ return _copy_from_iter_flushcache(addr, bytes, i);
+}
+
+static const struct dax_operations dev_dax_ops = {
+ .direct_access = dev_dax_direct_access,
+ .zero_page_range = dev_dax_zero_page_range,
+ .recovery_write = dev_dax_recovery_write,
+};
+
+#endif /* IS_ENABLED(CONFIG_DEV_DAX_IOMAP) */
+
static struct dev_dax *__devm_create_dev_dax(struct dev_dax_data *data)
{
struct dax_region *dax_region = data->dax_region;
@@ -1526,11 +1629,18 @@ static struct dev_dax *__devm_create_dev_dax(struct dev_dax_data *data)
}
}
- /*
- * No dax_operations since there is no access to this device outside of
- * mmap of the resulting character device.
- */
- dax_dev = alloc_dax(dev_dax, NULL);
+ if (IS_ENABLED(CONFIG_DEV_DAX_IOMAP))
+ /* holder_ops currently populated separately in a slightly
+ * hacky way
+ */
+ dax_dev = alloc_dax(dev_dax, &dev_dax_ops);
+ else
+ /*
+ * No dax_operations since there is no access to this device
+ * outside of mmap of the resulting character device.
+ */
+ dax_dev = alloc_dax(dev_dax, NULL);
+
if (IS_ERR(dax_dev)) {
rc = PTR_ERR(dax_dev);
goto err_alloc_dax;
--
2.43.0
next prev parent reply other threads:[~2024-04-29 17:05 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-04-29 17:04 [RFC PATCH v2 00/12] Introduce the famfs shared-memory file system John Groves
2024-04-29 17:04 ` [RFC PATCH v2 01/12] famfs: Introduce famfs documentation John Groves
2024-04-30 6:46 ` Bagas Sanjaya
2024-04-29 17:04 ` [RFC PATCH v2 02/12] dev_dax_iomap: Move dax_pgoff_to_phys() from device.c to bus.c John Groves
2024-04-29 17:04 ` [RFC PATCH v2 03/12] dev_dax_iomap: Add fs_dax_get() func to prepare dax for fs-dax usage John Groves
2024-04-29 17:04 ` [RFC PATCH v2 04/12] dev_dax_iomap: Save the kva from memremap John Groves
2024-04-29 17:04 ` John Groves [this message]
2024-04-29 17:04 ` [RFC PATCH v2 06/12] dev_dax_iomap: export dax_dev_get() John Groves
2024-04-29 17:04 ` [RFC PATCH v2 07/12] famfs prep: Add fs/super.c:kill_char_super() John Groves
2024-05-02 18:17 ` Al Viro
2024-05-02 22:25 ` John Groves
2024-05-03 9:04 ` Christian Brauner
2024-05-03 15:38 ` John Groves
2024-04-29 17:04 ` [RFC PATCH v2 08/12] famfs: module operations & fs_context John Groves
2024-04-30 11:01 ` Christian Brauner
2024-05-02 15:51 ` John Groves
2024-05-03 14:15 ` John Groves
2024-05-02 18:23 ` Al Viro
2024-05-02 21:50 ` John Groves
2024-04-29 17:04 ` [RFC PATCH v2 09/12] famfs: Introduce inode_operations and super_operations John Groves
2024-04-29 17:04 ` [RFC PATCH v2 10/12] famfs: Introduce file_operations read/write John Groves
2024-05-02 18:29 ` Al Viro
2024-05-02 21:51 ` John Groves
2024-04-29 17:04 ` [RFC PATCH v2 11/12] famfs: Introduce mmap and VM fault handling John Groves
2024-04-29 17:04 ` [RFC PATCH v2 12/12] famfs: famfs_ioctl and core file-to-memory mapping logic & iomap_ops John Groves
2024-04-29 18:32 ` [RFC PATCH v2 00/12] Introduce the famfs shared-memory file system Matthew Wilcox
2024-04-29 23:08 ` Kent Overstreet
2024-04-30 2:24 ` John Groves
2024-04-30 3:11 ` Kent Overstreet
2024-05-01 2:09 ` John Groves
2024-04-30 2:11 ` John Groves
2024-04-30 21:01 ` Matthew Wilcox
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=2a8b926ce25a9ef242c933fa451b29401e62bb37.1714409084.git.john@groves.net \
--to=john@groves.net \
--cc=Jonathan.Cameron@huawei.com \
--cc=ajayjoshi@micron.com \
--cc=amir73il@gmail.com \
--cc=arramesh@micron.com \
--cc=bagasdotme@gmail.com \
--cc=brauner@kernel.org \
--cc=chandanbabu@kernel.org \
--cc=corbet@lwn.net \
--cc=dan.j.williams@intel.com \
--cc=dave.hansen@linux.intel.com \
--cc=dave.jiang@intel.com \
--cc=david@fromorbit.com \
--cc=djwong@kernel.org \
--cc=dongsheng.yang@easystack.cn \
--cc=emirakhur@micron.com \
--cc=gregory.price@memverge.com \
--cc=hch@infradead.org \
--cc=jack@suse.cz \
--cc=jglisse@google.com \
--cc=jgroves@micron.com \
--cc=john@jagalactic.com \
--cc=jpanis@baylibre.com \
--cc=kent.overstreet@linux.dev \
--cc=linux-cxl@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
--cc=mcgrof@kernel.org \
--cc=mpe@ellerman.id.au \
--cc=nathanl@linux.ibm.com \
--cc=nvdimm@lists.linux.dev \
--cc=rdunlap@infradead.org \
--cc=sdf@google.com \
--cc=stfrench@microsoft.com \
--cc=sthanneeru@micron.com \
--cc=tzimmermann@suse.de \
--cc=venkataravis@micron.com \
--cc=viro@zeniv.linux.org.uk \
--cc=vishal.l.verma@intel.com \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).