From: Mikulas Patocka <mpatocka@redhat.com> To: "Javier González" <javier@javigon.com> Cc: Chaitanya Kulkarni <chaitanyak@nvidia.com>, "linux-block@vger.kernel.org" <linux-block@vger.kernel.org>, "linux-scsi@vger.kernel.org" <linux-scsi@vger.kernel.org>, "dm-devel@redhat.com" <dm-devel@redhat.com>, "linux-nvme@lists.infradead.org" <linux-nvme@lists.infradead.org>, linux-fsdevel <linux-fsdevel@vger.kernel.org>, Jens Axboe <axboe@kernel.dk>, "msnitzer@redhat.com >> msnitzer@redhat.com" <msnitzer@redhat.com>, Bart Van Assche <bvanassche@acm.org>, "martin.petersen@oracle.com >> Martin K. Petersen" <martin.petersen@oracle.com>, "roland@purestorage.com" <roland@purestorage.com>, Hannes Reinecke <hare@suse.de>, "kbus @imap.gmail.com>> Keith Busch" <kbusch@kernel.org>, Christoph Hellwig <hch@lst.de>, "Frederick.Knight@netapp.com" <Frederick.Knight@netapp.com>, "zach.brown@ni.com" <zach.brown@ni.com>, "osandov@fb.com" <osandov@fb.com>, "lsf-pc@lists.linux-foundation.org" <lsf-pc@lists.linux-foundation.org>, "djwong@kernel.org" <djwong@kernel.org>, "josef@toxicpanda.com" <josef@toxicpanda.com>, "clm@fb.com" <clm@fb.com>, "dsterba@suse.com" <dsterba@suse.com>, "tytso@mit.edu" <tytso@mit.edu>, "jack@suse.com" <jack@suse.com>, Kanchan Joshi <joshi.k@samsung.com> Subject: [RFC PATCH 2/3] nvme: add copy offload support Date: Tue, 1 Feb 2022 13:33:12 -0500 (EST) [thread overview] Message-ID: <alpine.LRH.2.02.2202011332330.22481@file01.intranet.prod.int.rdu2.redhat.com> (raw) In-Reply-To: <alpine.LRH.2.02.2202011327350.22481@file01.intranet.prod.int.rdu2.redhat.com> This patch adds copy offload support to the nvme host driver. The function nvme_setup_read_token stores namespace and location in the token and the function nvme_setup_write_token retrieves information from the token and submits the copy command to the device. 
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> --- drivers/nvme/host/core.c | 94 +++++++++++++++++++++++++++++++++++++++++++++ drivers/nvme/host/fc.c | 5 ++ drivers/nvme/host/nvme.h | 1 drivers/nvme/host/pci.c | 5 ++ drivers/nvme/host/rdma.c | 5 ++ drivers/nvme/host/tcp.c | 5 ++ drivers/nvme/target/loop.c | 5 ++ include/linux/nvme.h | 33 +++++++++++++++ 8 files changed, 153 insertions(+) Index: linux-2.6/drivers/nvme/host/core.c =================================================================== --- linux-2.6.orig/drivers/nvme/host/core.c 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/drivers/nvme/host/core.c 2022-02-01 18:34:19.000000000 +0100 @@ -975,6 +975,85 @@ static inline blk_status_t nvme_setup_rw return 0; } +struct nvme_copy_token { + char subsys[4]; + struct nvme_ns *ns; + u64 src_sector; + u64 sectors; +}; + +static inline blk_status_t nvme_setup_read_token(struct nvme_ns *ns, struct request *req) +{ + struct bio *bio = req->bio; + struct nvme_copy_token *token = page_to_virt(bio->bi_io_vec[0].bv_page) + bio->bi_io_vec[0].bv_offset; + memcpy(token->subsys, "nvme", 4); + token->ns = ns; + token->src_sector = bio->bi_iter.bi_sector; + token->sectors = bio->bi_iter.bi_size >> 9; + return 0; +} + +static inline blk_status_t nvme_setup_write_token(struct nvme_ns *ns, + struct request *req, struct nvme_command *cmnd) +{ + sector_t src_sector, dst_sector, n_sectors; + u64 src_lba, dst_lba, n_lba; + + unsigned n_descriptors, i; + struct nvme_copy_desc *descriptors; + + struct bio *bio = req->bio; + struct nvme_copy_token *token = page_to_virt(bio->bi_io_vec[0].bv_page) + bio->bi_io_vec[0].bv_offset; + if (unlikely(memcmp(token->subsys, "nvme", 4))) + return BLK_STS_NOTSUPP; + if (unlikely(token->ns != ns)) + return BLK_STS_NOTSUPP; + + src_sector = token->src_sector; + dst_sector = bio->bi_iter.bi_sector; + n_sectors = token->sectors; + if (WARN_ON(n_sectors != bio->bi_iter.bi_size >> 9)) + return BLK_STS_NOTSUPP; + + src_lba = nvme_sect_to_lba(ns, 
src_sector); + dst_lba = nvme_sect_to_lba(ns, dst_sector); + n_lba = nvme_sect_to_lba(ns, n_sectors); + + if (unlikely(nvme_lba_to_sect(ns, src_lba) != src_sector) || + unlikely(nvme_lba_to_sect(ns, dst_lba) != dst_sector) || + unlikely(nvme_lba_to_sect(ns, n_lba) != n_sectors)) + return BLK_STS_NOTSUPP; + + if (WARN_ON(!n_lba)) + return BLK_STS_NOTSUPP; + + n_descriptors = (n_lba + 0xffff) / 0x10000; + descriptors = kzalloc(n_descriptors * sizeof(struct nvme_copy_desc), GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!descriptors)) + return BLK_STS_RESOURCE; + + memset(cmnd, 0, sizeof(*cmnd)); + cmnd->copy.opcode = nvme_cmd_copy; + cmnd->copy.nsid = cpu_to_le32(ns->head->ns_id); + cmnd->copy.sdlba = cpu_to_le64(dst_lba); + cmnd->copy.length = n_descriptors - 1; + + for (i = 0; i < n_descriptors; i++) { + u64 this_step = min(n_lba, (u64)0x10000); + descriptors[i].slba = cpu_to_le64(src_lba); + descriptors[i].length = cpu_to_le16(this_step - 1); + src_lba += this_step; + n_lba -= this_step; + } + + req->special_vec.bv_page = virt_to_page(descriptors); + req->special_vec.bv_offset = offset_in_page(descriptors); + req->special_vec.bv_len = n_descriptors * sizeof(struct nvme_copy_desc); + req->rq_flags |= RQF_SPECIAL_PAYLOAD; + + return 0; +} + void nvme_cleanup_cmd(struct request *req) { if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { @@ -1032,6 +1111,12 @@ blk_status_t nvme_setup_cmd(struct nvme_ case REQ_OP_ZONE_APPEND: ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append); break; + case REQ_OP_COPY_READ_TOKEN: + ret = nvme_setup_read_token(ns, req); + break; + case REQ_OP_COPY_WRITE_TOKEN: + ret = nvme_setup_write_token(ns, req, cmd); + break; default: WARN_ON_ONCE(1); return BLK_STS_IOERR; @@ -1865,6 +1950,8 @@ static void nvme_update_disk_info(struct blk_queue_max_write_zeroes_sectors(disk->queue, ns->ctrl->max_zeroes_sectors); + blk_queue_max_copy_sectors(disk->queue, ns->ctrl->max_copy_sectors); + set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) || 
test_bit(NVME_NS_FORCE_RO, &ns->flags)); } @@ -2891,6 +2978,12 @@ static int nvme_init_non_mdts_limits(str else ctrl->max_zeroes_sectors = 0; + if (ctrl->oncs & NVME_CTRL_ONCS_COPY) { + ctrl->max_copy_sectors = 1U << 24; + } else { + ctrl->max_copy_sectors = 0; + } + if (nvme_ctrl_limited_cns(ctrl)) return 0; @@ -4716,6 +4809,7 @@ static inline void _nvme_check_size(void { BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64); BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); + BUILD_BUG_ON(sizeof(struct nvme_copy_command) != 64); BUILD_BUG_ON(sizeof(struct nvme_identify) != 64); BUILD_BUG_ON(sizeof(struct nvme_features) != 64); BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64); Index: linux-2.6/drivers/nvme/host/nvme.h =================================================================== --- linux-2.6.orig/drivers/nvme/host/nvme.h 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/drivers/nvme/host/nvme.h 2022-02-01 18:34:19.000000000 +0100 @@ -277,6 +277,7 @@ struct nvme_ctrl { #ifdef CONFIG_BLK_DEV_ZONED u32 max_zone_append; #endif + u32 max_copy_sectors; u16 crdt[3]; u16 oncs; u16 oacs; Index: linux-2.6/include/linux/nvme.h =================================================================== --- linux-2.6.orig/include/linux/nvme.h 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/include/linux/nvme.h 2022-02-01 18:34:19.000000000 +0100 @@ -335,6 +335,8 @@ enum { NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3, NVME_CTRL_ONCS_RESERVATIONS = 1 << 5, NVME_CTRL_ONCS_TIMESTAMP = 1 << 6, + NVME_CTRL_ONCS_VERIFY = 1 << 7, + NVME_CTRL_ONCS_COPY = 1 << 8, NVME_CTRL_VWC_PRESENT = 1 << 0, NVME_CTRL_OACS_SEC_SUPP = 1 << 0, NVME_CTRL_OACS_DIRECTIVES = 1 << 5, @@ -704,6 +706,7 @@ enum nvme_opcode { nvme_cmd_resv_report = 0x0e, nvme_cmd_resv_acquire = 0x11, nvme_cmd_resv_release = 0x15, + nvme_cmd_copy = 0x19, nvme_cmd_zone_mgmt_send = 0x79, nvme_cmd_zone_mgmt_recv = 0x7a, nvme_cmd_zone_append = 0x7d, @@ -872,6 +875,35 @@ enum { NVME_RW_DTYPE_STREAMS = 1 << 4, }; +struct 
nvme_copy_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 sdlba; + __u8 length; + __u8 control2; + __le16 control; + __le32 dspec; + __le32 reftag; + __le16 apptag; + __le16 appmask; +}; + +struct nvme_copy_desc { + __u64 rsvd; + __le64 slba; + __le16 length; + __u16 rsvd2; + __u32 rsvd3; + __le32 reftag; + __le16 apptag; + __le16 appmask; +}; + struct nvme_dsm_cmd { __u8 opcode; __u8 flags; @@ -1441,6 +1473,7 @@ struct nvme_command { union { struct nvme_common_command common; struct nvme_rw_command rw; + struct nvme_copy_command copy; struct nvme_identify identify; struct nvme_features features; struct nvme_create_cq create_cq; Index: linux-2.6/drivers/nvme/host/pci.c =================================================================== --- linux-2.6.orig/drivers/nvme/host/pci.c 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/drivers/nvme/host/pci.c 2022-02-01 18:34:19.000000000 +0100 @@ -949,6 +949,11 @@ static blk_status_t nvme_queue_rq(struct struct nvme_iod *iod = blk_mq_rq_to_pdu(req); blk_status_t ret; + if (unlikely((req->cmd_flags & REQ_OP_MASK) == REQ_OP_COPY_READ_TOKEN)) { + blk_mq_end_request(req, BLK_STS_OK); + return BLK_STS_OK; + } + /* * We should not need to do this, but we're still using this to * ensure we can drain requests on a dying queue. 
Index: linux-2.6/drivers/nvme/host/fc.c =================================================================== --- linux-2.6.orig/drivers/nvme/host/fc.c 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/drivers/nvme/host/fc.c 2022-02-01 18:34:19.000000000 +0100 @@ -2780,6 +2780,11 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *h u32 data_len; blk_status_t ret; + if (unlikely((rq->cmd_flags & REQ_OP_MASK) == REQ_OP_COPY_READ_TOKEN)) { + blk_mq_end_request(rq, BLK_STS_OK); + return BLK_STS_OK; + } + if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); Index: linux-2.6/drivers/nvme/host/rdma.c =================================================================== --- linux-2.6.orig/drivers/nvme/host/rdma.c 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/drivers/nvme/host/rdma.c 2022-02-01 18:34:19.000000000 +0100 @@ -2048,6 +2048,11 @@ static blk_status_t nvme_rdma_queue_rq(s blk_status_t ret; int err; + if (unlikely((rq->cmd_flags & REQ_OP_MASK) == REQ_OP_COPY_READ_TOKEN)) { + blk_mq_end_request(rq, BLK_STS_OK); + return BLK_STS_OK; + } + WARN_ON_ONCE(rq->tag < 0); if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) Index: linux-2.6/drivers/nvme/host/tcp.c =================================================================== --- linux-2.6.orig/drivers/nvme/host/tcp.c 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/drivers/nvme/host/tcp.c 2022-02-01 18:34:19.000000000 +0100 @@ -2372,6 +2372,11 @@ static blk_status_t nvme_tcp_queue_rq(st bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); blk_status_t ret; + if (unlikely((rq->cmd_flags & REQ_OP_MASK) == REQ_OP_COPY_READ_TOKEN)) { + blk_mq_end_request(rq, BLK_STS_OK); + return BLK_STS_OK; + } + if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); Index: linux-2.6/drivers/nvme/target/loop.c 
=================================================================== --- linux-2.6.orig/drivers/nvme/target/loop.c 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/drivers/nvme/target/loop.c 2022-02-01 18:34:19.000000000 +0100 @@ -138,6 +138,11 @@ static blk_status_t nvme_loop_queue_rq(s bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags); blk_status_t ret; + if (unlikely((req->cmd_flags & REQ_OP_MASK) == REQ_OP_COPY_READ_TOKEN)) { + blk_mq_end_request(req, BLK_STS_OK); + return BLK_STS_OK; + } + if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready)) return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);
WARNING: multiple messages have this Message-ID (diff)
From: Mikulas Patocka <mpatocka@redhat.com> To: "Javier González" <javier@javigon.com> Cc: "djwong@kernel.org" <djwong@kernel.org>, "linux-nvme@lists.infradead.org" <linux-nvme@lists.infradead.org>, "clm@fb.com" <clm@fb.com>, "dm-devel@redhat.com" <dm-devel@redhat.com>, "osandov@fb.com" <osandov@fb.com>, "msnitzer@redhat.com >> msnitzer@redhat.com" <msnitzer@redhat.com>, Bart Van Assche <bvanassche@acm.org>, "linux-scsi@vger.kernel.org" <linux-scsi@vger.kernel.org>, Christoph Hellwig <hch@lst.de>, "roland@purestorage.com" <roland@purestorage.com>, "zach.brown@ni.com" <zach.brown@ni.com>, Chaitanya Kulkarni <chaitanyak@nvidia.com>, "josef@toxicpanda.com" <josef@toxicpanda.com>, "linux-block@vger.kernel.org" <linux-block@vger.kernel.org>, "dsterba@suse.com" <dsterba@suse.com>, "kbus @imap.gmail.com>> Keith Busch" <kbusch@kernel.org>, "Frederick.Knight@netapp.com" <Frederick.Knight@netapp.com>, Jens Axboe <axboe@kernel.dk>, "tytso@mit.edu" <tytso@mit.edu>, Kanchan Joshi <joshi.k@samsung.com>, "martin.petersen@oracle.com >> Martin K. Petersen" <martin.petersen@oracle.com>, "jack@suse.com" <jack@suse.com>, linux-fsdevel <linux-fsdevel@vger.kernel.org>, "lsf-pc@lists.linux-foundation.org" <lsf-pc@lists.linux-foundation.org> Subject: [dm-devel] [RFC PATCH 2/3] nvme: add copy offload support Date: Tue, 1 Feb 2022 13:33:12 -0500 (EST) [thread overview] Message-ID: <alpine.LRH.2.02.2202011332330.22481@file01.intranet.prod.int.rdu2.redhat.com> (raw) In-Reply-To: <alpine.LRH.2.02.2202011327350.22481@file01.intranet.prod.int.rdu2.redhat.com> This patch adds copy offload support to the nvme host driver. The function nvme_setup_read_token stores namespace and location in the token and the function nvme_setup_write_token retrieves information from the token and submits the copy command to the device. 
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com> --- drivers/nvme/host/core.c | 94 +++++++++++++++++++++++++++++++++++++++++++++ drivers/nvme/host/fc.c | 5 ++ drivers/nvme/host/nvme.h | 1 drivers/nvme/host/pci.c | 5 ++ drivers/nvme/host/rdma.c | 5 ++ drivers/nvme/host/tcp.c | 5 ++ drivers/nvme/target/loop.c | 5 ++ include/linux/nvme.h | 33 +++++++++++++++ 8 files changed, 153 insertions(+) Index: linux-2.6/drivers/nvme/host/core.c =================================================================== --- linux-2.6.orig/drivers/nvme/host/core.c 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/drivers/nvme/host/core.c 2022-02-01 18:34:19.000000000 +0100 @@ -975,6 +975,85 @@ static inline blk_status_t nvme_setup_rw return 0; } +struct nvme_copy_token { + char subsys[4]; + struct nvme_ns *ns; + u64 src_sector; + u64 sectors; +}; + +static inline blk_status_t nvme_setup_read_token(struct nvme_ns *ns, struct request *req) +{ + struct bio *bio = req->bio; + struct nvme_copy_token *token = page_to_virt(bio->bi_io_vec[0].bv_page) + bio->bi_io_vec[0].bv_offset; + memcpy(token->subsys, "nvme", 4); + token->ns = ns; + token->src_sector = bio->bi_iter.bi_sector; + token->sectors = bio->bi_iter.bi_size >> 9; + return 0; +} + +static inline blk_status_t nvme_setup_write_token(struct nvme_ns *ns, + struct request *req, struct nvme_command *cmnd) +{ + sector_t src_sector, dst_sector, n_sectors; + u64 src_lba, dst_lba, n_lba; + + unsigned n_descriptors, i; + struct nvme_copy_desc *descriptors; + + struct bio *bio = req->bio; + struct nvme_copy_token *token = page_to_virt(bio->bi_io_vec[0].bv_page) + bio->bi_io_vec[0].bv_offset; + if (unlikely(memcmp(token->subsys, "nvme", 4))) + return BLK_STS_NOTSUPP; + if (unlikely(token->ns != ns)) + return BLK_STS_NOTSUPP; + + src_sector = token->src_sector; + dst_sector = bio->bi_iter.bi_sector; + n_sectors = token->sectors; + if (WARN_ON(n_sectors != bio->bi_iter.bi_size >> 9)) + return BLK_STS_NOTSUPP; + + src_lba = nvme_sect_to_lba(ns, 
src_sector); + dst_lba = nvme_sect_to_lba(ns, dst_sector); + n_lba = nvme_sect_to_lba(ns, n_sectors); + + if (unlikely(nvme_lba_to_sect(ns, src_lba) != src_sector) || + unlikely(nvme_lba_to_sect(ns, dst_lba) != dst_sector) || + unlikely(nvme_lba_to_sect(ns, n_lba) != n_sectors)) + return BLK_STS_NOTSUPP; + + if (WARN_ON(!n_lba)) + return BLK_STS_NOTSUPP; + + n_descriptors = (n_lba + 0xffff) / 0x10000; + descriptors = kzalloc(n_descriptors * sizeof(struct nvme_copy_desc), GFP_ATOMIC | __GFP_NOWARN); + if (unlikely(!descriptors)) + return BLK_STS_RESOURCE; + + memset(cmnd, 0, sizeof(*cmnd)); + cmnd->copy.opcode = nvme_cmd_copy; + cmnd->copy.nsid = cpu_to_le32(ns->head->ns_id); + cmnd->copy.sdlba = cpu_to_le64(dst_lba); + cmnd->copy.length = n_descriptors - 1; + + for (i = 0; i < n_descriptors; i++) { + u64 this_step = min(n_lba, (u64)0x10000); + descriptors[i].slba = cpu_to_le64(src_lba); + descriptors[i].length = cpu_to_le16(this_step - 1); + src_lba += this_step; + n_lba -= this_step; + } + + req->special_vec.bv_page = virt_to_page(descriptors); + req->special_vec.bv_offset = offset_in_page(descriptors); + req->special_vec.bv_len = n_descriptors * sizeof(struct nvme_copy_desc); + req->rq_flags |= RQF_SPECIAL_PAYLOAD; + + return 0; +} + void nvme_cleanup_cmd(struct request *req) { if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { @@ -1032,6 +1111,12 @@ blk_status_t nvme_setup_cmd(struct nvme_ case REQ_OP_ZONE_APPEND: ret = nvme_setup_rw(ns, req, cmd, nvme_cmd_zone_append); break; + case REQ_OP_COPY_READ_TOKEN: + ret = nvme_setup_read_token(ns, req); + break; + case REQ_OP_COPY_WRITE_TOKEN: + ret = nvme_setup_write_token(ns, req, cmd); + break; default: WARN_ON_ONCE(1); return BLK_STS_IOERR; @@ -1865,6 +1950,8 @@ static void nvme_update_disk_info(struct blk_queue_max_write_zeroes_sectors(disk->queue, ns->ctrl->max_zeroes_sectors); + blk_queue_max_copy_sectors(disk->queue, ns->ctrl->max_copy_sectors); + set_disk_ro(disk, (id->nsattr & NVME_NS_ATTR_RO) || 
test_bit(NVME_NS_FORCE_RO, &ns->flags)); } @@ -2891,6 +2978,12 @@ static int nvme_init_non_mdts_limits(str else ctrl->max_zeroes_sectors = 0; + if (ctrl->oncs & NVME_CTRL_ONCS_COPY) { + ctrl->max_copy_sectors = 1U << 24; + } else { + ctrl->max_copy_sectors = 0; + } + if (nvme_ctrl_limited_cns(ctrl)) return 0; @@ -4716,6 +4809,7 @@ static inline void _nvme_check_size(void { BUILD_BUG_ON(sizeof(struct nvme_common_command) != 64); BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64); + BUILD_BUG_ON(sizeof(struct nvme_copy_command) != 64); BUILD_BUG_ON(sizeof(struct nvme_identify) != 64); BUILD_BUG_ON(sizeof(struct nvme_features) != 64); BUILD_BUG_ON(sizeof(struct nvme_download_firmware) != 64); Index: linux-2.6/drivers/nvme/host/nvme.h =================================================================== --- linux-2.6.orig/drivers/nvme/host/nvme.h 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/drivers/nvme/host/nvme.h 2022-02-01 18:34:19.000000000 +0100 @@ -277,6 +277,7 @@ struct nvme_ctrl { #ifdef CONFIG_BLK_DEV_ZONED u32 max_zone_append; #endif + u32 max_copy_sectors; u16 crdt[3]; u16 oncs; u16 oacs; Index: linux-2.6/include/linux/nvme.h =================================================================== --- linux-2.6.orig/include/linux/nvme.h 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/include/linux/nvme.h 2022-02-01 18:34:19.000000000 +0100 @@ -335,6 +335,8 @@ enum { NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3, NVME_CTRL_ONCS_RESERVATIONS = 1 << 5, NVME_CTRL_ONCS_TIMESTAMP = 1 << 6, + NVME_CTRL_ONCS_VERIFY = 1 << 7, + NVME_CTRL_ONCS_COPY = 1 << 8, NVME_CTRL_VWC_PRESENT = 1 << 0, NVME_CTRL_OACS_SEC_SUPP = 1 << 0, NVME_CTRL_OACS_DIRECTIVES = 1 << 5, @@ -704,6 +706,7 @@ enum nvme_opcode { nvme_cmd_resv_report = 0x0e, nvme_cmd_resv_acquire = 0x11, nvme_cmd_resv_release = 0x15, + nvme_cmd_copy = 0x19, nvme_cmd_zone_mgmt_send = 0x79, nvme_cmd_zone_mgmt_recv = 0x7a, nvme_cmd_zone_append = 0x7d, @@ -872,6 +875,35 @@ enum { NVME_RW_DTYPE_STREAMS = 1 << 4, }; +struct 
nvme_copy_command { + __u8 opcode; + __u8 flags; + __u16 command_id; + __le32 nsid; + __u64 rsvd2; + __le64 metadata; + union nvme_data_ptr dptr; + __le64 sdlba; + __u8 length; + __u8 control2; + __le16 control; + __le32 dspec; + __le32 reftag; + __le16 apptag; + __le16 appmask; +}; + +struct nvme_copy_desc { + __u64 rsvd; + __le64 slba; + __le16 length; + __u16 rsvd2; + __u32 rsvd3; + __le32 reftag; + __le16 apptag; + __le16 appmask; +}; + struct nvme_dsm_cmd { __u8 opcode; __u8 flags; @@ -1441,6 +1473,7 @@ struct nvme_command { union { struct nvme_common_command common; struct nvme_rw_command rw; + struct nvme_copy_command copy; struct nvme_identify identify; struct nvme_features features; struct nvme_create_cq create_cq; Index: linux-2.6/drivers/nvme/host/pci.c =================================================================== --- linux-2.6.orig/drivers/nvme/host/pci.c 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/drivers/nvme/host/pci.c 2022-02-01 18:34:19.000000000 +0100 @@ -949,6 +949,11 @@ static blk_status_t nvme_queue_rq(struct struct nvme_iod *iod = blk_mq_rq_to_pdu(req); blk_status_t ret; + if (unlikely((req->cmd_flags & REQ_OP_MASK) == REQ_OP_COPY_READ_TOKEN)) { + blk_mq_end_request(req, BLK_STS_OK); + return BLK_STS_OK; + } + /* * We should not need to do this, but we're still using this to * ensure we can drain requests on a dying queue. 
Index: linux-2.6/drivers/nvme/host/fc.c =================================================================== --- linux-2.6.orig/drivers/nvme/host/fc.c 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/drivers/nvme/host/fc.c 2022-02-01 18:34:19.000000000 +0100 @@ -2780,6 +2780,11 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *h u32 data_len; blk_status_t ret; + if (unlikely((rq->cmd_flags & REQ_OP_MASK) == REQ_OP_COPY_READ_TOKEN)) { + blk_mq_end_request(rq, BLK_STS_OK); + return BLK_STS_OK; + } + if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || !nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); Index: linux-2.6/drivers/nvme/host/rdma.c =================================================================== --- linux-2.6.orig/drivers/nvme/host/rdma.c 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/drivers/nvme/host/rdma.c 2022-02-01 18:34:19.000000000 +0100 @@ -2048,6 +2048,11 @@ static blk_status_t nvme_rdma_queue_rq(s blk_status_t ret; int err; + if (unlikely((rq->cmd_flags & REQ_OP_MASK) == REQ_OP_COPY_READ_TOKEN)) { + blk_mq_end_request(rq, BLK_STS_OK); + return BLK_STS_OK; + } + WARN_ON_ONCE(rq->tag < 0); if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) Index: linux-2.6/drivers/nvme/host/tcp.c =================================================================== --- linux-2.6.orig/drivers/nvme/host/tcp.c 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/drivers/nvme/host/tcp.c 2022-02-01 18:34:19.000000000 +0100 @@ -2372,6 +2372,11 @@ static blk_status_t nvme_tcp_queue_rq(st bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); blk_status_t ret; + if (unlikely((rq->cmd_flags & REQ_OP_MASK) == REQ_OP_COPY_READ_TOKEN)) { + blk_mq_end_request(rq, BLK_STS_OK); + return BLK_STS_OK; + } + if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); Index: linux-2.6/drivers/nvme/target/loop.c 
=================================================================== --- linux-2.6.orig/drivers/nvme/target/loop.c 2022-02-01 18:34:19.000000000 +0100 +++ linux-2.6/drivers/nvme/target/loop.c 2022-02-01 18:34:19.000000000 +0100 @@ -138,6 +138,11 @@ static blk_status_t nvme_loop_queue_rq(s bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags); blk_status_t ret; + if (unlikely((req->cmd_flags & REQ_OP_MASK) == REQ_OP_COPY_READ_TOKEN)) { + blk_mq_end_request(req, BLK_STS_OK); + return BLK_STS_OK; + } + if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready)) return nvme_fail_nonready_command(&queue->ctrl->ctrl, req); -- dm-devel mailing list dm-devel@redhat.com https://listman.redhat.com/mailman/listinfo/dm-devel
next prev parent reply other threads:[~2022-02-01 18:33 UTC|newest] Thread overview: 183+ messages / expand[flat|nested] mbox.gz Atom feed top [not found] <CGME20220127071544uscas1p2f70f4d2509f3ebd574b7ed746d3fa551@uscas1p2.samsung.com> 2022-01-27 7:14 ` [LSF/MM/BFP ATTEND] [LSF/MM/BFP TOPIC] Storage: Copy Offload Chaitanya Kulkarni 2022-01-28 19:59 ` Adam Manzanares 2022-01-28 19:59 ` [dm-devel] " Adam Manzanares 2022-01-31 11:49 ` Johannes Thumshirn 2022-01-31 11:49 ` Johannes Thumshirn 2022-01-31 19:03 ` Bart Van Assche 2022-01-31 19:03 ` [dm-devel] " Bart Van Assche 2022-02-01 1:54 ` Luis Chamberlain 2022-02-01 1:54 ` Luis Chamberlain 2022-02-01 10:21 ` Javier González 2022-02-01 18:31 ` [RFC PATCH 0/3] NVMe copy offload patches Mikulas Patocka 2022-02-01 18:31 ` [dm-devel] " Mikulas Patocka 2022-02-01 18:32 ` [RFC PATCH 1/3] block: add copy offload support Mikulas Patocka 2022-02-01 18:32 ` [dm-devel] " Mikulas Patocka 2022-02-01 19:18 ` Bart Van Assche 2022-02-01 19:18 ` [dm-devel] " Bart Van Assche 2022-02-03 18:50 ` Mikulas Patocka 2022-02-03 18:50 ` Mikulas Patocka 2022-02-03 20:11 ` Keith Busch 2022-02-03 20:11 ` [dm-devel] " Keith Busch 2022-02-03 22:49 ` Bart Van Assche 2022-02-03 22:49 ` Bart Van Assche 2022-02-04 12:09 ` [dm-devel] " Mikulas Patocka 2022-02-04 12:09 ` Mikulas Patocka 2022-02-04 13:34 ` Jens Axboe 2022-02-04 13:34 ` [dm-devel] " Jens Axboe 2022-02-02 16:21 ` Keith Busch 2022-02-02 16:21 ` [dm-devel] " Keith Busch 2022-02-02 16:40 ` Mikulas Patocka 2022-02-02 16:40 ` [dm-devel] " Mikulas Patocka 2022-02-02 18:40 ` Knight, Frederick 2022-02-02 18:40 ` [dm-devel] " Knight, Frederick 2022-02-01 18:33 ` Mikulas Patocka [this message] 2022-02-01 18:33 ` [dm-devel] [RFC PATCH 2/3] nvme: " Mikulas Patocka 2022-02-01 19:18 ` Bart Van Assche 2022-02-01 19:18 ` [dm-devel] " Bart Van Assche 2022-02-01 19:25 ` Mikulas Patocka 2022-02-01 19:25 ` [dm-devel] " Mikulas Patocka 2022-02-01 18:33 ` [RFC PATCH 3/3] nvme: add the "debug" host driver Mikulas 
Patocka 2022-02-01 18:33 ` [dm-devel] " Mikulas Patocka 2022-02-01 21:32 ` kernel test robot 2022-02-02 6:01 ` Adam Manzanares 2022-02-02 6:01 ` [dm-devel] " Adam Manzanares 2022-02-03 16:06 ` Luis Chamberlain 2022-02-03 16:06 ` Luis Chamberlain 2022-02-03 16:15 ` [dm-devel] " Christoph Hellwig 2022-02-03 16:15 ` Christoph Hellwig 2022-02-03 19:34 ` Luis Chamberlain 2022-02-03 19:34 ` [dm-devel] " Luis Chamberlain 2022-02-03 19:46 ` Adam Manzanares 2022-02-03 19:46 ` [dm-devel] " Adam Manzanares 2022-02-03 20:57 ` Mikulas Patocka 2022-02-03 20:57 ` Mikulas Patocka 2022-02-03 22:52 ` Adam Manzanares 2022-02-03 22:52 ` [dm-devel] " Adam Manzanares 2022-02-04 3:00 ` Chaitanya Kulkarni 2022-02-04 3:00 ` [dm-devel] " Chaitanya Kulkarni 2022-02-04 3:05 ` Chaitanya Kulkarni 2022-02-04 3:05 ` [dm-devel] " Chaitanya Kulkarni 2022-02-02 7:23 ` kernel test robot 2022-02-02 7:23 ` kernel test robot 2022-02-02 8:00 ` Chaitanya Kulkarni 2022-02-02 8:00 ` [dm-devel] " Chaitanya Kulkarni 2022-02-02 12:38 ` Klaus Jensen 2022-02-02 12:38 ` [dm-devel] " Klaus Jensen 2022-02-03 15:38 ` Luis Chamberlain 2022-02-03 15:38 ` Luis Chamberlain 2022-02-03 16:52 ` Keith Busch 2022-02-03 16:52 ` [dm-devel] " Keith Busch 2022-02-03 19:50 ` Adam Manzanares 2022-02-03 19:50 ` [dm-devel] " Adam Manzanares 2022-02-04 3:12 ` Chaitanya Kulkarni 2022-02-04 3:12 ` [dm-devel] " Chaitanya Kulkarni 2022-02-04 6:28 ` Damien Le Moal 2022-02-04 6:28 ` [dm-devel] " Damien Le Moal 2022-02-04 7:58 ` Chaitanya Kulkarni 2022-02-04 7:58 ` [dm-devel] " Chaitanya Kulkarni 2022-02-04 8:24 ` Javier González 2022-02-04 8:24 ` [dm-devel] " Javier González 2022-02-04 9:58 ` Chaitanya Kulkarni 2022-02-04 9:58 ` [dm-devel] " Chaitanya Kulkarni 2022-02-04 11:34 ` Javier González 2022-02-04 11:34 ` [dm-devel] " Javier González 2022-02-04 14:15 ` Hannes Reinecke 2022-02-04 14:15 ` [dm-devel] " Hannes Reinecke 2022-02-04 14:24 ` Keith Busch 2022-02-04 14:24 ` [dm-devel] " Keith Busch 2022-02-04 16:01 ` Christoph Hellwig 
2022-02-04 16:01 ` [dm-devel] " Christoph Hellwig 2022-02-04 19:41 ` [RFC PATCH 0/3] NVMe copy offload patches Nitesh Shetty 2022-02-04 19:41 ` [dm-devel] " Nitesh Shetty [not found] ` <CGME20220207141901epcas5p162ec2387815be7a1fd67ce0ab7082119@epcas5p1.samsung.com> 2022-02-07 14:13 ` [PATCH v2 00/10] Add Copy offload support Nitesh Shetty 2022-02-07 14:13 ` [dm-devel] " Nitesh Shetty [not found] ` <CGME20220207141908epcas5p4f270c89fc32434ea8b525fa973098231@epcas5p4.samsung.com> 2022-02-07 14:13 ` [PATCH v2 01/10] block: make bio_map_kern() non static Nitesh Shetty 2022-02-07 14:13 ` [dm-devel] " Nitesh Shetty [not found] ` <CGME20220207141913epcas5p4d41cb549b7cca1ede5c7a66bbd110da0@epcas5p4.samsung.com> 2022-02-07 14:13 ` [PATCH v2 02/10] block: Introduce queue limits for copy-offload support Nitesh Shetty 2022-02-07 14:13 ` [dm-devel] " Nitesh Shetty 2022-02-08 7:01 ` Damien Le Moal 2022-02-08 7:01 ` [dm-devel] " Damien Le Moal 2022-02-08 18:43 ` Nitesh Shetty 2022-02-08 18:43 ` [dm-devel] " Nitesh Shetty [not found] ` <CGME20220207141918epcas5p4f9badc0c3f3f0913f091c850d2d3bd81@epcas5p4.samsung.com> 2022-02-07 14:13 ` [PATCH v2 03/10] block: Add copy offload support infrastructure Nitesh Shetty 2022-02-07 14:13 ` [dm-devel] " Nitesh Shetty 2022-02-07 22:45 ` kernel test robot 2022-02-07 22:45 ` kernel test robot 2022-02-07 22:45 ` [dm-devel] " kernel test robot 2022-02-07 23:26 ` kernel test robot 2022-02-07 23:26 ` kernel test robot 2022-02-07 23:26 ` [dm-devel] " kernel test robot 2022-02-08 7:21 ` Damien Le Moal 2022-02-08 7:21 ` [dm-devel] " Damien Le Moal 2022-02-09 10:22 ` Nitesh Shetty 2022-02-09 10:22 ` [dm-devel] " Nitesh Shetty [not found] ` <CGME20220207141924epcas5p26ad9cf5de732224f408aded12ed0a577@epcas5p2.samsung.com> 2022-02-07 14:13 ` [PATCH v2 04/10] block: Introduce a new ioctl for copy Nitesh Shetty 2022-02-07 14:13 ` [dm-devel] " Nitesh Shetty 2022-02-09 3:39 ` kernel test robot 2022-02-09 3:39 ` kernel test robot 2022-02-09 3:39 ` kernel test 
robot [not found] ` <CGME20220207141930epcas5p2bcbff65f78ad1dede64648d73ddb3770@epcas5p2.samsung.com> 2022-02-07 14:13 ` [PATCH v2 05/10] block: add emulation " Nitesh Shetty 2022-02-07 14:13 ` [dm-devel] " Nitesh Shetty 2022-02-08 3:20 ` kernel test robot 2022-02-08 3:20 ` kernel test robot 2022-02-08 3:20 ` [dm-devel] " kernel test robot 2022-02-16 13:32 ` Mikulas Patocka 2022-02-16 13:32 ` Mikulas Patocka 2022-02-17 13:18 ` Nitesh Shetty 2022-02-17 13:18 ` [dm-devel] " Nitesh Shetty [not found] ` <CGME20220207141937epcas5p2bd57ae35056c69b3e2f9ee2348d6af19@epcas5p2.samsung.com> 2022-02-07 14:13 ` [PATCH v2 06/10] nvme: add copy support Nitesh Shetty 2022-02-07 14:13 ` [dm-devel] " Nitesh Shetty 2022-02-10 7:08 ` kernel test robot 2022-02-10 7:08 ` kernel test robot 2022-02-10 7:08 ` kernel test robot [not found] ` <CGME20220207141942epcas5p4bda894a5833513c9211dcecc7928a951@epcas5p4.samsung.com> 2022-02-07 14:13 ` [PATCH v2 07/10] nvmet: add copy command support for bdev and file ns Nitesh Shetty 2022-02-07 14:13 ` [dm-devel] " Nitesh Shetty 2022-02-07 18:10 ` kernel test robot 2022-02-07 18:10 ` kernel test robot 2022-02-07 18:10 ` [dm-devel] " kernel test robot 2022-02-07 20:12 ` kernel test robot 2022-02-07 20:12 ` kernel test robot 2022-02-07 20:12 ` [dm-devel] " kernel test robot 2022-02-10 8:31 ` kernel test robot 2022-02-10 8:31 ` kernel test robot 2022-02-10 8:31 ` kernel test robot [not found] ` <CGME20220207141948epcas5p4534f6bdc5a1e2e676d7d09c04f8b4a5b@epcas5p4.samsung.com> 2022-02-07 14:13 ` [PATCH v2 08/10] dm: Add support for copy offload Nitesh Shetty 2022-02-07 14:13 ` [dm-devel] " Nitesh Shetty 2022-02-16 13:51 ` Mikulas Patocka 2022-02-16 13:51 ` Mikulas Patocka 2022-02-24 12:42 ` Nitesh Shetty 2022-02-24 12:42 ` [dm-devel] " Nitesh Shetty 2022-02-25 9:12 ` Mikulas Patocka 2022-02-25 9:12 ` [dm-devel] " Mikulas Patocka [not found] ` <CGME20220207141953epcas5p32ccc3c0bbe642cea074edefcc32302a5@epcas5p3.samsung.com> 2022-02-07 14:13 ` [PATCH v2 
09/10] dm: Enable copy offload for dm-linear target Nitesh Shetty 2022-02-07 14:13 ` [dm-devel] " Nitesh Shetty [not found] ` <CGME20220207141958epcas5p25f1cd06726217696d13c2dfbea010565@epcas5p2.samsung.com> 2022-02-07 14:13 ` [PATCH v2 10/10] dm kcopyd: use copy offload support Nitesh Shetty 2022-02-07 14:13 ` [dm-devel] " Nitesh Shetty 2022-02-07 9:57 ` [LSF/MM/BFP ATTEND] [LSF/MM/BFP TOPIC] Storage: Copy Offload Nitesh Shetty 2022-02-07 9:57 ` [dm-devel] " Nitesh Shetty 2022-02-02 5:57 ` Kanchan Joshi 2022-02-02 5:57 ` [dm-devel] " Kanchan Joshi 2022-02-07 10:45 ` David Disseldorp 2022-02-07 10:45 ` [dm-devel] " David Disseldorp 2022-03-01 17:34 ` Nikos Tsironis 2022-03-01 17:34 ` [dm-devel] " Nikos Tsironis 2022-03-01 21:32 ` Chaitanya Kulkarni 2022-03-03 18:36 ` [dm-devel] " Nikos Tsironis 2022-03-03 18:36 ` Nikos Tsironis 2022-03-08 20:48 ` Nikos Tsironis 2022-03-08 20:48 ` [dm-devel] " Nikos Tsironis 2022-03-09 8:51 ` Mikulas Patocka 2022-03-09 8:51 ` Mikulas Patocka 2022-03-09 15:49 ` Nikos Tsironis 2022-03-09 15:49 ` [dm-devel] " Nikos Tsironis [not found] <CGME20220209075901epcas5p3cff468deadd8ef836522f032bd4ed36c@epcas5p3.samsung.com> 2022-02-08 23:31 ` [PATCH v2 03/10] block: Add copy offload support infrastructure kernel test robot 2022-02-09 7:48 ` Dan Carpenter 2022-02-09 7:48 ` [dm-devel] " Dan Carpenter 2022-02-09 7:48 ` Dan Carpenter 2022-02-09 10:32 ` Nitesh Shetty 2022-02-10 6:13 ` Nitesh Shetty 2022-02-09 10:32 ` [dm-devel] " Nitesh Shetty 2022-02-10 22:49 [PATCH v2 07/10] nvmet: add copy command support for bdev and file ns kernel test robot 2022-02-11 7:52 ` Dan Carpenter 2022-02-11 7:52 ` [dm-devel] " Dan Carpenter 2022-02-11 7:52 ` Dan Carpenter
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=alpine.LRH.2.02.2202011332330.22481@file01.intranet.prod.int.rdu2.redhat.com \ --to=mpatocka@redhat.com \ --cc=Frederick.Knight@netapp.com \ --cc=axboe@kernel.dk \ --cc=bvanassche@acm.org \ --cc=chaitanyak@nvidia.com \ --cc=clm@fb.com \ --cc=djwong@kernel.org \ --cc=dm-devel@redhat.com \ --cc=dsterba@suse.com \ --cc=hare@suse.de \ --cc=hch@lst.de \ --cc=jack@suse.com \ --cc=javier@javigon.com \ --cc=josef@toxicpanda.com \ --cc=joshi.k@samsung.com \ --cc=kbusch@kernel.org \ --cc=linux-block@vger.kernel.org \ --cc=linux-fsdevel@vger.kernel.org \ --cc=linux-nvme@lists.infradead.org \ --cc=linux-scsi@vger.kernel.org \ --cc=lsf-pc@lists.linux-foundation.org \ --cc=martin.petersen@oracle.com \ --cc=msnitzer@redhat.com \ --cc=osandov@fb.com \ --cc=roland@purestorage.com \ --cc=tytso@mit.edu \ --cc=zach.brown@ni.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.