From: Larysa Zaremba <larysa.zaremba@intel.com>
To: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: <netdev@vger.kernel.org>, "Michael S. Tsirkin" <mst@redhat.com>,
"Jason Wang" <jasowang@redhat.com>,
"David S. Miller" <davem@davemloft.net>,
"Eric Dumazet" <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
<virtualization@lists.linux.dev>
Subject: Re: [PATCH net-next v4 3/4] virtio_net: rx remove premapped failover code
Date: Fri, 10 May 2024 09:42:07 +0200 [thread overview]
Message-ID: <Zj3PzyXEMfyNDKe6@lzaremba-mobl.ger.corp.intel.com> (raw)
In-Reply-To: <20240508063718.69806-4-xuanzhuo@linux.alibaba.com>
On Wed, May 08, 2024 at 02:37:17PM +0800, Xuan Zhuo wrote:
> Now, the premapped mode can be enabled unconditionally.
>
> So we can remove the failover code for merge and small mode.
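
To make the removal easier to follow: the failover being deleted worked
roughly like this (a simplified sketch distilled from the diff below,
not the exact driver code):

	/* Before: try to enable premapped DMA per queue and fall back
	 * to plain virtqueue buffers when that failed.
	 */
	for (i = 0; i < vi->max_queue_pairs; i++)
		if (!virtqueue_set_dma_premapped(vi->rq[i].vq))
			vi->rq[i].do_dma = true;

	/* After: premapped mode is always on, so rq->do_dma and every
	 * "if (rq->do_dma)" guard can go away.
	 */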
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Acked-by: Jason Wang <jasowang@redhat.com>
> ---
> drivers/net/virtio_net.c | 85 +++++++++++++++++-----------------------
> 1 file changed, 35 insertions(+), 50 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index a2452d35bb93..070a6ed0d812 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -344,9 +344,6 @@ struct receive_queue {
>
> /* Record the last dma info to free after new pages is allocated. */
> struct virtnet_rq_dma *last_dma;
> -
> - /* Do dma by self */
> - bool do_dma;
> };
>
> /* This structure can contain rss message with maximum settings for indirection table and keysize
> @@ -846,7 +843,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
> void *buf;
>
> buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
> - if (buf && rq->do_dma)
> + if (buf)
> virtnet_rq_unmap(rq, buf, *len);
>
> return buf;
> @@ -859,11 +856,6 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
> u32 offset;
> void *head;
>
> - if (!rq->do_dma) {
> - sg_init_one(rq->sg, buf, len);
> - return;
> - }
> -
> head = page_address(rq->alloc_frag.page);
>
> offset = buf - head;
> @@ -889,44 +881,42 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
>
> head = page_address(alloc_frag->page);
>
> - if (rq->do_dma) {
> - dma = head;
> -
> - /* new pages */
> - if (!alloc_frag->offset) {
> - if (rq->last_dma) {
> - /* Now, the new page is allocated, the last dma
> - * will not be used. So the dma can be unmapped
> - * if the ref is 0.
> - */
> - virtnet_rq_unmap(rq, rq->last_dma, 0);
> - rq->last_dma = NULL;
> - }
> + dma = head;
>
> - dma->len = alloc_frag->size - sizeof(*dma);
> + /* new pages */
> + if (!alloc_frag->offset) {
> + if (rq->last_dma) {
> + /* Now, the new page is allocated, the last dma
> + * will not be used. So the dma can be unmapped
> + * if the ref is 0.
> + */
> + virtnet_rq_unmap(rq, rq->last_dma, 0);
> + rq->last_dma = NULL;
> + }
>
> - addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
> - dma->len, DMA_FROM_DEVICE, 0);
> - if (virtqueue_dma_mapping_error(rq->vq, addr))
> - return NULL;
> + dma->len = alloc_frag->size - sizeof(*dma);
>
> - dma->addr = addr;
> - dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
> + addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
> + dma->len, DMA_FROM_DEVICE, 0);
> + if (virtqueue_dma_mapping_error(rq->vq, addr))
> + return NULL;
>
> - /* Add a reference to dma to prevent the entire dma from
> - * being released during error handling. This reference
> - * will be freed after the pages are no longer used.
> - */
> - get_page(alloc_frag->page);
> - dma->ref = 1;
> - alloc_frag->offset = sizeof(*dma);
> + dma->addr = addr;
> + dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
>
> - rq->last_dma = dma;
> - }
> + /* Add a reference to dma to prevent the entire dma from
> + * being released during error handling. This reference
> + * will be freed after the pages are no longer used.
> + */
> + get_page(alloc_frag->page);
> + dma->ref = 1;
> + alloc_frag->offset = sizeof(*dma);
>
> - ++dma->ref;
> + rq->last_dma = dma;
> }
>
> + ++dma->ref;
> +
> buf = head + alloc_frag->offset;
>
> get_page(alloc_frag->page);
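
To double-check my understanding of the refcounting this now relies on
unconditionally: the lifecycle of one page frag comes down to something
like this (pseudocode distilled from the hunk above, not literal code):

	/* First buffer in a fresh page: map the whole frag once and
	 * give the page an initial reference of its own.
	 */
	dma = head;
	dma->addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
						   dma->len,
						   DMA_FROM_DEVICE, 0);
	dma->ref = 1;

	/* Every sub-buffer handed out takes one more reference ... */
	++dma->ref;

	/* ... and virtnet_rq_unmap() drops one per completed buffer,
	 * so the frag is unmapped only when the last user is done.
	 */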
> @@ -943,12 +933,9 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
> if (!vi->mergeable_rx_bufs && vi->big_packets)
> return;
>
> - for (i = 0; i < vi->max_queue_pairs; i++) {
> - if (virtqueue_set_dma_premapped(vi->rq[i].vq))
> - continue;
> -
> - vi->rq[i].do_dma = true;
> - }
> + for (i = 0; i < vi->max_queue_pairs; i++)
> + /* error never happen */
Nit: grammar, the comment should read /* error should never happen */.

Other than that, the code looks fine to me.
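
And the BUG_ON() does look safe to me: if I remember virtio_ring.c
correctly, now that patch 1/4 has dropped the use_dma_api test, the
only error path left in virtqueue_set_dma_premapped() is a ring that
is not completely free, which cannot be the case at probe time.
Paraphrased from memory, not the literal source:

	/* virtqueue_set_dma_premapped(), simplified: refuse to switch
	 * modes once buffers are in flight.
	 */
	if (vq->vq.num_free != vq->split.vring.num)
		return -EINVAL;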
Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
> + BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
> }
>
> static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
> @@ -2020,8 +2007,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
>
> err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
> if (err < 0) {
> - if (rq->do_dma)
> - virtnet_rq_unmap(rq, buf, 0);
> + virtnet_rq_unmap(rq, buf, 0);
> put_page(virt_to_head_page(buf));
> }
>
> @@ -2135,8 +2121,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
> ctx = mergeable_len_to_ctx(len + room, headroom);
> err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
> if (err < 0) {
> - if (rq->do_dma)
> - virtnet_rq_unmap(rq, buf, 0);
> + virtnet_rq_unmap(rq, buf, 0);
> put_page(virt_to_head_page(buf));
> }
>
> @@ -5206,7 +5191,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
> int i;
> for (i = 0; i < vi->max_queue_pairs; i++)
> if (vi->rq[i].alloc_frag.page) {
> - if (vi->rq[i].do_dma && vi->rq[i].last_dma)
> + if (vi->rq[i].last_dma)
> virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
> put_page(vi->rq[i].alloc_frag.page);
> }
> --
> 2.32.0.3.g01195cf9f
>
>