* [PATCH] NTB: ntb_transport: Ensure the destination buffer is mapped for TX DMA
@ 2019-01-19  0:10 Logan Gunthorpe
  2019-01-19  0:25 ` Dave Jiang
  2019-02-14 13:44 ` lravich
  0 siblings, 2 replies; 5+ messages in thread
From: Logan Gunthorpe @ 2019-01-19  0:10 UTC (permalink / raw)
  To: linux-kernel, linux-ntb
  Cc: Logan Gunthorpe, Jon Mason, Dave Jiang, Allen Hubbe

Presently, when ntb_transport is used with DMA and the IOMMU turned on,
it fails with errors from the IOMMU such as:

  DMAR: DRHD: handling fault status reg 202
  DMAR: [DMA Write] Request device [00:04.0] fault addr
	381fc0340000 [fault reason 05] PTE Write access is not set

This is because ntb_transport does not map the BAR space with the IOMMU.

To fix this, we map the entire MW region for each QP after we assign
the DMA channel. This prevents needing an extra DMA map in the fast
path.
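
As an illustration only (a minimal sketch with placeholder names, not the
driver code itself), the mapping follows the usual dma_map_resource() /
dma_unmap_resource() pairing for MMIO ranges:

	#include <linux/dma-mapping.h>

	/*
	 * Sketch: map a peer BAR window (MMIO, not RAM) so that a DMA engine
	 * sitting behind an IOMMU is allowed to write into it, and unmap it
	 * again on teardown.  "dma_dev", "bar_phys" and "bar_size" are
	 * placeholders for the DMA channel's device and the window location;
	 * the direction mirrors what the patch below uses.
	 */
	static dma_addr_t map_peer_window(struct device *dma_dev,
					  phys_addr_t bar_phys, size_t bar_size)
	{
		dma_addr_t dma_addr;

		dma_addr = dma_map_resource(dma_dev, bar_phys, bar_size,
					    DMA_FROM_DEVICE, 0);
		if (dma_mapping_error(dma_dev, dma_addr))
			return 0;	/* caller fails setup or falls back to CPU copies */

		return dma_addr;
	}

	static void unmap_peer_window(struct device *dma_dev,
				      dma_addr_t dma_addr, size_t bar_size)
	{
		dma_unmap_resource(dma_dev, dma_addr, bar_size,
				   DMA_FROM_DEVICE, 0);
	}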

Link: https://lore.kernel.org/linux-pci/499934e7-3734-1aee-37dd-b42a5d2a2608@intel.com/
Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Cc: Jon Mason <jdmason@kudzu.us>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Allen Hubbe <allenbh@gmail.com>
---
 drivers/ntb/ntb_transport.c | 28 ++++++++++++++++++++++++++--
 1 file changed, 26 insertions(+), 2 deletions(-)

diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 3bfdb4562408..526b65afc16a 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -144,7 +144,9 @@ struct ntb_transport_qp {
 	struct list_head tx_free_q;
 	spinlock_t ntb_tx_free_q_lock;
 	void __iomem *tx_mw;
-	dma_addr_t tx_mw_phys;
+	phys_addr_t tx_mw_phys;
+	size_t tx_mw_size;
+	dma_addr_t tx_mw_dma_addr;
 	unsigned int tx_index;
 	unsigned int tx_max_entry;
 	unsigned int tx_max_frame;
@@ -1049,6 +1051,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
 	tx_size = (unsigned int)mw_size / num_qps_mw;
 	qp_offset = tx_size * (qp_num / mw_count);
 
+	qp->tx_mw_size = tx_size;
 	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
 	if (!qp->tx_mw)
 		return -EINVAL;
@@ -1644,7 +1647,7 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
 	dma_cookie_t cookie;
 
 	device = chan->device;
-	dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
+	dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index;
 	buff_off = (size_t)buf & ~PAGE_MASK;
 	dest_off = (size_t)dest & ~PAGE_MASK;
 
@@ -1863,6 +1866,18 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 		qp->rx_dma_chan = NULL;
 	}
 
+	if (qp->tx_dma_chan) {
+		qp->tx_mw_dma_addr =
+			dma_map_resource(qp->tx_dma_chan->device->dev,
+					 qp->tx_mw_phys, qp->tx_mw_size,
+					 DMA_FROM_DEVICE, 0);
+		if (dma_mapping_error(qp->tx_dma_chan->device->dev,
+				      qp->tx_mw_dma_addr)) {
+			qp->tx_mw_dma_addr = 0;
+			goto err1;
+		}
+	}
+
 	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
 		qp->tx_dma_chan ? "DMA" : "CPU");
 
@@ -1904,6 +1919,10 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
 	qp->rx_alloc_entry = 0;
 	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
 		kfree(entry);
+	if (qp->tx_mw_dma_addr)
+		dma_unmap_resource(qp->tx_dma_chan->device->dev,
+				   qp->tx_mw_dma_addr, qp->tx_mw_size,
+				   DMA_FROM_DEVICE, 0);
 	if (qp->tx_dma_chan)
 		dma_release_channel(qp->tx_dma_chan);
 	if (qp->rx_dma_chan)
@@ -1945,6 +1964,11 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 		 */
 		dma_sync_wait(chan, qp->last_cookie);
 		dmaengine_terminate_all(chan);
+
+		dma_unmap_resource(chan->device->dev,
+				   qp->tx_mw_dma_addr, qp->tx_mw_size,
+				   DMA_FROM_DEVICE, 0);
+
 		dma_release_channel(chan);
 	}
 
-- 
2.19.0



* Re: [PATCH] NTB: ntb_transport: Ensure the destination buffer is mapped for TX DMA
  2019-01-19  0:10 [PATCH] NTB: ntb_transport: Ensure the destination buffer is mapped for TX DMA Logan Gunthorpe
@ 2019-01-19  0:25 ` Dave Jiang
  2019-02-11 14:35   ` Jon Mason
  2019-02-14 13:44 ` lravich
  1 sibling, 1 reply; 5+ messages in thread
From: Dave Jiang @ 2019-01-19  0:25 UTC (permalink / raw)
  To: Logan Gunthorpe, linux-kernel, linux-ntb; +Cc: Jon Mason, Allen Hubbe



On 1/18/19 5:10 PM, Logan Gunthorpe wrote:
> Presently, when ntb_transport is used with DMA and the IOMMU turned on,
> it fails with errors from the IOMMU such as:
> 
>   DMAR: DRHD: handling fault status reg 202
>   DMAR: [DMA Write] Request device [00:04.0] fault addr
> 	381fc0340000 [fault reason 05] PTE Write access is not set
> 
> This is because ntb_transport does not map the BAR space with the IOMMU.
> 
> To fix this, we map the entire MW region for each QP after we assign
> the DMA channel. This prevents needing an extra DMA map in the fast
> path.
> 
> Link: https://lore.kernel.org/linux-pci/499934e7-3734-1aee-37dd-b42a5d2a2608@intel.com/
> Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
> Cc: Jon Mason <jdmason@kudzu.us>
> Cc: Dave Jiang <dave.jiang@intel.com>
> Cc: Allen Hubbe <allenbh@gmail.com>

Nice! I actually never encountered this on the Intel NTB with IOMMU on.
It also could be that the Intel BIOS already took care of it for all
embedded device BARs on the uncore. Nevertheless it's needed. Thanks!

Reviewed-by: Dave Jiang <dave.jiang@intel.com>

> ---
>  drivers/ntb/ntb_transport.c | 28 ++++++++++++++++++++++++++--
>  1 file changed, 26 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
> index 3bfdb4562408..526b65afc16a 100644
> --- a/drivers/ntb/ntb_transport.c
> +++ b/drivers/ntb/ntb_transport.c
> @@ -144,7 +144,9 @@ struct ntb_transport_qp {
>  	struct list_head tx_free_q;
>  	spinlock_t ntb_tx_free_q_lock;
>  	void __iomem *tx_mw;
> -	dma_addr_t tx_mw_phys;
> +	phys_addr_t tx_mw_phys;
> +	size_t tx_mw_size;
> +	dma_addr_t tx_mw_dma_addr;
>  	unsigned int tx_index;
>  	unsigned int tx_max_entry;
>  	unsigned int tx_max_frame;
> @@ -1049,6 +1051,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
>  	tx_size = (unsigned int)mw_size / num_qps_mw;
>  	qp_offset = tx_size * (qp_num / mw_count);
>  
> +	qp->tx_mw_size = tx_size;
>  	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
>  	if (!qp->tx_mw)
>  		return -EINVAL;
> @@ -1644,7 +1647,7 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
>  	dma_cookie_t cookie;
>  
>  	device = chan->device;
> -	dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
> +	dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index;
>  	buff_off = (size_t)buf & ~PAGE_MASK;
>  	dest_off = (size_t)dest & ~PAGE_MASK;
>  
> @@ -1863,6 +1866,18 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
>  		qp->rx_dma_chan = NULL;
>  	}
>  
> +	if (qp->tx_dma_chan) {
> +		qp->tx_mw_dma_addr =
> +			dma_map_resource(qp->tx_dma_chan->device->dev,
> +					 qp->tx_mw_phys, qp->tx_mw_size,
> +					 DMA_FROM_DEVICE, 0);
> +		if (dma_mapping_error(qp->tx_dma_chan->device->dev,
> +				      qp->tx_mw_dma_addr)) {
> +			qp->tx_mw_dma_addr = 0;
> +			goto err1;
> +		}
> +	}
> +
>  	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
>  		qp->tx_dma_chan ? "DMA" : "CPU");
>  
> @@ -1904,6 +1919,10 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
>  	qp->rx_alloc_entry = 0;
>  	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
>  		kfree(entry);
> +	if (qp->tx_mw_dma_addr)
> +		dma_unmap_resource(qp->tx_dma_chan->device->dev,
> +				   qp->tx_mw_dma_addr, qp->tx_mw_size,
> +				   DMA_FROM_DEVICE, 0);
>  	if (qp->tx_dma_chan)
>  		dma_release_channel(qp->tx_dma_chan);
>  	if (qp->rx_dma_chan)
> @@ -1945,6 +1964,11 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
>  		 */
>  		dma_sync_wait(chan, qp->last_cookie);
>  		dmaengine_terminate_all(chan);
> +
> +		dma_unmap_resource(chan->device->dev,
> +				   qp->tx_mw_dma_addr, qp->tx_mw_size,
> +				   DMA_FROM_DEVICE, 0);
> +
>  		dma_release_channel(chan);
>  	}
>  
> 


* Re: [PATCH] NTB: ntb_transport: Ensure the destination buffer is mapped for TX DMA
  2019-01-19  0:25 ` Dave Jiang
@ 2019-02-11 14:35   ` Jon Mason
  0 siblings, 0 replies; 5+ messages in thread
From: Jon Mason @ 2019-02-11 14:35 UTC (permalink / raw)
  To: Dave Jiang; +Cc: Logan Gunthorpe, linux-kernel, linux-ntb, Allen Hubbe

On Fri, Jan 18, 2019 at 05:25:20PM -0700, Dave Jiang wrote:
> 
> 
> On 1/18/19 5:10 PM, Logan Gunthorpe wrote:
> > Presently, when ntb_transport is used with DMA and the IOMMU turned on,
> > it fails with errors from the IOMMU such as:
> > 
> >   DMAR: DRHD: handling fault status reg 202
> >   DMAR: [DMA Write] Request device [00:04.0] fault addr
> > 	381fc0340000 [fault reason 05] PTE Write access is not set
> > 
> > This is because ntb_transport does not map the BAR space with the IOMMU.
> > 
> > To fix this, we map the entire MW region for each QP after we assign
> > the DMA channel. This prevents needing an extra DMA map in the fast
> > path.
> > 
> > Link: https://lore.kernel.org/linux-pci/499934e7-3734-1aee-37dd-b42a5d2a2608@intel.com/
> > Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
> > Cc: Jon Mason <jdmason@kudzu.us>
> > Cc: Dave Jiang <dave.jiang@intel.com>
> > Cc: Allen Hubbe <allenbh@gmail.com>
> 
> Nice! I actually never encountered this on the Intel NTB with IOMMU on.
> It also could be that the Intel BIOS already took care of it for all
> embedded device BARs on the uncore. Nevertheless it's needed. Thanks!
> 
> Reviewed-by: Dave Jiang <dave.jiang@intel.com>

Added to the ntb branch, thanks!

> 
> > ---
> >  drivers/ntb/ntb_transport.c | 28 ++++++++++++++++++++++++++--
> >  1 file changed, 26 insertions(+), 2 deletions(-)
> > 
> > diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
> > index 3bfdb4562408..526b65afc16a 100644
> > --- a/drivers/ntb/ntb_transport.c
> > +++ b/drivers/ntb/ntb_transport.c
> > @@ -144,7 +144,9 @@ struct ntb_transport_qp {
> >  	struct list_head tx_free_q;
> >  	spinlock_t ntb_tx_free_q_lock;
> >  	void __iomem *tx_mw;
> > -	dma_addr_t tx_mw_phys;
> > +	phys_addr_t tx_mw_phys;
> > +	size_t tx_mw_size;
> > +	dma_addr_t tx_mw_dma_addr;
> >  	unsigned int tx_index;
> >  	unsigned int tx_max_entry;
> >  	unsigned int tx_max_frame;
> > @@ -1049,6 +1051,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
> >  	tx_size = (unsigned int)mw_size / num_qps_mw;
> >  	qp_offset = tx_size * (qp_num / mw_count);
> >  
> > +	qp->tx_mw_size = tx_size;
> >  	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
> >  	if (!qp->tx_mw)
> >  		return -EINVAL;
> > @@ -1644,7 +1647,7 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
> >  	dma_cookie_t cookie;
> >  
> >  	device = chan->device;
> > -	dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
> > +	dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index;
> >  	buff_off = (size_t)buf & ~PAGE_MASK;
> >  	dest_off = (size_t)dest & ~PAGE_MASK;
> >  
> > @@ -1863,6 +1866,18 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
> >  		qp->rx_dma_chan = NULL;
> >  	}
> >  
> > +	if (qp->tx_dma_chan) {
> > +		qp->tx_mw_dma_addr =
> > +			dma_map_resource(qp->tx_dma_chan->device->dev,
> > +					 qp->tx_mw_phys, qp->tx_mw_size,
> > +					 DMA_FROM_DEVICE, 0);
> > +		if (dma_mapping_error(qp->tx_dma_chan->device->dev,
> > +				      qp->tx_mw_dma_addr)) {
> > +			qp->tx_mw_dma_addr = 0;
> > +			goto err1;
> > +		}
> > +	}
> > +
> >  	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
> >  		qp->tx_dma_chan ? "DMA" : "CPU");
> >  
> > @@ -1904,6 +1919,10 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
> >  	qp->rx_alloc_entry = 0;
> >  	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
> >  		kfree(entry);
> > +	if (qp->tx_mw_dma_addr)
> > +		dma_unmap_resource(qp->tx_dma_chan->device->dev,
> > +				   qp->tx_mw_dma_addr, qp->tx_mw_size,
> > +				   DMA_FROM_DEVICE, 0);
> >  	if (qp->tx_dma_chan)
> >  		dma_release_channel(qp->tx_dma_chan);
> >  	if (qp->rx_dma_chan)
> > @@ -1945,6 +1964,11 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
> >  		 */
> >  		dma_sync_wait(chan, qp->last_cookie);
> >  		dmaengine_terminate_all(chan);
> > +
> > +		dma_unmap_resource(chan->device->dev,
> > +				   qp->tx_mw_dma_addr, qp->tx_mw_size,
> > +				   DMA_FROM_DEVICE, 0);
> > +
> >  		dma_release_channel(chan);
> >  	}
> >  
> > 


* Re: [PATCH] NTB: ntb_transport: Ensure the destination buffer is mapped for TX DMA
  2019-01-19  0:10 [PATCH] NTB: ntb_transport: Ensure the destination buffer is mapped for TX DMA Logan Gunthorpe
  2019-01-19  0:25 ` Dave Jiang
@ 2019-02-14 13:44 ` lravich
  2019-02-15  5:29   ` Logan Gunthorpe
  1 sibling, 1 reply; 5+ messages in thread
From: lravich @ 2019-02-14 13:44 UTC (permalink / raw)
  To: linux-ntb



Hi Logan,
I took a look at the dma_map_resource() call you used in this patch, and it
looks like ops->map_resource is not implemented for the Intel IOMMU.
If I see it right, this will not fix the issue for Intel NTB.

Please let me know if I am right.

Thanks.
Leonid Ravich

On Saturday, January 19, 2019 at 2:10:12 AM UTC+2, Logan Gunthorpe wrote:
>
> Presently, when ntb_transport is used with DMA and the IOMMU turned on, 
> it fails with errors from the IOMMU such as: 
>
>   DMAR: DRHD: handling fault status reg 202 
>   DMAR: [DMA Write] Request device [00:04.0] fault addr 
>         381fc0340000 [fault reason 05] PTE Write access is not set 
>
> This is because ntb_transport does not map the BAR space with the IOMMU. 
>
> To fix this, we map the entire MW region for each QP after we assign 
> the DMA channel. This prevents needing an extra DMA map in the fast 
> path. 
>
> Link: https://lore.kernel.org/linux-pci/499934e7-3734-1aee-37dd-b42a5d2a2608@intel.com/ 
>
> Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
> Cc: Jon Mason <jdmason@kudzu.us>
> Cc: Dave Jiang <dave.jiang@intel.com>
> Cc: Allen Hubbe <allenbh@gmail.com>
> --- 
>  drivers/ntb/ntb_transport.c | 28 ++++++++++++++++++++++++++-- 
>  1 file changed, 26 insertions(+), 2 deletions(-) 
>
> diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c 
> index 3bfdb4562408..526b65afc16a 100644 
> --- a/drivers/ntb/ntb_transport.c 
> +++ b/drivers/ntb/ntb_transport.c 
> @@ -144,7 +144,9 @@ struct ntb_transport_qp { 
>          struct list_head tx_free_q; 
>          spinlock_t ntb_tx_free_q_lock; 
>          void __iomem *tx_mw; 
> -        dma_addr_t tx_mw_phys; 
> +        phys_addr_t tx_mw_phys; 
> +        size_t tx_mw_size; 
> +        dma_addr_t tx_mw_dma_addr; 
>          unsigned int tx_index; 
>          unsigned int tx_max_entry; 
>          unsigned int tx_max_frame; 
> @@ -1049,6 +1051,7 @@ static int ntb_transport_init_queue(struct 
> ntb_transport_ctx *nt, 
>          tx_size = (unsigned int)mw_size / num_qps_mw; 
>          qp_offset = tx_size * (qp_num / mw_count); 
>   
> +        qp->tx_mw_size = tx_size; 
>          qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset; 
>          if (!qp->tx_mw) 
>                  return -EINVAL; 
> @@ -1644,7 +1647,7 @@ static int ntb_async_tx_submit(struct 
> ntb_transport_qp *qp, 
>          dma_cookie_t cookie; 
>   
>          device = chan->device; 
> -        dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index; 
> +        dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index; 
>          buff_off = (size_t)buf & ~PAGE_MASK; 
>          dest_off = (size_t)dest & ~PAGE_MASK; 
>   
> @@ -1863,6 +1866,18 @@ ntb_transport_create_queue(void *data, struct 
> device *client_dev, 
>                  qp->rx_dma_chan = NULL; 
>          } 
>   
> +        if (qp->tx_dma_chan) { 
> +                qp->tx_mw_dma_addr = 
> +                        dma_map_resource(qp->tx_dma_chan->device->dev, 
> +                                         qp->tx_mw_phys, qp->tx_mw_size, 
> +                                         DMA_FROM_DEVICE, 0); 
> +                if (dma_mapping_error(qp->tx_dma_chan->device->dev, 
> +                                      qp->tx_mw_dma_addr)) { 
> +                        qp->tx_mw_dma_addr = 0; 
> +                        goto err1; 
> +                } 
> +        } 
> + 
>          dev_dbg(&pdev->dev, "Using %s memcpy for TX\n", 
>                  qp->tx_dma_chan ? "DMA" : "CPU"); 
>   
> @@ -1904,6 +1919,10 @@ ntb_transport_create_queue(void *data, struct 
> device *client_dev, 
>          qp->rx_alloc_entry = 0; 
>          while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) 
>                  kfree(entry); 
> +        if (qp->tx_mw_dma_addr) 
> +                dma_unmap_resource(qp->tx_dma_chan->device->dev, 
> +                                   qp->tx_mw_dma_addr, qp->tx_mw_size, 
> +                                   DMA_FROM_DEVICE, 0); 
>          if (qp->tx_dma_chan) 
>                  dma_release_channel(qp->tx_dma_chan); 
>          if (qp->rx_dma_chan) 
> @@ -1945,6 +1964,11 @@ void ntb_transport_free_queue(struct 
> ntb_transport_qp *qp) 
>                   */ 
>                  dma_sync_wait(chan, qp->last_cookie); 
>                  dmaengine_terminate_all(chan); 
> + 
> +                dma_unmap_resource(chan->device->dev, 
> +                                   qp->tx_mw_dma_addr, qp->tx_mw_size, 
> +                                   DMA_FROM_DEVICE, 0); 
> + 
>                  dma_release_channel(chan); 
>          } 
>   
> -- 
> 2.19.0 
>
>



* Re: [PATCH] NTB: ntb_transport: Ensure the destination buffer is mapped for TX DMA
  2019-02-14 13:44 ` lravich
@ 2019-02-15  5:29   ` Logan Gunthorpe
  0 siblings, 0 replies; 5+ messages in thread
From: Logan Gunthorpe @ 2019-02-15  5:29 UTC (permalink / raw)
  To: lravich, linux-ntb



On 2019-02-14 6:44 a.m., lravich@gmail.com wrote:
> Hi Logan,
> I took a look at the dma_map_resource() call you used in this patch, and it
> looks like ops->map_resource is not implemented for the Intel IOMMU.
> If I see it right, this will not fix the issue for Intel NTB.

> Please let me know if I am right.

Yes, but I've already fixed that[1] ;)

Logan

[1]
https://lore.kernel.org/lkml/20190122213045.5667-1-logang@deltatee.com/T/#u
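
For reference, dma_map_resource() can only hand back an IOMMU-translated
address if the device's dma_map_ops provides a .map_resource hook.  A rough
sketch of the shape of such a hook (hypothetical foo_* names and helpers,
not the actual intel-iommu change referenced above):

	static dma_addr_t foo_iommu_map_resource(struct device *dev,
						 phys_addr_t phys_addr,
						 size_t size,
						 enum dma_data_direction dir,
						 unsigned long attrs)
	{
		/* Insert an IOVA -> phys_addr mapping for the MMIO range;
		 * unlike map_page(), there is no struct page behind it. */
		return __foo_iommu_map_mmio(dev, phys_addr, size, dir, attrs);
	}

	static void foo_iommu_unmap_resource(struct device *dev,
					     dma_addr_t dma_handle, size_t size,
					     enum dma_data_direction dir,
					     unsigned long attrs)
	{
		__foo_iommu_unmap(dev, dma_handle, size);
	}

	static const struct dma_map_ops foo_dma_ops = {
		/* ...the usual map_page/map_sg callbacks... */
		.map_resource	= foo_iommu_map_resource,
		.unmap_resource	= foo_iommu_unmap_resource,
	};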
