* [PATCH v1 0/2] netdevsim: add NAPI support
@ 2024-04-16 5:15 David Wei
2024-04-16 5:15 ` [PATCH v1 1/2] " David Wei
2024-04-16 5:15 ` [PATCH v1 2/2] net: selftest: add test for netdev netlink queue-get API David Wei
0 siblings, 2 replies; 6+ messages in thread
From: David Wei @ 2024-04-16 5:15 UTC (permalink / raw)
To: netdev; +Cc: Jakub Kicinski, David S. Miller, Eric Dumazet, Paolo Abeni
Add NAPI support to netdevsim and register its Rx queues with NAPI
instances. Then add a selftest using the new netdev Python selftest
infra to exercise the existing Netdev Netlink API, specifically the
queue-get API.
This expands test coverage and further fleshes out netdevsim as a test
device. It's still my goal to make it useful for testing things like
flow steering and ZC Rx.
David Wei (2):
netdevsim: add NAPI support
net: selftest: add test for netdev netlink queue-get API
drivers/net/netdevsim/netdev.c | 227 +++++++++++++++++-
drivers/net/netdevsim/netdevsim.h | 7 +
tools/testing/selftests/drivers/net/Makefile | 1 +
.../selftests/drivers/net/lib/py/env.py | 10 +-
tools/testing/selftests/drivers/net/queues.py | 67 ++++++
tools/testing/selftests/net/lib/py/nsim.py | 4 +-
6 files changed, 301 insertions(+), 15 deletions(-)
create mode 100755 tools/testing/selftests/drivers/net/queues.py
--
2.43.0
^ permalink raw reply [flat|nested] 6+ messages in thread
* [PATCH v1 1/2] netdevsim: add NAPI support
2024-04-16 5:15 [PATCH v1 0/2] netdevsim: add NAPI support David Wei
@ 2024-04-16 5:15 ` David Wei
2024-04-17 0:27 ` Jakub Kicinski
2024-04-16 5:15 ` [PATCH v1 2/2] net: selftest: add test for netdev netlink queue-get API David Wei
1 sibling, 1 reply; 6+ messages in thread
From: David Wei @ 2024-04-16 5:15 UTC (permalink / raw)
To: netdev; +Cc: Jakub Kicinski, David S. Miller, Eric Dumazet, Paolo Abeni
Add NAPI support to netdevsim, similar to veth.
* Add a nsim_rq rx queue structure to hold a NAPI instance and a skb
queue.
* During xmit, store the skb in the peer skb queue and schedule NAPI.
* During napi_poll(), drain the skb queue and pass up the stack.
* Add assoc between rxq and NAPI instance using netif_queue_set_napi().
Signed-off-by: David Wei <dw@davidwei.uk>
---
drivers/net/netdevsim/netdev.c | 227 ++++++++++++++++++++++++++++--
drivers/net/netdevsim/netdevsim.h | 7 +
2 files changed, 223 insertions(+), 11 deletions(-)
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index d127856f8f36..c1a99825be91 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -28,11 +28,33 @@
#include "netdevsim.h"
+#define NSIM_RING_SIZE 256
+
+static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb)
+{
+ if (list_count_nodes(&rq->skb_queue) > NSIM_RING_SIZE) {
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+ }
+
+ list_add_tail(&skb->list, &rq->skb_queue);
+ return NET_RX_SUCCESS;
+}
+
+static int nsim_forward_skb(struct net_device *dev, struct sk_buff *skb,
+ struct nsim_rq *rq)
+{
+ return __dev_forward_skb(dev, skb) ?: nsim_napi_rx(rq, skb);
+}
+
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct net_device *peer_dev;
unsigned int len = skb->len;
struct netdevsim *peer_ns;
+ struct nsim_rq *rq;
+ int rxq;
rcu_read_lock();
if (!nsim_ipsec_tx(ns, skb))
@@ -42,10 +64,18 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!peer_ns)
goto out_drop_free;
+ peer_dev = peer_ns->netdev;
+ rxq = skb_get_queue_mapping(skb);
+ if (rxq >= peer_dev->num_rx_queues)
+ rxq = rxq % peer_dev->num_rx_queues;
+ rq = &peer_ns->rq[rxq];
+
skb_tx_timestamp(skb);
- if (unlikely(dev_forward_skb(peer_ns->netdev, skb) == NET_RX_DROP))
+ if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP))
goto out_drop_cnt;
+ napi_schedule(&rq->napi);
+
rcu_read_unlock();
u64_stats_update_begin(&ns->syncp);
ns->tx_packets++;
@@ -300,25 +330,153 @@ static int nsim_get_iflink(const struct net_device *dev)
return iflink;
}
+static int nsim_rcv(struct nsim_rq *rq, int budget)
+{
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < budget; i++) {
+ if (list_empty(&rq->skb_queue))
+ break;
+
+ skb = list_first_entry(&rq->skb_queue, struct sk_buff, list);
+ list_del(&skb->list);
+
+ netif_receive_skb(skb);
+ }
+
+ return i;
+}
+
+static int nsim_poll(struct napi_struct *napi, int budget)
+{
+ struct nsim_rq *rq = container_of(napi, struct nsim_rq, napi);
+ int done;
+
+ done = nsim_rcv(rq, budget);
+
+ if (done < budget && napi_complete_done(napi, done)) {
+ if (unlikely(!list_empty(&rq->skb_queue)))
+ napi_schedule(&rq->napi);
+ }
+
+ return done;
+}
+
+static int nsim_create_page_pool(struct nsim_rq *rq)
+{
+ struct page_pool_params p = {
+ .order = 0,
+ .pool_size = NSIM_RING_SIZE,
+ .nid = NUMA_NO_NODE,
+ .dev = &rq->napi.dev->dev,
+ .napi = &rq->napi,
+ .dma_dir = DMA_BIDIRECTIONAL,
+ .netdev = rq->napi.dev,
+ };
+
+ rq->page_pool = page_pool_create(&p);
+ if (IS_ERR(rq->page_pool)) {
+ int err = PTR_ERR(rq->page_pool);
+
+ rq->page_pool = NULL;
+ return err;
+ }
+ return 0;
+}
+
+static int nsim_init_napi(struct netdevsim *ns)
+{
+ struct net_device *dev = ns->netdev;
+ struct nsim_rq *rq;
+ int err, i;
+
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ rq = &ns->rq[i];
+
+ netif_napi_add(dev, &rq->napi, nsim_poll);
+ }
+
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ rq = &ns->rq[i];
+
+ err = nsim_create_page_pool(rq);
+ if (err)
+ goto err_pp_destroy;
+ }
+
+ return 0;
+
+err_pp_destroy:
+ while (i--) {
+ page_pool_destroy(ns->rq[i].page_pool);
+ ns->rq[i].page_pool = NULL;
+ }
+
+ for (i = 0; i < dev->num_rx_queues; i++)
+ __netif_napi_del(&ns->rq[i].napi);
+
+ return err;
+}
+
+static void nsim_enable_napi(struct netdevsim *ns)
+{
+ int i;
+
+ for (i = 0; i < ns->netdev->num_rx_queues; i++) {
+ struct nsim_rq *rq = &ns->rq[i];
+
+ netif_queue_set_napi(ns->netdev, i,
+ NETDEV_QUEUE_TYPE_RX, &rq->napi);
+ napi_enable(&rq->napi);
+ }
+}
+
static int nsim_open(struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
- struct page_pool_params pp = { 0 };
+ int err;
+
+ err = nsim_init_napi(ns);
+ if (err)
+ return err;
+
+ nsim_enable_napi(ns);
- pp.pool_size = 128;
- pp.dev = &dev->dev;
- pp.dma_dir = DMA_BIDIRECTIONAL;
- pp.netdev = dev;
+ netif_carrier_on(dev);
- ns->pp = page_pool_create(&pp);
- return PTR_ERR_OR_ZERO(ns->pp);
+ return 0;
+}
+
+static void nsim_del_napi(struct netdevsim *ns)
+{
+ struct net_device *dev = ns->netdev;
+ int i;
+
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ struct nsim_rq *rq = &ns->rq[i];
+
+ napi_disable(&rq->napi);
+ __netif_napi_del(&rq->napi);
+ }
+ synchronize_net();
+
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ page_pool_destroy(ns->rq[i].page_pool);
+ ns->rq[i].page_pool = NULL;
+ }
}
static int nsim_stop(struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct netdevsim *peer = rtnl_dereference(ns->peer);
+
+ netif_carrier_off(dev);
+ if (peer)
+ netif_carrier_off(peer->netdev);
- page_pool_destroy(ns->pp);
+ nsim_del_napi(ns);
return 0;
}
@@ -437,7 +595,7 @@ nsim_pp_hold_write(struct file *file, const char __user *data,
if (!netif_running(ns->netdev) && val) {
ret = -ENETDOWN;
} else if (val) {
- ns->page = page_pool_dev_alloc_pages(ns->pp);
+ ns->page = page_pool_dev_alloc_pages(ns->rq[0].page_pool);
if (!ns->page)
ret = -ENOMEM;
} else {
@@ -477,6 +635,46 @@ static void nsim_setup(struct net_device *dev)
dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD;
}
+static int nsim_queue_init(struct netdevsim *ns)
+{
+ struct net_device *dev = ns->netdev;
+ int i;
+
+ ns->rq = kvcalloc(dev->num_rx_queues, sizeof(*ns->rq),
+ GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
+ if (!ns->rq)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->num_rx_queues; i++)
+ INIT_LIST_HEAD(&ns->rq[i].skb_queue);
+
+ return 0;
+}
+
+static void __nsim_skb_queue_purge(struct list_head *head)
+{
+ struct sk_buff *skb, *tmp;
+
+ list_for_each_entry_safe(skb, tmp, head, list) {
+ list_del(&skb->list);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void nsim_queue_free(struct netdevsim *ns)
+{
+ struct net_device *dev = ns->netdev;
+ int i;
+
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ if (!list_empty(&ns->rq[i].skb_queue))
+ __nsim_skb_queue_purge(&ns->rq[i].skb_queue);
+ }
+
+ kvfree(ns->rq);
+ ns->rq = NULL;
+}
+
static int nsim_init_netdevsim(struct netdevsim *ns)
{
struct mock_phc *phc;
@@ -495,10 +693,14 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
goto err_phc_destroy;
rtnl_lock();
- err = nsim_bpf_init(ns);
+ err = nsim_queue_init(ns);
if (err)
goto err_utn_destroy;
+ err = nsim_bpf_init(ns);
+ if (err)
+ goto err_rq_destroy;
+
nsim_macsec_init(ns);
nsim_ipsec_init(ns);
@@ -512,6 +714,8 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
nsim_ipsec_teardown(ns);
nsim_macsec_teardown(ns);
nsim_bpf_uninit(ns);
+err_rq_destroy:
+ nsim_queue_free(ns);
err_utn_destroy:
rtnl_unlock();
nsim_udp_tunnels_info_destroy(ns->netdev);
@@ -594,6 +798,7 @@ void nsim_destroy(struct netdevsim *ns)
nsim_ipsec_teardown(ns);
nsim_bpf_uninit(ns);
}
+ nsim_queue_free(ns);
rtnl_unlock();
if (nsim_dev_port_is_pf(ns->nsim_dev_port))
nsim_exit_netdevsim(ns);
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 7664ab823e29..87bf45ec4dd2 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -90,11 +90,18 @@ struct nsim_ethtool {
struct ethtool_fecparam fec;
};
+struct nsim_rq {
+ struct napi_struct napi;
+ struct list_head skb_queue;
+ struct page_pool *page_pool;
+};
+
struct netdevsim {
struct net_device *netdev;
struct nsim_dev *nsim_dev;
struct nsim_dev_port *nsim_dev_port;
struct mock_phc *phc;
+ struct nsim_rq *rq;
u64 tx_packets;
u64 tx_bytes;
--
2.43.0
^ permalink raw reply related [flat|nested] 6+ messages in thread
* [PATCH v1 2/2] net: selftest: add test for netdev netlink queue-get API
2024-04-16 5:15 [PATCH v1 0/2] netdevsim: add NAPI support David Wei
2024-04-16 5:15 ` [PATCH v1 1/2] " David Wei
@ 2024-04-16 5:15 ` David Wei
2024-04-17 0:31 ` Jakub Kicinski
1 sibling, 1 reply; 6+ messages in thread
From: David Wei @ 2024-04-16 5:15 UTC (permalink / raw)
To: netdev; +Cc: Jakub Kicinski, David S. Miller, Eric Dumazet, Paolo Abeni
Add a selftest for Netdev Netlink queue-get API. The ground truth is
determined by sysfs.
The test works with netdevsim by default or with a real device by
setting NETIF.
Minor changes to NetdevSimDev to accept the number of queues to create
per port and pass through NetdevSimDev args in NetDrvEnv ctor.
Signed-off-by: David Wei <dw@davidwei.uk>
---
tools/testing/selftests/drivers/net/Makefile | 1 +
.../selftests/drivers/net/lib/py/env.py | 10 ++-
tools/testing/selftests/drivers/net/queues.py | 67 +++++++++++++++++++
tools/testing/selftests/net/lib/py/nsim.py | 4 +-
4 files changed, 78 insertions(+), 4 deletions(-)
create mode 100755 tools/testing/selftests/drivers/net/queues.py
diff --git a/tools/testing/selftests/drivers/net/Makefile b/tools/testing/selftests/drivers/net/Makefile
index 379cdb1960a7..118a73650dbc 100644
--- a/tools/testing/selftests/drivers/net/Makefile
+++ b/tools/testing/selftests/drivers/net/Makefile
@@ -3,5 +3,6 @@
TEST_INCLUDES := $(wildcard lib/py/*.py)
TEST_PROGS := stats.py
+TEST_PROGS += queues.py
include ../../lib.mk
diff --git a/tools/testing/selftests/drivers/net/lib/py/env.py b/tools/testing/selftests/drivers/net/lib/py/env.py
index e1abe9491daf..7187aca5c516 100644
--- a/tools/testing/selftests/drivers/net/lib/py/env.py
+++ b/tools/testing/selftests/drivers/net/lib/py/env.py
@@ -7,7 +7,7 @@ from lib.py import ip
from lib.py import NetdevSimDev
class NetDrvEnv:
- def __init__(self, src_path):
+ def __init__(self, src_path, **kwargs):
self._ns = None
self.env = os.environ.copy()
@@ -16,7 +16,7 @@ class NetDrvEnv:
if 'NETIF' in self.env:
self.dev = ip("link show dev " + self.env['NETIF'], json=True)[0]
else:
- self._ns = NetdevSimDev()
+ self._ns = NetdevSimDev(**kwargs)
self.dev = self._ns.nsims[0].dev
self.ifindex = self.dev['ifindex']
@@ -50,3 +50,9 @@ class NetDrvEnv:
else:
self.env[k] = token
k = None
+
+ def dev_up(self):
+ ip(f"link set dev {self.dev['ifname']} up")
+
+ def dev_down(self):
+ ip(f"link set dev {self.dev['ifname']} down")
diff --git a/tools/testing/selftests/drivers/net/queues.py b/tools/testing/selftests/drivers/net/queues.py
new file mode 100755
index 000000000000..de2820f2759a
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/queues.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+from lib.py import ksft_run, ksft_in, ksft_true, ksft_eq, KsftSkipEx, KsftXfailEx
+from lib.py import NetdevFamily, NlError
+from lib.py import NetDrvEnv
+from lib.py import cmd
+import glob
+
+netnl = NetdevFamily()
+
+
+def sys_get_queues(ifname) -> int:
+ folders = glob.glob(f'/sys/class/net/{ifname}/queues/rx-*')
+ return len(folders)
+
+
+def nl_get_queues(cfg):
+ global netnl
+ queues = netnl.queue_get({'ifindex': cfg.ifindex}, dump=True)
+ if queues:
+ return len([q for q in queues if q['type'] == 'rx'])
+ return None
+
+
+def get_queues(cfg) -> None:
+ global netnl
+
+ queues = nl_get_queues(cfg)
+ if not queues:
+ raise KsftSkipEx("queue-get not supported by device")
+
+ expected = sys_get_queues(cfg.dev['ifname'])
+ ksft_eq(queues, expected)
+
+
+def addremove_queues(cfg) -> None:
+ global netnl
+
+ queues = nl_get_queues(cfg)
+ if not queues:
+ raise KsftSkipEx("queue-get not supported by device")
+
+ expected = sys_get_queues(cfg.dev['ifname'])
+ ksft_eq(queues, expected)
+
+ # reduce queue count by 1
+ expected = expected - 1
+ cmd(f"ethtool -L {cfg.dev['ifname']} combined {expected}")
+ queues = nl_get_queues(cfg)
+ ksft_eq(queues, expected)
+
+ # increase queue count by 1
+ expected = expected + 1
+ cmd(f"ethtool -L {cfg.dev['ifname']} combined {expected}")
+ queues = nl_get_queues(cfg)
+ ksft_eq(queues, expected)
+
+
+def main() -> None:
+ with NetDrvEnv(__file__, queue_count=3) as cfg:
+ cfg.dev_up()
+ ksft_run([get_queues, addremove_queues], args=(cfg, ))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/testing/selftests/net/lib/py/nsim.py b/tools/testing/selftests/net/lib/py/nsim.py
index 06896cdf7c18..f571a8b3139b 100644
--- a/tools/testing/selftests/net/lib/py/nsim.py
+++ b/tools/testing/selftests/net/lib/py/nsim.py
@@ -49,7 +49,7 @@ class NetdevSimDev:
with open(fullpath, "w") as f:
f.write(val)
- def __init__(self, port_count=1, ns=None):
+ def __init__(self, port_count=1, queue_count=1, ns=None):
# nsim will spawn in init_net, we'll set to actual ns once we switch it there
self.ns = None
@@ -59,7 +59,7 @@ class NetdevSimDev:
addr = random.randrange(1 << 15)
while True:
try:
- self.ctrl_write("new_device", "%u %u" % (addr, port_count))
+ self.ctrl_write("new_device", "%u %u %u" % (addr, port_count, queue_count))
except OSError as e:
if e.errno == errno.ENOSPC:
addr = random.randrange(1 << 15)
--
2.43.0
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [PATCH v1 1/2] netdevsim: add NAPI support
2024-04-16 5:15 ` [PATCH v1 1/2] " David Wei
@ 2024-04-17 0:27 ` Jakub Kicinski
2024-04-19 16:08 ` David Wei
0 siblings, 1 reply; 6+ messages in thread
From: Jakub Kicinski @ 2024-04-17 0:27 UTC (permalink / raw)
To: David Wei; +Cc: netdev, David S. Miller, Eric Dumazet, Paolo Abeni
On Mon, 15 Apr 2024 22:15:26 -0700 David Wei wrote:
> Add NAPI support to netdevsim, similar to veth.
>
> * Add a nsim_rq rx queue structure to hold a NAPI instance and a skb
> queue.
> * During xmit, store the skb in the peer skb queue and schedule NAPI.
> * During napi_poll(), drain the skb queue and pass up the stack.
> * Add assoc between rxq and NAPI instance using netif_queue_set_napi().
> +#define NSIM_RING_SIZE 256
> +
> +static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb)
> +{
> + if (list_count_nodes(&rq->skb_queue) > NSIM_RING_SIZE) {
> + dev_kfree_skb_any(skb);
> + return NET_RX_DROP;
> + }
> +
> + list_add_tail(&skb->list, &rq->skb_queue);
Why not use struct sk_buff_head ?
It has a purge helper for freeing, and remembers its own length.
> +static int nsim_poll(struct napi_struct *napi, int budget)
> +{
> + struct nsim_rq *rq = container_of(napi, struct nsim_rq, napi);
> + int done;
> +
> + done = nsim_rcv(rq, budget);
> +
> + if (done < budget && napi_complete_done(napi, done)) {
> + if (unlikely(!list_empty(&rq->skb_queue)))
> + napi_schedule(&rq->napi);
I think you can drop the re-check, NAPI has a built in protection
for this kind of race.
> + }
> +
> + return done;
> +}
> static int nsim_open(struct net_device *dev)
> {
> struct netdevsim *ns = netdev_priv(dev);
> - struct page_pool_params pp = { 0 };
> + int err;
> +
> + err = nsim_init_napi(ns);
> + if (err)
> + return err;
> +
> + nsim_enable_napi(ns);
>
> - pp.pool_size = 128;
> - pp.dev = &dev->dev;
> - pp.dma_dir = DMA_BIDIRECTIONAL;
> - pp.netdev = dev;
> + netif_carrier_on(dev);
Why the carrier? It's on by default.
Should be a separate patch if needed.
> - ns->pp = page_pool_create(&pp);
> - return PTR_ERR_OR_ZERO(ns->pp);
> + return 0;
> +}
> diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
> index 7664ab823e29..87bf45ec4dd2 100644
> --- a/drivers/net/netdevsim/netdevsim.h
> +++ b/drivers/net/netdevsim/netdevsim.h
> @@ -90,11 +90,18 @@ struct nsim_ethtool {
> struct ethtool_fecparam fec;
> };
>
> +struct nsim_rq {
> + struct napi_struct napi;
> + struct list_head skb_queue;
> + struct page_pool *page_pool;
You added the new page_pool pointer but didn't delete the one
I added recently to the device?
> +};
> +
> struct netdevsim {
> struct net_device *netdev;
> struct nsim_dev *nsim_dev;
> struct nsim_dev_port *nsim_dev_port;
> struct mock_phc *phc;
> + struct nsim_rq *rq;
>
> u64 tx_packets;
> u64 tx_bytes;
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH v1 2/2] net: selftest: add test for netdev netlink queue-get API
2024-04-16 5:15 ` [PATCH v1 2/2] net: selftest: add test for netdev netlink queue-get API David Wei
@ 2024-04-17 0:31 ` Jakub Kicinski
0 siblings, 0 replies; 6+ messages in thread
From: Jakub Kicinski @ 2024-04-17 0:31 UTC (permalink / raw)
To: David Wei; +Cc: netdev, David S. Miller, Eric Dumazet, Paolo Abeni
On Mon, 15 Apr 2024 22:15:27 -0700 David Wei wrote:
> Add a selftest for Netdev Netlink queue-get API. The ground truth is
> determined by sysfs.
>
> The test works with netdevsim by default or with a real device by
> setting NETIF.
Nice!
> + def dev_up(self):
> + ip(f"link set dev {self.dev['ifname']} up")
> +
> + def dev_down(self):
> + ip(f"link set dev {self.dev['ifname']} down")
Let's ifup the device as part of env setup instead?
> diff --git a/tools/testing/selftests/drivers/net/queues.py b/tools/testing/selftests/drivers/net/queues.py
> new file mode 100755
> index 000000000000..de2820f2759a
> --- /dev/null
> +++ b/tools/testing/selftests/drivers/net/queues.py
> @@ -0,0 +1,67 @@
> +#!/usr/bin/env python3
> +# SPDX-License-Identifier: GPL-2.0
> +
> +from lib.py import ksft_run, ksft_in, ksft_true, ksft_eq, KsftSkipEx, KsftXfailEx
> +from lib.py import NetdevFamily, NlError
> +from lib.py import NetDrvEnv
> +from lib.py import cmd
> +import glob
> +
> +netnl = NetdevFamily()
Would it be cleaner to pass this as an argument to the tests instead of
using a global?
--
pw-bot: cr
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH v1 1/2] netdevsim: add NAPI support
2024-04-17 0:27 ` Jakub Kicinski
@ 2024-04-19 16:08 ` David Wei
0 siblings, 0 replies; 6+ messages in thread
From: David Wei @ 2024-04-19 16:08 UTC (permalink / raw)
To: Jakub Kicinski; +Cc: netdev, David S. Miller, Eric Dumazet, Paolo Abeni
On 2024-04-16 5:27 pm, Jakub Kicinski wrote:
> On Mon, 15 Apr 2024 22:15:26 -0700 David Wei wrote:
>> Add NAPI support to netdevsim, similar to veth.
>>
>> * Add a nsim_rq rx queue structure to hold a NAPI instance and a skb
>> queue.
>> * During xmit, store the skb in the peer skb queue and schedule NAPI.
>> * During napi_poll(), drain the skb queue and pass up the stack.
>> * Add assoc between rxq and NAPI instance using netif_queue_set_napi().
>
>> +#define NSIM_RING_SIZE 256
>> +
>> +static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb)
>> +{
>> + if (list_count_nodes(&rq->skb_queue) > NSIM_RING_SIZE) {
>> + dev_kfree_skb_any(skb);
>> + return NET_RX_DROP;
>> + }
>> +
>> + list_add_tail(&skb->list, &rq->skb_queue);
>
> Why not use struct sk_buff_head ?
> It has a purge helper for freeing, and remembers its own length.
Didn't know about sk_buff_head! Thanks, it's much better.
>
>> +static int nsim_poll(struct napi_struct *napi, int budget)
>> +{
>> + struct nsim_rq *rq = container_of(napi, struct nsim_rq, napi);
>> + int done;
>> +
>> + done = nsim_rcv(rq, budget);
>> +
>> + if (done < budget && napi_complete_done(napi, done)) {
>> + if (unlikely(!list_empty(&rq->skb_queue)))
>> + napi_schedule(&rq->napi);
>
> I think you can drop the re-check, NAPI has a built in protection
> for this kind of race.
Would veth also want this dropped, or does it serve a different purpose
there?
>
>> + }
>> +
>> + return done;
>> +}
>
>> static int nsim_open(struct net_device *dev)
>> {
>> struct netdevsim *ns = netdev_priv(dev);
>> - struct page_pool_params pp = { 0 };
>> + int err;
>> +
>> + err = nsim_init_napi(ns);
>> + if (err)
>> + return err;
>> +
>> + nsim_enable_napi(ns);
>>
>> - pp.pool_size = 128;
>> - pp.dev = &dev->dev;
>> - pp.dma_dir = DMA_BIDIRECTIONAL;
>> - pp.netdev = dev;
>> + netif_carrier_on(dev);
>
> Why the carrier? It's on by default.
> Should be a separate patch if needed.
Symmetry, not sure if it was needed or not. Will remove.
>
>> - ns->pp = page_pool_create(&pp);
>> - return PTR_ERR_OR_ZERO(ns->pp);
>> + return 0;
>> +}
>
>> diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
>> index 7664ab823e29..87bf45ec4dd2 100644
>> --- a/drivers/net/netdevsim/netdevsim.h
>> +++ b/drivers/net/netdevsim/netdevsim.h
>> @@ -90,11 +90,18 @@ struct nsim_ethtool {
>> struct ethtool_fecparam fec;
>> };
>>
>> +struct nsim_rq {
>> + struct napi_struct napi;
>> + struct list_head skb_queue;
>> + struct page_pool *page_pool;
>
> You added the new page_pool pointer but didn't delete the one
> I added recently to the device?
Yeah, sorry that slipped through the rebase.
>
>> +};
>> +
>> struct netdevsim {
>> struct net_device *netdev;
>> struct nsim_dev *nsim_dev;
>> struct nsim_dev_port *nsim_dev_port;
>> struct mock_phc *phc;
>> + struct nsim_rq *rq;
>>
>> u64 tx_packets;
>> u64 tx_bytes;
>
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2024-04-19 16:08 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-04-16 5:15 [PATCH v1 0/2] netdevsim: add NAPI support David Wei
2024-04-16 5:15 ` [PATCH v1 1/2] " David Wei
2024-04-17 0:27 ` Jakub Kicinski
2024-04-19 16:08 ` David Wei
2024-04-16 5:15 ` [PATCH v1 2/2] net: selftest: add test for netdev netlink queue-get API David Wei
2024-04-17 0:31 ` Jakub Kicinski
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).