Netdev Archive on lore.kernel.org
From: Paolo Abeni <pabeni@redhat.com>
To: netdev@vger.kernel.org
Cc: "David S. Miller" <davem@davemloft.net>,
Jakub Kicinski <kuba@kernel.org>, Shuah Khan <shuah@kernel.org>,
toke@redhat.com
Subject: [PATCH net-next 2/5] veth: factor out initialization helper
Date: Tue, 20 Jul 2021 10:41:49 +0200
Message-ID: <7a8829c1dee8ddbe3fc40c43067ed1bafbe8d7e7.1626768072.git.pabeni@redhat.com>
In-Reply-To: <cover.1626768072.git.pabeni@redhat.com>
Extract the code that enables and disables a range of xdp/napi
instances into simpler helpers, with the common property that the
"disable" helpers can't fail.

These will be used by the next patch. No functional change intended.
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
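Note (not part of the patch): below is a minimal, self-contained userspace C
sketch of the pattern the new *_range() helpers follow -- a full-range wrapper
delegates to a range helper, and any failure inside the range is unwound back
to 'start' so the caller never sees a half-initialized range. enable_one() and
disable_one() are illustrative stand-ins, not kernel APIs; in veth these roles
are played by ptr_ring_init()/napi_enable() and their teardown counterparts.

#include <stdio.h>

/* Stand-in for per-queue init; fails at index 3 to exercise the unwind path. */
static int enable_one(int i)
{
	if (i == 3)
		return -1;	/* simulated allocation failure */
	printf("enabled %d\n", i);
	return 0;
}

/* Stand-in for per-queue teardown; by construction it cannot fail. */
static void disable_one(int i)
{
	printf("disabled %d\n", i);
}

/* Range helper: on failure, unwind only the entries it enabled, i.e. [start, i). */
static int enable_range(int start, int end)
{
	int err, i;

	for (i = start; i < end; i++) {
		err = enable_one(i);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	for (i--; i >= start; i--)
		disable_one(i);
	return err;
}

/* Full-range wrapper, mirroring __veth_napi_enable() calling
 * __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues). */
static int enable_all(int nr_queues)
{
	return enable_range(0, nr_queues);
}

int main(void)
{
	return enable_all(5) ? 1 : 0;
}

Building and running it (e.g. "gcc -Wall sketch.c && ./a.out") prints
queues 0..2 being enabled, then 2, 1, 0 being torn down in reverse order
after the simulated failure at index 3 -- the same rollback order the
"for (i--; i >= start; i--)" loops in the patch implement.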
drivers/net/veth.c | 141 +++++++++++++++++++++++++++++----------------
1 file changed, 92 insertions(+), 49 deletions(-)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 4b3e2617fdb5..9eb8c1034e98 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -926,12 +926,12 @@ static int veth_poll(struct napi_struct *napi, int budget)
return done;
}
-static int __veth_napi_enable(struct net_device *dev)
+static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
{
struct veth_priv *priv = netdev_priv(dev);
int err, i;
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL);
@@ -939,7 +939,7 @@ static int __veth_napi_enable(struct net_device *dev)
goto err_xdp_ring;
}
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
napi_enable(&rq->xdp_napi);
@@ -947,19 +947,25 @@ static int __veth_napi_enable(struct net_device *dev)
}
return 0;
+
err_xdp_ring:
- for (i--; i >= 0; i--)
+ for (i--; i >= start; i--)
ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
return err;
}
-static void veth_napi_del(struct net_device *dev)
+static int __veth_napi_enable(struct net_device *dev)
+{
+ return __veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
+}
+
+static void veth_napi_del_range(struct net_device *dev, int start, int end)
{
struct veth_priv *priv = netdev_priv(dev);
int i;
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
rcu_assign_pointer(priv->rq[i].napi, NULL);
@@ -968,7 +974,7 @@ static void veth_napi_del(struct net_device *dev)
}
synchronize_net();
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
rq->rx_notify_masked = false;
@@ -976,41 +982,90 @@ static void veth_napi_del(struct net_device *dev)
}
}
+static void veth_napi_del(struct net_device *dev)
+{
+ veth_napi_del_range(dev, 0, dev->real_num_rx_queues);
+}
+
static bool veth_gro_requested(const struct net_device *dev)
{
return !!(dev->wanted_features & NETIF_F_GRO);
}
-static int veth_enable_xdp(struct net_device *dev)
+static int veth_enable_xdp_range(struct net_device *dev, int start, int end,
+ bool napi_already_on)
{
- bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
struct veth_priv *priv = netdev_priv(dev);
int err, i;
- if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
- for (i = 0; i < dev->real_num_rx_queues; i++) {
- struct veth_rq *rq = &priv->rq[i];
+ for (i = start; i < end; i++) {
+ struct veth_rq *rq = &priv->rq[i];
- if (!napi_already_on)
- netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
- err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
- if (err < 0)
- goto err_rxq_reg;
+ if (!napi_already_on)
+ netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
+ err = xdp_rxq_info_reg(&rq->xdp_rxq, dev, i, rq->xdp_napi.napi_id);
+ if (err < 0)
+ goto err_rxq_reg;
- err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
- MEM_TYPE_PAGE_SHARED,
- NULL);
- if (err < 0)
- goto err_reg_mem;
+ err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err < 0)
+ goto err_reg_mem;
- /* Save original mem info as it can be overwritten */
- rq->xdp_mem = rq->xdp_rxq.mem;
- }
+ /* Save original mem info as it can be overwritten */
+ rq->xdp_mem = rq->xdp_rxq.mem;
+ }
+ return 0;
+
+err_reg_mem:
+ xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
+err_rxq_reg:
+ for (i--; i >= start; i--) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+ if (!napi_already_on)
+ netif_napi_del(&rq->xdp_napi);
+ }
+
+ return err;
+}
+
+static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
+ bool delete_napi)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+ int i;
+
+ for (i = start; i < end; i++) {
+ struct veth_rq *rq = &priv->rq[i];
+
+ rq->xdp_rxq.mem = rq->xdp_mem;
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+
+ if (delete_napi)
+ netif_napi_del(&rq->xdp_napi);
+ }
+}
+
+static int veth_enable_xdp(struct net_device *dev)
+{
+ bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
+ struct veth_priv *priv = netdev_priv(dev);
+ int err, i;
+
+ if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
+ err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
+ if (err)
+ return err;
if (!napi_already_on) {
err = __veth_napi_enable(dev);
- if (err)
- goto err_rxq_reg;
+ if (err) {
+ veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, true);
+ return err;
+ }
if (!veth_gro_requested(dev)) {
/* user-space did not require GRO, but adding XDP
@@ -1028,18 +1083,6 @@ static int veth_enable_xdp(struct net_device *dev)
}
return 0;
-err_reg_mem:
- xdp_rxq_info_unreg(&priv->rq[i].xdp_rxq);
-err_rxq_reg:
- for (i--; i >= 0; i--) {
- struct veth_rq *rq = &priv->rq[i];
-
- xdp_rxq_info_unreg(&rq->xdp_rxq);
- if (!napi_already_on)
- netif_napi_del(&rq->xdp_napi);
- }
-
- return err;
}
static void veth_disable_xdp(struct net_device *dev)
@@ -1062,28 +1105,23 @@ static void veth_disable_xdp(struct net_device *dev)
}
}
- for (i = 0; i < dev->real_num_rx_queues; i++) {
- struct veth_rq *rq = &priv->rq[i];
-
- rq->xdp_rxq.mem = rq->xdp_mem;
- xdp_rxq_info_unreg(&rq->xdp_rxq);
- }
+ veth_disable_xdp_range(dev, 0, dev->real_num_rx_queues, false);
}
-static int veth_napi_enable(struct net_device *dev)
+static int veth_napi_enable_range(struct net_device *dev, int start, int end)
{
struct veth_priv *priv = netdev_priv(dev);
int err, i;
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
netif_napi_add(dev, &rq->xdp_napi, veth_poll, NAPI_POLL_WEIGHT);
}
- err = __veth_napi_enable(dev);
+ err = __veth_napi_enable_range(dev, start, end);
if (err) {
- for (i = 0; i < dev->real_num_rx_queues; i++) {
+ for (i = start; i < end; i++) {
struct veth_rq *rq = &priv->rq[i];
netif_napi_del(&rq->xdp_napi);
@@ -1093,6 +1131,11 @@ static int veth_napi_enable(struct net_device *dev)
return err;
}
+static int veth_napi_enable(struct net_device *dev)
+{
+ return veth_napi_enable_range(dev, 0, dev->real_num_rx_queues);
+}
+
static int veth_open(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
--
2.26.3