LKML Archive on lore.kernel.org
From: Yunsheng Lin <linyunsheng@huawei.com>
To: <davem@davemloft.net>, <kuba@kernel.org>
Cc: <alexander.duyck@gmail.com>, <linux@armlinux.org.uk>,
<mw@semihalf.com>, <linuxarm@openeuler.org>,
<yisen.zhuang@huawei.com>, <salil.mehta@huawei.com>,
<thomas.petazzoni@bootlin.com>, <hawk@kernel.org>,
<ilias.apalodimas@linaro.org>, <ast@kernel.org>,
<daniel@iogearbox.net>, <john.fastabend@gmail.com>,
<akpm@linux-foundation.org>, <peterz@infradead.org>,
<will@kernel.org>, <willy@infradead.org>, <vbabka@suse.cz>,
<fenghua.yu@intel.com>, <guro@fb.com>, <peterx@redhat.com>,
<feng.tang@intel.com>, <jgg@ziepe.ca>, <mcroce@microsoft.com>,
<hughd@google.com>, <jonathan.lemon@gmail.com>, <alobakin@pm.me>,
<willemb@google.com>, <wenxu@ucloud.cn>,
<cong.wang@bytedance.com>, <haokexin@gmail.com>,
<nogikh@google.com>, <elver@google.com>, <yhs@fb.com>,
<kpsingh@kernel.org>, <andrii@kernel.org>, <kafai@fb.com>,
<songliubraving@fb.com>, <netdev@vger.kernel.org>,
<linux-kernel@vger.kernel.org>, <bpf@vger.kernel.org>
Subject: [PATCH rfc v3 1/4] page_pool: keep pp info as long as page pool owns the page
Date: Mon, 12 Jul 2021 17:19:37 +0800
Message-ID: <1626081581-54524-2-git-send-email-linyunsheng@huawei.com>
In-Reply-To: <1626081581-54524-1-git-send-email-linyunsheng@huawei.com>
Currently, page->pp is cleared and set every time the page
is recycled, which is unnecessary.

So only set page->pp when the page is added to the page
pool, and only clear it when the page is released from the
page pool.
This is also a preparation for supporting elevated refcnt in
the page pool.
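
As a rough sketch of the intended lifecycle after this patch (for
illustration only; the actual callers are in the diff below):

    /* set once when the pool takes ownership of the page
     * (called from the page_pool allocation paths)
     */
    page_pool_set_pp_info(pool, page);  /* page->pp = pool;
                                         * page->pp_magic |= PP_SIGNATURE;
                                         */

    /* driver rx path: only flag the skb, no per-page store needed */
    skb_mark_for_recycle(skb);          /* skb->pp_recycle = 1; */

    /* cleared once when the pool releases the page back to the
     * page allocator (page_pool_release_page())
     */
    page_pool_clear_pp_info(page);      /* page->pp_magic = 0;
                                         * page->pp = NULL;
                                         */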
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
---
drivers/net/ethernet/marvell/mvneta.c | 6 +-----
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 2 +-
drivers/net/ethernet/ti/cpsw.c | 2 +-
drivers/net/ethernet/ti/cpsw_new.c | 2 +-
include/linux/skbuff.h | 4 +---
include/net/page_pool.h | 7 -------
net/core/page_pool.c | 21 +++++++++++++++++----
7 files changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 361bc4f..89bf31fd 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2327,7 +2327,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
if (!skb)
return ERR_PTR(-ENOMEM);
- skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);
+ skb_mark_for_recycle(skb);
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
@@ -2339,10 +2339,6 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb_frag_page(frag), skb_frag_off(frag),
skb_frag_size(frag), PAGE_SIZE);
- /* We don't need to reset pp_recycle here. It's already set, so
- * just mark fragments for recycling.
- */
- page_pool_store_mem_info(skb_frag_page(frag), pool);
}
return skb;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 3229baf..320eddb 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3995,7 +3995,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
}
if (pp)
- skb_mark_for_recycle(skb, page, pp);
+ skb_mark_for_recycle(skb);
else
dma_unmap_single_attrs(dev->dev.parent, dma_addr,
bm_pool->buf_size, DMA_FROM_DEVICE,
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index cbbd0f6..9d59143 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -431,7 +431,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb->protocol = eth_type_trans(skb, ndev);
/* mark skb for recycling */
- skb_mark_for_recycle(skb, page, pool);
+ skb_mark_for_recycle(skb);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 57d279f..a4234a3 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -374,7 +374,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb->protocol = eth_type_trans(skb, ndev);
/* mark skb for recycling */
- skb_mark_for_recycle(skb, page, pool);
+ skb_mark_for_recycle(skb);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b2db9cd..7795979 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -4711,11 +4711,9 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
}
#ifdef CONFIG_PAGE_POOL
-static inline void skb_mark_for_recycle(struct sk_buff *skb, struct page *page,
- struct page_pool *pp)
+static inline void skb_mark_for_recycle(struct sk_buff *skb)
{
skb->pp_recycle = 1;
- page_pool_store_mem_info(page, pp);
}
#endif
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 3dd62dd..8d7744d 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -253,11 +253,4 @@ static inline void page_pool_ring_unlock(struct page_pool *pool)
spin_unlock_bh(&pool->ring.producer_lock);
}
-/* Store mem_info on struct page and use it while recycling skb frags */
-static inline
-void page_pool_store_mem_info(struct page *page, struct page_pool *pp)
-{
- page->pp = pp;
-}
-
#endif /* _NET_PAGE_POOL_H */
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 5e4eb45..78838c6 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -206,6 +206,19 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
return true;
}
+static void page_pool_set_pp_info(struct page_pool *pool,
+ struct page *page)
+{
+ page->pp = pool;
+ page->pp_magic |= PP_SIGNATURE;
+}
+
+static void page_pool_clear_pp_info(struct page *page)
+{
+ page->pp_magic = 0;
+ page->pp = NULL;
+}
+
static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
gfp_t gfp)
{
@@ -222,7 +235,7 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
return NULL;
}
- page->pp_magic |= PP_SIGNATURE;
+ page_pool_set_pp_info(pool, page);
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
@@ -266,7 +279,8 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
put_page(page);
continue;
}
- page->pp_magic |= PP_SIGNATURE;
+
+ page_pool_set_pp_info(pool, page);
pool->alloc.cache[pool->alloc.count++] = page;
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
@@ -345,7 +359,7 @@ void page_pool_release_page(struct page_pool *pool, struct page *page)
DMA_ATTR_SKIP_CPU_SYNC);
page_pool_set_dma_addr(page, 0);
skip_dma_unmap:
- page->pp_magic = 0;
+ page_pool_clear_pp_info(page);
/* This may be the last page returned, releasing the pool, so
* it is not safe to reference pool afterwards.
@@ -644,7 +658,6 @@ bool page_pool_return_skb_page(struct page *page)
* The page will be returned to the pool here regardless of the
* 'flipped' fragment being in use or not.
*/
- page->pp = NULL;
page_pool_put_full_page(pp, page, false);
return true;
--
2.7.4
Thread overview (9+ messages):
2021-07-12 9:19 [PATCH rfc v3 0/4] add frag page support in page pool Yunsheng Lin
2021-07-12 9:19 ` Yunsheng Lin [this message]
2021-07-12 9:19 ` [PATCH rfc v3 2/4] page_pool: add interface for getting and setting pagecnt_bias Yunsheng Lin
2021-07-12 9:19 ` [PATCH rfc v3 3/4] page_pool: add frag page recycling support in page pool Yunsheng Lin
2021-07-12 9:19 ` [PATCH rfc v3 3/4] page_pool: add page recycling support based on elevated refcnt Yunsheng Lin
2021-07-12 9:28 ` [Linuxarm] " Yunsheng Lin
2021-07-12 9:19 ` [PATCH rfc v3 4/4] net: hns3: support skb's frag page recycling based on page pool Yunsheng Lin
2021-07-12 10:39 ` [Linuxarm] [PATCH rfc v3 0/4] add frag page support in " Yunsheng Lin
2021-07-12 12:16 Yunsheng Lin
2021-07-12 12:16 ` [PATCH rfc v3 1/4] page_pool: keep pp info as long as page pool owns the page Yunsheng Lin