Netdev Archive on lore.kernel.org
From: Boris Pismenny <borisp@nvidia.com>
To: <dsahern@gmail.com>, <kuba@kernel.org>, <davem@davemloft.net>,
	<saeedm@nvidia.com>, <hch@lst.de>, <sagi@grimberg.me>,
	<axboe@fb.com>, <kbusch@kernel.org>, <viro@zeniv.linux.org.uk>,
	<edumazet@google.com>, <smalin@marvell.com>
Cc: <boris.pismenny@gmail.com>, <linux-nvme@lists.infradead.org>,
	<netdev@vger.kernel.org>, <benishay@nvidia.com>,
	<ogerlitz@nvidia.com>, <yorayz@nvidia.com>
Subject: [PATCH v5 net-next 36/36] net/mlx5e: NVMEoTCP DDGST TX statistics
Date: Thu, 22 Jul 2021 14:03:25 +0300
Message-ID: <20210722110325.371-37-borisp@nvidia.com>
In-Reply-To: <20210722110325.371-1-borisp@nvidia.com>

From: Yoray Zack <yorayz@nvidia.com>

NVMEoTCP Tx offload statistics include both control-path and data-path
counters: offloaded contexts, offloaded packets/bytes,
out-of-order packets, resync operations (success/fail),
and DUMP packets/bytes.
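
As an illustrative sketch (not part of the patch itself), with
CONFIG_MLX5_EN_NVMEOTCP enabled the new software counters are expected
to show up in the driver's ethtool statistics; a hypothetical read
might look like this (interface name and zero values are placeholders
only):

    # ethtool -S eth0 | grep tx_nvmeotcp
    tx_nvmeotcp_offload_packets: 0
    tx_nvmeotcp_offload_bytes: 0
    tx_nvmeotcp_ooo: 0
    tx_nvmeotcp_dump_packets: 0
    tx_nvmeotcp_dump_bytes: 0
    tx_nvmeotcp_resync: 0
    tx_nvmeotcp_ctx: 0
    tx_nvmeotcp_resync_fail: 0
    tx_nvmeotcp_no_need_offload: 0
    tx_nvmeotcp_no_pdu_info: 0

The matching per-queue counters (declared with MLX5E_DECLARE_TX_STAT)
are reported per TX queue as well.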

Signed-off-by: Yoray Zack <yorayz@nvidia.com>
---
 .../mellanox/mlx5/core/en_accel/ktls_tx.c     |  4 ++
 .../mellanox/mlx5/core/en_accel/nvmeotcp.c    | 22 ++++++++++-
 .../ethernet/mellanox/mlx5/core/en_stats.c    | 37 +++++++++++++++++++
 .../ethernet/mellanox/mlx5/core/en_stats.h    | 23 ++++++++++++
 4 files changed, 84 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 64780d0143ec..4b0d4bd88b9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -366,6 +366,10 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
 		stats->tls_dump_bytes += wi->num_bytes;
 		break;
 	case MLX5E_DUMP_WQE_NVMEOTCP:
+#ifdef CONFIG_ULP_DDP
+		stats->nvmeotcp_dump_packets++;
+		stats->nvmeotcp_dump_bytes += wi->num_bytes;
+#endif
 		break;
 	}
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
index f3ef92167e25..34676c81d889 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/nvmeotcp.c
@@ -1388,8 +1388,10 @@ bool mlx5e_nvmeotcp_resync_cap(struct mlx5e_nvmeotcp_queue *queue,
 	if (unlikely(ret))
 		goto err_out;
 out:
+	sq->stats->nvmeotcp_resync++;
 	return true;
 err_out:
+	sq->stats->nvmeotcp_resync_fail++;
 	return false;
 }
 
@@ -1413,21 +1415,29 @@ mlx5e_nvmeotcp_handle_ooo_skb(struct mlx5e_nvmeotcp_queue *queue,
 			      u32  seq, int datalen)
 {
 	struct ulp_ddp_pdu_info *pdu_info = NULL;
+	struct mlx5e_sq_stats *stats = sq->stats;
 
+	stats->nvmeotcp_ooo++;
-	if (mlx5e_nvmeotcp_check_if_need_offload(queue, seq + datalen, seq))
+	if (mlx5e_nvmeotcp_check_if_need_offload(queue, seq + datalen, seq)) {
+		stats->nvmeotcp_no_need_offload++;
 		return MLX5E_NVMEOTCP_RESYNC_SKIP;
+	}
 
 	/* ask for pdu_info that includes the tcp_seq */
 	pdu_info = ulp_ddp_get_pdu_info(skb->sk, seq);
 
-	if (!pdu_info)
+	if (!pdu_info) {
+		stats->nvmeotcp_no_pdu_info++;
 		return MLX5E_NVMEOTCP_RESYNC_SKIP;
+	}
 
 	queue->end_seq_hint = pdu_info->end_seq - 4;
 	queue->start_pdu_hint = pdu_info->start_seq;
 	/* check if this packet contain crc - if so offload else no */
-	if (mlx5e_nvmeotcp_check_if_need_offload(queue, seq + datalen, seq))
+	if (mlx5e_nvmeotcp_check_if_need_offload(queue, seq + datalen, seq)) {
+		stats->nvmeotcp_no_need_offload++;
 		return MLX5E_NVMEOTCP_RESYNC_SKIP;
+	}
 
 	/*update NIC about resync - he will rebuild parse machine
 	 *send psv with small fence
@@ -1464,6 +1474,7 @@ bool mlx5e_nvmeotcp_handle_tx_skb(struct net_device *netdev,
 				  struct sk_buff *skb, int *nvmeotcp_tisn)
 {
 	struct mlx5e_nvmeotcp_queue *ctx;
+	struct mlx5e_sq_stats *stats = sq->stats;
 	int datalen;
 	u32 seq;
 
@@ -1484,8 +1495,10 @@ bool mlx5e_nvmeotcp_handle_tx_skb(struct net_device *netdev,
 	if (WARN_ON_ONCE(ctx->ulp_ddp_ctx.netdev != netdev))
 		goto err_out;
 
-	if (unlikely(mlx5e_nvmeotcp_test_and_clear_pending(ctx)))
+	if (unlikely(mlx5e_nvmeotcp_test_and_clear_pending(ctx))) {
 		mlx5e_nvmeotcp_tx_post_param_wqes(sq, skb->sk, ctx);
+		stats->nvmeotcp_ctx++;
+	}
 
 	seq = ntohl(tcp_hdr(skb)->seq);
 	if (unlikely(ctx->ulp_ddp_ctx.expected_seq != seq)) {
@@ -1504,6 +1517,11 @@ bool mlx5e_nvmeotcp_handle_tx_skb(struct net_device *netdev,
 
 	*nvmeotcp_tisn = ctx->tisn;
 	ctx->ulp_ddp_ctx.expected_seq = seq + datalen;
+	stats->nvmeotcp_offload_packets += skb_is_gso(skb) ?
+		skb_shinfo(skb)->gso_segs : 1;
+
+	stats->nvmeotcp_offload_bytes   += datalen;
+
 	goto good_out;
 out:
 	*nvmeotcp_tisn = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 3d95e46422e5..c51d1a76b22f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -125,6 +125,18 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
 #endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_offload_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_offload_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_ooo) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_dump_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_dump_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_resync) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_ctx) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_resync_fail) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_no_need_offload) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nvmeotcp_no_pdu_info) },
+#endif
 
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
@@ -420,6 +432,19 @@ static void mlx5e_stats_grp_sw_update_stats_sq(struct mlx5e_sw_stats *s,
 	s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
 	s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
 #endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+	s->tx_nvmeotcp_offload_packets += sq_stats->nvmeotcp_offload_packets;
+	s->tx_nvmeotcp_offload_bytes   += sq_stats->nvmeotcp_offload_bytes;
+	s->tx_nvmeotcp_ooo             += sq_stats->nvmeotcp_ooo;
+	s->tx_nvmeotcp_dump_bytes      += sq_stats->nvmeotcp_dump_bytes;
+	s->tx_nvmeotcp_dump_packets    += sq_stats->nvmeotcp_dump_packets;
+	s->tx_nvmeotcp_resync	       += sq_stats->nvmeotcp_resync;
+	s->tx_nvmeotcp_ctx             += sq_stats->nvmeotcp_ctx;
+	s->tx_nvmeotcp_resync_fail     += sq_stats->nvmeotcp_resync_fail;
+	s->tx_nvmeotcp_no_need_offload += sq_stats->nvmeotcp_no_need_offload;
+	s->tx_nvmeotcp_no_pdu_info     += sq_stats->nvmeotcp_no_pdu_info;
+#endif
+
 	s->tx_cqes                  += sq_stats->cqes;
 }
 
@@ -1850,6 +1875,18 @@ static const struct counter_desc sq_stats_desc[] = {
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
+#endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_offload_packets) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_offload_bytes) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_ooo) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_dump_packets) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_dump_bytes) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_resync) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_ctx) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_no_need_offload) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_no_pdu_info) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nvmeotcp_resync_fail) },
 #endif
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index e645ee83de97..3ca48d69a2d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -205,6 +205,17 @@ struct mlx5e_sw_stats {
 	u64 rx_nvmeotcp_resync;
 	u64 rx_nvmeotcp_offload_packets;
 	u64 rx_nvmeotcp_offload_bytes;
+
+	u64 tx_nvmeotcp_offload_packets;
+	u64 tx_nvmeotcp_offload_bytes;
+	u64 tx_nvmeotcp_ooo;
+	u64 tx_nvmeotcp_resync;
+	u64 tx_nvmeotcp_dump_packets;
+	u64 tx_nvmeotcp_dump_bytes;
+	u64 tx_nvmeotcp_ctx;
+	u64 tx_nvmeotcp_no_need_offload;
+	u64 tx_nvmeotcp_no_pdu_info;
+	u64 tx_nvmeotcp_resync_fail;
 #endif
 	u64 ch_events;
 	u64 ch_poll;
@@ -405,6 +416,18 @@ struct mlx5e_sq_stats {
 	u64 tls_skip_no_sync_data;
 	u64 tls_drop_no_sync_data;
 	u64 tls_drop_bypass_req;
+#endif
+#ifdef CONFIG_MLX5_EN_NVMEOTCP
+	u64 nvmeotcp_offload_packets;
+	u64 nvmeotcp_offload_bytes;
+	u64 nvmeotcp_ooo;
+	u64 nvmeotcp_resync;
+	u64 nvmeotcp_dump_packets;
+	u64 nvmeotcp_dump_bytes;
+	u64 nvmeotcp_ctx;
+	u64 nvmeotcp_resync_fail;
+	u64 nvmeotcp_no_need_offload;
+	u64 nvmeotcp_no_pdu_info;
 #endif
 	/* less likely accessed in data path */
 	u64 csum_none;
-- 
2.24.1


