Netdev Archive on lore.kernel.org
From: Boris Pismenny <borisp@nvidia.com>
To: <dsahern@gmail.com>, <kuba@kernel.org>, <davem@davemloft.net>, <saeedm@nvidia.com>, <hch@lst.de>, <sagi@grimberg.me>, <axboe@fb.com>, <kbusch@kernel.org>, <viro@zeniv.linux.org.uk>, <edumazet@google.com>, <smalin@marvell.com>
Cc: <boris.pismenny@gmail.com>, <linux-nvme@lists.infradead.org>, <netdev@vger.kernel.org>, <benishay@nvidia.com>, <ogerlitz@nvidia.com>, <yorayz@nvidia.com>
Subject: [PATCH v5 net-next 26/36] nvme-tcp: Mapping between Tx NVMEoTCP pdu and TCP sequence
Date: Thu, 22 Jul 2021 14:03:15 +0300
Message-ID: <20210722110325.371-27-borisp@nvidia.com>
In-Reply-To: <20210722110325.371-1-borisp@nvidia.com>

From: Yoray Zack <yorayz@nvidia.com>

Maintain a mapping from TCP sequence numbers to NVMEoTCP PDUs for DDGST Tx offload, using the ULP_DDP API. When a PDU is sent, save its request in a ulp_ddp_pdu_info struct.

This mapping is used in two cases:
1. When a packet is retransmitted: if the packet carries an NVMEoTCP DDGST, the NIC may need the whole PDU again to recompute the digest.
2. When an offloaded packet ends up not leaving through the offloaded netdev: software must be able to fall back and compute the DDGST itself.

Add the function nvme_tcp_ddp_ddgst_fallback(pdu_info), which calculates the data digest for a ulp_ddp_pdu_info when the netdev requests it.

Signed-off-by: Yoray Zack <yorayz@nvidia.com>
---
 drivers/nvme/host/tcp.c | 93 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 91 insertions(+), 2 deletions(-)

diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index b2a4316eddce..e030d1baa6bb 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -154,6 +154,55 @@ static const struct blk_mq_ops nvme_tcp_mq_ops;
 static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
 
+#ifdef CONFIG_ULP_DDP
+static int nvme_tcp_map_pdu_info(struct nvme_tcp_queue *queue,
+                                 size_t sent_size,
+                                 u32 pdu_len, u32 data_len)
+{
+        u32 start_seq = tcp_sk(queue->sock->sk)->write_seq - sent_size;
+        struct nvme_tcp_request *req = queue->request;
+        struct request *rq = blk_mq_rq_from_pdu(req);
+
+        return ulp_ddp_map_pdu_info(queue->sock->sk, start_seq, req->pdu,
+                                    pdu_len, data_len, rq);
+}
+
+static void nvme_tcp_close_pdu_info(struct nvme_tcp_queue *queue)
+{
+        if (queue->data_digest &&
+            test_bit(NVME_TCP_Q_OFF_DDGST_TX, &queue->flags))
+                ulp_ddp_close_pdu_info(queue->sock->sk);
+}
+
+bool nvme_tcp_need_map(struct nvme_tcp_queue *queue)
+{
+        return queue->data_digest &&
+                test_bit(NVME_TCP_Q_OFF_DDGST_TX, &queue->flags)
+                && queue->sock && queue->sock->sk
+                && ulp_ddp_need_map(queue->sock->sk);
+
+}
+#else
+
+static int nvme_tcp_map_pdu_info(struct nvme_tcp_queue *queue,
+                                 size_t sent_size,
+                                 u32 pdu_len, u32 data_len)
+{
+        return 0;
+}
+
+static void nvme_tcp_close_pdu_info(struct nvme_tcp_queue *queue)
+{
+}
+
+bool nvme_tcp_need_map(struct nvme_tcp_queue *queue)
+{
+        return false;
+}
+#endif
+
+
+
 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
 {
         return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
@@ -285,11 +334,13 @@ static void nvme_tcp_ddp_ddgst_recalc(struct ahash_request *hash,
 
 #ifdef CONFIG_ULP_DDP
 
+void nvme_tcp_ddp_ddgst_fallback(struct ulp_ddp_pdu_info *pdu_info);
 static bool nvme_tcp_resync_request(struct sock *sk, u32 seq, u32 flags);
 static void nvme_tcp_ddp_teardown_done(void *ddp_ctx);
 static const struct ulp_ddp_ulp_ops nvme_tcp_ddp_ulp_ops = {
         .resync_request         = nvme_tcp_resync_request,
         .ddp_teardown_done      = nvme_tcp_ddp_teardown_done,
+        .ddp_ddgst_fallback     = nvme_tcp_ddp_ddgst_fallback,
 };
 
 static int nvme_tcp_teardown_ddp(struct nvme_tcp_queue *queue,
@@ -371,6 +422,12 @@ static int nvme_tcp_offload_socket(struct nvme_tcp_queue *queue)
         inet_csk(queue->sock->sk)->icsk_ulp_ddp_ops = &nvme_tcp_ddp_ulp_ops;
 
         if (netdev->features & NETIF_F_HW_ULP_DDP) {
+                if (ulp_ddp_init_tx_offload(queue->sock->sk)) {
+                        netdev->ulp_ddp_ops->ulp_ddp_sk_del(netdev, queue->sock->sk);
+                        dev_put(netdev);
+                        return -ENOMEM;
+                }
+
                 set_bit(NVME_TCP_Q_OFF_DDP, &queue->flags);
                 set_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags);
                 set_bit(NVME_TCP_Q_OFF_DDGST_TX, &queue->flags);
@@ -392,6 +449,9 @@ static void nvme_tcp_unoffload_socket(struct nvme_tcp_queue *queue)
         clear_bit(NVME_TCP_Q_OFF_DDGST_RX, &queue->flags);
         clear_bit(NVME_TCP_Q_OFF_DDGST_TX, &queue->flags);
 
+        if (test_bit(NVME_TCP_Q_OFF_DDGST_TX, &queue->flags))
+                ulp_ddp_release_tx_offload(queue->sock->sk);
+
         netdev->ulp_ddp_ops->ulp_ddp_sk_del(netdev, queue->sock->sk);
 
         inet_csk(queue->sock->sk)->icsk_ulp_ddp_ops = NULL;
@@ -1269,6 +1329,19 @@ static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
         nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
 }
 
+#ifdef CONFIG_ULP_DDP
+void nvme_tcp_ddp_ddgst_fallback(struct ulp_ddp_pdu_info *pdu_info)
+{
+        struct request *rq = pdu_info->req;
+        struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
+        struct nvme_tcp_queue *queue = req->queue;
+
+        nvme_tcp_ddp_ddgst_recalc(queue->snd_hash, rq);
+        nvme_tcp_ddgst_final(queue->snd_hash, &pdu_info->ddgst);
+}
+
+#endif
+
 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
 {
         struct nvme_tcp_queue *queue = req->queue;
@@ -1333,7 +1406,8 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
         int len = sizeof(*pdu) + hdgst - req->offset;
         struct request *rq = blk_mq_rq_from_pdu(req);
         int flags = MSG_DONTWAIT;
-        int ret;
+        int ret, check;
+        u32 data_len;
 
         if (test_bit(NVME_TCP_Q_OFF_DDGST_TX, &queue->flags) && queue->data_digest)
                 flags |= MSG_DDP_CRC;
@@ -1353,6 +1427,13 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
         if (unlikely(ret <= 0))
                 return ret;
 
+        if (nvme_tcp_need_map(queue)) {
+                data_len = inline_data ? req->data_len : 0;
+                check = nvme_tcp_map_pdu_info(queue, ret, len, data_len);
+                if (unlikely(check))
+                        return check;
+        }
+
         len -= ret;
         if (!len) {
                 if (inline_data) {
@@ -1360,6 +1441,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
                         if (queue->data_digest)
                                 crypto_ahash_init(queue->snd_hash);
                 } else {
+                        nvme_tcp_close_pdu_info(queue);
                         nvme_tcp_done_send_req(queue);
                 }
                 return 1;
@@ -1376,7 +1458,7 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
         struct nvme_tcp_data_pdu *pdu = req->pdu;
         u8 hdgst = nvme_tcp_hdgst_len(queue);
         int len = sizeof(*pdu) - req->offset + hdgst;
-        int ret;
+        int ret, check;
 
         if (test_bit(NVME_TCP_Q_OFF_DDGST_TX, &queue->flags) && queue->data_digest)
                 flags |= MSG_DDP_CRC;
@@ -1389,6 +1471,12 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
         if (unlikely(ret <= 0))
                 return ret;
 
+        if (nvme_tcp_need_map(queue)) {
+                check = nvme_tcp_map_pdu_info(queue, ret, len, req->data_len);
+                if (unlikely(check))
+                        return check;
+        }
+
         len -= ret;
         if (!len) {
                 req->state = NVME_TCP_SEND_DATA;
@@ -1424,6 +1512,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
                 return ret;
 
         if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
+                nvme_tcp_close_pdu_info(queue);
                 nvme_tcp_done_send_req(queue);
                 return 1;
         }
-- 
2.24.1
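To make the mapping above concrete, the following is a minimal, self-contained userspace C sketch of the idea the patch implements: record the TCP sequence number at which each PDU starts when it is sent, so that a later sequence number (from a retransmission, or from a segment that bypasses the offloaded path) can be traced back to its PDU and the data digest recomputed in software. The names used here (struct pdu_record, pdu_map_insert, pdu_map_lookup) are hypothetical and are not the ulp_ddp API added by this series; in the patch itself the insertion side corresponds to nvme_tcp_map_pdu_info(), which computes start_seq as write_seq - sent_size right after a successful send, and the recomputation side corresponds to the nvme_tcp_ddp_ddgst_fallback() callback.

/*
 * Illustrative sketch only (hypothetical names, not the kernel ulp_ddp API):
 * keep a small table mapping TCP sequence ranges to PDU descriptors, so a
 * retransmitted or non-offloaded segment can be mapped back to the PDU whose
 * digest must be recomputed in software.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_PDUS 64

struct pdu_record {
        uint32_t start_seq;     /* TCP sequence number of the first byte of the PDU */
        uint32_t len;           /* bytes the PDU occupies on the wire (hdr + data + digest) */
        const void *pdu;        /* opaque pointer to the PDU/request context */
};

struct pdu_map {
        struct pdu_record entries[MAX_PDUS];
        unsigned int count;
};

/* Record a PDU right after its bytes were handed to the TCP stack. */
static int pdu_map_insert(struct pdu_map *map, uint32_t write_seq,
                          uint32_t sent_size, uint32_t pdu_len, const void *pdu)
{
        struct pdu_record *rec;

        if (map->count == MAX_PDUS)
                return -1;
        rec = &map->entries[map->count++];
        /* write_seq has already advanced past the sent bytes; back it up. */
        rec->start_seq = write_seq - sent_size;
        rec->len = pdu_len;
        rec->pdu = pdu;
        return 0;
}

/* Find the PDU covering a given sequence number, e.g. from a retransmit. */
static const struct pdu_record *pdu_map_lookup(const struct pdu_map *map, uint32_t seq)
{
        unsigned int i;

        for (i = 0; i < map->count; i++) {
                const struct pdu_record *rec = &map->entries[i];

                /* Unsigned subtraction handles sequence-number wraparound. */
                if (seq - rec->start_seq < rec->len)
                        return rec;
        }
        return NULL;
}

int main(void)
{
        struct pdu_map map = { .count = 0 };
        const char *pdu_a = "cmd-pdu", *pdu_b = "data-pdu";
        const struct pdu_record *hit;

        /* Two PDUs sent back to back, the first starting at sequence 1000. */
        pdu_map_insert(&map, 1000 + 72, 72, 72, pdu_a);
        pdu_map_insert(&map, 1072 + 4168, 4168, 4168, pdu_b);

        /* A retransmitted segment starting at sequence 2000 falls inside pdu_b. */
        hit = pdu_map_lookup(&map, 2000);
        printf("seq 2000 -> %s\n", hit ? (const char *)hit->pdu : "no PDU");
        return 0;
}

In the patch, the equivalent state is kept per socket: ulp_ddp_map_pdu_info() records the PDU, and ulp_ddp_close_pdu_info() releases the entry once the full PDU (including its digest) has been sent, which is why nvme_tcp_close_pdu_info() is called from both the command-PDU and DDGST send-completion paths.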
Thread overview: 62+ messages in thread

2021-07-22 11:02 [PATCH v5 net-next 00/36] nvme-tcp receive and transmit offloads Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 01/36] net: Introduce direct data placement tcp offload Boris Pismenny
2021-07-22 11:26 ` Eric Dumazet
2021-07-22 12:18 ` Boris Pismenny
2021-07-22 13:10 ` Eric Dumazet
2021-07-22 13:33 ` Boris Pismenny
2021-07-22 13:39 ` Eric Dumazet
2021-07-22 14:02 ` Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 02/36] iov_iter: DDP copy to iter/pages Boris Pismenny
2021-07-22 13:31 ` Christoph Hellwig
2021-07-22 20:23 ` Boris Pismenny
2021-07-23  5:03 ` Christoph Hellwig
2021-07-23  5:21 ` Al Viro
2021-08-04 14:13 ` Or Gerlitz
2021-08-10 13:29 ` Or Gerlitz
2021-07-22 20:55 ` Al Viro
2021-07-22 11:02 ` [PATCH v5 net-next 03/36] net: skb copy(+hash) iterators for DDP offloads Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 04/36] net/tls: expose get_netdev_for_sock Boris Pismenny
2021-07-23  6:06 ` Christoph Hellwig
2021-08-04 13:26 ` Or Gerlitz
[not found] ` <20210804072918.17ba9cff@kicinski-fedora-pc1c0hjn.dhcp.thefacebook.com>
2021-08-04 15:07 ` Or Gerlitz
2021-08-10 13:25 ` Or Gerlitz
2021-07-22 11:02 ` [PATCH v5 net-next 05/36] nvme-tcp: Add DDP offload control path Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 06/36] nvme-tcp: Add DDP data-path Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 07/36] nvme-tcp: RX DDGST offload Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 08/36] nvme-tcp: Deal with netdevice DOWN events Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 09/36] net/mlx5: Header file changes for nvme-tcp offload Boris Pismenny
2021-07-22 11:02 ` [PATCH v5 net-next 10/36] net/mlx5: Add 128B CQE for NVMEoTCP offload Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 11/36] net/mlx5e: TCP flow steering for nvme-tcp Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 12/36] net/mlx5e: NVMEoTCP offload initialization Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 13/36] net/mlx5e: KLM UMR helper macros Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 14/36] net/mlx5e: NVMEoTCP use KLM UMRs Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 15/36] net/mlx5e: NVMEoTCP queue init/teardown Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 16/36] net/mlx5e: NVMEoTCP async ddp invalidation Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 17/36] net/mlx5e: NVMEoTCP ddp setup and resync Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 18/36] net/mlx5e: NVMEoTCP, data-path for DDP+DDGST offload Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 19/36] net/mlx5e: NVMEoTCP statistics Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 20/36] Documentation: add ULP DDP offload documentation Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 21/36] net: drop ULP DDP HW offload feature if no CSUM offload feature Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 22/36] net: Add ulp_ddp_pdu_info struct Boris Pismenny
2021-07-23 19:42 ` Sagi Grimberg
2021-07-22 11:03 ` [PATCH v5 net-next 23/36] net: Add to ulp_ddp support for fallback flow Boris Pismenny
2021-07-23  6:09 ` Christoph Hellwig
2021-07-22 11:03 ` [PATCH v5 net-next 24/36] net: Add MSG_DDP_CRC flag Boris Pismenny
2021-07-22 14:23 ` Eric Dumazet
2021-07-22 11:03 ` [PATCH v5 net-next 25/36] nvme-tcp: TX DDGST offload Boris Pismenny
2021-07-22 11:03 ` Boris Pismenny [this message]
2021-07-22 11:03 ` [PATCH v5 net-next 27/36] mlx5e: make preparation in TLS code for NVMEoTCP CRC Tx offload Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 28/36] mlx5: Add sq state test bit for nvmeotcp Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 29/36] mlx5: Add support to NETIF_F_HW_TCP_DDP_CRC_TX feature Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 30/36] net/mlx5e: NVMEoTCP DDGST TX offload TIS Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 31/36] net/mlx5e: NVMEoTCP DDGST Tx offload queue init/teardown Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 32/36] net/mlx5e: NVMEoTCP DDGST TX BSF and PSV Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 33/36] net/mlx5e: NVMEoTCP DDGST TX Data path Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 34/36] net/mlx5e: NVMEoTCP DDGST TX handle OOO packets Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 35/36] net/mlx5e: NVMEoTCP DDGST TX offload optimization Boris Pismenny
2021-07-22 11:03 ` [PATCH v5 net-next 36/36] net/mlx5e: NVMEoTCP DDGST TX statistics Boris Pismenny
2021-07-23  5:56 ` [PATCH v5 net-next 00/36] nvme-tcp receive and transmit offloads Christoph Hellwig
2021-07-23 19:58 ` Sagi Grimberg
2021-08-04 13:51 ` Or Gerlitz
2021-08-06 19:46 ` Sagi Grimberg
2021-08-10 13:37 ` Or Gerlitz