Netdev Archive on lore.kernel.org
help / color / mirror / Atom feed
From: Harshitha Ramamurthy <harshitha.ramamurthy@intel.com>
To: bpf@vger.kernel.org, netdev@vger.kernel.org, ast@kernel.org,
	daniel@iogearbox.net, davem@davemloft.net, kuba@kernel.org
Cc: dsahern@gmail.com, alexander.h.duyck@intel.com,
	tom.herbert@intel.com,
	Harshitha Ramamurthy <harshitha.ramamurthy@intel.com>
Subject: [PATCH bpf-next] bpf: add bpf_get_xdp_hash helper function
Date: Mon, 31 Aug 2020 15:25:06 -0400	[thread overview]
Message-ID: <20200831192506.28896-1-harshitha.ramamurthy@intel.com> (raw)

This patch adds a helper function called bpf_get_xdp_hash to calculate
the hash for a packet at the XDP layer. In the helper function, we call
the kernel flow dissector in non-skb mode by passing the net pointer
to calculate the hash.

Changes since RFC:
- accounted for VLANs (David Ahern)
- return the correct hash by not using skb_get_hash (David Ahern)
- call __skb_flow_dissect in non-skb mode

Signed-off-by: Harshitha Ramamurthy <harshitha.ramamurthy@intel.com>
---
 include/uapi/linux/bpf.h       |  9 +++++++++
 net/core/filter.c              | 29 +++++++++++++++++++++++++++++
 tools/include/uapi/linux/bpf.h |  9 +++++++++
 3 files changed, 47 insertions(+)

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index a613750d5515..bffe93b526e7 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -3576,6 +3576,14 @@ union bpf_attr {
  * 		the data in *dst*. This is a wrapper of copy_from_user().
  * 	Return
  * 		0 on success, or a negative error in case of failure.
+ *
+ * u32 bpf_get_xdp_hash(struct xdp_buff *xdp_md)
+ *	Description
+ *		Return the hash for the xdp context passed. This function
+ *		calls __skb_flow_dissect in non-skb mode to calculate the
+ *		hash for the packet.
+ *	Return
+ *		The 32-bit hash.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -3727,6 +3735,7 @@ union bpf_attr {
 	FN(inode_storage_delete),	\
 	FN(d_path),			\
 	FN(copy_from_user),		\
+	FN(get_xdp_hash),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/net/core/filter.c b/net/core/filter.c
index 47eef9a0be6a..cfb5a6aea6c3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3765,6 +3765,33 @@ static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
 	.arg3_type      = ARG_ANYTHING,
 };
 
+/* Compute a flow hash for an XDP frame by running the kernel flow
+ * dissector in non-skb mode on the raw frame. Returns 0 when the
+ * frame is too short to hold a full Ethernet header, since reading
+ * eth->h_proto in that case would be an out-of-bounds access.
+ */
+BPF_CALL_1(bpf_get_xdp_hash, struct xdp_buff *, xdp)
+{
+	struct ethhdr *eth = xdp->data;
+	struct flow_keys keys;
+	int len;
+
+	len = xdp->data_end - xdp->data;
+	if (len < (int)sizeof(*eth))	/* need full eth header before h_proto */
+		return 0;
+	memset(&keys, 0, sizeof(keys));
+	__skb_flow_dissect(dev_net(xdp->rxq->dev), NULL, &flow_keys_dissector,
+			   &keys, xdp->data, eth->h_proto, sizeof(*eth), len,
+			   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+	return flow_hash_from_keys(&keys);
+}
+
+static const struct bpf_func_proto bpf_get_xdp_hash_proto = {
+	.func		= bpf_get_xdp_hash,
+	.gpl_only	= false,	/* callable from non-GPL programs */
+	.ret_type	= RET_INTEGER,	/* 32-bit flow hash, 0 on short frame */
+	.arg1_type	= ARG_PTR_TO_CTX,	/* struct xdp_buff * (XDP ctx) */
+};
+
 static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
 				  unsigned long off, unsigned long len)
 {
@@ -6837,6 +6864,8 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_xdp_adjust_tail_proto;
 	case BPF_FUNC_fib_lookup:
 		return &bpf_xdp_fib_lookup_proto;
+	case BPF_FUNC_get_xdp_hash:
+		return &bpf_get_xdp_hash_proto;
 #ifdef CONFIG_INET
 	case BPF_FUNC_sk_lookup_udp:
 		return &bpf_xdp_sk_lookup_udp_proto;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index a613750d5515..bffe93b526e7 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -3576,6 +3576,14 @@ union bpf_attr {
  * 		the data in *dst*. This is a wrapper of copy_from_user().
  * 	Return
  * 		0 on success, or a negative error in case of failure.
+ *
+ * u32 bpf_get_xdp_hash(struct xdp_buff *xdp_md)
+ *	Description
+ *		Return the hash for the xdp context passed. This function
+ *		calls __skb_flow_dissect in non-skb mode to calculate the
+ *		hash for the packet.
+ *	Return
+ *		The 32-bit hash.
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -3727,6 +3735,7 @@ union bpf_attr {
 	FN(inode_storage_delete),	\
 	FN(d_path),			\
 	FN(copy_from_user),		\
+	FN(get_xdp_hash),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
-- 
2.20.1


             reply	other threads:[~2020-08-31 19:28 UTC|newest]

Thread overview: 7+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-08-31 19:25 Harshitha Ramamurthy [this message]
2020-08-31 19:54 ` David Ahern
2020-09-02  0:52   ` Ramamurthy, Harshitha
2020-09-02  2:09     ` David Ahern
2020-08-31 20:33 ` Daniel Borkmann
2020-09-02  0:54   ` Ramamurthy, Harshitha
2020-09-02 15:56   ` Tom Herbert

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20200831192506.28896-1-harshitha.ramamurthy@intel.com \
    --to=harshitha.ramamurthy@intel.com \
    --cc=alexander.h.duyck@intel.com \
    --cc=ast@kernel.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=davem@davemloft.net \
    --cc=dsahern@gmail.com \
    --cc=kuba@kernel.org \
    --cc=netdev@vger.kernel.org \
    --cc=tom.herbert@intel.com \
    --subject='Re: [PATCH bpf-next] bpf: add bpf_get_xdp_hash helper function' \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).