LKML Archive on lore.kernel.org
From: Song Liu <songliubraving@fb.com>
To: <bpf@vger.kernel.org>, <linux-kernel@vger.kernel.org>
Cc: <acme@kernel.org>, <peterz@infradead.org>, <mingo@redhat.com>,
	<kernel-team@fb.com>, Song Liu <songliubraving@fb.com>
Subject: [PATCH bpf-next 1/3] perf: enable branch record for software events
Date: Mon, 23 Aug 2021 23:01:55 -0700
Message-ID: <20210824060157.3889139-2-songliubraving@fb.com>
In-Reply-To: <20210824060157.3889139-1-songliubraving@fb.com>

The typical way to access branch records (e.g. Intel LBR) is via hardware
perf_events. For CPUs with FREEZE_LBRS_ON_PMI support, the PMI handler can
capture reliable LBR data. However, branch records are also useful in
non-PMI scenarios. For example, in a kretprobe or bpf fexit program, LBR
data can reveal a lot about what happened inside the traced function. Add
an API that makes branch records available to such software events.

Note that when the software event triggers, the branch record hardware
must be stopped as soon as possible, so that the snapshot is not polluted
by branches taken while handling the event itself. Therefore, a
static_call is used for the dispatch, which avoids extra branch
instructions on this path.

Signed-off-by: Song Liu <songliubraving@fb.com>
---
 arch/x86/events/intel/core.c |  5 ++++-
 arch/x86/events/intel/lbr.c  | 12 ++++++++++++
 arch/x86/events/perf_event.h |  2 ++
 include/linux/perf_event.h   | 33 +++++++++++++++++++++++++++++++++
 kernel/events/core.c         | 28 ++++++++++++++++++++++++++++
 5 files changed, 79 insertions(+), 1 deletion(-)
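
Illustration only, not part of the patch: a sketch of how another PMU
driver could hook its own snapshot routine into the static call,
mirroring the Intel hookup in the diff below. The my_pmu_* names are
hypothetical.

	static void my_pmu_snapshot_branch_stack(void)
	{
		/*
		 * Stop branch recording, copy the recorded entries into
		 * perf_branch_snapshot_entries, set
		 * perf_branch_snapshot_size, then re-enable recording.
		 */
	}

	static __init int my_pmu_init(void)
	{
		static_call_update(perf_snapshot_branch_stack,
				   my_pmu_snapshot_branch_stack);
		return 0;
	}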

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index ac6fd2dabf6a2..a29649e7241cc 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -6283,8 +6283,11 @@ __init int intel_pmu_init(void)
 			x86_pmu.lbr_nr = 0;
 	}
 
-	if (x86_pmu.lbr_nr)
+	if (x86_pmu.lbr_nr) {
 		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
+		static_call_update(perf_snapshot_branch_stack,
+				   intel_pmu_snapshot_branch_stack);
+	}
 
 	intel_pmu_check_extra_regs(x86_pmu.extra_regs);
 
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 9e6d6eaeb4cb6..b73b444cf229d 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1862,3 +1862,15 @@ EXPORT_SYMBOL_GPL(x86_perf_get_lbr);
 struct event_constraint vlbr_constraint =
 	__EVENT_CONSTRAINT(INTEL_FIXED_VLBR_EVENT, (1ULL << INTEL_PMC_IDX_FIXED_VLBR),
 			  FIXED_EVENT_FLAGS, 1, 0, PERF_X86_EVENT_LBR_SELECT);
+
+void intel_pmu_snapshot_branch_stack(void)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+	intel_pmu_lbr_disable_all();
+	intel_pmu_lbr_read();
+	memcpy(this_cpu_ptr(&perf_branch_snapshot_entries), cpuc->lbr_entries,
+	       sizeof(struct perf_branch_entry) * x86_pmu.lbr_nr);
+	*this_cpu_ptr(&perf_branch_snapshot_size) = x86_pmu.lbr_nr;
+	intel_pmu_lbr_enable_all(false);
+}
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index e3ac05c97b5e5..5262083f4e13b 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1379,6 +1379,8 @@ void intel_pmu_pebs_data_source_skl(bool pmem);
 
 int intel_pmu_setup_lbr_filter(struct perf_event *event);
 
+void intel_pmu_snapshot_branch_stack(void);
+
 void intel_pt_interrupt(void);
 
 int intel_bts_interrupt(void);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index fe156a8170aa3..7cd2af7c5eda6 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -57,6 +57,7 @@ struct perf_guest_info_callbacks {
 #include <linux/cgroup.h>
 #include <linux/refcount.h>
 #include <linux/security.h>
+#include <linux/static_call.h>
 #include <asm/local.h>
 
 struct perf_callchain_entry {
@@ -1612,4 +1613,36 @@ extern void __weak arch_perf_update_userpage(struct perf_event *event,
 extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
 #endif
 
+/*
+ * Snapshot branch stack on software events.
+ *
+ * Branch stacks can be very useful in understanding software events. For
+ * example, when a long function, e.g. sys_perf_event_open, returns an
+ * errno, it is not obvious why the function failed. Branch stacks can
+ * provide very helpful information in this type of scenario.
+ *
+ * On a software event, it is necessary to stop the hardware branch
+ * recorder quickly. Otherwise, the hardware registers/buffer will be
+ * flooded with entries after the triggering event. Therefore, a static
+ * call is used to stop the hardware recorder.
+ *
+ * To use the snapshot:
+ * 1) After the event triggers, call perf_snapshot_branch_stack ASAP;
+ * 2) On the same CPU, read the snapshot with perf_read_branch_snapshot.
+ */
+#define MAX_BRANCH_SNAPSHOT 32
+DECLARE_PER_CPU(struct perf_branch_entry,
+		perf_branch_snapshot_entries[MAX_BRANCH_SNAPSHOT]);
+DECLARE_PER_CPU(int, perf_branch_snapshot_size);
+
+void perf_default_snapshot_branch_stack(void);
+
+#ifdef CONFIG_HAVE_STATIC_CALL
+DECLARE_STATIC_CALL(perf_snapshot_branch_stack,
+		    perf_default_snapshot_branch_stack);
+#else
+extern void (*perf_snapshot_branch_stack)(void);
+#endif
+
+int perf_read_branch_snapshot(void *buf, size_t len);
 #endif /* _LINUX_PERF_EVENT_H */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 011cc5069b7ba..b42cc20451709 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -13437,3 +13437,31 @@ struct cgroup_subsys perf_event_cgrp_subsys = {
 	.threaded	= true,
 };
 #endif /* CONFIG_CGROUP_PERF */
+
+DEFINE_PER_CPU(struct perf_branch_entry,
+	       perf_branch_snapshot_entries[MAX_BRANCH_SNAPSHOT]);
+DEFINE_PER_CPU(int, perf_branch_snapshot_size);
+
+void perf_default_snapshot_branch_stack(void)
+{
+	*this_cpu_ptr(&perf_branch_snapshot_size) = 0;
+}
+
+#ifdef CONFIG_HAVE_STATIC_CALL
+DEFINE_STATIC_CALL(perf_snapshot_branch_stack,
+		   perf_default_snapshot_branch_stack);
+#else
+void (*perf_snapshot_branch_stack)(void) = perf_default_snapshot_branch_stack;
+#endif
+
+int perf_read_branch_snapshot(void *buf, size_t len)
+{
+	int cnt;
+
+	memcpy(buf, *this_cpu_ptr(&perf_branch_snapshot_entries),
+	       min_t(u32, (u32)len,
+		     sizeof(struct perf_branch_entry) * MAX_BRANCH_SNAPSHOT));
+	cnt = *this_cpu_ptr(&perf_branch_snapshot_size);
+
+	return (cnt > 0) ? cnt : -EOPNOTSUPP;
+}
-- 
2.30.2


Thread overview: 10+ messages
2021-08-24  6:01 [PATCH bpf-next 0/3] bpf: introduce bpf_get_branch_trace Song Liu
2021-08-24  6:01 ` Song Liu [this message]
2021-08-25 12:09   ` [PATCH bpf-next 1/3] perf: enable branch record for software events Peter Zijlstra
2021-08-25 15:22     ` Song Liu
2021-08-26  7:56       ` kajoljain
2021-08-26 16:41         ` Song Liu
2021-08-24  6:01 ` [PATCH bpf-next 2/3] bpf: introduce helper bpf_get_branch_trace Song Liu
2021-08-25  1:14   ` kernel test robot
2021-08-24  6:01 ` [PATCH bpf-next 3/3] selftests/bpf: add test for bpf_get_branch_trace Song Liu
2021-08-31  1:30 [selftests/bpf] 8dff2c1958: BUG:using_smp_processor_id()in_preemptible kernel test robot
2021-08-31  4:45 ` Song Liu
