LKML Archive on lore.kernel.org
From: Alexei Lozovsky <me@ilammy.net>
To: Thomas Gleixner <tglx@linutronix.de>
Cc: Alexey Dobriyan <adobriyan@gmail.com>, Christoph Lameter <cl@linux.com>,
	LKML <linux-kernel@vger.kernel.org>, linux-fsdevel@vger.kernel.org
Subject: [PATCH v2 06/12] x86/irq: Use READ_ONCE for IRQ counter reads
Date: Thu, 16 Sep 2021 02:58:42 +0900
Message-ID: <20210915175848.162260-7-me@ilammy.net>
In-Reply-To: <20210915175848.162260-1-me@ilammy.net>

Just like with the generic IRQ counters, wrap accesses to the counters
from irq_cpustat_t into READ_ONCE to ensure these loads don't get torn.

mce_exception_count and mce_poll_count are also updated by each CPU
independently, so we don't want those loads to tear either.

Signed-off-by: Alexei Lozovsky <me@ilammy.net>
---
 arch/x86/kernel/irq.c | 69 ++++++++++++++++++++++---------------------
 1 file changed, 35 insertions(+), 34 deletions(-)

diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index e28f6a5d14f1..4ff04ce22eb6 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -62,77 +62,77 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 	seq_printf(p, "%*s: ", prec, "NMI");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
+		seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->__nmi_count));
 	seq_puts(p, " Non-maskable interrupts\n");
 #ifdef CONFIG_X86_LOCAL_APIC
 	seq_printf(p, "%*s: ", prec, "LOC");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
+		seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->apic_timer_irqs));
 	seq_puts(p, " Local timer interrupts\n");
 	seq_printf(p, "%*s: ", prec, "SPU");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
+		seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->irq_spurious_count));
 	seq_puts(p, " Spurious interrupts\n");
 	seq_printf(p, "%*s: ", prec, "PMI");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
+		seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->apic_perf_irqs));
 	seq_puts(p, " Performance monitoring interrupts\n");
 	seq_printf(p, "%*s: ", prec, "IWI");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
+		seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->apic_irq_work_irqs));
 	seq_puts(p, " IRQ work interrupts\n");
 	seq_printf(p, "%*s: ", prec, "RTR");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
+		seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->icr_read_retry_count));
 	seq_puts(p, " APIC ICR read retries\n");
 	if (x86_platform_ipi_callback) {
 		seq_printf(p, "%*s: ", prec, "PLT");
 		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
+			seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->x86_platform_ipis));
 		seq_puts(p, " Platform interrupts\n");
 	}
 #endif
 #ifdef CONFIG_SMP
 	seq_printf(p, "%*s: ", prec, "RES");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
+		seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->irq_resched_count));
 	seq_puts(p, " Rescheduling interrupts\n");
 	seq_printf(p, "%*s: ", prec, "CAL");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
+		seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->irq_call_count));
 	seq_puts(p, " Function call interrupts\n");
 	seq_printf(p, "%*s: ", prec, "TLB");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
+		seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->irq_tlb_count));
 	seq_puts(p, " TLB shootdowns\n");
 #endif
 #ifdef CONFIG_X86_THERMAL_VECTOR
 	seq_printf(p, "%*s: ", prec, "TRM");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
+		seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->irq_thermal_count));
 	seq_puts(p, " Thermal event interrupts\n");
 #endif
 #ifdef CONFIG_X86_MCE_THRESHOLD
 	seq_printf(p, "%*s: ", prec, "THR");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
+		seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->irq_threshold_count));
 	seq_puts(p, " Threshold APIC interrupts\n");
 #endif
 #ifdef CONFIG_X86_MCE_AMD
 	seq_printf(p, "%*s: ", prec, "DFR");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
+		seq_printf(p, "%10u ", READ_ONCE(irq_stats(j)->irq_deferred_error_count));
 	seq_puts(p, " Deferred Error APIC interrupts\n");
 #endif
 #ifdef CONFIG_X86_MCE
 	seq_printf(p, "%*s: ", prec, "MCE");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
+		seq_printf(p, "%10u ", READ_ONCE(per_cpu(mce_exception_count, j)));
 	seq_puts(p, " Machine check exceptions\n");
 	seq_printf(p, "%*s: ", prec, "MCP");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
+		seq_printf(p, "%10u ", READ_ONCE(per_cpu(mce_poll_count, j)));
 	seq_puts(p, " Machine check polls\n");
 #endif
 #ifdef CONFIG_X86_HV_CALLBACK_VECTOR
@@ -140,7 +140,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 		seq_printf(p, "%*s: ", prec, "HYP");
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ",
-				   irq_stats(j)->irq_hv_callback_count);
+				   READ_ONCE(irq_stats(j)->irq_hv_callback_count));
 		seq_puts(p, " Hypervisor callback interrupts\n");
 	}
 #endif
@@ -149,14 +149,14 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 		seq_printf(p, "%*s: ", prec, "HRE");
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ",
-				   irq_stats(j)->irq_hv_reenlightenment_count);
+				   READ_ONCE(irq_stats(j)->irq_hv_reenlightenment_count));
 		seq_puts(p, " Hyper-V reenlightenment interrupts\n");
 	}
 	if (test_bit(HYPERV_STIMER0_VECTOR, system_vectors)) {
 		seq_printf(p, "%*s: ", prec, "HVS");
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ",
-				   irq_stats(j)->hyperv_stimer0_count);
+				   READ_ONCE(irq_stats(j)->hyperv_stimer0_count));
 		seq_puts(p, " Hyper-V stimer0 interrupts\n");
 	}
 #endif
@@ -167,19 +167,20 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 #ifdef CONFIG_HAVE_KVM
 	seq_printf(p, "%*s: ", prec, "PIN");
 	for_each_online_cpu(j)
-		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
+		seq_printf(p, "%10u ",
+			   READ_ONCE(irq_stats(j)->kvm_posted_intr_ipis));
 	seq_puts(p, " Posted-interrupt notification event\n");
 	seq_printf(p, "%*s: ", prec, "NPI");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ",
-			   irq_stats(j)->kvm_posted_intr_nested_ipis);
+			   READ_ONCE(irq_stats(j)->kvm_posted_intr_nested_ipis));
 	seq_puts(p, " Nested posted-interrupt event\n");
 	seq_printf(p, "%*s: ", prec, "PIW");
 	for_each_online_cpu(j)
 		seq_printf(p, "%10u ",
-			   irq_stats(j)->kvm_posted_intr_wakeup_ipis);
+			   READ_ONCE(irq_stats(j)->kvm_posted_intr_wakeup_ipis));
 	seq_puts(p, " Posted-interrupt wakeup event\n");
 #endif
 	return 0;
@@ -190,30 +191,30 @@ int arch_show_interrupts(struct seq_file *p, int prec)
  */
 u64 arch_irq_stat_cpu(unsigned int cpu)
 {
-	u64 sum = irq_stats(cpu)->__nmi_count;
+	u64 sum = READ_ONCE(irq_stats(cpu)->__nmi_count);
 #ifdef CONFIG_X86_LOCAL_APIC
-	sum += irq_stats(cpu)->apic_timer_irqs;
-	sum += irq_stats(cpu)->irq_spurious_count;
-	sum += irq_stats(cpu)->apic_perf_irqs;
-	sum += irq_stats(cpu)->apic_irq_work_irqs;
-	sum += irq_stats(cpu)->icr_read_retry_count;
+	sum += READ_ONCE(irq_stats(cpu)->apic_timer_irqs);
+	sum += READ_ONCE(irq_stats(cpu)->irq_spurious_count);
+	sum += READ_ONCE(irq_stats(cpu)->apic_perf_irqs);
+	sum += READ_ONCE(irq_stats(cpu)->apic_irq_work_irqs);
+	sum += READ_ONCE(irq_stats(cpu)->icr_read_retry_count);
 	if (x86_platform_ipi_callback)
-		sum += irq_stats(cpu)->x86_platform_ipis;
+		sum += READ_ONCE(irq_stats(cpu)->x86_platform_ipis);
 #endif
 #ifdef CONFIG_SMP
-	sum += irq_stats(cpu)->irq_resched_count;
-	sum += irq_stats(cpu)->irq_call_count;
+	sum += READ_ONCE(irq_stats(cpu)->irq_resched_count);
+	sum += READ_ONCE(irq_stats(cpu)->irq_call_count);
 #endif
 #ifdef CONFIG_X86_THERMAL_VECTOR
-	sum += irq_stats(cpu)->irq_thermal_count;
+	sum += READ_ONCE(irq_stats(cpu)->irq_thermal_count);
 #endif
 #ifdef CONFIG_X86_MCE_THRESHOLD
-	sum += irq_stats(cpu)->irq_threshold_count;
+	sum += READ_ONCE(irq_stats(cpu)->irq_threshold_count);
 #endif
 #ifdef CONFIG_X86_MCE
-	sum += per_cpu(mce_exception_count, cpu);
-	sum += per_cpu(mce_poll_count, cpu);
+	sum += READ_ONCE(per_cpu(mce_exception_count, cpu));
+	sum += READ_ONCE(per_cpu(mce_poll_count, cpu));
 #endif
 	return sum;
 }
--
2.25.1
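For scalar types, the READ_ONCE() used throughout this patch is essentially a volatile-qualified load: it obliges the compiler to issue the access once, at full width, rather than tearing it into partial loads, fusing it with neighbouring reads, or re-reading the variable. A minimal userspace sketch of that idea follows; READ_ONCE_SKETCH, counter, and read_counter_once are illustrative names for this example only, not kernel code.

#include <stdio.h>

/*
 * Simplified model of the kernel's READ_ONCE() for scalar types:
 * the volatile-qualified access tells the compiler to perform one
 * full-width load and not to tear, fuse, or re-issue it.
 */
#define READ_ONCE_SKETCH(x) (*(const volatile __typeof__(x) *)&(x))

static unsigned int counter;	/* imagine another CPU incrementing this */

static unsigned int read_counter_once(void)
{
	/*
	 * A plain "return counter;" would leave the compiler free to
	 * read the value in pieces or merge it with other accesses.
	 */
	return READ_ONCE_SKETCH(counter);
}

int main(void)
{
	counter = 42;
	printf("counter = %u\n", read_counter_once());
	return 0;
}

The writer side is left untouched by the patch: each counter is only ever incremented by the CPU that owns it, so only the cross-CPU reads done for /proc/interrupts and /proc/stat need hardening here.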
Thread overview: 27+ messages in thread

2021-09-10  8:53 /proc/stat interrupt counter wrap-around Alexei Lozovsky
2021-09-11  3:48 ` [PATCH 0/7] proc/stat: Maintain monotonicity of "intr" and "softirq" Alexei Lozovsky
2021-09-11  3:48 ` [PATCH 1/7] genirq: Use unsigned int for irqs_sum Alexei Lozovsky
2021-09-11  3:48 ` [PATCH 2/7] powerpc/irq: arch_irq_stat_cpu() returns unsigned int Alexei Lozovsky
2021-09-11  3:48 ` [PATCH 3/7] x86/irq: " Alexei Lozovsky
2021-09-11  3:48 ` [PATCH 4/7] x86/irq: arch_irq_stat() " Alexei Lozovsky
2021-09-11  3:48 ` [PATCH 5/7] proc/stat: Use unsigned int for "intr" sum Alexei Lozovsky
2021-09-11  3:48 ` [PATCH 6/7] proc/stat: Use unsigned int for "softirq" sum Alexei Lozovsky
2021-09-11  3:48 ` [PATCH 7/7] docs: proc.rst: stat: Note the interrupt counter wrap-around Alexei Lozovsky
2021-09-11  3:59 ` Randy Dunlap
2021-09-12  9:30 ` [PATCH 0/7] proc/stat: Maintain monotonicity of "intr" and "softirq" Alexey Dobriyan
2021-09-12 12:37 ` Alexei Lozovsky
2021-09-14 14:11 ` Thomas Gleixner
2021-09-15  4:24 ` Alexei Lozovsky
2021-09-15 17:58 ` [PATCH v2 00/12] " Alexei Lozovsky
2021-09-15 17:58 ` [PATCH v2 01/12] genirq: Use READ_ONCE for IRQ counter reads Alexei Lozovsky
2021-09-15 17:58 ` [PATCH v2 02/12] genirq: Use unsigned long for IRQ counters Alexei Lozovsky
2021-09-15 17:58 ` [PATCH v2 03/12] powerpc/irq: Use READ_ONCE for IRQ counter reads Alexei Lozovsky
2021-09-15 17:58 ` [PATCH v2 04/12] powerpc/irq: Use unsigned long for IRQ counters Alexei Lozovsky
2021-09-15 17:58 ` [PATCH v2 05/12] powerpc/irq: Use unsigned long for IRQ counter sum Alexei Lozovsky
2021-09-15 17:58 ` Alexei Lozovsky [this message]
2021-09-15 17:58 ` [PATCH v2 07/12] x86/irq: Use unsigned long for IRQ counters Alexei Lozovsky
2021-09-15 17:58 ` [PATCH v2 08/12] x86/irq: Use unsigned long for IRQ counters more Alexei Lozovsky
2021-09-15 17:58 ` [PATCH v2 09/12] x86/irq: Use unsigned long for IRQ counter sum Alexei Lozovsky
2021-09-15 17:58 ` [PATCH v2 10/12] proc/stat: Use unsigned long for "intr" sum Alexei Lozovsky
2021-09-15 17:58 ` [PATCH v2 11/12] proc/stat: Use unsigned long for "softirq" sum Alexei Lozovsky
2021-09-15 17:58 ` [PATCH v2 12/12] docs: proc.rst: stat: Note the interrupt counter wrap-around Alexei Lozovsky