* [RFC PATCH v3 02/15] powerpc/radix: Make kuap_check_amr() and kuap_restore_amr() generic
2020-04-06 18:16 [RFC PATCH v3 01/15] powerpc/syscall: Refactorise from Nick Christophe Leroy
@ 2020-04-06 18:16 ` Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 03/15] powerpc/32s: Create C version of kuap_restore() and kuap_check() Christophe Leroy
` (12 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2020-04-06 18:16 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
npiggin, msuchanek
Cc: linux-kernel, linuxppc-dev
In preparation of porting powerpc32 to C syscall entry/exit,
rename kuap_check_amr() and kuap_restore_amr() as kuap_check()
and kuap_restore(), and move the stub for when CONFIG_PPC_KUAP is
not selected into the generic asm/kup.h
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
arch/powerpc/include/asm/book3s/64/kup-radix.h | 12 ++----------
arch/powerpc/include/asm/kup.h | 2 ++
arch/powerpc/kernel/syscall_64.c | 10 +++++-----
3 files changed, 9 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
index 3bcef989a35d..1f2716a0dcd8 100644
--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
@@ -60,13 +60,13 @@
#include <asm/mmu.h>
#include <asm/ptrace.h>
-static inline void kuap_restore_amr(struct pt_regs *regs)
+static inline void kuap_restore(struct pt_regs *regs)
{
if (mmu_has_feature(MMU_FTR_RADIX_KUAP))
mtspr(SPRN_AMR, regs->kuap);
}
-static inline void kuap_check_amr(void)
+static inline void kuap_check(void)
{
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_RADIX_KUAP))
WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
@@ -141,14 +141,6 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
(regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
"Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
}
-#else /* CONFIG_PPC_KUAP */
-static inline void kuap_restore_amr(struct pt_regs *regs)
-{
-}
-
-static inline void kuap_check_amr(void)
-{
-}
#endif /* CONFIG_PPC_KUAP */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index 92bcd1a26d73..1100c13b6d9e 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -62,6 +62,8 @@ bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
return false;
}
+static inline void kuap_restore(struct pt_regs *regs) { }
+static inline void kuap_check(void) { }
#endif /* CONFIG_PPC_KUAP */
static inline void allow_read_from_user(const void __user *from, unsigned long size)
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall_64.c
index f021db893ec2..4c46f3aefaf8 100644
--- a/arch/powerpc/kernel/syscall_64.c
+++ b/arch/powerpc/kernel/syscall_64.c
@@ -2,7 +2,7 @@
#include <linux/err.h>
#include <asm/asm-prototypes.h>
-#include <asm/book3s/64/kup-radix.h>
+#include <asm/kup.h>
#include <asm/cputime.h>
#include <asm/hw_irq.h>
#include <asm/kprobes.h>
@@ -48,7 +48,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
}
#endif
- kuap_check_amr();
+ kuap_check();
/*
* This is not required for the syscall exit path, but makes the
@@ -220,7 +220,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
local_paca->tm_scratch = regs->msr;
#endif
- kuap_check_amr();
+ kuap_check();
account_cpu_user_exit();
@@ -300,7 +300,7 @@ notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned
local_paca->tm_scratch = regs->msr;
#endif
- kuap_check_amr();
+ kuap_check();
account_cpu_user_exit();
@@ -370,7 +370,7 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsign
* We don't need to restore AMR on the way back to userspace for KUAP.
* The value of AMR only matters while we're in the kernel.
*/
- kuap_restore_amr(regs);
+ kuap_restore(regs);
return ret;
}
--
2.25.0
^ permalink raw reply [flat|nested] 17+ messages in thread
* [RFC PATCH v3 03/15] powerpc/32s: Create C version of kuap_restore() and kuap_check()
2020-04-06 18:16 [RFC PATCH v3 01/15] powerpc/syscall: Refactorise from Nick Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 02/15] powerpc/radix: Make kuap_check_amr() and kuap_restore_amr() generic Christophe Leroy
@ 2020-04-06 18:16 ` Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 04/15] powerpc/8xx: " Christophe Leroy
` (11 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2020-04-06 18:16 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
npiggin, msuchanek
Cc: linux-kernel, linuxppc-dev
In preparation of porting PPC32 to C syscall entry/exit,
create C version of kuap_restore() and kuap_check() on book3s/32.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
arch/powerpc/include/asm/book3s/32/kup.h | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 3c0ba22dc360..c85bc5b56366 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -102,6 +102,27 @@ static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
isync(); /* Context sync required after mtsrin() */
}
+static inline void kuap_restore(struct pt_regs *regs)
+{
+ u32 kuap = current->thread.kuap;
+ u32 addr = kuap & 0xf0000000;
+ u32 end = kuap << 28;
+
+ if (unlikely(!kuap))
+ return;
+
+ current->thread.kuap = 0;
+ kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end); /* Clear Ks */
+}
+
+static inline void kuap_check(void)
+{
+ if (!IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ return;
+
+ WARN_ON_ONCE(current->thread.kuap != 0);
+}
+
static __always_inline void allow_user_access(void __user *to, const void __user *from,
u32 size, unsigned long dir)
{
--
2.25.0
^ permalink raw reply [flat|nested] 17+ messages in thread
* [RFC PATCH v3 04/15] powerpc/8xx: Create C version of kuap_restore() and kuap_check()
2020-04-06 18:16 [RFC PATCH v3 01/15] powerpc/syscall: Refactorise from Nick Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 02/15] powerpc/radix: Make kuap_check_amr() and kuap_restore_amr() generic Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 03/15] powerpc/32s: Create C version of kuap_restore() and kuap_check() Christophe Leroy
@ 2020-04-06 18:16 ` Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 05/15] powerpc/irq: Add helpers to get and set regs->softe Christophe Leroy
` (10 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2020-04-06 18:16 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
npiggin, msuchanek
Cc: linux-kernel, linuxppc-dev
In preparation of porting PPC32 to C syscall entry/exit,
create C version of kuap_restore() and kuap_check() on 8xx
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
arch/powerpc/include/asm/nohash/32/kup-8xx.h | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 85ed2390fb99..1918d2e55da3 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -34,6 +34,19 @@
#include <asm/reg.h>
+static inline void kuap_restore(struct pt_regs *regs)
+{
+ mtspr(SPRN_MD_AP, regs->kuap);
+}
+
+static inline void kuap_check(void)
+{
+ if (!IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ return;
+
+ WARN_ON_ONCE((mfspr(SPRN_MD_AP) & 0xffff0000) != (MD_APG_KUAP & 0xffff0000));
+}
+
static inline void allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
--
2.25.0
^ permalink raw reply [flat|nested] 17+ messages in thread
* [RFC PATCH v3 05/15] powerpc/irq: Add helpers to get and set regs->softe
2020-04-06 18:16 [RFC PATCH v3 01/15] powerpc/syscall: Refactorise from Nick Christophe Leroy
` (2 preceding siblings ...)
2020-04-06 18:16 ` [RFC PATCH v3 04/15] powerpc/8xx: " Christophe Leroy
@ 2020-04-06 18:16 ` Christophe Leroy
2020-04-07 0:50 ` Nicholas Piggin
2020-04-06 18:16 ` [RFC PATCH v3 06/15] powerpc/irq: Add new helpers to play with MSR_EE and MSR_RI on PPC32 Christophe Leroy
` (9 subsequent siblings)
13 siblings, 1 reply; 17+ messages in thread
From: Christophe Leroy @ 2020-04-06 18:16 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
npiggin, msuchanek
Cc: linux-kernel, linuxppc-dev
regs->softe doesn't exist on PPC32.
Add helpers to get and set regs->softe.
Those helpers will be no-ops on PPC32.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
arch/powerpc/include/asm/hw_irq.h | 21 +++++++++++++++++++--
1 file changed, 19 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e0e71777961f..e69466867d5f 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -39,6 +39,8 @@
#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE)
#endif
+#endif /* CONFIG_PPC64 */
+
/*
* flags for paca->irq_soft_mask
*/
@@ -47,8 +49,6 @@
#define IRQS_PMI_DISABLED 2
#define IRQS_ALL_DISABLED (IRQS_DISABLED | IRQS_PMI_DISABLED)
-#endif /* CONFIG_PPC64 */
-
#ifndef __ASSEMBLY__
extern void replay_system_reset(void);
@@ -282,6 +282,15 @@ extern void irq_set_pending_from_srr1(unsigned long srr1);
extern void force_external_irq_replay(void);
+static inline unsigned long get_softe(struct pt_regs *regs)
+{
+ return regs->softe;
+}
+
+static inline void set_softe(struct pt_regs *regs, unsigned long val)
+{
+ regs->softe = val;
+}
#else /* CONFIG_PPC64 */
static inline unsigned long arch_local_save_flags(void)
@@ -350,6 +359,14 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
static inline void may_hard_irq_enable(void) { }
+static inline unsigned long get_softe(struct pt_regs *regs)
+{
+ return 0;
+}
+
+static inline void set_softe(struct pt_regs *regs, unsigned long val)
+{
+}
#endif /* CONFIG_PPC64 */
#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
--
2.25.0
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [RFC PATCH v3 05/15] powerpc/irq: Add helpers to get and set regs->softe
2020-04-06 18:16 ` [RFC PATCH v3 05/15] powerpc/irq: Add helpers to get and set regs->softe Christophe Leroy
@ 2020-04-07 0:50 ` Nicholas Piggin
0 siblings, 0 replies; 17+ messages in thread
From: Nicholas Piggin @ 2020-04-07 0:50 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Christophe Leroy, Michael Ellerman,
msuchanek, Paul Mackerras
Cc: linux-kernel, linuxppc-dev
Christophe Leroy's on April 7, 2020 4:16 am:
> regs->softe doesn't exist on PPC32.
>
> Add helpers to get and set regs->softe.
> Those helpers will void on PPC32.
>
> Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
> ---
> arch/powerpc/include/asm/hw_irq.h | 21 +++++++++++++++++++--
> 1 file changed, 19 insertions(+), 2 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
> index e0e71777961f..e69466867d5f 100644
> --- a/arch/powerpc/include/asm/hw_irq.h
> +++ b/arch/powerpc/include/asm/hw_irq.h
> @@ -39,6 +39,8 @@
> #define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE)
> #endif
>
> +#endif /* CONFIG_PPC64 */
> +
> /*
> * flags for paca->irq_soft_mask
> */
> @@ -47,8 +49,6 @@
> #define IRQS_PMI_DISABLED 2
> #define IRQS_ALL_DISABLED (IRQS_DISABLED | IRQS_PMI_DISABLED)
>
> -#endif /* CONFIG_PPC64 */
> -
> #ifndef __ASSEMBLY__
>
> extern void replay_system_reset(void);
> @@ -282,6 +282,15 @@ extern void irq_set_pending_from_srr1(unsigned long srr1);
>
> extern void force_external_irq_replay(void);
>
> +static inline unsigned long get_softe(struct pt_regs *regs)
> +{
> + return regs->softe;
> +}
> +
> +static inline void set_softe(struct pt_regs *regs, unsigned long val)
> +{
> + regs->softe = val;
> +}
> #else /* CONFIG_PPC64 */
>
> static inline unsigned long arch_local_save_flags(void)
> @@ -350,6 +359,14 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
>
> static inline void may_hard_irq_enable(void) { }
>
> +static inline unsigned long get_softe(struct pt_regs *regs)
> +{
> + return 0;
> +}
> +
> +static inline void set_softe(struct pt_regs *regs, unsigned long val)
> +{
> +}
If this goes into a general shared header, I would prefer if we could
do something a bit more general (at least with the name).
I think get_softe() could just be replaced with arch_irq_disabled_regs().
For set, could we call it irq_soft_mask_regs_set_state()? 32 has no soft
mask state in regs, so it's more obvious that it's a no-op. Or you could
make 32-bit version a BUG(), and then always guard it with IS_ENABLED().
Thanks,
Nick
^ permalink raw reply [flat|nested] 17+ messages in thread
* [RFC PATCH v3 06/15] powerpc/irq: Add new helpers to play with MSR_EE and MSR_RI on PPC32
2020-04-06 18:16 [RFC PATCH v3 01/15] powerpc/syscall: Refactorise from Nick Christophe Leroy
` (3 preceding siblings ...)
2020-04-06 18:16 ` [RFC PATCH v3 05/15] powerpc/irq: Add helpers to get and set regs->softe Christophe Leroy
@ 2020-04-06 18:16 ` Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 07/15] powerpc/irq: Add stub irq_soft_mask_return() for PPC32 Christophe Leroy
` (8 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2020-04-06 18:16 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
npiggin, msuchanek
Cc: linux-kernel, linuxppc-dev
In preparation of porting PPC32 to C syscall entry/exit,
add PPC32 version of following helpers:
__hard_irq_enable()
__hard_irq_disable()
__hard_EE_RI_disable()
__hard_RI_enable()
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
arch/powerpc/include/asm/hw_irq.h | 15 +++++++++++++++
1 file changed, 15 insertions(+)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e69466867d5f..8c30a72262fd 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -330,6 +330,16 @@ static inline void arch_local_irq_disable(void)
mtmsr(mfmsr() & ~MSR_EE);
}
+static inline void arch_local_recovery_disable(void)
+{
+ if (IS_ENABLED(CONFIG_BOOKE))
+ wrtee(0);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_NRI);
+ else
+ mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
+}
+
static inline void arch_local_irq_enable(void)
{
if (IS_ENABLED(CONFIG_BOOKE))
@@ -352,6 +362,11 @@ static inline bool arch_irqs_disabled(void)
#define hard_irq_disable() arch_local_irq_disable()
+#define __hard_irq_enable() arch_local_irq_enable()
+#define __hard_irq_disable() arch_local_irq_disable()
+#define __hard_EE_RI_disable() arch_local_recovery_disable()
+#define __hard_RI_enable() arch_local_irq_disable()
+
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
{
return !(regs->msr & MSR_EE);
--
2.25.0
^ permalink raw reply [flat|nested] 17+ messages in thread
* [RFC PATCH v3 07/15] powerpc/irq: Add stub irq_soft_mask_return() for PPC32
2020-04-06 18:16 [RFC PATCH v3 01/15] powerpc/syscall: Refactorise from Nick Christophe Leroy
` (4 preceding siblings ...)
2020-04-06 18:16 ` [RFC PATCH v3 06/15] powerpc/irq: Add new helpers to play with MSR_EE and MSR_RI on PPC32 Christophe Leroy
@ 2020-04-06 18:16 ` Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 08/15] powerpc/syscall: Rename syscall_64.c into syscall.c Christophe Leroy
` (7 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2020-04-06 18:16 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
npiggin, msuchanek
Cc: linux-kernel, linuxppc-dev
To allow building syscall_64.c smoothly on PPC32, add a stub version
of irq_soft_mask_return().
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
arch/powerpc/include/asm/hw_irq.h | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 8c30a72262fd..1c25a84a3159 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -293,6 +293,11 @@ static inline void set_softe(struct pt_regs *regs, unsigned long val)
}
#else /* CONFIG_PPC64 */
+static inline notrace unsigned long irq_soft_mask_return(void)
+{
+ return 0;
+}
+
static inline unsigned long arch_local_save_flags(void)
{
return mfmsr();
--
2.25.0
^ permalink raw reply [flat|nested] 17+ messages in thread
* [RFC PATCH v3 08/15] powerpc/syscall: Rename syscall_64.c into syscall.c
2020-04-06 18:16 [RFC PATCH v3 01/15] powerpc/syscall: Refactorise from Nick Christophe Leroy
` (5 preceding siblings ...)
2020-04-06 18:16 ` [RFC PATCH v3 07/15] powerpc/irq: Add stub irq_soft_mask_return() for PPC32 Christophe Leroy
@ 2020-04-06 18:16 ` Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 09/15] powerpc/syscall: Make syscall_64.c buildable on PPC32 Christophe Leroy
` (6 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2020-04-06 18:16 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
npiggin, msuchanek
Cc: linux-kernel, linuxppc-dev
syscall_64.c will be reused almost as is for PPC32.
Rename it syscall.c
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
arch/powerpc/kernel/Makefile | 2 +-
arch/powerpc/kernel/{syscall_64.c => syscall.c} | 0
2 files changed, 1 insertion(+), 1 deletion(-)
rename arch/powerpc/kernel/{syscall_64.c => syscall.c} (100%)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 570660efbb3d..8cc3c831dccd 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -49,7 +49,7 @@ obj-y := cputable.o syscalls.o \
obj-y += ptrace/
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o signal_64.o \
paca.o nvram_64.o firmware.o note.o \
- syscall_64.o
+ syscall.o
obj-$(CONFIG_VDSO32) += vdso32/
obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
diff --git a/arch/powerpc/kernel/syscall_64.c b/arch/powerpc/kernel/syscall.c
similarity index 100%
rename from arch/powerpc/kernel/syscall_64.c
rename to arch/powerpc/kernel/syscall.c
--
2.25.0
^ permalink raw reply [flat|nested] 17+ messages in thread
* [RFC PATCH v3 09/15] powerpc/syscall: Make syscall_64.c buildable on PPC32
2020-04-06 18:16 [RFC PATCH v3 01/15] powerpc/syscall: Refactorise from Nick Christophe Leroy
` (6 preceding siblings ...)
2020-04-06 18:16 ` [RFC PATCH v3 08/15] powerpc/syscall: Rename syscall_64.c into syscall.c Christophe Leroy
@ 2020-04-06 18:16 ` Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 10/15] powerpc/syscall: Use is_compat_task() Christophe Leroy
` (5 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2020-04-06 18:16 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
npiggin, msuchanek
Cc: linux-kernel, linuxppc-dev
ifdef out specific PPC64 stuff to allow building
syscall_64.c on PPC32.
Modify Makefile to always build syscall.o
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
arch/powerpc/kernel/Makefile | 5 ++---
arch/powerpc/kernel/syscall.c | 9 +++++----
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 8cc3c831dccd..e4be425b7718 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -45,11 +45,10 @@ obj-y := cputable.o syscalls.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o misc_$(BITS).o \
- of_platform.o prom_parse.o
+ of_platform.o prom_parse.o syscall.o
obj-y += ptrace/
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o signal_64.o \
- paca.o nvram_64.o firmware.o note.o \
- syscall.o
+ paca.o nvram_64.o firmware.o note.o
obj-$(CONFIG_VDSO32) += vdso32/
obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
index 4c46f3aefaf8..98c98ce12f7d 100644
--- a/arch/powerpc/kernel/syscall.c
+++ b/arch/powerpc/kernel/syscall.c
@@ -34,7 +34,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
BUG_ON(!(regs->msr & MSR_RI));
BUG_ON(!(regs->msr & MSR_PR));
BUG_ON(!FULL_REGS(regs));
- BUG_ON(regs->softe != IRQS_ENABLED);
+ BUG_ON(IS_ENABLED(CONFIG_PPC64) && get_softe(regs) != IRQS_ENABLED);
account_cpu_user_entry();
@@ -56,7 +56,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
* frame, or if the unwinder was taught the first stack frame always
* returns to user with IRQS_ENABLED, this store could be avoided!
*/
- regs->softe = IRQS_ENABLED;
+ set_softe(regs, IRQS_ENABLED);
local_irq_enable();
@@ -114,6 +114,7 @@ static notrace inline bool prep_irq_for_enabled_exit(void)
/* This pattern matches prep_irq_for_idle */
__hard_EE_RI_disable();
+#ifdef CONFIG_PPC64
if (unlikely(lazy_irq_pending())) {
/* Took an interrupt, may have more exit work to do. */
__hard_RI_enable();
@@ -124,7 +125,7 @@ static notrace inline bool prep_irq_for_enabled_exit(void)
}
local_paca->irq_happened = 0;
irq_soft_mask_set(IRQS_ENABLED);
-
+#endif
return true;
}
@@ -227,7 +228,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
return ret;
}
-#ifdef CONFIG_PPC_BOOK3S /* BOOK3E not yet using this */
+#ifdef CONFIG_PPC_BOOK3S_64 /* BOOK3E not yet using this */
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
{
#ifdef CONFIG_PPC_BOOK3E
--
2.25.0
^ permalink raw reply [flat|nested] 17+ messages in thread
* [RFC PATCH v3 10/15] powerpc/syscall: Use is_compat_task()
2020-04-06 18:16 [RFC PATCH v3 01/15] powerpc/syscall: Refactorise from Nick Christophe Leroy
` (7 preceding siblings ...)
2020-04-06 18:16 ` [RFC PATCH v3 09/15] powerpc/syscall: Make syscall_64.c buildable on PPC32 Christophe Leroy
@ 2020-04-06 18:16 ` Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 11/15] powerpc/syscall: Save r3 in regs->orig_r3 Christophe Leroy
` (4 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2020-04-06 18:16 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
npiggin, msuchanek
Cc: linux-kernel, linuxppc-dev
Instead of hard comparing task flags with _TIF_32BIT, use
is_compat_task(). The advantage is that it returns 0 on PPC32
although _TIF_32BIT is always set.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
arch/powerpc/kernel/syscall.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
index 98c98ce12f7d..0ad4250d2ce8 100644
--- a/arch/powerpc/kernel/syscall.c
+++ b/arch/powerpc/kernel/syscall.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/err.h>
+#include <linux/compat.h>
+
#include <asm/asm-prototypes.h>
#include <asm/kup.h>
#include <asm/cputime.h>
@@ -86,7 +88,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
/* May be faster to do array_index_nospec? */
barrier_nospec();
- if (unlikely(ti_flags & _TIF_32BIT)) {
+ if (is_compat_task()) {
f = (void *)compat_sys_call_table[r0];
r3 &= 0x00000000ffffffffULL;
--
2.25.0
^ permalink raw reply [flat|nested] 17+ messages in thread
* [RFC PATCH v3 11/15] powerpc/syscall: Save r3 in regs->orig_r3
2020-04-06 18:16 [RFC PATCH v3 01/15] powerpc/syscall: Refactorise from Nick Christophe Leroy
` (8 preceding siblings ...)
2020-04-06 18:16 ` [RFC PATCH v3 10/15] powerpc/syscall: Use is_compat_task() Christophe Leroy
@ 2020-04-06 18:16 ` Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 12/15] powerpc/syscall: Selectively check MSR_RI and MSR_PR on syscall entry Christophe Leroy
` (3 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2020-04-06 18:16 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
npiggin, msuchanek
Cc: linux-kernel, linuxppc-dev
Save r3 in regs->orig_r3 in system_call_exception()
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
arch/powerpc/kernel/entry_64.S | 1 -
arch/powerpc/kernel/syscall.c | 2 ++
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 63f0a4414618..5ccb65f75712 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -114,7 +114,6 @@ END_BTB_FLUSH_SECTION
std r10,_LINK(r1)
std r11,_TRAP(r1)
std r12,_CCR(r1)
- std r3,ORIG_GPR3(r1)
addi r10,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
std r11,-16(r10) /* "regshere" marker */
diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
index 0ad4250d2ce8..dfd7b28239b8 100644
--- a/arch/powerpc/kernel/syscall.c
+++ b/arch/powerpc/kernel/syscall.c
@@ -27,6 +27,8 @@ notrace long system_call_exception(long r3, long r4, long r5,
unsigned long ti_flags;
syscall_fn f;
+ regs->orig_gpr3 = r3;
+
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
--
2.25.0
^ permalink raw reply [flat|nested] 17+ messages in thread
* [RFC PATCH v3 12/15] powerpc/syscall: Selectively check MSR_RI and MSR_PR on syscall entry
2020-04-06 18:16 [RFC PATCH v3 01/15] powerpc/syscall: Refactorise from Nick Christophe Leroy
` (9 preceding siblings ...)
2020-04-06 18:16 ` [RFC PATCH v3 11/15] powerpc/syscall: Save r3 in regs->orig_r3 Christophe Leroy
@ 2020-04-06 18:16 ` Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 13/15] powerpc/syscall: system call implement entry/exit logic in C for PPC32 Christophe Leroy
` (2 subsequent siblings)
13 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2020-04-06 18:16 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
npiggin, msuchanek
Cc: linux-kernel, linuxppc-dev
In system_call_exception(), MSR_RI needs to also be checked on 8xx.
Only book3e doesn't have MSR_RI.
On PPC32, MSR_PR is checked in real mode to avoid clobbering the
stack, so no need to check and panic in system_call_exception().
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
arch/powerpc/kernel/syscall.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
index dfd7b28239b8..f9fca9985b0f 100644
--- a/arch/powerpc/kernel/syscall.c
+++ b/arch/powerpc/kernel/syscall.c
@@ -34,9 +34,9 @@ notrace long system_call_exception(long r3, long r4, long r5,
trace_hardirqs_off(); /* finish reconciling */
- if (IS_ENABLED(CONFIG_PPC_BOOK3S))
+ if (!IS_ENABLED(CONFIG_PPC_BOOK3E))
BUG_ON(!(regs->msr & MSR_RI));
- BUG_ON(!(regs->msr & MSR_PR));
+ BUG_ON(IS_ENABLED(CONFIG_PPC64) && !(regs->msr & MSR_PR));
BUG_ON(!FULL_REGS(regs));
BUG_ON(IS_ENABLED(CONFIG_PPC64) && get_softe(regs) != IRQS_ENABLED);
--
2.25.0
^ permalink raw reply [flat|nested] 17+ messages in thread
* [RFC PATCH v3 13/15] powerpc/syscall: system call implement entry/exit logic in C for PPC32
2020-04-06 18:16 [RFC PATCH v3 01/15] powerpc/syscall: Refactorise from Nick Christophe Leroy
` (10 preceding siblings ...)
2020-04-06 18:16 ` [RFC PATCH v3 12/15] powerpc/syscall: Selectively check MSR_RI and MSR_PR on syscall entry Christophe Leroy
@ 2020-04-06 18:16 ` Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 14/15] powerpc/syscall: Avoid stack frame in likely part of syscall_call_exception() Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 15/15] powerpc/kernel: Do not inconditionally save non volatile registers on system call Christophe Leroy
13 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2020-04-06 18:16 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
npiggin, msuchanek
Cc: linux-kernel, linuxppc-dev
That's a port of the PPC64 syscall entry/exit logic in C to PPC32.
Performancewise:
Before : 311 cycles on null_syscall
After : 353 cycles on null_syscall
Note: before the patch, if calling NVGPRS all the time as well,
we have 335 cycles on null_syscall
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
arch/powerpc/kernel/entry_32.S | 259 ++++-----------------------------
arch/powerpc/kernel/head_32.h | 16 +-
2 files changed, 29 insertions(+), 246 deletions(-)
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index a6371fb8f761..103f5158bc44 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -315,162 +315,37 @@ stack_ovf:
RFI
#endif
-#ifdef CONFIG_TRACE_IRQFLAGS
-trace_syscall_entry_irq_off:
- /*
- * Syscall shouldn't happen while interrupts are disabled,
- * so let's do a warning here.
- */
-0: trap
- EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
- bl trace_hardirqs_on
-
- /* Now enable for real */
- LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
- mtmsr r10
-
- REST_GPR(0, r1)
- REST_4GPRS(3, r1)
- REST_2GPRS(7, r1)
- b DoSyscall
-#endif /* CONFIG_TRACE_IRQFLAGS */
-
.globl transfer_to_syscall
transfer_to_syscall:
-#ifdef CONFIG_TRACE_IRQFLAGS
- andi. r12,r9,MSR_EE
- beq- trace_syscall_entry_irq_off
-#endif /* CONFIG_TRACE_IRQFLAGS */
-
-/*
- * Handle a system call.
- */
- .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
- .stabs "entry_32.S",N_SO,0,0,0f
-0:
-
-_GLOBAL(DoSyscall)
- stw r3,ORIG_GPR3(r1)
- li r12,0
- stw r12,RESULT(r1)
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* Make sure interrupts are enabled */
- mfmsr r11
- andi. r12,r11,MSR_EE
- /* We came in with interrupts disabled, we WARN and mark them enabled
- * for lockdep now */
-0: tweqi r12, 0
- EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
-#endif /* CONFIG_TRACE_IRQFLAGS */
- lwz r11,TI_FLAGS(r2)
- andi. r11,r11,_TIF_SYSCALL_DOTRACE
- bne- syscall_dotrace
-syscall_dotrace_cont:
- cmplwi 0,r0,NR_syscalls
- lis r10,sys_call_table@h
- ori r10,r10,sys_call_table@l
- slwi r0,r0,2
- bge- 66f
-
- barrier_nospec_asm
- /*
- * Prevent the load of the handler below (based on the user-passed
- * system call number) being speculatively executed until the test
- * against NR_syscalls and branch to .66f above has
- * committed.
- */
-
- lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
- mtlr r10
- addi r9,r1,STACK_FRAME_OVERHEAD
- PPC440EP_ERR42
- blrl /* Call handler */
- .globl ret_from_syscall
+ mr r9, r0
+ addi r10, r1, STACK_FRAME_OVERHEAD
+ bl system_call_exception
ret_from_syscall:
-#ifdef CONFIG_DEBUG_RSEQ
- /* Check whether the syscall is issued inside a restartable sequence */
- stw r3,GPR3(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl rseq_syscall
- lwz r3,GPR3(r1)
-#endif
- mr r6,r3
- /* disable interrupts so current_thread_info()->flags can't change */
- LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) /* doesn't include MSR_EE */
- /* Note: We don't bother telling lockdep about it */
- SYNC
- mtmsr r10
- lwz r9,TI_FLAGS(r2)
- li r8,-MAX_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
- bne- syscall_exit_work
- cmplw 0,r3,r8
- blt+ syscall_exit_cont
- lwz r11,_CCR(r1) /* Load CR */
- neg r3,r3
- oris r11,r11,0x1000 /* Set SO bit in CR */
- stw r11,_CCR(r1)
-syscall_exit_cont:
- lwz r8,_MSR(r1)
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* If we are going to return from the syscall with interrupts
- * off, we trace that here. It shouldn't normally happen.
- */
- andi. r10,r8,MSR_EE
- bne+ 1f
- stw r3,GPR3(r1)
- bl trace_hardirqs_off
- lwz r3,GPR3(r1)
-1:
-#endif /* CONFIG_TRACE_IRQFLAGS */
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- /* If the process has its own DBCR0 value, load it up. The internal
- debug mode bit tells us that dbcr0 should be loaded. */
- lwz r0,THREAD+THREAD_DBCR0(r2)
- andis. r10,r0,DBCR0_IDM@h
- bnel- load_dbcr0
-#endif
-#ifdef CONFIG_44x
-BEGIN_MMU_FTR_SECTION
- lis r4,icache_44x_need_flush@ha
- lwz r5,icache_44x_need_flush@l(r4)
- cmplwi cr0,r5,0
- bne- 2f
-1:
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
-#endif /* CONFIG_44x */
-BEGIN_FTR_SECTION
- lwarx r7,0,r1
-END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
- stwcx. r0,0,r1 /* to clear the reservation */
- ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
-#ifdef CONFIG_PPC_BOOK3S_32
- kuep_unlock r5, r7
-#endif
- kuap_check r2, r4
- lwz r4,_LINK(r1)
- lwz r5,_CCR(r1)
- mtlr r4
- mtcr r5
- lwz r7,_NIP(r1)
- lwz r2,GPR2(r1)
- lwz r1,GPR1(r1)
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
- mtspr SPRN_NRI, r0
-#endif
- mtspr SPRN_SRR0,r7
- mtspr SPRN_SRR1,r8
- SYNC
- RFI
-#ifdef CONFIG_44x
-2: li r7,0
- iccci r0,r0
- stw r7,icache_44x_need_flush@l(r4)
+ addi r4, r1, STACK_FRAME_OVERHEAD
+ bl syscall_exit_prepare
+ lwz r2, _CCR(r1)
+ lwz r4, _NIP(r1)
+ lwz r5, _MSR(r1)
+ lwz r6, _LINK(r1)
+ mtspr SPRN_SRR0, r4
+ mtspr SPRN_SRR1, r5
+ mtlr r6
+ cmpwi r3, 0
+ bne 2f
+1: mtcr r2
+ REST_GPR(2, r1)
+ REST_GPR(3, r1)
+ REST_GPR(1, r1)
+ rfi
+2: lwz r3, _CTR(r1)
+ lwz r4, _XER(r1)
+ REST_NVGPRS(r1)
+ mtctr r3
+ mtspr SPRN_XER, r4
+ REST_GPR(0, r1)
+ REST_8GPRS(4, r1)
+ REST_GPR(12, r1)
b 1b
-#endif /* CONFIG_44x */
-
-66: li r3,-ENOSYS
- b ret_from_syscall
.globl ret_from_fork
ret_from_fork:
@@ -490,86 +365,6 @@ ret_from_kernel_thread:
li r3,0
b ret_from_syscall
-/* Traced system call support */
-syscall_dotrace:
- SAVE_NVGPRS(r1)
- li r0,0xc00
- stw r0,_TRAP(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_syscall_trace_enter
- /*
- * Restore argument registers possibly just changed.
- * We use the return value of do_syscall_trace_enter
- * for call number to look up in the table (r0).
- */
- mr r0,r3
- lwz r3,GPR3(r1)
- lwz r4,GPR4(r1)
- lwz r5,GPR5(r1)
- lwz r6,GPR6(r1)
- lwz r7,GPR7(r1)
- lwz r8,GPR8(r1)
- REST_NVGPRS(r1)
-
- cmplwi r0,NR_syscalls
- /* Return code is already in r3 thanks to do_syscall_trace_enter() */
- bge- ret_from_syscall
- b syscall_dotrace_cont
-
-syscall_exit_work:
- andi. r0,r9,_TIF_RESTOREALL
- beq+ 0f
- REST_NVGPRS(r1)
- b 2f
-0: cmplw 0,r3,r8
- blt+ 1f
- andi. r0,r9,_TIF_NOERROR
- bne- 1f
- lwz r11,_CCR(r1) /* Load CR */
- neg r3,r3
- oris r11,r11,0x1000 /* Set SO bit in CR */
- stw r11,_CCR(r1)
-
-1: stw r6,RESULT(r1) /* Save result */
- stw r3,GPR3(r1) /* Update return value */
-2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
- beq 4f
-
- /* Clear per-syscall TIF flags if any are set. */
-
- li r11,_TIF_PERSYSCALL_MASK
- addi r12,r2,TI_FLAGS
-3: lwarx r8,0,r12
- andc r8,r8,r11
-#ifdef CONFIG_IBM405_ERR77
- dcbt 0,r12
-#endif
- stwcx. r8,0,r12
- bne- 3b
-
-4: /* Anything which requires enabling interrupts? */
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
- beq ret_from_except
-
- /* Re-enable interrupts. There is no need to trace that with
- * lockdep as we are supposed to have IRQs on at this point
- */
- ori r10,r10,MSR_EE
- SYNC
- mtmsr r10
-
- /* Save NVGPRS if they're not saved already */
- lwz r4,_TRAP(r1)
- andi. r4,r4,1
- beq 5f
- SAVE_NVGPRS(r1)
- li r4,0xc00
- stw r4,_TRAP(r1)
-5:
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_syscall_trace_leave
- b ret_from_except_full
-
/*
* System call was called from kernel. We get here with SRR1 in r9.
* Mark the exception as recoverable once we have retrieved SRR0,
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index 9abec6cd099c..c301d666a3e5 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -174,12 +174,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
stw r2,GPR2(r11)
addi r10,r10,STACK_FRAME_REGS_MARKER@l
stw r9,_MSR(r11)
- li r2, \trapno + 1
+ li r2, \trapno
stw r10,8(r11)
stw r2,_TRAP(r11)
SAVE_GPR(0, r11)
SAVE_4GPRS(3, r11)
SAVE_2GPRS(7, r11)
+ SAVE_NVGPRS(r11)
addi r11,r1,STACK_FRAME_OVERHEAD
addi r2,r12,-THREAD
stw r11,PT_REGS(r12)
@@ -188,9 +189,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
internal debug mode bit to do this. */
lwz r12,THREAD_DBCR0(r12)
andis. r12,r12,DBCR0_IDM@h
-#endif
- ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
-#if defined(CONFIG_40x)
beq+ 3f
/* From user and task is ptraced - load up global dbcr0 */
li r12,-1 /* clear all pending debug events */
@@ -209,17 +207,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
tovirt_novmstack r2, r2 /* set r2 to current */
lis r11, transfer_to_syscall@h
ori r11, r11, transfer_to_syscall@l
-#ifdef CONFIG_TRACE_IRQFLAGS
- /*
- * If MSR is changing we need to keep interrupts disabled at this point
- * otherwise we might risk taking an interrupt before we tell lockdep
- * they are enabled.
- */
LOAD_REG_IMMEDIATE(r10, MSR_KERNEL)
- rlwimi r10, r9, 0, MSR_EE
-#else
- LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
-#endif
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
mtspr SPRN_NRI, r0
#endif
--
2.25.0
^ permalink raw reply [flat|nested] 17+ messages in thread
* [RFC PATCH v3 14/15] powerpc/syscall: Avoid stack frame in likely part of system_call_exception()
2020-04-06 18:16 [RFC PATCH v3 01/15] powerpc/syscall: Refactorise from Nick Christophe Leroy
` (11 preceding siblings ...)
2020-04-06 18:16 ` [RFC PATCH v3 13/15] powerpc/syscall: system call implement entry/exit logic in C for PPC32 Christophe Leroy
@ 2020-04-06 18:16 ` Christophe Leroy
2020-04-06 18:16 ` [RFC PATCH v3 15/15] powerpc/kernel: Do not unconditionally save non-volatile registers on system call Christophe Leroy
13 siblings, 0 replies; 17+ messages in thread
From: Christophe Leroy @ 2020-04-06 18:16 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
npiggin, msuchanek
Cc: linux-kernel, linuxppc-dev
When r3 is not modified, reload it from regs->orig_gpr3 to free
volatile registers. This avoids a stack frame for the likely part
of system_call_exception().
Before : 353 cycles on null_syscall
After : 347 cycles on null_syscall
Before the patch:
c000b4d4 <system_call_exception>:
c000b4d4: 7c 08 02 a6 mflr r0
c000b4d8: 94 21 ff e0 stwu r1,-32(r1)
c000b4dc: 93 e1 00 1c stw r31,28(r1)
c000b4e0: 90 01 00 24 stw r0,36(r1)
c000b4e4: 90 6a 00 88 stw r3,136(r10)
c000b4e8: 81 6a 00 84 lwz r11,132(r10)
c000b4ec: 69 6b 00 02 xori r11,r11,2
c000b4f0: 55 6b ff fe rlwinm r11,r11,31,31,31
c000b4f4: 0f 0b 00 00 twnei r11,0
c000b4f8: 81 6a 00 a0 lwz r11,160(r10)
c000b4fc: 55 6b 07 fe clrlwi r11,r11,31
c000b500: 0f 0b 00 00 twnei r11,0
c000b504: 7c 0c 42 e6 mftb r0
c000b508: 83 e2 00 08 lwz r31,8(r2)
c000b50c: 81 82 00 28 lwz r12,40(r2)
c000b510: 90 02 00 24 stw r0,36(r2)
c000b514: 7d 8c f8 50 subf r12,r12,r31
c000b518: 7c 0c 02 14 add r0,r12,r0
c000b51c: 90 02 00 08 stw r0,8(r2)
c000b520: 7c 10 13 a6 mtspr 80,r0
c000b524: 81 62 00 70 lwz r11,112(r2)
c000b528: 71 60 86 91 andi. r0,r11,34449
c000b52c: 40 82 00 34 bne c000b560 <system_call_exception+0x8c>
c000b530: 2b 89 01 b6 cmplwi cr7,r9,438
c000b534: 41 9d 00 64 bgt cr7,c000b598 <system_call_exception+0xc4>
c000b538: 3d 40 c0 5c lis r10,-16292
c000b53c: 55 29 10 3a rlwinm r9,r9,2,0,29
c000b540: 39 4a 41 e8 addi r10,r10,16872
c000b544: 80 01 00 24 lwz r0,36(r1)
c000b548: 7d 2a 48 2e lwzx r9,r10,r9
c000b54c: 7c 08 03 a6 mtlr r0
c000b550: 7d 29 03 a6 mtctr r9
c000b554: 83 e1 00 1c lwz r31,28(r1)
c000b558: 38 21 00 20 addi r1,r1,32
c000b55c: 4e 80 04 20 bctr
After the patch:
c000b4d4 <system_call_exception>:
c000b4d4: 81 6a 00 84 lwz r11,132(r10)
c000b4d8: 90 6a 00 88 stw r3,136(r10)
c000b4dc: 69 6b 00 02 xori r11,r11,2
c000b4e0: 55 6b ff fe rlwinm r11,r11,31,31,31
c000b4e4: 0f 0b 00 00 twnei r11,0
c000b4e8: 80 6a 00 a0 lwz r3,160(r10)
c000b4ec: 54 63 07 fe clrlwi r3,r3,31
c000b4f0: 0f 03 00 00 twnei r3,0
c000b4f4: 7d 6c 42 e6 mftb r11
c000b4f8: 81 82 00 08 lwz r12,8(r2)
c000b4fc: 80 02 00 28 lwz r0,40(r2)
c000b500: 91 62 00 24 stw r11,36(r2)
c000b504: 7c 00 60 50 subf r0,r0,r12
c000b508: 7d 60 5a 14 add r11,r0,r11
c000b50c: 91 62 00 08 stw r11,8(r2)
c000b510: 7c 10 13 a6 mtspr 80,r0
c000b514: 80 62 00 70 lwz r3,112(r2)
c000b518: 70 6b 86 91 andi. r11,r3,34449
c000b51c: 40 82 00 28 bne c000b544 <system_call_exception+0x70>
c000b520: 2b 89 01 b6 cmplwi cr7,r9,438
c000b524: 41 9d 00 84 bgt cr7,c000b5a8 <system_call_exception+0xd4>
c000b528: 80 6a 00 88 lwz r3,136(r10)
c000b52c: 3d 40 c0 5c lis r10,-16292
c000b530: 55 29 10 3a rlwinm r9,r9,2,0,29
c000b534: 39 4a 41 e4 addi r10,r10,16868
c000b538: 7d 2a 48 2e lwzx r9,r10,r9
c000b53c: 7d 29 03 a6 mtctr r9
c000b540: 4e 80 04 20 bctr
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
arch/powerpc/kernel/syscall.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
index f9fca9985b0f..af449a4a8e8f 100644
--- a/arch/powerpc/kernel/syscall.c
+++ b/arch/powerpc/kernel/syscall.c
@@ -85,6 +85,9 @@ notrace long system_call_exception(long r3, long r4, long r5,
} else if (unlikely(r0 >= NR_syscalls)) {
return -ENOSYS;
+ } else {
+ /* Restore r3 from orig_gpr3 to free up a volatile reg */
+ r3 = regs->orig_gpr3;
}
/* May be faster to do array_index_nospec? */
--
2.25.0
^ permalink raw reply [flat|nested] 17+ messages in thread
* [RFC PATCH v3 15/15] powerpc/kernel: Do not unconditionally save non-volatile registers on system call
2020-04-06 18:16 [RFC PATCH v3 01/15] powerpc/syscall: Refactorise from Nick Christophe Leroy
` (12 preceding siblings ...)
2020-04-06 18:16 ` [RFC PATCH v3 14/15] powerpc/syscall: Avoid stack frame in likely part of system_call_exception() Christophe Leroy
@ 2020-04-06 18:16 ` Christophe Leroy
2020-04-07 1:10 ` Nicholas Piggin
13 siblings, 1 reply; 17+ messages in thread
From: Christophe Leroy @ 2020-04-06 18:16 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Paul Mackerras, Michael Ellerman,
npiggin, msuchanek
Cc: linux-kernel, linuxppc-dev
To allow that, syscall_exit_prepare() gets split in 3 parts.
On PPC32, the three parts are called from entry_32.S
On PPC64, we keep a syscall_exit_prepare() function which
concatenates the three parts.
One benefit is also that the likely part of
syscall_exit_prepare_begin() and the syscall_exit_prepare_end()
functions are frameless whereas there was no way to get the
likely part of syscall_exit_prepare() frameless.
Before : 347 cycles on null_syscall
After : 307 cycles on null_syscall, ie better than before C porting.
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
---
arch/powerpc/include/asm/asm-prototypes.h | 11 +++
arch/powerpc/kernel/entry_32.S | 25 ++++++-
arch/powerpc/kernel/head_32.h | 3 +-
arch/powerpc/kernel/syscall.c | 83 +++++++++++++++--------
4 files changed, 92 insertions(+), 30 deletions(-)
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 7d81e86a1e5d..eea5133733bb 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -98,6 +98,17 @@ unsigned long __init early_init(unsigned long dt_ptr);
void __init machine_init(u64 dt_ptr);
#endif
long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, unsigned long r0, struct pt_regs *regs);
+#ifdef CONFIG_PPC64
+#define static64 static
+#else
+#define static64
+#endif
+static64 notrace unsigned long
+syscall_exit_prepare_begin(unsigned long r3, struct pt_regs *regs, unsigned long ti_flags);
+static64 notrace unsigned long
+syscall_exit_prepare_loop(unsigned long ret, struct pt_regs *regs, unsigned long ti_flags);
+static64 notrace unsigned long
+syscall_exit_prepare_end(unsigned long ret, struct pt_regs *regs, unsigned long ti_flags);
notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs);
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr);
notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr);
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 103f5158bc44..b9287fd0fcc6 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -315,14 +315,37 @@ stack_ovf:
RFI
#endif
+save_nvgprs:
+ lwz r11, _TRAP(r1)
+ andi. r12, r11, 1
+ rlwinm r11, r11, 0, ~1
+ beqlr
+ SAVE_NVGPRS(r1)
+ stw r11, _TRAP(r1)
+ blr
+
.globl transfer_to_syscall
transfer_to_syscall:
+ lwz r10, TI_FLAGS(r2)
mr r9, r0
+ andi. r10, r10, _TIF_SYSCALL_DOTRACE
addi r10, r1, STACK_FRAME_OVERHEAD
+ bnel- save_nvgprs
bl system_call_exception
ret_from_syscall:
+ lwz r5, TI_FLAGS(r2)
addi r4, r1, STACK_FRAME_OVERHEAD
- bl syscall_exit_prepare
+ andi. r0, r5, _TIF_SYSCALL_DOTRACE | _TIF_SINGLESTEP | _TIF_USER_WORK_MASK
+ bnel- save_nvgprs
+ bl syscall_exit_prepare_begin
+1: lwz r5, TI_FLAGS(r2)
+ addi r4, r1, STACK_FRAME_OVERHEAD
+ andi. r0, r5, _TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
+ beq+ 1f
+ bl save_nvgprs
+ bl syscall_exit_prepare_loop
+ b 1b
+1: bl syscall_exit_prepare_end
lwz r2, _CCR(r1)
lwz r4, _NIP(r1)
lwz r5, _MSR(r1)
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index c301d666a3e5..1cc9a67cb42c 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -174,13 +174,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
stw r2,GPR2(r11)
addi r10,r10,STACK_FRAME_REGS_MARKER@l
stw r9,_MSR(r11)
- li r2, \trapno
+ li r2, \trapno + 1
stw r10,8(r11)
stw r2,_TRAP(r11)
SAVE_GPR(0, r11)
SAVE_4GPRS(3, r11)
SAVE_2GPRS(7, r11)
- SAVE_NVGPRS(r11)
addi r11,r1,STACK_FRAME_OVERHEAD
addi r2,r12,-THREAD
stw r11,PT_REGS(r12)
diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
index af449a4a8e8f..b15f19c00ccb 100644
--- a/arch/powerpc/kernel/syscall.c
+++ b/arch/powerpc/kernel/syscall.c
@@ -37,7 +37,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
if (!IS_ENABLED(CONFIG_PPC_BOOK3E))
BUG_ON(!(regs->msr & MSR_RI));
BUG_ON(IS_ENABLED(CONFIG_PPC64) && !(regs->msr & MSR_PR));
- BUG_ON(!FULL_REGS(regs));
+ BUG_ON(IS_ENABLED(CONFIG_PPC64) && !FULL_REGS(regs));
BUG_ON(IS_ENABLED(CONFIG_PPC64) && get_softe(regs) != IRQS_ENABLED);
account_cpu_user_entry();
@@ -145,11 +145,9 @@ static notrace inline bool prep_irq_for_enabled_exit(void)
* The function graph tracer can not trace the return side of this function,
* because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
*/
-notrace unsigned long syscall_exit_prepare(unsigned long r3,
- struct pt_regs *regs)
+static64 notrace unsigned long
+syscall_exit_prepare_begin(unsigned long r3, struct pt_regs *regs, unsigned long ti_flags)
{
- unsigned long *ti_flagsp = &current_thread_info()->flags;
- unsigned long ti_flags;
unsigned long ret = 0;
regs->result = r3;
@@ -157,8 +155,6 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
/* Check whether the syscall is issued inside a restartable sequence */
rseq_syscall(regs);
- ti_flags = *ti_flagsp;
-
if (unlikely(r3 >= (unsigned long)-MAX_ERRNO)) {
if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
r3 = -r3;
@@ -171,7 +167,7 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
ret = _TIF_RESTOREALL;
else
regs->gpr[3] = r3;
- clear_bits(_TIF_PERSYSCALL_MASK, ti_flagsp);
+ clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
} else {
regs->gpr[3] = r3;
}
@@ -181,27 +177,35 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
ret |= _TIF_RESTOREALL;
}
-again:
local_irq_disable();
- ti_flags = READ_ONCE(*ti_flagsp);
- while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
- local_irq_enable();
- if (ti_flags & _TIF_NEED_RESCHED) {
- schedule();
- } else {
- /*
- * SIGPENDING must restore signal handler function
- * argument GPRs, and some non-volatiles (e.g., r1).
- * Restore all for now. This could be made lighter.
- */
- if (ti_flags & _TIF_SIGPENDING)
- ret |= _TIF_RESTOREALL;
- do_notify_resume(regs, ti_flags);
- }
- local_irq_disable();
- ti_flags = READ_ONCE(*ti_flagsp);
+
+ return ret;
+}
+
+static64 notrace unsigned long
+syscall_exit_prepare_loop(unsigned long ret, struct pt_regs *regs, unsigned long ti_flags)
+{
+ local_irq_enable();
+ if (ti_flags & _TIF_NEED_RESCHED) {
+ schedule();
+ } else {
+ /*
+ * SIGPENDING must restore signal handler function
+ * argument GPRs, and some non-volatiles (e.g., r1).
+ * Restore all for now. This could be made lighter.
+ */
+ if (ti_flags & _TIF_SIGPENDING)
+ ret |= _TIF_RESTOREALL;
+ do_notify_resume(regs, ti_flags);
}
+ local_irq_disable();
+
+ return ret;
+}
+static64 notrace unsigned long
+syscall_exit_prepare_end(unsigned long ret, struct pt_regs *regs, unsigned long ti_flags)
+{
if (IS_ENABLED(CONFIG_PPC_BOOK3S) && IS_ENABLED(CONFIG_PPC_FPU)) {
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
unlikely((ti_flags & _TIF_RESTORE_TM))) {
@@ -221,7 +225,8 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
if (unlikely(!prep_irq_for_enabled_exit())) {
local_irq_enable();
- goto again;
+ local_irq_disable();
+ return ret | 0x80000000;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -235,6 +240,30 @@ notrace unsigned long syscall_exit_prepare(unsigned long r3,
return ret;
}
+#ifdef CONFIG_PPC64
+notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs)
+{
+ unsigned long ret;
+ unsigned long *ti_flagsp = &current_thread_info()->flags;
+ unsigned long ti_flags = *ti_flagsp;
+
+ ret = syscall_exit_prepare_begin(r3, regs, ti_flags);
+
+again:
+ ti_flags = READ_ONCE(*ti_flagsp);
+ if (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
+ ret = syscall_exit_prepare_loop(ret, regs, ti_flags);
+ goto again;
+ }
+ ret = syscall_exit_prepare_end(ret, regs, ti_flags);
+ if (unlikely(ret & 0x80000000)) {
+ ret &= ~0x80000000;
+ goto again;
+ }
+ return ret;
+}
+#endif
+
#ifdef CONFIG_PPC_BOOK3S_64 /* BOOK3E not yet using this */
notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr)
{
--
2.25.0
^ permalink raw reply [flat|nested] 17+ messages in thread
* Re: [RFC PATCH v3 15/15] powerpc/kernel: Do not unconditionally save non-volatile registers on system call
2020-04-06 18:16 ` [RFC PATCH v3 15/15] powerpc/kernel: Do not unconditionally save non-volatile registers on system call Christophe Leroy
@ 2020-04-07 1:10 ` Nicholas Piggin
0 siblings, 0 replies; 17+ messages in thread
From: Nicholas Piggin @ 2020-04-07 1:10 UTC (permalink / raw)
To: Benjamin Herrenschmidt, Christophe Leroy, Michael Ellerman,
msuchanek, Paul Mackerras
Cc: linux-kernel, linuxppc-dev
Christophe Leroy's on April 7, 2020 4:16 am:
> + ret = syscall_exit_prepare_end(ret, regs, ti_flags);
> + if (unlikely(ret & 0x80000000)) {
> + ret &= ~0x80000000;
We could just add our own set of defines for these, there's no real
reason to use _TIF_RESTOREALL as I had.
Thanks,
Nick
^ permalink raw reply [flat|nested] 17+ messages in thread