LKML Archive on lore.kernel.org
* [PATCH 1/4] powerpc/cache: add cache flush operation for various e500
From: Chenhui Zhao @ 2015-03-26 10:18 UTC
To: linuxppc-dev, devicetree; +Cc: linux-kernel, scottwood, leoli, Jason.Jin
Various e500 cores have different cache architectures, so they
need different cache flush operations. Therefore, add a callback
function, cpu_flush_caches, to struct cpu_spec. The cache flush
operation for the specific kind of e500 is selected at init time.
The callback flushes all caches inside the current cpu.
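For illustration only (not part of the patch), platform code that needs the
flush is expected to go through the new cpu_spec member. cur_cpu_spec and
cpu_flush_caches are the items added or used by this series; the wrapper
below is a hypothetical sketch:

#include <asm/cputable.h>

/* hypothetical caller, e.g. on a cpu-offline or suspend path */
static void example_flush_this_cpu(void)
{
	/* the callback is selected per core type at identify_cpu() time */
	if (cur_cpu_spec->cpu_flush_caches)
		cur_cpu_spec->cpu_flush_caches();
}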
Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
---
arch/powerpc/include/asm/cacheflush.h | 2 -
arch/powerpc/include/asm/cputable.h | 11 +++
arch/powerpc/kernel/asm-offsets.c | 3 +
arch/powerpc/kernel/cpu_setup_fsl_booke.S | 114 +++++++++++++++++++++++++++++-
arch/powerpc/kernel/cputable.c | 4 ++
arch/powerpc/kernel/head_fsl_booke.S | 74 -------------------
arch/powerpc/platforms/85xx/smp.c | 3 +-
7 files changed, 133 insertions(+), 78 deletions(-)
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 30b35ff..729fde4 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -30,8 +30,6 @@ extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-extern void __flush_disable_L1(void);
-
extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr,
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 5cf5a6d..c776efe4 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -43,6 +43,13 @@ extern int machine_check_e500(struct pt_regs *regs);
extern int machine_check_e200(struct pt_regs *regs);
extern int machine_check_47x(struct pt_regs *regs);
+#if defined(CONFIG_E500) || defined(CONFIG_PPC_E500MC)
+extern void __flush_caches_e500v2(void);
+extern void __flush_caches_e500mc(void);
+extern void __flush_caches_e5500(void);
+extern void __flush_caches_e6500(void);
+#endif
+
/* NOTE WELL: Update identify_cpu() if fields are added or removed! */
struct cpu_spec {
/* CPU is matched via (PVR & pvr_mask) == pvr_value */
@@ -59,6 +66,10 @@ struct cpu_spec {
unsigned int icache_bsize;
unsigned int dcache_bsize;
+#if defined(CONFIG_E500) || defined(CONFIG_PPC_E500MC)
+ /* flush caches inside the current cpu */
+ void (*cpu_flush_caches)(void);
+#endif
/* number of performance monitor counters */
unsigned int num_pmcs;
enum powerpc_pmc_type pmc_type;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 4717859..9567930 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -372,6 +372,9 @@ int main(void)
DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore));
+#if defined(CONFIG_E500) || defined(CONFIG_PPC_E500MC)
+ DEFINE(CPU_FLUSH_CACHES, offsetof(struct cpu_spec, cpu_flush_caches));
+#endif
DEFINE(pbe_address, offsetof(struct pbe, address));
DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index dddba3e..c8c251f 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -1,7 +1,7 @@
/*
* This file contains low level CPU setup functions.
* Kumar Gala <galak@kernel.crashing.org>
- * Copyright 2009 Freescale Semiconductor, Inc.
+ * Copyright 2009, 2015 Freescale Semiconductor, Inc.
*
* Based on cpu_setup_6xx code by
* Benjamin Herrenschmidt <benh@kernel.crashing.org>
@@ -13,11 +13,13 @@
*
*/
+#include <asm/page.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/mmu-book3e.h>
#include <asm/asm-offsets.h>
+#include <asm/mpc85xx.h>
_GLOBAL(__e500_icache_setup)
mfspr r0, SPRN_L1CSR1
@@ -233,3 +235,113 @@ _GLOBAL(__setup_cpu_e5500)
mtlr r5
blr
#endif
+
+/* flush the L1 data cache; applies to e500v2, e500mc and e5500 */
+_GLOBAL(flush_dcache_L1)
+ mfmsr r10
+ wrteei 0
+
+ mfspr r3,SPRN_L1CFG0
+ rlwinm r5,r3,9,3 /* Extract cache block size */
+ twlgti r5,1 /* Only 32 and 64 byte cache blocks
+ * are currently defined.
+ */
+ li r4,32
+ subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) -
+ * log2(number of ways)
+ */
+ slw r5,r4,r5 /* r5 = cache block size */
+
+ rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */
+ mulli r7,r7,13 /* An 8-way cache will require 13
+ * loads per set.
+ */
+ slw r7,r7,r6
+
+ /* save off HID0 and set DCFA */
+ mfspr r8,SPRN_HID0
+ ori r9,r8,HID0_DCFA@l
+ mtspr SPRN_HID0,r9
+ isync
+
+ LOAD_REG_IMMEDIATE(r6, KERNELBASE)
+ mr r4, r6
+ mtctr r7
+
+1: lwz r3,0(r4) /* Load... */
+ add r4,r4,r5
+ bdnz 1b
+
+ msync
+ mr r4, r6
+ mtctr r7
+
+1: dcbf 0,r4 /* ...and flush. */
+ add r4,r4,r5
+ bdnz 1b
+
+ /* restore HID0 */
+ mtspr SPRN_HID0,r8
+ isync
+
+ wrtee r10
+
+ blr
+
+has_L2_cache:
+ /* skip L2 cache on P2040/P2040E as they have no L2 cache */
+ mfspr r3, SPRN_SVR
+ /* shift right by 8 bits and clear E bit of SVR */
+ rlwinm r4, r3, 24, ~0x800
+
+ lis r3, SVR_P2040@h
+ ori r3, r3, SVR_P2040@l
+ cmpw r4, r3
+ beq 1f
+
+ li r3, 1
+ blr
+1:
+ li r3, 0
+ blr
+
+/* flush backside L2 cache */
+flush_backside_L2_cache:
+ mflr r10
+ bl has_L2_cache
+ mtlr r10
+ cmpwi r3, 0
+ beq 2f
+
+ /* Flush the L2 cache */
+ mfspr r3, SPRN_L2CSR0
+ ori r3, r3, L2CSR0_L2FL@l
+ msync
+ isync
+ mtspr SPRN_L2CSR0,r3
+ isync
+
+ /* check if it is complete */
+1: mfspr r3,SPRN_L2CSR0
+ andi. r3, r3, L2CSR0_L2FL@l
+ bne 1b
+2:
+ blr
+
+_GLOBAL(__flush_caches_e500v2)
+ mflr r0
+ bl flush_dcache_L1
+ mtlr r0
+ blr
+
+_GLOBAL(__flush_caches_e500mc)
+_GLOBAL(__flush_caches_e5500)
+ mflr r0
+ bl flush_dcache_L1
+ bl flush_backside_L2_cache
+ mtlr r0
+ blr
+
+/* The L1 data cache of e6500 contains no modified data; no flush is required */
+_GLOBAL(__flush_caches_e6500)
+ blr
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f830468..10e48a7 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2021,6 +2021,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_e500v2,
.machine_check = machine_check_e500,
.platform = "ppc8548",
+ .cpu_flush_caches = __flush_caches_e500v2,
},
#else
{ /* e500mc */
@@ -2040,6 +2041,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_e500mc,
.machine_check = machine_check_e500mc,
.platform = "ppce500mc",
+ .cpu_flush_caches = __flush_caches_e500mc,
},
#endif /* CONFIG_PPC_E500MC */
#endif /* CONFIG_PPC32 */
@@ -2064,6 +2066,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
#endif
.machine_check = machine_check_e500mc,
.platform = "ppce5500",
+ .cpu_flush_caches = __flush_caches_e5500,
},
{ /* e6500 */
.pvr_mask = 0xffff0000,
@@ -2086,6 +2089,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
#endif
.machine_check = machine_check_e500mc,
.platform = "ppce6500",
+ .cpu_flush_caches = __flush_caches_e6500,
},
#endif /* CONFIG_PPC_E500MC */
#ifdef CONFIG_PPC32
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index fffd1f9..709bc50 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -1075,80 +1075,6 @@ _GLOBAL(set_context)
isync /* Force context change */
blr
-_GLOBAL(flush_dcache_L1)
- mfspr r3,SPRN_L1CFG0
-
- rlwinm r5,r3,9,3 /* Extract cache block size */
- twlgti r5,1 /* Only 32 and 64 byte cache blocks
- * are currently defined.
- */
- li r4,32
- subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) -
- * log2(number of ways)
- */
- slw r5,r4,r5 /* r5 = cache block size */
-
- rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */
- mulli r7,r7,13 /* An 8-way cache will require 13
- * loads per set.
- */
- slw r7,r7,r6
-
- /* save off HID0 and set DCFA */
- mfspr r8,SPRN_HID0
- ori r9,r8,HID0_DCFA@l
- mtspr SPRN_HID0,r9
- isync
-
- lis r4,KERNELBASE@h
- mtctr r7
-
-1: lwz r3,0(r4) /* Load... */
- add r4,r4,r5
- bdnz 1b
-
- msync
- lis r4,KERNELBASE@h
- mtctr r7
-
-1: dcbf 0,r4 /* ...and flush. */
- add r4,r4,r5
- bdnz 1b
-
- /* restore HID0 */
- mtspr SPRN_HID0,r8
- isync
-
- blr
-
-/* Flush L1 d-cache, invalidate and disable d-cache and i-cache */
-_GLOBAL(__flush_disable_L1)
- mflr r10
- bl flush_dcache_L1 /* Flush L1 d-cache */
- mtlr r10
-
- mfspr r4, SPRN_L1CSR0 /* Invalidate and disable d-cache */
- li r5, 2
- rlwimi r4, r5, 0, 3
-
- msync
- isync
- mtspr SPRN_L1CSR0, r4
- isync
-
-1: mfspr r4, SPRN_L1CSR0 /* Wait for the invalidate to finish */
- andi. r4, r4, 2
- bne 1b
-
- mfspr r4, SPRN_L1CSR1 /* Invalidate and disable i-cache */
- li r5, 2
- rlwimi r4, r5, 0, 3
-
- mtspr SPRN_L1CSR1, r4
- isync
-
- blr
-
#ifdef CONFIG_SMP
/* When we get here, r24 needs to hold the CPU # */
.globl __secondary_start
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index d7c1e69..fba474f 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -139,7 +139,8 @@ static void smp_85xx_mach_cpu_die(void)
mtspr(SPRN_TCR, 0);
- __flush_disable_L1();
+ cur_cpu_spec->cpu_flush_caches();
+
tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
mtspr(SPRN_HID0, tmp);
isync();
--
1.9.1
* [PATCH 2/4] powerpc/rcpm: add RCPM driver
From: Chenhui Zhao @ 2015-03-26 10:18 UTC
To: linuxppc-dev, devicetree; +Cc: linux-kernel, scottwood, leoli, Jason.Jin
There is an RCPM (Run Control/Power Management) block in Freescale QorIQ
series processors. It performs tasks associated with device run control
and power management.
The driver implements several features: masking/unmasking IRQs,
entering/exiting low power states, freezing the time base, etc.
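As a rough usage sketch (illustration only, not part of the patch), a
platform-side caller goes through the exported qoriq_pm_ops. The ops and
the E500_PM_* states are defined by this patch in asm/fsl_pm.h; the
wrapper function itself is hypothetical:

#include <asm/fsl_pm.h>

/* hypothetical caller: put one core into nap (PH15), then wake it later */
static void example_core_nap(int cpu)
{
	qoriq_pm_ops->irq_mask(cpu);		/* block wakeup sources */
	qoriq_pm_ops->cpu_enter_state(cpu, E500_PM_PH15);

	/* ... later, from another cpu ... */
	qoriq_pm_ops->cpu_exit_state(cpu, E500_PM_PH15);
	qoriq_pm_ops->irq_unmask(cpu);
}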
Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
---
Documentation/devicetree/bindings/soc/fsl/rcpm.txt | 23 ++
arch/powerpc/include/asm/fsl_guts.h | 105 ++++++
arch/powerpc/include/asm/fsl_pm.h | 49 +++
arch/powerpc/platforms/85xx/Kconfig | 1 +
arch/powerpc/sysdev/Kconfig | 5 +
arch/powerpc/sysdev/Makefile | 1 +
arch/powerpc/sysdev/fsl_rcpm.c | 353 +++++++++++++++++++++
7 files changed, 537 insertions(+)
create mode 100644 Documentation/devicetree/bindings/soc/fsl/rcpm.txt
create mode 100644 arch/powerpc/include/asm/fsl_pm.h
create mode 100644 arch/powerpc/sysdev/fsl_rcpm.c
diff --git a/Documentation/devicetree/bindings/soc/fsl/rcpm.txt b/Documentation/devicetree/bindings/soc/fsl/rcpm.txt
new file mode 100644
index 0000000..8c21b6c
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/rcpm.txt
@@ -0,0 +1,23 @@
+* Run Control and Power Management
+
+The RCPM performs all device-level tasks associated with device run control
+and power management.
+
+Required properties:
+ - reg : Offset and length of the register set of RCPM block.
+ - compatible : Specifies the compatibility list for the RCPM. The type
+ should be string, such as "fsl,qoriq-rcpm-1.0", "fsl,qoriq-rcpm-2.0".
+
+Example:
+The RCPM node for T4240:
+ rcpm: global-utilities@e2000 {
+ compatible = "fsl,t4240-rcpm", "fsl,qoriq-rcpm-2.0";
+ reg = <0xe2000 0x1000>;
+ };
+
+The RCPM node for P4080:
+ rcpm: global-utilities@e2000 {
+ compatible = "fsl,qoriq-rcpm-1.0";
+ reg = <0xe2000 0x1000>;
+ #sleep-cells = <1>;
+ };
diff --git a/arch/powerpc/include/asm/fsl_guts.h b/arch/powerpc/include/asm/fsl_guts.h
index 43b6bb1..96018ee 100644
--- a/arch/powerpc/include/asm/fsl_guts.h
+++ b/arch/powerpc/include/asm/fsl_guts.h
@@ -188,5 +188,110 @@ static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts,
#endif
+struct ccsr_rcpm_v1 {
+ u8 res0000[4];
+ __be32 cdozsr; /* 0x0004 Core Doze Status Register */
+ u8 res0008[4];
+ __be32 cdozcr; /* 0x000c Core Doze Control Register */
+ u8 res0010[4];
+ __be32 cnapsr; /* 0x0014 Core Nap Status Register */
+ u8 res0018[4];
+ __be32 cnapcr; /* 0x001c Core Nap Control Register */
+ u8 res0020[4];
+ __be32 cdozpsr; /* 0x0024 Core Doze Previous Status Register */
+ u8 res0028[4];
+ __be32 cnappsr; /* 0x002c Core Nap Previous Status Register */
+ u8 res0030[4];
+ __be32 cwaitsr; /* 0x0034 Core Wait Status Register */
+ u8 res0038[4];
+ __be32 cwdtdsr; /* 0x003c Core Watchdog Detect Status Register */
+ __be32 powmgtcsr; /* 0x0040 Power Management Control&Status Register */
+#define RCPM_POWMGTCSR_SLP 0x00020000
+ u8 res0044[12];
+ __be32 ippdexpcr; /* 0x0050 IP Powerdown Exception Control Register */
+ u8 res0054[16];
+ __be32 cpmimr; /* 0x0064 Core PM IRQ Mask Register */
+ u8 res0068[4];
+ __be32 cpmcimr; /* 0x006c Core PM Critical IRQ Mask Register */
+ u8 res0070[4];
+ __be32 cpmmcmr; /* 0x0074 Core PM Machine Check Mask Register */
+ u8 res0078[4];
+ __be32 cpmnmimr; /* 0x007c Core PM NMI Mask Register */
+ u8 res0080[4];
+ __be32 ctbenr; /* 0x0084 Core Time Base Enable Register */
+ u8 res0088[4];
+ __be32 ctbckselr; /* 0x008c Core Time Base Clock Select Register */
+ u8 res0090[4];
+ __be32 ctbhltcr; /* 0x0094 Core Time Base Halt Control Register */
+ u8 res0098[4];
+ __be32 cmcpmaskcr; /* 0x00a4 Core Machine Check Mask Register */
+};
+
+struct ccsr_rcpm_v2 {
+ u8 res_00[12];
+ __be32 tph10sr0; /* Thread PH10 Status Register */
+ u8 res_10[12];
+ __be32 tph10setr0; /* Thread PH10 Set Control Register */
+ u8 res_20[12];
+ __be32 tph10clrr0; /* Thread PH10 Clear Control Register */
+ u8 res_30[12];
+ __be32 tph10psr0; /* Thread PH10 Previous Status Register */
+ u8 res_40[12];
+ __be32 twaitsr0; /* Thread Wait Status Register */
+ u8 res_50[96];
+ __be32 pcph15sr; /* Physical Core PH15 Status Register */
+ __be32 pcph15setr; /* Physical Core PH15 Set Control Register */
+ __be32 pcph15clrr; /* Physical Core PH15 Clear Control Register */
+ __be32 pcph15psr; /* Physical Core PH15 Prev Status Register */
+ u8 res_c0[16];
+ __be32 pcph20sr; /* Physical Core PH20 Status Register */
+ __be32 pcph20setr; /* Physical Core PH20 Set Control Register */
+ __be32 pcph20clrr; /* Physical Core PH20 Clear Control Register */
+ __be32 pcph20psr; /* Physical Core PH20 Prev Status Register */
+ __be32 pcpw20sr; /* Physical Core PW20 Status Register */
+ u8 res_e0[12];
+ __be32 pcph30sr; /* Physical Core PH30 Status Register */
+ __be32 pcph30setr; /* Physical Core PH30 Set Control Register */
+ __be32 pcph30clrr; /* Physical Core PH30 Clear Control Register */
+ __be32 pcph30psr; /* Physical Core PH30 Prev Status Register */
+ u8 res_100[32];
+ __be32 ippwrgatecr; /* IP Power Gating Control Register */
+ u8 res_124[12];
+ __be32 powmgtcsr; /* Power Management Control & Status Reg */
+#define RCPM_POWMGTCSR_LPM20_RQ 0x00100000
+#define RCPM_POWMGTCSR_LPM20_ST 0x00000200
+#define RCPM_POWMGTCSR_P_LPM20_ST 0x00000100
+ u8 res_134[12];
+ __be32 ippdexpcr[4]; /* IP Powerdown Exception Control Reg */
+ u8 res_150[12];
+ __be32 tpmimr0; /* Thread PM Interrupt Mask Reg */
+ u8 res_160[12];
+ __be32 tpmcimr0; /* Thread PM Crit Interrupt Mask Reg */
+ u8 res_170[12];
+ __be32 tpmmcmr0; /* Thread PM Machine Check Interrupt Mask Reg */
+ u8 res_180[12];
+ __be32 tpmnmimr0; /* Thread PM NMI Mask Reg */
+ u8 res_190[12];
+ __be32 tmcpmaskcr0; /* Thread Machine Check Mask Control Reg */
+ __be32 pctbenr; /* Physical Core Time Base Enable Reg */
+ __be32 pctbclkselr; /* Physical Core Time Base Clock Select */
+ __be32 tbclkdivr; /* Time Base Clock Divider Register */
+ u8 res_1ac[4];
+ __be32 ttbhltcr[4]; /* Thread Time Base Halt Control Register */
+ __be32 clpcl10sr; /* Cluster PCL10 Status Register */
+ __be32 clpcl10setr; /* Cluster PCL30 Set Control Register */
+ __be32 clpcl10clrr; /* Cluster PCL30 Clear Control Register */
+ __be32 clpcl10psr; /* Cluster PCL30 Prev Status Register */
+ __be32 cddslpsetr; /* Core Domain Deep Sleep Set Register */
+ __be32 cddslpclrr; /* Core Domain Deep Sleep Clear Register */
+ __be32 cdpwroksetr; /* Core Domain Power OK Set Register */
+ __be32 cdpwrokclrr; /* Core Domain Power OK Clear Register */
+ __be32 cdpwrensr; /* Core Domain Power Enable Status Register */
+ __be32 cddslsr; /* Core Domain Deep Sleep Status Register */
+ u8 res_1e8[8];
+ __be32 dslpcntcr[8]; /* Deep Sleep Counter Cfg Register */
+ u8 res_300[3568];
+};
+
#endif
#endif
diff --git a/arch/powerpc/include/asm/fsl_pm.h b/arch/powerpc/include/asm/fsl_pm.h
new file mode 100644
index 0000000..bbe6089
--- /dev/null
+++ b/arch/powerpc/include/asm/fsl_pm.h
@@ -0,0 +1,49 @@
+/*
+ * Support Power Management
+ *
+ * Copyright 2014-2015 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef __PPC_FSL_PM_H
+#define __PPC_FSL_PM_H
+#ifdef __KERNEL__
+
+#define E500_PM_PH10 1
+#define E500_PM_PH15 2
+#define E500_PM_PH20 3
+#define E500_PM_PH30 4
+#define E500_PM_DOZE E500_PM_PH10
+#define E500_PM_NAP E500_PM_PH15
+
+#define PLAT_PM_SLEEP 20
+#define PLAT_PM_LPM20 30
+
+#define FSL_PM_SLEEP (1 << 0)
+#define FSL_PM_DEEP_SLEEP (1 << 1)
+
+struct fsl_pm_ops {
+ /* mask pending interrupts to the RCPM from MPIC */
+ void (*irq_mask)(int cpu);
+ /* unmask pending interrupts to the RCPM from MPIC */
+ void (*irq_unmask)(int cpu);
+ /* place the CPU in the specified state */
+ void (*cpu_enter_state)(int cpu, int state);
+ /* exit the CPU from the specified state */
+ void (*cpu_exit_state)(int cpu, int state);
+ /* place the platform in the sleep state */
+ int (*plat_enter_sleep)(void);
+ /* freeze the time base */
+ void (*freeze_time_base)(int freeze);
+ /* keep the power of IP blocks during sleep/deep sleep */
+ void (*set_ip_power)(int enable, u32 *mask);
+ /* get platform supported power management modes */
+ unsigned int (*get_pm_modes)(void);
+};
+
+extern const struct fsl_pm_ops *qoriq_pm_ops;
+#endif /* __KERNEL__ */
+#endif /* __PPC_FSL_PM_H */
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 2fb4b24..ae1b8a2 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -8,6 +8,7 @@ menuconfig FSL_SOC_BOOKE
select FSL_PCI if PCI
select SERIAL_8250_EXTENDED if SERIAL_8250
select SERIAL_8250_SHARE_IRQ if SERIAL_8250
+ select FSL_CORENET_RCPM if PPC_E500MC
default y
if FSL_SOC_BOOKE
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index a19332a..52dc165 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -40,3 +40,8 @@ config SCOM_DEBUGFS
config GE_FPGA
bool
default n
+
+config FSL_CORENET_RCPM
+ bool
+ help
+ This option enables support for RCPM (Run Control/Power Management).
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index f7cb2a1..8bbedfd 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o
obj-$(CONFIG_FSL_SOC) += fsl_soc.o fsl_mpic_err.o
obj-$(CONFIG_FSL_PCI) += fsl_pci.o $(fsl-msi-obj-y)
obj-$(CONFIG_FSL_PMC) += fsl_pmc.o
+obj-$(CONFIG_FSL_CORENET_RCPM) += fsl_rcpm.o
obj-$(CONFIG_FSL_LBC) += fsl_lbc.o
obj-$(CONFIG_FSL_GTM) += fsl_gtm.o
obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o
diff --git a/arch/powerpc/sysdev/fsl_rcpm.c b/arch/powerpc/sysdev/fsl_rcpm.c
new file mode 100644
index 0000000..e30f1bc
--- /dev/null
+++ b/arch/powerpc/sysdev/fsl_rcpm.c
@@ -0,0 +1,353 @@
+/*
+ * RCPM(Run Control/Power Management) support
+ *
+ * Copyright 2012-2015 Freescale Semiconductor Inc.
+ *
+ * Author: Chenhui Zhao <chenhui.zhao@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/of_address.h>
+#include <linux/export.h>
+
+#include <asm/io.h>
+#include <asm/fsl_guts.h>
+#include <asm/cputhreads.h>
+#include <asm/fsl_pm.h>
+
+#define RCPM_V1 1
+#define RCPM_V2 2
+
+const struct fsl_pm_ops *qoriq_pm_ops;
+
+static struct ccsr_rcpm_v1 __iomem *rcpm_v1_regs;
+static struct ccsr_rcpm_v2 __iomem *rcpm_v2_regs;
+static unsigned int fsl_supported_pm_modes;
+
+static void rcpm_v1_irq_mask(int cpu)
+{
+ int hw_cpu = get_hard_smp_processor_id(cpu);
+ unsigned int mask = 1 << hw_cpu;
+
+ setbits32(&rcpm_v1_regs->cpmimr, mask);
+ setbits32(&rcpm_v1_regs->cpmcimr, mask);
+ setbits32(&rcpm_v1_regs->cpmmcmr, mask);
+ setbits32(&rcpm_v1_regs->cpmnmimr, mask);
+}
+
+static void rcpm_v2_irq_mask(int cpu)
+{
+ int hw_cpu = get_hard_smp_processor_id(cpu);
+ unsigned int mask = 1 << hw_cpu;
+
+ setbits32(&rcpm_v2_regs->tpmimr0, mask);
+ setbits32(&rcpm_v2_regs->tpmcimr0, mask);
+ setbits32(&rcpm_v2_regs->tpmmcmr0, mask);
+ setbits32(&rcpm_v2_regs->tpmnmimr0, mask);
+}
+
+static void rcpm_v1_irq_unmask(int cpu)
+{
+ int hw_cpu = get_hard_smp_processor_id(cpu);
+ unsigned int mask = 1 << hw_cpu;
+
+ clrbits32(&rcpm_v1_regs->cpmimr, mask);
+ clrbits32(&rcpm_v1_regs->cpmcimr, mask);
+ clrbits32(&rcpm_v1_regs->cpmmcmr, mask);
+ clrbits32(&rcpm_v1_regs->cpmnmimr, mask);
+}
+
+static void rcpm_v2_irq_unmask(int cpu)
+{
+ int hw_cpu = get_hard_smp_processor_id(cpu);
+ unsigned int mask = 1 << hw_cpu;
+
+ clrbits32(&rcpm_v2_regs->tpmimr0, mask);
+ clrbits32(&rcpm_v2_regs->tpmcimr0, mask);
+ clrbits32(&rcpm_v2_regs->tpmmcmr0, mask);
+ clrbits32(&rcpm_v2_regs->tpmnmimr0, mask);
+}
+
+static void rcpm_v1_set_ip_power(int enable, u32 *mask)
+{
+ if (enable)
+ setbits32(&rcpm_v1_regs->ippdexpcr, *mask);
+ else
+ clrbits32(&rcpm_v1_regs->ippdexpcr, *mask);
+}
+
+static void rcpm_v2_set_ip_power(int enable, u32 *mask)
+{
+ if (enable)
+ setbits32(&rcpm_v2_regs->ippdexpcr[0], *mask);
+ else
+ clrbits32(&rcpm_v2_regs->ippdexpcr[0], *mask);
+}
+
+static void rcpm_v1_cpu_enter_state(int cpu, int state)
+{
+ int hw_cpu = get_hard_smp_processor_id(cpu);
+ unsigned int mask = 1 << hw_cpu;
+
+ switch (state) {
+ case E500_PM_PH10:
+ setbits32(&rcpm_v1_regs->cdozcr, mask);
+ break;
+ case E500_PM_PH15:
+ setbits32(&rcpm_v1_regs->cnapcr, mask);
+ break;
+ default:
+ pr_err("%s: Unknown cpu PM state (%d)\n", __func__, state);
+ break;
+ }
+}
+
+static void rcpm_v2_cpu_enter_state(int cpu, int state)
+{
+ int hw_cpu = get_hard_smp_processor_id(cpu);
+ u32 mask = 1 << cpu_core_index_of_thread(hw_cpu);
+
+ switch (state) {
+ case E500_PM_PH10:
+ /* one bit corresponds to one thread for PH10 on e6500 */
+ setbits32(&rcpm_v2_regs->tph10setr0, 1 << hw_cpu);
+ break;
+ case E500_PM_PH15:
+ setbits32(&rcpm_v2_regs->pcph15setr, mask);
+ break;
+ case E500_PM_PH20:
+ setbits32(&rcpm_v2_regs->pcph20setr, mask);
+ break;
+ case E500_PM_PH30:
+ setbits32(&rcpm_v2_regs->pcph30setr, mask);
+ break;
+ default:
+ pr_err("%s: Unknown cpu PM state (%d)\n", __func__, state);
+ }
+}
+
+static void rcpm_v1_cpu_exit_state(int cpu, int state)
+{
+ int hw_cpu = get_hard_smp_processor_id(cpu);
+ unsigned int mask = 1 << hw_cpu;
+
+ switch (state) {
+ case E500_PM_PH10:
+ clrbits32(&rcpm_v1_regs->cdozcr, mask);
+ break;
+ case E500_PM_PH15:
+ clrbits32(&rcpm_v1_regs->cnapcr, mask);
+ break;
+ default:
+ pr_err("%s: Unknown cpu PM state (%d)\n", __func__, state);
+ break;
+ }
+}
+
+static void rcpm_v2_cpu_exit_state(int cpu, int state)
+{
+ int hw_cpu = get_hard_smp_processor_id(cpu);
+ u32 mask = 1 << cpu_core_index_of_thread(hw_cpu);
+
+ switch (state) {
+ case E500_PM_PH10:
+ setbits32(&rcpm_v2_regs->tph10clrr0, 1 << hw_cpu);
+ break;
+ case E500_PM_PH15:
+ setbits32(&rcpm_v2_regs->pcph15clrr, mask);
+ break;
+ case E500_PM_PH20:
+ setbits32(&rcpm_v2_regs->pcph20clrr, mask);
+ break;
+ case E500_PM_PH30:
+ setbits32(&rcpm_v2_regs->pcph30clrr, mask);
+ break;
+ default:
+ pr_err("%s: Unknown cpu PM state (%d)\n", __func__, state);
+ }
+}
+
+static int rcpm_v1_plat_enter_state(int state)
+{
+ u32 *pmcsr_reg = &rcpm_v1_regs->powmgtcsr;
+ int ret = 0;
+ int result;
+
+ switch (state) {
+ case PLAT_PM_SLEEP:
+ setbits32(pmcsr_reg, RCPM_POWMGTCSR_SLP);
+
+ /* At this point, the device is in sleep mode. */
+
+ /* Upon resume, wait for RCPM_POWMGTCSR_SLP bit to be clear. */
+ result = spin_event_timeout(
+ !(in_be32(pmcsr_reg) & RCPM_POWMGTCSR_SLP), 10000, 10);
+ if (!result) {
+ pr_err("%s: timeout waiting for SLP bit to be cleared\n",
+ __func__);
+ ret = -ETIMEDOUT;
+ }
+ break;
+ default:
+ pr_err("%s: Unknown platform PM state (%d)\n",
+ __func__, state);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int rcpm_v2_plat_enter_state(int state)
+{
+ u32 *pmcsr_reg = &rcpm_v2_regs->powmgtcsr;
+ int ret = 0;
+ int result;
+
+ switch (state) {
+ case PLAT_PM_LPM20:
+ /* clear previous LPM20 status */
+ setbits32(pmcsr_reg, RCPM_POWMGTCSR_P_LPM20_ST);
+ /* enter LPM20 status */
+ setbits32(pmcsr_reg, RCPM_POWMGTCSR_LPM20_RQ);
+
+ /* At this point, the device is in LPM20 status. */
+
+ /* resume ... */
+ result = spin_event_timeout(
+ !(in_be32(pmcsr_reg) & RCPM_POWMGTCSR_LPM20_ST), 10000, 10);
+ if (!result) {
+ pr_err("%s: timeout waiting for LPM20 bit to be cleared\n",
+ __func__);
+ ret = -ETIMEDOUT;
+ }
+ break;
+ default:
+ pr_err("%s: Unknown platform PM state (%d)\n",
+ __func__, state);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int rcpm_v1_plat_enter_sleep(void)
+{
+ return rcpm_v1_plat_enter_state(PLAT_PM_SLEEP);
+}
+
+static int rcpm_v2_plat_enter_sleep(void)
+{
+ return rcpm_v2_plat_enter_state(PLAT_PM_LPM20);
+}
+
+static void rcpm_common_freeze_time_base(u32 *tben_reg, int freeze)
+{
+ static u32 mask;
+
+ if (freeze) {
+ mask = in_be32(tben_reg);
+ clrbits32(tben_reg, mask);
+ } else {
+ setbits32(tben_reg, mask);
+ }
+
+ /* read back to push the previous write */
+ in_be32(tben_reg);
+}
+
+static void rcpm_v1_freeze_time_base(int freeze)
+{
+ rcpm_common_freeze_time_base(&rcpm_v1_regs->ctbenr, freeze);
+}
+
+static void rcpm_v2_freeze_time_base(int freeze)
+{
+ rcpm_common_freeze_time_base(&rcpm_v2_regs->pctbenr, freeze);
+}
+
+static unsigned int rcpm_get_pm_modes(void)
+{
+ return fsl_supported_pm_modes;
+}
+
+static const struct fsl_pm_ops qoriq_rcpm_v1_ops = {
+ .irq_mask = rcpm_v1_irq_mask,
+ .irq_unmask = rcpm_v1_irq_unmask,
+ .cpu_enter_state = rcpm_v1_cpu_enter_state,
+ .cpu_exit_state = rcpm_v1_cpu_exit_state,
+ .plat_enter_sleep = rcpm_v1_plat_enter_sleep,
+ .set_ip_power = rcpm_v1_set_ip_power,
+ .freeze_time_base = rcpm_v1_freeze_time_base,
+ .get_pm_modes = rcpm_get_pm_modes,
+};
+
+static const struct fsl_pm_ops qoriq_rcpm_v2_ops = {
+ .irq_mask = rcpm_v2_irq_mask,
+ .irq_unmask = rcpm_v2_irq_unmask,
+ .cpu_enter_state = rcpm_v2_cpu_enter_state,
+ .cpu_exit_state = rcpm_v2_cpu_exit_state,
+ .plat_enter_sleep = rcpm_v2_plat_enter_sleep,
+ .set_ip_power = rcpm_v2_set_ip_power,
+ .freeze_time_base = rcpm_v2_freeze_time_base,
+ .get_pm_modes = rcpm_get_pm_modes,
+};
+
+static const struct of_device_id rcpm_matches[] = {
+ {
+ .compatible = "fsl,qoriq-rcpm-1.0",
+ .data = (void *)RCPM_V1,
+ },
+ {
+ .compatible = "fsl,qoriq-rcpm-2.0",
+ .data = (void *)RCPM_V2,
+ },
+ {},
+};
+
+int fsl_rcpm_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ void __iomem *base;
+
+ np = of_find_matching_node_and_match(NULL, rcpm_matches, &match);
+ if (!np) {
+ pr_err("%s: can't find the rcpm node.\n", __func__);
+ return -ENODEV;
+ }
+
+ base = of_iomap(np, 0);
+ if (!base) {
+ pr_err("%s: of_iomap() error.\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* support sleep by default */
+ fsl_supported_pm_modes = FSL_PM_SLEEP;
+ of_node_put(np);
+
+ switch ((unsigned long)match->data) {
+ case RCPM_V1:
+ rcpm_v1_regs = base;
+ qoriq_pm_ops = &qoriq_rcpm_v1_ops;
+ break;
+
+ case RCPM_V2:
+ rcpm_v2_regs = base;
+ qoriq_pm_ops = &qoriq_rcpm_v2_ops;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/* need to call this before SMP init */
+early_initcall(fsl_rcpm_init);
--
1.9.1
* [PATCH 3/4] powerpc: support CPU hotplug for e500mc, e5500 and e6500
From: Chenhui Zhao @ 2015-03-26 10:18 UTC
To: linuxppc-dev, devicetree; +Cc: linux-kernel, scottwood, leoli, Jason.Jin
Implement CPU hotplug on e500mc, e5500 and e6500, with support for
multi-threaded mode and 64-bit mode.
For e6500 with two threads per core, if one thread is online, it can
enable/disable the other thread of the same core. If both threads of a
core are offline, the core enters the PH20 state (a low power state).
When the core comes up again, Thread 0 comes up first and is bound to
the logical cpu currently being booted. This way, all CPUs can be
hotplugged separately.
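Condensed into a hypothetical sketch (illustration only; wake_hw_thread,
cpu_up and the cpu helpers are the ones added or used by this series, and
the wrapper is assumed to live next to the code in platforms/85xx/smp.c),
bringing up a secondary thread whose sibling is already online looks
roughly like this:

/* hypothetical outline of onlining one e6500 thread via its sibling */
static void example_kick_thread(int nr)
{
	int primary = cpu_first_thread_sibling(nr);
	int helper = cpu_online(primary) ? primary : primary + 1;

	qoriq_pm_ops->cpu_up(nr);	/* exit PH20 and unmask RCPM irqs */
	/* the online sibling writes IMSR/INIA and sets TENS for thread nr */
	smp_call_function_single(helper, wake_hw_thread, &nr, 1);
}

When both threads of the core are offline, the patch instead resets the
whole core through the spin-table/mpic_reset_core() path, as shown in the
diff below.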
Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
---
arch/powerpc/Kconfig | 2 +-
arch/powerpc/include/asm/fsl_pm.h | 4 +
arch/powerpc/include/asm/smp.h | 2 +
arch/powerpc/kernel/head_64.S | 20 +++--
arch/powerpc/kernel/smp.c | 5 ++
arch/powerpc/platforms/85xx/smp.c | 182 +++++++++++++++++++++++++++++---------
arch/powerpc/sysdev/fsl_rcpm.c | 56 ++++++++++++
7 files changed, 220 insertions(+), 51 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 22b0940..9846c83 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -380,7 +380,7 @@ config SWIOTLB
config HOTPLUG_CPU
bool "Support for enabling/disabling CPUs"
depends on SMP && (PPC_PSERIES || \
- PPC_PMAC || PPC_POWERNV || (PPC_85xx && !PPC_E500MC))
+ PPC_PMAC || PPC_POWERNV || FSL_SOC_BOOKE)
---help---
Say Y here to be able to disable and re-enable individual
CPUs at runtime on SMP machines.
diff --git a/arch/powerpc/include/asm/fsl_pm.h b/arch/powerpc/include/asm/fsl_pm.h
index bbe6089..579f495 100644
--- a/arch/powerpc/include/asm/fsl_pm.h
+++ b/arch/powerpc/include/asm/fsl_pm.h
@@ -34,6 +34,10 @@ struct fsl_pm_ops {
void (*cpu_enter_state)(int cpu, int state);
/* exit the CPU from the specified state */
void (*cpu_exit_state)(int cpu, int state);
+ /* cpu up */
+ void (*cpu_up)(int cpu);
+ /* cpu die */
+ void (*cpu_die)(int cpu);
/* place the platform in the sleep state */
int (*plat_enter_sleep)(void);
/* freeze the time base */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index d607df5..1e500ed 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -67,6 +67,7 @@ void generic_cpu_die(unsigned int cpu);
void generic_set_cpu_dead(unsigned int cpu);
void generic_set_cpu_up(unsigned int cpu);
int generic_check_cpu_restart(unsigned int cpu);
+int generic_check_cpu_dead(unsigned int cpu);
#endif
#ifdef CONFIG_PPC64
@@ -198,6 +199,7 @@ extern void generic_secondary_thread_init(void);
extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;
extern char __secondary_hold;
+extern unsigned int __cur_boot_cpu;
extern void __early_start(void);
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index d48125d..ac89050 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -181,6 +181,10 @@ exception_marker:
#endif
#ifdef CONFIG_PPC_BOOK3E
+ .globl __cur_boot_cpu
+__cur_boot_cpu:
+ .long 0x0
+ .align 3
_GLOBAL(fsl_secondary_thread_init)
/* Enable branch prediction */
lis r3,BUCSR_INIT@h
@@ -189,16 +193,14 @@ _GLOBAL(fsl_secondary_thread_init)
isync
/*
- * Fix PIR to match the linear numbering in the device tree.
- *
- * On e6500, the reset value of PIR uses the low three bits for
- * the thread within a core, and the upper bits for the core
- * number. There are two threads per core, so shift everything
- * but the low bit right by two bits so that the cpu numbering is
- * continuous.
+ * The current thread is already running in 64-bit mode
+ * (see the value written to TMRN_IMSR).
+ * Compute the address of __cur_boot_cpu.
*/
- mfspr r3, SPRN_PIR
- rlwimi r3, r3, 30, 2, 30
+ bl 10f
+10: mflr r22
+ addi r22,r22,(__cur_boot_cpu - 10b)
+ lwz r3,0(r22)
mtspr SPRN_PIR, r3
#endif
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ec9ec20..2cca27a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -454,6 +454,11 @@ int generic_check_cpu_restart(unsigned int cpu)
return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}
+int generic_check_cpu_dead(unsigned int cpu)
+{
+ return per_cpu(cpu_state, cpu) == CPU_DEAD;
+}
+
static bool secondaries_inhibited(void)
{
return kvm_hv_mode_active();
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index fba474f..f51441b 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -2,7 +2,7 @@
* Author: Andy Fleming <afleming@freescale.com>
* Kumar Gala <galak@kernel.crashing.org>
*
- * Copyright 2006-2008, 2011-2012 Freescale Semiconductor Inc.
+ * Copyright 2006-2008, 2011-2012, 2015 Freescale Semiconductor Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -29,6 +29,7 @@
#include <asm/fsl_guts.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
+#include <asm/fsl_pm.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
@@ -43,10 +44,20 @@ struct epapr_spin_table {
u32 pir;
};
-static struct ccsr_guts __iomem *guts;
+#ifdef CONFIG_HOTPLUG_CPU
static u64 timebase;
static int tb_req;
static int tb_valid;
+/* if it is non-zero, synchronize time base */
+static int sync_tb;
+
+#ifdef CONFIG_PPC_E500MC
+static void mpc85xx_timebase_freeze(int freeze)
+{
+ qoriq_pm_ops->freeze_time_base(freeze);
+}
+#else
+static struct ccsr_guts __iomem *guts;
static void mpc85xx_timebase_freeze(int freeze)
{
@@ -60,11 +71,15 @@ static void mpc85xx_timebase_freeze(int freeze)
in_be32(&guts->devdisr);
}
+#endif
static void mpc85xx_give_timebase(void)
{
unsigned long flags;
+ if (!sync_tb)
+ return;
+
local_irq_save(flags);
while (!tb_req)
@@ -113,6 +128,9 @@ static void mpc85xx_take_timebase(void)
{
unsigned long flags;
+ if (!sync_tb)
+ return;
+
local_irq_save(flags);
tb_req = 1;
@@ -126,7 +144,46 @@ static void mpc85xx_take_timebase(void)
local_irq_restore(flags);
}
-#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_PPC_E500MC
+static void qoriq_cpu_wait_die(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ hard_irq_disable();
+ /* mask all irqs to prevent cpu wakeup */
+ qoriq_pm_ops->irq_mask(cpu);
+ idle_task_exit();
+
+ mtspr(SPRN_TCR, 0);
+ mtspr(SPRN_TSR, mfspr(SPRN_TSR));
+
+ cur_cpu_spec->cpu_flush_caches();
+
+ generic_set_cpu_dead(cpu);
+ smp_mb();
+ while (1)
+ ;
+}
+
+static void qoriq_real_cpu_die(unsigned int cpu)
+{
+ int i;
+
+ for (i = 0; i < 100; i++) {
+ smp_rmb();
+ if (generic_check_cpu_dead(cpu)) {
+ qoriq_pm_ops->cpu_die(cpu);
+#ifdef CONFIG_PPC64
+ paca[cpu].cpu_start = 0;
+#endif
+ return;
+ }
+ msleep(10);
+ }
+ pr_err("%s: CPU%d didn't die...\n", __func__, cpu);
+}
+
+#else
static void smp_85xx_mach_cpu_die(void)
{
unsigned int cpu = smp_processor_id();
@@ -156,6 +213,7 @@ static void smp_85xx_mach_cpu_die(void)
;
}
#endif
+#endif /* CONFIG_HOTPLUG_CPU */
static inline void flush_spin_table(void *spin_table)
{
@@ -174,17 +232,29 @@ static inline u32 read_spin_table_addr_l(void *spin_table)
static void wake_hw_thread(void *info)
{
void fsl_secondary_thread_init(void);
- unsigned long imsr1, inia1;
+ unsigned long imsr, inia;
int nr = *(const int *)info;
-
- imsr1 = MSR_KERNEL;
- inia1 = *(unsigned long *)fsl_secondary_thread_init;
-
- mttmr(TMRN_IMSR1, imsr1);
- mttmr(TMRN_INIA1, inia1);
- mtspr(SPRN_TENS, TEN_THREAD(1));
+ int hw_cpu = get_hard_smp_processor_id(nr);
+ int thread_idx = cpu_thread_in_core(hw_cpu);
+
+ __cur_boot_cpu = (u32)hw_cpu;
+ imsr = MSR_KERNEL;
+ inia = *(unsigned long *)fsl_secondary_thread_init;
+ smp_mb();
+ if (thread_idx == 0) {
+ mttmr(TMRN_IMSR0, imsr);
+ mttmr(TMRN_INIA0, inia);
+ } else {
+ mttmr(TMRN_IMSR1, imsr);
+ mttmr(TMRN_INIA1, inia);
+ }
+ isync();
+ mtspr(SPRN_TENS, TEN_THREAD(thread_idx));
smp_generic_kick_cpu(nr);
+#ifdef CONFIG_HOTPLUG_CPU
+ generic_set_cpu_up(nr);
+#endif
}
#endif
@@ -203,28 +273,46 @@ static int smp_85xx_kick_cpu(int nr)
pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
+#ifdef CONFIG_HOTPLUG_CPU
+ sync_tb = 0;
+ smp_mb();
+#endif
#ifdef CONFIG_PPC64
- /* Threads don't use the spin table */
- if (cpu_thread_in_core(nr) != 0) {
+ if (threads_per_core > 1) {
int primary = cpu_first_thread_sibling(nr);
if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
return -ENOENT;
- if (cpu_thread_in_core(nr) != 1) {
- pr_err("%s: cpu %d: invalid hw thread %d\n",
- __func__, nr, cpu_thread_in_core(nr));
- return -ENOENT;
+ /*
+ * If either of the two threads in the same core is online,
+ * use the online one to start the other.
+ */
+ if (cpu_online(primary) || cpu_online(primary + 1)) {
+ qoriq_pm_ops->cpu_up(nr);
+ if (cpu_online(primary))
+ smp_call_function_single(primary,
+ wake_hw_thread, &nr, 1);
+ else
+ smp_call_function_single(primary + 1,
+ wake_hw_thread, &nr, 1);
+ return 0;
}
-
- if (!cpu_online(primary)) {
- pr_err("%s: cpu %d: primary %d not online\n",
- __func__, nr, primary);
- return -ENOENT;
+ /*
+ * If both threads are offline, reset the core to start it.
+ * When the core comes up, Thread 0 always comes up first,
+ * so bind the current logical cpu to Thread 0.
+ */
+ if (hw_cpu != cpu_first_thread_sibling(hw_cpu)) {
+ int hw_cpu1, hw_cpu2;
+
+ hw_cpu1 = get_hard_smp_processor_id(primary);
+ hw_cpu2 = get_hard_smp_processor_id(primary + 1);
+ set_hard_smp_processor_id(primary, hw_cpu2);
+ set_hard_smp_processor_id(primary + 1, hw_cpu1);
+ /* get new physical cpu id */
+ hw_cpu = get_hard_smp_processor_id(nr);
}
-
- smp_call_function_single(primary, wake_hw_thread, &nr, 0);
- return 0;
}
#endif
@@ -252,11 +340,7 @@ static int smp_85xx_kick_cpu(int nr)
spin_table = phys_to_virt(*cpu_rel_addr);
local_irq_save(flags);
-#ifdef CONFIG_PPC32
#ifdef CONFIG_HOTPLUG_CPU
- /* Corresponding to generic_set_cpu_dead() */
- generic_set_cpu_up(nr);
-
if (system_state == SYSTEM_RUNNING) {
/*
* To keep it compatible with old boot program which uses
@@ -269,11 +353,16 @@ static int smp_85xx_kick_cpu(int nr)
out_be32(&spin_table->addr_l, 0);
flush_spin_table(spin_table);
+#ifdef CONFIG_PPC_E500MC
+ qoriq_pm_ops->cpu_up(nr);
+#endif
/*
* We don't set the BPTR register here since it already points
* to the boot page properly.
*/
mpic_reset_core(nr);
+ sync_tb = 1;
+ smp_mb();
/*
* wait until core is ready...
@@ -292,7 +381,12 @@ static int smp_85xx_kick_cpu(int nr)
/* clear the acknowledge status */
__secondary_hold_acknowledge = -1;
}
+
+ /* Corresponding to generic_set_cpu_dead() */
+ generic_set_cpu_up(nr);
#endif
+
+#ifdef CONFIG_PPC32
flush_spin_table(spin_table);
out_be32(&spin_table->pir, hw_cpu);
out_be32(&spin_table->addr_l, __pa(__early_start));
@@ -304,9 +398,7 @@ static int smp_85xx_kick_cpu(int nr)
pr_err("%s: timeout waiting for core %d to ack\n",
__func__, hw_cpu);
ret = -ENOENT;
- goto out;
}
-out:
#else
smp_generic_kick_cpu(nr);
@@ -317,6 +409,9 @@ out:
flush_spin_table(spin_table);
#endif
+#ifdef CONFIG_HOTPLUG_CPU
+out:
+#endif
local_irq_restore(flags);
if (ioremappable)
@@ -328,10 +423,6 @@ out:
struct smp_ops_t smp_85xx_ops = {
.kick_cpu = smp_85xx_kick_cpu,
.cpu_bootable = smp_generic_cpu_bootable,
-#ifdef CONFIG_HOTPLUG_CPU
- .cpu_disable = generic_cpu_disable,
- .cpu_die = generic_cpu_die,
-#endif
#ifdef CONFIG_KEXEC
.give_timebase = smp_generic_give_timebase,
.take_timebase = smp_generic_take_timebase,
@@ -447,6 +538,7 @@ static void smp_85xx_setup_cpu(int cpu_nr)
smp_85xx_basic_setup(cpu_nr);
}
+#ifdef CONFIG_HOTPLUG_CPU
static const struct of_device_id mpc85xx_smp_guts_ids[] = {
{ .compatible = "fsl,mpc8572-guts", },
{ .compatible = "fsl,p1020-guts", },
@@ -456,12 +548,12 @@ static const struct of_device_id mpc85xx_smp_guts_ids[] = {
{ .compatible = "fsl,p2020-guts", },
{},
};
+#endif
void __init mpc85xx_smp_init(void)
{
struct device_node *np;
-
np = of_find_node_by_type(NULL, "open-pic");
if (np) {
smp_85xx_ops.probe = smp_mpic_probe;
@@ -480,6 +572,11 @@ void __init mpc85xx_smp_init(void)
smp_85xx_ops.probe = NULL;
}
+#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_PPC_E500MC
+ smp_85xx_ops.cpu_die = qoriq_real_cpu_die;
+ ppc_md.cpu_die = qoriq_cpu_wait_die;
+#else
np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
if (np) {
guts = of_iomap(np, 0);
@@ -489,13 +586,16 @@ void __init mpc85xx_smp_init(void)
__func__);
return;
}
- smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
- smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
-#ifdef CONFIG_HOTPLUG_CPU
- ppc_md.cpu_die = smp_85xx_mach_cpu_die;
-#endif
}
+ smp_85xx_ops.cpu_die = generic_cpu_die;
+ ppc_md.cpu_die = smp_85xx_mach_cpu_die;
+#endif
+ smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
+ smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
+ smp_85xx_ops.cpu_disable = generic_cpu_disable;
+#endif /* CONFIG_HOTPLUG_CPU */
+
smp_ops = &smp_85xx_ops;
#ifdef CONFIG_KEXEC
diff --git a/arch/powerpc/sysdev/fsl_rcpm.c b/arch/powerpc/sysdev/fsl_rcpm.c
index e30f1bc..a507fd0 100644
--- a/arch/powerpc/sysdev/fsl_rcpm.c
+++ b/arch/powerpc/sysdev/fsl_rcpm.c
@@ -131,6 +131,46 @@ static void rcpm_v2_cpu_enter_state(int cpu, int state)
}
}
+static void rcpm_v1_cpu_die(int cpu)
+{
+ rcpm_v1_cpu_enter_state(cpu, E500_PM_PH15);
+}
+
+static void qoriq_disable_thread(void *info)
+{
+ int hw_cpu = get_hard_smp_processor_id(*(const int *)info);
+ int thread = cpu_thread_in_core(hw_cpu);
+
+ mtspr(SPRN_TENC, TEN_THREAD(thread));
+}
+
+static void rcpm_v2_cpu_die(int cpu)
+{
+ int primary;
+
+ if (threads_per_core == 1) {
+ rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
+ return;
+ }
+
+ primary = cpu_first_thread_sibling(cpu);
+ if (cpu_is_offline(primary) && cpu_is_offline(primary + 1)) {
+ /* when both threads are offline, put the core in PH20 */
+ rcpm_v2_cpu_enter_state(cpu, E500_PM_PH20);
+ } else {
+ /*
+ * When one thread is offline, disable the thread
+ * by running qoriq_disable_thread() on the other thread.
+ */
+ if (cpu_online(primary))
+ smp_call_function_single(primary,
+ qoriq_disable_thread, &cpu, 1);
+ else
+ smp_call_function_single(primary + 1,
+ qoriq_disable_thread, &cpu, 1);
+ }
+}
+
static void rcpm_v1_cpu_exit_state(int cpu, int state)
{
int hw_cpu = get_hard_smp_processor_id(cpu);
@@ -149,6 +189,12 @@ static void rcpm_v1_cpu_exit_state(int cpu, int state)
}
}
+static void rcpm_v1_cpu_up(int cpu)
+{
+ rcpm_v1_cpu_exit_state(cpu, E500_PM_PH15);
+ rcpm_v1_irq_unmask(cpu);
+}
+
static void rcpm_v2_cpu_exit_state(int cpu, int state)
{
int hw_cpu = get_hard_smp_processor_id(cpu);
@@ -172,6 +218,12 @@ static void rcpm_v2_cpu_exit_state(int cpu, int state)
}
}
+static void rcpm_v2_cpu_up(int cpu)
+{
+ rcpm_v2_cpu_exit_state(cpu, E500_PM_PH20);
+ rcpm_v2_irq_unmask(cpu);
+}
+
static int rcpm_v1_plat_enter_state(int state)
{
u32 *pmcsr_reg = &rcpm_v1_regs->powmgtcsr;
@@ -280,6 +332,8 @@ static const struct fsl_pm_ops qoriq_rcpm_v1_ops = {
.irq_unmask = rcpm_v1_irq_unmask,
.cpu_enter_state = rcpm_v1_cpu_enter_state,
.cpu_exit_state = rcpm_v1_cpu_exit_state,
+ .cpu_up = rcpm_v1_cpu_up,
+ .cpu_die = rcpm_v1_cpu_die,
.plat_enter_sleep = rcpm_v1_plat_enter_sleep,
.set_ip_power = rcpm_v1_set_ip_power,
.freeze_time_base = rcpm_v1_freeze_time_base,
@@ -291,6 +345,8 @@ static const struct fsl_pm_ops qoriq_rcpm_v2_ops = {
.irq_unmask = rcpm_v2_irq_unmask,
.cpu_enter_state = rcpm_v2_cpu_enter_state,
.cpu_exit_state = rcpm_v2_cpu_exit_state,
+ .cpu_up = rcpm_v2_cpu_up,
+ .cpu_die = rcpm_v2_cpu_die,
.plat_enter_sleep = rcpm_v2_plat_enter_sleep,
.set_ip_power = rcpm_v2_set_ip_power,
.freeze_time_base = rcpm_v2_freeze_time_base,
--
1.9.1
* [PATCH 4/4] powerpc/85xx: support sleep feature on QorIQ SoCs with RCPM
From: Chenhui Zhao @ 2015-03-26 10:18 UTC
To: linuxppc-dev, devicetree; +Cc: linux-kernel, scottwood, leoli, Jason.Jin
In sleep mode, the clocks of the e500 cores and of unused IP blocks are
turned off. The IP blocks which are allowed to wake up the processor
keep running.
Sleep mode corresponds to the Standby state in Linux. Enter it with:
echo standby > /sys/power/state
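For reference, a minimal sketch of what the standby request boils down to
once this series is applied (cpu_flush_caches comes from patch 1 and
qoriq_pm_ops from patch 2; the wrapper itself is illustrative only):

#include <asm/cputable.h>
#include <asm/fsl_pm.h>

/* hypothetical summary of the PM_SUSPEND_STANDBY path */
static int example_standby(void)
{
	if (!(qoriq_pm_ops->get_pm_modes() & FSL_PM_SLEEP))
		return -EINVAL;		/* platform cannot sleep */

	cur_cpu_spec->cpu_flush_caches();	/* flush caches before sleep */
	return qoriq_pm_ops->plat_enter_sleep();	/* RCPM sleep/LPM20 */
}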
Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
---
arch/powerpc/Kconfig | 3 +-
arch/powerpc/platforms/85xx/Kconfig | 5 +++
arch/powerpc/platforms/85xx/Makefile | 1 +
arch/powerpc/platforms/85xx/qoriq_pm.c | 59 ++++++++++++++++++++++++++++++++++
arch/powerpc/platforms/86xx/Kconfig | 1 +
5 files changed, 67 insertions(+), 2 deletions(-)
create mode 100644 arch/powerpc/platforms/85xx/qoriq_pm.c
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9846c83..162eb53 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -233,7 +233,7 @@ config ARCH_HIBERNATION_POSSIBLE
config ARCH_SUSPEND_POSSIBLE
def_bool y
depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
- (PPC_85xx && !PPC_E500MC) || PPC_86xx || PPC_PSERIES \
+ FSL_SOC_BOOKE || PPC_86xx || PPC_PSERIES \
|| 44x || 40x
config PPC_DCR_NATIVE
@@ -747,7 +747,6 @@ config FSL_PCI
config FSL_PMC
bool
- default y
depends on SUSPEND && (PPC_85xx || PPC_86xx)
help
Freescale MPC85xx/MPC86xx power management controller support
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index ae1b8a2..b7c762e 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -9,6 +9,8 @@ menuconfig FSL_SOC_BOOKE
select SERIAL_8250_EXTENDED if SERIAL_8250
select SERIAL_8250_SHARE_IRQ if SERIAL_8250
select FSL_CORENET_RCPM if PPC_E500MC
+ select FSL_QORIQ_PM if SUSPEND && PPC_E500MC
+ select FSL_PMC if SUSPEND && !PPC_E500MC
default y
if FSL_SOC_BOOKE
@@ -289,3 +291,6 @@ endif # FSL_SOC_BOOKE
config TQM85xx
bool
+
+config FSL_QORIQ_PM
+ bool
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 1fe7fb9..65dfb60 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -2,6 +2,7 @@
# Makefile for the PowerPC 85xx linux kernel.
#
obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_FSL_QORIQ_PM) += qoriq_pm.o
obj-y += common.o
diff --git a/arch/powerpc/platforms/85xx/qoriq_pm.c b/arch/powerpc/platforms/85xx/qoriq_pm.c
new file mode 100644
index 0000000..7594f08
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/qoriq_pm.c
@@ -0,0 +1,59 @@
+/*
+ * Support Power Management feature
+ *
+ * Copyright 2014-2015 Freescale Semiconductor Inc.
+ *
+ * Author: Chenhui Zhao <chenhui.zhao@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/suspend.h>
+#include <linux/of_platform.h>
+
+#include <asm/fsl_pm.h>
+
+static int qoriq_suspend_enter(suspend_state_t state)
+{
+ int ret = 0;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ cur_cpu_spec->cpu_flush_caches();
+ ret = qoriq_pm_ops->plat_enter_sleep();
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int qoriq_suspend_valid(suspend_state_t state)
+{
+ unsigned int pm_modes;
+
+ pm_modes = qoriq_pm_ops->get_pm_modes();
+
+ if ((state == PM_SUSPEND_STANDBY) && (pm_modes & FSL_PM_SLEEP))
+ return 1;
+
+ return 0;
+}
+
+static const struct platform_suspend_ops qoriq_suspend_ops = {
+ .valid = qoriq_suspend_valid,
+ .enter = qoriq_suspend_enter,
+};
+
+static int __init qoriq_suspend_init(void)
+{
+ suspend_set_ops(&qoriq_suspend_ops);
+
+ return 0;
+}
+arch_initcall(qoriq_suspend_init);
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 1afd1e4..09638e0 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -5,6 +5,7 @@ menuconfig PPC_86xx
select FSL_SOC
select ALTIVEC
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select FSL_PMC if SUSPEND
help
The Freescale E600 SoCs have 74xx cores.
--
1.9.1
* Re: [1/4] powerpc/cache: add cache flush operation for various e500
From: Scott Wood @ 2015-03-31 1:10 UTC
To: chenhui zhao; +Cc: linuxppc-dev, devicetree, linux-kernel, Jason.Jin
On Thu, Mar 26, 2015 at 06:18:12PM +0800, chenhui zhao wrote:
> Various e500 cores have different cache architectures, so they
> need different cache flush operations. Therefore, add a callback
> function, cpu_flush_caches, to struct cpu_spec. The cache flush
> operation for the specific kind of e500 is selected at init time.
> The callback flushes all caches inside the current cpu.
>
> Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
> ---
> arch/powerpc/include/asm/cacheflush.h | 2 -
> arch/powerpc/include/asm/cputable.h | 11 +++
> arch/powerpc/kernel/asm-offsets.c | 3 +
> arch/powerpc/kernel/cpu_setup_fsl_booke.S | 114 +++++++++++++++++++++++++++++-
> arch/powerpc/kernel/cputable.c | 4 ++
> arch/powerpc/kernel/head_fsl_booke.S | 74 -------------------
> arch/powerpc/platforms/85xx/smp.c | 3 +-
> 7 files changed, 133 insertions(+), 78 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
> index 30b35ff..729fde4 100644
> --- a/arch/powerpc/include/asm/cacheflush.h
> +++ b/arch/powerpc/include/asm/cacheflush.h
> @@ -30,8 +30,6 @@ extern void flush_dcache_page(struct page *page);
> #define flush_dcache_mmap_lock(mapping) do { } while (0)
> #define flush_dcache_mmap_unlock(mapping) do { } while (0)
>
> -extern void __flush_disable_L1(void);
> -
> extern void flush_icache_range(unsigned long, unsigned long);
> extern void flush_icache_user_range(struct vm_area_struct *vma,
> struct page *page, unsigned long addr,
> diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
> index 5cf5a6d..c776efe4 100644
> --- a/arch/powerpc/include/asm/cputable.h
> +++ b/arch/powerpc/include/asm/cputable.h
> @@ -43,6 +43,13 @@ extern int machine_check_e500(struct pt_regs *regs);
> extern int machine_check_e200(struct pt_regs *regs);
> extern int machine_check_47x(struct pt_regs *regs);
>
> +#if defined(CONFIG_E500) || defined(CONFIG_PPC_E500MC)
> +extern void __flush_caches_e500v2(void);
> +extern void __flush_caches_e500mc(void);
> +extern void __flush_caches_e5500(void);
> +extern void __flush_caches_e6500(void);
> +#endif
Why the leading underscores?
> /* NOTE WELL: Update identify_cpu() if fields are added or removed! */
> struct cpu_spec {
> /* CPU is matched via (PVR & pvr_mask) == pvr_value */
> @@ -59,6 +66,10 @@ struct cpu_spec {
> unsigned int icache_bsize;
> unsigned int dcache_bsize;
>
> +#if defined(CONFIG_E500) || defined(CONFIG_PPC_E500MC)
CONFIG_PPC_E500MC implies CONFIG_E500. Why do we need this ifdef?
> + /* flush caches inside the current cpu */
> + void (*cpu_flush_caches)(void);
> +#endif
It seems you literally mean "in the cpu" -- If it's a threaded core, then
by "cpu" do you mean "thread" (like we usually do) and thus no caches get
flushed (ignore the fact that it's moot on e6500 -- this is an interface
and needs to be clear)?
Also, no-oping L1 flush on e6500 is not compliant with the claim that
you're flushing the cache. You're relying on an unstated assumption that
you'll invalidate that cache later instead.
If you want to make this "flush whatever needs to be flushed for
suspend/hotplug", call it that.
-Scott
* Re: [2/4] powerpc/rcpm: add RCPM driver
From: Scott Wood @ 2015-03-31 1:30 UTC
To: chenhui zhao; +Cc: linuxppc-dev, devicetree, linux-kernel, Jason.Jin
On Thu, Mar 26, 2015 at 06:18:13PM +0800, chenhui zhao wrote:
> There is an RCPM (Run Control/Power Management) block in Freescale QorIQ
> series processors. It performs tasks associated with device run control
> and power management.
>
> The driver implements several features: masking/unmasking IRQs,
> entering/exiting low power states, freezing the time base, etc.
>
> Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
> ---
> Documentation/devicetree/bindings/soc/fsl/rcpm.txt | 23 ++
> arch/powerpc/include/asm/fsl_guts.h | 105 ++++++
> arch/powerpc/include/asm/fsl_pm.h | 49 +++
> arch/powerpc/platforms/85xx/Kconfig | 1 +
> arch/powerpc/sysdev/Kconfig | 5 +
> arch/powerpc/sysdev/Makefile | 1 +
> arch/powerpc/sysdev/fsl_rcpm.c | 353 +++++++++++++++++++++
> 7 files changed, 537 insertions(+)
> create mode 100644 Documentation/devicetree/bindings/soc/fsl/rcpm.txt
> create mode 100644 arch/powerpc/include/asm/fsl_pm.h
> create mode 100644 arch/powerpc/sysdev/fsl_rcpm.c
>
> diff --git a/Documentation/devicetree/bindings/soc/fsl/rcpm.txt b/Documentation/devicetree/bindings/soc/fsl/rcpm.txt
> new file mode 100644
> index 0000000..8c21b6c
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/soc/fsl/rcpm.txt
> @@ -0,0 +1,23 @@
> +* Run Control and Power Management
> +
> +The RCPM performs all device-level tasks associated with device run control
> +and power management.
> +
> +Required properties:
> + - reg : Offset and length of the register set of RCPM block.
> + - compatible : Specifies the compatibility list for the RCPM. The type
> + should be string, such as "fsl,qoriq-rcpm-1.0", "fsl,qoriq-rcpm-2.0".
> +
> +Example:
> +The RCPM node for T4240:
> + rcpm: global-utilities@e2000 {
> + compatible = "fsl,t4240-rcpm", "fsl,qoriq-rcpm-2.0";
> + reg = <0xe2000 0x1000>;
> + };
> +
> +The RCPM node for P4080:
> + rcpm: global-utilities@e2000 {
> + compatible = "fsl,qoriq-rcpm-1.0";
> + reg = <0xe2000 0x1000>;
> + #sleep-cells = <1>;
> + };
Where is #sleep-cells documented? It's copy-and-paste from something
that was never finished from many years ago.
> diff --git a/arch/powerpc/include/asm/fsl_pm.h b/arch/powerpc/include/asm/fsl_pm.h
> new file mode 100644
> index 0000000..bbe6089
> --- /dev/null
> +++ b/arch/powerpc/include/asm/fsl_pm.h
> @@ -0,0 +1,49 @@
> +/*
> + * Support Power Management
> + *
> + * Copyright 2014-2015 Freescale Semiconductor Inc.
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License as published by the
> + * Free Software Foundation; either version 2 of the License, or (at your
> + * option) any later version.
> + */
> +#ifndef __PPC_FSL_PM_H
> +#define __PPC_FSL_PM_H
> +#ifdef __KERNEL__
Put a space after #ifdef, not a tab.
> +#define E500_PM_PH10 1
> +#define E500_PM_PH15 2
> +#define E500_PM_PH20 3
> +#define E500_PM_PH30 4
> +#define E500_PM_DOZE E500_PM_PH10
> +#define E500_PM_NAP E500_PM_PH15
> +
> +#define PLAT_PM_SLEEP 20
> +#define PLAT_PM_LPM20 30
> +
> +#define FSL_PM_SLEEP (1 << 0)
> +#define FSL_PM_DEEP_SLEEP (1 << 1)
> +
> +struct fsl_pm_ops {
> + /* mask pending interrupts to the RCPM from MPIC */
> + void (*irq_mask)(int cpu);
> + /* unmask pending interrupts to the RCPM from MPIC */
> + void (*irq_unmask)(int cpu);
> + /* place the CPU in the specified state */
> + void (*cpu_enter_state)(int cpu, int state);
> + /* exit the CPU from the specified state */
> + void (*cpu_exit_state)(int cpu, int state);
> + /* place the platform in the sleep state */
> + int (*plat_enter_sleep)(void);
> + /* freeze the time base */
> + void (*freeze_time_base)(int freeze);
> + /* keep the power of IP blocks during sleep/deep sleep */
> + void (*set_ip_power)(int enable, u32 *mask);
> + /* get platform supported power management modes */
> + unsigned int (*get_pm_modes)(void);
> +};
Drop the comments that are basically just a restatement of the function
name. Where there are comments, it'd be easier to read with a blank line
between a function and the next comment.
s/int enable/bool enable/
s/int freeze/bool freeze/
> +#endif /* __KERNEL__ */
> +#endif /* __PPC_FSL_PM_H */
Please be consistent with whitespace.
> + default:
> + pr_err("%s: Unknown cpu PM state (%d)\n", __func__, state);
WARN?
> +static int rcpm_v2_plat_enter_state(int state)
> +{
> + u32 *pmcsr_reg = &rcpm_v2_regs->powmgtcsr;
> + int ret = 0;
> + int result;
> +
> + switch (state) {
> + case PLAT_PM_LPM20:
> + /* clear previous LPM20 status */
> + setbits32(pmcsr_reg, RCPM_POWMGTCSR_P_LPM20_ST);
How would the bit be set when you enter here, given that you wait for it
to clear when leaving?
> + /* enter LPM20 status */
> + setbits32(pmcsr_reg, RCPM_POWMGTCSR_LPM20_RQ);
> +
> + /* At this point, the device is in LPM20 status. */
> +
> + /* resume ... */
> + result = spin_event_timeout(
> + !(in_be32(pmcsr_reg) & RCPM_POWMGTCSR_LPM20_ST), 10000, 10);
> + if (!result) {
> + pr_err("%s: timeout waiting for LPM20 bit to be cleared\n",
> + __func__);
> + ret = -ETIMEDOUT;
> + }
> + break;
"At this point" is a bit misleading. I think it's clear enough if you
just drop that comment.
> + default:
> + pr_err("%s: Unknown platform PM state (%d)\n",
> + __func__, state);
> + ret = -EINVAL;
> + }
WARN?
> +static const struct of_device_id rcpm_matches[] = {
> + {
> + .compatible = "fsl,qoriq-rcpm-1.0",
> + .data = (void *)RCPM_V1,
> + },
> + {
> + .compatible = "fsl,qoriq-rcpm-2.0",
> + .data = (void *)RCPM_V2,
> + },
Why not point .data directly at the ops?
> + switch ((unsigned long)match->data) {
> + case RCPM_V1:
> + rcpm_v1_regs = base;
> + qoriq_pm_ops = &qoriq_rcpm_v1_ops;
> + break;
> +
> + case RCPM_V2:
> + rcpm_v2_regs = base;
> + qoriq_pm_ops = &qoriq_rcpm_v2_ops;
> + break;
> +
> + default:
> + break;
> + }
default: break; is unnecessary (and impossible to hit -- if you really
want default: it should probably WARN).
-Scott
* Re: [3/4] powerpc: support CPU hotplug for e500mc, e5500 and e6500
2015-03-26 10:18 ` [PATCH 3/4] powerpc: support CPU hotplug for e500mc, e5500 and e6500 Chenhui Zhao
@ 2015-03-31 2:07 ` Scott Wood
2015-04-02 11:16 ` chenhui.zhao
0 siblings, 1 reply; 15+ messages in thread
From: Scott Wood @ 2015-03-31 2:07 UTC (permalink / raw)
To: chenhui zhao; +Cc: linuxppc-dev, devicetree, linux-kernel, Jason.Jin
On Thu, Mar 26, 2015 at 06:18:14PM +0800, chenhui zhao wrote:
> Implemented CPU hotplug on e500mc, e5500 and e6500, with support
> for multi-threaded mode and 64-bit mode.
>
> For e6500 with two threads, if one thread is online, it can
> enable/disable the other thread in the same core. If two threads of
> one core are offline, the core will enter the PH20 state (a low power
> state). When the core is up again, Thread0 is up first, and it will be
> bound with the present booting cpu. This way, all CPUs can hotplug
> separately.
>
> Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
> ---
> arch/powerpc/Kconfig | 2 +-
> arch/powerpc/include/asm/fsl_pm.h | 4 +
> arch/powerpc/include/asm/smp.h | 2 +
> arch/powerpc/kernel/head_64.S | 20 +++--
> arch/powerpc/kernel/smp.c | 5 ++
> arch/powerpc/platforms/85xx/smp.c | 182 +++++++++++++++++++++++++++++---------
> arch/powerpc/sysdev/fsl_rcpm.c | 56 ++++++++++++
> 7 files changed, 220 insertions(+), 51 deletions(-)
Please factor out changes to generic code (including but not limited to
cur_boot_cpu and PIR handling) into separate patches with clear
explanations.
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 22b0940..9846c83 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -380,7 +380,7 @@ config SWIOTLB
> config HOTPLUG_CPU
> bool "Support for enabling/disabling CPUs"
> depends on SMP && (PPC_PSERIES || \
> - PPC_PMAC || PPC_POWERNV || (PPC_85xx && !PPC_E500MC))
> + PPC_PMAC || PPC_POWERNV || FSL_SOC_BOOKE)
> ---help---
> Say Y here to be able to disable and re-enable individual
> CPUs at runtime on SMP machines.
> diff --git a/arch/powerpc/include/asm/fsl_pm.h b/arch/powerpc/include/asm/fsl_pm.h
> index bbe6089..579f495 100644
> --- a/arch/powerpc/include/asm/fsl_pm.h
> +++ b/arch/powerpc/include/asm/fsl_pm.h
> @@ -34,6 +34,10 @@ struct fsl_pm_ops {
> void (*cpu_enter_state)(int cpu, int state);
> /* exit the CPU from the specified state */
> void (*cpu_exit_state)(int cpu, int state);
> + /* cpu up */
> + void (*cpu_up)(int cpu);
Again, this sort of comment is useless. Tell us what "cpu up" *does*,
when it should be called, etc.
> @@ -189,16 +193,14 @@ _GLOBAL(fsl_secondary_thread_init)
> isync
>
> /*
> - * Fix PIR to match the linear numbering in the device tree.
> - *
> - * On e6500, the reset value of PIR uses the low three bits for
> - * the thread within a core, and the upper bits for the core
> - * number. There are two threads per core, so shift everything
> - * but the low bit right by two bits so that the cpu numbering is
> - * continuous.
Why are you getting rid of this? If it's to avoid doing it twice on the
same thread, in my work-in-progress kexec patches I instead check to see
whether BUCSR has already been set up -- if it has, I assume we've
already been here.
> + * The current thread has been in 64-bit mode,
> + * see the value of TMRN_IMSR.
I don't see what the relevance of this comment is here.
> + * compute the address of __cur_boot_cpu
> */
> - mfspr r3, SPRN_PIR
> - rlwimi r3, r3, 30, 2, 30
> + bl 10f
> +10: mflr r22
> + addi r22,r22,(__cur_boot_cpu - 10b)
> + lwz r3,0(r22)
Please save non-volatile registers for things that need to stick around
for a while.
> mtspr SPRN_PIR, r3
If __cur_boot_cpu is meant to be the PIR of the currently booting CPU,
the name is misleading. It looks like it's supposed to have something to do
with the boot cpu (not "booting").
Also please don't put leading underscores on symbols just because the
adjacent symbols have them.
> -#ifdef CONFIG_HOTPLUG_CPU
> +#ifdef CONFIG_PPC_E500MC
> +static void qoriq_cpu_wait_die(void)
> +{
> + unsigned int cpu = smp_processor_id();
> +
> + hard_irq_disable();
> + /* mask all irqs to prevent cpu wakeup */
> + qoriq_pm_ops->irq_mask(cpu);
> + idle_task_exit();
> +
> + mtspr(SPRN_TCR, 0);
> + mtspr(SPRN_TSR, mfspr(SPRN_TSR));
> +
> + cur_cpu_spec->cpu_flush_caches();
> +
> + generic_set_cpu_dead(cpu);
> + smp_mb();
Comment memory barriers, as checkpatch says.
> + while (1)
> + ;
Indent the ;
> @@ -174,17 +232,29 @@ static inline u32 read_spin_table_addr_l(void *spin_table)
> static void wake_hw_thread(void *info)
> {
> void fsl_secondary_thread_init(void);
> - unsigned long imsr1, inia1;
> + unsigned long imsr, inia;
> int nr = *(const int *)info;
> -
> - imsr1 = MSR_KERNEL;
> - inia1 = *(unsigned long *)fsl_secondary_thread_init;
> -
> - mttmr(TMRN_IMSR1, imsr1);
> - mttmr(TMRN_INIA1, inia1);
> - mtspr(SPRN_TENS, TEN_THREAD(1));
> + int hw_cpu = get_hard_smp_processor_id(nr);
> + int thread_idx = cpu_thread_in_core(hw_cpu);
> +
> + __cur_boot_cpu = (u32)hw_cpu;
> + imsr = MSR_KERNEL;
> + inia = *(unsigned long *)fsl_secondary_thread_init;
> + smp_mb();
> + if (thread_idx == 0) {
> + mttmr(TMRN_IMSR0, imsr);
> + mttmr(TMRN_INIA0, inia);
> + } else {
> + mttmr(TMRN_IMSR1, imsr);
> + mttmr(TMRN_INIA1, inia);
> + }
> + isync();
> + mtspr(SPRN_TENS, TEN_THREAD(thread_idx));
Support for waking a secondary core should be a separate patch (I have
similar code on the way for kexec). Likewise adding smp_mb()/isync() if
it's really needed. In general, this patch tries to do too much at once.
> smp_generic_kick_cpu(nr);
> +#ifdef CONFIG_HOTPLUG_CPU
> + generic_set_cpu_up(nr);
> +#endif
> }
> #endif
>
> @@ -203,28 +273,46 @@ static int smp_85xx_kick_cpu(int nr)
>
> pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
>
> +#ifdef CONFIG_HOTPLUG_CPU
> + sync_tb = 0;
> + smp_mb();
> +#endif
Timebase synchronization should also be separate.
> #ifdef CONFIG_PPC64
> - /* Threads don't use the spin table */
> - if (cpu_thread_in_core(nr) != 0) {
> + if (threads_per_core > 1) {
> int primary = cpu_first_thread_sibling(nr);
>
> if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
> return -ENOENT;
>
> - if (cpu_thread_in_core(nr) != 1) {
> - pr_err("%s: cpu %d: invalid hw thread %d\n",
> - __func__, nr, cpu_thread_in_core(nr));
> - return -ENOENT;
> + /*
> + * If either one of threads in the same core is online,
> + * use the online one to start the other.
> + */
> + if (cpu_online(primary) || cpu_online(primary + 1)) {
> + qoriq_pm_ops->cpu_up(nr);
What if we don't have qoriq_pm_ops (e.g. VM guest, or some failure)?
> + if (cpu_online(primary))
> + smp_call_function_single(primary,
> + wake_hw_thread, &nr, 1);
> + else
> + smp_call_function_single(primary + 1,
> + wake_hw_thread, &nr, 1);
> + return 0;
> }
> -
> - if (!cpu_online(primary)) {
> - pr_err("%s: cpu %d: primary %d not online\n",
> - __func__, nr, primary);
> - return -ENOENT;
> + /*
> + * If both threads are offline, reset core to start.
> + * When core is up, Thread 0 always gets up first,
> + * so bind the current logical cpu with Thread 0.
> + */
What if the core is not in a PM state that requires a reset?
Where does this reset occur?
> + if (hw_cpu != cpu_first_thread_sibling(hw_cpu)) {
> + int hw_cpu1, hw_cpu2;
> +
> + hw_cpu1 = get_hard_smp_processor_id(primary);
> + hw_cpu2 = get_hard_smp_processor_id(primary + 1);
> + set_hard_smp_processor_id(primary, hw_cpu2);
> + set_hard_smp_processor_id(primary + 1, hw_cpu1);
> + /* get new physical cpu id */
> + hw_cpu = get_hard_smp_processor_id(nr);
Why are you swapping the hard smp ids?
> }
> -
> - smp_call_function_single(primary, wake_hw_thread, &nr, 0);
> - return 0;
> }
> #endif
>
> @@ -252,11 +340,7 @@ static int smp_85xx_kick_cpu(int nr)
> spin_table = phys_to_virt(*cpu_rel_addr);
>
> local_irq_save(flags);
> -#ifdef CONFIG_PPC32
> #ifdef CONFIG_HOTPLUG_CPU
> - /* Corresponding to generic_set_cpu_dead() */
> - generic_set_cpu_up(nr);
> -
Why did you move this?
> if (system_state == SYSTEM_RUNNING) {
> /*
> * To keep it compatible with old boot program which uses
> @@ -269,11 +353,16 @@ static int smp_85xx_kick_cpu(int nr)
> out_be32(&spin_table->addr_l, 0);
> flush_spin_table(spin_table);
>
> +#ifdef CONFIG_PPC_E500MC
> + qoriq_pm_ops->cpu_up(nr);
> +#endif
Again, you've killed a VM guest kernel (this time, even if the guest
doesn't see SMT).
> @@ -489,13 +586,16 @@ void __init mpc85xx_smp_init(void)
> __func__);
> return;
> }
> - smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
> - smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
> -#ifdef CONFIG_HOTPLUG_CPU
> - ppc_md.cpu_die = smp_85xx_mach_cpu_die;
> -#endif
You're moving this from a place that only runs when guts is found...
> }
>
> + smp_85xx_ops.cpu_die = generic_cpu_die;
> + ppc_md.cpu_die = smp_85xx_mach_cpu_die;
> +#endif
> + smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
> + smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
> + smp_85xx_ops.cpu_disable = generic_cpu_disable;
> +#endif /* CONFIG_HOTPLUG_CPU */
...to a place that runs unconditionally. Again, you're breaking VM
guests.
-Scott
* Re: [4/4] powerpc/85xx: support sleep feature on QorIQ SoCs with RCPM
2015-03-26 10:18 ` [PATCH 4/4] powerpc/85xx: support sleep feature on QorIQ SoCs with RCPM Chenhui Zhao
@ 2015-03-31 2:35 ` Scott Wood
2015-04-02 11:18 ` chenhui.zhao
0 siblings, 1 reply; 15+ messages in thread
From: Scott Wood @ 2015-03-31 2:35 UTC (permalink / raw)
To: chenhui zhao; +Cc: linuxppc-dev, devicetree, linux-kernel, Jason.Jin
On Thu, Mar 26, 2015 at 06:18:15PM +0800, chenhui zhao wrote:
> In sleep mode, the clocks of e500 cores and unused IP blocks are
> turned off. The IP blocks which are allowed to wake up the processor
> are still running.
>
> The sleep mode is equal to the Standby state in Linux. Use the
> command to enter sleep mode:
> echo standby > /sys/power/state
>
> Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
> ---
> arch/powerpc/Kconfig | 3 +-
> arch/powerpc/platforms/85xx/Kconfig | 5 +++
> arch/powerpc/platforms/85xx/Makefile | 1 +
> arch/powerpc/platforms/85xx/qoriq_pm.c | 59 ++++++++++++++++++++++++++++++++++
> arch/powerpc/platforms/86xx/Kconfig | 1 +
> 5 files changed, 67 insertions(+), 2 deletions(-)
> create mode 100644 arch/powerpc/platforms/85xx/qoriq_pm.c
>
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 9846c83..162eb53 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -233,7 +233,7 @@ config ARCH_HIBERNATION_POSSIBLE
> config ARCH_SUSPEND_POSSIBLE
> def_bool y
> depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
> - (PPC_85xx && !PPC_E500MC) || PPC_86xx || PPC_PSERIES \
> + FSL_SOC_BOOKE || PPC_86xx || PPC_PSERIES \
> || 44x || 40x
>
> config PPC_DCR_NATIVE
> @@ -747,7 +747,6 @@ config FSL_PCI
>
> config FSL_PMC
> bool
> - default y
> depends on SUSPEND && (PPC_85xx || PPC_86xx)
Get rid of this depends line if you're going to use select instead.
> +static int qoriq_suspend_valid(suspend_state_t state)
> +{
> + unsigned int pm_modes;
> +
> + pm_modes = qoriq_pm_ops->get_pm_modes();
> +
> + if ((state == PM_SUSPEND_STANDBY) && (pm_modes & FSL_PM_SLEEP))
> + return 1;
Unnecessary parentheses around ==
-Scott
* Re: [1/4] powerpc/cache: add cache flush operation for various e500
2015-03-31 1:10 ` [1/4] powerpc/cache: add cache flush operation for various e500 Scott Wood
@ 2015-04-02 10:14 ` chenhui.zhao
0 siblings, 0 replies; 15+ messages in thread
From: chenhui.zhao @ 2015-04-02 10:14 UTC (permalink / raw)
To: Scott Wood; +Cc: linuxppc-dev, devicetree, linux-kernel, Jason.Jin
________________________________________
From: Wood Scott-B07421
Sent: Tuesday, March 31, 2015 9:10
To: Zhao Chenhui-B35336
Cc: linuxppc-dev@lists.ozlabs.org; devicetree@vger.kernel.org; linux-kernel@vger.kernel.org; Jin Zhengxiong-R64188
Subject: Re: [1/4] powerpc/cache: add cache flush operation for various e500
On Thu, Mar 26, 2015 at 06:18:12PM +0800, chenhui zhao wrote:
> Various e500 core have different cache architecture, so they
> need different cache flush operations. Therefore, add a callback
> function cpu_flush_caches to the struct cpu_spec. The cache flush
> operation for the specific kind of e500 is selected at init time.
> The callback function will flush all caches inside the current cpu.
>
> Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
> ---
> arch/powerpc/include/asm/cacheflush.h | 2 -
> arch/powerpc/include/asm/cputable.h | 11 +++
> arch/powerpc/kernel/asm-offsets.c | 3 +
> arch/powerpc/kernel/cpu_setup_fsl_booke.S | 114 +++++++++++++++++++++++++++++-
> arch/powerpc/kernel/cputable.c | 4 ++
> arch/powerpc/kernel/head_fsl_booke.S | 74 -------------------
> arch/powerpc/platforms/85xx/smp.c | 3 +-
> 7 files changed, 133 insertions(+), 78 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
> index 30b35ff..729fde4 100644
> --- a/arch/powerpc/include/asm/cacheflush.h
> +++ b/arch/powerpc/include/asm/cacheflush.h
> @@ -30,8 +30,6 @@ extern void flush_dcache_page(struct page *page);
> #define flush_dcache_mmap_lock(mapping) do { } while (0)
> #define flush_dcache_mmap_unlock(mapping) do { } while (0)
>
> -extern void __flush_disable_L1(void);
> -
> extern void flush_icache_range(unsigned long, unsigned long);
> extern void flush_icache_user_range(struct vm_area_struct *vma,
> struct page *page, unsigned long addr,
> diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
> index 5cf5a6d..c776efe4 100644
> --- a/arch/powerpc/include/asm/cputable.h
> +++ b/arch/powerpc/include/asm/cputable.h
> @@ -43,6 +43,13 @@ extern int machine_check_e500(struct pt_regs *regs);
> extern int machine_check_e200(struct pt_regs *regs);
> extern int machine_check_47x(struct pt_regs *regs);
>
> +#if defined(CONFIG_E500) || defined(CONFIG_PPC_E500MC)
> +extern void __flush_caches_e500v2(void);
> +extern void __flush_caches_e500mc(void);
> +extern void __flush_caches_e5500(void);
> +extern void __flush_caches_e6500(void);
> +#endif
Why the leading underscores?
[chenhui] Will get rid of them.
> /* NOTE WELL: Update identify_cpu() if fields are added or removed! */
> struct cpu_spec {
> /* CPU is matched via (PVR & pvr_mask) == pvr_value */
> @@ -59,6 +66,10 @@ struct cpu_spec {
> unsigned int icache_bsize;
> unsigned int dcache_bsize;
>
> +#if defined(CONFIG_E500) || defined(CONFIG_PPC_E500MC)
CONFIG_PPC_E500MC implies CONFIG_E500. Why do we need this ifdef?
[chenhui] Change to "#ifdef CONFIG_E500".
> + /* flush caches inside the current cpu */
> + void (*cpu_flush_caches)(void);
> +#endif
It seems you literally mean "in the cpu" -- If it's a threaded core, then
by "cpu" do you mean "thread" (like we usually do) and thus no caches get
flushed (ignore the fact that it's moot on e6500 -- this is an interface
and needs to be clear).
Also, no-oping L1 flush on e6500 is not compliant with the claim that
you're flushing the cache. You're relying on an unstated assumption that
you'll invalidate that cache later instead.
If you want to make this "flush whatever needs to be flushed for
suspend/hotplug", call it that.
-Scott
[chenhui] OK. Then, call "cpu_down_flush".
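A minimal sketch of what that rename could look like, assuming only the
member name changes and the hotplug path keeps calling through cur_cpu_spec:

        /* in struct cpu_spec */
#ifdef CONFIG_E500
        /* flush whatever needs to be flushed before this cpu is taken down */
        void (*cpu_down_flush)(void);
#endif

        /* at the hotplug/suspend call site */
        if (cur_cpu_spec->cpu_down_flush)
                cur_cpu_spec->cpu_down_flush();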
* Re: [2/4] powerpc/rcpm: add RCPM driver
2015-03-31 1:30 ` [2/4] " Scott Wood
@ 2015-04-02 10:33 ` chenhui.zhao
2015-04-02 15:50 ` Scott Wood
0 siblings, 1 reply; 15+ messages in thread
From: chenhui.zhao @ 2015-04-02 10:33 UTC (permalink / raw)
To: Scott Wood; +Cc: linuxppc-dev, devicetree, linux-kernel, Jason.Jin
________________________________________
From: Wood Scott-B07421
Sent: Tuesday, March 31, 2015 9:30
To: Zhao Chenhui-B35336
Cc: linuxppc-dev@lists.ozlabs.org; devicetree@vger.kernel.org; linux-kernel@vger.kernel.org; Jin Zhengxiong-R64188
Subject: Re: [2/4] powerpc/rcpm: add RCPM driver
On Thu, Mar 26, 2015 at 06:18:13PM +0800, chenhui zhao wrote:
> There is a RCPM (Run Control/Power Management) in Freescale QorIQ
> series processors. The device performs tasks associated with device
> run control and power management.
>
> The driver implements some features: mask/unmask irq, enter/exit low
> power states, freeze time base, etc.
>
> Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
> ---
> Documentation/devicetree/bindings/soc/fsl/rcpm.txt | 23 ++
> arch/powerpc/include/asm/fsl_guts.h | 105 ++++++
> arch/powerpc/include/asm/fsl_pm.h | 49 +++
> arch/powerpc/platforms/85xx/Kconfig | 1 +
> arch/powerpc/sysdev/Kconfig | 5 +
> arch/powerpc/sysdev/Makefile | 1 +
> arch/powerpc/sysdev/fsl_rcpm.c | 353 +++++++++++++++++++++
> 7 files changed, 537 insertions(+)
> create mode 100644 Documentation/devicetree/bindings/soc/fsl/rcpm.txt
> create mode 100644 arch/powerpc/include/asm/fsl_pm.h
> create mode 100644 arch/powerpc/sysdev/fsl_rcpm.c
>
> diff --git a/Documentation/devicetree/bindings/soc/fsl/rcpm.txt b/Documentation/devicetree/bindings/soc/fsl/rcpm.txt
> new file mode 100644
> index 0000000..8c21b6c
> --- /dev/null
> +++ b/Documentation/devicetree/bindings/soc/fsl/rcpm.txt
> @@ -0,0 +1,23 @@
> +* Run Control and Power Management
> +
> +The RCPM performs all device-level tasks associated with device run control
> +and power management.
> +
> +Required properties:
> + - reg : Offset and length of the register set of RCPM block.
> + - compatible : Specifies the compatibility list for the RCPM. The type
> + should be string, such as "fsl,qoriq-rcpm-1.0", "fsl,qoriq-rcpm-2.0".
> +
> +Example:
> +The RCPM node for T4240:
> + rcpm: global-utilities@e2000 {
> + compatible = "fsl,t4240-rcpm", "fsl,qoriq-rcpm-2.0";
> + reg = <0xe2000 0x1000>;
> + };
> +
> +The RCPM node for P4080:
> + rcpm: global-utilities@e2000 {
> + compatible = "fsl,qoriq-rcpm-1.0";
> + reg = <0xe2000 0x1000>;
> + #sleep-cells = <1>;
> + };
Where is #sleep-cells documented? It's copy-and-paste from something
that was never finished from many years ago.
[chenhui] Will get rid of them.
> diff --git a/arch/powerpc/include/asm/fsl_pm.h b/arch/powerpc/include/asm/fsl_pm.h
> new file mode 100644
> index 0000000..bbe6089
> --- /dev/null
> +++ b/arch/powerpc/include/asm/fsl_pm.h
> @@ -0,0 +1,49 @@
> +/*
> + * Support Power Management
> + *
> + * Copyright 2014-2015 Freescale Semiconductor Inc.
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License as published by the
> + * Free Software Foundation; either version 2 of the License, or (at your
> + * option) any later version.
> + */
> +#ifndef __PPC_FSL_PM_H
> +#define __PPC_FSL_PM_H
> +#ifdef __KERNEL__
Put a space after #ifdef, not a tab.
[Chenhui] Will change it.
> +#define E500_PM_PH10 1
> +#define E500_PM_PH15 2
> +#define E500_PM_PH20 3
> +#define E500_PM_PH30 4
> +#define E500_PM_DOZE E500_PM_PH10
> +#define E500_PM_NAP E500_PM_PH15
> +
> +#define PLAT_PM_SLEEP 20
> +#define PLAT_PM_LPM20 30
> +
> +#define FSL_PM_SLEEP (1 << 0)
> +#define FSL_PM_DEEP_SLEEP (1 << 1)
> +
> +struct fsl_pm_ops {
> + /* mask pending interrupts to the RCPM from MPIC */
> + void (*irq_mask)(int cpu);
> + /* unmask pending interrupts to the RCPM from MPIC */
> + void (*irq_unmask)(int cpu);
> + /* place the CPU in the specified state */
> + void (*cpu_enter_state)(int cpu, int state);
> + /* exit the CPU from the specified state */
> + void (*cpu_exit_state)(int cpu, int state);
> + /* place the platform in the sleep state */
> + int (*plat_enter_sleep)(void);
> + /* freeze the time base */
> + void (*freeze_time_base)(int freeze);
> + /* keep the power of IP blocks during sleep/deep sleep */
> + void (*set_ip_power)(int enable, u32 *mask);
> + /* get platform supported power management modes */
> + unsigned int (*get_pm_modes)(void);
> +};
Drop the comments that are basically just a restatement of the function
name. Where there are comments, it'd be easier to read with a blank line
between a function and the next comment.
s/int enable/bool enable/
s/int freeze/bool freeze/
[chenhui] Yes, you are right.
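A sketch of how the struct could read with those changes applied (bool
parameters, restating comments dropped, the remaining comments made more
specific), assuming the set of hooks itself stays the same:

        struct fsl_pm_ops {
                void (*irq_mask)(int cpu);
                void (*irq_unmask)(int cpu);
                void (*cpu_enter_state)(int cpu, int state);
                void (*cpu_exit_state)(int cpu, int state);
                int (*plat_enter_sleep)(void);
                void (*freeze_time_base)(bool freeze);

                /* keep the IP blocks selected by *mask powered in sleep/deep sleep */
                void (*set_ip_power)(bool enable, u32 *mask);

                /* returns a mask of the FSL_PM_SLEEP/FSL_PM_DEEP_SLEEP modes supported */
                unsigned int (*get_pm_modes)(void);
        };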
> +#endif /* __KERNEL__ */
> +#endif /* __PPC_FSL_PM_H */
Please be consistent with whitespace.
> + default:
> + pr_err("%s: Unknown cpu PM state (%d)\n", __func__, state);
WARN?
> +static int rcpm_v2_plat_enter_state(int state)
> +{
> + u32 *pmcsr_reg = &rcpm_v2_regs->powmgtcsr;
> + int ret = 0;
> + int result;
> +
> + switch (state) {
> + case PLAT_PM_LPM20:
> + /* clear previous LPM20 status */
> + setbits32(pmcsr_reg, RCPM_POWMGTCSR_P_LPM20_ST);
How would the bit be set when you enter here, given that you wait for it
to clear when leaving?
[chenhui] Actually, the bit is not used by software. Just follow the instruction in RM.
> + /* enter LPM20 status */
> + setbits32(pmcsr_reg, RCPM_POWMGTCSR_LPM20_RQ);
> +
> + /* At this point, the device is in LPM20 status. */
> +
> + /* resume ... */
> + result = spin_event_timeout(
> + !(in_be32(pmcsr_reg) & RCPM_POWMGTCSR_LPM20_ST), 10000, 10);
> + if (!result) {
> + pr_err("%s: timeout waiting for LPM20 bit to be cleared\n",
> + __func__);
> + ret = -ETIMEDOUT;
> + }
> + break;
"At this point" is a bit misleading. I think it's clear enough if you
just drop that comment.
> + default:
> + pr_err("%s: Unknown platform PM state (%d)\n",
> + __func__, state);
> + ret = -EINVAL;
> + }
WARN?
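For instance, the default arm could be written roughly as below; WARN() both
logs the message and produces a backtrace for a state that should never be
passed in:

        default:
                WARN(1, "%s: unknown platform PM state (%d)\n",
                     __func__, state);
                ret = -EINVAL;
        }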
> +static const struct of_device_id rcpm_matches[] = {
> + {
> + .compatible = "fsl,qoriq-rcpm-1.0",
> + .data = (void *)RCPM_V1,
> + },
> + {
> + .compatible = "fsl,qoriq-rcpm-2.0",
> + .data = (void *)RCPM_V2,
> + },
Why not point .data directly at the ops?
[chenhui] I agree.
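A rough sketch of that, keeping the ops struct names from the patch; the
per-version register base (rcpm_v1_regs/rcpm_v2_regs) would then have to be
set up some other way, e.g. from an init hook in the ops, and qoriq_pm_ops
would need to be a const pointer:

        static const struct of_device_id rcpm_matches[] = {
                {
                        .compatible = "fsl,qoriq-rcpm-1.0",
                        .data = &qoriq_rcpm_v1_ops,
                },
                {
                        .compatible = "fsl,qoriq-rcpm-2.0",
                        .data = &qoriq_rcpm_v2_ops,
                },
                {},
        };

        /* in the init path, with np being the rcpm node */
        match = of_match_node(rcpm_matches, np);
        if (!match)
                return -ENODEV;
        qoriq_pm_ops = match->data;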
> + switch ((unsigned long)match->data) {
> + case RCPM_V1:
> + rcpm_v1_regs = base;
> + qoriq_pm_ops = &qoriq_rcpm_v1_ops;
> + break;
> +
> + case RCPM_V2:
> + rcpm_v2_regs = base;
> + qoriq_pm_ops = &qoriq_rcpm_v2_ops;
> + break;
> +
> + default:
> + break;
> + }
default: break; is unnecessary (and impossible to hit -- if you really
want default: it should probably WARN).
-Scott
[chenhui] Will get rid of them.
* Re: [3/4] powerpc: support CPU hotplug for e500mc, e5500 and e6500
2015-03-31 2:07 ` [3/4] " Scott Wood
@ 2015-04-02 11:16 ` chenhui.zhao
2015-04-02 16:03 ` Scott Wood
0 siblings, 1 reply; 15+ messages in thread
From: chenhui.zhao @ 2015-04-02 11:16 UTC (permalink / raw)
To: Scott Wood; +Cc: linuxppc-dev, devicetree, linux-kernel, Jason.Jin
________________________________________
From: Wood Scott-B07421
Sent: Tuesday, March 31, 2015 10:07
To: Zhao Chenhui-B35336
Cc: linuxppc-dev@lists.ozlabs.org; devicetree@vger.kernel.org; linux-kernel@vger.kernel.org; Jin Zhengxiong-R64188
Subject: Re: [3/4] powerpc: support CPU hotplug for e500mc, e5500 and e6500
On Thu, Mar 26, 2015 at 06:18:14PM +0800, chenhui zhao wrote:
> Implemented CPU hotplug on e500mc, e5500 and e6500, with support
> for multi-threaded mode and 64-bit mode.
>
> For e6500 with two threads, if one thread is online, it can
> enable/disable the other thread in the same core. If two threads of
> one core are offline, the core will enter the PH20 state (a low power
> state). When the core is up again, Thread0 is up first, and it will be
> bound with the present booting cpu. This way, all CPUs can hotplug
> separately.
>
> Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
> ---
> arch/powerpc/Kconfig | 2 +-
> arch/powerpc/include/asm/fsl_pm.h | 4 +
> arch/powerpc/include/asm/smp.h | 2 +
> arch/powerpc/kernel/head_64.S | 20 +++--
> arch/powerpc/kernel/smp.c | 5 ++
> arch/powerpc/platforms/85xx/smp.c | 182 +++++++++++++++++++++++++++++---------
> arch/powerpc/sysdev/fsl_rcpm.c | 56 ++++++++++++
> 7 files changed, 220 insertions(+), 51 deletions(-)
Please factor out changes to generic code (including but not limited to
cur_boot_cpu and PIR handling) into separate patches with clear
explanations.
[chenhui] OK.
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 22b0940..9846c83 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -380,7 +380,7 @@ config SWIOTLB
> config HOTPLUG_CPU
> bool "Support for enabling/disabling CPUs"
> depends on SMP && (PPC_PSERIES || \
> - PPC_PMAC || PPC_POWERNV || (PPC_85xx && !PPC_E500MC))
> + PPC_PMAC || PPC_POWERNV || FSL_SOC_BOOKE)
> ---help---
> Say Y here to be able to disable and re-enable individual
> CPUs at runtime on SMP machines.
> diff --git a/arch/powerpc/include/asm/fsl_pm.h b/arch/powerpc/include/asm/fsl_pm.h
> index bbe6089..579f495 100644
> --- a/arch/powerpc/include/asm/fsl_pm.h
> +++ b/arch/powerpc/include/asm/fsl_pm.h
> @@ -34,6 +34,10 @@ struct fsl_pm_ops {
> void (*cpu_enter_state)(int cpu, int state);
> /* exit the CPU from the specified state */
> void (*cpu_exit_state)(int cpu, int state);
> + /* cpu up */
> + void (*cpu_up)(int cpu);
Again, this sort of comment is useless. Tell us what "cpu up" *does*,
when it should be called, etc.
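One possible shape for a more descriptive comment -- the wording below is a
guess at the intent from the rest of the series, so it would need to be
confirmed by the author:

        /*
         * Take the given hardware cpu (core or thread) out of its
         * low-power/reset state so that it can be kicked; called by the
         * waking cpu before the target is released via the spin table
         * or TENS.
         */
        void (*cpu_up)(int cpu);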
> @@ -189,16 +193,14 @@ _GLOBAL(fsl_secondary_thread_init)
> isync
>
> /*
> - * Fix PIR to match the linear numbering in the device tree.
> - *
> - * On e6500, the reset value of PIR uses the low three bits for
> - * the thread within a core, and the upper bits for the core
> - * number. There are two threads per core, so shift everything
> - * but the low bit right by two bits so that the cpu numbering is
> - * continuous.
Why are you getting rid of this? If it's to avoid doing it twice on the
same thread, in my work-in-progress kexec patches I instead check to see
whether BUCSR has already been set up -- if it has, I assume we've
already been here.
[chenhui] I didn't delete the branch prediction related code.
> + * The current thread has been in 64-bit mode,
> + * see the value of TMRN_IMSR.
I don't see what the relevance of this comment is here.
[chenhui] Will explain it more clearly.
> + * compute the address of __cur_boot_cpu
> */
> - mfspr r3, SPRN_PIR
> - rlwimi r3, r3, 30, 2, 30
> + bl 10f
> +10: mflr r22
> + addi r22,r22,(__cur_boot_cpu - 10b)
> + lwz r3,0(r22)
Please save non-volatile registers for things that need to stick around
for a while.
[chenhui] OK.
> mtspr SPRN_PIR, r3
If __cur_boot_cpu is meant to be the PIR of the currently booting CPU,
the name is misleading. It looks like it's supposed to have something to do
with the boot cpu (not "booting").
[chenhui] I mean the PIR of the currently booting CPU. Change to "booting_cpu_hwid".
Also please don't put leading underscores on symbols just because the
adjacent symbols have them.
> -#ifdef CONFIG_HOTPLUG_CPU
> +#ifdef CONFIG_PPC_E500MC
> +static void qoriq_cpu_wait_die(void)
> +{
> + unsigned int cpu = smp_processor_id();
> +
> + hard_irq_disable();
> + /* mask all irqs to prevent cpu wakeup */
> + qoriq_pm_ops->irq_mask(cpu);
> + idle_task_exit();
> +
> + mtspr(SPRN_TCR, 0);
> + mtspr(SPRN_TSR, mfspr(SPRN_TSR));
> +
> + cur_cpu_spec->cpu_flush_caches();
> +
> + generic_set_cpu_dead(cpu);
> + smp_mb();
Comment memory barriers, as checkpatch says.
> + while (1)
> + ;
Indent the ;
[chenhui] OK
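That is, roughly:

        generic_set_cpu_dead(cpu);

        /*
         * Make the CPU_DEAD state visible to the cpu polling in
         * generic_cpu_die() before this cpu stops doing useful work.
         */
        smp_mb();

        while (1)
                ;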
> @@ -174,17 +232,29 @@ static inline u32 read_spin_table_addr_l(void *spin_table)
> static void wake_hw_thread(void *info)
> {
> void fsl_secondary_thread_init(void);
> - unsigned long imsr1, inia1;
> + unsigned long imsr, inia;
> int nr = *(const int *)info;
> -
> - imsr1 = MSR_KERNEL;
> - inia1 = *(unsigned long *)fsl_secondary_thread_init;
> -
> - mttmr(TMRN_IMSR1, imsr1);
> - mttmr(TMRN_INIA1, inia1);
> - mtspr(SPRN_TENS, TEN_THREAD(1));
> + int hw_cpu = get_hard_smp_processor_id(nr);
> + int thread_idx = cpu_thread_in_core(hw_cpu);
> +
> + __cur_boot_cpu = (u32)hw_cpu;
> + imsr = MSR_KERNEL;
> + inia = *(unsigned long *)fsl_secondary_thread_init;
> + smp_mb();
> + if (thread_idx == 0) {
> + mttmr(TMRN_IMSR0, imsr);
> + mttmr(TMRN_INIA0, inia);
> + } else {
> + mttmr(TMRN_IMSR1, imsr);
> + mttmr(TMRN_INIA1, inia);
> + }
> + isync();
> + mtspr(SPRN_TENS, TEN_THREAD(thread_idx));
Support for waking a secondary core should be a separate patch (I have
similar code on the way for kexec). Likewise adding smp_mb()/isync() if
it's really needed. In general, this patch tries to do too much at once.
> smp_generic_kick_cpu(nr);
> +#ifdef CONFIG_HOTPLUG_CPU
> + generic_set_cpu_up(nr);
> +#endif
> }
> #endif
>
> @@ -203,28 +273,46 @@ static int smp_85xx_kick_cpu(int nr)
>
> pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
>
> +#ifdef CONFIG_HOTPLUG_CPU
> + sync_tb = 0;
> + smp_mb();
> +#endif
Timebase synchronization should also be separate.
> #ifdef CONFIG_PPC64
> - /* Threads don't use the spin table */
> - if (cpu_thread_in_core(nr) != 0) {
> + if (threads_per_core > 1) {
> int primary = cpu_first_thread_sibling(nr);
>
> if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
> return -ENOENT;
>
> - if (cpu_thread_in_core(nr) != 1) {
> - pr_err("%s: cpu %d: invalid hw thread %d\n",
> - __func__, nr, cpu_thread_in_core(nr));
> - return -ENOENT;
> + /*
> + * If either one of threads in the same core is online,
> + * use the online one to start the other.
> + */
> + if (cpu_online(primary) || cpu_online(primary + 1)) {
> + qoriq_pm_ops->cpu_up(nr);
What if we don't have qoriq_pm_ops (e.g. VM guest, or some failure)?
[chenhui] Will put it in an if statement.
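e.g. a guard roughly like the following; the exact error handling is an
assumption:

        if (!qoriq_pm_ops || !qoriq_pm_ops->cpu_up)
                return -ENODEV; /* e.g. running as a VM guest with no RCPM */

        qoriq_pm_ops->cpu_up(nr);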
> + if (cpu_online(primary))
> + smp_call_function_single(primary,
> + wake_hw_thread, &nr, 1);
> + else
> + smp_call_function_single(primary + 1,
> + wake_hw_thread, &nr, 1);
> + return 0;
> }
> -
> - if (!cpu_online(primary)) {
> - pr_err("%s: cpu %d: primary %d not online\n",
> - __func__, nr, primary);
> - return -ENOENT;
> + /*
> + * If both threads are offline, reset core to start.
> + * When core is up, Thread 0 always gets up first,
> + * so bind the current logical cpu with Thread 0.
> + */
What if the core is not in a PM state that requires a reset?
Where does this reset occur?
[chenhui] Reset occurs in the function mpic_reset_core().
> + if (hw_cpu != cpu_first_thread_sibling(hw_cpu)) {
> + int hw_cpu1, hw_cpu2;
> +
> + hw_cpu1 = get_hard_smp_processor_id(primary);
> + hw_cpu2 = get_hard_smp_processor_id(primary + 1);
> + set_hard_smp_processor_id(primary, hw_cpu2);
> + set_hard_smp_processor_id(primary + 1, hw_cpu1);
> + /* get new physical cpu id */
> + hw_cpu = get_hard_smp_processor_id(nr);
Why are you swapping the hard smp ids?
[chenhui] For example, Core1 has two threads, Thread0 and Thread1. In a normal boot, Thread0 is CPU2 and Thread1 is CPU3.
But if CPU2 and CPU3 are both off and the user wants CPU3 up first, we need to treat Thread0 as CPU3 and Thread1 as CPU2,
given the limitation that after the core is reset only Thread0 comes up; Thread0 then kicks up Thread1.
> }
> -
> - smp_call_function_single(primary, wake_hw_thread, &nr, 0);
> - return 0;
> }
> #endif
>
> @@ -252,11 +340,7 @@ static int smp_85xx_kick_cpu(int nr)
> spin_table = phys_to_virt(*cpu_rel_addr);
>
> local_irq_save(flags);
> -#ifdef CONFIG_PPC32
> #ifdef CONFIG_HOTPLUG_CPU
> - /* Corresponding to generic_set_cpu_dead() */
> - generic_set_cpu_up(nr);
> -
Why did you move this?
[chenhui] It would be better to set this after CPU is really up.
> if (system_state == SYSTEM_RUNNING) {
> /*
> * To keep it compatible with old boot program which uses
> @@ -269,11 +353,16 @@ static int smp_85xx_kick_cpu(int nr)
> out_be32(&spin_table->addr_l, 0);
> flush_spin_table(spin_table);
>
> +#ifdef CONFIG_PPC_E500MC
> + qoriq_pm_ops->cpu_up(nr);
> +#endif
Again, you've killed a VM guest kernel (this time, even if the guest
doesn't see SMT).
> @@ -489,13 +586,16 @@ void __init mpc85xx_smp_init(void)
> __func__);
> return;
> }
> - smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
> - smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
> -#ifdef CONFIG_HOTPLUG_CPU
> - ppc_md.cpu_die = smp_85xx_mach_cpu_die;
> -#endif
You're moving this from a place that only runs when guts is found...
> }
>
> + smp_85xx_ops.cpu_die = generic_cpu_die;
> + ppc_md.cpu_die = smp_85xx_mach_cpu_die;
> +#endif
> + smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
> + smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
> + smp_85xx_ops.cpu_disable = generic_cpu_disable;
> +#endif /* CONFIG_HOTPLUG_CPU */
...to a place that runs unconditionally. Again, you're breaking VM
guests.
-Scott
[chenhui] Will correct it.
* Re: [4/4] powerpc/85xx: support sleep feature on QorIQ SoCs with RCPM
2015-03-31 2:35 ` [4/4] " Scott Wood
@ 2015-04-02 11:18 ` chenhui.zhao
0 siblings, 0 replies; 15+ messages in thread
From: chenhui.zhao @ 2015-04-02 11:18 UTC (permalink / raw)
To: Scott Wood; +Cc: linuxppc-dev, devicetree, linux-kernel, Jason.Jin
________________________________________
From: Wood Scott-B07421
Sent: Tuesday, March 31, 2015 10:35
To: Zhao Chenhui-B35336
Cc: linuxppc-dev@lists.ozlabs.org; devicetree@vger.kernel.org; linux-kernel@vger.kernel.org; Jin Zhengxiong-R64188
Subject: Re: [4/4] powerpc/85xx: support sleep feature on QorIQ SoCs with RCPM
On Thu, Mar 26, 2015 at 06:18:15PM +0800, chenhui zhao wrote:
> In sleep mode, the clocks of e500 cores and unused IP blocks are
> turned off. The IP blocks which are allowed to wake up the processor
> are still running.
>
> The sleep mode is equal to the Standby state in Linux. Use the
> command to enter sleep mode:
> echo standby > /sys/power/state
>
> Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
> ---
> arch/powerpc/Kconfig | 3 +-
> arch/powerpc/platforms/85xx/Kconfig | 5 +++
> arch/powerpc/platforms/85xx/Makefile | 1 +
> arch/powerpc/platforms/85xx/qoriq_pm.c | 59 ++++++++++++++++++++++++++++++++++
> arch/powerpc/platforms/86xx/Kconfig | 1 +
> 5 files changed, 67 insertions(+), 2 deletions(-)
> create mode 100644 arch/powerpc/platforms/85xx/qoriq_pm.c
>
> diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
> index 9846c83..162eb53 100644
> --- a/arch/powerpc/Kconfig
> +++ b/arch/powerpc/Kconfig
> @@ -233,7 +233,7 @@ config ARCH_HIBERNATION_POSSIBLE
> config ARCH_SUSPEND_POSSIBLE
> def_bool y
> depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \
> - (PPC_85xx && !PPC_E500MC) || PPC_86xx || PPC_PSERIES \
> + FSL_SOC_BOOKE || PPC_86xx || PPC_PSERIES \
> || 44x || 40x
>
> config PPC_DCR_NATIVE
> @@ -747,7 +747,6 @@ config FSL_PCI
>
> config FSL_PMC
> bool
> - default y
> depends on SUSPEND && (PPC_85xx || PPC_86xx)
Get rid of this depends line if you're going to use select instead.
> +static int qoriq_suspend_valid(suspend_state_t state)
> +{
> + unsigned int pm_modes;
> +
> + pm_modes = qoriq_pm_ops->get_pm_modes();
> +
> + if ((state == PM_SUSPEND_STANDBY) && (pm_modes & FSL_PM_SLEEP))
> + return 1;
Unnecessary parentheses around ==
-Scott
[chenhui] OK
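That is:

        if (state == PM_SUSPEND_STANDBY && (pm_modes & FSL_PM_SLEEP))
                return 1;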
* Re: [2/4] powerpc/rcpm: add RCPM driver
2015-04-02 10:33 ` chenhui.zhao
@ 2015-04-02 15:50 ` Scott Wood
0 siblings, 0 replies; 15+ messages in thread
From: Scott Wood @ 2015-04-02 15:50 UTC (permalink / raw)
To: Zhao Chenhui-B35336
Cc: linuxppc-dev, devicetree, linux-kernel, Jin Zhengxiong-R64188
On Thu, 2015-04-02 at 05:33 -0500, Zhao Chenhui-B35336 wrote:
> > +static int rcpm_v2_plat_enter_state(int state)
> > +{
> > + u32 *pmcsr_reg = &rcpm_v2_regs->powmgtcsr;
> > + int ret = 0;
> > + int result;
> > +
> > + switch (state) {
> > + case PLAT_PM_LPM20:
> > + /* clear previous LPM20 status */
> > + setbits32(pmcsr_reg, RCPM_POWMGTCSR_P_LPM20_ST);
>
> How would the bit be set when you enter here, given that you wait for it
> to clear when leaving?
>
> [chenhui] Actually, the bit is not used by software. Just follow the instruction in RM.
Sorry, I missed the "_P_" and thought it was RCPM_POWMGTCSR_LPM20_ST.
-Scott
* Re: [3/4] powerpc: support CPU hotplug for e500mc, e5500 and e6500
2015-04-02 11:16 ` chenhui.zhao
@ 2015-04-02 16:03 ` Scott Wood
2015-04-03 2:54 ` chenhui.zhao
0 siblings, 1 reply; 15+ messages in thread
From: Scott Wood @ 2015-04-02 16:03 UTC (permalink / raw)
To: Zhao Chenhui-B35336
Cc: linuxppc-dev, devicetree, linux-kernel, Jin Zhengxiong-R64188
On Thu, 2015-04-02 at 06:16 -0500, Zhao Chenhui-B35336 wrote:
>
> ________________________________________
> From: Wood Scott-B07421
> Sent: Tuesday, March 31, 2015 10:07
> To: Zhao Chenhui-B35336
> Cc: linuxppc-dev@lists.ozlabs.org; devicetree@vger.kernel.org; linux-kernel@vger.kernel.org; Jin Zhengxiong-R64188
> Subject: Re: [3/4] powerpc: support CPU hotplug for e500mc, e5500 and e6500
>
> On Thu, Mar 26, 2015 at 06:18:14PM +0800, chenhui zhao wrote:
> > @@ -189,16 +193,14 @@ _GLOBAL(fsl_secondary_thread_init)
> > isync
> >
> > /*
> > - * Fix PIR to match the linear numbering in the device tree.
> > - *
> > - * On e6500, the reset value of PIR uses the low three bits for
> > - * the thread within a core, and the upper bits for the core
> > - * number. There are two threads per core, so shift everything
> > - * but the low bit right by two bits so that the cpu numbering is
> > - * continuous.
>
> Why are you getting rid of this? If it's to avoid doing it twice on the
> same thread, in my work-in-progress kexec patches I instead check to see
> whether BUCSR has already been set up -- if it has, I assume we've
> already been here.
>
> [chenhui] I didn't delete the branch prediction related code.
I didn't say you did. I'm saying that you can check whether BUCSR has
been set up, to determine whether PIR has already been adjusted, if your
concern is avoiding running this twice on a thread between core resets.
If that's not your concern, then please explain.
> > + /*
> > + * If both threads are offline, reset core to start.
> > + * When core is up, Thread 0 always gets up first,
> > + * so bind the current logical cpu with Thread 0.
> > + */
>
> What if the core is not in a PM state that requires a reset?
> Where does this reset occur?
>
> [chenhui] Reset occurs in the function mpic_reset_core().
>
> > + if (hw_cpu != cpu_first_thread_sibling(hw_cpu)) {
> > + int hw_cpu1, hw_cpu2;
> > +
> > + hw_cpu1 = get_hard_smp_processor_id(primary);
> > + hw_cpu2 = get_hard_smp_processor_id(primary + 1);
> > + set_hard_smp_processor_id(primary, hw_cpu2);
> > + set_hard_smp_processor_id(primary + 1, hw_cpu1);
> > + /* get new physical cpu id */
> > + hw_cpu = get_hard_smp_processor_id(nr);
>
> Why are you swapping the hard smp ids?
>
> [chenhui] For example, Core1 has two threads, Thread0 and Thread1. In a normal boot, Thread0 is CPU2 and Thread1 is CPU3.
> But if CPU2 and CPU3 are both off and the user wants CPU3 up first, we need to treat Thread0 as CPU3 and Thread1 as CPU2,
> given the limitation that after the core is reset only Thread0 comes up; Thread0 then kicks up Thread1.
There's no need for this. I have booting from a thread1, and having it
kick its thread0, working locally without messing with the hwid/cpu
mapping.
> > @@ -252,11 +340,7 @@ static int smp_85xx_kick_cpu(int nr)
> > spin_table = phys_to_virt(*cpu_rel_addr);
> >
> > local_irq_save(flags);
> > -#ifdef CONFIG_PPC32
> > #ifdef CONFIG_HOTPLUG_CPU
> > - /* Corresponding to generic_set_cpu_dead() */
> > - generic_set_cpu_up(nr);
> > -
>
> Why did you move this?
>
> [chenhui] It would be better to set this after CPU is really up.
Please make it a separate patch with an explanation.
-Scott
* Re: [3/4] powerpc: support CPU hotplug for e500mc, e5500 and e6500
2015-04-02 16:03 ` Scott Wood
@ 2015-04-03 2:54 ` chenhui.zhao
0 siblings, 0 replies; 15+ messages in thread
From: chenhui.zhao @ 2015-04-03 2:54 UTC (permalink / raw)
To: Scott Wood; +Cc: linuxppc-dev, devicetree, linux-kernel, Jason.Jin
________________________________________
From: Wood Scott-B07421
Sent: Friday, April 3, 2015 0:03
To: Zhao Chenhui-B35336
Cc: linuxppc-dev@lists.ozlabs.org; devicetree@vger.kernel.org; linux-kernel@vger.kernel.org; Jin Zhengxiong-R64188
Subject: Re: [3/4] powerpc: support CPU hotplug for e500mc, e5500 and e6500
On Thu, 2015-04-02 at 06:16 -0500, Zhao Chenhui-B35336 wrote:
>
> ________________________________________
> From: Wood Scott-B07421
> Sent: Tuesday, March 31, 2015 10:07
> To: Zhao Chenhui-B35336
> Cc: linuxppc-dev@lists.ozlabs.org; devicetree@vger.kernel.org; linux-kernel@vger.kernel.org; Jin Zhengxiong-R64188
> Subject: Re: [3/4] powerpc: support CPU hotplug for e500mc, e5500 and e6500
>
> On Thu, Mar 26, 2015 at 06:18:14PM +0800, chenhui zhao wrote:
> > @@ -189,16 +193,14 @@ _GLOBAL(fsl_secondary_thread_init)
> > isync
> >
> > /*
> > - * Fix PIR to match the linear numbering in the device tree.
> > - *
> > - * On e6500, the reset value of PIR uses the low three bits for
> > - * the thread within a core, and the upper bits for the core
> > - * number. There are two threads per core, so shift everything
> > - * but the low bit right by two bits so that the cpu numbering is
> > - * continuous.
>
> Why are you getting rid of this? If it's to avoid doing it twice on the
> same thread, in my work-in-progress kexec patches I instead check to see
> whether BUCSR has already been set up -- if it has, I assume we've
> already been here.
>
> [chenhui] I didn't delete the branch prediction related code.
I didn't say you did. I'm saying that you can check whether BUCSR has
been set up, to determine whether PIR has already been adjusted, if your
concern is avoiding running this twice on a thread between core resets.
If that's not your concern, then please explain.
[chenhui] If there is no need to change PIR for CPU hotplug, I will change the code as you suggested.
> > + /*
> > + * If both threads are offline, reset core to start.
> > + * When core is up, Thread 0 always gets up first,
> > + * so bind the current logical cpu with Thread 0.
> > + */
>
> What if the core is not in a PM state that requires a reset?
> Where does this reset occur?
>
> [chenhui] Reset occurs in the function mpic_reset_core().
>
> > + if (hw_cpu != cpu_first_thread_sibling(hw_cpu)) {
> > + int hw_cpu1, hw_cpu2;
> > +
> > + hw_cpu1 = get_hard_smp_processor_id(primary);
> > + hw_cpu2 = get_hard_smp_processor_id(primary + 1);
> > + set_hard_smp_processor_id(primary, hw_cpu2);
> > + set_hard_smp_processor_id(primary + 1, hw_cpu1);
> > + /* get new physical cpu id */
> > + hw_cpu = get_hard_smp_processor_id(nr);
>
> Why are you swapping the hard smp ids?
>
> [chenhui] For example, Core1 has two threads, Thread0 and Thread1. In a normal boot, Thread0 is CPU2 and Thread1 is CPU3.
> But if CPU2 and CPU3 are both off and the user wants CPU3 up first, we need to treat Thread0 as CPU3 and Thread1 as CPU2,
> given the limitation that after the core is reset only Thread0 comes up; Thread0 then kicks up Thread1.
There's no need for this. I have booting from a thread1, and having it
kick its thread0, working locally without messing with the hwid/cpu
mapping.
[chenhui] Great. If you have completed your patches, can we merge our code?
> > @@ -252,11 +340,7 @@ static int smp_85xx_kick_cpu(int nr)
> > spin_table = phys_to_virt(*cpu_rel_addr);
> >
> > local_irq_save(flags);
> > -#ifdef CONFIG_PPC32
> > #ifdef CONFIG_HOTPLUG_CPU
> > - /* Corresponding to generic_set_cpu_dead() */
> > - generic_set_cpu_up(nr);
> > -
>
> Why did you move this?
>
> [chenhui] It would be better to set this after CPU is really up.
Please make it a separate patch with an explanation.
-Scott
[chenhui] OK.
Thread overview: 15+ messages
2015-03-26 10:18 [PATCH 1/4] powerpc/cache: add cache flush operation for various e500 Chenhui Zhao
2015-03-26 10:18 ` [PATCH 2/4] powerpc/rcpm: add RCPM driver Chenhui Zhao
2015-03-31 1:30 ` [2/4] " Scott Wood
2015-04-02 10:33 ` chenhui.zhao
2015-04-02 15:50 ` Scott Wood
2015-03-26 10:18 ` [PATCH 3/4] powerpc: support CPU hotplug for e500mc, e5500 and e6500 Chenhui Zhao
2015-03-31 2:07 ` [3/4] " Scott Wood
2015-04-02 11:16 ` chenhui.zhao
2015-04-02 16:03 ` Scott Wood
2015-04-03 2:54 ` chenhui.zhao
2015-03-26 10:18 ` [PATCH 4/4] powerpc/85xx: support sleep feature on QorIQ SoCs with RCPM Chenhui Zhao
2015-03-31 2:35 ` [4/4] " Scott Wood
2015-04-02 11:18 ` chenhui.zhao
2015-03-31 1:10 ` [1/4] powerpc/cache: add cache flush operation for various e500 Scott Wood
2015-04-02 10:14 ` chenhui.zhao