LKML Archive on lore.kernel.org
help / color / mirror / Atom feed
* [PATCH 0 of 4] x86: some more patches
@ 2008-01-15 22:17 Jeremy Fitzhardinge
2008-01-15 22:17 ` [PATCH 1 of 4] x86: refactor mmu ops in paravirt.h Jeremy Fitzhardinge
` (4 more replies)
0 siblings, 5 replies; 28+ messages in thread
From: Jeremy Fitzhardinge @ 2008-01-15 22:17 UTC (permalink / raw)
To: Ingo Molnar; +Cc: LKML, Andi Kleen, Glauber de Oliveira Costa, Jan Beulich
Hi Ingo,
More patches:
- rearrange paravirt.h to remove duplicate code, and to make it easy
to drop in 4th level pagetable functions.
- fix a warning I got from clear_bit in pgtable.h
- fix up some bogosity in pte_modify
- mask NX from pte_pfn
The last two may help with the problem that Andi's seeing.
Thanks,
J
^ permalink raw reply [flat|nested] 28+ messages in thread
* [PATCH 1 of 4] x86: refactor mmu ops in paravirt.h
2008-01-15 22:17 [PATCH 0 of 4] x86: some more patches Jeremy Fitzhardinge
@ 2008-01-15 22:17 ` Jeremy Fitzhardinge
2008-01-15 22:17 ` [PATCH 2 of 4] x86: fix warning Jeremy Fitzhardinge
` (3 subsequent siblings)
4 siblings, 0 replies; 28+ messages in thread
From: Jeremy Fitzhardinge @ 2008-01-15 22:17 UTC (permalink / raw)
To: Ingo Molnar
Cc: LKML, Andi Kleen, Glauber de Oliveira Costa, Jan Beulich,
Eduardo Habkost
Rearrange mmu ops in paravirt.h to remove duplication and to make
adding 4th level operations simple.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
---
include/asm-x86/page.h | 10 +
include/asm-x86/paravirt.h | 223 +++++++++++++++++++++++++-------------------
2 files changed, 137 insertions(+), 96 deletions(-)
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -91,6 +91,11 @@ static inline pudval_t native_pud_val(pu
}
#else /* PAGETABLE_LEVELS == 3 */
#include <asm-generic/pgtable-nopud.h>
+
+static inline pudval_t native_pud_val(pud_t pud)
+{
+ return native_pgd_val(pud.pgd);
+}
#endif /* PAGETABLE_LEVELS == 4 */
typedef struct { pmdval_t pmd; } pmd_t;
@@ -106,6 +111,11 @@ static inline pmdval_t native_pmd_val(pm
}
#else /* PAGETABLE_LEVELS == 2 */
#include <asm-generic/pgtable-nopmd.h>
+
+static inline pmdval_t native_pmd_val(pmd_t pmd)
+{
+ return native_pgd_val(pmd.pud.pgd);
+}
#endif /* PAGETABLE_LEVELS >= 3 */
static inline pte_t native_make_pte(pteval_t val)
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -230,28 +230,31 @@ struct pv_mmu_ops {
void (*pte_update_defer)(struct mm_struct *mm,
unsigned long addr, pte_t *ptep);
+ pteval_t (*pte_val)(pte_t);
+ pte_t (*make_pte)(pteval_t pte);
+
+ pgdval_t (*pgd_val)(pgd_t);
+ pgd_t (*make_pgd)(pgdval_t pgd);
+
+#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
- void (*set_pud)(pud_t *pudp, pud_t pudval);
void (*pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void (*pmd_clear)(pmd_t *pmdp);
+#endif /* X86_PAE */
- unsigned long long (*pte_val)(pte_t);
- unsigned long long (*pmd_val)(pmd_t);
- unsigned long long (*pgd_val)(pgd_t);
+ void (*set_pud)(pud_t *pudp, pud_t pudval);
- pte_t (*make_pte)(unsigned long long pte);
- pmd_t (*make_pmd)(unsigned long long pmd);
- pgd_t (*make_pgd)(unsigned long long pgd);
-#else
- unsigned long (*pte_val)(pte_t);
- unsigned long (*pgd_val)(pgd_t);
+ pmdval_t (*pmd_val)(pmd_t);
+ pmd_t (*make_pmd)(pmdval_t pmd);
- pte_t (*make_pte)(unsigned long pte);
- pgd_t (*make_pgd)(unsigned long pgd);
-#endif
+#if PAGETABLE_LEVELS == 4
+ pudval_t (*pud_val)(pud_t);
+ pud_t (*make_pud)(pudval_t pud);
+#endif /* PAGETABLE_LEVELS == 4 */
+#endif /* PAGETABLE_LEVELS >= 3 */
#ifdef CONFIG_HIGHPTE
void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
@@ -916,57 +919,134 @@ static inline void pte_update_defer(stru
PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}
-#ifdef CONFIG_X86_PAE
-static inline pte_t __pte(unsigned long long val)
+static inline pteval_t pte_val(pte_t pte)
{
- unsigned long long ret = PVOP_CALL2(unsigned long long,
- pv_mmu_ops.make_pte,
- val, val >> 32);
+ pteval_t ret;
+
+ if (sizeof(pteval_t) > sizeof(unsigned long))
+ ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
+ pte.pte, (u64)pte.pte >> 32);
+ else
+ ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val, pte.pte);
+
+ return ret;
+}
+
+static inline pte_t __pte(pteval_t val)
+{
+ pteval_t ret;
+
+ if (sizeof(pteval_t) > sizeof(unsigned long))
+ ret = PVOP_CALL2(pteval_t, pv_mmu_ops.make_pte,
+ val, (u64)val >> 32);
+ else
+ ret = PVOP_CALL1(pteval_t, pv_mmu_ops.make_pte, val);
+
return (pte_t) { .pte = ret };
}
-static inline pmd_t __pmd(unsigned long long val)
+static inline pgdval_t pgd_val(pgd_t pgd)
{
- return (pmd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pmd,
- val, val >> 32) };
+ pgdval_t ret;
+
+ if (sizeof(pgdval_t) > sizeof(unsigned long))
+ ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
+ pgd.pgd, (u64)pgd.pgd >> 32);
+ else
+ ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val, pgd.pgd);
+
+ return ret;
}
-static inline pgd_t __pgd(unsigned long long val)
+static inline pgd_t __pgd(pgdval_t val)
{
- return (pgd_t) { PVOP_CALL2(unsigned long long, pv_mmu_ops.make_pgd,
- val, val >> 32) };
+ pgdval_t ret;
+
+ if (sizeof(pgdval_t) > sizeof(unsigned long))
+ ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
+ val, (u64)val >> 32);
+ else
+ ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd, val);
+
+ return (pgd_t) { .pgd = ret };
}
-static inline unsigned long long pte_val(pte_t x)
+static inline void set_pte(pte_t *ptep, pte_t pte)
{
- return PVOP_CALL2(unsigned long long, pv_mmu_ops.pte_val,
- x.pte_low, x.pte_high);
-}
+ pteval_t val = native_pte_val(pte);
-static inline unsigned long long pmd_val(pmd_t x)
-{
- return PVOP_CALL2(unsigned long long, pv_mmu_ops.pmd_val,
- x.pmd, x.pmd >> 32);
-}
-
-static inline unsigned long long pgd_val(pgd_t x)
-{
- return PVOP_CALL2(unsigned long long, pv_mmu_ops.pgd_val,
- x.pgd, x.pgd >> 32);
-}
-
-static inline void set_pte(pte_t *ptep, pte_t pteval)
-{
- PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, pteval.pte_low, pteval.pte_high);
+ if (sizeof(pteval_t) > sizeof(unsigned long))
+ PVOP_VCALL3(pv_mmu_ops.set_pte, ptep, val, (u64)val >> 32);
+ else
+ PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, val);
}
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
+ pte_t *ptep, pte_t pte)
{
- /* 5 arg words */
- pv_mmu_ops.set_pte_at(mm, addr, ptep, pteval);
+ pteval_t val = native_pte_val(pte);
+
+ if (sizeof(pteval_t) > sizeof(unsigned long))
+ /* 5 arg words */
+ pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
+ else
+ PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, val);
}
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+ pmdval_t val = native_pmd_val(pmd);
+
+ if (sizeof(pmdval_t) > sizeof(unsigned long))
+ PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp,
+ val, (u64)val >> 32);
+ else
+ PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
+}
+
+#if PAGETABLE_LEVELS >= 3
+static inline pmdval_t pmd_val(pmd_t pmd)
+{
+ pmdval_t ret;
+
+ if (sizeof(pmdval_t) > sizeof(unsigned long))
+ ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
+ pmd.pmd, (u64)pmd.pmd >> 32);
+ else
+ ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val, pmd.pmd);
+
+ return ret;
+}
+
+static inline pmd_t __pmd(pmdval_t val)
+{
+ pmdval_t ret;
+
+ if (sizeof(pmdval_t) > sizeof(unsigned long))
+ ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
+ val, (u64)val >> 32);
+ else
+ ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd, val);
+
+ return (pmd_t) { .pmd = ret };
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+ pudval_t val = native_pud_val(pud);
+
+ if (sizeof(pudval_t) > sizeof(unsigned long))
+ PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
+ val, (u64)val >> 32);
+ else
+ PVOP_VCALL2(pv_mmu_ops.set_pud, pudp, val);
+}
+
+#if PAGETABLE_LEVELS == 4
+#endif /* PAGETABLE_LEVELS == 4 */
+#endif /* PAGETABLE_LEVELS >= 3 */
+
+#ifdef CONFIG_X86_PAE
static inline void set_pte_atomic(pte_t *ptep, pte_t pteval)
{
PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
@@ -980,18 +1060,6 @@ static inline void set_pte_present(struc
pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}
-static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
- PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp,
- pmdval.pmd, pmdval.pmd >> 32);
-}
-
-static inline void set_pud(pud_t *pudp, pud_t pudval)
-{
- PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
- pudval.pgd.pgd, pudval.pgd.pgd >> 32);
-}
-
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
@@ -1001,45 +1069,7 @@ static inline void pmd_clear(pmd_t *pmdp
{
PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
-
#else /* !CONFIG_X86_PAE */
-
-static inline pte_t __pte(unsigned long val)
-{
- return (pte_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pte, val) };
-}
-
-static inline pgd_t __pgd(unsigned long val)
-{
- return (pgd_t) { PVOP_CALL1(unsigned long, pv_mmu_ops.make_pgd, val) };
-}
-
-static inline unsigned long pte_val(pte_t x)
-{
- return PVOP_CALL1(unsigned long, pv_mmu_ops.pte_val, x.pte_low);
-}
-
-static inline unsigned long pgd_val(pgd_t x)
-{
- return PVOP_CALL1(unsigned long, pv_mmu_ops.pgd_val, x.pgd);
-}
-
-static inline void set_pte(pte_t *ptep, pte_t pteval)
-{
- PVOP_VCALL2(pv_mmu_ops.set_pte, ptep, pteval.pte_low);
-}
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
-{
- PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pteval.pte_low);
-}
-
-static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval)
-{
- PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, pmdval.pud.pgd.pgd);
-}
-
static inline void pmd_clear(pmd_t *pmdp)
{
set_pmd(pmdp, __pmd(0));
@@ -1061,6 +1091,7 @@ static inline void set_pte_present(struc
set_pte(ptep, pte);
}
#endif /* CONFIG_X86_PAE */
+
/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
^ permalink raw reply [flat|nested] 28+ messages in thread
* [PATCH 2 of 4] x86: fix warning
2008-01-15 22:17 [PATCH 0 of 4] x86: some more patches Jeremy Fitzhardinge
2008-01-15 22:17 ` [PATCH 1 of 4] x86: refactor mmu ops in paravirt.h Jeremy Fitzhardinge
@ 2008-01-15 22:17 ` Jeremy Fitzhardinge
2008-01-15 22:17 ` [PATCH 3 of 4] x86: clean up pte_modify Jeremy Fitzhardinge
` (2 subsequent siblings)
4 siblings, 0 replies; 28+ messages in thread
From: Jeremy Fitzhardinge @ 2008-01-15 22:17 UTC (permalink / raw)
To: Ingo Molnar; +Cc: LKML, Andi Kleen, Glauber de Oliveira Costa, Jan Beulich
&ptep->pte isn't always an unsigned long *, so cast it to avoid a warning.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
---
include/asm-x86/pgtable.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -340,7 +340,7 @@ static inline pte_t ptep_get_and_clear_f
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
- clear_bit(_PAGE_BIT_RW, &ptep->pte);
+ clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
pte_update(mm, addr, ptep);
}
^ permalink raw reply [flat|nested] 28+ messages in thread
* [PATCH 3 of 4] x86: clean up pte_modify
2008-01-15 22:17 [PATCH 0 of 4] x86: some more patches Jeremy Fitzhardinge
2008-01-15 22:17 ` [PATCH 1 of 4] x86: refactor mmu ops in paravirt.h Jeremy Fitzhardinge
2008-01-15 22:17 ` [PATCH 2 of 4] x86: fix warning Jeremy Fitzhardinge
@ 2008-01-15 22:17 ` Jeremy Fitzhardinge
2008-01-16 0:43 ` Andi Kleen
2008-01-15 22:17 ` [PATCH 4 of 4] x86: mask NX from pte_pfn Jeremy Fitzhardinge
2008-01-15 22:35 ` [PATCH 0 of 4] x86: some more patches Ingo Molnar
4 siblings, 1 reply; 28+ messages in thread
From: Jeremy Fitzhardinge @ 2008-01-15 22:17 UTC (permalink / raw)
To: Ingo Molnar; +Cc: LKML, Andi Kleen, Glauber de Oliveira Costa, Jan Beulich
pte_modify() got mushed in an apparent mismerge. Fix it up.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
---
include/asm-x86/pgtable.h | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -176,14 +176,11 @@ static inline pte_t pte_modify(pte_t pte
{
pteval_t val = pte_val(pte);
- val &= _PAGE_CHG_MASK;
- val |= pgprot_val(newprot);
-
/*
* Chop off the NX bit (if present), and add the NX portion of
* the newprot (if present):
*/
- val &= ~_PAGE_NX;
+ val &= _PAGE_CHG_MASK & ~_PAGE_NX;
val |= pgprot_val(newprot) & __supported_pte_mask;
return __pte(val);
^ permalink raw reply [flat|nested] 28+ messages in thread
* [PATCH 4 of 4] x86: mask NX from pte_pfn
2008-01-15 22:17 [PATCH 0 of 4] x86: some more patches Jeremy Fitzhardinge
` (2 preceding siblings ...)
2008-01-15 22:17 ` [PATCH 3 of 4] x86: clean up pte_modify Jeremy Fitzhardinge
@ 2008-01-15 22:17 ` Jeremy Fitzhardinge
2008-01-18 13:52 ` Hugh Dickins
2008-01-15 22:35 ` [PATCH 0 of 4] x86: some more patches Ingo Molnar
4 siblings, 1 reply; 28+ messages in thread
From: Jeremy Fitzhardinge @ 2008-01-15 22:17 UTC (permalink / raw)
To: Ingo Molnar; +Cc: LKML, Andi Kleen, Glauber de Oliveira Costa, Jan Beulich
In 32-bit PAE, mask NX from pte_pfn, since it isn't part of the PFN.
This code is due for unification anyway, but this fixes a latent bug.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
---
include/asm-x86/pgtable-3level.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -139,7 +139,7 @@ static inline int pte_none(pte_t pte)
static inline unsigned long pte_pfn(pte_t pte)
{
- return pte_val(pte) >> PAGE_SHIFT;
+ return (pte_val(pte) >> PAGE_SHIFT) & ~_PAGE_NX;
}
/*
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-15 22:17 [PATCH 0 of 4] x86: some more patches Jeremy Fitzhardinge
` (3 preceding siblings ...)
2008-01-15 22:17 ` [PATCH 4 of 4] x86: mask NX from pte_pfn Jeremy Fitzhardinge
@ 2008-01-15 22:35 ` Ingo Molnar
2008-01-15 23:28 ` Jeremy Fitzhardinge
4 siblings, 1 reply; 28+ messages in thread
From: Ingo Molnar @ 2008-01-15 22:35 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: LKML, Andi Kleen, Glauber de Oliveira Costa, Jan Beulich
* Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> Hi Ingo,
>
> More patches:
>
> - rearrange paravirt.h to remove duplicate code, and to make it easy
> to drop in 4th level pagetable functions.
> - fix a warning I got from clear_bit in pgtable.h
> - fix up some bogosity in pte_modify
> - mask NX from pte_pfn
>
> The last two may help with the problem that Andi's seeing.
unfortunately they dont solve it:
[ 92.042586] Freeing unused kernel memory: 328k freed
[ 92.091838] khelper used greatest stack depth: 6244 bytes left
[ 92.281738] init[1]: segfault at 00000004 ip 49471cbb sp bff8dbb0 error 4
[ 92.288761] init[1]: segfault at 00000004 ip 49471cbb sp bff8dbb0 error 4
[ 92.295763] init[1]: segfault at 00000004 ip 49471cbb sp bff8dbb0 error 4
[...]
[ 97.312484] printk: 611046 messages suppressed.
32-bit, PAE and RAM above 4GB seems to be enough to trigger that bug.
Ingo
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-15 22:35 ` [PATCH 0 of 4] x86: some more patches Ingo Molnar
@ 2008-01-15 23:28 ` Jeremy Fitzhardinge
2008-01-16 0:44 ` Andi Kleen
2008-01-16 14:22 ` Ingo Molnar
0 siblings, 2 replies; 28+ messages in thread
From: Jeremy Fitzhardinge @ 2008-01-15 23:28 UTC (permalink / raw)
To: Ingo Molnar; +Cc: LKML, Andi Kleen, Glauber de Oliveira Costa, Jan Beulich
Ingo Molnar wrote:
> unfortunately they dont solve it:
>
> [ 92.042586] Freeing unused kernel memory: 328k freed
> [ 92.091838] khelper used greatest stack depth: 6244 bytes left
> [ 92.281738] init[1]: segfault at 00000004 ip 49471cbb sp bff8dbb0 error 4
> [ 92.288761] init[1]: segfault at 00000004 ip 49471cbb sp bff8dbb0 error 4
> [ 92.295763] init[1]: segfault at 00000004 ip 49471cbb sp bff8dbb0 error 4
> [...]
> [ 97.312484] printk: 611046 messages suppressed.
>
> 32-bit, PAE and RAM above 4GB seems to be enough to trigger that bug.
>
Bum. We established that once you fix the accessors+sign extension bug,
there's another bug somewhere later on in the series. Any chance you
could move "x86/pgtable: fix constant sign extension problem" to be just
after "x86/pgtable: unify pagetable accessors" and bisect the following
patches?
Thanks,
J
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 3 of 4] x86: clean up pte_modify
2008-01-15 22:17 ` [PATCH 3 of 4] x86: clean up pte_modify Jeremy Fitzhardinge
@ 2008-01-16 0:43 ` Andi Kleen
0 siblings, 0 replies; 28+ messages in thread
From: Andi Kleen @ 2008-01-16 0:43 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Ingo Molnar, LKML, Glauber de Oliveira Costa, Jan Beulich
On Tuesday 15 January 2008 23:17:08 Jeremy Fitzhardinge wrote:
> pte_modify() got mushed in an apparent mismerge. Fix it up.
Note it's not just a clean up -- it would actually drop bits of the PFN.
-Andi
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-15 23:28 ` Jeremy Fitzhardinge
@ 2008-01-16 0:44 ` Andi Kleen
2008-01-16 7:25 ` Ingo Molnar
2008-01-16 14:22 ` Ingo Molnar
1 sibling, 1 reply; 28+ messages in thread
From: Andi Kleen @ 2008-01-16 0:44 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Ingo Molnar, LKML, Glauber de Oliveira Costa, Jan Beulich
On Wednesday 16 January 2008 00:28:23 Jeremy Fitzhardinge wrote:
> Ingo Molnar wrote:
> > unfortunately they dont solve it:
> >
> > [ 92.042586] Freeing unused kernel memory: 328k freed
> > [ 92.091838] khelper used greatest stack depth: 6244 bytes left
> > [ 92.281738] init[1]: segfault at 00000004 ip 49471cbb sp bff8dbb0 error 4
> > [ 92.288761] init[1]: segfault at 00000004 ip 49471cbb sp bff8dbb0 error 4
> > [ 92.295763] init[1]: segfault at 00000004 ip 49471cbb sp bff8dbb0 error 4
> > [...]
> > [ 97.312484] printk: 611046 messages suppressed.
> >
> > 32-bit, PAE and RAM above 4GB seems to be enough to trigger that bug.
> >
>
> Bum. We established that once you fix the accessors+sign extension bug,
> there's another bug somewhere later on in the series. Any chance you
> could move "x86/pgtable: fix constant sign extension problem" to be just
> after "x86/pgtable: unify pagetable accessors" and bisect the following
> patches?
Bisecting is somewhat difficult because there seem to be quite a lot of problems
in the current git-x86 stream. I found that Jan's ioremap fix also
causes silent hangs here during earlier bisecting (not sure why).
-Andi
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-16 0:44 ` Andi Kleen
@ 2008-01-16 7:25 ` Ingo Molnar
0 siblings, 0 replies; 28+ messages in thread
From: Ingo Molnar @ 2008-01-16 7:25 UTC (permalink / raw)
To: Andi Kleen
Cc: Jeremy Fitzhardinge, LKML, Glauber de Oliveira Costa, Jan Beulich
* Andi Kleen <ak@suse.de> wrote:
> [...] I found that Jan's ioremap fix also causes silent hangs here
> during earlier bisecting (not sure why)
please send a fuller bugreport - all these problems on your testbox
might be interrelated. Jan's patch is undone in a later part of the
series so it's a bisection artifact. I've fixed that up.
Ingo
------------------->
Subject: x86: fix 64-bit ioremap()
From: Jan Beulich <jbeulich@novell.com>
> Yeah, that may be true, but this particular tree is weird, and I'm trying
> to understand what's going on here. Specifically, 64-bit ioremap()s
> *don't* set _PAGE_GLOBAL, which appears to be an accident resulting from
> the strange definitions of __PAGE_KERNEL_* vs PAGE_KERNEL_*.
ioremap() should set G agreed.
> For example, ioremap_64.c:__ioremap() creates a vma for the io mapping, and
> explicitly sets _PAGE_GLOBAL in the vma's version of pgprot - but then it
> calls ioremap_page_range() to actually create the mapping, which ends up
> making a non-global mapping, because its rolling its own version of
> PAGE_KERNEL by using pgprot(__PAGE_KERNEL) - which is not the actual
> definition of PAGE_KERNEL.
That should not really matter because ioremap_change_attr()->c_p_a is only called
when flags is != 0 and that means it is already different from PAGE_KERNEL.
>
> I think there's a bug around here, but I think its currently being hidden
There's one Jan pointed out: iounmap does not subtract the guard page size
so it ends up resetting one page too much. That is probably what causes your
problem. But again you should be passing in G in the first place.
Additionally I found it necessary to fix ioremap_64.c's use of
change_page_attr_addr():
[ mingo@elte.hu: fixed coding style errors ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
arch/x86/mm/ioremap_64.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
Index: linux-x86.q/arch/x86/mm/ioremap_64.c
===================================================================
--- linux-x86.q.orig/arch/x86/mm/ioremap_64.c
+++ linux-x86.q/arch/x86/mm/ioremap_64.c
@@ -45,10 +45,12 @@ ioremap_change_attr(unsigned long phys_a
unsigned long vaddr = (unsigned long) __va(phys_addr);
/*
- * Must use a address here and not struct page because the phys addr
- * can be a in hole between nodes and not have an memmap entry.
+ * Must use an address here and not struct page because the
+ * phys addr can be a in hole between nodes and not have an
+ * memmap entry:
*/
- err = change_page_attr_addr(vaddr,npages,__pgprot(__PAGE_KERNEL|flags));
+ err = change_page_attr_addr(vaddr, npages,
+ MAKE_GLOBAL(__PAGE_KERNEL|flags));
if (!err)
global_flush_tlb();
}
@@ -181,7 +183,7 @@ void iounmap(volatile void __iomem *addr
/* Reset the direct mapping. Can block */
if (p->flags >> 20)
- ioremap_change_attr(p->phys_addr, p->size, 0);
+ ioremap_change_attr(p->phys_addr, get_vm_area_size(p), 0);
/* Finally remove it */
o = remove_vm_area((void *)addr);
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-15 23:28 ` Jeremy Fitzhardinge
2008-01-16 0:44 ` Andi Kleen
@ 2008-01-16 14:22 ` Ingo Molnar
2008-01-16 14:44 ` Andi Kleen
1 sibling, 1 reply; 28+ messages in thread
From: Ingo Molnar @ 2008-01-16 14:22 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: LKML, Andi Kleen, Glauber de Oliveira Costa, Jan Beulich
* Jeremy Fitzhardinge <jeremy@goop.org> wrote:
> Ingo Molnar wrote:
>> unfortunately they dont solve it:
>>
>> [ 92.042586] Freeing unused kernel memory: 328k freed
>> [ 92.091838] khelper used greatest stack depth: 6244 bytes left
>> [ 92.281738] init[1]: segfault at 00000004 ip 49471cbb sp bff8dbb0 error 4
>> [ 92.288761] init[1]: segfault at 00000004 ip 49471cbb sp bff8dbb0 error 4
>> [ 92.295763] init[1]: segfault at 00000004 ip 49471cbb sp bff8dbb0 error 4
>> [...]
>> [ 97.312484] printk: 611046 messages suppressed.
>>
>> 32-bit, PAE and RAM above 4GB seems to be enough to trigger that bug.
>>
>
> Bum. We established that once you fix the accessors+sign extension
> bug, there's another bug somewhere later on in the series. Any chance
> you could move "x86/pgtable: fix constant sign extension problem" to
> be just after "x86/pgtable: unify pagetable accessors" and bisect the
> following patches?
after a day of debugging i finally tracked it down to another
unsigned-type and masking mismatch on PAE, caused by:
Subject: x86: unify pgtable accessors which use supported_pte_mask
From: Jeremy Fitzhardinge <jeremy@goop.org>
the fix is below.
I have to say, i'm less than impressed about the structure of that
patch. The patch too was way too coarse for easy bisection. You did
multiple nontrivial steps in the same patch.
I'm not going to apply such all-in-one patches to x86.git anymore, and
will start straight with another patch you sent yesterday:
Subject: x86: refactor mmu ops in paravirt.h
2 files changed, 129 insertions(+), 88 deletions(-)
If anyone finds a problem in that patch we'll have a hard time figuring
out what went wrong. Please split that patch up properly. To demonstrate
the kind of splitup we need for such patches, i've done a sample splitup
of the following patch in x86.git:
Subject: x86/pgtable: unify pagetable accessors
From: Jeremy Fitzhardinge <jeremy@goop.org>
and turned it into 6 easy-to-review and easy-to-validate patches. Each
step is _provably trivial_, and even if something goes wrong, it's easy
to figure out where the problem comes from. At each step, document in
the patch description whether the patch is intended to cause any code
changes or not.
Ingo
Index: linux-x86.q/include/asm-x86/page.h
===================================================================
--- linux-x86.q.orig/include/asm-x86/page.h
+++ linux-x86.q/include/asm-x86/page.h
@@ -11,7 +11,7 @@
#ifdef __KERNEL__
#define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
-#define PTE_MASK PHYSICAL_PAGE_MASK
+#define PTE_MASK (_AT(long, PHYSICAL_PAGE_MASK))
#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-16 14:22 ` Ingo Molnar
@ 2008-01-16 14:44 ` Andi Kleen
2008-01-16 14:54 ` Ingo Molnar
0 siblings, 1 reply; 28+ messages in thread
From: Andi Kleen @ 2008-01-16 14:44 UTC (permalink / raw)
To: Ingo Molnar
Cc: Jeremy Fitzhardinge, LKML, Glauber de Oliveira Costa, Jan Beulich
> Index: linux-x86.q/include/asm-x86/page.h
> ===================================================================
> --- linux-x86.q.orig/include/asm-x86/page.h
> +++ linux-x86.q/include/asm-x86/page.h
> @@ -11,7 +11,7 @@
> #ifdef __KERNEL__
>
> #define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
I haven't tested yet, but we looked at that one earlier and I thought
it was ok because
#define __PHYSICAL_MASK _AT(phys_addr_t, (_AC(1,ULL) << __PHYSICAL_MASK_SHIFT) - 1)
and
typedef u64 phys_addr_t;
for PAE. So the expression above should have been already 64bit.
> -#define PTE_MASK PHYSICAL_PAGE_MASK
> +#define PTE_MASK (_AT(long, PHYSICAL_PAGE_MASK))
That would mean it is 32bit again because 32bit long is 32bit, but on 32bit
PAE PTE_MASK definitely needs to be 64bit.
So I would be surprised if the patch works.
The problem seems to be rather that PAGE_MASK does not include the bits above 32bit.
Something like
#define PHYSICAL_PAGE_MASK (__PHYSICAL_MASK & ~(phys_addr_t)(PAGE_SIZE-1))
should work (untested)
-Andi
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-16 14:44 ` Andi Kleen
@ 2008-01-16 14:54 ` Ingo Molnar
2008-01-16 15:18 ` Ingo Molnar
2008-01-16 15:26 ` Andi Kleen
0 siblings, 2 replies; 28+ messages in thread
From: Ingo Molnar @ 2008-01-16 14:54 UTC (permalink / raw)
To: Andi Kleen
Cc: Jeremy Fitzhardinge, LKML, Glauber de Oliveira Costa, Jan Beulich
* Andi Kleen <ak@suse.de> wrote:
> > #define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
>
>
> I haven't tested yet, but we looked at that one earlier and I thought
> it was ok because
>
> #define __PHYSICAL_MASK _AT(phys_addr_t, (_AC(1,ULL) << __PHYSICAL_MASK_SHIFT) - 1)
>
> and
>
> typedef u64 phys_addr_t;
>
> for PAE. So the expression above should have been already 64bit.
no. The problem is that PAGE_MASK is:
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
that's u32 on PAE, and __PHYSICAL_MASK is u64. So PAGE_MASK gets
zero-extended to u64. So the combined mask:
#define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
has the high bits chopped off. Please try my patch.
(PHYSICAL_PAGE_MASK is broken too in the same way, i just fixed that in
my tree - but it's not used by anything on 32-bit PAE but by PAGE_MASK)
> So I would be surprised if the patch works.
try it ...
Ingo
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-16 14:54 ` Ingo Molnar
@ 2008-01-16 15:18 ` Ingo Molnar
2008-01-16 15:26 ` Andi Kleen
1 sibling, 0 replies; 28+ messages in thread
From: Ingo Molnar @ 2008-01-16 15:18 UTC (permalink / raw)
To: Andi Kleen
Cc: Jeremy Fitzhardinge, LKML, Glauber de Oliveira Costa, Jan Beulich
today's x86.git now has all the fixes included - can you confirm that it
boots fine on your box?
Ingo
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-16 14:54 ` Ingo Molnar
2008-01-16 15:18 ` Ingo Molnar
@ 2008-01-16 15:26 ` Andi Kleen
2008-01-16 15:42 ` Jan Beulich
1 sibling, 1 reply; 28+ messages in thread
From: Andi Kleen @ 2008-01-16 15:26 UTC (permalink / raw)
To: Ingo Molnar
Cc: Jeremy Fitzhardinge, LKML, Glauber de Oliveira Costa, Jan Beulich
On Wednesday 16 January 2008 15:54:27 Ingo Molnar wrote:
>
> * Andi Kleen <ak@suse.de> wrote:
>
> > > #define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
> >
> >
> > I haven't tested yet, but we looked at that one earlier and I thought
> > it was ok because
> >
> > #define __PHYSICAL_MASK _AT(phys_addr_t, (_AC(1,ULL) << __PHYSICAL_MASK_SHIFT) - 1)
> >
> > and
> >
> > typedef u64 phys_addr_t;
> >
> > for PAE. So the expression above should have been already 64bit.
>
> no. The problem is that PAGE_MASK is:
I covered that in the end of the email.
>
>
> (PHYSICAL_PAGE_MASK is broken too in the same way, i just fixed that in
> my tree - but it's not used by anything on 32-bit PAE but by PAGE_MASK)
Yes, but if you cast to long the result will be 32bit again. Or do you
rely on it being always used in 64bit signed context with sign extension?
While that might work it would seem rather fragile to me, just asking
for similar future bugs.
-Andi
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-16 15:26 ` Andi Kleen
@ 2008-01-16 15:42 ` Jan Beulich
2008-01-16 15:47 ` Ingo Molnar
0 siblings, 1 reply; 28+ messages in thread
From: Jan Beulich @ 2008-01-16 15:42 UTC (permalink / raw)
To: Ingo Molnar, Andi Kleen
Cc: Glauber de Oliveira Costa, Jeremy Fitzhardinge, LKML
>Yes, but if you cast to long the result will be 32bit again. Or do you
>rely on it being always used in 64bit signed context with sign extension?
>While that might work it would seem rather fragile to me, just asking
>for similar future bugs.
Even if conversion is to 64-bit unsigned, the value (being signed) will be
sign-extended first (to preserve its value modulo 2**<target type width>).
Jan
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-16 15:42 ` Jan Beulich
@ 2008-01-16 15:47 ` Ingo Molnar
2008-01-16 16:06 ` Ingo Molnar
` (2 more replies)
0 siblings, 3 replies; 28+ messages in thread
From: Ingo Molnar @ 2008-01-16 15:47 UTC (permalink / raw)
To: Jan Beulich
Cc: Andi Kleen, Glauber de Oliveira Costa, Jeremy Fitzhardinge, LKML
* Jan Beulich <jbeulich@novell.com> wrote:
> > Yes, but if you cast to long the result will be 32bit again. Or do
> > you rely on it being always used in 64bit signed context with sign
> > extension? While that might work it would seem rather fragile to me,
> > just asking for similar future bugs.
>
> Even if conversion is to 64-bit unsigned, the value (being signed)
> will be sign-extended first (to preserve its value modulo 2**<target
> type width>).
yes. It would be nice if Andi could test my fix instead of arguing why
he thinks it's unlikely to work ;-)
(i've got the cleanup patch below as well ontop of today's x86.git which
includes the first fix, but it's an RFC as it has wider impact and might
break stuff.)
Ingo
--------------->
Subject: x86: PAGE_MASK cleanup
From: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
include/asm-x86/page.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
Index: linux-x86.q/include/asm-x86/page.h
===================================================================
--- linux-x86.q.orig/include/asm-x86/page.h
+++ linux-x86.q/include/asm-x86/page.h
@@ -6,12 +6,12 @@
/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE-1))
+#define PAGE_MASK (_AT(phys_addr_t, ~(PAGE_SIZE-1)))
#ifdef __KERNEL__
#define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
-#define PTE_MASK (_AT(long, PHYSICAL_PAGE_MASK))
+#define PTE_MASK PHYSICAL_PAGE_MASK
#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-16 15:47 ` Ingo Molnar
@ 2008-01-16 16:06 ` Ingo Molnar
2008-01-16 17:05 ` Jeremy Fitzhardinge
2008-01-16 17:40 ` Andi Kleen
2 siblings, 0 replies; 28+ messages in thread
From: Ingo Molnar @ 2008-01-16 16:06 UTC (permalink / raw)
To: Jan Beulich
Cc: Andi Kleen, Glauber de Oliveira Costa, Jeremy Fitzhardinge, LKML
* Ingo Molnar <mingo@elte.hu> wrote:
> -#define PAGE_MASK (~(PAGE_SIZE-1))
> +#define PAGE_MASK (_AT(phys_addr_t, ~(PAGE_SIZE-1)))
that should be:
#define PAGE_MASK (_AT(phys_addr_t, PAGE_SIZE)-1)
but still - PAGE_MASK is used in a lot of places so i'm nervous about
this change.
Ingo
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-16 15:47 ` Ingo Molnar
2008-01-16 16:06 ` Ingo Molnar
@ 2008-01-16 17:05 ` Jeremy Fitzhardinge
2008-01-16 17:12 ` Andi Kleen
2008-01-16 17:40 ` Andi Kleen
2 siblings, 1 reply; 28+ messages in thread
From: Jeremy Fitzhardinge @ 2008-01-16 17:05 UTC (permalink / raw)
To: Ingo Molnar; +Cc: Jan Beulich, Andi Kleen, Glauber de Oliveira Costa, LKML
Ingo Molnar wrote:
> * Jan Beulich <jbeulich@novell.com> wrote:
>
>
>>> Yes, but if you cast to long the result will be 32bit again. Or do
>>> you rely on it being always used in 64bit signed context with sign
>>> extension? While that might work it would seem rather fragile to me,
>>> just asking for similar future bugs.
>>>
>> Even if conversion is to 64-bit unsigned, the value (being signed)
>> will be sign-extended first (to preserve its value modulo 2**<target
>> type width>).
>>
>
> yes. It would be nice if Andi could test my fix instead of arguing why
> he thinks it's unlikely to work ;-)
>
> (i've got the cleanup patch below as well on top of today's x86.git which
> includes the first fix, but it's an RFC as it has wider impact and might
> break stuff.)
>
> Ingo
>
> --------------->
> Subject: x86: PAGE_MASK cleanup
> From: Ingo Molnar <mingo@elte.hu>
>
> Signed-off-by: Ingo Molnar <mingo@elte.hu>
> ---
> include/asm-x86/page.h | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> Index: linux-x86.q/include/asm-x86/page.h
> ===================================================================
> --- linux-x86.q.orig/include/asm-x86/page.h
> +++ linux-x86.q/include/asm-x86/page.h
> @@ -6,12 +6,12 @@
> /* PAGE_SHIFT determines the page size */
> #define PAGE_SHIFT 12
> #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
> -#define PAGE_MASK (~(PAGE_SIZE-1))
> +#define PAGE_MASK (_AT(phys_addr_t, ~(PAGE_SIZE-1)))
>
Hm, this seems fairly wide-ranging. How about just making it signed?
In fact, all these masks which are of the form "all high bits set, some
low bits clear" (or perhaps "all set except some") should be signed, so
that the "all high bits set" property is maintained when its extended to
larger types.
Also, Andi, do you want to get rid of the PHYSICAL_MASK stuff?
J
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-16 17:05 ` Jeremy Fitzhardinge
@ 2008-01-16 17:12 ` Andi Kleen
0 siblings, 0 replies; 28+ messages in thread
From: Andi Kleen @ 2008-01-16 17:12 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Ingo Molnar, Jan Beulich, Glauber de Oliveira Costa, LKML
> Also, Andi, do you want to get rid of the PHYSICAL_MASK stuff?
That would be fine, just all the accessors need to mask out NX explicitly.
I didn't have plans to code that up myself though.
-Andi
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-16 15:47 ` Ingo Molnar
2008-01-16 16:06 ` Ingo Molnar
2008-01-16 17:05 ` Jeremy Fitzhardinge
@ 2008-01-16 17:40 ` Andi Kleen
2008-01-16 20:22 ` Ingo Molnar
2 siblings, 1 reply; 28+ messages in thread
From: Andi Kleen @ 2008-01-16 17:40 UTC (permalink / raw)
To: Ingo Molnar
Cc: Jan Beulich, Glauber de Oliveira Costa, Jeremy Fitzhardinge, LKML
git-x86 upto a9f7faa5fd229a65747f02ab0f2d45ee35856760 boots, but gives
VFS: Mounted root (nfs filesystem).
Freeing unused kernel memory: 264k freed
eth0: no IPv6 routers present
Clocksource tsc unstable (delta = 18747555652 ns)
BUG: soft lockup - CPU#2 stuck for 55s! [swapper:0]
Pid: 0, comm: swapper Not tainted (2.6.24-rc8-ga9f7faa5 #23)
EIP: 0060:[<c0103b71>] EFLAGS: 00000206 CPU: 2
EIP is at default_idle+0x36/0x58
EAX: 6f179d4d EBX: 6f179d4d ECX: 000001c9 EDX: 0000006a
ESI: 0000006a EDI: 00000000 EBP: 00000000 ESP: f7c49fa4
DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068
CR0: 8005003b CR2: bfaaf990 CR3: 3749f000 CR4: 000006f0
DR0: 00000000 DR1: 00000000 DR2: 00000000 DR3: 00000000
DR6: ffff0ff0 DR7: 00000400
[<c0103b3b>] default_idle+0x0/0x58
[<c0103a7e>] cpu_idle+0xad/0xcd
=======================
-Andi
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-16 17:40 ` Andi Kleen
@ 2008-01-16 20:22 ` Ingo Molnar
2008-01-16 20:59 ` Andi Kleen
0 siblings, 1 reply; 28+ messages in thread
From: Ingo Molnar @ 2008-01-16 20:22 UTC (permalink / raw)
To: Andi Kleen
Cc: Jan Beulich, Glauber de Oliveira Costa, Jeremy Fitzhardinge, LKML
* Andi Kleen <ak@suse.de> wrote:
> git-x86 upto a9f7faa5fd229a65747f02ab0f2d45ee35856760 boots, but gives
>
> VFS: Mounted root (nfs filesystem).
> Freeing unused kernel memory: 264k freed
> eth0: no IPv6 routers present
> Clocksource tsc unstable (delta = 18747555652 ns)
>
> BUG: soft lockup - CPU#2 stuck for 55s! [swapper:0]
>
> Pid: 0, comm: swapper Not tainted (2.6.24-rc8-ga9f7faa5 #23)
> EIP: 0060:[<c0103b71>] EFLAGS: 00000206 CPU: 2
> EIP is at default_idle+0x36/0x58
> EAX: 6f179d4d EBX: 6f179d4d ECX: 000001c9 EDX: 0000006a
> ESI: 0000006a EDI: 00000000 EBP: 00000000 ESP: f7c49fa4
> DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068
> CR0: 8005003b CR2: bfaaf990 CR3: 3749f000 CR4: 000006f0
> DR0: 00000000 DR1: 00000000 DR2: 00000000 DR3: 00000000
> DR6: ffff0ff0 DR7: 00000400
> [<c0103b3b>] default_idle+0x0/0x58
> [<c0103a7e>] cpu_idle+0xad/0xcd
> =======================
truly stuck, or just an annoying message?
Ingo
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-16 20:22 ` Ingo Molnar
@ 2008-01-16 20:59 ` Andi Kleen
2008-01-16 21:06 ` Ingo Molnar
0 siblings, 1 reply; 28+ messages in thread
From: Andi Kleen @ 2008-01-16 20:59 UTC (permalink / raw)
To: Ingo Molnar
Cc: Jan Beulich, Glauber de Oliveira Costa, Jeremy Fitzhardinge, LKML
>
> truly stuck, or just an annoying message?
Just annoying message; system works after that for simple login etc.
(haven't run anything complicated)
-Andi
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-16 20:59 ` Andi Kleen
@ 2008-01-16 21:06 ` Ingo Molnar
2008-01-16 21:35 ` Andi Kleen
0 siblings, 1 reply; 28+ messages in thread
From: Ingo Molnar @ 2008-01-16 21:06 UTC (permalink / raw)
To: Andi Kleen
Cc: Jan Beulich, Glauber de Oliveira Costa, Jeremy Fitzhardinge,
LKML, Thomas Gleixner, H. Peter Anvin
* Andi Kleen <ak@suse.de> wrote:
> > truly stuck, or just an annoying message?
>
> Just annoying message; system works after that for simple login etc.
> (haven't run anything complicated)
ok, if you see no other failures and if you have some time it would be
nice to figure out why that message is happening. Is NOHZ enabled
perhaps?
Ingo
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 0 of 4] x86: some more patches
2008-01-16 21:06 ` Ingo Molnar
@ 2008-01-16 21:35 ` Andi Kleen
0 siblings, 0 replies; 28+ messages in thread
From: Andi Kleen @ 2008-01-16 21:35 UTC (permalink / raw)
To: Ingo Molnar
Cc: Jan Beulich, Glauber de Oliveira Costa, Jeremy Fitzhardinge,
LKML, Thomas Gleixner, H. Peter Anvin
On Wednesday 16 January 2008 22:06:46 Ingo Molnar wrote:
>
> * Andi Kleen <ak@suse.de> wrote:
>
> > > truly stuck, or just an annoying message?
> >
> > Just annoying message; system works after that for simple login etc.
> > (haven't run anything complicated)
>
> ok, if you see no other failures and if you have some time it would be
> nice to figure out why that message is happening. Is NOHZ enabled
> perhaps?
I updated now to latest git-x86 and the problem seems to have gone away.
But NO_HZ was enabled yes and the system also had a very unsynchronous
TSC (dual K8 opteron with cpufreq)
Also I should say my patches were applied, but I didn't change
anything in this area.
-Andi
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 4 of 4] x86: mask NX from pte_pfn
2008-01-15 22:17 ` [PATCH 4 of 4] x86: mask NX from pte_pfn Jeremy Fitzhardinge
@ 2008-01-18 13:52 ` Hugh Dickins
2008-01-18 14:01 ` Ingo Molnar
2008-01-18 15:55 ` Jeremy Fitzhardinge
0 siblings, 2 replies; 28+ messages in thread
From: Hugh Dickins @ 2008-01-18 13:52 UTC (permalink / raw)
To: Jeremy Fitzhardinge
Cc: Ingo Molnar, LKML, Andi Kleen, Glauber de Oliveira Costa, Jan Beulich
On Tue, 15 Jan 2008, Jeremy Fitzhardinge wrote:
> In 32-bit PAE, mask NX from pte_pfn, since it isn't part of the PFN.
> This code is due for unification anyway, but this fixes a latent bug.
>
> Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
>
> ---
> include/asm-x86/pgtable-3level.h | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
> --- a/include/asm-x86/pgtable-3level.h
> +++ b/include/asm-x86/pgtable-3level.h
> @@ -139,7 +139,7 @@ static inline int pte_none(pte_t pte)
>
> static inline unsigned long pte_pfn(pte_t pte)
> {
> - return pte_val(pte) >> PAGE_SHIFT;
> + return (pte_val(pte) >> PAGE_SHIFT) & ~_PAGE_NX;
> }
>
> /*
Shouldn't that be
return (pte_val(pte) & ~_PAGE_NX) >> PAGE_SHIFT;
? Hugh
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 4 of 4] x86: mask NX from pte_pfn
2008-01-18 13:52 ` Hugh Dickins
@ 2008-01-18 14:01 ` Ingo Molnar
2008-01-18 15:55 ` Jeremy Fitzhardinge
1 sibling, 0 replies; 28+ messages in thread
From: Ingo Molnar @ 2008-01-18 14:01 UTC (permalink / raw)
To: Hugh Dickins
Cc: Jeremy Fitzhardinge, LKML, Andi Kleen, Glauber de Oliveira Costa,
Jan Beulich
* Hugh Dickins <hugh@veritas.com> wrote:
> > - return pte_val(pte) >> PAGE_SHIFT;
> > + return (pte_val(pte) >> PAGE_SHIFT) & ~_PAGE_NX;
> Shouldn't that be
>
> return (pte_val(pte) & ~_PAGE_NX) >> PAGE_SHIFT;
ouch! Sharp eyes! Fixed it.
Ingo
^ permalink raw reply [flat|nested] 28+ messages in thread
* Re: [PATCH 4 of 4] x86: mask NX from pte_pfn
2008-01-18 13:52 ` Hugh Dickins
2008-01-18 14:01 ` Ingo Molnar
@ 2008-01-18 15:55 ` Jeremy Fitzhardinge
1 sibling, 0 replies; 28+ messages in thread
From: Jeremy Fitzhardinge @ 2008-01-18 15:55 UTC (permalink / raw)
To: Hugh Dickins
Cc: Ingo Molnar, LKML, Andi Kleen, Glauber de Oliveira Costa, Jan Beulich
Hugh Dickins wrote:
> Shouldn't that be
>
> return (pte_val(pte) & ~_PAGE_NX) >> PAGE_SHIFT;
>
Yes, it should be.
Thanks,
J
^ permalink raw reply [flat|nested] 28+ messages in thread
end of thread, other threads:[~2008-01-18 15:55 UTC | newest]
Thread overview: 28+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2008-01-15 22:17 [PATCH 0 of 4] x86: some more patches Jeremy Fitzhardinge
2008-01-15 22:17 ` [PATCH 1 of 4] x86: refactor mmu ops in paravirt.h Jeremy Fitzhardinge
2008-01-15 22:17 ` [PATCH 2 of 4] x86: fix warning Jeremy Fitzhardinge
2008-01-15 22:17 ` [PATCH 3 of 4] x86: clean up pte_modify Jeremy Fitzhardinge
2008-01-16 0:43 ` Andi Kleen
2008-01-15 22:17 ` [PATCH 4 of 4] x86: mask NX from pte_pfn Jeremy Fitzhardinge
2008-01-18 13:52 ` Hugh Dickins
2008-01-18 14:01 ` Ingo Molnar
2008-01-18 15:55 ` Jeremy Fitzhardinge
2008-01-15 22:35 ` [PATCH 0 of 4] x86: some more patches Ingo Molnar
2008-01-15 23:28 ` Jeremy Fitzhardinge
2008-01-16 0:44 ` Andi Kleen
2008-01-16 7:25 ` Ingo Molnar
2008-01-16 14:22 ` Ingo Molnar
2008-01-16 14:44 ` Andi Kleen
2008-01-16 14:54 ` Ingo Molnar
2008-01-16 15:18 ` Ingo Molnar
2008-01-16 15:26 ` Andi Kleen
2008-01-16 15:42 ` Jan Beulich
2008-01-16 15:47 ` Ingo Molnar
2008-01-16 16:06 ` Ingo Molnar
2008-01-16 17:05 ` Jeremy Fitzhardinge
2008-01-16 17:12 ` Andi Kleen
2008-01-16 17:40 ` Andi Kleen
2008-01-16 20:22 ` Ingo Molnar
2008-01-16 20:59 ` Andi Kleen
2008-01-16 21:06 ` Ingo Molnar
2008-01-16 21:35 ` Andi Kleen
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).