LKML Archive on lore.kernel.org
* [PATCH 0/2] x86_64: switch_to fixes
@ 2014-12-08 19:44 Andy Lutomirski
  2014-12-08 19:44 ` [PATCH 1/2] x86_64, switch_to: Load TLS descriptors before switching DS and ES Andy Lutomirski
  2014-12-08 19:44 ` [PATCH 2/2] x86_64, switch_to: Fix an incorrect comment Andy Lutomirski
  0 siblings, 2 replies; 3+ messages in thread
From: Andy Lutomirski @ 2014-12-08 19:44 UTC (permalink / raw)
  To: x86, linux-kernel; +Cc: Andi Kleen, Thomas Gleixner, Andy Lutomirski

Patch 1 is for stable.  It fixes a bug and an information leak.
Patch 2 is just cosmetic and is therefore not marked for stable.

Andy Lutomirski (2):
  x86_64, switch_to: Load TLS descriptors before switching DS and ES
  x86_64, switch_to: Fix an incorrect comment

 arch/x86/kernel/process_64.c | 40 +++++++++++++++++++---------------------
 1 file changed, 19 insertions(+), 21 deletions(-)

-- 
1.9.3



* [PATCH 1/2] x86_64, switch_to: Load TLS descriptors before switching DS and ES
  2014-12-08 19:44 [PATCH 0/2] x86_64: switch_to fixes Andy Lutomirski
@ 2014-12-08 19:44 ` Andy Lutomirski
  2014-12-08 19:44 ` [PATCH 2/2] x86_64, switch_to: Fix an incorrect comment Andy Lutomirski
  1 sibling, 0 replies; 3+ messages in thread
From: Andy Lutomirski @ 2014-12-08 19:44 UTC (permalink / raw)
  To: x86, linux-kernel; +Cc: Andi Kleen, Thomas Gleixner, Andy Lutomirski, stable

Otherwise, if buggy user code points DS or ES at an entry in the TLS
array, the affected segment register is corrupted after a context
switch: the selector is reloaded while the GDT still contains the
previous task's TLS descriptors.

----- begin test case -----

/*
 * Copyright (c) 2014 Andy Lutomirski
 * GPL v2
 */

#define _GNU_SOURCE
#include <err.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>	/* struct user_desc */

/* Build a selector for GDT entry idx with RPL 3 (TI=0, i.e. the GDT). */
static unsigned short GDT3(int idx)
{
	return (idx << 3) | 3;
}

/* Install a TLS descriptor; idx == -1 asks the kernel to pick a free slot. */
static int create_tls(int idx, unsigned int base)
{
	struct user_desc desc = {
		.entry_number    = idx,
		.base_addr       = base,
		.limit           = 0xfffff,
		.seg_32bit       = 1,
		.contents        = 0, /* Data, grow-up */
		.read_exec_only  = 0,
		.limit_in_pages  = 1,
		.seg_not_present = 0,
		.useable         = 0,
	};

	if (syscall(SYS_set_thread_area, &desc) != 0)
		err(1, "set_thread_area");

	return desc.entry_number;
}

int main(void)
{
	int idx = create_tls(-1, 0);
	printf("Allocated GDT index %d\n", idx);

	unsigned short orig_es;
	asm volatile ("mov %%es,%0" : "=rm" (orig_es));

	int errors = 0;
	int total = 1000;
	for (int i = 0; i < total; i++) {
		asm volatile ("mov %0,%%es" : : "rm" (GDT3(idx)));
		usleep(100);	/* give the scheduler a chance to switch us out */

		unsigned short es;
		asm volatile ("mov %%es,%0" : "=rm" (es));
		asm volatile ("mov %0,%%es" : : "rm" (orig_es));
		if (es != GDT3(idx)) {
			if (errors == 0)
				printf("[FAIL]\tES changed from 0x%hx to 0x%hx\n",
				       GDT3(idx), es);
			errors++;
		}
	}

	if (errors) {
		printf("[FAIL]\tES was corrupted %d/%d times\n", errors, total);
		return 1;
	} else {
		printf("[OK]\tES was preserved\n");
		return 0;
	}
}

----- end test case -----
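
The test needs no special privileges.  Assuming the file is saved as,
say, es_test.c (an illustrative name), it can be built with something
like:

	gcc -std=gnu99 -O2 -o es_test es_test.c

A kernel with the bug should hit the [FAIL] path within a few of the
usleep()-induced context switches; a fixed kernel prints [OK].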

Cc: stable@vger.kernel.org
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
---
 arch/x86/kernel/process_64.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3ed4a68d4013..e2efaa3473fa 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -288,19 +288,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	load_sp0(tss, next);
 
-	/*
-	 * Switch DS and ES.
-	 * This won't pick up thread selector changes, but I guess that is ok.
-	 */
-	savesegment(es, prev->es);
-	if (unlikely(next->es | prev->es))
-		loadsegment(es, next->es);
-
-	savesegment(ds, prev->ds);
-	if (unlikely(next->ds | prev->ds))
-		loadsegment(ds, next->ds);
-
-
 	/* We must save %fs and %gs before load_TLS() because
 	 * %fs and %gs may be cleared by load_TLS().
 	 *
@@ -309,17 +296,30 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	savesegment(fs, fsindex);
 	savesegment(gs, gsindex);
 
+	/*
+	 * Load TLS before restoring any segments so that segment loads
+	 * reference the correct GDT entries.
+	 */
 	load_TLS(next, cpu);
 
 	/*
-	 * Leave lazy mode, flushing any hypercalls made here.
-	 * This must be done before restoring TLS segments so
-	 * the GDT and LDT are properly updated, and must be
-	 * done before math_state_restore, so the TS bit is up
-	 * to date.
+	 * Leave lazy mode, flushing any hypercalls made here.  This
+	 * must be done after loading TLS entries in the GDT but before
+	 * loading segments that might reference them, and it must
+	 * be done before math_state_restore, so the TS bit is up to
+	 * date.
 	 */
 	arch_end_context_switch(next_p);
 
+	/* Switch DS and ES. */
+	savesegment(es, prev->es);
+	if (unlikely(next->es | prev->es))
+		loadsegment(es, next->es);
+
+	savesegment(ds, prev->ds);
+	if (unlikely(next->ds | prev->ds))
+		loadsegment(ds, next->ds);
+
 	/*
 	 * Switch FS and GS.
 	 *
-- 
1.9.3
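
To recap the new ordering, the tail of __switch_to() after this patch
proceeds roughly as below (a simplified sketch using the helper names
from arch/x86/kernel/process_64.c, not the verbatim kernel code):

	/* 1. Save the FS/GS selectors first; load_TLS() may clear them. */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	/* 2. Install the next task's TLS descriptors into this CPU's GDT. */
	load_TLS(next, cpu);

	/* 3. Flush lazy (paravirt) hypercalls so the GDT update above is
	 *    committed before any segment register can reference it. */
	arch_end_context_switch(next_p);

	/* 4. Only now touch DS and ES: a selector that points into the TLS
	 *    area resolves against next's descriptors rather than prev's
	 *    stale ones. */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);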



* [PATCH 2/2] x86_64, switch_to: Fix an incorrect comment
  2014-12-08 19:44 [PATCH 0/2] x86_64: switch_to fixes Andy Lutomirski
  2014-12-08 19:44 ` [PATCH 1/2] x86_64, switch_to: Load TLS descriptors before switching DS and ES Andy Lutomirski
@ 2014-12-08 19:44 ` Andy Lutomirski
  1 sibling, 0 replies; 3+ messages in thread
From: Andy Lutomirski @ 2014-12-08 19:44 UTC (permalink / raw)
  To: x86, linux-kernel; +Cc: Andi Kleen, Thomas Gleixner, Andy Lutomirski

I have no idea why load_sp0 would have anything to do with the page
table pointer (i.e. cr3).

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
---
 arch/x86/kernel/process_64.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index e2efaa3473fa..726aa694991f 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -283,9 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 
-	/*
-	 * Reload esp0, LDT and the page table pointer:
-	 */
+	/* Reload esp0 and ss1. */
 	load_sp0(tss, next);
 
 	/* We must save %fs and %gs before load_TLS() because
-- 
1.9.3
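
As background for the comment change: the native hook behind load_sp0()
at the time was roughly the following (a simplified paraphrase of
arch/x86/include/asm/processor.h; details approximate).  It touches only
the TSS's sp0 and, on 32-bit, ss1/SYSENTER state; nothing here goes near
CR3:

static inline void native_load_sp0(struct tss_struct *tss,
				   struct thread_struct *thread)
{
	/* Point the TSS at the next task's kernel stack. */
	tss->x86_tss.sp0 = thread->sp0;
#ifdef CONFIG_X86_32
	/* Keep ss1 and the SYSENTER CS MSR in sync; 32-bit only. */
	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
		tss->x86_tss.ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
#endif
}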


