LKML Archive on lore.kernel.org
help / color / mirror / Atom feed
* [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation
@ 2011-02-14 19:28 Tejun Heo
  2011-02-14 19:28 ` [PATCH 1/7] x86-64, NUMA: Trivial changes to prepare for emulation updates Tejun Heo
                   ` (7 more replies)
  0 siblings, 8 replies; 13+ messages in thread
From: Tejun Heo @ 2011-02-14 19:28 UTC (permalink / raw)
  To: linux-kernel, x86, yinghai, brgerst, gorcunov, shaohui.zheng,
	rientjes, mingo, hpa, ankita

Hello,

NUMA emulation is quite convoluted involving unnecessary mapping and
reverse mapping between apicids, PXMs, nodes and memory addresses.
This patchset tries to restore some sanity to the whole thing.

Tested on an Opteron NUMA machine which can do both ACPI and AMD
configs.  All NUMA configs, emulations, !NUMA and UP work as expected.

This patchset is on top of tip/x86/numa[1] +
bring-sanity-to-NUMA-configuration patchset[2] and contains the
following 7 patches.

 0001-x86-64-NUMA-Trivial-changes-to-prepare-for-emulation.patch
 0002-x86-64-NUMA-Build-and-use-direct-emulated-nid-phys-n.patch
 0003-x86-64-NUMA-Make-emulation-code-build-numa_meminfo-a.patch
 0004-x86-64-NUMA-Wrap-node-ID-during-emulation.patch
 0005-x86-64-NUMA-Emulate-directly-from-numa_meminfo.patch
 0006-x86-64-NUMA-Unify-emulated-apicid-node-mapping-trans.patch
 0007-x86-64-NUMA-Unify-emulated-distance-mapping.patch

The patchset is available in the following git branch.

 git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc.git x86_64-numa-emu-unify

Diffstat follows.

 arch/x86/include/asm/acpi.h    |    6 
 arch/x86/include/asm/amd_nb.h  |    4 
 arch/x86/include/asm/numa_64.h |    1 
 arch/x86/mm/amdtopology_64.c   |   47 ----
 arch/x86/mm/numa_64.c          |  429 ++++++++++++++++++-----------------------
 arch/x86/mm/srat_64.c          |   87 --------
 6 files changed, 197 insertions(+), 377 deletions(-)

Thanks.

--
tejun

[1] eff9073790e1286aa12bf1c65814d3e0132b12e1 (x86: Rename incorrectly
    named parameter of numa_cpu_node())
[2] http://thread.gmane.org/gmane.linux.kernel/1099256/

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 1/7] x86-64, NUMA: Trivial changes to prepare for emulation updates
  2011-02-14 19:28 [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation Tejun Heo
@ 2011-02-14 19:28 ` Tejun Heo
  2011-02-14 19:28 ` [PATCH 2/7] x86-64, NUMA: Build and use direct emulated nid -> phys nid mapping Tejun Heo
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Tejun Heo @ 2011-02-14 19:28 UTC (permalink / raw)
  To: linux-kernel, x86, yinghai, brgerst, gorcunov, shaohui.zheng,
	rientjes, mingo, hpa, ankita
  Cc: Tejun Heo

* Separate out numa_add_memblk_to() from numa_add_memblk() so that
  different numa_meminfo can be used.

* Rename cmdline to emu_cmdline.

* Drop @start/last_pfn from numa_emulation() and use max_pfn directly.

This patch doesn't introduce any behavior change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Shaohui Zheng <shaohui.zheng@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: H. Peter Anvin <hpa@linux.intel.com>
---
 arch/x86/mm/numa_64.c |   34 ++++++++++++++++++----------------
 1 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index b3c1418..10544c2 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -193,10 +193,9 @@ static void * __init early_node_mem(int nodeid, unsigned long start,
 	return NULL;
 }
 
-int __init numa_add_memblk(int nid, u64 start, u64 end)
+static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
+				     struct numa_meminfo *mi)
 {
-	struct numa_meminfo *mi = &numa_meminfo;
-
 	/* ignore zero length blks */
 	if (start == end)
 		return 0;
@@ -227,6 +226,11 @@ static void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
 		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
 }
 
+int __init numa_add_memblk(int nid, u64 start, u64 end)
+{
+	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
+}
+
 /* Initialize bootmem allocator for a node */
 void __init
 setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
@@ -537,11 +541,11 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 /* Numa emulation */
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
-static char *cmdline __initdata;
+static char *emu_cmdline __initdata;
 
 void __init numa_emu_cmdline(char *str)
 {
-	cmdline = str;
+	emu_cmdline = str;
 }
 
 int __init find_node_by_addr(unsigned long addr)
@@ -859,12 +863,10 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
  * Sets up the system RAM area from start_pfn to last_pfn according to the
  * numa=fake command-line option.
  */
-static int __init numa_emulation(unsigned long start_pfn,
-			unsigned long last_pfn, int acpi, int amd)
+static int __init numa_emulation(int acpi, int amd)
 {
 	static struct numa_meminfo ei __initdata;
-	u64 addr = start_pfn << PAGE_SHIFT;
-	u64 max_addr = last_pfn << PAGE_SHIFT;
+	const u64 max_addr = max_pfn << PAGE_SHIFT;
 	int num_nodes;
 	int i;
 
@@ -873,16 +875,16 @@ static int __init numa_emulation(unsigned long start_pfn,
 	 * the fixed node size.  Otherwise, if it is just a single number N,
 	 * split the system RAM into N fake nodes.
 	 */
-	if (strchr(cmdline, 'M') || strchr(cmdline, 'G')) {
+	if (strchr(emu_cmdline, 'M') || strchr(emu_cmdline, 'G')) {
 		u64 size;
 
-		size = memparse(cmdline, &cmdline);
-		num_nodes = split_nodes_size_interleave(addr, max_addr, size);
+		size = memparse(emu_cmdline, &emu_cmdline);
+		num_nodes = split_nodes_size_interleave(0, max_addr, size);
 	} else {
 		unsigned long n;
 
-		n = simple_strtoul(cmdline, NULL, 0);
-		num_nodes = split_nodes_interleave(addr, max_addr, n);
+		n = simple_strtoul(emu_cmdline, NULL, 0);
+		num_nodes = split_nodes_interleave(0, max_addr, n);
 	}
 
 	if (num_nodes < 0)
@@ -913,7 +915,7 @@ static int __init numa_emulation(unsigned long start_pfn,
 						nodes[i].end >> PAGE_SHIFT);
 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 	}
-	setup_physnodes(addr, max_addr);
+	setup_physnodes(0, max_addr);
 	fake_physnodes(acpi, amd, num_nodes);
 	numa_init_array();
 	numa_emu_dist = true;
@@ -969,7 +971,7 @@ void __init initmem_init(void)
 			continue;
 #ifdef CONFIG_NUMA_EMU
 		setup_physnodes(0, max_pfn << PAGE_SHIFT);
-		if (cmdline && !numa_emulation(0, max_pfn, i == 0, i == 1))
+		if (emu_cmdline && !numa_emulation(i == 0, i == 1))
 			return;
 		setup_physnodes(0, max_pfn << PAGE_SHIFT);
 		nodes_clear(node_possible_map);
-- 
1.7.1


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 2/7] x86-64, NUMA: Build and use direct emulated nid -> phys nid mapping
  2011-02-14 19:28 [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation Tejun Heo
  2011-02-14 19:28 ` [PATCH 1/7] x86-64, NUMA: Trivial changes to prepare for emulation updates Tejun Heo
@ 2011-02-14 19:28 ` Tejun Heo
  2011-02-15 16:36   ` [PATCH UPDATED " Tejun Heo
  2011-02-14 19:28 ` [PATCH 3/7] x86-64, NUMA: Make emulation code build numa_meminfo and share the registration path Tejun Heo
                   ` (5 subsequent siblings)
  7 siblings, 1 reply; 13+ messages in thread
From: Tejun Heo @ 2011-02-14 19:28 UTC (permalink / raw)
  To: linux-kernel, x86, yinghai, brgerst, gorcunov, shaohui.zheng,
	rientjes, mingo, hpa, ankita
  Cc: Tejun Heo

NUMA emulation copied physical NUMA configuration into physnodes[] and
used it to reverse-map emulated nodes to physical nodes, which is
unnecessarily convoluted.  Build emu_nid_to_phys[] array to map
emulated nids directly to the matching physical nids and use it in
numa_add_cpu().

physnodes[] will be removed with further patches.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Shaohui Zheng <shaohui.zheng@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: H. Peter Anvin <hpa@linux.intel.com>
---
 arch/x86/mm/numa_64.c |   64 +++++++++++++++++++++++++++---------------------
 1 files changed, 36 insertions(+), 28 deletions(-)

diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 10544c2..253a5c3 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -540,7 +540,9 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 #ifdef CONFIG_NUMA_EMU
 /* Numa emulation */
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
-static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
+static struct bootnode physnodes[MAX_NUMNODES] __initdata;
+
+static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata;
 static char *emu_cmdline __initdata;
 
 void __init numa_emu_cmdline(char *str)
@@ -647,7 +649,8 @@ static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
  * allocation past addr and -1 otherwise.  addr is adjusted to be at
  * the end of the node.
  */
-static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
+static int __init setup_node_range(int nid, int physnid,
+				   u64 *addr, u64 size, u64 max_addr)
 {
 	int ret = 0;
 	nodes[nid].start = *addr;
@@ -658,6 +661,10 @@ static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
 	}
 	nodes[nid].end = *addr;
 	node_set(nid, node_possible_map);
+
+	if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
+		emu_nid_to_phys[nid] = physnid;
+
 	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
 	       nodes[nid].start, nodes[nid].end,
 	       (nodes[nid].end - nodes[nid].start) >> 20);
@@ -754,7 +761,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
 			if (nodes_weight(physnode_mask) + ret >= nr_nodes)
 				end = physnodes[i].end;
 
-			if (setup_node_range(ret++, &physnodes[i].start,
+			if (setup_node_range(ret++, i, &physnodes[i].start,
 						end - physnodes[i].start,
 						physnodes[i].end) < 0)
 				node_clear(i, physnode_mask);
@@ -850,7 +857,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
 			 * later.  If setup_node_range() returns non-zero, there
 			 * is no more memory available on this physical node.
 			 */
-			if (setup_node_range(ret++, &physnodes[i].start,
+			if (setup_node_range(ret++, i, &physnodes[i].start,
 						end - physnodes[i].start,
 						physnodes[i].end) < 0)
 				node_clear(i, physnode_mask);
@@ -870,6 +877,9 @@ static int __init numa_emulation(int acpi, int amd)
 	int num_nodes;
 	int i;
 
+	for (i = 0; i < MAX_NUMNODES; i++)
+		emu_nid_to_phys[i] = NUMA_NO_NODE;
+
 	/*
 	 * If the numa=fake command-line contains a 'M' or 'G', it represents
 	 * the fixed node size.  Otherwise, if it is just a single number N,
@@ -890,6 +900,11 @@ static int __init numa_emulation(int acpi, int amd)
 	if (num_nodes < 0)
 		return num_nodes;
 
+	/* make sure all emulated nodes are mapped to a physical node */
+	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
+		if (emu_nid_to_phys[i] == NUMA_NO_NODE)
+			emu_nid_to_phys[i] = 0;
+
 	ei.nr_blks = num_nodes;
 	for (i = 0; i < ei.nr_blks; i++) {
 		ei.blk[i].start = nodes[i].start;
@@ -915,7 +930,6 @@ static int __init numa_emulation(int acpi, int amd)
 						nodes[i].end >> PAGE_SHIFT);
 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 	}
-	setup_physnodes(0, max_addr);
 	fake_physnodes(acpi, amd, num_nodes);
 	numa_init_array();
 	numa_emu_dist = true;
@@ -973,7 +987,11 @@ void __init initmem_init(void)
 		setup_physnodes(0, max_pfn << PAGE_SHIFT);
 		if (emu_cmdline && !numa_emulation(i == 0, i == 1))
 			return;
-		setup_physnodes(0, max_pfn << PAGE_SHIFT);
+
+		/* not emulating, build identity mapping for numa_add_cpu() */
+		for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
+			emu_nid_to_phys[j] = j;
+
 		nodes_clear(node_possible_map);
 		nodes_clear(node_online_map);
 #endif
@@ -1030,7 +1048,6 @@ int __cpuinit numa_cpu_node(int cpu)
 # ifndef CONFIG_DEBUG_PER_CPU_MAPS
 void __cpuinit numa_add_cpu(int cpu)
 {
-	unsigned long addr;
 	int physnid, nid;
 
 	nid = numa_cpu_node(cpu);
@@ -1038,26 +1055,15 @@ void __cpuinit numa_add_cpu(int cpu)
 		nid = early_cpu_to_node(cpu);
 	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
 
-	/*
-	 * Use the starting address of the emulated node to find which physical
-	 * node it is allocated on.
-	 */
-	addr = node_start_pfn(nid) << PAGE_SHIFT;
-	for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
-		if (addr >= physnodes[physnid].start &&
-		    addr < physnodes[physnid].end)
-			break;
+	physnid = emu_nid_to_phys[nid];
 
 	/*
 	 * Map the cpu to each emulated node that is allocated on the physical
 	 * node of the cpu's apic id.
 	 */
-	for_each_online_node(nid) {
-		addr = node_start_pfn(nid) << PAGE_SHIFT;
-		if (addr >= physnodes[physnid].start &&
-		    addr < physnodes[physnid].end)
+	for_each_online_node(nid)
+		if (emu_nid_to_phys[nid] == physnid)
 			cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
-	}
 }
 
 void __cpuinit numa_remove_cpu(int cpu)
@@ -1070,16 +1076,18 @@ void __cpuinit numa_remove_cpu(int cpu)
 # else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
 static void __cpuinit numa_set_cpumask(int cpu, int enable)
 {
-	int node = early_cpu_to_node(cpu);
 	struct cpumask *mask;
-	int i;
+	int nid, i;
 
-	for_each_online_node(i) {
-		unsigned long addr;
+	nid = numa_cpu_node(cpu);
+	if (nid == NUMA_NO_NODE)
+		nid = early_cpu_to_node(cpu);
+	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
 
-		addr = node_start_pfn(i) << PAGE_SHIFT;
-		if (addr < physnodes[node].start ||
-					addr >= physnodes[node].end)
+	physnid = emu_nid_to_phys[nid];
+
+	for_each_online_node(i) {
+		if (emu_nid_to_phys[nid] != physnid)
 			continue;
 		mask = debug_cpumask_set_cpu(cpu, enable);
 		if (!mask)
-- 
1.7.1


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 3/7] x86-64, NUMA: Make emulation code build numa_meminfo and share the registration path
  2011-02-14 19:28 [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation Tejun Heo
  2011-02-14 19:28 ` [PATCH 1/7] x86-64, NUMA: Trivial changes to prepare for emulation updates Tejun Heo
  2011-02-14 19:28 ` [PATCH 2/7] x86-64, NUMA: Build and use direct emulated nid -> phys nid mapping Tejun Heo
@ 2011-02-14 19:28 ` Tejun Heo
  2011-02-14 19:28 ` [PATCH 4/7] x86-64, NUMA: Wrap node ID during emulation Tejun Heo
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Tejun Heo @ 2011-02-14 19:28 UTC (permalink / raw)
  To: linux-kernel, x86, yinghai, brgerst, gorcunov, shaohui.zheng,
	rientjes, mingo, hpa, ankita
  Cc: Tejun Heo

NUMA emulation code built nodes[] array and had its own registration
path to set up the emulated nodes.  Update it such that it generates
emulated numa_meminfo and returns control to initmem_init() and shares
the same registration path with non-emulated cases.

Because {acpi|amd}_fake_nodes() expect nodes[] parameter,
fake_physnodes() now generates nodes[] from numa_meminfo.  This will
go away with further updates.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Shaohui Zheng <shaohui.zheng@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: H. Peter Anvin <hpa@linux.intel.com>
---
 arch/x86/mm/numa_64.c |  172 ++++++++++++++++++++++++------------------------
 1 files changed, 86 insertions(+), 86 deletions(-)

diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 253a5c3..093530f 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -539,7 +539,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 
 #ifdef CONFIG_NUMA_EMU
 /* Numa emulation */
-static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode physnodes[MAX_NUMNODES] __initdata;
 
 static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata;
@@ -624,9 +623,24 @@ static int __init setup_physnodes(unsigned long start, unsigned long end)
 	return ret;
 }
 
-static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
+static void __init fake_physnodes(int acpi, int amd,
+				  const struct numa_meminfo *ei)
 {
-	int i;
+	static struct bootnode nodes[MAX_NUMNODES] __initdata;
+	int i, nr_nodes = 0;
+
+	for (i = 0; i < ei->nr_blks; i++) {
+		int nid = ei->blk[i].nid;
+
+		if (nodes[nid].start == nodes[nid].end) {
+			nodes[nid].start = ei->blk[i].start;
+			nodes[nid].end = ei->blk[i].end;
+			nr_nodes++;
+		} else {
+			nodes[nid].start = min(ei->blk[i].start, nodes[nid].start);
+			nodes[nid].end = max(ei->blk[i].end, nodes[nid].end);
+		}
+	}
 
 	BUG_ON(acpi && amd);
 #ifdef CONFIG_ACPI_NUMA
@@ -643,45 +657,44 @@ static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
 }
 
 /*
- * Setups up nid to range from addr to addr + size.  If the end
- * boundary is greater than max_addr, then max_addr is used instead.
- * The return value is 0 if there is additional memory left for
- * allocation past addr and -1 otherwise.  addr is adjusted to be at
- * the end of the node.
+ * Sets up nid to range from @start to @end.  The return value is -errno if
+ * something went wrong, 0 otherwise.
  */
-static int __init setup_node_range(int nid, int physnid,
-				   u64 *addr, u64 size, u64 max_addr)
+static int __init emu_setup_memblk(struct numa_meminfo *ei,
+				   int nid, int physnid, u64 start, u64 end)
 {
-	int ret = 0;
-	nodes[nid].start = *addr;
-	*addr += size;
-	if (*addr >= max_addr) {
-		*addr = max_addr;
-		ret = -1;
+	struct numa_memblk *eb = &ei->blk[ei->nr_blks];
+
+	if (ei->nr_blks >= NR_NODE_MEMBLKS) {
+		pr_err("NUMA: Too many emulated memblks, failing emulation\n");
+		return -EINVAL;
 	}
-	nodes[nid].end = *addr;
-	node_set(nid, node_possible_map);
+
+	ei->nr_blks++;
+	eb->start = start;
+	eb->end = end;
+	eb->nid = nid;
 
 	if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
 		emu_nid_to_phys[nid] = physnid;
 
 	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
-	       nodes[nid].start, nodes[nid].end,
-	       (nodes[nid].end - nodes[nid].start) >> 20);
-	return ret;
+	       eb->start, eb->end, (eb->end - eb->start) >> 20);
+	return 0;
 }
 
 /*
  * Sets up nr_nodes fake nodes interleaved over physical nodes ranging from addr
  * to max_addr.  The return value is the number of nodes allocated.
  */
-static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
+static int __init split_nodes_interleave(struct numa_meminfo *ei,
+					 u64 addr, u64 max_addr, int nr_nodes)
 {
 	nodemask_t physnode_mask = NODE_MASK_NONE;
 	u64 size;
 	int big;
-	int ret = 0;
-	int i;
+	int nid = 0;
+	int i, ret;
 
 	if (nr_nodes <= 0)
 		return -1;
@@ -719,7 +732,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
 			u64 end = physnodes[i].start + size;
 			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
 
-			if (ret < big)
+			if (nid < big)
 				end += FAKE_NODE_MIN_SIZE;
 
 			/*
@@ -758,16 +771,21 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
 			 * happen as a result of rounding down each node's size
 			 * to FAKE_NODE_MIN_SIZE.
 			 */
-			if (nodes_weight(physnode_mask) + ret >= nr_nodes)
+			if (nodes_weight(physnode_mask) + nid >= nr_nodes)
 				end = physnodes[i].end;
 
-			if (setup_node_range(ret++, i, &physnodes[i].start,
-						end - physnodes[i].start,
-						physnodes[i].end) < 0)
+			ret = emu_setup_memblk(ei, nid++, i,
+					       physnodes[i].start,
+					       min(end, physnodes[i].end));
+			if (ret < 0)
+				return ret;
+
+			physnodes[i].start = min(end, physnodes[i].end);
+			if (physnodes[i].start == physnodes[i].end)
 				node_clear(i, physnode_mask);
 		}
 	}
-	return ret;
+	return 0;
 }
 
 /*
@@ -792,12 +810,13 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
  * Sets up fake nodes of `size' interleaved over physical nodes ranging from
  * `addr' to `max_addr'.  The return value is the number of nodes allocated.
  */
-static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
+static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
+					      u64 addr, u64 max_addr, u64 size)
 {
 	nodemask_t physnode_mask = NODE_MASK_NONE;
 	u64 min_size;
-	int ret = 0;
-	int i;
+	int nid = 0;
+	int i, ret;
 
 	if (!size)
 		return -1;
@@ -852,30 +871,31 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
 			    memblock_x86_hole_size(end, physnodes[i].end) < size)
 				end = physnodes[i].end;
 
-			/*
-			 * Setup the fake node that will be allocated as bootmem
-			 * later.  If setup_node_range() returns non-zero, there
-			 * is no more memory available on this physical node.
-			 */
-			if (setup_node_range(ret++, i, &physnodes[i].start,
-						end - physnodes[i].start,
-						physnodes[i].end) < 0)
+			ret = emu_setup_memblk(ei, nid++, i,
+					       physnodes[i].start,
+					       min(end, physnodes[i].end));
+			if (ret < 0)
+				return ret;
+
+			physnodes[i].start = min(end, physnodes[i].end);
+			if (physnodes[i].start == physnodes[i].end)
 				node_clear(i, physnode_mask);
 		}
 	}
-	return ret;
+	return 0;
 }
 
 /*
  * Sets up the system RAM area from start_pfn to last_pfn according to the
  * numa=fake command-line option.
  */
-static int __init numa_emulation(int acpi, int amd)
+static bool __init numa_emulation(int acpi, int amd)
 {
 	static struct numa_meminfo ei __initdata;
 	const u64 max_addr = max_pfn << PAGE_SHIFT;
-	int num_nodes;
-	int i;
+	int i, ret;
+
+	memset(&ei, 0, sizeof(ei));
 
 	for (i = 0; i < MAX_NUMNODES; i++)
 		emu_nid_to_phys[i] = NUMA_NO_NODE;
@@ -889,51 +909,33 @@ static int __init numa_emulation(int acpi, int amd)
 		u64 size;
 
 		size = memparse(emu_cmdline, &emu_cmdline);
-		num_nodes = split_nodes_size_interleave(0, max_addr, size);
+		ret = split_nodes_size_interleave(&ei, 0, max_addr, size);
 	} else {
 		unsigned long n;
 
 		n = simple_strtoul(emu_cmdline, NULL, 0);
-		num_nodes = split_nodes_interleave(0, max_addr, n);
+		ret = split_nodes_interleave(&ei, 0, max_addr, n);
+	}
+
+	if (ret < 0)
+		return false;
+
+	if (numa_cleanup_meminfo(&ei) < 0) {
+		pr_warning("NUMA: Warning: constructed meminfo invalid, disabling emulation\n");
+		return false;
 	}
 
-	if (num_nodes < 0)
-		return num_nodes;
+	/* commit */
+	numa_meminfo = ei;
 
 	/* make sure all emulated nodes are mapped to a physical node */
 	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
 		if (emu_nid_to_phys[i] == NUMA_NO_NODE)
 			emu_nid_to_phys[i] = 0;
 
-	ei.nr_blks = num_nodes;
-	for (i = 0; i < ei.nr_blks; i++) {
-		ei.blk[i].start = nodes[i].start;
-		ei.blk[i].end = nodes[i].end;
-		ei.blk[i].nid = i;
-	}
-
-	memnode_shift = compute_hash_shift(&ei);
-	if (memnode_shift < 0) {
-		memnode_shift = 0;
-		printk(KERN_ERR "No NUMA hash function found.  NUMA emulation "
-		       "disabled.\n");
-		return -1;
-	}
-
-	/*
-	 * We need to vacate all active ranges that may have been registered for
-	 * the e820 memory map.
-	 */
-	remove_all_active_ranges();
-	for_each_node_mask(i, node_possible_map) {
-		memblock_x86_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
-						nodes[i].end >> PAGE_SHIFT);
-		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
-	}
-	fake_physnodes(acpi, amd, num_nodes);
-	numa_init_array();
+	fake_physnodes(acpi, amd, &ei);
 	numa_emu_dist = true;
-	return 0;
+	return true;
 }
 #endif /* CONFIG_NUMA_EMU */
 
@@ -985,15 +987,13 @@ void __init initmem_init(void)
 			continue;
 #ifdef CONFIG_NUMA_EMU
 		setup_physnodes(0, max_pfn << PAGE_SHIFT);
-		if (emu_cmdline && !numa_emulation(i == 0, i == 1))
-			return;
-
-		/* not emulating, build identity mapping for numa_add_cpu() */
-		for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
-			emu_nid_to_phys[j] = j;
-
-		nodes_clear(node_possible_map);
-		nodes_clear(node_online_map);
+		/*
+		 * If requested, try emulation.  If emulation is not used,
+		 * build identity emu_nid_to_phys[] for numa_add_cpu()
+		 */
+		if (!emu_cmdline || !numa_emulation(i == 0, i == 1))
+			for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
+				emu_nid_to_phys[j] = j;
 #endif
 		if (numa_register_memblks(&numa_meminfo) < 0)
 			continue;
-- 
1.7.1


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 4/7] x86-64, NUMA: Wrap node ID during emulation
  2011-02-14 19:28 [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation Tejun Heo
                   ` (2 preceding siblings ...)
  2011-02-14 19:28 ` [PATCH 3/7] x86-64, NUMA: Make emulation code build numa_meminfo and share the registration path Tejun Heo
@ 2011-02-14 19:28 ` Tejun Heo
  2011-02-14 19:28 ` [PATCH 5/7] x86-64, NUMA: Emulate directly from numa_meminfo Tejun Heo
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Tejun Heo @ 2011-02-14 19:28 UTC (permalink / raw)
  To: linux-kernel, x86, yinghai, brgerst, gorcunov, shaohui.zheng,
	rientjes, mingo, hpa, ankita
  Cc: Tejun Heo

Both emulation layout functions - split_nodes[_size]_interleave() -
didn't wrap emulated nid while laying out the fake nodes and tried to
avoid iterating over the specified number of nodes, which is fragile.

Now that the emulation code generates numa_meminfo, the node memblks
don't need to be consecutive and emulated node IDs can simply wrap.
This makes the code more robust and is necessary for updates to better
handle the cases where the physical nodes are interleaved.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Shaohui Zheng <shaohui.zheng@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: H. Peter Anvin <hpa@linux.intel.com>
---
 arch/x86/mm/numa_64.c |   12 ++----------
 1 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 093530f..f26e7cd 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -766,15 +766,7 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 			    memblock_x86_hole_size(end, physnodes[i].end) < size)
 				end = physnodes[i].end;
 
-			/*
-			 * Avoid allocating more nodes than requested, which can
-			 * happen as a result of rounding down each node's size
-			 * to FAKE_NODE_MIN_SIZE.
-			 */
-			if (nodes_weight(physnode_mask) + nid >= nr_nodes)
-				end = physnodes[i].end;
-
-			ret = emu_setup_memblk(ei, nid++, i,
+			ret = emu_setup_memblk(ei, nid++ % nr_nodes, i,
 					       physnodes[i].start,
 					       min(end, physnodes[i].end));
 			if (ret < 0)
@@ -871,7 +863,7 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
 			    memblock_x86_hole_size(end, physnodes[i].end) < size)
 				end = physnodes[i].end;
 
-			ret = emu_setup_memblk(ei, nid++, i,
+			ret = emu_setup_memblk(ei, nid++ % MAX_NUMNODES, i,
 					       physnodes[i].start,
 					       min(end, physnodes[i].end));
 			if (ret < 0)
-- 
1.7.1


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 5/7] x86-64, NUMA: Emulate directly from numa_meminfo
  2011-02-14 19:28 [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation Tejun Heo
                   ` (3 preceding siblings ...)
  2011-02-14 19:28 ` [PATCH 4/7] x86-64, NUMA: Wrap node ID during emulation Tejun Heo
@ 2011-02-14 19:28 ` Tejun Heo
  2011-02-14 19:28 ` [PATCH 6/7] x86-64, NUMA: Unify emulated apicid -> node mapping transformation Tejun Heo
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Tejun Heo @ 2011-02-14 19:28 UTC (permalink / raw)
  To: linux-kernel, x86, yinghai, brgerst, gorcunov, shaohui.zheng,
	rientjes, mingo, hpa, ankita
  Cc: Tejun Heo

NUMA emulation built physnodes[] array which could only represent
configurations from the physical meminfo and emulated nodes using the
information.  There's no reason to take this extra level of
indirection.  Update emulation functions so that they operate directly
on numa_meminfo.  This simplifies the code and makes emulation layout
behave better with interleaved physical nodes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Shaohui Zheng <shaohui.zheng@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: H. Peter Anvin <hpa@linux.intel.com>
---
 arch/x86/mm/numa_64.c |  171 ++++++++++++++++++++----------------------------
 1 files changed, 71 insertions(+), 100 deletions(-)

diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index f26e7cd..5d10ec0 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -539,8 +539,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 
 #ifdef CONFIG_NUMA_EMU
 /* Numa emulation */
-static struct bootnode physnodes[MAX_NUMNODES] __initdata;
-
 static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata;
 static char *emu_cmdline __initdata;
 
@@ -549,6 +547,16 @@ void __init numa_emu_cmdline(char *str)
 	emu_cmdline = str;
 }
 
+static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
+{
+	int i;
+
+	for (i = 0; i < mi->nr_blks; i++)
+		if (mi->blk[i].nid == nid)
+			return i;
+	return -ENOENT;
+}
+
 int __init find_node_by_addr(unsigned long addr)
 {
 	const struct numa_meminfo *mi = &numa_meminfo;
@@ -566,63 +574,6 @@ int __init find_node_by_addr(unsigned long addr)
 	return NUMA_NO_NODE;
 }
 
-static int __init setup_physnodes(unsigned long start, unsigned long end)
-{
-	const struct numa_meminfo *mi = &numa_meminfo;
-	int ret = 0;
-	int i;
-
-	memset(physnodes, 0, sizeof(physnodes));
-
-	for (i = 0; i < mi->nr_blks; i++) {
-		int nid = mi->blk[i].nid;
-
-		if (physnodes[nid].start == physnodes[nid].end) {
-			physnodes[nid].start = mi->blk[i].start;
-			physnodes[nid].end = mi->blk[i].end;
-		} else {
-			physnodes[nid].start = min(physnodes[nid].start,
-						   mi->blk[i].start);
-			physnodes[nid].end = max(physnodes[nid].end,
-						 mi->blk[i].end);
-		}
-	}
-
-	/*
-	 * Basic sanity checking on the physical node map: there may be errors
-	 * if the SRAT or AMD code incorrectly reported the topology or the mem=
-	 * kernel parameter is used.
-	 */
-	for (i = 0; i < MAX_NUMNODES; i++) {
-		if (physnodes[i].start == physnodes[i].end)
-			continue;
-		if (physnodes[i].start > end) {
-			physnodes[i].end = physnodes[i].start;
-			continue;
-		}
-		if (physnodes[i].end < start) {
-			physnodes[i].start = physnodes[i].end;
-			continue;
-		}
-		if (physnodes[i].start < start)
-			physnodes[i].start = start;
-		if (physnodes[i].end > end)
-			physnodes[i].end = end;
-		ret++;
-	}
-
-	/*
-	 * If no physical topology was detected, a single node is faked to cover
-	 * the entire address space.
-	 */
-	if (!ret) {
-		physnodes[ret].start = start;
-		physnodes[ret].end = end;
-		ret = 1;
-	}
-	return ret;
-}
-
 static void __init fake_physnodes(int acpi, int amd,
 				  const struct numa_meminfo *ei)
 {
@@ -661,9 +612,11 @@ static void __init fake_physnodes(int acpi, int amd,
  * something went wrong, 0 otherwise.
  */
 static int __init emu_setup_memblk(struct numa_meminfo *ei,
-				   int nid, int physnid, u64 start, u64 end)
+				   struct numa_meminfo *pi,
+				   int nid, int phys_blk, u64 size)
 {
 	struct numa_memblk *eb = &ei->blk[ei->nr_blks];
+	struct numa_memblk *pb = &pi->blk[phys_blk];
 
 	if (ei->nr_blks >= NR_NODE_MEMBLKS) {
 		pr_err("NUMA: Too many emulated memblks, failing emulation\n");
@@ -671,12 +624,18 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
 	}
 
 	ei->nr_blks++;
-	eb->start = start;
-	eb->end = end;
+	eb->start = pb->start;
+	eb->end = pb->start + size;
 	eb->nid = nid;
 
 	if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
-		emu_nid_to_phys[nid] = physnid;
+		emu_nid_to_phys[nid] = pb->nid;
+
+	pb->start += size;
+	if (pb->start >= pb->end) {
+		WARN_ON_ONCE(pb->start > pb->end);
+		numa_remove_memblk_from(phys_blk, pi);
+	}
 
 	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
 	       eb->start, eb->end, (eb->end - eb->start) >> 20);
@@ -688,6 +647,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
  * to max_addr.  The return value is the number of nodes allocated.
  */
 static int __init split_nodes_interleave(struct numa_meminfo *ei,
+					 struct numa_meminfo *pi,
 					 u64 addr, u64 max_addr, int nr_nodes)
 {
 	nodemask_t physnode_mask = NODE_MASK_NONE;
@@ -719,9 +679,8 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 		return -1;
 	}
 
-	for (i = 0; i < MAX_NUMNODES; i++)
-		if (physnodes[i].start != physnodes[i].end)
-			node_set(i, physnode_mask);
+	for (i = 0; i < pi->nr_blks; i++)
+		node_set(pi->blk[i].nid, physnode_mask);
 
 	/*
 	 * Continue to fill physical nodes with fake nodes until there is no
@@ -729,8 +688,18 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 	 */
 	while (nodes_weight(physnode_mask)) {
 		for_each_node_mask(i, physnode_mask) {
-			u64 end = physnodes[i].start + size;
 			u64 dma32_end = PFN_PHYS(MAX_DMA32_PFN);
+			u64 start, limit, end;
+			int phys_blk;
+
+			phys_blk = emu_find_memblk_by_nid(i, pi);
+			if (phys_blk < 0) {
+				node_clear(i, physnode_mask);
+				continue;
+			}
+			start = pi->blk[phys_blk].start;
+			limit = pi->blk[phys_blk].end;
+			end = start + size;
 
 			if (nid < big)
 				end += FAKE_NODE_MIN_SIZE;
@@ -739,11 +708,11 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 			 * Continue to add memory to this fake node if its
 			 * non-reserved memory is less than the per-node size.
 			 */
-			while (end - physnodes[i].start -
-				memblock_x86_hole_size(physnodes[i].start, end) < size) {
+			while (end - start -
+			       memblock_x86_hole_size(start, end) < size) {
 				end += FAKE_NODE_MIN_SIZE;
-				if (end > physnodes[i].end) {
-					end = physnodes[i].end;
+				if (end > limit) {
+					end = limit;
 					break;
 				}
 			}
@@ -762,19 +731,15 @@ static int __init split_nodes_interleave(struct numa_meminfo *ei,
 			 * next node, this one must extend to the end of the
 			 * physical node.
 			 */
-			if (physnodes[i].end - end -
-			    memblock_x86_hole_size(end, physnodes[i].end) < size)
-				end = physnodes[i].end;
+			if (limit - end -
+			    memblock_x86_hole_size(end, limit) < size)
+				end = limit;
 
-			ret = emu_setup_memblk(ei, nid++ % nr_nodes, i,
-					       physnodes[i].start,
-					       min(end, physnodes[i].end));
+			ret = emu_setup_memblk(ei, pi, nid++ % nr_nodes,
+					       phys_blk,
+					       min(end, limit) - start);
 			if (ret < 0)
 				return ret;
-
-			physnodes[i].start = min(end, physnodes[i].end);
-			if (physnodes[i].start == physnodes[i].end)
-				node_clear(i, physnode_mask);
 		}
 	}
 	return 0;
@@ -803,6 +768,7 @@ static u64 __init find_end_of_node(u64 start, u64 max_addr, u64 size)
  * `addr' to `max_addr'.  The return value is the number of nodes allocated.
  */
 static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
+					      struct numa_meminfo *pi,
 					      u64 addr, u64 max_addr, u64 size)
 {
 	nodemask_t physnode_mask = NODE_MASK_NONE;
@@ -831,9 +797,9 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
 	}
 	size &= FAKE_NODE_MIN_HASH_MASK;
 
-	for (i = 0; i < MAX_NUMNODES; i++)
-		if (physnodes[i].start != physnodes[i].end)
-			node_set(i, physnode_mask);
+	for (i = 0; i < pi->nr_blks; i++)
+		node_set(pi->blk[i].nid, physnode_mask);
+
 	/*
 	 * Fill physical nodes with fake nodes of size until there is no memory
 	 * left on any of them.
@@ -841,10 +807,18 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
 	while (nodes_weight(physnode_mask)) {
 		for_each_node_mask(i, physnode_mask) {
 			u64 dma32_end = MAX_DMA32_PFN << PAGE_SHIFT;
-			u64 end;
+			u64 start, limit, end;
+			int phys_blk;
 
-			end = find_end_of_node(physnodes[i].start,
-						physnodes[i].end, size);
+			phys_blk = emu_find_memblk_by_nid(i, pi);
+			if (phys_blk < 0) {
+				node_clear(i, physnode_mask);
+				continue;
+			}
+			start = pi->blk[phys_blk].start;
+			limit = pi->blk[phys_blk].end;
+
+			end = find_end_of_node(start, limit, size);
 			/*
 			 * If there won't be at least FAKE_NODE_MIN_SIZE of
 			 * non-reserved memory in ZONE_DMA32 for the next node,
@@ -859,19 +833,15 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
 			 * next node, this one must extend to the end of the
 			 * physical node.
 			 */
-			if (physnodes[i].end - end -
-			    memblock_x86_hole_size(end, physnodes[i].end) < size)
-				end = physnodes[i].end;
+			if (limit - end -
+			    memblock_x86_hole_size(end, limit) < size)
+				end = limit;
 
-			ret = emu_setup_memblk(ei, nid++ % MAX_NUMNODES, i,
-					       physnodes[i].start,
-					       min(end, physnodes[i].end));
+			ret = emu_setup_memblk(ei, pi, nid++ % MAX_NUMNODES,
+					       phys_blk,
+					       min(end, limit) - start);
 			if (ret < 0)
 				return ret;
-
-			physnodes[i].start = min(end, physnodes[i].end);
-			if (physnodes[i].start == physnodes[i].end)
-				node_clear(i, physnode_mask);
 		}
 	}
 	return 0;
@@ -884,10 +854,12 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
 static bool __init numa_emulation(int acpi, int amd)
 {
 	static struct numa_meminfo ei __initdata;
+	static struct numa_meminfo pi __initdata;
 	const u64 max_addr = max_pfn << PAGE_SHIFT;
 	int i, ret;
 
 	memset(&ei, 0, sizeof(ei));
+	pi = numa_meminfo;
 
 	for (i = 0; i < MAX_NUMNODES; i++)
 		emu_nid_to_phys[i] = NUMA_NO_NODE;
@@ -901,12 +873,12 @@ static bool __init numa_emulation(int acpi, int amd)
 		u64 size;
 
 		size = memparse(emu_cmdline, &emu_cmdline);
-		ret = split_nodes_size_interleave(&ei, 0, max_addr, size);
+		ret = split_nodes_size_interleave(&ei, &pi, 0, max_addr, size);
 	} else {
 		unsigned long n;
 
 		n = simple_strtoul(emu_cmdline, NULL, 0);
-		ret = split_nodes_interleave(&ei, 0, max_addr, n);
+		ret = split_nodes_interleave(&ei, &pi, 0, max_addr, n);
 	}
 
 	if (ret < 0)
@@ -978,7 +950,6 @@ void __init initmem_init(void)
 		if (numa_cleanup_meminfo(&numa_meminfo) < 0)
 			continue;
 #ifdef CONFIG_NUMA_EMU
-		setup_physnodes(0, max_pfn << PAGE_SHIFT);
 		/*
 		 * If requested, try emulation.  If emulation is not used,
 		 * build identity emu_nid_to_phys[] for numa_add_cpu()
-- 
1.7.1


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 6/7] x86-64, NUMA: Unify emulated apicid -> node mapping transformation
  2011-02-14 19:28 [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation Tejun Heo
                   ` (4 preceding siblings ...)
  2011-02-14 19:28 ` [PATCH 5/7] x86-64, NUMA: Emulate directly from numa_meminfo Tejun Heo
@ 2011-02-14 19:28 ` Tejun Heo
  2011-02-14 19:28 ` [PATCH 7/7] x86-64, NUMA: Unify emulated distance mapping Tejun Heo
  2011-02-14 20:00 ` [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation Yinghai Lu
  7 siblings, 0 replies; 13+ messages in thread
From: Tejun Heo @ 2011-02-14 19:28 UTC (permalink / raw)
  To: linux-kernel, x86, yinghai, brgerst, gorcunov, shaohui.zheng,
	rientjes, mingo, hpa, ankita
  Cc: Tejun Heo

NUMA emulation changes node mappings and thus apicid -> node mapping
needs to be updated accordingly.  srat_64 and amdtopology_64 did this
separately; however, all the necessary information is the mapping from
emulated nodes to physical nodes which is available in
emu_nid_to_phys[].

Implement common __apicid_to_node[] transformation in numa_emulation()
and drop duplicate implementations.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Shaohui Zheng <shaohui.zheng@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: H. Peter Anvin <hpa@linux.intel.com>
---
 arch/x86/mm/amdtopology_64.c |    9 ---------
 arch/x86/mm/numa_64.c        |   16 +++++++++++++++-
 arch/x86/mm/srat_64.c        |   24 +-----------------------
 3 files changed, 16 insertions(+), 33 deletions(-)

diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c
index fd7b609..f37ea2f 100644
--- a/arch/x86/mm/amdtopology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -196,10 +196,6 @@ int __init amd_numa_init(void)
 }
 
 #ifdef CONFIG_NUMA_EMU
-static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
-	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
-};
-
 /*
  * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be
  * setup to represent the physical topology but reflect the emulated
@@ -224,20 +220,15 @@ void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes)
 	for (i = 0; i < nr_nodes; i++) {
 		int index;
 		int nid;
-		int j;
 
 		nid = find_node_by_addr(nodes[i].start);
 		if (nid == NUMA_NO_NODE)
 			continue;
 
 		index = nodeids[nid] << bits;
-		if (fake_apicid_to_node[index + apicid_base] == NUMA_NO_NODE)
-			for (j = apicid_base; j < cores + apicid_base; j++)
-				fake_apicid_to_node[index + j] = i;
 #ifdef CONFIG_ACPI_NUMA
 		__acpi_map_pxm_to_node(nid, i);
 #endif
 	}
-	memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node));
 }
 #endif /* CONFIG_NUMA_EMU */
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 5d10ec0..bbf5565 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -856,7 +856,7 @@ static bool __init numa_emulation(int acpi, int amd)
 	static struct numa_meminfo ei __initdata;
 	static struct numa_meminfo pi __initdata;
 	const u64 max_addr = max_pfn << PAGE_SHIFT;
-	int i, ret;
+	int i, j, ret;
 
 	memset(&ei, 0, sizeof(ei));
 	pi = numa_meminfo;
@@ -892,6 +892,20 @@ static bool __init numa_emulation(int acpi, int amd)
 	/* commit */
 	numa_meminfo = ei;
 
+	/*
+	 * Transform __apicid_to_node table to use emulated nids by
+	 * reverse-mapping phys_nid.  The maps should always exist but fall
+	 * back to zero just in case.
+	 */
+	for (i = 0; i < ARRAY_SIZE(__apicid_to_node); i++) {
+		if (__apicid_to_node[i] == NUMA_NO_NODE)
+			continue;
+		for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
+			if (__apicid_to_node[i] == emu_nid_to_phys[j])
+				break;
+		__apicid_to_node[i] = j < ARRAY_SIZE(emu_nid_to_phys) ? j : 0;
+	}
+
 	/* make sure all emulated nodes are mapped to a physical node */
 	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
 		if (emu_nid_to_phys[i] == NUMA_NO_NODE)
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index d2f53f3..d4fbfea 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -265,9 +265,6 @@ int __init x86_acpi_numa_init(void)
 static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = {
 	[0 ... MAX_NUMNODES-1] = PXM_INVAL
 };
-static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
-	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
-};
 
 /*
  * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID
@@ -279,7 +276,7 @@ static s16 fake_apicid_to_node[MAX_LOCAL_APIC] __initdata = {
  */
 void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
 {
-	int i, j;
+	int i;
 
 	for (i = 0; i < num_nodes; i++) {
 		int nid, pxm;
@@ -291,29 +288,10 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
 		if (pxm == PXM_INVAL)
 			continue;
 		fake_node_to_pxm_map[i] = pxm;
-		/*
-		 * For each apicid_to_node mapping that exists for this real
-		 * node, it must now point to the fake node ID.
-		 */
-		for (j = 0; j < MAX_LOCAL_APIC; j++)
-			if (__apicid_to_node[j] == nid &&
-			    fake_apicid_to_node[j] == NUMA_NO_NODE)
-				fake_apicid_to_node[j] = i;
 	}
 
-	/*
-	 * If there are apicid-to-node mappings for physical nodes that do not
-	 * have a corresponding emulated node, it should default to a guaranteed
-	 * value.
-	 */
-	for (i = 0; i < MAX_LOCAL_APIC; i++)
-		if (__apicid_to_node[i] != NUMA_NO_NODE &&
-		    fake_apicid_to_node[i] == NUMA_NO_NODE)
-			fake_apicid_to_node[i] = 0;
-
 	for (i = 0; i < num_nodes; i++)
 		__acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
-	memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node));
 
 	for (i = 0; i < num_nodes; i++)
 		if (fake_nodes[i].start != fake_nodes[i].end)
-- 
1.7.1


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 7/7] x86-64, NUMA: Unify emulated distance mapping
  2011-02-14 19:28 [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation Tejun Heo
                   ` (5 preceding siblings ...)
  2011-02-14 19:28 ` [PATCH 6/7] x86-64, NUMA: Unify emulated apicid -> node mapping transformation Tejun Heo
@ 2011-02-14 19:28 ` Tejun Heo
  2011-02-14 20:00 ` [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation Yinghai Lu
  7 siblings, 0 replies; 13+ messages in thread
From: Tejun Heo @ 2011-02-14 19:28 UTC (permalink / raw)
  To: linux-kernel, x86, yinghai, brgerst, gorcunov, shaohui.zheng,
	rientjes, mingo, hpa, ankita
  Cc: Tejun Heo

NUMA emulation needs to update node distance information.  It did it
by remapping apicid to PXM mapping, even when amdtopology is being
used.  There is no reason to go through such convolution.  The generic
code has all the information necessary to transform the distance table
to the emulated nid space.

Implement generic distance table transformation in numa_emulation()
and drop private implementations in srat_64 and amdtopology_64.  This
makes find_node_by_addr() and fake_physnodes() and related functions
unnecessary, drop them.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Shaohui Zheng <shaohui.zheng@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: H. Peter Anvin <hpa@linux.intel.com>
---
 arch/x86/include/asm/acpi.h    |    6 --
 arch/x86/include/asm/amd_nb.h  |    4 --
 arch/x86/include/asm/numa_64.h |    1 -
 arch/x86/mm/amdtopology_64.c   |   38 ---------------
 arch/x86/mm/numa_64.c          |  102 ++++++++++++++++------------------------
 arch/x86/mm/srat_64.c          |   65 -------------------------
 6 files changed, 40 insertions(+), 176 deletions(-)

diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 9c9fe1b..a37da6d 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -186,12 +186,6 @@ struct bootnode;
 #ifdef CONFIG_ACPI_NUMA
 extern int acpi_numa;
 extern int x86_acpi_numa_init(void);
-
-#ifdef CONFIG_NUMA_EMU
-extern void acpi_fake_nodes(const struct bootnode *fake_nodes,
-				   int num_nodes);
-extern int acpi_emu_node_distance(int a, int b);
-#endif
 #endif /* CONFIG_ACPI_NUMA */
 
 #define acpi_unlazy_tlb(x)	leave_mm(x)
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 627aff3..6982743 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -18,10 +18,6 @@ extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
 extern int amd_numa_init(void);
 
-#ifdef CONFIG_NUMA_EMU
-extern void amd_fake_nodes(const struct bootnode *nodes, int nr_nodes);
-#endif
-
 struct amd_northbridge {
 	struct pci_dev *misc;
 };
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 972af9d..fc71c68 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -35,7 +35,6 @@ extern void __init numa_set_distance(int from, int to, int distance);
 #define FAKE_NODE_MIN_SIZE	((u64)32 << 20)
 #define FAKE_NODE_MIN_HASH_MASK	(~(FAKE_NODE_MIN_SIZE - 1UL))
 void numa_emu_cmdline(char *);
-int __init find_node_by_addr(unsigned long addr);
 #endif /* CONFIG_NUMA_EMU */
 #else
 static inline int numa_cpu_node(int cpu)		{ return NUMA_NO_NODE; }
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c
index f37ea2f..0919c26 100644
--- a/arch/x86/mm/amdtopology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -194,41 +194,3 @@ int __init amd_numa_init(void)
 
 	return 0;
 }
-
-#ifdef CONFIG_NUMA_EMU
-/*
- * For NUMA emulation, fake proximity domain (_PXM) to node id mappings must be
- * setup to represent the physical topology but reflect the emulated
- * environment.  For each emulated node, the real node which it appears on is
- * found and a fake pxm to nid mapping is created which mirrors the actual
- * locality.  node_distance() then represents the correct distances between
- * emulated nodes by using the fake acpi mappings to pxms.
- */
-void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes)
-{
-	unsigned int bits;
-	unsigned int cores;
-	unsigned int apicid_base = 0;
-	int i;
-
-	bits = boot_cpu_data.x86_coreid_bits;
-	cores = 1 << bits;
-	early_get_boot_cpu_id();
-	if (boot_cpu_physical_apicid > 0)
-		apicid_base = boot_cpu_physical_apicid;
-
-	for (i = 0; i < nr_nodes; i++) {
-		int index;
-		int nid;
-
-		nid = find_node_by_addr(nodes[i].start);
-		if (nid == NUMA_NO_NODE)
-			continue;
-
-		index = nodeids[nid] << bits;
-#ifdef CONFIG_ACPI_NUMA
-		__acpi_map_pxm_to_node(nid, i);
-#endif
-	}
-}
-#endif /* CONFIG_NUMA_EMU */
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index bbf5565..a27bea7 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -48,10 +48,6 @@ static struct numa_meminfo numa_meminfo __initdata;
 static int numa_distance_cnt;
 static u8 *numa_distance;
 
-#ifdef CONFIG_NUMA_EMU
-static bool numa_emu_dist;
-#endif
-
 /*
  * Given a shift value, try to populate memnodemap[]
  * Returns :
@@ -443,10 +439,6 @@ void __init numa_set_distance(int from, int to, int distance)
 
 int __node_distance(int from, int to)
 {
-#if defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA_EMU)
-	if (numa_emu_dist)
-		return acpi_emu_node_distance(from, to);
-#endif
 	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
 		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
 	return numa_distance[from * numa_distance_cnt + to];
@@ -557,56 +549,6 @@ static int __init emu_find_memblk_by_nid(int nid, const struct numa_meminfo *mi)
 	return -ENOENT;
 }
 
-int __init find_node_by_addr(unsigned long addr)
-{
-	const struct numa_meminfo *mi = &numa_meminfo;
-	int i;
-
-	for (i = 0; i < mi->nr_blks; i++) {
-		/*
-		 * Find the real node that this emulated node appears on.  For
-		 * the sake of simplicity, we only use a real node's starting
-		 * address to determine which emulated node it appears on.
-		 */
-		if (addr >= mi->blk[i].start && addr < mi->blk[i].end)
-			return mi->blk[i].nid;
-	}
-	return NUMA_NO_NODE;
-}
-
-static void __init fake_physnodes(int acpi, int amd,
-				  const struct numa_meminfo *ei)
-{
-	static struct bootnode nodes[MAX_NUMNODES] __initdata;
-	int i, nr_nodes = 0;
-
-	for (i = 0; i < ei->nr_blks; i++) {
-		int nid = ei->blk[i].nid;
-
-		if (nodes[nid].start == nodes[nid].end) {
-			nodes[nid].start = ei->blk[i].start;
-			nodes[nid].end = ei->blk[i].end;
-			nr_nodes++;
-		} else {
-			nodes[nid].start = min(ei->blk[i].start, nodes[nid].start);
-			nodes[nid].end = max(ei->blk[i].end, nodes[nid].end);
-		}
-	}
-
-	BUG_ON(acpi && amd);
-#ifdef CONFIG_ACPI_NUMA
-	if (acpi)
-		acpi_fake_nodes(nodes, nr_nodes);
-#endif
-#ifdef CONFIG_AMD_NUMA
-	if (amd)
-		amd_fake_nodes(nodes, nr_nodes);
-#endif
-	if (!acpi && !amd)
-		for (i = 0; i < nr_cpu_ids; i++)
-			numa_set_node(i, 0);
-}
-
 /*
  * Sets up nid to range from @start to @end.  The return value is -errno if
  * something went wrong, 0 otherwise.
@@ -851,11 +793,13 @@ static int __init split_nodes_size_interleave(struct numa_meminfo *ei,
  * Sets up the system RAM area from start_pfn to last_pfn according to the
  * numa=fake command-line option.
  */
-static bool __init numa_emulation(int acpi, int amd)
+static bool __init numa_emulation(void)
 {
 	static struct numa_meminfo ei __initdata;
 	static struct numa_meminfo pi __initdata;
 	const u64 max_addr = max_pfn << PAGE_SHIFT;
+	int phys_dist_cnt = numa_distance_cnt;
+	u8 *phys_dist = NULL;
 	int i, j, ret;
 
 	memset(&ei, 0, sizeof(ei));
@@ -889,6 +833,25 @@ static bool __init numa_emulation(int acpi, int amd)
 		return false;
 	}
 
+	/*
+	 * Copy the original distance table.  It's temporary so no need to
+	 * reserve it.
+	 */
+	if (phys_dist_cnt) {
+		size_t size = phys_dist_cnt * sizeof(numa_distance[0]);
+		u64 phys;
+
+		phys = memblock_find_in_range(0,
+					      (u64)max_pfn_mapped << PAGE_SHIFT,
+					      size, PAGE_SIZE);
+		if (phys == MEMBLOCK_ERROR) {
+			pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
+			return false;
+		}
+		phys_dist = __va(phys);
+		memcpy(phys_dist, numa_distance, size);
+	}
+
 	/* commit */
 	numa_meminfo = ei;
 
@@ -911,8 +874,23 @@ static bool __init numa_emulation(int acpi, int amd)
 		if (emu_nid_to_phys[i] == NUMA_NO_NODE)
 			emu_nid_to_phys[i] = 0;
 
-	fake_physnodes(acpi, amd, &ei);
-	numa_emu_dist = true;
+	/* transform distance table */
+	numa_reset_distance();
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		for (j = 0; j < MAX_NUMNODES; j++) {
+			int physi = emu_nid_to_phys[i];
+			int physj = emu_nid_to_phys[j];
+			int dist;
+
+			if (physi >= phys_dist_cnt || physj >= phys_dist_cnt)
+				dist = physi == physj ?
+					LOCAL_DISTANCE : REMOTE_DISTANCE;
+			else
+				dist = phys_dist[physi * phys_dist_cnt + physj];
+
+			numa_set_distance(i, j, dist);
+		}
+	}
 	return true;
 }
 #endif /* CONFIG_NUMA_EMU */
@@ -968,7 +946,7 @@ void __init initmem_init(void)
 		 * If requested, try emulation.  If emulation is not used,
 		 * build identity emu_nid_to_phys[] for numa_add_cpu()
 		 */
-		if (!emu_cmdline || !numa_emulation(i == 0, i == 1))
+		if (!emu_cmdline || !numa_emulation())
 			for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
 				emu_nid_to_phys[j] = j;
 #endif
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index d4fbfea..8e9d339 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -26,8 +26,6 @@
 
 int acpi_numa __initdata;
 
-static struct acpi_table_slit *acpi_slit;
-
 static struct bootnode nodes_add[MAX_NUMNODES];
 
 static __init int setup_node(int pxm)
@@ -51,25 +49,11 @@ static __init inline int srat_disabled(void)
 void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 {
 	int i, j;
-	unsigned length;
-	unsigned long phys;
 
 	for (i = 0; i < slit->locality_count; i++)
 		for (j = 0; j < slit->locality_count; j++)
 			numa_set_distance(pxm_to_node(i), pxm_to_node(j),
 				slit->entry[slit->locality_count * i + j]);
-
-	/* acpi_slit is used only by emulation */
-	length = slit->header.length;
-	phys = memblock_find_in_range(0, max_pfn_mapped<<PAGE_SHIFT, length,
-		 PAGE_SIZE);
-
-	if (phys == MEMBLOCK_ERROR)
-		panic(" Can not save slit!\n");
-
-	acpi_slit = __va(phys);
-	memcpy(acpi_slit, slit, length);
-	memblock_x86_reserve_range(phys, phys + length, "ACPI SLIT");
 }
 
 /* Callback for Proximity Domain -> x2APIC mapping */
@@ -261,55 +245,6 @@ int __init x86_acpi_numa_init(void)
 	return srat_disabled() ? -EINVAL : 0;
 }
 
-#ifdef CONFIG_NUMA_EMU
-static int fake_node_to_pxm_map[MAX_NUMNODES] __initdata = {
-	[0 ... MAX_NUMNODES-1] = PXM_INVAL
-};
-
-/*
- * In NUMA emulation, we need to setup proximity domain (_PXM) to node ID
- * mappings that respect the real ACPI topology but reflect our emulated
- * environment.  For each emulated node, we find which real node it appears on
- * and create PXM to NID mappings for those fake nodes which mirror that
- * locality.  SLIT will now represent the correct distances between emulated
- * nodes as a result of the real topology.
- */
-void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
-{
-	int i;
-
-	for (i = 0; i < num_nodes; i++) {
-		int nid, pxm;
-
-		nid = find_node_by_addr(fake_nodes[i].start);
-		if (nid == NUMA_NO_NODE)
-			continue;
-		pxm = node_to_pxm(nid);
-		if (pxm == PXM_INVAL)
-			continue;
-		fake_node_to_pxm_map[i] = pxm;
-	}
-
-	for (i = 0; i < num_nodes; i++)
-		__acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
-
-	for (i = 0; i < num_nodes; i++)
-		if (fake_nodes[i].start != fake_nodes[i].end)
-			node_set(i, numa_nodes_parsed);
-}
-
-int acpi_emu_node_distance(int a, int b)
-{
-	int index;
-
-	if (!acpi_slit)
-		return node_to_pxm(a) == node_to_pxm(b) ?
-			LOCAL_DISTANCE : REMOTE_DISTANCE;
-	index = acpi_slit->locality_count * node_to_pxm(a);
-	return acpi_slit->entry[index + node_to_pxm(b)];
-}
-#endif /* CONFIG_NUMA_EMU */
-
 #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || defined(CONFIG_ACPI_HOTPLUG_MEMORY)
 int memory_add_physaddr_to_nid(u64 start)
 {
-- 
1.7.1


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation
  2011-02-14 19:28 [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation Tejun Heo
                   ` (6 preceding siblings ...)
  2011-02-14 19:28 ` [PATCH 7/7] x86-64, NUMA: Unify emulated distance mapping Tejun Heo
@ 2011-02-14 20:00 ` Yinghai Lu
  2011-02-15  2:28   ` Ingo Molnar
  2011-02-15  9:26   ` Tejun Heo
  7 siblings, 2 replies; 13+ messages in thread
From: Yinghai Lu @ 2011-02-14 20:00 UTC (permalink / raw)
  To: Tejun Heo
  Cc: linux-kernel, x86, brgerst, gorcunov, shaohui.zheng, rientjes,
	mingo, hpa, ankita

On Mon, Feb 14, 2011 at 11:28 AM, Tejun Heo <tj@kernel.org> wrote:
> Hello,
>
> NUMA emulation is quite convoluted involving unnecessary mapping and
> reverse mapping between apicids, PXMs, nodes and memory addresses.
> This patchset tries to restore some sanity to the whole thing.
>
> Tested on an opteron NUMA machine which can do both ACPI and AMD
> configs.  All NUMA configs, emulations, !NUMA and UP work as expected.
>
> This patchset is on top of tip/x86/numa[1] +
> bring-sanity-to-NUMA-configuration patchset[2] and contains the
> following 7 patches.
>
>  0001-x86-64-NUMA-Trivial-changes-to-prepare-for-emulation.patch
>  0002-x86-64-NUMA-Build-and-use-direct-emulated-nid-phys-n.patch
>  0003-x86-64-NUMA-Make-emulation-code-build-numa_meminfo-a.patch
>  0004-x86-64-NUMA-Wrap-node-ID-during-emulation.patch
>  0005-x86-64-NUMA-Emulate-directly-from-numa_meminfo.patch
>  0006-x86-64-NUMA-Unify-emulated-apicid-node-mapping-trans.patch
>  0007-x86-64-NUMA-Unify-emulated-distance-mapping.patch
>
> The patchset is available in the following git branch.
>
>  git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc.git x86_64-numa-emu-unify
>
> Diffstat follows.
>
>  arch/x86/include/asm/acpi.h    |    6
>  arch/x86/include/asm/amd_nb.h  |    4
>  arch/x86/include/asm/numa_64.h |    1
>  arch/x86/mm/amdtopology_64.c   |   47 ----
>  arch/x86/mm/numa_64.c          |  429 ++++++++++++++++++-----------------------
>  arch/x86/mm/srat_64.c          |   87 --------
>  6 files changed, 197 insertions(+), 377 deletions(-)

wonder if numa_emu code could be put into one single file like numa_emu.c

Yinghai

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation
  2011-02-14 20:00 ` [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation Yinghai Lu
@ 2011-02-15  2:28   ` Ingo Molnar
  2011-02-15  5:44     ` Yinghai Lu
  2011-02-15  9:26   ` Tejun Heo
  1 sibling, 1 reply; 13+ messages in thread
From: Ingo Molnar @ 2011-02-15  2:28 UTC (permalink / raw)
  To: Yinghai Lu
  Cc: Tejun Heo, linux-kernel, x86, brgerst, gorcunov, shaohui.zheng,
	rientjes, hpa, ankita


* Yinghai Lu <yinghai@kernel.org> wrote:

> On Mon, Feb 14, 2011 at 11:28 AM, Tejun Heo <tj@kernel.org> wrote:
> > Hello,
> >
> > NUMA emulation is quite convoluted involving unnecessary mapping and
> > reverse mapping between apicids, PXMs, nodes and memory addresses.
> > This patchset tries to restore some sanity to the whole thing.
> >
> > Tested on an opteron NUMA machine which can do both ACPI and AMD
> > configs.  All NUMA configs, emulations, !NUMA and UP work as expected.
> >
> > This patchset is on top of tip/x86/numa[1] +
> > bring-sanity-to-NUMA-configuration patchset[2] and contains the
> > following 7 patches.
> >
> >  0001-x86-64-NUMA-Trivial-changes-to-prepare-for-emulation.patch
> >  0002-x86-64-NUMA-Build-and-use-direct-emulated-nid-phys-n.patch
> >  0003-x86-64-NUMA-Make-emulation-code-build-numa_meminfo-a.patch
> >  0004-x86-64-NUMA-Wrap-node-ID-during-emulation.patch
> >  0005-x86-64-NUMA-Emulate-directly-from-numa_meminfo.patch
> >  0006-x86-64-NUMA-Unify-emulated-apicid-node-mapping-trans.patch
> >  0007-x86-64-NUMA-Unify-emulated-distance-mapping.patch
> >
> > The patchset is available in the following git branch.
> >
> >  git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc.git x86_64-numa-emu-unify
> >
> > Diffstat follows.
> >
> >  arch/x86/include/asm/acpi.h    |    6
> >  arch/x86/include/asm/amd_nb.h  |    4
> >  arch/x86/include/asm/numa_64.h |    1
> >  arch/x86/mm/amdtopology_64.c   |   47 ----
> >  arch/x86/mm/numa_64.c          |  429 ++++++++++++++++++-----------------------
> >  arch/x86/mm/srat_64.c          |   87 --------
> >  6 files changed, 197 insertions(+), 377 deletions(-)
> 
> wonder if numa_emu code could be put into one single file like numa_emu.c

That would be nice if it can be done sanely.

We could do it as a delta, on top of these existing patches, to not delay the 
testing of these fixes/improvements, they are looking pretty good (on paper) 
already, do you agree?

Thanks,

	Ingo

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation
  2011-02-15  2:28   ` Ingo Molnar
@ 2011-02-15  5:44     ` Yinghai Lu
  0 siblings, 0 replies; 13+ messages in thread
From: Yinghai Lu @ 2011-02-15  5:44 UTC (permalink / raw)
  To: Ingo Molnar, David Rientjes
  Cc: Tejun Heo, linux-kernel, x86, brgerst, gorcunov, shaohui.zheng,
	rientjes, hpa, ankita

On 02/14/2011 06:28 PM, Ingo Molnar wrote:
> 
> * Yinghai Lu <yinghai@kernel.org> wrote:
> 
>> On Mon, Feb 14, 2011 at 11:28 AM, Tejun Heo <tj@kernel.org> wrote:
>>> Hello,
>>>
>>> NUMA emulation is quite convoluted involving unnecessary mapping and
>>> reverse mapping between apicids, PXMs, nodes and memory addresses.
>>> This patchset tries to restore some sanity to the whole thing.
>>>
>>> Tested on an opteron NUMA machine which can do both ACPI and AMD
>>> configs.  All NUMA configs, emulations, !NUMA and UP work as expected.
>>>
>>> This patchset is on top of tip/x86/numa[1] +
>>> bring-sanity-to-NUMA-configuration patchset[2] and contains the
>>> following 7 patches.
>>>
>>>  0001-x86-64-NUMA-Trivial-changes-to-prepare-for-emulation.patch
>>>  0002-x86-64-NUMA-Build-and-use-direct-emulated-nid-phys-n.patch
>>>  0003-x86-64-NUMA-Make-emulation-code-build-numa_meminfo-a.patch
>>>  0004-x86-64-NUMA-Wrap-node-ID-during-emulation.patch
>>>  0005-x86-64-NUMA-Emulate-directly-from-numa_meminfo.patch
>>>  0006-x86-64-NUMA-Unify-emulated-apicid-node-mapping-trans.patch
>>>  0007-x86-64-NUMA-Unify-emulated-distance-mapping.patch
>>>
>>> The patchset is available in the following git branch.
>>>
>>>  git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc.git x86_64-numa-emu-unify
>>>
>>> Diffstat follows.
>>>
>>>  arch/x86/include/asm/acpi.h    |    6
>>>  arch/x86/include/asm/amd_nb.h  |    4
>>>  arch/x86/include/asm/numa_64.h |    1
>>>  arch/x86/mm/amdtopology_64.c   |   47 ----
>>>  arch/x86/mm/numa_64.c          |  429 ++++++++++++++++++-----------------------
>>>  arch/x86/mm/srat_64.c          |   87 --------
>>>  6 files changed, 197 insertions(+), 377 deletions(-)
>>
>> wonder if numa_emu code could be put into one single file like numa_emu.c
> 
> That would be nice if it can be done sanely.
> 
> We could do it as a delta, on top of these existing patches, to not delay the 
> testing of these fixes/improvements, they are looking pretty good (on paper) 
> already, do you agree?

yes.

I went through TJ's numa-unify patches, and it looks good to me.
<except that I don't like putting setup_bootmem() etc in numa_register_memblks,
 but that could be addressed later>

For the numa-emu-unify, may need David to have a look on them.

Thanks

Yinghai


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation
  2011-02-14 20:00 ` [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation Yinghai Lu
  2011-02-15  2:28   ` Ingo Molnar
@ 2011-02-15  9:26   ` Tejun Heo
  1 sibling, 0 replies; 13+ messages in thread
From: Tejun Heo @ 2011-02-15  9:26 UTC (permalink / raw)
  To: Yinghai Lu
  Cc: linux-kernel, x86, brgerst, gorcunov, shaohui.zheng, rientjes,
	mingo, hpa, ankita

Hello,

On Mon, Feb 14, 2011 at 12:00:43PM -0800, Yinghai Lu wrote:
> wonder if numa_emu code could be put into one single file like
> numa_emu.c

Yeah, it isn't difficult at all.  After the series, emulation code is
isolated into <400 lines of code and the state it modifies is
immediately apparent.  We can just move all the functions inside
CONFIG_NUMA_EMU and let initmem_init() pass all the states which are
necessary for emulation to numa_emulation().  If that's desirable,
I'll send a patch to do that on top once the whole thing settles a
bit.

Thanks.

-- 
tejun

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH UPDATED 2/7] x86-64, NUMA: Build and use direct emulated nid -> phys nid mapping
  2011-02-14 19:28 ` [PATCH 2/7] x86-64, NUMA: Build and use direct emulated nid -> phys nid mapping Tejun Heo
@ 2011-02-15 16:36   ` Tejun Heo
  0 siblings, 0 replies; 13+ messages in thread
From: Tejun Heo @ 2011-02-15 16:36 UTC (permalink / raw)
  To: linux-kernel, x86, yinghai, brgerst, gorcunov, shaohui.zheng,
	rientjes, mingo, hpa, ankita

NUMA emulation copied physical NUMA configuration into physnodes[] and
used it to reverse-map emulated nodes to physical nodes, which is
unnecessarily convoluted.  Build emu_nid_to_phys[] array to map
emulated nids directly to the matching physical nids and use it in
numa_add_cpu().

physnodes[] will be removed with further patches.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Shaohui Zheng <shaohui.zheng@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: H. Peter Anvin <hpa@linux.intel.com>
---
Rebased on top of the current x86/numa.

 arch/x86/mm/numa_64.c |   64 ++++++++++++++++++++++++++----------------------
 1 files changed, 35 insertions(+), 29 deletions(-)

diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 6547bfc..4c5a00b 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -540,7 +540,9 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 #ifdef CONFIG_NUMA_EMU
 /* Numa emulation */
 static struct bootnode nodes[MAX_NUMNODES] __initdata;
-static struct bootnode physnodes[MAX_NUMNODES] __cpuinitdata;
+static struct bootnode physnodes[MAX_NUMNODES] __initdata;
+
+static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata;
 static char *emu_cmdline __initdata;
 
 void __init numa_emu_cmdline(char *str)
@@ -647,7 +649,8 @@ static void __init fake_physnodes(int acpi, int amd, int nr_nodes)
  * allocation past addr and -1 otherwise.  addr is adjusted to be at
  * the end of the node.
  */
-static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
+static int __init setup_node_range(int nid, int physnid,
+				   u64 *addr, u64 size, u64 max_addr)
 {
 	int ret = 0;
 	nodes[nid].start = *addr;
@@ -658,6 +661,10 @@ static int __init setup_node_range(int nid, u64 *addr, u64 size, u64 max_addr)
 	}
 	nodes[nid].end = *addr;
 	node_set(nid, node_possible_map);
+
+	if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
+		emu_nid_to_phys[nid] = physnid;
+
 	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
 	       nodes[nid].start, nodes[nid].end,
 	       (nodes[nid].end - nodes[nid].start) >> 20);
@@ -754,7 +761,7 @@ static int __init split_nodes_interleave(u64 addr, u64 max_addr, int nr_nodes)
 			if (nodes_weight(physnode_mask) + ret >= nr_nodes)
 				end = physnodes[i].end;
 
-			if (setup_node_range(ret++, &physnodes[i].start,
+			if (setup_node_range(ret++, i, &physnodes[i].start,
 						end - physnodes[i].start,
 						physnodes[i].end) < 0)
 				node_clear(i, physnode_mask);
@@ -850,7 +857,7 @@ static int __init split_nodes_size_interleave(u64 addr, u64 max_addr, u64 size)
 			 * later.  If setup_node_range() returns non-zero, there
 			 * is no more memory available on this physical node.
 			 */
-			if (setup_node_range(ret++, &physnodes[i].start,
+			if (setup_node_range(ret++, i, &physnodes[i].start,
 						end - physnodes[i].start,
 						physnodes[i].end) < 0)
 				node_clear(i, physnode_mask);
@@ -870,6 +877,9 @@ static int __init numa_emulation(int acpi, int amd)
 	int num_nodes;
 	int i;
 
+	for (i = 0; i < MAX_NUMNODES; i++)
+		emu_nid_to_phys[i] = NUMA_NO_NODE;
+
 	/*
 	 * If the numa=fake command-line contains a 'M' or 'G', it represents
 	 * the fixed node size.  Otherwise, if it is just a single number N,
@@ -890,6 +900,11 @@ static int __init numa_emulation(int acpi, int amd)
 	if (num_nodes < 0)
 		return num_nodes;
 
+	/* make sure all emulated nodes are mapped to a physical node */
+	for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++)
+		if (emu_nid_to_phys[i] == NUMA_NO_NODE)
+			emu_nid_to_phys[i] = 0;
+
 	ei.nr_blks = num_nodes;
 	for (i = 0; i < ei.nr_blks; i++) {
 		ei.blk[i].start = nodes[i].start;
@@ -915,7 +930,6 @@ static int __init numa_emulation(int acpi, int amd)
 						nodes[i].end >> PAGE_SHIFT);
 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 	}
-	setup_physnodes(0, max_addr);
 	fake_physnodes(acpi, amd, num_nodes);
 	numa_init_array();
 	numa_emu_dist = true;
@@ -973,7 +987,11 @@ void __init initmem_init(void)
 		setup_physnodes(0, max_pfn << PAGE_SHIFT);
 		if (emu_cmdline && !numa_emulation(i == 0, i == 1))
 			return;
-		setup_physnodes(0, max_pfn << PAGE_SHIFT);
+
+		/* not emulating, build identity mapping for numa_add_cpu() */
+		for (j = 0; j < ARRAY_SIZE(emu_nid_to_phys); j++)
+			emu_nid_to_phys[j] = j;
+
 		nodes_clear(node_possible_map);
 		nodes_clear(node_online_map);
 #endif
@@ -1030,7 +1048,6 @@ int __cpuinit numa_cpu_node(int cpu)
 # ifndef CONFIG_DEBUG_PER_CPU_MAPS
 void __cpuinit numa_add_cpu(int cpu)
 {
-	unsigned long addr;
 	int physnid, nid;
 
 	nid = numa_cpu_node(cpu);
@@ -1038,26 +1055,15 @@ void __cpuinit numa_add_cpu(int cpu)
 		nid = early_cpu_to_node(cpu);
 	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
 
-	/*
-	 * Use the starting address of the emulated node to find which physical
-	 * node it is allocated on.
-	 */
-	addr = node_start_pfn(nid) << PAGE_SHIFT;
-	for (physnid = 0; physnid < MAX_NUMNODES; physnid++)
-		if (addr >= physnodes[physnid].start &&
-		    addr < physnodes[physnid].end)
-			break;
+	physnid = emu_nid_to_phys[nid];
 
 	/*
 	 * Map the cpu to each emulated node that is allocated on the physical
 	 * node of the cpu's apic id.
 	 */
-	for_each_online_node(nid) {
-		addr = node_start_pfn(nid) << PAGE_SHIFT;
-		if (addr >= physnodes[physnid].start &&
-		    addr < physnodes[physnid].end)
+	for_each_online_node(nid)
+		if (emu_nid_to_phys[nid] == physnid)
 			cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
-	}
 }
 
 void __cpuinit numa_remove_cpu(int cpu)
@@ -1070,21 +1076,21 @@ void __cpuinit numa_remove_cpu(int cpu)
 # else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
 static void __cpuinit numa_set_cpumask(int cpu, int enable)
 {
-	int node = early_cpu_to_node(cpu);
 	struct cpumask *mask;
-	int i;
+	int nid, i;
 
-	if (node == NUMA_NO_NODE) {
+	nid = early_cpu_to_node(cpu);
+	if (nid == NUMA_NO_NODE) {
 		/* early_cpu_to_node() already emits a warning and trace */
 		return;
 	}
-	for_each_online_node(i) {
-		unsigned long addr;
 
-		addr = node_start_pfn(i) << PAGE_SHIFT;
-		if (addr < physnodes[node].start ||
-					addr >= physnodes[node].end)
+	physnid = emu_nid_to_phys[nid];
+
+	for_each_online_node(i) {
+		if (emu_nid_to_phys[nid] != physnid)
 			continue;
+
 		mask = debug_cpumask_set_cpu(cpu, enable);
 		if (!mask)
 			return;
-- 
1.7.1


^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2011-02-15 16:37 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-02-14 19:28 [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation Tejun Heo
2011-02-14 19:28 ` [PATCH 1/7] x86-64, NUMA: Trivial changes to prepare for emulation updates Tejun Heo
2011-02-14 19:28 ` [PATCH 2/7] x86-64, NUMA: Build and use direct emulated nid -> phys nid mapping Tejun Heo
2011-02-15 16:36   ` [PATCH UPDATED " Tejun Heo
2011-02-14 19:28 ` [PATCH 3/7] x86-64, NUMA: Make emulation code build numa_meminfo and share the registration path Tejun Heo
2011-02-14 19:28 ` [PATCH 4/7] x86-64, NUMA: Wrap node ID during emulation Tejun Heo
2011-02-14 19:28 ` [PATCH 5/7] x86-64, NUMA: Emulate directly from numa_meminfo Tejun Heo
2011-02-14 19:28 ` [PATCH 6/7] x86-64, NUMA: Unify emulated apicid -> node mapping transformation Tejun Heo
2011-02-14 19:28 ` [PATCH 7/7] x86-64, NUMA: Unify emulated distance mapping Tejun Heo
2011-02-14 20:00 ` [PATCHSET x86/numa] x86-64, NUMA: bring sanity to NUMA emulation Yinghai Lu
2011-02-15  2:28   ` Ingo Molnar
2011-02-15  5:44     ` Yinghai Lu
2011-02-15  9:26   ` Tejun Heo

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).