From: Mike Travis <travis@sgi.com>
To: Ingo Molnar <mingo@elte.hu>, Rusty Russell <rusty@rustcorp.com.au>
Cc: Andi Kleen <andi@firstfloor.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	davej@codemonkey.org.uk, David Miller <davem@davemloft.net>,
	Eric Dumazet <dada1@cosmosbay.com>,
	Jack Steiner <steiner@sgi.com>,
	Jeremy Fitzhardinge <jeremy@goop.org>, Jes Sorensen <jes@sgi.com>,
	"H. Peter Anvin" <hpa@zytor.com>,
	peterz@infradead.org, Thomas Gleixner <tglx@linutronix.de>,
	Yinghai Lu <yhlu.kernel@gmail.com>,
	IA64 <linux-ia64@vger.kernel.org>,
	PowerPC <linuxppc-dev@ozlabs.org>,
	S390 <linux-s390@vger.kernel.org>,
	SPARC <sparclinux@vger.kernel.org>,
	linux-kernel@vger.kernel.org
Subject: [PATCH 26/35] cpumask: get rid of boutique sched.c allocations, use cpumask_var_t.
Date: Mon, 20 Oct 2008 10:03:45 -0700
Message-ID: <20081020170323.199606000@polaris-admin.engr.sgi.com>
In-Reply-To: <20081020170319.539427000@polaris-admin.engr.sgi.com>

Using lots of separate allocations rather than one big allocation is
less efficient, but who cares in this one-time setup function?  Replace
the hand-rolled struct allmasks / SCHED_CPUMASK_* machinery with one
cpumask_var_t per scratch mask and a conventional goto-based
error-unwind chain.
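
For reference, a minimal sketch of the allocation/unwind pattern this
patch adopts (the function name is made up for illustration; with the
cpumask_var_t API from <linux/cpumask.h>, alloc_cpumask_var() kmallocs
the mask only when CONFIG_CPUMASK_OFFSTACK is set, and otherwise the
mask lives on the stack and allocation cannot fail):

static int example_setup(void)
{
	int err = -ENOMEM;
	cpumask_var_t scratch, covered;

	if (!alloc_cpumask_var(&scratch, GFP_KERNEL))
		goto out;		/* nothing allocated yet */
	if (!alloc_cpumask_var(&covered, GFP_KERNEL))
		goto free_scratch;	/* unwind what succeeded */

	/* ... use *scratch and *covered like ordinary cpumask_t ... */

	err = 0;	/* success falls through the same unwind chain */
	free_cpumask_var(covered);
free_scratch:
	free_cpumask_var(scratch);
out:
	return err;
}

In the patch below the same chain is interleaved with #ifdef
CONFIG_NUMA, so non-NUMA builds keep the shared labels but compile out
the NUMA-only allocations and frees.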

From: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
---
 kernel/sched.c |  133 ++++++++++++++++++++++++---------------------------------
 1 file changed, 56 insertions(+), 77 deletions(-)

--- linux-2.6.28.orig/kernel/sched.c
+++ linux-2.6.28/kernel/sched.c
@@ -7291,40 +7291,6 @@ SD_INIT_FUNC(CPU)
  SD_INIT_FUNC(MC)
 #endif
 
-/*
- * To minimize stack usage kmalloc room for cpumasks and share the
- * space as the usage in build_sched_domains() dictates.  Used only
- * if the amount of space is significant.
- */
-struct allmasks {
-	cpumask_t tmpmask;			/* make this one first */
-	union {
-		cpumask_t nodemask;
-		cpumask_t this_sibling_map;
-		cpumask_t this_core_map;
-	};
-	cpumask_t send_covered;
-
-#ifdef CONFIG_NUMA
-	cpumask_t domainspan;
-	cpumask_t covered;
-	cpumask_t notcovered;
-#endif
-};
-
-#if	NR_CPUS > 128
-#define	SCHED_CPUMASK_ALLOC		1
-#define	SCHED_CPUMASK_FREE(v)		kfree(v)
-#define	SCHED_CPUMASK_DECLARE(v)	struct allmasks *v
-#else
-#define	SCHED_CPUMASK_ALLOC		0
-#define	SCHED_CPUMASK_FREE(v)
-#define	SCHED_CPUMASK_DECLARE(v)	struct allmasks _v, *v = &_v
-#endif
-
-#define	SCHED_CPUMASK_VAR(v, a) 	cpumask_t *v = (cpumask_t *) \
-			((unsigned long)(a) + offsetof(struct allmasks, v))
-
 static int default_relax_domain_level = -1;
 
 static int __init setup_relax_domain_level(char *str)
@@ -7367,14 +7333,35 @@ static void set_domain_attribute(struct 
 static int __build_sched_domains(const cpumask_t *cpu_map,
 				 struct sched_domain_attr *attr)
 {
-	int i;
+	int i, err = -ENOMEM;
 	struct root_domain *rd;
-	SCHED_CPUMASK_DECLARE(allmasks);
-	cpumask_t *tmpmask;
+	cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered,
+		tmpmask;
 #ifdef CONFIG_NUMA
+	cpumask_var_t domainspan, covered, notcovered;
 	struct sched_group **sched_group_nodes = NULL;
 	int sd_allnodes = 0;
 
+	if (!alloc_cpumask_var(&domainspan, GFP_KERNEL))
+		goto out;
+	if (!alloc_cpumask_var(&covered, GFP_KERNEL))
+		goto free_domainspan;
+	if (!alloc_cpumask_var(&notcovered, GFP_KERNEL))
+		goto free_covered;
+#endif
+
+	if (!alloc_cpumask_var(&nodemask, GFP_KERNEL))
+		goto free_notcovered;
+	if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL))
+		goto free_nodemask;
+	if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL))
+		goto free_this_sibling_map;
+	if (!alloc_cpumask_var(&send_covered, GFP_KERNEL))
+		goto free_this_core_map;
+	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
+		goto free_send_covered;
+
+#ifdef CONFIG_NUMA
 	/*
 	 * Allocate the per-node list of sched groups
 	 */
@@ -7382,34 +7369,16 @@ static int __build_sched_domains(const c
 				    GFP_KERNEL);
 	if (!sched_group_nodes) {
 		printk(KERN_WARNING "Can not alloc sched group node list\n");
-		return -ENOMEM;
+		goto free_tmpmask;
 	}
 #endif
 
 	rd = alloc_rootdomain();
 	if (!rd) {
 		printk(KERN_WARNING "Cannot alloc root domain\n");
-#ifdef CONFIG_NUMA
-		kfree(sched_group_nodes);
-#endif
-		return -ENOMEM;
+		goto free_sched_groups;
 	}
 
-#if SCHED_CPUMASK_ALLOC
-	/* get space for all scratch cpumask variables */
-	allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL);
-	if (!allmasks) {
-		printk(KERN_WARNING "Cannot alloc cpumask array\n");
-		kfree(rd);
-#ifdef CONFIG_NUMA
-		kfree(sched_group_nodes);
-#endif
-		return -ENOMEM;
-	}
-#endif
-	tmpmask = (cpumask_t *)allmasks;
-
-
 #ifdef CONFIG_NUMA
 	sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
 #endif
@@ -7419,7 +7388,6 @@ static int __build_sched_domains(const c
 	 */
 	for_each_cpu(i, cpu_map) {
 		struct sched_domain *sd = NULL, *p;
-		SCHED_CPUMASK_VAR(nodemask, allmasks);
 
 		*nodemask = node_to_cpumask(cpu_to_node(i));
 		cpus_and(*nodemask, *nodemask, *cpu_map);
@@ -7485,9 +7453,6 @@ static int __build_sched_domains(const c
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
 	for_each_cpu(i, cpu_map) {
-		SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
-		SCHED_CPUMASK_VAR(send_covered, allmasks);
-
 		*this_sibling_map = per_cpu(cpu_sibling_map, i);
 		cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map);
 		if (i != first_cpu(*this_sibling_map))
@@ -7502,9 +7467,6 @@ static int __build_sched_domains(const c
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
 	for_each_cpu(i, cpu_map) {
-		SCHED_CPUMASK_VAR(this_core_map, allmasks);
-		SCHED_CPUMASK_VAR(send_covered, allmasks);
-
 		*this_core_map = cpu_coregroup_map(i);
 		cpus_and(*this_core_map, *this_core_map, *cpu_map);
 		if (i != first_cpu(*this_core_map))
@@ -7518,9 +7480,6 @@ static int __build_sched_domains(const c
 
 	/* Set up physical groups */
 	for (i = 0; i < nr_node_ids; i++) {
-		SCHED_CPUMASK_VAR(nodemask, allmasks);
-		SCHED_CPUMASK_VAR(send_covered, allmasks);
-
 		*nodemask = node_to_cpumask(i);
 		cpus_and(*nodemask, *nodemask, *cpu_map);
 		if (cpus_empty(*nodemask))
@@ -7534,8 +7493,6 @@ static int __build_sched_domains(const c
 #ifdef CONFIG_NUMA
 	/* Set up node groups */
 	if (sd_allnodes) {
-		SCHED_CPUMASK_VAR(send_covered, allmasks);
-
 		init_sched_build_groups(cpu_map, cpu_map,
 					&cpu_to_allnodes_group,
 					send_covered, tmpmask);
@@ -7544,9 +7501,6 @@ static int __build_sched_domains(const c
 	for (i = 0; i < nr_node_ids; i++) {
 		/* Set up node groups */
 		struct sched_group *sg, *prev;
-		SCHED_CPUMASK_VAR(nodemask, allmasks);
-		SCHED_CPUMASK_VAR(domainspan, allmasks);
-		SCHED_CPUMASK_VAR(covered, allmasks);
 		int j;
 
 		*nodemask = node_to_cpumask(i);
@@ -7581,7 +7535,6 @@ static int __build_sched_domains(const c
 		prev = sg;
 
 		for (j = 0; j < nr_node_ids; j++) {
-			SCHED_CPUMASK_VAR(notcovered, allmasks);
 			int n = (i + j) % nr_node_ids;
 			node_to_cpumask_ptr(pnodemask, n);
 
@@ -7660,14 +7613,40 @@ static int __build_sched_domains(const c
 		cpu_attach_domain(sd, rd, i);
 	}
 
-	SCHED_CPUMASK_FREE((void *)allmasks);
-	return 0;
+	err = 0;
+
+free_tmpmask:
+	free_cpumask_var(tmpmask);
+free_send_covered:
+	free_cpumask_var(send_covered);
+free_this_core_map:
+	free_cpumask_var(this_core_map);
+free_this_sibling_map:
+	free_cpumask_var(this_sibling_map);
+free_nodemask:
+	free_cpumask_var(nodemask);
+free_notcovered:
+#ifdef CONFIG_NUMA
+	free_cpumask_var(notcovered);
+free_covered:
+	free_cpumask_var(covered);
+free_domainspan:
+	free_cpumask_var(domainspan);
+out:
+#endif
+	return err;
+
+free_sched_groups:
+#ifdef CONFIG_NUMA
+	kfree(sched_group_nodes);
+#endif
+	goto free_tmpmask;
 
 #ifdef CONFIG_NUMA
 error:
 	free_sched_groups(cpu_map, tmpmask);
-	SCHED_CPUMASK_FREE((void *)allmasks);
-	return -ENOMEM;
+	kfree(rd);
+	goto free_tmpmask;
 #endif
 }
 

-- 
