* [PATCH 1/2] init: move setup of nr_cpu_ids to as early as possible v3
From: Mike Travis @ 2008-03-26 21:23 UTC
To: Andrew Morton, Ingo Molnar; +Cc: linux-mm, linux-kernel
Move the setting of nr_cpu_ids from sched_init() to start_kernel()
so that it's available as early as possible.
Note that an arch has the option of setting nr_cpu_ids even earlier if
need be, but doing so must not produce a different value than the one
setup_nr_cpu_ids() would compute.
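As a purely illustrative sketch of such an arch-side preset (the names
arch_probe_cpus() and prom_cpu_count() are invented for this example,
not real kernel interfaces):

/*
 * Hypothetical early arch code, run from setup_arch() before
 * setup_nr_cpu_ids().  The only hard requirement is that the value
 * stored here equals the highest possible cpu + 1, i.e. what
 * setup_nr_cpu_ids() would compute from cpu_possible_map.
 */
static void __init arch_probe_cpus(void)
{
	int i, ncpus = prom_cpu_count();  /* firmware-reported CPU count */

	for (i = 0; i < ncpus; i++)
		cpu_set(i, cpu_possible_map);

	nr_cpu_ids = ncpus;		/* matches setup_nr_cpu_ids() */
}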
Based on:
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
Signed-off-by: Mike Travis <travis@sgi.com>
---
init/main.c | 17 +++++++++++++++++
kernel/sched.c | 7 -------
2 files changed, 17 insertions(+), 7 deletions(-)
--- linux.trees.git.orig/init/main.c
+++ linux.trees.git/init/main.c
@@ -360,10 +360,26 @@ static void __init smp_init(void)
#endif
static inline void setup_per_cpu_areas(void) { }
+static inline void setup_nr_cpu_ids(void) { }
static inline void smp_prepare_cpus(unsigned int maxcpus) { }
#else
+/* Setup number of possible processor ids */
+int nr_cpu_ids __read_mostly = NR_CPUS;
+EXPORT_SYMBOL(nr_cpu_ids);
+
+/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
+static void __init setup_nr_cpu_ids(void)
+{
+ int cpu, highest_cpu = 0;
+
+ for_each_possible_cpu(cpu)
+ highest_cpu = cpu;
+
+ nr_cpu_ids = highest_cpu + 1;
+}
+
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
@@ -544,6 +560,7 @@ asmlinkage void __init start_kernel(void
setup_command_line(command_line);
unwind_setup();
setup_per_cpu_areas();
+ setup_nr_cpu_ids();
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
/*
--- linux.trees.git.orig/kernel/sched.c
+++ linux.trees.git/kernel/sched.c
@@ -5923,10 +5923,6 @@ void __init migration_init(void)
#ifdef CONFIG_SMP
-/* Number of possible processor ids */
-int nr_cpu_ids __read_mostly = NR_CPUS;
-EXPORT_SYMBOL(nr_cpu_ids);
-
#ifdef CONFIG_SCHED_DEBUG
static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
@@ -7152,7 +7148,6 @@ static void init_tg_rt_entry(struct rq *
void __init sched_init(void)
{
- int highest_cpu = 0;
int i, j;
#ifdef CONFIG_SMP
@@ -7207,7 +7202,6 @@ void __init sched_init(void)
#endif
init_rq_hrtick(rq);
atomic_set(&rq->nr_iowait, 0);
- highest_cpu = i;
}
set_load_weight(&init_task);
@@ -7217,7 +7211,6 @@ void __init sched_init(void)
#endif
#ifdef CONFIG_SMP
- nr_cpu_ids = highest_cpu + 1;
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
#endif
--
* [PATCH 2/2] sched: add new set_cpus_allowed_ptr function
From: Mike Travis @ 2008-03-26 21:23 UTC
To: Andrew Morton, Ingo Molnar; +Cc: linux-mm, linux-kernel
Add a new function that accepts a pointer to the "newly allowed cpus"
cpumask argument.
int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
The current set_cpus_allowed() function becomes an inline wrapper
around the new function, so this does not result in an ABI change.
And with some help from compiler optimization, the wrapper may not
introduce any additional overhead.
Additionally, to enforce the read-only nature of the new_mask
argument, the "const" qualifier is propagated to the sub-functions
reached via the sched_class set_cpus_allowed hook. This silences
compiler warnings.
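To illustrate the intended call-site conversion (a hedged example, not
part of this patch): with NR_CPUS=4096 a cpumask_t is 512 bytes, so
passing by pointer avoids a large copy at every call.

	/* Old interface: the whole cpumask_t is passed by value,
	 * copying 512 bytes onto the stack when NR_CPUS=4096. */
	set_cpus_allowed(current, cpu_online_map);

	/* New interface: only a pointer is passed. */
	set_cpus_allowed_ptr(current, &cpu_online_map);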
Based on:
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
Signed-off-by: Mike Travis <travis@sgi.com>
---
include/linux/sched.h | 15 +++++++++++----
kernel/sched.c | 14 +++++++-------
kernel/sched_rt.c | 3 ++-
3 files changed, 20 insertions(+), 12 deletions(-)
--- linux.trees.git.orig/include/linux/sched.h
+++ linux.trees.git/include/linux/sched.h
@@ -889,7 +889,8 @@ struct sched_class {
void (*set_curr_task) (struct rq *rq);
void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
void (*task_new) (struct rq *rq, struct task_struct *p);
- void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask);
+ void (*set_cpus_allowed)(struct task_struct *p,
+ const cpumask_t *newmask);
void (*join_domain)(struct rq *rq);
void (*leave_domain)(struct rq *rq);
@@ -1501,15 +1502,21 @@ static inline void put_task_struct(struc
#define used_math() tsk_used_math(current)
#ifdef CONFIG_SMP
-extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
+extern int set_cpus_allowed_ptr(struct task_struct *p,
+ const cpumask_t *new_mask);
#else
-static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+static inline int set_cpus_allowed_ptr(struct task_struct *p,
+ const cpumask_t *new_mask)
{
- if (!cpu_isset(0, new_mask))
+ if (!cpu_isset(0, *new_mask))
return -EINVAL;
return 0;
}
#endif
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+{
+ return set_cpus_allowed_ptr(p, &new_mask);
+}
extern unsigned long long sched_clock(void);
--- linux.trees.git.orig/kernel/sched.c
+++ linux.trees.git/kernel/sched.c
@@ -5295,7 +5295,7 @@ static inline void sched_init_granularit
* task must not exit() & deallocate itself prematurely. The
* call is not atomic; no spinlocks may be held.
*/
-int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask)
{
struct migration_req req;
unsigned long flags;
@@ -5303,23 +5303,23 @@ int set_cpus_allowed(struct task_struct
int ret = 0;
rq = task_rq_lock(p, &flags);
- if (!cpus_intersects(new_mask, cpu_online_map)) {
+ if (!cpus_intersects(*new_mask, cpu_online_map)) {
ret = -EINVAL;
goto out;
}
if (p->sched_class->set_cpus_allowed)
- p->sched_class->set_cpus_allowed(p, &new_mask);
+ p->sched_class->set_cpus_allowed(p, new_mask);
else {
- p->cpus_allowed = new_mask;
- p->rt.nr_cpus_allowed = cpus_weight(new_mask);
+ p->cpus_allowed = *new_mask;
+ p->rt.nr_cpus_allowed = cpus_weight(*new_mask);
}
/* Can the task run on the task's current CPU? If so, we're done */
- if (cpu_isset(task_cpu(p), new_mask))
+ if (cpu_isset(task_cpu(p), *new_mask))
goto out;
- if (migrate_task(p, any_online_cpu(new_mask), &req)) {
+ if (migrate_task(p, any_online_cpu(*new_mask), &req)) {
/* Need help from migration thread: drop lock and wait. */
task_rq_unlock(rq, &flags);
wake_up_process(rq->migration_thread);
--- linux.trees.git.orig/kernel/sched_rt.c
+++ linux.trees.git/kernel/sched_rt.c
@@ -1001,7 +1001,8 @@ move_one_task_rt(struct rq *this_rq, int
return 0;
}
-static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
+static void set_cpus_allowed_rt(struct task_struct *p,
+ const cpumask_t *new_mask)
{
int weight = cpus_weight(*new_mask);
--