LKML Archive on lore.kernel.org
From: Paul Turner <pjt@google.com>
To: linux-kernel@vger.kernel.org
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>,
	Ingo Molnar <mingo@elte.hu>, Mike Galbraith <efault@gmx.de>
Subject: [patch 3/5] sched: simplify update_cfs_shares parameters
Date: Fri, 21 Jan 2011 20:45:01 -0800
Message-ID: <20110122044851.915214637@google.com>
In-Reply-To: <20110122044458.058531078@google.com>


Re-visiting this: since update_cfs_shares will now only ever re-weight an
entity that is a relative parent of the current entity in enqueue_entity, we
can safely issue the account_entity_enqueue relative to that cfs_rq first and
avoid the need for special handling of the enqueue case in update_cfs_shares.
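
For reference, the resulting flow in enqueue_entity looks roughly like the
sketch below (abridged from the post-patch code in this patch, with the
ENQUEUE_WAKEUP handling and the rest of the function elided; this is
illustration only, not an additional change):

	/*
	 * The entity's weight is accounted into cfs_rq->load before
	 * update_cfs_shares() runs, so the shares calculation can read
	 * cfs_rq->load.weight directly and no longer needs a weight_delta
	 * argument.
	 */
	update_curr(cfs_rq);
	update_cfs_load(cfs_rq, 0);
	account_entity_enqueue(cfs_rq, se);	/* load.weight now includes se */
	update_cfs_shares(cfs_rq);		/* reads the updated weight */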

Signed-off-by: Paul Turner <pjt@google.com>

---
 kernel/sched.c      |    2 +-
 kernel/sched_fair.c |   22 +++++++++++-----------
 2 files changed, 12 insertions(+), 12 deletions(-)

Index: tip3/kernel/sched_fair.c
===================================================================
--- tip3.orig/kernel/sched_fair.c
+++ tip3/kernel/sched_fair.c
@@ -540,7 +540,7 @@ static u64 sched_vslice(struct cfs_rq *c
 }
 
 static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
-static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);
+static void update_cfs_shares(struct cfs_rq *cfs_rq);
 
 /*
  * Update the current task's runtime statistics. Skip current tasks that
@@ -778,7 +778,7 @@ static void reweight_entity(struct cfs_r
 		account_entity_enqueue(cfs_rq, se);
 }
 
-static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
+static void update_cfs_shares(struct cfs_rq *cfs_rq)
 {
 	struct task_group *tg;
 	struct sched_entity *se;
@@ -789,11 +789,11 @@ static void update_cfs_shares(struct cfs
 	if (!se)
 		return;
 
-	load = cfs_rq->load.weight + weight_delta;
+	load = cfs_rq->load.weight;
 
 	load_weight = atomic_read(&tg->load_weight);
-	load_weight -= cfs_rq->load_contribution;
 	load_weight += load;
+	load_weight -= cfs_rq->load_contribution;
 
 	shares = (tg->shares * load);
 	if (load_weight)
@@ -811,7 +811,7 @@ static void update_entity_shares_tick(st
 {
 	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
 		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
+		update_cfs_shares(cfs_rq);
 	}
 }
 #else /* CONFIG_FAIR_GROUP_SCHED */
@@ -819,7 +819,7 @@ static void update_cfs_load(struct cfs_r
 {
 }
 
-static inline void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
+static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
 {
 }
 
@@ -950,8 +950,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, st
 	 */
 	update_curr(cfs_rq);
 	update_cfs_load(cfs_rq, 0);
-	update_cfs_shares(cfs_rq, se->load.weight);
 	account_entity_enqueue(cfs_rq, se);
+	update_cfs_shares(cfs_rq);
 
 	if (flags & ENQUEUE_WAKEUP) {
 		place_entity(cfs_rq, se, 0);
@@ -1013,7 +1013,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
 	update_cfs_load(cfs_rq, 0);
 	account_entity_dequeue(cfs_rq, se);
 	update_min_vruntime(cfs_rq);
-	update_cfs_shares(cfs_rq, 0);
+	update_cfs_shares(cfs_rq);
 
 	/*
 	 * Normalize the entity after updating the min_vruntime because the
@@ -1254,7 +1254,7 @@ enqueue_task_fair(struct rq *rq, struct 
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
+		update_cfs_shares(cfs_rq);
 	}
 
 	hrtick_update(rq);
@@ -1284,7 +1284,7 @@ static void dequeue_task_fair(struct rq 
 		struct cfs_rq *cfs_rq = cfs_rq_of(se);
 
 		update_cfs_load(cfs_rq, 0);
-		update_cfs_shares(cfs_rq, 0);
+		update_cfs_shares(cfs_rq);
 	}
 
 	hrtick_update(rq);
@@ -2095,7 +2095,7 @@ static int update_shares_cpu(struct task
 	 * We need to update shares after updating tg->load_weight in
 	 * order to adjust the weight of groups with long running tasks.
 	 */
-	update_cfs_shares(cfs_rq, 0);
+	update_cfs_shares(cfs_rq);
 
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
Index: tip3/kernel/sched.c
===================================================================
--- tip3.orig/kernel/sched.c
+++ tip3/kernel/sched.c
@@ -8510,7 +8510,7 @@ int sched_group_set_shares(struct task_g
 		/* Propagate contribution to hierarchy */
 		raw_spin_lock_irqsave(&rq->lock, flags);
 		for_each_sched_entity(se)
-			update_cfs_shares(group_cfs_rq(se), 0);
+			update_cfs_shares(group_cfs_rq(se));
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	}
 



Thread overview: 15+ messages
2011-01-22  4:44 [patch 0/5] scheduler fixlets Paul Turner
2011-01-22  4:44 ` [patch 1/5] sched: fix sign under-flows in wake_affine Paul Turner
2011-01-26 12:09   ` [tip:sched/core] sched: Fix " tip-bot for Paul Turner
2011-01-22  4:45 ` [patch 2/5] sched: (cleanup) remove redundant cfs_rq checks Paul Turner
2011-01-26 12:10   ` [tip:sched/core] sched: Fix/remove " tip-bot for Paul Turner
2011-01-22  4:45 ` Paul Turner [this message]
2011-01-26 12:11   ` [tip:sched/core] sched: Simplify update_cfs_shares parameters tip-bot for Paul Turner
2011-01-22  4:45 ` [patch 4/5] sched: use rq->clock_task instead of rq->clock for maintaining load averages Paul Turner
2011-01-26 12:10   ` [tip:sched/core] sched: Use rq->clock_task instead of rq->clock for correctly " tip-bot for Paul Turner
2011-01-22  4:45 ` [patch 5/5] sched: avoid expensive initial update_cfs_load() Paul Turner
2011-01-26 12:11   ` [tip:sched/core] sched: Avoid " tip-bot for Paul Turner
2011-01-26 12:36     ` Peter Zijlstra
2011-01-26 12:45   ` [tip:sched/core] sched: Avoid expensive initial update_cfs_load(), on UP too tip-bot for Peter Zijlstra
2011-01-27 11:58   ` tip-bot for Peter Zijlstra
2011-01-24 10:17 ` [patch 0/5] scheduler fixlets Peter Zijlstra
