LKML Archive on lore.kernel.org
From: "Satoshi UCHIDA" <s-uchida@ap.jp.nec.com>
To: <linux-kernel@vger.kernel.org>,
<containers@lists.linux-foundation.org>, <axboe@kernel.dk>,
<menage@google.com>
Cc: <s-uchida@ap.jp.nec.com>, <tom-sugawara@ap.jp.nec.com>,
<m-takahashi@ex.jp.nec.com>
Subject: [RFC][patch 7/11][CFQ-cgroup] Control cfq_data per driver
Date: Tue, 1 Apr 2008 18:37:40 +0900
Message-ID: <008401c893dc$07669270$1633b750$@jp.nec.com>
In-Reply-To: <007801c893d9$d89726f0$89c574d0$@jp.nec.com>
This patch expands cfq_meta_data to handle multiple cfq_data instances.
The cfq_data structures belonging to one driver are kept in an rb_tree
whose key is the pointer value of each cfq_data.
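Because the key is the cfq_data pointer itself, the tree gives a cheap
membership test: given a (possibly stale) cfq_data pointer, walk the tree
comparing pointer values, mirroring the ordering used by
cfq_meta_data_sibling_tree_add() below. A minimal sketch of such a lookup,
using only the fields added by this patch (the helper name is invented
here for illustration; it is not part of the patch):

static struct cfq_data *
cfq_cgroup_sibling_tree_search(struct cfq_meta_data *cfqmd,
                               struct cfq_data *key)
{
        /* illustrative only: search the per-driver sibling tree */
        struct rb_node *n = cfqmd->sibling_tree.rb_node;

        while (n) {
                struct cfq_data *__cfqd =
                        rb_entry(n, struct cfq_data, sib_node);

                if (key < __cfqd)
                        n = n->rb_left;
                else if (key > __cfqd)
                        n = n->rb_right;
                else
                        return __cfqd;
        }
        return NULL;
}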
Signed-off-by: Satoshi UCHIDA <uchida@ap.jp.nec.com>
diff --git a/block/cfq-cgroup.c b/block/cfq-cgroup.c
index 34894d9..55090dc 100644
--- a/block/cfq-cgroup.c
+++ b/block/cfq-cgroup.c
@@ -54,9 +54,42 @@ static struct cfq_meta_data *cfq_cgroup_init_meta_data(struct cfq_data *cfqd, st
cfqmd->cfq_driv_d.idle_slice_timer.data = (unsigned long) cfqd;
cfqmd->cfq_driv_d.cfq_slice_idle = cfq_cgroup_slice_idle;
+ cfqmd->sibling_tree = RB_ROOT;
+ cfqmd->siblings = 0;
+
return cfqmd;
}
+static void cfq_meta_data_sibling_tree_add(struct cfq_meta_data *cfqmd,
+ struct cfq_data *cfqd)
+{
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+
+ BUG_ON(!RB_EMPTY_NODE(&cfqd->sib_node));
+
+ p = &cfqmd->sibling_tree.rb_node;
+
+ while (*p) {
+ struct cfq_data *__cfqd;
+ struct rb_node **n;
+
+ parent = *p;
+ __cfqd = rb_entry(parent, struct cfq_data, sib_node);
+
+ if (cfqd < __cfqd) {
+ n = &(*p)->rb_left;
+ } else {
+ n = &(*p)->rb_right;
+ }
+ p = n;
+ }
+
+ rb_link_node(&cfqd->sib_node, parent, p);
+ rb_insert_color(&cfqd->sib_node, &cfqmd->sibling_tree);
+ cfqmd->siblings++;
+ cfqd->cfqmd = cfqmd;
+}
struct cfq_data *__cfq_cgroup_init_queue(struct request_queue *q, void *data)
{
@@ -66,6 +99,8 @@ struct cfq_data *__cfq_cgroup_init_queue(struct request_queue *q, void *data)
if (!cfqd)
return NULL;
+ RB_CLEAR_NODE(&cfqd->sib_node);
+
if (!cfqmd) {
cfqmd = cfq_cgroup_init_meta_data(cfqd, q);
if (!cfqmd) {
@@ -73,6 +108,7 @@ struct cfq_data *__cfq_cgroup_init_queue(struct request_queue *q, void *data)
return NULL;
}
}
+ cfq_meta_data_sibling_tree_add(cfqmd, cfqd);
return cfqd;
}
@@ -102,11 +138,35 @@ cfq_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
/*
* Remove device or cgroup data functions.
*/
+static void cfq_cgroup_erase_meta_data_siblings(struct cfq_meta_data *cfqmd, struct cfq_data *cfqd)
+{
+ rb_erase(&cfqd->sib_node, &cfqmd->sibling_tree);
+ cfqmd->siblings--;
+}
+
+static void cfq_exit_device_group(struct cfq_meta_data *cfqmd)
+{
+ struct rb_node *p, *n;
+ struct cfq_data *cfqd;
+
+ p = rb_first(&cfqmd->sibling_tree);
+
+ while (p) {
+ n = rb_next(p);
+ cfqd = rb_entry(p, struct cfq_data, sib_node);
+
+ cfq_cgroup_erase_meta_data_siblings(cfqmd, cfqd);
+ __cfq_exit_data(cfqd);
+
+ p = n;
+ }
+}
+
static void __cfq_cgroup_exit_data(struct cfq_data *cfqd)
{
struct cfq_meta_data *cfqmd = cfqd->cfqmd;
- __cfq_exit_data(cfqd);
+ cfq_exit_device_group(cfqmd);
kfree(cfqmd);
}
diff --git a/include/linux/cfq-iosched.h b/include/linux/cfq-iosched.h
index e5b41da..c8879a7 100644
--- a/include/linux/cfq-iosched.h
+++ b/include/linux/cfq-iosched.h
@@ -54,6 +54,10 @@ struct cfq_driver_data {
*/
struct cfq_meta_data {
struct cfq_data *elv_data;
+
+ /* device siblings */
+ struct rb_root sibling_tree;
+ unsigned int siblings;
struct cfq_driver_data cfq_driv_d;
};
@@ -91,6 +95,9 @@ struct cfq_data {
#ifdef CONFIG_CGROUP_CFQ
struct cfq_meta_data *cfqmd;
+ /* sibling_tree member for cfq_meta_data */
+ struct rb_node sib_node;
+
#else
struct cfq_driver_data cfq_driv_d;
#endif
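A note on the teardown loop in cfq_exit_device_group() above: the
successor must be sampled with rb_next(p) before rb_erase() runs,
because rb_erase() rebalances the tree and leaves the erased node's
links undefined. The same pattern in miniature, for any rbtree
(root being a struct rb_root *) whose nodes are removed and freed
during the walk (illustrative only, not part of the patch):

        struct rb_node *p = rb_first(root);

        while (p) {
                struct rb_node *n = rb_next(p);   /* grab successor first */

                rb_erase(p, root);                /* p's links are now stale */
                /* ... free the structure containing p here ... */
                p = n;
        }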
Thread overview: 49+ messages
2008-04-01 9:22 [RFC][patch 0/11][CFQ-cgroup]Yet another I/O bandwidth controlling subsystem for CGroups based on CFQ Satoshi UCHIDA
2008-04-01 9:27 ` [RFC][patch 1/11][CFQ-cgroup] Add Configuration Satoshi UCHIDA
2008-04-01 9:30 ` [RFC][patch 2/11][CFQ-cgroup] Move header file Satoshi UCHIDA
2008-04-01 9:32 ` [RFC][patch 3/11][CFQ-cgroup] Introduce cgroup subsystem Satoshi UCHIDA
2008-04-02 22:41 ` Paul Menage
2008-04-03 2:31 ` Satoshi UCHIDA
2008-04-03 2:39 ` Li Zefan
2008-04-03 15:31 ` Paul Menage
2008-04-03 7:09 ` [RFC][v2][patch 0/12][CFQ-cgroup]Yet another I/O bandwidth controlling subsystem for CGroups based on CFQ Satoshi UCHIDA
2008-04-03 7:11 ` [PATCH] [RFC][patch 1/12][CFQ-cgroup] Add Configuration Satoshi UCHIDA
2008-04-03 7:12 ` [RFC][patch 2/11][CFQ-cgroup] Move header file Satoshi UCHIDA
2008-04-03 7:12 ` [RFC][patch 3/12][CFQ-cgroup] Introduce cgroup subsystem Satoshi UCHIDA
2008-04-03 7:13 ` [PATCH] [RFC][patch 4/12][CFQ-cgroup] Add ioprio entry Satoshi UCHIDA
2008-04-03 7:14 ` [RFC][patch 5/12][CFQ-cgroup] Create cfq driver unique data Satoshi UCHIDA
2008-04-03 7:14 ` [RFC][patch 6/12][CFQ-cgroup] Add cfq optional operation framework Satoshi UCHIDA
2008-04-03 7:15 ` [RFC][patch 7/12][CFQ-cgroup] Add new control layer over traditional control layer Satoshi UCHIDA
2008-04-03 7:15 ` [RFC][patch 8/12][CFQ-cgroup] Control cfq_data per driver Satoshi UCHIDA
2008-04-03 7:16 ` [RFC][patch 9/12][CFQ-cgroup] Control cfq_data per cgroup Satoshi UCHIDA
2008-04-03 7:16 ` [PATCH] [RFC][patch 10/12][CFQ-cgroup] Search cfq_data when not connected Satoshi UCHIDA
2008-04-03 7:17 ` [RFC][patch 11/12][CFQ-cgroup] Control service tree: Main functions Satoshi UCHIDA
2008-04-03 7:18 ` [RFC][patch 12/12][CFQ-cgroup] entry/remove active cfq_data Satoshi UCHIDA
2008-04-25 9:54 ` [RFC][v2][patch 0/12][CFQ-cgroup]Yet another I/O bandwidth controlling subsystem for CGroups based on CFQ Ryo Tsuruta
2008-04-25 21:37 ` [Devel] " Florian Westphal
2008-04-29 0:44 ` Ryo Tsuruta
2008-05-09 10:17 ` Satoshi UCHIDA
2008-05-12 3:10 ` Ryo Tsuruta
2008-05-12 15:33 ` Ryo Tsuruta
2008-05-22 13:04 ` Ryo Tsuruta
2008-05-23 2:53 ` Satoshi UCHIDA
2008-05-26 2:46 ` Ryo Tsuruta
2008-05-27 11:32 ` Satoshi UCHIDA
2008-05-30 10:37 ` Andrea Righi
2008-06-18 9:48 ` Satoshi UCHIDA
2008-06-18 22:33 ` Andrea Righi
2008-06-22 17:04 ` Andrea Righi
2008-06-03 8:15 ` Ryo Tsuruta
2008-06-26 4:49 ` Satoshi UCHIDA
2008-04-01 9:33 ` [RFC][patch 4/11][CFQ-cgroup] Create cfq driver unique data Satoshi UCHIDA
2008-04-01 9:35 ` [RFC][patch 5/11][CFQ-cgroup] Add cfq optional operation framework Satoshi UCHIDA
2008-04-01 9:36 ` [RFC][patch 6/11][CFQ-cgroup] Add new control layer over traditional control layer Satoshi UCHIDA
2008-04-01 9:37 ` Satoshi UCHIDA [this message]
2008-04-01 9:38 ` [RFC][patch 8/11][CFQ-cgroup] Control cfq_data per cgroup Satoshi UCHIDA
2008-04-03 15:35 ` Paul Menage
2008-04-04 6:20 ` Satoshi UCHIDA
2008-04-04 9:00 ` Paul Menage
2008-04-04 9:46 ` Satoshi UCHIDA
2008-04-01 9:40 ` [RFC][patch 9/11][CFQ-cgroup] Search cfq_data when not connected Satoshi UCHIDA
2008-04-01 9:41 ` [RFC][patch 10/11][CFQ-cgroup] Control service tree: Main functions Satoshi UCHIDA
2008-04-01 9:42 ` [RFC][patch 11/11][CFQ-cgroup] entry/remove active cfq_data Satoshi UCHIDA