LKML Archive on lore.kernel.org
* [PATCH] slab: cache alloc cleanups
@ 2007-01-05 11:46 Pekka J Enberg
2007-01-05 19:05 ` Christoph Lameter
2007-01-05 19:50 ` Andrew Morton
0 siblings, 2 replies; 12+ messages in thread
From: Pekka J Enberg @ 2007-01-05 11:46 UTC (permalink / raw)
To: akpm; +Cc: linux-kernel, apw, hch, manfred, christoph, pj
From: Pekka Enberg <penberg@cs.helsinki.fi>
Clean up __cache_alloc and __cache_alloc_node functions a bit. We no
longer need to do NUMA_BUILD tricks and the UMA allocation path is much
simpler. No functional changes in this patch.
Note: this saves a few kernel text bytes on an x86 NUMA build due to using gotos in
__cache_alloc_node() and moving the __GFP_THISNODE check into fallback_alloc().
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Christoph Lameter <christoph@lameter.com>
Cc: Paul Jackson <pj@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---
diff --git a/mm/slab.c b/mm/slab.c
index 0d4e574..5edb7bf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3197,35 +3197,6 @@ static inline void *____cache_alloc(stru
return objp;
}
-static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
- gfp_t flags, void *caller)
-{
- unsigned long save_flags;
- void *objp = NULL;
-
- cache_alloc_debugcheck_before(cachep, flags);
-
- local_irq_save(save_flags);
-
- if (unlikely(NUMA_BUILD &&
- current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
- objp = alternate_node_alloc(cachep, flags);
-
- if (!objp)
- objp = ____cache_alloc(cachep, flags);
- /*
- * We may just have run out of memory on the local node.
- * ____cache_alloc_node() knows how to locate memory on other nodes
- */
- if (NUMA_BUILD && !objp)
- objp = ____cache_alloc_node(cachep, flags, numa_node_id());
- local_irq_restore(save_flags);
- objp = cache_alloc_debugcheck_after(cachep, flags, objp,
- caller);
- prefetchw(objp);
- return objp;
-}
-
#ifdef CONFIG_NUMA
/*
* Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
@@ -3257,14 +3228,20 @@ static void *alternate_node_alloc(struct
* allocator to do its reclaim / fallback magic. We then insert the
* slab into the proper nodelist and then allocate from it.
*/
-void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
+static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
{
- struct zonelist *zonelist = &NODE_DATA(slab_node(current->mempolicy))
- ->node_zonelists[gfp_zone(flags)];
+ struct zonelist *zonelist;
+ gfp_t local_flags;
struct zone **z;
void *obj = NULL;
int nid;
- gfp_t local_flags = (flags & GFP_LEVEL_MASK);
+
+ if (flags & __GFP_THISNODE)
+ return NULL;
+
+ zonelist = &NODE_DATA(slab_node(current->mempolicy))
+ ->node_zonelists[gfp_zone(flags)];
+ local_flags = (flags & GFP_LEVEL_MASK);
retry:
/*
@@ -3374,16 +3351,110 @@ must_grow:
if (x)
goto retry;
- if (!(flags & __GFP_THISNODE))
- /* Unable to grow the cache. Fall back to other nodes. */
- return fallback_alloc(cachep, flags);
-
- return NULL;
+ return fallback_alloc(cachep, flags);
done:
return obj;
}
-#endif
+
+/**
+ * kmem_cache_alloc_node - Allocate an object on the specified node
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ * @nodeid: node number of the target node.
+ * @caller: return address of caller, used for debug information
+ *
+ * Identical to kmem_cache_alloc but it will allocate memory on the given
+ * node, which can improve the performance for cpu bound structures.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ */
+static __always_inline void *
+__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
+ void *caller)
+{
+ unsigned long save_flags;
+ void *ptr;
+
+ cache_alloc_debugcheck_before(cachep, flags);
+ local_irq_save(save_flags);
+
+ if (unlikely(nodeid == -1))
+ nodeid = numa_node_id();
+
+ if (unlikely(!cachep->nodelists[nodeid])) {
+ /* Node not bootstrapped yet */
+ ptr = fallback_alloc(cachep, flags);
+ goto out;
+ }
+
+ if (nodeid == numa_node_id()) {
+ /*
+ * Use the locally cached objects if possible.
+ * However ____cache_alloc does not allow fallback
+ * to other nodes. It may fail while we still have
+ * objects on other nodes available.
+ */
+ ptr = ____cache_alloc(cachep, flags);
+ if (ptr)
+ goto out;
+ }
+ /* ___cache_alloc_node can fall back to other nodes */
+ ptr = ____cache_alloc_node(cachep, flags, nodeid);
+ out:
+ local_irq_restore(save_flags);
+ ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+
+ return ptr;
+}
+
+static __always_inline void *
+__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
+{
+ void *objp;
+
+ if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
+ objp = alternate_node_alloc(cache, flags);
+ if (objp)
+ goto out;
+ }
+ objp = ____cache_alloc(cache, flags);
+
+ /*
+ * We may just have run out of memory on the local node.
+ * ____cache_alloc_node() knows how to locate memory on other nodes
+ */
+ if (!objp)
+ objp = ____cache_alloc_node(cache, flags, numa_node_id());
+
+ out:
+ return objp;
+}
+#else
+
+static __always_inline void *
+__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+ return ____cache_alloc(cachep, flags);
+}
+
+#endif /* CONFIG_NUMA */
+
+static __always_inline void *
+__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
+{
+ unsigned long save_flags;
+ void *objp;
+
+ cache_alloc_debugcheck_before(cachep, flags);
+ local_irq_save(save_flags);
+ objp = __do_cache_alloc(cachep, flags);
+ local_irq_restore(save_flags);
+ objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
+ prefetchw(objp);
+
+ return objp;
+}
/*
* Caller needs to acquire correct kmem_list's list_lock
@@ -3582,57 +3653,6 @@ out:
}
#ifdef CONFIG_NUMA
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- * @caller: return address of caller, used for debug information
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- */
-static __always_inline void *
-__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
- int nodeid, void *caller)
-{
- unsigned long save_flags;
- void *ptr = NULL;
-
- cache_alloc_debugcheck_before(cachep, flags);
- local_irq_save(save_flags);
-
- if (unlikely(nodeid == -1))
- nodeid = numa_node_id();
-
- if (likely(cachep->nodelists[nodeid])) {
- if (nodeid == numa_node_id()) {
- /*
- * Use the locally cached objects if possible.
- * However ____cache_alloc does not allow fallback
- * to other nodes. It may fail while we still have
- * objects on other nodes available.
- */
- ptr = ____cache_alloc(cachep, flags);
- }
- if (!ptr) {
- /* ___cache_alloc_node can fall back to other nodes */
- ptr = ____cache_alloc_node(cachep, flags, nodeid);
- }
- } else {
- /* Node not bootstrapped yet */
- if (!(flags & __GFP_THISNODE))
- ptr = fallback_alloc(cachep, flags);
- }
-
- local_irq_restore(save_flags);
- ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
-
- return ptr;
-}
-
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
return __cache_alloc_node(cachep, flags, nodeid,
* Re: [PATCH] slab: cache alloc cleanups
2007-01-05 11:46 [PATCH] slab: cache alloc cleanups Pekka J Enberg
@ 2007-01-05 19:05 ` Christoph Lameter
2007-01-05 19:50 ` Andrew Morton
1 sibling, 0 replies; 12+ messages in thread
From: Christoph Lameter @ 2007-01-05 19:05 UTC (permalink / raw)
To: Pekka J Enberg; +Cc: akpm, linux-kernel, apw, hch, manfred, christoph, pj
On Fri, 5 Jan 2007, Pekka J Enberg wrote:
> From: Pekka Enberg <penberg@cs.helsinki.fi>
>
> Clean up __cache_alloc and __cache_alloc_node functions a bit. We no
> longer need to do NUMA_BUILD tricks and the UMA allocation path is much
> simpler. No functional changes in this patch.
Looks good.
* Re: [PATCH] slab: cache alloc cleanups
2007-01-05 11:46 [PATCH] slab: cache alloc cleanups Pekka J Enberg
2007-01-05 19:05 ` Christoph Lameter
@ 2007-01-05 19:50 ` Andrew Morton
2007-01-05 19:59 ` Pekka J Enberg
2007-01-05 22:12 ` [PATCH] " Christoph Lameter
1 sibling, 2 replies; 12+ messages in thread
From: Andrew Morton @ 2007-01-05 19:50 UTC (permalink / raw)
To: Pekka J Enberg; +Cc: linux-kernel, apw, hch, manfred, christoph, pj
On Fri, 5 Jan 2007 13:46:45 +0200 (EET)
Pekka J Enberg <penberg@cs.helsinki.fi> wrote:
> Clean up __cache_alloc and __cache_alloc_node functions a bit. We no
> longer need to do NUMA_BUILD tricks and the UMA allocation path is much
> simpler. No functional changes in this patch.
>
> Note: this saves a few kernel text bytes on an x86 NUMA build due to using gotos in
> __cache_alloc_node() and moving the __GFP_THISNODE check into fallback_alloc().
Does this actually clean things up, or does it randomly move things around
while carefully retaining existing obscurity? Not sure..
* Re: slab: cache alloc cleanups
2007-01-05 19:50 ` Andrew Morton
@ 2007-01-05 19:59 ` Pekka J Enberg
2007-01-05 22:12 ` [PATCH] " Christoph Lameter
1 sibling, 0 replies; 12+ messages in thread
From: Pekka J Enberg @ 2007-01-05 19:59 UTC (permalink / raw)
To: Andrew Morton; +Cc: linux-kernel, apw, hch, manfred, christoph, pj
Andrew Morton writes:
> Does this actually clean things up, or does it randomly move things around
> while carefully retaining existing obscurity? Not sure..
Heh, the bulk of it is basically splitting the current __cache_alloc into
separate UMA and NUMA paths via __do_cache_alloc so we don't have to play
with NUMA_BUILD tricks. I also moved __cache_alloc_node into the same
CONFIG_NUMA block as __cache_alloc while I was at it.
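For reference, the resulting shape is roughly the following (condensed from the diff
above; __cache_alloc still wraps this with the debug checks, local_irq_save/restore
and prefetchw):

#ifdef CONFIG_NUMA
static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
	void *objp = NULL;

	/* Honour cpuset spreading / mempolicies first, if the task uses them. */
	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
		objp = alternate_node_alloc(cache, flags);

	if (!objp)
		objp = ____cache_alloc(cache, flags);

	/* The local node may be out of memory; ____cache_alloc_node() can fall back. */
	if (!objp)
		objp = ____cache_alloc_node(cache, flags, numa_node_id());

	return objp;
}
#else
static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
	/* UMA: no node juggling needed. */
	return ____cache_alloc(cache, flags);
}
#endif

The UMA build thus compiles down to a plain ____cache_alloc() call, which is where
the simplification comes from.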
Pekka
* Re: [PATCH] slab: cache alloc cleanups
2007-01-05 19:50 ` Andrew Morton
2007-01-05 19:59 ` Pekka J Enberg
@ 2007-01-05 22:12 ` Christoph Lameter
1 sibling, 0 replies; 12+ messages in thread
From: Christoph Lameter @ 2007-01-05 22:12 UTC (permalink / raw)
To: Andrew Morton
Cc: Pekka J Enberg, linux-kernel, apw, hch, manfred, christoph, pj
On Fri, 5 Jan 2007, Andrew Morton wrote:
> Does this actually clean things up, or does it randomly move things around
> while carefully retaining existing obscurity? Not sure..
Looks like a good cleanup to me.
* [PATCH] slab: cache alloc cleanups
@ 2007-01-02 13:47 Pekka J Enberg
2007-01-02 14:29 ` Andy Whitcroft
` (3 more replies)
0 siblings, 4 replies; 12+ messages in thread
From: Pekka J Enberg @ 2007-01-02 13:47 UTC (permalink / raw)
To: akpm; +Cc: linux-kernel, apw, hch, manfred, christoph, pj
[Andrew, I have been unable to find a NUMA-capable tester for this patch,
so can we please put this into -mm for some exposure?]
From: Pekka Enberg <penberg@cs.helsinki.fi>
This patch cleans up __cache_alloc and __cache_alloc_node functions. We no
longer need to do NUMA_BUILD tricks and the UMA allocation path is much
simpler. Note: we now do alternate_node_alloc() for kmem_cache_alloc_node as
well.
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Manfred Spraul <manfred@colorfullife.com>
Cc: Christoph Lameter <christoph@lameter.com>
Cc: Paul Jackson <pj@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
---
mm/slab.c | 165 +++++++++++++++++++++++++++++++-------------------------------
1 file changed, 84 insertions(+), 81 deletions(-)
Index: 2.6/mm/slab.c
===================================================================
--- 2.6.orig/mm/slab.c
+++ 2.6/mm/slab.c
@@ -3197,35 +3197,6 @@ static inline void *____cache_alloc(stru
return objp;
}
-static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
- gfp_t flags, void *caller)
-{
- unsigned long save_flags;
- void *objp = NULL;
-
- cache_alloc_debugcheck_before(cachep, flags);
-
- local_irq_save(save_flags);
-
- if (unlikely(NUMA_BUILD &&
- current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY)))
- objp = alternate_node_alloc(cachep, flags);
-
- if (!objp)
- objp = ____cache_alloc(cachep, flags);
- /*
- * We may just have run out of memory on the local node.
- * ____cache_alloc_node() knows how to locate memory on other nodes
- */
- if (NUMA_BUILD && !objp)
- objp = ____cache_alloc_node(cachep, flags, numa_node_id());
- local_irq_restore(save_flags);
- objp = cache_alloc_debugcheck_after(cachep, flags, objp,
- caller);
- prefetchw(objp);
- return objp;
-}
-
#ifdef CONFIG_NUMA
/*
* Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
@@ -3383,7 +3354,90 @@ must_grow:
done:
return obj;
}
-#endif
+
+/**
+ * __do_cache_alloc_node - Allocate an object on the specified node
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ * @nodeid: node number of the target node.
+ *
+ * Fallback to other node is possible if __GFP_THISNODE is not set.
+ */
+static __always_inline void *
+__do_cache_alloc_node(struct kmem_cache *cache, gfp_t flags, int nodeid)
+{
+ void *obj;
+
+ if (nodeid == -1 || nodeid == numa_node_id()) {
+ if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
+ obj = alternate_node_alloc(cache, flags);
+ if (obj)
+ goto out;
+ }
+
+ /*
+ * Use the locally cached objects if possible. However,
+ * ____cache_alloc does not allow fallback to other nodes.
+ * It may fail while we still have objects on other nodes
+ * available.
+ */
+ obj = ____cache_alloc(cache, flags);
+ if (obj)
+ goto out;
+
+ /* Fall back to other nodes. */
+ obj = ____cache_alloc_node(cache, flags, numa_node_id());
+ } else {
+ if (likely(cache->nodelists[nodeid]))
+ obj = ____cache_alloc_node(cache, flags, nodeid);
+ else {
+ /* Node is not bootstrapped yet. */
+ if (!(flags & __GFP_THISNODE))
+ obj = fallback_alloc(cache, flags);
+ else
+ obj = NULL;
+ }
+ }
+ out:
+ return obj;
+}
+
+#else
+
+static __always_inline void *
+__do_cache_alloc_node(struct kmem_cache *cache, gfp_t flags, int nodeid)
+{
+ /*
+ * For UMA, we always allocate from the local cache.
+ */
+ return ____cache_alloc(cache, flags);
+}
+#endif /* CONFIG_NUMA */
+
+static __always_inline void *
+__cache_alloc_node(struct kmem_cache *cache, gfp_t flags, int nodeid,
+ void *caller)
+{
+ unsigned long save_flags;
+ void *obj;
+
+ cache_alloc_debugcheck_before(cache, flags);
+ local_irq_save(save_flags);
+ obj = __do_cache_alloc_node(cache, flags, nodeid);
+ local_irq_restore(save_flags);
+ obj = cache_alloc_debugcheck_after(cache, flags, obj, caller);
+ return obj;
+}
+
+static __always_inline void *
+__cache_alloc(struct kmem_cache *cache, gfp_t flags, void *caller)
+{
+ void *obj;
+
+ obj = __cache_alloc_node(cache, flags, -1, caller);
+ prefetchw(obj);
+ return obj;
+}
/*
* Caller needs to acquire correct kmem_list's list_lock
@@ -3582,57 +3636,6 @@ out:
}
#ifdef CONFIG_NUMA
-/**
- * kmem_cache_alloc_node - Allocate an object on the specified node
- * @cachep: The cache to allocate from.
- * @flags: See kmalloc().
- * @nodeid: node number of the target node.
- * @caller: return address of caller, used for debug information
- *
- * Identical to kmem_cache_alloc but it will allocate memory on the given
- * node, which can improve the performance for cpu bound structures.
- *
- * Fallback to other node is possible if __GFP_THISNODE is not set.
- */
-static __always_inline void *
-__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
- int nodeid, void *caller)
-{
- unsigned long save_flags;
- void *ptr = NULL;
-
- cache_alloc_debugcheck_before(cachep, flags);
- local_irq_save(save_flags);
-
- if (unlikely(nodeid == -1))
- nodeid = numa_node_id();
-
- if (likely(cachep->nodelists[nodeid])) {
- if (nodeid == numa_node_id()) {
- /*
- * Use the locally cached objects if possible.
- * However ____cache_alloc does not allow fallback
- * to other nodes. It may fail while we still have
- * objects on other nodes available.
- */
- ptr = ____cache_alloc(cachep, flags);
- }
- if (!ptr) {
- /* ___cache_alloc_node can fall back to other nodes */
- ptr = ____cache_alloc_node(cachep, flags, nodeid);
- }
- } else {
- /* Node not bootstrapped yet */
- if (!(flags & __GFP_THISNODE))
- ptr = fallback_alloc(cachep, flags);
- }
-
- local_irq_restore(save_flags);
- ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
-
- return ptr;
-}
-
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
return __cache_alloc_node(cachep, flags, nodeid,
* Re: [PATCH] slab: cache alloc cleanups
2007-01-02 13:47 Pekka J Enberg
@ 2007-01-02 14:29 ` Andy Whitcroft
2007-01-02 16:25 ` Christoph Lameter
` (2 subsequent siblings)
3 siblings, 0 replies; 12+ messages in thread
From: Andy Whitcroft @ 2007-01-02 14:29 UTC (permalink / raw)
To: Pekka J Enberg; +Cc: akpm, linux-kernel, hch, manfred, christoph, pj
Pekka J Enberg wrote:
> [Andrew, I have been unable to find a NUMA-capable tester for this patch,
> so can we please put this into -mm for some exposure?]
>
> From: Pekka Enberg <penberg@cs.helsinki.fi>
>
> This patch cleans up __cache_alloc and __cache_alloc_node functions. We no
> longer need to do NUMA_BUILD tricks and the UMA allocation path is much
> simpler. Note: we now do alternate_node_alloc() for kmem_cache_alloc_node as
> well.
I'll push this through our tests here if that helps. I need to rerun
the -rc2-mm1 tests by the looks of it ... looks like the test lab had
some time off over Christmas.
-apw
* Re: [PATCH] slab: cache alloc cleanups
2007-01-02 13:47 Pekka J Enberg
2007-01-02 14:29 ` Andy Whitcroft
@ 2007-01-02 16:25 ` Christoph Lameter
2007-01-02 20:27 ` Pekka Enberg
2007-01-02 20:22 ` Andrew Morton
2007-01-04 21:15 ` Christoph Hellwig
3 siblings, 1 reply; 12+ messages in thread
From: Christoph Lameter @ 2007-01-02 16:25 UTC (permalink / raw)
To: Pekka J Enberg; +Cc: akpm, linux-kernel, apw, hch, manfred, christoph, pj
On Tue, 2 Jan 2007, Pekka J Enberg wrote:
> +
> + if (nodeid == -1 || nodeid == numa_node_id()) {
> + if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
> + obj = alternate_node_alloc(cache, flags);
> + if (obj)
> + goto out;
> + }
This reintroduces a bug that was fixed a while ago.
kmalloc_node() must never obey memory policies.
alternate_node_alloc() implements memory policies.
With this patch, kmalloc_node(..., numa_node_id()) would again get redirected
to other nodes if a memory policy is in effect.
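To make the failure mode concrete, consider a hypothetical caller (the wrapper
function and names here are made up for illustration; only kmalloc_node() and the
flags are real):

#include <linux/slab.h>

/* Hypothetical caller running under a mempolicy such as MPOL_INTERLEAVE. */
static void *example_alloc(size_t size)
{
	/*
	 * The node is named explicitly, so the object is expected to come
	 * from the local node.  With the hunk quoted above, however,
	 * nodeid == numa_node_id() is routed through alternate_node_alloc(),
	 * which follows the mempolicy and may return memory from another
	 * node entirely.
	 */
	return kmalloc_node(size, GFP_KERNEL, numa_node_id());
}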
* Re: [PATCH] slab: cache alloc cleanups
2007-01-02 16:25 ` Christoph Lameter
@ 2007-01-02 20:27 ` Pekka Enberg
0 siblings, 0 replies; 12+ messages in thread
From: Pekka Enberg @ 2007-01-02 20:27 UTC (permalink / raw)
To: Christoph Lameter; +Cc: akpm, linux-kernel, apw, hch, manfred, christoph, pj
On Tue, 2 Jan 2007, Pekka J Enberg wrote:
> > +
> > + if (nodeid == -1 || nodeid == numa_node_id()) {
> > + if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
> > + obj = alternate_node_alloc(cache, flags);
> > + if (obj)
> > + goto out;
> > + }
On 1/2/07, Christoph Lameter <clameter@sgi.com> wrote:
> This reintroduces a bug that was fixed a while ago.
Aah, well, we could have a can_mempolicy parameter, but I'm not sure it would
be an improvement over the current version...
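A rough sketch of what that could look like -- purely hypothetical, never posted as
a patch; the extra argument is invented here, and the bootstrap/fallback_alloc()
handling is omitted for brevity:

static __always_inline void *
__do_cache_alloc_node(struct kmem_cache *cache, gfp_t flags, int nodeid,
		      int can_mempolicy)
{
	void *obj;

	if (nodeid == -1 || nodeid == numa_node_id()) {
		/* Only policy-aware entry points may be redirected. */
		if (can_mempolicy &&
		    unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
			obj = alternate_node_alloc(cache, flags);
			if (obj)
				return obj;
		}
		obj = ____cache_alloc(cache, flags);
		if (!obj)
			obj = ____cache_alloc_node(cache, flags, numa_node_id());
		return obj;
	}
	/* Explicit off-node request: never consult memory policies. */
	return ____cache_alloc_node(cache, flags, nodeid);
}

kmem_cache_alloc()/kmalloc() would pass can_mempolicy = 1 while
kmem_cache_alloc_node()/kmalloc_node() would pass 0, so explicit-node callers never
go through alternate_node_alloc().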
* Re: [PATCH] slab: cache alloc cleanups
2007-01-02 13:47 Pekka J Enberg
2007-01-02 14:29 ` Andy Whitcroft
2007-01-02 16:25 ` Christoph Lameter
@ 2007-01-02 20:22 ` Andrew Morton
2007-01-04 21:15 ` Christoph Hellwig
3 siblings, 0 replies; 12+ messages in thread
From: Andrew Morton @ 2007-01-02 20:22 UTC (permalink / raw)
To: Pekka J Enberg; +Cc: linux-kernel, apw, hch, manfred, christoph, pj
On Tue, 2 Jan 2007 15:47:06 +0200 (EET)
Pekka J Enberg <penberg@cs.helsinki.fi> wrote:
> I have been unable to find a NUMA-capable tester for this patch,
Any x86_64 box can be used to test NUMA code via the numa=fake=N boot option.
fake-numa is somewhat sick in mainline and you might find that it doesn't
work right on some machines, or that it fails with high values of N, but
works OK with N=2. There are fixes to address this problem in -mm.
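For example, appending the option to the kernel line in the boot loader
configuration is enough to get two fake nodes on a UMA box (image name and root
device below are placeholders):

	kernel /boot/vmlinuz root=/dev/sda1 ro numa=fake=2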
* Re: [PATCH] slab: cache alloc cleanups
2007-01-02 13:47 Pekka J Enberg
` (2 preceding siblings ...)
2007-01-02 20:22 ` Andrew Morton
@ 2007-01-04 21:15 ` Christoph Hellwig
2007-01-04 21:23 ` Pekka Enberg
3 siblings, 1 reply; 12+ messages in thread
From: Christoph Hellwig @ 2007-01-04 21:15 UTC (permalink / raw)
To: Pekka J Enberg; +Cc: akpm, linux-kernel, apw, hch, manfred, christoph, pj
On Tue, Jan 02, 2007 at 03:47:06PM +0200, Pekka J Enberg wrote:
> [Andrew, I have been unable to find a NUMA-capable tester for this patch,
> so can we please put this into -mm for some exposure?]
>
> From: Pekka Enberg <penberg@cs.helsinki.fi>
>
> This patch cleans up __cache_alloc and __cache_alloc_node functions. We no
> longer need to do NUMA_BUILD tricks and the UMA allocation path is much
> simpler. Note: we now do alternate_node_alloc() for kmem_cache_alloc_node as
> well.
Seems to work nicely on my 2-node Cell blade.