From: Peter Zijlstra <a.p.zijlstra@chello.nl>
To: Linus Torvalds <torvalds@linux-foundation.org>,
Andrew Morton <akpm@linux-foundation.org>,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
netdev@vger.kernel.org, trond.myklebust@fys.uio.no
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: [PATCH 04/28] mm: kmem_estimate_pages()
Date: Wed, 20 Feb 2008 15:46:14 +0100
Message-ID: <20080220150305.774294000@chello.nl>
In-Reply-To: <20080220144610.548202000@chello.nl>
Provide a method to get an upper bound on the number of pages needed to
allocate a given number of objects from a given kmem_cache.
This lays the foundation for a generic reserve framework as presented in
a later patch in this series. This framework needs to convert object demand
(kmalloc() bytes, kmem_cache_alloc() objects) to pages.
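As a rough sketch of the intended use (hypothetical caller, cache and
sizes; the actual reserve framework only appears later in the series),
a subsystem would convert its worst-case demand into pages like so:

	/*
	 * hypothetical example: 128 objects from some_cache plus 64KB
	 * worth of kmalloc() allocations, reserved for atomic context
	 */
	unsigned pages = 0;

	pages += kmem_estimate_pages(some_cache, GFP_ATOMIC, 128);
	pages += kestimate(GFP_ATOMIC, 64 * 1024);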
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
include/linux/slab.h | 4 ++
mm/slab.c | 75 ++++++++++++++++++++++++++++++++++++++++++++++
mm/slub.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++++++
3 files changed, 161 insertions(+)
Index: linux-2.6/include/linux/slab.h
===================================================================
--- linux-2.6.orig/include/linux/slab.h
+++ linux-2.6/include/linux/slab.h
@@ -60,6 +60,8 @@ void kmem_cache_free(struct kmem_cache *
unsigned int kmem_cache_size(struct kmem_cache *);
const char *kmem_cache_name(struct kmem_cache *);
int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
+unsigned kmem_estimate_pages(struct kmem_cache *cachep,
+ gfp_t flags, int objects);
/*
* Please use this macro to create slab caches. Simply specify the
@@ -94,6 +96,8 @@ int kmem_ptr_validate(struct kmem_cache
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
size_t ksize(const void *);
+unsigned kestimate_single(size_t, gfp_t, int);
+unsigned kestimate(gfp_t, size_t);
/*
* Allocator specific definitions. These are mainly used to establish optimized
Index: linux-2.6/mm/slub.c
===================================================================
--- linux-2.6.orig/mm/slub.c
+++ linux-2.6/mm/slub.c
@@ -2465,6 +2465,37 @@ const char *kmem_cache_name(struct kmem_
EXPORT_SYMBOL(kmem_cache_name);
/*
+ * return the max number of pages required to allocate @objects
+ * objects from the given cache
+ */
+unsigned kmem_estimate_pages(struct kmem_cache *s, gfp_t flags, int objects)
+{
+ unsigned long slabs;
+
+ if (WARN_ON(!s) || WARN_ON(!s->objects))
+ return 0;
+
+ slabs = DIV_ROUND_UP(objects, s->objects);
+
+ /*
+	 * Account the possible additional overhead if the slab holds more than
+ * one object.
+ */
+ if (s->objects > 1) {
+ /*
+ * Account the possible additional overhead if per cpu slabs
+ * are currently empty and have to be allocated. This is very
+ * unlikely but a possible scenario immediately after
+ * kmem_cache_shrink.
+ */
+ slabs += num_online_cpus();
+ }
+
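+	/*
+	 * Worked example (hypothetical numbers): with s->objects == 32,
+	 * s->order == 1 and 4 online cpus, a request for 100 objects
+	 * yields DIV_ROUND_UP(100, 32) + 4 == 8 slabs, hence
+	 * 8 << 1 == 16 pages.
+	 */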
+ return slabs << s->order;
+}
+EXPORT_SYMBOL_GPL(kmem_estimate_pages);
+
+/*
* Attempt to free all slabs on a node. Return the number of slabs we
* were unable to free.
*/
@@ -2818,6 +2849,57 @@ static unsigned long count_partial(struc
}
/*
+ * return the max number of pages required to allocate @count objects
+ * of @size bytes from kmalloc given @flags.
+ */
+unsigned kestimate_single(size_t size, gfp_t flags, int count)
+{
+ struct kmem_cache *s = get_slab(size, flags);
+ if (!s)
+ return 0;
+
+ return kmem_estimate_pages(s, flags, count);
+}
+EXPORT_SYMBOL_GPL(kestimate_single);
+
+/*
+ * return the max number of pages required to allocate @bytes from kmalloc
+ * in an unspecified number of allocations of heterogeneous sizes.
+ */
+unsigned kestimate(gfp_t flags, size_t bytes)
+{
+ int i;
+ unsigned long pages;
+
+ /*
+	 * Multiply by two to account for the worst-case slack space
+	 * due to the power-of-two allocation sizes.
+ */
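+	/*
+	 * (e.g. a kmalloc(513) is served from the 1024-byte cache, so
+	 * nearly half of the allocation may be slack.)
+	 */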
+ pages = DIV_ROUND_UP(2 * bytes, PAGE_SIZE);
+
+ /*
+ * add the kmem_cache overhead of each possible kmalloc cache
+ */
+ for (i = 1; i < PAGE_SHIFT; i++) {
+ struct kmem_cache *s;
+
+#ifdef CONFIG_ZONE_DMA
+ if (unlikely(flags & SLUB_DMA))
+ s = dma_kmalloc_cache(i, flags);
+ else
+#endif
+ s = &kmalloc_caches[i];
+
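+		/*
+		 * Note: passing 0 objects still accounts the per-cpu
+		 * slabs this cache might need to allocate.
+		 */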
+ if (s)
+ pages += kmem_estimate_pages(s, flags, 0);
+ }
+
+ return pages;
+}
+EXPORT_SYMBOL_GPL(kestimate);
+
+/*
* kmem_cache_shrink removes empty slabs from the partial lists and sorts
* the remaining slabs by the number of items in use. The slabs with the
* most items in use come first. New allocations will then fill those up
Index: linux-2.6/mm/slab.c
===================================================================
--- linux-2.6.orig/mm/slab.c
+++ linux-2.6/mm/slab.c
@@ -3851,6 +3851,81 @@ const char *kmem_cache_name(struct kmem_
EXPORT_SYMBOL_GPL(kmem_cache_name);
/*
+ * return the max number of pages required to allocate @objects
+ * objects from the given cache
+ */
+unsigned kmem_estimate_pages(struct kmem_cache *cachep,
+ gfp_t flags, int objects)
+{
+ /*
+ * (1) memory for objects,
+ */
+ unsigned nr_slabs = DIV_ROUND_UP(objects, cachep->num);
+ unsigned nr_pages = nr_slabs << cachep->gfporder;
+
+ /*
+ * (2) memory for each per-cpu queue (nr_cpu_ids),
+	 * (3) memory for the per-node alien queues (nr_cpu_ids), and
+ * (4) some amount of memory for the slab management structures
+ *
+	 * XXX: truly account these
+ */
+ nr_pages += 1 + ilog2(nr_pages);
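+	/*
+	 * (e.g. a 16-page object estimate picks up 1 + ilog2(16) == 5
+	 * extra pages of headroom for these overheads.)
+	 */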
+
+ return nr_pages;
+}
+
+/*
+ * return the max number of pages required to allocate @count objects
+ * of @size bytes from kmalloc given @flags.
+ */
+unsigned kestimate_single(size_t size, gfp_t flags, int count)
+{
+ struct kmem_cache *s = kmem_find_general_cachep(size, flags);
+ if (!s)
+ return 0;
+
+ return kmem_estimate_pages(s, flags, count);
+}
+EXPORT_SYMBOL_GPL(kestimate_single);
+
+/*
+ * return the max number of pages required to allocate @bytes from kmalloc
+ * in an unspecified number of allocations of heterogeneous sizes.
+ */
+unsigned kestimate(gfp_t flags, size_t bytes)
+{
+ unsigned long pages;
+ struct cache_sizes *csizep = malloc_sizes;
+
+ /*
+	 * Multiply by two to account for the worst-case slack space
+	 * due to the power-of-two allocation sizes.
+ */
+ pages = DIV_ROUND_UP(2 * bytes, PAGE_SIZE);
+
+ /*
+ * add the kmem_cache overhead of each possible kmalloc cache
+ */
+ for (csizep = malloc_sizes; csizep->cs_cachep; csizep++) {
+ struct kmem_cache *s;
+
+#ifdef CONFIG_ZONE_DMA
+ if (unlikely(flags & __GFP_DMA))
+ s = csizep->cs_dmacachep;
+ else
+#endif
+ s = csizep->cs_cachep;
+
+ if (s)
+ pages += kmem_estimate_pages(s, flags, 0);
+ }
+
+ return pages;
+}
+EXPORT_SYMBOL_GPL(kestimate);
+
+/*
* This initializes kmem_list3 or resizes various caches for all nodes.
*/
static int alloc_kmemlist(struct kmem_cache *cachep)
--