* [2.6 patch] mm/page_alloc.c: cleanups
@ 2008-05-05 18:25 Adrian Bunk
2008-05-05 20:24 ` KOSAKI Motohiro
From: Adrian Bunk @ 2008-05-05 18:25 UTC (permalink / raw)
To: linux-kernel; +Cc: Peter Zijlstra, Andrew Morton
This patch contains the following cleanups:
- make the following needlessly global variables static:
- required_kernelcore
- zone_movable_pfn[]
- make the following needlessly global functions static:
- move_freepages()
- move_freepages_block()
- setup_pageset()
- find_usable_zone_for_movable()
- adjust_zone_range_for_zone_movable()
- __absent_pages_in_range()
- find_min_pfn_for_node()
- find_zone_movable_pfns_for_nodes()
- remove the following no longer used global function:
- free_cold_page()
Signed-off-by: Adrian Bunk <bunk@kernel.org>
---
This patch has been sent on:
- 14 Apr 2008
include/linux/gfp.h | 1 -
mm/page_alloc.c | 30 +++++++++++++-----------------
2 files changed, 13 insertions(+), 18 deletions(-)
10a85861ad4361685b5cab50937a10ea4a0f34d1
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 164be9d..2b8148c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -220,7 +220,6 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_page(struct page *page);
-extern void free_cold_page(struct page *page);
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 402a504..da4f033 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -152,9 +152,9 @@ static unsigned long __meminitdata dma_reserve;
static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
- unsigned long __initdata required_kernelcore;
+ static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
- unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+ static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
@@ -697,9 +697,9 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
* Note that start_page and end_pages are not aligned on a pageblock
* boundary. If alignment is required, use move_freepages_block()
*/
-int move_freepages(struct zone *zone,
- struct page *start_page, struct page *end_page,
- int migratetype)
+static int move_freepages(struct zone *zone,
+ struct page *start_page, struct page *end_page,
+ int migratetype)
{
struct page *page;
unsigned long order;
@@ -738,7 +738,8 @@ int move_freepages(struct zone *zone,
return pages_moved;
}
-int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+ int migratetype)
{
unsigned long start_pfn, end_pfn;
struct page *start_page, *end_page;
@@ -1021,11 +1022,6 @@ void free_hot_page(struct page *page)
{
free_hot_cold_page(page, 0);
}
-
-void free_cold_page(struct page *page)
-{
- free_hot_cold_page(page, 1);
-}
/*
* split_page takes a non-compound higher-order page, and splits it into
@@ -2604,7 +2600,7 @@ static int zone_batchsize(struct zone *zone)
return batch;
}
-inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
struct per_cpu_pages *pcp;
@@ -3033,7 +3029,7 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
* assumption is made that zones within a node are ordered in monotonic
* increasing memory addresses so that the "highest" populated zone is used
*/
-void __init find_usable_zone_for_movable(void)
+static void __init find_usable_zone_for_movable(void)
{
int zone_index;
for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
@@ -3059,7 +3055,7 @@ void __init find_usable_zone_for_movable(void)
* highest usable zone for ZONE_MOVABLE. This preserves the assumption that
* zones within a node are in order of monotonic increases memory addresses
*/
-void __meminit adjust_zone_range_for_zone_movable(int nid,
+static void __meminit adjust_zone_range_for_zone_movable(int nid,
unsigned long zone_type,
unsigned long node_start_pfn,
unsigned long node_end_pfn,
@@ -3120,7 +3116,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
* Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
* then all holes in the requested range will be accounted for.
*/
-unsigned long __meminit __absent_pages_in_range(int nid,
+static unsigned long __meminit __absent_pages_in_range(int nid,
unsigned long range_start_pfn,
unsigned long range_end_pfn)
{
@@ -3604,7 +3600,7 @@ static void __init sort_node_map(void)
}
/* Find the lowest pfn for a node */
-unsigned long __init find_min_pfn_for_node(unsigned long nid)
+static unsigned long __init find_min_pfn_for_node(unsigned long nid)
{
int i;
unsigned long min_pfn = ULONG_MAX;
@@ -3676,7 +3672,7 @@ static unsigned long __init early_calculate_totalpages(void)
* memory. When they don't, some nodes will have more kernelcore than
* others
*/
-void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
{
int i, nid;
unsigned long usable_startpfn;
* Re: [2.6 patch] mm/page_alloc.c: cleanups
2008-05-05 18:25 [2.6 patch] mm/page_alloc.c: cleanups Adrian Bunk
@ 2008-05-05 20:24 ` KOSAKI Motohiro
2008-05-05 20:45 ` Andrew Morton
From: KOSAKI Motohiro @ 2008-05-05 20:24 UTC (permalink / raw)
To: Adrian Bunk; +Cc: linux-kernel, Peter Zijlstra, Andrew Morton
> -void free_cold_page(struct page *page)
> -{
> - free_hot_cold_page(page, 1);
> -}
Wow, I didn't know that nobody uses cold pages :)
Of course, you are right.
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
* Re: [2.6 patch] mm/page_alloc.c: cleanups
2008-05-05 20:24 ` KOSAKI Motohiro
@ 2008-05-05 20:45 ` Andrew Morton
2008-05-05 20:59 ` KOSAKI Motohiro
From: Andrew Morton @ 2008-05-05 20:45 UTC (permalink / raw)
To: KOSAKI Motohiro; +Cc: bunk, linux-kernel, peterz
On Tue, 6 May 2008 05:24:07 +0900
"KOSAKI Motohiro" <kosaki.motohiro@jp.fujitsu.com> wrote:
> > -void free_cold_page(struct page *page)
> > -{
> > - free_hot_cold_page(page, 1);
> > -}
>
> Wow, I didn't know that nobody uses cold pages :)
that's a bug, surely? I bet there are plenty of places which should be
using this hint.
* Re: [2.6 patch] mm/page_alloc.c: cleanups
2008-05-05 20:45 ` Andrew Morton
@ 2008-05-05 20:59 ` KOSAKI Motohiro
2008-05-05 21:13 ` Andrew Morton
From: KOSAKI Motohiro @ 2008-05-05 20:59 UTC (permalink / raw)
To: Andrew Morton; +Cc: bunk, linux-kernel, peterz
> > > -void free_cold_page(struct page *page)
> > > -{
> > > - free_hot_cold_page(page, 1);
> > > -}
> >
> > Wow, I didn't know that nobody uses cold pages :)
>
> that's a bug, surely? I bet there are plenty of places which should be
> using this hint.
Hm, as far as I can tell:
1. nobody uses free_cold_page() anywhere in the kernel tree.
2. free_cold_page() isn't exported.
I checked against 2.6.25-mm1.
I don't yet understand why you think there are plenty of such places....
* Re: [2.6 patch] mm/page_alloc.c: cleanups
2008-05-05 20:59 ` KOSAKI Motohiro
@ 2008-05-05 21:13 ` Andrew Morton
2008-05-05 22:58 ` KOSAKI Motohiro
From: Andrew Morton @ 2008-05-05 21:13 UTC (permalink / raw)
To: KOSAKI Motohiro; +Cc: bunk, linux-kernel, peterz
On Tue, 6 May 2008 05:59:31 +0900
"KOSAKI Motohiro" <kosaki.motohiro@jp.fujitsu.com> wrote:
> > > > -void free_cold_page(struct page *page)
> > > > -{
> > > > - free_hot_cold_page(page, 1);
> > > > -}
> > >
> > > Wow, I didn't know that nobody uses cold pages :)
> >
> > that's a bug, surely? I bet there are plenty of places which should be
> > using this hint.
>
> Hm, as far as I can tell:
>
> 1. nobody uses free_cold_page() anywhere in the kernel tree.
> 2. free_cold_page() isn't exported.
>
> I checked against 2.6.25-mm1.
> I don't yet understand why you think there are plenty of such places....
Any place in the kernel which frees a page which is probably cache-cold
should be using this function. (Including any place which caused the page
to be evicted from CPU cache by placing it under DMA).
But nobody thinks about providing the hint.
Probably it doesn't matter much at all, except in those few places which
free a lot of pages.
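As a minimal sketch of the kind of caller described above (the driver-side
function names here are hypothetical; only free_hot_page() and
free_cold_page() are the real 2.6.25-era helpers quoted in the hunks in this
thread):

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical receive-completion path: the page was just filled by device
 * DMA, so the CPU is unlikely to hold cache lines for it.  Freeing it with
 * the cold hint places it at the tail of the per-CPU free list, so cache-hot
 * pages get reused first.
 */
static void example_rx_complete(struct page *page)
{
	free_cold_page(page);	/* equivalent to free_hot_cold_page(page, 1) */
}

/* A page freed on an ordinary, cache-warm path would use the hot variant. */
static void example_free_warm(struct page *page)
{
	free_hot_page(page);	/* equivalent to free_hot_cold_page(page, 0) */
}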
* Re: [2.6 patch] mm/page_alloc.c: cleanups
2008-05-05 21:13 ` Andrew Morton
@ 2008-05-05 22:58 ` KOSAKI Motohiro
From: KOSAKI Motohiro @ 2008-05-05 22:58 UTC (permalink / raw)
To: Andrew Morton, bunk; +Cc: linux-kernel, peterz
> > > that's a bug, surely? I bet there are plenty of places which should be
> > > using this hint.
> >
> > Hm, as far as I can tell:
> >
> > 1. nobody uses free_cold_page() anywhere in the kernel tree.
> > 2. free_cold_page() isn't exported.
> >
> > I checked against 2.6.25-mm1.
> > I don't yet understand why you think there are plenty of such places....
>
> Any place in the kernel which frees a page which is probably cache-cold
> should be using this function. (Including any place which caused the page
> to be evicted from CPU cache by placing it under DMA).
Ah, agreed.
Adrian, can you drop the free_cold_page() change from your patch?
* [2.6 patch] mm/page_alloc.c: cleanups
@ 2008-06-25 16:26 Adrian Bunk
From: Adrian Bunk @ 2008-06-25 16:26 UTC (permalink / raw)
To: Andrew Morton; +Cc: linux-kernel
This patch contains the following cleanups:
- make the following needlessly global variables static:
- required_kernelcore
- zone_movable_pfn[]
- make the following needlessly global functions static:
- move_freepages()
- move_freepages_block()
- setup_pageset()
- find_usable_zone_for_movable()
- adjust_zone_range_for_zone_movable()
- __absent_pages_in_range()
- find_min_pfn_for_node()
- find_zone_movable_pfns_for_nodes()
Signed-off-by: Adrian Bunk <bunk@kernel.org>
---
mm/page_alloc.c | 25 +++++++++++++------------
1 file changed, 13 insertions(+), 12 deletions(-)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -152,9 +152,9 @@ static unsigned long __meminitdata dma_reserve;
static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
- unsigned long __initdata required_kernelcore;
+ static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
- unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+ static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
@@ -697,9 +697,9 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
* Note that start_page and end_pages are not aligned on a pageblock
* boundary. If alignment is required, use move_freepages_block()
*/
-int move_freepages(struct zone *zone,
- struct page *start_page, struct page *end_page,
- int migratetype)
+static int move_freepages(struct zone *zone,
+ struct page *start_page, struct page *end_page,
+ int migratetype)
{
struct page *page;
unsigned long order;
@@ -738,7 +738,8 @@ int move_freepages(struct zone *zone,
return pages_moved;
}
-int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+ int migratetype)
{
unsigned long start_pfn, end_pfn;
struct page *start_page, *end_page;
@@ -2604,7 +2600,7 @@ static int zone_batchsize(struct zone *zone)
return batch;
}
-inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
struct per_cpu_pages *pcp;
@@ -3033,7 +3029,7 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
* assumption is made that zones within a node are ordered in monotonic
* increasing memory addresses so that the "highest" populated zone is used
*/
-void __init find_usable_zone_for_movable(void)
+static void __init find_usable_zone_for_movable(void)
{
int zone_index;
for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
@@ -3059,7 +3055,7 @@ void __init find_usable_zone_for_movable(void)
* highest usable zone for ZONE_MOVABLE. This preserves the assumption that
* zones within a node are in order of monotonic increases memory addresses
*/
-void __meminit adjust_zone_range_for_zone_movable(int nid,
+static void __meminit adjust_zone_range_for_zone_movable(int nid,
unsigned long zone_type,
unsigned long node_start_pfn,
unsigned long node_end_pfn,
@@ -3120,7 +3116,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
* Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
* then all holes in the requested range will be accounted for.
*/
-unsigned long __meminit __absent_pages_in_range(int nid,
+static unsigned long __meminit __absent_pages_in_range(int nid,
unsigned long range_start_pfn,
unsigned long range_end_pfn)
{
@@ -3604,7 +3600,7 @@ static void __init sort_node_map(void)
}
/* Find the lowest pfn for a node */
-unsigned long __init find_min_pfn_for_node(unsigned long nid)
+static unsigned long __init find_min_pfn_for_node(unsigned long nid)
{
int i;
unsigned long min_pfn = ULONG_MAX;
@@ -3676,7 +3672,7 @@ static unsigned long __init early_calculate_totalpages(void)
* memory. When they don't, some nodes will have more kernelcore than
* others
*/
-void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
{
int i, nid;
unsigned long usable_startpfn;
* [2.6 patch] mm/page_alloc.c: cleanups
@ 2008-03-30 22:53 Adrian Bunk
2008-03-31 11:32 ` Peter Zijlstra
From: Adrian Bunk @ 2008-03-30 22:53 UTC (permalink / raw)
To: linux-kernel
This patch contains the following cleanups:
- make the following needlessly global variables static:
- required_kernelcore
- zone_movable_pfn[]
- make the following needlessly global functions static:
- move_freepages()
- move_freepages_block()
- setup_pageset()
- find_usable_zone_for_movable()
- adjust_zone_range_for_zone_movable()
- __absent_pages_in_range()
- find_min_pfn_for_node()
- find_zone_movable_pfns_for_nodes()
- #if 0 the following no longer used global function:
- free_cold_page()
Signed-off-by: Adrian Bunk <bunk@kernel.org>
---
This patch has been sent on:
- 25 Feb 2008
include/linux/gfp.h | 1 -
mm/page_alloc.c | 29 ++++++++++++++++-------------
2 files changed, 16 insertions(+), 14 deletions(-)
cf9b9e8ae11df98bf68e5fd2f95fd8f8a429cea3
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 164be9d..2b8148c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -220,7 +220,6 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_page(struct page *page);
-extern void free_cold_page(struct page *page);
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 75b9793..6710b45 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -151,9 +151,9 @@ static unsigned long __meminitdata dma_reserve;
static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
- unsigned long __initdata required_kernelcore;
+ static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
- unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+ static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
@@ -688,9 +688,9 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
* Note that start_page and end_pages are not aligned on a pageblock
* boundary. If alignment is required, use move_freepages_block()
*/
-int move_freepages(struct zone *zone,
- struct page *start_page, struct page *end_page,
- int migratetype)
+static int move_freepages(struct zone *zone,
+ struct page *start_page, struct page *end_page,
+ int migratetype)
{
struct page *page;
unsigned long order;
@@ -729,7 +729,8 @@ int move_freepages(struct zone *zone,
return pages_moved;
}
-int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+ int migratetype)
{
unsigned long start_pfn, end_pfn;
struct page *start_page, *end_page;
@@ -1013,11 +1014,13 @@ void free_hot_page(struct page *page)
{
free_hot_cold_page(page, 0);
}
-
+
+#if 0
void free_cold_page(struct page *page)
{
free_hot_cold_page(page, 1);
}
+#endif /* 0 */
/*
* split_page takes a non-compound higher-order page, and splits it into
@@ -2597,7 +2600,7 @@ static int zone_batchsize(struct zone *zone)
return batch;
}
-inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
struct per_cpu_pages *pcp;
@@ -3026,7 +3029,7 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
* assumption is made that zones within a node are ordered in monotonic
* increasing memory addresses so that the "highest" populated zone is used
*/
-void __init find_usable_zone_for_movable(void)
+static void __init find_usable_zone_for_movable(void)
{
int zone_index;
for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
@@ -3052,7 +3055,7 @@ void __init find_usable_zone_for_movable(void)
* highest usable zone for ZONE_MOVABLE. This preserves the assumption that
* zones within a node are in order of monotonic increases memory addresses
*/
-void __meminit adjust_zone_range_for_zone_movable(int nid,
+static void __meminit adjust_zone_range_for_zone_movable(int nid,
unsigned long zone_type,
unsigned long node_start_pfn,
unsigned long node_end_pfn,
@@ -3113,7 +3116,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
* Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
* then all holes in the requested range will be accounted for.
*/
-unsigned long __meminit __absent_pages_in_range(int nid,
+static unsigned long __meminit __absent_pages_in_range(int nid,
unsigned long range_start_pfn,
unsigned long range_end_pfn)
{
@@ -3597,7 +3600,7 @@ static void __init sort_node_map(void)
}
/* Find the lowest pfn for a node */
-unsigned long __init find_min_pfn_for_node(unsigned long nid)
+static unsigned long __init find_min_pfn_for_node(unsigned long nid)
{
int i;
unsigned long min_pfn = ULONG_MAX;
@@ -3669,7 +3672,7 @@ static unsigned long __init early_calculate_totalpages(void)
* memory. When they don't, some nodes will have more kernelcore than
* others
*/
-void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
{
int i, nid;
unsigned long usable_startpfn;
* Re: [2.6 patch] mm/page_alloc.c: cleanups
2008-03-30 22:53 Adrian Bunk
@ 2008-03-31 11:32 ` Peter Zijlstra
2008-04-14 18:14 ` Adrian Bunk
From: Peter Zijlstra @ 2008-03-31 11:32 UTC (permalink / raw)
To: Adrian Bunk; +Cc: linux-kernel
On Mon, 2008-03-31 at 01:53 +0300, Adrian Bunk wrote:
> This patch contains the following cleanups:
> - make the following needlessly global variables static:
> - required_kernelcore
> - zone_movable_pfn[]
> - make the following needlessly global functions static:
> - move_freepages()
> - move_freepages_block()
> - setup_pageset()
> - find_usable_zone_for_movable()
> - adjust_zone_range_for_zone_movable()
> - __absent_pages_in_range()
> - find_min_pfn_for_node()
> - find_zone_movable_pfns_for_nodes()
> - #if 0 the following no longer used global function:
> - free_cold_page()
Why no outright removal? #if 0 only leaves more junk around to clean up
later.
* [2.6 patch] mm/page_alloc.c: cleanups
2008-03-31 11:32 ` Peter Zijlstra
@ 2008-04-14 18:14 ` Adrian Bunk
From: Adrian Bunk @ 2008-04-14 18:14 UTC (permalink / raw)
To: Peter Zijlstra; +Cc: linux-kernel
On Mon, Mar 31, 2008 at 01:32:18PM +0200, Peter Zijlstra wrote:
> On Mon, 2008-03-31 at 01:53 +0300, Adrian Bunk wrote:
> > This patch contains the following cleanups:
> > - make the following needlessly global variables static:
> > - required_kernelcore
> > - zone_movable_pfn[]
> > - make the following needlessly global functions static:
> > - move_freepages()
> > - move_freepages_block()
> > - setup_pageset()
> > - find_usable_zone_for_movable()
> > - adjust_zone_range_for_zone_movable()
> > - __absent_pages_in_range()
> > - find_min_pfn_for_node()
> > - find_zone_movable_pfns_for_nodes()
> > - #if 0 the following no longer used global function:
> > - free_cold_page()
>
> Why no outright removal? #if 0 only leaves more junk around to clean up
> later.
Updated patch below.
cu
Adrian
<-- snip -->
This patch contains the following cleanups:
- make the following needlessly global variables static:
- required_kernelcore
- zone_movable_pfn[]
- make the following needlessly global functions static:
- move_freepages()
- move_freepages_block()
- setup_pageset()
- find_usable_zone_for_movable()
- adjust_zone_range_for_zone_movable()
- __absent_pages_in_range()
- find_min_pfn_for_node()
- find_zone_movable_pfns_for_nodes()
- remove the following no longer used global function:
- free_cold_page()
Signed-off-by: Adrian Bunk <bunk@kernel.org>
---
include/linux/gfp.h | 1 -
mm/page_alloc.c | 30 +++++++++++++-----------------
2 files changed, 13 insertions(+), 18 deletions(-)
10a85861ad4361685b5cab50937a10ea4a0f34d1
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 164be9d..2b8148c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -220,7 +220,6 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_page(struct page *page);
-extern void free_cold_page(struct page *page);
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 402a504..da4f033 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -152,9 +152,9 @@ static unsigned long __meminitdata dma_reserve;
static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
- unsigned long __initdata required_kernelcore;
+ static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
- unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+ static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
@@ -697,9 +697,9 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
* Note that start_page and end_pages are not aligned on a pageblock
* boundary. If alignment is required, use move_freepages_block()
*/
-int move_freepages(struct zone *zone,
- struct page *start_page, struct page *end_page,
- int migratetype)
+static int move_freepages(struct zone *zone,
+ struct page *start_page, struct page *end_page,
+ int migratetype)
{
struct page *page;
unsigned long order;
@@ -738,7 +738,8 @@ int move_freepages(struct zone *zone,
return pages_moved;
}
-int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+ int migratetype)
{
unsigned long start_pfn, end_pfn;
struct page *start_page, *end_page;
@@ -1021,11 +1022,6 @@ void free_hot_page(struct page *page)
{
free_hot_cold_page(page, 0);
}
-
-void free_cold_page(struct page *page)
-{
- free_hot_cold_page(page, 1);
-}
/*
* split_page takes a non-compound higher-order page, and splits it into
@@ -2604,7 +2600,7 @@ static int zone_batchsize(struct zone *zone)
return batch;
}
-inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
struct per_cpu_pages *pcp;
@@ -3033,7 +3029,7 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
* assumption is made that zones within a node are ordered in monotonic
* increasing memory addresses so that the "highest" populated zone is used
*/
-void __init find_usable_zone_for_movable(void)
+static void __init find_usable_zone_for_movable(void)
{
int zone_index;
for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
@@ -3059,7 +3055,7 @@ void __init find_usable_zone_for_movable(void)
* highest usable zone for ZONE_MOVABLE. This preserves the assumption that
* zones within a node are in order of monotonic increases memory addresses
*/
-void __meminit adjust_zone_range_for_zone_movable(int nid,
+static void __meminit adjust_zone_range_for_zone_movable(int nid,
unsigned long zone_type,
unsigned long node_start_pfn,
unsigned long node_end_pfn,
@@ -3120,7 +3116,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
* Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
* then all holes in the requested range will be accounted for.
*/
-unsigned long __meminit __absent_pages_in_range(int nid,
+static unsigned long __meminit __absent_pages_in_range(int nid,
unsigned long range_start_pfn,
unsigned long range_end_pfn)
{
@@ -3604,7 +3600,7 @@ static void __init sort_node_map(void)
}
/* Find the lowest pfn for a node */
-unsigned long __init find_min_pfn_for_node(unsigned long nid)
+static unsigned long __init find_min_pfn_for_node(unsigned long nid)
{
int i;
unsigned long min_pfn = ULONG_MAX;
@@ -3676,7 +3672,7 @@ static unsigned long __init early_calculate_totalpages(void)
* memory. When they don't, some nodes will have more kernelcore than
* others
*/
-void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
{
int i, nid;
unsigned long usable_startpfn;
* [2.6 patch] mm/page_alloc.c: cleanups
@ 2008-02-25 0:09 Adrian Bunk
From: Adrian Bunk @ 2008-02-25 0:09 UTC (permalink / raw)
To: linux-kernel
This patch contains the following cleanups:
- make the following needlessly global variables static:
- required_kernelcore
- zone_movable_pfn[]
- make the following needlessly global functions static:
- move_freepages()
- move_freepages_block()
- setup_pageset()
- find_usable_zone_for_movable()
- adjust_zone_range_for_zone_movable()
- __absent_pages_in_range()
- find_min_pfn_for_node()
- find_zone_movable_pfns_for_nodes()
- #if 0 the following no longer used global function:
- free_cold_page()
Signed-off-by: Adrian Bunk <bunk@kernel.org>
---
include/linux/gfp.h | 1 -
mm/page_alloc.c | 29 ++++++++++++++++-------------
2 files changed, 16 insertions(+), 14 deletions(-)
cf9b9e8ae11df98bf68e5fd2f95fd8f8a429cea3
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 164be9d..2b8148c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -220,7 +220,6 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
extern void __free_pages(struct page *page, unsigned int order);
extern void free_pages(unsigned long addr, unsigned int order);
extern void free_hot_page(struct page *page);
-extern void free_cold_page(struct page *page);
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr),0)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 75b9793..6710b45 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -151,9 +151,9 @@ static unsigned long __meminitdata dma_reserve;
static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
- unsigned long __initdata required_kernelcore;
+ static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
- unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+ static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
@@ -688,9 +688,9 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
* Note that start_page and end_pages are not aligned on a pageblock
* boundary. If alignment is required, use move_freepages_block()
*/
-int move_freepages(struct zone *zone,
- struct page *start_page, struct page *end_page,
- int migratetype)
+static int move_freepages(struct zone *zone,
+ struct page *start_page, struct page *end_page,
+ int migratetype)
{
struct page *page;
unsigned long order;
@@ -729,7 +729,8 @@ int move_freepages(struct zone *zone,
return pages_moved;
}
-int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+ int migratetype)
{
unsigned long start_pfn, end_pfn;
struct page *start_page, *end_page;
@@ -1013,11 +1014,13 @@ void free_hot_page(struct page *page)
{
free_hot_cold_page(page, 0);
}
-
+
+#if 0
void free_cold_page(struct page *page)
{
free_hot_cold_page(page, 1);
}
+#endif /* 0 */
/*
* split_page takes a non-compound higher-order page, and splits it into
@@ -2597,7 +2600,7 @@ static int zone_batchsize(struct zone *zone)
return batch;
}
-inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
struct per_cpu_pages *pcp;
@@ -3026,7 +3029,7 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
* assumption is made that zones within a node are ordered in monotonic
* increasing memory addresses so that the "highest" populated zone is used
*/
-void __init find_usable_zone_for_movable(void)
+static void __init find_usable_zone_for_movable(void)
{
int zone_index;
for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
@@ -3052,7 +3055,7 @@ void __init find_usable_zone_for_movable(void)
* highest usable zone for ZONE_MOVABLE. This preserves the assumption that
* zones within a node are in order of monotonic increases memory addresses
*/
-void __meminit adjust_zone_range_for_zone_movable(int nid,
+static void __meminit adjust_zone_range_for_zone_movable(int nid,
unsigned long zone_type,
unsigned long node_start_pfn,
unsigned long node_end_pfn,
@@ -3113,7 +3116,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
* Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
* then all holes in the requested range will be accounted for.
*/
-unsigned long __meminit __absent_pages_in_range(int nid,
+static unsigned long __meminit __absent_pages_in_range(int nid,
unsigned long range_start_pfn,
unsigned long range_end_pfn)
{
@@ -3597,7 +3600,7 @@ static void __init sort_node_map(void)
}
/* Find the lowest pfn for a node */
-unsigned long __init find_min_pfn_for_node(unsigned long nid)
+static unsigned long __init find_min_pfn_for_node(unsigned long nid)
{
int i;
unsigned long min_pfn = ULONG_MAX;
@@ -3669,7 +3672,7 @@ static unsigned long __init early_calculate_totalpages(void)
* memory. When they don't, some nodes will have more kernelcore than
* others
*/
-void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
{
int i, nid;
unsigned long usable_startpfn;