diff -urpN numaq-2.5.44/mm/page_alloc.c lb1-2.5.44/mm/page_alloc.c
--- numaq-2.5.44/mm/page_alloc.c	2002-10-28 16:34:42.000000000 -0800
+++ lb1-2.5.44/mm/page_alloc.c	2002-10-28 17:18:53.000000000 -0800
@@ -97,7 +97,7 @@ static void bad_page(const char *functio
  * -- wli
  */
 
-static inline void __free_pages_bulk (struct page *page, struct page *base,
+static inline void buddy_free(struct page *page, struct page *base,
 		struct zone *zone, struct free_area *area, unsigned long mask,
 		unsigned int order)
 {
@@ -136,6 +136,13 @@ static inline void __free_pages_bulk (st
 	list_add(&(base + page_idx)->list, &area->free_list);
 }
 
+/* Compatibility wrapper: keep the old __free_pages_bulk() name for callers. */
+static inline void __free_pages_bulk (struct page *page, struct page *base,
+	struct zone *zone, struct free_area *area, unsigned long mask,
+	unsigned int order)
+{
+	buddy_free(page, base, zone, area, mask, order);
+}
+
 static inline void free_pages_check(const char *function, struct page *page)
 {
 	if (	page_mapped(page) ||
@@ -240,7 +247,7 @@ static struct page *prep_new_page(struct
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
  */
-static struct page *__rmqueue(struct zone *zone, unsigned int order)
+static struct page *buddy_alloc(struct zone *zone, unsigned int order)
 {
 	struct free_area * area;
 	unsigned int current_order = order;
@@ -268,6 +275,11 @@ static struct page *__rmqueue(struct zon
 	return NULL;
 }
 
+/* Compatibility wrapper: must propagate buddy_alloc()'s result to callers. */
+static inline struct page *__rmqueue(struct zone *zone, unsigned int order)
+{
+	return buddy_alloc(zone, order);
+}
+
 /*
  * Obtain a specified number of elements from the buddy allocator, all under
  * a single hold of the lock, for efficiency. Add them to the supplied list.