diff -urpN lb3-2.5.44/mm/page_alloc.c lb4-2.5.44/mm/page_alloc.c
--- lb3-2.5.44/mm/page_alloc.c	2002-10-28 18:46:41.000000000 -0800
+++ lb4-2.5.44/mm/page_alloc.c	2002-10-28 18:44:31.000000000 -0800
@@ -142,7 +142,23 @@ static inline void __free_pages_bulk (st
 		struct zone *zone, struct free_area *area, unsigned long mask,
 		unsigned int order)
 {
-	buddy_free(page, base, zone, area, mask, order);
+	/*
+	 * Defer coalescing while demand at this order is high: only hand
+	 * pages back to the buddy system once the deferred pool covers
+	 * (almost) all active pages; otherwise stash the page on the
+	 * per-area deferred list for cheap re-allocation.
+	 */
+	switch (area->active - area->locally_free) {
+		case 0:
+			/* Deferred pool now exceeds demand: drain one entry. */
+			if (!list_empty(&area->deferred_pages)) {
+				struct page *defer = list_entry(area->deferred_pages.next, struct page, list);
+				list_del(&defer->list);
+				area->locally_free--;
+				buddy_free(defer, base, zone, area, mask, order);
+			}
+			/* fall through */
+		case 1:
+			buddy_free(page, base, zone, area, mask, order);
+			/*
+			 * BUGFIX: must not fall through to default -- the page
+			 * was just merged into the buddy lists; list_add()ing
+			 * it to deferred_pages as well corrupts both lists.
+			 */
+			break;
+		default:
+			list_add(&page->list, &area->deferred_pages);
+			area->locally_free++;
+			break;
+	}
 	area->active--;
 }
 
@@ -273,16 +289,32 @@ static struct page *buddy_alloc(struct z
 			index = page - zone->zone_mem_map;
 			if (current_order != MAX_ORDER-1)
 				MARK_USED(index, current_order, area);
-			zone->free_pages -= 1UL << order;
 			return expand(zone, page, index, order, current_order, area);
 	}
 	return NULL;
 }
 
+/* Stub: later patches in the series pull deferred pages from other zones. */
+static inline struct page *steal_deferred_page(struct zone *zone, int order)
+{
+	return NULL;
+}
+
 static inline struct page *__rmqueue(struct zone *zone, unsigned int order)
 {
-	struct page *page = buddy_alloc(zone, order);
-	if (page)
-		zone->free_area[order].active++;
+	struct free_area *area = &zone->free_area[order];
+	struct page *page;
+
+	/* Prefer the deferred (uncoalesced) pool; fall back to the buddy lists. */
+	if (!list_empty(&area->deferred_pages)) {
+		page = list_entry(area->deferred_pages.next, struct page, list);
+		list_del(&page->list);
+		area->locally_free--;
+	} else
+		page = buddy_alloc(zone, order);
+	if (!page)
+		page = steal_deferred_page(zone, order);
+	if (!page)
+		return NULL;
+	/*
+	 * Accounting moved here from buddy_alloc() so deferred-pool hits
+	 * (which bypass buddy_alloc) still update free_pages/active.
+	 */
+	area->active++;
+	zone->free_pages -= 1UL << order;
 	return page;
 }