diff -urN linux-2.4.17-rc2-virgin/include/asm-i386/pgtable.h linux-2.4.17-rc2/include/asm-i386/pgtable.h
--- linux-2.4.17-rc2-virgin/include/asm-i386/pgtable.h	Thu Nov 22 11:46:19 2001
+++ linux-2.4.17-rc2/include/asm-i386/pgtable.h	Tue Dec 18 21:31:40 2001
@@ -267,7 +267,18 @@
  * Permanent address of a page. Obviously must never be
  * called on a highmem page.
  */
+#ifndef CONFIG_HIGHMEM
+
+#define page_address(page) \
+	__va( (((page) - PageZone(page)->zone_mem_map) << PAGE_SHIFT) \
+		+ PageZone(page)->zone_start_paddr)
+
+#else /* CONFIG_HIGHMEM */
+
 #define page_address(page) ((page)->virtual)
+
+#endif /* CONFIG_HIGHMEM */
+
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 
 /*
diff -urN linux-2.4.17-rc2-virgin/include/linux/mm.h linux-2.4.17-rc2/include/linux/mm.h
--- linux-2.4.17-rc2-virgin/include/linux/mm.h	Tue Dec 18 21:21:56 2001
+++ linux-2.4.17-rc2/include/linux/mm.h	Tue Dec 18 21:45:25 2001
@@ -162,9 +162,20 @@
 	wait_queue_head_t wait;		/* Page locked? Stand in line... */
 	struct page **pprev_hash;	/* Complement to *next_hash. */
 	struct buffer_head * buffers;	/* Buffer maps us to a disk block. */
+
+
+	/*
+	 * On ordinary machines the direct-mapped kernel virtual address
+	 * space allows kernel virtual addresses for given pages to be
+	 * computed by address calculation. On machines where kernel
+	 * virtual mappings for some regions of physical memory are set
+	 * up dynamically, the only way to deduce the kernel virtual
+	 * address of a physical page is to store it somewhere, namely here.
+	 */
+#ifdef CONFIG_HIGHMEM
 	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
-	struct zone_struct *zone;	/* Memory zone we are in. */
+#endif /* CONFIG_HIGHMEM */
 } mem_map_t;
 
 /*
@@ -286,6 +297,15 @@
 #define PG_reserved		14
 #define PG_launder		15	/* written out by VM pressure.. */
 
+/*
+ * PG_zone is actually a two-bit bitfield starting at bit 16 of the
+ * ->flags field of struct page.
+ * PG_zonemask selects exactly those bits of the word so that the
+ * PG_zone field of ->flags can be extracted.
+ */
+#define PG_zone			16
+#define PG_zonemask		(0x3UL << PG_zone)
+
 /* Make it prettier to test the above... */
 #define UnlockPage(page)	unlock_page(page)
 #define Page_Uptodate(page)	test_bit(PG_uptodate, &(page)->flags)
@@ -302,6 +322,40 @@
 #define PageLaunder(page)	test_bit(PG_launder, &(page)->flags)
 #define SetPageLaunder(page)	set_bit(PG_launder, &(page)->flags)
 
+/*
+ * The zone field within ->flags is never updated after free_area_init_core()
+ * sets it, so none of the operations on it need be atomic.
+ */
+#define PageZone(page) \
+	(zone_table[(((page)->flags & PG_zonemask) >> PG_zone)])
+#define SetPageZone(page, zone_number) \
+	do { \
+		(page)->flags &= ~PG_zonemask; \
+		(page)->flags |= (zone_number << PG_zone) & PG_zonemask;\
+	} while(0)
+
+/*
+ * To avoid #ifdefs in the C code itself, a CONFIG_HIGHMEM-conditional
+ * macro for setting the ->virtual field of struct page is
+ * provided here.
+ */
+#ifdef CONFIG_HIGHMEM
+
+#define SetPageVirtual(page, address) \
+	do { \
+		(page)->virtual = (address); \
+	} while(0)
+
+#else /* !CONFIG_HIGHMEM */
+
+/*
+ * With no highmem there is no field to set, so this
+ * expands to a no-op.
+ */
+#define SetPageVirtual(page, address) do { ; } while(0)
+
+#endif /* !CONFIG_HIGHMEM */
+
 extern void FASTCALL(set_page_dirty(struct page *));
 
 /*
@@ -448,6 +502,9 @@
 extern void show_mem(void);
 extern void si_meminfo(struct sysinfo * val);
 extern void swapin_readahead(swp_entry_t);
+
+struct zone_struct;
+extern struct zone_struct *zone_table[];
 
 extern struct address_space swapper_space;
 #define PageSwapCache(page) ((page)->mapping == &swapper_space)
diff -urN linux-2.4.17-rc2-virgin/mm/Makefile linux-2.4.17-rc2/mm/Makefile
--- linux-2.4.17-rc2-virgin/mm/Makefile	Wed Oct 24 15:21:18 2001
+++ linux-2.4.17-rc2/mm/Makefile	Tue Dec 18 21:12:43 2001
@@ -9,7 +9,7 @@
 
 O_TARGET := mm.o
 
-export-objs := shmem.o filemap.o
+export-objs := shmem.o filemap.o page_alloc.o
 
 obj-y	 := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
 	    vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \
diff -urN linux-2.4.17-rc2-virgin/mm/page_alloc.c linux-2.4.17-rc2/mm/page_alloc.c
--- linux-2.4.17-rc2-virgin/mm/page_alloc.c	Mon Nov 19 16:35:40 2001
+++ linux-2.4.17-rc2/mm/page_alloc.c	Tue Dec 18 21:44:55 2001
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include <linux/module.h>
 
 int nr_swap_pages;
 int nr_active_pages;
@@ -26,6 +27,17 @@
 struct list_head active_list;
 pg_data_t *pgdat_list;
 
+
+/*
+ * The zone_table array is used to look up the address of the
+ * struct zone corresponding to a given zone number (ZONE_DMA,
+ * ZONE_NORMAL, or ZONE_HIGHMEM). Specifically, struct page uses
+ * a bitfield within ->flags to store the zone to which a page
+ * belongs, and so lookups in this table are essential.
+ */
+zone_t *zone_table[MAX_NR_ZONES];
+EXPORT_SYMBOL(zone_table);
+
 static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
 static int zone_balance_ratio[MAX_NR_ZONES] __initdata = { 128, 128, 128, };
 static int zone_balance_min[MAX_NR_ZONES] __initdata = { 20 , 20, 20, };
@@ -54,7 +66,12 @@
 /*
  * Temporary debugging check.
  */
-#define BAD_RANGE(zone,x) (((zone) != (x)->zone) || (((x)-mem_map) < (zone)->zone_start_mapnr) || (((x)-mem_map) >= (zone)->zone_start_mapnr+(zone)->size))
+#define BAD_RANGE(zone, page) \
+( \
+	(((page) - mem_map) >= ((zone)->zone_start_mapnr+(zone)->size)) \
+	|| (((page) - mem_map) < (zone)->zone_start_mapnr) \
+	|| ((zone) != PageZone(page)) \
+)
 
 /*
  * Buddy system. Hairy. You really aren't expected to understand this
@@ -90,7 +107,7 @@
 		goto local_freelist;
 back_local_freelist:
 
-	zone = page->zone;
+	zone = PageZone(page);
 
 	mask = (~0UL) << order;
 	base = zone->zone_mem_map;
@@ -255,7 +272,7 @@
 			entry = local_pages->next;
 			do {
 				tmp = list_entry(entry, struct page, list);
-				if (tmp->index == order && memclass(tmp->zone, classzone)) {
+				if (tmp->index == order && memclass(PageZone(tmp), classzone)) {
 					list_del(entry);
 					current->nr_local_pages--;
 					set_page_count(tmp, 1);
@@ -699,6 +716,7 @@
 		unsigned long mask;
 		unsigned long size, realsize;
 
+		zone_table[j] = zone;
 		realsize = size = zones_size[j];
 		if (zholes_size)
 			realsize -= zholes_size[j];
@@ -733,9 +751,10 @@
 
 		for (i = 0; i < size; i++) {
 			struct page *page = mem_map + offset + i;
-			page->zone = zone;
+			SetPageZone(page, j);
+
 			if (j != ZONE_HIGHMEM)
-				page->virtual = __va(zone_start_paddr);
+				SetPageVirtual(page, __va(zone_start_paddr));
 			zone_start_paddr += PAGE_SIZE;
 		}
 
diff -urN linux-2.4.17-rc2-virgin/mm/vmscan.c linux-2.4.17-rc2/mm/vmscan.c
--- linux-2.4.17-rc2-virgin/mm/vmscan.c	Tue Dec 18 21:21:57 2001
+++ linux-2.4.17-rc2/mm/vmscan.c	Tue Dec 18 21:22:38 2001
@@ -58,7 +58,7 @@
 		return 0;
 
 	/* Don't bother replenishing zones not under pressure.. */
-	if (!memclass(page->zone, classzone))
+	if (!memclass(PageZone(page), classzone))
 		return 0;
 
 	if (TryLockPage(page))
@@ -369,7 +369,7 @@
 		if (unlikely(!page_count(page)))
 			continue;
 
-		if (!memclass(page->zone, classzone))
+		if (!memclass(PageZone(page), classzone))
 			continue;
 
 		/* Racy check to avoid trylocking when not worthwhile */
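
As a reading aid (not part of the patch), here is a minimal user-space sketch of
the mechanism the macros above implement: a two-bit zone number packed at bit 16
of a page's ->flags word and decoded back into a zone pointer through a small
lookup table. The struct layouts and helper names below are simplified
stand-ins for illustration only, not the kernel's real definitions.

/*
 * Standalone sketch: pack a 2-bit zone index at bit 16 of a flags word,
 * then recover the zone via a table lookup, mirroring PG_zone,
 * PG_zonemask, SetPageZone() and PageZone() in the patch above.
 */
#include <stdio.h>

#define PG_zone		16
#define PG_zonemask	(0x3UL << PG_zone)
#define MAX_NR_ZONES	3

struct zone { const char *name; };	/* simplified stand-in */
struct page { unsigned long flags; };	/* simplified stand-in */

static struct zone zone_table[MAX_NR_ZONES] = {
	{ "DMA" }, { "Normal" }, { "HighMem" }
};

/* Store the zone number in ->flags; done once at init time. */
static void set_page_zone(struct page *page, unsigned long zone_number)
{
	page->flags &= ~PG_zonemask;
	page->flags |= (zone_number << PG_zone) & PG_zonemask;
}

/* Recover the zone from ->flags instead of a dedicated pointer field. */
static struct zone *page_zone(struct page *page)
{
	return &zone_table[(page->flags & PG_zonemask) >> PG_zone];
}

int main(void)
{
	struct page page = { .flags = 0 };

	set_page_zone(&page, 2);	/* pretend this page is in HighMem */
	printf("page is in zone %s\n", page_zone(&page)->name);
	return 0;
}

The point of the table indirection is that struct page shrinks by one pointer
per physical page while the zone lookup stays a couple of cheap bit operations
plus an array index.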