Use slabs for PMD's in addition to pgd's and allocate the PMD's only in the
PGD slab ctor and free in the PGD slab dtor, and zero in the PMD slab ctor.

 arch/i386/mm/init.c        | 17 ++++++++++++--
 arch/i386/mm/pgtable.c     | 53 +++++++++++++++++++++++++++------------------
 include/asm-i386/pgalloc.h | 17 +++++++++++++-
 3 files changed, 63 insertions(+), 24 deletions(-)

diff -urpN wli-2.5.50-bk6-12/arch/i386/mm/init.c wli-2.5.50-bk6-13/arch/i386/mm/init.c
--- wli-2.5.50-bk6-12/arch/i386/mm/init.c	2002-12-06 13:58:33.000000000 -0800
+++ wli-2.5.50-bk6-13/arch/i386/mm/init.c	2002-12-06 16:08:00.000000000 -0800
@@ -493,15 +493,28 @@ void __init mem_init(void)
 }
 
 #if CONFIG_X86_PAE
-struct kmem_cache_s *pae_pgd_cachep;
+#include <linux/slab.h>
+
+kmem_cache_t *pae_pmd_cachep;
+kmem_cache_t *pae_pgd_cachep;
+
+int pae_pmd_ctor(void *, kmem_cache_t *, unsigned long);
+int pae_pgd_ctor(void *, kmem_cache_t *, unsigned long);
+void pae_pgd_dtor(void *, kmem_cache_t *, unsigned long);
 
 void __init pgtable_cache_init(void)
 {
 	/*
 	 * PAE pgds must be 16-byte aligned:
 	 */
+	pae_pmd_cachep = kmem_cache_create("pae_pmd", 4096, 0,
+		SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, pae_pmd_ctor, NULL);
+
+	if (!pae_pmd_cachep)
+		panic("init_pae(): cannot allocate pae_pmd SLAB cache");
+
 	pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
-		SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
+		SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, pae_pgd_ctor, pae_pgd_dtor);
 	if (!pae_pgd_cachep)
 		panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
 }
diff -urpN wli-2.5.50-bk6-12/arch/i386/mm/pgtable.c wli-2.5.50-bk6-13/arch/i386/mm/pgtable.c
--- wli-2.5.50-bk6-12/arch/i386/mm/pgtable.c	2002-11-27 14:36:24.000000000 -0800
+++ wli-2.5.50-bk6-13/arch/i386/mm/pgtable.c	2002-12-07 23:04:16.000000000 -0800
@@ -168,38 +168,49 @@ struct page *pte_alloc_one(struct mm_str
 
 #if CONFIG_X86_PAE
 
-pgd_t *pgd_alloc(struct mm_struct *mm)
+extern kmem_cache_t *pae_pmd_cachep;
+
+int pae_pmd_ctor(void *__pmd, kmem_cache_t *pmd_cache, unsigned long flags)
+{
+	pmd_t *pmd = __pmd;
+	if ((unsigned long)pmd & ~PAGE_MASK)
+		printk("slab.c handed back a bad pmd\n");
+	clear_page(pmd);
+	return 0;
+}
+
+int pae_pgd_ctor(void *__pgd, kmem_cache_t *pgd_cache, unsigned long flags)
 {
 	int i;
-	pgd_t *pgd = kmem_cache_alloc(pae_pgd_cachep, GFP_KERNEL);
+	pgd_t *pgd = __pgd;
 
-	if (pgd) {
-		for (i = 0; i < USER_PTRS_PER_PGD; i++) {
-			unsigned long pmd = __get_free_page(GFP_KERNEL);
-			if (!pmd)
-				goto out_oom;
-			clear_page(pmd);
-			set_pgd(pgd + i, __pgd(1 + __pa(pmd)));
+	memcpy(pgd + USER_PTRS_PER_PGD,
+		swapper_pg_dir + USER_PTRS_PER_PGD,
+		(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
+		pmd_t *pmd = kmem_cache_alloc(pae_pmd_cachep, SLAB_KERNEL);
+		if (!pmd)
+			goto out_oom;
+		else if ((unsigned long)pmd & ~PAGE_MASK) {
+			printk("kmem_cache_alloc did wrong! death ensues!\n");
+			goto out_oom;
 		}
-		memcpy(pgd + USER_PTRS_PER_PGD,
-			swapper_pg_dir + USER_PTRS_PER_PGD,
-			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+		set_pgd(pgd + i, __pgd(1 + __pa((unsigned long long)((unsigned long)pmd))));
 	}
-	return pgd;
+	return 0;
 
 out_oom:
-	for (i--; i >= 0; i--)
-		free_page((unsigned long)__va(pgd_val(pgd[i])-1));
-	kmem_cache_free(pae_pgd_cachep, pgd);
-	return NULL;
+	for (i--; i >= 0; --i)
+		kmem_cache_free(pae_pmd_cachep, (void *)__va(pgd_val(pgd[i])-1));
+	return 1;
 }
 
-void pgd_free(pgd_t *pgd)
+void pae_pgd_dtor(void *__pgd, kmem_cache_t *pgd_cache, unsigned long flags)
 {
 	int i;
+	pgd_t *pgd = __pgd;
 
-	for (i = 0; i < USER_PTRS_PER_PGD; i++)
-		free_page((unsigned long)__va(pgd_val(pgd[i])-1));
-	kmem_cache_free(pae_pgd_cachep, pgd);
+	for (i = 0; i < USER_PTRS_PER_PGD; ++i)
+		kmem_cache_free(pae_pmd_cachep, (void *)__va(pgd_val(pgd[i])-1));
 }
 
 #else
diff -urpN wli-2.5.50-bk6-12/include/asm-i386/pgalloc.h wli-2.5.50-bk6-13/include/asm-i386/pgalloc.h
--- wli-2.5.50-bk6-12/include/asm-i386/pgalloc.h	2002-11-27 14:35:49.000000000 -0800
+++ wli-2.5.50-bk6-13/include/asm-i386/pgalloc.h	2002-12-07 23:03:45.000000000 -0800
@@ -19,9 +19,24 @@ static inline void pmd_populate(struct m
 /*
  * Allocate and free page tables.
  */
-
+#ifndef CONFIG_X86_PAE
 extern pgd_t *pgd_alloc(struct mm_struct *);
 extern void pgd_free(pgd_t *pgd);
+#else
+#include <linux/slab.h>	/* for kmem_cache_alloc() and kmem_cache_t */
+
+extern kmem_cache_t *pae_pgd_cachep;
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return kmem_cache_alloc(pae_pgd_cachep, SLAB_KERNEL);
+}
+
+static inline void pgd_free(pgd_t *pgd)
+{
+	kmem_cache_free(pae_pgd_cachep, pgd);
+}
+#endif
 
 extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
 extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);