? lib/libperfuse/sh.core
? sbin/routed/rtquery/.depend
? sbin/routed/rtquery/rtquery.d
? sys/arch/amd64/compile/APHRODITE
? sys/arch/amd64/compile/DIAGNOSTIC
? sys/arch/amd64/conf/APHRODITE
? sys/arch/amd64/conf/DIAGNOSTIC
? sys/arch/i386/compile/APHRODITE
? sys/arch/i386/conf/APHRODITE
? usr.sbin/pstat/.gdbinit
Index: external/cddl/osnet/dist/uts/common/fs/zfs/arc.c
===================================================================
RCS file: /cvsroot/src/external/cddl/osnet/dist/uts/common/fs/zfs/arc.c,v
retrieving revision 1.10
diff -u -p -r1.10 arc.c
--- external/cddl/osnet/dist/uts/common/fs/zfs/arc.c	20 Nov 2011 02:54:25 -0000	1.10
+++ external/cddl/osnet/dist/uts/common/fs/zfs/arc.c	23 Jan 2012 22:00:33 -0000
@@ -152,33 +152,12 @@
 #define	curproc		curlwp
 #define	proc_pageout	uvm.pagedaemon_lwp
 
-#define	heap_arena	kernel_map
-#define	VMEM_ALLOC	1
-#define	VMEM_FREE	2
-static inline size_t
-vmem_size(struct vm_map *map, int flag)
-{
-	switch (flag) {
-	case VMEM_ALLOC:
-		return map->size;
-	case VMEM_FREE:
-		return vm_map_max(map) - vm_map_min(map) - map->size;
-	case VMEM_FREE|VMEM_ALLOC:
-		return vm_map_max(map) - vm_map_min(map);
-	default:
-		panic("vmem_size");
-	}
-}
 static void	*zio_arena;
 
 #include <sys/callback.h>
 /* Structures used for memory and kva space reclaim. */
 static struct callback_entry arc_kva_reclaim_entry;
 
-#ifdef _KERNEL
-static struct uvm_reclaim_hook arc_hook;
-#endif
-
 #endif	/* __NetBSD__ */
 
 static kmutex_t		arc_reclaim_thr_lock;
@@ -2028,7 +2007,7 @@ arc_reclaim_needed(void)
 	/*
 	 * If we're on an i386 platform, it's possible that we'll exhaust the
 	 * kernel heap space before we ever run out of available physical
-	 * memory.  Most checks of the size of the heap_area compare against
+	 * memory.  Most checks of the size of the kmem_arena compare against
 	 * tune.t_minarmem, which is the minimum available real memory that we
 	 * can have in the system.  However, this is generally fixed at 25 pages
 	 * which is so low that it's useless.  In this comparison, we seek to
@@ -2036,8 +2015,8 @@ arc_reclaim_needed(void)
 	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
 	 * free)
 	 */
-	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
-	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
+	if (btop(vmem_size(kmem_arena, VMEM_FREE)) <
+	    (btop(vmem_size(kmem_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
 		return (1);
 #endif
 
@@ -3359,7 +3338,7 @@ arc_memory_throttle(uint64_t reserve, ui
 	static uint64_t last_txg = 0;
 
 	available_memory =
-	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
+	    MIN(available_memory, vmem_size(kmem_arena, VMEM_FREE));
 	if (available_memory >= zfs_write_limit_max)
 		return (0);
 
@@ -3511,7 +3490,7 @@ arc_init(void)
 	 * than the addressable space (intel in 32-bit mode), we may
 	 * need to limit the cache to 1/8 of VM size.
 	 */
-	arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
+	arc_c = MIN(arc_c, vmem_size(kmem_arena, VMEM_ALLOC | VMEM_FREE) / 8);
 #endif
 
 	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
@@ -3615,11 +3594,11 @@ arc_init(void)
 	    TS_RUN, maxclsyspri);
 
 #if defined(__NetBSD__) && defined(_KERNEL)
-	arc_hook.uvm_reclaim_hook = &arc_uvm_reclaim_hook;
+/* 	arc_hook.uvm_reclaim_hook = &arc_uvm_reclaim_hook;
 
 	uvm_reclaim_hook_add(&arc_hook);
 	callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback,
-	    &arc_kva_reclaim_entry, NULL, arc_kva_reclaim_callback);
+	    &arc_kva_reclaim_entry, NULL, arc_kva_reclaim_callback); */
 
 #endif
 
@@ -3674,9 +3653,9 @@ arc_fini(void)
 	mutex_destroy(&zfs_write_limit_lock);
 
 #if defined(__NetBSD__) && defined(_KERNEL)
-	uvm_reclaim_hook_del(&arc_hook);
+/*	uvm_reclaim_hook_del(&arc_hook);
 	callback_unregister(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback,
-	    &arc_kva_reclaim_entry);
+	    &arc_kva_reclaim_entry); */
 #endif 	
 	
 	buf_fini();
Index: sys/arch/algor/dev/mainbus.c
===================================================================
RCS file: /cvsroot/src/sys/arch/algor/dev/mainbus.c,v
retrieving revision 1.25
diff -u -p -r1.25 mainbus.c
--- sys/arch/algor/dev/mainbus.c	9 Jul 2011 16:03:01 -0000	1.25
+++ sys/arch/algor/dev/mainbus.c	23 Jan 2012 22:01:36 -0000
@@ -159,9 +159,9 @@ mainbus_attach(device_t parent, device_t
 	 * Reserve the bottom 64K of the I/O space for ISA devices.
 	 */
 	ioext  = extent_create("pciio",  0x00010000, 0x000effff,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	memext = extent_create("pcimem", 0x01000000, 0x07ffffff,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 
 	pc = &p4032_configuration.ac_pc;
 #elif defined(ALGOR_P5064)
@@ -171,9 +171,9 @@ mainbus_attach(device_t parent, device_t
 	 * a bug in the ISA bridge.
 	 */
 	ioext  = extent_create("pciio",  0x00080000, 0x00ffffff,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	memext = extent_create("pcimem", 0x01000000, 0x07ffffff,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 
 	pc = &p5064_configuration.ac_pc;
 #if defined(PCI_NETBSD_ENABLE_IDE)
@@ -184,9 +184,9 @@ mainbus_attach(device_t parent, device_t
 	 * Reserve the bottom 64K of the I/O space for ISA devices.
 	 */
 	ioext  = extent_create("pciio",  0x00010000, 0x000effff,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	memext = extent_create("pcimem", 0x01000000, 0x0affffff,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 
 	pc = &p6032_configuration.ac_pc;
 #if defined(PCI_NETBSD_ENABLE_IDE)
Index: sys/arch/alpha/alpha/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/alpha/alpha/pmap.c,v
retrieving revision 1.255
diff -u -p -r1.255 pmap.c
--- sys/arch/alpha/alpha/pmap.c	12 Nov 2010 07:59:25 -0000	1.255
+++ sys/arch/alpha/alpha/pmap.c	23 Jan 2012 22:01:37 -0000
@@ -757,11 +757,6 @@ pmap_bootstrap(paddr_t ptaddr, u_int max
 #endif
 
 	/*
-	 * Compute the number of pages kmem_map will have.
-	 */
-	kmeminit_nkmempages();
-
-	/*
 	 * Figure out how many initial PTE's are necessary to map the
 	 * kernel.  We also reserve space for kmem_alloc_pageable()
 	 * for vm_fork().
@@ -774,7 +769,7 @@ pmap_bootstrap(paddr_t ptaddr, u_int max
 	lev3mapsize =
 		(VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) +
 		 bufsz + 16 * NCARGS + pager_map_size) / PAGE_SIZE +
-		(maxproc * UPAGES) + nkmempages;
+		(maxproc * UPAGES) + (256 * 1024 * 1024) / PAGE_SIZE;
 
 #ifdef SYSVSHM
 	lev3mapsize += shminfo.shmall;
Index: sys/arch/alpha/common/sgmap_common.c
===================================================================
RCS file: /cvsroot/src/sys/arch/alpha/common/sgmap_common.c,v
retrieving revision 1.25
diff -u -p -r1.25 sgmap_common.c
--- sys/arch/alpha/common/sgmap_common.c	1 Jul 2011 19:22:35 -0000	1.25
+++ sys/arch/alpha/common/sgmap_common.c	23 Jan 2012 22:01:37 -0000
@@ -107,7 +107,7 @@ alpha_sgmap_init(bus_dma_tag_t t, struct
 	 * space.
 	 */
 	sgmap->aps_ex = extent_create(name, sgvabase, sgvasize - 1,
-	    M_DMAMAP, NULL, 0, EX_NOWAIT|EX_NOCOALESCE);
+	    NULL, 0, EX_NOWAIT|EX_NOCOALESCE);
 	if (sgmap->aps_ex == NULL) {
 		printf("unable to create extent map for sgmap `%s'\n",
 		    name);
Index: sys/arch/alpha/pci/pci_bwx_bus_io_chipdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/alpha/pci/pci_bwx_bus_io_chipdep.c,v
retrieving revision 1.19
diff -u -p -r1.19 pci_bwx_bus_io_chipdep.c
--- sys/arch/alpha/pci/pci_bwx_bus_io_chipdep.c	25 Sep 2011 13:36:53 -0000	1.19
+++ sys/arch/alpha/pci/pci_bwx_bus_io_chipdep.c	23 Jan 2012 22:01:37 -0000
@@ -302,7 +302,7 @@ __C(CHIP,_bus_io_init)(t, v)
 	t->abs_c_8 =		__C(CHIP,_io_copy_region_8);
 
 	ex = extent_create(__S(__C(CHIP,_bus_io)), 0x0UL, 0xffffffffUL,
-	    M_DEVBUF, (void *)CHIP_IO_EX_STORE(v), CHIP_IO_EX_STORE_SIZE(v),
+	    (void *)CHIP_IO_EX_STORE(v), CHIP_IO_EX_STORE_SIZE(v),
 	    EX_NOWAIT|EX_NOCOALESCE);
 
 	CHIP_IO_EXTENT(v) = ex;
Index: sys/arch/alpha/pci/pci_bwx_bus_mem_chipdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/alpha/pci/pci_bwx_bus_mem_chipdep.c,v
retrieving revision 1.23
diff -u -p -r1.23 pci_bwx_bus_mem_chipdep.c
--- sys/arch/alpha/pci/pci_bwx_bus_mem_chipdep.c	25 Sep 2011 13:36:53 -0000	1.23
+++ sys/arch/alpha/pci/pci_bwx_bus_mem_chipdep.c	23 Jan 2012 22:01:37 -0000
@@ -302,7 +302,7 @@ __C(CHIP,_bus_mem_init)(t, v)
 	t->abs_c_8 =		__C(CHIP,_mem_copy_region_8);
 
 	ex = extent_create(__S(__C(CHIP,_bus_mem)), 0x0UL, 0xffffffffUL,
-	    M_DEVBUF, (void *)CHIP_MEM_EX_STORE(v), CHIP_MEM_EX_STORE_SIZE(v),
+	    (void *)CHIP_MEM_EX_STORE(v), CHIP_MEM_EX_STORE_SIZE(v),
 	    EX_NOWAIT|EX_NOCOALESCE);
 
         CHIP_MEM_EXTENT(v) = ex;
Index: sys/arch/alpha/pci/pci_swiz_bus_io_chipdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/alpha/pci/pci_swiz_bus_io_chipdep.c,v
retrieving revision 1.39
diff -u -p -r1.39 pci_swiz_bus_io_chipdep.c
--- sys/arch/alpha/pci/pci_swiz_bus_io_chipdep.c	25 Sep 2011 13:36:53 -0000	1.39
+++ sys/arch/alpha/pci/pci_swiz_bus_io_chipdep.c	23 Jan 2012 22:01:38 -0000
@@ -307,7 +307,7 @@ __C(CHIP,_bus_io_init)(bus_space_tag_t t
 
 	/* XXX WE WANT EXTENT_NOCOALESCE, BUT WE CAN'T USE IT. XXX */
 	ex = extent_create(__S(__C(CHIP,_bus_io)), 0x0UL, 0xffffffffUL,
-	    M_DEVBUF, (void *)CHIP_IO_EX_STORE(v), CHIP_IO_EX_STORE_SIZE(v),
+	    (void *)CHIP_IO_EX_STORE(v), CHIP_IO_EX_STORE_SIZE(v),
 	    EX_NOWAIT);
 	extent_alloc_region(ex, 0, 0xffffffffUL, EX_NOWAIT);
 
Index: sys/arch/alpha/pci/pci_swiz_bus_mem_chipdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/alpha/pci/pci_swiz_bus_mem_chipdep.c,v
retrieving revision 1.44
diff -u -p -r1.44 pci_swiz_bus_mem_chipdep.c
--- sys/arch/alpha/pci/pci_swiz_bus_mem_chipdep.c	25 Sep 2011 13:36:53 -0000	1.44
+++ sys/arch/alpha/pci/pci_swiz_bus_mem_chipdep.c	23 Jan 2012 22:01:38 -0000
@@ -329,7 +329,7 @@ __C(CHIP,_bus_mem_init)(bus_space_tag_t 
 #ifdef CHIP_D_MEM_W1_SYS_START
 	/* XXX WE WANT EXTENT_NOCOALESCE, BUT WE CAN'T USE IT. XXX */
 	dex = extent_create(__S(__C(CHIP,_bus_dmem)), 0x0UL,
-	    0xffffffffffffffffUL, M_DEVBUF,
+	    0xffffffffffffffffUL,
 	    (void *)CHIP_D_MEM_EX_STORE(v), CHIP_D_MEM_EX_STORE_SIZE(v),
 	    EX_NOWAIT);
 	extent_alloc_region(dex, 0, 0xffffffffffffffffUL, EX_NOWAIT);
@@ -352,7 +352,7 @@ __C(CHIP,_bus_mem_init)(bus_space_tag_t 
 
 	/* XXX WE WANT EXTENT_NOCOALESCE, BUT WE CAN'T USE IT. XXX */
 	sex = extent_create(__S(__C(CHIP,_bus_smem)), 0x0UL,
-	    0xffffffffffffffffUL, M_DEVBUF,
+	    0xffffffffffffffffUL,
 	    (void *)CHIP_S_MEM_EX_STORE(v), CHIP_S_MEM_EX_STORE_SIZE(v),
 	    EX_NOWAIT);
 	extent_alloc_region(sex, 0, 0xffffffffffffffffUL, EX_NOWAIT);
Index: sys/arch/amd64/amd64/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/amd64/amd64/machdep.c,v
retrieving revision 1.174
diff -u -p -r1.174 machdep.c
--- sys/arch/amd64/amd64/machdep.c	12 Jan 2012 19:49:37 -0000	1.174
+++ sys/arch/amd64/amd64/machdep.c	23 Jan 2012 22:01:39 -0000
@@ -278,7 +278,7 @@ vaddr_t lo32_vaddr;
 paddr_t lo32_paddr;
 
 vaddr_t module_start, module_end;
-static struct vm_map_kernel module_map_store;
+static struct vm_map module_map_store;
 extern struct vm_map *module_map;
 vaddr_t kern_end;
 
@@ -386,9 +386,9 @@ cpu_startup(void)
 	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
 				   VM_PHYS_SIZE, 0, false, NULL);
 
-	uvm_map_setup_kernel(&module_map_store, module_start, module_end, 0);
-	module_map_store.vmk_map.pmap = pmap_kernel();
-	module_map = &module_map_store.vmk_map;
+	uvm_map_setup(&module_map_store, module_start, module_end, 0);
+	module_map_store.pmap = pmap_kernel();
+	module_map = &module_map_store;
 
 	/* Say hello. */
 	banner();
Index: sys/arch/arc/arc/bus_space.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arc/arc/bus_space.c,v
retrieving revision 1.11
diff -u -p -r1.11 bus_space.c
--- sys/arch/arc/arc/bus_space.c	17 Jul 2011 01:36:50 -0000	1.11
+++ sys/arch/arc/arc/bus_space.c	23 Jan 2012 22:01:40 -0000
@@ -343,7 +343,7 @@ arc_bus_space_init_extent(bus_space_tag_
 {
 
 	bst->bs_extent = extent_create(bst->bs_name,
-	    bst->bs_start, bst->bs_start + bst->bs_size, M_DEVBUF,
+	    bst->bs_start, bst->bs_start + bst->bs_size,
 	    storage, storagesize, EX_NOWAIT);
 	if (bst->bs_extent == NULL)
 		panic("arc_bus_space_init_extent: cannot create extent map %s",
Index: sys/arch/arc/arc/wired_map_machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arc/arc/wired_map_machdep.c,v
retrieving revision 1.6
diff -u -p -r1.6 wired_map_machdep.c
--- sys/arch/arc/arc/wired_map_machdep.c	16 Dec 2009 23:19:06 -0000	1.6
+++ sys/arch/arc/arc/wired_map_machdep.c	23 Jan 2012 22:01:40 -0000
@@ -55,7 +55,7 @@ arc_init_wired_map(void)
 
 	mips3_nwired_page = 0;
 	arc_wired_map_ex = extent_create("wired_map",
-	    VM_MIN_WIRED_MAP_ADDRESS, VM_MAX_WIRED_MAP_ADDRESS, M_DEVBUF,
+	    VM_MIN_WIRED_MAP_ADDRESS, VM_MAX_WIRED_MAP_ADDRESS,
 	    (void *)wired_map_ex_storage, sizeof(wired_map_ex_storage),
 	    EX_NOWAIT);
 	if (arc_wired_map_ex == NULL)
Index: sys/arch/arc/jazz/jazzdmatlb.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arc/jazz/jazzdmatlb.c,v
retrieving revision 1.15
diff -u -p -r1.15 jazzdmatlb.c
--- sys/arch/arc/jazz/jazzdmatlb.c	1 Jul 2011 19:25:41 -0000	1.15
+++ sys/arch/arc/jazz/jazzdmatlb.c	23 Jan 2012 22:01:40 -0000
@@ -85,8 +85,7 @@ jazz_dmatlb_init(bus_space_tag_t iot, bu
 	mips_dcache_wbinv_all();/* Make sure no map entries are cached */
 	memset((char *)dma_tlb, 0, JAZZ_DMATLB_SIZE);
 
-	dmatlbmap = extent_create("dmatlb", 0, NDMATLB, M_DEVBUF, NULL, 0,
-	    EX_NOWAIT);
+	dmatlbmap = extent_create("dmatlb", 0, NDMATLB, NULL, 0, EX_NOWAIT);
 	if (dmatlbmap == NULL)
 		panic("jazz_dmatlb_init: cannot create extent map");
 
Index: sys/arch/arc/pci/necpb.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arc/pci/necpb.c,v
retrieving revision 1.37
diff -u -p -r1.37 necpb.c
--- sys/arch/arc/pci/necpb.c	1 Jul 2011 19:28:00 -0000	1.37
+++ sys/arch/arc/pci/necpb.c	23 Jan 2012 22:01:40 -0000
@@ -243,9 +243,9 @@ necpbattach(device_t parent, device_t se
 	pc = &sc->sc_ncp->nc_pc;
 #ifdef PCI_NETBSD_CONFIGURE
 	pc->pc_ioext = extent_create("necpbio", 0x00100000, 0x01ffffff,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	pc->pc_memext = extent_create("necpbmem", 0x08000000, 0x3fffffff,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	pci_configure_bus(pc, pc->pc_ioext, pc->pc_memext, NULL, 0,
 	    mips_cache_info.mci_dcache_align);
 #endif
Index: sys/arch/arm/arm/arm_machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm/arm_machdep.c,v
retrieving revision 1.30
diff -u -p -r1.30 arm_machdep.c
--- sys/arch/arm/arm/arm_machdep.c	4 Mar 2011 22:25:25 -0000	1.30
+++ sys/arch/arm/arm/arm_machdep.c	23 Jan 2012 22:01:40 -0000
@@ -262,10 +262,6 @@ cpu_upcall(struct lwp *l, int type, int 
 void
 cpu_need_resched(struct cpu_info *ci, int flags)
 {
-	bool immed = (flags & RESCHED_IMMED) != 0;
-
-	if (ci->ci_want_resched && !immed)
-		return;
 
 	ci->ci_want_resched = 1;
 	if (curlwp != ci->ci_data.cpu_idlelwp)
Index: sys/arch/arm/arm32/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/arm32/pmap.c,v
retrieving revision 1.224
diff -u -p -r1.224 pmap.c
--- sys/arch/arm/arm32/pmap.c	1 Jul 2011 20:57:45 -0000	1.224
+++ sys/arch/arm/arm32/pmap.c	23 Jan 2012 22:01:42 -0000
@@ -197,8 +197,8 @@
 #include <sys/kernel.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
-#include <sys/malloc.h>
+#include <sys/kmem.h>
 #include <sys/pool.h>
 #include <sys/cdefs.h>
 #include <sys/cpu.h>
 #include <sys/sysctl.h>
@@ -5497,7 +5498,7 @@ pmap_postinit(void)
 	needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
 	needed -= 1;
 
-	l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK);
+	l1 = kmem_alloc(sizeof(*l1) * needed, KM_SLEEP);
 
 	for (loop = 0; loop < needed; loop++, l1++) {
 		/* Allocate a L1 page table */
@@ -5506,7 +5507,7 @@ pmap_postinit(void)
 			panic("Cannot allocate L1 KVM");
 
 		error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start,
-		    physical_end, L1_TABLE_SIZE, 0, &plist, 1, M_WAITOK);
+		    physical_end, L1_TABLE_SIZE, 0, &plist, 1, 1);
 		if (error)
 			panic("Cannot allocate L1 physical pages");
 
Index: sys/arch/arm/gemini/gemini_pci.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/gemini/gemini_pci.c,v
retrieving revision 1.10
diff -u -p -r1.10 gemini_pci.c
--- sys/arch/arm/gemini/gemini_pci.c	1 Jul 2011 19:32:28 -0000	1.10
+++ sys/arch/arm/gemini/gemini_pci.c	23 Jan 2012 22:01:42 -0000
@@ -211,7 +211,7 @@ gemini_pci_init(pci_chipset_tag_t pc, vo
 	ioext  = extent_create("pciio",
 		GEMINI_PCIIO_BASE,
 		GEMINI_PCIIO_BASE + GEMINI_PCIIO_SIZE - 1,
-		M_DEVBUF, NULL, 0, EX_NOWAIT);
+		NULL, 0, EX_NOWAIT);
 
 	/*
 	 * XXX PCI mem addr should be inherited ?
@@ -219,7 +219,7 @@ gemini_pci_init(pci_chipset_tag_t pc, vo
 	memext = extent_create("pcimem",
 		GEMINI_PCIMEM_BASE,
 		GEMINI_PCIMEM_BASE + GEMINI_PCIMEM_SIZE - 1,
-		M_DEVBUF, NULL, 0, EX_NOWAIT);
+		NULL, 0, EX_NOWAIT);
 
 	pci_configure_bus(pc, ioext, memext, NULL, 0, arm_dcache_align);
 
Index: sys/arch/arm/ixp12x0/ixp12x0_pci.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/ixp12x0/ixp12x0_pci.c,v
retrieving revision 1.10
diff -u -p -r1.10 ixp12x0_pci.c
--- sys/arch/arm/ixp12x0/ixp12x0_pci.c	14 Mar 2009 21:04:05 -0000	1.10
+++ sys/arch/arm/ixp12x0/ixp12x0_pci.c	23 Jan 2012 22:01:42 -0000
@@ -95,12 +95,12 @@ ixp12x0_pci_init(pci_chipset_tag_t pc, v
 
 #if NPCI > 0 && defined(PCI_NETBSD_CONFIGURE)
 	ioext  = extent_create("pciio", 0, IXP12X0_PCI_IO_SIZE - 1,
-				M_DEVBUF, NULL, 0, EX_NOWAIT);
+				NULL, 0, EX_NOWAIT);
 	/* PCI MEM space is mapped same address as real memory */
 	memext = extent_create("pcimem", IXP12X0_PCI_MEM_HWBASE,
 				IXP12X0_PCI_MEM_HWBASE +
 				IXP12X0_PCI_MEM_SIZE - 1,
-				M_DEVBUF, NULL, 0, EX_NOWAIT);
+				NULL, 0, EX_NOWAIT);
 	printf("%s: configuring PCI bus\n", sc->sc_dev.dv_xname);
 	pci_configure_bus(pc, ioext, memext, NULL, 0 /* XXX bus = 0 */,
 			  arm_dcache_align);
Index: sys/arch/arm/s3c2xx0/s3c2800_pci.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/s3c2xx0/s3c2800_pci.c,v
retrieving revision 1.16
diff -u -p -r1.16 s3c2800_pci.c
--- sys/arch/arm/s3c2xx0/s3c2800_pci.c	1 Jul 2011 20:31:39 -0000	1.16
+++ sys/arch/arm/s3c2xx0/s3c2800_pci.c	23 Jan 2012 22:01:42 -0000
@@ -293,10 +293,10 @@ sspci_attach(struct device *parent, stru
 
 #if defined(PCI_NETBSD_CONFIGURE)
 	ioext = extent_create("pciio", 0x100, S3C2800_PCI_IOSPACE_SIZE - 0x100,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 
 	memext = extent_create("pcimem", 0, S3C2800_PCI_MEMSPACE_SIZE,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 
 	sspci_chipset.pc_conf_v = (void *) sc;
 	sspci_chipset.pc_intr_v = (void *) sc;
Index: sys/arch/arm/xscale/becc_pci.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/xscale/becc_pci.c,v
retrieving revision 1.10
diff -u -p -r1.10 becc_pci.c
--- sys/arch/arm/xscale/becc_pci.c	1 Jul 2011 20:32:51 -0000	1.10
+++ sys/arch/arm/xscale/becc_pci.c	23 Jan 2012 22:01:43 -0000
@@ -125,10 +125,10 @@ becc_pci_init(pci_chipset_tag_t pc, void
 	/* Reserve the bottom 32K of the PCI address space. */
 	ioext  = extent_create("pciio", sc->sc_ioout_xlate + (32 * 1024),
 	    sc->sc_ioout_xlate + (64 * 1024) - 1,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	memext = extent_create("pcimem", sc->sc_owin_xlate[0],
 	    sc->sc_owin_xlate[0] + BECC_PCI_MEM1_SIZE - 1,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 
 	aprint_normal("%s: configuring PCI bus\n", sc->sc_dev.dv_xname);
 	pci_configure_bus(pc, ioext, memext, NULL, 0, arm_dcache_align);
Index: sys/arch/arm/xscale/i80312_pci.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/xscale/i80312_pci.c,v
retrieving revision 1.10
diff -u -p -r1.10 i80312_pci.c
--- sys/arch/arm/xscale/i80312_pci.c	1 Jul 2011 20:32:51 -0000	1.10
+++ sys/arch/arm/xscale/i80312_pci.c	23 Jan 2012 22:01:43 -0000
@@ -108,10 +108,10 @@ i80312_pci_init(pci_chipset_tag_t pc, vo
 
 	ioext  = extent_create("pciio", sc->sc_sioout_base,
 	    sc->sc_sioout_base + sc->sc_sioout_size - 1,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	memext = extent_create("pcimem", sc->sc_smemout_base,
 	    sc->sc_smemout_base + sc->sc_smemout_size - 1,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 
 	aprint_normal("%s: configuring Secondary PCI bus\n", sc->sc_dev.dv_xname);
 	pci_configure_bus(pc, ioext, memext, NULL, sbus, arm_dcache_align);
Index: sys/arch/arm/xscale/i80321_pci.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/xscale/i80321_pci.c,v
retrieving revision 1.10
diff -u -p -r1.10 i80321_pci.c
--- sys/arch/arm/xscale/i80321_pci.c	1 Jul 2011 20:32:51 -0000	1.10
+++ sys/arch/arm/xscale/i80321_pci.c	23 Jan 2012 22:01:43 -0000
@@ -110,16 +110,16 @@ i80321_pci_init(pci_chipset_tag_t pc, vo
 	ioext  = extent_create("pciio",
 	    sc->sc_ioout_xlate + sc->sc_ioout_xlate_offset,
 	    sc->sc_ioout_xlate + VERDE_OUT_XLATE_IO_WIN_SIZE - 1,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 
 #ifdef I80321_USE_DIRECT_WIN
 	memext = extent_create("pcimem", VERDE_OUT_DIRECT_WIN_BASE + VERDE_OUT_DIRECT_WIN_SKIP,
 	    VERDE_OUT_DIRECT_WIN_BASE + VERDE_OUT_DIRECT_WIN_SIZE- 1,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 #else
 	memext = extent_create("pcimem", sc->sc_owin[0].owin_xlate_lo,
 	    sc->sc_owin[0].owin_xlate_lo + VERDE_OUT_XLATE_MEM_WIN_SIZE - 1,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 #endif
 
 	aprint_normal("%s: configuring PCI bus\n", sc->sc_dev.dv_xname);
Index: sys/arch/arm/xscale/ixp425_pci.c
===================================================================
RCS file: /cvsroot/src/sys/arch/arm/xscale/ixp425_pci.c,v
retrieving revision 1.7
diff -u -p -r1.7 ixp425_pci.c
--- sys/arch/arm/xscale/ixp425_pci.c	1 Jul 2011 20:32:51 -0000	1.7
+++ sys/arch/arm/xscale/ixp425_pci.c	23 Jan 2012 22:01:43 -0000
@@ -89,12 +89,12 @@ ixp425_pci_init(struct ixp425_softc *sc)
 
 #if NPCI > 0 && defined(PCI_NETBSD_CONFIGURE)
 	ioext  = extent_create("pciio", 0, IXP425_PCI_IO_SIZE - 1,
-				M_DEVBUF, NULL, 0, EX_NOWAIT);
+				NULL, 0, EX_NOWAIT);
 	/* PCI MEM space is mapped same address as real memory */
 	memext = extent_create("pcimem", IXP425_PCI_MEM_HWBASE,
 				IXP425_PCI_MEM_HWBASE +
 				IXP425_PCI_MEM_SIZE - 1,
-				M_DEVBUF, NULL, 0, EX_NOWAIT);
+				NULL, 0, EX_NOWAIT);
 	printf("%s: configuring PCI bus\n", sc->sc_dev.dv_xname);
 	pci_configure_bus(pc, ioext, memext, NULL, 0 /* XXX bus = 0 */,
 			  arm_dcache_align);
Index: sys/arch/atari/atari/atari_init.c
===================================================================
RCS file: /cvsroot/src/sys/arch/atari/atari/atari_init.c,v
retrieving revision 1.96
diff -u -p -r1.96 atari_init.c
--- sys/arch/atari/atari/atari_init.c	26 Nov 2011 14:05:52 -0000	1.96
+++ sys/arch/atari/atari/atari_init.c	23 Jan 2012 22:01:43 -0000
@@ -588,7 +588,7 @@ start_c(int id, u_int ttphystart, u_int 
 	 * on the machine.  When the amount of RAM is found, all
 	 * extents of RAM are allocated from the map.
 	 */
-	iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF,
+	iomem_ex = extent_create("iomem", 0x0, 0xffffffff,
 	    (void *)iomem_ex_storage, sizeof(iomem_ex_storage),
 	    EX_NOCOALESCE|EX_NOWAIT);
 
Index: sys/arch/atari/atari/bus.c
===================================================================
RCS file: /cvsroot/src/sys/arch/atari/atari/bus.c,v
retrieving revision 1.56
diff -u -p -r1.56 bus.c
--- sys/arch/atari/atari/bus.c	1 Jul 2011 20:34:05 -0000	1.56
+++ sys/arch/atari/atari/bus.c	23 Jan 2012 22:01:43 -0000
@@ -86,7 +86,7 @@ static int	bootm_free(vaddr_t va, u_long
 void
 bootm_init(vaddr_t va, pt_entry_t *ptep, u_long size)
 {
-	bootm_ex = extent_create("bootmem", va, va + size, M_DEVBUF,
+	bootm_ex = extent_create("bootmem", va, va + size,
 	    (void *)bootm_ex_storage, sizeof(bootm_ex_storage),
 	    EX_NOCOALESCE|EX_NOWAIT);
 	bootm_ptep = ptep;
Index: sys/arch/bebox/bebox/mainbus.c
===================================================================
RCS file: /cvsroot/src/sys/arch/bebox/bebox/mainbus.c,v
retrieving revision 1.28
diff -u -p -r1.28 mainbus.c
--- sys/arch/bebox/bebox/mainbus.c	7 Aug 2011 15:31:35 -0000	1.28
+++ sys/arch/bebox/bebox/mainbus.c	23 Jan 2012 22:01:43 -0000
@@ -135,9 +135,9 @@ mainbus_attach(device_t parent, device_t
 	SIMPLEQ_INSERT_TAIL(&genppc_pct->pc_pbi, pbi, next);
 
 #ifdef PCI_NETBSD_CONFIGURE
-	ioext  = extent_create("pciio",  0x00008000, 0x0000ffff, M_DEVBUF,
+	ioext  = extent_create("pciio",  0x00008000, 0x0000ffff,
 	    NULL, 0, EX_NOWAIT);
-	memext = extent_create("pcimem", 0x00000000, 0x0fffffff, M_DEVBUF,
+	memext = extent_create("pcimem", 0x00000000, 0x0fffffff,
 	    NULL, 0, EX_NOWAIT);
 
 	pci_configure_bus(genppc_pct, ioext, memext, NULL, 0, CACHELINESIZE);
Index: sys/arch/cobalt/dev/gt.c
===================================================================
RCS file: /cvsroot/src/sys/arch/cobalt/dev/gt.c,v
retrieving revision 1.27
diff -u -p -r1.27 gt.c
--- sys/arch/cobalt/dev/gt.c	9 Jul 2011 16:09:01 -0000	1.27
+++ sys/arch/cobalt/dev/gt.c	23 Jan 2012 22:01:44 -0000
@@ -126,9 +126,9 @@ gt_attach(device_t parent, device_t self
 
 #ifdef PCI_NETBSD_CONFIGURE
 	pc->pc_ioext = extent_create("pciio", 0x10001000, 0x11ffffff,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	pc->pc_memext = extent_create("pcimem", 0x12000000, 0x13ffffff,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	pci_configure_bus(pc, pc->pc_ioext, pc->pc_memext, NULL, 0,
 	    mips_cache_info.mci_dcache_align);
 #endif
Index: sys/arch/dreamcast/dev/g2/gapspci_dma.c
===================================================================
RCS file: /cvsroot/src/sys/arch/dreamcast/dev/g2/gapspci_dma.c,v
retrieving revision 1.19
diff -u -p -r1.19 gapspci_dma.c
--- sys/arch/dreamcast/dev/g2/gapspci_dma.c	19 Jul 2011 15:52:29 -0000	1.19
+++ sys/arch/dreamcast/dev/g2/gapspci_dma.c	23 Jan 2012 22:01:44 -0000
@@ -109,7 +109,7 @@ gaps_dma_init(struct gaps_softc *sc)
 	 */
 	sc->sc_dma_ex = extent_create("gaps dma",
 	    sc->sc_dmabase, sc->sc_dmabase + (sc->sc_dmasize - 1),
-	    M_DEVBUF, NULL, 0, EX_WAITOK | EXF_NOCOALESCE);
+	    NULL, 0, EX_WAITOK | EXF_NOCOALESCE);
 
 	if (bus_space_map(sc->sc_memt, sc->sc_dmabase, sc->sc_dmasize,
 	    0, &sc->sc_dma_memh) != 0)
Index: sys/arch/emips/emips/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/emips/emips/machdep.c,v
retrieving revision 1.5
diff -u -p -r1.5 machdep.c
--- sys/arch/emips/emips/machdep.c	12 Jun 2011 03:21:21 -0000	1.5
+++ sys/arch/emips/emips/machdep.c	23 Jan 2012 22:01:44 -0000
@@ -338,7 +338,7 @@ consinit(void)
 	 */
 	KASSERT(iospace != 0);
 	iomap_ex = extent_create("iomap", iospace,
-	    iospace + iospace_size - 1, M_DEVBUF,
+	    iospace + iospace_size - 1,
 	    (void *) iomap_ex_storage, sizeof(iomap_ex_storage),
 	    EX_NOCOALESCE|EX_NOWAIT);
 
Index: sys/arch/evbarm/ifpga/ifpga.c
===================================================================
RCS file: /cvsroot/src/sys/arch/evbarm/ifpga/ifpga.c,v
retrieving revision 1.24
diff -u -p -r1.24 ifpga.c
--- sys/arch/evbarm/ifpga/ifpga.c	17 May 2011 17:34:49 -0000	1.24
+++ sys/arch/evbarm/ifpga/ifpga.c	23 Jan 2012 22:01:44 -0000
@@ -296,13 +296,13 @@ ifpga_attach(device_t parent, device_t s
 
 #if defined(PCI_NETBSD_CONFIGURE)
 	ioext = extent_create("pciio", 0x00000000,
-	    0x00000000 + IFPGA_PCI_IO_VSIZE, M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    0x00000000 + IFPGA_PCI_IO_VSIZE, NULL, 0, EX_NOWAIT);
 	memext = extent_create("pcimem", IFPGA_PCI_APP0_BASE,
 	    IFPGA_PCI_APP0_BASE + IFPGA_PCI_APP0_SIZE,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	pmemext = extent_create("pcipmem", IFPGA_PCI_APP1_BASE,
 	    IFPGA_PCI_APP1_BASE + IFPGA_PCI_APP1_SIZE,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	ifpga_pci_chipset.pc_conf_v = (void *)pci_sc;
 	pci_configure_bus(&ifpga_pci_chipset, ioext, memext, pmemext, 0,
 	    arm_dcache_align);
Index: sys/arch/evbarm/tsarm/isa/isa_io.c
===================================================================
RCS file: /cvsroot/src/sys/arch/evbarm/tsarm/isa/isa_io.c,v
retrieving revision 1.8
diff -u -p -r1.8 isa_io.c
--- sys/arch/evbarm/tsarm/isa/isa_io.c	1 Jul 2011 19:11:34 -0000	1.8
+++ sys/arch/evbarm/tsarm/isa/isa_io.c	23 Jan 2012 22:01:45 -0000
@@ -242,10 +242,10 @@ isa_io_init(vm_offset_t isa_io_addr, vm_
 	isa_io_bs_tag.bs_cookie = (void *)isa_io_addr;
 	isa_mem_bs_tag.bs_cookie = (void *)isa_mem_addr;
 
-	isaio_ex = extent_create("isaio", 0x0, 0xffff, M_DEVBUF, 
+	isaio_ex = extent_create("isaio", 0x0, 0xffff, 
 		(void *)isaio_ex_storage, sizeof(isaio_ex_storage),
 		EX_NOWAIT|EX_NOCOALESCE);
-	isamem_ex = extent_create("isamem", 0x0, 0xfffff, M_DEVBUF, 
+	isamem_ex = extent_create("isamem", 0x0, 0xfffff, 
 		(void *)isamem_ex_storage, sizeof(isamem_ex_storage),
 		EX_NOWAIT|EX_NOCOALESCE);
 	if (isaio_ex == NULL || isamem_ex == NULL)
Index: sys/arch/evbmips/gdium/mainbus.c
===================================================================
RCS file: /cvsroot/src/sys/arch/evbmips/gdium/mainbus.c,v
retrieving revision 1.3
diff -u -p -r1.3 mainbus.c
--- sys/arch/evbmips/gdium/mainbus.c	11 Aug 2009 02:32:38 -0000	1.3
+++ sys/arch/evbmips/gdium/mainbus.c	23 Jan 2012 22:01:45 -0000
@@ -103,9 +103,9 @@ mainbus_attach(device_t parent, device_t
 		struct extent *ioext, *memext;
 
 		ioext = extent_create("pciio",  0x00001000, 0x00003fff,
-		    M_DEVBUF, NULL, 0, EX_NOWAIT);
+		    NULL, 0, EX_NOWAIT);
 		memext = extent_create("pcimem", 0, BONITO_PCILO_SIZE,
-		    M_DEVBUF, NULL, 0, EX_NOWAIT);
+		    NULL, 0, EX_NOWAIT);
 
 		pci_configure_bus(&gdium_configuration.gc_pc, ioext, memext,
 		    NULL, 0, mips_dcache_align);
Index: sys/arch/evbmips/rmixl/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/evbmips/rmixl/machdep.c,v
retrieving revision 1.9
diff -u -p -r1.9 machdep.c
--- sys/arch/evbmips/rmixl/machdep.c	12 Apr 2011 00:21:10 -0000	1.9
+++ sys/arch/evbmips/rmixl/machdep.c	23 Jan 2012 22:01:45 -0000
@@ -611,7 +611,7 @@ rmixl_physaddr_init(void)
 	u_long size;
 	uint32_t r;
 
-	ext = extent_create("physaddr", start, end, M_DEVBUF,
+	ext = extent_create("physaddr", start, end,
 		(void *)rmixl_physaddr_storage, sizeof(rmixl_physaddr_storage),
 		EX_NOWAIT | EX_NOCOALESCE);
 
Index: sys/arch/evbppc/mpc85xx/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/evbppc/mpc85xx/machdep.c,v
retrieving revision 1.21
diff -u -p -r1.21 machdep.c
--- sys/arch/evbppc/mpc85xx/machdep.c	2 Aug 2011 00:25:38 -0000	1.21
+++ sys/arch/evbppc/mpc85xx/machdep.c	23 Jan 2012 22:01:46 -0000
@@ -1369,12 +1369,12 @@ cpu_startup(void)
 #if NPCI > 0 && defined(PCI_MEMBASE)
 	pcimem_ex = extent_create("pcimem",
 	    PCI_MEMBASE, PCI_MEMBASE + 4*PCI_MEMSIZE,
-	    M_DEVBUF, NULL, 0, EX_WAITOK);
+	    NULL, 0, EX_WAITOK);
 #endif
 #if NPCI > 0 && defined(PCI_IOBASE)
 	pciio_ex = extent_create("pciio",
 	    PCI_IOBASE, PCI_IOBASE + 4*PCI_IOSIZE,
-	    M_DEVBUF, NULL, 0, EX_WAITOK);
+	    NULL, 0, EX_WAITOK);
 #endif
 	mpc85xx_extirq_setup();
 	/*
Index: sys/arch/ews4800mips/ews4800mips/bus_space.c
===================================================================
RCS file: /cvsroot/src/sys/arch/ews4800mips/ews4800mips/bus_space.c,v
retrieving revision 1.4
diff -u -p -r1.4 bus_space.c
--- sys/arch/ews4800mips/ews4800mips/bus_space.c	28 Apr 2008 20:23:18 -0000	1.4
+++ sys/arch/ews4800mips/ews4800mips/bus_space.c	23 Jan 2012 22:01:46 -0000
@@ -166,7 +166,7 @@ bus_space_create(bus_space_tag_t t, cons
 		ebs->ebs_size = size;
 	} else {
 		ebs->ebs_extent = extent_create(name, addr, addr + size - 1,
-		    M_DEVBUF, 0, 0, EX_NOWAIT);
+		    0, 0, EX_NOWAIT);
 		if (ebs->ebs_extent == NULL) {
 			panic("%s:: unable to create bus_space for "
 			    "0x%08lx-%#lx", __func__, addr, size);
Index: sys/arch/hp300/hp300/autoconf.c
===================================================================
RCS file: /cvsroot/src/sys/arch/hp300/hp300/autoconf.c,v
retrieving revision 1.98
diff -u -p -r1.98 autoconf.c
--- sys/arch/hp300/hp300/autoconf.c	10 May 2011 14:38:08 -0000	1.98
+++ sys/arch/hp300/hp300/autoconf.c	23 Jan 2012 22:01:46 -0000
@@ -900,7 +900,7 @@ iomap_init(void)
 
 	/* extiobase is initialized by pmap_bootstrap(). */
 	extio_ex = extent_create("extio", (u_long) extiobase,
-	    (u_long) extiobase + (ptoa(EIOMAPSIZE) - 1), M_DEVBUF,
+	    (u_long) extiobase + (ptoa(EIOMAPSIZE) - 1),
 	    (void *) extio_ex_storage, sizeof(extio_ex_storage),
 	    EX_NOCOALESCE|EX_NOWAIT);
 }
Index: sys/arch/hp700/dev/astro.c
===================================================================
RCS file: /cvsroot/src/sys/arch/hp700/dev/astro.c,v
retrieving revision 1.13
diff -u -p -r1.13 astro.c
--- sys/arch/hp700/dev/astro.c	13 Jan 2011 21:15:13 -0000	1.13
+++ sys/arch/hp700/dev/astro.c	23 Jan 2012 22:01:46 -0000
@@ -328,7 +328,7 @@ astro_attach(device_t parent, device_t s
 	snprintf(sc->sc_dvmamapname, sizeof(sc->sc_dvmamapname),
 	    "%s_dvma", device_xname(sc->sc_dv));
 	sc->sc_dvmamap = extent_create(sc->sc_dvmamapname, 0, (1 << iova_bits),
-	    M_DEVBUF, 0, 0, EX_NOWAIT);
+	    0, 0, EX_NOWAIT);
 
 	sc->sc_dmatag = astro_dmat;
 	sc->sc_dmatag._cookie = sc;
Index: sys/arch/hp700/dev/dino.c
===================================================================
RCS file: /cvsroot/src/sys/arch/hp700/dev/dino.c,v
retrieving revision 1.32
diff -u -p -r1.32 dino.c
--- sys/arch/hp700/dev/dino.c	17 May 2011 17:34:49 -0000	1.32
+++ sys/arch/hp700/dev/dino.c	23 Jan 2012 22:01:47 -0000
@@ -1643,7 +1643,7 @@ dinoattach(device_t parent, device_t sel
 	snprintf(sc->sc_ioexname, sizeof(sc->sc_ioexname),
 	    "%s_io", device_xname(self));
 	if ((sc->sc_ioex = extent_create(sc->sc_ioexname, 0, 0xffff,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT | EX_MALLOCOK)) == NULL) {
+	    NULL, 0, EX_NOWAIT | EX_MALLOCOK)) == NULL) {
 		aprint_error(": can't allocate I/O extent map\n");
 		bus_space_unmap(sc->sc_bt, sc->sc_bh, PAGE_SIZE);
 		return;
Index: sys/arch/hp700/dev/elroy.c
===================================================================
RCS file: /cvsroot/src/sys/arch/hp700/dev/elroy.c,v
retrieving revision 1.11
diff -u -p -r1.11 elroy.c
--- sys/arch/hp700/dev/elroy.c	17 May 2011 17:34:49 -0000	1.11
+++ sys/arch/hp700/dev/elroy.c	23 Jan 2012 22:01:47 -0000
@@ -454,7 +454,7 @@ elroy_alloc_parent(device_t self, struct
 			snprintf(sc->sc_memexname, sizeof(sc->sc_memexname),
 			    "%s_mem", sc->sc_dv.dv_xname);
 			if ((sc->sc_memex = extent_create(sc->sc_memexname,
-			    mem_start, mem_start + ELROY_MEM_WINDOW, M_DEVBUF,
+			    mem_start, mem_start + ELROY_MEM_WINDOW,
 			    NULL, 0, EX_NOWAIT | EX_MALLOCOK)) == NULL) {
 				extent_destroy(sc->sc_ioex);
 				bus_space_free(sc->sc_bt, memh,
Index: sys/arch/hp700/hp700/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/hp700/hp700/machdep.c,v
retrieving revision 1.104
diff -u -p -r1.104 machdep.c
--- sys/arch/hp700/hp700/machdep.c	9 Jan 2012 19:40:54 -0000	1.104
+++ sys/arch/hp700/hp700/machdep.c	23 Jan 2012 22:01:48 -0000
@@ -515,7 +515,7 @@ hppa_init(paddr_t start, void *bi)
 
 	/* we hope this won't fail */
 	hp700_io_extent = extent_create("io",
-	    HPPA_IOSPACE, 0xffffffff, M_DEVBUF,
+	    HPPA_IOSPACE, 0xffffffff,
 	    (void *)hp700_io_extent_store, sizeof(hp700_io_extent_store),
 	    EX_NOCOALESCE|EX_NOWAIT);
 
Index: sys/arch/hpcmips/hpcmips/bus_space.c
===================================================================
RCS file: /cvsroot/src/sys/arch/hpcmips/hpcmips/bus_space.c,v
retrieving revision 1.30
diff -u -p -r1.30 bus_space.c
--- sys/arch/hpcmips/hpcmips/bus_space.c	26 Feb 2011 12:07:45 -0000	1.30
+++ sys/arch/hpcmips/hpcmips/bus_space.c	23 Jan 2012 22:01:48 -0000
@@ -216,7 +216,7 @@ hpcmips_init_bus_space(struct bus_space_
 	}
 
 	t->extent = (void*)extent_create(t->name, t->base, 
-	    t->base + t->size, M_DEVBUF,
+	    t->base + t->size,
 	    0, 0, EX_NOWAIT);
 	if (!t->extent) {
 		panic("hpcmips_init_bus_space_extent:"
Index: sys/arch/hpcmips/tx/tx3912video.c
===================================================================
RCS file: /cvsroot/src/sys/arch/hpcmips/tx/tx3912video.c,v
retrieving revision 1.40
diff -u -p -r1.40 tx3912video.c
--- sys/arch/hpcmips/tx/tx3912video.c	14 Mar 2009 15:36:07 -0000	1.40
+++ sys/arch/hpcmips/tx/tx3912video.c	23 Jan 2012 22:01:48 -0000
@@ -366,7 +366,7 @@ tx3912video_framebuffer_alloc(struct vid
 
 	/* extent V-RAM region */
 	ex = extent_create("Frame buffer address", fb_start, *fb_end,
-	    0, (void *)ex_fixed, sizeof ex_fixed,
+	    (void *)ex_fixed, sizeof ex_fixed,
 	    EX_NOWAIT);
 	if (ex == 0)
 		return (1);
Index: sys/arch/hpcsh/hpcsh/bus_space.c
===================================================================
RCS file: /cvsroot/src/sys/arch/hpcsh/hpcsh/bus_space.c,v
retrieving revision 1.18
diff -u -p -r1.18 bus_space.c
--- sys/arch/hpcsh/hpcsh/bus_space.c	20 Jul 2011 20:46:49 -0000	1.18
+++ sys/arch/hpcsh/hpcsh/bus_space.c	23 Jan 2012 22:01:48 -0000
@@ -156,7 +156,7 @@ bus_space_create(struct hpcsh_bus_space 
 		hbs->hbs_base_addr = addr; /* no extent */
 	} else {
 		hbs->hbs_extent = extent_create(name, addr, addr + size - 1,
-						M_DEVBUF, 0, 0, EX_NOWAIT);
+						0, 0, EX_NOWAIT);
 		if (hbs->hbs_extent == NULL) {
 			panic("%s:: unable to create bus_space for "
 			      "0x%08lx-%#lx", __func__, addr, size);
Index: sys/arch/hppa/hppa/hppa_machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/hppa/hppa/hppa_machdep.c,v
retrieving revision 1.26
diff -u -p -r1.26 hppa_machdep.c
--- sys/arch/hppa/hppa/hppa_machdep.c	8 Dec 2011 21:00:49 -0000	1.26
+++ sys/arch/hppa/hppa/hppa_machdep.c	23 Jan 2012 22:01:49 -0000
@@ -365,16 +365,12 @@ hppa_ras(struct lwp *l)
 void
 cpu_need_resched(struct cpu_info *ci, int flags)
 {
-	bool immed = (flags & RESCHED_IMMED) != 0;
-
-	if (ci->ci_want_resched && !immed)
-		return;
 	ci->ci_want_resched = 1;
 	setsoftast(ci->ci_data.cpu_onproc);
 
 #ifdef MULTIPROCESSOR
 	if (ci->ci_curlwp != ci->ci_data.cpu_idlelwp) {
-		if (immed && ci != curcpu()) {
+		if (ci != curcpu()) {
 			/* XXX send IPI */
 		}
 	}
Index: sys/arch/ibmnws/ibmnws/mainbus.c
===================================================================
RCS file: /cvsroot/src/sys/arch/ibmnws/ibmnws/mainbus.c,v
retrieving revision 1.10
diff -u -p -r1.10 mainbus.c
--- sys/arch/ibmnws/ibmnws/mainbus.c	1 Jul 2011 20:47:43 -0000	1.10
+++ sys/arch/ibmnws/ibmnws/mainbus.c	23 Jan 2012 22:01:50 -0000
@@ -124,9 +124,9 @@ mainbus_attach(device_t parent, device_t
 	ibmnws_pci_get_chipset_tag_indirect (genppc_pct);
 
 #ifdef PCI_NETBSD_CONFIGURE
-	ioext  = extent_create("pciio",  0x00008000, 0x0000ffff, M_DEVBUF,
+	ioext  = extent_create("pciio",  0x00008000, 0x0000ffff,
 	    NULL, 0, EX_NOWAIT);
-	memext = extent_create("pcimem", 0x00000000, 0x0fffffff, M_DEVBUF,
+	memext = extent_create("pcimem", 0x00000000, 0x0fffffff,
 	    NULL, 0, EX_NOWAIT);
 
 	pci_configure_bus(genppc_pct, ioext, memext, NULL, 0, CACHELINESIZE);
Index: sys/arch/m68k/m68k/pmap_motorola.c
===================================================================
RCS file: /cvsroot/src/sys/arch/m68k/m68k/pmap_motorola.c,v
retrieving revision 1.64
diff -u -p -r1.64 pmap_motorola.c
--- sys/arch/m68k/m68k/pmap_motorola.c	3 Nov 2011 14:39:05 -0000	1.64
+++ sys/arch/m68k/m68k/pmap_motorola.c	23 Jan 2012 22:01:51 -0000
@@ -243,7 +243,7 @@ vsize_t		Sysptsize = VM_KERNEL_PT_PAGES;
 static struct pmap kernel_pmap_store;
 struct pmap	*const kernel_pmap_ptr = &kernel_pmap_store;
 struct vm_map	*st_map, *pt_map;
-struct vm_map_kernel st_map_store, pt_map_store;
+struct vm_map st_map_store, pt_map_store;
 
 vaddr_t		lwp0uarea;	/* lwp0 u-area VA, initialized in bootstrap */
 
Index: sys/arch/mac68k/mac68k/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mac68k/mac68k/machdep.c,v
retrieving revision 1.342
diff -u -p -r1.342 machdep.c
--- sys/arch/mac68k/mac68k/machdep.c	12 Dec 2011 19:03:10 -0000	1.342
+++ sys/arch/mac68k/mac68k/machdep.c	23 Jan 2012 22:01:51 -0000
@@ -278,7 +278,7 @@ mac68k_init(void)
 	 * on the machine.  When the amount of RAM is found, all
 	 * extents of RAM are allocated from the map.
 	 */
-	iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF,
+	iomem_ex = extent_create("iomem", 0x0, 0xffffffff,
 	    (void *)iomem_ex_storage, sizeof(iomem_ex_storage),
 	    EX_NOCOALESCE|EX_NOWAIT);
 
Index: sys/arch/mips/alchemy/au_himem_space.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/alchemy/au_himem_space.c,v
retrieving revision 1.13
diff -u -p -r1.13 au_himem_space.c
--- sys/arch/mips/alchemy/au_himem_space.c	10 Jul 2011 23:13:23 -0000	1.13
+++ sys/arch/mips/alchemy/au_himem_space.c	23 Jan 2012 22:01:52 -0000
@@ -701,7 +701,7 @@ au_himem_space_init(bus_space_tag_t bst,
 	c->c_physoff = physoff;
 
 	/* allocate extent manager */
-	c->c_extent = extent_create(name, start, end, M_DEVBUF,
+	c->c_extent = extent_create(name, start, end,
 	    NULL, 0, EX_NOWAIT);
 	if (c->c_extent == NULL)
 		panic("au_himem_space_init: %s: cannot create extent", name);
Index: sys/arch/mips/alchemy/au_wired_space.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/alchemy/au_wired_space.c,v
retrieving revision 1.8
diff -u -p -r1.8 au_wired_space.c
--- sys/arch/mips/alchemy/au_wired_space.c	10 Jul 2011 23:13:23 -0000	1.8
+++ sys/arch/mips/alchemy/au_wired_space.c	23 Jan 2012 22:01:52 -0000
@@ -641,7 +641,7 @@ au_wired_space_init(bus_space_tag_t bst,
 	c->c_size = size;
 
 	/* allocate extent manager */
-	c->c_extent = extent_create(name, start, start + size, M_DEVBUF,
+	c->c_extent = extent_create(name, start, start + size, 
 	    (void *)c->c_exstore, sizeof (c->c_exstore), EX_NOWAIT);
 	if (c->c_extent == NULL)
 		panic("au_wired_space_init: %s: cannot create extent", name);
Index: sys/arch/mips/alchemy/dev/aupci.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/alchemy/dev/aupci.c,v
retrieving revision 1.12
diff -u -p -r1.12 aupci.c
--- sys/arch/mips/alchemy/dev/aupci.c	3 Jan 2012 07:36:02 -0000	1.12
+++ sys/arch/mips/alchemy/dev/aupci.c	23 Jan 2012 22:01:52 -0000
@@ -259,10 +259,10 @@ aupciattach(device_t parent, device_t se
 
 #ifdef PCI_NETBSD_CONFIGURE
 	mem_ex = extent_create("pcimem", mstart, 0xffffffff,
-	    M_DEVBUF, NULL, 0, EX_WAITOK);
+	    NULL, 0, EX_WAITOK);
 
 	io_ex = extent_create("pciio", AUPCI_IO_START, AUPCI_IO_END,
-	    M_DEVBUF, NULL, 0, EX_WAITOK);
+	    NULL, 0, EX_WAITOK);
 
 	pci_configure_bus(&sc->sc_pc,
 	    io_ex, mem_ex, NULL, 0, mips_cache_info.mci_dcache_align);
Index: sys/arch/mips/mips/bus_space_alignstride_chipdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/mips/bus_space_alignstride_chipdep.c,v
retrieving revision 1.18
diff -u -p -r1.18 bus_space_alignstride_chipdep.c
--- sys/arch/mips/mips/bus_space_alignstride_chipdep.c	23 Sep 2011 12:42:15 -0000	1.18
+++ sys/arch/mips/mips/bus_space_alignstride_chipdep.c	23 Jan 2012 22:01:52 -0000
@@ -1256,7 +1256,7 @@ __BS(init)(bus_space_tag_t t, void *v)
 
 #ifdef CHIP_EXTENT
 	/* XXX WE WANT EXTENT_NOCOALESCE, BUT WE CAN'T USE IT. XXX */
-	ex = extent_create(__S(__BS(bus)), 0x0UL, ~0UL, M_DEVBUF,
+	ex = extent_create(__S(__BS(bus)), 0x0UL, ~0UL, 
 	    (void *)CHIP_EX_STORE(v), CHIP_EX_STORE_SIZE(v), EX_NOWAIT);
 	extent_alloc_region(ex, 0, ~0UL, EX_NOWAIT);
 
Index: sys/arch/mips/mips/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/mips/pmap.c,v
retrieving revision 1.205
diff -u -p -r1.205 pmap.c
--- sys/arch/mips/mips/pmap.c	23 Sep 2011 23:02:23 -0000	1.205
+++ sys/arch/mips/mips/pmap.c	23 Jan 2012 22:01:53 -0000
@@ -496,11 +496,6 @@ pmap_bootstrap(void)
 	pmap_tlb_info_init(&pmap_tlb0_info);		/* init the lock */
 
 	/*
-	 * Compute the number of pages kmem_map will have.
-	 */
-	kmeminit_nkmempages();
-
-	/*
 	 * Figure out how many PTE's are necessary to map the kernel.
 	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
 	 */
@@ -512,7 +507,7 @@ pmap_bootstrap(void)
 
 	Sysmapsize = (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) +
 	    bufsz + 16 * NCARGS + pager_map_size + iospace_size) / NBPG +
-	    (maxproc * UPAGES) + nkmempages;
+	    (maxproc * UPAGES) + 1024 * 1024;
 #ifdef DEBUG
 	{
 		extern int kmem_guard_depth;
Index: sys/arch/mips/rmi/rmixl_pcie.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/rmi/rmixl_pcie.c,v
retrieving revision 1.8
diff -u -p -r1.8 rmixl_pcie.c
--- sys/arch/mips/rmi/rmixl_pcie.c	10 Jul 2011 23:13:22 -0000	1.8
+++ sys/arch/mips/rmi/rmixl_pcie.c	23 Jan 2012 22:01:54 -0000
@@ -703,12 +703,12 @@ rmixl_pcie_init(struct rmixl_pcie_softc 
 	ioext  = extent_create("pciio",
 		rcp->rc_pci_io_pbase,
 		rcp->rc_pci_io_pbase + rcp->rc_pci_io_size - 1,
-		M_DEVBUF, NULL, 0, EX_NOWAIT);
+		NULL, 0, EX_NOWAIT);
 
 	memext = extent_create("pcimem",
 		rcp->rc_pci_mem_pbase,
 		rcp->rc_pci_mem_pbase + rcp->rc_pci_mem_size - 1,
-		M_DEVBUF, NULL, 0, EX_NOWAIT);
+		NULL, 0, EX_NOWAIT);
 
 	pci_configure_bus(pc, ioext, memext, NULL, 0,
 	    mips_cache_info.mci_dcache_align);
Index: sys/arch/mipsco/mipsco/bus_space.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mipsco/mipsco/bus_space.c,v
retrieving revision 1.11
diff -u -p -r1.11 bus_space.c
--- sys/arch/mipsco/mipsco/bus_space.c	18 Mar 2009 10:22:32 -0000	1.11
+++ sys/arch/mipsco/mipsco/bus_space.c	23 Jan 2012 22:01:54 -0000
@@ -69,7 +69,7 @@ void
 mipsco_bus_space_init_extent(bus_space_tag_t bst, void *storage, size_t storagesize)
 {
 	bst->bs_extent = extent_create(bst->bs_name,
-	    bst->bs_start, bst->bs_start + bst->bs_size, M_DEVBUF,
+	    bst->bs_start, bst->bs_start + bst->bs_size,
 	    storage, storagesize, EX_NOWAIT);
 	if (bst->bs_extent == NULL)
 	    panic("mipsco_bus_space_init_extent: cannot create extent map %s",
Index: sys/arch/mvmeppc/mvmeppc/mainbus.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mvmeppc/mvmeppc/mainbus.c,v
retrieving revision 1.14
diff -u -p -r1.14 mainbus.c
--- sys/arch/mvmeppc/mvmeppc/mainbus.c	1 Jul 2011 20:49:38 -0000	1.14
+++ sys/arch/mvmeppc/mvmeppc/mainbus.c	23 Jan 2012 22:01:54 -0000
@@ -118,9 +118,9 @@ mainbus_attach(device_t parent, device_t
 	SIMPLEQ_INSERT_TAIL(&genppc_pct->pc_pbi, pbi, next);
 
 #ifdef PCI_NETBSD_CONFIGURE
-	ioext  = extent_create("pciio",  0x00008000, 0x0000ffff, M_DEVBUF,
+	ioext  = extent_create("pciio",  0x00008000, 0x0000ffff,
 	    NULL, 0, EX_NOWAIT);
-	memext = extent_create("pcimem", 0x00000000, 0x0fffffff, M_DEVBUF,
+	memext = extent_create("pcimem", 0x00000000, 0x0fffffff,
 	    NULL, 0, EX_NOWAIT);
 
 	pci_configure_bus(0, ioext, memext, NULL, 0, 32);
Index: sys/arch/ofppc/pci/ofwpci.c
===================================================================
RCS file: /cvsroot/src/sys/arch/ofppc/pci/ofwpci.c,v
retrieving revision 1.10
diff -u -p -r1.10 ofwpci.c
--- sys/arch/ofppc/pci/ofwpci.c	18 Jun 2011 08:08:29 -0000	1.10
+++ sys/arch/ofppc/pci/ofwpci.c	23 Jan 2012 22:01:54 -0000
@@ -202,9 +202,9 @@ ofwpci_attach(device_t parent, device_t 
 	ioext  = extent_create("pciio",
 	    modeldata.pciiodata[device_unit(self)].start,
 	    modeldata.pciiodata[device_unit(self)].limit,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	memext = extent_create("pcimem", sc->sc_memt.pbs_base,
-	    sc->sc_memt.pbs_limit-1, M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    sc->sc_memt.pbs_limit-1, NULL, 0, EX_NOWAIT);
 
 	if (pci_configure_bus(pc, ioext, memext, NULL, 0, CACHELINESIZE))
 		aprint_error("pci_configure_bus() failed\n");
Index: sys/arch/powerpc/booke/booke_pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/booke/booke_pmap.c,v
retrieving revision 1.10
diff -u -p -r1.10 booke_pmap.c
--- sys/arch/powerpc/booke/booke_pmap.c	27 Sep 2011 01:02:35 -0000	1.10
+++ sys/arch/powerpc/booke/booke_pmap.c	23 Jan 2012 22:01:55 -0000
@@ -146,11 +146,6 @@ pmap_bootstrap(vaddr_t startkernel, vadd
 	pmap_tlb_info_init(&pmap_tlb0_info);		/* init the lock */
 
 	/*
-	 * Compute the number of pages kmem_map will have.
-	 */
-	kmeminit_nkmempages();
-
-	/*
 	 * Figure out how many PTE's are necessary to map the kernel.
 	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
 	 */
@@ -169,7 +164,7 @@ pmap_bootstrap(vaddr_t startkernel, vadd
 #ifdef SYSVSHM
 	    + NBPG * shminfo.shmall
 #endif
-	    + NBPG * nkmempages);
+	    + NBPG * 32 * 1024);
 
 	/*
 	 * Initialize `FYI' variables.	Note we're relying on
Index: sys/arch/powerpc/booke/pci/pq3pci.c
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/booke/pci/pq3pci.c,v
retrieving revision 1.11
diff -u -p -r1.11 pq3pci.c
--- sys/arch/powerpc/booke/pci/pq3pci.c	27 Sep 2011 01:02:35 -0000	1.11
+++ sys/arch/powerpc/booke/pci/pq3pci.c	23 Jan 2012 22:01:55 -0000
@@ -858,9 +858,9 @@ pq3pci_cpunode_attach(device_t parent, d
 		}
 
 		struct extent *ioext = extent_create("pciio", 0, PCI_IOSIZE,
-		     M_DEVBUF, NULL, 0, EX_NOWAIT);
+		     NULL, 0, EX_NOWAIT);
 		struct extent *memext = extent_create("pcimem", membase,
-		     membase + PCI_MEMSIZE, M_DEVBUF, NULL, 0, EX_NOWAIT);
+		     membase + PCI_MEMSIZE, NULL, 0, EX_NOWAIT);
 
 		error = pci_configure_bus(pc, ioext, memext, NULL, 0,
 		    curcpu()->ci_ci.dcache_line_size);
Index: sys/arch/powerpc/ibm4xx/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/ibm4xx/pmap.c,v
retrieving revision 1.71
diff -u -p -r1.71 pmap.c
--- sys/arch/powerpc/ibm4xx/pmap.c	9 Jan 2012 06:49:25 -0000	1.71
+++ sys/arch/powerpc/ibm4xx/pmap.c	23 Jan 2012 22:01:55 -0000
@@ -72,7 +72,7 @@ __KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.7
 #include <sys/param.h>
 #include <sys/cpu.h>
 #include <sys/device.h>
-#include <sys/malloc.h>
+#include <sys/kmem.h>
 #include <sys/pool.h>
 #include <sys/proc.h>
 #include <sys/queue.h>
@@ -612,7 +612,7 @@ pmap_create(void)
 {
 	struct pmap *pm;
 
-	pm = malloc(sizeof *pm, M_VMPMAP, M_WAITOK);
+	pm = kmem_alloc(sizeof(*pm), KM_SLEEP);
 	memset(pm, 0, sizeof *pm);
 	pm->pm_refs = 1;
 	return pm;
@@ -650,7 +650,7 @@ pmap_destroy(struct pmap *pm)
 		}
 	if (pm->pm_ctx)
 		ctx_free(pm);
-	free(pm, M_VMPMAP);
+	kmem_free(pm, sizeof(*pm));
 }
 
 /*
Index: sys/arch/powerpc/ibm4xx/pci/pchb.c
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/ibm4xx/pci/pchb.c,v
retrieving revision 1.10
diff -u -p -r1.10 pchb.c
--- sys/arch/powerpc/ibm4xx/pci/pchb.c	22 Jun 2011 18:06:34 -0000	1.10
+++ sys/arch/powerpc/ibm4xx/pci/pchb.c	23 Jan 2012 22:01:55 -0000
@@ -161,11 +161,11 @@ pchbattach(device_t parent, device_t sel
 #ifdef PCI_NETBSD_CONFIGURE
 	struct extent *memext = extent_create("pcimem",
 	    IBM405GP_PCI_MEM_START,
-	    IBM405GP_PCI_MEM_START + 0x1fffffff, M_DEVBUF, NULL, 0,
+	    IBM405GP_PCI_MEM_START + 0x1fffffff, NULL, 0,
 	    EX_NOWAIT);
 	struct extent *ioext = extent_create("pciio",
 	    IBM405GP_PCI_PCI_IO_START,
-	    IBM405GP_PCI_PCI_IO_START + 0xffff, M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    IBM405GP_PCI_PCI_IO_START + 0xffff, NULL, 0, EX_NOWAIT);
 	pci_configure_bus(pc, ioext, memext, NULL, 0, 32);
 	extent_destroy(ioext);
 	extent_destroy(memext);
Index: sys/arch/powerpc/powerpc/bus_space.c
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/powerpc/bus_space.c,v
retrieving revision 1.29
diff -u -p -r1.29 bus_space.c
--- sys/arch/powerpc/powerpc/bus_space.c	17 Dec 2011 19:34:07 -0000	1.29
+++ sys/arch/powerpc/powerpc/bus_space.c	23 Jan 2012 22:01:56 -0000
@@ -401,7 +401,7 @@ bus_space_init(struct powerpc_bus_space 
 {
 	if (t->pbs_extent == NULL && extent_name != NULL) {
 		t->pbs_extent = extent_create(extent_name, t->pbs_base,
-		    t->pbs_limit-1, M_DEVBUF, storage, storage_size,
+		    t->pbs_limit-1, storage, storage_size,
 		    EX_NOCOALESCE|EX_NOWAIT);
 		if (t->pbs_extent == NULL)
 			return ENOMEM;
Index: sys/arch/powerpc/powerpc/powerpc_machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/powerpc/powerpc/powerpc_machdep.c,v
retrieving revision 1.61
diff -u -p -r1.61 powerpc_machdep.c
--- sys/arch/powerpc/powerpc/powerpc_machdep.c	12 Dec 2011 19:03:11 -0000	1.61
+++ sys/arch/powerpc/powerpc/powerpc_machdep.c	23 Jan 2012 22:01:56 -0000
@@ -434,7 +434,7 @@ cpu_need_resched(struct cpu_info *ci, in
 #endif
 	l->l_md.md_astpending = 1;		/* force call to ast() */
 #if defined(MULTIPROCESSOR)
-	if (ci != cur_ci && (flags & RESCHED_IMMED)) {
+	if (ci != cur_ci) {
 		cpu_send_ipi(cpu_index(ci), IPI_NOMESG);
 	} 
 #endif
Index: sys/arch/prep/prep/mainbus.c
===================================================================
RCS file: /cvsroot/src/sys/arch/prep/prep/mainbus.c,v
retrieving revision 1.32
diff -u -p -r1.32 mainbus.c
--- sys/arch/prep/prep/mainbus.c	1 Jul 2011 20:52:02 -0000	1.32
+++ sys/arch/prep/prep/mainbus.c	23 Jan 2012 22:01:56 -0000
@@ -144,9 +144,9 @@ mainbus_attach(device_t parent, device_t
 	setup_pciintr_map(pbi, 0, 0, 0);
 
 #ifdef PCI_NETBSD_CONFIGURE
-	ioext  = extent_create("pciio",  0x00008000, 0x0000ffff, M_DEVBUF,
+	ioext  = extent_create("pciio",  0x00008000, 0x0000ffff,
 	    NULL, 0, EX_NOWAIT);
-	memext = extent_create("pcimem", 0x00000000, 0x0fffffff, M_DEVBUF,
+	memext = extent_create("pcimem", 0x00000000, 0x0fffffff,
 	    NULL, 0, EX_NOWAIT);
 
 	pci_configure_bus(genppc_pct, ioext, memext, NULL, 0, CACHELINESIZE);
Index: sys/arch/sandpoint/sandpoint/mainbus.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sandpoint/sandpoint/mainbus.c,v
retrieving revision 1.27
diff -u -p -r1.27 mainbus.c
--- sys/arch/sandpoint/sandpoint/mainbus.c	14 Jan 2012 19:39:25 -0000	1.27
+++ sys/arch/sandpoint/sandpoint/mainbus.c	23 Jan 2012 22:01:56 -0000
@@ -112,9 +112,9 @@ mainbus_attach(device_t parent, device_t
 	 */
 #if NPCI > 0
 #if defined(PCI_NETBSD_CONFIGURE)
-	ioext  = extent_create("pciio",  0x00001000, 0x0000ffff, M_DEVBUF,
+	ioext  = extent_create("pciio",  0x00001000, 0x0000ffff,
 	    NULL, 0, EX_NOWAIT);
-	memext = extent_create("pcimem", 0x80000000, 0x8fffffff, M_DEVBUF,
+	memext = extent_create("pcimem", 0x80000000, 0x8fffffff,
 	    NULL, 0, EX_NOWAIT);
 
 	pci_configure_bus(0, ioext, memext, NULL, 0, 32);
Index: sys/arch/sgimips/gio/pci_gio.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sgimips/gio/pci_gio.c,v
retrieving revision 1.9
diff -u -p -r1.9 pci_gio.c
--- sys/arch/sgimips/gio/pci_gio.c	1 Jul 2011 18:53:46 -0000	1.9
+++ sys/arch/sgimips/gio/pci_gio.c	23 Jan 2012 22:01:56 -0000
@@ -214,7 +214,7 @@ giopci_attach(struct device *parent, str
 
 #ifdef PCI_NETBSD_CONFIGURE
 	pc->pc_memext = extent_create("giopcimem", m_start, m_end,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	pci_configure_bus(pc, NULL, pc->pc_memext, NULL, 0,
 	    mips_cache_info.mci_dcache_align);
 #endif
Index: sys/arch/sgimips/mace/pci_mace.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sgimips/mace/pci_mace.c,v
retrieving revision 1.13
diff -u -p -r1.13 pci_mace.c
--- sys/arch/sgimips/mace/pci_mace.c	1 Jul 2011 18:53:47 -0000	1.13
+++ sys/arch/sgimips/mace/pci_mace.c	23 Jan 2012 22:01:57 -0000
@@ -146,9 +146,9 @@ macepci_attach(struct device *parent, st
 
 #if NPCI > 0
 	pc->pc_ioext = extent_create("macepciio", 0x00001000, 0x01ffffff,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	pc->pc_memext = extent_create("macepcimem", 0x80100000, 0x81ffffff,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	pci_configure_bus(pc, pc->pc_ioext, pc->pc_memext, NULL, 0,
 	    mips_cache_info.mci_dcache_align);
 	memset(&pba, 0, sizeof pba);
Index: sys/arch/sh3/dev/shpcic.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sh3/dev/shpcic.c,v
retrieving revision 1.16
diff -u -p -r1.16 shpcic.c
--- sys/arch/sh3/dev/shpcic.c	21 Jan 2012 19:44:30 -0000	1.16
+++ sys/arch/sh3/dev/shpcic.c	23 Jan 2012 22:01:57 -0000
@@ -228,10 +228,10 @@ shpcic_attach(device_t parent, device_t 
 #ifdef PCI_NETBSD_CONFIGURE
 	ioext  = extent_create("pciio",
 	    SH4_PCIC_IO, SH4_PCIC_IO + SH4_PCIC_IO_SIZE - 1,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	memext = extent_create("pcimem",
 	    SH4_PCIC_MEM, SH4_PCIC_MEM + SH4_PCIC_MEM_SIZE - 1,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 
 	pci_configure_bus(NULL, ioext, memext, NULL, 0, sh_cache_line_size);
 
Index: sys/arch/sparc/dev/vme_machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc/dev/vme_machdep.c,v
retrieving revision 1.65
diff -u -p -r1.65 vme_machdep.c
--- sys/arch/sparc/dev/vme_machdep.c	18 Jul 2011 00:31:13 -0000	1.65
+++ sys/arch/sparc/dev/vme_machdep.c	23 Jan 2012 22:01:57 -0000
@@ -315,7 +315,7 @@ vmeattach_mainbus(device_t parent, devic
 		sizeof(vmebus_translations)/sizeof(vmebus_translations[0]);
 
 	vme_dvmamap = extent_create("vmedvma", VME4_DVMA_BASE, VME4_DVMA_END,
-				    M_DEVBUF, 0, 0, EX_NOWAIT);
+				    0, 0, EX_NOWAIT);
 	if (vme_dvmamap == NULL)
 		panic("vme: unable to allocate DVMA map");
 
Index: sys/arch/sparc/sparc/iommu.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc/sparc/iommu.c,v
retrieving revision 1.94
diff -u -p -r1.94 iommu.c
--- sys/arch/sparc/sparc/iommu.c	17 Jul 2011 23:18:23 -0000	1.94
+++ sys/arch/sparc/sparc/iommu.c	23 Jan 2012 22:01:58 -0000
@@ -276,7 +276,7 @@ iommu_attach(device_t parent, device_t s
 
 	sc->sc_dvmamap = extent_create("iommudvma",
 					IOMMU_DVMA_BASE, IOMMU_DVMA_END,
-					M_DEVBUF, 0, 0, EX_NOWAIT);
+					0, 0, EX_NOWAIT);
 	if (sc->sc_dvmamap == NULL)
 		panic("iommu: unable to allocate DVMA map");
 
Index: sys/arch/sparc/sparc/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc/sparc/machdep.c,v
retrieving revision 1.314
diff -u -p -r1.314 machdep.c
--- sys/arch/sparc/sparc/machdep.c	12 Dec 2011 19:03:11 -0000	1.314
+++ sys/arch/sparc/sparc/machdep.c	23 Jan 2012 22:02:00 -0000
@@ -323,7 +323,7 @@ cpu_startup(void)
 		 */
 		dvmamap24 = extent_create("dvmamap24",
 					  D24_DVMA_BASE, D24_DVMA_END,
-					  M_DEVBUF, 0, 0, EX_NOWAIT);
+					  0, 0, EX_NOWAIT);
 		if (dvmamap24 == NULL)
 			panic("unable to allocate DVMA map");
 	}
Index: sys/arch/sparc64/dev/iommu.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc64/dev/iommu.c,v
retrieving revision 1.105
diff -u -p -r1.105 iommu.c
--- sys/arch/sparc64/dev/iommu.c	8 Oct 2011 08:49:07 -0000	1.105
+++ sys/arch/sparc64/dev/iommu.c	23 Jan 2012 22:02:01 -0000
@@ -200,7 +200,7 @@ iommu_init(char *name, struct iommu_stat
 		(unsigned long long)(is->is_ptsb + size - 1));
 	is->is_dvmamap = extent_create(name,
 	    is->is_dvmabase, is->is_dvmaend,
-	    M_DEVBUF, 0, 0, EX_NOWAIT);
+	    0, 0, EX_NOWAIT);
 	/* XXXMRG Check is_dvmamap is valid. */
 
 	/*
Index: sys/arch/sparc64/dev/psycho.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc64/dev/psycho.c,v
retrieving revision 1.111
diff -u -p -r1.111 psycho.c
--- sys/arch/sparc64/dev/psycho.c	4 Sep 2011 12:17:14 -0000	1.111
+++ sys/arch/sparc64/dev/psycho.c	23 Jan 2012 22:02:02 -0000
@@ -796,8 +796,7 @@ psycho_alloc_extent(struct psycho_pbm *p
 	}
 
 	/* create extent */
-	ex = extent_create(name, baddr, bsize - baddr - 1, M_DEVBUF, 0, 0,
-			   EX_NOWAIT);
+	ex = extent_create(name, baddr, bsize - baddr - 1, 0, 0, EX_NOWAIT);
 	if (ex == NULL) {
 		printf("psycho_alloc_extent: extent_create failed\n");
 		goto ret;
Index: sys/arch/sparc64/sparc64/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sparc64/sparc64/machdep.c,v
retrieving revision 1.263
diff -u -p -r1.263 machdep.c
--- sys/arch/sparc64/sparc64/machdep.c	12 Dec 2011 19:03:11 -0000	1.263
+++ sys/arch/sparc64/sparc64/machdep.c	23 Jan 2012 22:02:02 -0000
@@ -2304,7 +2304,7 @@ sparc_bus_map(bus_space_tag_t t, bus_add
 		 */
 		io_space = extent_create("IOSPACE",
 					 (u_long)IODEV_BASE, (u_long)IODEV_END,
-					 M_DEVBUF, 0, 0, EX_NOWAIT);
+					 0, 0, EX_NOWAIT);
 
 
 	size = round_page(size);
Index: sys/arch/sun2/sun2/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sun2/sun2/machdep.c,v
retrieving revision 1.71
diff -u -p -r1.71 machdep.c
--- sys/arch/sun2/sun2/machdep.c	12 Dec 2011 19:03:11 -0000	1.71
+++ sys/arch/sun2/sun2/machdep.c	23 Jan 2012 22:02:03 -0000
@@ -358,7 +358,7 @@ cpu_startup(void)
 	 */
 	dvmamap = extent_create("dvmamap",
 	    DVMA_MAP_BASE, DVMA_MAP_BASE + DVMA_MAP_AVAIL,
-	    M_DEVBUF, 0, 0, EX_NOWAIT);
+	    0, 0, EX_NOWAIT);
 	if (dvmamap == NULL)
 		panic("unable to allocate DVMA map");
 
Index: sys/arch/sun3/sun3/dvma.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sun3/sun3/dvma.c,v
retrieving revision 1.36
diff -u -p -r1.36 dvma.c
--- sys/arch/sun3/sun3/dvma.c	11 Dec 2009 13:52:57 -0000	1.36
+++ sys/arch/sun3/sun3/dvma.c	23 Jan 2012 22:02:03 -0000
@@ -43,7 +43,7 @@ __KERNEL_RCSID(0, "$NetBSD: dvma.c,v 1.3
 #include <sys/core.h>
 #include <sys/exec.h>
 
-#include <uvm/uvm.h> /* XXX: not _extern ... need uvm_map_create */
+#include <uvm/uvm.h> /* XXX: not _extern ... need uvm_map_setup */
 
 #define _SUN68K_BUS_DMA_PRIVATE
 #include <machine/autoconf.h>
@@ -82,11 +82,13 @@ dvma_init(void)
 	 * dvma_extent manages things handled in interrupt
 	 * context.
 	 */
-	phys_map = uvm_map_create(pmap_kernel(),
-	    DVMA_MAP_BASE, DVMA_MAP_END, 0);
+	phys_map = kmem_alloc(sizeof(struct vm_map), KM_SLEEP);
 	if (phys_map == NULL)
 		panic("unable to create DVMA map");
 
+	uvm_map_setup(phys_map, DVMA_MAP_BASE, DVMA_MAP_END, 0);
+	phys_map->pmap = pmap_kernel();
+
 	/*
 	 * Reserve the DVMA space used for segment remapping.
 	 * The remainder of phys_map is used for DVMA scratch
@@ -102,7 +104,7 @@ dvma_init(void)
 	 * into DVMA space for the purpose of data transfer.
 	 */
 	dvma_extent = extent_create("dvma", segmap_addr,
-	    segmap_addr + (dvma_segmap_size - 1), M_DEVBUF,
+	    segmap_addr + (dvma_segmap_size - 1),
 	    NULL, 0, EX_NOCOALESCE|EX_NOWAIT);
 }
 
Index: sys/arch/sun3/sun3x/dvma.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sun3/sun3x/dvma.c,v
retrieving revision 1.40
diff -u -p -r1.40 dvma.c
--- sys/arch/sun3/sun3x/dvma.c	21 Nov 2009 04:16:53 -0000	1.40
+++ sys/arch/sun3/sun3x/dvma.c	23 Jan 2012 22:02:03 -0000
@@ -114,7 +114,7 @@ dvma_init(void)
 	 * Create the extent map for DVMA pages.
 	 */
 	dvma_extent = extent_create("dvma", DVMA_MAP_BASE,
-	    DVMA_MAP_BASE + (DVMA_MAP_AVAIL - 1), M_DEVBUF,
+	    DVMA_MAP_BASE + (DVMA_MAP_AVAIL - 1),
 	    NULL, 0, EX_NOCOALESCE|EX_NOWAIT);
 
 	/*
Index: sys/arch/vax/vax/machdep.c
===================================================================
RCS file: /cvsroot/src/sys/arch/vax/vax/machdep.c,v
retrieving revision 1.184
diff -u -p -r1.184 machdep.c
--- sys/arch/vax/vax/machdep.c	7 Jan 2012 16:47:42 -0000	1.184
+++ sys/arch/vax/vax/machdep.c	23 Jan 2012 22:02:03 -0000
@@ -309,7 +309,7 @@ consinit(void)
 	 */
 	KASSERT(iospace != 0);
 	iomap_ex = extent_create("iomap", iospace + VAX_NBPG,
-	    iospace + ((IOSPSZ * VAX_NBPG) - 1), M_DEVBUF,
+	    iospace + ((IOSPSZ * VAX_NBPG) - 1),
 	    (void *) iomap_ex_storage, sizeof(iomap_ex_storage),
 	    EX_NOCOALESCE|EX_NOWAIT);
 #ifdef DEBUG
Index: sys/arch/vax/vax/sgmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/vax/vax/sgmap.c,v
retrieving revision 1.16
diff -u -p -r1.16 sgmap.c
--- sys/arch/vax/vax/sgmap.c	14 Dec 2010 23:44:50 -0000	1.16
+++ sys/arch/vax/vax/sgmap.c	23 Jan 2012 22:02:03 -0000
@@ -93,7 +93,7 @@ vax_sgmap_init(bus_dma_tag_t t, struct v
 	 * space.
 	 */
 	sgmap->aps_ex = extent_create(name, sgvabase, sgvasize - 1,
-	    M_DMAMAP, NULL, 0, EX_NOWAIT|EX_NOCOALESCE);
+	    NULL, 0, EX_NOWAIT|EX_NOCOALESCE);
 	if (sgmap->aps_ex == NULL) {
 		printf("unable to create extent map for sgmap `%s'\n", name);
 		goto die;
Index: sys/arch/x68k/dev/intio.c
===================================================================
RCS file: /cvsroot/src/sys/arch/x68k/dev/intio.c,v
retrieving revision 1.42
diff -u -p -r1.42 intio.c
--- sys/arch/x68k/dev/intio.c	18 Jan 2009 05:00:39 -0000	1.42
+++ sys/arch/x68k/dev/intio.c	23 Jan 2012 22:02:04 -0000
@@ -162,7 +162,7 @@ intio_attach(device_t parent, device_t s
 	sc->sc_map = extent_create("intiomap",
 				  INTIOBASE,
 				  INTIOBASE + 0x400000,
-				  M_DEVBUF, NULL, 0, EX_NOWAIT);
+				  NULL, 0, EX_NOWAIT);
 	intio_alloc_system_ports(sc);
 
 	sc->sc_bst = &intio_bus;
Index: sys/arch/x86/pci/pci_addr_fixup.c
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/pci/pci_addr_fixup.c,v
retrieving revision 1.8
diff -u -p -r1.8 pci_addr_fixup.c
--- sys/arch/x86/pci/pci_addr_fixup.c	28 Aug 2011 06:08:15 -0000	1.8
+++ sys/arch/x86/pci/pci_addr_fixup.c	23 Jan 2012 22:02:04 -0000
@@ -90,12 +90,12 @@ pci_addr_fixup(pci_chipset_tag_t pc, int
 	pciaddr.extent_mem = extent_create("PCI I/O memory space",
 					   PCIADDR_MEM_START,
 					   PCIADDR_MEM_END,
-					   M_DEVBUF, 0, 0, EX_NOWAIT);
+					   0, 0, EX_NOWAIT);
 	KASSERT(pciaddr.extent_mem);
 	pciaddr.extent_port = extent_create("PCI I/O port space",
 					    PCIADDR_PORT_START,
 					    PCIADDR_PORT_END,
-					    M_DEVBUF, 0, 0, EX_NOWAIT);
+					    0, 0, EX_NOWAIT);
 	KASSERT(pciaddr.extent_port);
 
 	/*
Index: sys/arch/x86/x86/bus_space.c
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/x86/bus_space.c,v
retrieving revision 1.37
diff -u -p -r1.37 bus_space.c
--- sys/arch/x86/x86/bus_space.c	25 Aug 2011 15:08:49 -0000	1.37
+++ sys/arch/x86/x86/bus_space.c	23 Jan 2012 22:02:04 -0000
@@ -126,10 +126,10 @@ x86_bus_space_init(void)
 	 * extents of RAM are allocated from the map (0 -> ISA hole
 	 * and end of ISA hole -> end of RAM).
 	 */
-	ioport_ex = extent_create("ioport", 0x0, 0xffff, M_DEVBUF,
+	ioport_ex = extent_create("ioport", 0x0, 0xffff,
 	    (void *)ioport_ex_storage, sizeof(ioport_ex_storage),
 	    EX_NOCOALESCE|EX_NOWAIT);
-	iomem_ex = extent_create("iomem", 0x0, 0xffffffff, M_DEVBUF,
+	iomem_ex = extent_create("iomem", 0x0, 0xffffffff,
 	    (void *)iomem_ex_storage, sizeof(iomem_ex_storage),
 	    EX_NOCOALESCE|EX_NOWAIT);
 
Index: sys/arch/x86/x86/intr.c
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/x86/intr.c,v
retrieving revision 1.72
diff -u -p -r1.72 intr.c
--- sys/arch/x86/x86/intr.c	1 Aug 2011 10:42:24 -0000	1.72
+++ sys/arch/x86/x86/intr.c	23 Jan 2012 22:02:04 -0000
@@ -514,7 +514,7 @@ intr_allocate_slot(struct pic *pic, int 
 			if ((lci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0) {
 				continue;
 			}
-#if 0
+#if 0
 			if (ci == NULL ||
 			    ci->ci_nintrhand > lci->ci_nintrhand) {
 			    	ci = lci;
@@ -988,7 +988,6 @@ cpu_intr_init(struct cpu_info *ci)
 	struct intrsource *isp;
 #if NLAPIC > 0 && defined(MULTIPROCESSOR)
 	int i;
-	static int first = 1;
 #endif
 #ifdef INTRSTACKSIZE
 	vaddr_t istack;
@@ -1003,10 +1002,8 @@ cpu_intr_init(struct cpu_info *ci)
 	isp->is_handlers = &fake_timer_intrhand;
 	isp->is_pic = &local_pic;
 	ci->ci_isources[LIR_TIMER] = isp;
-	evcnt_attach_dynamic(&isp->is_evcnt,
-	    first ? EVCNT_TYPE_INTR : EVCNT_TYPE_MISC, NULL,
+	evcnt_attach_dynamic(&isp->is_evcnt, EVCNT_TYPE_INTR, NULL,
 	    device_xname(ci->ci_dev), "timer");
-	first = 0;
 
 #ifdef MULTIPROCESSOR
 	isp = kmem_zalloc(sizeof(*isp), KM_SLEEP);
Index: sys/arch/x86/x86/pmap.c
===================================================================
RCS file: /cvsroot/src/sys/arch/x86/x86/pmap.c,v
retrieving revision 1.154
diff -u -p -r1.154 pmap.c
--- sys/arch/x86/x86/pmap.c	22 Jan 2012 18:16:35 -0000	1.154
+++ sys/arch/x86/x86/pmap.c	23 Jan 2012 22:02:06 -0000
@@ -1512,24 +1512,6 @@ pmap_bootstrap(vaddr_t kva_start)
 	LIST_INIT(&pmaps);
 
 	/*
-	 * initialize caches.
-	 */
-
-	pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0,
-	    "pmappl", NULL, IPL_NONE, NULL, NULL, NULL);
-#ifdef PAE
-	pool_cache_bootstrap(&pmap_pdp_cache, PAGE_SIZE * PDP_SIZE, 0, 0, 0,
-	    "pdppl", &pmap_pdp_allocator, IPL_NONE,
-	    pmap_pdp_ctor, pmap_pdp_dtor, NULL);
-#else /* PAE */
-	pool_cache_bootstrap(&pmap_pdp_cache, PAGE_SIZE, 0, 0, 0,
-	    "pdppl", NULL, IPL_NONE, pmap_pdp_ctor, pmap_pdp_dtor, NULL);
-#endif /* PAE */
-	pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0,
-	    PR_LARGECACHE, "pvpl", &pool_allocator_meta, IPL_NONE, NULL,
-	    NULL, NULL);
-
-	/*
 	 * ensure the TLB is sync'd with reality by flushing it...
 	 */
 
@@ -1633,6 +1615,24 @@ pmap_init(void)
 		mutex_init(&pv_hash_locks[i].lock, MUTEX_NODEBUG, IPL_VM);
 	}
 
+	/*
+	 * initialize caches.
+	 */
+
+	pool_cache_bootstrap(&pmap_cache, sizeof(struct pmap), 0, 0, 0,
+	    "pmappl", NULL, IPL_NONE, NULL, NULL, NULL);
+#ifdef PAE
+	pool_cache_bootstrap(&pmap_pdp_cache, PAGE_SIZE * PDP_SIZE, 0, 0, 0,
+	    "pdppl", &pmap_pdp_allocator, IPL_NONE,
+	    pmap_pdp_ctor, pmap_pdp_dtor, NULL);
+#else /* PAE */
+	pool_cache_bootstrap(&pmap_pdp_cache, PAGE_SIZE, 0, 0, 0,
+	    "pdppl", NULL, IPL_NONE, pmap_pdp_ctor, pmap_pdp_dtor, NULL);
+#endif /* PAE */
+	pool_cache_bootstrap(&pmap_pv_cache, sizeof(struct pv_entry), 0, 0,
+	    PR_LARGECACHE, "pvpl", &pool_allocator_kmem, IPL_NONE, NULL,
+	    NULL, NULL);
+
 	pmap_tlb_init();
 
 	evcnt_attach_dynamic(&pmap_iobmp_evcnt, EVCNT_TYPE_MISC,
Index: sys/arch/xen/xen/xbd_xenbus.c
===================================================================
RCS file: /cvsroot/src/sys/arch/xen/xen/xbd_xenbus.c,v
retrieving revision 1.50
diff -u -p -r1.50 xbd_xenbus.c
--- sys/arch/xen/xen/xbd_xenbus.c	7 Dec 2011 15:47:43 -0000	1.50
+++ sys/arch/xen/xen/xbd_xenbus.c	23 Jan 2012 22:02:06 -0000
@@ -1064,11 +1064,13 @@ static int
 xbd_map_align(struct xbd_req *req)
 {
 	int s = splvm();
+	int rc;
 
-	req->req_data = (void *)uvm_km_alloc(kmem_map, req->req_bp->b_bcount,
-	    PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_NOWAIT);
+	rc = uvm_km_kmem_alloc(kmem_va_arena,
+	    req->req_bp->b_bcount, (VM_NOSLEEP | VM_INSTANTFIT),
+	    (vmem_addr_t *)&req->req_data);
 	splx(s);
-	if (__predict_false(req->req_data == NULL))
+	if (__predict_false(rc != 0))
 		return ENOMEM;
 	if ((req->req_bp->b_flags & B_READ) == 0)
 		memcpy(req->req_data, req->req_bp->b_data,
@@ -1084,7 +1086,6 @@ xbd_unmap_align(struct xbd_req *req)
 		memcpy(req->req_bp->b_data, req->req_data,
 		    req->req_bp->b_bcount);
 	s = splvm();
-	uvm_km_free(kmem_map, (vaddr_t)req->req_data, req->req_bp->b_bcount,
-	    UVM_KMF_WIRED);
+	uvm_km_kmem_free(kmem_va_arena, (vaddr_t)req->req_data, req->req_bp->b_bcount);
 	splx(s);
 }
Index: sys/dev/cardbus/rbus.c
===================================================================
RCS file: /cvsroot/src/sys/dev/cardbus/rbus.c,v
retrieving revision 1.28
diff -u -p -r1.28 rbus.c
--- sys/dev/cardbus/rbus.c	15 Dec 2009 22:17:12 -0000	1.28
+++ sys/dev/cardbus/rbus.c	23 Jan 2012 22:02:07 -0000
@@ -258,7 +258,7 @@ rbus_new(rbus_tag_t parent, bus_addr_t s
 	if (flags == RBUS_SPACE_SHARE) {
 		ex = parent->rb_ext;
 	} else if (flags == RBUS_SPACE_DEDICATE) {
-		if (NULL == (ex = extent_create("rbus", start, end, M_DEVBUF,
+		if (NULL == (ex = extent_create("rbus", start, end,
 		    NULL, 0, EX_NOCOALESCE|EX_NOWAIT))) {
 			return NULL;
 		}
@@ -295,7 +295,7 @@ rbus_new_root_delegate(bus_space_tag_t b
 	struct extent *ex;
 
 	if (NULL == (ex = extent_create("rbus root", start, start + size,
-	    M_DEVBUF, NULL, 0, EX_NOCOALESCE|EX_NOWAIT))) {
+	    NULL, 0, EX_NOCOALESCE|EX_NOWAIT))) {
 		return NULL;
 	}
 
Index: sys/dev/ic/cpc700.c
===================================================================
RCS file: /cvsroot/src/sys/dev/ic/cpc700.c,v
retrieving revision 1.18
diff -u -p -r1.18 cpc700.c
--- sys/dev/ic/cpc700.c	17 May 2011 17:34:54 -0000	1.18
+++ sys/dev/ic/cpc700.c	23 Jan 2012 22:02:07 -0000
@@ -191,9 +191,9 @@ cpc_attach(device_t self, pci_chipset_ta
 
 #if NPCI > 0 && defined(PCI_NETBSD_CONFIGURE)
 	ioext  = extent_create("pciio",  CPC_PCI_IO_START, CPC_PCI_IO_END,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 	memext = extent_create("pcimem", CPC_PCI_MEM_BASE, CPC_PCI_MEM_END,
-	    M_DEVBUF, NULL, 0, EX_NOWAIT);
+	    NULL, 0, EX_NOWAIT);
 
 	pci_configure_bus(0, ioext, memext, NULL, 0, 32);
 
Index: sys/dev/iscsi/iscsi_ioctl.c
===================================================================
RCS file: /cvsroot/src/sys/dev/iscsi/iscsi_ioctl.c,v
retrieving revision 1.1
diff -u -p -r1.1 iscsi_ioctl.c
--- sys/dev/iscsi/iscsi_ioctl.c	23 Oct 2011 21:15:02 -0000	1.1
+++ sys/dev/iscsi/iscsi_ioctl.c	23 Jan 2012 22:02:08 -0000
@@ -1166,7 +1166,7 @@ unmap_databuf(struct proc *p, void *buf,
 	vm_map_lock(kernel_map);
 	uvm_unmap_remove(kernel_map, databuf, databuf + datalen, &dead_entries
 #if (__NetBSD_Version__ >= 399000500)
-					 , NULL, 0
+					 , 0
 #elif   (__NetBSD_Version__ >= 300000000)
 					 , NULL
 #endif
Index: sys/dev/marvell/gtpci.c
===================================================================
RCS file: /cvsroot/src/sys/dev/marvell/gtpci.c,v
retrieving revision 1.28
diff -u -p -r1.28 gtpci.c
--- sys/dev/marvell/gtpci.c	17 May 2011 17:34:54 -0000	1.28
+++ sys/dev/marvell/gtpci.c	23 Jan 2012 22:02:08 -0000
@@ -454,10 +454,8 @@ gtpci_pci_config(struct gtpci_softc *sc,
 	p2pc = GTPCI_READ(sc, GTPCI_P2PC);
 
 #ifdef PCI_NETBSD_CONFIGURE
-	ioext = extent_create("pciio", iostart, ioend, M_DEVBUF, NULL, 0,
-	    EX_NOWAIT);
-	memext = extent_create("pcimem", memstart, memend, M_DEVBUF, NULL, 0,
-	    EX_NOWAIT);
+	ioext = extent_create("pciio", iostart, ioend, NULL, 0, EX_NOWAIT);
+	memext = extent_create("pcimem", memstart, memend, NULL, 0, EX_NOWAIT);
 	if (ioext != NULL && memext != NULL)
 		pci_configure_bus(pc, ioext, memext, NULL,
 		    GTPCI_P2PC_BUSNUMBER(p2pc), cacheline_size);
Index: sys/dev/marvell/mvpex.c
===================================================================
RCS file: /cvsroot/src/sys/dev/marvell/mvpex.c,v
retrieving revision 1.5
diff -u -p -r1.5 mvpex.c
--- sys/dev/marvell/mvpex.c	17 May 2011 17:34:54 -0000	1.5
+++ sys/dev/marvell/mvpex.c	23 Jan 2012 22:02:08 -0000
@@ -417,10 +417,8 @@ mvpex_pci_config(struct mvpex_softc *sc,
 	stat = bus_space_read_4(sc->sc_iot, sc->sc_ioh, MVPEX_STAT);
 
 #ifdef PCI_NETBSD_CONFIGURE
-	ioext = extent_create("pexio", iostart, ioend, M_DEVBUF, NULL, 0,
-	    EX_NOWAIT);
-	memext = extent_create("pexmem", memstart, memend, M_DEVBUF, NULL, 0,
-	    EX_NOWAIT);
+	ioext = extent_create("pexio", iostart, ioend, NULL, 0, EX_NOWAIT);
+	memext = extent_create("pexmem", memstart, memend, NULL, 0, EX_NOWAIT);
 	if (ioext != NULL && memext != NULL)
 		pci_configure_bus(pc, ioext, memext, NULL,
 		    MVPEX_STAT_PEXBUSNUM(stat), cacheline_size);
Index: sys/dev/pci/btvmei.c
===================================================================
RCS file: /cvsroot/src/sys/dev/pci/btvmei.c,v
retrieving revision 1.26
diff -u -p -r1.26 btvmei.c
--- sys/dev/pci/btvmei.c	26 Nov 2009 15:17:08 -0000	1.26
+++ sys/dev/pci/btvmei.c	23 Jan 2012 22:02:08 -0000
@@ -303,16 +303,16 @@ b3_617_init(struct b3_617_softc *sc)
 	 * set up scatter page allocation control
 	 */
 	sc->vmeext = extent_create("pcivme", MR_PCI_VME,
-				   MR_PCI_VME + MR_PCI_VME_SIZE - 1, M_DEVBUF,
+				   MR_PCI_VME + MR_PCI_VME_SIZE - 1,
 				   sc->vmemap, sizeof(sc->vmemap),
 				   EX_NOCOALESCE);
 #if 0
 	sc->pciext = extent_create("vmepci", MR_VME_PCI,
-				   MR_VME_PCI + MR_VME_PCI_SIZE - 1, M_DEVBUF,
+				   MR_VME_PCI + MR_VME_PCI_SIZE - 1,
 				   sc->pcimap, sizeof(sc->pcimap),
 				   EX_NOCOALESCE);
 	sc->dmaext = extent_create("dmapci", MR_DMA_PCI,
-				   MR_DMA_PCI + MR_DMA_PCI_SIZE - 1, M_DEVBUF,
+				   MR_DMA_PCI + MR_DMA_PCI_SIZE - 1,
 				   sc->dmamap, sizeof(sc->dmamap),
 				   EX_NOCOALESCE);
 #endif
Index: sys/dev/pci/pciconf.c
===================================================================
RCS file: /cvsroot/src/sys/dev/pci/pciconf.c,v
retrieving revision 1.33
diff -u -p -r1.33 pciconf.c
--- sys/dev/pci/pciconf.c	24 Aug 2011 20:27:35 -0000	1.33
+++ sys/dev/pci/pciconf.c	23 Jan 2012 22:02:09 -0000
@@ -708,7 +708,7 @@ setup_iowins(pciconf_bus_t *pb)
 		}
 		if (pd->ppb && pi->reg == 0) {
 			pd->ppb->ioext = extent_create("pciconf", pi->address,
-			    pi->address + pi->size, M_DEVBUF, NULL, 0,
+			    pi->address + pi->size, NULL, 0,
 			    EX_NOWAIT);
 			if (pd->ppb->ioext == NULL) {
 				print_tag(pd->pc, pd->tag);
@@ -759,8 +759,7 @@ setup_memwins(pciconf_bus_t *pb)
 		}
 		if (pd->ppb && pm->reg == 0) {
 			ex = extent_create("pciconf", pm->address,
-			    pm->address + pm->size, M_DEVBUF, NULL, 0,
-			    EX_NOWAIT);
+			    pm->address + pm->size, NULL, 0, EX_NOWAIT);
 			if (ex == NULL) {
 				print_tag(pd->pc, pd->tag);
 				printf("Failed to alloc MEM ext. for bus %d\n",
Index: sys/dev/pci/twa.c
===================================================================
RCS file: /cvsroot/src/sys/dev/pci/twa.c,v
retrieving revision 1.38
diff -u -p -r1.38 twa.c
--- sys/dev/pci/twa.c	31 Aug 2011 18:31:02 -0000	1.38
+++ sys/dev/pci/twa.c	23 Jan 2012 22:02:10 -0000
@@ -525,8 +525,8 @@ twa_unmap_request(struct twa_request *tr
 	/* Free alignment buffer if it was used. */
 	if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) {
 		s = splvm();
-		uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
-		    tr->tr_length, UVM_KMF_WIRED);
+		uvm_km_kmem_free(kmem_va_arena, (vaddr_t)tr->tr_data,
+		    tr->tr_length);
 		splx(s);
 		tr->tr_data = tr->tr_real_data;
 		tr->tr_length = tr->tr_real_length;
@@ -1786,7 +1786,7 @@ int
 twa_map_request(struct twa_request *tr)
 {
 	struct twa_softc	*sc = tr->tr_sc;
-	int			 s, rv;
+	int			 s, rv, rc;
 
 	/* If the command involves data, map that too. */
 	if (tr->tr_data != NULL) {
@@ -1796,11 +1796,12 @@ twa_map_request(struct twa_request *tr)
 			tr->tr_real_data = tr->tr_data;
 			tr->tr_real_length = tr->tr_length;
 			s = splvm();
-			tr->tr_data = (void *)uvm_km_alloc(kmem_map,
-			    tr->tr_length, 512, UVM_KMF_NOWAIT|UVM_KMF_WIRED);
+			rc = uvm_km_kmem_alloc(kmem_va_arena,
+			    tr->tr_length, (VM_NOSLEEP | VM_INSTANTFIT),
+			    (vmem_addr_t *)&tr->tr_data);
 			splx(s);
 
-			if (tr->tr_data == NULL) {
+			if (rc != 0) {
 				tr->tr_data = tr->tr_real_data;
 				tr->tr_length = tr->tr_real_length;
 				return(ENOMEM);
@@ -1820,8 +1821,9 @@ twa_map_request(struct twa_request *tr)
 		if (rv != 0) {
 			if ((tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) != 0) {
 				s = splvm();
-				uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
-				    tr->tr_length, UVM_KMF_WIRED);
+				uvm_km_kmem_free(kmem_va_arena,
+				    (vaddr_t)tr->tr_data,
+				    tr->tr_length);
 				splx(s);
 			}
 			return (rv);
@@ -1833,8 +1835,8 @@ twa_map_request(struct twa_request *tr)
 
 			if (tr->tr_flags & TWA_CMD_DATA_COPY_NEEDED) {
 				s = splvm();
-				uvm_km_free(kmem_map, (vaddr_t)tr->tr_data,
-				    tr->tr_length, UVM_KMF_WIRED);
+				uvm_km_kmem_free(kmem_va_arena, (vaddr_t)tr->tr_data,
+				    tr->tr_length);
 				splx(s);
 				tr->tr_data = tr->tr_real_data;
 				tr->tr_length = tr->tr_real_length;
Index: sys/dev/pci/twe.c
===================================================================
RCS file: /cvsroot/src/sys/dev/pci/twe.c,v
retrieving revision 1.95
diff -u -p -r1.95 twe.c
--- sys/dev/pci/twe.c	30 Jun 2011 20:09:40 -0000	1.95
+++ sys/dev/pci/twe.c	23 Jan 2012 22:02:10 -0000
@@ -1475,7 +1475,7 @@ int
 twe_ccb_map(struct twe_softc *sc, struct twe_ccb *ccb)
 {
 	struct twe_cmd *tc;
-	int flags, nsegs, i, s, rv;
+	int flags, nsegs, i, s, rv, rc;
 	void *data;
 
 	/*
@@ -1484,8 +1484,9 @@ twe_ccb_map(struct twe_softc *sc, struct
 	if (((u_long)ccb->ccb_data & (TWE_ALIGNMENT - 1)) != 0) {
 		s = splvm();
 		/* XXX */
-		ccb->ccb_abuf = uvm_km_alloc(kmem_map,
-		    ccb->ccb_datasize, 0, UVM_KMF_NOWAIT|UVM_KMF_WIRED);
+		rc = uvm_km_kmem_alloc(kmem_va_arena,
+		    ccb->ccb_datasize, (VM_NOSLEEP | VM_INSTANTFIT),
+		    (vmem_addr_t *)&ccb->ccb_abuf);
 		splx(s);
 		data = (void *)ccb->ccb_abuf;
 		if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
@@ -1506,8 +1507,8 @@ twe_ccb_map(struct twe_softc *sc, struct
 		if (ccb->ccb_abuf != (vaddr_t)0) {
 			s = splvm();
 			/* XXX */
-			uvm_km_free(kmem_map, ccb->ccb_abuf,
-			    ccb->ccb_datasize, UVM_KMF_WIRED);
+			uvm_km_kmem_free(kmem_va_arena, ccb->ccb_abuf,
+			    ccb->ccb_datasize);
 			splx(s);
 		}
 		return (rv);
@@ -1592,8 +1593,8 @@ twe_ccb_unmap(struct twe_softc *sc, stru
 			    ccb->ccb_datasize);
 		s = splvm();
 		/* XXX */
-		uvm_km_free(kmem_map, ccb->ccb_abuf, ccb->ccb_datasize,
-		    UVM_KMF_WIRED);
+		uvm_km_kmem_free(kmem_va_arena, ccb->ccb_abuf,
+		    ccb->ccb_datasize);
 		splx(s);
 	}
 }
Index: sys/dev/usb/usb_mem.c
===================================================================
RCS file: /cvsroot/src/sys/dev/usb/usb_mem.c,v
retrieving revision 1.50
diff -u -p -r1.50 usb_mem.c
--- sys/dev/usb/usb_mem.c	27 Sep 2011 01:02:38 -0000	1.50
+++ sys/dev/usb/usb_mem.c	23 Jan 2012 22:02:11 -0000
@@ -432,8 +432,7 @@ usb_setup_reserve(device_t dv, struct us
 
 	rs->paddr = rs->map->dm_segs[0].ds_addr;
 	rs->extent = extent_create(device_xname(dv), (u_long)rs->paddr,
-	    (u_long)(rs->paddr + USB_MEM_RESERVE - 1),
-	    M_USB, 0, 0, 0);
+	    (u_long)(rs->paddr + USB_MEM_RESERVE - 1), 0, 0, 0);
 	if (rs->extent == NULL) {
 		rs->vaddr = 0;
 		return ENOMEM;
Index: sys/dev/vme/vme.c
===================================================================
RCS file: /cvsroot/src/sys/dev/vme/vme.c,v
retrieving revision 1.24
diff -u -p -r1.24 vme.c
--- sys/dev/vme/vme.c	11 Dec 2010 18:12:45 -0000	1.24
+++ sys/dev/vme/vme.c	23 Jan 2012 22:02:11 -0000
@@ -187,22 +187,19 @@ vmeattach(device_t parent, device_t self
 	/*
 	 * set up address space accounting - assume incomplete decoding
 	 */
-	sc->vme32ext = extent_create("vme32", 0, 0xffffffff,
-				     M_DEVBUF, 0, 0, 0);
+	sc->vme32ext = extent_create("vme32", 0, 0xffffffff, 0, 0, 0);
 	if (!sc->vme32ext) {
 		printf("error creating A32 map\n");
 		return;
 	}
 
-	sc->vme24ext = extent_create("vme24", 0, 0x00ffffff,
-				     M_DEVBUF, 0, 0, 0);
+	sc->vme24ext = extent_create("vme24", 0, 0x00ffffff, 0, 0, 0);
 	if (!sc->vme24ext) {
 		printf("error creating A24 map\n");
 		return;
 	}
 
-	sc->vme16ext = extent_create("vme16", 0, 0x0000ffff,
-				     M_DEVBUF, 0, 0, 0);
+	sc->vme16ext = extent_create("vme16", 0, 0x0000ffff, 0, 0, 0);
 	if (!sc->vme16ext) {
 		printf("error creating A16 map\n");
 		return;
Index: sys/external/bsd/drm/dist/bsd-core/drm_bufs.c
===================================================================
RCS file: /cvsroot/src/sys/external/bsd/drm/dist/bsd-core/drm_bufs.c,v
retrieving revision 1.8
diff -u -p -r1.8 drm_bufs.c
--- sys/external/bsd/drm/dist/bsd-core/drm_bufs.c	21 Jul 2010 09:06:38 -0000	1.8
+++ sys/external/bsd/drm/dist/bsd-core/drm_bufs.c	23 Jan 2012 22:02:11 -0000
@@ -36,6 +36,10 @@
 #include "dev/pci/pcireg.h"
 #endif
 
+#if defined(__NetBSD__)
+#include <sys/kmem.h>
+#endif
+
 #include "drmP.h"
 
 /* Allocation of PCI memory resources (framebuffer, registers, etc.) for
@@ -184,7 +188,11 @@ int drm_addmap(struct drm_device * dev, 
 			map->mtrr = 1;
 		break;
 	case _DRM_SHM:
+#if defined(__NetBSD__)
+		map->handle = kmem_alloc(map->size, KM_NOSLEEP);
+#else
 		map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
+#endif
 		DRM_DEBUG("%lu %d %p\n",
 		    map->size, drm_order(map->size), map->handle);
 		if (!map->handle) {
@@ -198,7 +206,11 @@ int drm_addmap(struct drm_device * dev, 
 			DRM_LOCK();
 			if (dev->lock.hw_lock != NULL) {
 				DRM_UNLOCK();
+#if defined(__NetBSD__)
+				kmem_free(map->handle, map->size);
+#else
 				free(map->handle, DRM_MEM_MAPS);
+#endif
 				free(map, DRM_MEM_MAPS);
 				return EBUSY;
 			}
@@ -338,7 +350,11 @@ void drm_rmmap(struct drm_device *dev, d
 		}
 		break;
 	case _DRM_SHM:
+#if defined(__NetBSD__)
+		kmem_free(map->handle, map->size);
+#else
 		free(map->handle, DRM_MEM_MAPS);
+#endif
 		break;
 	case _DRM_AGP:
 	case _DRM_SCATTER_GATHER:
Index: sys/fs/efs/efs_ihash.c
===================================================================
RCS file: /cvsroot/src/sys/fs/efs/efs_ihash.c,v
retrieving revision 1.7
diff -u -p -r1.7 efs_ihash.c
--- sys/fs/efs/efs_ihash.c	12 Jun 2011 03:35:52 -0000	1.7
+++ sys/fs/efs/efs_ihash.c	23 Jan 2012 22:02:12 -0000
@@ -40,6 +40,7 @@ __KERNEL_RCSID(0, "$NetBSD: efs_ihash.c,
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/malloc.h>
 #include <sys/vnode.h>
 #include <sys/proc.h>
 #include <sys/mutex.h>
Index: sys/fs/ntfs/ntfs_ihash.c
===================================================================
RCS file: /cvsroot/src/sys/fs/ntfs/ntfs_ihash.c,v
retrieving revision 1.9
diff -u -p -r1.9 ntfs_ihash.c
--- sys/fs/ntfs/ntfs_ihash.c	15 Mar 2009 17:15:58 -0000	1.9
+++ sys/fs/ntfs/ntfs_ihash.c	23 Jan 2012 22:02:12 -0000
@@ -42,6 +42,7 @@ __KERNEL_RCSID(0, "$NetBSD: ntfs_ihash.c
 #include <sys/vnode.h>
 #include <sys/proc.h>
 #include <sys/mount.h>
+#include <sys/mallocvar.h>
 
 #include <fs/ntfs/ntfs.h>
 #include <fs/ntfs/ntfs_inode.h>
Index: sys/fs/smbfs/smbfs_kq.c
===================================================================
RCS file: /cvsroot/src/sys/fs/smbfs/smbfs_kq.c,v
retrieving revision 1.24
diff -u -p -r1.24 smbfs_kq.c
--- sys/fs/smbfs/smbfs_kq.c	23 Oct 2011 08:42:06 -0000	1.24
+++ sys/fs/smbfs/smbfs_kq.c	23 Jan 2012 22:02:12 -0000
@@ -47,6 +47,7 @@ __KERNEL_RCSID(0, "$NetBSD: smbfs_kq.c,v
 #include <sys/kthread.h>
 #include <sys/file.h>
 #include <sys/dirent.h>
+#include <sys/mallocvar.h>
 
 #include <machine/limits.h>
 
Index: sys/fs/smbfs/smbfs_vnops.c
===================================================================
RCS file: /cvsroot/src/sys/fs/smbfs/smbfs_vnops.c,v
retrieving revision 1.77
diff -u -p -r1.77 smbfs_vnops.c
--- sys/fs/smbfs/smbfs_vnops.c	30 Nov 2010 10:43:03 -0000	1.77
+++ sys/fs/smbfs/smbfs_vnops.c	23 Jan 2012 22:02:12 -0000
@@ -78,6 +78,7 @@ __KERNEL_RCSID(0, "$NetBSD: smbfs_vnops.
 #include <sys/vnode.h>
 #include <sys/lockf.h>
 #include <sys/kauth.h>
+#include <sys/mallocvar.h>
 
 #include <machine/limits.h>
 
Index: sys/kern/core_elf32.c
===================================================================
RCS file: /cvsroot/src/sys/kern/core_elf32.c,v
retrieving revision 1.35
diff -u -p -r1.35 core_elf32.c
--- sys/kern/core_elf32.c	14 Dec 2009 00:48:35 -0000	1.35
+++ sys/kern/core_elf32.c	23 Jan 2012 22:02:12 -0000
@@ -57,7 +57,7 @@ __KERNEL_RCSID(1, "$NetBSD: core_elf32.c
 #include <sys/exec.h>
 #include <sys/exec_elf.h>
 #include <sys/ptrace.h>
-#include <sys/malloc.h>
+#include <sys/kmem.h>
 #include <sys/kauth.h>
 
 #include <machine/reg.h>
@@ -104,6 +104,7 @@ ELFNAMEEND(coredump)(struct lwp *l, void
 	struct proc *p;
 	Elf_Ehdr ehdr;
 	Elf_Phdr phdr, *psections;
+	size_t psectionssize;
 	struct countsegs_state cs;
 	struct writesegs_state ws;
 	off_t notestart, secstart, offset;
@@ -180,8 +181,8 @@ ELFNAMEEND(coredump)(struct lwp *l, void
 	notestart = offset + sizeof(phdr) * cs.npsections;
 	secstart = notestart + notesize;
 
-	psections = malloc(cs.npsections * sizeof(Elf_Phdr),
-	    M_TEMP, M_WAITOK|M_ZERO);
+	psectionssize = cs.npsections * sizeof(Elf_Phdr);
+	psections = kmem_zalloc(psectionssize, KM_SLEEP);
 
 	/* Pass 2: now write the P-section headers. */
 	ws.secoff = secstart;
@@ -250,7 +251,7 @@ ELFNAMEEND(coredump)(struct lwp *l, void
 
   out:
 	if (psections)
-		free(psections, M_TEMP);
+		kmem_free(psections, psectionssize);
 	return (error);
 }
 
Index: sys/kern/kern_malloc.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_malloc.c,v
retrieving revision 1.133
diff -u -p -r1.133 kern_malloc.c
--- sys/kern/kern_malloc.c	15 Oct 2011 21:14:57 -0000	1.133
+++ sys/kern/kern_malloc.c	23 Jan 2012 22:02:13 -0000
@@ -72,6 +72,7 @@ __KERNEL_RCSID(0, "$NetBSD: kern_malloc.
 #include <sys/proc.h>
 #include <sys/kernel.h>
 #include <sys/malloc.h>
+#include <sys/kmem.h>
 #include <sys/systm.h>
 #include <sys/debug.h>
 #include <sys/mutex.h>
@@ -79,247 +80,23 @@ __KERNEL_RCSID(0, "$NetBSD: kern_malloc.
 
 #include <uvm/uvm_extern.h>
 
-static struct vm_map_kernel kmem_map_store;
-struct vm_map *kmem_map = NULL;
-
-#include "opt_kmempages.h"
-
-#ifdef NKMEMCLUSTERS
-#error NKMEMCLUSTERS is obsolete; remove it from your kernel config file and use NKMEMPAGES instead or let the kernel auto-size
-#endif
-
-/*
- * Default number of pages in kmem_map.  We attempt to calculate this
- * at run-time, but allow it to be either patched or set in the kernel
- * config file.
- */
-#ifndef NKMEMPAGES
-#define	NKMEMPAGES	0
-#endif
-int	nkmempages = NKMEMPAGES;
-
-/*
- * Defaults for lower- and upper-bounds for the kmem_map page count.
- * Can be overridden by kernel config options.
- */
-#ifndef	NKMEMPAGES_MIN
-#define	NKMEMPAGES_MIN	NKMEMPAGES_MIN_DEFAULT
-#endif
-
-#ifndef NKMEMPAGES_MAX
-#define	NKMEMPAGES_MAX	NKMEMPAGES_MAX_DEFAULT
-#endif
-
 #include "opt_kmemstats.h"
 #include "opt_malloclog.h"
 #include "opt_malloc_debug.h"
 
-#define	MINALLOCSIZE	(1 << MINBUCKET)
-#define	BUCKETINDX(size) \
-	((size) <= (MINALLOCSIZE * 128) \
-		? (size) <= (MINALLOCSIZE * 8) \
-			? (size) <= (MINALLOCSIZE * 2) \
-				? (size) <= (MINALLOCSIZE * 1) \
-					? (MINBUCKET + 0) \
-					: (MINBUCKET + 1) \
-				: (size) <= (MINALLOCSIZE * 4) \
-					? (MINBUCKET + 2) \
-					: (MINBUCKET + 3) \
-			: (size) <= (MINALLOCSIZE* 32) \
-				? (size) <= (MINALLOCSIZE * 16) \
-					? (MINBUCKET + 4) \
-					: (MINBUCKET + 5) \
-				: (size) <= (MINALLOCSIZE * 64) \
-					? (MINBUCKET + 6) \
-					: (MINBUCKET + 7) \
-		: (size) <= (MINALLOCSIZE * 2048) \
-			? (size) <= (MINALLOCSIZE * 512) \
-				? (size) <= (MINALLOCSIZE * 256) \
-					? (MINBUCKET + 8) \
-					: (MINBUCKET + 9) \
-				: (size) <= (MINALLOCSIZE * 1024) \
-					? (MINBUCKET + 10) \
-					: (MINBUCKET + 11) \
-			: (size) <= (MINALLOCSIZE * 8192) \
-				? (size) <= (MINALLOCSIZE * 4096) \
-					? (MINBUCKET + 12) \
-					: (MINBUCKET + 13) \
-				: (size) <= (MINALLOCSIZE * 16384) \
-					? (MINBUCKET + 14) \
-					: (MINBUCKET + 15))
-
-/*
- * Array of descriptors that describe the contents of each page
- */
-struct kmemusage {
-	short ku_indx;		/* bucket index */
-	union {
-		u_short freecnt;/* for small allocations, free pieces in page */
-		u_short pagecnt;/* for large allocations, pages alloced */
-	} ku_un;
-};
-#define	ku_freecnt ku_un.freecnt
-#define	ku_pagecnt ku_un.pagecnt
-
 struct kmembuckets kmembuckets[MINBUCKET + 16];
 struct kmemusage *kmemusage;
-char *kmembase, *kmemlimit;
-
-#ifdef DEBUG
-static void *malloc_freecheck;
-#endif
-
-/*
- * Turn virtual addresses into kmem map indicies
- */
-#define	btokup(addr)	(&kmemusage[((char *)(addr) - kmembase) >> PGSHIFT])
-
 struct malloc_type *kmemstatistics;
 
-#ifdef MALLOCLOG
-#ifndef MALLOCLOGSIZE
-#define	MALLOCLOGSIZE	100000
-#endif
-
-struct malloclog {
-	void *addr;
-	long size;
-	struct malloc_type *type;
-	int action;
-	const char *file;
-	long line;
-} malloclog[MALLOCLOGSIZE];
-
-long	malloclogptr;
-
-/*
- * Fuzz factor for neighbour address match this must be a mask of the lower
- * bits we wish to ignore when comparing addresses
- */
-__uintptr_t malloclog_fuzz = 0x7FL;
-
-
-static void
-domlog(void *a, long size, struct malloc_type *type, int action,
-    const char *file, long line)
-{
-
-	malloclog[malloclogptr].addr = a;
-	malloclog[malloclogptr].size = size;
-	malloclog[malloclogptr].type = type;
-	malloclog[malloclogptr].action = action;
-	malloclog[malloclogptr].file = file;
-	malloclog[malloclogptr].line = line;
-	malloclogptr++;
-	if (malloclogptr >= MALLOCLOGSIZE)
-		malloclogptr = 0;
-}
-
-#ifdef DIAGNOSTIC
-static void
-hitmlog(void *a)
-{
-	struct malloclog *lp;
-	long l;
-
-#define	PRT do { \
-	lp = &malloclog[l]; \
-	if (lp->addr == a && lp->action) { \
-		printf("malloc log entry %ld:\n", l); \
-		printf("\taddr = %p\n", lp->addr); \
-		printf("\tsize = %ld\n", lp->size); \
-		printf("\ttype = %s\n", lp->type->ks_shortdesc); \
-		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
-		printf("\tfile = %s\n", lp->file); \
-		printf("\tline = %ld\n", lp->line); \
-	} \
-} while (/* CONSTCOND */0)
-
-/*
- * Print fuzzy matched "neighbour" - look for the memory block that has
- * been allocated below the address we are interested in.  We look for a
- * base address + size that is within malloclog_fuzz of our target
- * address. If the base address and target address are the same then it is
- * likely we have found a free (size is 0 in this case) so we won't report
- * those, they will get reported by PRT anyway.
- */
-#define	NPRT do { \
-	__uintptr_t fuzz_mask = ~(malloclog_fuzz); \
-	lp = &malloclog[l]; \
-	if ((__uintptr_t)lp->addr != (__uintptr_t)a && \
-	    (((__uintptr_t)lp->addr + lp->size + malloclog_fuzz) & fuzz_mask) \
-	    == ((__uintptr_t)a & fuzz_mask) && lp->action) {		\
-		printf("neighbour malloc log entry %ld:\n", l); \
-		printf("\taddr = %p\n", lp->addr); \
-		printf("\tsize = %ld\n", lp->size); \
-		printf("\ttype = %s\n", lp->type->ks_shortdesc); \
-		printf("\taction = %s\n", lp->action == 1 ? "alloc" : "free"); \
-		printf("\tfile = %s\n", lp->file); \
-		printf("\tline = %ld\n", lp->line); \
-	} \
-} while (/* CONSTCOND */0)
-
-	for (l = malloclogptr; l < MALLOCLOGSIZE; l++) {
-		PRT;
-		NPRT;
-	}
-
-
-	for (l = 0; l < malloclogptr; l++) {
-		PRT;
-		NPRT;
-	}
-
-#undef PRT
-}
-#endif /* DIAGNOSTIC */
-#endif /* MALLOCLOG */
-
-#ifdef DIAGNOSTIC
-/*
- * This structure provides a set of masks to catch unaligned frees.
- */
-const long addrmask[] = { 0,
-	0x00000001, 0x00000003, 0x00000007, 0x0000000f,
-	0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
-	0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
-	0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
-};
+kmutex_t malloc_lock;
 
-/*
- * The WEIRD_ADDR is used as known text to copy into free objects so
- * that modifications after frees can be detected.
- */
-#define	WEIRD_ADDR	((uint32_t) 0xdeadbeef)
-#ifdef DEBUG
-#define	MAX_COPY	PAGE_SIZE
-#else
-#define	MAX_COPY	32
-#endif
+extern void *kmem_intr_alloc(size_t, km_flag_t);
+extern void *kmem_intr_zalloc(size_t, km_flag_t);
+extern void kmem_intr_free(void *, size_t);
 
-/*
- * Normally the freelist structure is used only to hold the list pointer
- * for free objects.  However, when running with diagnostics, the first
- * 8/16 bytes of the structure is unused except for diagnostic information,
- * and the free list pointer is at offset 8/16 in the structure.  Since the
- * first 8 bytes is the portion of the structure most often modified, this
- * helps to detect memory reuse problems and avoid free list corruption.
- */
-struct freelist {
-	uint32_t spare0;
-#ifdef _LP64
-	uint32_t spare1;		/* explicit padding */
-#endif
-	struct malloc_type *type;
-	void *	next;
-};
-#else /* !DIAGNOSTIC */
-struct freelist {
-	void *	next;
+struct malloc_header {
+	size_t mh_size;
 };
-#endif /* DIAGNOSTIC */
-
-kmutex_t malloc_lock;
 
 /*
  * Allocate a block of memory
@@ -333,212 +110,23 @@ void *
 kern_malloc(unsigned long size, struct malloc_type *ksp, int flags)
 #endif /* MALLOCLOG */
 {
-	struct kmembuckets *kbp;
-	struct kmemusage *kup;
-	struct freelist *freep;
-	long indx, npg, allocsize;
-	char *va, *cp, *savedlist;
-#ifdef DIAGNOSTIC
-	uint32_t *end, *lp;
-	int copysize;
-#endif
+	struct malloc_header *mh;
+	int kmflags = ((flags & M_NOWAIT) != 0
+	    ? KM_NOSLEEP : KM_SLEEP);
+	size_t allocsize = sizeof(struct malloc_header) + size;
+	void *p;
+
+	p = kmem_intr_alloc(allocsize, kmflags);
+	if (p == NULL)
+		return NULL;
 
-#ifdef LOCKDEBUG
-	if ((flags & M_NOWAIT) == 0) {
-		ASSERT_SLEEPABLE();
-	}
-#endif
-#ifdef MALLOC_DEBUG
-	if (debug_malloc(size, ksp, flags, (void *) &va)) {
-		if (va != 0) {
-			FREECHECK_OUT(&malloc_freecheck, (void *)va);
-		}
-		return ((void *) va);
-	}
-#endif
-	indx = BUCKETINDX(size);
-	kbp = &kmembuckets[indx];
-	mutex_spin_enter(&malloc_lock);
-#ifdef KMEMSTATS
-	while (ksp->ks_memuse >= ksp->ks_limit) {
-		if (flags & M_NOWAIT) {
-			mutex_spin_exit(&malloc_lock);
-			return (NULL);
-		}
-		if (ksp->ks_limblocks < 65535)
-			ksp->ks_limblocks++;
-		mtsleep((void *)ksp, PSWP+2, ksp->ks_shortdesc, 0,
-			&malloc_lock);
-	}
-	ksp->ks_size |= 1 << indx;
-#ifdef DIAGNOSTIC
-	if (ksp->ks_active[indx - MINBUCKET] == UINT_MAX)
-		panic("too many allocations in bucket");
-#endif
-	ksp->ks_active[indx - MINBUCKET]++;
-#endif
-#ifdef DIAGNOSTIC
-	copysize = 1 << indx < MAX_COPY ? 1 << indx : MAX_COPY;
-#endif
-	if (kbp->kb_next == NULL) {
-		int s;
-		kbp->kb_last = NULL;
-		if (size > MAXALLOCSAVE)
-			allocsize = round_page(size);
-		else
-			allocsize = 1 << indx;
-		npg = btoc(allocsize);
-		mutex_spin_exit(&malloc_lock);
-		s = splvm();
-		va = (void *) uvm_km_alloc(kmem_map,
-		    (vsize_t)ctob(npg), 0,
-		    ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
-		    ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0) |
-		    UVM_KMF_WIRED);
-		splx(s);
-		if (__predict_false(va == NULL)) {
-			/*
-			 * Kmem_malloc() can return NULL, even if it can
-			 * wait, if there is no map space available, because
-			 * it can't fix that problem.  Neither can we,
-			 * right now.  (We should release pages which
-			 * are completely free and which are in kmembuckets
-			 * with too many free elements.)
-			 */
-			if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
-				panic("malloc: out of space in kmem_map");
-			return (NULL);
-		}
-		mutex_spin_enter(&malloc_lock);
-#ifdef KMEMSTATS
-		kbp->kb_total += kbp->kb_elmpercl;
-#endif
-		kup = btokup(va);
-		kup->ku_indx = indx;
-		if (allocsize > MAXALLOCSAVE) {
-			if (npg > 65535)
-				panic("malloc: allocation too large");
-			kup->ku_pagecnt = npg;
-#ifdef KMEMSTATS
-			ksp->ks_memuse += allocsize;
-#endif
-			goto out;
-		}
-#ifdef KMEMSTATS
-		kup->ku_freecnt = kbp->kb_elmpercl;
-		kbp->kb_totalfree += kbp->kb_elmpercl;
-#endif
-		/*
-		 * Just in case we blocked while allocating memory,
-		 * and someone else also allocated memory for this
-		 * kmembucket, don't assume the list is still empty.
-		 */
-		savedlist = kbp->kb_next;
-		kbp->kb_next = cp = va + (npg << PAGE_SHIFT) - allocsize;
-		for (;;) {
-			freep = (struct freelist *)cp;
-#ifdef DIAGNOSTIC
-			/*
-			 * Copy in known text to detect modification
-			 * after freeing.
-			 */
-			end = (uint32_t *)&cp[copysize];
-			for (lp = (uint32_t *)cp; lp < end; lp++)
-				*lp = WEIRD_ADDR;
-			freep->type = M_FREE;
-#endif /* DIAGNOSTIC */
-			if (cp <= va)
-				break;
-			cp -= allocsize;
-			freep->next = cp;
-		}
-		freep->next = savedlist;
-		if (savedlist == NULL)
-			kbp->kb_last = (void *)freep;
-	}
-	va = kbp->kb_next;
-	kbp->kb_next = ((struct freelist *)va)->next;
-#ifdef DIAGNOSTIC
-	freep = (struct freelist *)va;
-	/* XXX potential to get garbage pointer here. */
-	if (kbp->kb_next) {
-		int rv;
-		vaddr_t addr = (vaddr_t)kbp->kb_next;
-
-		vm_map_lock(kmem_map);
-		rv = uvm_map_checkprot(kmem_map, addr,
-		    addr + sizeof(struct freelist), VM_PROT_WRITE);
-		vm_map_unlock(kmem_map);
-
-		if (__predict_false(rv == 0)) {
-			printf("Data modified on freelist: "
-			    "word %ld of object %p size %ld previous type %s "
-			    "(invalid addr %p)\n",
-			    (long)((int32_t *)&kbp->kb_next - (int32_t *)kbp),
-			    va, size, "foo", kbp->kb_next);
-#ifdef MALLOCLOG
-			hitmlog(va);
-#endif
-			kbp->kb_next = NULL;
-		}
-	}
-
-	/* Fill the fields that we've used with WEIRD_ADDR */
-#ifdef _LP64
-	freep->type = (struct malloc_type *)
-	    (WEIRD_ADDR | (((u_long) WEIRD_ADDR) << 32));
-#else
-	freep->type = (struct malloc_type *) WEIRD_ADDR;
-#endif
-	end = (uint32_t *)&freep->next +
-	    (sizeof(freep->next) / sizeof(int32_t));
-	for (lp = (uint32_t *)&freep->next; lp < end; lp++)
-		*lp = WEIRD_ADDR;
-
-	/* and check that the data hasn't been modified. */
-	end = (uint32_t *)&va[copysize];
-	for (lp = (uint32_t *)va; lp < end; lp++) {
-		if (__predict_true(*lp == WEIRD_ADDR))
-			continue;
-		printf("Data modified on freelist: "
-		    "word %ld of object %p size %ld previous type %s "
-		    "(0x%x != 0x%x)\n",
-		    (long)(lp - (uint32_t *)va), va, size,
-		    "bar", *lp, WEIRD_ADDR);
-#ifdef MALLOCLOG
-		hitmlog(va);
-#endif
-		break;
+	if ((flags & M_ZERO) != 0) {
+		memset(p, 0, allocsize);
 	}
+	mh = (void *)p;
+	mh->mh_size = allocsize;
 
-	freep->spare0 = 0;
-#endif /* DIAGNOSTIC */
-#ifdef KMEMSTATS
-	kup = btokup(va);
-	if (kup->ku_indx != indx)
-		panic("malloc: wrong bucket");
-	if (kup->ku_freecnt == 0)
-		panic("malloc: lost data");
-	kup->ku_freecnt--;
-	kbp->kb_totalfree--;
-	ksp->ks_memuse += 1 << indx;
-out:
-	kbp->kb_calls++;
-	ksp->ks_inuse++;
-	ksp->ks_calls++;
-	if (ksp->ks_memuse > ksp->ks_maxused)
-		ksp->ks_maxused = ksp->ks_memuse;
-#else
-out:
-#endif
-#ifdef MALLOCLOG
-	domlog(va, size, ksp, 1, file, line);
-#endif
-	mutex_spin_exit(&malloc_lock);
-	if ((flags & M_ZERO) != 0)
-		memset(va, 0, size);
-	FREECHECK_OUT(&malloc_freecheck, (void *)va);
-	return ((void *) va);
+	return mh + 1;
 }
 
 /*
@@ -552,145 +140,12 @@ void
 kern_free(void *addr, struct malloc_type *ksp)
 #endif /* MALLOCLOG */
 {
-	struct kmembuckets *kbp;
-	struct kmemusage *kup;
-	struct freelist *freep;
-	long size;
-#ifdef DIAGNOSTIC
-	void *cp;
-	int32_t *end, *lp;
-	long alloc, copysize;
-#endif
+	struct malloc_header *mh;
 
-	FREECHECK_IN(&malloc_freecheck, addr);
-#ifdef MALLOC_DEBUG
-	if (debug_free(addr, ksp))
-		return;
-#endif
+	mh = addr;
+	mh--;
 
-#ifdef DIAGNOSTIC
-	/*
-	 * Ensure that we're free'ing something that we could
-	 * have allocated in the first place.  That is, check
-	 * to see that the address is within kmem_map.
-	 */
-	if (__predict_false((vaddr_t)addr < vm_map_min(kmem_map) ||
-	    (vaddr_t)addr >= vm_map_max(kmem_map)))
-		panic("free: addr %p not within kmem_map", addr);
-#endif
-
-	kup = btokup(addr);
-	size = 1 << kup->ku_indx;
-	kbp = &kmembuckets[kup->ku_indx];
-
-	LOCKDEBUG_MEM_CHECK(addr,
-	    size <= MAXALLOCSAVE ? size : ctob(kup->ku_pagecnt));
-
-	mutex_spin_enter(&malloc_lock);
-#ifdef MALLOCLOG
-	domlog(addr, 0, ksp, 2, file, line);
-#endif
-#ifdef DIAGNOSTIC
-	/*
-	 * Check for returns of data that do not point to the
-	 * beginning of the allocation.
-	 */
-	if (size > PAGE_SIZE)
-		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
-	else
-		alloc = addrmask[kup->ku_indx];
-	if (((u_long)addr & alloc) != 0)
-		panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
-		    addr, size, ksp->ks_shortdesc, alloc);
-#endif /* DIAGNOSTIC */
-	if (size > MAXALLOCSAVE) {
-		uvm_km_free(kmem_map, (vaddr_t)addr, ctob(kup->ku_pagecnt),
-		    UVM_KMF_WIRED);
-#ifdef KMEMSTATS
-		size = kup->ku_pagecnt << PGSHIFT;
-		ksp->ks_memuse -= size;
-#ifdef DIAGNOSTIC
-		if (ksp->ks_active[kup->ku_indx - MINBUCKET] == 0)
-			panic("no active allocation(1), probably double free");
-#endif
-		ksp->ks_active[kup->ku_indx - MINBUCKET]--;
-		kup->ku_indx = 0;
-		kup->ku_pagecnt = 0;
-		if (ksp->ks_memuse + size >= ksp->ks_limit &&
-		    ksp->ks_memuse < ksp->ks_limit)
-			wakeup((void *)ksp);
-#ifdef DIAGNOSTIC
-		if (ksp->ks_inuse == 0)
-			panic("free 1: inuse 0, probable double free");
-#endif
-		ksp->ks_inuse--;
-		kbp->kb_total -= 1;
-#endif
-		mutex_spin_exit(&malloc_lock);
-		return;
-	}
-	freep = (struct freelist *)addr;
-#ifdef DIAGNOSTIC
-	/*
-	 * Check for multiple frees. Use a quick check to see if
-	 * it looks free before laboriously searching the freelist.
-	 */
-	if (__predict_false(freep->spare0 == WEIRD_ADDR)) {
-		for (cp = kbp->kb_next; cp;
-		    cp = ((struct freelist *)cp)->next) {
-			if (addr != cp)
-				continue;
-			printf("multiply freed item %p\n", addr);
-#ifdef MALLOCLOG
-			hitmlog(addr);
-#endif
-			panic("free: duplicated free");
-		}
-	}
-
-	/*
-	 * Copy in known text to detect modification after freeing
-	 * and to make it look free. Also, save the type being freed
-	 * so we can list likely culprit if modification is detected
-	 * when the object is reallocated.
-	 */
-	copysize = size < MAX_COPY ? size : MAX_COPY;
-	end = (int32_t *)&((char *)addr)[copysize];
-	for (lp = (int32_t *)addr; lp < end; lp++)
-		*lp = WEIRD_ADDR;
-	freep->type = ksp;
-#endif /* DIAGNOSTIC */
-#ifdef KMEMSTATS
-	kup->ku_freecnt++;
-	if (kup->ku_freecnt >= kbp->kb_elmpercl) {
-		if (kup->ku_freecnt > kbp->kb_elmpercl)
-			panic("free: multiple frees");
-		else if (kbp->kb_totalfree > kbp->kb_highwat)
-			kbp->kb_couldfree++;
-	}
-	kbp->kb_totalfree++;
-	ksp->ks_memuse -= size;
-#ifdef DIAGNOSTIC
-	if (ksp->ks_active[kup->ku_indx - MINBUCKET] == 0)
-		panic("no active allocation(2), probably double free");
-#endif
-	ksp->ks_active[kup->ku_indx - MINBUCKET]--;
-	if (ksp->ks_memuse + size >= ksp->ks_limit &&
-	    ksp->ks_memuse < ksp->ks_limit)
-		wakeup((void *)ksp);
-#ifdef DIAGNOSTIC
-	if (ksp->ks_inuse == 0)
-		panic("free 2: inuse 0, probable double free");
-#endif
-	ksp->ks_inuse--;
-#endif
-	if (kbp->kb_next == NULL)
-		kbp->kb_next = addr;
-	else
-		((struct freelist *)kbp->kb_last)->next = addr;
-	freep->next = NULL;
-	kbp->kb_last = addr;
-	mutex_spin_exit(&malloc_lock);
+	kmem_intr_free(mh, mh->mh_size);
 }
 
 /*
@@ -700,12 +155,9 @@ void *
 kern_realloc(void *curaddr, unsigned long newsize, struct malloc_type *ksp,
     int flags)
 {
-	struct kmemusage *kup;
+	struct malloc_header *mh;
 	unsigned long cursize;
 	void *newaddr;
-#ifdef DIAGNOSTIC
-	long alloc;
-#endif
 
 	/*
 	 * realloc() with a NULL pointer is the same as malloc().
@@ -727,30 +179,10 @@ kern_realloc(void *curaddr, unsigned lon
 	}
 #endif
 
-	/*
-	 * Find out how large the old allocation was (and do some
-	 * sanity checking).
-	 */
-	kup = btokup(curaddr);
-	cursize = 1 << kup->ku_indx;
-
-#ifdef DIAGNOSTIC
-	/*
-	 * Check for returns of data that do not point to the
-	 * beginning of the allocation.
-	 */
-	if (cursize > PAGE_SIZE)
-		alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
-	else
-		alloc = addrmask[kup->ku_indx];
-	if (((u_long)curaddr & alloc) != 0)
-		panic("realloc: "
-		    "unaligned addr %p, size %ld, type %s, mask %ld\n",
-		    curaddr, cursize, ksp->ks_shortdesc, alloc);
-#endif /* DIAGNOSTIC */
+	mh = curaddr;
+	mh--;
 
-	if (cursize > MAXALLOCSAVE)
-		cursize = ctob(kup->ku_pagecnt);
+	cursize = mh->mh_size;
 
 	/*
 	 * If we already actually have as much as they want, we're done.
@@ -782,28 +214,12 @@ kern_realloc(void *curaddr, unsigned lon
 }
 
 /*
- * Roundup size to the actual allocation size.
- */
-unsigned long
-malloc_roundup(unsigned long size)
-{
-
-	if (size > MAXALLOCSAVE)
-		return (roundup(size, PAGE_SIZE));
-	else
-		return (1 << BUCKETINDX(size));
-}
-
-/*
  * Add a malloc type to the system.
  */
 void
 malloc_type_attach(struct malloc_type *type)
 {
 
-	if (nkmempages == 0)
-		panic("malloc_type_attach: nkmempages == 0");
-
 	if (type->ks_magic != M_MAGIC)
 		panic("malloc_type_attach: bad magic");
 
@@ -819,8 +235,6 @@ malloc_type_attach(struct malloc_type *t
 #endif
 
 #ifdef KMEMSTATS
-	if (type->ks_limit == 0)
-		type->ks_limit = ((u_long)nkmempages << PAGE_SHIFT) * 6U / 10U;
 #else
 	type->ks_limit = 0;
 #endif
@@ -874,34 +288,6 @@ malloc_type_setlimit(struct malloc_type 
 }
 
 /*
- * Compute the number of pages that kmem_map will map, that is,
- * the size of the kernel malloc arena.
- */
-void
-kmeminit_nkmempages(void)
-{
-	int npages;
-
-	if (nkmempages != 0) {
-		/*
-		 * It's already been set (by us being here before, or
-		 * by patching or kernel config options), bail out now.
-		 */
-		return;
-	}
-
-	npages = physmem;
-
-	if (npages > NKMEMPAGES_MAX)
-		npages = NKMEMPAGES_MAX;
-
-	if (npages < NKMEMPAGES_MIN)
-		npages = NKMEMPAGES_MIN;
-
-	nkmempages = npages;
-}
-
-/*
  * Initialize the kernel memory allocator
  */
 void
@@ -909,42 +295,12 @@ kmeminit(void)
 {
 	__link_set_decl(malloc_types, struct malloc_type);
 	struct malloc_type * const *ksp;
-	vaddr_t kmb, kml;
 #ifdef KMEMSTATS
 	long indx;
 #endif
 
-#if	((MAXALLOCSAVE & (MAXALLOCSAVE - 1)) != 0)
-		ERROR!_kmeminit:_MAXALLOCSAVE_not_power_of_2
-#endif
-#if	(MAXALLOCSAVE > MINALLOCSIZE * 32768)
-		ERROR!_kmeminit:_MAXALLOCSAVE_too_big
-#endif
-#if	(MAXALLOCSAVE < NBPG)
-		ERROR!_kmeminit:_MAXALLOCSAVE_too_small
-#endif
-
-	if (sizeof(struct freelist) > (1 << MINBUCKET))
-		panic("minbucket too small/struct freelist too big");
-
 	mutex_init(&malloc_lock, MUTEX_DEFAULT, IPL_VM);
 
-	/*
-	 * Compute the number of kmem_map pages, if we have not
-	 * done so already.
-	 */
-	kmeminit_nkmempages();
-
-	kmemusage = (struct kmemusage *) uvm_km_alloc(kernel_map,
-	    (vsize_t)(nkmempages * sizeof(struct kmemusage)), 0,
-	    UVM_KMF_WIRED|UVM_KMF_ZERO);
-	kmb = 0;
-	kmem_map = uvm_km_suballoc(kernel_map, &kmb,
-	    &kml, ((vsize_t)nkmempages << PAGE_SHIFT),
-	    VM_MAP_INTRSAFE, false, &kmem_map_store);
-	uvm_km_vacache_init(kmem_map, "kvakmem", 0);
-	kmembase = (char *)kmb;
-	kmemlimit = (char *)kml;
 #ifdef KMEMSTATS
 	for (indx = 0; indx < MINBUCKET + 16; indx++) {
 		if (1 << indx >= PAGE_SIZE)
Index: sys/kern/subr_extent.c
===================================================================
RCS file: /cvsroot/src/sys/kern/subr_extent.c,v
retrieving revision 1.72
diff -u -p -r1.72 subr_extent.c
--- sys/kern/subr_extent.c	28 Apr 2008 20:24:04 -0000	1.72
+++ sys/kern/subr_extent.c	23 Jan 2012 22:02:13 -0000
@@ -41,7 +41,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_extent.
 
 #include <sys/param.h>
 #include <sys/extent.h>
-#include <sys/malloc.h>
+#include <sys/kmem.h>
 #include <sys/pool.h>
 #include <sys/time.h>
 #include <sys/systm.h>
@@ -69,15 +69,15 @@ __KERNEL_RCSID(0, "$NetBSD: subr_extent.
  * in subr_extent.c rather than subr_prf.c.
  */
 #define	\
-malloc(s, t, flags)		malloc(s)
+kmem_alloc(s, flags)		malloc(s)
 #define	\
-free(p, t)			free(p)
+kmem_free(p, s)			free(p)
 #define	\
 cv_wait_sig(cv, lock)		(EWOULDBLOCK)
 #define	\
-pool_get(pool, flags)		malloc((pool)->pr_size,0,0)
+pool_get(pool, flags)		kmem_alloc((pool)->pr_size,0)
 #define	\
-pool_put(pool, rp)		free(rp,0)
+pool_put(pool, rp)		kmem_free(rp,0)
 #define	\
 panic(a)			printf(a)
 #define	mutex_init(a, b, c)
@@ -127,13 +127,6 @@ extent_alloc_region_descriptor(struct ex
 	int exflags, error;
 
 	/*
-	 * If the kernel memory allocator is not yet running, we can't
-	 * use it (obviously).
-	 */
-	if (KMEM_IS_RUNNING == 0)
-		flags &= ~EX_MALLOCOK;
-
-	/*
 	 * XXX Make a static, create-time flags word, so we don't
 	 * XXX have to lock to read it!
 	 */
@@ -235,7 +228,7 @@ extent_free_region_descriptor(struct ext
  */
 struct extent *
 extent_create(const char *name, u_long start, u_long end,
-    struct malloc_type *mtype, void *storage, size_t storagesize, int flags)
+    void *storage, size_t storagesize, int flags)
 {
 	struct extent *ex;
 	char *cp = storage;
@@ -291,8 +284,8 @@ extent_create(const char *name, u_long s
 			LIST_INSERT_HEAD(&fex->fex_freelist, rp, er_link);
 		}
 	} else {
-		ex = (struct extent *)malloc(sizeof(struct extent),
-		    mtype, (flags & EX_WAITOK) ? M_WAITOK : M_NOWAIT);
+		ex = (struct extent *)kmem_alloc(sizeof(struct extent),
+		    (flags & EX_WAITOK) ? KM_SLEEP : KM_NOSLEEP);
 		if (ex == NULL)
 			return (NULL);
 	}
@@ -304,7 +297,6 @@ extent_create(const char *name, u_long s
 	ex->ex_name = name;
 	ex->ex_start = start;
 	ex->ex_end = end;
-	ex->ex_mtype = mtype;
 	ex->ex_flags = 0;
 	if (fixed_extent)
 		ex->ex_flags |= EXF_FIXED;
@@ -342,7 +334,7 @@ extent_destroy(struct extent *ex)
 
 	/* If we're not a fixed extent, free the extent descriptor itself. */
 	if ((ex->ex_flags & EXF_FIXED) == 0)
-		free(ex, ex->ex_mtype);
+		kmem_free(ex, sizeof(*ex));
 }
 
 /*
Index: sys/kern/subr_kmem.c
===================================================================
RCS file: /cvsroot/src/sys/kern/subr_kmem.c,v
retrieving revision 1.38
diff -u -p -r1.38 subr_kmem.c
--- sys/kern/subr_kmem.c	20 Nov 2011 22:58:31 -0000	1.38
+++ sys/kern/subr_kmem.c	23 Jan 2012 22:02:13 -0000
@@ -58,8 +58,6 @@
 /*
  * allocator of kernel wired memory.
  *
- * TODO:
- * -	worth to have "intrsafe" version?  maybe..
  */
 
 #include <sys/cdefs.h>
@@ -68,7 +66,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,
 #include <sys/param.h>
 #include <sys/callback.h>
 #include <sys/kmem.h>
-#include <sys/vmem.h>
+#include <sys/pool.h>
 #include <sys/debug.h>
 #include <sys/lockdebug.h>
 #include <sys/cpu.h>
@@ -79,24 +77,51 @@ __KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,
 
 #include <lib/libkern/libkern.h>
 
-#define	KMEM_QUANTUM_SIZE	(ALIGNBYTES + 1)
-#define	KMEM_QCACHE_MAX		(KMEM_QUANTUM_SIZE * 32)
-#define	KMEM_CACHE_COUNT	16
-
-typedef struct kmem_cache {
-	pool_cache_t		kc_cache;
-	struct pool_allocator	kc_pa;
-	char			kc_name[12];
-} kmem_cache_t;
+struct kmem_cache_info {
+	int kc_size;
+	const char *kc_name;
+};
+
+static const struct kmem_cache_info kmem_cache_sizes[] = {
+	{  8, "kmem-8" },
+	{ 16, "kmem-16" },
+	{ 24, "kmem-24" },
+	{ 32, "kmem-32" },
+	{ 40, "kmem-40" },
+	{ 48, "kmem-48" },
+	{ 56, "kmem-56" },
+	{ 64, "kmem-64" },
+	{ 80, "kmem-80" },
+	{ 96, "kmem-96" },
+	{ 112, "kmem-112" },
+	{ 128, "kmem-128" },
+	{ 160, "kmem-160" },
+	{ 192, "kmem-192" },
+	{ 224, "kmem-224" },
+	{ 256, "kmem-256" },
+	{ 320, "kmem-320" },
+	{ 384, "kmem-384" },
+	{ 448, "kmem-448" },
+	{ 512, "kmem-512" },
+	{ 768, "kmem-768" },
+	{ 1024, "kmem-1024" },
+	{ 2048, "kmem-2048" },
+	{ 4096, "kmem-4096" },
+	{ 0, NULL }
+};
 
-static vmem_t *kmem_arena;
-static struct callback_entry kmem_kva_reclaim_entry;
+/*
+ * KMEM_ALIGN is the smallest guaranteed alignment and
+ * also the smallest allocatable quantum.
+ * Every cache size which is a multiple of CACHE_LINE_SIZE
+ * gets CACHE_LINE_SIZE alignment.
+ */
+#define KMEM_ALIGN	8
+#define KMEM_SHIFT	3
+#define KMEM_MAXSIZE	4096
 
-static kmem_cache_t kmem_cache[KMEM_CACHE_COUNT + 1];
+static pool_cache_t kmem_cache[KMEM_MAXSIZE >> KMEM_SHIFT];
 static size_t kmem_cache_max;
-static size_t kmem_cache_min;
-static size_t kmem_cache_mask;
-static int kmem_cache_shift;
 
 #if defined(DEBUG)
 int kmem_guard_depth = 0;
@@ -110,11 +135,12 @@ static void *kmem_freecheck;
 #endif /* defined(DEBUG) */
 
 #if defined(KMEM_POISON)
+static int kmem_poison_ctor(void *, void *, int);
 static void kmem_poison_fill(void *, size_t);
 static void kmem_poison_check(void *, size_t);
 #else /* defined(KMEM_POISON) */
-#define	kmem_poison_fill(p, sz)		/* nothing */
-#define	kmem_poison_check(p, sz)	/* nothing */
+#define kmem_poison_fill(p, sz)		/* nothing */
+#define kmem_poison_check(p, sz)		/* nothing */
 #endif /* defined(KMEM_POISON) */
 
 #if defined(KMEM_REDZONE)
@@ -124,77 +150,30 @@ static void kmem_poison_check(void *, si
 #endif /* defined(KMEM_REDZONE) */
 
 #if defined(KMEM_SIZE)
-#define	SIZE_SIZE	(max(KMEM_QUANTUM_SIZE, sizeof(size_t)))
+#define	SIZE_SIZE	(max(KMEM_ALIGN, sizeof(size_t)))
 static void kmem_size_set(void *, size_t);
-static void kmem_size_check(const void *, size_t);
+static void kmem_size_check(void *, size_t);
 #else
 #define	SIZE_SIZE	0
 #define	kmem_size_set(p, sz)	/* nothing */
 #define	kmem_size_check(p, sz)	/* nothing */
 #endif
 
-static int kmem_backend_alloc(void *, vmem_size_t, vmem_size_t *,
-    vm_flag_t, vmem_addr_t *);
-static void kmem_backend_free(void *, vmem_addr_t, vmem_size_t);
-static int kmem_kva_reclaim_callback(struct callback_entry *, void *, void *);
-
 CTASSERT(KM_SLEEP == PR_WAITOK);
 CTASSERT(KM_NOSLEEP == PR_NOWAIT);
 
-static inline vm_flag_t
-kmf_to_vmf(km_flag_t kmflags)
-{
-	vm_flag_t vmflags;
-
-	KASSERT((kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);
-	KASSERT((~kmflags & (KM_SLEEP|KM_NOSLEEP)) != 0);
-
-	vmflags = 0;
-	if ((kmflags & KM_SLEEP) != 0) {
-		vmflags |= VM_SLEEP;
-	}
-	if ((kmflags & KM_NOSLEEP) != 0) {
-		vmflags |= VM_NOSLEEP;
-	}
-
-	return vmflags;
-}
-
-static void *
-kmem_poolpage_alloc(struct pool *pool, int prflags)
-{
-	vmem_addr_t addr;
-	int rc;
-
-	rc = vmem_alloc(kmem_arena, pool->pr_alloc->pa_pagesz,
-	    kmf_to_vmf(prflags) | VM_INSTANTFIT, &addr);
-	return (rc == 0) ? (void *)addr : NULL;
-
-}
-
-static void
-kmem_poolpage_free(struct pool *pool, void *addr)
-{
-
-	vmem_free(kmem_arena, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
-}
-
-/* ---- kmem API */
-
-/*
- * kmem_alloc: allocate wired memory.
- *
- * => must not be called from interrupt context.
- */
+void * kmem_intr_alloc(size_t size, km_flag_t kmflags);
+void * kmem_intr_zalloc(size_t size, km_flag_t kmflags);
+void kmem_intr_free(void *, size_t size);
 
 void *
-kmem_alloc(size_t size, km_flag_t kmflags)
+kmem_intr_alloc(size_t size, km_flag_t kmflags)
 {
-	kmem_cache_t *kc;
+	size_t index;
+	size_t allocsz;
+	pool_cache_t pc;
 	uint8_t *p;
 
-	KASSERT(!cpu_intr_p());
-	KASSERT(!cpu_softintr_p());
 	KASSERT(size > 0);
 
 #ifdef KMEM_GUARD
@@ -204,61 +183,48 @@ kmem_alloc(size_t size, km_flag_t kmflag
 	}
 #endif
 
-	size += REDZONE_SIZE + SIZE_SIZE;
-	if (size >= kmem_cache_min && size <= kmem_cache_max) {
-		kc = &kmem_cache[(size + kmem_cache_mask) >> kmem_cache_shift];
-		KASSERT(size <= kc->kc_pa.pa_pagesz);
-		kmflags &= (KM_SLEEP | KM_NOSLEEP);
-		p = pool_cache_get(kc->kc_cache, kmflags);
+	allocsz = kmem_roundup_size(size) + REDZONE_SIZE + SIZE_SIZE;
+	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
+	    < kmem_cache_max >> KMEM_SHIFT) {
+		pc = kmem_cache[index];
 	} else {
-		vmem_addr_t addr;
-
-		if (vmem_alloc(kmem_arena, size,
-		    kmf_to_vmf(kmflags) | VM_INSTANTFIT, &addr) == 0)
-			p = (void *)addr;
-		else
-			p = NULL;
+		int rc;
+		rc = uvm_km_kmem_alloc(kmem_va_arena,
+		    (vsize_t)round_page(allocsz),
+		    ((kmflags & KM_SLEEP) ? VM_SLEEP : VM_NOSLEEP)
+		     | VM_INSTANTFIT, (vmem_addr_t *)&p);
+		return (rc != 0) ? NULL : p;
 	}
+
+	p = pool_cache_get(pc, kmflags);
+
 	if (__predict_true(p != NULL)) {
 		kmem_poison_check(p, kmem_roundup_size(size));
 		FREECHECK_OUT(&kmem_freecheck, p);
-		kmem_size_set(p, size);
-		p = (uint8_t *)p + SIZE_SIZE;
+		kmem_size_set(p, allocsz);
 	}
 	return p;
 }
 
-/*
- * kmem_zalloc: allocate wired memory.
- *
- * => must not be called from interrupt context.
- */
-
 void *
-kmem_zalloc(size_t size, km_flag_t kmflags)
+kmem_intr_zalloc(size_t size, km_flag_t kmflags)
 {
 	void *p;
 
-	p = kmem_alloc(size, kmflags);
+	p = kmem_intr_alloc(size, kmflags);
 	if (p != NULL) {
 		memset(p, 0, size);
 	}
 	return p;
 }
 
-/*
- * kmem_free: free wired memory allocated by kmem_alloc.
- *
- * => must not be called from interrupt context.
- */
-
 void
-kmem_free(void *p, size_t size)
+kmem_intr_free(void *p, size_t size)
 {
-	kmem_cache_t *kc;
+	size_t index;
+	size_t allocsz;
+	pool_cache_t pc;
 
-	KASSERT(!cpu_intr_p());
-	KASSERT(!cpu_softintr_p());
 	KASSERT(p != NULL);
 	KASSERT(size > 0);
 
@@ -268,128 +234,135 @@ kmem_free(void *p, size_t size)
 		return;
 	}
 #endif
-	size += SIZE_SIZE;
-	p = (uint8_t *)p - SIZE_SIZE;
-	kmem_size_check(p, size + REDZONE_SIZE);
-	FREECHECK_IN(&kmem_freecheck, p);
-	LOCKDEBUG_MEM_CHECK(p, size);
-	kmem_poison_check((char *)p + size,
-	    kmem_roundup_size(size + REDZONE_SIZE) - size);
-	kmem_poison_fill(p, size);
-	size += REDZONE_SIZE;
-	if (size >= kmem_cache_min && size <= kmem_cache_max) {
-		kc = &kmem_cache[(size + kmem_cache_mask) >> kmem_cache_shift];
-		KASSERT(size <= kc->kc_pa.pa_pagesz);
-		pool_cache_put(kc->kc_cache, p);
+
+	allocsz = kmem_roundup_size(size) + REDZONE_SIZE + SIZE_SIZE;
+	if ((index = ((allocsz - 1) >> KMEM_SHIFT))
+	    < kmem_cache_max >> KMEM_SHIFT) {
+		pc = kmem_cache[index];
 	} else {
-		vmem_free(kmem_arena, (vmem_addr_t)p, size);
+		uvm_km_kmem_free(kmem_va_arena, (vaddr_t)p,
+		    round_page(allocsz));
+		return;
 	}
+
+	kmem_size_check(p, allocsz);
+	FREECHECK_IN(&kmem_freecheck, p);
+	LOCKDEBUG_MEM_CHECK(p, allocsz - (REDZONE_SIZE + SIZE_SIZE));
+	kmem_poison_check((uint8_t *)p + size, allocsz - size - SIZE_SIZE);
+	kmem_poison_fill(p, allocsz);
+
+	pool_cache_put(pc, p);
 }
 
 
-void
-kmem_init(void)
-{
-	kmem_cache_t *kc;
-	size_t sz;
-	int i;
+/* ---- kmem API */
 
-#ifdef KMEM_GUARD
-	uvm_kmguard_init(&kmem_guard, &kmem_guard_depth, &kmem_guard_size,
-	    kernel_map);
-#endif
+/*
+ * kmem_alloc: allocate wired memory.
+ * => must not be called from interrupt context.
+ */
 
-	kmem_arena = vmem_create("kmem", 0, 0, KMEM_QUANTUM_SIZE,
-	    kmem_backend_alloc, kmem_backend_free, NULL, KMEM_QCACHE_MAX,
-	    VM_SLEEP, IPL_NONE);
-	callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback,
-	    &kmem_kva_reclaim_entry, kmem_arena, kmem_kva_reclaim_callback);
-
-	/*
-	 * kmem caches start at twice the size of the largest vmem qcache
-	 * and end at PAGE_SIZE or earlier.  assert that KMEM_QCACHE_MAX
-	 * is a power of two.
-	 */
-	KASSERT(ffs(KMEM_QCACHE_MAX) != 0);
-	KASSERT(KMEM_QCACHE_MAX - (1 << (ffs(KMEM_QCACHE_MAX) - 1)) == 0);
-	kmem_cache_shift = ffs(KMEM_QCACHE_MAX);
-	kmem_cache_min = 1 << kmem_cache_shift;
-	kmem_cache_mask = kmem_cache_min - 1;
-	for (i = 1; i <= KMEM_CACHE_COUNT; i++) {
-		sz = i << kmem_cache_shift;
-		if (sz > PAGE_SIZE) {
-			break;
-		}
-		kmem_cache_max = sz;
-		kc = &kmem_cache[i];
-		kc->kc_pa.pa_pagesz = sz;
-		kc->kc_pa.pa_alloc = kmem_poolpage_alloc;
-		kc->kc_pa.pa_free = kmem_poolpage_free;
-		sprintf(kc->kc_name, "kmem-%zu", sz);
-		kc->kc_cache = pool_cache_init(sz,
-		    KMEM_QUANTUM_SIZE, 0, PR_NOALIGN | PR_NOTOUCH,
-		    kc->kc_name, &kc->kc_pa, IPL_NONE,
-		    NULL, NULL, NULL);
-		KASSERT(kc->kc_cache != NULL);
-	}
+void *
+kmem_alloc(size_t size, km_flag_t kmflags)
+{
+
+	KASSERT(!cpu_intr_p());
+	KASSERT(!cpu_softintr_p());
+	return kmem_intr_alloc(size, kmflags);
 }
 
-size_t
-kmem_roundup_size(size_t size)
+/*
+ * kmem_zalloc: allocate zeroed wired memory.
+ * => must not be called from interrupt context.
+ */
+
+void *
+kmem_zalloc(size_t size, km_flag_t kmflags)
 {
 
-	return vmem_roundup_size(kmem_arena, size);
+	KASSERT(!cpu_intr_p());
+	KASSERT(!cpu_softintr_p());
+	return kmem_intr_zalloc(size, kmflags);
 }
 
-/* ---- uvm glue */
+/*
+ * kmem_free: free wired memory allocated by kmem_alloc.
+ * => must not be called from interrupt context.
+ */
 
-static int
-kmem_backend_alloc(void *dummy, vmem_size_t size, vmem_size_t *resultsize,
-    vm_flag_t vmflags, vmem_addr_t *addrp)
+void
+kmem_free(void *p, size_t size)
 {
-	uvm_flag_t uflags;
-	vaddr_t va;
 
-	KASSERT(dummy == NULL);
-	KASSERT(size != 0);
-	KASSERT((vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);
-	KASSERT((~vmflags & (VM_SLEEP|VM_NOSLEEP)) != 0);
+	KASSERT(!cpu_intr_p());
+	KASSERT(!cpu_softintr_p());
+	kmem_intr_free(p, size);
+}
 
-	if ((vmflags & VM_NOSLEEP) != 0) {
-		uflags = UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT;
-	} else {
-		uflags = UVM_KMF_WAITVA;
+static void
+kmem_create_caches(const struct kmem_cache_info *array,
+    pool_cache_t alloc_table[], size_t maxsize)
+{
+	size_t table_unit = (1 << KMEM_SHIFT);
+	size_t size = table_unit;
+	int i;
+
+	for (i = 0; array[i].kc_size != 0 ; i++) {
+		size_t cache_size = array[i].kc_size;
+		size_t align;
+
+		if ((cache_size & (CACHE_LINE_SIZE - 1)) == 0)
+			align = CACHE_LINE_SIZE;
+		else if ((cache_size & (PAGE_SIZE - 1)) == 0)
+			align = PAGE_SIZE;
+		else
+			align = KMEM_ALIGN;
+
+		const char *name = array[i].kc_name;
+		pool_cache_t pc;
+		int flags = PR_NOALIGN;
+		if (cache_size < CACHE_LINE_SIZE)
+			flags |= PR_NOTOUCH;
+
+		/* check if we reached the requested size */
+		if (cache_size > maxsize)
+			break;
+
+		kmem_cache_max = cache_size;
+
+#if defined(KMEM_POISON)
+		pc = pool_cache_init(cache_size, align, 0, flags,
+		    name, &pool_allocator_kmem, IPL_VM, kmem_poison_ctor,
+		    NULL, (void *)cache_size);
+#else /* defined(KMEM_POISON) */
+		pc = pool_cache_init(cache_size, align, 0, flags,
+		    name, &pool_allocator_kmem, IPL_VM, NULL, NULL, NULL);
+#endif /* defined(KMEM_POISON) */
+
+		while (size <= cache_size) {
+			alloc_table[(size - 1) >> KMEM_SHIFT] = pc;
+			size += table_unit;
+		}
 	}
-	*resultsize = size = round_page(size);
-	va = uvm_km_alloc(kernel_map, size, 0,
-	    uflags | UVM_KMF_WIRED | UVM_KMF_CANFAIL);
-	if (va == 0)
-		return ENOMEM;
-	kmem_poison_fill((void *)va, size);
-	*addrp = (vmem_addr_t)va;
-	return 0;
 }
 
-static void
-kmem_backend_free(void *dummy, vmem_addr_t addr, vmem_size_t size)
+void
+kmem_init(void)
 {
 
-	KASSERT(dummy == NULL);
-	KASSERT(addr != 0);
-	KASSERT(size != 0);
-	KASSERT(size == round_page(size));
+#ifdef KMEM_GUARD
+	uvm_kmguard_init(&kmem_guard, &kmem_guard_depth, &kmem_guard_size,
+		kernel_map);
+#endif
 
-	kmem_poison_check((void *)addr, size);
-	uvm_km_free(kernel_map, (vaddr_t)addr, size, UVM_KMF_WIRED);
+	kmem_create_caches(kmem_cache_sizes, kmem_cache, KMEM_MAXSIZE);
 }
 
-static int
-kmem_kva_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
+size_t
+kmem_roundup_size(size_t size)
 {
-	vmem_t *vm = obj;
 
-	vmem_reap(vm);
-	return CALLBACK_CHAIN_CONTINUE;
+	return (size + (KMEM_ALIGN - 1)) & ~(KMEM_ALIGN - 1);
 }
 
 /* ---- debug */
@@ -397,17 +370,27 @@ kmem_kva_reclaim_callback(struct callbac
 #if defined(KMEM_POISON)
 
 #if defined(_LP64)
-#define	PRIME	0x9e37fffffffc0001UL
+#define PRIME 0x9e37fffffffc0001UL
 #else /* defined(_LP64) */
-#define	PRIME	0x9e3779b1
+#define PRIME 0x9e3779b1
 #endif /* defined(_LP64) */
 
 static inline uint8_t
 kmem_poison_pattern(const void *p)
 {
 
-	return (uint8_t)((((uintptr_t)p) * PRIME)
-	    >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
+	return (uint8_t)(((uintptr_t)p) * PRIME
+	   >> ((sizeof(uintptr_t) - sizeof(uint8_t))) * CHAR_BIT);
+}
+
+static int
+kmem_poison_ctor(void *arg, void *obj, int flag)
+{
+	size_t sz = (size_t)arg;
+
+	kmem_poison_fill(obj, sz);
+
+	return 0;
 }
 
 static void
@@ -437,7 +420,7 @@ kmem_poison_check(void *p, size_t sz)
 
 		if (*cp != expected) {
 			panic("%s: %p: 0x%02x != 0x%02x\n",
-			    __func__, cp, *cp, expected);
+			   __func__, cp, *cp, expected);
 		}
 		cp++;
 	}
@@ -449,16 +432,20 @@ kmem_poison_check(void *p, size_t sz)
 static void
 kmem_size_set(void *p, size_t sz)
 {
+	void *szp;
 
-	memcpy(p, &sz, sizeof(sz));
+	szp = (uint8_t *)p + sz - SIZE_SIZE;
+	memcpy(szp, &sz, sizeof(sz));
 }
 
 static void
-kmem_size_check(const void *p, size_t sz)
+kmem_size_check(void *p, size_t sz)
 {
+	uint8_t *szp;
 	size_t psz;
 
-	memcpy(&psz, p, sizeof(psz));
+	szp = (uint8_t *)p + sz - SIZE_SIZE;
+	memcpy(&psz, szp, sizeof(psz));
 	if (psz != sz) {
 		panic("kmem_free(%p, %zu) != allocated size %zu",
 		    (const uint8_t *)p + SIZE_SIZE, sz - SIZE_SIZE, psz);
Index: sys/kern/subr_percpu.c
===================================================================
RCS file: /cvsroot/src/sys/kern/subr_percpu.c,v
retrieving revision 1.15
diff -u -p -r1.15 subr_percpu.c
--- sys/kern/subr_percpu.c	2 Sep 2011 22:25:08 -0000	1.15
+++ sys/kern/subr_percpu.c	23 Jan 2012 22:02:13 -0000
@@ -161,7 +161,7 @@ percpu_cpu_enlarge(size_t size)
  */
 
 static int
-percpu_backend_alloc(void *dummy, vmem_size_t size, vmem_size_t *resultsize,
+percpu_backend_alloc(vmem_t *dummy, vmem_size_t size, vmem_size_t *resultsize,
     vm_flag_t vmflags, vmem_addr_t *addrp)
 {
 	unsigned int offset;
@@ -218,7 +218,7 @@ percpu_init(void)
 	mutex_init(&percpu_allocation_lock, MUTEX_DEFAULT, IPL_NONE);
 	percpu_nextoff = PERCPU_QUANTUM_SIZE;
 
-	percpu_offset_arena = vmem_create("percpu", 0, 0, PERCPU_QUANTUM_SIZE,
+	percpu_offset_arena = vmem_xcreate("percpu", 0, 0, PERCPU_QUANTUM_SIZE,
 	    percpu_backend_alloc, NULL, NULL, PERCPU_QCACHE_MAX, VM_SLEEP,
 	    IPL_NONE);
 }
Index: sys/kern/subr_pool.c
===================================================================
RCS file: /cvsroot/src/sys/kern/subr_pool.c,v
retrieving revision 1.190
diff -u -p -r1.190 subr_pool.c
--- sys/kern/subr_pool.c	27 Sep 2011 01:02:39 -0000	1.190
+++ sys/kern/subr_pool.c	23 Jan 2012 22:02:14 -0000
@@ -46,6 +46,7 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,
 #include <sys/errno.h>
 #include <sys/kernel.h>
 #include <sys/malloc.h>
+#include <sys/vmem.h>
 #include <sys/pool.h>
 #include <sys/syslog.h>
 #include <sys/debug.h>
@@ -55,9 +56,6 @@ __KERNEL_RCSID(0, "$NetBSD: subr_pool.c,
 #include <sys/atomic.h>
 
 #include <uvm/uvm_extern.h>
-#ifdef DIAGNOSTIC
-#include <uvm/uvm_km.h>	/* uvm_km_va_drain */
-#endif
 
 /*
  * Pool resource management utility.
@@ -86,16 +84,14 @@ static struct pool phpool[PHPOOL_MAX];
 static struct pool psppool;
 #endif
 
-static SLIST_HEAD(, pool_allocator) pa_deferinitq =
-    SLIST_HEAD_INITIALIZER(pa_deferinitq);
-
 static void *pool_page_alloc_meta(struct pool *, int);
 static void pool_page_free_meta(struct pool *, void *);
 
 /* allocator for pool metadata */
 struct pool_allocator pool_allocator_meta = {
-	pool_page_alloc_meta, pool_page_free_meta,
-	.pa_backingmapptr = &kmem_map,
+	.pa_alloc = pool_page_alloc_meta,
+	.pa_free = pool_page_free_meta,
+	.pa_pagesz = 0
 };
 
 /* # of seconds to retain page after last use */
@@ -529,108 +525,59 @@ pr_rmpage(struct pool *pp, struct pool_i
 	pool_update_curpage(pp);
 }
 
-static bool
-pa_starved_p(struct pool_allocator *pa)
-{
-
-	if (pa->pa_backingmap != NULL) {
-		return vm_map_starved_p(pa->pa_backingmap);
-	}
-	return false;
-}
-
-static int
-pool_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
-{
-	struct pool *pp = obj;
-	struct pool_allocator *pa = pp->pr_alloc;
-
-	KASSERT(&pp->pr_reclaimerentry == ce);
-	pool_reclaim(pp);
-	if (!pa_starved_p(pa)) {
-		return CALLBACK_CHAIN_ABORT;
-	}
-	return CALLBACK_CHAIN_CONTINUE;
-}
-
-static void
-pool_reclaim_register(struct pool *pp)
-{
-	struct vm_map *map = pp->pr_alloc->pa_backingmap;
-	int s;
-
-	if (map == NULL) {
-		return;
-	}
-
-	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
-	callback_register(&vm_map_to_kernel(map)->vmk_reclaim_callback,
-	    &pp->pr_reclaimerentry, pp, pool_reclaim_callback);
-	splx(s);
-
-#ifdef DIAGNOSTIC
-	/* Diagnostic drain attempt. */
-	uvm_km_va_drain(map, 0);
-#endif
-}
-
-static void
-pool_reclaim_unregister(struct pool *pp)
-{
-	struct vm_map *map = pp->pr_alloc->pa_backingmap;
-	int s;
-
-	if (map == NULL) {
-		return;
-	}
-
-	s = splvm(); /* not necessary for INTRSAFE maps, but don't care. */
-	callback_unregister(&vm_map_to_kernel(map)->vmk_reclaim_callback,
-	    &pp->pr_reclaimerentry);
-	splx(s);
-}
-
-static void
-pa_reclaim_register(struct pool_allocator *pa)
-{
-	struct vm_map *map = *pa->pa_backingmapptr;
-	struct pool *pp;
-
-	KASSERT(pa->pa_backingmap == NULL);
-	if (map == NULL) {
-		SLIST_INSERT_HEAD(&pa_deferinitq, pa, pa_q);
-		return;
-	}
-	pa->pa_backingmap = map;
-	TAILQ_FOREACH(pp, &pa->pa_list, pr_alloc_list) {
-		pool_reclaim_register(pp);
-	}
-}
-
 /*
  * Initialize all the pools listed in the "pools" link set.
  */
 void
 pool_subsystem_init(void)
 {
-	struct pool_allocator *pa;
+	int idx;
+	size_t size;
 
 	mutex_init(&pool_head_lock, MUTEX_DEFAULT, IPL_NONE);
 	mutex_init(&pool_allocator_lock, MUTEX_DEFAULT, IPL_NONE);
 	cv_init(&pool_busy, "poolbusy");
 
-	while ((pa = SLIST_FIRST(&pa_deferinitq)) != NULL) {
-		KASSERT(pa->pa_backingmapptr != NULL);
-		KASSERT(*pa->pa_backingmapptr != NULL);
-		SLIST_REMOVE_HEAD(&pa_deferinitq, pa_q);
-		pa_reclaim_register(pa);
+	/*
+	 * Initialize private page header pool and cache magazine pool if we
+	 * haven't done so yet.
+	 */
+	for (idx = 0; idx < PHPOOL_MAX; idx++) {
+		static char phpool_names[PHPOOL_MAX][6+1+6+1];
+		int nelem;
+		size_t sz;
+
+		nelem = PHPOOL_FREELIST_NELEM(idx);
+		snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
+		    "phpool-%d", nelem);
+		sz = sizeof(struct pool_item_header);
+		if (nelem) {
+			sz = offsetof(struct pool_item_header,
+			    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
+		}
+		pool_init(&phpool[idx], sz, 0, 0, 0,
+		    phpool_names[idx], &pool_allocator_meta, IPL_VM);
 	}
+#ifdef POOL_SUBPAGE
+	pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
+	    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
+#endif
+
+	size = sizeof(pcg_t) +
+	    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
+	pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
+	    "pcgnormal", &pool_allocator_meta, IPL_VM);
+
+	size = sizeof(pcg_t) +
+	    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
+	pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
+	    "pcglarge", &pool_allocator_meta, IPL_VM);
 
 	pool_init(&cache_pool, sizeof(struct pool_cache), coherency_unit,
-	    0, 0, "pcache", &pool_allocator_nointr, IPL_NONE);
+	    0, 0, "pcache", &pool_allocator_meta, IPL_NONE);
 
 	pool_init(&cache_cpu_pool, sizeof(pool_cache_cpu_t), coherency_unit,
-	    0, 0, "pcachecpu", &pool_allocator_nointr, IPL_NONE);
+	    0, 0, "pcachecpu", &pool_allocator_meta, IPL_NONE);
 }
 
 /*
@@ -688,10 +635,6 @@ pool_init(struct pool *pp, size_t size, 
 		mutex_init(&palloc->pa_lock, MUTEX_DEFAULT, IPL_VM);
 		palloc->pa_pagemask = ~(palloc->pa_pagesz - 1);
 		palloc->pa_pageshift = ffs(palloc->pa_pagesz) - 1;
-
-		if (palloc->pa_backingmapptr != NULL) {
-			pa_reclaim_register(palloc);
-		}
 	}
 	if (!cold)
 		mutex_exit(&pool_allocator_lock);
@@ -826,45 +769,6 @@ pool_init(struct pool *pp, size_t size, 
 	cv_init(&pp->pr_cv, wchan);
 	pp->pr_ipl = ipl;
 
-	/*
-	 * Initialize private page header pool and cache magazine pool if we
-	 * haven't done so yet.
-	 * XXX LOCKING.
-	 */
-	if (phpool[0].pr_size == 0) {
-		int idx;
-		for (idx = 0; idx < PHPOOL_MAX; idx++) {
-			static char phpool_names[PHPOOL_MAX][6+1+6+1];
-			int nelem;
-			size_t sz;
-
-			nelem = PHPOOL_FREELIST_NELEM(idx);
-			snprintf(phpool_names[idx], sizeof(phpool_names[idx]),
-			    "phpool-%d", nelem);
-			sz = sizeof(struct pool_item_header);
-			if (nelem) {
-				sz = offsetof(struct pool_item_header,
-				    ph_bitmap[howmany(nelem, BITMAP_SIZE)]);
-			}
-			pool_init(&phpool[idx], sz, 0, 0, 0,
-			    phpool_names[idx], &pool_allocator_meta, IPL_VM);
-		}
-#ifdef POOL_SUBPAGE
-		pool_init(&psppool, POOL_SUBPAGE, POOL_SUBPAGE, 0,
-		    PR_RECURSIVE, "psppool", &pool_allocator_meta, IPL_VM);
-#endif
-
-		size = sizeof(pcg_t) +
-		    (PCG_NOBJECTS_NORMAL - 1) * sizeof(pcgpair_t);
-		pool_init(&pcg_normal_pool, size, coherency_unit, 0, 0,
-		    "pcgnormal", &pool_allocator_meta, IPL_VM);
-
-		size = sizeof(pcg_t) +
-		    (PCG_NOBJECTS_LARGE - 1) * sizeof(pcgpair_t);
-		pool_init(&pcg_large_pool, size, coherency_unit, 0, 0,
-		    "pcglarge", &pool_allocator_meta, IPL_VM);
-	}
-
 	/* Insert into the list of all pools. */
 	if (!cold)
 		mutex_enter(&pool_head_lock);
@@ -885,8 +789,6 @@ pool_init(struct pool *pp, size_t size, 
 	TAILQ_INSERT_TAIL(&palloc->pa_list, pp, pr_alloc_list);
 	if (!cold)
 		mutex_exit(&palloc->pa_lock);
-
-	pool_reclaim_register(pp);
 }
 
 /*
@@ -908,7 +810,6 @@ pool_destroy(struct pool *pp)
 	mutex_exit(&pool_head_lock);
 
 	/* Remove this pool from its allocator's list of pools. */
-	pool_reclaim_unregister(pp);
 	mutex_enter(&pp->pr_alloc->pa_lock);
 	TAILQ_REMOVE(&pp->pr_alloc->pa_list, pp, pr_alloc_list);
 	mutex_exit(&pp->pr_alloc->pa_lock);
@@ -1674,8 +1575,7 @@ pool_reclaim(struct pool *pp)
 			break;
 
 		KASSERT(ph->ph_nmissing == 0);
-		if (curtime - ph->ph_time < pool_inactive_time
-		    && !pa_starved_p(pp->pr_alloc))
+		if (curtime - ph->ph_time < pool_inactive_time)
 			continue;
 
 		/*
@@ -2157,6 +2057,19 @@ pool_cache_bootstrap(pool_cache_t pc, si
 void
 pool_cache_destroy(pool_cache_t pc)
 {
+
+	pool_cache_bootstrap_destroy(pc);
+	pool_put(&cache_pool, pc);
+}
+
+/*
+ * pool_cache_bootstrap_destroy:
+ *
+ *	Destroy a pool cache initialized by pool_cache_bootstrap;
+ *	unlike pool_cache_destroy, the pool_cache_t itself is not freed.
+ */
+void
+pool_cache_bootstrap_destroy(pool_cache_t pc)
+{
 	struct pool *pp = &pc->pc_pool;
 	u_int i;
 
@@ -2182,7 +2095,6 @@ pool_cache_destroy(pool_cache_t pc)
 	/* Finally, destroy it. */
 	mutex_destroy(&pc->pc_lock);
 	pool_destroy(pp);
-	pool_put(&cache_pool, pc);
 }
 
 /*
@@ -2806,13 +2718,13 @@ void	pool_page_free(struct pool *, void 
 
 #ifdef POOL_SUBPAGE
 struct pool_allocator pool_allocator_kmem_fullpage = {
-	pool_page_alloc, pool_page_free, 0,
-	.pa_backingmapptr = &kmem_map,
+	pool_page_alloc, pool_page_free, 0
 };
 #else
 struct pool_allocator pool_allocator_kmem = {
-	pool_page_alloc, pool_page_free, 0,
-	.pa_backingmapptr = &kmem_map,
+	.pa_alloc = pool_page_alloc,
+	.pa_free = pool_page_free,
+	.pa_pagesz = 0
 };
 #endif
 
@@ -2822,12 +2734,12 @@ void	pool_page_free_nointr(struct pool *
 #ifdef POOL_SUBPAGE
 struct pool_allocator pool_allocator_nointr_fullpage = {
 	pool_page_alloc_nointr, pool_page_free_nointr, 0,
-	.pa_backingmapptr = &kernel_map,
 };
 #else
 struct pool_allocator pool_allocator_nointr = {
-	pool_page_alloc_nointr, pool_page_free_nointr, 0,
-	.pa_backingmapptr = &kernel_map,
+	.pa_alloc = pool_page_alloc,
+	.pa_free = pool_page_free,
+	.pa_pagesz = 0
 };
 #endif
 
@@ -2837,7 +2749,6 @@ void	pool_subpage_free(struct pool *, vo
 
 struct pool_allocator pool_allocator_kmem = {
 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
-	.pa_backingmapptr = &kmem_map,
 };
 
 void	*pool_subpage_alloc_nointr(struct pool *, int);
@@ -2845,7 +2756,6 @@ void	pool_subpage_free_nointr(struct poo
 
 struct pool_allocator pool_allocator_nointr = {
 	pool_subpage_alloc, pool_subpage_free, POOL_SUBPAGE,
-	.pa_backingmapptr = &kmem_map,
 };
 #endif /* POOL_SUBPAGE */
 
@@ -2882,30 +2792,48 @@ void *
 pool_page_alloc(struct pool *pp, int flags)
 {
 	bool waitok = (flags & PR_WAITOK) ? true : false;
+	int rc;
+	vmem_addr_t va;
+
+	rc = uvm_km_kmem_alloc(kmem_va_arena,
+	    pp->pr_alloc->pa_pagesz,
+	    ((waitok ? VM_SLEEP : VM_NOSLEEP) | VM_INSTANTFIT), &va);
 
-	return ((void *) uvm_km_alloc_poolpage_cache(kmem_map, waitok));
+	if (rc != 0)
+		return NULL;
+	else
+		return (void *)va;
 }
 
 void
 pool_page_free(struct pool *pp, void *v)
 {
 
-	uvm_km_free_poolpage_cache(kmem_map, (vaddr_t) v);
+	uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, pp->pr_alloc->pa_pagesz);
 }
 
 static void *
 pool_page_alloc_meta(struct pool *pp, int flags)
 {
 	bool waitok = (flags & PR_WAITOK) ? true : false;
+	int rc;
+	vmem_addr_t addr;
+
+	rc = vmem_alloc(kmem_meta_arena, pp->pr_alloc->pa_pagesz,
+	    (waitok ? VM_SLEEP : VM_NOSLEEP) | VM_INSTANTFIT, &addr);
 
-	return ((void *) uvm_km_alloc_poolpage(kmem_map, waitok));
+	if (rc != 0)
+		return 0;
+	else
+		return (void *)addr;
 }
 
 static void
 pool_page_free_meta(struct pool *pp, void *v)
 {
 
-	uvm_km_free_poolpage(kmem_map, (vaddr_t) v);
+	vmem_free(kmem_meta_arena, (vmem_addr_t)v,
+	    pp->pr_alloc->pa_pagesz);
 }
 
 #ifdef POOL_SUBPAGE
@@ -2937,20 +2865,6 @@ pool_subpage_free_nointr(struct pool *pp
 	pool_subpage_free(pp, v);
 }
 #endif /* POOL_SUBPAGE */
-void *
-pool_page_alloc_nointr(struct pool *pp, int flags)
-{
-	bool waitok = (flags & PR_WAITOK) ? true : false;
-
-	return ((void *) uvm_km_alloc_poolpage_cache(kernel_map, waitok));
-}
-
-void
-pool_page_free_nointr(struct pool *pp, void *v)
-{
-
-	uvm_km_free_poolpage_cache(kernel_map, (vaddr_t) v);
-}
 
 #if defined(DDB)
 static bool
Index: sys/kern/subr_vmem.c
===================================================================
RCS file: /cvsroot/src/sys/kern/subr_vmem.c,v
retrieving revision 1.65
diff -u -p -r1.65 subr_vmem.c
--- sys/kern/subr_vmem.c	20 Oct 2011 03:05:14 -0000	1.65
+++ sys/kern/subr_vmem.c	23 Jan 2012 22:02:14 -0000
@@ -31,10 +31,6 @@
  * -	Magazines and Vmem: Extending the Slab Allocator
  *	to Many CPUs and Arbitrary Resources
  *	http://www.usenix.org/event/usenix01/bonwick.html
- *
- * todo:
- * -	decide how to import segments for vmem_xalloc.
- * -	don't rely on malloc(9).
  */
 
 #include <sys/cdefs.h>
@@ -54,27 +50,54 @@ __KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,
 #include <sys/systm.h>
 #include <sys/kernel.h>	/* hz */
 #include <sys/callout.h>
-#include <sys/malloc.h>
-#include <sys/once.h>
+#include <sys/kmem.h>
 #include <sys/pool.h>
 #include <sys/vmem.h>
 #include <sys/workqueue.h>
+#include <sys/atomic.h>
+#include <uvm/uvm.h>
+#include <uvm/uvm_extern.h>
+#include <uvm/uvm_km.h>
+#include <uvm/uvm_page.h>
+#include <uvm/uvm_pdaemon.h>
 #else /* defined(_KERNEL) */
 #include "../sys/vmem.h"
 #endif /* defined(_KERNEL) */
 
+
 #if defined(_KERNEL)
+#include <sys/evcnt.h>
+#define VMEM_EVCNT_DEFINE(name) \
+struct evcnt vmem_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
+    "vmemev", #name); \
+EVCNT_ATTACH_STATIC(vmem_evcnt_##name);
+#define VMEM_EVCNT_INCR(ev)	vmem_evcnt_##ev.ev_count++
+#define VMEM_EVCNT_DECR(ev)	vmem_evcnt_##ev.ev_count--
+
+VMEM_EVCNT_DEFINE(bt_pages)
+VMEM_EVCNT_DEFINE(bt_count)
+VMEM_EVCNT_DEFINE(bt_inuse)
+
 #define	LOCK_DECL(name)		\
     kmutex_t name; char lockpad[COHERENCY_UNIT - sizeof(kmutex_t)]
+
+#define CONDVAR_DECL(name)	\
+    kcondvar_t name;
+
 #else /* defined(_KERNEL) */
 #include <errno.h>
 #include <assert.h>
 #include <stdlib.h>
 #include <string.h>
 
+#define VMEM_EVCNT_INCR(ev)	/* nothing */
+#define VMEM_EVCNT_DECR(ev)	/* nothing */
+
 #define	UNITTEST
 #define	KASSERT(a)		assert(a)
 #define	LOCK_DECL(name)		/* nothing */
+#define	CONDVAR_DECL(name)	/* nothing */
+#define	VMEM_CONDVAR_INIT(vm, wchan)	/* nothing */
 #define	mutex_init(a, b, c)	/* nothing */
 #define	mutex_destroy(a)	/* nothing */
 #define	mutex_enter(a)		/* nothing */
@@ -98,7 +121,7 @@ static void vmem_check(vmem_t *);
 
 #define	VMEM_HASHSIZE_MIN	1	/* XXX */
 #define	VMEM_HASHSIZE_MAX	65536	/* XXX */
-#define	VMEM_HASHSIZE_INIT	128
+#define	VMEM_HASHSIZE_INIT	1
 
 #define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)
 
@@ -124,19 +147,24 @@ typedef struct qcache qcache_t;
 
 /* vmem arena */
 struct vmem {
+	CONDVAR_DECL(vm_cv);
 	LOCK_DECL(vm_lock);
-	int (*vm_importfn)(void *, vmem_size_t, vmem_size_t *,
-	    vm_flag_t, vmem_addr_t *);
-	void (*vm_releasefn)(void *, vmem_addr_t, vmem_size_t);
-	vmem_t *vm_source;
+	vm_flag_t vm_flags;
+	vmem_import_t *vm_importfn;
+	vmem_release_t *vm_releasefn;
+	size_t vm_nfreetags;
+	LIST_HEAD(, vmem_btag) vm_freetags;
 	void *vm_arg;
 	struct vmem_seglist vm_seglist;
 	struct vmem_freelist vm_freelist[VMEM_MAXORDER];
 	size_t vm_hashsize;
 	size_t vm_nbusytag;
 	struct vmem_hashlist *vm_hashlist;
+	struct vmem_hashlist vm_hash0;
 	size_t vm_quantum_mask;
 	int vm_quantum_shift;
+	size_t vm_size;
+	size_t vm_inuse;
 	char vm_name[VMEM_NAME_MAX+1];
 	LIST_ENTRY(vmem) vm_alllist;
 
@@ -156,6 +184,13 @@ struct vmem {
 #define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
 #define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))
 
+#if defined(_KERNEL)
+#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
+#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
+#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
+#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)
+#endif /* defined(_KERNEL) */
+
 /* boundary tag */
 struct vmem_btag {
 	CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
@@ -180,6 +215,12 @@ struct vmem_btag {
 
 typedef struct vmem_btag bt_t;
 
+#if defined(_KERNEL)
+static kmutex_t vmem_list_lock;
+static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
+#endif /* defined(_KERNEL) */
+
+
 /* ---- misc */
 
 #define	VMEM_ALIGNUP(addr, align) \
@@ -198,36 +239,180 @@ typedef struct vmem_btag bt_t;
 #define	bt_free(vm, bt)		free(bt)
 #else	/* !defined(_KERNEL) */
 
-static MALLOC_DEFINE(M_VMEM, "vmem", "vmem");
-
 static inline void *
 xmalloc(size_t sz, vm_flag_t flags)
 {
-	return malloc(sz, M_VMEM,
-	    M_CANFAIL | ((flags & VM_SLEEP) ? M_WAITOK : M_NOWAIT));
+
+#if defined(_KERNEL)
+	return kmem_alloc(sz, (flags & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP);
+#else /* defined(_KERNEL) */
+	return malloc(sz);
+#endif /* defined(_KERNEL) */
 }
 
 static inline void
-xfree(void *p)
+xfree(void *p, size_t sz)
 {
-	free(p, M_VMEM);
+
+#if defined(_KERNEL)
+	kmem_free(p, sz);
+#else /* defined(_KERNEL) */
+	free(p);
+#endif /* defined(_KERNEL) */
 }
 
+#if defined(_KERNEL)
+
+#define BT_MINRESERVE 6
+#define BT_MAXFREE 64
+#define STATIC_VMEM_COUNT 5
+#define STATIC_BT_COUNT 200
+#define STATIC_QC_POOL_COUNT (VMEM_QCACHE_IDX_MAX + 1)
+
+static struct vmem static_vmems[STATIC_VMEM_COUNT];
+static int static_vmem_count = STATIC_VMEM_COUNT;
+
+static struct vmem_btag static_bts[STATIC_BT_COUNT];
+static int static_bt_count = STATIC_BT_COUNT;
+
+static struct pool_cache static_qc_pools[STATIC_QC_POOL_COUNT];
+static int static_qc_pool_count = STATIC_QC_POOL_COUNT;
+
+vmem_t *kmem_va_meta_arena;
+vmem_t *kmem_meta_arena;
+
+static kmutex_t vmem_btag_lock;
+static LIST_HEAD(, vmem_btag) vmem_btag_freelist;
+static size_t vmem_btag_freelist_count = 0;
+static size_t vmem_btag_count = STATIC_BT_COUNT;
+
 /* ---- boundary tag */
 
-static struct pool_cache bt_cache;
+#define BT_PER_PAGE \
+	(PAGE_SIZE / sizeof(bt_t))
+
+static int bt_refill(vmem_t *vm, vm_flag_t flags);
+
+static int
+bt_refillglobal(vm_flag_t flags)
+{
+	vmem_addr_t va;
+	bt_t *btp;
+	bt_t *bt;
+	int i;
+
+	mutex_enter(&vmem_btag_lock);
+	if (vmem_btag_freelist_count > (BT_MINRESERVE * 16)) {
+		mutex_exit(&vmem_btag_lock);
+		return 0;
+	}
+
+	if (vmem_alloc(kmem_meta_arena, PAGE_SIZE,
+	    (flags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va) != 0) {
+		mutex_exit(&vmem_btag_lock);
+		return ENOMEM;
+	}
+	VMEM_EVCNT_INCR(bt_pages);
+
+	btp = (void *) va;
+	for (i = 0; i < (BT_PER_PAGE); i++) {
+		bt = btp;
+		memset(bt, 0, sizeof(*bt));
+		LIST_INSERT_HEAD(&vmem_btag_freelist, bt,
+		    bt_freelist);
+		vmem_btag_freelist_count++;
+		vmem_btag_count++;
+		VMEM_EVCNT_INCR(bt_count);
+		btp++;
+	}
+	mutex_exit(&vmem_btag_lock);
+
+	bt_refill(kmem_arena, (flags & ~VM_FITMASK) | VM_INSTANTFIT);
+	bt_refill(kmem_va_meta_arena, (flags & ~VM_FITMASK) | VM_INSTANTFIT);
+	bt_refill(kmem_meta_arena, (flags & ~VM_FITMASK) | VM_INSTANTFIT);
+
+	return 0;
+}
+
+static int
+bt_refill(vmem_t *vm, vm_flag_t flags)
+{
+	bt_t *bt;
+
+	bt_refillglobal(flags);
+
+	VMEM_LOCK(vm);
+	mutex_enter(&vmem_btag_lock);
+	while (!LIST_EMPTY(&vmem_btag_freelist) &&
+	    vm->vm_nfreetags < (BT_MINRESERVE * 2)) {
+		bt = LIST_FIRST(&vmem_btag_freelist);
+		LIST_REMOVE(bt, bt_freelist);
+		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
+		vm->vm_nfreetags++;
+		vmem_btag_freelist_count--;
+	}
+	mutex_exit(&vmem_btag_lock);
+
+	if (vm->vm_nfreetags == 0) {
+		VMEM_UNLOCK(vm);
+		return ENOMEM;
+	}
+	VMEM_UNLOCK(vm);
+
+	return 0;
+}
+#endif /* defined(_KERNEL) */
 
 static inline bt_t *
 bt_alloc(vmem_t *vm, vm_flag_t flags)
 {
-	return pool_cache_get(&bt_cache,
-	    (flags & VM_SLEEP) ? PR_WAITOK : PR_NOWAIT);
+	bt_t *bt;
+
+#if defined(_KERNEL)
+again:
+	VMEM_LOCK(vm);
+	if (vm->vm_nfreetags < BT_MINRESERVE &&
+	    (flags & VM_POPULATING) == 0) {
+		VMEM_UNLOCK(vm);
+		if (bt_refill(vm, VM_NOSLEEP | VM_INSTANTFIT)) {
+			return NULL;
+		}
+		goto again;
+	}
+	bt = LIST_FIRST(&vm->vm_freetags);
+	LIST_REMOVE(bt, bt_freelist);
+	vm->vm_nfreetags--;
+	VMEM_UNLOCK(vm);
+	VMEM_EVCNT_INCR(bt_inuse);
+#else /* defined(_KERNEL) */
+	bt = malloc(sizeof *bt);
+#endif /* defined(_KERNEL) */
+
+	return bt;
 }
 
 static inline void
 bt_free(vmem_t *vm, bt_t *bt)
 {
-	pool_cache_put(&bt_cache, bt);
+
+#if defined(_KERNEL)
+	VMEM_LOCK(vm);
+	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
+	vm->vm_nfreetags++;
+	while (vm->vm_nfreetags > BT_MAXFREE) {
+		bt = LIST_FIRST(&vm->vm_freetags);
+		LIST_REMOVE(bt, bt_freelist);
+		vm->vm_nfreetags--;
+		mutex_enter(&vmem_btag_lock);
+		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
+		vmem_btag_freelist_count++;
+		mutex_exit(&vmem_btag_lock);
+	}
+	VMEM_UNLOCK(vm);
+	VMEM_EVCNT_DECR(bt_inuse);
+#else /* defined(_KERNEL) */
+	free(bt);
+#endif /* defined(_KERNEL) */
 }
 
 #endif	/* !defined(_KERNEL) */
@@ -378,11 +563,6 @@ bt_insfree(vmem_t *vm, bt_t *bt)
 
 /* ---- vmem internal functions */
 
-#if defined(_KERNEL)
-static kmutex_t vmem_list_lock;
-static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
-#endif /* defined(_KERNEL) */
-
 #if defined(QCACHE)
 static inline vm_flag_t
 prf_to_vmf(int prflags)
@@ -468,19 +648,36 @@ qc_init(vmem_t *vm, size_t qcache_max, i
 	for (i = qcache_idx_max; i > 0; i--) {
 		qcache_t *qc = &vm->vm_qcache_store[i - 1];
 		size_t size = i << vm->vm_quantum_shift;
+		pool_cache_t pc;
 
 		qc->qc_vmem = vm;
 		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
 		    vm->vm_name, size);
-		qc->qc_cache = pool_cache_init(size,
-		    ORDER2SIZE(vm->vm_quantum_shift), 0,
-		    PR_NOALIGN | PR_NOTOUCH /* XXX */,
-		    qc->qc_name, pa, ipl, NULL, NULL, NULL);
+
+		if (vm->vm_flags & VM_BOOTSTRAP) {
+			KASSERT(static_qc_pool_count > 0);
+			pc = &static_qc_pools[--static_qc_pool_count];
+			pool_cache_bootstrap(pc, size,
+			    ORDER2SIZE(vm->vm_quantum_shift), 0,
+			    PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
+			    qc->qc_name, pa, ipl, NULL, NULL, NULL);
+		} else {
+			pc = pool_cache_init(size,
+			    ORDER2SIZE(vm->vm_quantum_shift), 0,
+			    PR_NOALIGN | PR_NOTOUCH /* XXX */,
+			    qc->qc_name, pa, ipl, NULL, NULL, NULL);
+		}
+		qc->qc_cache = pc;
 		KASSERT(qc->qc_cache != NULL);	/* XXX */
 		if (prevqc != NULL &&
 		    qc->qc_cache->pc_pool.pr_itemsperpage ==
 		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
-			pool_cache_destroy(qc->qc_cache);
+			if (vm->vm_flags & VM_BOOTSTRAP) {
+				pool_cache_bootstrap_destroy(pc);
+				/* XXX static qc pool slot is not recycled */
+			} else {
+				pool_cache_destroy(qc->qc_cache);
+			}
 			vm->vm_qcache[i - 1] = prevqc;
 			continue;
 		}
@@ -505,46 +702,44 @@ qc_destroy(vmem_t *vm)
 		if (prevqc == qc) {
 			continue;
 		}
-		pool_cache_destroy(qc->qc_cache);
+		if (vm->vm_flags & VM_BOOTSTRAP) {
+			pool_cache_bootstrap_destroy(qc->qc_cache);
+		} else {
+			pool_cache_destroy(qc->qc_cache);
+		}
 		prevqc = qc;
 	}
 }
+#endif
 
-static bool
-qc_reap(vmem_t *vm)
+#if defined(_KERNEL)
+void
+vmem_bootstrap(void)
 {
-	const qcache_t *prevqc;
-	int i;
-	int qcache_idx_max;
-	bool didsomething = false;
 
-	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
-	prevqc = NULL;
-	for (i = 0; i < qcache_idx_max; i++) {
-		qcache_t *qc = vm->vm_qcache[i];
+	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_VM);
+	mutex_init(&vmem_btag_lock, MUTEX_DEFAULT, IPL_VM);
 
-		if (prevqc == qc) {
-			continue;
-		}
-		if (pool_cache_reclaim(qc->qc_cache) != 0) {
-			didsomething = true;
-		}
-		prevqc = qc;
+	while (static_bt_count-- > 0) {
+		bt_t *bt = &static_bts[static_bt_count];
+		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
+		VMEM_EVCNT_INCR(bt_count);
+		vmem_btag_freelist_count++;
 	}
-
-	return didsomething;
 }
-#endif /* defined(QCACHE) */
 
-#if defined(_KERNEL)
-static int
-vmem_init(void)
+void
+vmem_init(vmem_t *vm)
 {
 
-	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_NONE);
-	pool_cache_bootstrap(&bt_cache, sizeof(bt_t), 0, 0, 0, "vmembt",
-	    NULL, IPL_VM, NULL, NULL, NULL);
-	return 0;
+	kmem_va_meta_arena = vmem_create("vmem-va", 0, 0, PAGE_SIZE,
+	    vmem_alloc, vmem_free, vm,
+	    0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT,
+	    IPL_VM);
+
+	kmem_meta_arena = vmem_create("vmem-meta", 0, 0, PAGE_SIZE,
+	    uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
+	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
 }
 #endif /* defined(_KERNEL) */
 
@@ -582,6 +777,7 @@ vmem_add1(vmem_t *vm, vmem_addr_t addr, 
 	bt_insseg_tail(vm, btspan);
 	bt_insseg(vm, btfree, btspan);
 	bt_insfree(vm, btfree);
+	vm->vm_size += size;
 	VMEM_UNLOCK(vm);
 
 	return 0;
@@ -605,10 +801,26 @@ vmem_destroy1(vmem_t *vm)
 				bt_free(vm, bt);
 			}
 		}
-		xfree(vm->vm_hashlist);
+		if (vm->vm_hashlist != &vm->vm_hash0) {
+			xfree(vm->vm_hashlist,
+			    sizeof(struct vmem_hashlist *) * vm->vm_hashsize);
+		}
+	}
+
+	while (vm->vm_nfreetags > 0) {
+		bt_t *bt = LIST_FIRST(&vm->vm_freetags);
+		LIST_REMOVE(bt, bt_freelist);
+		vm->vm_nfreetags--;
+		mutex_enter(&vmem_btag_lock);
+#if defined (_KERNEL)
+		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
+		vmem_btag_freelist_count++;
+#endif /* defined(_KERNEL) */
+		mutex_exit(&vmem_btag_lock);
 	}
+
 	VMEM_LOCK_DESTROY(vm);
-	xfree(vm);
+	xfree(vm, sizeof(*vm));
 }
 
 static int
@@ -621,9 +833,21 @@ vmem_import(vmem_t *vm, vmem_size_t size
 		return EINVAL;
 	}
 
-	rc = (*vm->vm_importfn)(vm->vm_arg, size, &size, flags, &addr);
-	if (rc != 0) {
-		return ENOMEM;
+	if (vm->vm_flags & VM_LARGEIMPORT) {
+		size *= 32;
+	}
+
+	if (vm->vm_flags & VM_XIMPORT) {
+		rc = ((vmem_ximport_t *)vm->vm_importfn)(vm->vm_arg, size,
+		    &size, flags, &addr);
+		if (rc != 0) {
+			return ENOMEM;
+		}
+	} else {
+		rc = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
+		if (rc != 0) {
+			return ENOMEM;
+		}
 	}
 
 	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) != 0) {
@@ -655,7 +879,8 @@ vmem_rehash(vmem_t *vm, size_t newhashsi
 	}
 
 	if (!VMEM_TRYLOCK(vm)) {
-		xfree(newhashlist);
+		xfree(newhashlist,
+		    sizeof(struct vmem_hashlist *) * newhashsize);
 		return EBUSY;
 	}
 	oldhashlist = vm->vm_hashlist;
@@ -674,7 +899,10 @@ vmem_rehash(vmem_t *vm, size_t newhashsi
 	}
 	VMEM_UNLOCK(vm);
 
-	xfree(oldhashlist);
+	if (oldhashlist != &vm->vm_hash0) {
+		xfree(oldhashlist,
+		    sizeof(struct vmem_hashlist *) * oldhashsize);
+	}
 
 	return 0;
 }
@@ -735,43 +963,40 @@ vmem_fit(const bt_t const *bt, vmem_size
 	return ENOMEM;
 }
 
-/* ---- vmem API */
 
 /*
- * vmem_create: create an arena.
- *
- * => must not be called from interrupt context.
+ * vmem_create_internal: creates a vmem arena.
  */
 
-vmem_t *
-vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
-    vmem_size_t quantum,
-    int (*importfn)(void *, vmem_size_t, vmem_size_t *, vm_flag_t,
-        vmem_addr_t *),
-    void (*releasefn)(void *, vmem_addr_t, vmem_size_t),
+static vmem_t *
+vmem_create_internal(const char *name, vmem_addr_t base, vmem_size_t size,
+    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
     void *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
 {
-	vmem_t *vm;
+	vmem_t *vm = NULL;
 	int i;
-#if defined(_KERNEL)
-	static ONCE_DECL(control);
-#endif /* defined(_KERNEL) */
 
 	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
 	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
 	KASSERT(quantum > 0);
 
+	if (flags & VM_BOOTSTRAP) {
 #if defined(_KERNEL)
-	if (RUN_ONCE(&control, vmem_init)) {
-		return NULL;
-	}
+		KASSERT(static_vmem_count > 0);
+		vm = &static_vmems[--static_vmem_count];
 #endif /* defined(_KERNEL) */
-	vm = xmalloc(sizeof(*vm), flags);
+	} else {
+		vm = xmalloc(sizeof(*vm), flags);
+	}
 	if (vm == NULL) {
 		return NULL;
 	}
 
+	VMEM_CONDVAR_INIT(vm, "vmem");
 	VMEM_LOCK_INIT(vm, ipl);
+	vm->vm_flags = flags;
+	vm->vm_nfreetags = 0;
+	LIST_INIT(&vm->vm_freetags);
 	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
 	vm->vm_quantum_mask = quantum - 1;
 	vm->vm_quantum_shift = SIZE2ORDER(quantum);
@@ -780,6 +1005,8 @@ vmem_create(const char *name, vmem_addr_
 	vm->vm_releasefn = releasefn;
 	vm->vm_arg = arg;
 	vm->vm_nbusytag = 0;
+	vm->vm_size = 0;
+	vm->vm_inuse = 0;
 #if defined(QCACHE)
 	qc_init(vm, qcache_max, ipl);
 #endif /* defined(QCACHE) */
@@ -789,7 +1016,10 @@ vmem_create(const char *name, vmem_addr_
 		LIST_INIT(&vm->vm_freelist[i]);
 	}
 	vm->vm_hashlist = NULL;
-	if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
+	if (flags & VM_BOOTSTRAP) {
+		vm->vm_hashsize = 1;
+		vm->vm_hashlist = &vm->vm_hash0;
+	} else if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
 		vmem_destroy1(vm);
 		return NULL;
 	}
@@ -802,6 +1032,10 @@ vmem_create(const char *name, vmem_addr_
 	}
 
 #if defined(_KERNEL)
+	if (flags & VM_BOOTSTRAP) {
+		bt_refill(vm, VM_NOSLEEP);
+	}
+
 	mutex_enter(&vmem_list_lock);
 	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
 	mutex_exit(&vmem_list_lock);
@@ -810,6 +1044,52 @@ vmem_create(const char *name, vmem_addr_
 	return vm;
 }
 
+
+/* ---- vmem API */
+
+/*
+ * vmem_create: create an arena.
+ *
+ * => must not be called from interrupt context.
+ */
+
+vmem_t *
+vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
+    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
+    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags,
+    int ipl)
+{
+
+	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
+	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
+	KASSERT((flags & (VM_XIMPORT)) == 0);
+
+	return vmem_create_internal(name, base, size, quantum,
+	    importfn, releasefn, source, qcache_max, flags, ipl);
+}
+
+/*
+ * vmem_xcreate: create an arena which takes an alternative import function.
+ *
+ * => must not be called from interrupt context.
+ */
+
+vmem_t *
+vmem_xcreate(const char *name, vmem_addr_t base, vmem_size_t size,
+    vmem_size_t quantum, vmem_ximport_t *importfn, vmem_release_t *releasefn,
+    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags,
+    int ipl)
+{
+
+	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
+	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
+	KASSERT((flags & (VM_XIMPORT)) == 0);
+
+	return vmem_create_internal(name, base, size, quantum,
+	    (vmem_import_t *)importfn, releasefn, source,
+	    qcache_max, flags | VM_XIMPORT, ipl);
+}
+
 void
 vmem_destroy(vmem_t *vm)
 {
@@ -999,6 +1279,7 @@ retry:
 		goto retry;
 	}
 	/* XXX */
+
 fail:
 	bt_free(vm, btnew);
 	bt_free(vm, btnew2);
@@ -1083,6 +1364,9 @@ vmem_xfree(vmem_t *vm, vmem_addr_t addr,
 {
 	bt_t *bt;
 	bt_t *t;
+	LIST_HEAD(, vmem_btag) tofree;
+
+	LIST_INIT(&tofree);
 
 	KASSERT(size > 0);
 
@@ -1097,6 +1381,8 @@ vmem_xfree(vmem_t *vm, vmem_addr_t addr,
 	bt_rembusy(vm, bt);
 	bt->bt_type = BT_TYPE_FREE;
 
+	vm->vm_inuse -= bt->bt_size;
+
 	/* coalesce */
 	t = CIRCLEQ_NEXT(bt, bt_seglist);
 	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
@@ -1104,7 +1390,7 @@ vmem_xfree(vmem_t *vm, vmem_addr_t addr,
 		bt_remfree(vm, t);
 		bt_remseg(vm, t);
 		bt->bt_size += t->bt_size;
-		bt_free(vm, t);
+		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
 	}
 	t = CIRCLEQ_PREV(bt, bt_seglist);
 	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
@@ -1113,7 +1399,7 @@ vmem_xfree(vmem_t *vm, vmem_addr_t addr,
 		bt_remseg(vm, t);
 		bt->bt_size += t->bt_size;
 		bt->bt_start = t->bt_start;
-		bt_free(vm, t);
+		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
 	}
 
 	t = CIRCLEQ_PREV(bt, bt_seglist);
@@ -1128,15 +1414,25 @@ vmem_xfree(vmem_t *vm, vmem_addr_t addr,
 		spanaddr = bt->bt_start;
 		spansize = bt->bt_size;
 		bt_remseg(vm, bt);
-		bt_free(vm, bt);
+		LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
 		bt_remseg(vm, t);
-		bt_free(vm, t);
+		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
+		vm->vm_size -= spansize;
 		VMEM_UNLOCK(vm);
 		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
 	} else {
 		bt_insfree(vm, bt);
 		VMEM_UNLOCK(vm);
 	}
+
+	while (!LIST_EMPTY(&tofree)) {
+		t = LIST_FIRST(&tofree);
+		LIST_REMOVE(t, bt_freelist);
+		bt_free(vm, t);
+	}
+#if defined(_KERNEL)
+	VMEM_CONDVAR_BROADCAST(vm);
+#endif /* defined(_KERNEL) */
 }
 
 /*
@@ -1154,20 +1450,24 @@ vmem_add(vmem_t *vm, vmem_addr_t addr, v
 }
 
 /*
- * vmem_reap: reap unused resources.
+ * vmem_size: return size information about an arena
  *
- * => return true if we successfully reaped something.
+ * => return the free and/or allocated size of the arena, per typemask
  */
-
-bool
-vmem_reap(vmem_t *vm)
+vmem_size_t
+vmem_size(vmem_t *vm, int typemask)
 {
-	bool didsomething = false;
 
-#if defined(QCACHE)
-	didsomething = qc_reap(vm);
-#endif /* defined(QCACHE) */
-	return didsomething;
+	switch (typemask) {
+	case VMEM_ALLOC:
+		return vm->vm_inuse;
+	case VMEM_FREE:
+		return vm->vm_size - vm->vm_inuse;
+	case VMEM_FREE|VMEM_ALLOC:
+		return vm->vm_size;
+	default:
+		panic("vmem_size");
+	}
 }
 
 /* ---- rehash */
@@ -1204,6 +1504,7 @@ vmem_rehash_all(struct work *wk, void *d
 		if (desired > current * 2 || desired * 2 < current) {
 			vmem_rehash(vm, desired, VM_NOSLEEP);
 		}
+		VMEM_CONDVAR_BROADCAST(vm);
 	}
 	mutex_exit(&vmem_list_lock);
 
@@ -1564,3 +1865,4 @@ main(void)
 	exit(EXIT_SUCCESS);
 }
 #endif /* defined(UNITTEST) */
+
Index: sys/kern/uipc_mbuf.c
===================================================================
RCS file: /cvsroot/src/sys/kern/uipc_mbuf.c,v
retrieving revision 1.143
diff -u -p -r1.143 uipc_mbuf.c
--- sys/kern/uipc_mbuf.c	31 Aug 2011 18:31:02 -0000	1.143
+++ sys/kern/uipc_mbuf.c	23 Jan 2012 22:02:15 -0000
@@ -154,7 +154,7 @@ nmbclusters_limit(void)
 	/* direct mapping, doesn't use space in kmem_map */
 	vsize_t max_size = physmem / 4;
 #else
-	vsize_t max_size = MIN(physmem / 4, nkmempages / 2);
+	vsize_t max_size = MIN(physmem / 4, (128 * 1024) / 2);
 #endif
 
 	max_size = max_size * PAGE_SIZE / MCLBYTES;
Index: sys/kern/uipc_socket.c
===================================================================
RCS file: /cvsroot/src/sys/kern/uipc_socket.c,v
retrieving revision 1.206
diff -u -p -r1.206 uipc_socket.c
--- sys/kern/uipc_socket.c	20 Dec 2011 23:56:28 -0000	1.206
+++ sys/kern/uipc_socket.c	23 Jan 2012 22:02:16 -0000
@@ -136,8 +136,6 @@ EVCNT_ATTACH_STATIC(sosend_kvalimit);
 
 #endif /* SOSEND_COUNTERS */
 
-static struct callback_entry sokva_reclaimerentry;
-
 #if defined(SOSEND_NO_LOAN) || defined(MULTIPROCESSOR)
 int sock_loan_thresh = -1;
 #else
@@ -384,19 +382,6 @@ sosend_loan(struct socket *so, struct ui
 	return (space);
 }
 
-static int
-sokva_reclaim_callback(struct callback_entry *ce, void *obj, void *arg)
-{
-
-	KASSERT(ce == &sokva_reclaimerentry);
-	KASSERT(obj == NULL);
-
-	if (!vm_map_starved_p(kernel_map)) {
-		return CALLBACK_CHAIN_ABORT;
-	}
-	return CALLBACK_CHAIN_CONTINUE;
-}
-
 struct mbuf *
 getsombuf(struct socket *so, int type)
 {
@@ -479,9 +464,6 @@ soinit(void)
 	if (sb_max_set(sb_max))
 		panic("bad initial sb_max value: %lu", sb_max);
 
-	callback_register(&vm_map_to_kernel(kernel_map)->vmk_reclaim_callback,
-	    &sokva_reclaimerentry, NULL, sokva_reclaim_callback);
-
 	socket_listener = kauth_listen_scope(KAUTH_SCOPE_NETWORK,
 	    socket_listener_cb, NULL);
 }
Index: sys/kern/vfs_bio.c
===================================================================
RCS file: /cvsroot/src/sys/kern/vfs_bio.c,v
retrieving revision 1.232
diff -u -p -r1.232 vfs_bio.c
--- sys/kern/vfs_bio.c	5 Oct 2011 01:53:03 -0000	1.232
+++ sys/kern/vfs_bio.c	23 Jan 2012 22:02:16 -0000
@@ -231,18 +231,24 @@ static struct vm_map *buf_map;
 static void *
 bufpool_page_alloc(struct pool *pp, int flags)
 {
+	int rc;
+	vmem_addr_t va;
 
-	return (void *)uvm_km_alloc(buf_map,
-	    MAXBSIZE, MAXBSIZE,
-	    ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
-	    | UVM_KMF_WIRED);
+	rc = uvm_km_kmem_alloc(kmem_va_arena, MAXBSIZE,
+	    ((flags & PR_WAITOK) ? VM_SLEEP : VM_NOSLEEP) | VM_INSTANTFIT,
+	    &va);
+
+	if (rc != 0)
+		return NULL;
+	else
+		return (void *)va;
 }
 
 static void
 bufpool_page_free(struct pool *pp, void *v)
 {
 
-	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE, UVM_KMF_WIRED);
+	uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, MAXBSIZE);
 }
 
 static struct pool_allocator bufmempool_allocator = {
@@ -474,7 +480,6 @@ bufinit(void)
 	bufio_cache = pool_cache_init(sizeof(buf_t), 0, 0, 0,
 	    "biopl", NULL, IPL_BIO, NULL, NULL, NULL);
 
-	bufmempool_allocator.pa_backingmap = buf_map;
 	for (i = 0; i < NMEMPOOLS; i++) {
 		struct pool_allocator *pa;
 		struct pool *pp = &bmempools[i];
@@ -489,7 +494,7 @@ bufinit(void)
 		pa = (size <= PAGE_SIZE && use_std)
 			? &pool_allocator_nointr
 			: &bufmempool_allocator;
-		pool_init(pp, size, 0, 0, 0, name, pa, IPL_NONE);
+		pool_init(pp, size, 0, 0, PR_NOALIGN, name, pa, IPL_NONE);
 		pool_setlowat(pp, 1);
 		pool_sethiwat(pp, 1);
 	}
Index: sys/kern/vfs_dirhash.c
===================================================================
RCS file: /cvsroot/src/sys/kern/vfs_dirhash.c,v
retrieving revision 1.10
diff -u -p -r1.10 vfs_dirhash.c
--- sys/kern/vfs_dirhash.c	6 Feb 2009 23:56:26 -0000	1.10
+++ sys/kern/vfs_dirhash.c	23 Jan 2012 22:02:16 -0000
@@ -37,7 +37,7 @@ __KERNEL_RCSID(0, "$NetBSD: vfs_dirhash.
 #include <sys/dirent.h>
 #include <sys/hash.h>
 #include <sys/mutex.h>
-#include <sys/pool.h>
+#include <sys/kmem.h>
 #include <sys/queue.h>
 #include <sys/vnode.h>
 #include <sys/sysctl.h>
@@ -65,8 +65,6 @@ __KERNEL_RCSID(0, "$NetBSD: vfs_dirhash.
  */
 
 static struct sysctllog *sysctl_log;
-static struct pool dirhash_pool;
-static struct pool dirhash_entry_pool;
 
 static kmutex_t dirhashmutex;
 static uint32_t maxdirhashsize = DIRHASH_SIZE;
@@ -78,24 +76,11 @@ void
 dirhash_init(void)
 {
 	const struct sysctlnode *rnode, *cnode;
-	size_t sz;
-	uint32_t max_entries;
 
 	/* initialise dirhash queue */
 	TAILQ_INIT(&dirhash_queue);
 
-	/* init dirhash pools */
-	sz = sizeof(struct dirhash);
-	pool_init(&dirhash_pool, sz, 0, 0, 0,
-		"dirhpl", NULL, IPL_NONE);
-
-	sz = sizeof(struct dirhash_entry);
-	pool_init(&dirhash_entry_pool, sz, 0, 0, 0,
-		"dirhepl", NULL, IPL_NONE);
-
 	mutex_init(&dirhashmutex, MUTEX_DEFAULT, IPL_NONE);
-	max_entries = maxdirhashsize / sz;
-	pool_sethiwat(&dirhash_entry_pool, max_entries);
 	dirhashsize = 0;
 
 	/* create sysctl knobs and dials */
@@ -124,8 +109,6 @@ dirhash_init(void)
 void
 dirhash_finish(void)
 {
-	pool_destroy(&dirhash_pool);
-	pool_destroy(&dirhash_entry_pool);
 
 	mutex_destroy(&dirhashmutex);
 
@@ -154,13 +137,13 @@ dirhash_purge_entries(struct dirhash *di
 		while ((dirh_e =
 		    LIST_FIRST(&dirh->entries[hashline])) != NULL) {
 			LIST_REMOVE(dirh_e, next);
-			pool_put(&dirhash_entry_pool, dirh_e);
+			kmem_free(dirh_e, sizeof(*dirh_e));
 		}
 	}
 
 	while ((dirh_e = LIST_FIRST(&dirh->free_entries)) != NULL) {
 		LIST_REMOVE(dirh_e, next);
-		pool_put(&dirhash_entry_pool, dirh_e);
+		kmem_free(dirh_e, sizeof(*dirh_e));
 	}
 
 	dirh->flags &= ~DIRH_COMPLETE;
@@ -187,7 +170,7 @@ dirhash_purge(struct dirhash **dirhp)
 	TAILQ_REMOVE(&dirhash_queue, dirh, next);
 	mutex_exit(&dirhashmutex);
 
-	pool_put(&dirhash_pool, dirh);
+	kmem_free(dirh, sizeof(*dirh));
 	*dirhp = NULL;
 }
 
@@ -201,8 +184,7 @@ dirhash_get(struct dirhash **dirhp)
 	/* if no dirhash was given, allocate one */
 	dirh = *dirhp;
 	if (dirh == NULL) {
-		dirh = pool_get(&dirhash_pool, PR_WAITOK);
-		memset(dirh, 0, sizeof(struct dirhash));
+		dirh = kmem_zalloc(sizeof(struct dirhash), KM_SLEEP);
 		for (hashline = 0; hashline < DIRHASH_HASHSIZE; hashline++) {
 			LIST_INIT(&dirh->entries[hashline]);
 		}
@@ -275,7 +257,7 @@ dirhash_enter(struct dirhash *dirh,
 		if (dirh_e->offset == offset) {
 			DPRINTF(("\tremoving free entry\n"));
 			LIST_REMOVE(dirh_e, next);
-			pool_put(&dirhash_entry_pool, dirh_e);
+			kmem_free(dirh_e, sizeof(*dirh_e));
 			break;
 		}
 	}
@@ -300,8 +282,7 @@ dirhash_enter(struct dirhash *dirh,
 	}
 
 	/* add to the hashline */
-	dirh_e = pool_get(&dirhash_entry_pool, PR_WAITOK);
-	memset(dirh_e, 0, sizeof(struct dirhash_entry));
+	dirh_e = kmem_zalloc(sizeof(struct dirhash_entry), KM_SLEEP);
 
 	dirh_e->hashvalue = hashvalue;
 	dirh_e->offset    = offset;
@@ -331,8 +312,7 @@ dirhash_enter_freed(struct dirhash *dirh
 
 	DPRINTF(("dirhash enter FREED %"PRIu64", %d\n",
 		offset, entry_size));
-	dirh_e = pool_get(&dirhash_entry_pool, PR_WAITOK);
-	memset(dirh_e, 0, sizeof(struct dirhash_entry));
+	dirh_e = kmem_zalloc(sizeof(struct dirhash_entry), KM_SLEEP);
 
 	dirh_e->hashvalue = 0;		/* not relevant */
 	dirh_e->offset    = offset;
Index: sys/kern/vfs_wapbl.c
===================================================================
RCS file: /cvsroot/src/sys/kern/vfs_wapbl.c,v
retrieving revision 1.49
diff -u -p -r1.49 vfs_wapbl.c
--- sys/kern/vfs_wapbl.c	11 Jan 2012 00:11:32 -0000	1.49
+++ sys/kern/vfs_wapbl.c	23 Jan 2012 22:02:17 -0000
@@ -64,9 +64,9 @@ __KERNEL_RCSID(0, "$NetBSD: vfs_wapbl.c,
 #include <miscfs/specfs/specdev.h>
 
 #if 0 /* notyet */
-#define	wapbl_malloc(s) kmem_alloc((s), KM_SLEEP)
-#define	wapbl_free(a, s) kmem_free((a), (s))
-#define	wapbl_calloc(n, s) kmem_zalloc((n)*(s), KM_SLEEP)
+#define	wapbl_malloc(s) kmem_intr_alloc((s), KM_SLEEP)
+#define	wapbl_free(a, s) kmem_intr_free((a), (s))
+#define	wapbl_calloc(n, s) kmem_intr_zalloc((n)*(s), KM_SLEEP)
 #else
 MALLOC_JUSTDEFINE(M_WAPBL, "wapbl", "write-ahead physical block logging");
 #define	wapbl_malloc(s) malloc((s), M_WAPBL, M_WAITOK)
@@ -310,7 +310,7 @@ wapbl_sysctl_init(void)
 static void
 wapbl_init(void)
 {
-	malloc_type_attach(M_WAPBL);
+	//malloc_type_attach(M_WAPBL);
 	wapbl_sysctl_init();
 }
 
Index: sys/rump/librump/rumpkern/vm.c
===================================================================
RCS file: /cvsroot/src/sys/rump/librump/rumpkern/vm.c,v
retrieving revision 1.120
diff -u -p -r1.120 vm.c
--- sys/rump/librump/rumpkern/vm.c	31 Oct 2011 13:23:55 -0000	1.120
+++ sys/rump/librump/rumpkern/vm.c	23 Jan 2012 22:02:18 -0000
@@ -48,6 +48,7 @@ __KERNEL_RCSID(0, "$NetBSD: vm.c,v 1.120
 #include <sys/buf.h>
 #include <sys/kernel.h>
 #include <sys/kmem.h>
+#include <sys/vmem.h>
 #include <sys/mman.h>
 #include <sys/null.h>
 #include <sys/vnode.h>
@@ -78,11 +79,12 @@ int *uvmexp_pageshift = &uvmexp.pageshif
 #endif
 
 struct vm_map rump_vmmap;
-static struct vm_map_kernel kmem_map_store;
-struct vm_map *kmem_map = &kmem_map_store.vmk_map;
 
-static struct vm_map_kernel kernel_map_store;
-struct vm_map *kernel_map = &kernel_map_store.vmk_map;
+static struct vm_map kernel_map_store;
+struct vm_map *kernel_map = &kernel_map_store;
+
+vmem_t *kmem_arena;
+vmem_t *kmem_va_arena;
 
 static unsigned int pdaemon_waiters;
 static kmutex_t pdaemonmtx;
@@ -327,9 +329,17 @@ uvm_init(void)
 	cv_init(&oomwait, "oomwait");
 
 	kernel_map->pmap = pmap_kernel();
-	callback_head_init(&kernel_map_store.vmk_reclaim_callback, IPL_VM);
-	kmem_map->pmap = pmap_kernel();
-	callback_head_init(&kmem_map_store.vmk_reclaim_callback, IPL_VM);
+
+	vmem_bootstrap();
+	kmem_arena = vmem_create("kmem", 0, 1024*1024, PAGE_SIZE,
+	    NULL, NULL, NULL,
+	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
+
+	vmem_init(kmem_arena);
+
+	kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE,
+	    vmem_alloc, vmem_free, kmem_arena,
+	    32 * PAGE_SIZE, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
 
 	pool_cache_bootstrap(&pagecache, sizeof(struct vm_page), 0, 0, 0,
 	    "page$", NULL, IPL_NONE, pgctor, pgdtor, NULL);
@@ -357,21 +367,6 @@ uvm_pageunwire(struct vm_page *pg)
 	/* nada */
 }
 
-/*
- * The uvm reclaim hook is not currently necessary because it is
- * used only by ZFS and implements exactly the same functionality
- * as the kva reclaim hook which we already run in the pagedaemon
- * (rump vm does not have a concept of uvm_map(), so we cannot
- * reclaim kva it when a mapping operation fails due to insufficient
- * available kva).
- */
-void
-uvm_reclaim_hook_add(struct uvm_reclaim_hook *hook_entry)
-{
-
-}
-__strong_alias(uvm_reclaim_hook_del,uvm_reclaim_hook_add);
-
 /* where's your schmonz now? */
 #define PUNLIMIT(a)	\
 p->p_rlimit[a].rlim_cur = p->p_rlimit[a].rlim_max = RLIM_INFINITY;
@@ -595,13 +590,6 @@ uvm_estimatepageable(int *active, int *i
 	*inactive = 1024;
 }
 
-struct vm_map_kernel *
-vm_map_to_kernel(struct vm_map *map)
-{
-
-	return (struct vm_map_kernel *)map;
-}
-
 bool
 vm_map_starved_p(struct vm_map *map)
 {
@@ -735,46 +723,33 @@ uvm_km_free(struct vm_map *map, vaddr_t 
 
 struct vm_map *
 uvm_km_suballoc(struct vm_map *map, vaddr_t *minaddr, vaddr_t *maxaddr,
-	vsize_t size, int pageable, bool fixed, struct vm_map_kernel *submap)
+	vsize_t size, int pageable, bool fixed, struct vm_map *submap)
 {
 
 	return (struct vm_map *)417416;
 }
 
-vaddr_t
-uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
-{
-
-	return (vaddr_t)rump_hypermalloc(PAGE_SIZE, PAGE_SIZE,
-	    waitok, "kmalloc");
-}
-
-void
-uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
-{
-
-	rump_hyperfree((void *)addr, PAGE_SIZE);
-}
-
-vaddr_t
-uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
-{
-
-	return uvm_km_alloc_poolpage(map, waitok);
-}
-
-void
-uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t vaddr)
+int
+uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
+    vmem_addr_t *addr)
 {
+	vaddr_t va;
+	va = (vaddr_t)rump_hypermalloc(size, PAGE_SIZE,
+	    (flags & VM_SLEEP), "kmalloc");
 
-	uvm_km_free_poolpage(map, vaddr);
+	if (va) {
+		*addr = va;
+		return 0;
+	} else {
+		return ENOMEM;
+	}
 }
 
 void
-uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags)
+uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
 {
 
-	/* we eventually maybe want some model for available memory */
+	rump_hyperfree((void *)addr, size);
 }
 
 /*
@@ -1013,7 +988,6 @@ uvm_pageout(void *arg)
 	for (;;) {
 		if (!NEED_PAGEDAEMON()) {
 			kernel_map->flags &= ~VM_MAP_WANTVA;
-			kmem_map->flags &= ~VM_MAP_WANTVA;
 		}
 
 		if (pdaemon_waiters) {
@@ -1027,7 +1001,6 @@ uvm_pageout(void *arg)
 
 		/* tell the world that we are hungry */
 		kernel_map->flags |= VM_MAP_WANTVA;
-		kmem_map->flags |= VM_MAP_WANTVA;
 		mutex_exit(&pdaemonmtx);
 
 		/*
@@ -1108,15 +1081,6 @@ uvm_pageout(void *arg)
 		}
 
 		/*
-		 * Still not there?  sleeves come off right about now.
-		 * First: do reclaim on kernel/kmem map.
-		 */
-		callback_run_roundrobin(&kernel_map_store.vmk_reclaim_callback,
-		    NULL);
-		callback_run_roundrobin(&kmem_map_store.vmk_reclaim_callback,
-		    NULL);
-
-		/*
 		 * And then drain the pools.  Wipe them out ... all of them.
 		 */
 
Index: sys/sys/extent.h
===================================================================
RCS file: /cvsroot/src/sys/sys/extent.h,v
retrieving revision 1.18
diff -u -p -r1.18 extent.h
--- sys/sys/extent.h	28 Apr 2008 20:24:10 -0000	1.18
+++ sys/sys/extent.h	23 Jan 2012 22:02:18 -0000
@@ -54,7 +54,6 @@ struct extent {
 	LIST_HEAD(, extent_region) ex_regions;
 	u_long	ex_start;		/* start of extent */
 	u_long	ex_end;			/* end of extent */
-	struct malloc_type *ex_mtype;	/* memory type */
 	int	ex_flags;		/* misc. information */
 };
 
@@ -79,7 +78,7 @@ struct extent_fixed {
 #define EX_FAST		0x02		/* take first fit in extent_alloc() */
 #define EX_CATCH	0x04		/* catch signals while sleeping */
 #define EX_NOCOALESCE	0x08		/* create a non-coalescing extent */
-#define EX_MALLOCOK	0x10		/* safe to call malloc() */
+#define EX_MALLOCOK	0x10		/* safe to call kmem_alloc() */
 #define EX_WAITSPACE	0x20		/* wait for space to become free */
 #define EX_BOUNDZERO	0x40		/* boundary lines start at 0 */
 
@@ -96,10 +95,8 @@ struct extent_fixed {
 	((ALIGN(sizeof(struct extent_region))) *	\
 	 (_nregions)))
 
-struct malloc_type;
-
 struct	extent *extent_create(const char *, u_long, u_long,
-	    struct malloc_type *, void *, size_t, int);
+	    void *, size_t, int);
 void	extent_destroy(struct extent *);
 int	extent_alloc_subregion1(struct extent *, u_long, u_long,
 	    u_long, u_long, u_long, u_long, int, u_long *);
Index: sys/sys/kmem.h
===================================================================
RCS file: /cvsroot/src/sys/sys/kmem.h,v
retrieving revision 1.6
diff -u -p -r1.6 kmem.h
--- sys/sys/kmem.h	21 Nov 2011 04:36:05 -0000	1.6
+++ sys/sys/kmem.h	23 Jan 2012 22:02:18 -0000
@@ -36,6 +36,7 @@ typedef unsigned int km_flag_t;
 void *kmem_alloc(size_t, km_flag_t);
 void *kmem_zalloc(size_t, km_flag_t);
 void kmem_free(void *, size_t);
+
 void kmem_init(void);
 size_t kmem_roundup_size(size_t);
 
Index: sys/sys/pool.h
===================================================================
RCS file: /cvsroot/src/sys/sys/pool.h,v
retrieving revision 1.72
diff -u -p -r1.72 pool.h
--- sys/sys/pool.h	21 Nov 2011 04:36:05 -0000	1.72
+++ sys/sys/pool.h	23 Jan 2012 22:02:18 -0000
@@ -67,11 +67,6 @@ struct pool_allocator {
 	uint32_t	pa_refcnt;	/* number of pools using this allocator */
 	int		pa_pagemask;
 	int		pa_pageshift;
-	struct vm_map *pa_backingmap;
-#if defined(_KERNEL)
-	struct vm_map **pa_backingmapptr;
-	SLIST_ENTRY(pool_allocator) pa_q;
-#endif /* defined(_KERNEL) */
 };
 
 LIST_HEAD(pool_pagelist,pool_item_header);
@@ -318,6 +313,7 @@ void		pool_cache_bootstrap(pool_cache_t,
 		    int (*)(void *, void *, int), void (*)(void *, void *),
 		    void *);
 void		pool_cache_destroy(pool_cache_t);
+void		pool_cache_bootstrap_destroy(pool_cache_t);
 void		*pool_cache_get_paddr(pool_cache_t, int, paddr_t *);
 void		pool_cache_put_paddr(pool_cache_t, void *, paddr_t);
 void		pool_cache_destruct_object(pool_cache_t, void *);
Index: sys/sys/sysctl.h
===================================================================
RCS file: /cvsroot/src/sys/sys/sysctl.h,v
retrieving revision 1.198
diff -u -p -r1.198 sysctl.h
--- sys/sys/sysctl.h	19 Nov 2011 22:51:31 -0000	1.198
+++ sys/sys/sysctl.h	23 Jan 2012 22:02:19 -0000
@@ -45,6 +45,7 @@
 #include <sys/ucred.h>
 #include <sys/ucontext.h>
 #include <sys/proc.h>
+#include <sys/mallocvar.h>
 #include <uvm/uvm_extern.h>
 
 
Index: sys/sys/vmem.h
===================================================================
RCS file: /cvsroot/src/sys/sys/vmem.h,v
retrieving revision 1.16
diff -u -p -r1.16 vmem.h
--- sys/sys/vmem.h	21 Nov 2011 04:36:06 -0000	1.16
+++ sys/sys/vmem.h	23 Jan 2012 22:02:19 -0000
@@ -44,9 +44,24 @@ typedef size_t vmem_size_t;
 #define	VMEM_ADDR_MIN	0
 #define	VMEM_ADDR_MAX	(~(vmem_addr_t)0)
 
+typedef int (vmem_import_t)(vmem_t *, vmem_size_t, vm_flag_t, vmem_addr_t *);
+typedef void (vmem_release_t)(vmem_t *, vmem_addr_t, vmem_size_t);
+
+typedef int (vmem_ximport_t)(vmem_t *, vmem_size_t, vmem_size_t *,
+    vm_flag_t, vmem_addr_t *);
+
+extern vmem_t *kmem_arena;
+extern vmem_t *kmem_meta_arena;
+extern vmem_t *kmem_va_arena;
+
+void vmem_bootstrap(void);
+void vmem_init(vmem_t *vm);
+
 vmem_t *vmem_create(const char *, vmem_addr_t, vmem_size_t, vmem_size_t,
-    int (*)(void *, vmem_size_t, vmem_size_t *, vm_flag_t, vmem_addr_t *),
-    void (*)(void *, vmem_addr_t, vmem_size_t), void *, vmem_size_t,
+    vmem_import_t *, vmem_release_t *, vmem_t *, vmem_size_t,
+    vm_flag_t, int);
+vmem_t *vmem_xcreate(const char *, vmem_addr_t, vmem_size_t, vmem_size_t,
+    vmem_ximport_t *, vmem_release_t *, vmem_t *, vmem_size_t,
     vm_flag_t, int);
 void vmem_destroy(vmem_t *);
 int vmem_alloc(vmem_t *, vmem_size_t, vm_flag_t, vmem_addr_t *);
@@ -56,7 +71,7 @@ int vmem_xalloc(vmem_t *, vmem_size_t, v
 void vmem_xfree(vmem_t *, vmem_addr_t, vmem_size_t);
 int vmem_add(vmem_t *, vmem_addr_t, vmem_size_t, vm_flag_t);
 vmem_size_t vmem_roundup_size(vmem_t *, vmem_size_t);
-bool vmem_reap(vmem_t *);
+vmem_size_t vmem_size(vmem_t *, int typemask);
 void vmem_rehash_start(void);
 void vmem_whatis(uintptr_t, void (*)(const char *, ...) __printflike(1, 2));
 void vmem_print(uintptr_t, const char *, void (*)(const char *, ...)
@@ -69,5 +84,13 @@ void vmem_printall(const char *, void (*
 #define	VM_NOSLEEP	0x00000002
 #define	VM_INSTANTFIT	0x00001000
 #define	VM_BESTFIT	0x00002000
+#define	VM_BOOTSTRAP	0x00010000
+#define	VM_POPULATING	0x00040000
+#define	VM_LARGEIMPORT	0x00080000
+#define	VM_XIMPORT	0x00100000
+
+/* vmem_size typemask */
+#define VMEM_ALLOC	0x01
+#define VMEM_FREE	0x02
 
 #endif /* !_SYS_VMEM_H_ */
Index: sys/ufs/ext2fs/ext2fs_inode.c
===================================================================
RCS file: /cvsroot/src/sys/ufs/ext2fs/ext2fs_inode.c,v
retrieving revision 1.74
diff -u -p -r1.74 ext2fs_inode.c
--- sys/ufs/ext2fs/ext2fs_inode.c	16 Jun 2011 09:21:03 -0000	1.74
+++ sys/ufs/ext2fs/ext2fs_inode.c	23 Jan 2012 22:02:19 -0000
@@ -70,7 +70,7 @@ __KERNEL_RCSID(0, "$NetBSD: ext2fs_inode
 #include <sys/buf.h>
 #include <sys/vnode.h>
 #include <sys/kernel.h>
-#include <sys/malloc.h>
+#include <sys/kmem.h>
 #include <sys/trace.h>
 #include <sys/resourcevar.h>
 #include <sys/kauth.h>
@@ -499,7 +499,7 @@ ext2fs_indirtrunc(struct inode *ip, dadd
 	bap = (int32_t *)bp->b_data;	/* XXX ondisk32 */
 	if (lastbn >= 0) {
 		/* XXX ondisk32 */
-		copy = malloc(fs->e2fs_bsize, M_TEMP, M_WAITOK);
+		copy = kmem_alloc(fs->e2fs_bsize, KM_SLEEP);
 		memcpy((void *)copy, (void *)bap, (u_int)fs->e2fs_bsize);
 		memset((void *)&bap[last + 1], 0,
 			(u_int)(NINDIR(fs) - (last + 1)) * sizeof (uint32_t));
@@ -548,7 +548,7 @@ ext2fs_indirtrunc(struct inode *ip, dadd
 	}
 
 	if (copy != NULL) {
-		free(copy, M_TEMP);
+		kmem_free(copy, fs->e2fs_bsize);
 	} else {
 		brelse(bp, BC_INVAL);
 	}
Index: sys/ufs/ext2fs/ext2fs_lookup.c
===================================================================
RCS file: /cvsroot/src/sys/ufs/ext2fs/ext2fs_lookup.c,v
retrieving revision 1.66
diff -u -p -r1.66 ext2fs_lookup.c
--- sys/ufs/ext2fs/ext2fs_lookup.c	12 Jul 2011 16:59:48 -0000	1.66
+++ sys/ufs/ext2fs/ext2fs_lookup.c	23 Jan 2012 22:02:20 -0000
@@ -57,6 +57,7 @@ __KERNEL_RCSID(0, "$NetBSD: ext2fs_looku
 #include <sys/file.h>
 #include <sys/mount.h>
 #include <sys/vnode.h>
+#include <sys/kmem.h>
 #include <sys/malloc.h>
 #include <sys/dirent.h>
 #include <sys/kauth.h>
@@ -167,15 +168,14 @@ ext2fs_readdir(void *v)
 	aiov.iov_len = e2fs_count;
 	auio.uio_resid = e2fs_count;
 	UIO_SETUP_SYSSPACE(&auio);
-	dirbuf = malloc(e2fs_count, M_TEMP, M_WAITOK);
-	dstd = malloc(sizeof(struct dirent), M_TEMP, M_WAITOK | M_ZERO);
+	dirbuf = kmem_zalloc(e2fs_count, KM_SLEEP);
+	dstd = kmem_zalloc(sizeof(struct dirent), KM_SLEEP);
 	if (ap->a_ncookies) {
 		nc = e2fs_count / _DIRENT_MINSIZE((struct dirent *)0);
 		ncookies = nc;
 		cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK);
 		*ap->a_cookies = cookies;
 	}
-	memset(dirbuf, 0, e2fs_count);
 	aiov.iov_base = dirbuf;
 
 	error = VOP_READ(ap->a_vp, &auio, 0, ap->a_cred);
@@ -209,8 +209,8 @@ ext2fs_readdir(void *v)
 		/* we need to correct uio_offset */
 		uio->uio_offset = off;
 	}
-	free(dirbuf, M_TEMP);
-	free(dstd, M_TEMP);
+	kmem_free(dirbuf, e2fs_count);
+	kmem_free(dstd, sizeof(*dstd));
 	*ap->a_eofflag = ext2fs_size(VTOI(ap->a_vp)) <= uio->uio_offset;
 	if (ap->a_ncookies) {
 		if (error) {
Index: sys/ufs/ffs/ffs_inode.c
===================================================================
RCS file: /cvsroot/src/sys/ufs/ffs/ffs_inode.c,v
retrieving revision 1.108
diff -u -p -r1.108 ffs_inode.c
--- sys/ufs/ffs/ffs_inode.c	23 Nov 2011 19:42:10 -0000	1.108
+++ sys/ufs/ffs/ffs_inode.c	23 Jan 2012 22:02:20 -0000
@@ -75,7 +75,7 @@ __KERNEL_RCSID(0, "$NetBSD: ffs_inode.c,
 #include <sys/fstrans.h>
 #include <sys/kauth.h>
 #include <sys/kernel.h>
-#include <sys/malloc.h>
+#include <sys/kmem.h>
 #include <sys/mount.h>
 #include <sys/proc.h>
 #include <sys/resourcevar.h>
@@ -621,7 +621,7 @@ ffs_indirtrunc(struct inode *ip, daddr_t
 	else
 		bap2 = (int64_t *)bp->b_data;
 	if (lastbn >= 0) {
-		copy = malloc(fs->fs_bsize, M_TEMP, M_WAITOK);
+		copy = kmem_alloc(fs->fs_bsize, KM_SLEEP);
 		memcpy((void *)copy, bp->b_data, (u_int)fs->fs_bsize);
 		for (i = last + 1; i < NINDIR(fs); i++)
 			BAP_ASSIGN(ip, i, 0);
@@ -676,7 +676,7 @@ ffs_indirtrunc(struct inode *ip, daddr_t
 	}
 
 	if (copy != NULL) {
-		free(copy, M_TEMP);
+		kmem_free(copy, fs->fs_bsize);
 	} else {
 		brelse(bp, BC_INVAL);
 	}
Index: sys/ufs/ffs/ffs_vfsops.c
===================================================================
RCS file: /cvsroot/src/sys/ufs/ffs/ffs_vfsops.c,v
retrieving revision 1.272
diff -u -p -r1.272 ffs_vfsops.c
--- sys/ufs/ffs/ffs_vfsops.c	3 Jan 2012 15:44:00 -0000	1.272
+++ sys/ufs/ffs/ffs_vfsops.c	23 Jan 2012 22:02:20 -0000
@@ -85,7 +85,7 @@ __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c
 #include <sys/disklabel.h>
 #include <sys/ioctl.h>
 #include <sys/errno.h>
-#include <sys/malloc.h>
+#include <sys/kmem.h>
 #include <sys/pool.h>
 #include <sys/lock.h>
 #include <sys/sysctl.h>
@@ -648,7 +648,7 @@ ffs_reload(struct mount *mp, kauth_cred_
 		brelse(bp, 0);
 		return (error);
 	}
-	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
+	newfs = kmem_alloc(fs->fs_sbsize, KM_SLEEP);
 	memcpy(newfs, bp->b_data, fs->fs_sbsize);
 #ifdef FFS_EI
 	if (ump->um_flags & UFS_NEEDSWAP) {
@@ -662,7 +662,7 @@ ffs_reload(struct mount *mp, kauth_cred_
 	     newfs->fs_bsize > MAXBSIZE ||
 	     newfs->fs_bsize < sizeof(struct fs)) {
 		brelse(bp, 0);
-		free(newfs, M_UFSMNT);
+		kmem_free(newfs, fs->fs_sbsize);
 		return (EIO);		/* XXX needs translation */
 	}
 	/* Store off old fs_sblockloc for fs_oldfscompat_read. */
@@ -679,7 +679,7 @@ ffs_reload(struct mount *mp, kauth_cred_
 	newfs->fs_active = fs->fs_active;
 	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
 	brelse(bp, 0);
-	free(newfs, M_UFSMNT);
+	kmem_free(newfs, fs->fs_sbsize);
 
 	/* Recheck for apple UFS filesystem */
 	ump->um_flags &= ~UFS_ISAPPLEUFS;
@@ -865,6 +865,7 @@ ffs_mountfs(struct vnode *devvp, struct 
 	int32_t *lp;
 	kauth_cred_t cred;
 	u_int32_t sbsize = 8192;	/* keep gcc happy*/
+	u_int32_t allocsbsize;
 	int32_t fsbsize;
 
 	dev = devvp->v_rdev;
@@ -889,7 +890,7 @@ ffs_mountfs(struct vnode *devvp, struct 
 	if (error)
 		return error;
 
-	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
+	ump = kmem_alloc(sizeof(*ump), KM_SLEEP);
 	memset(ump, 0, sizeof *ump);
 	mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
 	error = ffs_snapshot_init(ump);
@@ -986,7 +987,7 @@ ffs_mountfs(struct vnode *devvp, struct 
 		break;
 	}
 
-	fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
+	fs = kmem_alloc((u_long)sbsize, KM_SLEEP);
 	memcpy(fs, bp->b_data, sbsize);
 	ump->um_fs = fs;
 
@@ -1023,7 +1024,7 @@ ffs_mountfs(struct vnode *devvp, struct 
 			/* Force a re-read of the superblock */
 			brelse(bp, BC_INVAL);
 			bp = NULL;
-			free(fs, M_UFSMNT);
+			kmem_free(fs, sbsize);
 			fs = NULL;
 			goto sbagain;
 		}
@@ -1150,7 +1151,8 @@ ffs_mountfs(struct vnode *devvp, struct 
 	if (fs->fs_contigsumsize > 0)
 		bsize += fs->fs_ncg * sizeof(int32_t);
 	bsize += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
-	space = malloc((u_long)bsize, M_UFSMNT, M_WAITOK);
+	allocsbsize = bsize;
+	space = kmem_alloc((u_long)allocsbsize, KM_SLEEP);
 	fs->fs_csp = space;
 	for (i = 0; i < blks; i += fs->fs_frag) {
 		bsize = fs->fs_bsize;
@@ -1159,7 +1161,7 @@ ffs_mountfs(struct vnode *devvp, struct 
 		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), bsize,
 			      cred, 0, &bp);
 		if (error) {
-			free(fs->fs_csp, M_UFSMNT);
+			kmem_free(fs->fs_csp, allocsbsize);
 			goto out;
 		}
 #ifdef FFS_EI
@@ -1243,7 +1245,7 @@ ffs_mountfs(struct vnode *devvp, struct 
 
 		error = ffs_wapbl_start(mp);
 		if (error) {
-			free(fs->fs_csp, M_UFSMNT);
+			kmem_free(fs->fs_csp, allocsbsize);
 			goto out;
 		}
 	}
@@ -1252,7 +1254,7 @@ ffs_mountfs(struct vnode *devvp, struct 
 #ifdef QUOTA2
 		error = ffs_quota2_mount(mp);
 		if (error) {
-			free(fs->fs_csp, M_UFSMNT);
+			kmem_free(fs->fs_csp, allocsbsize);
 			goto out;
 		}
 #else
@@ -1263,7 +1265,7 @@ ffs_mountfs(struct vnode *devvp, struct 
 			    (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
 			if ((mp->mnt_flag & MNT_FORCE) == 0) {
 				error = EINVAL;
-				free(fs->fs_csp, M_UFSMNT);
+				kmem_free(fs->fs_csp, allocsbsize);
 				goto out;
 			}
 		}
@@ -1290,15 +1292,15 @@ out:
 
 	fstrans_unmount(mp);
 	if (fs)
-		free(fs, M_UFSMNT);
+		kmem_free(fs, fs->fs_sbsize);
 	devvp->v_specmountpoint = NULL;
 	if (bp)
 		brelse(bp, bset);
 	if (ump) {
 		if (ump->um_oldfscompat)
-			free(ump->um_oldfscompat, M_UFSMNT);
+			kmem_free(ump->um_oldfscompat, 512 + 3*sizeof(int32_t));
 		mutex_destroy(&ump->um_lock);
-		free(ump, M_UFSMNT);
+		kmem_free(ump, sizeof(*ump));
 		mp->mnt_data = NULL;
 	}
 	return (error);
@@ -1322,8 +1324,8 @@ ffs_oldfscompat_read(struct fs *fs, stru
 		return;
 
 	if (!ump->um_oldfscompat)
-		ump->um_oldfscompat = malloc(512 + 3*sizeof(int32_t),
-		    M_UFSMNT, M_WAITOK);
+		ump->um_oldfscompat = kmem_alloc(512 + 3*sizeof(int32_t),
+		    KM_SLEEP);
 
 	memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
 	extrasave = ump->um_oldfscompat;
@@ -1429,6 +1431,7 @@ ffs_unmount(struct mount *mp, int mntfla
 	struct ufsmount *ump = VFSTOUFS(mp);
 	struct fs *fs = ump->um_fs;
 	int error, flags;
+	u_int32_t bsize;
 #ifdef WAPBL
 	extern int doforce;
 #endif
@@ -1475,13 +1478,19 @@ ffs_unmount(struct mount *mp, int mntfla
 	(void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD | FWRITE,
 		NOCRED);
 	vput(ump->um_devvp);
-	free(fs->fs_csp, M_UFSMNT);
-	free(fs, M_UFSMNT);
+
+	bsize = fs->fs_cssize;
+	if (fs->fs_contigsumsize > 0)
+		bsize += fs->fs_ncg * sizeof(int32_t);
+	bsize += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
+	kmem_free(fs->fs_csp, bsize);
+
+	kmem_free(fs, fs->fs_sbsize);
 	if (ump->um_oldfscompat != NULL)
-		free(ump->um_oldfscompat, M_UFSMNT);
+		kmem_free(ump->um_oldfscompat, 512 + 3*sizeof(int32_t));
 	mutex_destroy(&ump->um_lock);
 	ffs_snapshot_fini(ump);
-	free(ump, M_UFSMNT);
+	kmem_free(ump, sizeof(*ump));
 	mp->mnt_data = NULL;
 	mp->mnt_flag &= ~MNT_LOCAL;
 	fstrans_unmount(mp);
Index: sys/ufs/ufs/ufs_extattr.c
===================================================================
RCS file: /cvsroot/src/sys/ufs/ufs/ufs_extattr.c,v
retrieving revision 1.35
diff -u -p -r1.35 ufs_extattr.c
--- sys/ufs/ufs/ufs_extattr.c	7 Jul 2011 14:56:45 -0000	1.35
+++ sys/ufs/ufs/ufs_extattr.c	23 Jan 2012 22:02:21 -0000
@@ -60,7 +60,7 @@ __KERNEL_RCSID(0, "$NetBSD: ufs_extattr.
 #include <sys/kauth.h>
 #include <sys/kernel.h>
 #include <sys/namei.h>
-#include <sys/malloc.h>
+#include <sys/kmem.h>
 #include <sys/fcntl.h>
 #include <sys/lwp.h>
 #include <sys/vnode.h>
@@ -77,8 +77,6 @@ __KERNEL_RCSID(0, "$NetBSD: ufs_extattr.
 #include <ufs/ufs/ufs_bswap.h>
 #include <ufs/ufs/ufs_extern.h>
 
-static MALLOC_JUSTDEFINE(M_UFS_EXTATTR, "ufs_extattr","ufs extended attribute");
-
 int ufs_extattr_sync = 1;
 int ufs_extattr_autocreate = 1024;
 
@@ -506,7 +504,7 @@ ufs_extattr_iterate_directory(struct ufs
 	if (dvp->v_type != VDIR)
 		return (ENOTDIR);
 
-	dirbuf = malloc(DIRBLKSIZ, M_TEMP, M_WAITOK);
+	dirbuf = kmem_alloc(DIRBLKSIZ, KM_SLEEP);
 
 	auio.uio_iov = &aiov;
 	auio.uio_iovcnt = 1;
@@ -578,7 +576,7 @@ ufs_extattr_iterate_directory(struct ufs
 				break;
 		}
 	}
-	free(dirbuf, M_TEMP);
+	kmem_free(dirbuf, DIRBLKSIZ);
 	
 	return (0);
 }
@@ -736,8 +734,7 @@ ufs_extattr_enable(struct ufsmount *ump,
 	if (backing_vnode->v_type != VREG)
 		return (EINVAL);
 
-	attribute = malloc(sizeof(*attribute), M_UFS_EXTATTR,
-	    M_WAITOK | M_ZERO);
+	attribute = kmem_zalloc(sizeof(*attribute), KM_SLEEP);
 
 	if (!(ump->um_extattr.uepm_flags & UFS_EXTATTR_UEPM_STARTED)) {
 		error = EOPNOTSUPP;
@@ -818,7 +815,7 @@ ufs_extattr_enable(struct ufsmount *ump,
 	VOP_UNLOCK(backing_vnode);
 
  free_exit:
-	free(attribute, M_UFS_EXTATTR);
+	kmem_free(attribute, sizeof(*attribute));
 	return (error);
 }
 
@@ -844,7 +841,7 @@ ufs_extattr_disable(struct ufsmount *ump
 	error = vn_close(uele->uele_backing_vnode, FREAD|FWRITE,
 	    l->l_cred);
 
-	free(uele, M_UFS_EXTATTR);
+	kmem_free(uele, sizeof(*uele));
 
 	return (error);
 }
@@ -1540,12 +1537,10 @@ void
 ufs_extattr_init(void)
 {
 
-	malloc_type_attach(M_UFS_EXTATTR);
 }
 
 void
 ufs_extattr_done(void)
 {
 
-	malloc_type_detach(M_UFS_EXTATTR);
 }
Index: sys/ufs/ufs/ufs_vfsops.c
===================================================================
RCS file: /cvsroot/src/sys/ufs/ufs/ufs_vfsops.c,v
retrieving revision 1.42
diff -u -p -r1.42 ufs_vfsops.c
--- sys/ufs/ufs/ufs_vfsops.c	24 Mar 2011 17:05:46 -0000	1.42
+++ sys/ufs/ufs/ufs_vfsops.c	23 Jan 2012 22:02:21 -0000
@@ -50,7 +50,7 @@ __KERNEL_RCSID(0, "$NetBSD: ufs_vfsops.c
 #include <sys/proc.h>
 #include <sys/buf.h>
 #include <sys/vnode.h>
-#include <sys/malloc.h>
+#include <sys/kmem.h>
 #include <sys/kauth.h>
 
 #include <miscfs/specfs/specdev.h>
Index: sys/ufs/ufs/ufs_vnops.c
===================================================================
RCS file: /cvsroot/src/sys/ufs/ufs/ufs_vnops.c,v
retrieving revision 1.206
diff -u -p -r1.206 ufs_vnops.c
--- sys/ufs/ufs/ufs_vnops.c	18 Nov 2011 21:18:52 -0000	1.206
+++ sys/ufs/ufs/ufs_vnops.c	23 Jan 2012 22:02:21 -0000
@@ -84,6 +84,7 @@ __KERNEL_RCSID(0, "$NetBSD: ufs_vnops.c,
 #include <sys/proc.h>
 #include <sys/mount.h>
 #include <sys/vnode.h>
+#include <sys/kmem.h>
 #include <sys/malloc.h>
 #include <sys/dirent.h>
 #include <sys/lockf.h>
@@ -2329,7 +2330,7 @@ ufs_readdir(void *v)
 	struct uio	auio, *uio;
 	struct iovec	aiov;
 	int		error;
-	size_t		count, ccount, rcount;
+	size_t		count, ccount, rcount, cdbufsz, ndbufsz;
 	off_t		off, *ccp;
 	off_t		startoff;
 	size_t		skipbytes;
@@ -2357,12 +2358,13 @@ ufs_readdir(void *v)
 	auio.uio_resid = rcount;
 	UIO_SETUP_SYSSPACE(&auio);
 	auio.uio_rw = UIO_READ;
-	cdbuf = malloc(rcount, M_TEMP, M_WAITOK);
+	cdbufsz = rcount;
+	cdbuf = kmem_alloc(cdbufsz, KM_SLEEP);
 	aiov.iov_base = cdbuf;
 	aiov.iov_len = rcount;
 	error = VOP_READ(vp, &auio, 0, ap->a_cred);
 	if (error != 0) {
-		free(cdbuf, M_TEMP);
+		kmem_free(cdbuf, cdbufsz);
 		return error;
 	}
 
@@ -2371,7 +2373,8 @@ ufs_readdir(void *v)
 	cdp = (struct direct *)(void *)cdbuf;
 	ecdp = (struct direct *)(void *)&cdbuf[rcount];
 
-	ndbuf = malloc(count, M_TEMP, M_WAITOK);
+	ndbufsz = count;
+	ndbuf = kmem_alloc(ndbufsz, KM_SLEEP);
 	ndp = (struct dirent *)(void *)ndbuf;
 	endp = &ndbuf[count];
 
@@ -2445,8 +2448,8 @@ out:
 		}
 	}
 	uio->uio_offset = off;
-	free(ndbuf, M_TEMP);
-	free(cdbuf, M_TEMP);
+	kmem_free(ndbuf, ndbufsz);
+	kmem_free(cdbuf, cdbufsz);
 	*ap->a_eofflag = VTOI(vp)->i_size <= uio->uio_offset;
 	return error;
 }
Index: sys/ufs/ufs/ufs_wapbl.c
===================================================================
RCS file: /cvsroot/src/sys/ufs/ufs/ufs_wapbl.c,v
retrieving revision 1.22
diff -u -p -r1.22 ufs_wapbl.c
--- sys/ufs/ufs/ufs_wapbl.c	18 Jul 2011 06:46:05 -0000	1.22
+++ sys/ufs/ufs/ufs_wapbl.c	23 Jan 2012 22:02:22 -0000
@@ -79,7 +79,6 @@ __KERNEL_RCSID(0, "$NetBSD: ufs_wapbl.c,
 #include <sys/proc.h>
 #include <sys/mount.h>
 #include <sys/vnode.h>
-#include <sys/malloc.h>
 #include <sys/dirent.h>
 #include <sys/lockf.h>
 #include <sys/kauth.h>
Index: sys/uvm/uvm_amap.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_amap.c,v
retrieving revision 1.104
diff -u -p -r1.104 uvm_amap.c
--- sys/uvm/uvm_amap.c	11 Oct 2011 23:57:50 -0000	1.104
+++ sys/uvm/uvm_amap.c	23 Jan 2012 22:02:22 -0000
@@ -800,11 +800,11 @@ amap_copy(struct vm_map *map, struct vm_
 			UVMHIST_LOG(maphist, "  chunk amap ==> clip 0x%x->0x%x"
 			    "to 0x%x->0x%x", entry->start, entry->end, startva,
 			    endva);
-			UVM_MAP_CLIP_START(map, entry, startva, NULL);
+			UVM_MAP_CLIP_START(map, entry, startva);
 
 			/* Watch out for endva wrap-around! */
 			if (endva >= startva) {
-				UVM_MAP_CLIP_END(map, entry, endva, NULL);
+				UVM_MAP_CLIP_END(map, entry, endva);
 			}
 		}
 
Index: sys/uvm/uvm_device.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_device.c,v
retrieving revision 1.62
diff -u -p -r1.62 uvm_device.c
--- sys/uvm/uvm_device.c	12 Jun 2011 03:36:02 -0000	1.62
+++ sys/uvm/uvm_device.c	23 Jan 2012 22:02:22 -0000
@@ -40,7 +40,7 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_device.c
 #include <sys/systm.h>
 #include <sys/conf.h>
 #include <sys/proc.h>
-#include <sys/malloc.h>
+#include <sys/kmem.h>
 
 #include <uvm/uvm.h>
 #include <uvm/uvm_device.h>
@@ -216,7 +216,7 @@ udv_attach(void *arg, vm_prot_t accesspr
 		mutex_exit(&udv_lock);
 
 		/* Note: both calls may allocate memory and sleep. */
-		udv = malloc(sizeof(*udv), M_TEMP, M_WAITOK);
+		udv = kmem_alloc(sizeof(*udv), KM_SLEEP);
 		uvm_obj_init(&udv->u_obj, &uvm_deviceops, true, 1);
 
 		mutex_enter(&udv_lock);
@@ -239,7 +239,7 @@ udv_attach(void *arg, vm_prot_t accesspr
 		if (lcv) {
 			mutex_exit(&udv_lock);
 			uvm_obj_destroy(&udv->u_obj, true);
-			free(udv, M_TEMP);
+			kmem_free(udv, sizeof(*udv));
 			continue;
 		}
 
@@ -329,7 +329,7 @@ again:
 	mutex_exit(uobj->vmobjlock);
 
 	uvm_obj_destroy(uobj, true);
-	free(udv, M_TEMP);
+	kmem_free(udv, sizeof(*udv));
 	UVMHIST_LOG(maphist," <- done, freed uobj=0x%x", uobj,0,0,0);
 }
 
Index: sys/uvm/uvm_extern.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_extern.h,v
retrieving revision 1.179
diff -u -p -r1.179 uvm_extern.h
--- sys/uvm/uvm_extern.h	5 Jan 2012 15:19:53 -0000	1.179
+++ sys/uvm/uvm_extern.h	23 Jan 2012 22:02:22 -0000
@@ -138,8 +138,7 @@ typedef voff_t pgoff_t;		/* XXX: number 
 #define UVM_FLAG_AMAPPAD 0x100000 /* for bss: pad amap to reduce allocations */
 #define UVM_FLAG_TRYLOCK 0x200000 /* fail if we can not lock map */
 #define UVM_FLAG_NOWAIT  0x400000 /* not allowed to sleep */
-#define UVM_FLAG_QUANTUM 0x800000 /* entry can never be split later */
-#define UVM_FLAG_WAITVA  0x1000000 /* wait for va */
+#define UVM_FLAG_WAITVA  0x800000 /* wait for va */
 #define UVM_FLAG_VAONLY  0x2000000 /* unmap: no pages are mapped */
 #define UVM_FLAG_COLORMATCH 0x4000000 /* match color given in off */
 
@@ -471,6 +470,9 @@ extern bool vm_page_zero_enable;
 #include <sys/vmmeter.h>
 #include <sys/queue.h>
 #include <sys/lock.h>
+#ifdef _KERNEL
+#include <sys/vmem.h>
+#endif
 #include <uvm/uvm_param.h>
 #include <uvm/uvm_prot.h>
 #include <uvm/uvm_pmap.h>
@@ -530,23 +532,9 @@ struct uvm_coredump_state {
 #define	UVM_COREDUMP_STACK	0x01	/* region is user stack */
 
 /*
- * Structure containig uvm reclaim hooks, uvm_reclaim_list is guarded by
- * uvm_reclaim_lock.
- */
-struct uvm_reclaim_hook {
-	void (*uvm_reclaim_hook)(void);
-	SLIST_ENTRY(uvm_reclaim_hook) uvm_reclaim_next;
-};
-
-void	uvm_reclaim_init(void);
-void	uvm_reclaim_hook_add(struct uvm_reclaim_hook *);
-void	uvm_reclaim_hook_del(struct uvm_reclaim_hook *);
-
-/*
  * the various kernel maps, owned by MD code
  */
 extern struct vm_map *kernel_map;
-extern struct vm_map *kmem_map;
 extern struct vm_map *phys_map;
 
 /*
@@ -555,9 +543,6 @@ extern struct vm_map *phys_map;
 
 #define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap))
 
-#include <sys/mallocvar.h>
-MALLOC_DECLARE(M_VMMAP);
-MALLOC_DECLARE(M_VMPMAP);
 
 /* vm_machdep.c */
 int		vmapbuf(struct buf *, vsize_t);
@@ -649,13 +634,13 @@ void			uvm_km_free(struct vm_map *, vadd
 
 struct vm_map		*uvm_km_suballoc(struct vm_map *, vaddr_t *,
 			    vaddr_t *, vsize_t, int, bool,
-			    struct vm_map_kernel *);
-vaddr_t			uvm_km_alloc_poolpage(struct vm_map *, bool);
-void			uvm_km_free_poolpage(struct vm_map *, vaddr_t);
-vaddr_t			uvm_km_alloc_poolpage_cache(struct vm_map *, bool);
-void			uvm_km_free_poolpage_cache(struct vm_map *, vaddr_t);
-void			uvm_km_vacache_init(struct vm_map *,
-			    const char *, size_t);
+			    struct vm_map *);
+#ifdef _KERNEL
+int			uvm_km_kmem_alloc(vmem_t *, vmem_size_t, vm_flag_t,
+			    vmem_addr_t *);
+void			uvm_km_kmem_free(vmem_t *, vmem_addr_t, vmem_size_t);
+bool			uvm_km_va_starved_p(void);
+#endif
 
 /* uvm_map.c */
 int			uvm_map(struct vm_map *, vaddr_t *, vsize_t,
@@ -764,9 +749,8 @@ bool			uvn_clean_p(struct uvm_object *);
 bool			uvn_needs_writefault_p(struct uvm_object *);
 
 /* kern_malloc.c */
-void			kmeminit_nkmempages(void);
 void			kmeminit(void);
-extern int		nkmempages;
+void			kmeminit_nkmempages(void);
 
 #endif /* _KERNEL */
 
Index: sys/uvm/uvm_fault.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_fault.c,v
retrieving revision 1.191
diff -u -p -r1.191 uvm_fault.c
--- sys/uvm/uvm_fault.c	28 Nov 2011 14:06:59 -0000	1.191
+++ sys/uvm/uvm_fault.c	23 Jan 2012 22:02:23 -0000
@@ -583,25 +583,8 @@ uvmfault_promote(struct uvm_faultinfo *u
 	if (*spare != NULL) {
 		anon = *spare;
 		*spare = NULL;
-	} else if (ufi->map != kernel_map) {
-		anon = uvm_analloc();
 	} else {
-		UVMHIST_LOG(maphist, "kernel_map, unlock and retry", 0,0,0,0);
-
-		/*
-		 * we can't allocate anons with kernel_map locked.
-		 */
-
-		uvm_page_unbusy(&uobjpage, 1);
-		uvmfault_unlockall(ufi, amap, uobj);
-
-		*spare = uvm_analloc();
-		if (*spare == NULL) {
-			goto nomem;
-		}
-		KASSERT((*spare)->an_lock == NULL);
-		error = ERESTART;
-		goto done;
+		anon = uvm_analloc();
 	}
 	if (anon) {
 
@@ -636,7 +619,6 @@ uvmfault_promote(struct uvm_faultinfo *u
 		/* unlock and fail ... */
 		uvm_page_unbusy(&uobjpage, 1);
 		uvmfault_unlockall(ufi, amap, uobj);
-nomem:
 		if (!uvm_reclaimable()) {
 			UVMHIST_LOG(maphist, "out of VM", 0,0,0,0);
 			uvmexp.fltnoanon++;
Index: sys/uvm/uvm_glue.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_glue.c,v
retrieving revision 1.152
diff -u -p -r1.152 uvm_glue.c
--- sys/uvm/uvm_glue.c	23 Nov 2011 01:07:50 -0000	1.152
+++ sys/uvm/uvm_glue.c	23 Jan 2012 22:02:23 -0000
@@ -240,26 +240,15 @@ static pool_cache_t uvm_uarea_system_cac
 static void *
 uarea_poolpage_alloc(struct pool *pp, int flags)
 {
-#if defined(PMAP_MAP_POOLPAGE)
-	if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) {
-		struct vm_page *pg;
-		vaddr_t va;
-
-#if defined(PMAP_ALLOC_POOLPAGE)
-		pg = PMAP_ALLOC_POOLPAGE(
-		   ((flags & PR_WAITOK) == 0 ? UVM_KMF_NOWAIT : 0));
-#else
-		pg = uvm_pagealloc(NULL, 0, NULL,
-		   ((flags & PR_WAITOK) == 0 ? UVM_KMF_NOWAIT : 0));
-#endif
-		if (pg == NULL)
-			return NULL;
-		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
-		if (va == 0)
-			uvm_pagefree(pg);
-		return (void *)va;
+	if (USPACE_ALIGN == 0) {
+		int rc;
+		vmem_addr_t va;
+
+		rc = uvm_km_kmem_alloc(kmem_va_arena, USPACE,
+		    ((flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP) |
+		    VM_INSTANTFIT, &va);
+		return (rc != 0) ? NULL : (void *)va;
 	}
-#endif
 #if defined(__HAVE_CPU_UAREA_ROUTINES)
 	void *va = cpu_uarea_alloc(false);
 	if (va)
@@ -274,16 +263,10 @@ uarea_poolpage_alloc(struct pool *pp, in
 static void
 uarea_poolpage_free(struct pool *pp, void *addr)
 {
-#if defined(PMAP_MAP_POOLPAGE)
-	if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) {
-		paddr_t pa;
-
-		pa = PMAP_UNMAP_POOLPAGE((vaddr_t) addr);
-		KASSERT(pa != 0);
-		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
+	if (USPACE_ALIGN == 0) {
+		uvm_km_kmem_free(kmem_va_arena, (vmem_addr_t)addr, USPACE);
 		return;
 	}
-#endif
 #if defined(__HAVE_CPU_UAREA_ROUTINES)
 	if (cpu_uarea_free(addr))
 		return;
Index: sys/uvm/uvm_init.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_init.c,v
retrieving revision 1.41
diff -u -p -r1.41 uvm_init.c
--- sys/uvm/uvm_init.c	24 Apr 2011 03:56:50 -0000	1.41
+++ sys/uvm/uvm_init.c	23 Jan 2012 22:02:23 -0000
@@ -91,7 +91,6 @@ uvm_init(void)
 
 	memset(&uvm, 0, sizeof(uvm));
 	averunnable.fscale = FSCALE;
-	uvm_amap_init();
 
 	/*
 	 * step 2: init the page sub-system.  this includes allocating the
@@ -104,9 +103,7 @@ uvm_init(void)
 	uvm_page_init(&kvm_start, &kvm_end);
 
 	/*
-	 * step 3: init the map sub-system.  allocates the static pool of
-	 * vm_map_entry structures that are used for "special" kernel maps
-	 * (e.g. kernel_map, kmem_map, etc...).
+	 * step 3: init the map sub-system.
 	 */
 
 	uvm_map_init();
@@ -114,9 +111,18 @@ uvm_init(void)
 	/*
 	 * step 4: setup the kernel's virtual memory data structures.  this
 	 * includes setting up the kernel_map/kernel_object.
+	 * Bootstrap all kernel memory allocators.
 	 */
 
-	uvm_km_init(kvm_start, kvm_end);
+	uao_init();
+	uvm_km_bootstrap(kvm_start, kvm_end);
+
+	/*
+	 * setup uvm_map pool_caches and init the amap.
+	 */
+
+	uvm_map_init_caches();
+	uvm_amap_init();
 
 	/*
 	 * step 5: init the pmap module.   the pmap module is free to allocate
@@ -125,38 +131,24 @@ uvm_init(void)
 
 	pmap_init();
 
-	/*
-	 * step 6: init the kernel memory allocator.   after this call the
-	 * kernel memory allocator (malloc) can be used. this includes
-	 * setting up the kmem_map.
+	/* step 6: init the kernel maps virtual address caches.
+	 * make kernel memory allocator ready for use.
+	 * After this call the pool/kmem memory allocators can be used.
 	 */
 
-	kmeminit();
+	uvm_km_init();
 
 #ifdef DEBUG
 	debug_init();
 #endif
 
 	/*
 	 * step 7: init all pagers and the pager_map.
 	 */
 
 	uvm_pager_init();
 
 	/*
-	 * Initialize pools.  This must be done before anyone manipulates
-	 * any vm_maps because we use a pool for some map entry structures.
-	 */
-
-	pool_subsystem_init();
-
-	/*
-	 * init slab memory allocator kmem(9).
-	 */
-
-	kmem_init();
-
-	/*
 	 * Initialize the uvm_loan() facility.
 	 */
 
Index: sys/uvm/uvm_io.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_io.c,v
retrieving revision 1.26
diff -u -p -r1.26 uvm_io.c
--- sys/uvm/uvm_io.c	23 Apr 2011 18:14:12 -0000	1.26
+++ sys/uvm/uvm_io.c	23 Jan 2012 22:02:23 -0000
@@ -129,7 +129,7 @@ uvm_io(struct vm_map *map, struct uio *u
 
 		vm_map_lock(kernel_map);
 		uvm_unmap_remove(kernel_map, kva, kva + chunksz, &dead_entries,
-		    NULL, 0);
+		   0);
 		vm_map_unlock(kernel_map);
 		if (dead_entries != NULL)
 			uvm_unmap_detach(dead_entries, AMAP_REFALL);
Index: sys/uvm/uvm_km.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_km.c,v
retrieving revision 1.111
diff -u -p -r1.111 uvm_km.c
--- sys/uvm/uvm_km.c	1 Sep 2011 06:40:28 -0000	1.111
+++ sys/uvm/uvm_km.c	23 Jan 2012 22:02:23 -0000
@@ -84,8 +84,6 @@
  * chunks.
  *
  * the vm system has several standard kernel submaps, including:
- *   kmem_map => contains only wired kernel memory for the kernel
- *		malloc.
  *   pager_map => used to map "buf" structures into kernel space
  *   exec_map => used during exec to handle exec args
  *   etc...
@@ -127,10 +125,11 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1
 #include "opt_uvmhist.h"
 
 #include <sys/param.h>
-#include <sys/malloc.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
 #include <sys/pool.h>
+#include <sys/vmem.h>
+#include <sys/kmem.h>
 
 #include <uvm/uvm.h>
 
@@ -144,126 +143,18 @@ struct vm_map *kernel_map = NULL;
  * local data structues
  */
 
-static struct vm_map_kernel	kernel_map_store;
-static struct vm_map_entry	kernel_first_mapent_store;
+static struct vm_map		kernel_map_store;
+static struct vm_map_entry	kernel_image_mapent_store;
+static struct vm_map_entry	kernel_kmem_mapent_store;
 
-#if !defined(PMAP_MAP_POOLPAGE)
+vaddr_t kmembase;
+vsize_t kmemsize;
 
-/*
- * kva cache
- *
- * XXX maybe it's better to do this at the uvm_map layer.
- */
-
-#define	KM_VACACHE_SIZE	(32 * PAGE_SIZE) /* XXX tune */
-
-static void *km_vacache_alloc(struct pool *, int);
-static void km_vacache_free(struct pool *, void *);
-static void km_vacache_init(struct vm_map *, const char *, size_t);
-
-/* XXX */
-#define	KM_VACACHE_POOL_TO_MAP(pp) \
-	((struct vm_map *)((char *)(pp) - \
-	    offsetof(struct vm_map_kernel, vmk_vacache)))
-
-static void *
-km_vacache_alloc(struct pool *pp, int flags)
-{
-	vaddr_t va;
-	size_t size;
-	struct vm_map *map;
-	size = pp->pr_alloc->pa_pagesz;
-
-	map = KM_VACACHE_POOL_TO_MAP(pp);
-
-	va = vm_map_min(map); /* hint */
-	if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
-	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
-	    UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
-	    ((flags & PR_WAITOK) ? UVM_FLAG_WAITVA :
-	    UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT))))
-		return NULL;
-
-	return (void *)va;
-}
-
-static void
-km_vacache_free(struct pool *pp, void *v)
-{
-	vaddr_t va = (vaddr_t)v;
-	size_t size = pp->pr_alloc->pa_pagesz;
-	struct vm_map *map;
-
-	map = KM_VACACHE_POOL_TO_MAP(pp);
-	uvm_unmap1(map, va, va + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
-}
-
-/*
- * km_vacache_init: initialize kva cache.
- */
-
-static void
-km_vacache_init(struct vm_map *map, const char *name, size_t size)
-{
-	struct vm_map_kernel *vmk;
-	struct pool *pp;
-	struct pool_allocator *pa;
-	int ipl;
-
-	KASSERT(VM_MAP_IS_KERNEL(map));
-	KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */
-
-
-	vmk = vm_map_to_kernel(map);
-	pp = &vmk->vmk_vacache;
-	pa = &vmk->vmk_vacache_allocator;
-	memset(pa, 0, sizeof(*pa));
-	pa->pa_alloc = km_vacache_alloc;
-	pa->pa_free = km_vacache_free;
-	pa->pa_pagesz = (unsigned int)size;
-	pa->pa_backingmap = map;
-	pa->pa_backingmapptr = NULL;
-
-	if ((map->flags & VM_MAP_INTRSAFE) != 0)
-		ipl = IPL_VM;
-	else
-		ipl = IPL_NONE;
-
-	pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa,
-	    ipl);
-}
-
-void
-uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
-{
-
-	map->flags |= VM_MAP_VACACHE;
-	if (size == 0)
-		size = KM_VACACHE_SIZE;
-	km_vacache_init(map, name, size);
-}
-
-#else /* !defined(PMAP_MAP_POOLPAGE) */
-
-void
-uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
-{
-
-	/* nothing */
-}
-
-#endif /* !defined(PMAP_MAP_POOLPAGE) */
-
-void
-uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags)
-{
-	struct vm_map_kernel *vmk = vm_map_to_kernel(map);
-
-	callback_run_roundrobin(&vmk->vmk_reclaim_callback, NULL);
-}
+vmem_t *kmem_arena;
+vmem_t *kmem_va_arena;
 
 /*
- * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
+ * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e.
  * KVM already allocated for text, data, bss, and static data structures).
  *
  * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
@@ -272,53 +163,98 @@ uvm_km_va_drain(struct vm_map *map, uvm_
  */
 
 void
-uvm_km_init(vaddr_t start, vaddr_t end)
+uvm_km_bootstrap(vaddr_t start, vaddr_t end)
 {
 	vaddr_t base = VM_MIN_KERNEL_ADDRESS;
 
+	kmemsize = MIN(((((vsize_t)(end - start)) / 3) * 2),
+	    ((((vsize_t)uvmexp.npages) * PAGE_SIZE) / 3));
+	kmemsize = round_page(kmemsize);
+
 	/*
 	 * next, init kernel memory objects.
 	 */
 
 	/* kernel_object: for pageable anonymous kernel memory */
-	uao_init();
 	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
-				 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);
+				VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);
 
 	/*
 	 * init the map and reserve any space that might already
 	 * have been allocated kernel space before installing.
 	 */
 
-	uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
-	kernel_map_store.vmk_map.pmap = pmap_kernel();
+	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
+	kernel_map_store.pmap = pmap_kernel();
 	if (start != base) {
 		int error;
 		struct uvm_map_args args;
 
-		error = uvm_map_prepare(&kernel_map_store.vmk_map,
+		error = uvm_map_prepare(&kernel_map_store,
 		    base, start - base,
 		    NULL, UVM_UNKNOWN_OFFSET, 0,
 		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
 		    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
 		if (!error) {
-			kernel_first_mapent_store.flags =
-			    UVM_MAP_KERNEL | UVM_MAP_FIRST;
-			error = uvm_map_enter(&kernel_map_store.vmk_map, &args,
-			    &kernel_first_mapent_store);
+			kernel_image_mapent_store.flags =
+			    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
+			error = uvm_map_enter(&kernel_map_store, &args,
+			    &kernel_image_mapent_store);
 		}
 
 		if (error)
 			panic(
-			    "uvm_km_init: could not reserve space for kernel");
+			    "uvm_km_bootstrap: could not reserve space for kernel");
+
+		kmembase = args.uma_start + args.uma_size;
+		error = uvm_map_prepare(&kernel_map_store,
+		    kmembase, kmemsize,
+		    NULL, UVM_UNKNOWN_OFFSET, 0,
+		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
+		    		UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
+		if (!error) {
+			kernel_kmem_mapent_store.flags =
+			    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
+			error = uvm_map_enter(&kernel_map_store, &args,
+			    &kernel_kmem_mapent_store);
+		}
+
+		if (error)
+			panic(
+			    "uvm_km_bootstrap: could not reserve kernel kmem");
 	}
 
 	/*
 	 * install!
 	 */
 
-	kernel_map = &kernel_map_store.vmk_map;
-	uvm_km_vacache_init(kernel_map, "kvakernel", 0);
+	kernel_map = &kernel_map_store;
+
+	pool_subsystem_init();
+	vmem_bootstrap();
+
+	kmem_arena = vmem_create("kmem", kmembase, kmemsize, PAGE_SIZE,
+	    NULL, NULL, NULL,
+	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
+
+	vmem_init(kmem_arena);
+
+	kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE,
+	    vmem_alloc, vmem_free, kmem_arena,
+	    16 * PAGE_SIZE, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
+}
+
+/*
+ * uvm_km_init: init the kernel maps virtual memory caches
+ * and start the pool/kmem allocator.
+ */
+void
+uvm_km_init(void)
+{
+
+	kmem_init();
+
+	kmeminit(); // killme
 }
 
 /*
@@ -327,6 +263,6 @@ uvm_km_init(vaddr_t start, vaddr_t end)
  * allows the locking of VAs in kernel_map to be broken up into regions.
  *
  * => if `fixed' is true, *vmin specifies where the region described
  *      by the submap must start
  * => if submap is non NULL we use that as the submap, otherwise we
  *	alloc a new map
@@ -335,14 +272,13 @@ uvm_km_init(vaddr_t start, vaddr_t end)
 struct vm_map *
 uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
     vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
-    struct vm_map_kernel *submap)
+    struct vm_map *submap)
 {
 	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
 
 	KASSERT(vm_map_pmap(map) == pmap_kernel());
 
 	size = round_page(size);	/* round up to pagesize */
-	size += uvm_mapent_overhead(size, flags);
 
 	/*
 	 * first allocate a blank spot in the parent map
@@ -366,21 +302,21 @@ uvm_km_suballoc(struct vm_map *map, vadd
 
 	pmap_reference(vm_map_pmap(map));
 	if (submap == NULL) {
-		submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK);
+		submap = kmem_alloc(sizeof(*submap), KM_SLEEP);
 		if (submap == NULL)
 			panic("uvm_km_suballoc: unable to create submap");
 	}
-	uvm_map_setup_kernel(submap, *vmin, *vmax, flags);
-	submap->vmk_map.pmap = vm_map_pmap(map);
+	uvm_map_setup(submap, *vmin, *vmax, flags);
+	submap->pmap = vm_map_pmap(map);
 
 	/*
 	 * now let uvm_map_submap plug in it...
 	 */
 
-	if (uvm_map_submap(map, *vmin, *vmax, &submap->vmk_map) != 0)
+	if (uvm_map_submap(map, *vmin, *vmax, submap) != 0)
 		panic("uvm_km_suballoc: submap allocation failed");
 
-	return(&submap->vmk_map);
+	return(submap);
 }
 
 /*
@@ -554,8 +490,7 @@ uvm_km_alloc(struct vm_map *map, vsize_t
 	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
 	    UVM_ADV_RANDOM,
 	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
-	     | UVM_KMF_COLORMATCH))
-	    | UVM_FLAG_QUANTUM)) != 0)) {
+	     | UVM_KMF_COLORMATCH)))) != 0)) {
 		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
 		return(0);
 	}
@@ -634,7 +569,7 @@ uvm_km_alloc(struct vm_map *map, vsize_t
 		loopsize -= PAGE_SIZE;
 	}
 
-       	pmap_update(pmap_kernel());
+	pmap_update(pmap_kernel());
 
 	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
 	return(kva);
@@ -672,7 +607,7 @@ uvm_km_free(struct vm_map *map, vaddr_t 
 	 * KVA becomes globally available.
 	 */
 
-	uvm_unmap1(map, addr, addr + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
+	uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY);
 }
 
 /* Sanity; must specify both or none. */
@@ -681,127 +616,114 @@ uvm_km_free(struct vm_map *map, vaddr_t 
 #error Must specify MAP and UNMAP together.
 #endif
 
-/*
- * uvm_km_alloc_poolpage: allocate a page for the pool allocator
- *
- * => if the pmap specifies an alternate mapping method, we use it.
- */
-
-/* ARGSUSED */
-vaddr_t
-uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
+int
+uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
+    vmem_addr_t *addr)
 {
-#if defined(PMAP_MAP_POOLPAGE)
-	return uvm_km_alloc_poolpage(map, waitok);
-#else
 	struct vm_page *pg;
-	struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache;
-	vaddr_t va;
+	vmem_addr_t va;
+	int rc;
+	vaddr_t loopva;
+	vsize_t loopsize;
 
-	if ((map->flags & VM_MAP_VACACHE) == 0)
-		return uvm_km_alloc_poolpage(map, waitok);
+	size = round_page(size);
 
-	va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT);
-	if (va == 0)
-		return 0;
-	KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
+#if defined(PMAP_MAP_POOLPAGE)
+	if (size == PAGE_SIZE) {
 again:
-	pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE);
-	if (__predict_false(pg == NULL)) {
-		if (waitok) {
-			uvm_wait("plpg");
-			goto again;
-		} else {
-			pool_put(pp, (void *)va);
-			return 0;
+#ifdef PMAP_ALLOC_POOLPAGE
+		pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ?
+		   0 : UVM_PGA_USERESERVE);
+#else
+		pg = uvm_pagealloc(NULL, 0, NULL,
+		   (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE);
+#endif /* PMAP_ALLOC_POOLPAGE */
+		if (__predict_false(pg == NULL)) {
+			if ((flags & VM_SLEEP) == 0)
+				return ENOMEM;
+			uvm_wait("plpg");
+			goto again;
+		}
+		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
+		if (__predict_false(va == 0)) {
+			uvm_pagefree(pg);
+			return ENOMEM;
 		}
+		*addr = va;
+		return 0;
 	}
-	pg->flags &= ~PG_BUSY;	/* new page */
-	UVM_PAGE_OWN(pg, NULL);
-	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
-	    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
-	pmap_update(pmap_kernel());
-
-	return va;
 #endif /* PMAP_MAP_POOLPAGE */
-}
 
-vaddr_t
-uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
-{
-#if defined(PMAP_MAP_POOLPAGE)
-	struct vm_page *pg;
-	vaddr_t va;
+	rc = vmem_alloc(vm, size, flags, &va);
+	if (rc != 0)
+		return rc;
 
+	loopva = va;
+	loopsize = size;
 
- again:
-#ifdef PMAP_ALLOC_POOLPAGE
-	pg = PMAP_ALLOC_POOLPAGE(waitok ? 0 : UVM_PGA_USERESERVE);
-#else
-	pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE);
-#endif
-	if (__predict_false(pg == NULL)) {
-		if (waitok) {
-			uvm_wait("plpg");
-			goto again;
-		} else
-			return (0);
+	while (loopsize) {
+		KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL));
+
+		pg = uvm_pagealloc(NULL, 0, NULL,
+		    (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE);
+		if (__predict_false(pg == NULL)) {
+			if (flags & VM_SLEEP) {
+				uvm_wait("plpg");
+				continue;
+			} else {
+				uvm_km_pgremove_intrsafe(kernel_map, va,
+				    va + size);
+				pmap_kremove(va, size);
+				vmem_free(vm, va, size);
+				return ENOMEM;
+			}
+		}
+	
+		pg->flags &= ~PG_BUSY;	/* new page */
+		UVM_PAGE_OWN(pg, NULL);
+		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
+		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
+
+		loopva += PAGE_SIZE;
+		loopsize -= PAGE_SIZE;
 	}
-	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
-	if (__predict_false(va == 0))
-		uvm_pagefree(pg);
-	return (va);
-#else
-	vaddr_t va;
+	pmap_update(pmap_kernel());
 
-	va = uvm_km_alloc(map, PAGE_SIZE, 0,
-	    (waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK) | UVM_KMF_WIRED);
-	return (va);
-#endif /* PMAP_MAP_POOLPAGE */
-}
+	*addr = va;
 
-/*
- * uvm_km_free_poolpage: free a previously allocated pool page
- *
- * => if the pmap specifies an alternate unmapping method, we use it.
- */
+	return 0;
+}
 
-/* ARGSUSED */
 void
-uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t addr)
+uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size)
 {
+
+	size = round_page(size);
 #if defined(PMAP_UNMAP_POOLPAGE)
-	uvm_km_free_poolpage(map, addr);
-#else
-	struct pool *pp;
+	if (size == PAGE_SIZE) {
+		paddr_t pa;
 
-	if ((map->flags & VM_MAP_VACACHE) == 0) {
-		uvm_km_free_poolpage(map, addr);
+		pa = PMAP_UNMAP_POOLPAGE(addr);
+		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
 		return;
 	}
-
-	KASSERT(pmap_extract(pmap_kernel(), addr, NULL));
-	uvm_km_pgremove_intrsafe(map, addr, addr + PAGE_SIZE);
-	pmap_kremove(addr, PAGE_SIZE);
-#if defined(DEBUG)
+#endif /* PMAP_UNMAP_POOLPAGE */
+	uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
+	pmap_kremove(addr, size);
 	pmap_update(pmap_kernel());
-#endif
-	KASSERT(!pmap_extract(pmap_kernel(), addr, NULL));
-	pp = &vm_map_to_kernel(map)->vmk_vacache;
-	pool_put(pp, (void *)addr);
-#endif
+
+	vmem_free(vm, addr, size);
 }
 
-/* ARGSUSED */
-void
-uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
+bool
+uvm_km_va_starved_p(void)
 {
-#if defined(PMAP_UNMAP_POOLPAGE)
-	paddr_t pa;
+	vmem_size_t total;
+	vmem_size_t free;
 
-	pa = PMAP_UNMAP_POOLPAGE(addr);
-	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
-#else
-	uvm_km_free(map, addr, PAGE_SIZE, UVM_KMF_WIRED);
-#endif /* PMAP_UNMAP_POOLPAGE */
+	total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE);
+	free = vmem_size(kmem_arena, VMEM_FREE);
+
+	return (free < (total / 10));
 }
+
Index: sys/uvm/uvm_km.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_km.h,v
retrieving revision 1.19
diff -u -p -r1.19 uvm_km.h
--- sys/uvm/uvm_km.h	2 Feb 2011 15:13:34 -0000	1.19
+++ sys/uvm/uvm_km.h	23 Jan 2012 22:02:23 -0000
@@ -40,7 +40,8 @@
  * prototypes
  */
 
-void uvm_km_init(vaddr_t, vaddr_t);
+void uvm_km_bootstrap(vaddr_t, vaddr_t);
+void uvm_km_init(void);
 void uvm_km_pgremove(vaddr_t, vaddr_t);
 void uvm_km_pgremove_intrsafe(struct vm_map *, vaddr_t, vaddr_t);
 #if defined(DEBUG)
@@ -48,7 +49,6 @@ void uvm_km_check_empty(struct vm_map *,
 #else
 #define	uvm_km_check_empty(a, b, c)	/* nothing */
 #endif /* defined(DEBUG) */
-void uvm_km_va_drain(struct vm_map *, uvm_flag_t);
 
 #endif /* _KERNEL */
 
Index: sys/uvm/uvm_kmguard.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_kmguard.c,v
retrieving revision 1.5
diff -u -p -r1.5 uvm_kmguard.c
--- sys/uvm/uvm_kmguard.c	23 Apr 2011 18:14:12 -0000	1.5
+++ sys/uvm/uvm_kmguard.c	23 Jan 2012 22:02:23 -0000
@@ -121,8 +121,8 @@ uvm_kmguard_alloc(struct uvm_kmguard *kg
 	va = vm_map_min(kg->kg_map);
 	if (__predict_false(uvm_map(kg->kg_map, &va, PAGE_SIZE*2, NULL,
 	    UVM_UNKNOWN_OFFSET, PAGE_SIZE, UVM_MAPFLAG(UVM_PROT_ALL,
-	    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, flag
-	    | UVM_FLAG_QUANTUM)) != 0)) {
+	    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, flag))
+	    != 0)) {
 		return NULL;
 	}
 
Index: sys/uvm/uvm_map.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_map.c,v
retrieving revision 1.310
diff -u -p -r1.310 uvm_map.c
--- sys/uvm/uvm_map.c	5 Jan 2012 15:19:53 -0000	1.310
+++ sys/uvm/uvm_map.c	23 Jan 2012 22:02:25 -0000
@@ -77,7 +77,6 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 
 #include <sys/systm.h>
 #include <sys/mman.h>
 #include <sys/proc.h>
-#include <sys/malloc.h>
 #include <sys/pool.h>
 #include <sys/kernel.h>
 #include <sys/mount.h>
@@ -135,11 +134,6 @@ UVMMAP_EVCNT_DEFINE(mlk_tree)
 UVMMAP_EVCNT_DEFINE(mlk_treeloop)
 UVMMAP_EVCNT_DEFINE(mlk_listloop)
 
-UVMMAP_EVCNT_DEFINE(uke_alloc)
-UVMMAP_EVCNT_DEFINE(uke_free)
-UVMMAP_EVCNT_DEFINE(ukh_alloc)
-UVMMAP_EVCNT_DEFINE(ukh_free)
-
 const char vmmapbsy[] = "vmmapbsy";
 
 /*
@@ -154,9 +148,6 @@ static struct pool_cache uvm_vmspace_cac
 
 static struct pool_cache uvm_map_entry_cache;
 
-MALLOC_DEFINE(M_VMMAP, "VM map", "VM map structures");
-MALLOC_DEFINE(M_VMPMAP, "VM pmap", "VM pmap");
-
 #ifdef PMAP_GROWKERNEL
 /*
  * This global represents the end of the kernel virtual address
@@ -184,24 +175,14 @@ static int user_va0_disable = __USER_VA0
  */
 
 /*
- * VM_MAP_USE_KMAPENT: determine if uvm_kmapent_alloc/free is used
- * for the vm_map.
- */
-extern struct vm_map *pager_map; /* XXX */
-#define	VM_MAP_USE_KMAPENT_FLAGS(flags) \
-	(((flags) & VM_MAP_INTRSAFE) != 0)
-#define	VM_MAP_USE_KMAPENT(map) \
-	(VM_MAP_USE_KMAPENT_FLAGS((map)->flags) || (map) == kernel_map)
-
-/*
  * UVM_ET_ISCOMPATIBLE: check some requirements for map entry merging
  */
+extern struct vm_map *pager_map;
 
 #define	UVM_ET_ISCOMPATIBLE(ent, type, uobj, meflags, \
     prot, maxprot, inh, adv, wire) \
 	((ent)->etype == (type) && \
-	(((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE | UVM_MAP_QUANTUM)) \
-	== 0 && \
+	(((ent)->flags ^ (meflags)) & (UVM_MAP_NOMERGE)) == 0 && \
 	(ent)->object.uvm_obj == (uobj) && \
 	(ent)->protection == (prot) && \
 	(ent)->max_protection == (maxprot) && \
@@ -285,10 +266,6 @@ clear_hints(struct vm_map *map, struct v
 
 static struct vm_map_entry *
 		uvm_mapent_alloc(struct vm_map *, int);
-static struct vm_map_entry *
-		uvm_mapent_alloc_split(struct vm_map *,
-		    const struct vm_map_entry *, int,
-		    struct uvm_mapent_reservation *);
 static void	uvm_mapent_copy(struct vm_map_entry *, struct vm_map_entry *);
 static void	uvm_mapent_free(struct vm_map_entry *);
 #if defined(DEBUG)
@@ -298,10 +275,6 @@ static void	_uvm_mapent_check(const stru
 #else /* defined(DEBUG) */
 #define	uvm_mapent_check(e)	/* nothing */
 #endif /* defined(DEBUG) */
-static struct vm_map_entry *
-		uvm_kmapent_alloc(struct vm_map *, int);
-static void	uvm_kmapent_free(struct vm_map_entry *);
-static vsize_t	uvm_kmapent_overhead(vsize_t);
 
 static void	uvm_map_entry_unwire(struct vm_map *, struct vm_map_entry *);
 static void	uvm_map_reference_amap(struct vm_map_entry *, int);
@@ -631,10 +604,6 @@ _uvm_tree_sanity(struct vm_map *map)
 }
 #endif /* defined(DEBUG) || defined(DDB) */
 
-#ifdef DIAGNOSTIC
-static struct vm_map *uvm_kmapent_map(struct vm_map_entry *);
-#endif
-
 /*
  * vm_map_lock: acquire an exclusive (write) lock on a map.
  *
@@ -805,14 +774,10 @@ uvm_mapent_alloc(struct vm_map *map, int
 	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
 	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
 
-	if (VM_MAP_USE_KMAPENT(map)) {
-		me = uvm_kmapent_alloc(map, flags);
-	} else {
-		me = pool_cache_get(&uvm_map_entry_cache, pflags);
-		if (__predict_false(me == NULL))
-			return NULL;
-		me->flags = 0;
-	}
+	me = pool_cache_get(&uvm_map_entry_cache, pflags);
+	if (__predict_false(me == NULL))
+		return NULL;
+	me->flags = 0;
 
 	UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
 	    ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
@@ -820,37 +786,6 @@ uvm_mapent_alloc(struct vm_map *map, int
 }
 
 /*
- * uvm_mapent_alloc_split: allocate a map entry for clipping.
- *
- * => map must be locked by caller if UVM_MAP_QUANTUM is set.
- */
-
-static struct vm_map_entry *
-uvm_mapent_alloc_split(struct vm_map *map,
-    const struct vm_map_entry *old_entry, int flags,
-    struct uvm_mapent_reservation *umr)
-{
-	struct vm_map_entry *me;
-
-	KASSERT(!VM_MAP_USE_KMAPENT(map) ||
-	    (old_entry->flags & UVM_MAP_QUANTUM) || !UMR_EMPTY(umr));
-
-	if (old_entry->flags & UVM_MAP_QUANTUM) {
-		struct vm_map_kernel *vmk = vm_map_to_kernel(map);
-
-		KASSERT(vm_map_locked_p(map));
-		me = vmk->vmk_merged_entries;
-		KASSERT(me);
-		vmk->vmk_merged_entries = me->next;
-		KASSERT(me->flags & UVM_MAP_QUANTUM);
-	} else {
-		me = uvm_mapent_alloc(map, flags);
-	}
-
-	return me;
-}
-
-/*
  * uvm_mapent_free: free map entry
  */
 
@@ -861,44 +796,7 @@ uvm_mapent_free(struct vm_map_entry *me)
 
 	UVMHIST_LOG(maphist,"<- freeing map entry=0x%x [flags=%d]",
 		me, me->flags, 0, 0);
-	if (me->flags & UVM_MAP_KERNEL) {
-		uvm_kmapent_free(me);
-	} else {
-		pool_cache_put(&uvm_map_entry_cache, me);
-	}
-}
-
-/*
- * uvm_mapent_free_merged: free merged map entry
- *
- * => keep the entry if needed.
- * => caller shouldn't hold map locked if VM_MAP_USE_KMAPENT(map) is true.
- * => map should be locked if UVM_MAP_QUANTUM is set.
- */
-
-static void
-uvm_mapent_free_merged(struct vm_map *map, struct vm_map_entry *me)
-{
-
-	KASSERT(!(me->flags & UVM_MAP_KERNEL) || uvm_kmapent_map(me) == map);
-
-	if (me->flags & UVM_MAP_QUANTUM) {
-		/*
-		 * keep this entry for later splitting.
-		 */
-		struct vm_map_kernel *vmk;
-
-		KASSERT(vm_map_locked_p(map));
-		KASSERT(VM_MAP_IS_KERNEL(map));
-		KASSERT(!VM_MAP_USE_KMAPENT(map) ||
-		    (me->flags & UVM_MAP_KERNEL));
-
-		vmk = vm_map_to_kernel(map);
-		me->next = vmk->vmk_merged_entries;
-		vmk->vmk_merged_entries = me;
-	} else {
-		uvm_mapent_free(me);
-	}
+	pool_cache_put(&uvm_map_entry_cache, me);
 }
 
 /*
@@ -913,23 +811,6 @@ uvm_mapent_copy(struct vm_map_entry *src
 	    ((char *)src));
 }
 
-/*
- * uvm_mapent_overhead: calculate maximum kva overhead necessary for
- * map entries.
- *
- * => size and flags are the same as uvm_km_suballoc's ones.
- */
-
-vsize_t
-uvm_mapent_overhead(vsize_t size, int flags)
-{
-
-	if (VM_MAP_USE_KMAPENT_FLAGS(flags)) {
-		return uvm_kmapent_overhead(size);
-	}
-	return 0;
-}
-
 #if defined(DEBUG)
 static void
 _uvm_mapent_check(const struct vm_map_entry *entry, const char *file, int line)
@@ -1031,7 +912,14 @@ uvm_map_init(void)
 	 */
 
 	mutex_init(&uvm_kentry_lock, MUTEX_DRIVER, IPL_VM);
+}
 
+/*
+ * uvm_map_init_caches: init mapping system caches.
+ */
+void
+uvm_map_init_caches(void)
+{
 	/*
 	 * initialize caches.
 	 */
@@ -1089,7 +977,7 @@ uvm_mapent_splitadj(struct vm_map_entry 
 
 void
 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
-    vaddr_t start, struct uvm_mapent_reservation *umr)
+    vaddr_t start)
 {
 	struct vm_map_entry *new_entry;
 
@@ -1103,7 +991,7 @@ uvm_map_clip_start(struct vm_map *map, s
 	 * entry BEFORE this one, so that this entry has the specified
 	 * starting address.
 	 */
-	new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
+	new_entry = uvm_mapent_alloc(map, 0);
 	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
 	uvm_mapent_splitadj(new_entry, entry, start);
 	uvm_map_entry_link(map, entry->prev, new_entry);
@@ -1121,8 +1009,7 @@ uvm_map_clip_start(struct vm_map *map, s
  */
 
 void
-uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end,
-    struct uvm_mapent_reservation *umr)
+uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t end)
 {
 	struct vm_map_entry *new_entry;
 
@@ -1133,7 +1020,7 @@ uvm_map_clip_end(struct vm_map *map, str
 	 *	Create a new entry and insert it
 	 *	AFTER the specified entry
 	 */
-	new_entry = uvm_mapent_alloc_split(map, entry, 0, umr);
+	new_entry = uvm_mapent_alloc(map, 0);
 	uvm_mapent_copy(entry, new_entry); /* entry -> new_entry */
 	uvm_mapent_splitadj(entry, new_entry, end);
 	uvm_map_entry_link(map, entry, new_entry);
@@ -1141,17 +1028,6 @@ uvm_map_clip_end(struct vm_map *map, str
 	uvm_map_check(map, "clip_end leave");
 }
 
-static void
-vm_map_drain(struct vm_map *map, uvm_flag_t flags)
-{
-
-	if (!VM_MAP_IS_KERNEL(map)) {
-		return;
-	}
-
-	uvm_km_va_drain(map, flags);
-}
-
 /*
  *   M A P   -   m a i n   e n t r y   p o i n t
  */
@@ -1188,7 +1064,6 @@ uvm_map(struct vm_map *map, vaddr_t *sta
 	struct vm_map_entry *new_entry;
 	int error;
 
-	KASSERT((flags & UVM_FLAG_QUANTUM) == 0 || VM_MAP_IS_KERNEL(map));
 	KASSERT((size & PAGE_MASK) == 0);
 
 #ifndef __USER_VA0_IS_SAFE
@@ -1200,20 +1075,13 @@ uvm_map(struct vm_map *map, vaddr_t *sta
 	/*
 	 * for pager_map, allocate the new entry first to avoid sleeping
 	 * for memory while we have the map locked.
-	 *
-	 * Also, because we allocate entries for in-kernel maps
-	 * a bit differently (cf. uvm_kmapent_alloc/free), we need to
-	 * allocate them before locking the map.
 	 */
 
 	new_entry = NULL;
-	if (VM_MAP_USE_KMAPENT(map) || (flags & UVM_FLAG_QUANTUM) ||
-	    map == pager_map) {
+	if (map == pager_map) {
 		new_entry = uvm_mapent_alloc(map, (flags & UVM_FLAG_NOWAIT));
 		if (__predict_false(new_entry == NULL))
 			return ENOMEM;
-		if (flags & UVM_FLAG_QUANTUM)
-			new_entry->flags |= UVM_MAP_QUANTUM;
 	}
 	if (map == pager_map)
 		flags |= UVM_FLAG_NOMERGE;
@@ -1314,8 +1182,6 @@ retry:
 		 * recheck the condition.
 		 */
 
-		vm_map_drain(map, flags);
-
 		mutex_enter(&map->misc_lock);
 		while ((map->flags & VM_MAP_WANTVA) != 0 &&
 		   map->timestamp == timestamp) {
@@ -1400,8 +1266,7 @@ uvm_map_enter(struct vm_map *map, const 
 	const int amapwaitflag = (flags & UVM_FLAG_NOWAIT) ?
 	    AMAP_EXTEND_NOWAIT : 0;
 	const int advice = UVM_ADVICE(flags);
-	const int meflagval = (flags & UVM_FLAG_QUANTUM) ?
-	    UVM_MAP_QUANTUM : 0;
+	const int meflagval = 0;
 
 	vaddr_t start = args->uma_start;
 	vsize_t size = args->uma_size;
@@ -1423,11 +1288,6 @@ uvm_map_enter(struct vm_map *map, const 
 	KASSERT(map->hint == prev_entry); /* bimerge case assumes this */
 	KASSERT(vm_map_locked_p(map));
 
-	if (flags & UVM_FLAG_QUANTUM) {
-		KASSERT(new_entry);
-		KASSERT(new_entry->flags & UVM_MAP_QUANTUM);
-	}
-
 	if (uobj)
 		newetype = UVM_ET_OBJ;
 	else
@@ -1714,27 +1574,17 @@ nomerge:
 
 	error = 0;
 done:
-	if ((flags & UVM_FLAG_QUANTUM) == 0) {
-		/*
-		 * vmk_merged_entries is locked by the map's lock.
-		 */
-		vm_map_unlock(map);
-	}
-	if (new_entry && error == 0) {
-		KDASSERT(merged);
-		uvm_mapent_free_merged(map, new_entry);
-		new_entry = NULL;
+	vm_map_unlock(map);
+
+	if (new_entry) {
+		uvm_mapent_free(new_entry);
 	}
+
 	if (dead) {
 		KDASSERT(merged);
-		uvm_mapent_free_merged(map, dead);
-	}
-	if ((flags & UVM_FLAG_QUANTUM) != 0) {
-		vm_map_unlock(map);
-	}
-	if (new_entry != NULL) {
-		uvm_mapent_free(new_entry);
+		uvm_mapent_free(dead);
 	}
+
 	return error;
 }
 
@@ -2307,8 +2157,7 @@ nextgap:
 
 void
 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
-    struct vm_map_entry **entry_list /* OUT */,
-    struct uvm_mapent_reservation *umr, int flags)
+    struct vm_map_entry **entry_list /* OUT */, int flags)
 {
 	struct vm_map_entry *entry, *first_entry, *next;
 	vaddr_t len;
@@ -2327,7 +2176,7 @@ uvm_unmap_remove(struct vm_map *map, vad
 	if (uvm_map_lookup_entry(map, start, &first_entry) == true) {
 		/* clip and go... */
 		entry = first_entry;
-		UVM_MAP_CLIP_START(map, entry, start, umr);
+		UVM_MAP_CLIP_START(map, entry, start);
 		/* critical!  prevents stale hint */
 		SAVE_HINT(map, entry, entry->prev);
 	} else {
@@ -2367,9 +2216,9 @@ uvm_unmap_remove(struct vm_map *map, vad
 	 */
 
 	while ((entry != &map->header) && (entry->start < end)) {
-		KASSERT((entry->flags & UVM_MAP_FIRST) == 0);
+		KASSERT((entry->flags & UVM_MAP_STATIC) == 0);
 
-		UVM_MAP_CLIP_END(map, entry, end, umr);
+		UVM_MAP_CLIP_END(map, entry, end);
 		next = entry->next;
 		len = entry->end - entry->start;
 
@@ -2606,8 +2455,8 @@ uvm_map_replace(struct vm_map *map, vadd
 	 * check to make sure we have a proper blank entry
 	 */
 
-	if (end < oldent->end && !VM_MAP_USE_KMAPENT(map)) {
-		UVM_MAP_CLIP_END(map, oldent, end, NULL);
+	if (end < oldent->end) {
+		UVM_MAP_CLIP_END(map, oldent, end);
 	}
 	if (oldent->start != start || oldent->end != end ||
 	    oldent->object.uvm_obj != NULL || oldent->aref.ar_amap != NULL) {
@@ -2799,7 +2648,7 @@ uvm_map_extract(struct vm_map *srcmap, v
 			 * fudge is zero)
 			 */
 
-			UVM_MAP_CLIP_START(srcmap, entry, start, NULL);
+			UVM_MAP_CLIP_START(srcmap, entry, start);
 			SAVE_HINT(srcmap, srcmap->hint, entry->prev);
 			fudge = 0;
 		}
@@ -2828,7 +2677,7 @@ uvm_map_extract(struct vm_map *srcmap, v
 
 		/* if we are not doing a quick reference, clip it */
 		if ((flags & UVM_EXTRACT_QREF) == 0)
-			UVM_MAP_CLIP_END(srcmap, entry, end, NULL);
+			UVM_MAP_CLIP_END(srcmap, entry, end);
 
 		/* clear needs_copy (allow chunking) */
 		if (UVM_ET_ISNEEDSCOPY(entry)) {
@@ -3077,17 +2926,14 @@ uvm_map_submap(struct vm_map *map, vaddr
     struct vm_map *submap)
 {
 	struct vm_map_entry *entry;
-	struct uvm_mapent_reservation umr;
 	int error;
 
-	uvm_mapent_reserve(map, &umr, 2, 0);
-
 	vm_map_lock(map);
 	VM_MAP_RANGE_CHECK(map, start, end);
 
 	if (uvm_map_lookup_entry(map, start, &entry)) {
-		UVM_MAP_CLIP_START(map, entry, start, &umr);
-		UVM_MAP_CLIP_END(map, entry, end, &umr);	/* to be safe */
+		UVM_MAP_CLIP_START(map, entry, start);
+		UVM_MAP_CLIP_END(map, entry, end);	/* to be safe */
 	} else {
 		entry = NULL;
 	}
@@ -3106,30 +2952,10 @@ uvm_map_submap(struct vm_map *map, vaddr
 	}
 	vm_map_unlock(map);
 
-	uvm_mapent_unreserve(map, &umr);
-
 	return error;
 }
 
 /*
- * uvm_map_setup_kernel: init in-kernel map
- *
- * => map must not be in service yet.
- */
-
-void
-uvm_map_setup_kernel(struct vm_map_kernel *map,
-    vaddr_t vmin, vaddr_t vmax, int flags)
-{
-
-	uvm_map_setup(&map->vmk_map, vmin, vmax, flags);
-	callback_head_init(&map->vmk_reclaim_callback, IPL_VM);
-	LIST_INIT(&map->vmk_kentry_free);
-	map->vmk_merged_entries = NULL;
-}
-
-
-/*
  * uvm_map_protect: change map protection
  *
  * => set_max means set max_protection.
@@ -3152,7 +2978,7 @@ uvm_map_protect(struct vm_map *map, vadd
 	vm_map_lock(map);
 	VM_MAP_RANGE_CHECK(map, start, end);
 	if (uvm_map_lookup_entry(map, start, &entry)) {
-		UVM_MAP_CLIP_START(map, entry, start, NULL);
+		UVM_MAP_CLIP_START(map, entry, start);
 	} else {
 		entry = entry->next;
 	}
@@ -3197,7 +3023,7 @@ uvm_map_protect(struct vm_map *map, vadd
 	while ((current != &map->header) && (current->start < end)) {
 		vm_prot_t old_prot;
 
-		UVM_MAP_CLIP_END(map, current, end, NULL);
+		UVM_MAP_CLIP_END(map, current, end);
 		old_prot = current->protection;
 		if (set_max)
 			current->protection =
@@ -3308,12 +3134,12 @@ uvm_map_inherit(struct vm_map *map, vadd
 	VM_MAP_RANGE_CHECK(map, start, end);
 	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
 		entry = temp_entry;
-		UVM_MAP_CLIP_START(map, entry, start, NULL);
+		UVM_MAP_CLIP_START(map, entry, start);
 	}  else {
 		entry = temp_entry->next;
 	}
 	while ((entry != &map->header) && (entry->start < end)) {
-		UVM_MAP_CLIP_END(map, entry, end, NULL);
+		UVM_MAP_CLIP_END(map, entry, end);
 		entry->inheritance = new_inheritance;
 		entry = entry->next;
 	}
@@ -3340,7 +3166,7 @@ uvm_map_advice(struct vm_map *map, vaddr
 	VM_MAP_RANGE_CHECK(map, start, end);
 	if (uvm_map_lookup_entry(map, start, &temp_entry)) {
 		entry = temp_entry;
-		UVM_MAP_CLIP_START(map, entry, start, NULL);
+		UVM_MAP_CLIP_START(map, entry, start);
 	} else {
 		entry = temp_entry->next;
 	}
@@ -3350,7 +3176,7 @@ uvm_map_advice(struct vm_map *map, vaddr
 	 */
 
 	while ((entry != &map->header) && (entry->start < end)) {
-		UVM_MAP_CLIP_END(map, entry, end, NULL);
+		UVM_MAP_CLIP_END(map, entry, end);
 
 		switch (new_advice) {
 		case MADV_NORMAL:
@@ -3479,7 +3305,7 @@ uvm_map_pageable(struct vm_map *map, vad
 	 */
 
 	if (new_pageable) {		/* unwire */
-		UVM_MAP_CLIP_START(map, entry, start, NULL);
+		UVM_MAP_CLIP_START(map, entry, start);
 
 		/*
 		 * unwiring.  first ensure that the range to be unwired is
@@ -3507,7 +3333,7 @@ uvm_map_pageable(struct vm_map *map, vad
 
 		entry = start_entry;
 		while ((entry != &map->header) && (entry->start < end)) {
-			UVM_MAP_CLIP_END(map, entry, end, NULL);
+			UVM_MAP_CLIP_END(map, entry, end);
 			if (VM_MAPENT_ISWIRED(entry))
 				uvm_map_entry_unwire(map, entry);
 			entry = entry->next;
@@ -3559,8 +3385,8 @@ uvm_map_pageable(struct vm_map *map, vad
 				}
 			}
 		}
-		UVM_MAP_CLIP_START(map, entry, start, NULL);
-		UVM_MAP_CLIP_END(map, entry, end, NULL);
+		UVM_MAP_CLIP_START(map, entry, start);
+		UVM_MAP_CLIP_END(map, entry, end);
 		entry->wired_count++;
 
 		/*
@@ -4354,7 +4180,7 @@ uvmspace_free(struct vmspace *vm)
 #endif
 	if (map->nentries) {
 		uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
-		    &dead_entries, NULL, 0);
+		    &dead_entries, 0);
 		if (dead_entries != NULL)
 			uvm_unmap_detach(dead_entries, 0);
 	}
@@ -4608,354 +4434,6 @@ uvmspace_fork(struct vmspace *vm1)
 
 
 /*
- * in-kernel map entry allocation.
- */
-
-struct uvm_kmapent_hdr {
-	LIST_ENTRY(uvm_kmapent_hdr) ukh_listq;
-	int ukh_nused;
-	struct vm_map_entry *ukh_freelist;
-	struct vm_map *ukh_map;
-	struct vm_map_entry ukh_entries[0];
-};
-
-#define	UVM_KMAPENT_CHUNK				\
-	((PAGE_SIZE - sizeof(struct uvm_kmapent_hdr))	\
-	/ sizeof(struct vm_map_entry))
-
-#define	UVM_KHDR_FIND(entry)	\
-	((struct uvm_kmapent_hdr *)(((vaddr_t)entry) & ~PAGE_MASK))
-
-
-#ifdef DIAGNOSTIC
-static struct vm_map *
-uvm_kmapent_map(struct vm_map_entry *entry)
-{
-	const struct uvm_kmapent_hdr *ukh;
-
-	ukh = UVM_KHDR_FIND(entry);
-	return ukh->ukh_map;
-}
-#endif
-
-static inline struct vm_map_entry *
-uvm_kmapent_get(struct uvm_kmapent_hdr *ukh)
-{
-	struct vm_map_entry *entry;
-
-	KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
-	KASSERT(ukh->ukh_nused >= 0);
-
-	entry = ukh->ukh_freelist;
-	if (entry) {
-		KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
-		    == UVM_MAP_KERNEL);
-		ukh->ukh_freelist = entry->next;
-		ukh->ukh_nused++;
-		KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
-	} else {
-		KASSERT(ukh->ukh_nused == UVM_KMAPENT_CHUNK);
-	}
-
-	return entry;
-}
-
-static inline void
-uvm_kmapent_put(struct uvm_kmapent_hdr *ukh, struct vm_map_entry *entry)
-{
-
-	KASSERT((entry->flags & (UVM_MAP_KERNEL | UVM_MAP_KMAPENT))
-	    == UVM_MAP_KERNEL);
-	KASSERT(ukh->ukh_nused <= UVM_KMAPENT_CHUNK);
-	KASSERT(ukh->ukh_nused > 0);
-	KASSERT(ukh->ukh_freelist != NULL ||
-	    ukh->ukh_nused == UVM_KMAPENT_CHUNK);
-	KASSERT(ukh->ukh_freelist == NULL ||
-	    ukh->ukh_nused < UVM_KMAPENT_CHUNK);
-
-	ukh->ukh_nused--;
-	entry->next = ukh->ukh_freelist;
-	ukh->ukh_freelist = entry;
-}
-
-/*
- * uvm_kmapent_alloc: allocate a map entry for in-kernel map
- */
-
-static struct vm_map_entry *
-uvm_kmapent_alloc(struct vm_map *map, int flags)
-{
-	struct vm_page *pg;
-	struct uvm_kmapent_hdr *ukh;
-	struct vm_map_entry *entry;
-#ifndef PMAP_MAP_POOLPAGE
-	struct uvm_map_args args;
-	uvm_flag_t mapflags = UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
-	    UVM_INH_NONE, UVM_ADV_RANDOM, flags | UVM_FLAG_NOMERGE);
-	int error;
-#endif
-	vaddr_t va;
-	int i;
-
-	KDASSERT(UVM_KMAPENT_CHUNK > 2);
-	KDASSERT(kernel_map != NULL);
-	KASSERT(vm_map_pmap(map) == pmap_kernel());
-
-	UVMMAP_EVCNT_INCR(uke_alloc);
-	entry = NULL;
-again:
-	/*
-	 * try to grab an entry from freelist.
-	 */
-	mutex_spin_enter(&uvm_kentry_lock);
-	ukh = LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free);
-	if (ukh) {
-		entry = uvm_kmapent_get(ukh);
-		if (ukh->ukh_nused == UVM_KMAPENT_CHUNK)
-			LIST_REMOVE(ukh, ukh_listq);
-	}
-	mutex_spin_exit(&uvm_kentry_lock);
-
-	if (entry)
-		return entry;
-
-	/*
-	 * there's no free entry for this vm_map.
-	 * now we need to allocate some vm_map_entry.
-	 * for simplicity, always allocate one page chunk of them at once.
-	 */
-
-#ifdef PMAP_ALLOC_POOLPAGE
-	pg = PMAP_ALLOC_POOLPAGE(
-	    (flags & UVM_KMF_NOWAIT) != 0 ? UVM_PGA_USERESERVE : 0);
-#else
-	pg = uvm_pagealloc(NULL, 0, NULL,
-	    (flags & UVM_KMF_NOWAIT) != 0 ? UVM_PGA_USERESERVE : 0);
-#endif
-	if (__predict_false(pg == NULL)) {
-		if (flags & UVM_FLAG_NOWAIT)
-			return NULL;
-		uvm_wait("kme_alloc");
-		goto again;
-	}
-
-#ifdef PMAP_MAP_POOLPAGE
-	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
-	KASSERT(va != 0);
-#else
-	error = uvm_map_prepare(map, 0, PAGE_SIZE, NULL, UVM_UNKNOWN_OFFSET,
-	    VM_PGCOLOR_BUCKET(pg), mapflags | UVM_FLAG_COLORMATCH, &args);
-	if (error) {
-		uvm_pagefree(pg);
-		return NULL;
-	}
-
-	va = args.uma_start;
-
-	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
-	    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
-	pmap_update(vm_map_pmap(map));
-
-#endif
-	ukh = (void *)va;
-
-	/*
-	 * use the last entry for ukh itsself.
-	 */
-
-	i = UVM_KMAPENT_CHUNK - 1;
-#ifndef PMAP_MAP_POOLPAGE
-	entry = &ukh->ukh_entries[i--];
-	entry->flags = UVM_MAP_KERNEL | UVM_MAP_KMAPENT;
-	error = uvm_map_enter(map, &args, entry);
-	KASSERT(error == 0);
-#endif
-
-	ukh->ukh_nused = UVM_KMAPENT_CHUNK;
-	ukh->ukh_map = map;
-	ukh->ukh_freelist = NULL;
-	for (; i >= 1; i--) {
-		struct vm_map_entry *xentry = &ukh->ukh_entries[i];
-
-		xentry->flags = UVM_MAP_KERNEL;
-		uvm_kmapent_put(ukh, xentry);
-	}
-#ifdef PMAP_MAP_POOLPAGE
-	KASSERT(ukh->ukh_nused == 1);
-#else
-	KASSERT(ukh->ukh_nused == 2);
-#endif
-
-	mutex_spin_enter(&uvm_kentry_lock);
-	LIST_INSERT_HEAD(&vm_map_to_kernel(map)->vmk_kentry_free,
-	    ukh, ukh_listq);
-	mutex_spin_exit(&uvm_kentry_lock);
-
-	/*
-	 * return first entry.
-	 */
-
-	entry = &ukh->ukh_entries[0];
-	entry->flags = UVM_MAP_KERNEL;
-	UVMMAP_EVCNT_INCR(ukh_alloc);
-
-	return entry;
-}
-
-/*
- * uvm_mapent_free: free map entry for in-kernel map
- */
-
-static void
-uvm_kmapent_free(struct vm_map_entry *entry)
-{
-	struct uvm_kmapent_hdr *ukh;
-	struct vm_page *pg;
-	struct vm_map *map;
-#ifndef PMAP_UNMAP_POOLPAGE
-	struct pmap *pmap;
-	struct vm_map_entry *deadentry;
-#endif
-	vaddr_t va;
-	paddr_t pa;
-
-	UVMMAP_EVCNT_INCR(uke_free);
-	ukh = UVM_KHDR_FIND(entry);
-	map = ukh->ukh_map;
-
-	mutex_spin_enter(&uvm_kentry_lock);
-	uvm_kmapent_put(ukh, entry);
-#ifdef PMAP_UNMAP_POOLPAGE
-	if (ukh->ukh_nused > 0) {
-#else
-	if (ukh->ukh_nused > 1) {
-#endif
-		if (ukh->ukh_nused == UVM_KMAPENT_CHUNK - 1)
-			LIST_INSERT_HEAD(
-			    &vm_map_to_kernel(map)->vmk_kentry_free,
-			    ukh, ukh_listq);
-		mutex_spin_exit(&uvm_kentry_lock);
-		return;
-	}
-
-	/*
-	 * now we can free this ukh.
-	 *
-	 * however, keep an empty ukh to avoid ping-pong.
-	 */
-
-	if (LIST_FIRST(&vm_map_to_kernel(map)->vmk_kentry_free) == ukh &&
-	    LIST_NEXT(ukh, ukh_listq) == NULL) {
-		mutex_spin_exit(&uvm_kentry_lock);
-		return;
-	}
-	LIST_REMOVE(ukh, ukh_listq);
-	mutex_spin_exit(&uvm_kentry_lock);
-
-	va = (vaddr_t)ukh;
-
-#ifdef PMAP_UNMAP_POOLPAGE
-	KASSERT(ukh->ukh_nused == 0);
-	pa = PMAP_UNMAP_POOLPAGE(va);
-	KASSERT(pa != 0);
-#else
-	KASSERT(ukh->ukh_nused == 1);
-
-	/*
-	 * remove map entry for ukh itsself.
-	 */
-
-	KASSERT((va & PAGE_MASK) == 0);
-	vm_map_lock(map);
-	uvm_unmap_remove(map, va, va + PAGE_SIZE, &deadentry, NULL, 0);
-	KASSERT(deadentry->flags & UVM_MAP_KERNEL);
-	KASSERT(deadentry->flags & UVM_MAP_KMAPENT);
-	KASSERT(deadentry->next == NULL);
-	KASSERT(deadentry == &ukh->ukh_entries[UVM_KMAPENT_CHUNK - 1]);
-
-	/*
-	 * unmap the page from pmap and free it.
-	 */
-
-	pmap = vm_map_pmap(map);
-	KASSERT(pmap == pmap_kernel());
-	if (!pmap_extract(pmap, va, &pa))
-		panic("%s: no mapping", __func__);
-	pmap_kremove(va, PAGE_SIZE);
-	pmap_update(vm_map_pmap(map));
-	vm_map_unlock(map);
-#endif /* !PMAP_UNMAP_POOLPAGE */
-	pg = PHYS_TO_VM_PAGE(pa);
-	uvm_pagefree(pg);
-	UVMMAP_EVCNT_INCR(ukh_free);
-}
-
-static vsize_t
-uvm_kmapent_overhead(vsize_t size)
-{
-
-	/*
-	 * - the max number of unmerged entries is howmany(size, PAGE_SIZE)
-	 *   as the min allocation unit is PAGE_SIZE.
-	 * - UVM_KMAPENT_CHUNK "kmapent"s are allocated from a page.
-	 *   one of them are used to map the page itself.
-	 */
-
-	return howmany(howmany(size, PAGE_SIZE), (UVM_KMAPENT_CHUNK - 1)) *
-	    PAGE_SIZE;
-}
-
-/*
- * map entry reservation
- */
-
-/*
- * uvm_mapent_reserve: reserve map entries for clipping before locking map.
- *
- * => needed when unmapping entries allocated without UVM_FLAG_QUANTUM.
- * => caller shouldn't hold map locked.
- */
-int
-uvm_mapent_reserve(struct vm_map *map, struct uvm_mapent_reservation *umr,
-    int nentries, int flags)
-{
-
-	umr->umr_nentries = 0;
-
-	if ((flags & UVM_FLAG_QUANTUM) != 0)
-		return 0;
-
-	if (!VM_MAP_USE_KMAPENT(map))
-		return 0;
-
-	while (nentries--) {
-		struct vm_map_entry *ent;
-		ent = uvm_kmapent_alloc(map, flags);
-		if (!ent) {
-			uvm_mapent_unreserve(map, umr);
-			return ENOMEM;
-		}
-		UMR_PUTENTRY(umr, ent);
-	}
-
-	return 0;
-}
-
-/*
- * uvm_mapent_unreserve:
- *
- * => caller shouldn't hold map locked.
- * => never fail or sleep.
- */
-void
-uvm_mapent_unreserve(struct vm_map *map, struct uvm_mapent_reservation *umr)
-{
-
-	while (!UMR_EMPTY(umr))
-		uvm_kmapent_free(UMR_GETENTRY(umr));
-}
-
-/*
  * uvm_mapent_trymerge: try to merge an entry with its neighbors.
  *
  * => called with map locked.
@@ -4973,10 +4451,6 @@ uvm_mapent_trymerge(struct vm_map *map, 
 	bool copying;
 	int newetype;
 
-	KASSERT(vm_map_locked_p(map));
-	if (VM_MAP_USE_KMAPENT(map)) {
-		return 0;
-	}
 	if (entry->aref.ar_amap != NULL) {
 		return 0;
 	}
@@ -5023,7 +4497,7 @@ uvm_mapent_trymerge(struct vm_map *map, 
 				entry->etype &= ~UVM_ET_NEEDSCOPY;
 			}
 			uvm_map_check(map, "trymerge forwardmerge");
-			uvm_mapent_free_merged(map, next);
+			uvm_mapent_free(next);
 			merged++;
 		}
 	}
@@ -5064,7 +4538,7 @@ uvm_mapent_trymerge(struct vm_map *map, 
 				entry->etype &= ~UVM_ET_NEEDSCOPY;
 			}
 			uvm_map_check(map, "trymerge backmerge");
-			uvm_mapent_free_merged(map, prev);
+			uvm_mapent_free(prev);
 			merged++;
 		}
 	}
@@ -5073,21 +4547,6 @@ uvm_mapent_trymerge(struct vm_map *map, 
 }
 
 /*
- * uvm_map_create: create map
- */
-
-struct vm_map *
-uvm_map_create(pmap_t pmap, vaddr_t vmin, vaddr_t vmax, int flags)
-{
-	struct vm_map *result;
-
-	result = malloc(sizeof(struct vm_map), M_VMMAP, M_WAITOK);
-	uvm_map_setup(result, vmin, vmax, flags);
-	result->pmap = pmap;
-	return(result);
-}
-
-/*
  * uvm_map_setup: init map
  *
  * => map must not be in service yet.
@@ -5140,7 +4599,6 @@ void
 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
 {
 	struct vm_map_entry *dead_entries;
-	struct uvm_mapent_reservation umr;
 	UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
 
 	UVMHIST_LOG(maphist, "  (map=0x%x, start=0x%x, end=0x%x)",
@@ -5152,11 +4610,9 @@ uvm_unmap1(struct vm_map *map, vaddr_t s
 	 * work now done by helper functions.   wipe the pmap's and then
 	 * detach from the dead entries...
 	 */
-	uvm_mapent_reserve(map, &umr, 2, flags);
 	vm_map_lock(map);
-	uvm_unmap_remove(map, start, end, &dead_entries, &umr, flags);
+	uvm_unmap_remove(map, start, end, &dead_entries, flags);
 	vm_map_unlock(map);
-	uvm_mapent_unreserve(map, &umr);
 
 	if (dead_entries != NULL)
 		uvm_unmap_detach(dead_entries, 0);
@@ -5179,15 +4635,6 @@ uvm_map_reference(struct vm_map *map)
 	mutex_exit(&map->misc_lock);
 }
 
-struct vm_map_kernel *
-vm_map_to_kernel(struct vm_map *map)
-{
-
-	KASSERT(VM_MAP_IS_KERNEL(map));
-
-	return (struct vm_map_kernel *)map;
-}
-
 bool
 vm_map_starved_p(struct vm_map *map)
 {
Index: sys/uvm/uvm_map.h
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_map.h,v
retrieving revision 1.69
diff -u -p -r1.69 uvm_map.h
--- sys/uvm/uvm_map.h	21 Jan 2012 16:51:38 -0000	1.69
+++ sys/uvm/uvm_map.h	23 Jan 2012 22:02:25 -0000
@@ -81,9 +81,9 @@
  * => map must be locked by caller
  */
 
-#define UVM_MAP_CLIP_START(MAP,ENTRY,VA,UMR) { \
+#define UVM_MAP_CLIP_START(MAP,ENTRY,VA) { \
 	if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \
-		uvm_map_clip_start(MAP,ENTRY,VA,UMR); \
+		uvm_map_clip_start(MAP,ENTRY,VA); \
 	} \
 }
 
@@ -94,11 +94,11 @@
  * => map must be locked by caller
  */
 
-#define UVM_MAP_CLIP_END(MAP,ENTRY,VA,UMR) { \
+#define UVM_MAP_CLIP_END(MAP,ENTRY,VA) { \
 	if ((VA) > (ENTRY)->start && (VA) < (ENTRY)->end) { \
-		uvm_map_clip_end(MAP,ENTRY,VA,UMR); \
+		uvm_map_clip_end(MAP,ENTRY,VA); \
 	} \
-} 
+}
 
 /*
  * extract flags
@@ -151,10 +151,8 @@ struct vm_map_entry {
 
 #define	UVM_MAP_KERNEL		0x01		/* kernel map entry */
 #define	UVM_MAP_KMAPENT		0x02		/* contains map entries */
-#define	UVM_MAP_FIRST		0x04		/* the first special entry */
-#define	UVM_MAP_QUANTUM		0x08		/* allocated with
-						 * UVM_FLAG_QUANTUM */
-#define	UVM_MAP_NOMERGE		0x10		/* this entry is not mergable */
+#define	UVM_MAP_STATIC		0x04		/* special static entries */
+#define	UVM_MAP_NOMERGE		0x08		/* this entry is not mergeable */
 
 };
 
@@ -232,19 +230,6 @@ struct vm_map {
 
 #include <sys/callback.h>
 
-struct vm_map_kernel {
-	struct vm_map vmk_map;
-	LIST_HEAD(, uvm_kmapent_hdr) vmk_kentry_free;
-			/* Freelist of map entry */
-	struct vm_map_entry	*vmk_merged_entries;
-			/* Merged entries, kept for later splitting */
-
-	struct callback_head vmk_reclaim_callback;
-#if !defined(PMAP_MAP_POOLPAGE)
-	struct pool vmk_vacache; /* kva cache */
-	struct pool_allocator vmk_vacache_allocator; /* ... and its allocator */
-#endif
-};
 #endif /* defined(_KERNEL) */
 
 #define	VM_MAP_IS_KERNEL(map)	(vm_map_pmap(map) == pmap_kernel())
@@ -255,19 +240,9 @@ struct vm_map_kernel {
 #define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
 #define	VM_MAP_DYING		0x20		/* rw: map is being destroyed */
 #define	VM_MAP_TOPDOWN		0x40		/* ro: arrange map top-down */
-#define	VM_MAP_VACACHE		0x80		/* ro: use kva cache */
 #define	VM_MAP_WANTVA		0x100		/* rw: want va */
 
 #ifdef _KERNEL
-struct uvm_mapent_reservation {
-	struct vm_map_entry *umr_entries[2];
-	int umr_nentries;
-};
-#define	UMR_EMPTY(umr)		((umr) == NULL || (umr)->umr_nentries == 0)
-#define	UMR_GETENTRY(umr)	((umr)->umr_entries[--(umr)->umr_nentries])
-#define	UMR_PUTENTRY(umr, ent)	\
-	(umr)->umr_entries[(umr)->umr_nentries++] = (ent)
-
 struct uvm_map_args {
 	struct vm_map_entry *uma_prev;
 
@@ -302,10 +277,9 @@ void		uvm_map_deallocate(struct vm_map *
 int		uvm_map_willneed(struct vm_map *, vaddr_t, vaddr_t);
 int		uvm_map_clean(struct vm_map *, vaddr_t, vaddr_t, int);
 void		uvm_map_clip_start(struct vm_map *, struct vm_map_entry *,
-		    vaddr_t, struct uvm_mapent_reservation *);
+		    vaddr_t);
 void		uvm_map_clip_end(struct vm_map *, struct vm_map_entry *,
-		    vaddr_t, struct uvm_mapent_reservation *);
-struct vm_map	*uvm_map_create(pmap_t, vaddr_t, vaddr_t, int);
+		    vaddr_t);
 int		uvm_map_extract(struct vm_map *, vaddr_t, vsize_t,
 		    struct vm_map *, vaddr_t *, int);
 struct vm_map_entry *
@@ -315,24 +289,20 @@ int		uvm_map_inherit(struct vm_map *, va
 		    vm_inherit_t);
 int		uvm_map_advice(struct vm_map *, vaddr_t, vaddr_t, int);
 void		uvm_map_init(void);
+void		uvm_map_init_caches(void);
 bool		uvm_map_lookup_entry(struct vm_map *, vaddr_t,
 		    struct vm_map_entry **);
 void		uvm_map_reference(struct vm_map *);
 int		uvm_map_reserve(struct vm_map *, vsize_t, vaddr_t, vsize_t,
 		    vaddr_t *, uvm_flag_t);
 void		uvm_map_setup(struct vm_map *, vaddr_t, vaddr_t, int);
-void		uvm_map_setup_kernel(struct vm_map_kernel *,
-		    vaddr_t, vaddr_t, int);
-struct vm_map_kernel *
-		vm_map_to_kernel(struct vm_map *);
 int		uvm_map_submap(struct vm_map *, vaddr_t, vaddr_t,
 		    struct vm_map *);
 void		uvm_unmap1(struct vm_map *, vaddr_t, vaddr_t, int);
 #define	uvm_unmap(map, s, e)	uvm_unmap1((map), (s), (e), 0)
 void		uvm_unmap_detach(struct vm_map_entry *,int);
 void		uvm_unmap_remove(struct vm_map *, vaddr_t, vaddr_t,
-		    struct vm_map_entry **, struct uvm_mapent_reservation *,
-		    int);
+		    struct vm_map_entry **, int);
 
 int		uvm_map_prepare(struct vm_map *, vaddr_t, vsize_t,
 		    struct uvm_object *, voff_t, vsize_t, uvm_flag_t,
@@ -340,13 +310,6 @@ int		uvm_map_prepare(struct vm_map *, va
 int		uvm_map_enter(struct vm_map *, const struct uvm_map_args *,
 		    struct vm_map_entry *);
 
-int		uvm_mapent_reserve(struct vm_map *,
-		    struct uvm_mapent_reservation *, int, int);
-void		uvm_mapent_unreserve(struct vm_map *,
-		    struct uvm_mapent_reservation *);
-
-vsize_t		uvm_mapent_overhead(vsize_t, int);
-
 int		uvm_mapent_trymerge(struct vm_map *,
 		    struct vm_map_entry *, int);
 #define	UVM_MERGE_COPYING	1
Index: sys/uvm/uvm_meter.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_meter.c,v
retrieving revision 1.58
diff -u -p -r1.58 uvm_meter.c
--- sys/uvm/uvm_meter.c	30 Dec 2011 19:01:07 -0000	1.58
+++ sys/uvm/uvm_meter.c	23 Jan 2012 22:02:25 -0000
@@ -249,12 +249,6 @@ SYSCTL_SETUP(sysctl_vm_setup, "sysctl vm
 		       CTL_VM, VM_UVMEXP, CTL_EOL);
 	sysctl_createv(clog, 0, NULL, NULL,
 		       CTLFLAG_PERMANENT,
-		       CTLTYPE_INT, "nkmempages",
-		       SYSCTL_DESCR("Default number of pages in kmem_map"),
-		       NULL, 0, &nkmempages, 0,
-		       CTL_VM, VM_NKMEMPAGES, CTL_EOL);
-	sysctl_createv(clog, 0, NULL, NULL,
-		       CTLFLAG_PERMANENT,
 		       CTLTYPE_STRUCT, "uvmexp2",
 		       SYSCTL_DESCR("Detailed system-wide virtual memory "
 				    "statistics (MI)"),
Index: sys/uvm/uvm_mmap.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_mmap.c,v
retrieving revision 1.143
diff -u -p -r1.143 uvm_mmap.c
--- sys/uvm/uvm_mmap.c	5 Jan 2012 15:19:53 -0000	1.143
+++ sys/uvm/uvm_mmap.c	23 Jan 2012 22:02:26 -0000
@@ -701,7 +701,7 @@ sys_munmap(struct lwp *l, const struct s
 		return (EINVAL);
 	}
 #endif
-	uvm_unmap_remove(map, addr, addr + size, &dead_entries, NULL, 0);
+	uvm_unmap_remove(map, addr, addr + size, &dead_entries, 0);
 	vm_map_unlock(map);
 	if (dead_entries != NULL)
 		uvm_unmap_detach(dead_entries, 0);
Index: sys/uvm/uvm_page.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_page.c,v
retrieving revision 1.178
diff -u -p -r1.178 uvm_page.c
--- sys/uvm/uvm_page.c	6 Oct 2011 12:26:03 -0000	1.178
+++ sys/uvm/uvm_page.c	23 Jan 2012 22:02:26 -0000
@@ -74,7 +74,6 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v
 
 #include <sys/param.h>
 #include <sys/systm.h>
-#include <sys/malloc.h>
 #include <sys/sched.h>
 #include <sys/kernel.h>
 #include <sys/vnode.h>
@@ -136,9 +135,7 @@ static vaddr_t      virtual_space_end;
  * uvm_pageboot_alloc().
  */
 
-static bool have_recolored_pages /* = false */;
-
-MALLOC_DEFINE(M_VMPAGE, "VM page", "VM page");
+static size_t recolored_pages_memsize /* = 0 */;
 
 #ifdef DEBUG
 vaddr_t uvm_zerocheckkva;
@@ -353,7 +350,6 @@ uvm_page_init(vaddr_t *kvm_startp, vaddr
 
 	uvm.cpus[0] = &boot_cpu;
 	curcpu()->ci_data.cpu_uvm = &boot_cpu;
-	uvm_reclaim_init();
 	uvmpdpol_init();
 	mutex_init(&uvm_pageqlock, MUTEX_DRIVER, IPL_NONE);
 	mutex_init(&uvm_fpageqlock, MUTEX_DRIVER, IPL_VM);
@@ -766,7 +762,7 @@ uvm_page_physload(paddr_t start, paddr_t
 
 	/*
 	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
-	 * called yet, so malloc is not available).
+	 * called yet, so kmem is not available).
 	 */
 
 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
@@ -776,7 +772,7 @@ uvm_page_physload(paddr_t start, paddr_t
 	preload = (lcv == vm_nphysmem);
 
 	/*
-	 * if VM is already running, attempt to malloc() vm_page structures
+	 * if VM is already running, attempt to kmem_alloc vm_page structures
 	 */
 
 	if (!preload) {
@@ -984,6 +980,7 @@ uvm_page_recolor(int newncolors)
 	struct pgfreelist gpgfl, pgfl;
 	struct vm_page *pg;
 	vsize_t bucketcount;
+	size_t bucketmemsize, oldbucketmemsize;
 	int lcv, color, i, ocolors;
 	struct uvm_cpu *ucpu;
 
@@ -998,8 +995,8 @@ uvm_page_recolor(int newncolors)
 	}
 
 	bucketcount = newncolors * VM_NFREELIST;
-	bucketarray = malloc(bucketcount * sizeof(struct pgflbucket) * 2,
-	    M_VMPAGE, M_NOWAIT);
+	bucketmemsize = bucketcount * sizeof(struct pgflbucket) * 2;
+	bucketarray = kmem_alloc(bucketmemsize, KM_SLEEP);
 	cpuarray = bucketarray + bucketcount;
 	if (bucketarray == NULL) {
 		printf("WARNING: unable to allocate %ld page color buckets\n",
@@ -1012,7 +1009,7 @@ uvm_page_recolor(int newncolors)
 	/* Make sure we should still do this. */
 	if (newncolors <= uvmexp.ncolors) {
 		mutex_spin_exit(&uvm_fpageqlock);
-		free(bucketarray, M_VMPAGE);
+		kmem_free(bucketarray, bucketmemsize);
 		return;
 	}
 
@@ -1048,14 +1045,14 @@ uvm_page_recolor(int newncolors)
 		ucpu->page_free[lcv].pgfl_buckets = pgfl.pgfl_buckets;
 	}
 
-	if (!have_recolored_pages)
-		oldbucketarray = NULL;
+	oldbucketmemsize = recolored_pages_memsize;
 
-	have_recolored_pages = true;
+	recolored_pages_memsize = bucketmemsize;
 	mutex_spin_exit(&uvm_fpageqlock);
 
-	if (oldbucketarray)
-		free(oldbucketarray, M_VMPAGE);
+	if (oldbucketmemsize) {
+		kmem_free(oldbucketarray, oldbucketmemsize);
+	}
 
 	/*
 	 * this calls uvm_km_alloc() which may want to hold
@@ -1087,8 +1084,8 @@ uvm_cpu_attach(struct cpu_info *ci)
 
 	/* Configure this CPU's free lists. */
 	bucketcount = uvmexp.ncolors * VM_NFREELIST;
-	bucketarray = malloc(bucketcount * sizeof(struct pgflbucket),
-	    M_VMPAGE, M_WAITOK);
+	bucketarray = kmem_alloc(bucketcount * sizeof(struct pgflbucket),
+	    KM_SLEEP);
 	ucpu = kmem_zalloc(sizeof(*ucpu), KM_SLEEP);
 	uvm.cpus[cpu_index(ci)] = ucpu;
 	ci->ci_data.cpu_uvm = ucpu;
Index: sys/uvm/uvm_pager.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pager.c,v
retrieving revision 1.107
diff -u -p -r1.107 uvm_pager.c
--- sys/uvm/uvm_pager.c	11 Oct 2011 23:57:07 -0000	1.107
+++ sys/uvm/uvm_pager.c	23 Jan 2012 22:02:26 -0000
@@ -272,7 +272,7 @@ uvm_pagermapout(vaddr_t kva, int npages)
 	}
 
 	vm_map_lock(pager_map);
-	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
+	uvm_unmap_remove(pager_map, kva, kva + size, &entries, 0);
 	mutex_enter(&pager_map_wanted_lock);
 	if (pager_map_wanted) {
 		pager_map_wanted = false;
Index: sys/uvm/uvm_pdaemon.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdaemon.c,v
retrieving revision 1.103
diff -u -p -r1.103 uvm_pdaemon.c
--- sys/uvm/uvm_pdaemon.c	12 Jun 2011 03:36:03 -0000	1.103
+++ sys/uvm/uvm_pdaemon.c	23 Jan 2012 22:02:26 -0000
@@ -109,10 +109,6 @@ static unsigned int uvm_pagedaemon_waite
  */
 u_int uvm_extrapages;
 
-static kmutex_t uvm_reclaim_lock;
-
-SLIST_HEAD(uvm_reclaim_hooks, uvm_reclaim_hook) uvm_reclaim_list;
-
 /*
  * uvm_wait: wait (sleep) for the page daemon to free some pages
  *
@@ -232,7 +228,6 @@ uvm_pageout(void *arg)
 	int extrapages = 0;
 	struct pool *pp;
 	uint64_t where;
-	struct uvm_reclaim_hook *hook;
 	
 	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);
 
@@ -325,7 +320,7 @@ uvm_pageout(void *arg)
 		 * if we don't need free memory, we're done.
 		 */
 
-		if (!needsfree) 
+		if (!needsfree && !uvm_km_va_starved_p())
 			continue;
 
 		/*
@@ -341,12 +336,6 @@ uvm_pageout(void *arg)
 		buf_drain(bufcnt << PAGE_SHIFT);
 		mutex_exit(&bufcache_lock);
 
-		mutex_enter(&uvm_reclaim_lock);
-		SLIST_FOREACH(hook, &uvm_reclaim_list, uvm_reclaim_next) {
-			(*hook->uvm_reclaim_hook)();
-		}
-		mutex_exit(&uvm_reclaim_lock);
-		
 		/*
 		 * complete draining the pools.
 		 */
@@ -1032,43 +1021,3 @@ uvm_estimatepageable(int *active, int *i
 	uvmpdpol_estimatepageable(active, inactive);
 }
 
-void
-uvm_reclaim_init(void)
-{
-	
-	/* Initialize UVM reclaim hooks. */
-	mutex_init(&uvm_reclaim_lock, MUTEX_DEFAULT, IPL_NONE);
-	SLIST_INIT(&uvm_reclaim_list);
-}
-
-void
-uvm_reclaim_hook_add(struct uvm_reclaim_hook *hook)
-{
-
-	KASSERT(hook != NULL);
-	
-	mutex_enter(&uvm_reclaim_lock);
-	SLIST_INSERT_HEAD(&uvm_reclaim_list, hook, uvm_reclaim_next);
-	mutex_exit(&uvm_reclaim_lock);
-}
-
-void
-uvm_reclaim_hook_del(struct uvm_reclaim_hook *hook_entry)
-{
-	struct uvm_reclaim_hook *hook;
-
-	KASSERT(hook_entry != NULL);
-	
-	mutex_enter(&uvm_reclaim_lock);
-	SLIST_FOREACH(hook, &uvm_reclaim_list, uvm_reclaim_next) {
-		if (hook != hook_entry) {
-			continue;
-		}
-
-		SLIST_REMOVE(&uvm_reclaim_list, hook, uvm_reclaim_hook,
-		    uvm_reclaim_next);
-		break;
-	}
-
-	mutex_exit(&uvm_reclaim_lock);
-}
Index: sys/uvm/uvm_pdpolicy_clock.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_pdpolicy_clock.c,v
retrieving revision 1.14
diff -u -p -r1.14 uvm_pdpolicy_clock.c
--- sys/uvm/uvm_pdpolicy_clock.c	12 Jun 2011 03:36:04 -0000	1.14
+++ sys/uvm/uvm_pdpolicy_clock.c	23 Jan 2012 22:02:27 -0000
@@ -419,8 +419,20 @@ uvmpdpol_reinit(void)
 bool
 uvmpdpol_needsscan_p(void)
 {
+	vmem_size_t kva_size;
+	vmem_size_t kva_free;
 
-	return pdpol_state.s_inactive < pdpol_state.s_inactarg;
+	kva_size = vmem_size(kmem_arena, VMEM_FREE|VMEM_ALLOC);
+	kva_free = vmem_size(kmem_arena, VMEM_FREE);
+
+	if (kva_free < (kva_size / 10)) {
+		return true;
+	}
+
+	if (pdpol_state.s_inactive < pdpol_state.s_inactarg) {
+		return true;
+	}
+	return false;
 }
 
 void
Index: sys/uvm/uvm_swap.c
===================================================================
RCS file: /cvsroot/src/sys/uvm/uvm_swap.c,v
retrieving revision 1.158
diff -u -p -r1.158 uvm_swap.c
--- sys/uvm/uvm_swap.c	12 Dec 2011 19:03:13 -0000	1.158
+++ sys/uvm/uvm_swap.c	23 Jan 2012 22:02:27 -0000
@@ -46,13 +46,13 @@ __KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v
 #include <sys/disklabel.h>
 #include <sys/errno.h>
 #include <sys/kernel.h>
-#include <sys/malloc.h>
 #include <sys/vnode.h>
 #include <sys/file.h>
 #include <sys/vmem.h>
 #include <sys/blist.h>
 #include <sys/mount.h>
 #include <sys/pool.h>
+#include <sys/kmem.h>
 #include <sys/syscallargs.h>
 #include <sys/swap.h>
 #include <sys/kauth.h>
@@ -208,7 +208,6 @@ static struct pool vndxfer_pool, vndbuf_
 /*
  * local variables
  */
-MALLOC_DEFINE(M_VMSWAP, "VM swap", "VM swap structures");
 static vmem_t *swapmap;	/* controls the mapping of /dev/drum */
 
 /* list of all active swap devices [by priority] */
@@ -344,7 +343,7 @@ swaplist_insert(struct swapdev *sdp, str
 			LIST_INSERT_HEAD(&swap_priority, spp, spi_swappri);
 	} else {
 	  	/* we don't need a new priority structure, free it */
-		free(newspp, M_VMSWAP);
+		kmem_free(newspp, sizeof(*newspp));
 	}
 
 	/*
@@ -405,7 +404,7 @@ swaplist_trim(void)
 		    (void *)&spp->spi_swapdev)
 			continue;
 		LIST_REMOVE(spp, spi_swappri);
-		free(spp, M_VMSWAP);
+		kmem_free(spp, sizeof(*spp));
 	}
 }
 
@@ -462,12 +461,13 @@ sys_swapctl(struct lwp *l, const struct 
 
 	misc = SCARG(uap, misc);
 
+	userpath = kmem_alloc(SWAP_PATH_MAX, KM_SLEEP);
+
 	/*
 	 * ensure serialized syscall access by grabbing the swap_syscall_lock
 	 */
 	rw_enter(&swap_syscall_lock, RW_WRITER);
 
-	userpath = malloc(SWAP_PATH_MAX, M_TEMP, M_WAITOK);
 	/*
 	 * we handle the non-priv NSWAP and STATS request first.
 	 *
@@ -511,12 +511,12 @@ sys_swapctl(struct lwp *l, const struct 
 		else
 #endif
 			len = sizeof(struct swapent) * misc;
-		sep = (struct swapent *)malloc(len, M_TEMP, M_WAITOK);
+		sep = (struct swapent *)kmem_alloc(len, KM_SLEEP);
 
 		uvm_swap_stats(SCARG(uap, cmd), sep, misc, retval);
 		error = copyout(sep, SCARG(uap, arg), len);
 
-		free(sep, M_TEMP);
+		kmem_free(sep, len);
 		UVMHIST_LOG(pdhist, "<- done SWAP_STATS", 0, 0, 0, 0);
 		goto out;
 	}
@@ -615,7 +615,7 @@ sys_swapctl(struct lwp *l, const struct 
 		 * any empty priority structures.
 		 */
 		priority = SCARG(uap, misc);
-		spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
+		spp = kmem_alloc(sizeof(*spp), KM_SLEEP);
 		mutex_enter(&uvm_swap_data_lock);
 		if ((sdp = swaplist_find(vp, true)) == NULL) {
 			error = ENOENT;
@@ -625,7 +625,7 @@ sys_swapctl(struct lwp *l, const struct 
 		}
 		mutex_exit(&uvm_swap_data_lock);
 		if (error)
-			free(spp, M_VMSWAP);
+			kmem_free(spp, sizeof(*spp));
 		break;
 
 	case SWAP_ON:
@@ -638,8 +638,8 @@ sys_swapctl(struct lwp *l, const struct 
 		 */
 
 		priority = SCARG(uap, misc);
-		sdp = malloc(sizeof *sdp, M_VMSWAP, M_WAITOK);
-		spp = malloc(sizeof *spp, M_VMSWAP, M_WAITOK);
+		sdp = kmem_alloc(sizeof(*sdp), KM_SLEEP);
+		spp = kmem_alloc(sizeof(*spp), KM_SLEEP);
 		memset(sdp, 0, sizeof(*sdp));
 		sdp->swd_flags = SWF_FAKE;
 		sdp->swd_vp = vp;
@@ -650,15 +650,15 @@ sys_swapctl(struct lwp *l, const struct 
 			error = EBUSY;
 			mutex_exit(&uvm_swap_data_lock);
 			bufq_free(sdp->swd_tab);
-			free(sdp, M_VMSWAP);
-			free(spp, M_VMSWAP);
+			kmem_free(sdp, sizeof(*sdp));
+			kmem_free(spp, sizeof(*spp));
 			break;
 		}
 		swaplist_insert(sdp, spp, priority);
 		mutex_exit(&uvm_swap_data_lock);
 
 		sdp->swd_pathlen = len;
-		sdp->swd_path = malloc(sdp->swd_pathlen, M_VMSWAP, M_WAITOK);
+		sdp->swd_path = kmem_alloc(sdp->swd_pathlen, KM_SLEEP);
 		if (copystr(userpath, sdp->swd_path, sdp->swd_pathlen, 0) != 0)
 			panic("swapctl: copystr");
 
@@ -675,8 +675,8 @@ sys_swapctl(struct lwp *l, const struct 
 			swaplist_trim();
 			mutex_exit(&uvm_swap_data_lock);
 			bufq_free(sdp->swd_tab);
-			free(sdp->swd_path, M_VMSWAP);
-			free(sdp, M_VMSWAP);
+			kmem_free(sdp->swd_path, sdp->swd_pathlen);
+			kmem_free(sdp, sizeof(*sdp));
 			break;
 		}
 		break;
@@ -715,7 +715,7 @@ sys_swapctl(struct lwp *l, const struct 
 	vput(vp);
 
 out:
-	free(userpath, M_TEMP);
+	kmem_free(userpath, SWAP_PATH_MAX);
 	rw_exit(&swap_syscall_lock);
 
 	UVMHIST_LOG(pdhist, "<- done!  error=%d", error, 0, 0, 0);
@@ -1100,7 +1100,7 @@ swap_off(struct lwp *l, struct swapdev *
 	vmem_free(swapmap, sdp->swd_drumoffset, sdp->swd_drumsize);
 	blist_destroy(sdp->swd_blist);
 	bufq_free(sdp->swd_tab);
-	free(sdp, M_VMSWAP);
+	kmem_free(sdp, sizeof(*sdp));
 	return (0);
 }
 
Index: tests/kernel/t_extent.c
===================================================================
RCS file: /cvsroot/src/tests/kernel/t_extent.c,v
retrieving revision 1.3
diff -u -p -r1.3 t_extent.c
--- tests/kernel/t_extent.c	11 Jun 2011 18:03:17 -0000	1.3
+++ tests/kernel/t_extent.c	23 Jan 2012 22:02:27 -0000
@@ -48,7 +48,7 @@ static struct extent *ex;
 
 #define h_create(name, start, end, flags) \
 	ATF_REQUIRE((ex = extent_create(name, \
-	    start, end, 0, 0, 0, flags)) != NULL);
+	    start, end, 0, 0, flags)) != NULL);
 
 #define h_alloc_region(start, size) \
 	ATF_REQUIRE_EQ_MSG(ret = extent_alloc_region(ex, \
Index: usr.bin/pmap/pmap.c
===================================================================
RCS file: /cvsroot/src/usr.bin/pmap/pmap.c,v
retrieving revision 1.47
diff -u -p -r1.47 pmap.c
--- usr.bin/pmap/pmap.c	11 Oct 2011 12:25:56 -0000	1.47
+++ usr.bin/pmap/pmap.c	23 Jan 2012 22:02:29 -0000
@@ -302,12 +302,11 @@ dump_vm_map_entry(kvm_t *kd, struct kinf
 		printf("%*s    aref = { ar_pageoff = %x, ar_amap = %p },",
 		       indent(2), "", vme->aref.ar_pageoff, vme->aref.ar_amap);
 		printf(" advice = %d,\n", vme->advice);
-		printf("%*s    flags = %x <%s%s%s%s%s > }\n", indent(2), "",
+		printf("%*s    flags = %x <%s%s%s%s > }\n", indent(2), "",
 		       vme->flags,
 		       vme->flags & UVM_MAP_KERNEL ? " KERNEL" : "",
 		       vme->flags & UVM_MAP_KMAPENT ? " KMAPENT" : "",
-		       vme->flags & UVM_MAP_FIRST ? " FIRST" : "",
-		       vme->flags & UVM_MAP_QUANTUM ? " QUANTUM" : "",
+		       vme->flags & UVM_MAP_STATIC ? " STATIC" : "",
 		       vme->flags & UVM_MAP_NOMERGE ? " NOMERGE" : "");
 	}
 
