svn commit: r198010 - in user/kmacy/releng_8_fcs/sys: amd64/amd64 amd64/include sys vm

Kip Macy kmacy at FreeBSD.org
Tue Oct 13 01:15:24 UTC 2009


Author: kmacy
Date: Tue Oct 13 01:15:23 2009
New Revision: 198010
URL: http://svn.freebsd.org/changeset/base/198010

Log:
  - add support for excluding portions of wired memory from core dumps
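
  A minimal usage sketch (the zone, buffer, and malloc-type names below are
  hypothetical; UMA_ZONE_NODUMP and M_NODUMP are the flags added by this
  change):

	#include <sys/param.h>
	#include <sys/kernel.h>
	#include <sys/malloc.h>
	#include <vm/uma.h>

	MALLOC_DEFINE(M_SCRATCH, "scratch", "buffers kept out of minidumps");

	static uma_zone_t scratch_zone;
	static void *scratch_buf;

	static void
	scratch_init(void)
	{
		/*
		 * Slabs backing this zone are requested with M_NODUMP, so
		 * keg_alloc_slab() has the backing pages dropped from the
		 * minidump bitmap.
		 */
		scratch_zone = uma_zcreate("scratch", 4096, NULL, NULL,
		    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NODUMP);

		/*
		 * For allocations that reach kmem_malloc(), M_NODUMP drops
		 * the backing pages from the dump bitmap (see the vm_kern.c
		 * change below).
		 */
		scratch_buf = malloc(128 * 1024, M_SCRATCH,
		    M_WAITOK | M_NODUMP);
	}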

Modified:
  user/kmacy/releng_8_fcs/sys/amd64/amd64/minidump_machdep.c
  user/kmacy/releng_8_fcs/sys/amd64/amd64/pmap.c
  user/kmacy/releng_8_fcs/sys/amd64/amd64/uma_machdep.c
  user/kmacy/releng_8_fcs/sys/amd64/include/md_var.h
  user/kmacy/releng_8_fcs/sys/amd64/include/vmparam.h
  user/kmacy/releng_8_fcs/sys/sys/malloc.h
  user/kmacy/releng_8_fcs/sys/vm/pmap.h
  user/kmacy/releng_8_fcs/sys/vm/uma.h
  user/kmacy/releng_8_fcs/sys/vm/uma_core.c
  user/kmacy/releng_8_fcs/sys/vm/vm.h
  user/kmacy/releng_8_fcs/sys/vm/vm_glue.c
  user/kmacy/releng_8_fcs/sys/vm/vm_kern.c
  user/kmacy/releng_8_fcs/sys/vm/vnode_pager.c

Modified: user/kmacy/releng_8_fcs/sys/amd64/amd64/minidump_machdep.c
==============================================================================
--- user/kmacy/releng_8_fcs/sys/amd64/amd64/minidump_machdep.c	Tue Oct 13 00:43:31 2009	(r198009)
+++ user/kmacy/releng_8_fcs/sys/amd64/amd64/minidump_machdep.c	Tue Oct 13 01:15:23 2009	(r198010)
@@ -56,6 +56,7 @@ CTASSERT(sizeof(struct kerneldumpheader)
 extern uint64_t KPDPphys;
 
 uint64_t *vm_page_dump;
+uint64_t *vm_page_dump_exclude;
 int vm_page_dump_size;
 
 static struct kerneldumpheader kdh;
@@ -71,10 +72,16 @@ CTASSERT(sizeof(*vm_page_dump) == 8);
 static int
 is_dumpable(vm_paddr_t pa)
 {
-	int i;
+	int i, idx, bit, isdata;
+	uint64_t pfn = pa;
+
+	pfn >>= PAGE_SHIFT;
+	idx = pfn >> 6;		/* 2^6 = 64 */
+	bit = pfn & 63;
+	isdata = ((vm_page_dump_exclude[idx] & (1ul << bit)) == 0);
 
 	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
-		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
+		if (pa >= dump_avail[i] && pa < dump_avail[i + 1] && isdata)
 			return (1);
 	}
 	return (0);
@@ -226,6 +233,7 @@ minidumpsys(struct dumperinfo *di)
 	dumpsize = ptesize;
 	dumpsize += round_page(msgbufp->msg_size);
 	dumpsize += round_page(vm_page_dump_size);
+	printf("dumpsize: ");
 	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
 		bits = vm_page_dump[i];
 		while (bits) {
@@ -238,10 +246,13 @@ minidumpsys(struct dumperinfo *di)
 				dump_drop_page(pa);
 			}
 			bits &= ~(1ul << bit);
+			if ((dumpsize % (1<<29)) == 0)
+				printf("%ldMB ", (dumpsize>>20));
 		}
 	}
 	dumpsize += PAGE_SIZE;
 
+	printf("\n");
 	/* Determine dump offset on device. */
 	if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
 		error = ENOSPC;
@@ -273,6 +284,7 @@ minidumpsys(struct dumperinfo *di)
 		goto fail;
 	dumplo += sizeof(kdh);
 
+	printf("write header\n");
 	/* Dump my header */
 	bzero(&fakept, sizeof(fakept));
 	bcopy(&mdhdr, &fakept, sizeof(mdhdr));
@@ -280,16 +292,19 @@ minidumpsys(struct dumperinfo *di)
 	if (error)
 		goto fail;
 
+	printf("write msgbuf\n");
 	/* Dump msgbuf up front */
 	error = blk_write(di, (char *)msgbufp->msg_ptr, 0, round_page(msgbufp->msg_size));
 	if (error)
 		goto fail;
 
+	printf("write bitmap\n");
 	/* Dump bitmap */
 	error = blk_write(di, (char *)vm_page_dump, 0, round_page(vm_page_dump_size));
 	if (error)
 		goto fail;
 
+	printf("\nDump kernel page table pages\n");
 	/* Dump kernel page table pages */
 	pdp = (uint64_t *)PHYS_TO_DMAP(KPDPphys);
 	for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + NKPT * NBPDR,
@@ -343,8 +358,10 @@ minidumpsys(struct dumperinfo *di)
 
 	/* Dump memory chunks */
 	/* XXX cluster it up and use blk_dump() */
-	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
-		bits = vm_page_dump[i];
+	printf("\nclustering memory chunks\n");
+	for (i = 0;
+	     i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
+		bits = vm_page_dump[i] & ~(vm_page_dump_exclude[i]);
 		while (bits) {
 			bit = bsfq(bits);
 			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) + bit) * PAGE_SIZE;
@@ -354,7 +371,6 @@ minidumpsys(struct dumperinfo *di)
 			bits &= ~(1ul << bit);
 		}
 	}
-
 	error = blk_flush(di);
 	if (error)
 		goto fail;
@@ -365,6 +381,7 @@ minidumpsys(struct dumperinfo *di)
 		goto fail;
 	dumplo += sizeof(kdh);
 
+	printf("\nstarting dump\n");
 	/* Signal completion, signoff and exit stage left. */
 	dump_write(di, NULL, 0, 0, 0);
 	printf("\nDump complete\n");
@@ -403,3 +420,25 @@ dump_drop_page(vm_paddr_t pa)
 	bit = pa & 63;
 	atomic_clear_long(&vm_page_dump[idx], 1ul << bit);
 }
+
+void
+dump_exclude_page(vm_paddr_t pa)
+{
+	int idx, bit;
+
+	pa >>= PAGE_SHIFT;
+	idx = pa >> 6;		/* 2^6 = 64 */
+	bit = pa & 63;
+	atomic_set_long(&vm_page_dump_exclude[idx], 1ul << bit);
+}
+
+void
+dump_unexclude_page(vm_paddr_t pa)
+{
+	int idx, bit;
+
+	pa >>= PAGE_SHIFT;
+	idx = pa >> 6;		/* 2^6 = 64 */
+	bit = pa & 63;
+	atomic_clear_long(&vm_page_dump_exclude[idx], 1ul << bit);
+}
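
For reference, the page-to-bit mapping used by is_dumpable(), dump_exclude_page()
and dump_unexclude_page() above packs 64 pages into each 64-bit word of the
bitmap; a standalone illustration with an arbitrary example address:

	#include <stdint.h>
	#include <stdio.h>

	#define	EX_PAGE_SHIFT	12	/* 4 KB pages, as on amd64 */

	int
	main(void)
	{
		uint64_t pa = 0x12345000ULL;		/* arbitrary physical address */
		uint64_t pfn = pa >> EX_PAGE_SHIFT;	/* page frame number: 0x12345 */
		int idx = pfn >> 6;			/* word index, 64 pages per word: 0x48d */
		int bit = pfn & 63;			/* bit within that word: 5 */

		/* The page is skipped when this bit is set in vm_page_dump_exclude[idx]. */
		printf("idx=%#x bit=%d\n", idx, bit);
		return (0);
	}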

Modified: user/kmacy/releng_8_fcs/sys/amd64/amd64/pmap.c
==============================================================================
--- user/kmacy/releng_8_fcs/sys/amd64/amd64/pmap.c	Tue Oct 13 00:43:31 2009	(r198009)
+++ user/kmacy/releng_8_fcs/sys/amd64/amd64/pmap.c	Tue Oct 13 01:15:23 2009	(r198010)
@@ -1132,10 +1132,16 @@ pmap_map(vm_offset_t *virt, vm_paddr_t s
  * Note: SMP coherent.  Uses a ranged shootdown IPI.
  */
 void
-pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
+pmap_qenter_prot(vm_offset_t sva, vm_page_t *ma, int count, vm_prot_t prot)
 {
 	pt_entry_t *endpte, oldpte, *pte;
+	uint64_t flags = PG_V;
 
+	if (prot & VM_PROT_WRITE)
+		flags |= PG_RW;
+	if ((prot & VM_PROT_EXECUTE) == 0)
+		flags |= PG_NX;
+	
 	oldpte = 0;
 	pte = vtopte(sva);
 	endpte = pte + count;
@@ -1143,6 +1149,9 @@ pmap_qenter(vm_offset_t sva, vm_page_t *
 		oldpte |= *pte;
 		pte_store(pte, VM_PAGE_TO_PHYS(*ma) | PG_G |
 		    pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
+		pte_store(pte, VM_PAGE_TO_PHYS(*ma) | PG_G | flags);
+		if (prot & VM_PROT_EXCLUDE)
+			dump_exclude_page(VM_PAGE_TO_PHYS(*ma));
 		pte++;
 		ma++;
 	}
@@ -1151,6 +1160,16 @@ pmap_qenter(vm_offset_t sva, vm_page_t *
 		    PAGE_SIZE);
 }
 
+void
+pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
+{
+
+	pmap_qenter_prot(sva, ma, count,
+	    VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
+
+}
+
+
 /*
  * This routine tears out page mappings from the
  * kernel -- it is meant only for temporary mappings.
@@ -1163,6 +1182,7 @@ pmap_qremove(vm_offset_t sva, int count)
 
 	va = sva;
 	while (count-- > 0) {
+		dump_unexclude_page(pmap_kextract(va));
 		pmap_kremove(va);
 		va += PAGE_SIZE;
 	}
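
A sketch of the pairing these two hunks establish (the function and variable
names are hypothetical): pages mapped through pmap_qenter_prot() with
VM_PROT_EXCLUDE stay out of minidumps until pmap_qremove() tears the mapping
down.

	#include <sys/param.h>
	#include <vm/vm.h>
	#include <vm/pmap.h>
	#include <vm/vm_page.h>

	static void
	transient_map_example(vm_offset_t kva, vm_page_t *ma, int npages)
	{
		/* Map read/write, but keep the pages out of minidumps. */
		pmap_qenter_prot(kva, ma, npages,
		    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXCLUDE);

		/* ... fill or consume the mapping ... */

		/*
		 * pmap_qremove() calls dump_unexclude_page() on each page,
		 * so the pages become dumpable again once unmapped.
		 */
		pmap_qremove(kva, npages);
	}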

Modified: user/kmacy/releng_8_fcs/sys/amd64/amd64/uma_machdep.c
==============================================================================
--- user/kmacy/releng_8_fcs/sys/amd64/amd64/uma_machdep.c	Tue Oct 13 00:43:31 2009	(r198009)
+++ user/kmacy/releng_8_fcs/sys/amd64/amd64/uma_machdep.c	Tue Oct 13 01:15:23 2009	(r198010)
@@ -66,7 +66,8 @@ uma_small_alloc(uma_zone_t zone, int byt
 			break;
 	}
 	pa = m->phys_addr;
-	dump_add_page(pa);
+	if ((wait & M_NODUMP) == 0)
+		dump_add_page(pa);
 	va = (void *)PHYS_TO_DMAP(pa);
 	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
 		pagezero(va);

Modified: user/kmacy/releng_8_fcs/sys/amd64/include/md_var.h
==============================================================================
--- user/kmacy/releng_8_fcs/sys/amd64/include/md_var.h	Tue Oct 13 00:43:31 2009	(r198009)
+++ user/kmacy/releng_8_fcs/sys/amd64/include/md_var.h	Tue Oct 13 01:15:23 2009	(r198010)
@@ -60,6 +60,7 @@ extern	char	kstack[];
 extern	char	sigcode[];
 extern	int	szsigcode;
 extern	uint64_t *vm_page_dump;
+extern	uint64_t *vm_page_dump_exclude;
 extern	int	vm_page_dump_size;
 extern	int	_udatasel;
 extern	int	_ucodesel;
@@ -88,6 +89,8 @@ void	fs_load_fault(void) __asm(__STRING(
 void	gs_load_fault(void) __asm(__STRING(gs_load_fault));
 void	dump_add_page(vm_paddr_t);
 void	dump_drop_page(vm_paddr_t);
+void	dump_exclude_page(vm_paddr_t);
+void	dump_unexclude_page(vm_paddr_t);
 void	initializecpu(void);
 void	fillw(int /*u_short*/ pat, void *base, size_t cnt);
 void	fpstate_drop(struct thread *td);

Modified: user/kmacy/releng_8_fcs/sys/amd64/include/vmparam.h
==============================================================================
--- user/kmacy/releng_8_fcs/sys/amd64/include/vmparam.h	Tue Oct 13 00:43:31 2009	(r198009)
+++ user/kmacy/releng_8_fcs/sys/amd64/include/vmparam.h	Tue Oct 13 01:15:23 2009	(r198010)
@@ -88,6 +88,11 @@
 #define	UMA_MD_SMALL_ALLOC
 
 /*
+ * We provide machine-specific support for sparse kernel dumps (minidumps).
+ */
+#define	VM_MD_MINIDUMP
+
+/*
  * The physical address space is densely populated.
  */
 #define	VM_PHYSSEG_DENSE

Modified: user/kmacy/releng_8_fcs/sys/sys/malloc.h
==============================================================================
--- user/kmacy/releng_8_fcs/sys/sys/malloc.h	Tue Oct 13 00:43:31 2009	(r198009)
+++ user/kmacy/releng_8_fcs/sys/sys/malloc.h	Tue Oct 13 01:15:23 2009	(r198010)
@@ -50,6 +50,7 @@
 #define	M_ZERO		0x0100		/* bzero the allocation */
 #define	M_NOVM		0x0200		/* don't ask VM for pages */
 #define	M_USE_RESERVE	0x0400		/* can alloc out of reserve memory */
+#define	M_NODUMP	0x0800		/* don't dump pages in this allocation */
 
 #define	M_MAGIC		877983977	/* time when first defined :-) */
 

Modified: user/kmacy/releng_8_fcs/sys/vm/pmap.h
==============================================================================
--- user/kmacy/releng_8_fcs/sys/vm/pmap.h	Tue Oct 13 00:43:31 2009	(r198009)
+++ user/kmacy/releng_8_fcs/sys/vm/pmap.h	Tue Oct 13 01:15:23 2009	(r198010)
@@ -127,6 +127,7 @@ int		 pmap_pinit(pmap_t);
 void		 pmap_pinit0(pmap_t);
 void		 pmap_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
 void		 pmap_qenter(vm_offset_t, vm_page_t *, int);
+void		 pmap_qenter_prot(vm_offset_t, vm_page_t *, int, vm_prot_t);
 void		 pmap_qremove(vm_offset_t, int);
 void		 pmap_release(pmap_t);
 void		 pmap_remove(pmap_t, vm_offset_t, vm_offset_t);

Modified: user/kmacy/releng_8_fcs/sys/vm/uma.h
==============================================================================
--- user/kmacy/releng_8_fcs/sys/vm/uma.h	Tue Oct 13 00:43:31 2009	(r198009)
+++ user/kmacy/releng_8_fcs/sys/vm/uma.h	Tue Oct 13 01:15:23 2009	(r198010)
@@ -248,6 +248,9 @@ int uma_zsecond_add(uma_zone_t zone, uma
 					 * backend pages and can fail early.
 					 */
 #define	UMA_ZONE_VTOSLAB	0x2000	/* Zone uses vtoslab for lookup. */
+#define	UMA_ZONE_NODUMP		0x4000	/* Zone's pages will not be included in
+					 * mini-dumps.
+					 */
 
 /*
  * These flags are shared between the keg and zone.  In zones wishing to add

Modified: user/kmacy/releng_8_fcs/sys/vm/uma_core.c
==============================================================================
--- user/kmacy/releng_8_fcs/sys/vm/uma_core.c	Tue Oct 13 00:43:31 2009	(r198009)
+++ user/kmacy/releng_8_fcs/sys/vm/uma_core.c	Tue Oct 13 01:15:23 2009	(r198010)
@@ -842,6 +842,9 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t
 	else
 		wait &= ~M_ZERO;
 
+	if (keg->uk_flags & UMA_ZONE_NODUMP)
+		wait |= M_NODUMP;
+	
 	/* zone is passed for legacy reasons. */
 	mem = allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, &flags, wait);
 	if (mem == NULL) {

Modified: user/kmacy/releng_8_fcs/sys/vm/vm.h
==============================================================================
--- user/kmacy/releng_8_fcs/sys/vm/vm.h	Tue Oct 13 00:43:31 2009	(r198009)
+++ user/kmacy/releng_8_fcs/sys/vm/vm.h	Tue Oct 13 01:15:23 2009	(r198010)
@@ -83,6 +83,7 @@ typedef u_char vm_prot_t;	/* protection 
 #define	VM_PROT_WRITE		((vm_prot_t) 0x02)
 #define	VM_PROT_EXECUTE		((vm_prot_t) 0x04)
 #define	VM_PROT_OVERRIDE_WRITE	((vm_prot_t) 0x08)	/* copy-on-write */
+#define	VM_PROT_EXCLUDE		((vm_prot_t) 0x10)	/* don't include in core-dump */
 
 #define	VM_PROT_ALL		(VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
 #define VM_PROT_RW		(VM_PROT_READ|VM_PROT_WRITE)

Modified: user/kmacy/releng_8_fcs/sys/vm/vm_glue.c
==============================================================================
--- user/kmacy/releng_8_fcs/sys/vm/vm_glue.c	Tue Oct 13 00:43:31 2009	(r198009)
+++ user/kmacy/releng_8_fcs/sys/vm/vm_glue.c	Tue Oct 13 01:15:23 2009	(r198010)
@@ -405,7 +405,8 @@ vm_thread_new(struct thread *td, int pag
 		m->valid = VM_PAGE_BITS_ALL;
 	}
 	VM_OBJECT_UNLOCK(ksobj);
-	pmap_qenter(ks, ma, pages);
+	pmap_qenter_prot(ks, ma, pages,
+	    (VM_PROT_READ|VM_PROT_WRITE));
 	return (1);
 }
 
@@ -546,7 +547,8 @@ vm_thread_swapin(struct thread *td)
 		vm_page_wakeup(m);
 	}
 	VM_OBJECT_UNLOCK(ksobj);
-	pmap_qenter(td->td_kstack, ma, pages);
+	pmap_qenter_prot(td->td_kstack, ma, pages,
+	    (VM_PROT_READ|VM_PROT_WRITE));
 	cpu_thread_swapin(td);
 }
 

Modified: user/kmacy/releng_8_fcs/sys/vm/vm_kern.c
==============================================================================
--- user/kmacy/releng_8_fcs/sys/vm/vm_kern.c	Tue Oct 13 00:43:31 2009	(r198009)
+++ user/kmacy/releng_8_fcs/sys/vm/vm_kern.c	Tue Oct 13 01:15:23 2009	(r198010)
@@ -85,6 +85,10 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_extern.h>
 #include <vm/uma.h>
 
+#ifdef	VM_MD_MINIDUMP
+#include <machine/md_var.h>
+#endif
+
 vm_map_t kernel_map=0;
 vm_map_t kmem_map=0;
 vm_map_t exec_map=0;
@@ -207,8 +211,15 @@ kmem_free(map, addr, size)
 	vm_offset_t addr;
 	vm_size_t size;
 {
-
-	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
+	vm_offset_t start = trunc_page(addr);
+	vm_offset_t end = round_page(addr + size);
+#ifdef VM_MD_MINIDUMP
+	vm_offset_t temp = start;
+
+	for (; temp < end; temp += PAGE_SIZE)
+		dump_add_page(pmap_kextract(temp));
+#endif	
+	(void) vm_map_remove(map, start, end);
 }
 
 /*
@@ -363,6 +374,10 @@ retry:
 		}
 		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
+#ifdef VM_MD_MINIDUMP
+		if (flags & M_NODUMP)
+			dump_drop_page(VM_PAGE_TO_PHYS(m));
+#endif		
 		m->valid = VM_PAGE_BITS_ALL;
 		KASSERT((m->flags & PG_UNMANAGED) != 0,
 		    ("kmem_malloc: page %p is managed", m));

Modified: user/kmacy/releng_8_fcs/sys/vm/vnode_pager.c
==============================================================================
--- user/kmacy/releng_8_fcs/sys/vm/vnode_pager.c	Tue Oct 13 00:43:31 2009	(r198009)
+++ user/kmacy/releng_8_fcs/sys/vm/vnode_pager.c	Tue Oct 13 01:15:23 2009	(r198010)
@@ -885,7 +885,8 @@ vnode_pager_generic_getpages(vp, m, byte
 	/*
 	 * and map the pages to be read into the kva
 	 */
-	pmap_qenter(kva, m, count);
+	pmap_qenter_prot(kva, m, count,
+	    (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXCLUDE));
 
 	/* build a minimal buffer header */
 	bp->b_iocmd = BIO_READ;

