svn commit: r237801 - projects/amd64_xen_pv/sys/amd64/xen

Attilio Rao attilio at FreeBSD.org
Fri Jun 29 17:04:43 UTC 2012


Author: attilio
Date: Fri Jun 29 17:04:42 2012
New Revision: 237801
URL: http://svn.freebsd.org/changeset/base/237801

Log:
  PTMB addresses are thought of as mostly opaque, so they are not
  turned into void pointers at all. However, vm_offset_t is not a
  correct choice either, because it does not express the concept at all.
  
  Simply handle the virtual addresses involved as uintptr_t.
  
  Reviewed by:	cherry
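
For context, a minimal standalone sketch (not part of this commit) of
the C99 round-trip guarantee that makes uintptr_t the natural type for
an address treated as an opaque integer:

	#include <assert.h>
	#include <stdint.h>

	int
	main(void)
	{
		int object;
		int *p = &object;

		/*
		 * C99 guarantees that an object pointer converted to
		 * uintptr_t and back compares equal to the original.
		 * vm_offset_t carries no such guarantee; it is merely
		 * an unsigned integer that happens to be pointer-sized
		 * on amd64.
		 */
		uintptr_t va = (uintptr_t)p;
		assert((int *)va == p);

		return (0);
	}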

Modified:
  projects/amd64_xen_pv/sys/amd64/xen/mmu_map.c
  projects/amd64_xen_pv/sys/amd64/xen/mmu_map.h
  projects/amd64_xen_pv/sys/amd64/xen/pmap.c

Modified: projects/amd64_xen_pv/sys/amd64/xen/mmu_map.c
==============================================================================
--- projects/amd64_xen_pv/sys/amd64/xen/mmu_map.c	Fri Jun 29 17:00:52 2012	(r237800)
+++ projects/amd64_xen_pv/sys/amd64/xen/mmu_map.c	Fri Jun 29 17:04:42 2012	(r237801)
@@ -59,7 +59,7 @@ __FBSDID("$FreeBSD$");
 #include <amd64/xen/mmu_map.h>
 
 static int
-pml4t_index(vm_offset_t va)
+pml4t_index(uintptr_t va)
 {
 	/* amd64 sign extends 48th bit and upwards */
 	const uint64_t SIGNMASK = (1UL << 48) - 1;
@@ -69,7 +69,7 @@ pml4t_index(vm_offset_t va)
 }
 
 static int
-pdpt_index(vm_offset_t va)
+pdpt_index(uintptr_t va)
 {
 	/* amd64 sign extends 48th bit and upwards */
 	const uint64_t SIGNMASK = (1UL << 48) - 1;
@@ -79,7 +79,7 @@ pdpt_index(vm_offset_t va)
 }
 
 static int
-pdt_index(vm_offset_t va)
+pdt_index(uintptr_t va)
 {
 	/* amd64 sign extends 48th bit and upwards */
 	const uint64_t SIGNMASK = (1UL << 48) - 1;
@@ -108,7 +108,7 @@ pmap_get_pml4t(struct pmap *pm)
 
 /* Returns physical address */
 static vm_paddr_t
-pmap_get_pdpt(vm_offset_t va, pml4_entry_t *pml4t)
+pmap_get_pdpt(uintptr_t va, pml4_entry_t *pml4t)
 {
 	pml4_entry_t pml4e;
 
@@ -127,7 +127,7 @@ pmap_get_pdpt(vm_offset_t va, pml4_entry
 
 /* Returns physical address */
 static vm_paddr_t
-pmap_get_pdt(vm_offset_t va, pdp_entry_t *pdpt)
+pmap_get_pdt(uintptr_t va, pdp_entry_t *pdpt)
 {
 	pdp_entry_t pdpe;
 
@@ -146,7 +146,7 @@ pmap_get_pdt(vm_offset_t va, pdp_entry_t
 
 /* Returns physical address */
 static vm_paddr_t
-pmap_get_pt(vm_offset_t va, pd_entry_t *pdt)
+pmap_get_pt(uintptr_t va, pd_entry_t *pdt)
 {
 	pd_entry_t pdte;
 
@@ -271,7 +271,7 @@ mmu_map_pt(void *addr)
 }
 
 bool
-mmu_map_inspect_va(struct pmap *pm, void *addr, vm_offset_t va)
+mmu_map_inspect_va(struct pmap *pm, void *addr, uintptr_t va)
 {
 	KASSERT(addr != NULL && pm != NULL, ("NULL arg(s) given"));
 
@@ -310,7 +310,7 @@ mmu_map_inspect_va(struct pmap *pm, void
 }
 extern uint64_t xenstack; /* The stack Xen gives us at boot */
 void
-mmu_map_hold_va(struct pmap *pm, void *addr, vm_offset_t va)
+mmu_map_hold_va(struct pmap *pm, void *addr, uintptr_t va)
 {
 	KASSERT(addr != NULL && pm != NULL, ("NULL arg(s) given"));
 
@@ -331,8 +331,8 @@ mmu_map_hold_va(struct pmap *pm, void *a
 		pti->pdpt = (pdp_entry_t *)pti->ptmb.alloc();
 
 		pml4tep = &pti->pml4t[pml4t_index(va)];
-		pml4tep_ma = xpmap_ptom(pti->ptmb.vtop((vm_offset_t)pml4tep));
-		pml4te = xpmap_ptom(pti->ptmb.vtop((vm_offset_t)pti->pdpt)) | PG_RW | PG_V | PG_U; /* XXX: revisit flags */
+		pml4tep_ma = xpmap_ptom(pti->ptmb.vtop((uintptr_t)pml4tep));
+		pml4te = xpmap_ptom(pti->ptmb.vtop((uintptr_t)pti->pdpt)) | PG_RW | PG_V | PG_U; /* XXX: revisit flags */
 		xen_queue_pt_update(pml4tep_ma, pml4te);
 
 	} else {
@@ -349,8 +349,8 @@ mmu_map_hold_va(struct pmap *pm, void *a
 		pti->pdt = (pd_entry_t *)pti->ptmb.alloc();
 
 		pdptep = &pti->pdpt[pdpt_index(va)];
-		pdptep_ma = xpmap_ptom(pti->ptmb.vtop((vm_offset_t)pdptep));
-		pdpte = xpmap_ptom(pti->ptmb.vtop((vm_offset_t)pti->pdt)) | PG_RW | PG_V | PG_U; /*	XXX: revisit flags */
+		pdptep_ma = xpmap_ptom(pti->ptmb.vtop((uintptr_t)pdptep));
+		pdpte = xpmap_ptom(pti->ptmb.vtop((uintptr_t)pti->pdt)) | PG_RW | PG_V | PG_U; /*	XXX: revisit flags */
 		xen_queue_pt_update(pdptep_ma, pdpte);
 		
 	} else {
@@ -367,8 +367,8 @@ mmu_map_hold_va(struct pmap *pm, void *a
 		pti->pt = (pt_entry_t *) pti->ptmb.alloc();
 
 		pdtep = &pti->pdt[pdt_index(va)];
-		pdtep_ma = xpmap_ptom(pti->ptmb.vtop((vm_offset_t)pdtep));
-		pdte = xpmap_ptom(pti->ptmb.vtop((vm_offset_t)pti->pt)) | PG_RW | PG_V | PG_U; /*	XXX: revisit flags */
+		pdtep_ma = xpmap_ptom(pti->ptmb.vtop((uintptr_t)pdtep));
+		pdte = xpmap_ptom(pti->ptmb.vtop((uintptr_t)pti->pt)) | PG_RW | PG_V | PG_U; /*	XXX: revisit flags */
 		xen_queue_pt_update(pdtep_ma, pdte);
 
 	} else {
@@ -377,7 +377,7 @@ mmu_map_hold_va(struct pmap *pm, void *a
 }
 
 void
-mmu_map_release_va(struct pmap *pm, void *addr, vm_offset_t va)
+mmu_map_release_va(struct pmap *pm, void *addr, uintptr_t va)
 {
 
 	KASSERT(addr != NULL && pm != NULL, ("NULL arg(s) given"));
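
As a standalone illustration of the index helpers re-typed in the hunks
above, here is a userland sketch of the same computation; PML4SHIFT and
NPML4EPG are inlined with their amd64 values so the program is
self-contained:

	#include <stdint.h>
	#include <stdio.h>

	#define PML4SHIFT	39	/* bits 39-47 select the PML4 entry */
	#define NPML4EPG	512

	static int
	pml4t_index(uintptr_t va)
	{
		/* amd64 sign extends 48th bit and upwards */
		const uint64_t SIGNMASK = (1UL << 48) - 1;

		va &= SIGNMASK;
		return ((va >> PML4SHIFT) & (NPML4EPG - 1));
	}

	int
	main(void)
	{
		/* KERNBASE (0xffffffff80000000) sits in PML4 slot 511. */
		printf("%d\n", pml4t_index((uintptr_t)0xffffffff80000000UL));
		return (0);
	}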

Modified: projects/amd64_xen_pv/sys/amd64/xen/mmu_map.h
==============================================================================
--- projects/amd64_xen_pv/sys/amd64/xen/mmu_map.h	Fri Jun 29 17:00:52 2012	(r237800)
+++ projects/amd64_xen_pv/sys/amd64/xen/mmu_map.h	Fri Jun 29 17:04:42 2012	(r237801)
@@ -90,8 +90,8 @@ typedef void * mmu_map_t;
 
 struct mmu_map_mbackend { /* Callbacks */
 
-	vm_offset_t (*alloc)(void);
-	void (*free)(vm_offset_t); /* May be NULL */
+	uintptr_t (*alloc)(void);
+	void (*free)(uintptr_t); /* May be NULL */
 
 	/* 
 	 * vtop()/ptov() conversion functions:
@@ -101,8 +101,8 @@ struct mmu_map_mbackend { /* Callbacks *
 	 * multiple instances of use; ie; mappings may persist across 
 	 * one pair of mmu_map_t_init()/.._finit() calls.
 	 */
-	vm_offset_t (*ptov)(vm_paddr_t);
-	vm_paddr_t (*vtop)(vm_offset_t);
+	uintptr_t (*ptov)(vm_paddr_t);
+	vm_paddr_t (*vtop)(uintptr_t);
 };
 
 /* 
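
For illustration, a sketch of how a client wires a backend into this
interface after the change. The ptmb_* helpers appear in the pmap.c
hunks below; the mmu_map_t_init() signature is an assumption based on
the comment above, not something this commit shows:

	/* Sketch only: hand the opaque cookie its memory backend. */
	void
	example_init(mmu_map_t tptr)
	{
		struct mmu_map_mbackend mb = {
			ptmb_mappedalloc,  /* uintptr_t (*alloc)(void) */
			ptmb_mappedfree,   /* void (*free)(uintptr_t); may be NULL */
			ptmb_ptov,         /* uintptr_t (*ptov)(vm_paddr_t) */
			ptmb_vtop          /* vm_paddr_t (*vtop)(uintptr_t) */
		};

		mmu_map_t_init(tptr, &mb);	/* assumed signature */
	}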

Modified: projects/amd64_xen_pv/sys/amd64/xen/pmap.c
==============================================================================
--- projects/amd64_xen_pv/sys/amd64/xen/pmap.c	Fri Jun 29 17:00:52 2012	(r237800)
+++ projects/amd64_xen_pv/sys/amd64/xen/pmap.c	Fri Jun 29 17:04:42 2012	(r237801)
@@ -153,15 +153,15 @@ extern unsigned long physfree; /* from m
 
 struct pmap kernel_pmap_store;
 
-vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
-vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
+uintptr_t virtual_avail;	/* VA of first avail page (after kernel bss) */
+uintptr_t virtual_end;	/* VA of last avail page (end of kernel AS) */
 
 #ifdef SUPERPAGESUPPORT
 static int ndmpdp;
 static vm_paddr_t dmaplimit;
 #endif /* SUPERPAGESUPPORT */
 
-vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
+uintptr_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
 pt_entry_t pg_nx; /* XXX: do we need this ? */
 
 struct msgbuf *msgbufp = 0;
@@ -185,13 +185,13 @@ static vm_paddr_t	boot_ptendphys;	/* phy
 
 static uma_zone_t xen_pagezone;
 static size_t tsz; /* mmu_map.h opaque cookie size */
-static vm_offset_t (*ptmb_mappedalloc)(void) = NULL;
-static void (*ptmb_mappedfree)(vm_offset_t) = NULL;
-static vm_offset_t ptmb_ptov(vm_paddr_t p)
+static uintptr_t (*ptmb_mappedalloc)(void) = NULL;
+static void (*ptmb_mappedfree)(uintptr_t) = NULL;
+static uintptr_t ptmb_ptov(vm_paddr_t p)
 {
 	return PTOV(p);
 }
-static vm_paddr_t ptmb_vtop(vm_offset_t v)
+static vm_paddr_t ptmb_vtop(uintptr_t v)
 {
 	return VTOP(v);
 }
@@ -200,10 +200,10 @@ extern uint64_t xenstack; /* The stack X
 extern char *console_page; /* The shared ring for console i/o */
 
 /* return kernel virtual address of  'n' claimed physical pages at boot. */
-static vm_offset_t
+static uintptr_t
 vallocpages(vm_paddr_t *firstaddr, int n)
 {
-	u_int64_t ret = *firstaddr + KERNBASE;
+	uintptr_t ret = *firstaddr + KERNBASE;
 	bzero((void *)ret, n * PAGE_SIZE);
 	*firstaddr += n * PAGE_SIZE;
 
@@ -224,7 +224,7 @@ vallocpages(vm_paddr_t *firstaddr, int n
 
 /* Set page addressed by va to r/o */
 static void
-pmap_xen_setpages_ro(vm_offset_t va, vm_size_t npages)
+pmap_xen_setpages_ro(uintptr_t va, vm_size_t npages)
 {
 	vm_size_t i;
 	for (i = 0; i < npages; i++) {
@@ -235,7 +235,7 @@ pmap_xen_setpages_ro(vm_offset_t va, vm_
 
 /* Set page addressed by va to r/w */
 static void
-pmap_xen_setpages_rw(vm_offset_t va, vm_size_t npages)
+pmap_xen_setpages_rw(uintptr_t va, vm_size_t npages)
 {
 	vm_size_t i;
 	for (i = 0; i < npages; i++) {
@@ -248,12 +248,12 @@ extern int etext;	/* End of kernel text 
 extern int end;		/* End of kernel binary (virtual address) */
 /* Return pte flags according to kernel va access restrictions */
 static pt_entry_t
-pmap_xen_kernel_vaflags(vm_offset_t va)
+pmap_xen_kernel_vaflags(uintptr_t va)
 {
-	if ((va > (vm_offset_t) &etext && /* .data, .bss et. al */
-	     (va < (vm_offset_t) &end))
+	if ((va > (uintptr_t) &etext && /* .data, .bss et. al */
+	     (va < (uintptr_t) &end))
 	    ||
-	    ((va > (vm_offset_t)(xen_start_info->pt_base +
+	    ((va > (uintptr_t)(xen_start_info->pt_base +
 	    			xen_start_info->nr_pt_frames * PAGE_SIZE)) &&
 	     va < PTOV(boot_ptphys))
 	    ||
@@ -430,7 +430,7 @@ create_boot_pagetables(vm_paddr_t *first
 static void
 pmap_xen_bootpages(vm_paddr_t *firstaddr)
 {
-	vm_offset_t va;
+	uintptr_t va;
 	vm_paddr_t ma;
 
 	/* Share info */
@@ -460,13 +460,15 @@ pmap_xen_bootpages(vm_paddr_t *firstaddr
 }
 
 /* alloc from linear mapped boot time virtual address space */
-static vm_offset_t
+static uintptr_t
 mmu_alloc(void)
 {
+	uintptr_t va;
+
 	KASSERT(physfree != 0,
 		("physfree must have been set before using mmu_alloc"));
 				
-	vm_offset_t va = vallocpages(&physfree, atop(PAGE_SIZE));
+	va = vallocpages(&physfree, atop(PAGE_SIZE));
 
 	/* 
 	 * Xen requires the page table hierarchy to be R/O.
@@ -525,7 +527,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
 	 * is available.
 	 */
 
-	virtual_avail = (vm_offset_t) xenstack + 512 * 1024;
+	virtual_avail = (uintptr_t) xenstack + 512 * 1024;
 	/* XXX: Check we don't overlap xen pgdir entries. */
 	virtual_end = VM_MAX_KERNEL_ADDRESS; 
 
@@ -560,7 +562,7 @@ pmap_page_init(vm_page_t m)
  * and update kernel_vm_end.
  */
 void
-pmap_growkernel(vm_offset_t addr)
+pmap_growkernel(uintptr_t addr)
 {
 	KASSERT(kernel_vm_end < addr, ("trying to shrink kernel VA!"));
 
@@ -600,6 +602,8 @@ pmap_growkernel(vm_offset_t addr)
 void
 pmap_init(void)
 {
+	uintptr_t va;
+
 	/* XXX: review the use of gdtset for the purpose below */
 	gdtset = 1; /* xpq may assert for locking sanity from this point onwards */
 
@@ -608,7 +612,7 @@ pmap_init(void)
 	/* Get a va for console and map the console mfn into it */
 	vm_paddr_t console_ma = xen_start_info->console.domU.mfn << PAGE_SHIFT;
 
-	vm_offset_t va = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
+	va = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
 	KASSERT(va != 0, ("Could not allocate KVA for console page!\n"));
 
 	pmap_kenter(va, xpmap_mtop(console_ma));
@@ -672,7 +676,7 @@ pmap_release(pmap_t pmap)
 }
 
 __inline pt_entry_t *
-vtopte(vm_offset_t va)
+vtopte(uintptr_t va)
 {
 	KASSERT(0, ("XXX: REVIEW\n"));
 	u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
@@ -1163,12 +1167,12 @@ pmap_change_attr(vm_offset_t va, vm_size
 		return -1;
 }
 
-static vm_offset_t
+static uintptr_t
 xen_pagezone_alloc(void)
 {
-	vm_offset_t ret;
+	uintptr_t ret;
 
-	ret = (vm_offset_t)uma_zalloc(xen_pagezone, M_NOWAIT | M_ZERO);
+	ret = (uintptr_t)uma_zalloc(xen_pagezone, M_NOWAIT | M_ZERO);
 	if (ret == 0)
 		panic("%s: failed allocation\n", __func__);
 	return (ret);
@@ -1184,9 +1188,9 @@ xen_pagezone_free(vm_offset_t page)
 static int
 xen_pagezone_init(void *mem, int size, int flags)
 {
-	vm_offset_t va;
+	uintptr_t va;
 
-	va = (vm_offset_t)mem;
+	va = (uintptr_t)mem;
 
 	/* Xen requires the page table hierarchy to be R/O. */
 	pmap_xen_setpages_ro(va, atop(size));

