PERFORCE change 133994 for review

Rafal Jaworowski raj at FreeBSD.org
Thu Jan 24 02:08:54 PST 2008


http://perforce.freebsd.org/chv.cgi?CH=133994

Change 133994 by raj at raj_mimi on 2008/01/24 10:07:56

	Make all mmu_booke_* methods static. Minor style corrections.
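
	Context: the mmu_booke_* functions can lose external linkage
	because nothing outside pmap.c calls them by name; the VM layer
	reaches them only through the mmu_booke_methods[] dispatch
	table. A minimal stand-alone C sketch of that pattern follows.
	It is illustrative only: the names here (example_mmu_init,
	example_dispatch, the struct mmu_method fields) are made up,
	and the kernel itself uses the kobj-style MMUMETHOD/MMU_DEF
	machinery rather than this plain function-pointer table.

	#include <stdio.h>
	#include <string.h>

	typedef void (*mmu_fn_t)(void);

	struct mmu_method {
		const char	*mm_name;	/* interface method name */
		mmu_fn_t	 mm_func;	/* static implementation */
	};

	/* static: file-local, reachable only through the table below */
	static void
	example_mmu_init(void)
	{

		printf("mmu_init dispatched via method table\n");
	}

	static struct mmu_method example_mmu_methods[] = {
		{ "mmu_init",	example_mmu_init },
		{ NULL,		NULL }
	};

	/* Look up a method by name and invoke it, as a dispatcher would. */
	static void
	example_dispatch(const char *name)
	{
		struct mmu_method *m;

		for (m = example_mmu_methods; m->mm_name != NULL; m++)
			if (strcmp(m->mm_name, name) == 0) {
				m->mm_func();
				return;
			}
	}

	int
	main(void)
	{

		example_dispatch("mmu_init");
		return (0);
	}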

Affected files ...

.. //depot/projects/e500/sys/powerpc/booke/pmap.c#10 edit

Differences ...

==== //depot/projects/e500/sys/powerpc/booke/pmap.c#10 (text+ko) ====

@@ -27,8 +27,9 @@
  *
  * Some hw specific parts of this pmap were derived or influenced
  * by NetBSD's ibm4xx pmap module. More generic code is shared with
- * a few other pmap modules from the FreeBSD tree. 
+ * a few other pmap modules from the FreeBSD tree.
  */
+
  /*
   * VM layout notes:
   *
@@ -267,51 +268,51 @@
 /*
  * Kernel MMU interface
  */
-vm_offset_t	mmu_booke_addr_hint(mmu_t, vm_object_t, vm_offset_t, vm_size_t);
-void	mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
-void	mmu_booke_clear_modify(mmu_t, vm_page_t);
-void	mmu_booke_clear_reference(mmu_t, vm_page_t);
-void	mmu_booke_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
-void	mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
-void	mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
-void	mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
-		vm_page_t, vm_prot_t);
-void	mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
-vm_paddr_t	mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
-vm_page_t	mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
-void	mmu_booke_init(mmu_t);
-boolean_t	mmu_booke_is_modified(mmu_t, vm_page_t);
-boolean_t	mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
-boolean_t	mmu_booke_ts_referenced(mmu_t, vm_page_t);
-vm_offset_t	mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
-int	mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t);
-void	mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, vm_object_t,
-		vm_pindex_t, vm_size_t);
-boolean_t	mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
-void	mmu_booke_page_init(mmu_t, vm_page_t);
-int	mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
-void	mmu_booke_pinit(mmu_t, pmap_t);
-void	mmu_booke_pinit0(mmu_t, pmap_t);
-void	mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
-void	mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
-void	mmu_booke_qremove(mmu_t, vm_offset_t, int);
-void	mmu_booke_release(mmu_t, pmap_t);
-void	mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
-void	mmu_booke_remove_all(mmu_t, vm_page_t);
-void	mmu_booke_remove_write(mmu_t, vm_page_t);
-void	mmu_booke_zero_page(mmu_t, vm_page_t);
-void	mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
-void	mmu_booke_zero_page_idle(mmu_t, vm_page_t);
-void	mmu_booke_activate(mmu_t, struct thread *);
-void	mmu_booke_deactivate(mmu_t, struct thread *);
-void	mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
-void	*mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
-void	mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
-vm_offset_t	mmu_booke_kextract(mmu_t, vm_offset_t);
-void	mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
-void	mmu_booke_kremove(mmu_t, vm_offset_t);
-boolean_t	mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
-boolean_t	mmu_booke_page_executable(mmu_t, vm_page_t);
+static vm_offset_t	mmu_booke_addr_hint(mmu_t, vm_object_t, vm_offset_t, vm_size_t);
+static void	mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
+static void	mmu_booke_clear_modify(mmu_t, vm_page_t);
+static void	mmu_booke_clear_reference(mmu_t, vm_page_t);
+static void	mmu_booke_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
+static void	mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
+static void	mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
+static void	mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
+			vm_page_t, vm_prot_t);
+static void	mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
+static vm_paddr_t	mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
+static vm_page_t	mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
+static void	mmu_booke_init(mmu_t);
+static boolean_t	mmu_booke_is_modified(mmu_t, vm_page_t);
+static boolean_t	mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
+static int	mmu_booke_ts_referenced(mmu_t, vm_page_t);
+static vm_offset_t	mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
+static int	mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t);
+static void	mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, vm_object_t,
+			vm_pindex_t, vm_size_t);
+static boolean_t	mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
+static void	mmu_booke_page_init(mmu_t, vm_page_t);
+static int	mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
+static void	mmu_booke_pinit(mmu_t, pmap_t);
+static void	mmu_booke_pinit0(mmu_t, pmap_t);
+static void	mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
+static void	mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
+static void	mmu_booke_qremove(mmu_t, vm_offset_t, int);
+static void	mmu_booke_release(mmu_t, pmap_t);
+static void	mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
+static void	mmu_booke_remove_all(mmu_t, vm_page_t);
+static void	mmu_booke_remove_write(mmu_t, vm_page_t);
+static void	mmu_booke_zero_page(mmu_t, vm_page_t);
+static void	mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
+static void	mmu_booke_zero_page_idle(mmu_t, vm_page_t);
+static void	mmu_booke_activate(mmu_t, struct thread *);
+static void	mmu_booke_deactivate(mmu_t, struct thread *);
+static void	mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
+static void	*mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
+static void	mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
+static vm_offset_t	mmu_booke_kextract(mmu_t, vm_offset_t);
+static void	mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
+static void	mmu_booke_kremove(mmu_t, vm_offset_t);
+static int	mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
+static boolean_t	mmu_booke_page_executable(mmu_t, vm_page_t);
 
 static mmu_method_t mmu_booke_methods[] = {
 	/* pmap dispatcher interface */
@@ -376,7 +377,7 @@
  * not be tested for the modified bit.
  */
 static __inline int
-mmu_booke_track_modified(pmap_t pmap, vm_offset_t va)
+track_modified_needed(pmap_t pmap, vm_offset_t va)
 {
 	if (pmap == kernel_pmap)
 		return ((va < kmi.clean_sva) || (va >= kmi.clean_eva));
@@ -772,7 +773,7 @@
 
 			/* Handle modified pages. */
 			if (PTE_ISMODIFIED(pte)) {
-				if (mmu_booke_track_modified(pmap, va))
+				if (track_modified_needed(pmap, va))
 					vm_page_dirty(m);
 			}
 
@@ -898,7 +899,7 @@
 /*
  * This is called during e500_init, before the system is really initialized.
  */
-void
+static void
 mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend)
 {
 	vm_offset_t phys_kernelend;
@@ -1141,7 +1142,7 @@
 /*
  * Get the physical page address for the given pmap/virtual address.
  */
-vm_paddr_t
+static vm_paddr_t
 mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 {
 	vm_paddr_t pa;
@@ -1157,7 +1158,7 @@
  * Extract the physical page address associated with the given
  * kernel virtual address.
  */
-vm_paddr_t
+static vm_paddr_t
 mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
 {
 	return pte_vatopa(mmu, kernel_pmap, va);
@@ -1168,7 +1169,7 @@
  * Called by vm_init, to initialize any structures that the pmap
  * system needs to map virtual memory.
  */
-void
+static void
 mmu_booke_init(mmu_t mmu)
 {
 	int shpgperproc = PMAP_SHPGPERPROC;
@@ -1205,7 +1206,7 @@
  * intended for temporary mappings which do not need page modification or
  * references recorded.  Existing mappings in the region are overwritten.
  */
-void
+static void
 mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
 {
 	vm_offset_t va;
@@ -1226,7 +1227,7 @@
  * Remove page mappings from kernel virtual address space.  Intended for
  * temporary mappings entered by mmu_booke_qenter.
  */
-void
+static void
 mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
 {
 	vm_offset_t va;
@@ -1245,7 +1246,7 @@
 /*
  * Map a wired page into kernel virtual address space.
  */
-void
+static void
 mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
 {
 	unsigned int pdir_idx = PDIR_IDX(va);
@@ -1305,7 +1306,7 @@
 /*
  * Remove a page from kernel page table.
  */
-void
+static void
 mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
 {
 	unsigned int pdir_idx = PDIR_IDX(va);
@@ -1336,7 +1337,7 @@
 /*
  * Initialize pmap associated with process 0.
  */
-void
+static void
 mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
 {
 	//debugf("mmu_booke_pinit0: s (pmap = 0x%08x)\n", (u_int32_t)pmap);
@@ -1349,7 +1350,7 @@
  * Initialize a preallocated and zeroed pmap structure,
  * such as one in a vmspace structure.
  */
-void
+static void
 mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
 {
 	//struct thread *td;
@@ -1378,7 +1379,7 @@
  * Called when a pmap initialized by mmu_booke_pinit is being released.
  * Should only be called if the map contains no valid mappings.
  */
-void
+static void
 mmu_booke_release(mmu_t mmu, pmap_t pmap)
 {
 	//debugf("mmu_booke_release: s\n");
@@ -1401,7 +1402,7 @@
  * target physical map with the protection requested. If specified the page
  * will be wired down.
  */
-void
+static void
 mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
 		vm_prot_t prot, boolean_t wired)
 {
@@ -1475,7 +1476,7 @@
 		} else {
 			/* Handle modified pages, sense modify status. */
 			if (PTE_ISMODIFIED(pte)) {
-				if (mmu_booke_track_modified(pmap, va))
+				if (track_modified_needed(pmap, va))
 					vm_page_dirty(m);
 			}
 		}
@@ -1568,7 +1569,7 @@
  * is mapped; only those for which a resident page exists with the
  * corresponding offset from m_start are mapped.
  */
-void
+static void
 mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
 		       vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
 {
@@ -1586,7 +1587,7 @@
 	PMAP_UNLOCK(pmap);
 }
 
-void
+static void
 mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
 		      vm_prot_t prot)
 {
@@ -1605,7 +1606,7 @@
  *
  * It is assumed that the start and end are properly rounded to the page size.
  */
-void
+static void
 mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
 {
 	pte_t *pte;
@@ -1652,7 +1653,7 @@
 /*
  * Remove physical page from all pmaps in which it resides.
  */
-void
+static void
 mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
 {
 	pv_entry_t pv, pvn;
@@ -1687,7 +1688,7 @@
  * unchanged.  We cannot and therefore do not; *virt is updated with the
  * first usable address after the mapped region.
  */
-vm_offset_t
+static vm_offset_t
 mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
 	      vm_offset_t pa_end, int prot)
 {
@@ -1712,7 +1713,7 @@
  * The pmap must be activated before its address space can be accessed in any
  * way.
  */
-void
+static void
 mmu_booke_activate(mmu_t mmu, struct thread *td)
 {
 	pmap_t pmap;
@@ -1744,7 +1745,7 @@
 /*
  * Deactivate the specified process's address space.
  */
-void
+static void
 mmu_booke_deactivate(mmu_t mmu, struct thread *td)
 {
 	pmap_t pmap;
@@ -1761,7 +1762,7 @@
  *
  * This routine is only advisory and need not do anything.
  */
-void
+static void
 mmu_booke_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
           vm_size_t len, vm_offset_t src_addr)
 {
@@ -1770,7 +1771,7 @@
 /*
  * Set the physical protection on the specified range of this map as requested.
  */
-void
+static void
 mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
 		  vm_prot_t prot)
 {
@@ -1795,7 +1796,7 @@
 
 				/* Handle modified pages. */
 				if (PTE_ISMODIFIED(pte)) {
-					if (mmu_booke_track_modified(pmap, va))
+					if (track_modified_needed(pmap, va))
 						vm_page_dirty(m);
 				}
 
@@ -1816,7 +1817,7 @@
 /*
  * Clear the write and modified bits in each of the given page's mappings.
  */
-void
+static void
 mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
 {
 	pv_entry_t pv;
@@ -1835,7 +1836,7 @@
 
 				/* Handle modified pages. */
 				if (PTE_ISMODIFIED(pte)) {
-					if (mmu_booke_track_modified(pv->pv_pmap, pv->pv_va))
+					if (track_modified_needed(pv->pv_pmap, pv->pv_va))
 						vm_page_dirty(m);
 				}
 
@@ -1853,7 +1854,7 @@
 	vm_page_flag_clear(m, PG_WRITEABLE);
 }
 
-boolean_t
+static boolean_t
 mmu_booke_page_executable(mmu_t mmu, vm_page_t m)
 {
 	pv_entry_t pv;
@@ -1879,7 +1880,7 @@
  * pmap and virtual address pair if that mapping permits the given
  * protection.
  */
-vm_page_t
+static vm_page_t
 mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
 			   vm_prot_t prot)
 {
@@ -1912,7 +1913,7 @@
 /*
  * Initialize a vm_page's machine-dependent fields.
  */
-void
+static void
 mmu_booke_page_init(mmu_t mmu, vm_page_t m)
 {
 	TAILQ_INIT(&m->md.pv_list);
@@ -1925,7 +1926,7 @@
  *
  * off and size must reside within a single page.
  */
-void
+static void
 mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
 {
 	vm_offset_t va;
@@ -1947,7 +1948,7 @@
 /*
  * mmu_booke_zero_page zeros the specified hardware page.
  */
-void
+static void
 mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
 {
 	//debugf("mmu_booke_zero_page: s\n");
@@ -1960,7 +1961,7 @@
  * mapping the page into virtual memory and using memcpy to copy the page,
  * one machine dependent page at a time.
  */
-void
+static void
 mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
 {
 	vm_offset_t sva, dva;
@@ -2000,7 +2001,7 @@
  * to be called from the vm_pagezero process only and outside of Giant. No
  * lock is required.
  */
-void
+static void
 mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
 {
 	vm_offset_t va;
@@ -2019,7 +2020,7 @@
  * Return whether or not the specified physical page was modified
  * in any of the physical maps.
  */
-boolean_t
+static boolean_t
 mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
 {
 	pte_t *pte;
@@ -2035,7 +2036,7 @@
 			if (!PTE_ISVALID(pte))
 				goto make_sure_to_unlock;
 
-			if (!mmu_booke_track_modified(pv->pv_pmap, pv->pv_va))
+			if (!track_modified_needed(pv->pv_pmap, pv->pv_va))
 				goto make_sure_to_unlock;
 
 			if (PTE_ISMODIFIED(pte)) {
@@ -2053,7 +2054,7 @@
  * Return whether or not the specified virtual address is eligible
  * for prefault.
  */
-boolean_t
+static boolean_t
 mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
 {
 	return (FALSE);
@@ -2062,7 +2063,7 @@
 /*
  * Clear the modify bits on the specified physical page.
  */
-void
+static void
 mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
 {
 	pte_t *pte;
@@ -2098,7 +2099,7 @@
  * should be tested and standardized at some point in the future for
  * optimal aging of shared pages.
  */
-int
+static int
 mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
 {
 	pte_t *pte;
@@ -2116,7 +2117,7 @@
 			if (!PTE_ISVALID(pte))
 				goto make_sure_to_unlock;
 
-			if (!mmu_booke_track_modified(pv->pv_pmap, pv->pv_va))
+			if (!track_modified_needed(pv->pv_pmap, pv->pv_va))
 				goto make_sure_to_unlock;
 
 			if (PTE_ISREFERENCED(pte)) {
@@ -2138,7 +2139,7 @@
 /*
  * Clear the reference bit on the specified physical page.
  */
-void
+static void
 mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
 {
 	pte_t *pte;
@@ -2167,7 +2168,7 @@
 /*
  * Change wiring attribute for a map/virtual-address pair.
  */
-void
+static void
 mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
 {
 	pte_t *pte;
@@ -2195,7 +2196,7 @@
  * only necessary that true be returned for a small subset of pmaps for proper
  * page aging.
  */
-boolean_t
+static boolean_t
 mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 {
 	pv_entry_t pv;
@@ -2221,7 +2222,7 @@
  * Return the number of managed mappings to the given physical page that are
  * wired.
  */
-int
+static int
 mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
 {
 	pv_entry_t pv;
@@ -2243,7 +2244,7 @@
 	return (count);
 }
 
-int
+static int
 mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
 {
 	int i;
@@ -2266,7 +2267,7 @@
  * Return a pointer to where it is mapped. This routine is intended to be used
  * for mapping device memory, NOT real memory.
  */
-void *
+static void *
 mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
 {
 	uintptr_t va;
@@ -2282,7 +2283,7 @@
 /*
  * 'Unmap' a range mapped by mmu_booke_mapdev().
  */
-void
+static void
 mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
 {
 	vm_offset_t base, offset;
@@ -2307,7 +2308,7 @@
  * into the specified pmap. This eliminates the blast of soft
  * faults on process startup and immediately after an mmap.
  */
-void
+static void
 mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
 		vm_object_t object, vm_pindex_t pindex, vm_size_t size)
 {
@@ -2319,14 +2320,14 @@
 /*
  * Perform the pmap work for mincore.
  */
-int
+static int
 mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
 {
 	TODO;
 	return (0);
 }
 
-vm_offset_t
+static vm_offset_t
 mmu_booke_addr_hint(mmu_t mmu, vm_object_t object, vm_offset_t va,
 		    vm_size_t size)
 {

