PERFORCE change 92755 for review
Kip Macy
kmacy at FreeBSD.org
Sat Mar 4 17:54:06 PST 2006
http://perforce.freebsd.org/chv.cgi?CH=92755
Change 92755 by kmacy at kmacy_storage:sun4v_work on 2006/03/05 01:53:11
Switch kernel over to being backed by a hashtable for 8K pages
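For context: kernel 8K mappings are now looked up in a per-pmap hash
table keyed on virtual address, the same way user mappings already are,
rather than being special-cased (hence the pm_context != 0 KASSERTs
removed below). As a rough illustration of the kind of lookup this
enables (names and layout here are hypothetical, not the actual
tte_hash.c definitions), an open-addressed TTE hash behaves like:

  #include <stdint.h>

  struct demo_tte_entry {
          uint64_t de_tag;        /* virtual page number */
          uint64_t de_data;       /* TTE data; 0 means the slot is empty */
  };

  static uint64_t
  demo_tte_lookup(struct demo_tte_entry *tbl, int n, uint64_t va)
  {
          uint64_t vpn = va >> 13;        /* 8K base pages */
          int i;

          /* Linear probing; n is a power of two. */
          for (i = 0; i < n; i++) {
                  struct demo_tte_entry *e = &tbl[(vpn + i) & (n - 1)];

                  if (e->de_data == 0)
                          break;          /* empty slot: no mapping */
                  if (e->de_tag == vpn)
                          return (e->de_data);
          }
          return (0);                     /* no translation found */
  }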
Affected files ...
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tte_hash.h#2 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#14 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#2 edit
Differences ...
==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tte_hash.h#2 (text+ko) ====
@@ -7,6 +7,8 @@
void tte_hash_init(void);
+tte_hash_t tte_hash_kernel_create(vm_offset_t, uint64_t);
+
tte_hash_t tte_hash_create(uint64_t context);
void tte_hash_destroy(tte_hash_t th);
==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#14 (text+ko) ====
@@ -113,6 +113,12 @@
hv_tsb_info_t kernel_td[MAX_TSB_INFO];
+/*
+ * This should be determined at boot time.
+ * With tiny TLBs it doesn't make sense to try to selectively
+ * invalidate more than this many pages (see pmap_invalidate_range).
+ */
+#define MAX_INVALIDATES 32
/*
* Allocate physical memory for use in pmap_bootstrap.
@@ -208,10 +214,6 @@
vm_offset_t va;
vm_page_t m;
-
- KASSERT(locked_pmap->pm_context != 0,
- ("context 0 not backed by pv_entry management"));
-
PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
@@ -352,7 +354,7 @@
pmap_bootstrap(vm_offset_t ekva)
{
struct pmap *pm;
- vm_offset_t off, va;
+ vm_offset_t off, va, kernel_hash;
+ vm_paddr_t kernel_hash_pa;
vm_paddr_t pa;
vm_size_t physsz, virtsz;
ihandle_t pmem, vmem;
@@ -408,6 +410,24 @@
*/
virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
+
+ /*
+ * Allocate and map a 4MB page for the kernel hashtable.
+ */
+ pa = pmap_bootstrap_alloc(PAGE_SIZE_4M);
+ if (pa & PAGE_MASK_4M)
+ panic("pmap_bootstrap: hashtable unaligned");
+ kernel_hash_pa = pa; /* save it: pa is reused for the TSBs below */
+ kernel_hash = virtual_avail;
+ virtual_avail += PAGE_SIZE_4M;
+ pmap_scrub_pages(pa, PAGE_SIZE_4M);
+
+ /*
+ * Set up TSB descriptors for the hypervisor.
+ */
tsb_8k_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
pa = pmap_bootstrap_alloc(tsb_8k_size);
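The sizing above allots one TSB entry per page of KVA: with 8K base
pages (PAGE_SHIFT == 13) and, assuming the usual 16-byte TSB entries
(TTE_SHIFT == 4), tsb_8k_size is virtsz / 512 and tsb_4m_size is
virtsz / 262144. A quick standalone check of that arithmetic, with an
illustrative virtsz:

  #include <stdint.h>
  #include <stdio.h>

  int
  main(void)
  {
          uint64_t virtsz = 4ULL << 30;           /* 4G of KVA */
          uint64_t tsb_8k = virtsz >> (13 - 4);   /* 8K-page TSB bytes */
          uint64_t tsb_4m = virtsz >> (22 - 4);   /* 4M-page TSB bytes */

          /* Prints "8 MB, 16 KB". */
          printf("%llu MB, %llu KB\n",
              (unsigned long long)(tsb_8k >> 20),
              (unsigned long long)(tsb_4m >> 10));
          return (0);
  }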
@@ -421,8 +441,19 @@
kernel_td[TSB8K_INDEX].hvtsb_rsvd = 0;
kernel_td[TSB8K_INDEX].hvtsb_pa = pa;
+ /*
+ * Initialize the kernel's private TSB from the 8K page TSB.
+ */
+ kernel_pmap->pm_tsb.hvtsb_idxpgsz = TTE8K;
+ kernel_pmap->pm_tsb.hvtsb_assoc = 1;
+ kernel_pmap->pm_tsb.hvtsb_ntte = (tsb_8k_size >> TTE_SHIFT);
+ kernel_pmap->pm_tsb.hvtsb_ctx_index = 0;
+ kernel_pmap->pm_tsb.hvtsb_pgszs = TSB8K;
+ kernel_pmap->pm_tsb.hvtsb_rsvd = 0;
+ kernel_pmap->pm_tsb.hvtsb_pa = kernel_td[TSB8K_INDEX].hvtsb_pa;
+
tsb_4m_size = virtsz >> (PAGE_SHIFT_4M - TTE_SHIFT);
-
pa = pmap_bootstrap_alloc(tsb_4m_size);
kernel_td[TSB4M_INDEX].hvtsb_idxpgsz = TTE4M;
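Since kernel_pmap->pm_tsb above duplicates kernel_td[TSB8K_INDEX] field
for field, the descriptor setup could be factored into a small helper;
a sketch (the hv_tsb_info_t field names are as in this change, but the
helper itself and the parameter widths are assumptions):

  /* Fill in one TSB descriptor for the hypervisor. */
  static void
  tsb_info_init(hv_tsb_info_t *td, uint64_t idxpgsz, uint64_t ntte,
      uint64_t pgszs, uint64_t pa)
  {
          td->hvtsb_idxpgsz = idxpgsz;    /* page size used to index */
          td->hvtsb_assoc = 1;            /* direct mapped */
          td->hvtsb_ntte = ntte;          /* number of TTE entries */
          td->hvtsb_ctx_index = 0;
          td->hvtsb_pgszs = pgszs;        /* page size bitmask */
          td->hvtsb_rsvd = 0;
          td->hvtsb_pa = pa;              /* TSB physical address */
  }

With it, the private-TSB block above would reduce to a single call:
tsb_info_init(&kernel_pmap->pm_tsb, TTE8K, tsb_8k_size >> TTE_SHIFT,
TSB8K, kernel_td[TSB8K_INDEX].hvtsb_pa).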
@@ -438,13 +469,12 @@
pmap_scrub_pages(kernel_td[TSB4M_INDEX].hvtsb_pa, tsb_4m_size);
/*
- * Set up TSB descriptors for the hypervisor
- *
+ * Install the kernel hashtable into the 4M TSB.
*/
+ tsb_set_tte(&kernel_td[TSB4M_INDEX], kernel_hash,
+ kernel_hash_pa | TTE_KERNEL | VTD_4M, 0);
-
-
-
/*
* allocate MMU fault status areas for all CPUS
*/
@@ -471,6 +501,7 @@
virtual_end = vm_max_kernel_address;
kernel_vm_end = vm_max_kernel_address;
+
/*
* Allocate a kernel stack with guard page for thread0 and map it into
* the kernel tsb.
@@ -560,6 +591,17 @@
if (error != H_EOK)
panic("failed to set ctx0 TSBs error: %ld", error);
+ /*
+ * This could happen earlier, but doing it here avoids any
+ * attempt to update the hash before such updates are legal.
+ */
+ pm->pm_hash = tte_hash_kernel_create(kernel_hash, PAGE_SIZE_4M);
+
+ /*
+ * XXX - We should read the kernel mappings into the hash table.
+ */
+
}
@@ -633,8 +675,6 @@
printf("ctx=%d va=%lx prot=%x wired=%x\n", pmap->pm_context,
va, prot, wired);
#endif
- KASSERT(pmap->pm_context != 0,
- ("inserting faultable entries into context 0 without backing hash"));
om = NULL;
@@ -895,15 +935,15 @@
#endif
/* XXX SUN4V_FIXME - oversimplified logic */
- if (pmap->pm_context != 0) {
- tsb_clear_range(&pmap->pm_tsb, sva, eva);
- }
+ tsb_clear_range(&pmap->pm_tsb, sva, eva);
+
- if (((sva & PAGE_MASK_4M) != 0) || ((eva & PAGE_MASK_4M) != 0)) {
+ if ((((eva - sva) >> PAGE_SHIFT) < MAX_INVALIDATES) ||
+ ((sva & PAGE_MASK_4M) != 0) || ((eva & PAGE_MASK_4M) != 0)) {
for (tva = sva; tva < eva; tva += PAGE_SIZE_8K)
invlpg(tva, pmap->pm_context);
} else
- UNIMPLEMENTED;
+ invlctx(pmap->pm_context);
}
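The new cutoff trades per-page demaps against a full context demap:
below MAX_INVALIDATES pages (32 pages, i.e. 256K of VA), or when the
range is not 4M aligned, each 8K page is invalidated individually;
otherwise, with the small sun4v TLBs, re-faulting the whole context is
assumed to be cheaper than walking the range. Restated standalone as a
sketch (invlpg(), invlctx() and the constants as used in this change):

  static void
  demap_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
  {
          vm_offset_t tva;

          if ((((eva - sva) >> PAGE_SHIFT) < MAX_INVALIDATES) ||
              ((sva & PAGE_MASK_4M) != 0) || ((eva & PAGE_MASK_4M) != 0)) {
                  /* Small or unaligned range: demap page by page. */
                  for (tva = sva; tva < eva; tva += PAGE_SIZE_8K)
                          invlpg(tva, pmap->pm_context);
          } else {
                  /* Large aligned range: flush the whole context. */
                  invlctx(pmap->pm_context);
          }
  }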
@@ -1125,9 +1165,6 @@
uint64_t *tte;
- KASSERT(pmap->pm_context != 0,
- ("protection downgrades not handled correctly without backing hash"));
-
if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
pmap_remove(pmap, sva, eva);
return;
@@ -1173,7 +1210,7 @@
sched_unpin();
if (anychanged)
- pmap_invalidate_all(pmap);
+ pmap_invalidate_range(pmap, sva, eva);
vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
@@ -1292,8 +1329,6 @@
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
PMAP_LOCK(pv->pv_pmap);
pv->pv_pmap->pm_stats.resident_count--;
- KASSERT(pv->pv_pmap->pm_context != 0,
- ("cannot special case absence of backing hash"));
tte = tte_hash_lookup(pv->pv_pmap->pm_hash, pv->pv_va);
tte_data = *tte;
tte_hash_delete(pv->pv_pmap->pm_hash, pv->pv_va);
@@ -1360,9 +1395,6 @@
pv_entry_t pv, npv;
tte_t *tte, tte_data;
- KASSERT(pmap->pm_context != 0,
- ("cannot special case absence of backing hash"));
-
vm_page_lock_queues();
PMAP_LOCK(pmap);
sched_pin();
==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#2 (text+ko) ====
@@ -57,6 +57,8 @@
tte_hash_entry_t th_hashtable; /* hash of TTEs */
};
+static struct tte_hash kernel_tte_hash;
+
/*
* Data for the tte_hash allocation mechanism
@@ -95,6 +97,19 @@
}
+tte_hash_t
+tte_hash_kernel_create(vm_offset_t va, uint64_t size)
+{
+ tte_hash_t th;
+
+ th = &kernel_tte_hash;
+ /* va is the preallocated 4M region backing the kernel hash. */
+ th->th_hashtable = (void *)va;
+ th->th_size = (size >> PAGE_SHIFT);
+ th->th_entries = 0;
+ th->th_context = 0;
+
+ return (th);
+}
+
tte_hash_t
tte_hash_create(uint64_t context)
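As wired up in pmap_bootstrap above, the kernel hash is the one hash
whose backing storage is the statically reserved 4M region rather than
pages from the tte_hash zone, so th_size counts the 8K pages of that
region: PAGE_SIZE_4M >> PAGE_SHIFT == 512. A trivial standalone check
of that arithmetic (8K base pages and a 4M region assumed):

  #include <stdio.h>

  int
  main(void)
  {
          unsigned long size = 1UL << 22;     /* PAGE_SIZE_4M */

          printf("%lu\n", size >> 13);        /* th_size: 512 */
          return (0);
  }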