PERFORCE change 92351 for review

Kip Macy kmacy at FreeBSD.org
Fri Feb 24 10:01:55 PST 2006


http://perforce.freebsd.org/chv.cgi?CH=92351

Change 92351 by kmacy at kmacy_storage:sun4v_work on 2006/02/24 18:01:31

	fix up TSB get / set functions
	switch to using hv_tsb_info structures
	make it clearer which kernel TSB we're referencing
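
A note on the hv_tsb_info type this change moves to: it is the sun4v
hypervisor's TSB descriptor, read directly by the hypervisor (see the
hv_set_ctx0() call in the pmap.c hunk below). A minimal sketch of its
layout, using the field names that appear in this diff; the field widths
and ordering here are an assumption based on the hypervisor TSB descriptor
API, not something this change shows:

	typedef struct hv_tsb_info {
		uint16_t	hvtsb_idxpgsz;	/* page size used to index this TSB */
		uint16_t	hvtsb_assoc;	/* associativity; 1 means direct-mapped */
		uint32_t	hvtsb_ntte;	/* number of TTE entries */
		uint32_t	hvtsb_ctx_index;	/* context register index */
		uint32_t	hvtsb_pgszs;	/* bitmask of page sizes present */
		uint64_t	hvtsb_pa;	/* real address of the TSB itself */
		uint64_t	hvtsb_rsvd;	/* reserved; set to zero */
	} hv_tsb_info_t;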

Affected files ...

.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tsb.h#4 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tte.h#4 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#10 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tick.c#3 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tsb.c#4 edit

Differences ...

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tsb.h#4 (text+ko) ====

@@ -75,13 +75,13 @@
 	
 
 
+struct hv_tsb_info;
 
+void tsb_set_tte(struct hv_tsb_info *tsb, vm_offset_t va, vm_paddr_t pa, uint64_t flags, uint64_t ctx);
 
-void tsb_set_tte(tsb_info_t tsb, vm_offset_t va, vm_paddr_t pa, uint64_t flags, uint64_t ctx);
+uint64_t tsb_get_tte(struct hv_tsb_info *tsb, vm_offset_t va, uint64_t ctx);
 
-tte_t tsb_get_tte(tsb_info_t tsb, vm_offset_t va, uint64_t ctx);
-
-void tsb_clear_tte(tsb_info_t tsb, vm_offset_t, uint64_t ctx);
+void tsb_clear_tte(struct hv_tsb_info *tsb, vm_offset_t, uint64_t ctx);
 
 
 #endif /* !_MACHINE_TSB_H_ */
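
With the new prototypes the TSB functions take a pointer to a hypervisor
TSB descriptor rather than a tsb_info_t. A typical call, as it appears in
the pmap.c hunk later in this change:

	tsb_set_tte(&kernel_td[TSB8K_INDEX], va, pa, TTE_KERNEL | VTD_8K, 0);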

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tte.h#4 (text+ko) ====

@@ -34,7 +34,6 @@
 
 #define	TTE_SHIFT	(4)
 
-#define	TD_SIZE_SHIFT	(61)
 #define	TD_SOFT2_SHIFT	(50)
 #define	TD_DIAG_SHIFT	(41)
 #define	TD_PA_SHIFT	(13)
@@ -42,7 +41,7 @@
 
 #define	TD_SOFT2_BITS	(9)
 #define	TD_DIAG_BITS	(9)
-#define	TD_PA_BITS	(28)
+#define	TD_PA_BITS	(42)
 #define	TD_SOFT_BITS	(6)
 
 #define	TD_SOFT2_MASK	((1UL << TD_SOFT2_BITS) - 1)
@@ -59,13 +58,6 @@
 #define	TTE2G		(6UL)
 #define	TTE16G		(7UL)
 
-#define	TD_V		(1UL << 63)
-#define	TD_8K		(TS_8K << TD_SIZE_SHIFT)
-#define	TD_64K		(TS_64K << TD_SIZE_SHIFT)
-#define	TD_512K		(TS_512K << TD_SIZE_SHIFT)
-#define	TD_4M		(TS_4M << TD_SIZE_SHIFT)
-#define	TD_NFO		(1UL << 60)
-#define	TD_IE		(1UL << 59)
 #define	TD_PA(pa)	((pa) & (TD_PA_MASK << TD_PA_SHIFT))
 /* NOTE: bit 6 of TD_SOFT will be sign-extended if used as an immediate. */
 #define	TD_FAKE		((1UL << 5) << TD_SOFT_SHIFT)
@@ -83,21 +75,13 @@
 #define	TD_G		(1UL << 0)
 
 
-#define	TTE_SIZE_SPREAD	(3)
-#define	TTE_PAGE_SHIFT(sz) \
-	(PAGE_SHIFT + ((sz) * TTE_SIZE_SPREAD))
-
-#define	TTE_GET_SIZE(tp) \
-	(((tp)->tte_data >> TD_SIZE_SHIFT) & TD_SIZE_MASK)
-#define	TTE_GET_PAGE_SHIFT(tp) \
-	TTE_PAGE_SHIFT(TTE_GET_SIZE(tp))
 #define	TTE_GET_PAGE_SIZE(tp) \
 	(1 << TTE_GET_PAGE_SHIFT(tp))
 #define	TTE_GET_PAGE_MASK(tp) \
 	(TTE_GET_PAGE_SIZE(tp) - 1)
 
-#define	TTE_GET_PA(tp) \
-	((tp)->tte_data & (TD_PA_MASK << TD_PA_SHIFT))
+#define	TTE_GET_PA(tte_data) \
+	(tte_data & (TD_PA_MASK << TD_PA_SHIFT))
 #define	TTE_GET_VPN(tp) \
 	((tp)->tte_vpn >> TV_SIZE_BITS)
 #define	TTE_GET_VA(tp) \
@@ -144,6 +128,18 @@
  */
 #define TTE_KERNEL      VTD_V | VTD_CP | VTD_CV | VTD_P | VTD_X | VTD_W
 
+#define VTD_SIZE_BITS   (4)
+#define VTD_SIZE_MASK   ((1 << VTD_SIZE_BITS) - 1)
+
+
+#define	TTE_SIZE_SPREAD	(3)
+#define	TTE_PAGE_SHIFT(sz) \
+	(PAGE_SHIFT + ((sz) * TTE_SIZE_SPREAD))
+#define	TTE_GET_SIZE(tte_data) \
+	(tte_data & VTD_SIZE_MASK)
+#define	TTE_GET_PAGE_SHIFT(tte_data) \
+	TTE_PAGE_SHIFT(TTE_GET_SIZE(tte_data))
+
 
 typedef union {
 	struct tte {
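
The size macros relocated above now take raw TTE data and extract the size
code from the low VTD_SIZE_BITS bits. A worked example of the resulting
page shifts, assuming PAGE_SHIFT is 13 (8K base pages) and the usual sun4v
size codes (TTE8K == 0 through TTE4M == 3):

	TTE_PAGE_SHIFT(TTE8K)	/* 13 + 0*3 == 13 ->   8K pages */
	TTE_PAGE_SHIFT(TTE64K)	/* 13 + 1*3 == 16 ->  64K pages */
	TTE_PAGE_SHIFT(TTE512K)	/* 13 + 2*3 == 19 -> 512K pages */
	TTE_PAGE_SHIFT(TTE4M)	/* 13 + 3*3 == 22 ->   4M pages */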

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#10 (text+ko) ====

@@ -103,6 +103,8 @@
 /*
  * Kernel TSBs
  */
+#define TSB8K_INDEX           0
+#define TSB4M_INDEX           1
 
 static struct tsb_info kernel_tsbs[MAX_TSB_INFO];
 static hv_tsb_info_t kernel_td[MAX_TSB_INFO];
@@ -135,7 +137,7 @@
 
 #define UNIMPLEMENTED panic("%s not implemented", __FUNCTION__)
 
-
+static void pmap_scrub_pages(vm_paddr_t pa, int64_t size);
 /*
  * Quick sort callout for comparing memory regions.
  */
@@ -204,7 +206,6 @@
 void
 pmap_activate(struct thread *td)
 {
-	struct proc *p = td->td_proc;
 	pmap_t pmap, oldpmap;
 
 	critical_enter();
@@ -218,18 +219,6 @@
 	pmap->pm_active |= 1;
 #endif
 
-
-	/* XXX complains of incomplete type in spite of being defined */
-	if (p->p_flag & P_SA) {
-		/* Make sure all other cr3 entries are updated. */
-		/* what if they are running?  XXXKSE (maybe abort them) */
-		FOREACH_THREAD_IN_PROC(p, td) {
-			td->td_pcb->pcb_pdir = 	pmap->pm_pdir;
-		}
-	} else {
-		td->td_pcb->pcb_pdir = pmap->pm_pdir;
-	}
-
 	set_pdir_scratchpad(pmap->pm_pdir);
 	PCPU_SET(curpmap, pmap);
 	critical_exit();
@@ -250,7 +239,7 @@
 	struct pmap *pm;
 	vm_offset_t off, va;
 	vm_paddr_t pa;
-	vm_size_t physsz, virtsz, scrubbed;
+	vm_size_t physsz, virtsz;
 	ihandle_t pmem, vmem;
 	int i, sz, j;
 	uint64_t tsb_8k_size, tsb_4m_size, error;
@@ -298,8 +287,6 @@
 	}
 	physmem = btoc(physsz);
 	
-	
-	
 	/*
 	 * Calculate the size of kernel virtual memory, and the size and mask
 	 * for the kernel tsb.
@@ -311,55 +298,47 @@
 	pa = pmap_bootstrap_alloc(tsb_8k_size);
 	if (pa & PAGE_MASK_4M)
 		panic("pmap_bootstrap: tsb unaligned\n");
-	kernel_tsbs[0].tsb_pa = pa;
-	kernel_tsbs[0].tsb_szc = (tsb_8k_size >> (PAGE_SHIFT - TTE_SHIFT));
-	kernel_tsbs[0].tsb_ttesz_mask = TSB8K;
+	kernel_tsbs[TSB8K_INDEX].tsb_pa = pa;
+	kernel_tsbs[TSB8K_INDEX].tsb_szc = (tsb_8k_size >> (PAGE_SHIFT - TTE_SHIFT));
+	kernel_tsbs[TSB8K_INDEX].tsb_ttesz_mask = TSB8K;
 
 	tsb_4m_size = virtsz >> (PAGE_SHIFT_4M - TTE_SHIFT);
 	pa = pmap_bootstrap_alloc(tsb_4m_size);
-	kernel_tsbs[1].tsb_pa = pa;
-	kernel_tsbs[1].tsb_szc = (tsb_4m_size >> (PAGE_SHIFT - TTE_SHIFT));
-	kernel_tsbs[1].tsb_ttesz_mask = TSB4M;
+	kernel_tsbs[TSB4M_INDEX].tsb_pa = pa;
+	kernel_tsbs[TSB4M_INDEX].tsb_szc = (tsb_4m_size >> (PAGE_SHIFT - TTE_SHIFT));
+	kernel_tsbs[TSB4M_INDEX].tsb_ttesz_mask = TSB4M;
+
 
-	error = hv_mem_scrub(kernel_tsbs[0].tsb_pa, tsb_8k_size, &scrubbed);
-	if (scrubbed != tsb_8k_size || error)
-		panic("unexpected return from mem_scrub - expected size: %ld - got size: %ld - error: %ld",
-		      tsb_8k_size, scrubbed, error);
+	pmap_scrub_pages(kernel_tsbs[TSB8K_INDEX].tsb_pa, tsb_8k_size);
 
-	error = hv_mem_scrub(kernel_tsbs[1].tsb_pa, tsb_4m_size, &scrubbed);
-	if (scrubbed != tsb_8k_size || error)
-		panic("unexpected return from mem_scrub - expected size: %ld - got size: %ld - error: %ld",
-		      tsb_4m_size, scrubbed, error);
+	pmap_scrub_pages(kernel_tsbs[TSB4M_INDEX].tsb_pa, tsb_4m_size);
 
 
 	/*
 	 * Set up TSB descriptors for the hypervisor
 	 *
 	 */
-	kernel_td[0].hvtsb_idxpgsz = TTE8K;
-	kernel_td[0].hvtsb_assoc = 1;
-	kernel_td[0].hvtsb_ntte = TSB_ENTRIES(kernel_tsbs[0].tsb_szc);
-	kernel_td[0].hvtsb_ctx_index = 0;
-	kernel_td[0].hvtsb_pgszs = kernel_tsbs[0].tsb_ttesz_mask;
-	kernel_td[0].hvtsb_rsvd = 0;
-	kernel_td[0].hvtsb_pa = kernel_tsbs[0].tsb_pa;
+	kernel_td[TSB8K_INDEX].hvtsb_idxpgsz = TTE8K;
+	kernel_td[TSB8K_INDEX].hvtsb_assoc = 1;
+	kernel_td[TSB8K_INDEX].hvtsb_ntte = TSB_ENTRIES(kernel_tsbs[TSB8K_INDEX].tsb_szc);
+	kernel_td[TSB8K_INDEX].hvtsb_ctx_index = 0;
+	kernel_td[TSB8K_INDEX].hvtsb_pgszs = kernel_tsbs[TSB8K_INDEX].tsb_ttesz_mask;
+	kernel_td[TSB8K_INDEX].hvtsb_rsvd = 0;
+	kernel_td[TSB8K_INDEX].hvtsb_pa = kernel_tsbs[TSB8K_INDEX].tsb_pa;
 
-	kernel_td[1].hvtsb_idxpgsz = TTE4M;
-	kernel_td[1].hvtsb_assoc = 1;
-	kernel_td[1].hvtsb_ntte = TSB_ENTRIES(kernel_tsbs[1].tsb_szc);
-	kernel_td[1].hvtsb_ctx_index = 0;
-	kernel_td[1].hvtsb_pgszs = kernel_tsbs[1].tsb_ttesz_mask;
-	kernel_td[1].hvtsb_rsvd = 0;
-	kernel_td[1].hvtsb_pa = kernel_tsbs[1].tsb_pa;
+	kernel_td[TSB4M_INDEX].hvtsb_idxpgsz = TTE4M;
+	kernel_td[TSB4M_INDEX].hvtsb_assoc = 1;
+	kernel_td[TSB4M_INDEX].hvtsb_ntte = TSB_ENTRIES(kernel_tsbs[TSB4M_INDEX].tsb_szc);
+	kernel_td[TSB4M_INDEX].hvtsb_ctx_index = 0;
+	kernel_td[TSB4M_INDEX].hvtsb_pgszs = kernel_tsbs[TSB4M_INDEX].tsb_ttesz_mask;
+	kernel_td[TSB4M_INDEX].hvtsb_rsvd = 0;
+	kernel_td[TSB4M_INDEX].hvtsb_pa = kernel_tsbs[TSB4M_INDEX].tsb_pa;
 
-	error = hv_set_ctx0(2, OF_vtophys((vm_offset_t)&kernel_td));
-	
-	if (error != H_EOK)
-		panic("failed to set ctx0 TSBs error: %ld", error);
 
 	/*
 	 * allocate MMU fault status areas for all CPUS
 	 */
+	printf("allocate fault status area\n");
 	mmu_fault_status_area = pmap_bootstrap_alloc(MMFSA_SIZE*MAXCPU);
 
 	/*
@@ -368,6 +347,13 @@
 	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
 	msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(msgbuf_phys);
 
+#ifdef notyet
+	/* XXX this tries to map at a wacky address */
+	for (i = 0; i < (MSGBUF_SIZE / PAGE_SIZE); i++)
+		tsb_set_tte(&kernel_td[TSB8K_INDEX], ((vm_offset_t)msgbufp) + i*PAGE_SIZE, msgbuf_phys + i*PAGE_SIZE,
+			    TTE_KERNEL | VTD_8K, 0);
+#endif
+
 	/*
 	 * Set the start and end of kva.  The kernel is loaded at the first
 	 * available 4 meg super page, so round up to the end of the page.
@@ -385,10 +371,11 @@
 	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE;
 	kstack0 = virtual_avail;
 	virtual_avail += KSTACK_PAGES * PAGE_SIZE;
+	printf("setting ttes\n");
 	for (i = 0; i < KSTACK_PAGES; i++) {
 		pa = kstack0_phys + i * PAGE_SIZE;
 		va = kstack0 + i * PAGE_SIZE;
-		UNIMPLEMENTED;
+		tsb_set_tte(&kernel_td[TSB8K_INDEX], va, pa, TTE_KERNEL | VTD_8K, 0);
 	}
 
 	/*
@@ -419,17 +406,23 @@
 		    "translation: start=%#lx size=%#lx tte=%#lx",
 		    translations[i].om_start, translations[i].om_size,
 		    translations[i].om_tte);
+#if 0
 		if (translations[i].om_start < VM_MIN_PROM_ADDRESS ||
 		    translations[i].om_start > VM_MAX_PROM_ADDRESS)
 			continue;
-		for (off = 0; off < translations[i].om_size;
-		    off += PAGE_SIZE) {
-			va = translations[i].om_start + off;
-			pa = ((translations[i].om_tte &
-			       ~(TD_SOFT_MASK << TD_SOFT_SHIFT)) | TD_EXEC) +
-			    off;
-			tsb_set_tte(&kernel_tsbs[0], va, pa, 0/* XXX ADD FLAGS */, 0);
-		}
+#endif
+		printf("om_size: %ld om_start: %lx om_tte: %lx\n", translations[i].om_size,
+		       translations[i].om_start, translations[i].om_tte);
+		if (translations[i].om_size == PAGE_SIZE_4M) 
+			tsb_set_tte(&kernel_td[TSB4M_INDEX], translations[i].om_start, 
+				    TTE_GET_PA(translations[i].om_tte), TTE_KERNEL | VTD_4M, 0);
+		else 
+			for (off = 0; off < translations[i].om_size;
+			     off += PAGE_SIZE) {
+				va = translations[i].om_start + off;
+				pa = TTE_GET_PA(translations[i].om_tte) + off;
+				tsb_set_tte(&kernel_td[TSB8K_INDEX], va, pa, TTE_KERNEL | VTD_8K, 0);
+			}
 	}
 
 	/*
@@ -450,6 +443,14 @@
 
 	PMAP_LOCK_INIT(kernel_pmap);
 	TAILQ_INIT(&kernel_pmap->pm_pvlist);
+
+	printf("physical address of kernel_td: 0x%lx\n", vtophys((vm_offset_t)&kernel_td));
+	printf("set ctx0\n");
+	error = hv_set_ctx0(2, vtophys((vm_offset_t)&kernel_td));
+	printf("ctx0 set\n");
+	if (error != H_EOK)
+		panic("failed to set ctx0 TSBs error: %ld", error);
+
 }
 
 
@@ -584,7 +585,7 @@
 pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 {
 
-	UNIMPLEMENTED;
+	printf("%s unimplemented\n", __FUNCTION__);
 }
 
 
@@ -617,41 +618,47 @@
 void
 pmap_kenter(vm_offset_t va, vm_paddr_t pa)
 {
-	tsb_set_tte(&kernel_tsbs[1], va, pa, TTE_KERNEL | VTD_8K, 0);
+	printf("pmap_kentering\n");
+	tsb_set_tte(&kernel_td[TSB8K_INDEX], va, pa, TTE_KERNEL | VTD_8K, 0);
+	printf("pmap_kentered\n");
 }
 
 /*
  * Extract the physical page address associated with the given kernel virtual
  * address.
  */
+
 vm_paddr_t
 pmap_kextract(vm_offset_t va)
 {
-	tte_t tte;
+	uint64_t tte_data;
 	vm_paddr_t pa;
- 
 	/*
-	 * check 8k TSB
+	 * check 4M TSB
 	 */
-	tte = tsb_get_tte(&kernel_tsbs[1], va, 0);
-	pa = tte.tte_bit.pa << PAGE_SHIFT;
-	if (pa != 0)
+	tte_data = tsb_get_tte(&kernel_td[TSB4M_INDEX], va, 0);
+	pa = TTE_GET_PA(tte_data) | (va & PAGE_MASK_4M);
+	if (TTE_GET_PA(tte_data) != 0) 
 		goto done;
 	/*
-	 * check 4M TSB
+	 * check 8k TSB
 	 */
-	tte = tsb_get_tte(&kernel_tsbs[0], va, 0);
-	pa = tte.tte_bit.pa << PAGE_SHIFT;
+	tte_data = tsb_get_tte(&kernel_td[TSB8K_INDEX], va, 0);
+	pa = TTE_GET_PA(tte_data) | (va & PAGE_MASK);
 	
 done:
-
 	return pa;
 }
 
 void
 pmap_kremove(vm_offset_t va)
-{
-	tsb_set_tte(&kernel_tsbs[1], va, 0, 0, 0);
+{ 
+
+	if ((va & PAGE_MASK_4M) == 0 &&	
+	    tsb_get_tte(&kernel_td[TSB4M_INDEX], va, 0) != 0)
+		tsb_set_tte(&kernel_td[TSB4M_INDEX], va, 0, 0, 0);
+	else
+		tsb_set_tte(&kernel_td[TSB8K_INDEX], va, 0, 0, 0);
 }
 
 static void
@@ -1053,6 +1060,18 @@
 {
 	UNIMPLEMENTED;
 }
+
+static void
+pmap_scrub_pages(vm_paddr_t pa, int64_t size)
+{
+	uint64_t bytes_zeroed;
+	while (size > 0) {
+		hv_mem_scrub(pa, size, &bytes_zeroed);
+		pa += bytes_zeroed;
+		size -= bytes_zeroed;
+	}
+}
+
 /*
  * Set the 2 global kernel TSBs and the per-cpu user TSB
  *
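
The pmap_scrub_pages() helper added in this file loops because
hv_mem_scrub() may scrub fewer bytes than requested; the loop advances by
whatever the hypervisor reports and retries until the range is done. Note
that, unlike the inline calls it replaces, the helper drops the hypervisor
error code. A sketch of a more defensive variant; the H_EOK check is an
illustration and not part of this change:

	static void
	pmap_scrub_pages(vm_paddr_t pa, int64_t size)
	{
		uint64_t bytes_zeroed, error;

		while (size > 0) {
			/* the hypervisor may zero only part of the range */
			error = hv_mem_scrub(pa, size, &bytes_zeroed);
			if (error != H_EOK)	/* added check, not in this change */
				panic("hv_mem_scrub failed: error %ld", error);
			pa += bytes_zeroed;
			size -= bytes_zeroed;
		}
	}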

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tick.c#3 (text+ko) ====

@@ -192,5 +192,5 @@
 void
 tick_stop(void)
 {
-	UNIMPLEMENTED;
+
 }

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tsb.c#4 (text+ko) ====

@@ -45,52 +45,65 @@
 #include <vm/vm_page.h>
 
 #include <machine/cpufunc.h>
+#include <machine/hypervisorvar.h>
 #include <machine/smp.h>
 #include <machine/mmu.h>
 #include <machine/tte.h>
 #include <machine/tsb.h>
 
 CTASSERT(sizeof(tte_t) == sizeof(uint64_t));
-
+#define TSB_MASK(tsb) (((tsb->hvtsb_ntte) << (PAGE_SHIFT - TTE_SHIFT)) - 1)
+#ifdef DEBUG_TSB
+#define DPRINTF printf
+#else
+#define DPRINTF(...)
+#endif
 
 void 
-tsb_set_tte(tsb_info_t tsb, vm_offset_t va, vm_paddr_t pa, uint64_t flags, uint64_t ctx)
+tsb_set_tte(hv_tsb_info_t *tsb, vm_offset_t va, vm_paddr_t pa, uint64_t flags, uint64_t ctx)
 {
 	vm_paddr_t tsb_store_pa;
-	uint64_t tsb_index, tsb_shift, tte_tag;
-	tte_t data;
+	uint64_t tsb_index, tsb_shift, tte_tag, tte_data;
+	DPRINTF("tsb_set_tte va: 0x%lx idxpgsz: %x\n", va, tsb->hvtsb_idxpgsz);
+	tsb_shift = TTE_GET_PAGE_SHIFT(tsb->hvtsb_idxpgsz);
+	DPRINTF("tsb_shift: 0x%lx\n", tsb_shift);
+	tsb_index = (va >> tsb_shift) & TSB_MASK(tsb);
+	DPRINTF("tsb_index_absolute: 0x%lx tsb_index: 0x%lx\n", (va >> tsb_shift), tsb_index);
+	tsb_store_pa = tsb->hvtsb_pa + 2*tsb_index*sizeof(uint64_t);
 
-	tsb_shift = TTE_BSZS_SHIFT(tsb->tsb_ttesz_mask) + PAGE_SHIFT;
-	tsb_index = (va >> tsb_shift) % (tsb->tsb_szc << (PAGE_SHIFT - TTE_SHIFT));
-	tsb_store_pa = tsb->tsb_pa + 2*tsb_index*sizeof(uint64_t);
+	tte_data = pa | flags;
 
-	data.ll = pa | flags;
-	data.tte_bit.v = 0;
+	tte_data &= ~VTD_V;
 	/* store new value with valid bit cleared 
 	 * to avoid invalid intermediate value;
 	 */
-	store_real(tsb_store_pa + sizeof(uint64_t), data.ll);
+	store_real(tsb_store_pa + sizeof(uint64_t), tte_data);
+	tte_data |= VTD_V;
 
-	data.tte_bit.v = 1;
 	tte_tag = (ctx << TTARGET_CTX_SHIFT) | (va >> TTARGET_VA_SHIFT);
 	store_real(tsb_store_pa, tte_tag); 
-	store_real_sync(tsb_store_pa + sizeof(uint64_t), data.ll);
+	store_real_sync(tsb_store_pa + sizeof(uint64_t), tte_data);
 } 
 
-tte_t
-tsb_get_tte(tsb_info_t tsb, vm_offset_t va, uint64_t ctx)
+uint64_t
+tsb_get_tte(hv_tsb_info_t *tsb, vm_offset_t va, uint64_t ctx)
 {
 	vm_paddr_t tsb_load_pa;
-	uint64_t tsb_index, tsb_shift, tte_tag;
-	tte_t data;
+	uint64_t tsb_index, tsb_shift, tte_tag, tte_data;
 
-	tsb_shift = TTE_BSZS_SHIFT(tsb->tsb_ttesz_mask) + PAGE_SHIFT;
-	tsb_index = (va >> tsb_shift) % (tsb->tsb_szc << (PAGE_SHIFT - TTE_SHIFT));
-	tsb_load_pa = tsb->tsb_pa + 2*tsb_index*sizeof(uint64_t);
+	DPRINTF("tsb_get_tte va: 0x%lx\n", va);
+	tsb_shift = TTE_PAGE_SHIFT(tsb->hvtsb_idxpgsz);
+	DPRINTF("tsb_shift: %lx\n", tsb_shift);
+	tsb_index = (va >> tsb_shift) & TSB_MASK(tsb);
+	DPRINTF("tsb_index_absolute: %lx tsb_index: %lx\n", (va >> tsb_shift), tsb_index);
+	tsb_load_pa = tsb->hvtsb_pa + 2*tsb_index*sizeof(uint64_t);
 
-	load_real_dw(tsb_load_pa, &tte_tag, &data.ll);
-	if ((tte_tag >> TTARGET_CTX_SHIFT) == ctx && (tte_tag << TTARGET_VA_SHIFT) == va)
-		return data;
+	DPRINTF("load_real_dw - ra: %lx &tte_tag: %p &tte_data: %p \n", tsb_load_pa, &tte_tag, &tte_data);
+	load_real_dw(tsb_load_pa, &tte_tag, &tte_data);
+	DPRINTF("tte_data: %lx ctx: %lx  va: %lx\n", tte_data, tte_tag >> TTARGET_CTX_SHIFT, 
+		tte_tag << TTARGET_VA_SHIFT);
+	if ((tte_tag >> TTARGET_CTX_SHIFT) == ctx && (tte_tag << TTARGET_VA_SHIFT) == (va & ~PAGE_MASK_4M))
+		return tte_data;
 
-	return ((tte_t)0UL);
+	return (0UL);
 }
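
For reference, the entry addressing shared by tsb_set_tte() and
tsb_get_tte(), restated from the hunks above: each TSB entry is a pair of
adjacent 64-bit words (tag word first, data word second), so entry n sits
16*n bytes past the TSB base:

	tsb_index = (va >> tsb_shift) & TSB_MASK(tsb);
	tsb_entry_ra = tsb->hvtsb_pa + 2 * tsb_index * sizeof(uint64_t);
	/* tag word at tsb_entry_ra, data word at tsb_entry_ra + 8 */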

