PERFORCE change 92840 for review

Kip Macy kmacy at FreeBSD.org
Mon Mar 6 00:15:04 PST 2006


http://perforce.freebsd.org/chv.cgi?CH=92840

Change 92840 by kmacy at kmacy_storage:sun4v_work on 2006/03/06 08:13:57

	add support for data protection faults and remove any kernel 
	dependency on TSBs for 8K pages

Affected files ...

.. //depot/projects/kmacy_sun4v/src/sys/sparc64/sparc64/genassym.c#7 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/errata.h#1 add
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/exception.S#18 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#17 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/swtch.S#7 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#4 edit

Differences ...

==== //depot/projects/kmacy_sun4v/src/sys/sparc64/sparc64/genassym.c#7 (text+ko) ====

@@ -180,8 +180,12 @@
 ASSYM(TV_SIZE_BITS, TV_SIZE_BITS);
 #else 
 ASSYM(VTD_REF, VTD_REF);
-ASSYM(TTARGET_VA_MASK, TTARGET_VA_MASK);
+ASSYM(VTD_W, VTD_W);
+ASSYM(VTD_SW_W, VTD_SW_W);
 ASSYM(TTARGET_VA_BITS, TTARGET_VA_BITS);
+ASSYM(TTARGET_VA_SHIFT, TTARGET_VA_SHIFT);
+ASSYM(TTARGET_CTX_SHIFT, TTARGET_CTX_SHIFT);
+
 ASSYM(THE_SHIFT, THE_SHIFT);
 #endif
 

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/exception.S#18 (text+ko) ====

@@ -43,9 +43,11 @@
 #include <machine/tstate.h>
 #include <machine/wstate.h>
 #include <machine/hypervisorvar.h>
+#include <machine/errata.h>
 
 #include "assym.s"
 
+#define PMAP_DEBUG
 #if 1
 #define SPILL_FILL_MAGIC_TRAP_ON	nop
 #define SPILL_FILL_MAGIC_TRAP_OFF	nop
@@ -258,8 +260,10 @@
 	.endm
 
 	.macro	insn_miss
-	MAGIC_TRAP_ON
-	illtrap
+	GET_MMFSA_SCRATCH(%g1)		! insn 1
+	GET_HASH_SCRATCH(%g2)		! insn 2,3
+	ba,pt	%xcc, tsb_miss
+          mov   VTD_REF, %g3
 	.align	32
 	.endm
 	
@@ -272,20 +276,25 @@
 	.macro	data_miss
 	GET_MMFSA_SCRATCH(%g1)		! insn 1
 	GET_HASH_SCRATCH(%g2)		! insn 2,3
-	GET_TSB_SCRATCH(%g3)		! insn 4,5
+	add	%g1, MMFSA_D_, %g1	! set fsa to data
 	ba,pt	%xcc, tsb_miss
-	  add	%g1, MMFSA_D_, %g1	! set fsa to data
+          mov   VTD_REF, %g3
 	.align	32
 	.endm
 
 	.macro	data_prot
-	MAGIC_TRAP_ON
-	illtrap
+	GET_MMFSA_SCRATCH(%g1)		! insn 1
+	GET_HASH_SCRATCH(%g2)		! insn 2,3
+        add	%g1, MMFSA_D_, %g1	! set fsa to data
+	ba,pt	%xcc, tsb_miss
+	  mov   VTD_W, %g3
 	.align	32
 	.endm
 
 	.macro	tl0_align
 	MAGIC_TRAP_ON
+	MAGIC_TRAP_ON
+	MAGIC_TRAP_ON
 	illtrap
 	.align	32
 	.endm
@@ -641,7 +650,7 @@
 	tl0_reserved	18				! 0x50-0x61
 tl0_watch_virt_62:
 	tl0_gen		T_VA_WATCHPOINT			! 0x62
-	tl0_reserved	10				! 0x63-0x6c
+	tl0_reserved	9				! 0x63-0x6b
 tl0_data_prot_6c:	
 	data_prot					! 0x6c
 	tl0_reserved	8				! 0x6d-0x75
@@ -1016,98 +1025,136 @@
 END(tl0_intr)
 
 
+! The HV documentation is wrong
+! mappings are not already flushed before taking a
+! data protection trap
+! IN:	
+! %g2 == hash base
+! %g3 == flags
+! %g5 == fault addr
+! %g6 == context
+! %g1,%g4,%g7 temps
+! OUT:
+! %g6 <- TAG
+ontario_unmap_addr_errata_begin:	
+	! do the unmap call
+	mov %o0, %g1
+	mov %o1, %g4
+	mov %o2, %g7
+	mov %g5, %o0
+	mov %g6, %o1
+	mov MAP_ITLB|MAP_DTLB, %o2
+	ta  MMU_UNMAP_ADDR
+	mov %g1, %o0
+	mov %g4, %o1
+	mov %g7, %o2
+	srlx %g5, TTARGET_VA_SHIFT, %g4
+	sllx %g6, TTARGET_CTX_SHIFT, %g6
+        ba,pt %xcc, tsb_miss_fault_handler
+          or   %g6, %g4, %g6   
+ontario_unmap_addr_errata_end:	
+
+
+	
 ! %g1==mmfsa     (RA)
 ! %g2==hash base (VA)
-! %g3==TSB       (RA)
+! %g3==TTE flags
 ! internal usage:
 ! %g1==absolute index
+! %g2==pointer to hash entry
+! %g3==flag bits, TSB (RA)
 ! %g4==fault type,entry tag
-! %g5==fault address,entry data
-! %g6==hash size,tag, temp
+! %g5==tag
+! %g6==hash size, temp
 ! %g7 temp
 ENTRY(tsb_miss)
 	ldda [%g0 + %g1]ASI_LDTD_REAL, %g4
-	! %g4 == fault type %g5 == fault address
-	! ignore context for now
+	addx %g1, 0x10, %g1 
+	ldda [%g0 + %g1]ASI_LDTD_REAL, %g6
+	MAGIC_TRAP_ON
+/* these two instructions will be patched
+ * at some point
+ */
+#ifdef  ONTARIO_UNMAP_ERRATA
+	cmp %g3, VTD_W
+	be,pn %xcc, ontario_unmap_addr_errata_begin
+#endif
+	  srlx %g5, TTARGET_VA_SHIFT, %g1	
+	sllx %g6, TTARGET_CTX_SHIFT, %g6
+        or   %g6, %g1, %g6			! %g6 == search tag
+
+
+tsb_miss_fault_handler:	
+
+	! %g4 == fault type %g5 == fault addr %g6 == tag
 	! XXX only handle normal miss for now
-	mov 1, %g7
+#ifdef PMAP_DEBUG
+	cmp %g6, %g0			! NULL ptr deref in kernel
+	bne,pt %xcc, 4f
+	  nop
+tsb_miss_null_deref:	
+	illtrap				! give up
+4:	
+#endif	
+        mov 1, %g7	
 	sllx %g7, PAGE_SHIFT, %g7
 	sub %g7, 1, %g7			! %g7==PAGE_MASK
 
-	MAGIC_TRAP_ON
-	and %g2, %g7, %g6		! size stored in lower 13 bits
+	and %g2, %g7, %g4		! size stored in lower 13 bits
 	andn %g2, %g7, %g2		! actual VA of hash
 
 	! XXX only handle 8k page miss
 	! calculate hash index
 	srlx %g5, PAGE_SHIFT, %g1		! absolute hash index
-	sllx %g6, (PAGE_SHIFT - THE_SHIFT), %g6 ! size of hash in THEs
-	sub %g6, 1, %g6				! THE_MASK
-	and %g1, %g6, %g6			! masked hash index
-	sllx %g6, THE_SHIFT, %g6		! masked hash offset
-	srlx %g5, PAGE_SHIFT_4M, %g7		! VA tag
+	sllx %g4, (PAGE_SHIFT - THE_SHIFT), %g4 ! size of hash in THEs
+	sub %g4, 1, %g4				! THE_MASK
+	and %g1, %g4, %g4			! masked hash index
+	sllx %g4, THE_SHIFT, %g4		! masked hash offset
 	! fetch hash entries - exit when we find what we're looking for 
 
 	! %g2==entry base
-	add %g2, %g6, %g2		! base + offset == entry base
+	add %g2, %g4, %g2		! base + offset == entry base
 
 	! entry 0
-tsb_miss_lookup_0:	
-	mov 1, %g6
-	sllx %g6, TTARGET_VA_BITS, %g6
-	subx %g6, 1, %g6		! %g6 == TTARGET_VA_MASK
-	
+	! %g1 == abs index %g2 == THE pointer %g3 == flags
+	! %g4 <- tag %g5 <- data
+	! %g6 == search tag %g7 == PAGE_MASK
+tsb_miss_lookup_0:  
 	ldda [%g2 + %g0]ASI_LDTD_N, %g4
-	and %g4, %g6, %g6		! mask off context bits
-	cmp %g6, %g0			! entry tag == 0
+	cmp %g4, %g0			! entry tag == 0 ?
 	be,pn %xcc, 1f
 	  nop
-	cmp %g6, %g7			! entry tag == VA tag?
+	cmp %g4, %g6			! entry tag == VA tag?
 	be,pn %xcc, 2f
 	  nop
 	! entry 1
 tsb_miss_lookup_1:	
-	mov 1, %g6
-	sllx %g6, TTARGET_VA_BITS, %g6
-	subx %g6, 1, %g6		! %g6 == TTARGET_VA_MASK
-
-	add %g2, 16, %g2
+	add %g2, 16, %g2		! next THF
 	ldda [%g2 + %g0]ASI_LDTD_N, %g4
-	and %g4, %g6, %g6		! mask off context bits
-	cmp %g6, %g0			! entry tag == 0
+	cmp %g4, %g0			! entry tag == 0 ?
 	be,pn %xcc, 1f
 	  nop
-	cmp %g6, %g7			! entry tag == VA tag?
+	cmp %g4, %g6			! entry tag == search tag?
 	be,pn %xcc, 2f
 	  nop
 	! entry 2
 tsb_miss_lookup_2:	
-	mov 1, %g6
-	sllx %g6, TTARGET_VA_BITS, %g6
-	subx %g6, 1, %g6		! %g6 == TTARGET_VA_MASK
-	
-	add %g2, 16, %g2
+	add %g2, 16, %g2		! next THF
 	ldda [%g2 + %g0]ASI_LDTD_N, %g4
-	and %g4, %g6, %g6		! mask off context bits
-	cmp %g6, %g0			! entry tag == 0
+	cmp %g4, %g0			! entry tag == 0 ?
 	be,pn %xcc, 1f
 	  nop
-	cmp %g6, %g7			! entry tag == VA tag?
+	cmp %g4, %g6			! entry tag == search tag?
 	be,pn %xcc, 2f
 	  nop
 	! entry 3
 tsb_miss_lookup_3:	
-	mov 1, %g6
-	sllx %g6, TTARGET_VA_BITS, %g6
-	subx %g6, 1, %g6		! %g6 == TTARGET_VA_MASK
-	
-	add %g2, 16, %g2
+	add %g2, 16, %g2		! next THF
 	ldda [%g2 + %g0]ASI_LDTD_N, %g4
-	and %g4, %g6, %g6	! mask off context bits
-	cmp %g6, %g0			! entry tag == 0
+	cmp %g4, %g0			! entry tag == 0 ?
 	be,pn %xcc, 1f
 	  nop
-	cmp %g6, %g7			! entry tag == VA tag?
+	cmp %g4, %g6			! entry tag == search tag?
 	be,pn %xcc, 2f				
 	  nop
 tsb_miss_not_found:	
@@ -1119,32 +1166,57 @@
 	
 tsb_miss_found:	
 2:	!found
-	! set referenced bit unconditionally for now 
-	or %g5, VTD_REF, %g5
-	stx %g5, [%g2 + 8]			! set ref bit
+	! %g1 == abs index %g2 == THE pointer %g3 == flags
+	! %g4 == tag %g5 == data %g7 == PAGE_MASK
+	! %g3 <- TSB RA %g6 <- TSB size, TTE RA
+
+	! XXX set referenced/modified bit unconditionally for now XXX
+	andcc %g5, %g3, %g0			! already set
+	bnz,pt %xcc, 5f
+	  nop
+	andcc %g3, VTD_REF, %g0			! TSB miss
+	bnz,pt %xcc, 6f
+	  or    %g5, %g3, %g5			! add ref/mod bit unconditionally  
+	andcc %g5, VTD_SW_W, %g0		! write enabled?
+	bz,pn %xcc, prot_fault_trap		! write to read only page
+	  or    %g5, %g3, %g5			! add ref/mod bit unconditionally  
+6:		
+	stx   %g5, [%g2 + 8]			! set ref/mod bit
+5:	
+	GET_TSB_SCRATCH(%g3)			! %g3 == TSB (RA)
+
 	
-	mov 1, %g7
-	sllx %g7, PAGE_SHIFT, %g7
-	sub %g7, 1, %g7				! %g7==PAGE_MASK
-	
-	and %g3, %g7, %g6			! size of TSB in pages
+	and  %g3, %g7, %g6			! size of TSB in pages
 	
 	andn %g3, %g7, %g3			! TSB real address
 	sllx %g6, (PAGE_SHIFT - TTE_SHIFT), %g6	! nttes
 	subx %g6, 1, %g6			! TSB_MASK
-	and %g6, %g1, %g6			! masked index
+	and  %g6, %g1, %g6			! masked index
 	sllx %g6, TTE_SHIFT, %g6		! masked byte offset
-	add %g6, %g3, %g6			! TTE RA
-	mov 8, %g7
+	add  %g6, %g3, %g6			! TTE RA
+	mov  8, %g7
+#ifdef PMAP_DEBUG
+	ldda [%g6]ASI_LDTD_REAL, %g2
+	cmp  %g3, %g5
+	bne,pt %xcc, 3f
+	  nop
+	illtrap					! die if all we're doing 
+						! is storing same data
+3:	
+#endif
 	stxa %g4, [%g6]ASI_REAL			! store tag
 	stxa %g5, [%g6 + %g7]ASI_REAL		! store data
 	MAGIC_TRAP_OFF
+
 	retry
 END(tsb_miss)
 
-	
-
-
+/*
+ * Write to read-only page
+ */
+ENTRY(prot_fault_trap)	
+	illtrap
+END(prot_fault_trap)
 /*
  * Freshly forked processes come here when switched to for the first time.
  * The arguments to fork_exit() have been setup in the locals, we must move

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#17 (text+ko) ====

@@ -315,7 +315,6 @@
 	panic("pmap_bootstrap_alloc");
 }
 
-
 /*
  * Activate a user pmap.  The pmap must be activated before its address space
  * can be accessed in any way.
@@ -603,12 +602,19 @@
 	 */
 	pm->pm_hash = tte_hash_kernel_create(kernel_hash, PAGE_SIZE_4M);
 	tte_hash_set_scratchpad(pm->pm_hash);
-	
-	/*
-	 * XXX - We should read the kernel mappings into the hash table
-	 *
-	 */
 
+	for (i = 0; i < sz; i++) {
+		if (translations[i].om_start < VM_MIN_PROM_ADDRESS ||
+		    translations[i].om_start > VM_MAX_PROM_ADDRESS)
+			continue;
+		if (translations[i].om_size == PAGE_SIZE_4M) 
+			continue;
+		for (off = 0; off < translations[i].om_size; off += PAGE_SIZE) {
+			va = translations[i].om_start + off;
+			pa = TTE_GET_PA(translations[i].om_tte) + off;
+			tte_hash_insert(kernel_pmap->pm_hash, va, pa | TTE_KERNEL | VTD_8K);
+		}
+	}
 }
 
 
@@ -691,7 +697,7 @@
 
 	tte_data = pa = VM_PAGE_TO_PHYS(m);
 	otte = tte_hash_lookup(pmap->pm_hash, va);
-	otte_data = *otte;
+	otte_data = otte ? *otte : 0;
 	opa = TTE_GET_PA(otte_data);
 	/*
 	 * Mapping has not changed, must be protection or wiring change.
@@ -780,8 +786,9 @@
 			}
 			if (invlva)
 				pmap_invalidate_page(pmap, va);
-		} else
+		} else {
 			tte_hash_insert(pmap->pm_hash, va, tte_data);
+		}
 	}
 
 
@@ -1000,17 +1007,16 @@
 vm_paddr_t
 pmap_kextract(vm_offset_t va)
 {
-	uint64_t tte_data;
+	uint64_t *tte, tte_data;
 	vm_paddr_t pa;
 
         pa = 0;
-#if 0
-	printf("tte_data=%lx TTE_GET_PA(tte_data)=%lx (va & TTE_GET_PAGE_MASK(tte_data))=%lx\n",
-	       tsb_lookup_tte(va, 0), TTE_GET_PA(tte_data), (va & TTE_GET_PAGE_MASK(tte_data)));
-#endif
 	if ((tte_data = tsb_lookup_tte(va, 0)) != 0)
 		pa = TTE_GET_PA(tte_data) | (va & TTE_GET_PAGE_MASK(tte_data));
 
+	if ((pa == 0) && (tte = tte_hash_lookup(kernel_pmap->pm_hash, va)) != NULL)
+		pa = TTE_GET_PA(*tte) | (va & TTE_GET_PAGE_MASK(*tte));
+
 	return pa;
 }
 
@@ -1290,19 +1296,14 @@
 	 */
 	if (pmap->pm_stats.resident_count == 0)
 		return;
-
+	
+	invlva = 0;
 	vm_page_lock_queues();
 	sched_pin();
 	PMAP_LOCK(pmap);
-	if (pmap->pm_context != 0) {
-		invlva = 0;	
-		for (tva = start; tva < end; tva += PAGE_SIZE) {
-			tinvlva = tte_hash_delete(pmap->pm_hash, tva);
-			invlva = tinvlva ? tinvlva : invlva;
-		}
-	} else {
-		tsb_clear_range(&pmap->pm_tsb, start, end);
-		invlva = 1;
+	for (tva = start; tva < end; tva += PAGE_SIZE) {
+		tinvlva = tte_hash_delete(pmap->pm_hash, tva);
+		invlva = tinvlva ? tinvlva : invlva;
 	}
 	sched_unpin();
 	vm_page_unlock_queues();

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/swtch.S#7 (text+ko) ====

@@ -55,7 +55,6 @@
  * void cpu_switch(struct thread *old, struct thread *new)
  */
 ENTRY(cpu_switch)
-	MAGIC_TRAP_ON
 	GET_PCB(PCB_REG)
 	save	%sp, -CCFSZ, %sp
 	mov	%i1, %i0

==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#4 (text+ko) ====

@@ -129,17 +129,21 @@
 	th->th_hashtable = (tte_hash_entry_t)kmem_alloc_nofault(kernel_map, 
 								PAGE_SIZE*HASH_SIZE);
 
-	for (i = 0; i < HASH_SIZE; i++) {
+	printf("th->th_hashtable=%p ", th->th_hashtable);
+	for (i = 0; i < HASH_SIZE;) {
 		m = vm_page_alloc(NULL, color++,
 		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
 		    VM_ALLOC_ZERO);
+		printf("PHYS(m)=0x%010lx ", VM_PAGE_TO_PHYS(m));
 		if (m == NULL)
 			VM_WAIT;
 		else {
 			hash_pages[i++] = m;
 		}
 	}
+	printf("entered\n");
 	pmap_qenter((vm_offset_t)th->th_hashtable, hash_pages, HASH_SIZE);
+
 	for (i = 0; i < HASH_SIZE; i++) {
 		if ((hash_pages[i]->flags & PG_ZERO) == 0)
 			pmap_zero_page(hash_pages[i]);
@@ -188,7 +192,7 @@
 	KASSERT(tte_hash_lookup(th, va) != 0, ("attempting to delete non-existent entry"));
 #endif	
 	for (i = 0; i <= 3; i++) 
-		if ((fields[i].tte.tag << TTARGET_VA_SHIFT) == (va & PAGE_MASK_4M)) 
+		if ((fields[i].tte.tag << TTARGET_VA_SHIFT) == (va & ~PAGE_MASK_4M)) 
 			break;
 	vaindex = i;
 
@@ -215,18 +219,16 @@
 	uint64_t hash_shift, hash_index, tte_tag;
 	tte_hash_field_t fields;
 	int i;
+	
 	/* XXX - only handle 8K pages for now */
-
 	hash_shift = PAGE_SHIFT;
 	hash_index = (va >> hash_shift) & HASH_MASK(th);
-
 	fields = (th->th_hashtable[hash_index].the_fields);
 	tte_tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));
 	for (i = 0; i <= 3; i++) {
 		if ((fields[i].tte.tag == 0) || (fields[i].tte.tag == tte_tag)) {
 			fields[i].tte.data = tte_data;
 			fields[i].tte.tag = tte_tag;
-			printf("data: 0x%016lx tag: 0x%016lx\n", fields[i].tte.data, fields[i].tte.tag);
 			goto done;
 		} 
 		
@@ -250,7 +252,7 @@
 	fields = (th->th_hashtable[hash_index].the_fields);
 	
 	for (i = 0; i <= 3; i++) {
-		if ((fields[i].tte.tag << TTARGET_VA_SHIFT) == (va & PAGE_MASK_4M))
+		if ((fields[i].tte.tag << TTARGET_VA_SHIFT) == (va & ~PAGE_MASK_4M))
 			return &(fields[i].tte.data);
 	}
 	/* 


More information about the p4-projects mailing list