PERFORCE change 96733 for review
Kip Macy
kmacy at FreeBSD.org
Sat May 6 08:58:58 UTC 2006
http://perforce.freebsd.org/chv.cgi?CH=96733
Change 96733 by kmacy at kmacy_storage:sun4v_rwbuf on 2006/05/06 08:58:32
Arbitrary collision handling is now mostly implemented;
the system is getting very close to being self-hosting.
Garbage collection of collision buckets will still be needed for
long-lived processes.
Affected files ...
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tte_hash.h#13 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#46 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#21 edit
Differences ...
==== //depot/projects/kmacy_sun4v/src/sys/sun4v/include/tte_hash.h#13 (text+ko) ====
@@ -28,6 +28,8 @@
tte_t tte_hash_lookup_nolock(tte_hash_t hash, vm_offset_t va);
+void tte_hash_reset(tte_hash_t hash);
+
uint64_t tte_hash_set_scratchpad_kernel(tte_hash_t th);
uint64_t tte_hash_set_scratchpad_user(tte_hash_t th, uint64_t context);
==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#46 (text+ko) ====
@@ -1797,6 +1797,7 @@
free_pv_entry(pv);
}
+ tte_hash_reset(pmap->pm_hash);
sched_unpin();
pmap_invalidate_all(pmap);
==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#21 (text+ko) ====
@@ -27,6 +27,8 @@
* $ Exp $
*/
+#define DEBUG
+
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/ktr.h>
@@ -36,7 +38,9 @@
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
-
+#ifdef DEBUG
+#include <sys/kdb.h>
+#endif
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
@@ -60,7 +64,7 @@
#define HASH_VALID 0x1
-#define DEBUG
+
struct tte_hash_entry;
@@ -84,7 +88,7 @@
struct fragment_header {
- struct fragment_header *fh_next;
+ struct tte_hash_fragment *fh_next;
uint16_t fh_count;
uint16_t fh_free_head;
uint16_t pad[26];
@@ -213,7 +217,7 @@
}
}
for (i = 0, tm = m; i < HASH_SIZE; i++, tm++) {
- if (tm->flags & PG_ZERO)
+ if ((tm->flags & PG_ZERO) == 0)
pmap_zero_page(tm);
}
th->th_hashtable = (void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
@@ -226,7 +230,7 @@
if (m == NULL)
VM_WAIT;
}
- if (m->flags & PG_ZERO)
+ if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
th->th_fhtail = th->th_fhhead = (void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
@@ -244,6 +248,22 @@
free_tte_hash(th);
}
+void
+tte_hash_reset(tte_hash_t th)
+{
+ struct tte_hash_fragment *fh;
+ vm_page_t m;
+
+ for (fh = th->th_fhhead; fh != th->th_fhtail; fh = fh->thf_head.fh_next) {
+ m = PHYS_TO_VM_PAGE((vm_paddr_t)TLB_DIRECT_TO_PHYS((vm_offset_t)fh));
+ m->wire_count--;
+ vm_page_free(m);
+ }
+ fh = th->th_fhhead = th->th_fhtail;
+ fh->thf_head.fh_count = 0;
+ fh->thf_head.fh_free_head = 0;
+}
+
static __inline void
tte_hash_set_field(tte_hash_field_t field, uint64_t tag, tte_t tte)
{
@@ -256,11 +276,27 @@
{
struct tte_hash_fragment *fh;
tte_hash_field_t newfield;
+ vm_page_t m;
+ static int color;
fh = th->th_fhtail;
if (fh->thf_head.fh_count == MAX_FRAGMENT_ENTRIES) {
/* XXX allocate a new page */
- panic("new fragment page allocation unimplemented");
+ m = NULL;
+ while (m == NULL) {
+ m = vm_page_alloc(NULL, color++,
+ VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+ VM_ALLOC_ZERO);
+
+ if (m == NULL)
+ VM_WAIT;
+ }
+ if (m->flags & PG_ZERO)
+ pmap_zero_page(m);
+ fh->thf_head.fh_next = (void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
+ fh = th->th_fhtail = (void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
+ printf("allocating new fragment page fh=%p \n", fh);
+
}
newfield = fh->thf_entries[++fh->thf_head.fh_free_head].the_fields;
fh->thf_head.fh_count++;
@@ -268,16 +304,27 @@
field->of.flags = TH_COLLISION;
field->of.next = newfield;
- return newfield;
+ return (newfield);
}
+/*
+ * if a match for va is found the tte value is returned
+ * and if field is non-null field will point to that entry
+ *
+ * if no match is found 0 is returned and if field is non-null
+ * and toappend is true field points to the first empty entry
+ * allocating a new bucket if the current one is full
+ */
+
+
static __inline tte_t
-tte_hash_lookup_inline(tte_hash_t th, vm_offset_t va, tte_hash_field_t *field)
+tte_hash_lookup_inline(tte_hash_t th, vm_offset_t va, tte_hash_field_t *field, int toappend)
{
uint64_t hash_shift, hash_index;
tte_hash_field_t fields;
int i;
tte_t entry;
+
/* XXX - only handle 8K pages for now */
hash_shift = PAGE_SHIFT;
@@ -294,23 +341,18 @@
}
if (i == 4) {
if (fields[3].of.flags & TH_COLLISION) {
- printf("following next pointer looking up 0x%lx\n", va);
fields = fields[3].of.next;
goto retry;
- }
- printf("allocating fragment entry and shifting entry for tag=0x%lx data=0x%lx\n",
- fields[3].tte.tag, fields[3].tte.data);
- fields = tte_hash_allocate_fragment_entry(th, &fields[3]);
- printf("new fragment address is %p\n", fields);
- /* entry following shifted entry is the first unallocated */
- i = 1;
- }
+ } else if (toappend == TRUE) {
+ fields = tte_hash_allocate_fragment_entry(th, &fields[3]);
+ /* entry following shifted entry is the first unallocated */
+ i = 1;
+ }
+ }
- if (field)
+ if (field)
*field = &fields[i];
- /*
- * XXX handle the case of collisions > 3
- */
+
return (entry);
}
@@ -333,14 +375,19 @@
if (i == 4) {
if (fields[3].of.flags & TH_COLLISION) {
- fields = fields[3].of.next;
- goto retry;
- }
- /* if there is no collision pointer, 3 is the last entry */
- i = 3;
- }
+ if (fields[3].of.next[0].tte.tag != 0) {
+ fields = fields[3].of.next;
+ goto retry;
+ } else {
+ /* 3rd entry is last */
+ *field = &fields[2];
+ /* clear collision pointer */
+ tte_hash_set_field(&fields[3], 0, 0);
- if (field)
+ }
+ } else
+ *field = &fields[3]; /* 4th is the last entry */
+ } else
*field = &fields[i];
}
@@ -360,13 +407,17 @@
hash_bucket_lock(fields);
- if ((tte_data = tte_hash_lookup_inline(th, va, &lookup_field)) == 0)
+ if ((tte_data = tte_hash_lookup_inline(th, va, &lookup_field, FALSE)) == 0)
goto done;
th->th_entries--;
tte_hash_lookup_last_inline(th, va, &last_field);
+#ifdef DEBUG
+ if (last_field->tte.tag == 0)
+ panic("lookup_last failed for va=0x%lx\n", va);
+#endif
/* move last field's values in to the field we are deleting */
if (lookup_field != last_field)
tte_hash_set_field(lookup_field, last_field->tte.tag, last_field->tte.data);
@@ -395,7 +446,7 @@
tte_tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));
hash_bucket_lock(fields);
- otte_data = tte_hash_lookup_inline(th, va, &lookup_field);
+ otte_data = tte_hash_lookup_inline(th, va, &lookup_field, TRUE);
#ifdef DEBUG
if (otte_data)
panic("mapping for va=0x%lx already exists tte_data=0x%lx\n", va, otte_data);
@@ -411,7 +462,7 @@
tte_t
tte_hash_lookup_nolock(tte_hash_t th, vm_offset_t va)
{
- return tte_hash_lookup_inline(th, va, NULL);
+ return tte_hash_lookup_inline(th, va, NULL, FALSE);
}
@@ -434,7 +485,7 @@
fields = (th->th_hashtable[hash_index].the_fields);
hash_bucket_lock(fields);
- tte_data = tte_hash_lookup_inline(th, va, NULL);
+ tte_data = tte_hash_lookup_inline(th, va, NULL, FALSE);
hash_bucket_unlock_inline(fields);
return (tte_data);
@@ -468,7 +519,7 @@
hash_scratch = ((vm_offset_t)th->th_hashtable) | ((vm_offset_t)th->th_size);
set_hash_user_scratchpad(hash_scratch);
- return hash_scratch;
+ return (hash_scratch);
}
tte_t
@@ -485,10 +536,7 @@
fields = (th->th_hashtable[hash_index].the_fields);
hash_bucket_lock(fields);
- otte_data = tte_hash_lookup_inline(th, va, &lookup_field);
-#ifdef TTE_DEBUG
- printf("tte_hash_update(va=0x%lx, tte_data=0x%lx, index=%d)\n", va, tte_data, cookie);
-#endif
+ otte_data = tte_hash_lookup_inline(th, va, &lookup_field, TRUE);
tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));
@@ -499,5 +547,5 @@
if (otte_data == 0)
th->th_entries++;
- return otte_data;
+ return (otte_data);
}
More information about the p4-projects
mailing list