PERFORCE change 97475 for review
Kip Macy
kmacy at FreeBSD.org
Fri May 19 23:37:29 UTC 2006
http://perforce.freebsd.org/chv.cgi?CH=97475
Change 97475 by kmacy at kmacy_storage:sun4v_rwbuf on 2006/05/19 23:36:06
further simplify insertion
fix off-by-one error in allocate_fragment_entry that was causing most of the problems
Affected files ...
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#33 edit
Differences ...
==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/tte_hash.c#33 (text+ko) ====
@@ -191,7 +191,9 @@
}
}
for (i = 0, tm = m; i < HASH_SIZE; i++, tm++)
+#ifndef VM_PAGE_ALLOC_CONTIG_CAN_ALLOCATE_ZEROED_PAGES
if ((tm->flags & PG_ZERO) == 0)
+#endif
pmap_zero_page(tm);
th->th_hashtable = (void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
@@ -204,6 +206,7 @@
if (m == NULL)
VM_WAIT;
}
+
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
@@ -250,6 +253,25 @@
field->data = tte | (field->data & VTD_LOCK);
}
+static __inline tte_hash_entry_t
+find_entry(tte_hash_t th, vm_offset_t va, int page_shift)
+{
+ uint64_t hash_index;
+
+ hash_index = (va >> page_shift) & HASH_MASK(th);
+ return (&th->th_hashtable[hash_index]);
+}
+
+static __inline tte_hash_entry_t
+tte_hash_lookup_last_entry(tte_hash_entry_t entry)
+{
+
+ while (entry->of.next)
+ entry = entry->of.next;
+
+ return (entry);
+}
+
static tte_hash_entry_t
tte_hash_allocate_fragment_entry(tte_hash_t th)
{
@@ -259,8 +281,7 @@
static int color;
fh = th->th_fhtail;
- if (fh->thf_head.fh_count == MAX_FRAGMENT_ENTRIES) {
- /* XXX allocate a new page */
+ if (fh->thf_head.fh_free_head == MAX_FRAGMENT_ENTRIES) {
m = NULL;
while (m == NULL) {
m = vm_page_alloc(NULL, color++,
@@ -270,17 +291,19 @@
if (m == NULL)
VM_WAIT;
}
-#if 0
+
if ((m->flags & PG_ZERO) == 0)
-#endif
pmap_zero_page(m);
+
fh->thf_head.fh_next = (void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
fh = th->th_fhtail = (void *)TLB_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));
+ fh->thf_head.fh_free_head = 1;
printf("new fh=%p \n", fh);
}
- newentry = &fh->thf_entries[++fh->thf_head.fh_free_head];
+ newentry = &fh->thf_entries[fh->thf_head.fh_free_head];
+ fh->thf_head.fh_free_head++;
fh->thf_head.fh_count++;
return (newentry);
@@ -290,50 +313,34 @@
* if a match for va is found the tte value is returned
* and if field is non-null field will point to that entry
*
- * if no match is found 0 is returned and if field is non-null
- * field is set to the first empty entry or the last entry in a bucket
*
*/
-
-
static __inline tte_t
tte_hash_lookup_inline(tte_hash_entry_t entry, tte_t tte_tag, boolean_t insert)
{
int i;
tte_t tte_data;
tte_hash_field_t fields;
- tte_hash_entry_t curentry;
tte_data = 0;
-
do {
- curentry = entry; /* want a valid pointer */
- fields = curentry->the_fields;
+ fields = entry->the_fields;
for (i = 0; i < entry->of.count; i++) {
if (fields[i].tag == tte_tag) {
tte_data = (fields[i].data & ~VTD_LOCK);
PCPU_SET(lookup_field, (u_long)&fields[i]);
- break;
+ goto done;
}
}
-
- entry = entry->of.next;
- } while (curentry->of.flags == MAGIC_VALUE);
-
- if (insert && (tte_data == 0)) {
- if (curentry->of.count == HASH_ENTRIES) {
- curentry->of.flags = MAGIC_VALUE;
- PCPU_SET(lookup_field, (u_long)&curentry->of);
- } else {
- i = curentry->of.count++;
- PCPU_SET(lookup_field, (u_long)&fields[i]);
#ifdef DEBUG
- if (curentry->of.count > HASH_ENTRIES)
- panic("count too large count=%d", i);
+ if (entry->of.next && entry->of.flags != MAGIC_VALUE)
+ panic("overflow pointer not null without flags set entry= %p next=%p flags=0x%x count=%d",
+ entry, entry->of.next, entry->of.flags, entry->of.count);
#endif
- }
- }
+ entry = entry->of.next;
+ } while (entry);
+done:
return (tte_data);
}
@@ -342,36 +349,36 @@
tte_hash_lookup_last_inline(tte_hash_entry_t entry)
{
- int count;
tte_hash_field_t fields;
fields = entry->the_fields;
- while (entry->of.flags == MAGIC_VALUE && (entry->of.next->of.count > 1))
+ while (entry->of.next && (entry->of.next->of.count > 1))
entry = entry->of.next;
- if ((entry->of.flags == MAGIC_VALUE) && entry->of.next->of.count == 1) {
+ if (entry->of.next && entry->of.next->of.count == 1) {
PCPU_SET(last_field, (u_long)&entry->of.next->the_fields[0]);
entry->of.next = NULL;
entry->of.flags = 0;
} else {
- count = --entry->of.count;
- PCPU_SET(last_field, (u_long)&entry->the_fields[count]);
+#ifdef DEBUG
+ if (entry->of.count == 0)
+ panic("count zero");
+#endif
+ PCPU_SET(last_field, (u_long)&entry->the_fields[--entry->of.count]);
}
}
tte_t
tte_hash_clear_bits(tte_hash_t th, vm_offset_t va, uint64_t flags)
{
- uint64_t hash_shift, hash_index, s;
+ uint64_t s;
tte_hash_entry_t entry;
tte_t otte_data, tte_tag;
/* XXX - only handle 8K pages for now */
- hash_shift = PAGE_SHIFT;
- hash_index = (va >> hash_shift) & HASH_MASK(th);
- entry = (&th->th_hashtable[hash_index]);
-
+ entry = find_entry(th, va, PAGE_SHIFT);
+
tte_tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));
s = hash_bucket_lock(entry->the_fields);
@@ -388,14 +395,12 @@
tte_t
tte_hash_delete(tte_hash_t th, vm_offset_t va)
{
- uint64_t hash_shift, hash_index, s;
+ uint64_t s;
tte_hash_entry_t entry;
tte_t tte_data, tte_tag;
+
/* XXX - only handle 8K pages for now */
-
- hash_shift = PAGE_SHIFT;
- hash_index = (va >> hash_shift) & HASH_MASK(th);
- entry = (&th->th_hashtable[hash_index]);
+ entry = find_entry(th, va, PAGE_SHIFT);
tte_tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));
@@ -430,32 +435,54 @@
void
tte_hash_insert(tte_hash_t th, vm_offset_t va, tte_t tte_data)
{
+
+ tte_hash_entry_t entry, lentry, newentry;
+ tte_t tte_tag;
+ uint64_t s;
+
#ifdef DEBUG
if (tte_hash_lookup(th, va) != 0)
panic("mapping for va=0x%lx already exists", va);
#endif
- tte_hash_update(th, va, tte_data);
+ entry = find_entry(th, va, PAGE_SHIFT);
+ tte_tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));
+
+ s = hash_bucket_lock(entry->the_fields);
+ lentry = tte_hash_lookup_last_entry(entry);
+
+ if (lentry->of.count == HASH_ENTRIES) {
+ hash_bucket_unlock(entry->the_fields, s);
+ newentry = tte_hash_allocate_fragment_entry(th);
+ s = hash_bucket_lock(entry->the_fields);
+ lentry->of.flags = MAGIC_VALUE;
+ lentry->of.next = newentry;
+ lentry = newentry;
+ }
+ tte_hash_set_field(&lentry->the_fields[lentry->of.count++],
+ tte_tag, tte_data);
+ hash_bucket_unlock(entry->the_fields, s);
+#ifdef DEBUG
+ if (tte_hash_lookup(th, va) == 0)
+ panic("insert for va=0x%lx failed", va);
+#endif
+ th->th_entries++;
}
/*
* If leave_locked is true the tte's data field will be returned to
* the caller with the hash bucket left locked
*/
-
-
tte_t
tte_hash_lookup(tte_hash_t th, vm_offset_t va)
{
- uint64_t hash_shift, hash_index, s;
+ uint64_t s;
tte_hash_entry_t entry;
tte_t tte_data, tte_tag;
+
/* XXX - only handle 8K pages for now */
+ entry = find_entry(th, va, PAGE_SHIFT);
- hash_shift = PAGE_SHIFT;
- hash_index = (va >> hash_shift) & HASH_MASK(th);
- entry = (&th->th_hashtable[hash_index]);
-
tte_tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));
s = hash_bucket_lock(entry->the_fields);
@@ -465,8 +492,6 @@
return (tte_data);
}
-
-
uint64_t
tte_hash_set_scratchpad_kernel(tte_hash_t th)
{
@@ -486,7 +511,7 @@
{
uint64_t hash_scratch;
- /* This will break if a hash table ever grows above 64MB
+ /* This will break if a hash table ever grows above 32MB
* 2^(13+13)
*/
th->th_context = (uint16_t)context;
@@ -500,40 +525,29 @@
tte_hash_update(tte_hash_t th, vm_offset_t va, tte_t tte_data)
{
- uint64_t hash_shift, hash_index, s;
- tte_hash_entry_t entry, newentry;
+ uint64_t s;
+ tte_hash_entry_t entry;
tte_t otte_data, tte_tag;
- /* XXX - only handle 8K pages for now */
- hash_shift = PAGE_SHIFT;
- hash_index = (va >> hash_shift) & HASH_MASK(th);
- entry = (&th->th_hashtable[hash_index]);
+ entry = find_entry(th, va, PAGE_SHIFT);
tte_tag = (((uint64_t)th->th_context << TTARGET_CTX_SHIFT)|(va >> TTARGET_VA_SHIFT));
s = hash_bucket_lock(entry->the_fields);
otte_data = tte_hash_lookup_inline(entry, tte_tag, TRUE);
- if ((otte_data == 0) && ((struct of_field *)PCPU_GET(lookup_field))->flags == MAGIC_VALUE) {
+ if (otte_data == 0) {
+ hash_bucket_unlock(entry->the_fields, s);
+ tte_hash_insert(th, va, tte_data);
+ } else {
+ tte_hash_set_field((tte_hash_field_t)PCPU_GET(lookup_field),
+ tte_tag, tte_data);
hash_bucket_unlock(entry->the_fields, s);
- newentry = tte_hash_allocate_fragment_entry(th);
- s = hash_bucket_lock(entry->the_fields);
- ((struct of_field *)PCPU_GET(lookup_field))->next = newentry;
- newentry->of.count = 1;
- PCPU_SET(lookup_field, (u_long)&newentry[0]);
}
-
- tte_hash_set_field((tte_hash_field_t)PCPU_GET(lookup_field), tte_tag, tte_data);
-
- hash_bucket_unlock(entry->the_fields, s);
#ifdef DEBUG
if (tte_hash_lookup(th, va) == 0)
panic("va=0x%lx not found", va);
#endif
-
- if (otte_data == 0)
- th->th_entries++;
-
return (otte_data);
}
More information about the p4-projects
mailing list