PERFORCE change 97607 for review
Kip Macy
kmacy at FreeBSD.org
Mon May 22 08:24:58 UTC 2006
http://perforce.freebsd.org/chv.cgi?CH=97607
Change 97607 by kmacy at kmacy_storage:sun4v_work on 2006/05/22 08:23:21
fix alignment fault handling by setting %g1 correctly to point to the trap() handler
implement pmap_ts_referenced
clear TSB when doing pmap_invalidate on exit from pmap_enter
remove race condition in resetting pm_tlbactive
remove cruft from pmap_ipi
GIANT should no longer be needed in swap_pager
Affected files ...
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/exception.S#56 edit
.. //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#54 edit
.. //depot/projects/kmacy_sun4v/src/sys/vm/swap_pager.c#5 edit
Differences ...
==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/exception.S#56 (text+ko) ====
@@ -322,13 +322,18 @@
! mov MMFSA_D_CTX, %g7
ldxa [%g1 + %g3]ASI_REAL, %g3
! ldxa [%g1 + %g7]ASI_REAL, %g4
+ ba,a,pt %xcc, align_fault
+ .align 32
+ .endm
+
+ENTRY(align_fault)
+! or %g4, %g3, %g3
sub %g0, 1, %g4
- or %g4, %g3, %g3
+ set trap, %g1
ba,pt %xcc, tl0_trap
mov T_MEM_ADDRESS_NOT_ALIGNED, %g2
- .align 32
- .endm
-
+END(align_fault)
+
.macro cpu_mondo
ba,a,pt %xcc, cpu_mondo
.align 32
==== //depot/projects/kmacy_sun4v/src/sys/sun4v/sun4v/pmap.c#54 (text+ko) ====
@@ -995,7 +995,7 @@
}
if (invlva)
- pmap_invalidate_page(pmap, va, FALSE);
+ pmap_invalidate_page(pmap, va, TRUE);
sched_unpin();
PMAP_UNLOCK(pmap);
@@ -1150,20 +1150,20 @@
}
#ifdef SMP
-static void
+static cpumask_t
pmap_ipi(pmap_t pmap, char *func, uint64_t arg1, uint64_t arg2)
{
- int i, active, cpu_count;
+ int i, cpu_count;
u_int cpus;
- cpumask_t cpumask;
+ cpumask_t cpumask, active;
+ cpumask_t active_total, ackmask;
uint16_t *cpulist;
- uint32_t ackmask, ackexpect;
- int inext;
+
if (!smp_started)
- return;
-
+ return (0);
+
cpumask = PCPU_GET(cpumask);
cpulist = PCPU_GET(cpulist);
@@ -1178,68 +1178,60 @@
#endif
- if (cpumask == pmap->pm_tlbactive)
- return;
+ if ((active_total = (pmap->pm_tlbactive & ~cpumask)) == 0)
+ goto done;
if (pmap->pm_context != 0)
- active = (pmap->pm_tlbactive & ~cpumask);
+ active_total = active = (pmap->pm_tlbactive & ~cpumask);
else
- active = PCPU_GET(other_cpus);
+ active_total = active = PCPU_GET(other_cpus);
+ if (active_total == 0)
+ goto done;
+ retry:
- for (cpu_count = 0, i = 0, ackexpect = 0, cpus = active; i < 32 && cpus;) {
-
- if (!(cpus & 0x1) /*|| (i & ~0x3) == (curcpu & ~0x3) */ )
- {
- cpus = cpus >> 1;
- i++;
+ for (cpu_count = 0, i = 0, cpus = active; i < 32 && cpus; i++, cpus >>= 1) {
+ if (!(cpus & 0x1))
continue;
- }
cpulist[cpu_count] = (uint16_t)i;
cpu_count++;
- ackexpect |= (1 << i);
- inext = i++;
- cpus = cpus >> 1;
-
-
}
-
- if (cpu_count == 0)
- return;
-
ackmask = 0;
cpu_ipi_selected(cpu_count, cpulist, (uint64_t)func, (uint64_t)arg1,
(uint64_t)arg2, (uint64_t *)&ackmask);
- while (ackmask != ackexpect) {
+ while (ackmask != active) {
DELAY(1);
i++;
if (i > 1000000)
- panic(" ackmask=0x%x active=0x%x\n", ackmask, ackexpect);
+ panic(" ackmask=0x%x active=0x%x\n", ackmask, active);
}
+
+ active_total |= active;
+ if ((active = (pmap->pm_tlbactive & ~(active_total|cpumask))) != 0)
+ goto retry;
+
+ done:
+ return (active_total);
}
#endif
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va, int cleartsb)
{
- spinlock_enter();
if (cleartsb == TRUE)
tsb_clear_tte(&pmap->pm_tsb, va);
DPRINTF("pmap_invalidate_page(va=0x%lx)\n", va);
+ spinlock_enter();
invlpg(va, pmap->pm_context);
-
#ifdef SMP
pmap_ipi(pmap, (void *)tl_invlpg, (uint64_t)va, (uint64_t)pmap->pm_context);
#endif
spinlock_exit();
-
-
-
}
void
@@ -1248,6 +1240,7 @@
vm_offset_t tva;
#ifdef SMP
char *func;
+ cpumask_t active;
#endif
@@ -1259,13 +1252,11 @@
if (sva >= eva)
panic("invalidating negative or zero range sva=0x%lx eva=0x%lx", sva, eva);
-
- spinlock_enter();
-
if (cleartsb == TRUE)
tsb_clear_range(&pmap->pm_tsb, sva, eva);
- if ((sva - eva) < PAGE_SIZE*32 ) {
+ spinlock_enter();
+ if ((sva - eva) < PAGE_SIZE*64) {
for (tva = sva; tva < eva; tva += PAGE_SIZE_8K)
invlpg(tva, pmap->pm_context);
} else if (pmap->pm_context)
@@ -1279,46 +1270,35 @@
else
func = tl_invlctx;
- pmap_ipi(pmap, (void *)func, pmap->pm_context, 0);
- if (pmap != kernel_pmap)
- pmap->pm_tlbactive = pmap->pm_active;
+ active = pmap_ipi(pmap, (void *)func, pmap->pm_context, 0);
+ active &= ~pmap->pm_active;
+ atomic_clear_int(&pmap->pm_tlbactive, active);
#endif
-
spinlock_exit();
}
void
pmap_invalidate_all(pmap_t pmap)
{
- char *func;
- spinlock_enter();
-
if (pmap == kernel_pmap)
panic("invalidate_all called on kernel_pmap");
tsb_clear(&pmap->pm_tsb);
- if (pmap->pm_context) {
- invlctx(pmap->pm_context);
- func = tl_invlctx;
- } else {
- invltlb();
- func = tl_invltlb;
- }
-
+ spinlock_enter();
+ invlctx(pmap->pm_context);
#ifdef SMP
- pmap_ipi(pmap, func, pmap->pm_context, 0);
+ pmap_ipi(pmap, tl_invlctx, pmap->pm_context, 0);
pmap->pm_tlbactive = pmap->pm_active;
#endif
-
spinlock_exit();
}
boolean_t
pmap_is_modified(vm_page_t m)
{
- return tte_get_phys_bit(m, VTD_W);
+ return (tte_get_phys_bit(m, VTD_W));
}
@@ -1847,8 +1827,46 @@
int
pmap_ts_referenced(vm_page_t m)
{
- UNIMPLEMENTED;
- return (0);
+
+ int rv;
+ pv_entry_t pv, pvf, pvn;
+ pmap_t pmap;
+
+ rv = 0;
+ if (m->flags & PG_FICTITIOUS)
+ return (rv);
+
+ sched_pin();
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
+
+ pvf = pv;
+
+ do {
+ pvn = TAILQ_NEXT(pv, pv_list);
+
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+
+ pmap = pv->pv_pmap;
+ PMAP_LOCK(pmap);
+ if ((tte_hash_clear_bits(pmap->pm_hash, pv->pv_va, VTD_REF) & VTD_REF) != 0) {
+ pmap_invalidate_page(pmap, pv->pv_va, TRUE);
+
+ rv++;
+ if (rv > 4) {
+ PMAP_UNLOCK(pmap);
+ break;
+ }
+ }
+
+ PMAP_UNLOCK(pmap);
+ } while ((pv = pvn) != NULL && pv != pvf);
+ }
+ sched_unpin();
+
+ return (rv);
}
void
==== //depot/projects/kmacy_sun4v/src/sys/vm/swap_pager.c#5 (text+ko) ====
@@ -1158,7 +1158,6 @@
int i;
int n = 0;
- GIANT_REQUIRED;
if (count && m[0]->object != object) {
panic("swap_pager_getpages: object mismatch %p/%p",
object,
More information about the p4-projects
mailing list