PERFORCE change 158933 for review
Arnar Mar Sig
antab at FreeBSD.org
Mon Mar 9 08:50:49 PDT 2009
http://perforce.freebsd.org/chv.cgi?CH=158933
Change 158933 by antab at antab_farm on 2009/03/09 15:49:56
Cleanup and bug fixes in pmap and related code. PD_MASK and PT_MASK were wrongly defined, causing overlapping lookups in pmap_pte.
Add updated version of cfi_disk from Sam Leffler.
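(For context: the overlap comes from how pmap_pte derives its two indices from these masks. The sketch below is not code from the tree; it is a minimal illustration, assuming the indices are formed as (va & MASK) >> SHIFT over a hypothetical flat page directory, of why the mask bit ranges matter.)

#include <stdint.h>
#include <stddef.h>

typedef uint32_t pt_entry_t;

/* Values mirror the updated pte.h below. */
#define PD_MASK  0xffe00000	/* bits selecting the page-directory index */
#define PT_MASK  0x003ff000	/* bits selecting the page-table index */
#define PD_SHIFT 22
#define PT_SHIFT 12

/*
 * Illustrative two-level lookup in the style of pmap_pte().  The real
 * function takes a pmap and walks its own directory; "pdir" here is a
 * hypothetical flat array of page-table pointers.  If the masked/shifted
 * bit ranges do not cover the whole VA page number, distinct virtual
 * addresses can resolve to the same (pdi, pti) pair.
 */
static pt_entry_t *
example_pmap_pte(pt_entry_t **pdir, uint32_t va)
{
	unsigned int pdi = (va & PD_MASK) >> PD_SHIFT;	/* directory slot */
	unsigned int pti = (va & PT_MASK) >> PT_SHIFT;	/* table slot */

	if (pdir[pdi] == NULL)
		return (NULL);
	return (&pdir[pdi][pti]);
}

Under that assumption, the old values (PD_MASK 0xfff00000, PT_MASK 0x000ff000) left virtual-address bits 20-21 out of both index calculations, so addresses differing only in those bits resolved to the same page-table slot; the new values cover bits 12-31 without a gap.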
Affected files ...
.. //depot/projects/avr32/src/sys/avr32/avr32/machdep.c#10 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/pmap.c#13 edit
.. //depot/projects/avr32/src/sys/avr32/avr32/tlb.c#4 edit
.. //depot/projects/avr32/src/sys/avr32/include/atomic.h#6 edit
.. //depot/projects/avr32/src/sys/avr32/include/pmap.h#6 edit
.. //depot/projects/avr32/src/sys/avr32/include/pte.h#5 edit
.. //depot/projects/avr32/src/sys/avr32/include/tlb.h#4 edit
.. //depot/projects/avr32/src/sys/dev/cfi/cfi_disk.c#2 edit
Differences ...
==== //depot/projects/avr32/src/sys/avr32/avr32/machdep.c#10 (text+ko) ====
@@ -61,6 +61,7 @@
#include <machine/cache.h>
#include <machine/cpu.h>
#include <machine/reg.h>
+#include <machine/reg_sys.h>
#include <machine/pcb.h>
#include <machine/frame.h>
#include <machine/uboot.h>
@@ -282,8 +283,7 @@
u_int32_t
get_cyclecount(void)
{
- avr32_impl();
- return (0);
+ return sysreg_read(COUNT);
}
int
==== //depot/projects/avr32/src/sys/avr32/avr32/pmap.c#13 (text+ko) ====
@@ -15,6 +15,7 @@
#include <sys/sched.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/uma.h>
@@ -31,6 +32,7 @@
#include <machine/reg.h>
#include <machine/reg_sys.h>
#include <machine/trap.h>
+#include <machine/cache.h>
#include <machine/debug.h>
// antab: What does this stand for?
@@ -42,15 +44,14 @@
static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
static int page_is_managed(vm_offset_t pa);
static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m, boolean_t wired);
-static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
+static void pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va);
static void free_pv_entry(pv_entry_t pv);
static pv_entry_t get_pv_entry(void);
static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
-static struct pmap kernel_pmap_store;
-pmap_t kernel_pmap; /**< Kernel pmap */
+struct pmap kernel_pmap_store;
vm_offset_t kernel_vm_end = 0;
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
@@ -61,8 +62,18 @@
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
+static int shpgperproc = PMAP_SHPGPERPROC;
+/**
+ * Sysctls for tuning
+ */
+// i386 and amd64 use _vm_pmap, but I can't find where it is declared.
+SYSCTL_INT(_vm, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
+ "Max number of PV entries");
+SYSCTL_INT(_vm, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
+ "Page share factor per proc");
+
pt_entry_t
*pmap_pte(pmap_t pmap, vm_offset_t va)
{
@@ -86,7 +97,6 @@
virtual_end = VM_MAX_KERNEL_ADDRESS;
/* Setup kernel pmap */
- kernel_pmap = &kernel_pmap_store;
PMAP_LOCK_INIT(kernel_pmap);
kernel_pmap->pm_active = ~0;
kernel_pmap->pm_asid = 0;
@@ -137,10 +147,19 @@
void
pmap_init(void)
{
+ /*
+ * Initialize the address space (zone) for the pv entries. Set a
+ * high water mark so that the system can recover from excessive
+ * numbers of pv entries.
+ */
pvzone = uma_zcreate("PV_ENTRY", sizeof(struct pv_entry), NULL, NULL,
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
- pv_entry_max = PMAP_SHPGPERPROC * maxproc + cnt.v_page_count;
+
+ TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
+ TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
+ pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
pv_entry_high_water = 9 * (pv_entry_max / 10);
+
uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
}
@@ -200,13 +219,16 @@
void
pmap_activate(struct thread *td)
{
- struct proc *p;
- pmap_t pmap;
+ pmap_t pmap, oldpmap;
+
+ pmap = vmspace_pmap(td->td_proc->p_vmspace);
+ oldpmap = PCPU_GET(curpmap);
- p = td->td_proc;
- pmap = vmspace_pmap(p->p_vmspace);
+ oldpmap->pm_active = 0;
+ pmap->pm_active = 1;
- pmap_asid_alloc(pmap);
+ pmap_asid_alloc(pmap);
+ /* XXX: Set tlbear here? */
PCPU_SET(curpmap, pmap);
}
@@ -220,10 +242,28 @@
void
pmap_clear_modify(vm_page_t m)
{
+ pv_entry_t pv;
+ pt_entry_t *pte;
+
+ if (m->flags & PG_FICTITIOUS) {
+ return;
+ }
+
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if (m->md.pv_flags & PV_TABLE_MOD) {
- avr32_impl();
- //pmap_changebit(m, PTE_M, FALSE); TODO
+ panic("Need to look more into this");
+ /*
+ * Loop over all current mappings
+ */
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+ PMAP_LOCK(pv->pv_pmap);
+
+ pte = pmap_pte(pv->pv_pmap, pv->pv_va);
+ KASSERT((pte != NULL && *pte != 0), ("Mapped page not found"));
+ *pte &= ~PTE_DIRTY;
+
+ PMAP_UNLOCK(pv->pv_pmap);
+ }
m->md.pv_flags &= ~PV_TABLE_MOD;
}
}
@@ -291,6 +331,8 @@
return (AVR32_P1_TO_PHYS(va));
else if ((va & AVR32_SEG_MASK) == AVR32_SEG_P2)
return (AVR32_P2_TO_PHYS(va));
+ else if ((va & AVR32_SEG_MASK) == AVR32_SEG_P4)
+ return (AVR32_P2_TO_PHYS(va));
return (pmap_extract(kernel_pmap, va));
}
@@ -413,6 +455,7 @@
vm_page_lock_queues();
PMAP_LOCK(pmap);
+ va &= ~PAGE_MASK;
mpte = NULL;
if (va < VM_MAXUSER_ADDRESS) {
mpte = pmap_allocpte(pmap, va, M_WAITOK);
@@ -491,13 +534,14 @@
update:
newpte = 0;
pfn_set(newpte, pa);
- if (access & VM_PROT_READ) {
+ if (prot & VM_PROT_READ) {
newpte |= PTE_PERM_READ;
}
- if (access & VM_PROT_WRITE) {
+ if (prot & VM_PROT_WRITE) {
newpte |= PTE_PERM_WRITE;
+ vm_page_flag_set(m, PG_WRITEABLE);
}
- if (access & VM_PROT_EXECUTE) {
+ if (prot & VM_PROT_EXECUTE) {
newpte |= PTE_PERM_EXECUTE;
}
@@ -530,6 +574,17 @@
}
tlb_update_entry(pmap, va, newpte);
+ /*
+ * XXX: Sync I & D caches for executable pages. Do this only if the
+ * target pmap belongs to the current process. Otherwise, an
+ * unresolvable TLB miss may occur.
+ */
+ if (pmap != kernel_pmap && (pmap == &curproc->p_vmspace->vm_pmap) &&
+ (prot & VM_PROT_EXECUTE)) {
+ avr32_icache_sync_range(va, PAGE_SIZE);
+ avr32_dcache_wbinv_range(va, PAGE_SIZE);
+ }
+
vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
@@ -653,8 +708,9 @@
vm_offset_t va;
pt_entry_t *pte;
- if ((m->flags & PG_WRITEABLE) == 0)
- return;
+ if ((m->flags & PG_WRITEABLE) == 0) {
+ return;
+ }
/*
* Loop over all current mappings setting/clearing as appropos.
@@ -881,11 +937,11 @@
page->wire_count++;
} else {
page = _pmap_allocpte(pmap, pdindex, flags);
- if (!page && flags & M_WAITOK) {
+ if (page == NULL && flags & M_WAITOK) {
goto retry;
}
}
- return page;
+ return (page);
}
static int
@@ -922,7 +978,7 @@
}
static void
-pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va)
+pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
{
pv_entry_t pv;
@@ -1005,9 +1061,9 @@
for (p = 0; p < 1024; p++) {
ent = base + p;
if (*ent) {
- printf("0x%x -> 0x%x\n",
+ printf("0x%08x -> 0x%08x (flags 0x%08x)\n",
(i << PD_SHIFT) | (p << PT_SHIFT),
- *ent);
+ *ent & ~PAGE_MASK, *ent & PAGE_MASK);
}
}
}
==== //depot/projects/avr32/src/sys/avr32/avr32/tlb.c#4 (text+ko) ====
@@ -98,6 +98,19 @@
}
}
+void
+tlb_invalidate_range(pmap_t pmap, vm_offset_t start_va, vm_offset_t end_va)
+{
+ /*
+ * Look more into this. Maybe it's better to loop through the TLB
+ * and invalidate entries that are within start_va and end_va,
+ * instead of trying to invalidate every page between them.
+ */
+ for (; start_va < end_va; start_va += PAGE_SIZE) {
+ tlb_remove_entry(pmap, start_va);
+ }
+}
+
static void
tlb_dump_entry(uint32_t index, uint32_t tlbehi, uint32_t tlbelo)
{
==== //depot/projects/avr32/src/sys/avr32/include/atomic.h#6 (text+ko) ====
@@ -171,11 +171,21 @@
}
static __inline uint32_t
-atomic_readandclear_32(volatile u_int32_t *p)
+atomic_readandclear_32(volatile u_int32_t *address)
{
- avr32_impl();
- while(1);
- return 0;
+ uint32_t ret, tmp;
+ __asm __volatile(
+ "1:"
+ "ssrf 5\n"
+ "ld.w %0, %3\n"
+ "mov %2, 0\n"
+ "stcond %1, %2\n"
+ "brne 1b\n"
+ : "=&r"(ret), "=m"(*address), "=r" (tmp)
+ : "m"(*address)
+ : "cc", "memory");
+
+ return (ret);
}
static __inline int
==== //depot/projects/avr32/src/sys/avr32/include/pmap.h#6 (text+ko) ====
@@ -134,7 +134,8 @@
} *pmap_t;
#ifdef _KERNEL
-#define pmap_kernel() kernel_pmap
+extern struct pmap kernel_pmap_store;
+#define kernel_pmap (&kernel_pmap_store)
#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx)
#define PMAP_LOCK_ASSERT(pmap, type) mtx_assert(&(pmap)->pm_mtx, (type))
@@ -172,8 +173,6 @@
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
-//extern struct segtab * segtab_active;
-extern pmap_t kernel_pmap;
extern vm_offset_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
==== //depot/projects/avr32/src/sys/avr32/include/pte.h#5 (text+ko) ====
@@ -37,8 +37,8 @@
typedef uint32_t pt_entry_t; /* page table entry (TLBELO register) */
#endif
-#define PD_MASK 0xfff00000 /* Bits used to index into page dir */
-#define PT_MASK 0x000ff000 /* Bits used to index into page table */
+#define PD_MASK 0xffe00000 /* Bits used to index into page dir */
+#define PT_MASK 0x003ff000 /* Bits used to index into page table */
#define PD_SHIFT 22
#define PT_SHIFT 12
==== //depot/projects/avr32/src/sys/avr32/include/tlb.h#4 (text+ko) ====
@@ -35,6 +35,7 @@
void tlb_flush(void); /* Invalid all TLB entries */
void tlb_update_entry(pmap_t, vm_offset_t, pt_entry_t);
void tlb_remove_entry(pmap_t, vm_offset_t);
+void tlb_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
#endif /* !_MACHINE_TLB_H_ */
==== //depot/projects/avr32/src/sys/dev/cfi/cfi_disk.c#2 (text+ko) ====
@@ -32,10 +32,13 @@
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/kernel.h>
-#include <sys/malloc.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
#include <machine/bus.h>
@@ -48,6 +51,10 @@
struct disk *disk;
int flags;
#define CFI_DISK_OPEN 0x0001
+ struct bio_queue_head bioq; /* bio queue */
+ struct mtx qlock; /* bioq lock */
+ struct taskqueue *tq; /* private task queue for i/o requests */
+ struct task iotask; /* i/o processing */
};
#define CFI_DISK_SECSIZE 512
@@ -56,6 +63,7 @@
static int cfi_disk_detach(device_t);
static int cfi_disk_open(struct disk *);
static int cfi_disk_close(struct disk *);
+static void cfi_io_proc(void *, int);
static void cfi_disk_strategy(struct bio *);
static int cfi_disk_ioctl(struct disk *, u_long, void *, int, struct thread *);
@@ -98,6 +106,15 @@
sc->disk->d_drv1 = sc;
disk_create(sc->disk, DISK_VERSION);
+ mtx_init(&sc->qlock, "CFID I/O lock", NULL, MTX_DEF);
+ bioq_init(&sc->bioq);
+
+ sc->tq = taskqueue_create("cfid_taskq", M_NOWAIT,
+ taskqueue_thread_enqueue, &sc->tq);
+ taskqueue_start_threads(&sc->tq, 1, PI_DISK, "cfid taskq");
+
+ TASK_INIT(&sc->iotask, 0, cfi_io_proc, sc);
+
return 0;
}
@@ -108,7 +125,10 @@
if (sc->flags & CFI_DISK_OPEN)
return EBUSY;
+ taskqueue_free(sc->tq);
+ /* XXX drain bioq */
disk_destroy(sc->disk);
+ mtx_destroy(&sc->qlock);
return 0;
}
@@ -224,10 +244,34 @@
}
static void
+cfi_io_proc(void *arg, int pending)
+{
+ struct cfi_disk_softc *sc = arg;
+ struct cfi_softc *cfi = sc->parent;
+ struct bio *bp;
+
+ for (;;) {
+ mtx_lock(&sc->qlock);
+ bp = bioq_takefirst(&sc->bioq);
+ mtx_unlock(&sc->qlock);
+ if (bp == NULL)
+ break;
+
+ switch (bp->bio_cmd) {
+ case BIO_READ:
+ cfi_disk_read(cfi, bp);
+ break;
+ case BIO_WRITE:
+ cfi_disk_write(cfi, bp);
+ break;
+ }
+ }
+}
+
+static void
cfi_disk_strategy(struct bio *bp)
{
struct cfi_disk_softc *sc = bp->bio_disk->d_drv1;
- struct cfi_softc *cfi = sc->parent;
if (sc == NULL)
goto invalid;
@@ -238,10 +282,12 @@
}
switch (bp->bio_cmd) {
case BIO_READ:
- cfi_disk_read(cfi, bp);
- return;
case BIO_WRITE:
- cfi_disk_write(cfi, bp);
+ mtx_lock(&sc->qlock);
+ /* no value in sorting requests? */
+ bioq_insert_tail(&sc->bioq, bp);
+ mtx_unlock(&sc->qlock);
+ taskqueue_enqueue(sc->tq, &sc->iotask);
return;
}
/* fall thru... */
@@ -271,4 +317,3 @@
sizeof(struct cfi_disk_softc),
};
DRIVER_MODULE(cfid, cfi, cfi_disk_driver, cfi_diskclass, 0, NULL);
-