svn commit: r295037 - in head/sys: arm/arm conf
Michal Meloun
mmel at FreeBSD.org
Fri Jan 29 11:00:34 UTC 2016
Author: mmel
Date: Fri Jan 29 11:00:33 2016
New Revision: 295037
URL: https://svnweb.freebsd.org/changeset/base/295037
Log:
ARM: After removal of old pmap-v6 code, rename pmap-v6-new.c to pmap-v6.c.
Added:
head/sys/arm/arm/pmap-v6.c
- copied unchanged from r295036, head/sys/arm/arm/pmap-v6-new.c
Deleted:
head/sys/arm/arm/pmap-v6-new.c
Modified:
head/sys/conf/files.arm
Copied: head/sys/arm/arm/pmap-v6.c (from r295036, head/sys/arm/arm/pmap-v6-new.c)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ head/sys/arm/arm/pmap-v6.c Fri Jan 29 11:00:33 2016 (r295037, copy of r295036, head/sys/arm/arm/pmap-v6-new.c)
@@ -0,0 +1,6634 @@
+/*-
+ * Copyright (c) 1991 Regents of the University of California.
+ * Copyright (c) 1994 John S. Dyson
+ * Copyright (c) 1994 David Greenman
+ * Copyright (c) 2005-2010 Alan L. Cox <alc at cs.rice.edu>
+ * Copyright (c) 2014 Svatopluk Kraus <onwahe at gmail.com>
+ * Copyright (c) 2014 Michal Meloun <meloun at miracle.cz>
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
+ */
+/*-
+ * Copyright (c) 2003 Networks Associates Technology, Inc.
+ * All rights reserved.
+ *
+ * This software was developed for the FreeBSD Project by Jake Burkholder,
+ * Safeport Network Services, and Network Associates Laboratories, the
+ * Security Research Division of Network Associates, Inc. under
+ * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
+ * CHATS research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Manages physical address maps.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidation or reduced-protection
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and when physical maps must be made correct.
+ */
+
+#include "opt_vm.h"
+#include "opt_pmap.h"
+#include "opt_ddb.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/malloc.h>
+#include <sys/vmmeter.h>
+#include <sys/mman.h>
+#include <sys/sf_buf.h>
+#include <sys/sched.h>
+#include <sys/sysctl.h>
+#ifdef SMP
+#include <sys/smp.h>
+#else
+#include <sys/cpuset.h>
+#endif
+
+#ifdef DDB
+#include <ddb/ddb.h>
+#endif
+
+#include <machine/physmem.h>
+#include <machine/vmparam.h>
+
+#include <vm/vm.h>
+#include <vm/uma.h>
+#include <vm/pmap.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_phys.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_reserv.h>
+#include <sys/mutex.h>
+
+#include <machine/md_var.h>
+#include <machine/pmap_var.h>
+#include <machine/cpu.h>
+#include <machine/cpu-v6.h>
+#include <machine/pcb.h>
+#include <machine/sf_buf.h>
+#ifdef SMP
+#include <machine/smp.h>
+#endif
+
+#ifndef PMAP_SHPGPERPROC
+#define PMAP_SHPGPERPROC 200
+#endif
+
+#ifndef DIAGNOSTIC
+#define PMAP_INLINE __inline
+#else
+#define PMAP_INLINE
+#endif
+
+#ifdef PMAP_DEBUG
+static void pmap_zero_page_check(vm_page_t m);
+void pmap_debug(int level);
+int pmap_pid_dump(int pid);
+
+#define PDEBUG(_lev_,_stat_) \
+ if (pmap_debug_level >= (_lev_)) \
+ ((_stat_))
+#define dprintf printf
+int pmap_debug_level = 1;
+#else /* PMAP_DEBUG */
+#define PDEBUG(_lev_,_stat_) /* Nothing */
+#define dprintf(x, arg...)
+#endif /* PMAP_DEBUG */
+
+/*
+ * Level 2 page table map definitions ('max' is excluded).
+ */
+
+#define PT2V_MIN_ADDRESS ((vm_offset_t)PT2MAP)
+#define PT2V_MAX_ADDRESS ((vm_offset_t)PT2MAP + PT2MAP_SIZE)
+
+#define UPT2V_MIN_ADDRESS ((vm_offset_t)PT2MAP)
+#define UPT2V_MAX_ADDRESS \
+ ((vm_offset_t)(PT2MAP + (KERNBASE >> PT2MAP_SHIFT)))
+
+/*
+ * Promotion to a 1MB (PTE1) page mapping requires that the corresponding
+ * 4KB (PTE2) page mappings have identical settings for the following fields:
+ */
+#define PTE2_PROMOTE (PTE2_V | PTE2_A | PTE2_NM | PTE2_S | PTE2_NG | \
+ PTE2_NX | PTE2_RO | PTE2_U | PTE2_W | \
+ PTE2_ATTR_MASK)
+
+#define PTE1_PROMOTE (PTE1_V | PTE1_A | PTE1_NM | PTE1_S | PTE1_NG | \
+ PTE1_NX | PTE1_RO | PTE1_U | PTE1_W | \
+ PTE1_ATTR_MASK)
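
For illustration only (not part of this diff), a minimal sketch of the check
these masks enable; pte2_load(), pte2_pa(), PTE2_SIZE and NPTE2_IN_PT2 are
used as elsewhere in this file, the function name is hypothetical, and the
real promotion path checks more (e.g. alignment of the first frame):

	static __inline boolean_t
	pte2s_can_promote(pt2_entry_t *fpte2p)
	{
		pt2_entry_t fpte2, pte2;
		u_int i;

		fpte2 = pte2_load(fpte2p);
		for (i = 1; i < NPTE2_IN_PT2; i++) {
			pte2 = pte2_load(fpte2p + i);
			/* Frames must be physically consecutive ... */
			if (pte2_pa(pte2) != pte2_pa(fpte2) + i * PTE2_SIZE)
				return (FALSE);
			/* ... and all PTE2_PROMOTE fields identical. */
			if ((pte2 ^ fpte2) & PTE2_PROMOTE)
				return (FALSE);
		}
		return (TRUE);
	}
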
+
+#define ATTR_TO_L1(l2_attr) ((((l2_attr) & L2_TEX0) ? L1_S_TEX0 : 0) | \
+ (((l2_attr) & L2_C) ? L1_S_C : 0) | \
+ (((l2_attr) & L2_B) ? L1_S_B : 0) | \
+ (((l2_attr) & PTE2_A) ? PTE1_A : 0) | \
+ (((l2_attr) & PTE2_NM) ? PTE1_NM : 0) | \
+ (((l2_attr) & PTE2_S) ? PTE1_S : 0) | \
+ (((l2_attr) & PTE2_NG) ? PTE1_NG : 0) | \
+ (((l2_attr) & PTE2_NX) ? PTE1_NX : 0) | \
+ (((l2_attr) & PTE2_RO) ? PTE1_RO : 0) | \
+ (((l2_attr) & PTE2_U) ? PTE1_U : 0) | \
+ (((l2_attr) & PTE2_W) ? PTE1_W : 0))
+
+#define ATTR_TO_L2(l1_attr) ((((l1_attr) & L1_S_TEX0) ? L2_TEX0 : 0) | \
+ (((l1_attr) & L1_S_C) ? L2_C : 0) | \
+ (((l1_attr) & L1_S_B) ? L2_B : 0) | \
+ (((l1_attr) & PTE1_A) ? PTE2_A : 0) | \
+ (((l1_attr) & PTE1_NM) ? PTE2_NM : 0) | \
+ (((l1_attr) & PTE1_S) ? PTE2_S : 0) | \
+ (((l1_attr) & PTE1_NG) ? PTE2_NG : 0) | \
+ (((l1_attr) & PTE1_NX) ? PTE2_NX : 0) | \
+ (((l1_attr) & PTE1_RO) ? PTE2_RO : 0) | \
+ (((l1_attr) & PTE1_U) ? PTE2_U : 0) | \
+ (((l1_attr) & PTE1_W) ? PTE2_W : 0))
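
A sanity sketch (not in this diff): on the bits covered above, the two
conversions invert each other, assuming the L1 target bits (L1_S_TEX0,
L1_S_C, L1_S_B and the PTE1_* flags) are mutually distinct:

	CTASSERT(ATTR_TO_L2(ATTR_TO_L1(L2_C | PTE2_A | PTE2_S)) ==
	    (L2_C | PTE2_A | PTE2_S));
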
+
+/*
+ * PTE2 descriptors creation macros.
+ */
+#define PTE2_KPT(pa) PTE2_KERN(pa, PTE2_AP_KRW, pt_memattr)
+#define PTE2_KPT_NG(pa) PTE2_KERN_NG(pa, PTE2_AP_KRW, pt_memattr)
+
+#define PTE2_KRW(pa) PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_NORMAL)
+#define PTE2_KRO(pa) PTE2_KERN(pa, PTE2_AP_KR, PTE2_ATTR_NORMAL)
+
+#define PV_STATS
+#ifdef PV_STATS
+#define PV_STAT(x) do { x ; } while (0)
+#else
+#define PV_STAT(x) do { } while (0)
+#endif
+
+/*
+ * The boot_pt1 is used temporarily in a very early boot stage as the L1
+ * page table. We can init many things with no memory allocation thanks to
+ * its static allocation and this brings two main advantages:
+ * (1) other cores can be started very simply,
+ * (2) various boot loaders can be supported as their arguments can be
+ * processed in virtual address space and can be moved to a safe location
+ * before the first allocation happens.
+ * The only disadvantage is that boot_pt1 is used only in this very early
+ * boot stage. However, the table is uninitialized and so lies in bss.
+ * Therefore the kernel image size is not influenced.
+ *
+ * QQQ: In the future, maybe, boot_pt1 can be used for soft reset and
+ * CPU suspend/resume game.
+ */
+extern pt1_entry_t boot_pt1[];
+
+vm_paddr_t base_pt1;
+pt1_entry_t *kern_pt1;
+pt2_entry_t *kern_pt2tab;
+pt2_entry_t *PT2MAP;
+
+static uint32_t ttb_flags;
+static vm_memattr_t pt_memattr;
+ttb_entry_t pmap_kern_ttb;
+
+/* XXX use conversion function */
+#define PTE2_ATTR_NORMAL VM_MEMATTR_DEFAULT
+#define PTE1_ATTR_NORMAL ATTR_TO_L1(PTE2_ATTR_NORMAL)
+
+struct pmap kernel_pmap_store;
+LIST_HEAD(pmaplist, pmap);
+static struct pmaplist allpmaps;
+static struct mtx allpmaps_lock;
+
+vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
+vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
+
+static vm_offset_t kernel_vm_end_new;
+vm_offset_t kernel_vm_end = KERNBASE + NKPT2PG * NPT2_IN_PG * PTE1_SIZE;
+vm_offset_t vm_max_kernel_address;
+vm_paddr_t kernel_l1pa;
+
+static struct rwlock __aligned(CACHE_LINE_SIZE) pvh_global_lock;
+
+/*
+ * Data for the pv entry allocation mechanism
+ */
+static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
+static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
+static struct md_page *pv_table; /* XXX: Is only the pv list in md_page used? */
+static int shpgperproc = PMAP_SHPGPERPROC;
+
+struct pv_chunk *pv_chunkbase; /* KVA block for pv_chunks */
+int pv_maxchunks; /* How many chunks we have KVA for */
+vm_offset_t pv_vafree; /* freelist stored in the PTE */
+
+vm_paddr_t first_managed_pa;
+#define pa_to_pvh(pa) (&pv_table[pte1_index((pa) - first_managed_pa)])
+
+/*
+ * All those kernel PT submaps that BSD is so fond of
+ */
+struct sysmaps {
+ struct mtx lock;
+ pt2_entry_t *CMAP1;
+ pt2_entry_t *CMAP2;
+ pt2_entry_t *CMAP3;
+ caddr_t CADDR1;
+ caddr_t CADDR2;
+ caddr_t CADDR3;
+};
+static struct sysmaps sysmaps_pcpu[MAXCPU];
+static pt2_entry_t *CMAP3;
+static caddr_t CADDR3;
+caddr_t _tmppt = 0;
+
+struct msgbuf *msgbufp = 0; /* XXX move it to machdep.c */
+
+/*
+ * Crashdump maps.
+ */
+static caddr_t crashdumpmap;
+
+static pt2_entry_t *PMAP1 = 0, *PMAP2;
+static pt2_entry_t *PADDR1 = 0, *PADDR2;
+#ifdef DDB
+static pt2_entry_t *PMAP3;
+static pt2_entry_t *PADDR3;
+static int PMAP3cpu __unused; /* for SMP only */
+#endif
+#ifdef SMP
+static int PMAP1cpu;
+static int PMAP1changedcpu;
+SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
+ &PMAP1changedcpu, 0,
+ "Number of times pmap_pte2_quick changed CPU with same PMAP1");
+#endif
+static int PMAP1changed;
+SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
+ &PMAP1changed, 0,
+ "Number of times pmap_pte2_quick changed PMAP1");
+static int PMAP1unchanged;
+SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
+ &PMAP1unchanged, 0,
+ "Number of times pmap_pte2_quick didn't change PMAP1");
+static struct mtx PMAP2mutex;
+
+static __inline void pt2_wirecount_init(vm_page_t m);
+static boolean_t pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p,
+ vm_offset_t va);
+void cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size);
+
+/*
+ * Function to set the debug level of the pmap code.
+ */
+#ifdef PMAP_DEBUG
+void
+pmap_debug(int level)
+{
+
+ pmap_debug_level = level;
+ dprintf("pmap_debug: level=%d\n", pmap_debug_level);
+}
+#endif /* PMAP_DEBUG */
+
+/*
+ * This table must correspond to the memory attribute configuration in vm.h.
+ * The first entry is used for normal system mapping.
+ *
+ * Device memory is always marked as shared.
+ * Normal memory is shared only in the SMP case.
+ * Not-outer-shareable (NOS) bits are not used yet.
+ * Class 6 cannot be used on ARM11.
+ */
+#define TEXDEF_TYPE_SHIFT 0
+#define TEXDEF_TYPE_MASK 0x3
+#define TEXDEF_INNER_SHIFT 2
+#define TEXDEF_INNER_MASK 0x3
+#define TEXDEF_OUTER_SHIFT 4
+#define TEXDEF_OUTER_MASK 0x3
+#define TEXDEF_NOS_SHIFT 6
+#define TEXDEF_NOS_MASK 0x1
+
+#define TEX(t, i, o, s) \
+ (((t) << TEXDEF_TYPE_SHIFT) | \
+ ((i) << TEXDEF_INNER_SHIFT) | \
+ ((o) << TEXDEF_OUTER_SHIFT) | \
+ ((s) << TEXDEF_NOS_SHIFT))
+
+static uint32_t tex_class[8] = {
+/* type inner cache outer cache */
+ TEX(PRRR_MEM, NMRR_WB_WA, NMRR_WB_WA, 0), /* 0 - ATTR_WB_WA */
+ TEX(PRRR_MEM, NMRR_NC, NMRR_NC, 0), /* 1 - ATTR_NOCACHE */
+ TEX(PRRR_DEV, NMRR_NC, NMRR_NC, 0), /* 2 - ATTR_DEVICE */
+ TEX(PRRR_SO, NMRR_NC, NMRR_NC, 0), /* 3 - ATTR_SO */
+ TEX(PRRR_MEM, NMRR_WT, NMRR_WT, 0), /* 4 - ATTR_WT */
+ TEX(PRRR_MEM, NMRR_NC, NMRR_NC, 0), /* 5 - NOT USED YET */
+ TEX(PRRR_MEM, NMRR_NC, NMRR_NC, 0), /* 6 - NOT USED YET */
+ TEX(PRRR_MEM, NMRR_NC, NMRR_NC, 0), /* 7 - NOT USED YET */
+};
+#undef TEX
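
For orientation (an illustrative note, not part of the diff): with the
TEXDEF_* shifts above, each tex_class entry packs as

	bits 1:0  type
	bits 3:2  inner cache
	bits 5:4  outer cache
	bit  6    NOS (not outer shareable)

encode_ttb_flags() and pmap_set_tex() below unpack exactly these fields.
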
+
+/*
+ * Convert TEX definition entry to TTB flags.
+ */
+static uint32_t
+encode_ttb_flags(int idx)
+{
+ uint32_t inner, outer, nos, reg;
+
+ inner = (tex_class[idx] >> TEXDEF_INNER_SHIFT) &
+ TEXDEF_INNER_MASK;
+ outer = (tex_class[idx] >> TEXDEF_OUTER_SHIFT) &
+ TEXDEF_OUTER_MASK;
+ nos = (tex_class[idx] >> TEXDEF_NOS_SHIFT) &
+ TEXDEF_NOS_MASK;
+
+ reg = nos << 5;
+ reg |= outer << 3;
+ if (cpuinfo.coherent_walk)
+ reg |= (inner & 0x1) << 6;
+ reg |= (inner & 0x2) >> 1;
+#ifdef SMP
+ reg |= 1 << 1;
+#endif
+ return (reg);
+}
+
+/*
+ * Set TEX remapping registers in current CPU.
+ */
+void
+pmap_set_tex(void)
+{
+ uint32_t prrr, nmrr;
+ uint32_t type, inner, outer, nos;
+ int i;
+
+#ifdef PMAP_PTE_NOCACHE
+ /* XXX fixme */
+ if (cpuinfo.coherent_walk) {
+ pt_memattr = VM_MEMATTR_WB_WA;
+ ttb_flags = encode_ttb_flags(0);
+ } else {
+ pt_memattr = VM_MEMATTR_NOCACHE;
+ ttb_flags = encode_ttb_flags(1);
+ }
+#else
+ pt_memattr = VM_MEMATTR_WB_WA;
+ ttb_flags = encode_ttb_flags(0);
+#endif
+
+ prrr = 0;
+ nmrr = 0;
+
+ /* Build remapping register from TEX classes. */
+ for (i = 0; i < 8; i++) {
+ type = (tex_class[i] >> TEXDEF_TYPE_SHIFT) &
+ TEXDEF_TYPE_MASK;
+ inner = (tex_class[i] >> TEXDEF_INNER_SHIFT) &
+ TEXDEF_INNER_MASK;
+ outer = (tex_class[i] >> TEXDEF_OUTER_SHIFT) &
+ TEXDEF_OUTER_MASK;
+ nos = (tex_class[i] >> TEXDEF_NOS_SHIFT) &
+ TEXDEF_NOS_MASK;
+
+ prrr |= type << (i * 2);
+ prrr |= nos << (i + 24);
+ nmrr |= inner << (i * 2);
+ nmrr |= outer << (i * 2 + 16);
+ }
+ /* Add shareable bits for device memory. */
+ prrr |= PRRR_DS0 | PRRR_DS1;
+
+ /* Add shareable bits for normal memory in SMP case. */
+#ifdef SMP
+ prrr |= PRRR_NS1;
+#endif
+ cp15_prrr_set(prrr);
+ cp15_nmrr_set(nmrr);
+
+ /* Caches are disabled, so full TLB flush should be enough. */
+ tlb_flush_all_local();
+}
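
A worked example (illustrative) of the packing loop above for TEX class
i = 2 (ATTR_DEVICE): type lands at PRRR[5:4], NOS at PRRR[26], inner at
NMRR[5:4] and outer at NMRR[21:20]; in general, type goes to
PRRR[2i+1:2i], NOS to PRRR[24+i], inner to NMRR[2i+1:2i] and outer to
NMRR[2i+17:2i+16].
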
+
+/*
+ * KERNBASE must be a multiple of NPT2_IN_PG * PTE1_SIZE. In other words,
+ * KERNBASE is mapped by the first L2 page table in an L2 page table page.
+ * PT2MAP meets the same constraint as it is placed just under KERNBASE.
+ */
+CTASSERT((KERNBASE & (NPT2_IN_PG * PTE1_SIZE - 1)) == 0);
+CTASSERT((KERNBASE - VM_MAXUSER_ADDRESS) >= PT2MAP_SIZE);
+
+/*
+ * In crazy dreams, PAGE_SIZE could be a multiple of PTE2_SIZE in general.
+ * For now, anyhow, the following check must be fulfilled.
+ */
+CTASSERT(PAGE_SIZE == PTE2_SIZE);
+/*
+ * We don't want to mess up MI code with all MMU and PMAP definitions,
+ * so some things, which depend on other ones, are defined independently.
+ * Now, it is time to check that we don't screw up something.
+ */
+CTASSERT(PDRSHIFT == PTE1_SHIFT);
+/*
+ * Check L1 and L2 page table entries definitions consistency.
+ */
+CTASSERT(NB_IN_PT1 == (sizeof(pt1_entry_t) * NPTE1_IN_PT1));
+CTASSERT(NB_IN_PT2 == (sizeof(pt2_entry_t) * NPTE2_IN_PT2));
+/*
+ * Check L2 page tables page consistency.
+ */
+CTASSERT(PAGE_SIZE == (NPT2_IN_PG * NB_IN_PT2));
+CTASSERT((1 << PT2PG_SHIFT) == NPT2_IN_PG);
+/*
+ * Check PT2TAB consistency.
+ * PT2TAB_ENTRIES is defined as NPTE1_IN_PT1 divided by NPT2_IN_PG;
+ * the division must leave no remainder.
+ */
+CTASSERT(NPTE1_IN_PT1 == (PT2TAB_ENTRIES * NPT2_IN_PG));
+
+/*
+ * A PT2MAP magic.
+ *
+ * All level 2 page tables (PT2s) are mapped contiguously, in order, into
+ * the PT2MAP address space. As PT2 size is less than PAGE_SIZE, this can
+ * be done only if PAGE_SIZE is a multiple of PT2 size. All PT2s in one page
+ * must be used together, but not necessarily at once. The first PT2 in a
+ * page must map a correctly aligned address and the others must follow it
+ * in the right order.
+ */
+#define NB_IN_PT2TAB (PT2TAB_ENTRIES * sizeof(pt2_entry_t))
+#define NPT2_IN_PT2TAB (NB_IN_PT2TAB / NB_IN_PT2)
+#define NPG_IN_PT2TAB (NB_IN_PT2TAB / PAGE_SIZE)
+
+/*
+ * Check PT2TAB consistency.
+ * NPT2_IN_PT2TAB is defined as NB_IN_PT2TAB divided by NB_IN_PT2.
+ * NPG_IN_PT2TAB is defined as NB_IN_PT2TAB divided by PAGE_SIZE.
+ * Both divisions must leave no remainder.
+ */
+CTASSERT(NB_IN_PT2TAB == (NPT2_IN_PT2TAB * NB_IN_PT2));
+CTASSERT(NB_IN_PT2TAB == (NPG_IN_PT2TAB * PAGE_SIZE));
+/*
+ * The implementation was made general; however, it was written with the
+ * assumption below in mind. In case NPG_IN_PT2TAB ever has another value,
+ * the code should be rechecked once more.
+ */
+CTASSERT(NPG_IN_PT2TAB == 1);
+
+/*
+ * Get offset of PT2 in a page
+ * associated with given PT1 index.
+ */
+static __inline u_int
+page_pt2off(u_int pt1_idx)
+{
+
+ return ((pt1_idx & PT2PG_MASK) * NB_IN_PT2);
+}
+
+/*
+ * Get physical address of PT2
+ * associated with given PT2s page and PT1 index.
+ */
+static __inline vm_paddr_t
+page_pt2pa(vm_paddr_t pgpa, u_int pt1_idx)
+{
+
+ return (pgpa + page_pt2off(pt1_idx));
+}
+
+/*
+ * Get first entry of PT2
+ * associated with given PT2s page and PT1 index.
+ */
+static __inline pt2_entry_t *
+page_pt2(vm_offset_t pgva, u_int pt1_idx)
+{
+
+ return ((pt2_entry_t *)(pgva + page_pt2off(pt1_idx)));
+}
+
+/*
+ * Get virtual address of PT2s page (mapped in PT2MAP)
+ * which holds PT2 which holds entry which maps given virtual address.
+ */
+static __inline vm_offset_t
+pt2map_pt2pg(vm_offset_t va)
+{
+
+ va &= ~(NPT2_IN_PG * PTE1_SIZE - 1);
+ return ((vm_offset_t)pt2map_entry(va));
+}
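
A minimal sketch (not part of this diff) of how the helpers above compose;
pte1_index() is used the same way later in this file, and the function name
is hypothetical:

	/* Return the PT2 which maps 'va', reached through PT2MAP. */
	static __inline pt2_entry_t *
	pt2_for_va_sketch(vm_offset_t va)
	{

		return (page_pt2(pt2map_pt2pg(va), pte1_index(va)));
	}
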
+
+/*****************************************************************************
+ *
+ * THREE pmap initialization milestones exist:
+ *
+ * locore.S
+ * -> fundamental init (including MMU) in ASM
+ *
+ * initarm()
+ * -> fundamental init continues in C
+ * -> first available physical address is known
+ *
+ * pmap_bootstrap_prepare() -> FIRST PMAP MILESTONE (first epoch begins)
+ * -> basic (safe) interface for physical address allocation is made
+ * -> basic (safe) interface for virtual mapping is made
+ * -> limited non-SMP-coherent work is possible
+ *
+ * -> more fundamental init continues in C
+ * -> locks and some more things are available
+ * -> all fundamental allocations and mappings are done
+ *
+ * pmap_bootstrap() -> SECOND PMAP MILESTONE (second epoch begins)
+ * -> phys_avail[] and virtual_avail are set
+ * -> control is passed to the vm subsystem
+ * -> physical and virtual address allocation are off limits
+ * -> low level mapping functions, some SMP coherent,
+ * are available; they cannot be used before the vm
+ * subsystem init is underway
+ *
+ * mi_startup()
+ * -> vm subsystem is being inited
+ *
+ * pmap_init() -> THIRD PMAP MILESTONE (third epoch begins)
+ * -> pmap is fully inited
+ *
+ *****************************************************************************/
+
+/*****************************************************************************
+ *
+ * PMAP first stage initialization and utility functions
+ * for pre-bootstrap epoch.
+ *
+ * After pmap_bootstrap_prepare() is called, the following functions
+ * can be used:
+ *
+ * (1) functions for physical page allocation, virtual space allocation,
+ * and mapping, usable strictly in this stage only:
+ *
+ * vm_paddr_t pmap_preboot_get_pages(u_int num);
+ * void pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num);
+ * vm_offset_t pmap_preboot_reserve_pages(u_int num);
+ * vm_offset_t pmap_preboot_get_vpages(u_int num);
+ * void pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size,
+ * int prot, int attr);
+ *
+ * (2) for all stages:
+ *
+ * vm_paddr_t pmap_kextract(vm_offset_t va);
+ *
+ * NOTE: This stage is not SMP coherent.
+ *
+ *****************************************************************************/
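
A usage sketch (illustrative only) of the stage-one interface listed above,
allocating four physical pages and mapping them into freshly reserved KVA:

	vm_paddr_t pa;
	vm_offset_t va;

	pa = pmap_preboot_get_pages(4);     /* four physical pages */
	va = pmap_preboot_reserve_pages(4); /* four pages of KVA */
	pmap_preboot_map_pages(pa, va, 4);  /* wire them together */

	/* Or, allocation and mapping in one step (also zeroes the pages): */
	va = pmap_preboot_get_vpages(4);
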
+
+#define KERNEL_P2V(pa) \
+ ((vm_offset_t)((pa) - arm_physmem_kernaddr + KERNVIRTADDR))
+#define KERNEL_V2P(va) \
+ ((vm_paddr_t)((va) - KERNVIRTADDR + arm_physmem_kernaddr))
+
+static vm_paddr_t last_paddr;
+
+/*
+ * Pre-bootstrap epoch page allocator.
+ */
+vm_paddr_t
+pmap_preboot_get_pages(u_int num)
+{
+ vm_paddr_t ret;
+
+ ret = last_paddr;
+ last_paddr += num * PAGE_SIZE;
+
+ return (ret);
+}
+
+/*
+ * The fundamental initialization of PMAP stuff.
+ *
+ * Some things already happened in locore.S and some things could happen
+ * before pmap_bootstrap_prepare() is called, so let's recall what is done:
+ * 1. Caches are disabled.
+ * 2. We are running on virtual addresses already with 'boot_pt1'
+ * as L1 page table.
+ * 3. So far, all virtual addresses can be converted to physical ones and
+ * vice versa by the following macros:
+ * KERNEL_P2V(pa) .... physical to virtual ones,
+ * KERNEL_V2P(va) .... virtual to physical ones.
+ *
+ * What is done herein:
+ * 1. The 'boot_pt1' is replaced by the real kernel L1 page table 'kern_pt1'.
+ * 2. PT2MAP magic is brought to life.
+ * 3. Basic preboot functions for page allocations and mappings can be used.
+ * 4. Everything is prepared for L1 cache enabling.
+ *
+ * Variations:
+ * 1. To use second TTB register, so kernel and users page tables will be
+ * separated. This way process forking - pmap_pinit() - could be faster,
+ * it saves physical pages and KVA per a process, and it's simple change.
+ * However, it will lead, due to hardware matter, to the following:
+ * (a) 2G space for kernel and 2G space for users.
+ * (b) 1G space for kernel in low addresses and 3G for users above it.
+ * A question is: Is the case (b) really an option? Note that case (b)
+ * does save neither physical memory and KVA.
+ */
+void
+pmap_bootstrap_prepare(vm_paddr_t last)
+{
+ vm_paddr_t pt2pg_pa, pt2tab_pa, pa, size;
+ vm_offset_t pt2pg_va;
+ pt1_entry_t *pte1p;
+ pt2_entry_t *pte2p;
+ u_int i;
+ uint32_t actlr_mask, actlr_set;
+
+ /*
+ * Now, we are going to make the real kernel mapping. Note that we are
+ * already running on some mapping made in locore.S and we expect
+ * that it's large enough to ensure nofault access to physical memory
+ * allocated herein before the switch.
+ *
+ * As the kernel image and everything needed so far are and will be mapped
+ * by section mappings, we align the last physical address to PTE1_SIZE.
+ */
+ last_paddr = pte1_roundup(last);
+
+ /*
+ * Allocate and zero page(s) for kernel L1 page table.
+ *
+ * Note that this is the first allocation in space which was PTE1_SIZE
+ * aligned, and as such base_pt1 is aligned to NB_IN_PT1 too.
+ */
+ base_pt1 = pmap_preboot_get_pages(NPG_IN_PT1);
+ kern_pt1 = (pt1_entry_t *)KERNEL_P2V(base_pt1);
+ bzero((void*)kern_pt1, NB_IN_PT1);
+ pte1_sync_range(kern_pt1, NB_IN_PT1);
+
+ /* Allocate and zero page(s) for kernel PT2TAB. */
+ pt2tab_pa = pmap_preboot_get_pages(NPG_IN_PT2TAB);
+ kern_pt2tab = (pt2_entry_t *)KERNEL_P2V(pt2tab_pa);
+ bzero(kern_pt2tab, NB_IN_PT2TAB);
+ pte2_sync_range(kern_pt2tab, NB_IN_PT2TAB);
+
+ /* Allocate and zero page(s) for kernel L2 page tables. */
+ pt2pg_pa = pmap_preboot_get_pages(NKPT2PG);
+ pt2pg_va = KERNEL_P2V(pt2pg_pa);
+ size = NKPT2PG * PAGE_SIZE;
+ bzero((void*)pt2pg_va, size);
+ pte2_sync_range((pt2_entry_t *)pt2pg_va, size);
+
+ /*
+ * Add a physical memory segment (vm_phys_seg) corresponding to the
+ * preallocated pages for kernel L2 page tables so that vm_page
+ * structures representing these pages will be created. The vm_page
+ * structures are required for promotion of the corresponding kernel
+ * virtual addresses to section mappings.
+ */
+ vm_phys_add_seg(pt2tab_pa, pmap_preboot_get_pages(0));
+
+ /*
+ * Insert the allocated L2 page table pages into PT2TAB and make
+ * links to all PT2s in the L1 page table. See how kernel_vm_end
+ * is initialized.
+ *
+ * We play it simple and safe, so every KVA will have an underlying
+ * L2 page table, even the kernel image mapped by sections.
+ */
+ pte2p = kern_pt2tab_entry(KERNBASE);
+ for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += PTE2_SIZE)
+ pt2tab_store(pte2p++, PTE2_KPT(pa));
+
+ pte1p = kern_pte1(KERNBASE);
+ for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += NB_IN_PT2)
+ pte1_store(pte1p++, PTE1_LINK(pa));
+
+ /* Make section mappings for kernel. */
+ pte1p = kern_pte1(KERNBASE);
+ for (pa = KERNEL_V2P(KERNBASE); pa < last; pa += PTE1_SIZE)
+ pte1_store(pte1p++, PTE1_KERN(pa, PTE1_AP_KRW,
+ ATTR_TO_L1(PTE2_ATTR_WB_WA)));
+
+ /*
+ * Get free and aligned space for PT2MAP and make L1 page table links
+ * to L2 page tables held in PT2TAB.
+ *
+ * Note that pages holding PT2s are stored in PT2TAB as pt2_entry_t
+ * descriptors and the PT2TAB page(s) themselves are used as PT2s. Thus
+ * each entry in PT2TAB maps all PT2s in a page. This implies that the
+ * virtual address of PT2MAP must be aligned to NPT2_IN_PG * PTE1_SIZE.
+ */
+ PT2MAP = (pt2_entry_t *)(KERNBASE - PT2MAP_SIZE);
+ pte1p = kern_pte1((vm_offset_t)PT2MAP);
+ for (pa = pt2tab_pa, i = 0; i < NPT2_IN_PT2TAB; i++, pa += NB_IN_PT2) {
+ pte1_store(pte1p++, PTE1_LINK(pa));
+ }
+
+ /*
+ * Store PT2TAB in PT2TAB itself, i.e. self-reference mapping.
+ * Each pmap will hold its own PT2TAB, so the mapping should not be global.
+ */
+ pte2p = kern_pt2tab_entry((vm_offset_t)PT2MAP);
+ for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) {
+ pt2tab_store(pte2p++, PTE2_KPT_NG(pa));
+ }
+
+ /*
+ * Choose the correct L2 page table and make mappings for the allocations
+ * made herein, which will replace the temporary locore.S mappings after a
+ * while. Note that PT2MAP cannot be used until we switch to kern_pt1.
+ *
+ * Note that these allocations started aligned on a 1M section and
+ * the kernel PT1 was allocated first. The mappings must be made in the
+ * order of the physical allocations as we've used the KERNEL_P2V() macro
+ * for virtual address resolution.
+ */
+ pte2p = kern_pt2tab_entry((vm_offset_t)kern_pt1);
+ pt2pg_va = KERNEL_P2V(pte2_pa(pte2_load(pte2p)));
+
+ pte2p = page_pt2(pt2pg_va, pte1_index((vm_offset_t)kern_pt1));
+
+ /* Make mapping for kernel L1 page table. */
+ for (pa = base_pt1, i = 0; i < NPG_IN_PT1; i++, pa += PTE2_SIZE)
+ pte2_store(pte2p++, PTE2_KPT(pa));
+
+ /* Make mapping for kernel PT2TAB. */
+ for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE)
+ pte2_store(pte2p++, PTE2_KPT(pa));
+
+ /* Finally, switch from 'boot_pt1' to 'kern_pt1'. */
+ pmap_kern_ttb = base_pt1 | ttb_flags;
+ cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
+ reinit_mmu(pmap_kern_ttb, actlr_mask, actlr_set);
+ /*
+ * Initialize the first available KVA. As the kernel image is mapped by
+ * sections, we are leaving some gap behind it.
+ */
+ virtual_avail = (vm_offset_t)kern_pt2tab + NPG_IN_PT2TAB * PAGE_SIZE;
+}
+
+/*
+ * Setup L2 page table page for given KVA.
+ * Used in pre-bootstrap epoch.
+ *
+ * Note that we have allocated NKPT2PG pages for L2 page tables in advance
+ * and used them for mapping KVA starting from KERNBASE. However, this is not
+ * enough. Vectors and devices need L2 page tables too. Note that they are
+ * even above VM_MAX_KERNEL_ADDRESS.
+ */
+static __inline vm_paddr_t
+pmap_preboot_pt2pg_setup(vm_offset_t va)
+{
+ pt2_entry_t *pte2p, pte2;
+ vm_paddr_t pt2pg_pa;
+
+ /* Get associated entry in PT2TAB. */
+ pte2p = kern_pt2tab_entry(va);
+
+ /* Just return if the PT2s page already exists. */
+ pte2 = pt2tab_load(pte2p);
+ if (pte2_is_valid(pte2))
+ return (pte2_pa(pte2));
+
+ KASSERT(va >= VM_MAX_KERNEL_ADDRESS,
+ ("%s: NKPT2PG too small", __func__));
+
+ /*
+ * Allocate page for PT2s and insert it to PT2TAB.
+ * In other words, map it into PT2MAP space.
+ */
+ pt2pg_pa = pmap_preboot_get_pages(1);
+ pt2tab_store(pte2p, PTE2_KPT(pt2pg_pa));
+
+ /* Zero all PT2s in allocated page. */
+ bzero((void*)pt2map_pt2pg(va), PAGE_SIZE);
+ pte2_sync_range((pt2_entry_t *)pt2map_pt2pg(va), PAGE_SIZE);
+
+ return (pt2pg_pa);
+}
+
+/*
+ * Setup L2 page table for given KVA.
+ * Used in pre-bootstrap epoch.
+ */
+static void
+pmap_preboot_pt2_setup(vm_offset_t va)
+{
+ pt1_entry_t *pte1p;
+ vm_paddr_t pt2pg_pa, pt2_pa;
+
+ /* Setup PT2's page. */
+ pt2pg_pa = pmap_preboot_pt2pg_setup(va);
+ pt2_pa = page_pt2pa(pt2pg_pa, pte1_index(va));
+
+ /* Insert PT2 to PT1. */
+ pte1p = kern_pte1(va);
+ pte1_store(pte1p, PTE1_LINK(pt2_pa));
+}
+
+/*
+ * Get L2 page entry associated with given KVA.
+ * Used in pre-bootstrap epoch.
+ */
+static __inline pt2_entry_t*
+pmap_preboot_vtopte2(vm_offset_t va)
+{
+ pt1_entry_t *pte1p;
+
+ /* Setup PT2 if needed. */
+ pte1p = kern_pte1(va);
+ if (!pte1_is_valid(pte1_load(pte1p))) /* XXX - sections ?! */
+ pmap_preboot_pt2_setup(va);
+
+ return (pt2map_entry(va));
+}
+
+/*
+ * Pre-bootstrap epoch page(s) mapping(s).
+ */
+void
+pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num)
+{
+ u_int i;
+ pt2_entry_t *pte2p;
+
+ /* Map all the pages. */
+ for (i = 0; i < num; i++) {
+ pte2p = pmap_preboot_vtopte2(va);
+ pte2_store(pte2p, PTE2_KRW(pa));
+ va += PAGE_SIZE;
+ pa += PAGE_SIZE;
+ }
+}
+
+/*
+ * Pre-bootstrap epoch virtual space allocator.
+ */
+vm_offset_t
+pmap_preboot_reserve_pages(u_int num)
+{
+ u_int i;
+ vm_offset_t start, va;
+ pt2_entry_t *pte2p;
+
+ /* Allocate virtual space. */
+ start = va = virtual_avail;
+ virtual_avail += num * PAGE_SIZE;
+
+ /* Zero the mapping. */
+ for (i = 0; i < num; i++) {
+ pte2p = pmap_preboot_vtopte2(va);
+ pte2_store(pte2p, 0);
+ va += PAGE_SIZE;
+ }
+
+ return (start);
+}
+
+/*
+ * Pre-bootstrap epoch page(s) allocation and mapping(s).
+ */
+vm_offset_t
+pmap_preboot_get_vpages(u_int num)
+{
+ vm_paddr_t pa;
+ vm_offset_t va;
+
+ /* Allocate physical page(s). */
+ pa = pmap_preboot_get_pages(num);
+
+ /* Allocate virtual space. */
+ va = virtual_avail;
+ virtual_avail += num * PAGE_SIZE;
+
+ /* Map and zero all. */
+ pmap_preboot_map_pages(pa, va, num);
+ bzero((void *)va, num * PAGE_SIZE);
+
+ return (va);
+}
+
+/*
+ * Pre-bootstrap epoch page mapping(s) with attributes.
+ */
+void
+pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size, int prot,
+ int attr)
+{
+ u_int num;
+ u_int l1_attr, l1_prot;
+ pt1_entry_t *pte1p;
+ pt2_entry_t *pte2p;
+
+ l1_prot = ATTR_TO_L1(prot);
+ l1_attr = ATTR_TO_L1(attr);
+
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***