PERFORCE change 134708 for review
Andre Oppermann
andre at FreeBSD.org
Sun Feb 3 02:08:55 PST 2008
http://perforce.freebsd.org/chv.cgi?CH=134708
Change 134708 by andre at andre_flirtbox on 2008/02/03 10:08:53
Integrate @134705
Affected files ...
.. //depot/projects/tcp_new/amd64/acpica/madt.c#2 integrate
.. //depot/projects/tcp_new/amd64/amd64/identcpu.c#2 integrate
.. //depot/projects/tcp_new/amd64/conf/GENERIC#2 integrate
.. //depot/projects/tcp_new/amd64/conf/NOTES#2 integrate
.. //depot/projects/tcp_new/amd64/include/proc.h#2 integrate
.. //depot/projects/tcp_new/amd64/include/specialreg.h#2 integrate
.. //depot/projects/tcp_new/arm/arm/pmap.c#2 integrate
.. //depot/projects/tcp_new/arm/include/pmap.h#2 integrate
.. //depot/projects/tcp_new/compat/ndis/kern_ndis.c#2 integrate
.. //depot/projects/tcp_new/conf/files.amd64#2 integrate
.. //depot/projects/tcp_new/conf/files.i386#2 integrate
.. //depot/projects/tcp_new/conf/files.powerpc#2 integrate
.. //depot/projects/tcp_new/conf/kern.pre.mk#2 integrate
.. //depot/projects/tcp_new/ddb/db_textdump.c#2 integrate
.. //depot/projects/tcp_new/dev/bfe/if_bfe.c#2 integrate
.. //depot/projects/tcp_new/dev/bfe/if_bfereg.h#2 integrate
.. //depot/projects/tcp_new/dev/cxgb/cxgb_sge.c#2 integrate
.. //depot/projects/tcp_new/dev/cxgb/ulp/tom/cxgb_cpl_socket.c#2 integrate
.. //depot/projects/tcp_new/dev/hatm/if_hatm_intr.c#2 integrate
.. //depot/projects/tcp_new/dev/iscsi/initiator/isc_soc.c#2 integrate
.. //depot/projects/tcp_new/dev/lge/if_lge.c#2 integrate
.. //depot/projects/tcp_new/dev/mfi/mfi.c#2 integrate
.. //depot/projects/tcp_new/dev/mfi/mfireg.h#2 integrate
.. //depot/projects/tcp_new/dev/msk/if_msk.c#2 integrate
.. //depot/projects/tcp_new/dev/nfe/if_nfe.c#2 integrate
.. //depot/projects/tcp_new/dev/patm/if_patm_rx.c#2 integrate
.. //depot/projects/tcp_new/dev/pci/pci.c#2 integrate
.. //depot/projects/tcp_new/dev/rr232x/LICENSE#2 delete
.. //depot/projects/tcp_new/dev/rr232x/README#2 delete
.. //depot/projects/tcp_new/dev/rr232x/amd64-elf.rr232x_lib.o.uu#2 delete
.. //depot/projects/tcp_new/dev/rr232x/array.h#2 delete
.. //depot/projects/tcp_new/dev/rr232x/him.h#2 delete
.. //depot/projects/tcp_new/dev/rr232x/himfuncs.h#2 delete
.. //depot/projects/tcp_new/dev/rr232x/hptintf.h#2 delete
.. //depot/projects/tcp_new/dev/rr232x/i386-elf.rr232x_lib.o.uu#2 delete
.. //depot/projects/tcp_new/dev/rr232x/ldm.h#2 delete
.. //depot/projects/tcp_new/dev/rr232x/list.h#2 delete
.. //depot/projects/tcp_new/dev/rr232x/os_bsd.c#2 delete
.. //depot/projects/tcp_new/dev/rr232x/os_bsd.h#2 delete
.. //depot/projects/tcp_new/dev/rr232x/osm.h#2 delete
.. //depot/projects/tcp_new/dev/rr232x/osm_bsd.c#2 delete
.. //depot/projects/tcp_new/dev/rr232x/rr232x_config.c#2 delete
.. //depot/projects/tcp_new/dev/rr232x/rr232x_config.h#2 delete
.. //depot/projects/tcp_new/dev/sk/if_sk.c#2 integrate
.. //depot/projects/tcp_new/dev/ti/if_ti.c#2 integrate
.. //depot/projects/tcp_new/dev/wpi/if_wpi.c#2 integrate
.. //depot/projects/tcp_new/i386/acpica/madt.c#2 integrate
.. //depot/projects/tcp_new/i386/conf/GENERIC#2 integrate
.. //depot/projects/tcp_new/i386/conf/NOTES#2 integrate
.. //depot/projects/tcp_new/i386/include/proc.h#2 integrate
.. //depot/projects/tcp_new/kern/kern_mbuf.c#2 integrate
.. //depot/projects/tcp_new/kern/uipc_cow.c#2 integrate
.. //depot/projects/tcp_new/kern/uipc_mbuf.c#2 integrate
.. //depot/projects/tcp_new/kern/uipc_sockbuf.c#2 integrate
.. //depot/projects/tcp_new/kern/uipc_socket.c#2 integrate
.. //depot/projects/tcp_new/kern/uipc_syscalls.c#2 integrate
.. //depot/projects/tcp_new/kern/vfs_aio.c#2 integrate
.. //depot/projects/tcp_new/modules/Makefile#2 integrate
.. //depot/projects/tcp_new/modules/rr232x/Makefile#2 delete
.. //depot/projects/tcp_new/net/bpf.c#2 integrate
.. //depot/projects/tcp_new/net80211/ieee80211_ht.c#2 integrate
.. //depot/projects/tcp_new/net80211/ieee80211_ht.h#2 integrate
.. //depot/projects/tcp_new/netgraph/netgraph.h#2 integrate
.. //depot/projects/tcp_new/netgraph/ng_base.c#2 integrate
.. //depot/projects/tcp_new/netgraph/ng_iface.c#2 integrate
.. //depot/projects/tcp_new/netgraph/ng_pppoe.c#2 integrate
.. //depot/projects/tcp_new/netinet/ip_output.c#2 integrate
.. //depot/projects/tcp_new/netinet/sctp_input.c#2 integrate
.. //depot/projects/tcp_new/netinet/sctp_peeloff.c#2 integrate
.. //depot/projects/tcp_new/netinet/sctputil.c#2 integrate
.. //depot/projects/tcp_new/netinet6/ip6_output.c#2 integrate
.. //depot/projects/tcp_new/netipsec/ipsec.c#2 integrate
.. //depot/projects/tcp_new/netipsec/ipsec.h#2 integrate
.. //depot/projects/tcp_new/netipsec/ipsec6.h#2 integrate
.. //depot/projects/tcp_new/pci/if_wb.c#2 integrate
.. //depot/projects/tcp_new/sys/mbuf.h#2 integrate
.. //depot/projects/tcp_new/sys/param.h#2 integrate
.. //depot/projects/tcp_new/sys/proc.h#2 integrate
.. //depot/projects/tcp_new/sys/socketvar.h#2 integrate
Differences ...
==== //depot/projects/tcp_new/amd64/acpica/madt.c#2 (text+ko) ====
@@ -28,7 +28,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/amd64/acpica/madt.c,v 1.24 2007/05/08 22:01:02 jhb Exp $");
+__FBSDID("$FreeBSD: src/sys/amd64/acpica/madt.c,v 1.25 2008/01/31 16:51:42 jhb Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -109,9 +109,11 @@
/*
* Code to abuse the crashdump map to map in the tables for the early
* probe. We cheat and make the following assumptions about how we
- * use this KVA: page 0 is used to map in the first page of each table
- * found via the RSDT or XSDT and pages 1 to n are used to map in the
- * RSDT or XSDT. The offset is in pages; the length is in bytes.
+ * use this KVA: pages 0 and 1 are used to map in the header of each
+ * table found via the RSDT or XSDT and pages 2 to n are used to map
+ * in the RSDT or XSDT. We have to use 2 pages for the table headers
+ * in case a header spans a page boundary. The offset is in pages;
+ * the length is in bytes.
*/
static void *
madt_map(vm_paddr_t pa, int offset, vm_offset_t length)
@@ -232,7 +234,7 @@
printf("MADT: RSDP failed extended checksum\n");
return (ENXIO);
}
- xsdt = madt_map_table(rsdp->XsdtPhysicalAddress, 1,
+ xsdt = madt_map_table(rsdp->XsdtPhysicalAddress, 2,
ACPI_SIG_XSDT);
if (xsdt == NULL) {
if (bootverbose)
@@ -246,7 +248,7 @@
break;
madt_unmap_table(xsdt);
} else {
- rsdt = madt_map_table(rsdp->RsdtPhysicalAddress, 1,
+ rsdt = madt_map_table(rsdp->RsdtPhysicalAddress, 2,
ACPI_SIG_RSDT);
if (rsdt == NULL) {
if (bootverbose)
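
The reworked comment above spells out why the early ACPI probe now maps two pages for each table header: the header can straddle a page boundary. A minimal userland sketch of that arithmetic (illustrative only, not part of the change; PAGE_SIZE, the helper name and the example addresses are assumptions, 36 bytes being the size of an ACPI table header):

#include <stdio.h>

#define PAGE_SIZE	4096
#define PAGE_MASK	(PAGE_SIZE - 1)

/* Number of pages touched by the byte range [pa, pa + len). */
static int
pages_spanned(unsigned long pa, unsigned long len)
{
	unsigned long first = pa & ~(unsigned long)PAGE_MASK;
	unsigned long last = (pa + len - 1) & ~(unsigned long)PAGE_MASK;

	return ((int)((last - first) / PAGE_SIZE) + 1);
}

int
main(void)
{
	/* A 36-byte header starting 8 bytes before a page boundary. */
	printf("%d\n", pages_spanned(0x1ff8, 36));	/* prints 2 */
	/* The same header starting exactly on a page boundary. */
	printf("%d\n", pages_spanned(0x2000, 36));	/* prints 1 */
	return (0);
}
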
==== //depot/projects/tcp_new/amd64/amd64/identcpu.c#2 (text+ko) ====
@@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/amd64/amd64/identcpu.c,v 1.155 2007/12/08 21:13:01 alc Exp $");
+__FBSDID("$FreeBSD: src/sys/amd64/amd64/identcpu.c,v 1.157 2008/02/02 23:17:27 das Exp $");
#include "opt_cpu.h"
@@ -223,7 +223,7 @@
"\020"
"\001SSE3" /* SSE3 */
"\002<b1>"
- "\003RSVD2" /* "Reserved" bit 2 */
+ "\003DTES64" /* 64-bit Debug Trace */
"\004MON" /* MONITOR/MWAIT Instructions */
"\005DS_CPL" /* CPL Qualified Debug Store */
"\006VMX" /* Virtual Machine Extensions */
@@ -240,11 +240,11 @@
"\021<b16>"
"\022<b17>"
"\023DCA" /* Direct Cache Access */
- "\024<b19>"
- "\025<b20>"
- "\026<b21>"
+ "\024SSE4.1"
+ "\025SSE4.2"
+ "\026x2APIC" /* xAPIC Extensions */
"\027<b22>"
- "\030<b23>"
+ "\030POPCNT"
"\031<b24>"
"\032<b25>"
"\033<b26>"
==== //depot/projects/tcp_new/amd64/conf/GENERIC#2 (text+ko) ====
@@ -16,7 +16,7 @@
# If you are in doubt as to the purpose or necessity of a line, check first
# in NOTES.
#
-# $FreeBSD: src/sys/amd64/conf/GENERIC,v 1.491 2008/01/07 21:40:09 jhb Exp $
+# $FreeBSD: src/sys/amd64/conf/GENERIC,v 1.492 2008/02/03 07:07:30 scottl Exp $
cpu HAMMER
ident GENERIC
@@ -133,7 +133,6 @@
device dpt # DPT Smartcache III, IV - See NOTES for options
device hptmv # Highpoint RocketRAID 182x
device hptrr # Highpoint RocketRAID 17xx, 22xx, 23xx, 25xx
-device rr232x # Highpoint RocketRAID 232x
device iir # Intel Integrated RAID
device ips # IBM (Adaptec) ServeRAID
device mly # Mylex AcceleRAID/eXtremeRAID
==== //depot/projects/tcp_new/amd64/conf/NOTES#2 (text+ko) ====
@@ -4,7 +4,7 @@
# This file contains machine dependent kernel configuration notes. For
# machine independent notes, look in /sys/conf/NOTES.
#
-# $FreeBSD: src/sys/amd64/conf/NOTES,v 1.74 2007/12/28 22:50:04 rpaulo Exp $
+# $FreeBSD: src/sys/amd64/conf/NOTES,v 1.75 2008/02/03 07:07:30 scottl Exp $
#
#
@@ -381,11 +381,6 @@
device hptrr
#
-# Highpoint RocketRAID 232x. This is software RAID but with hardware
-# acceleration assistance for RAID_5.
-device rr232x
-
-#
# Highpoint RocketRaid 3xxx series SATA RAID
device hptiop
==== //depot/projects/tcp_new/amd64/include/proc.h#2 (text+ko) ====
@@ -27,7 +27,7 @@
* SUCH DAMAGE.
*
* from: @(#)proc.h 7.1 (Berkeley) 5/15/91
- * $FreeBSD: src/sys/amd64/include/proc.h,v 1.24 2005/04/04 21:53:52 jhb Exp $
+ * $FreeBSD: src/sys/amd64/include/proc.h,v 1.25 2008/01/31 08:24:27 mav Exp $
*/
#ifndef _MACHINE_PROC_H_
@@ -44,4 +44,17 @@
struct mdproc {
};
+#ifdef _KERNEL
+
+/* Get the current kernel thread stack usage. */
+#define GET_STACK_USAGE(total, used) do { \
+ struct thread *td = curthread; \
+ (total) = td->td_kstack_pages * PAGE_SIZE; \
+ (used) = (char *)td->td_kstack + \
+ td->td_kstack_pages * PAGE_SIZE - \
+ (char *)&td; \
+} while (0)
+
+#endif /* _KERNEL */
+
#endif /* !_MACHINE_PROC_H_ */
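
The GET_STACK_USAGE() macro added above reports the size of the current kernel thread's stack and an estimate of how much of it is already consumed, by comparing the address of a local variable against the top of the stack. A userland analogue of that measurement (illustrative only, not part of the change; the function names, the 1 KB frame and the assumption that the stack grows downward, as it does on amd64, are mine):

#include <stdio.h>
#include <stddef.h>

static char *stack_base;	/* address recorded high up in the stack */

/* Approximate bytes of stack in use below stack_base. */
static size_t
stack_used(void)
{
	char marker;

	return ((size_t)(stack_base - &marker));
}

static void
inner(void)
{
	volatile char buf[1024];	/* consume some stack in this frame */

	buf[0] = 0;
	printf("roughly %zu bytes of stack in use\n", stack_used());
}

int
main(void)
{
	char base;

	stack_base = &base;
	inner();
	return (0);
}

Presumably the point of the kernel macro is to let deep call paths check how much stack remains before recursing or dispatching further; no consumer of it is visible in the portion of this diff shown here.
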
==== //depot/projects/tcp_new/amd64/include/specialreg.h#2 (text+ko) ====
@@ -27,7 +27,7 @@
* SUCH DAMAGE.
*
* from: @(#)specialreg.h 7.1 (Berkeley) 5/9/91
- * $FreeBSD: src/sys/amd64/include/specialreg.h,v 1.41 2007/12/08 21:13:01 alc Exp $
+ * $FreeBSD: src/sys/amd64/include/specialreg.h,v 1.43 2008/02/02 23:17:27 das Exp $
*/
#ifndef _MACHINE_SPECIALREG_H_
@@ -113,6 +113,7 @@
#define CPUID_PBE 0x80000000
#define CPUID2_SSE3 0x00000001
+#define CPUID2_DTES64 0x00000004
#define CPUID2_MON 0x00000008
#define CPUID2_DS_CPL 0x00000010
#define CPUID2_VMX 0x00000020
@@ -125,6 +126,10 @@
#define CPUID2_XTPR 0x00004000
#define CPUID2_PDCM 0x00008000
#define CPUID2_DCA 0x00040000
+#define CPUID2_SSE41 0x00080000
+#define CPUID2_SSE42 0x00100000
+#define CPUID2_X2APIC 0x00200000
+#define CPUID2_POPCNT 0x00800000
/*
* Important bits in the AMD extended cpuid flags
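
The CPUID2_* masks added above back the new feature names printed by identcpu.c earlier in this change. A self-contained userland check of the same CPUID leaf 1 ECX bits might look like this (illustrative only, not part of the change; it uses the GCC/Clang <cpuid.h> helper rather than the kernel's cpu_feature2 variable):

#include <stdio.h>
#include <cpuid.h>

#define CPUID2_SSE41	0x00080000
#define CPUID2_SSE42	0x00100000
#define CPUID2_X2APIC	0x00200000
#define CPUID2_POPCNT	0x00800000

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 1 not supported\n");
		return (1);
	}
	printf("SSE4.1: %s\n", (ecx & CPUID2_SSE41) ? "yes" : "no");
	printf("SSE4.2: %s\n", (ecx & CPUID2_SSE42) ? "yes" : "no");
	printf("x2APIC: %s\n", (ecx & CPUID2_X2APIC) ? "yes" : "no");
	printf("POPCNT: %s\n", (ecx & CPUID2_POPCNT) ? "yes" : "no");
	return (0);
}
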
==== //depot/projects/tcp_new/arm/arm/pmap.c#2 (text+ko) ====
@@ -147,7 +147,7 @@
#include "opt_vm.h"
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/arm/arm/pmap.c,v 1.97 2008/01/17 12:41:59 cognet Exp $");
+__FBSDID("$FreeBSD: src/sys/arm/arm/pmap.c,v 1.98 2008/01/31 00:05:40 cognet Exp $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
@@ -199,11 +199,8 @@
static void pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, boolean_t, int);
-static void pmap_vac_me_harder(struct vm_page *, pmap_t,
- vm_offset_t);
-static void pmap_vac_me_kpmap(struct vm_page *, pmap_t,
+static __inline void pmap_fix_cache(struct vm_page *, pmap_t,
vm_offset_t);
-static void pmap_vac_me_user(struct vm_page *, pmap_t, vm_offset_t);
static void pmap_alloc_l1(pmap_t);
static void pmap_free_l1(pmap_t);
static void pmap_use_l1(pmap_t);
@@ -1260,276 +1257,147 @@
#endif
/*
- * Since we have a virtually indexed cache, we may need to inhibit caching if
- * there is more than one mapping and at least one of them is writable.
- * Since we purge the cache on every context switch, we only need to check for
- * other mappings within the same pmap, or kernel_pmap.
- * This function is also called when a page is unmapped, to possibly reenable
- * caching on any remaining mappings.
- *
- * The code implements the following logic, where:
- *
- * KW = # of kernel read/write pages
- * KR = # of kernel read only pages
- * UW = # of user read/write pages
- * UR = # of user read only pages
- *
- * KC = kernel mapping is cacheable
- * UC = user mapping is cacheable
- *
- * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0
- * +---------------------------------------------
- * UW=0,UR=0 | --- KC=1 KC=1 KC=0
- * UW=0,UR>0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0
- * UW=1,UR=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
- * UW>1,UR>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0
+ * cacheable == -1 means we must make the entry uncacheable, 1 means
+ * cacheable;
*/
-
-static const int pmap_vac_flags[4][4] = {
- {-1, 0, 0, PVF_KNC},
- {0, 0, PVF_NC, PVF_NC},
- {0, PVF_NC, PVF_NC, PVF_NC},
- {PVF_UNC, PVF_NC, PVF_NC, PVF_NC}
-};
-
-static PMAP_INLINE int
-pmap_get_vac_flags(const struct vm_page *pg)
-{
- int kidx, uidx;
-
- kidx = 0;
- if (pg->md.kro_mappings || pg->md.krw_mappings > 1)
- kidx |= 1;
- if (pg->md.krw_mappings)
- kidx |= 2;
-
- uidx = 0;
- if (pg->md.uro_mappings || pg->md.urw_mappings > 1)
- uidx |= 1;
- if (pg->md.urw_mappings)
- uidx |= 2;
-
- return (pmap_vac_flags[uidx][kidx]);
-}
-
static __inline void
-pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vm_offset_t va)
+pmap_set_cache_entry(pv_entry_t pv, pmap_t pm, vm_offset_t va, int cacheable)
{
- int nattr;
+ struct l2_bucket *l2b;
+ pt_entry_t *ptep, pte;
- mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- nattr = pmap_get_vac_flags(pg);
+ l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
+ ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
- if (nattr < 0) {
- pg->md.pvh_attrs &= ~PVF_NC;
- return;
+ if (cacheable == 1) {
+ pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode;
+ if (l2pte_valid(pte)) {
+ if (PV_BEEN_EXECD(pv->pv_flags)) {
+ pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va);
+ } else if (PV_BEEN_REFD(pv->pv_flags)) {
+ pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va);
+ }
+ }
+ } else {
+ pte = *ptep &~ L2_S_CACHE_MASK;
+ if ((va != pv->pv_va || pm != pv->pv_pmap) &&
+ l2pte_valid(pte)) {
+ if (PV_BEEN_EXECD(pv->pv_flags)) {
+ pmap_idcache_wbinv_range(pv->pv_pmap,
+ pv->pv_va, PAGE_SIZE);
+ pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va);
+ } else if (PV_BEEN_REFD(pv->pv_flags)) {
+ pmap_dcache_wb_range(pv->pv_pmap,
+ pv->pv_va, PAGE_SIZE, TRUE,
+ (pv->pv_flags & PVF_WRITE) == 0);
+ pmap_tlb_flushD_SE(pv->pv_pmap,
+ pv->pv_va);
+ }
+ }
}
-
- if (nattr == 0 && (pg->md.pvh_attrs & PVF_NC) == 0) {
- return;
- }
-
- if (pm == pmap_kernel())
- pmap_vac_me_kpmap(pg, pm, va);
- else
- pmap_vac_me_user(pg, pm, va);
-
- pg->md.pvh_attrs = (pg->md.pvh_attrs & ~PVF_NC) | nattr;
+ *ptep = pte;
+ PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
}
static void
-pmap_vac_me_kpmap(struct vm_page *pg, pmap_t pm, vm_offset_t va)
+pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
- u_int u_cacheable, u_entries;
+ int pmwc = 0;
+ int writable = 0, kwritable = 0, uwritable = 0;
+ int entries = 0, kentries = 0, uentries = 0;
struct pv_entry *pv;
- pmap_t last_pmap = pm;
- /*
- * Pass one, see if there are both kernel and user pmaps for
- * this page. Calculate whether there are user-writable or
- * kernel-writable pages.
- */
- u_cacheable = 0;
- TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
- if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0)
- u_cacheable++;
- }
+ mtx_assert(&vm_page_queue_mtx, MA_OWNED);
- u_entries = pg->md.urw_mappings + pg->md.uro_mappings;
-
- /*
- * We know we have just been updating a kernel entry, so if
- * all user pages are already cacheable, then there is nothing
- * further to do.
+ /* the cache gets written back/invalidated on context switch.
+ * therefore, if a user page shares an entry in the same page or
+ * with the kernel map and at least one is writable, then the
+ * cache entry must be set write-through.
*/
- if (pg->md.k_mappings == 0 && u_cacheable == u_entries)
- return;
- if (u_entries) {
- /*
- * Scan over the list again, for each entry, if it
- * might not be set correctly, call pmap_vac_me_user
- * to recalculate the settings.
- */
- TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
- /*
- * We know kernel mappings will get set
- * correctly in other calls. We also know
- * that if the pmap is the same as last_pmap
- * then we've just handled this entry.
- */
- if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap)
- continue;
-
- /*
- * If there are kernel entries and this page
- * is writable but non-cacheable, then we can
- * skip this entry also.
- */
- if (pg->md.k_mappings &&
- (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
- (PVF_NC | PVF_WRITE))
- continue;
-
- /*
- * Similarly if there are no kernel-writable
- * entries and the page is already
- * read-only/cacheable.
- */
- if (pg->md.krw_mappings == 0 &&
- (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0)
- continue;
-
- /*
- * For some of the remaining cases, we know
- * that we must recalculate, but for others we
- * can't tell if they are correct or not, so
- * we recalculate anyway.
- */
- pmap_vac_me_user(pg, (last_pmap = pv->pv_pmap), 0);
+ TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
+ /* generate a count of the pv_entry uses */
+ if (pv->pv_flags & PVF_WRITE) {
+ if (pv->pv_pmap == pmap_kernel())
+ kwritable++;
+ else if (pv->pv_pmap == pm)
+ uwritable++;
+ writable++;
+ }
+ if (pv->pv_pmap == pmap_kernel())
+ kentries++;
+ else {
+ if (pv->pv_pmap == pm)
+ uentries++;
+ entries++;
}
-
- if (pg->md.k_mappings == 0)
- return;
}
+ /*
+ * check if the user duplicate mapping has
+ * been removed.
+ */
+ if ((pm != pmap_kernel()) && (((uentries > 1) && uwritable) ||
+ (uwritable > 1)))
+ pmwc = 1;
- pmap_vac_me_user(pg, pm, va);
-}
-
-static void
-pmap_vac_me_user(struct vm_page *pg, pmap_t pm, vm_offset_t va)
-{
- pmap_t kpmap = pmap_kernel();
- struct pv_entry *pv, *npv;
- struct l2_bucket *l2b;
- pt_entry_t *ptep, pte;
- u_int entries = 0;
- u_int writable = 0;
- u_int cacheable_entries = 0;
- u_int kern_cacheable = 0;
- u_int other_writable = 0;
-
- /*
- * Count mappings and writable mappings in this pmap.
- * Include kernel mappings as part of our own.
- * Keep a pointer to the first one.
- */
- npv = TAILQ_FIRST(&pg->md.pv_list);
TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
- /* Count mappings in the same pmap */
- if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) {
- if (entries++ == 0)
- npv = pv;
+ /* check for user uncachable conditions - order is important */
+ if (pm != pmap_kernel() &&
+ (pv->pv_pmap == pm || pv->pv_pmap == pmap_kernel())) {
- /* Cacheable mappings */
- if ((pv->pv_flags & PVF_NC) == 0) {
- cacheable_entries++;
- if (kpmap == pv->pv_pmap)
- kern_cacheable++;
- }
+ if ((uentries > 1 && uwritable) || uwritable > 1) {
- /* Writable mappings */
- if (pv->pv_flags & PVF_WRITE)
- ++writable;
- } else
- if (pv->pv_flags & PVF_WRITE)
- other_writable = 1;
- }
+ /* user duplicate mapping */
+ if (pv->pv_pmap != pmap_kernel())
+ pv->pv_flags |= PVF_MWC;
- /*
- * Enable or disable caching as necessary.
- * Note: the first entry might be part of the kernel pmap,
- * so we can't assume this is indicative of the state of the
- * other (maybe non-kpmap) entries.
- */
- if ((entries > 1 && writable) ||
- (entries > 0 && pm == kpmap && other_writable)) {
- if (cacheable_entries == 0)
- return;
-
- for (pv = npv; pv; pv = TAILQ_NEXT(pv, pv_list)) {
- if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) ||
- (pv->pv_flags & PVF_NC))
+ if (!(pv->pv_flags & PVF_NC)) {
+ pv->pv_flags |= PVF_NC;
+ pmap_set_cache_entry(pv, pm, va, -1);
+ }
continue;
+ } else /* no longer a duplicate user */
+ pv->pv_flags &= ~PVF_MWC;
+ }
- pv->pv_flags |= PVF_NC;
+ /*
+ * check for kernel uncachable conditions
+ * kernel writable or kernel readable with writable user entry
+ */
+ if ((kwritable && entries) ||
+ ((kwritable != writable) && kentries &&
+ (pv->pv_pmap == pmap_kernel() ||
+ (pv->pv_flags & PVF_WRITE) ||
+ (pv->pv_flags & PVF_MWC)))) {
- l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
- ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
- pte = *ptep & ~L2_S_CACHE_MASK;
-
- if ((va != pv->pv_va || pm != pv->pv_pmap) &&
- l2pte_valid(pte)) {
- if (PV_BEEN_EXECD(pv->pv_flags)) {
- pmap_idcache_wbinv_range(pv->pv_pmap,
- pv->pv_va, PAGE_SIZE);
- pmap_tlb_flushID_SE(pv->pv_pmap,
- pv->pv_va);
- } else
- if (PV_BEEN_REFD(pv->pv_flags)) {
- pmap_dcache_wb_range(pv->pv_pmap,
- pv->pv_va, PAGE_SIZE, TRUE,
- (pv->pv_flags & PVF_WRITE) == 0);
- pmap_tlb_flushD_SE(pv->pv_pmap,
- pv->pv_va);
- }
+ if (!(pv->pv_flags & PVF_NC)) {
+ pv->pv_flags |= PVF_NC;
+ pmap_set_cache_entry(pv, pm, va, -1);
}
+ continue;
+ }
- *ptep = pte;
- PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
- }
- cpu_cpwait();
- } else
- if (entries > cacheable_entries) {
- /*
- * Turn cacheing back on for some pages. If it is a kernel
- * page, only do so if there are no other writable pages.
- */
- for (pv = npv; pv; pv = TAILQ_NEXT(pv, pv_list)) {
- if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap &&
- (kpmap != pv->pv_pmap || other_writable)))
- continue;
+ /* kernel and user are cachable */
+ if ((pm == pmap_kernel()) && !(pv->pv_flags & PVF_MWC) &&
+ (pv->pv_flags & PVF_NC)) {
pv->pv_flags &= ~PVF_NC;
+ pmap_set_cache_entry(pv, pm, va, 1);
+ continue;
+ }
+ /* user is no longer sharable and writable */
+ if (pm != pmap_kernel() && (pv->pv_pmap == pm) &&
+ !pmwc && (pv->pv_flags & PVF_NC)) {
- l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
- ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
- pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode;
+ pv->pv_flags &= ~(PVF_NC | PVF_MWC);
+ pmap_set_cache_entry(pv, pm, va, 1);
+ }
+ }
- if (l2pte_valid(pte)) {
- if (PV_BEEN_EXECD(pv->pv_flags)) {
- pmap_tlb_flushID_SE(pv->pv_pmap,
- pv->pv_va);
- } else
- if (PV_BEEN_REFD(pv->pv_flags)) {
- pmap_tlb_flushD_SE(pv->pv_pmap,
- pv->pv_va);
- }
- }
-
- *ptep = pte;
- PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
- }
+ if ((kwritable == 0) && (writable == 0)) {
+ pg->md.pvh_attrs &= ~PVF_MOD;
+ vm_page_flag_clear(pg, PG_WRITEABLE);
+ return;
}
}
@@ -1551,6 +1419,8 @@
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ if (maskbits & PVF_WRITE)
+ maskbits |= PVF_MOD;
/*
* Clear saved attributes (modify, reference)
*/
@@ -1567,6 +1437,21 @@
va = pv->pv_va;
pm = pv->pv_pmap;
oflags = pv->pv_flags;
+
+ if (!(oflags & maskbits)) {
+ if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_NC)) {
+ /* It is safe to re-enable cacheing here. */
+ PMAP_LOCK(pm);
+ l2b = pmap_get_l2_bucket(pm, va);
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ *ptep |= pte_l2_s_cache_mode;
+ PTE_SYNC(ptep);
+ PMAP_UNLOCK(pm);
+ pv->pv_flags &= ~(PVF_NC | PVF_MWC);
+
+ }
+ continue;
+ }
pv->pv_flags &= ~maskbits;
PMAP_LOCK(pm);
@@ -1584,16 +1469,16 @@
* Don't turn caching on again if this is a
* modified emulation. This would be
* inconsitent with the settings created by
- * pmap_vac_me_harder(). Otherwise, it's safe
+ * pmap_fix_cache(). Otherwise, it's safe
* to re-enable cacheing.
*
- * There's no need to call pmap_vac_me_harder()
+ * There's no need to call pmap_fix_cache()
* here: all pages are losing their write
* permission.
*/
if (maskbits & PVF_WRITE) {
npte |= pte_l2_s_cache_mode;
- pv->pv_flags &= ~PVF_NC;
+ pv->pv_flags &= ~(PVF_NC | PVF_MWC);
}
} else
if (opte & L2_S_PROT_W) {
@@ -1616,22 +1501,6 @@
/* make the pte read only */
npte &= ~L2_S_PROT_W;
-
- if (maskbits & PVF_WRITE) {
- /*
- * Keep alias accounting up to date
- */
- if (pv->pv_pmap == pmap_kernel()) {
- if (oflags & PVF_WRITE) {
- pg->md.krw_mappings--;
- pg->md.kro_mappings++;
- }
- } else
- if (oflags & PVF_WRITE) {
- pg->md.urw_mappings--;
- pg->md.uro_mappings++;
- }
- }
}
if (maskbits & PVF_REF) {
@@ -1728,17 +1597,6 @@
TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist);
pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
- if (pm == pmap_kernel()) {
- if (flags & PVF_WRITE)
- pg->md.krw_mappings++;
- else
- pg->md.kro_mappings++;
- } else {
- if (flags & PVF_WRITE)
- pg->md.urw_mappings++;
- else
- pg->md.uro_mappings++;
- }
pg->md.pv_list_count++;
if (pve->pv_flags & PVF_WIRED)
++pm->pm_stats.wired_count;
@@ -1808,27 +1666,22 @@
pg->md.pv_list_count--;
if (pg->md.pvh_attrs & PVF_MOD)
vm_page_dirty(pg);
- if (pm == pmap_kernel()) {
- if (pve->pv_flags & PVF_WRITE)
- pg->md.krw_mappings--;
- else
- pg->md.kro_mappings--;
- } else
- if (pve->pv_flags & PVF_WRITE)
- pg->md.urw_mappings--;
- else
- pg->md.uro_mappings--;
- if (TAILQ_FIRST(&pg->md.pv_list) == NULL ||
- (pg->md.krw_mappings == 0 && pg->md.urw_mappings == 0)) {
- pg->md.pvh_attrs &= ~PVF_MOD;
- if (TAILQ_FIRST(&pg->md.pv_list) == NULL)
- pg->md.pvh_attrs &= ~PVF_REF;
- vm_page_flag_clear(pg, PG_WRITEABLE);
+ if (TAILQ_FIRST(&pg->md.pv_list) == NULL)
+ pg->md.pvh_attrs &= ~PVF_REF;
+ else
+ vm_page_flag_set(pg, PG_REFERENCED);
+ if ((pve->pv_flags & PVF_NC) && ((pm == pmap_kernel()) ||
+ (pve->pv_flags & PVF_WRITE) || !(pve->pv_flags & PVF_MWC)))
+ pmap_fix_cache(pg, pm, 0);
+ else if (pve->pv_flags & PVF_WRITE) {
+ TAILQ_FOREACH(pve, &pg->md.pv_list, pv_list)
+ if (pve->pv_flags & PVF_WRITE)
+ break;
+ if (!pve) {
+ pg->md.pvh_attrs &= ~PVF_MOD;
+ vm_page_flag_clear(pg, PG_WRITEABLE);
+ }
}
- if (TAILQ_FIRST(&pg->md.pv_list))
- vm_page_flag_set(pg, PG_REFERENCED);
- if (pve->pv_flags & PVF_WRITE)
- pmap_vac_me_harder(pg, pm, 0);
}
static struct pv_entry *
@@ -1855,8 +1708,6 @@
*
* => caller should hold lock on vm_page [so that attrs can be adjusted]
* => caller should NOT adjust pmap's wire_count
- * => caller must call pmap_vac_me_harder() if writable status of a page
- * may have changed.
* => we return the old flags
*
* Modify a physical-virtual mapping in the pv table
@@ -1890,29 +1741,8 @@
--pm->pm_stats.wired_count;
}
- if ((flags ^ oflags) & PVF_WRITE) {
- if (pm == pmap_kernel()) {
- if (flags & PVF_WRITE) {
- pg->md.krw_mappings++;
- pg->md.kro_mappings--;
- } else {
- pg->md.kro_mappings++;
- pg->md.krw_mappings--;
- }
- } else
- if (flags & PVF_WRITE) {
- pg->md.urw_mappings++;
- pg->md.uro_mappings--;
- } else {
- pg->md.uro_mappings++;
- pg->md.urw_mappings--;
- }
- if (pg->md.krw_mappings == 0 && pg->md.urw_mappings == 0) {
- pg->md.pvh_attrs &= ~PVF_MOD;
- vm_page_flag_clear(pg, PG_WRITEABLE);
- }
- pmap_vac_me_harder(pg, pm, 0);
- }
+ if ((flags ^ oflags) & PVF_WRITE)
+ pmap_fix_cache(pg, pm, 0);
return (oflags);
}
@@ -2073,7 +1903,7 @@
/*
* Re-enable write permissions for the page. No need to call
- * pmap_vac_me_harder(), since this is just a
+ * pmap_fix_cache(), since this is just a
* modified-emulation fault, and the PVF_WRITE bit isn't
* changing. We've already set the cacheable bits based on
* the assumption that we can write to this page.
@@ -2787,6 +2617,7 @@
vm_page_lock_queues();
PMAP_LOCK(pmap);
+ cpu_idcache_wbinv_all();
for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
if (pv->pv_flags & PVF_WIRED) {
/* The page is wired, cannot remove it now. */
@@ -2813,7 +2644,6 @@
pmap_free_l2_bucket(pmap, l2b, 1);
}
vm_page_unlock_queues();
- cpu_idcache_wbinv_all();
cpu_tlb_flushID();
cpu_cpwait();
PMAP_UNLOCK(pmap);
@@ -3180,6 +3010,7 @@
if (TAILQ_EMPTY(&m->md.pv_list))
return;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+ pmap_remove_write(m);
curpm = vmspace_pmap(curproc->p_vmspace);
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
if (flush == FALSE && (pv->pv_pmap == curpm ||
@@ -3569,7 +3400,7 @@
if (m)
- pmap_vac_me_harder(m, pmap, va);
+ pmap_fix_cache(m, pmap, va);
}
}
@@ -3828,7 +3659,7 @@
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
pmap->pm_stats.resident_count = 1;
if (vector_page < KERNBASE) {
- pmap_enter(pmap, vector_page,
+ pmap_enter(pmap, vector_page,
VM_PROT_READ, PHYS_TO_VM_PAGE(systempage.pv_pa),
VM_PROT_READ, 1);
}
@@ -3867,25 +3698,20 @@
return ret_value;
}
-
/*
* Remove the given range of addresses from the specified map.
*
* It is assumed that the start and end are properly
* rounded to the page size.
*/
-#define PMAP_REMOVE_CLEAN_LIST_SIZE 3
+#define PMAP_REMOVE_CLEAN_LIST_SIZE 3
void
pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
struct l2_bucket *l2b;
vm_offset_t next_bucket;
pt_entry_t *ptep;
- u_int cleanlist_idx, total, cnt;
- struct {
- vm_offset_t va;
- pt_entry_t *pte;
- } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE];
+ u_int total;
u_int mappings, is_exec, is_refd;
int flushall = 0;
@@ -3896,11 +3722,6 @@
vm_page_lock_queues();
PMAP_LOCK(pm);
- if (!pmap_is_current(pm)) {
- cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
- } else
- cleanlist_idx = 0;
-
total = 0;
while (sva < eva) {
/*
@@ -3956,89 +3777,35 @@
}
}
- if (!l2pte_valid(pte)) {
- *ptep = 0;
- PTE_SYNC_CURRENT(pm, ptep);
- sva += PAGE_SIZE;
- ptep++;
- mappings++;
- continue;
- }
-
- if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
- /* Add to the clean list. */
- cleanlist[cleanlist_idx].pte = ptep;
- cleanlist[cleanlist_idx].va =
- sva | (is_exec & 1);
- cleanlist_idx++;
- } else
- if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
- /* Nuke everything if needed. */
- pmap_idcache_wbinv_all(pm);
- pmap_tlb_flushID(pm);
-
- /*
- * Roll back the previous PTE list,
- * and zero out the current PTE.
- */
- for (cnt = 0;
- cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
- *cleanlist[cnt].pte = 0;
+ if (l2pte_valid(pte) && pmap_is_current(pm)) {
+ if (total < PMAP_REMOVE_CLEAN_LIST_SIZE) {
+ total++;
+ if (is_exec) {
+ cpu_idcache_wbinv_range(sva,
+ PAGE_SIZE);
+ cpu_tlb_flushID_SE(sva);
+ } else if (is_refd) {
+ cpu_dcache_wbinv_range(sva,
+ PAGE_SIZE);
+ cpu_tlb_flushD_SE(sva);
+ }
+ } else if (total == PMAP_REMOVE_CLEAN_LIST_SIZE) {
+ /* flushall will also only get set for
+ * for a current pmap
>>> TRUNCATED FOR MAIL (1000 lines) <<<