PERFORCE change 146129 for review
John Baldwin
jhb at FreeBSD.org
Mon Jul 28 17:18:51 UTC 2008
http://perforce.freebsd.org/chv.cgi?CH=146129
Change 146129 by jhb at jhb_mutex on 2008/07/28 17:18:37
IFC @146123
Affected files ...
.. //depot/projects/smpng/sys/amd64/amd64/pmap.c#82 integrate
.. //depot/projects/smpng/sys/conf/files#216 integrate
.. //depot/projects/smpng/sys/contrib/ipfilter/netinet/ip_nat.c#18 integrate
.. //depot/projects/smpng/sys/dev/atkbdc/psm.c#11 integrate
.. //depot/projects/smpng/sys/dev/iicbus/ds1339.c#1 branch
.. //depot/projects/smpng/sys/dev/jme/if_jme.c#2 integrate
.. //depot/projects/smpng/sys/dev/wi/if_wi.c#88 integrate
.. //depot/projects/smpng/sys/i386/i386/pmap.c#125 integrate
.. //depot/projects/smpng/sys/kern/sched_4bsd.c#84 edit
.. //depot/projects/smpng/sys/kern/subr_lock.c#13 integrate
.. //depot/projects/smpng/sys/kern/uipc_usrreq.c#82 integrate
.. //depot/projects/smpng/sys/kern/vfs_subr.c#151 integrate
.. //depot/projects/smpng/sys/libkern/arc4random.c#7 integrate
.. //depot/projects/smpng/sys/net/bpf_buffer.c#3 integrate
.. //depot/projects/smpng/sys/net/route.c#36 integrate
.. //depot/projects/smpng/sys/net80211/ieee80211_hostap.c#2 integrate
.. //depot/projects/smpng/sys/netinet/ip_divert.c#57 integrate
.. //depot/projects/smpng/sys/netinet/raw_ip.c#69 integrate
.. //depot/projects/smpng/sys/netinet/udp_usrreq.c#82 integrate
.. //depot/projects/smpng/sys/netinet6/icmp6.c#43 integrate
.. //depot/projects/smpng/sys/netinet6/raw_ip6.c#49 integrate
.. //depot/projects/smpng/sys/netipsec/ipsec.c#25 integrate
.. //depot/projects/smpng/sys/netipx/ipx_input.c#21 integrate
.. //depot/projects/smpng/sys/netipx/ipx_usrreq.c#30 integrate
Differences ...
==== //depot/projects/smpng/sys/amd64/amd64/pmap.c#82 (text+ko) ====
@@ -77,7 +77,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/amd64/amd64/pmap.c,v 1.634 2008/07/18 22:05:51 alc Exp $");
+__FBSDID("$FreeBSD: src/sys/amd64/amd64/pmap.c,v 1.638 2008/07/28 04:59:48 alc Exp $");
/*
* Manages physical address maps.
@@ -3350,9 +3350,8 @@
* are taken, but the code works.
*/
void
-pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
- vm_object_t object, vm_pindex_t pindex,
- vm_size_t size)
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
+ vm_pindex_t pindex, vm_size_t size)
{
vm_offset_t va;
vm_page_t p, pdpg;
@@ -3500,7 +3499,7 @@
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
- vm_offset_t src_addr)
+ vm_offset_t src_addr)
{
vm_page_t free;
vm_offset_t addr;
@@ -4355,12 +4354,10 @@
}
int
-pmap_change_attr(va, size, mode)
- vm_offset_t va;
- vm_size_t size;
- int mode;
+pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{
vm_offset_t base, offset, tmpva;
+ pdp_entry_t *pdpe;
pd_entry_t *pde;
pt_entry_t *pte;
@@ -4368,32 +4365,53 @@
offset = va & PAGE_MASK;
size = roundup(offset + size, PAGE_SIZE);
- /* Only supported on kernel virtual addresses. */
- if (base <= VM_MAXUSER_ADDRESS)
+ /*
+ * Only supported on kernel virtual addresses, including the direct
+ * map but excluding the recursive map.
+ */
+ if (base < DMAP_MIN_ADDRESS)
return (EINVAL);
/*
- * XXX: We have to support tearing 2MB pages down into 4k pages if
- * needed here.
+ * Pages that aren't mapped aren't supported. Also break down 2MB pages
+ * into 4KB pages if required.
*/
- /* Pages that aren't mapped aren't supported. */
- for (tmpva = base; tmpva < (base + size); ) {
- pde = pmap_pde(kernel_pmap, tmpva);
- if (*pde == 0)
+ PMAP_LOCK(kernel_pmap);
+ for (tmpva = base; tmpva < base + size; ) {
+ pdpe = pmap_pdpe(kernel_pmap, tmpva);
+ if (*pdpe == 0 || (*pdpe & PG_PS)) {
+ PMAP_UNLOCK(kernel_pmap);
+ return (EINVAL);
+ }
+ pde = pmap_pdpe_to_pde(pdpe, tmpva);
+ if (*pde == 0) {
+ PMAP_UNLOCK(kernel_pmap);
return (EINVAL);
+ }
if (*pde & PG_PS) {
- /* Handle 2MB pages that are completely contained. */
- if (size >= NBPDR) {
+ /*
+ * If the current offset aligns with a 2MB page frame
+ * and there is at least 2MB left within the range, then
+ * we need not break down this page into 4KB pages.
+ */
+ if ((tmpva & PDRMASK) == 0 &&
+ tmpva + PDRMASK < base + size) {
tmpva += NBPDR;
continue;
}
- return (EINVAL);
+ if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) {
+ PMAP_UNLOCK(kernel_pmap);
+ return (ENOMEM);
+ }
}
pte = vtopte(tmpva);
- if (*pte == 0)
+ if (*pte == 0) {
+ PMAP_UNLOCK(kernel_pmap);
return (EINVAL);
+ }
tmpva += PAGE_SIZE;
}
+ PMAP_UNLOCK(kernel_pmap);
/*
* Ok, all the pages exist, so run through them updating their
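[Editor's note: a stand-alone sketch, plain user-space C rather than the kernel sources, of the containment test the rewritten pmap_change_attr() uses above. A 2MB mapping is left intact only when the current address is 2MB-aligned and the remaining range covers the whole large page; otherwise it has to be demoted into 4KB pages. The NBPDR/PDRMASK values are assumed to match amd64's 2MB superpages.]

#include <stdint.h>
#include <stdio.h>

#define NBPDR   (1ULL << 21)     /* bytes mapped by one 2MB page */
#define PDRMASK (NBPDR - 1)

static int
covers_whole_2mb_page(uint64_t tmpva, uint64_t base, uint64_t size)
{
	/* Aligned to a 2MB frame and at least 2MB left within [base, base+size). */
	return (tmpva & PDRMASK) == 0 && tmpva + PDRMASK < base + size;
}

int
main(void)
{
	uint64_t base = 0xffffff0000000000ULL;

	/* Fully covered: no demotion needed, the loop steps by NBPDR. */
	printf("%d\n", covers_whole_2mb_page(base, base, 4 * NBPDR));
	/* Range ends inside the large page: it must be demoted. */
	printf("%d\n", covers_whole_2mb_page(base, base, NBPDR / 2));
	return 0;
}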
==== //depot/projects/smpng/sys/conf/files#216 (text+ko) ====
@@ -1,4 +1,4 @@
-# $FreeBSD: src/sys/conf/files,v 1.1317 2008/07/25 14:31:00 ed Exp $
+# $FreeBSD: src/sys/conf/files,v 1.1318 2008/07/25 19:35:40 stas Exp $
#
# The long compile-with and dependency lines are required because of
# limitations in config: backslash-newline doesn't work in strings, and
@@ -697,6 +697,7 @@
dev/igb/e1000_82575.c optional igb | em \
compile-with "${NORMAL_C} -I$S/dev/igb"
dev/iicbus/ad7418.c optional ad7418
+dev/iicbus/ds1339.c optional ds1339
dev/iicbus/ds1672.c optional ds1672
dev/iicbus/icee.c optional icee
dev/iicbus/if_ic.c optional ic
==== //depot/projects/smpng/sys/contrib/ipfilter/netinet/ip_nat.c#18 (text+ko) ====
@@ -1,4 +1,4 @@
-/* $FreeBSD: src/sys/contrib/ipfilter/netinet/ip_nat.c,v 1.45 2008/07/24 12:35:05 darrenr Exp $ */
+/* $FreeBSD: src/sys/contrib/ipfilter/netinet/ip_nat.c,v 1.46 2008/07/26 19:46:00 darrenr Exp $ */
/*
* Copyright (C) 1995-2003 by Darren Reed.
@@ -117,7 +117,7 @@
#if !defined(lint)
static const char sccsid[] = "@(#)ip_nat.c 1.11 6/5/96 (C) 1995 Darren Reed";
-static const char rcsid[] = "@(#)$FreeBSD: src/sys/contrib/ipfilter/netinet/ip_nat.c,v 1.45 2008/07/24 12:35:05 darrenr Exp $";
+static const char rcsid[] = "@(#)$FreeBSD: src/sys/contrib/ipfilter/netinet/ip_nat.c,v 1.46 2008/07/26 19:46:00 darrenr Exp $";
/* static const char rcsid[] = "@(#)$Id: ip_nat.c,v 2.195.2.102 2007/10/16 10:08:10 darrenr Exp $"; */
#endif
@@ -2033,11 +2033,13 @@
* Standard port translation. Select next port.
*/
if (np->in_flags & IPN_SEQUENTIAL) {
- port = htons(np->in_pnext);
+ port = np->in_pnext;
} else {
port = ipf_random() % (ntohs(np->in_pmax) -
ntohs(np->in_pmin));
+ port += ntohs(np->in_pmin);
}
+ port = htons(port);
np->in_pnext++;
if (np->in_pnext > ntohs(np->in_pmax)) {
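[Editor's note: a minimal sketch, ordinary user-space C rather than the ipfilter sources, of the corrected NAT port selection above. The random case now offsets into [pmin, pmax) instead of [0, pmax - pmin), and htons() is applied to the result of either branch rather than only the sequential one. Names and the range are illustrative.]

#include <arpa/inet.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned short
pick_nat_port(int sequential, unsigned short next_host,
    unsigned short pmin_net, unsigned short pmax_net)
{
	unsigned short port;

	if (sequential)
		port = next_host;			/* already in host order */
	else
		port = rand() % (ntohs(pmax_net) - ntohs(pmin_net)) +
		    ntohs(pmin_net);			/* offset into the range */
	return (htons(port));				/* to network order once, at the end */
}

int
main(void)
{
	/* e.g. map into the range 40000-49999 */
	unsigned short p = pick_nat_port(0, 0, htons(40000), htons(50000));

	printf("%u\n", ntohs(p));
	return 0;
}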
==== //depot/projects/smpng/sys/dev/atkbdc/psm.c#11 (text+ko) ====
@@ -59,7 +59,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/dev/atkbdc/psm.c,v 1.97 2008/06/01 13:44:51 philip Exp $");
+__FBSDID("$FreeBSD: src/sys/dev/atkbdc/psm.c,v 1.98 2008/07/26 00:01:19 trhodes Exp $");
#include "opt_isa.h"
#include "opt_psm.h"
@@ -2112,26 +2112,34 @@
SYSCTL_NODE(_debug, OID_AUTO, psm, CTLFLAG_RD, 0, "ps/2 mouse");
SYSCTL_NODE(_hw, OID_AUTO, psm, CTLFLAG_RD, 0, "ps/2 mouse");
-SYSCTL_INT(_debug_psm, OID_AUTO, loglevel, CTLFLAG_RW, &verbose, 0, "");
+SYSCTL_INT(_debug_psm, OID_AUTO, loglevel, CTLFLAG_RW, &verbose, 0,
+ "Verbosity level");
static int psmhz = 20;
-SYSCTL_INT(_debug_psm, OID_AUTO, hz, CTLFLAG_RW, &psmhz, 0, "");
+SYSCTL_INT(_debug_psm, OID_AUTO, hz, CTLFLAG_RW, &psmhz, 0,
+ "Frequency of the softcallout (in hz)");
static int psmerrsecs = 2;
-SYSCTL_INT(_debug_psm, OID_AUTO, errsecs, CTLFLAG_RW, &psmerrsecs, 0, "");
+SYSCTL_INT(_debug_psm, OID_AUTO, errsecs, CTLFLAG_RW, &psmerrsecs, 0,
+ "Number of seconds during which packets will dropped after a sync error");
static int psmerrusecs = 0;
-SYSCTL_INT(_debug_psm, OID_AUTO, errusecs, CTLFLAG_RW, &psmerrusecs, 0, "");
+SYSCTL_INT(_debug_psm, OID_AUTO, errusecs, CTLFLAG_RW, &psmerrusecs, 0,
+ "Microseconds to add to psmerrsecs");
static int psmsecs = 0;
-SYSCTL_INT(_debug_psm, OID_AUTO, secs, CTLFLAG_RW, &psmsecs, 0, "");
+SYSCTL_INT(_debug_psm, OID_AUTO, secs, CTLFLAG_RW, &psmsecs, 0,
+ "Max number of seconds between soft interrupts");
static int psmusecs = 500000;
-SYSCTL_INT(_debug_psm, OID_AUTO, usecs, CTLFLAG_RW, &psmusecs, 0, "");
+SYSCTL_INT(_debug_psm, OID_AUTO, usecs, CTLFLAG_RW, &psmusecs, 0,
+ "Microseconds to add to psmsecs");
static int pkterrthresh = 2;
-SYSCTL_INT(_debug_psm, OID_AUTO, pkterrthresh, CTLFLAG_RW, &pkterrthresh,
- 0, "");
+SYSCTL_INT(_debug_psm, OID_AUTO, pkterrthresh, CTLFLAG_RW, &pkterrthresh, 0,
+ "Number of error packets allowed before reinitializing the mouse");
static int tap_threshold = PSM_TAP_THRESHOLD;
-SYSCTL_INT(_hw_psm, OID_AUTO, tap_threshold, CTLFLAG_RW, &tap_threshold, 0, "");
+SYSCTL_INT(_hw_psm, OID_AUTO, tap_threshold, CTLFLAG_RW, &tap_threshold, 0,
+ "Button tap threshold");
static int tap_timeout = PSM_TAP_TIMEOUT;
-SYSCTL_INT(_hw_psm, OID_AUTO, tap_timeout, CTLFLAG_RW, &tap_timeout, 0, "");
+SYSCTL_INT(_hw_psm, OID_AUTO, tap_timeout, CTLFLAG_RW, &tap_timeout, 0,
+ "Tap timeout for touchpads");
static void
psmintr(void *arg)
==== //depot/projects/smpng/sys/dev/jme/if_jme.c#2 (text+ko) ====
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/dev/jme/if_jme.c,v 1.2 2008/07/18 04:20:48 yongari Exp $");
+__FBSDID("$FreeBSD: src/sys/dev/jme/if_jme.c,v 1.3 2008/07/28 02:37:15 yongari Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -2346,7 +2346,10 @@
if (jme_newbuf(sc, rxd) != 0) {
ifp->if_iqdrops++;
/* Reuse buffer. */
- jme_discard_rxbuf(sc, sc->jme_cdata.jme_rx_cons);
+ for (; count < nsegs; count++) {
+ jme_discard_rxbuf(sc, cons);
+ JME_DESC_INC(cons, JME_RX_RING_CNT);
+ }
if (sc->jme_cdata.jme_rxhead != NULL) {
m_freem(sc->jme_cdata.jme_rxhead);
JME_RXCHAIN_RESET(sc);
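[Editor's note: a sketch of the multi-segment discard above, not driver code. On a jme_newbuf() failure the driver now recycles every descriptor of the partially received frame, advancing the consumer index with ring wrap-around, instead of recycling only the current descriptor. RING_CNT and the discard hook are placeholders.]

#include <stdio.h>

#define RING_CNT 256
#define DESC_INC(x, cnt) ((x) = ((x) + 1) % (cnt))	/* same idea as JME_DESC_INC */

static void
discard_rxbuf(int idx)
{
	printf("recycling descriptor %d\n", idx);
}

static int
discard_rest_of_frame(int cons, int count, int nsegs)
{
	/* Recycle the remaining descriptors that belong to this frame. */
	for (; count < nsegs; count++) {
		discard_rxbuf(cons);
		DESC_INC(cons, RING_CNT);
	}
	return (cons);	/* caller continues from the next descriptor */
}

int
main(void)
{
	/* A frame of 3 segments starting near the end of the ring wraps to 0, 1. */
	discard_rest_of_frame(255, 0, 3);
	return 0;
}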
==== //depot/projects/smpng/sys/dev/wi/if_wi.c#88 (text+ko) ====
@@ -60,7 +60,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/dev/wi/if_wi.c,v 1.218 2008/05/12 00:15:30 sam Exp $");
+__FBSDID("$FreeBSD: src/sys/dev/wi/if_wi.c,v 1.219 2008/07/26 17:04:30 imp Exp $");
#define WI_HERMES_STATS_WAR /* Work around stats counter bug. */
@@ -250,19 +250,6 @@
}
ic = ifp->if_l2com;
- /*
- * NB: no locking is needed here; don't put it here
- * unless you can prove it!
- */
- error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
- NULL, wi_intr, sc, &sc->wi_intrhand);
-
- if (error) {
- device_printf(dev, "bus_setup_intr() failed! (%d)\n", error);
- wi_free(dev);
- return error;
- }
-
sc->sc_firmware_type = WI_NOTYPE;
sc->wi_cmd_count = 500;
/* Reset the NIC. */
@@ -473,6 +460,17 @@
if (bootverbose)
ieee80211_announce(ic);
+ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE,
+ NULL, wi_intr, sc, &sc->wi_intrhand);
+ if (error) {
+ device_printf(dev, "bus_setup_intr() failed! (%d)\n", error);
+ bpfdetach(ifp);
+ ieee80211_ifdetach(ic);
+ if_free(sc->sc_ifp);
+ wi_free(dev);
+ return error;
+ }
+
return (0);
}
==== //depot/projects/smpng/sys/i386/i386/pmap.c#125 (text+ko) ====
@@ -75,7 +75,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/i386/i386/pmap.c,v 1.623 2008/07/18 22:05:51 alc Exp $");
+__FBSDID("$FreeBSD: src/sys/i386/i386/pmap.c,v 1.626 2008/07/28 05:41:35 alc Exp $");
/*
* Manages physical address maps.
@@ -3415,9 +3415,8 @@
* are taken, but the code works.
*/
void
-pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
- vm_object_t object, vm_pindex_t pindex,
- vm_size_t size)
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
+ vm_pindex_t pindex, vm_size_t size)
{
vm_page_t p;
@@ -3545,7 +3544,7 @@
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
- vm_offset_t src_addr)
+ vm_offset_t src_addr)
{
vm_page_t free;
vm_offset_t addr;
@@ -4422,10 +4421,7 @@
}
int
-pmap_change_attr(va, size, mode)
- vm_offset_t va;
- vm_size_t size;
- int mode;
+pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{
vm_offset_t base, offset, tmpva;
pt_entry_t *pte;
@@ -4436,8 +4432,10 @@
offset = va & PAGE_MASK;
size = roundup(offset + size, PAGE_SIZE);
- /* Only supported on kernel virtual addresses. */
- if (base <= VM_MAXUSER_ADDRESS)
+ /*
+ * Only supported on kernel virtual addresses above the recursive map.
+ */
+ if (base < VM_MIN_KERNEL_ADDRESS)
return (EINVAL);
/* 4MB pages and pages that aren't mapped aren't supported. */
==== //depot/projects/smpng/sys/kern/sched_4bsd.c#84 (text+ko) ====
@@ -33,7 +33,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/sched_4bsd.c,v 1.126 2008/05/25 01:44:58 jb Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/sched_4bsd.c,v 1.127 2008/07/28 15:52:02 jhb Exp $");
#include "opt_hwpmc_hooks.h"
#include "opt_sched.h"
@@ -127,6 +127,7 @@
#ifdef SMP
static int sched_pickcpu(struct thread *td);
static int forward_wakeup(int cpunum);
+static void kick_other_cpu(int pri, int cpuid);
#endif
static struct kproc_desc sched_kp = {
@@ -278,9 +279,7 @@
#ifdef PREEMPTION
struct thread *ctd;
int cpri, pri;
-#endif
-#ifdef PREEMPTION
/*
* The new thread should not preempt the current thread if any of the
* following conditions are true:
@@ -451,7 +450,7 @@
sx_slock(&allproc_lock);
FOREACH_PROC_IN_SYSTEM(p) {
PROC_LOCK(p);
- FOREACH_THREAD_IN_PROC(p, td) {
+ FOREACH_THREAD_IN_PROC(p, td) {
awake = 0;
thread_lock(td);
ts = td->td_sched;
@@ -498,11 +497,10 @@
#endif
ts->ts_cpticks = 0;
}
- /*
+ /*
* If there are ANY running threads in this process,
* then don't count it as sleeping.
-XXX this is broken
-
+ * XXX: this is broken.
*/
if (awake) {
if (ts->ts_slptime > 1) {
@@ -528,9 +526,9 @@
resetpriority(td);
resetpriority_thread(td);
thread_unlock(td);
- } /* end of thread loop */
+ }
PROC_UNLOCK(p);
- } /* end of process loop */
+ }
sx_sunlock(&allproc_lock);
}
@@ -625,6 +623,7 @@
}
/* External interfaces start here */
+
/*
* Very early in the boot some setup of scheduler-specific
* parts of proc0 and of some scheduler resources needs to be done.
@@ -653,7 +652,7 @@
#endif
}
-int
+int
sched_rr_interval(void)
{
if (sched_quantum == 0)
@@ -700,7 +699,7 @@
}
/*
- * charge childs scheduling cpu usage to parent.
+ * Charge child's scheduling CPU usage to parent.
*/
void
sched_exit(struct proc *p, struct thread *td)
@@ -775,7 +774,7 @@
sched_priority(struct thread *td, u_char prio)
{
CTR6(KTR_SCHED, "sched_prio: %p(%s) prio %d newprio %d by %p(%s)",
- td, td->td_name, td->td_priority, prio, curthread,
+ td, td->td_name, td->td_priority, prio, curthread,
curthread->td_name);
THREAD_LOCK_ASSERT(td, MA_OWNED);
@@ -914,7 +913,8 @@
p = td->td_proc;
THREAD_LOCK_ASSERT(td, MA_OWNED);
- /*
+
+ /*
* Switch to the sched lock to fix things up and pick
* a new thread.
*/
@@ -926,13 +926,14 @@
if ((p->p_flag & P_NOLOAD) == 0)
sched_load_rem();
- if (newtd)
+ if (newtd)
newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);
td->td_lastcpu = td->td_oncpu;
td->td_flags &= ~TDF_NEEDRESCHED;
td->td_owepreempt = 0;
td->td_oncpu = NOCPU;
+
/*
* At the last moment, if this thread is still marked RUNNING,
* then put it back on the run queue as it has not been suspended
@@ -953,12 +954,12 @@
}
}
if (newtd) {
- /*
+ /*
* The thread we are about to run needs to be counted
* as if it had been added to the run queue and selected.
* It came from:
* * A preemption
- * * An upcall
+ * * An upcall
* * A followon
*/
KASSERT((newtd->td_inhibitors == 0),
@@ -995,13 +996,14 @@
/*
* Where am I? What year is it?
* We are in the same thread that went to sleep above,
- * but any amount of time may have passed. All out context
+ * but any amount of time may have passed. All our context
* will still be available as will local variables.
* PCPU values however may have changed as we may have
* changed CPU so don't trust cached values of them.
* New threads will go to fork_exit() instead of here
* so if you change things here you may need to change
* things there too.
+ *
* If the thread above was exiting it will never wake
* up again here, so either it has saved everything it
* needed to, or the thread_wait() or wait() will
@@ -1040,14 +1042,11 @@
}
#ifdef SMP
-/* enable HTT_2 if you have a 2-way HTT cpu.*/
static int
-forward_wakeup(int cpunum)
+forward_wakeup(int cpunum)
{
- cpumask_t map, me, dontuse;
- cpumask_t map2;
struct pcpu *pc;
- cpumask_t id, map3;
+ cpumask_t dontuse, id, map, map2, map3, me;
mtx_assert(&sched_lock, MA_OWNED);
@@ -1061,14 +1060,13 @@
forward_wakeups_requested++;
-/*
- * check the idle mask we received against what we calculated before
- * in the old version.
- */
+ /*
+ * Check the idle mask we received against what we calculated
+ * before in the old version.
+ */
me = PCPU_GET(cpumask);
- /*
- * don't bother if we should be doing it ourself..
- */
+
+ /* Don't bother if we should be doing it ourself. */
if ((me & idle_cpus_mask) && (cpunum == NOCPU || me == (1 << cpunum)))
return (0);
@@ -1077,7 +1075,7 @@
if (forward_wakeup_use_loop) {
SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
id = pc->pc_cpumask;
- if ( (id & dontuse) == 0 &&
+ if ((id & dontuse) == 0 &&
pc->pc_curthread == pc->pc_idlethread) {
map3 |= id;
}
@@ -1088,18 +1086,19 @@
map = 0;
map = idle_cpus_mask & ~dontuse;
- /* If they are both on, compare and use loop if different */
+ /* If they are both on, compare and use loop if different. */
if (forward_wakeup_use_loop) {
if (map != map3) {
- printf("map (%02X) != map3 (%02X)\n",
- map, map3);
+ printf("map (%02X) != map3 (%02X)\n", map,
+ map3);
map = map3;
}
}
} else {
map = map3;
}
- /* If we only allow a specific CPU, then mask off all the others */
+
+ /* If we only allow a specific CPU, then mask off all the others. */
if (cpunum != NOCPU) {
KASSERT((cpunum <= mp_maxcpus),("forward_wakeup: bad cpunum."));
map &= (1 << cpunum);
@@ -1112,7 +1111,7 @@
}
}
- /* set only one bit */
+ /* Set only one bit. */
if (forward_wakeup_use_single) {
map = map & ((~map) + 1);
}
@@ -1126,23 +1125,21 @@
printf("forward_wakeup: Idle processor not found\n");
return (0);
}
-#endif
-#ifdef SMP
-static void kick_other_cpu(int pri,int cpuid);
-
static void
-kick_other_cpu(int pri,int cpuid)
-{
- struct pcpu * pcpu = pcpu_find(cpuid);
- int cpri = pcpu->pc_curthread->td_priority;
+kick_other_cpu(int pri, int cpuid)
+{
+ struct pcpu *pcpu;
+ int cpri;
+ pcpu = pcpu_find(cpuid);
if (idle_cpus_mask & pcpu->pc_cpumask) {
forward_wakeups_delivered++;
ipi_selected(pcpu->pc_cpumask, IPI_AST);
return;
}
+ cpri = pcpu->pc_curthread->td_priority;
if (pri >= cpri)
return;
@@ -1157,16 +1154,12 @@
#endif /* defined(IPI_PREEMPTION) && defined(PREEMPTION) */
pcpu->pc_curthread->td_flags |= TDF_NEEDRESCHED;
- ipi_selected( pcpu->pc_cpumask , IPI_AST);
+ ipi_selected(pcpu->pc_cpumask, IPI_AST);
return;
}
#endif /* SMP */
#ifdef SMP
-/*
- * Find a CPU for this thread to run on. If it can run on any CPU, then
- * this returns NOCPU.
- */
static int
sched_pickcpu(struct thread *td)
{
@@ -1212,6 +1205,7 @@
CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
td, td->td_name, td->td_priority, curthread,
curthread->td_name);
+
/*
* Now that the thread is moving to the run-queue, set the lock
* to the scheduler's lock.
@@ -1227,35 +1221,39 @@
ts->ts_runq = &runq_pcpu[cpu];
single_cpu = 1;
CTR3(KTR_RUNQ,
- "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
+ "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
+ cpu);
} else if (td->td_flags & TDF_BOUND) {
- /* Find CPU from bound runq */
- KASSERT(SKE_RUNQ_PCPU(ts),("sched_add: bound td_sched not on cpu runq"));
+ /* Find CPU from bound runq. */
+ KASSERT(SKE_RUNQ_PCPU(ts),
+ ("sched_add: bound td_sched not on cpu runq"));
cpu = ts->ts_runq - &runq_pcpu[0];
single_cpu = 1;
CTR3(KTR_RUNQ,
- "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
+ "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
+ cpu);
} else if (ts->ts_flags & TSF_AFFINITY) {
/* Find a valid CPU for our cpuset */
cpu = sched_pickcpu(td);
ts->ts_runq = &runq_pcpu[cpu];
single_cpu = 1;
CTR3(KTR_RUNQ,
- "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td, cpu);
+ "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
+ cpu);
} else {
CTR2(KTR_RUNQ,
- "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts, td);
+ "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
+ td);
cpu = NOCPU;
ts->ts_runq = &runq;
}
-
+
if (single_cpu && (cpu != PCPU_GET(cpuid))) {
- kick_other_cpu(td->td_priority,cpu);
+ kick_other_cpu(td->td_priority, cpu);
} else {
-
if (!single_cpu) {
cpumask_t me = PCPU_GET(cpumask);
- int idle = idle_cpus_mask & me;
+ cpumask_t idle = idle_cpus_mask & me;
if (!idle && ((flags & SRQ_INTR) == 0) &&
(idle_cpus_mask & ~(hlt_cpus_mask | me)))
@@ -1269,7 +1267,7 @@
maybe_resched(td);
}
}
-
+
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_load_add();
runq_add(ts->ts_runq, td, flags);
@@ -1279,6 +1277,7 @@
#else /* SMP */
{
struct td_sched *ts;
+
ts = td->td_sched;
THREAD_LOCK_ASSERT(td, MA_OWNED);
KASSERT((td->td_inhibitors == 0),
@@ -1290,6 +1289,7 @@
CTR5(KTR_SCHED, "sched_add: %p(%s) prio %d by %p(%s)",
td, td->td_name, td->td_priority, curthread,
curthread->td_name);
+
/*
* Now that the thread is moving to the run-queue, set the lock
* to the scheduler's lock.
@@ -1302,21 +1302,19 @@
CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
ts->ts_runq = &runq;
- /*
- * If we are yielding (on the way out anyhow)
- * or the thread being saved is US,
- * then don't try be smart about preemption
- * or kicking off another CPU
- * as it won't help and may hinder.
- * In the YIEDLING case, we are about to run whoever is
- * being put in the queue anyhow, and in the
- * OURSELF case, we are puting ourself on the run queue
- * which also only happens when we are about to yield.
+ /*
+ * If we are yielding (on the way out anyhow) or the thread
+ * being saved is US, then don't try be smart about preemption
+ * or kicking off another CPU as it won't help and may hinder.
+ * In the YIEDLING case, we are about to run whoever is being
+ * put in the queue anyhow, and in the OURSELF case, we are
+ * puting ourself on the run queue which also only happens
+ * when we are about to yield.
*/
- if((flags & SRQ_YIELDING) == 0) {
+ if ((flags & SRQ_YIELDING) == 0) {
if (maybe_preempt(td))
return;
- }
+ }
if ((td->td_proc->p_flag & P_NOLOAD) == 0)
sched_load_add();
runq_add(ts->ts_runq, td, flags);
@@ -1350,8 +1348,8 @@
}
/*
- * Select threads to run.
- * Notice that the running threads still consume a slot.
+ * Select threads to run. Note that running threads still consume a
+ * slot.
*/
struct thread *
sched_choose(void)
@@ -1367,14 +1365,14 @@
td = runq_choose_fuzz(&runq, runq_fuzz);
tdcpu = runq_choose(&runq_pcpu[PCPU_GET(cpuid)]);
- if (td == NULL ||
- (tdcpu != NULL &&
+ if (td == NULL ||
+ (tdcpu != NULL &&
tdcpu->td_priority < td->td_priority)) {
CTR2(KTR_RUNQ, "choosing td %p from pcpu runq %d", tdcpu,
PCPU_GET(cpuid));
td = tdcpu;
rq = &runq_pcpu[PCPU_GET(cpuid)];
- } else {
+ } else {
CTR1(KTR_RUNQ, "choosing td_sched %p from main runq", td);
}
@@ -1394,7 +1392,7 @@
KASSERT(td->td_flags & TDF_INMEM,
("sched_choose: thread swapped out"));
return (td);
- }
+ }
return (PCPU_GET(idlethread));
}
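[Editor's note: a sketch of the "set only one bit" step in forward_wakeup() above, as plain C rather than scheduler code. map & (~map + 1), equivalently map & -map, isolates the lowest set bit of a CPU mask so at most one idle CPU receives the wakeup IPI. The 32-bit cpumask_t mirrors the era's type.]

#include <stdio.h>

typedef unsigned int cpumask_t;		/* 32-bit CPU mask */

static cpumask_t
lowest_cpu(cpumask_t map)
{
	return (map & (~map + 1));	/* keep only the least significant set bit */
}

int
main(void)
{
	cpumask_t idle = 0x0000002c;	/* CPUs 2, 3 and 5 idle */

	printf("0x%08x -> 0x%08x\n", idle, lowest_cpu(idle));	/* -> 0x00000004 */
	return 0;
}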
==== //depot/projects/smpng/sys/kern/subr_lock.c#13 (text+ko) ====
@@ -33,7 +33,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/subr_lock.c,v 1.23 2008/05/15 20:10:06 attilio Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/subr_lock.c,v 1.24 2008/07/27 21:45:20 kmacy Exp $");
#include "opt_ddb.h"
#include "opt_mprof.h"
@@ -152,6 +152,7 @@
const char *name;
int line;
int ticks;
+ uintmax_t cnt_wait_max;
uintmax_t cnt_max;
uintmax_t cnt_tot;
uintmax_t cnt_wait;
@@ -267,8 +268,8 @@
for (p = lp->file; p != NULL && strncmp(p, "../", 3) == 0; p += 3);
sbuf_printf(sb,
- "%6ju %12ju %12ju %11ju %5ju %5ju %12ju %12ju %s:%d (%s:%s)\n",
- lp->cnt_max / 1000, lp->cnt_tot / 1000,
+ "%8ju %9ju %11ju %11ju %11ju %6ju %6ju %2ju %6ju %s:%d (%s:%s)\n",
+ lp->cnt_max / 1000, lp->cnt_wait_max / 1000, lp->cnt_tot / 1000,
lp->cnt_wait / 1000, lp->cnt_cur,
lp->cnt_cur == 0 ? (uintmax_t)0 :
lp->cnt_tot / (lp->cnt_cur * 1000),
@@ -304,6 +305,8 @@
l->ticks = t;
if (l->cnt_max > dst->cnt_max)
dst->cnt_max = l->cnt_max;
+ if (l->cnt_wait_max > dst->cnt_wait_max)
+ dst->cnt_wait_max = l->cnt_wait_max;
dst->cnt_tot += l->cnt_tot;
dst->cnt_wait += l->cnt_wait;
dst->cnt_cur += l->cnt_cur;
@@ -344,8 +347,8 @@
retry_sbufops:
sb = sbuf_new(NULL, NULL, LPROF_SBUF_SIZE * multiplier, SBUF_FIXEDLEN);
- sbuf_printf(sb, "\n%6s %12s %12s %11s %5s %5s %12s %12s %s\n",
- "max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
+ sbuf_printf(sb, "\n%8s %9s %11s %11s %11s %6s %6s %2s %6s %s\n",
+ "max", "wait_max", "total", "wait_total", "count", "avg", "wait_avg", "cnt_hold", "cnt_lock", "name");
enabled = lock_prof_enable;
lock_prof_enable = 0;
pause("lpreset", hz / 10);
@@ -542,6 +545,8 @@
*/
if (holdtime > lp->cnt_max)
lp->cnt_max = holdtime;
+ if (l->lpo_waittime > lp->cnt_wait_max)
+ lp->cnt_wait_max = l->lpo_waittime;
lp->cnt_tot += holdtime;
lp->cnt_wait += l->lpo_waittime;
lp->cnt_contest_locking += l->lpo_contest_locking;
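[Editor's note: a sketch of the new cnt_wait_max bookkeeping above, not the subr_lock.c sources. Each acquisition updates a per-bucket maximum wait time, and merging per-CPU buckets takes the larger of the two maxima while totals are summed. Field names follow the diff; everything else is illustrative.]

#include <inttypes.h>
#include <stdio.h>

struct lock_prof {
	uintmax_t cnt_wait_max;		/* longest single wait */
	uintmax_t cnt_wait;		/* total time spent waiting */
};

static void
record_wait(struct lock_prof *lp, uintmax_t waittime)
{
	if (waittime > lp->cnt_wait_max)
		lp->cnt_wait_max = waittime;
	lp->cnt_wait += waittime;
}

static void
merge(struct lock_prof *dst, const struct lock_prof *src)
{
	if (src->cnt_wait_max > dst->cnt_wait_max)
		dst->cnt_wait_max = src->cnt_wait_max;
	dst->cnt_wait += src->cnt_wait;
}

int
main(void)
{
	struct lock_prof cpu0 = {0, 0}, cpu1 = {0, 0}, total = {0, 0};

	record_wait(&cpu0, 120);
	record_wait(&cpu0, 40);
	record_wait(&cpu1, 300);
	merge(&total, &cpu0);
	merge(&total, &cpu1);
	printf("wait_max=%ju wait_total=%ju\n", total.cnt_wait_max, total.cnt_wait);
	return 0;
}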
==== //depot/projects/smpng/sys/kern/uipc_usrreq.c#82 (text+ko) ====
@@ -56,7 +56,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/uipc_usrreq.c,v 1.215 2008/07/03 23:26:10 emaste Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/uipc_usrreq.c,v 1.216 2008/07/26 00:55:35 trhodes Exp $");
#include "opt_ddb.h"
#include "opt_mac.h"
@@ -139,14 +139,15 @@
SYSCTL_NODE(_net_local, SOCK_DGRAM, dgram, CTLFLAG_RW, 0, "SOCK_DGRAM");
SYSCTL_ULONG(_net_local_stream, OID_AUTO, sendspace, CTLFLAG_RW,
- &unpst_sendspace, 0, "");
+ &unpst_sendspace, 0, "Default stream send space.");
SYSCTL_ULONG(_net_local_stream, OID_AUTO, recvspace, CTLFLAG_RW,
- &unpst_recvspace, 0, "");
+ &unpst_recvspace, 0, "Default stream receive space.");
SYSCTL_ULONG(_net_local_dgram, OID_AUTO, maxdgram, CTLFLAG_RW,
- &unpdg_sendspace, 0, "");
+ &unpdg_sendspace, 0, "Default datagram send space.");
SYSCTL_ULONG(_net_local_dgram, OID_AUTO, recvspace, CTLFLAG_RW,
- &unpdg_recvspace, 0, "");
-SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0, "");
+ &unpdg_recvspace, 0, "Default datagram receive space.");
+SYSCTL_INT(_net_local, OID_AUTO, inflight, CTLFLAG_RD, &unp_rights, 0,
+ "File descriptors in flight.");
/*-
* Locking and synchronization:
@@ -1969,10 +1970,12 @@
}
static int unp_recycled;
-SYSCTL_INT(_net_local, OID_AUTO, recycled, CTLFLAG_RD, &unp_recycled, 0, "");
+SYSCTL_INT(_net_local, OID_AUTO, recycled, CTLFLAG_RD, &unp_recycled, 0,
+ "Number of unreachable sockets claimed by the garbage collector.");
static int unp_taskcount;
-SYSCTL_INT(_net_local, OID_AUTO, taskcount, CTLFLAG_RD, &unp_taskcount, 0, "");
+SYSCTL_INT(_net_local, OID_AUTO, taskcount, CTLFLAG_RD, &unp_taskcount, 0,
+ "Number of times the garbage collector has run.");
static void
unp_gc(__unused void *arg, int pending)
==== //depot/projects/smpng/sys/kern/vfs_subr.c#151 (text+ko) ====
@@ -39,7 +39,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/vfs_subr.c,v 1.733 2008/07/21 23:01:09 attilio Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/vfs_subr.c,v 1.735 2008/07/27 11:48:15 pjd Exp $");
#include "opt_ddb.h"
#include "opt_mac.h"
@@ -2282,7 +2282,7 @@
vinactive(struct vnode *vp, struct thread *td)
{
- ASSERT_VOP_LOCKED(vp, "vinactive");
+ ASSERT_VOP_ELOCKED(vp, "vinactive");
ASSERT_VI_LOCKED(vp, "vinactive");
VNASSERT((vp->v_iflag & VI_DOINGINACT) == 0, vp,
("vinactive: recursed on VI_DOINGINACT"));
@@ -2439,7 +2439,7 @@
{
int recycled;
- ASSERT_VOP_LOCKED(vp, "vrecycle");
+ ASSERT_VOP_ELOCKED(vp, "vrecycle");
recycled = 0;
VI_LOCK(vp);
if (vp->v_usecount == 0) {
@@ -2474,7 +2474,7 @@
struct mount *mp;
CTR1(KTR_VFS, "vgonel: vp %p", vp);
- ASSERT_VOP_LOCKED(vp, "vgonel");
+ ASSERT_VOP_ELOCKED(vp, "vgonel");
ASSERT_VI_LOCKED(vp, "vgonel");
VNASSERT(vp->v_holdcnt, vp,
("vgonel: vp %p has no reference.", vp));
@@ -3581,7 +3581,8 @@
* This only exists to supress warnings from unlocked specfs accesses. It is
* no longer ok to have an unlocked VFS.
*/
-#define IGNORE_LOCK(vp) ((vp)->v_type == VCHR || (vp)->v_type == VBAD)
+#define IGNORE_LOCK(vp) (panicstr != NULL || (vp) == NULL || \
+ (vp)->v_type == VCHR || (vp)->v_type == VBAD)
int vfs_badlock_ddb = 1; /* Drop into debugger on violation. */
SYSCTL_INT(_debug, OID_AUTO, vfs_badlock_ddb, CTLFLAG_RW, &vfs_badlock_ddb, 0, "");
@@ -3631,7 +3632,7 @@
assert_vop_locked(struct vnode *vp, const char *str)
{
- if (vp && !IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == 0)
+ if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == 0)
vfs_badlock("is not locked but should be", str, vp);
}
@@ -3639,8 +3640,7 @@
assert_vop_unlocked(struct vnode *vp, const char *str)
{
- if (vp && !IGNORE_LOCK(vp) &&
- VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
+ if (!IGNORE_LOCK(vp) && VOP_ISLOCKED(vp) == LK_EXCLUSIVE)
vfs_badlock("is locked but should not be", str, vp);
}
@@ -3648,8 +3648,7 @@
>>> TRUNCATED FOR MAIL (1000 lines) <<<