PERFORCE change 61580 for review
Peter Wemm
peter at FreeBSD.org
Wed Sep 15 18:17:40 PDT 2004
http://perforce.freebsd.org/chv.cgi?CH=61580
Change 61580 by peter at peter_daintree on 2004/09/16 01:16:45
IFC @61578
Affected files ...
.. //depot/projects/hammer/sys/dev/sound/pci/fm801.c#9 integrate
.. //depot/projects/hammer/sys/fs/nwfs/nwfs_io.c#10 integrate
.. //depot/projects/hammer/sys/fs/smbfs/smbfs_io.c#12 integrate
.. //depot/projects/hammer/sys/i386/i386/gdb_machdep.c#2 integrate
.. //depot/projects/hammer/sys/i386/include/db_machdep.h#3 integrate
.. //depot/projects/hammer/sys/kern/vfs_bio.c#38 integrate
.. //depot/projects/hammer/sys/kern/vfs_init.c#10 integrate
.. //depot/projects/hammer/sys/kern/vfs_subr.c#52 integrate
.. //depot/projects/hammer/sys/netinet/ip_input.c#40 integrate
.. //depot/projects/hammer/sys/nfs4client/nfs4_vnops.c#7 integrate
.. //depot/projects/hammer/sys/nfsclient/nfs_bio.c#20 integrate
.. //depot/projects/hammer/sys/nfsclient/nfs_vnops.c#22 integrate
.. //depot/projects/hammer/sys/sys/buf.h#13 integrate
.. //depot/projects/hammer/tools/lib32/build32.sh#4 integrate
.. //depot/projects/hammer/tools/tools/genericize/genericize.pl#2 integrate
Differences ...
==== //depot/projects/hammer/sys/dev/sound/pci/fm801.c#9 (text+ko) ====
@@ -29,7 +29,7 @@
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
-SND_DECLARE_FILE("$FreeBSD: src/sys/dev/sound/pci/fm801.c,v 1.23 2004/07/16 03:59:27 tanimura Exp $");
+SND_DECLARE_FILE("$FreeBSD: src/sys/dev/sound/pci/fm801.c,v 1.24 2004/09/15 23:47:17 sobomax Exp $");
#define PCI_VENDOR_FORTEMEDIA 0x1319
#define PCI_DEVICE_FORTEMEDIA1 0x08011319
@@ -700,49 +700,11 @@
static int
fm801_pci_probe( device_t dev )
{
- u_int32_t data;
- int id, regtype, regid, result;
- struct resource *reg;
- bus_space_tag_t st;
- bus_space_handle_t sh;
+ int id;
- result = ENXIO;
-
if ((id = pci_get_devid(dev)) == PCI_DEVICE_FORTEMEDIA1 ) {
- data = pci_read_config(dev, PCIR_COMMAND, 2);
- data |= (PCIM_CMD_PORTEN|PCIM_CMD_BUSMASTEREN);
- pci_write_config(dev, PCIR_COMMAND, data, 2);
- data = pci_read_config(dev, PCIR_COMMAND, 2);
-
- regid = PCIR_BAR(0);
- regtype = SYS_RES_IOPORT;
- reg = bus_alloc_resource_any(dev, regtype, &regid, RF_ACTIVE);
-
- if (reg == NULL)
- return ENXIO;
-
- st = rman_get_bustag(reg);
- sh = rman_get_bushandle(reg);
- /*
- * XXX: quick check that device actually has sound capabilities.
- * The problem is that some cards built around FM801 chip only
- * have radio tuner onboard, but no sound capabilities. There
- * is no "official" way to quickly check this, because all
- * IDs are exactly the same. The only difference is 0x28
- * device control register, described in FM801 specification
- * as "SRC/Mixer Test Control/DFC Status", but without
- * any more detailed explanation. According to specs, and
- * available sample cards (SF256-PCP-R and SF256-PCS-R) its
- * power-on value should be `0', while on AC97-less tuner
- * card (SF64-PCR) it was 0x80.
- */
- if (bus_space_read_1(st, sh, 0x28) == 0) {
- device_set_desc(dev,
- "Forte Media FM801 Audio Controller");
- result = 0;
- }
-
- bus_release_resource(dev, regtype, regid, reg);
+ device_set_desc(dev, "Forte Media FM801 Audio Controller");
+ return 0;
}
/*
if ((id = pci_get_devid(dev)) == PCI_DEVICE_FORTEMEDIA2 ) {
@@ -750,7 +712,7 @@
return ENXIO;
}
*/
- return (result);
+ return ENXIO;
}
static struct resource *
==== //depot/projects/hammer/sys/fs/nwfs/nwfs_io.c#10 (text+ko) ====
@@ -29,7 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: src/sys/fs/nwfs/nwfs_io.c,v 1.35 2004/09/07 08:53:03 phk Exp $
+ * $FreeBSD: src/sys/fs/nwfs/nwfs_io.c,v 1.36 2004/09/15 21:49:20 phk Exp $
*
*/
#include <sys/param.h>
@@ -328,9 +328,7 @@
uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
uiop->uio_rw = UIO_WRITE;
- bp->b_flags |= B_WRITEINPROG;
error = ncp_write(NWFSTOCONN(nmp), &np->n_fh, uiop, cr);
- bp->b_flags &= ~B_WRITEINPROG;
/*
* For an interrupted write, the buffer is still valid
==== //depot/projects/hammer/sys/fs/smbfs/smbfs_io.c#12 (text+ko) ====
@@ -29,7 +29,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: src/sys/fs/smbfs/smbfs_io.c,v 1.28 2004/09/07 08:53:28 phk Exp $
+ * $FreeBSD: src/sys/fs/smbfs/smbfs_io.c,v 1.29 2004/09/15 21:49:21 phk Exp $
*
*/
#include <sys/param.h>
@@ -353,9 +353,7 @@
uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
uiop->uio_rw = UIO_WRITE;
- bp->b_flags |= B_WRITEINPROG;
error = smb_write(smp->sm_share, np->n_fid, uiop, &scred);
- bp->b_flags &= ~B_WRITEINPROG;
/*
* For an interrupted write, the buffer is still valid
==== //depot/projects/hammer/sys/i386/i386/gdb_machdep.c#2 (text+ko) ====
@@ -25,7 +25,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/i386/i386/gdb_machdep.c,v 1.1 2004/07/10 17:47:21 marcel Exp $");
+__FBSDID("$FreeBSD: src/sys/i386/i386/gdb_machdep.c,v 1.2 2004/09/15 23:26:49 julian Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -37,6 +37,8 @@
#include <machine/gdb_machdep.h>
#include <machine/pcb.h>
#include <machine/trap.h>
+#include <machine/frame.h>
+#include <machine/endian.h>
#include <gdb/gdb.h>
@@ -45,6 +47,14 @@
{
*regsz = gdb_cpu_regsz(regnum);
+
+ if (kdb_thread == curthread) {
+ switch (regnum) {
+ case 0: return (&kdb_frame->tf_eax);
+ case 1: return (&kdb_frame->tf_ecx);
+ case 2: return (&kdb_frame->tf_edx);
+ }
+ }
switch (regnum) {
case 3: return (&kdb_thrctx->pcb_ebx);
case 4: return (&kdb_thrctx->pcb_esp);
@@ -60,8 +70,12 @@
gdb_cpu_setreg(int regnum, register_t val)
{
+ val = __bswap32(val);
switch (regnum) {
- case GDB_REG_PC: kdb_thrctx->pcb_eip = val; break;
+ case GDB_REG_PC:
+ kdb_thrctx->pcb_eip = val;
+ if (kdb_thread == curthread)
+ kdb_frame->tf_eip = val;
}
}
==== //depot/projects/hammer/sys/i386/include/db_machdep.h#3 (text+ko) ====
@@ -23,7 +23,7 @@
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*
- * $FreeBSD: src/sys/i386/include/db_machdep.h,v 1.18 2004/07/10 23:47:19 marcel Exp $
+ * $FreeBSD: src/sys/i386/include/db_machdep.h,v 1.19 2004/09/15 23:26:49 julian Exp $
*/
#ifndef _MACHINE_DB_MACHDEP_H_
@@ -41,9 +41,17 @@
#define BKPT_SIZE (1) /* size of breakpoint inst */
#define BKPT_SET(inst) (BKPT_INST)
-#define BKPT_SKIP kdb_frame->tf_eip += 1
+#define BKPT_SKIP \
+do { \
+ kdb_frame->tf_eip += 1; \
+ kdb_thrctx->pcb_eip += 1; \
+} while(0)
-#define FIXUP_PC_AFTER_BREAK kdb_frame->tf_eip -= 1;
+#define FIXUP_PC_AFTER_BREAK \
+do { \
+ kdb_frame->tf_eip -= 1; \
+ kdb_thrctx->pcb_eip -= 1; \
+} while(0);
#define db_clear_single_step kdb_cpu_clear_singlestep
#define db_set_single_step kdb_cpu_set_singlestep
==== //depot/projects/hammer/sys/kern/vfs_bio.c#38 (text+ko) ====
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/kern/vfs_bio.c,v 1.445 2004/09/13 06:50:41 phk Exp $");
+__FBSDID("$FreeBSD: src/sys/kern/vfs_bio.c,v 1.448 2004/09/15 21:49:21 phk Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -76,13 +76,13 @@
static struct proc *bufdaemonproc;
-static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
+static void vm_hold_free_pages(struct buf *bp, vm_offset_t from,
vm_offset_t to);
-static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
+static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
vm_offset_t to);
static void vfs_page_set_valid(struct buf *bp, vm_ooffset_t off,
int pageno, vm_page_t m);
-static void vfs_clean_pages(struct buf * bp);
+static void vfs_clean_pages(struct buf *bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);
static void vfs_backgroundwritedone(struct buf *bp);
@@ -90,7 +90,7 @@
daddr_t lblkno, daddr_t blkno);
static int flushbufqueues(int flushdeps);
static void buf_daemon(void);
-void bremfreel(struct buf * bp);
+void bremfreel(struct buf *bp);
int vmiodirenable = TRUE;
SYSCTL_INT(_vfs, OID_AUTO, vmiodirenable, CTLFLAG_RW, &vmiodirenable, 0,
@@ -266,6 +266,7 @@
static __inline void
numdirtywakeup(int level)
{
+
if (numdirtybuffers <= level) {
mtx_lock(&nblock);
if (needsbuffer & VFS_BIO_NEED_DIRTYFLUSH) {
@@ -288,6 +289,7 @@
static __inline void
bufspacewakeup(void)
{
+
/*
* If someone is waiting for BUF space, wake them up. Even
* though we haven't freed the kva space yet, the waiting
@@ -308,6 +310,7 @@
static __inline void
runningbufwakeup(struct buf *bp)
{
+
if (bp->b_runningbufspace) {
atomic_subtract_int(&runningbufspace, bp->b_runningbufspace);
bp->b_runningbufspace = 0;
@@ -332,6 +335,7 @@
static __inline void
bufcountwakeup(void)
{
+
atomic_add_int(&numfreebuffers, 1);
mtx_lock(&nblock);
if (needsbuffer) {
@@ -361,6 +365,7 @@
static __inline void
waitrunningbufspace(void)
{
+
mtx_lock(&rbreqlock);
while (runningbufspace > hirunningspace) {
++runningbufreq;
@@ -383,6 +388,7 @@
vm_ooffset_t foff, vm_offset_t off, vm_offset_t size,
vm_page_t m)
{
+
GIANT_REQUIRED;
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
@@ -398,6 +404,7 @@
void
bd_wakeup(int dirtybuflevel)
{
+
mtx_lock(&bdlock);
if (bd_request == 0 && numdirtybuffers >= dirtybuflevel) {
bd_request = 1;
@@ -414,6 +421,7 @@
void
bd_speedup(void)
{
+
bd_wakeup(1);
}
@@ -426,6 +434,7 @@
caddr_t
kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est)
{
+
/*
* physmem_est is in pages. Convert it to kilobytes (assumes
* PAGE_SIZE is >= 1K)
@@ -603,8 +612,9 @@
* Since this call frees up buffer space, we call bufspacewakeup().
*/
static void
-bfreekva(struct buf * bp)
+bfreekva(struct buf *bp)
{
+
GIANT_REQUIRED;
if (bp->b_kvasize) {
@@ -625,15 +635,16 @@
* Remove the buffer from the appropriate free list.
*/
void
-bremfree(struct buf * bp)
+bremfree(struct buf *bp)
{
+
mtx_lock(&bqlock);
bremfreel(bp);
mtx_unlock(&bqlock);
}
void
-bremfreel(struct buf * bp)
+bremfreel(struct buf *bp)
{
int s = splbio();
int old_qindex = bp->b_qindex;
@@ -678,7 +689,7 @@
*/
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
- struct buf ** bpp)
+ struct buf **bpp)
{
return (breadn(vp, blkno, size, 0, 0, 0, cred, bpp));
@@ -693,7 +704,7 @@
int
breadn(struct vnode * vp, daddr_t blkno, int size,
daddr_t * rablkno, int *rabsize,
- int cnt, struct ucred * cred, struct buf ** bpp)
+ int cnt, struct ucred * cred, struct buf **bpp)
{
struct buf *bp, *rabp;
int i;
@@ -763,7 +774,7 @@
* here.
*/
int
-bwrite(struct buf * bp)
+bwrite(struct buf *bp)
{
KASSERT(bp->b_op != NULL && bp->b_op->bop_write != NULL,
@@ -772,7 +783,7 @@
}
static int
-ibwrite(struct buf * bp)
+ibwrite(struct buf *bp)
{
int oldflags, s;
struct buf *newbp;
@@ -866,7 +877,7 @@
bp->b_flags &= ~B_DONE;
bp->b_ioflags &= ~BIO_ERROR;
- bp->b_flags |= B_WRITEINPROG | B_CACHE;
+ bp->b_flags |= B_CACHE;
bp->b_iocmd = BIO_WRITE;
VI_LOCK(bp->b_vp);
@@ -918,8 +929,7 @@
* Complete a background write started from bwrite.
*/
static void
-vfs_backgroundwritedone(bp)
- struct buf *bp;
+vfs_backgroundwritedone(struct buf *bp)
{
struct buf *origbp;
@@ -974,7 +984,7 @@
* out synchronously.
*/
void
-bdwrite(struct buf * bp)
+bdwrite(struct buf *bp)
{
struct thread *td = curthread;
struct vnode *vp;
@@ -1099,9 +1109,9 @@
* The buffer must be on QUEUE_NONE.
*/
void
-bdirty(bp)
- struct buf *bp;
+bdirty(struct buf *bp)
{
+
KASSERT(bp->b_qindex == QUEUE_NONE,
("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
bp->b_flags &= ~(B_RELBUF);
@@ -1128,9 +1138,9 @@
*/
void
-bundirty(bp)
- struct buf *bp;
+bundirty(struct buf *bp)
{
+
KASSERT(bp->b_qindex == QUEUE_NONE,
("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
@@ -1156,8 +1166,9 @@
* B_INVAL buffers. Not us.
*/
void
-bawrite(struct buf * bp)
+bawrite(struct buf *bp)
{
+
bp->b_flags |= B_ASYNC;
(void) bwrite(bp);
}
@@ -1175,6 +1186,7 @@
void
bwillwrite(void)
{
+
if (numdirtybuffers >= hidirtybuffers) {
int s;
@@ -1199,6 +1211,7 @@
int
buf_dirty_count_severe(void)
{
+
return(numdirtybuffers >= hidirtybuffers);
}
@@ -1210,7 +1223,7 @@
* to be accessed later as a cache entity or reused for other purposes.
*/
void
-brelse(struct buf * bp)
+brelse(struct buf *bp)
{
int s;
@@ -1357,11 +1370,13 @@
}
if ((bp->b_flags & B_INVAL) == 0) {
- pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
+ pmap_qenter(
+ trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
}
m = bp->b_pages[i];
}
- if ((bp->b_flags & B_NOCACHE) || (bp->b_ioflags & BIO_ERROR)) {
+ if ((bp->b_flags & B_NOCACHE) ||
+ (bp->b_ioflags & BIO_ERROR)) {
int poffset = foff & PAGE_MASK;
int presid = resid > (PAGE_SIZE - poffset) ?
(PAGE_SIZE - poffset) : resid;
@@ -1485,13 +1500,14 @@
* XXX we should be able to leave the B_RELBUF hint set on completion.
*/
void
-bqrelse(struct buf * bp)
+bqrelse(struct buf *bp)
{
int s;
s = splbio();
- KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)), ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
+ KASSERT(!(bp->b_flags & (B_CLUSTER|B_PAGING)),
+ ("bqrelse: inappropriate B_PAGING or B_CLUSTER bp %p", bp));
if (bp->b_qindex != QUEUE_NONE)
panic("bqrelse: free buffer onto another queue???");
@@ -1552,8 +1568,7 @@
/* Give pages used by the bp back to the VM system (where possible) */
static void
-vfs_vmio_release(bp)
- struct buf *bp;
+vfs_vmio_release(struct buf *bp)
{
int i;
vm_page_t m;
@@ -1657,7 +1672,7 @@
* correct order, so we search for the cluster in both directions.
*/
int
-vfs_bio_awrite(struct buf * bp)
+vfs_bio_awrite(struct buf *bp)
{
int i;
int j;
@@ -2139,6 +2154,7 @@
int flushwithdeps = 0;
SYSCTL_INT(_vfs, OID_AUTO, flushwithdeps, CTLFLAG_RW, &flushwithdeps,
0, "Number of buffers flushed with dependecies that require rollbacks");
+
static int
flushbufqueues(int flushdeps)
{
@@ -2992,6 +3008,7 @@
void
biodone(struct bio *bp)
{
+
mtx_lock(&bdonelock);
bp->bio_flags |= BIO_DONE;
if (bp->bio_done == NULL)
@@ -3043,7 +3060,7 @@
* error and cleared.
*/
int
-bufwait(register struct buf * bp)
+bufwait(struct buf *bp)
{
int s;
@@ -3284,35 +3301,35 @@
* consistant.
*/
void
-vfs_unbusy_pages(struct buf * bp)
+vfs_unbusy_pages(struct buf *bp)
{
int i;
+ vm_object_t obj;
+ vm_page_t m;
runningbufwakeup(bp);
- if (bp->b_flags & B_VMIO) {
- vm_object_t obj;
+ if (!(bp->b_flags & B_VMIO))
+ return;
- obj = bp->b_object;
- VM_OBJECT_LOCK(obj);
- vm_page_lock_queues();
- for (i = 0; i < bp->b_npages; i++) {
- vm_page_t m = bp->b_pages[i];
-
- if (m == bogus_page) {
- m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
- if (!m) {
- panic("vfs_unbusy_pages: page missing\n");
- }
- bp->b_pages[i] = m;
- pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
+ obj = bp->b_object;
+ VM_OBJECT_LOCK(obj);
+ vm_page_lock_queues();
+ for (i = 0; i < bp->b_npages; i++) {
+ m = bp->b_pages[i];
+ if (m == bogus_page) {
+ m = vm_page_lookup(obj, OFF_TO_IDX(bp->b_offset) + i);
+ if (!m) {
+ panic("vfs_unbusy_pages: page missing\n");
}
- vm_object_pip_subtract(obj, 1);
- vm_page_io_finish(m);
+ bp->b_pages[i] = m;
+ pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
}
- vm_page_unlock_queues();
- vm_object_pip_wakeupn(obj, 0);
- VM_OBJECT_UNLOCK(obj);
+ vm_object_pip_subtract(obj, 1);
+ vm_page_io_finish(m);
}
+ vm_page_unlock_queues();
+ vm_object_pip_wakeupn(obj, 0);
+ VM_OBJECT_UNLOCK(obj);
}
/*
@@ -3366,66 +3383,68 @@
* and should be ignored.
*/
void
-vfs_busy_pages(struct buf * bp, int clear_modify)
+vfs_busy_pages(struct buf *bp, int clear_modify)
{
int i, bogus;
+ vm_object_t obj;
+ vm_ooffset_t foff;
+ vm_page_t m;
- if (bp->b_flags & B_VMIO) {
- vm_object_t obj;
- vm_ooffset_t foff;
+ if (!(bp->b_flags & B_VMIO))
+ return;
- obj = bp->b_object;
- foff = bp->b_offset;
- KASSERT(bp->b_offset != NOOFFSET,
- ("vfs_busy_pages: no buffer offset"));
- vfs_setdirty(bp);
- VM_OBJECT_LOCK(obj);
+ obj = bp->b_object;
+ foff = bp->b_offset;
+ KASSERT(bp->b_offset != NOOFFSET,
+ ("vfs_busy_pages: no buffer offset"));
+ vfs_setdirty(bp);
+ VM_OBJECT_LOCK(obj);
retry:
- vm_page_lock_queues();
- for (i = 0; i < bp->b_npages; i++) {
- vm_page_t m = bp->b_pages[i];
+ vm_page_lock_queues();
+ for (i = 0; i < bp->b_npages; i++) {
+ m = bp->b_pages[i];
+
+ if (vm_page_sleep_if_busy(m, FALSE, "vbpage"))
+ goto retry;
+ }
+ bogus = 0;
+ for (i = 0; i < bp->b_npages; i++) {
+ m = bp->b_pages[i];
- if (vm_page_sleep_if_busy(m, FALSE, "vbpage"))
- goto retry;
+ if ((bp->b_flags & B_CLUSTER) == 0) {
+ vm_object_pip_add(obj, 1);
+ vm_page_io_start(m);
}
- bogus = 0;
- for (i = 0; i < bp->b_npages; i++) {
- vm_page_t m = bp->b_pages[i];
-
- if ((bp->b_flags & B_CLUSTER) == 0) {
- vm_object_pip_add(obj, 1);
- vm_page_io_start(m);
- }
- /*
- * When readying a buffer for a read ( i.e
- * clear_modify == 0 ), it is important to do
- * bogus_page replacement for valid pages in
- * partially instantiated buffers. Partially
- * instantiated buffers can, in turn, occur when
- * reconstituting a buffer from its VM backing store
- * base. We only have to do this if B_CACHE is
- * clear ( which causes the I/O to occur in the
- * first place ). The replacement prevents the read
- * I/O from overwriting potentially dirty VM-backed
- * pages. XXX bogus page replacement is, uh, bogus.
- * It may not work properly with small-block devices.
- * We need to find a better way.
- */
- pmap_remove_all(m);
- if (clear_modify)
- vfs_page_set_valid(bp, foff, i, m);
- else if (m->valid == VM_PAGE_BITS_ALL &&
- (bp->b_flags & B_CACHE) == 0) {
- bp->b_pages[i] = bogus_page;
- bogus++;
- }
- foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
+ /*
+ * When readying a buffer for a read ( i.e
+ * clear_modify == 0 ), it is important to do
+ * bogus_page replacement for valid pages in
+ * partially instantiated buffers. Partially
+ * instantiated buffers can, in turn, occur when
+ * reconstituting a buffer from its VM backing store
+ * base. We only have to do this if B_CACHE is
+ * clear ( which causes the I/O to occur in the
+ * first place ). The replacement prevents the read
+ * I/O from overwriting potentially dirty VM-backed
+ * pages. XXX bogus page replacement is, uh, bogus.
+ * It may not work properly with small-block devices.
+ * We need to find a better way.
+ */
+ pmap_remove_all(m);
+ if (clear_modify)
+ vfs_page_set_valid(bp, foff, i, m);
+ else if (m->valid == VM_PAGE_BITS_ALL &&
+ (bp->b_flags & B_CACHE) == 0) {
+ bp->b_pages[i] = bogus_page;
+ bogus++;
}
- vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(obj);
- if (bogus)
- pmap_qenter(trunc_page((vm_offset_t)bp->b_data), bp->b_pages, bp->b_npages);
+ foff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
}
+ vm_page_unlock_queues();
+ VM_OBJECT_UNLOCK(obj);
+ if (bogus)
+ pmap_qenter(trunc_page((vm_offset_t)bp->b_data),
+ bp->b_pages, bp->b_npages);
}
/*
@@ -3437,32 +3456,33 @@
* just go ahead and clean through to b_bufsize.
*/
static void
-vfs_clean_pages(struct buf * bp)
+vfs_clean_pages(struct buf *bp)
{
int i;
+ vm_ooffset_t foff, noff, eoff;
+ vm_page_t m;
- if (bp->b_flags & B_VMIO) {
- vm_ooffset_t foff;
+ if (!(bp->b_flags & B_VMIO))
+ return;
- foff = bp->b_offset;
- KASSERT(bp->b_offset != NOOFFSET,
- ("vfs_clean_pages: no buffer offset"));
- VM_OBJECT_LOCK(bp->b_object);
- vm_page_lock_queues();
- for (i = 0; i < bp->b_npages; i++) {
- vm_page_t m = bp->b_pages[i];
- vm_ooffset_t noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
- vm_ooffset_t eoff = noff;
+ foff = bp->b_offset;
+ KASSERT(bp->b_offset != NOOFFSET,
+ ("vfs_clean_pages: no buffer offset"));
+ VM_OBJECT_LOCK(bp->b_object);
+ vm_page_lock_queues();
+ for (i = 0; i < bp->b_npages; i++) {
+ m = bp->b_pages[i];
+ noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
+ eoff = noff;
- if (eoff > bp->b_offset + bp->b_bufsize)
- eoff = bp->b_offset + bp->b_bufsize;
- vfs_page_set_valid(bp, foff, i, m);
- /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
- foff = noff;
- }
- vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(bp->b_object);
+ if (eoff > bp->b_offset + bp->b_bufsize)
+ eoff = bp->b_offset + bp->b_bufsize;
+ vfs_page_set_valid(bp, foff, i, m);
+ /* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
+ foff = noff;
}
+ vm_page_unlock_queues();
+ VM_OBJECT_UNLOCK(bp->b_object);
}
/*
@@ -3477,35 +3497,36 @@
void
vfs_bio_set_validclean(struct buf *bp, int base, int size)
{
- if (bp->b_flags & B_VMIO) {
- int i;
- int n;
+ int i, n;
+ vm_page_t m;
+
+ if (!(bp->b_flags & B_VMIO))
+ return;
- /*
- * Fixup base to be relative to beginning of first page.
- * Set initial n to be the maximum number of bytes in the
- * first page that can be validated.
- */
+ /*
+ * Fixup base to be relative to beginning of first page.
+ * Set initial n to be the maximum number of bytes in the
+ * first page that can be validated.
+ */
- base += (bp->b_offset & PAGE_MASK);
- n = PAGE_SIZE - (base & PAGE_MASK);
+ base += (bp->b_offset & PAGE_MASK);
+ n = PAGE_SIZE - (base & PAGE_MASK);
- VM_OBJECT_LOCK(bp->b_object);
- vm_page_lock_queues();
- for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
- vm_page_t m = bp->b_pages[i];
+ VM_OBJECT_LOCK(bp->b_object);
+ vm_page_lock_queues();
+ for (i = base / PAGE_SIZE; size > 0 && i < bp->b_npages; ++i) {
+ m = bp->b_pages[i];
- if (n > size)
- n = size;
+ if (n > size)
+ n = size;
- vm_page_set_validclean(m, base & PAGE_MASK, n);
- base += n;
- size -= n;
- n = PAGE_SIZE;
- }
- vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(bp->b_object);
+ vm_page_set_validclean(m, base & PAGE_MASK, n);
+ base += n;
+ size -= n;
+ n = PAGE_SIZE;
}
+ vm_page_unlock_queues();
+ VM_OBJECT_UNLOCK(bp->b_object);
}
/*
@@ -3526,57 +3547,56 @@
GIANT_REQUIRED;
- if ((bp->b_flags & (B_VMIO | B_MALLOC)) == B_VMIO) {
- bp->b_flags &= ~B_INVAL;
- bp->b_ioflags &= ~BIO_ERROR;
- VM_OBJECT_LOCK(bp->b_object);
- if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
- (bp->b_offset & PAGE_MASK) == 0) {
- if (bp->b_pages[0] == bogus_page)
- goto unlock;
- mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
- VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, MA_OWNED);
- if ((bp->b_pages[0]->valid & mask) == mask)
- goto unlock;
- if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
- ((bp->b_pages[0]->valid & mask) == 0)) {
- bzero(bp->b_data, bp->b_bufsize);
- bp->b_pages[0]->valid |= mask;
- goto unlock;
- }
+ if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) {
+ clrbuf(bp);
+ return;
+ }
+ bp->b_flags &= ~B_INVAL;
+ bp->b_ioflags &= ~BIO_ERROR;
+ VM_OBJECT_LOCK(bp->b_object);
+ if( (bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE) &&
+ (bp->b_offset & PAGE_MASK) == 0) {
+ if (bp->b_pages[0] == bogus_page)
+ goto unlock;
+ mask = (1 << (bp->b_bufsize / DEV_BSIZE)) - 1;
+ VM_OBJECT_LOCK_ASSERT(bp->b_pages[0]->object, MA_OWNED);
+ if ((bp->b_pages[0]->valid & mask) == mask)
+ goto unlock;
+ if (((bp->b_pages[0]->flags & PG_ZERO) == 0) &&
+ ((bp->b_pages[0]->valid & mask) == 0)) {
+ bzero(bp->b_data, bp->b_bufsize);
+ bp->b_pages[0]->valid |= mask;
+ goto unlock;
}
- ea = sa = bp->b_data;
- for(i=0;i<bp->b_npages;i++,sa=ea) {
- ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
- ea = (caddr_t)(vm_offset_t)ulmin(
- (u_long)(vm_offset_t)ea,
- (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
- if (bp->b_pages[i] == bogus_page)
- continue;
- j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
- mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
- VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, MA_OWNED);
- if ((bp->b_pages[i]->valid & mask) == mask)
- continue;
- if ((bp->b_pages[i]->valid & mask) == 0) {
- if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
- bzero(sa, ea - sa);
- }
- } else {
- for (; sa < ea; sa += DEV_BSIZE, j++) {
- if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
- (bp->b_pages[i]->valid & (1<<j)) == 0)
- bzero(sa, DEV_BSIZE);
- }
+ }
+ ea = sa = bp->b_data;
+ for(i = 0; i < bp->b_npages; i++, sa = ea) {
+ ea = (caddr_t)trunc_page((vm_offset_t)sa + PAGE_SIZE);
+ ea = (caddr_t)(vm_offset_t)ulmin(
+ (u_long)(vm_offset_t)ea,
+ (u_long)(vm_offset_t)bp->b_data + bp->b_bufsize);
+ if (bp->b_pages[i] == bogus_page)
+ continue;
+ j = ((vm_offset_t)sa & PAGE_MASK) / DEV_BSIZE;
+ mask = ((1 << ((ea - sa) / DEV_BSIZE)) - 1) << j;
+ VM_OBJECT_LOCK_ASSERT(bp->b_pages[i]->object, MA_OWNED);
+ if ((bp->b_pages[i]->valid & mask) == mask)
+ continue;
+ if ((bp->b_pages[i]->valid & mask) == 0) {
+ if ((bp->b_pages[i]->flags & PG_ZERO) == 0)
+ bzero(sa, ea - sa);
+ } else {
+ for (; sa < ea; sa += DEV_BSIZE, j++) {
+ if (((bp->b_pages[i]->flags & PG_ZERO) == 0) &&
+ (bp->b_pages[i]->valid & (1<<j)) == 0)
+ bzero(sa, DEV_BSIZE);
}
- bp->b_pages[i]->valid |= mask;
}
+ bp->b_pages[i]->valid |= mask;
+ }
unlock:
- VM_OBJECT_UNLOCK(bp->b_object);
- bp->b_resid = 0;
- } else {
- clrbuf(bp);
- }
+ VM_OBJECT_UNLOCK(bp->b_object);
+ bp->b_resid = 0;
}
/*
@@ -3585,7 +3605,7 @@
* not associated with a file object.
*/
static void
-vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
+vm_hold_load_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
{
vm_offset_t pg;
vm_page_t p;
@@ -3627,7 +3647,7 @@
/* Return pages associated with this buf to the vm system */
static void
-vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
+vm_hold_free_pages(struct buf *bp, vm_offset_t from, vm_offset_t to)
{
vm_offset_t pg;
vm_page_t p;
@@ -3685,8 +3705,9 @@
if (bp->b_bufsize < 0)
return (-1);
- prot = (bp->b_iocmd == BIO_READ) ? VM_PROT_READ | VM_PROT_WRITE :
- VM_PROT_READ;
+ prot = VM_PROT_READ;
+ if (bp->b_iocmd == BIO_READ)
+ prot |= VM_PROT_WRITE; /* Less backwards than it looks */
for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0;
addr < bp->b_data + bp->b_bufsize;
addr += PAGE_SIZE, pidx++) {
@@ -3736,8 +3757,7 @@
int npages;
npages = bp->b_npages;
- pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
- npages);
+ pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
vm_page_lock_queues();
for (pidx = 0; pidx < npages; pidx++)
vm_page_unhold(bp->b_pages[pidx]);
@@ -3749,6 +3769,7 @@
>>> TRUNCATED FOR MAIL (1000 lines) <<<
More information about the p4-projects
mailing list