svn commit: r365613 - head/sys/kern

Konstantin Belousov kib at FreeBSD.org
Thu Sep 10 20:54:45 UTC 2020


Author: kib
Date: Thu Sep 10 20:54:44 2020
New Revision: 365613
URL: https://svnweb.freebsd.org/changeset/base/365613

Log:
  Fix interaction between largepages and seals/writes.
  
  On write with SHM_GROW_ON_WRITE, use proper truncate.
  Do not allow a largepage shm to grow if F_SEAL_GROW is set. Note that
  shrinking is not supported at all due to unmanaged mappings.
  A call to vm_pager_update_writecount() is only valid for swap objects;
  skip it for unmanaged largepages.
  Largepages cannot support write sealing.
  Do not writecnt largepage mappings.
  
  Reported by:	kevans
  Reviewed by:	kevans, markj
  Sponsored by:	The FreeBSD Foundation
  MFC after:	1 week
  Differential revision:	https://reviews.freebsd.org/D26394

Modified:
  head/sys/kern/uipc_shm.c

Modified: head/sys/kern/uipc_shm.c
==============================================================================
--- head/sys/kern/uipc_shm.c	Thu Sep 10 20:46:16 2020	(r365612)
+++ head/sys/kern/uipc_shm.c	Thu Sep 10 20:54:44 2020	(r365613)
@@ -450,9 +450,7 @@ shm_write(struct file *fp, struct uio *uio, struct ucr
 		error = 0;
 		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
 		    size > shmfd->shm_size) {
-			VM_OBJECT_WLOCK(shmfd->shm_object);
-			error = shm_dotruncate_locked(shmfd, size, rl_cookie);
-			VM_OBJECT_WUNLOCK(shmfd->shm_object);
+			error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
 		}
 		if (error == 0)
 			error = uiomove_object(shmfd->shm_object,
@@ -767,6 +765,9 @@ shm_dotruncate_largepage(struct shmfd *shmfd, off_t le
 #endif
 	}
 
+	if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
+		return (EPERM);
+
 	aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
 	if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
 		aflags |= VM_ALLOC_WAITFAIL;
@@ -1416,7 +1417,7 @@ out:
 static int
 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
     vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
-    vm_ooffset_t foff, bool writecounted, struct thread *td)
+    vm_ooffset_t foff, struct thread *td)
 {
 	struct vmspace *vms;
 	vm_map_entry_t next_entry, prev_entry;
@@ -1448,8 +1449,6 @@ shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_o
 	docow |= MAP_INHERIT_SHARE;
 	if ((flags & MAP_NOCORE) != 0)
 		docow |= MAP_DISABLE_COREDUMP;
-	if (writecounted)
-		docow |= MAP_WRITECOUNT;
 
 	mask = pagesizes[shmfd->shm_lp_psind] - 1;
 	if ((foff & mask) != 0)
@@ -1594,12 +1593,15 @@ shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *a
 	mtx_unlock(&shm_timestamp_lock);
 	vm_object_reference(shmfd->shm_object);
 
-	if (writecnt)
-		vm_pager_update_writecount(shmfd->shm_object, 0, objsize);
 	if (shm_largepage(shmfd)) {
+		writecnt = false;
 		error = shm_mmap_large(shmfd, map, addr, objsize, prot,
-		    maxprot, flags, foff, writecnt, td);
+		    maxprot, flags, foff, td);
 	} else {
+		if (writecnt) {
+			vm_pager_update_writecount(shmfd->shm_object, 0,
+			    objsize);
+		}
 		error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
 		    shmfd->shm_object, foff, writecnt, td);
 	}
@@ -1838,6 +1840,11 @@ shm_add_seals(struct file *fp, int seals)
 	}
 	nseals = seals & ~shmfd->shm_seals;
 	if ((nseals & F_SEAL_WRITE) != 0) {
+		if (shm_largepage(shmfd)) {
+			error = ENOTSUP;
+			goto out;
+		}
+
 		/*
 		 * The rangelock above prevents writable mappings from being
 		 * added after we've started applying seals.  The RLOCK here


More information about the svn-src-head mailing list