svn commit: r273136 - in stable/10/sys: amd64/amd64 amd64/include dev/drm2/i915 i386/i386 i386/include i386/xen

Konstantin Belousov <kib@FreeBSD.org>
Wed Oct 15 14:07:27 UTC 2014


Author: kib
Date: Wed Oct 15 14:07:24 2014
New Revision: 273136
URL: https://svnweb.freebsd.org/changeset/base/273136

Log:
  MFC r272761:
  Add an argument to the x86 pmap_invalidate_cache_range() to request
  forced invalidation of the cache range regardless of the presence of
  the self-snoop feature.
  
  MFC r272943:
  MFi386 r272761.
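
  For illustration, a minimal sketch of how a caller might use the new
  argument; the buffer pointer and length below are hypothetical, and only
  the pmap_invalidate_cache_range() signature comes from this change:

	/*
	 * Flush a CPU-written buffer before a non-snooping device reads
	 * it.  With force == TRUE the flush is performed even on CPUs
	 * that advertise self-snoop (CPUID_SS), and the range does not
	 * have to be page-aligned; the start is rounded down to a cache
	 * line boundary instead.
	 */
	pmap_invalidate_cache_range((vm_offset_t)buf,
	    (vm_offset_t)buf + len, TRUE);

  Passing FALSE preserves the old behavior: the range must be page-aligned
  and the flush is skipped entirely when the CPU reports self-snoop.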

Modified:
  stable/10/sys/amd64/amd64/pmap.c
  stable/10/sys/amd64/include/pmap.h
  stable/10/sys/dev/drm2/i915/intel_ringbuffer.c
  stable/10/sys/i386/i386/pmap.c
  stable/10/sys/i386/i386/vm_machdep.c
  stable/10/sys/i386/include/pmap.h
  stable/10/sys/i386/xen/pmap.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/10/sys/amd64/amd64/pmap.c	Wed Oct 15 13:40:29 2014	(r273135)
+++ stable/10/sys/amd64/amd64/pmap.c	Wed Oct 15 14:07:24 2014	(r273136)
@@ -1710,16 +1710,20 @@ pmap_update_pde(pmap_t pmap, vm_offset_t
 #define PMAP_CLFLUSH_THRESHOLD   (2 * 1024 * 1024)
 
 void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
 {
 
-	KASSERT((sva & PAGE_MASK) == 0,
-	    ("pmap_invalidate_cache_range: sva not page-aligned"));
-	KASSERT((eva & PAGE_MASK) == 0,
-	    ("pmap_invalidate_cache_range: eva not page-aligned"));
+	if (force) {
+		sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
+	} else {
+		KASSERT((sva & PAGE_MASK) == 0,
+		    ("pmap_invalidate_cache_range: sva not page-aligned"));
+		KASSERT((eva & PAGE_MASK) == 0,
+		    ("pmap_invalidate_cache_range: eva not page-aligned"));
+	}
 
-	if (cpu_feature & CPUID_SS)
-		; /* If "Self Snoop" is supported, do nothing. */
+	if ((cpu_feature & CPUID_SS) != 0 && !force)
+		; /* If "Self Snoop" is supported and allowed, do nothing. */
 	else if ((cpu_feature & CPUID_CLFSH) != 0 &&
 	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
 
@@ -6222,7 +6226,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_
 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
-	pmap_invalidate_cache_range(va, va + tmpsize);
+	pmap_invalidate_cache_range(va, va + tmpsize, FALSE);
 	return ((void *)(va + offset));
 }
 
@@ -6558,7 +6562,7 @@ pmap_change_attr_locked(vm_offset_t va, 
 	 */
 	if (changed) {
 		pmap_invalidate_range(kernel_pmap, base, tmpva);
-		pmap_invalidate_cache_range(base, tmpva);
+		pmap_invalidate_cache_range(base, tmpva, FALSE);
 	}
 	return (error);
 }
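
The hunk above is truncated before the flush itself.  For context, a
condensed sketch of how the rest of the function proceeds in this tree
(assuming the clflush() and mfence() inline helpers from
<machine/cpufunc.h>, and eliding the local-APIC special case):

	if ((cpu_feature & CPUID_SS) != 0 && !force)
		;	/* Self-snoop keeps caches coherent; nothing to do. */
	else if ((cpu_feature & CPUID_CLFSH) != 0 &&
	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
		/*
		 * Flush each cache line in [sva, eva).  The mfence pair
		 * orders the flushes against surrounding stores.
		 */
		mfence();
		for (; sva < eva; sva += cpu_clflush_line_size)
			clflush(sva);
		mfence();
	} else {
		/*
		 * No usable CLFLUSH, or the range is at least 2MB:
		 * invalidate the entire cache instead.
		 */
		pmap_invalidate_cache();
	}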

Modified: stable/10/sys/amd64/include/pmap.h
==============================================================================
--- stable/10/sys/amd64/include/pmap.h	Wed Oct 15 13:40:29 2014	(r273135)
+++ stable/10/sys/amd64/include/pmap.h	Wed Oct 15 14:07:24 2014	(r273136)
@@ -394,7 +394,8 @@ void	pmap_invalidate_range(pmap_t, vm_of
 void	pmap_invalidate_all(pmap_t);
 void	pmap_invalidate_cache(void);
 void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
-void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
+void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
+	    boolean_t force);
 void	pmap_get_mapping(pmap_t pmap, vm_offset_t va, uint64_t *ptr, int *num);
 #endif /* _KERNEL */
 

Modified: stable/10/sys/dev/drm2/i915/intel_ringbuffer.c
==============================================================================
--- stable/10/sys/dev/drm2/i915/intel_ringbuffer.c	Wed Oct 15 13:40:29 2014	(r273135)
+++ stable/10/sys/dev/drm2/i915/intel_ringbuffer.c	Wed Oct 15 14:07:24 2014	(r273136)
@@ -366,7 +366,7 @@ init_pipe_control(struct intel_ring_buff
 		goto err_unpin;
 	pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1);
 	pmap_invalidate_cache_range((vm_offset_t)pc->cpu_page,
-	    (vm_offset_t)pc->cpu_page + PAGE_SIZE);
+	    (vm_offset_t)pc->cpu_page + PAGE_SIZE, FALSE);
 
 	pc->obj = obj;
 	ring->private = pc;
@@ -1014,7 +1014,7 @@ static int init_status_page(struct intel
 	pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0],
 	    1);
 	pmap_invalidate_cache_range((vm_offset_t)ring->status_page.page_addr,
-	    (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE);
+	    (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE, FALSE);
 	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 

Modified: stable/10/sys/i386/i386/pmap.c
==============================================================================
--- stable/10/sys/i386/i386/pmap.c	Wed Oct 15 13:40:29 2014	(r273135)
+++ stable/10/sys/i386/i386/pmap.c	Wed Oct 15 14:07:24 2014	(r273136)
@@ -1172,16 +1172,20 @@ pmap_update_pde(pmap_t pmap, vm_offset_t
 #define	PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)
 
 void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
 {
 
-	KASSERT((sva & PAGE_MASK) == 0,
-	    ("pmap_invalidate_cache_range: sva not page-aligned"));
-	KASSERT((eva & PAGE_MASK) == 0,
-	    ("pmap_invalidate_cache_range: eva not page-aligned"));
+	if (force) {
+		sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
+	} else {
+		KASSERT((sva & PAGE_MASK) == 0,
+		    ("pmap_invalidate_cache_range: sva not page-aligned"));
+		KASSERT((eva & PAGE_MASK) == 0,
+		    ("pmap_invalidate_cache_range: eva not page-aligned"));
+	}
 
-	if (cpu_feature & CPUID_SS)
-		; /* If "Self Snoop" is supported, do nothing. */
+	if ((cpu_feature & CPUID_SS) != 0 && !force)
+		; /* If "Self Snoop" is supported and allowed, do nothing. */
 	else if ((cpu_feature & CPUID_CLFSH) != 0 &&
 	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
 
@@ -5164,7 +5168,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_
 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
-	pmap_invalidate_cache_range(va, va + size);
+	pmap_invalidate_cache_range(va, va + size, FALSE);
 	return ((void *)(va + offset));
 }
 
@@ -5370,7 +5374,7 @@ pmap_change_attr(vm_offset_t va, vm_size
 	 */
 	if (changed) {
 		pmap_invalidate_range(kernel_pmap, base, tmpva);
-		pmap_invalidate_cache_range(base, tmpva);
+		pmap_invalidate_cache_range(base, tmpva, FALSE);
 	}
 	return (0);
 }

Modified: stable/10/sys/i386/i386/vm_machdep.c
==============================================================================
--- stable/10/sys/i386/i386/vm_machdep.c	Wed Oct 15 13:40:29 2014	(r273135)
+++ stable/10/sys/i386/i386/vm_machdep.c	Wed Oct 15 14:07:24 2014	(r273136)
@@ -799,7 +799,7 @@ sf_buf_invalidate_cache(vm_page_t m)
 			 */
 			pmap_qenter(sf->kva, &m, 1);
 			pmap_invalidate_cache_range(sf->kva, sf->kva +
-			    PAGE_SIZE);
+			    PAGE_SIZE, FALSE);
 			ret = TRUE;
 			break;
 		}

Modified: stable/10/sys/i386/include/pmap.h
==============================================================================
--- stable/10/sys/i386/include/pmap.h	Wed Oct 15 13:40:29 2014	(r273135)
+++ stable/10/sys/i386/include/pmap.h	Wed Oct 15 14:07:24 2014	(r273136)
@@ -458,7 +458,8 @@ void	pmap_invalidate_range(pmap_t, vm_of
 void	pmap_invalidate_all(pmap_t);
 void	pmap_invalidate_cache(void);
 void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
-void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);
+void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva,
+	    boolean_t force);
 
 #endif /* _KERNEL */
 

Modified: stable/10/sys/i386/xen/pmap.c
==============================================================================
--- stable/10/sys/i386/xen/pmap.c	Wed Oct 15 13:40:29 2014	(r273135)
+++ stable/10/sys/i386/xen/pmap.c	Wed Oct 15 14:07:24 2014	(r273136)
@@ -888,15 +888,19 @@ pmap_invalidate_cache(void)
 #define	PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)
 
 void
-pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
+pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva, boolean_t force)
 {
 
-	KASSERT((sva & PAGE_MASK) == 0,
-	    ("pmap_invalidate_cache_range: sva not page-aligned"));
-	KASSERT((eva & PAGE_MASK) == 0,
-	    ("pmap_invalidate_cache_range: eva not page-aligned"));
+	if (force) {
+		sva &= ~(vm_offset_t)(cpu_clflush_line_size - 1);
+	} else {
+		KASSERT((sva & PAGE_MASK) == 0,
+		    ("pmap_invalidate_cache_range: sva not page-aligned"));
+		KASSERT((eva & PAGE_MASK) == 0,
+		    ("pmap_invalidate_cache_range: eva not page-aligned"));
+	}
 
-	if (cpu_feature & CPUID_SS)
+	if ((cpu_feature & CPUID_SS) != 0 && !force)
 		; /* If "Self Snoop" is supported, do nothing. */
 	else if ((cpu_feature & CPUID_CLFSH) != 0 &&
 	    eva - sva < PMAP_CLFLUSH_THRESHOLD) {
@@ -4073,7 +4077,7 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_
 	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
 		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
 	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
-	pmap_invalidate_cache_range(va, va + size);
+	pmap_invalidate_cache_range(va, va + size, FALSE);
 	return ((void *)(va + offset));
 }
 
@@ -4241,7 +4245,7 @@ pmap_change_attr(vm_offset_t va, vm_size
 	 */
 	if (changed) {
 		pmap_invalidate_range(kernel_pmap, base, tmpva);
-		pmap_invalidate_cache_range(base, tmpva);
+		pmap_invalidate_cache_range(base, tmpva, FALSE);
 	}
 	return (0);
 }
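
A note on the alignment in the force path: CLFLUSH operates on whole cache
lines, so sva is rounded down to a cache-line boundary before the flush
loop.  With a power-of-two line size, rounding down is a mask with
(line size - 1); a worked example, assuming a 64-byte line:

	vm_offset_t sva = 0x1234567;		/* arbitrary, unaligned */
	sva &= ~(vm_offset_t)(64 - 1);		/* 0x1234540, line-aligned */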

