socsvn commit: r268356 - in soc2014/mihai/bhyve-icache-head/sys: amd64/include amd64/vmm modules/vmm

mihai at FreeBSD.org
Mon May 19 12:59:38 UTC 2014


Author: mihai
Date: Mon May 19 12:59:36 2014
New Revision: 268356
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=268356

Log:
  soc2014: mihai: move the instruction cache into its own file; add a
  hw.vmm.instruction_cache sysctl that enables or disables caching at
  runtime (disabling also flushes all cached entries)

Added:
  soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_cache.h
  soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_cache.c
Modified:
  soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_emul.h
  soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_emul.c
  soc2014/mihai/bhyve-icache-head/sys/modules/vmm/Makefile

Added: soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_cache.h
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_cache.h	Mon May 19 12:59:36 2014	(r268356)
@@ -0,0 +1,15 @@
+#ifndef	_VMM_INSTRUCTION_CACHE_H_
+#define	_VMM_INSTRUCTION_CACHE_H_
+
+#ifdef _KERNEL
+
+int vmm_init_cached_instruction(void);
+int vmm_cleanup_cached_instruction(void);
+int vmm_add_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3,
+		    struct vie *vie);
+int vmm_get_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3,
+		    struct vie *vie);
+int vmm_rm_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3);
+#endif	/* _KERNEL */
+
+#endif	/* _VMM_INSTRUCTION_CACHE_H_ */

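For reference, the API declared above is meant to be driven from the
instruction-emulation path roughly as sketched below. This is only an
illustrative sketch, not part of the commit: the helper decode_with_cache()
and its argument list are hypothetical, while the cache functions and
vmm_decode_instruction() follow the declarations in this header and in
vmm_instruction_emul.h.

/*
 * Hypothetical caller of the instruction cache: consult the cache before
 * decoding and populate it on a miss.
 */
static int
decode_with_cache(struct vm *vm, int cpuid, uint64_t rip, uint64_t cr3,
    uint64_t gla, enum vie_cpu_mode cpu_mode, struct vie *vie)
{
	int error;

	/* Fast path: reuse a previously decoded instruction for (vm, rip, cr3). */
	if (vmm_get_cached_instruction(vm, rip, cr3, vie) == 0)
		return (0);

	/* Miss (or caching disabled): decode the instruction bytes held in 'vie'. */
	error = vmm_decode_instruction(vm, cpuid, gla, cpu_mode, vie);
	if (error)
		return (error);

	/* Remember the decoded form for the next exit at the same RIP. */
	vmm_add_cached_instruction(vm, rip, cr3, vie);
	return (0);
}
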
Modified: soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_emul.h
==============================================================================
--- soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_emul.h	Mon May 19 11:17:44 2014	(r268355)
+++ soc2014/mihai/bhyve-icache-head/sys/amd64/include/vmm_instruction_emul.h	Mon May 19 12:59:36 2014	(r268356)
@@ -138,13 +138,7 @@
 int vmm_decode_instruction(struct vm *vm, int cpuid, uint64_t gla,
 			   enum vie_cpu_mode cpu_mode, struct vie *vie);
 
-int vmm_init_cached_instruction(void);
-int vmm_cleanup_cached_instruction(void);
-int vmm_add_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3,
-		    struct vie *vie);
-int vmm_get_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3,
-		    struct vie *vie);
-int vmm_rm_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3);
+#include <machine/vmm_instruction_cache.h>
 
 #endif	/* _KERNEL */
 

Added: soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_cache.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_cache.c	Mon May 19 12:59:36 2014	(r268356)
@@ -0,0 +1,204 @@
+#ifdef _KERNEL
+
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+#include <sys/pcpu.h>
+#include <sys/systm.h>
+#include <sys/types.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/rmlock.h>
+#include <sys/queue.h>
+#include <sys/hash.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/vmparam.h>
+#include <machine/vmm.h>
+#include <machine/vmm_instruction_emul.h>	/* struct vie, cache prototypes */
+
+
+/* Instruction caching */
+
+struct vie_cached;
+
+LIST_HEAD(vie_cached_head, vie_cached);
+
+struct vie_cached {
+	/* key */
+	struct vm *vm;
+	uint64_t rip;
+	uint64_t cr3;
+	/* value */
+	struct vie vie;
+	LIST_ENTRY(vie_cached) vie_link;
+};
+
+static MALLOC_DEFINE(M_VIECACHED, "vie_cached", "vie_cached");
+
+#define VIE_CACHE_HASH_SIZE (1 << 10)
+#define VIE_CACHE_HASH_MASK (VIE_CACHE_HASH_SIZE - 1)
+
+struct vie_cached_hash {
+	struct vie_cached_head vie_cached_head;
+	struct rmlock vie_cached_lock;
+};
+
+static struct vie_cached_hash vie_cached_hash[VIE_CACHE_HASH_SIZE];
+
+SYSCTL_DECL(_hw_vmm);
+
+static int vmm_cached_instruction_enable = 1;
+
+int
+vmm_init_cached_instruction(void)
+{
+	int i;
+
+	for (i = 0; i < VIE_CACHE_HASH_SIZE; i++) {
+		LIST_INIT(&vie_cached_hash[i].vie_cached_head);
+		rm_init(&vie_cached_hash[i].vie_cached_lock, "VIE CACHED HASH LOCK");
+	}
+	return (0);
+}
+
+int
+vmm_cleanup_cached_instruction(void)
+{
+	struct vie_cached *vie_cached, *vie_cached_safe;
+	int i;
+
+	for (i = 0; i < VIE_CACHE_HASH_SIZE; i++) {
+		LIST_FOREACH_SAFE(vie_cached, &vie_cached_hash[i].vie_cached_head, vie_link, vie_cached_safe)
+		{
+			LIST_REMOVE(vie_cached, vie_link);
+			free(vie_cached, M_VIECACHED);
+		}
+		rm_destroy(&vie_cached_hash[i].vie_cached_lock);
+	}
+	return (0);
+}
+
+static int
+sysctl_vmm_cached_instruction(SYSCTL_HANDLER_ARGS)
+{
+	struct vie_cached *vie_cached, *vie_cached_safe;
+	int error, temp, i;
+
+	temp = vmm_cached_instruction_enable;
+	error = sysctl_handle_int(oidp, &temp, 0, req);
+	if (error)
+		return (error);
+	if (temp != 0 && temp != 1)
+		return (EINVAL);
+
+	if (req->newptr != NULL) {
+		if (temp != vmm_cached_instruction_enable) {
+			vmm_cached_instruction_enable = temp;
+			if (temp == 0) {
+				for (i = 0; i < VIE_CACHE_HASH_SIZE; i++) {
+					rm_wlock(&vie_cached_hash[i].vie_cached_lock);
+					LIST_FOREACH_SAFE(vie_cached, &vie_cached_hash[i].vie_cached_head, vie_link, vie_cached_safe)
+					{
+						LIST_REMOVE(vie_cached, vie_link);
+						free(vie_cached, M_VIECACHED);
+					}
+					rm_wunlock(&vie_cached_hash[i].vie_cached_lock);
+				}
+			}
+		}
+	}
+	return (0);
+}
+SYSCTL_PROC(_hw_vmm, OID_AUTO, instruction_cache, CTLTYPE_INT | CTLFLAG_RW,
+	    0, 0, sysctl_vmm_cached_instruction, "I",
+	    "Enable caching of decoded instructions (0 disables and flushes the cache)");
+
+
+int
+vmm_add_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3,
+		    struct vie *vie)
+{
+	struct vie_cached *vie_cached;
+	int hash;
+
+	/* Check to see if caching is enabled before allocating */
+	if (!vmm_cached_instruction_enable)
+		return (0);
+
+	vie_cached = malloc(sizeof(struct vie_cached), M_VIECACHED,
+	    M_WAITOK | M_ZERO);
+
+	vie_cached->vm = vm;
+	vie_cached->rip = rip;
+	vie_cached->cr3 = cr3;
+	bcopy(vie, &vie_cached->vie, sizeof(struct vie));
+
+	hash = jenkins_hash(&vm, sizeof(struct vm *), 0) & VIE_CACHE_HASH_MASK;
+
+	rm_wlock(&vie_cached_hash[hash].vie_cached_lock);
+	LIST_INSERT_HEAD(&vie_cached_hash[hash].vie_cached_head, vie_cached, vie_link);
+	rm_wunlock(&vie_cached_hash[hash].vie_cached_lock);
+	return (0);
+}
+
+int
+vmm_get_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3,
+		    struct vie *vie)
+{
+	struct vie_cached *vie_cached;
+	struct rm_priotracker tracker;
+	int hash;
+
+	/* Check to see if caching is enabled */
+	if (!vmm_cached_instruction_enable)
+		return (-1);
+
+	hash = jenkins_hash(&vm, sizeof(struct vm *), 0) & VIE_CACHE_HASH_MASK;
+
+	rm_rlock(&vie_cached_hash[hash].vie_cached_lock, &tracker);
+
+	LIST_FOREACH(vie_cached, &vie_cached_hash[hash].vie_cached_head, vie_link) {
+		if (vie_cached->vm == vm &&
+		    vie_cached->rip == rip &&
+		    vie_cached->cr3 == cr3)
+		{
+			bcopy(&vie_cached->vie, vie, sizeof(struct vie));
+			rm_runlock(&vie_cached_hash[hash].vie_cached_lock, &tracker);
+			return (0);
+		}
+	}
+	rm_runlock(&vie_cached_hash[hash].vie_cached_lock, &tracker);
+	return (-1);
+}
+
+int
+vmm_rm_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3)
+{
+	struct vie_cached *vie_cached, *vie_cached_safe;
+	int hash;
+	uint64_t rip_page = trunc_page(rip);
+
+	/* Check to see if caching is enabled */
+	if (!vmm_cached_instruction_enable)
+		return (0);
+
+	hash = jenkins_hash(&vm, sizeof(struct vm *), 0) & VIE_CACHE_HASH_MASK;
+
+	rm_wlock(&vie_cached_hash[hash].vie_cached_lock);
+
+	LIST_FOREACH_SAFE(vie_cached, &vie_cached_hash[hash].vie_cached_head,
+	    vie_link, vie_cached_safe) {
+		if (vie_cached->vm == vm &&
+		    trunc_page(vie_cached->rip) == rip_page &&
+		    vie_cached->cr3 == cr3)
+		{
+			/* Remove the RIP found and continue searching */
+			LIST_REMOVE(vie_cached, vie_link);
+			/* Free the removed node */
+			free(vie_cached, M_VIECACHED);
+		}
+	}
+
+	rm_wunlock(&vie_cached_hash[hash].vie_cached_lock);
+	return (0);
+}
+#endif	/* _KERNEL */

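The sysctl added in this file can also be exercised from userland: setting
hw.vmm.instruction_cache to 0 disables caching and flushes every hash
bucket, and setting it back to 1 re-enables it (from the shell:
sysctl hw.vmm.instruction_cache=0). The small program below is only an
illustrative sketch, not part of the commit; it assumes vmm.ko is loaded
and sufficient privilege to write the sysctl.

/*
 * Illustrative userland toggle for hw.vmm.instruction_cache: read the
 * current value, then disable the cache (which also flushes all cached
 * entries in the kernel).
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
	int enable, newval = 0;
	size_t len = sizeof(enable);

	if (sysctlbyname("hw.vmm.instruction_cache", &enable, &len,
	    &newval, sizeof(newval)) == -1)
		err(1, "sysctlbyname(hw.vmm.instruction_cache)");

	printf("instruction cache was %d, now disabled\n", enable);
	return (0);
}
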
Modified: soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_emul.c
==============================================================================
--- soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_emul.c	Mon May 19 11:17:44 2014	(r268355)
+++ soc2014/mihai/bhyve-icache-head/sys/amd64/vmm/vmm_instruction_emul.c	Mon May 19 12:59:36 2014	(r268356)
@@ -569,28 +569,6 @@
 
 #ifdef _KERNEL
 
-struct vie_cached;
-LIST_HEAD(vie_cached_head, vie_cached);
-struct vie_cached {
-	/* key */
-	struct vm *vm;
-	uint64_t rip;
-	uint64_t cr3;
-	/* value */
-	struct vie vie;
-	LIST_ENTRY(vie_cached) vie_link;
-};
-
-static MALLOC_DEFINE(M_VIECACHED, "vie_cached", "vie_cached");
-
-#define VIE_CACHE_HASH_SIZE (1 << 10)
-#define VIE_CACHE_HASH_MASK ((1 << 10) - 1)
-struct vie_cached_hash {
-	struct vie_cached_head vie_cached_head;
-	struct rmlock vie_cached_lock;
-};
-static struct vie_cached_hash vie_cached_hash[VIE_CACHE_HASH_SIZE];
-
 void
 vie_init(struct vie *vie)
 {
@@ -1148,106 +1126,4 @@
 	return (0);
 }
 
-int
-vmm_init_cached_instruction(void)
-{
-	int i;
-
-	for (i = 0; i < VIE_CACHE_HASH_SIZE; i++) {
-		LIST_INIT(&vie_cached_hash[i].vie_cached_head);
-		rm_init(&vie_cached_hash[i].vie_cached_lock, "VIE CACHED HASH LOCK");
-	}
-	return (0);
-}
-
-int
-vmm_cleanup_cached_instruction(void)
-{
-	struct vie_cached *vie_cached, *vie_cached_safe;
-	int i;
-
-	for (i = 0; i < VIE_CACHE_HASH_SIZE; i++) {
-		LIST_FOREACH_SAFE(vie_cached, &vie_cached_hash[i].vie_cached_head, vie_link, vie_cached_safe)
-		{
-			LIST_REMOVE(vie_cached, vie_link);
-			free(vie_cached, M_VIECACHED);
-		}
-		rm_destroy(&vie_cached_hash[i].vie_cached_lock);
-	}
-	return (0);
-}
-
-
-int
-vmm_add_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3,
-		    struct vie *vie)
-{
-	struct vie_cached *vie_cached = malloc(sizeof(struct vie_cached), M_VIECACHED, M_WAITOK | M_ZERO);
-	int hash;
-
-	vie_cached->vm = vm;
-	vie_cached->rip = rip;
-	vie_cached->cr3 = cr3;
-	bcopy(vie, &vie_cached->vie, sizeof(struct vie));
-
-	hash =  jenkins_hash(&vm, sizeof(struct vm *), 0) & VIE_CACHE_HASH_MASK;
-
-	rm_wlock(&vie_cached_hash[hash].vie_cached_lock);
-	LIST_INSERT_HEAD(&vie_cached_hash[hash].vie_cached_head, vie_cached, vie_link);
-	rm_wunlock(&vie_cached_hash[hash].vie_cached_lock);
-	return (0);
-}
-
-int
-vmm_get_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3,
-		    struct vie *vie)
-{
-	struct vie_cached *vie_cached;
-	struct rm_priotracker tracker;
-	int hash;
-
-	hash = jenkins_hash(&vm, sizeof(struct vm *), 0) & VIE_CACHE_HASH_MASK;
-
-	rm_rlock(&vie_cached_hash[hash].vie_cached_lock, &tracker);
-
-	LIST_FOREACH(vie_cached, &vie_cached_hash[hash].vie_cached_head, vie_link) {
-		if (vie_cached->vm == vm &&
-		    vie_cached->rip == rip &&
-		    vie_cached->cr3 == cr3)
-		{
-			bcopy(&vie_cached->vie, vie, sizeof(struct vie));
-			rm_runlock(&vie_cached_hash[hash].vie_cached_lock, &tracker);
-			return(0);
-		}
-	}
-	rm_runlock(&vie_cached_hash[hash].vie_cached_lock, &tracker);
-	return (-1);
-}
-
-int
-vmm_rm_cached_instruction(struct vm *vm, uint64_t rip, uint64_t cr3)
-{
-	struct vie_cached *vie_cached;
-	int hash;
-	uint64_t rip_page = trunc_page(rip);
-
-	hash = jenkins_hash(&vm, sizeof(struct vm *), 0) & VIE_CACHE_HASH_MASK;
-
-	rm_wlock(&vie_cached_hash[hash].vie_cached_lock);
-
-	LIST_FOREACH(vie_cached, &vie_cached_hash[hash].vie_cached_head, vie_link) {
-		if (vie_cached->vm == vm &&
-		    trunc_page(vie_cached->rip) == rip_page &&
-		    vie_cached->cr3 == cr3)
-		{
-			/* Remove the RIP found and continue searching */
-			LIST_REMOVE(vie_cached, vie_link);
-			/* Free the removed node */
-			free(vie_cached, M_VIECACHED);
-		}
-	}
-
-	rm_wunlock(&vie_cached_hash[hash].vie_cached_lock);
-	return (0);
-}
 #endif	/* _KERNEL */

Modified: soc2014/mihai/bhyve-icache-head/sys/modules/vmm/Makefile
==============================================================================
--- soc2014/mihai/bhyve-icache-head/sys/modules/vmm/Makefile	Mon May 19 11:17:44 2014	(r268355)
+++ soc2014/mihai/bhyve-icache-head/sys/modules/vmm/Makefile	Mon May 19 12:59:36 2014	(r268356)
@@ -15,6 +15,7 @@
 	vmm_dev.c	\
 	vmm_host.c	\
 	vmm_instruction_emul.c	\
+	vmm_instruction_cache.c	\
 	vmm_ioport.c	\
 	vmm_ipi.c	\
 	vmm_lapic.c	\

