git: ecadbb2905b4 - releng/14.0 - vmx: Prefer consistent naming for loader tunables

From: Zhenlei Huang <zlei_at_FreeBSD.org>
Date: Tue, 24 Oct 2023 14:37:46 UTC
The branch releng/14.0 has been updated by zlei:

URL: https://cgit.FreeBSD.org/src/commit/?id=ecadbb2905b41f288ee239d40c64f8be6a27eb62

commit ecadbb2905b41f288ee239d40c64f8be6a27eb62
Author:     Zhenlei Huang <zlei@FreeBSD.org>
AuthorDate: 2023-10-19 17:18:25 +0000
Commit:     Zhenlei Huang <zlei@FreeBSD.org>
CommitDate: 2023-10-24 14:35:51 +0000

    vmx: Prefer consistent naming for loader tunables
    
    The following loader tunables have corresponding sysctl MIBs, but
    under different names, most likely for historical reasons. Let's
    prefer consistent naming for them so that they are easier to read
    and maintain.
    
     1. hw.vmm.l1d_flush -> hw.vmm.vmx.l1d_flush
     2. hw.vmm.l1d_flush_sw -> hw.vmm.vmx.l1d_flush_sw
     3. hw.vmm.vmx.use_apic_pir -> hw.vmm.vmx.cap.posted_interrupts
     4. hw.vmm.vmx.use_apic_vid -> hw.vmm.vmx.cap.virtual_interrupt_delivery
     5. hw.vmm.vmx.use_tpr_shadowing -> hw.vmm.vmx.cap.tpr_shadowing
    
    Old names are kept for compatibility.
    
    Meanwhile, add the sysctl flag CTLFLAG_TUN to them so that
    `sysctl -T` reports them correctly.
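    
    For illustration (the values below are arbitrary examples), the new
    names are set from loader.conf(5) like any other loader tunable:
    
        hw.vmm.vmx.l1d_flush_sw="1"
        hw.vmm.vmx.cap.tpr_shadowing="0"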
    
    Reviewed by:    corvink, jhb, kib, #bhyve
    Approved by:    re (gjb)
    MFC after:      5 days
    Differential Revision:  https://reviews.freebsd.org/D42251
    
    (cherry picked from commit f3ff0918ffcdbcb4c39175f3f9be70999edb14e8)
    (cherry picked from commit 9e48b627aed346bf5e950134a581218d3097eb7c)
---
 sys/amd64/vmm/intel/vmx.c | 32 +++++++++++++++++++++++++++-----
 1 file changed, 27 insertions(+), 5 deletions(-)

diff --git a/sys/amd64/vmm/intel/vmx.c b/sys/amd64/vmm/intel/vmx.c
index 51c6afe5a7da..317ed7e5d7fb 100644
--- a/sys/amd64/vmm/intel/vmx.c
+++ b/sys/amd64/vmm/intel/vmx.c
@@ -192,15 +192,18 @@ SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, invpcid, CTLFLAG_RD, &cap_invpcid,
     0, "Guests are allowed to use INVPCID");
 
 static int tpr_shadowing;
-SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, tpr_shadowing, CTLFLAG_RD,
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, tpr_shadowing,
+    CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
     &tpr_shadowing, 0, "TPR shadowing support");
 
 static int virtual_interrupt_delivery;
-SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery, CTLFLAG_RD,
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, virtual_interrupt_delivery,
+    CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
     &virtual_interrupt_delivery, 0, "APICv virtual interrupt delivery support");
 
 static int posted_interrupts;
-SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts, CTLFLAG_RD,
+SYSCTL_INT(_hw_vmm_vmx_cap, OID_AUTO, posted_interrupts,
+    CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
     &posted_interrupts, 0, "APICv posted interrupt support");
 
 static int pirvec = -1;
@@ -213,10 +216,10 @@ SYSCTL_UINT(_hw_vmm_vmx, OID_AUTO, vpid_alloc_failed, CTLFLAG_RD,
 	    &vpid_alloc_failed, 0, NULL);
 
 int guest_l1d_flush;
-SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RD,
+SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
     &guest_l1d_flush, 0, NULL);
 int guest_l1d_flush_sw;
-SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush_sw, CTLFLAG_RD,
+SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, l1d_flush_sw, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
     &guest_l1d_flush_sw, 0, NULL);
 
 static struct msr_entry msr_load_list[1] __aligned(16);
@@ -832,8 +835,12 @@ vmx_modinit(int ipinum)
 	    &tmp);
 	if (error == 0) {
 		tpr_shadowing = 1;
+#ifndef BURN_BRIDGES
 		TUNABLE_INT_FETCH("hw.vmm.vmx.use_tpr_shadowing",
 		    &tpr_shadowing);
+#endif
+		TUNABLE_INT_FETCH("hw.vmm.vmx.cap.tpr_shadowing",
+		    &tpr_shadowing);
 	}
 
 	if (tpr_shadowing) {
@@ -854,8 +861,12 @@ vmx_modinit(int ipinum)
 	    procbased2_vid_bits, 0, &tmp);
 	if (error == 0 && tpr_shadowing) {
 		virtual_interrupt_delivery = 1;
+#ifndef BURN_BRIDGES
 		TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_vid",
 		    &virtual_interrupt_delivery);
+#endif
+		TUNABLE_INT_FETCH("hw.vmm.vmx.cap.virtual_interrupt_delivery",
+		    &virtual_interrupt_delivery);
 	}
 
 	if (virtual_interrupt_delivery) {
@@ -881,8 +892,12 @@ vmx_modinit(int ipinum)
 				}
 			} else {
 				posted_interrupts = 1;
+#ifndef BURN_BRIDGES
 				TUNABLE_INT_FETCH("hw.vmm.vmx.use_apic_pir",
 				    &posted_interrupts);
+#endif
+				TUNABLE_INT_FETCH("hw.vmm.vmx.cap.posted_interrupts",
+				    &posted_interrupts);
 			}
 		}
 	}
@@ -899,7 +914,10 @@ vmx_modinit(int ipinum)
 
 	guest_l1d_flush = (cpu_ia32_arch_caps &
 	    IA32_ARCH_CAP_SKIP_L1DFL_VMENTRY) == 0;
+#ifndef BURN_BRIDGES
 	TUNABLE_INT_FETCH("hw.vmm.l1d_flush", &guest_l1d_flush);
+#endif
+	TUNABLE_INT_FETCH("hw.vmm.vmx.l1d_flush", &guest_l1d_flush);
 
 	/*
 	 * L1D cache flush is enabled.  Use IA32_FLUSH_CMD MSR when
@@ -911,8 +929,12 @@ vmx_modinit(int ipinum)
 	if (guest_l1d_flush) {
 		if ((cpu_stdext_feature3 & CPUID_STDEXT3_L1D_FLUSH) == 0) {
 			guest_l1d_flush_sw = 1;
+#ifndef BURN_BRIDGES
 			TUNABLE_INT_FETCH("hw.vmm.l1d_flush_sw",
 			    &guest_l1d_flush_sw);
+#endif
+			TUNABLE_INT_FETCH("hw.vmm.vmx.l1d_flush_sw",
+			    &guest_l1d_flush_sw);
 		}
 		if (guest_l1d_flush_sw) {
 			if (nmi_flush_l1d_sw <= 1)
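
For reference, here is a minimal standalone sketch of the pattern the diff
applies, using a hypothetical knob name that is not part of this commit:
register the sysctl under the preferred name with CTLFLAG_RDTUN |
CTLFLAG_NOFETCH so that `sysctl -T` reports it as a tunable, then fetch the
legacy environment name before the new one so the new name wins when both
are set:

	#include <sys/param.h>
	#include <sys/kernel.h>
	#include <sys/sysctl.h>

	/* Hypothetical knob; mirrors the knobs touched by this commit. */
	static int example_knob;

	SYSCTL_DECL(_hw_vmm_vmx);
	/*
	 * RDTUN marks the OID as a tunable for `sysctl -T`; NOFETCH keeps
	 * the sysctl machinery from fetching the environment value itself,
	 * because the module computes a default first and fetches manually.
	 */
	SYSCTL_INT(_hw_vmm_vmx, OID_AUTO, example_knob,
	    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &example_knob, 0,
	    "Example knob (hypothetical)");

	/* Called from the module's init routine. */
	static void
	example_modinit(void)
	{
		example_knob = 1;	/* default computed at init time */
	#ifndef BURN_BRIDGES
		/* Legacy name, kept for compatibility. */
		TUNABLE_INT_FETCH("hw.vmm.example_knob", &example_knob);
	#endif
		/* Preferred name; fetched last so it takes precedence. */
		TUNABLE_INT_FETCH("hw.vmm.vmx.example_knob", &example_knob);
	}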