svn commit: r269595 - stable/8/sys/x86/x86

Marius Strobl marius at FreeBSD.org
Tue Aug 5 16:44:28 UTC 2014


Author: marius
Date: Tue Aug  5 16:44:27 2014
New Revision: 269595
URL: http://svnweb.freebsd.org/changeset/base/269595

Log:
  MFC: r260457
  
  The changes in r233781 (MFCed to stable/8 in r235517) attempted to make
  logging during a machine check exception more readable.  In practice they
  prevented all logging during a machine check exception on at least some
  systems.  Specifically, when an uncorrected ECC error is detected in a DIMM
  on a Nehalem/Westmere class machine, all CPUs receive a machine check
  exception, but only CPUs on the same package as the memory controller for
  the erroring DIMM log an error.  The CPUs on the other package would complete
  the scan of their machine check banks and panic before the first set of CPUs
  could log an error.  The end result was a clearer display during the panic
  (no interleaved messages), but a crashdump without any useful info about
  the error that occurred.
  
  To handle this case, make all CPUs spin in the machine check handler
  once they have completed their scan of their machine check banks until
  at least one machine check error is logged.  I tried using a DELAY()
  instead so that the CPUs would not potentially hang forever, but that
  was not reliable in testing.
  
  While here, don't clear MCIP from MSR_MCG_STATUS before invoking panic.
  Only clear it if the machine check handler does not panic and returns
  to the interrupted thread.
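
  For reference, the resulting flow in mca_intr() boils down to the
  following condensed sketch of the change shown in the diff below
  (old_count is a local snapshot of the volatile mca_count counter):

      old_count = mca_count;
      recoverable = mca_scan(MCE);
      mcg_status = rdmsr(MSR_MCG_STATUS);
      if (!(mcg_status & MCG_STATUS_RIPV))
              recoverable = 0;
      if (!recoverable) {
              /* Spin until some CPU has logged at least one record. */
              while (mca_count == old_count)
                      cpu_spinwait();
              panic("Unrecoverable machine check exception");
      }
      /* Only clear MCIP when returning to the interrupted thread. */
      wrmsr(MSR_MCG_STATUS, mcg_status & ~MCG_STATUS_MCIP);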
  
  MFC: r263113
  
  Correct type for malloc().
  
  Submitted by:	"Conrad Meyer" <conrad.meyer at isilon.com>
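
  For context, this is the common sizeof-on-the-wrong-type mistake when
  allocating an array of pointers. A generic userland illustration of the
  pattern (hypothetical 'struct foo', not the FreeBSD code itself, which
  appears in the diff below):

      struct foo **table;
      size_t n = 16;

      /*
       * Misstated element type: sizeof(struct foo **) is the size of a
       * pointer-to-pointer.  It happens to equal sizeof(struct foo *) on
       * common ABIs, so the allocation is the right size in practice,
       * but the expression is wrong.
       */
      table = malloc(n * sizeof(struct foo **));

      /* Correct: the array holds 'struct foo *' elements. */
      table = malloc(n * sizeof(struct foo *));

      /* Idiom that sidesteps the mistake: size it from the object. */
      table = malloc(n * sizeof(*table));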
  
  MFC: r269052, r269239, r269242
  
  Intel desktop Haswell CPUs may report benign corrected parity errors (see
  the HSD131 erratum in [1]) at a considerable rate, so filter these by
  default unless logging is explicitly enabled. Unfortunately, there really
  is no better way to reasonably suppress these errors than to just skip
  them in mca_log(). Given that they are reported for bank 0, they would
  need to be masked in MSR_MC0_CTL. However, P6 family processors require
  that register to be set to either all 0s or all 1s, so using all 0s there
  would disable far more than the one error in question. Alternatively, the
  error could be masked for the corresponding CMCI, but that still wouldn't
  keep the periodic scanner from detecting these spurious errors. Apart
  from that, the register contents of MSR_MC0_CTL{,2} don't seem to be
  publicly documented, either in the Intel Architectures Developer's Manual
  or in the Haswell datasheets.
  
  Note that while HSD131 only covers the C0-stepping as of revision 014 of
  the Intel desktop 4th generation processor family specification update,
  these corrected errors have also been observed with the D0-stepping,
  aka "Haswell Refresh".
  
  1: http://www.intel.de/content/dam/www/public/us/en/documents/specification-updates/4th-gen-core-family-desktop-specification-update.pdf
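
  Since the new hw.mca.intel6h_hsd131 knob is a read-only tunable
  (CTLFLAG_RDTUN), logging of these corrected errors can only be re-enabled
  at boot time, for example via /boot/loader.conf:

      hw.mca.intel6h_hsd131=1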
  
  Reviewed by:	jhb
  Sponsored by:	Bally Wulff Games & Entertainment GmbH

Modified:
  stable/8/sys/x86/x86/mca.c
Directory Properties:
  stable/8/sys/   (props changed)
  stable/8/sys/x86/   (props changed)

Modified: stable/8/sys/x86/x86/mca.c
==============================================================================
--- stable/8/sys/x86/x86/mca.c	Tue Aug  5 16:31:03 2014	(r269594)
+++ stable/8/sys/x86/x86/mca.c	Tue Aug  5 16:44:27 2014	(r269595)
@@ -53,6 +53,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/taskqueue.h>
 #include <machine/intr_machdep.h>
 #include <machine/apicvar.h>
+#include <machine/cpu.h>
 #include <machine/cputypes.h>
 #include <machine/mca.h>
 #include <machine/md_var.h>
@@ -84,7 +85,7 @@ struct mca_internal {
 
 static MALLOC_DEFINE(M_MCA, "MCA", "Machine Check Architecture");
 
-static int mca_count;		/* Number of records stored. */
+static volatile int mca_count;	/* Number of records stored. */
 static int mca_banks;		/* Number of per-CPU register banks. */
 
 SYSCTL_NODE(_hw, OID_AUTO, mca, CTLFLAG_RD, NULL, "Machine Check Architecture");
@@ -99,6 +100,11 @@ TUNABLE_INT("hw.mca.amd10h_L1TP", &amd10
 SYSCTL_INT(_hw_mca, OID_AUTO, amd10h_L1TP, CTLFLAG_RDTUN, &amd10h_L1TP, 0,
     "Administrative toggle for logging of level one TLB parity (L1TP) errors");
 
+static int intel6h_HSD131;
+TUNABLE_INT("hw.mca.intel6h_hsd131", &intel6h_HSD131);
+SYSCTL_INT(_hw_mca, OID_AUTO, intel6h_HSD131, CTLFLAG_RDTUN, &intel6h_HSD131, 0,
+    "Administrative toggle for logging of spurious corrected errors");
+
 int workaround_erratum383;
 SYSCTL_INT(_hw_mca, OID_AUTO, erratum383, CTLFLAG_RD, &workaround_erratum383, 0,
     "Is the workaround for Erratum 383 on AMD Family 10h processors enabled?");
@@ -242,12 +248,34 @@ mca_error_mmtype(uint16_t mca_error)
 	return ("???");
 }
 
+static int __nonnull(1)
+mca_mute(const struct mca_record *rec)
+{
+
+	/*
+	 * Skip spurious corrected parity errors generated by desktop Haswell
+	 * (see HSD131 erratum) unless reporting is enabled.
+	 * Note that these errors also have been observed with D0-stepping,
+	 * while the revision 014 desktop Haswell specification update only
+	 * talks about C0-stepping.
+	 */
+	if (rec->mr_cpu_vendor_id == CPU_VENDOR_INTEL &&
+	    rec->mr_cpu_id == 0x306c3 && rec->mr_bank == 0 &&
+	    rec->mr_status == 0x90000040000f0005 && !intel6h_HSD131)
+	    	return (1);
+
+	return (0);
+}
+
 /* Dump details about a single machine check. */
 static void __nonnull(1)
 mca_log(const struct mca_record *rec)
 {
 	uint16_t mca_error;
 
+	if (mca_mute(rec))
+	    	return;
+
 	printf("MCA: Bank %d, Status 0x%016llx\n", rec->mr_bank,
 	    (long long)rec->mr_status);
 	printf("MCA: Global Cap 0x%016llx, Status 0x%016llx\n",
@@ -698,8 +726,8 @@ cmci_setup(void)
 {
 	int i;
 
-	cmc_state = malloc((mp_maxid + 1) * sizeof(struct cmc_state **),
-	    M_MCA, M_WAITOK);
+	cmc_state = malloc((mp_maxid + 1) * sizeof(struct cmc_state *), M_MCA,
+	    M_WAITOK);
 	for (i = 0; i <= mp_maxid; i++)
 		cmc_state[i] = malloc(sizeof(struct cmc_state) * mca_banks,
 		    M_MCA, M_WAITOK | M_ZERO);
@@ -732,7 +760,8 @@ mca_setup(uint64_t mcg_cap)
 	TASK_INIT(&mca_refill_task, 0, mca_refill, NULL);
 	mca_fill_freelist();
 	SYSCTL_ADD_INT(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
-	    "count", CTLFLAG_RD, &mca_count, 0, "Record count");
+	    "count", CTLFLAG_RD, (int *)(uintptr_t)&mca_count, 0,
+	    "Record count");
 	SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_mca), OID_AUTO,
 	    "interval", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, &mca_ticks,
 	    0, sysctl_positive_int, "I",
@@ -938,7 +967,7 @@ void
 mca_intr(void)
 {
 	uint64_t mcg_status;
-	int recoverable;
+	int old_count, recoverable;
 
 	if (!(cpu_feature & CPUID_MCA)) {
 		/*
@@ -952,15 +981,27 @@ mca_intr(void)
 	}
 
 	/* Scan the banks and check for any non-recoverable errors. */
+	old_count = mca_count;
 	recoverable = mca_scan(MCE);
 	mcg_status = rdmsr(MSR_MCG_STATUS);
 	if (!(mcg_status & MCG_STATUS_RIPV))
 		recoverable = 0;
 
+	if (!recoverable) {
+		/*
+		 * Wait for at least one error to be logged before
+		 * panic'ing.  Some errors will assert a machine check
+		 * on all CPUs, but only certain CPUs will find a valid
+		 * bank to log.
+		 */
+		while (mca_count == old_count)
+			cpu_spinwait();
+
+		panic("Unrecoverable machine check exception");
+	}
+
 	/* Clear MCIP. */
 	wrmsr(MSR_MCG_STATUS, mcg_status & ~MCG_STATUS_MCIP);
-	if (!recoverable)
-		panic("Unrecoverable machine check exception");
 }
 
 #ifdef DEV_APIC

