svn commit: r280260 - in head/sys: amd64/include conf i386/include x86/include x86/iommu x86/x86

Konstantin Belousov kib at FreeBSD.org
Thu Mar 19 13:57:53 UTC 2015


Author: kib
Date: Thu Mar 19 13:57:47 2015
New Revision: 280260
URL: https://svnweb.freebsd.org/changeset/base/280260

Log:
  Use the VT-d interrupt remapping block (IR) to perform FSB message
  translation.  In particular, although IO-APICs only take an 8-bit
  APIC id, the IR translation structures accept a 32-bit APIC id, which
  allows x2APIC mode to function properly.  Extend msi_cpu of struct
  msi_intrsrc and io_cpu of ioapic_intsrc from one byte to a full int.
  
  The KPI of IR is isolated in x86/iommu/iommu_intrmap.h, to avoid
  bringing all the dmar headers into the interrupt code.  Non-PCI(e)
  devices which generate message interrupts on the FSB require special
  handling: HPET FSB interrupts are remapped, while DMAR interrupts are
  not.
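
  For reference, a minimal sketch of the declarations exported by the
  new x86/iommu/iommu_intrmap.h, reconstructed from the function
  definitions in intel_intrmap.c shown below; the header itself is not
  in the visible part of the diff, so the include guard is an
  assumption:

  #ifndef __X86_IOMMU_IOMMU_INTRMAP_H
  #define	__X86_IOMMU_IOMMU_INTRMAP_H

  int iommu_alloc_msi_intr(device_t src, u_int *cookies, u_int count);
  int iommu_map_msi_intr(device_t src, u_int cpu, u_int vector,
      u_int cookie, uint64_t *addr, uint32_t *data);
  int iommu_unmap_msi_intr(device_t src, u_int cookie);
  int iommu_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector,
      bool edge, bool activehi, int irq, u_int *cookie, uint32_t *hi,
      uint32_t *lo);
  int iommu_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie);

  #endif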
  
  For each MSI and IO-APIC interrupt source, an iommu cookie is added,
  which is in fact the index of the IRE (interrupt remap entry) in the
  IR table.  The cookie is created at source allocation time and then
  used at map time to fill both the IRE and the device registers.  The
  MSI address/data registers and IO-APIC redirection registers are
  programmed with special values which are recognized by IR and used to
  recover the IRE index, from which the proper delivery mode and target
  are found.  All MSI interrupts in a block are mapped when msi_map()
  is called.
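
  As an illustration, a minimal sketch of how the cookie is folded into
  the remappable-format MSI address, mirroring iommu_map_msi_intr() in
  intel_intrmap.c below; the helper name is invented for the example:

  /*
   * Encode an IRE index (cookie) into the remappable MSI address,
   * exactly as iommu_map_msi_intr() does: the low 15 index bits go
   * into address bits 19:5, the high index bit is carried separately,
   * and the 0x18 constant sets the SHV and remappable-format bits
   * (see VT-d specification, 5.1.5.2).
   */
  static inline uint64_t
  ir_msi_addr(u_int cookie)
  {

  	return (MSI_INTEL_ADDR_BASE | ((cookie & 0x7fff) << 5) |
  	    ((cookie & 0x8000) << 2) | 0x18);
  }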
  
  Since interrupt source setup and dismantle code runs in a
  non-sleepable context, flushing the interrupt entry cache in the IR
  hardware, which is done asynchronously and ideally waits for an
  interrupt, requires busy-waiting for the queue to drain.
  dmar_qi_wait_for_seq() is modified to take a boolean argument
  requesting a busy-wait for the written sequence number instead of
  waiting for the interrupt.
  
  Some interrupts are configured before IR is initialized, e.g. the
  ACPI SCI.  Add an intr_reprogram() function to reprogram all
  already-configured interrupts, and call it immediately before an IR
  unit is enabled.  There is still a small window after an IO-APIC
  redirection entry is reprogrammed with a cookie but before the unit
  is enabled; fixing this properly would require starting IR much
  earlier.
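
  The x86/x86/intr_machdep.c hunk is not in the visible part of this
  diff; below is a minimal sketch of what intr_reprogram() amounts to,
  assuming it walks the registered interrupt sources under the
  intr_table_lock and invokes the new pic_reprogram_pin method added to
  struct pic in this change.  The committed implementation may differ
  in detail:

  void
  intr_reprogram(void)
  {
  	struct intsrc *is;
  	int v;

  	/* Sketch: re-push the current configuration for every source. */
  	mtx_lock(&intr_table_lock);
  	for (v = 0; v < NUM_IO_INTS; v++) {
  		is = interrupt_sources[v];
  		if (is == NULL)
  			continue;
  		if (is->is_pic->pic_reprogram_pin != NULL)
  			is->is_pic->pic_reprogram_pin(is);
  	}
  	mtx_unlock(&intr_table_lock);
  }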
  
  Add workarounds for the 5500 and X58 northbridges, some revisions of
  which have severe flaws in handling IR.  Use the same identification
  methods as employed by Linux.
  
  Review:	https://reviews.freebsd.org/D1892
  Reviewed by:	neel
  Discussed with:	jhb
  Tested by:	glebius, pho (previous versions)
  Sponsored by:	The FreeBSD Foundation
  MFC after:	3 weeks

Added:
  head/sys/x86/iommu/intel_intrmap.c   (contents, props changed)
  head/sys/x86/iommu/iommu_intrmap.h   (contents, props changed)
Modified:
  head/sys/amd64/include/intr_machdep.h
  head/sys/conf/files.amd64
  head/sys/conf/files.i386
  head/sys/i386/include/intr_machdep.h
  head/sys/x86/include/apicvar.h
  head/sys/x86/iommu/busdma_dmar.c
  head/sys/x86/iommu/intel_ctx.c
  head/sys/x86/iommu/intel_dmar.h
  head/sys/x86/iommu/intel_drv.c
  head/sys/x86/iommu/intel_fault.c
  head/sys/x86/iommu/intel_gas.c
  head/sys/x86/iommu/intel_idpgtbl.c
  head/sys/x86/iommu/intel_qi.c
  head/sys/x86/iommu/intel_quirks.c
  head/sys/x86/iommu/intel_utils.c
  head/sys/x86/x86/intr_machdep.c
  head/sys/x86/x86/io_apic.c
  head/sys/x86/x86/msi.c

Modified: head/sys/amd64/include/intr_machdep.h
==============================================================================
--- head/sys/amd64/include/intr_machdep.h	Thu Mar 19 13:53:47 2015	(r280259)
+++ head/sys/amd64/include/intr_machdep.h	Thu Mar 19 13:57:47 2015	(r280260)
@@ -106,6 +106,7 @@ struct pic {
 	int (*pic_config_intr)(struct intsrc *, enum intr_trigger,
 	    enum intr_polarity);
 	int (*pic_assign_cpu)(struct intsrc *, u_int apic_id);
+	void (*pic_reprogram_pin)(struct intsrc *);
 	TAILQ_ENTRY(pic) pics;
 };
 
@@ -172,6 +173,7 @@ int	intr_register_source(struct intsrc *
 int	intr_remove_handler(void *cookie);
 void	intr_resume(bool suspend_cancelled);
 void	intr_suspend(void);
+void	intr_reprogram(void);
 void	intrcnt_add(const char *name, u_long **countp);
 void	nexus_add_irq(u_long irq);
 int	msi_alloc(device_t dev, int count, int maxcount, int *irqs);

Modified: head/sys/conf/files.amd64
==============================================================================
--- head/sys/conf/files.amd64	Thu Mar 19 13:53:47 2015	(r280259)
+++ head/sys/conf/files.amd64	Thu Mar 19 13:57:47 2015	(r280260)
@@ -542,6 +542,7 @@ x86/iommu/intel_drv.c		optional	acpi acp
 x86/iommu/intel_fault.c		optional	acpi acpi_dmar pci
 x86/iommu/intel_gas.c		optional	acpi acpi_dmar pci
 x86/iommu/intel_idpgtbl.c	optional	acpi acpi_dmar pci
+x86/iommu/intel_intrmap.c	optional	acpi acpi_dmar pci
 x86/iommu/intel_qi.c		optional	acpi acpi_dmar pci
 x86/iommu/intel_quirks.c	optional	acpi acpi_dmar pci
 x86/iommu/intel_utils.c		optional	acpi acpi_dmar pci

Modified: head/sys/conf/files.i386
==============================================================================
--- head/sys/conf/files.i386	Thu Mar 19 13:53:47 2015	(r280259)
+++ head/sys/conf/files.i386	Thu Mar 19 13:57:47 2015	(r280260)
@@ -560,6 +560,7 @@ x86/iommu/intel_drv.c		optional acpi acp
 x86/iommu/intel_fault.c		optional acpi acpi_dmar pci
 x86/iommu/intel_gas.c		optional acpi acpi_dmar pci
 x86/iommu/intel_idpgtbl.c	optional acpi acpi_dmar pci
+x86/iommu/intel_intrmap.c	optional acpi acpi_dmar pci
 x86/iommu/intel_qi.c		optional acpi acpi_dmar pci
 x86/iommu/intel_quirks.c	optional acpi acpi_dmar pci
 x86/iommu/intel_utils.c		optional acpi acpi_dmar pci

Modified: head/sys/i386/include/intr_machdep.h
==============================================================================
--- head/sys/i386/include/intr_machdep.h	Thu Mar 19 13:53:47 2015	(r280259)
+++ head/sys/i386/include/intr_machdep.h	Thu Mar 19 13:57:47 2015	(r280260)
@@ -112,6 +112,7 @@ struct pic {
 	int (*pic_config_intr)(struct intsrc *, enum intr_trigger,
 	    enum intr_polarity);
 	int (*pic_assign_cpu)(struct intsrc *, u_int apic_id);
+	void (*pic_reprogram_pin)(struct intsrc *);
 	TAILQ_ENTRY(pic) pics;
 };
 
@@ -168,6 +169,7 @@ int	intr_register_source(struct intsrc *
 int	intr_remove_handler(void *cookie);
 void	intr_resume(bool suspend_cancelled);
 void	intr_suspend(void);
+void	intr_reprogram(void);
 void	intrcnt_add(const char *name, u_long **countp);
 void	nexus_add_irq(u_long irq);
 int	msi_alloc(device_t dev, int count, int maxcount, int *irqs);

Modified: head/sys/x86/include/apicvar.h
==============================================================================
--- head/sys/x86/include/apicvar.h	Thu Mar 19 13:53:47 2015	(r280259)
+++ head/sys/x86/include/apicvar.h	Thu Mar 19 13:57:47 2015	(r280260)
@@ -155,6 +155,11 @@
 #define	APIC_BUS_PCI		2
 #define	APIC_BUS_MAX		APIC_BUS_PCI
 
+#define	IRQ_EXTINT		(NUM_IO_INTS + 1)
+#define	IRQ_NMI			(NUM_IO_INTS + 2)
+#define	IRQ_SMI			(NUM_IO_INTS + 3)
+#define	IRQ_DISABLED		(NUM_IO_INTS + 4)
+
 /*
  * An APIC enumerator is a psuedo bus driver that enumerates APIC's including
  * CPU's and I/O APIC's.

Modified: head/sys/x86/iommu/busdma_dmar.c
==============================================================================
--- head/sys/x86/iommu/busdma_dmar.c	Thu Mar 19 13:53:47 2015	(r280259)
+++ head/sys/x86/iommu/busdma_dmar.c	Thu Mar 19 13:57:47 2015	(r280260)
@@ -47,6 +47,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/taskqueue.h>
 #include <sys/tree.h>
 #include <sys/uio.h>
+#include <sys/vmem.h>
 #include <dev/pci/pcireg.h>
 #include <dev/pci/pcivar.h>
 #include <vm/vm.h>
@@ -92,7 +93,7 @@ dmar_bus_dma_is_dev_disabled(int domain,
  * domain, and must collectively be assigned to use either DMAR or
  * bounce mapping.
  */
-static device_t
+device_t
 dmar_get_requester(device_t dev, uint16_t *rid)
 {
 	devclass_t pci_class;
@@ -255,6 +256,8 @@ dmar_get_dma_tag(device_t dev, device_t 
 	/* Not in scope of any DMAR ? */
 	if (dmar == NULL)
 		return (NULL);
+	if (!dmar->dma_enabled)
+		return (NULL);
 	dmar_quirks_pre_use(dmar);
 	dmar_instantiate_rmrr_ctxs(dmar);
 
@@ -852,6 +855,8 @@ int
 dmar_init_busdma(struct dmar_unit *unit)
 {
 
+	unit->dma_enabled = 1;
+	TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled);
 	TAILQ_INIT(&unit->delayed_maps);
 	TASK_INIT(&unit->dmamap_load_task, 0, dmar_bus_task_dmamap, unit);
 	unit->delayed_taskqueue = taskqueue_create("dmar", M_WAITOK,

Modified: head/sys/x86/iommu/intel_ctx.c
==============================================================================
--- head/sys/x86/iommu/intel_ctx.c	Thu Mar 19 13:53:47 2015	(r280259)
+++ head/sys/x86/iommu/intel_ctx.c	Thu Mar 19 13:57:47 2015	(r280260)
@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/taskqueue.h>
 #include <sys/tree.h>
 #include <sys/uio.h>
+#include <sys/vmem.h>
 #include <vm/vm.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_kern.h>

Modified: head/sys/x86/iommu/intel_dmar.h
==============================================================================
--- head/sys/x86/iommu/intel_dmar.h	Thu Mar 19 13:53:47 2015	(r280259)
+++ head/sys/x86/iommu/intel_dmar.h	Thu Mar 19 13:57:47 2015	(r280260)
@@ -1,5 +1,5 @@
 /*-
- * Copyright (c) 2013 The FreeBSD Foundation
+ * Copyright (c) 2013-2015 The FreeBSD Foundation
  * All rights reserved.
  *
  * This software was developed by Konstantin Belousov <kib at FreeBSD.org>
@@ -185,6 +185,13 @@ struct dmar_unit {
 	u_int inv_seq_waiters;	/* count of waiters for seq */
 	u_int inv_queue_full;	/* informational counter */
 
+	/* IR */
+	int ir_enabled;
+	vm_paddr_t irt_phys;
+	dmar_irte_t *irt;
+	u_int irte_cnt;
+	vmem_t *irtids;
+
 	/* Delayed freeing of map entries queue processing */
 	struct dmar_map_entries_tailq tlb_flush_entries;
 	struct task qi_task;
@@ -194,6 +201,8 @@ struct dmar_unit {
 	struct task dmamap_load_task;
 	TAILQ_HEAD(, bus_dmamap_dmar) delayed_maps;
 	struct taskqueue *delayed_taskqueue;
+
+	int dma_enabled;
 };
 
 #define	DMAR_LOCK(dmar)		mtx_lock(&(dmar)->lock)
@@ -206,12 +215,16 @@ struct dmar_unit {
 
 #define	DMAR_IS_COHERENT(dmar)	(((dmar)->hw_ecap & DMAR_ECAP_C) != 0)
 #define	DMAR_HAS_QI(dmar)	(((dmar)->hw_ecap & DMAR_ECAP_QI) != 0)
+#define	DMAR_X2APIC(dmar) \
+	(x2apic_mode && ((dmar)->hw_ecap & DMAR_ECAP_EIM) != 0)
 
 /* Barrier ids */
 #define	DMAR_BARRIER_RMRR	0
 #define	DMAR_BARRIER_USEQ	1
 
 struct dmar_unit *dmar_find(device_t dev);
+struct dmar_unit *dmar_find_hpet(device_t dev, uint16_t *rid);
+struct dmar_unit *dmar_find_ioapic(u_int apic_id, uint16_t *rid);
 
 u_int dmar_nd2mask(u_int nd);
 bool dmar_pglvl_supported(struct dmar_unit *unit, int pglvl);
@@ -238,6 +251,9 @@ void dmar_flush_ctx_to_ram(struct dmar_u
 void dmar_flush_root_to_ram(struct dmar_unit *unit, dmar_root_entry_t *dst);
 int dmar_enable_translation(struct dmar_unit *unit);
 int dmar_disable_translation(struct dmar_unit *unit);
+int dmar_load_irt_ptr(struct dmar_unit *unit);
+int dmar_enable_ir(struct dmar_unit *unit);
+int dmar_disable_ir(struct dmar_unit *unit);
 bool dmar_barrier_enter(struct dmar_unit *dmar, u_int barrier_id);
 void dmar_barrier_exit(struct dmar_unit *dmar, u_int barrier_id);
 
@@ -256,6 +272,8 @@ void dmar_qi_invalidate_locked(struct dm
     dmar_gaddr_t size, struct dmar_qi_genseq *pseq);
 void dmar_qi_invalidate_ctx_glob_locked(struct dmar_unit *unit);
 void dmar_qi_invalidate_iotlb_glob_locked(struct dmar_unit *unit);
+void dmar_qi_invalidate_iec_glob(struct dmar_unit *unit);
+void dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt);
 
 vm_object_t ctx_get_idmap_pgtbl(struct dmar_ctx *ctx, dmar_gaddr_t maxaddr);
 void put_idmap_pgtbl(vm_object_t obj);
@@ -282,6 +300,7 @@ void dmar_ctx_free_entry(struct dmar_map
 
 int dmar_init_busdma(struct dmar_unit *unit);
 void dmar_fini_busdma(struct dmar_unit *unit);
+device_t dmar_get_requester(device_t dev, uint16_t *rid);
 
 void dmar_gas_init_ctx(struct dmar_ctx *ctx);
 void dmar_gas_fini_ctx(struct dmar_ctx *ctx);
@@ -304,6 +323,9 @@ int dmar_instantiate_rmrr_ctxs(struct dm
 void dmar_quirks_post_ident(struct dmar_unit *dmar);
 void dmar_quirks_pre_use(struct dmar_unit *dmar);
 
+int dmar_init_irt(struct dmar_unit *unit);
+void dmar_fini_irt(struct dmar_unit *unit);
+
 #define	DMAR_GM_CANWAIT	0x0001
 #define	DMAR_GM_CANSPLIT 0x0002
 
@@ -374,13 +396,16 @@ dmar_write8(const struct dmar_unit *unit
  * containing the P or R and W bits, is set only after the high word
  * is written.  For clear, the P bit is cleared first, then the high
  * word is cleared.
+ *
+ * dmar_pte_update updates the pte.  For amd64, the update is atomic.
+ * For i386, it first disables the entry by clearing the word
+ * containing the P bit, and then defer to dmar_pte_store.  The locked
+ * cmpxchg8b is probably available on any machine having DMAR support,
+ * but interrupt translation table may be mapped uncached.
  */
 static inline void
-dmar_pte_store(volatile uint64_t *dst, uint64_t val)
+dmar_pte_store1(volatile uint64_t *dst, uint64_t val)
 {
-
-	KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx",
-	    dst, (uintmax_t)*dst, (uintmax_t)val));
 #ifdef __i386__
 	volatile uint32_t *p;
 	uint32_t hi, lo;
@@ -396,6 +421,28 @@ dmar_pte_store(volatile uint64_t *dst, u
 }
 
 static inline void
+dmar_pte_store(volatile uint64_t *dst, uint64_t val)
+{
+
+	KASSERT(*dst == 0, ("used pte %p oldval %jx newval %jx",
+	    dst, (uintmax_t)*dst, (uintmax_t)val));
+	dmar_pte_store1(dst, val);
+}
+
+static inline void
+dmar_pte_update(volatile uint64_t *dst, uint64_t val)
+{
+
+#ifdef __i386__
+	volatile uint32_t *p;
+
+	p = (volatile uint32_t *)dst;
+	*p = 0;
+#endif
+	dmar_pte_store1(dst, val);
+}
+
+static inline void
 dmar_pte_clear(volatile uint64_t *dst)
 {
 #ifdef __i386__

Modified: head/sys/x86/iommu/intel_drv.c
==============================================================================
--- head/sys/x86/iommu/intel_drv.c	Thu Mar 19 13:53:47 2015	(r280259)
+++ head/sys/x86/iommu/intel_drv.c	Thu Mar 19 13:57:47 2015	(r280260)
@@ -1,5 +1,5 @@
 /*-
- * Copyright (c) 2013 The FreeBSD Foundation
+ * Copyright (c) 2013-2015 The FreeBSD Foundation
  * All rights reserved.
  *
  * This software was developed by Konstantin Belousov <kib at FreeBSD.org>
@@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/smp.h>
 #include <sys/taskqueue.h>
 #include <sys/tree.h>
+#include <sys/vmem.h>
 #include <machine/bus.h>
 #include <contrib/dev/acpica/include/acpi.h>
 #include <contrib/dev/acpica/include/accommon.h>
@@ -65,6 +66,7 @@ __FBSDID("$FreeBSD$");
 #include <x86/iommu/intel_reg.h>
 #include <x86/iommu/busdma_dmar.h>
 #include <x86/iommu/intel_dmar.h>
+#include <dev/pci/pcireg.h>
 #include <dev/pci/pcivar.h>
 
 #ifdef DEV_APIC
@@ -243,6 +245,7 @@ dmar_release_resources(device_t dev, str
 	int i;
 
 	dmar_fini_busdma(unit);
+	dmar_fini_irt(unit);
 	dmar_fini_qi(unit);
 	dmar_fini_fault_log(unit);
 	for (i = 0; i < DMAR_INTR_TOTAL; i++)
@@ -509,6 +512,11 @@ dmar_attach(device_t dev)
 		dmar_release_resources(dev, unit);
 		return (error);
 	}
+	error = dmar_init_irt(unit);
+	if (error != 0) {
+		dmar_release_resources(dev, unit);
+		return (error);
+	}
 	error = dmar_init_busdma(unit);
 	if (error != 0) {
 		dmar_release_resources(dev, unit);
@@ -763,6 +771,76 @@ found:
 	return (device_get_softc(dmar_dev));
 }
 
+static struct dmar_unit *
+dmar_find_nonpci(u_int id, u_int entry_type, uint16_t *rid)
+{
+	device_t dmar_dev;
+	struct dmar_unit *unit;
+	ACPI_DMAR_HARDWARE_UNIT *dmarh;
+	ACPI_DMAR_DEVICE_SCOPE *devscope;
+	ACPI_DMAR_PCI_PATH *path;
+	char *ptr, *ptrend;
+	int i;
+
+	for (i = 0; i < dmar_devcnt; i++) {
+		dmar_dev = dmar_devs[i];
+		if (dmar_dev == NULL)
+			continue;
+		unit = (struct dmar_unit *)device_get_softc(dmar_dev);
+		dmarh = dmar_find_by_index(i);
+		if (dmarh == NULL)
+			continue;
+		ptr = (char *)dmarh + sizeof(*dmarh);
+		ptrend = (char *)dmarh + dmarh->Header.Length;
+		for (;;) {
+			if (ptr >= ptrend)
+				break;
+			devscope = (ACPI_DMAR_DEVICE_SCOPE *)ptr;
+			ptr += devscope->Length;
+			if (devscope->EntryType != entry_type)
+				continue;
+			if (devscope->EnumerationId != id)
+				continue;
+			if (devscope->Length - sizeof(ACPI_DMAR_DEVICE_SCOPE)
+			    == 2) {
+				if (rid != NULL) {
+					path = (ACPI_DMAR_PCI_PATH *)
+					    (devscope + 1);
+					*rid = PCI_RID(devscope->Bus,
+					    path->Device, path->Function);
+				}
+				return (unit);
+			} else {
+				/* XXXKIB */
+				printf(
+		       "dmar_find_nonpci: id %d type %d path length != 2\n",
+				    id, entry_type);
+			}
+		}
+	}
+	return (NULL);
+}
+
+
+struct dmar_unit *
+dmar_find_hpet(device_t dev, uint16_t *rid)
+{
+	ACPI_HANDLE handle;
+	uint32_t hpet_id;
+
+	handle = acpi_get_handle(dev);
+	if (ACPI_FAILURE(acpi_GetInteger(handle, "_UID", &hpet_id)))
+		return (NULL);
+	return (dmar_find_nonpci(hpet_id, ACPI_DMAR_SCOPE_TYPE_HPET, rid));
+}
+
+struct dmar_unit *
+dmar_find_ioapic(u_int apic_id, uint16_t *rid)
+{
+
+	return (dmar_find_nonpci(apic_id, ACPI_DMAR_SCOPE_TYPE_IOAPIC, rid));
+}
+
 struct rmrr_iter_args {
 	struct dmar_ctx *ctx;
 	device_t dev;

Modified: head/sys/x86/iommu/intel_fault.c
==============================================================================
--- head/sys/x86/iommu/intel_fault.c	Thu Mar 19 13:53:47 2015	(r280259)
+++ head/sys/x86/iommu/intel_fault.c	Thu Mar 19 13:57:47 2015	(r280260)
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/rman.h>
 #include <sys/taskqueue.h>
 #include <sys/tree.h>
+#include <sys/vmem.h>
 #include <machine/bus.h>
 #include <contrib/dev/acpica/include/acpi.h>
 #include <contrib/dev/acpica/include/accommon.h>

Modified: head/sys/x86/iommu/intel_gas.c
==============================================================================
--- head/sys/x86/iommu/intel_gas.c	Thu Mar 19 13:53:47 2015	(r280259)
+++ head/sys/x86/iommu/intel_gas.c	Thu Mar 19 13:57:47 2015	(r280260)
@@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/taskqueue.h>
 #include <sys/tree.h>
 #include <sys/uio.h>
+#include <sys/vmem.h>
 #include <dev/pci/pcivar.h>
 #include <vm/vm.h>
 #include <vm/vm_extern.h>

Modified: head/sys/x86/iommu/intel_idpgtbl.c
==============================================================================
--- head/sys/x86/iommu/intel_idpgtbl.c	Thu Mar 19 13:53:47 2015	(r280259)
+++ head/sys/x86/iommu/intel_idpgtbl.c	Thu Mar 19 13:57:47 2015	(r280260)
@@ -48,6 +48,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/taskqueue.h>
 #include <sys/tree.h>
 #include <sys/uio.h>
+#include <sys/vmem.h>
 #include <vm/vm.h>
 #include <vm/vm_extern.h>
 #include <vm/vm_kern.h>

Added: head/sys/x86/iommu/intel_intrmap.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/sys/x86/iommu/intel_intrmap.c	Thu Mar 19 13:57:47 2015	(r280260)
@@ -0,0 +1,380 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Konstantin Belousov <kib at FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/memdesc.h>
+#include <sys/rman.h>
+#include <sys/rwlock.h>
+#include <sys/taskqueue.h>
+#include <sys/tree.h>
+#include <sys/vmem.h>
+#include <machine/bus.h>
+#include <machine/intr_machdep.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <x86/include/apicreg.h>
+#include <x86/include/apicvar.h>
+#include <x86/include/busdma_impl.h>
+#include <x86/iommu/intel_reg.h>
+#include <x86/iommu/busdma_dmar.h>
+#include <x86/iommu/intel_dmar.h>
+#include <dev/pci/pcivar.h>
+#include <x86/iommu/iommu_intrmap.h>
+
+static struct dmar_unit *dmar_ir_find(device_t src, uint16_t *rid,
+    int *is_dmar);
+static void dmar_ir_program_irte(struct dmar_unit *unit, u_int idx,
+    uint64_t low, uint16_t rid);
+static int dmar_ir_free_irte(struct dmar_unit *unit, u_int cookie);
+
+int
+iommu_alloc_msi_intr(device_t src, u_int *cookies, u_int count)
+{
+	struct dmar_unit *unit;
+	vmem_addr_t vmem_res;
+	u_int idx, i;
+	int error;
+
+	unit = dmar_ir_find(src, NULL, NULL);
+	if (unit == NULL || !unit->ir_enabled) {
+		for (i = 0; i < count; i++)
+			cookies[i] = -1;
+		return (EOPNOTSUPP);
+	}
+
+	error = vmem_alloc(unit->irtids, count, M_FIRSTFIT | M_NOWAIT,
+	    &vmem_res);
+	if (error != 0) {
+		KASSERT(error != EOPNOTSUPP,
+		    ("impossible EOPNOTSUPP from vmem"));
+		return (error);
+	}
+	idx = vmem_res;
+	for (i = 0; i < count; i++)
+		cookies[i] = idx + i;
+	return (0);
+}
+
+int
+iommu_map_msi_intr(device_t src, u_int cpu, u_int vector, u_int cookie,
+    uint64_t *addr, uint32_t *data)
+{
+	struct dmar_unit *unit;
+	uint64_t low;
+	uint16_t rid;
+	int is_dmar;
+
+	unit = dmar_ir_find(src, &rid, &is_dmar);
+	if (is_dmar) {
+		KASSERT(unit == NULL, ("DMAR cannot translate itself"));
+
+		/*
+		 * See VT-d specification, 5.1.6 Remapping Hardware -
+		 * Interrupt Programming.
+		 */
+		*data = vector;
+		*addr = MSI_INTEL_ADDR_BASE | ((cpu & 0xff) << 12);
+		if (x2apic_mode)
+			*addr |= ((uint64_t)cpu & 0xffffff00) << 32;
+		else
+			KASSERT(cpu <= 0xff, ("cpu id too big %d", cpu));
+		return (0);
+	}
+	if (unit == NULL || !unit->ir_enabled || cookie == -1)
+		return (EOPNOTSUPP);
+
+	low = (DMAR_X2APIC(unit) ? DMAR_IRTE1_DST_x2APIC(cpu) :
+	    DMAR_IRTE1_DST_xAPIC(cpu)) | DMAR_IRTE1_V(vector) |
+	    DMAR_IRTE1_DLM_FM | DMAR_IRTE1_TM_EDGE | DMAR_IRTE1_RH_DIRECT |
+	    DMAR_IRTE1_DM_PHYSICAL | DMAR_IRTE1_P;
+	dmar_ir_program_irte(unit, cookie, low, rid);
+
+	if (addr != NULL) {
+		/*
+		 * See VT-d specification, 5.1.5.2 MSI and MSI-X
+		 * Register Programming.
+		 */
+		*addr = MSI_INTEL_ADDR_BASE | ((cookie & 0x7fff) << 5) |
+		    ((cookie & 0x8000) << 2) | 0x18;
+		*data = 0;
+	}
+	return (0);
+}
+
+int
+iommu_unmap_msi_intr(device_t src, u_int cookie)
+{
+	struct dmar_unit *unit;
+
+	if (cookie == -1)
+		return (0);
+	unit = dmar_ir_find(src, NULL, NULL);
+	return (dmar_ir_free_irte(unit, cookie));
+}
+
+int
+iommu_map_ioapic_intr(u_int ioapic_id, u_int cpu, u_int vector, bool edge,
+    bool activehi, int irq, u_int *cookie, uint32_t *hi, uint32_t *lo)
+{
+	struct dmar_unit *unit;
+	vmem_addr_t vmem_res;
+	uint64_t low, iorte;
+	u_int idx;
+	int error;
+	uint16_t rid;
+
+	unit = dmar_find_ioapic(ioapic_id, &rid);
+	if (unit == NULL || !unit->ir_enabled) {
+		*cookie = -1;
+		return (EOPNOTSUPP);
+	}
+
+	error = vmem_alloc(unit->irtids, 1, M_FIRSTFIT | M_NOWAIT, &vmem_res);
+	if (error != 0) {
+		KASSERT(error != EOPNOTSUPP,
+		    ("impossible EOPNOTSUPP from vmem"));
+		return (error);
+	}
+	idx = vmem_res;
+	low = 0;
+	switch (irq) {
+	case IRQ_EXTINT:
+		low |= DMAR_IRTE1_DLM_ExtINT;
+		break;
+	case IRQ_NMI:
+		low |= DMAR_IRTE1_DLM_NMI;
+		break;
+	case IRQ_SMI:
+		low |= DMAR_IRTE1_DLM_SMI;
+		break;
+	default:
+		KASSERT(vector != 0, ("No vector for IRQ %u", irq));
+		low |= DMAR_IRTE1_DLM_FM | DMAR_IRTE1_V(vector);
+		break;
+	}
+	low |= (DMAR_X2APIC(unit) ? DMAR_IRTE1_DST_x2APIC(cpu) :
+	    DMAR_IRTE1_DST_xAPIC(cpu)) |
+	    (edge ? DMAR_IRTE1_TM_EDGE : DMAR_IRTE1_TM_LEVEL) |
+	    DMAR_IRTE1_RH_DIRECT | DMAR_IRTE1_DM_PHYSICAL | DMAR_IRTE1_P;
+	dmar_ir_program_irte(unit, idx, low, rid);
+
+	if (hi != NULL) {
+		/*
+		 * See VT-d specification, 5.1.5.1 I/OxAPIC
+		 * Programming.
+		 */
+		iorte = (1ULL << 48) | ((uint64_t)(idx & 0x7fff) << 49) |
+		    ((idx & 0x8000) != 0 ? (1 << 11) : 0) |
+		    (edge ? IOART_TRGREDG : IOART_TRGRLVL) |
+		    (activehi ? IOART_INTAHI : IOART_INTALO) |
+		    IOART_DELFIXED | vector;
+		*hi = iorte >> 32;
+		*lo = iorte;
+	}
+	*cookie = idx;
+	return (0);
+}
+
+int
+iommu_unmap_ioapic_intr(u_int ioapic_id, u_int *cookie)
+{
+	struct dmar_unit *unit;
+	u_int idx;
+
+	idx = *cookie;
+	if (idx == -1)
+		return (0);
+	*cookie = -1;
+	unit = dmar_find_ioapic(ioapic_id, NULL);
+	KASSERT(unit != NULL && unit->ir_enabled,
+	    ("unmap: cookie %d unit %p", idx, unit));
+	return (dmar_ir_free_irte(unit, idx));
+}
+
+static struct dmar_unit *
+dmar_ir_find(device_t src, uint16_t *rid, int *is_dmar)
+{
+	devclass_t src_class;
+	struct dmar_unit *unit;
+
+	/*
+	 * We need to determine if the interrupt source generates FSB
+	 * interrupts.  If yes, it is either DMAR, in which case
+	 * interrupts are not remapped.  Or it is HPET, and interrupts
+	 * are remapped.  For HPET, source id is reported by HPET
+	 * record in DMAR ACPI table.
+	 */
+	if (is_dmar != NULL)
+		*is_dmar = FALSE;
+	src_class = device_get_devclass(src);
+	if (src_class == devclass_find("dmar")) {
+		unit = NULL;
+		if (is_dmar != NULL)
+			*is_dmar = TRUE;
+	} else if (src_class == devclass_find("hpet")) {
+		unit = dmar_find_hpet(src, rid);
+	} else {
+		unit = dmar_find(src);
+		if (unit != NULL && rid != NULL)
+			dmar_get_requester(src, rid);
+	}
+	return (unit);
+}
+
+static void
+dmar_ir_program_irte(struct dmar_unit *unit, u_int idx, uint64_t low,
+    uint16_t rid)
+{
+	dmar_irte_t *irte;
+	uint64_t high;
+
+	KASSERT(idx < unit->irte_cnt,
+	    ("bad cookie %d %d", idx, unit->irte_cnt));
+	irte = &(unit->irt[idx]);
+	high = DMAR_IRTE2_SVT_RID | DMAR_IRTE2_SQ_RID |
+	    DMAR_IRTE2_SID_RID(rid);
+	device_printf(unit->dev,
+	    "programming irte[%d] rid %#x high %#jx low %#jx\n",
+	    idx, rid, (uintmax_t)high, (uintmax_t)low);
+	DMAR_LOCK(unit);
+	if ((irte->irte1 & DMAR_IRTE1_P) != 0) {
+		/*
+		 * The rte is already valid.  Assume that the request
+		 * is to remap the interrupt for balancing.  Only low
+		 * word of rte needs to be changed.  Assert that the
+		 * high word contains expected value.
+		 */
+		KASSERT(irte->irte2 == high,
+		    ("irte2 mismatch, %jx %jx", (uintmax_t)irte->irte2,
+		    (uintmax_t)high));
+		dmar_pte_update(&irte->irte1, low);
+	} else {
+		dmar_pte_store(&irte->irte2, high);
+		dmar_pte_store(&irte->irte1, low);
+	}
+	dmar_qi_invalidate_iec(unit, idx, 1);
+	DMAR_UNLOCK(unit);
+
+}
+
+static int
+dmar_ir_free_irte(struct dmar_unit *unit, u_int cookie)
+{
+	dmar_irte_t *irte;
+
+	KASSERT(unit != NULL && unit->ir_enabled,
+	    ("unmap: cookie %d unit %p", cookie, unit));
+	KASSERT(cookie < unit->irte_cnt,
+	    ("bad cookie %u %u", cookie, unit->irte_cnt));
+	irte = &(unit->irt[cookie]);
+	dmar_pte_clear(&irte->irte1);
+	dmar_pte_clear(&irte->irte2);
+	DMAR_LOCK(unit);
+	dmar_qi_invalidate_iec(unit, cookie, 1);
+	DMAR_UNLOCK(unit);
+	vmem_free(unit->irtids, cookie, 1);
+	return (0);
+}
+
+static u_int
+clp2(u_int v)
+{
+
+	return (powerof2(v) ? v : 1 << fls(v));
+}
+
+int
+dmar_init_irt(struct dmar_unit *unit)
+{
+
+	if ((unit->hw_ecap & DMAR_ECAP_IR) == 0)
+		return (0);
+	unit->ir_enabled = 1;
+	TUNABLE_INT_FETCH("hw.dmar.ir", &unit->ir_enabled);
+	if (!unit->ir_enabled)
+		return (0);
+	if (!unit->qi_enabled) {
+		unit->ir_enabled = 0;
+		if (bootverbose)
+			device_printf(unit->dev,
+	     "QI disabled, disabling interrupt remapping\n");
+		return (0);
+	}
+	unit->irte_cnt = clp2(NUM_IO_INTS);
+	unit->irt = (dmar_irte_t *)(uintptr_t)kmem_alloc_contig(kernel_arena,
+	    unit->irte_cnt * sizeof(dmar_irte_t), M_ZERO | M_WAITOK, 0,
+	    dmar_high, PAGE_SIZE, 0, DMAR_IS_COHERENT(unit) ?
+	    VM_MEMATTR_DEFAULT : VM_MEMATTR_UNCACHEABLE);
+	if (unit->irt == NULL)
+		return (ENOMEM);
+	unit->irt_phys = pmap_kextract((vm_offset_t)unit->irt);
+	unit->irtids = vmem_create("dmarirt", 0, unit->irte_cnt, 1, 0,
+	    M_FIRSTFIT | M_NOWAIT);
+	DMAR_LOCK(unit);
+	dmar_load_irt_ptr(unit);
+	dmar_qi_invalidate_iec_glob(unit);
+	DMAR_UNLOCK(unit);
+
+	/*
+	 * Initialize mappings for already configured interrupt pins.
+	 * Required, because otherwise the interrupts fault without
+	 * irtes.
+	 */
+	intr_reprogram();
+
+	DMAR_LOCK(unit);
+	dmar_enable_ir(unit);
+	DMAR_UNLOCK(unit);
+	return (0);
+}
+
+void
+dmar_fini_irt(struct dmar_unit *unit)
+{
+
+	unit->ir_enabled = 0;
+	if (unit->irt != NULL) {
+		dmar_disable_ir(unit);
+		dmar_qi_invalidate_iec_glob(unit);
+		vmem_destroy(unit->irtids);
+		kmem_free(kernel_arena, (vm_offset_t)unit->irt,
+		    unit->irte_cnt * sizeof(dmar_irte_t));
+	}
+}

Modified: head/sys/x86/iommu/intel_qi.c
==============================================================================
--- head/sys/x86/iommu/intel_qi.c	Thu Mar 19 13:53:47 2015	(r280259)
+++ head/sys/x86/iommu/intel_qi.c	Thu Mar 19 13:57:47 2015	(r280260)
@@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/rman.h>
 #include <sys/taskqueue.h>
 #include <sys/tree.h>
+#include <sys/vmem.h>
 #include <machine/bus.h>
 #include <contrib/dev/acpica/include/acpi.h>
 #include <contrib/dev/acpica/include/accommon.h>
@@ -194,13 +195,14 @@ dmar_qi_emit_wait_seq(struct dmar_unit *
 }
 
 static void
-dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct dmar_qi_genseq *gseq)
+dmar_qi_wait_for_seq(struct dmar_unit *unit, const struct dmar_qi_genseq *gseq,
+    bool nowait)
 {
 
 	DMAR_ASSERT_LOCKED(unit);
 	unit->inv_seq_waiters++;
 	while (!dmar_qi_seq_processed(unit, gseq)) {
-		if (cold) {
+		if (cold || nowait) {
 			cpu_spinwait();
 		} else {
 			msleep(&unit->inv_seq_waiters, &unit->lock, 0,
@@ -246,7 +248,7 @@ dmar_qi_invalidate_ctx_glob_locked(struc
 	dmar_qi_emit(unit, DMAR_IQ_DESCR_CTX_INV | DMAR_IQ_DESCR_CTX_GLOB, 0);
 	dmar_qi_emit_wait_seq(unit, &gseq);
 	dmar_qi_advance_tail(unit);
-	dmar_qi_wait_for_seq(unit, &gseq);
+	dmar_qi_wait_for_seq(unit, &gseq, false);
 }
 
 void
@@ -260,7 +262,60 @@ dmar_qi_invalidate_iotlb_glob_locked(str
 	    DMAR_IQ_DESCR_IOTLB_DW | DMAR_IQ_DESCR_IOTLB_DR, 0);
 	dmar_qi_emit_wait_seq(unit, &gseq);
 	dmar_qi_advance_tail(unit);
-	dmar_qi_wait_for_seq(unit, &gseq);
+	dmar_qi_wait_for_seq(unit, &gseq, false);
+}
+
+void
+dmar_qi_invalidate_iec_glob(struct dmar_unit *unit)
+{
+	struct dmar_qi_genseq gseq;
+
+	DMAR_ASSERT_LOCKED(unit);
+	dmar_qi_ensure(unit, 2);
+	dmar_qi_emit(unit, DMAR_IQ_DESCR_IEC_INV, 0);
+	dmar_qi_emit_wait_seq(unit, &gseq);
+	dmar_qi_advance_tail(unit);
+	dmar_qi_wait_for_seq(unit, &gseq, false);
+}
+
+void
+dmar_qi_invalidate_iec(struct dmar_unit *unit, u_int start, u_int cnt)
+{
+	struct dmar_qi_genseq gseq;
+	u_int c, l;
+
+	DMAR_ASSERT_LOCKED(unit);
+	KASSERT(start < unit->irte_cnt && start < start + cnt &&
+	    start + cnt <= unit->irte_cnt,
+	    ("inv iec overflow %d %d %d", unit->irte_cnt, start, cnt));
+	for (; cnt > 0; cnt -= c, start += c) {
+		l = ffs(start | cnt) - 1;
+		c = 1 << l;
+		dmar_qi_ensure(unit, 1);
+		dmar_qi_emit(unit, DMAR_IQ_DESCR_IEC_INV |
+		    DMAR_IQ_DESCR_IEC_IDX | DMAR_IQ_DESCR_IEC_IIDX(start) |
+		    DMAR_IQ_DESCR_IEC_IM(l), 0);
+	}
+	dmar_qi_ensure(unit, 1);
+	dmar_qi_emit_wait_seq(unit, &gseq);
+	dmar_qi_advance_tail(unit);
+
+	/*
+	 * The caller of the function, in particular,
+	 * dmar_ir_program_irte(), may be called from the context
+	 * where the sleeping is forbidden (in fact, the
+	 * intr_table_lock mutex may be held, locked from
+	 * intr_shuffle_irqs()).  Wait for the invalidation completion
+	 * using the busy wait.
+	 *
+	 * The impact on the interrupt input setup code is small, the
+	 * expected overhead is comparable with the chipset register
+	 * read.  It is more harmful for the parallel DMA operations,
+	 * since we own the dmar unit lock until whole invalidation
+	 * queue is processed, which includes requests possibly issued
+	 * before our request.
+	 */
+	dmar_qi_wait_for_seq(unit, &gseq, true);
 }
 
 int
@@ -377,7 +432,7 @@ dmar_fini_qi(struct dmar_unit *unit)
 	dmar_qi_ensure(unit, 1);
 	dmar_qi_emit_wait_seq(unit, &gseq);
 	dmar_qi_advance_tail(unit);
-	dmar_qi_wait_for_seq(unit, &gseq);
+	dmar_qi_wait_for_seq(unit, &gseq, false);
 	/* only after the quisce, disable queue */
 	dmar_disable_qi(unit);
 	KASSERT(unit->inv_seq_waiters == 0,

Modified: head/sys/x86/iommu/intel_quirks.c
==============================================================================
--- head/sys/x86/iommu/intel_quirks.c	Thu Mar 19 13:53:47 2015	(r280259)
+++ head/sys/x86/iommu/intel_quirks.c	Thu Mar 19 13:57:47 2015	(r280260)
@@ -1,5 +1,5 @@
 /*-
- * Copyright (c) 2013 The FreeBSD Foundation
+ * Copyright (c) 2013, 2015 The FreeBSD Foundation
  * All rights reserved.
  *
  * This software was developed by Konstantin Belousov <kib at FreeBSD.org>
@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/smp.h>
 #include <sys/taskqueue.h>
 #include <sys/tree.h>
+#include <sys/vmem.h>
 #include <machine/bus.h>
 #include <contrib/dev/acpica/include/acpi.h>
 #include <contrib/dev/acpica/include/accommon.h>
@@ -59,7 +60,7 @@ __FBSDID("$FreeBSD$");
 #include <x86/iommu/intel_dmar.h>
 #include <dev/pci/pcivar.h>
 
-typedef void (*dmar_quirk_fun)(struct dmar_unit *);
+typedef void (*dmar_quirk_cpu_fun)(struct dmar_unit *);
 
 struct intel_dmar_quirk_cpu {
 	u_int ext_family;
@@ -67,17 +68,21 @@ struct intel_dmar_quirk_cpu {
 	u_int family_code;
 	u_int model;
 	u_int stepping;
-	dmar_quirk_fun quirk;
+	dmar_quirk_cpu_fun quirk;
 	const char *descr;
 };
 
+typedef void (*dmar_quirk_nb_fun)(struct dmar_unit *, device_t nb);
+
 struct intel_dmar_quirk_nb {
 	u_int dev_id;
 	u_int rev_no;
-	dmar_quirk_fun quirk;
+	dmar_quirk_nb_fun quirk;
 	const char *descr;
 };
 
+#define	QUIRK_NB_ALL_REV	0xffffffff
+
 static void
 dmar_match_quirks(struct dmar_unit *dmar,
     const struct intel_dmar_quirk_nb *nb_quirks, int nb_quirks_len,
@@ -99,13 +104,14 @@ dmar_match_quirks(struct dmar_unit *dmar
 			for (i = 0; i < nb_quirks_len; i++) {
 				nb_quirk = &nb_quirks[i];
 				if (nb_quirk->dev_id == dev_id &&
-				    nb_quirk->rev_no == rev_no) {
+				    (nb_quirk->rev_no == rev_no ||
+				    nb_quirk->rev_no == QUIRK_NB_ALL_REV)) {
 					if (bootverbose) {
 						device_printf(dmar->dev,
 						    "NB IOMMU quirk %s\n",
 						    nb_quirk->descr);
 					}
-					nb_quirk->quirk(dmar);
+					nb_quirk->quirk(dmar, nb);
 				}
 			}
 		} else {
@@ -139,12 +145,29 @@ dmar_match_quirks(struct dmar_unit *dmar
 }
 
 static void
-nb_5400_no_low_high_prot_mem(struct dmar_unit *unit)
+nb_5400_no_low_high_prot_mem(struct dmar_unit *unit, device_t nb __unused)
 {
 
 	unit->hw_cap &= ~(DMAR_CAP_PHMR | DMAR_CAP_PLMR);
 }
 
+static void
+nb_no_ir(struct dmar_unit *unit, device_t nb __unused)
+{
+
+	unit->hw_ecap &= ~(DMAR_ECAP_IR | DMAR_ECAP_EIM);
+}
+
+static void
+nb_5500_no_ir_rev13(struct dmar_unit *unit, device_t nb)
+{
+	u_int rev_no;
+
+	rev_no = pci_get_revid(nb);
+	if (rev_no <= 0x13)
+		nb_no_ir(unit, nb);
+}
+
 static const struct intel_dmar_quirk_nb pre_use_nb[] = {
 	{
 	    .dev_id = 0x4001, .rev_no = 0x20,
@@ -156,6 +179,26 @@ static const struct intel_dmar_quirk_nb 
 	    .quirk = nb_5400_no_low_high_prot_mem,
 	    .descr = "5400 E23" /* no low/high protected memory */
 	},
+	{
+	    .dev_id = 0x3403, .rev_no = QUIRK_NB_ALL_REV,
+	    .quirk = nb_5500_no_ir_rev13,

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

