svn commit: r305867 - in head/sys/dev/mlx5: . mlx5_core mlx5_en

Hans Petter Selasky hselasky at FreeBSD.org
Fri Sep 16 11:28:18 UTC 2016


Author: hselasky
Date: Fri Sep 16 11:28:16 2016
New Revision: 305867
URL: https://svnweb.freebsd.org/changeset/base/305867

Log:
  Update the MLX5 core module:
  - Add new firmware commands and update existing ones.
  - Add more firmware related structures and update existing ones.
  - Some minor fixes, like adding missing \n to some prints.
  
  Sponsored by:	Mellanox Technologies
  MFC after:	1 week
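
  For context on the size-macro convention this commit extends (device.h below
  gains MLX5_ST_SZ_QW alongside the existing MLX5_ST_SZ_BYTES/MLX5_ST_SZ_DW),
  here is a minimal standalone sketch. The three macros are copied from the
  diff; the mlx5_ifc_example_bits structure and its field names are purely
  hypothetical and do not exist in the driver, where each one-byte array
  element of an *_bits layout represents a single bit, so sizeof() yields the
  layout size in bits.

	#include <stdio.h>

	/* Hypothetical 128-bit layout in the mlx5_ifc style (one byte = one bit). */
	struct mlx5_ifc_example_bits {
		unsigned char field_a[0x20];	/* 32-bit field */
		unsigned char field_b[0x20];	/* 32-bit field */
		unsigned char field_c[0x40];	/* 64-bit field */
	};

	/* Size of a layout in bytes, dwords and (new in this commit) qwords. */
	#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
	#define MLX5_ST_SZ_DW(typ)    (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
	#define MLX5_ST_SZ_QW(typ)    (sizeof(struct mlx5_ifc_##typ##_bits) / 64)

	int
	main(void)
	{
		/* Prints "bytes=16 dw=4 qw=2" for the 128-bit example layout. */
		printf("bytes=%zu dw=%zu qw=%zu\n",
		    MLX5_ST_SZ_BYTES(example),
		    MLX5_ST_SZ_DW(example),
		    MLX5_ST_SZ_QW(example));
		return (0);
	}

  Command buffers throughout the driver are dimensioned with these macros, for
  example "u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)]" in the
  mlx5_flow_table.c hunk further down.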

Modified:
  head/sys/dev/mlx5/cq.h
  head/sys/dev/mlx5/device.h
  head/sys/dev/mlx5/driver.h
  head/sys/dev/mlx5/flow_table.h
  head/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
  head/sys/dev/mlx5/mlx5_core/mlx5_core.h
  head/sys/dev/mlx5/mlx5_core/mlx5_cq.c
  head/sys/dev/mlx5/mlx5_core/mlx5_eq.c
  head/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c
  head/sys/dev/mlx5/mlx5_core/mlx5_flow_table.c
  head/sys/dev/mlx5/mlx5_core/mlx5_fw.c
  head/sys/dev/mlx5/mlx5_core/mlx5_main.c
  head/sys/dev/mlx5/mlx5_core/mlx5_mr.c
  head/sys/dev/mlx5/mlx5_core/mlx5_port.c
  head/sys/dev/mlx5/mlx5_core/mlx5_qp.c
  head/sys/dev/mlx5/mlx5_core/mlx5_srq.c
  head/sys/dev/mlx5/mlx5_core/mlx5_transobj.c
  head/sys/dev/mlx5/mlx5_core/mlx5_uar.c
  head/sys/dev/mlx5/mlx5_core/mlx5_vport.c
  head/sys/dev/mlx5/mlx5_core/transobj.h
  head/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
  head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
  head/sys/dev/mlx5/mlx5_ifc.h
  head/sys/dev/mlx5/qp.h
  head/sys/dev/mlx5/vport.h

Modified: head/sys/dev/mlx5/cq.h
==============================================================================
--- head/sys/dev/mlx5/cq.h	Fri Sep 16 10:04:28 2016	(r305866)
+++ head/sys/dev/mlx5/cq.h	Fri Sep 16 11:28:16 2016	(r305867)
@@ -49,6 +49,8 @@ struct mlx5_core_cq {
 	unsigned		arm_sn;
 	struct mlx5_rsc_debug	*dbg;
 	int			pid;
+	int			reset_notify_added;
+	struct list_head	reset_notify;
 };
 
 

Modified: head/sys/dev/mlx5/device.h
==============================================================================
--- head/sys/dev/mlx5/device.h	Fri Sep 16 10:04:28 2016	(r305866)
+++ head/sys/dev/mlx5/device.h	Fri Sep 16 11:28:16 2016	(r305867)
@@ -57,6 +57,7 @@
 #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
 #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
 #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
 #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
 #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
 #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
@@ -112,6 +113,10 @@ enum {
 };
 
 enum {
+	MLX5_CQ_FLAGS_OI	= 2,
+};
+
+enum {
 	MLX5_STAT_RATE_OFFSET	= 5,
 };
 
@@ -129,6 +134,10 @@ enum {
 };
 
 enum {
+	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
+};
+
+enum {
 	MLX5_PERM_LOCAL_READ	= 1 << 2,
 	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
 	MLX5_PERM_REMOTE_READ	= 1 << 4,
@@ -184,6 +193,25 @@ enum {
 };
 
 enum {
+	MLX5_UMR_TRANSLATION_OFFSET_EN	= (1 << 4),
+
+	MLX5_UMR_CHECK_NOT_FREE		= (1 << 5),
+	MLX5_UMR_CHECK_FREE		= (2 << 5),
+
+	MLX5_UMR_INLINE			= (1 << 7),
+};
+
+#define MLX5_UMR_MTT_ALIGNMENT 0x40
+#define MLX5_UMR_MTT_MASK      (MLX5_UMR_MTT_ALIGNMENT - 1)
+#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
+
+enum {
+	MLX5_EVENT_QUEUE_TYPE_QP = 0,
+	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
+	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
+};
+
+enum {
 	MLX5_PORT_CHANGE_SUBTYPE_DOWN		= 1,
 	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE		= 4,
 	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED	= 5,
@@ -194,19 +222,28 @@ enum {
 };
 
 enum {
+	MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX = 1,
+	MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE,
+	MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE,
+	MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE,
+	MLX5_MAX_INLINE_RECEIVE_SIZE		= 64
+};
+
+enum {
 	MLX5_DEV_CAP_FLAG_XRC		= 1LL <<  3,
 	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
 	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
 	MLX5_DEV_CAP_FLAG_APM		= 1LL << 17,
-	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
 	MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD	= 1LL << 21,
 	MLX5_DEV_CAP_FLAG_BLOCK_MCAST	= 1LL << 23,
 	MLX5_DEV_CAP_FLAG_CQ_MODER	= 1LL << 29,
 	MLX5_DEV_CAP_FLAG_RESIZE_CQ	= 1LL << 30,
+	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 33,
 	MLX5_DEV_CAP_FLAG_ROCE          = 1LL << 34,
 	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 37,
 	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
 	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 3LL << 46,
+	MLX5_DEV_CAP_FLAG_DRAIN_SIGERR	= 1LL << 48,
 };
 
 enum {
@@ -263,6 +300,7 @@ enum {
 
 	MLX5_OPCODE_UMR			= 0x25,
 
+	MLX5_OPCODE_SIGNATURE_CANCELED	= (1 << 15),
 };
 
 enum {
@@ -299,6 +337,18 @@ struct mlx5_outbox_hdr {
 	__be32		syndrome;
 };
 
+struct mlx5_cmd_set_dc_cnak_mbox_in {
+	struct mlx5_inbox_hdr	hdr;
+	u8			enable;
+	u8			reserved[47];
+	__be64			pa;
+};
+
+struct mlx5_cmd_set_dc_cnak_mbox_out {
+	struct mlx5_outbox_hdr	hdr;
+	u8			rsvd[8];
+};
+
 struct mlx5_cmd_layout {
 	u8		type;
 	u8		rsvd0[3];
@@ -339,9 +389,12 @@ struct mlx5_init_seg {
 	__be32			rsvd1[120];
 	__be32			initializing;
 	struct mlx5_health_buffer  health;
-	__be32			rsvd2[884];
+	__be32			rsvd2[880];
+	__be32			internal_timer_h;
+	__be32			internal_timer_l;
+	__be32			rsvd3[2];
 	__be32			health_counter;
-	__be32			rsvd3[1019];
+	__be32			rsvd4[1019];
 	__be64			ieee1588_clk;
 	__be32			ieee1588_clk_type;
 	__be32			clr_intx;
@@ -420,6 +473,7 @@ enum {
 	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST              = 0x4,
 	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER                    = 0x5,
 	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE                      = 0x6,
+	MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED                      = 0x7,
 };
 
 struct mlx5_eqe_port_module_event {
@@ -832,6 +886,10 @@ struct mlx5_query_eq_mbox_out {
 	struct mlx5_eq_context	ctx;
 };
 
+enum {
+	MLX5_MKEY_STATUS_FREE = 1 << 6,
+};
+
 struct mlx5_mkey_seg {
 	/* This is a two bit field occupying bits 31-30.
 	 * bit 31 is always 0,
@@ -868,7 +926,7 @@ struct mlx5_query_special_ctxs_mbox_out 
 struct mlx5_create_mkey_mbox_in {
 	struct mlx5_inbox_hdr	hdr;
 	__be32			input_mkey_index;
-	u8			rsvd0[4];
+	__be32			flags;
 	struct mlx5_mkey_seg	seg;
 	u8			rsvd1[16];
 	__be32			xlat_oct_act_size;
@@ -971,6 +1029,17 @@ struct mlx5_destroy_psv_out {
 	u8                      rsvd[8];
 };
 
+static inline int mlx5_host_is_le(void)
+{
+#if defined(__LITTLE_ENDIAN)
+	return 1;
+#elif defined(__BIG_ENDIAN)
+	return 0;
+#else
+#error Host endianness not defined
+#endif
+}
+
 #define MLX5_CMD_OP_MAX 0x939
 
 enum {
@@ -1008,6 +1077,8 @@ enum {
 	MLX5_FLOW_TABLE_TYPE_EGRESS_ACL  = 2,
 	MLX5_FLOW_TABLE_TYPE_INGRESS_ACL = 3,
 	MLX5_FLOW_TABLE_TYPE_ESWITCH	 = 4,
+	MLX5_FLOW_TABLE_TYPE_SNIFFER_RX	 = 5,
+	MLX5_FLOW_TABLE_TYPE_SNIFFER_TX	 = 6,
 };
 
 enum {
@@ -1062,6 +1133,10 @@ enum mlx5_cap_type {
 	MLX5_CAP_FLOW_TABLE,
 	MLX5_CAP_ESWITCH_FLOW_TABLE,
 	MLX5_CAP_ESWITCH,
+	MLX5_CAP_SNAPSHOT,
+	MLX5_CAP_VECTOR_CALC,
+	MLX5_CAP_QOS,
+	MLX5_CAP_DEBUG,
 	/* NUM OF CAP Types */
 	MLX5_CAP_NUM
 };
@@ -1107,21 +1182,23 @@ enum mlx5_cap_type {
 	MLX5_GET(flow_table_eswitch_cap, \
 		 mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
 
-#define MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(mdev, cap) \
-	MLX5_CAP_ESW_FLOWTABLE(dev, \
-			       flow_table_properties_esw_acl_egress.cap)
-
-#define MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL_MAX(mdev, cap) \
-	MLX5_CAP_ESW_FLOWTABLE_MAX(dev, \
-				   flow_table_properties_esw_acl_egress.cap)
-
-#define MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(mdev, cap) \
-	MLX5_CAP_ESW_FLOWTABLE(dev, \
-			       flow_table_properties_esw_acl_ingress.cap)
-
-#define MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL_MAX(mdev, cap) \
-	MLX5_CAP_ESW_FLOWTABLE_MAX(dev, \
-				   flow_table_properties_esw_acl_ingress.cap)
+#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
+
+#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
 
 #define MLX5_CAP_ESW(mdev, cap) \
 	MLX5_GET(e_switch_cap, \
@@ -1137,6 +1214,38 @@ enum mlx5_cap_type {
 #define MLX5_CAP_ODP_MAX(mdev, cap)\
 	MLX5_GET(odp_cap, mdev->hca_caps_max[MLX5_CAP_ODP], cap)
 
+#define MLX5_CAP_SNAPSHOT(mdev, cap) \
+	MLX5_GET(snapshot_cap, \
+		 mdev->hca_caps_cur[MLX5_CAP_SNAPSHOT], cap)
+
+#define MLX5_CAP_SNAPSHOT_MAX(mdev, cap) \
+	MLX5_GET(snapshot_cap, \
+		 mdev->hca_caps_max[MLX5_CAP_SNAPSHOT], cap)
+
+#define MLX5_CAP_EOIB_OFFLOADS(mdev, cap) \
+	MLX5_GET(per_protocol_networking_offload_caps,\
+		 mdev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS], cap)
+
+#define MLX5_CAP_EOIB_OFFLOADS_MAX(mdev, cap) \
+	MLX5_GET(per_protocol_networking_offload_caps,\
+		 mdev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS], cap)
+
+#define MLX5_CAP_DEBUG(mdev, cap) \
+	MLX5_GET(debug_cap, \
+		 mdev->hca_caps_cur[MLX5_CAP_DEBUG], cap)
+
+#define MLX5_CAP_DEBUG_MAX(mdev, cap) \
+	MLX5_GET(debug_cap, \
+		 mdev->hca_caps_max[MLX5_CAP_DEBUG], cap)
+
+#define MLX5_CAP_QOS(mdev, cap) \
+	MLX5_GET(qos_cap,\
+		 mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
+
+#define MLX5_CAP_QOS_MAX(mdev, cap) \
+	MLX5_GET(qos_cap,\
+		 mdev->hca_caps_max[MLX5_CAP_QOS], cap)
+
 enum {
 	MLX5_CMD_STAT_OK			= 0x0,
 	MLX5_CMD_STAT_INT_ERR			= 0x1,
@@ -1168,6 +1277,22 @@ enum {
 };
 
 enum {
+	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP       = 0x0,
+	MLX5_PCIE_LANE_COUNTERS_GROUP	      = 0x1,
+	MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
+};
+
+enum {
+	MLX5_NUM_UUARS_PER_PAGE = MLX5_NON_FP_BF_REGS_PER_PAGE,
+	MLX5_DEF_TOT_UUARS = 8 * MLX5_NUM_UUARS_PER_PAGE,
+};
+
+enum {
+	NUM_DRIVER_UARS = 4,
+	NUM_LOW_LAT_UUARS = 4,
+};
+
+enum {
 	MLX5_CAP_PORT_TYPE_IB  = 0x0,
 	MLX5_CAP_PORT_TYPE_ETH = 0x1,
 };
@@ -1252,4 +1377,7 @@ static inline int mlx5_get_cqe_format(co
 	return (cqe->op_own & MLX5E_CQE_FORMAT_MASK) >> 2;
 }
 
+/* 8 regular priorities + 1 for multicast */
+#define MLX5_NUM_BYPASS_FTS	9
+
 #endif /* MLX5_DEVICE_H */

Modified: head/sys/dev/mlx5/driver.h
==============================================================================
--- head/sys/dev/mlx5/driver.h	Fri Sep 16 10:04:28 2016	(r305866)
+++ head/sys/dev/mlx5/driver.h	Fri Sep 16 11:28:16 2016	(r305867)
@@ -42,16 +42,15 @@
 #include <dev/mlx5/device.h>
 #include <dev/mlx5/doorbell.h>
 
+#define MLX5_QCOUNTER_SETS_NETDEV 64
+
 enum {
 	MLX5_BOARD_ID_LEN = 64,
 	MLX5_MAX_NAME_LEN = 16,
 };
 
 enum {
-	/* one minute for the sake of bringup. Generally, commands must always
-	 * complete and we may need to increase this timeout value
-	 */
-	MLX5_CMD_TIMEOUT_MSEC	= 7200 * 1000,
+	MLX5_CMD_TIMEOUT_MSEC	= 8 * 60 * 1000,
 	MLX5_CMD_WQ_MAX_NAME	= 32,
 };
 
@@ -85,20 +84,44 @@ enum {
 };
 
 enum {
-	MLX5_ATOMIC_MODE_IB_COMP	= 1 << 16,
-	MLX5_ATOMIC_MODE_CX		= 2 << 16,
-	MLX5_ATOMIC_MODE_8B		= 3 << 16,
-	MLX5_ATOMIC_MODE_16B		= 4 << 16,
-	MLX5_ATOMIC_MODE_32B		= 5 << 16,
-	MLX5_ATOMIC_MODE_64B		= 6 << 16,
-	MLX5_ATOMIC_MODE_128B		= 7 << 16,
-	MLX5_ATOMIC_MODE_256B		= 8 << 16,
+	MLX5_ATOMIC_MODE_OFF		= 16,
+	MLX5_ATOMIC_MODE_NONE		= 0 << MLX5_ATOMIC_MODE_OFF,
+	MLX5_ATOMIC_MODE_IB_COMP	= 1 << MLX5_ATOMIC_MODE_OFF,
+	MLX5_ATOMIC_MODE_CX		= 2 << MLX5_ATOMIC_MODE_OFF,
+	MLX5_ATOMIC_MODE_8B		= 3 << MLX5_ATOMIC_MODE_OFF,
+	MLX5_ATOMIC_MODE_16B		= 4 << MLX5_ATOMIC_MODE_OFF,
+	MLX5_ATOMIC_MODE_32B		= 5 << MLX5_ATOMIC_MODE_OFF,
+	MLX5_ATOMIC_MODE_64B		= 6 << MLX5_ATOMIC_MODE_OFF,
+	MLX5_ATOMIC_MODE_128B		= 7 << MLX5_ATOMIC_MODE_OFF,
+	MLX5_ATOMIC_MODE_256B		= 8 << MLX5_ATOMIC_MODE_OFF,
+};
+
+enum {
+	MLX5_ATOMIC_MODE_DCT_OFF	= 20,
+	MLX5_ATOMIC_MODE_DCT_NONE	= 0 << MLX5_ATOMIC_MODE_DCT_OFF,
+	MLX5_ATOMIC_MODE_DCT_IB_COMP	= 1 << MLX5_ATOMIC_MODE_DCT_OFF,
+	MLX5_ATOMIC_MODE_DCT_CX		= 2 << MLX5_ATOMIC_MODE_DCT_OFF,
+	MLX5_ATOMIC_MODE_DCT_8B		= 3 << MLX5_ATOMIC_MODE_DCT_OFF,
+	MLX5_ATOMIC_MODE_DCT_16B	= 4 << MLX5_ATOMIC_MODE_DCT_OFF,
+	MLX5_ATOMIC_MODE_DCT_32B	= 5 << MLX5_ATOMIC_MODE_DCT_OFF,
+	MLX5_ATOMIC_MODE_DCT_64B	= 6 << MLX5_ATOMIC_MODE_DCT_OFF,
+	MLX5_ATOMIC_MODE_DCT_128B	= 7 << MLX5_ATOMIC_MODE_DCT_OFF,
+	MLX5_ATOMIC_MODE_DCT_256B	= 8 << MLX5_ATOMIC_MODE_DCT_OFF,
+};
+
+enum {
+	MLX5_ATOMIC_OPS_CMP_SWAP		= 1 << 0,
+	MLX5_ATOMIC_OPS_FETCH_ADD		= 1 << 1,
+	MLX5_ATOMIC_OPS_MASKED_CMP_SWAP		= 1 << 2,
+	MLX5_ATOMIC_OPS_MASKED_FETCH_ADD	= 1 << 3,
 };
 
 enum {
 	MLX5_REG_QETCR		 = 0x4005,
 	MLX5_REG_QPDP		 = 0x4007,
 	MLX5_REG_QTCT		 = 0x400A,
+	MLX5_REG_DCBX_PARAM	 = 0x4020,
+	MLX5_REG_DCBX_APP	 = 0x4021,
 	MLX5_REG_PCAP		 = 0x5001,
 	MLX5_REG_PMTU		 = 0x5003,
 	MLX5_REG_PTYS		 = 0x5004,
@@ -116,6 +139,7 @@ enum {
 	MLX5_REG_NODE_DESC	 = 0x6001,
 	MLX5_REG_HOST_ENDIANNESS = 0x7004,
 	MLX5_REG_MCIA		 = 0x9014,
+	MLX5_REG_MPCNT		 = 0x9051,
 };
 
 enum dbg_rsc_type {
@@ -124,6 +148,12 @@ enum dbg_rsc_type {
 	MLX5_DBG_RSC_CQ,
 };
 
+enum {
+	MLX5_INTERFACE_PROTOCOL_IB  = 0,
+	MLX5_INTERFACE_PROTOCOL_ETH = 1,
+	MLX5_INTERFACE_NUMBER       = 2,
+};
+
 struct mlx5_field_desc {
 	struct dentry	       *dent;
 	int			i;
@@ -147,6 +177,10 @@ enum mlx5_dev_event {
 	MLX5_DEV_EVENT_GUID_CHANGE,
 	MLX5_DEV_EVENT_CLIENT_REREG,
 	MLX5_DEV_EVENT_VPORT_CHANGE,
+	MLX5_DEV_EVENT_ERROR_STATE_DCBX,
+	MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE,
+	MLX5_DEV_EVENT_LOCAL_OPER_CHANGE,
+	MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE,
 };
 
 enum mlx5_port_status {
@@ -375,9 +409,12 @@ struct mlx5_core_mr {
 };
 
 enum mlx5_res_type {
-	MLX5_RES_QP,
-	MLX5_RES_SRQ,
-	MLX5_RES_XSRQ,
+	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
+	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
+	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
+	MLX5_RES_SRQ	= 3,
+	MLX5_RES_XSRQ	= 4,
+	MLX5_RES_DCT	= 5,
 };
 
 struct mlx5_core_rsc_common {
@@ -413,8 +450,6 @@ struct mlx5_eq_table {
 
 struct mlx5_uar {
 	u32			index;
-	struct list_head	bf_list;
-	unsigned		free_bf_bmap;
 	void __iomem	       *bf_map;
 	void __iomem	       *map;
 };
@@ -461,7 +496,7 @@ struct mlx5_srq_table {
 struct mlx5_mr_table {
 	/* protect radix tree
 	 */
-	rwlock_t		lock;
+	spinlock_t		lock;
 	struct radix_tree_root	tree;
 };
 
@@ -483,7 +518,7 @@ struct mlx5_priv {
 	struct workqueue_struct *pg_wq;
 	struct rb_root		page_root;
 	int			fw_pages;
-	int			reg_pages;
+	atomic_t		reg_pages;
 	struct list_head	free_list;
 
 	struct mlx5_core_health health;
@@ -521,6 +556,12 @@ struct mlx5_priv {
 	struct list_head        dev_list;
 	struct list_head        ctx_list;
 	spinlock_t              ctx_lock;
+	unsigned long		pci_dev_data;
+};
+
+enum mlx5_device_state {
+	MLX5_DEVICE_STATE_UP,
+	MLX5_DEVICE_STATE_INTERNAL_ERROR,
 };
 
 struct mlx5_special_contexts {
@@ -535,6 +576,7 @@ struct mlx5_core_dev {
 	u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
 	u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
 	struct mlx5_init_seg __iomem *iseg;
+	enum mlx5_device_state	state;
 	void			(*event) (struct mlx5_core_dev *dev,
 					  enum mlx5_dev_event event,
 					  unsigned long param);
@@ -544,6 +586,7 @@ struct mlx5_core_dev {
 	u32			issi;
 	struct mlx5_special_contexts special_contexts;
 	unsigned int module_status[MLX5_MAX_PORTS];
+	u32 num_q_counter_allocated[MLX5_INTERFACE_NUMBER];
 };
 
 enum {
@@ -573,6 +616,8 @@ struct mlx5_net_counters {
 };
 
 struct mlx5_ptys_reg {
+	u8	an_dis_admin;
+	u8	an_dis_ap;
 	u8	local_port;
 	u8	proto_mask;
 	u32	eth_proto_cap;
@@ -620,6 +665,15 @@ enum {
 	MLX5_DB_PER_PAGE = PAGE_SIZE / L1_CACHE_BYTES,
 };
 
+struct mlx5_core_dct {
+	struct mlx5_core_rsc_common	common; /* must be first */
+	void (*event)(struct mlx5_core_dct *, int);
+	int			dctn;
+	struct completion	drained;
+	struct mlx5_rsc_debug	*dbg;
+	int			pid;
+};
+
 enum {
 	MLX5_COMP_EQ_SIZE = 1024,
 };
@@ -725,9 +779,14 @@ static inline void *mlx5_vzalloc(unsigne
 	return rtn;
 }
 
-static inline u32 mlx5_base_mkey(const u32 key)
+static inline void *mlx5_vmalloc(unsigned long size)
 {
-	return key & 0xffffff00u;
+	void *rtn;
+
+	rtn = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+	if (!rtn)
+		rtn = vmalloc(size);
+	return rtn;
 }
 
 int mlx5_cmd_init(struct mlx5_core_dev *dev);
@@ -809,6 +868,8 @@ int mlx5_stop_eqs(struct mlx5_core_dev *
 int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn);
 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
+int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable,
+				u64 addr);
 
 int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
 void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
@@ -816,11 +877,16 @@ int mlx5_core_access_reg(struct mlx5_cor
 			 int size_in, void *data_out, int size_out,
 			 u16 reg_num, int arg, int write);
 
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
 int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
 int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
 			 int ptys_size, int proto_mask);
 int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
 			      u32 *proto_cap, int proto_mask);
+int mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
+			    u8 *an_disable_cap, u8 *an_disable_status);
+int mlx5_set_port_autoneg(struct mlx5_core_dev *dev, bool disable,
+			  u32 eth_proto_admin, int proto_mask);
 int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
 				u32 *proto_admin, int proto_mask);
 int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
@@ -828,10 +894,14 @@ int mlx5_set_port_proto(struct mlx5_core
 int mlx5_set_port_status(struct mlx5_core_dev *dev,
 			 enum mlx5_port_status status);
 int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+				 enum mlx5_port_status *status);
 int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 port,
 			u32 rx_pause, u32 tx_pause);
 int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 port,
 			  u32 *rx_pause, u32 *tx_pause);
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx);
 
 int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu);
 int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu);
@@ -884,6 +954,9 @@ int mlx5_modify_port_cong_params(struct 
 				 void *in, int in_size);
 int mlx5_query_port_cong_statistics(struct mlx5_core_dev *mdev, int clear,
 				    void *out, int out_size);
+int mlx5_set_diagnostics(struct mlx5_core_dev *mdev, void *in, int in_size);
+int mlx5_query_diagnostics(struct mlx5_core_dev *mdev, u8 num_of_samples,
+			   u16 sample_index, void *out, int out_size);
 static inline u32 mlx5_mkey_to_idx(u32 mkey)
 {
 	return mkey >> 8;
@@ -905,12 +978,7 @@ enum {
 };
 
 enum {
-	MAX_MR_CACHE_ENTRIES    = 16,
-};
-
-enum {
-	MLX5_INTERFACE_PROTOCOL_IB  = 0,
-	MLX5_INTERFACE_PROTOCOL_ETH = 1,
+	MAX_MR_CACHE_ENTRIES    = 15,
 };
 
 struct mlx5_interface {
@@ -936,6 +1004,14 @@ struct mlx5_profile {
 	} mr_cache[MAX_MR_CACHE_ENTRIES];
 };
 
+enum {
+	MLX5_PCI_DEV_IS_VF		= 1 << 0,
+};
+
+static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
+{
+	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
+}
 
 #define MLX5_EEPROM_MAX_BYTES			32
 #define MLX5_EEPROM_IDENTIFIER_BYTE_MASK	0x000000ff

Modified: head/sys/dev/mlx5/flow_table.h
==============================================================================
--- head/sys/dev/mlx5/flow_table.h	Fri Sep 16 10:04:28 2016	(r305866)
+++ head/sys/dev/mlx5/flow_table.h	Fri Sep 16 11:28:16 2016	(r305867)
@@ -30,6 +30,9 @@
 
 #include <dev/mlx5/driver.h>
 
+#define MLX5_SET_FLOW_TABLE_ROOT_OPMOD_SET      0x0
+#define MLX5_SET_FLOW_TABLE_ROOT_OPMOD_RESET    0x1
+
 struct mlx5_flow_table_group {
 	u8	log_sz;
 	u8	match_criteria_enable;
@@ -44,7 +47,10 @@ void mlx5_destroy_flow_table(void *flow_
 int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
 			      void *match_criteria, void *flow_context,
 			      u32 *flow_index);
-void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
+int mlx5_del_flow_table_entry(void *flow_table, u32 flow_index);
 u32 mlx5_get_flow_table_id(void *flow_table);
+int mlx5_set_flow_table_root(struct mlx5_core_dev *mdev, u16 op_mod,
+			     u8 vport_num, u8 table_type, u32 table_id,
+			     u32 underlay_qpn);
 
 #endif /* MLX5_FLOW_TABLE_H */

Modified: head/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_cmd.c	Fri Sep 16 10:04:28 2016	(r305866)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_cmd.c	Fri Sep 16 11:28:16 2016	(r305867)
@@ -121,7 +121,7 @@ static int alloc_ent(struct mlx5_cmd *cm
 		clear_bit(ret, &cmd->bitmask);
 	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
 
-	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
+	return ret < cmd->max_reg_cmds ? ret : -1;
 }
 
 static void free_ent(struct mlx5_cmd *cmd, int idx)
@@ -396,6 +396,9 @@ const char *mlx5_command_str(int command
 	case MLX5_CMD_OP_CREATE_DCT:
 		return "CREATE_DCT";
 
+	case MLX5_CMD_OP_SET_DC_CNAK_TRACE:
+		return "SET_DC_CNAK_TRACE";
+
 	case MLX5_CMD_OP_DESTROY_DCT:
 		return "DESTROY_DCT";
 
@@ -639,6 +642,12 @@ const char *mlx5_command_str(int command
 	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
 		return "DELETE_FLOW_TABLE_ENTRY";
 
+	case MLX5_CMD_OP_SET_DIAGNOSTICS:
+		return "MLX5_CMD_OP_SET_DIAGNOSTICS";
+
+	case MLX5_CMD_OP_QUERY_DIAGNOSTICS:
+		return "MLX5_CMD_OP_QUERY_DIAGNOSTICS";
+
 	default: return "unknown command opcode";
 	}
 }

Modified: head/sys/dev/mlx5/mlx5_core/mlx5_core.h
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_core.h	Fri Sep 16 10:04:28 2016	(r305866)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_core.h	Fri Sep 16 11:28:16 2016	(r305867)
@@ -64,6 +64,8 @@ enum {
 	MLX5_CMD_TIME, /* print command execution time */
 };
 
+struct mlx5_core_dev;
+
 int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
 int mlx5_query_board_id(struct mlx5_core_dev *dev);
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);

Modified: head/sys/dev/mlx5/mlx5_core/mlx5_cq.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_cq.c	Fri Sep 16 10:04:28 2016	(r305866)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_cq.c	Fri Sep 16 11:28:16 2016	(r305867)
@@ -187,10 +187,12 @@ int mlx5_core_destroy_cq(struct mlx5_cor
 	in.cqn = cpu_to_be32(cq->cqn);
 	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 	if (err)
-		return err;
+		goto out;
 
-	if (out.hdr.status)
-		return mlx5_cmd_status_to_err(&out.hdr);
+	if (out.hdr.status) {
+		err = mlx5_cmd_status_to_err(&out.hdr);
+		goto out;
+	}
 
 	synchronize_irq(cq->irqn);
 
@@ -198,7 +200,9 @@ int mlx5_core_destroy_cq(struct mlx5_cor
 		complete(&cq->free);
 	wait_for_completion(&cq->free);
 
-	return 0;
+out:
+
+	return err;
 }
 EXPORT_SYMBOL(mlx5_core_destroy_cq);
 

Modified: head/sys/dev/mlx5/mlx5_core/mlx5_eq.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_eq.c	Fri Sep 16 10:04:28 2016	(r305866)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_eq.c	Fri Sep 16 11:28:16 2016	(r305867)
@@ -155,6 +155,8 @@ static const char *eqe_type_str(u8 type)
 		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
 	case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
 		return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
+	case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
+		return "MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT";
 	default:
 		return "Unrecognized event";
 	}
@@ -181,6 +183,21 @@ static enum mlx5_dev_event port_subtype_
 	return -1;
 }
 
+static enum mlx5_dev_event dcbx_subevent(u8 subtype)
+{
+	switch (subtype) {
+	case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
+		return MLX5_DEV_EVENT_ERROR_STATE_DCBX;
+	case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
+		return MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE;
+	case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
+		return MLX5_DEV_EVENT_LOCAL_OPER_CHANGE;
+	case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
+		return MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE;
+	}
+	return -1;
+}
+
 static void eq_update_ci(struct mlx5_eq *eq, int arm)
 {
 	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
@@ -259,6 +276,26 @@ static int mlx5_eq_int(struct mlx5_core_
 					       port, eqe->sub_type);
 			}
 			break;
+
+		case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
+			port = (eqe->data.port.port >> 4) & 0xf;
+			switch (eqe->sub_type) {
+			case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
+			case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
+			case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
+			case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
+				if (dev->event)
+					dev->event(dev,
+						   dcbx_subevent(eqe->sub_type),
+						   0);
+				break;
+			default:
+				mlx5_core_warn(dev,
+					       "dcbx event with unrecognized subtype: port %d, sub_type %d\n",
+					       port, eqe->sub_type);
+			}
+			break;
+
 		case MLX5_EVENT_TYPE_CQ_ERROR:
 			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
 			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
@@ -476,6 +513,10 @@ int mlx5_start_eqs(struct mlx5_core_dev 
 		async_event_mask |= (1ull <<
 				     MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
 
+	if (MLX5_CAP_GEN(dev, dcbx))
+		async_event_mask |= (1ull <<
+				     MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT);
+
 	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
 				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
 				 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
@@ -573,6 +614,8 @@ static const char *mlx5_port_module_even
 		return "Unknown identifier";
 	case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE:
 		return "High Temperature";
+	case MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED:
+		return "Cable is shorted";
 
 	default:
 		return "Unknown error type";
@@ -605,19 +648,19 @@ static void mlx5_port_module_event(struc
 
 	switch (module_status) {
 	case MLX5_MODULE_STATUS_PLUGGED:
-		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: plugged", module_num);
+		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: plugged\n", module_num);
 		break;
 
 	case MLX5_MODULE_STATUS_UNPLUGGED:
-		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: unplugged", module_num);
+		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: unplugged\n", module_num);
 		break;
 
 	case MLX5_MODULE_STATUS_ERROR:
-		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: error, %s", module_num, mlx5_port_module_event_error_type_to_string(error_type));
+		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: error, %s\n", module_num, mlx5_port_module_event_error_type_to_string(error_type));
 		break;
 
 	default:
-		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, unknown status", module_num);
+		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, unknown status\n", module_num);
 	}
 	/* store module status */
 	if (module_num < MLX5_MAX_PORTS)

Modified: head/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c	Fri Sep 16 10:04:28 2016	(r305866)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_eswitch_vacl.c	Fri Sep 16 11:28:16 2016	(r305867)
@@ -96,10 +96,10 @@ static int mlx5_vacl_table_allow_vlan(vo
 	MLX5_SET(flow_context, flow_context, action,
 		 MLX5_FLOW_CONTEXT_ACTION_ALLOW);
 	in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
-	MLX5_SET(fte_match_param, in_match_value, outer_headers.vlan_tag, 1);
+	MLX5_SET(fte_match_param, in_match_value, outer_headers.cvlan_tag, 1);
 	MLX5_SET(fte_match_param, in_match_value, outer_headers.first_vid,
 		 vlan);
-	MLX5_SET(fte_match_param, in_match_criteria, outer_headers.vlan_tag, 1);
+	MLX5_SET(fte_match_param, in_match_criteria, outer_headers.cvlan_tag, 1);
 	MLX5_SET(fte_match_param, in_match_criteria, outer_headers.first_vid,
 		 0xfff);
 	if (acl_table->spoofchk_enabled) {
@@ -255,8 +255,8 @@ static int mlx5_vacl_table_apply_untagge
 	/* Apply new untagged rule */
 	MLX5_SET(flow_context, flow_context, action, new_action);
 	in_match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
-	MLX5_SET(fte_match_param, in_match_value, outer_headers.vlan_tag, 0);
-	MLX5_SET(fte_match_param, in_match_criteria, outer_headers.vlan_tag, 1);
+	MLX5_SET(fte_match_param, in_match_value, outer_headers.cvlan_tag, 0);
+	MLX5_SET(fte_match_param, in_match_criteria, outer_headers.cvlan_tag, 1);
 	if (acl_table->spoofchk_enabled) {
 		smac = MLX5_ADDR_OF(fte_match_param,
 				    in_match_value,
@@ -549,7 +549,7 @@ static int mlx5_vacl_table_create_ft(voi
 			MLX5_MATCH_OUTER_HEADERS;
 	MLX5_SET(fte_match_param,
 		 g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx].match_criteria,
-		 outer_headers.vlan_tag, 1);
+		 outer_headers.cvlan_tag, 1);
 	if (spoofchk) {
 		smac = MLX5_ADDR_OF(fte_match_param,
 				    g[MLX5_ACL_UNTAGGED_GROUP_IDX - shift_idx]
@@ -564,7 +564,7 @@ static int mlx5_vacl_table_create_ft(voi
 			MLX5_MATCH_OUTER_HEADERS;
 	MLX5_SET(fte_match_param,
 		 g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
-		 outer_headers.vlan_tag, 1);
+		 outer_headers.cvlan_tag, 1);
 	MLX5_SET(fte_match_param,
 		 g[MLX5_ACL_VLAN_GROUP_IDX - shift_idx].match_criteria,
 		 outer_headers.first_vid, 0xfff);
@@ -627,10 +627,10 @@ void *mlx5_vacl_table_create(struct mlx5
 	struct mlx5_vacl_table *acl_table;
 	int err = 0;
 
-	if (is_egress && !MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(dev, ft_support))
+	if (is_egress && !MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
 		return NULL;
 
-	if (!is_egress && !MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(dev, ft_support))
+	if (!is_egress && !MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
 		return NULL;
 
 	acl_table = kzalloc(sizeof(*acl_table), GFP_KERNEL);
@@ -640,9 +640,9 @@ void *mlx5_vacl_table_create(struct mlx5
 	acl_table->acl_type = is_egress ? MLX5_FLOW_TABLE_TYPE_EGRESS_ACL :
 					  MLX5_FLOW_TABLE_TYPE_INGRESS_ACL;
 	acl_table->max_ft_size = (is_egress ?
-					MLX5_CAP_ESW_FLOWTABLE_EGRESS_ACL(dev,
+					MLX5_CAP_ESW_EGRESS_ACL(dev,
 									  log_max_ft_size) :
-					MLX5_CAP_ESW_FLOWTABLE_INGRESS_ACL(dev,
+					MLX5_CAP_ESW_INGRESS_ACL(dev,
 									   log_max_ft_size));
 	acl_table->dev = dev;
 	acl_table->vport = vport;

Modified: head/sys/dev/mlx5/mlx5_core/mlx5_flow_table.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_flow_table.c	Fri Sep 16 10:04:28 2016	(r305866)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_flow_table.c	Fri Sep 16 11:28:16 2016	(r305867)
@@ -87,7 +87,7 @@ static int mlx5_set_flow_entry_cmd(struc
 	return err;
 }
 
-static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
+static int mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
 {
 	u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
 	u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
@@ -103,7 +103,8 @@ static void mlx5_del_flow_entry_cmd(stru
 	MLX5_SET_DFTEI(in, flow_index,   flow_index);
 	MLX5_SET_DFTEI(in, opcode,     MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
 
-	mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+	return mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
+					 sizeof(out));
 }
 
 static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
@@ -343,12 +344,15 @@ int mlx5_add_flow_table_entry(void *flow
 }
 EXPORT_SYMBOL(mlx5_add_flow_table_entry);
 
-void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
+int mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
 {
 	struct mlx5_flow_table *ft = flow_table;
+	int ret;
 
-	mlx5_del_flow_entry_cmd(ft, flow_index);
-	mlx5_free_flow_index(ft, flow_index);
+	ret = mlx5_del_flow_entry_cmd(ft, flow_index);
+	if (!ret)
+		mlx5_free_flow_index(ft, flow_index);
+	return ret;
 }
 EXPORT_SYMBOL(mlx5_del_flow_table_entry);
 
@@ -430,3 +434,46 @@ u32 mlx5_get_flow_table_id(void *flow_ta
 	return ft->id;
 }
 EXPORT_SYMBOL(mlx5_get_flow_table_id);
+
+int mlx5_set_flow_table_root(struct mlx5_core_dev *mdev, u16 op_mod,
+			     u8 vport_num, u8 table_type, u32 table_id,
+			     u32 underlay_qpn)
+{
+	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)];
+	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)];
+	int err;
+	int is_group_manager;
+
+	is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(set_flow_table_root_in, in, op_mod, op_mod);
+	MLX5_SET(set_flow_table_root_in, in, table_type, table_type);
+	MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
+	if (op_mod == MLX5_SET_FLOW_TABLE_ROOT_OPMOD_SET)
+		MLX5_SET(set_flow_table_root_in, in, table_id, table_id);
+
+	MLX5_SET(set_flow_table_root_in, in, opcode,
+		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
+
+	if (vport_num) {
+		if (is_group_manager) {
+			MLX5_SET(set_flow_table_root_in, in, other_vport,
+				 1);
+			MLX5_SET(set_flow_table_root_in, in, vport_number,
+				 vport_num);
+		} else {
+			return -EPERM;
+		}
+	}
+
+	memset(out, 0, sizeof(out));
+	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
+					 sizeof(out));
+	if (err)
+		return err;
+
+	return 0;
+}
+EXPORT_SYMBOL(mlx5_set_flow_table_root);

Modified: head/sys/dev/mlx5/mlx5_core/mlx5_fw.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_core/mlx5_fw.c	Fri Sep 16 10:04:28 2016	(r305866)
+++ head/sys/dev/mlx5/mlx5_core/mlx5_fw.c	Fri Sep 16 11:28:16 2016	(r305867)
@@ -201,6 +201,50 @@ int mlx5_query_hca_caps(struct mlx5_core
 			return err;
 	}
 
+	if (MLX5_CAP_GEN(dev, snapshot)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_SNAPSHOT,
+					 HCA_CAP_OPMOD_GET_CUR);
+		if (err)
+			return err;
+		err = mlx5_core_get_caps(dev, MLX5_CAP_SNAPSHOT,
+					 HCA_CAP_OPMOD_GET_MAX);
+		if (err)
+			return err;
+	}
+
+	if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_EOIB_OFFLOADS,
+					 HCA_CAP_OPMOD_GET_CUR);
+		if (err)
+			return err;
+		err = mlx5_core_get_caps(dev, MLX5_CAP_EOIB_OFFLOADS,
+					 HCA_CAP_OPMOD_GET_MAX);
+		if (err)

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

