svn commit: r291070 - head/sys/dev/mlx5/mlx5_en

Hans Petter Selasky hselasky@FreeBSD.org
Thu Nov 19 10:28:53 UTC 2015


Author: hselasky
Date: Thu Nov 19 10:28:51 2015
New Revision: 291070
URL: https://svnweb.freebsd.org/changeset/base/291070

Log:
  Style changes, mostly automated.
  
  Differential Revision:	https://reviews.freebsd.org/D4179
  Submitted by:	Daria Genzel <dariaz@mellanox.com>
  Sponsored by:	Mellanox Technologies
  MFC after:	3 days

Modified:
  head/sys/dev/mlx5/mlx5_en/en.h
  head/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
  head/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
  head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
  head/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
  head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
  head/sys/dev/mlx5/mlx5_en/tcp_tlro.c
  head/sys/dev/mlx5/mlx5_en/tcp_tlro.h
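
For context, most of the hunks below apply the same handful of style(9) conventions: a single hard tab after #define, a tab between a struct member's type and its name, four-space continuation indents, and single-line comments reflowed into block form. A contrived sketch of the #define and struct-member rules follows; the names are hypothetical and not taken from the driver, and only the whitespace is the point:

    #include <stdint.h>

    /*
     * Hypothetical header, for illustration only: a single tab follows
     * #define, and a tab separates each member's type from its name.
     */
    #define	FOO_SQ_TX_QUEUE_SIZE	4096	/* SQ drbr queue size */
    #define	FOO_MTU_MIN		72	/* Min MTU allowed by the kernel */

    struct foo_sq_stats {
    	uint64_t	tx_packets;	/* tab between type and name */
    	uint64_t	tx_bytes;
    };

The hunks that follow are the mechanical application of these rules to the mlx5_en sources.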

Modified: head/sys/dev/mlx5/mlx5_en/en.h
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/en.h	Thu Nov 19 10:23:10 2015	(r291069)
+++ head/sys/dev/mlx5/mlx5_en/en.h	Thu Nov 19 10:28:51 2015	(r291070)
@@ -74,9 +74,9 @@
 #define	MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
 
 /* freeBSD HW LRO is limited by 16KB - the size of max mbuf */
-#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 MJUM16BYTES
+#define	MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 MJUM16BYTES
 #define	MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
-#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE	0x3
+#define	MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE	0x3
 #define	MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
 #define	MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
 #define	MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
@@ -89,13 +89,14 @@
     ((swmtu) + (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN))
 #define	MLX5E_SW2MB_MTU(swmtu) \
     (MLX5E_SW2HW_MTU(swmtu) + MLX5E_NET_IP_ALIGN)
-#define MLX5E_MTU_MIN		72					/* Min MTU allowed by the kernel */
-#define MLX5E_MTU_MAX		MIN(ETHERMTU_JUMBO, MJUM16BYTES)	/* Max MTU of Ethernet jumbo frames */
+#define	MLX5E_MTU_MIN		72	/* Min MTU allowed by the kernel */
+#define	MLX5E_MTU_MAX		MIN(ETHERMTU_JUMBO, MJUM16BYTES)	/* Max MTU of Ethernet
+									 * jumbo frames */
 
 #define	MLX5E_BUDGET_MAX	8192	/* RX and TX */
 #define	MLX5E_RX_BUDGET_MAX	256
 #define	MLX5E_SQ_BF_BUDGET	16
-#define MLX5E_SQ_TX_QUEUE_SIZE	4096	/* SQ drbr queue size */
+#define	MLX5E_SQ_TX_QUEUE_SIZE	4096	/* SQ drbr queue size */
 
 #define	MLX5E_MAX_TX_NUM_TC	8	/* units */
 #define	MLX5E_MAX_TX_HEADER	128	/* bytes */
@@ -159,7 +160,7 @@ typedef void (mlx5e_cq_comp_t)(struct ml
 #define	MLX5E_VPORT_STATS_NUM (0 MLX5E_VPORT_STATS(MLX5E_STATS_COUNT))
 
 struct mlx5e_vport_stats {
-	struct sysctl_ctx_list ctx;
+	struct	sysctl_ctx_list ctx;
 	u64	arg [0];
 	MLX5E_VPORT_STATS(MLX5E_STATS_VAR)
 	u32	rx_out_of_buffer_prev;
@@ -226,7 +227,7 @@ struct mlx5e_vport_stats {
   m(+1, u64 out_multicast_pkts, "out_multicast_pkts", "Out multicast packets") \
   m(+1, u64 out_broadcast_pkts, "out_broadcast_pkts", "Out broadcast packets")
 
-#define MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG(m)                                    		\
+#define	MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG(m)                                    		\
   m(+1, u64 time_since_last_clear, "time_since_last_clear",				\
 			"Time since the last counters clear event (msec)")		\
   m(+1, u64 symbol_errors, "symbol_errors", "Symbol errors")				\
@@ -291,19 +292,19 @@ struct mlx5e_vport_stats {
   (0 MLX5E_PPORT_RFC2819_STATS_DEBUG(MLX5E_STATS_COUNT))
 #define	MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM \
   (0 MLX5E_PPORT_RFC2863_STATS_DEBUG(MLX5E_STATS_COUNT))
-#define MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM \
+#define	MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM \
   (0 MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG(MLX5E_STATS_COUNT))
 #define	MLX5E_PORT_STATS_DEBUG_NUM \
   (0 MLX5E_PORT_STATS_DEBUG(MLX5E_STATS_COUNT))
 
 struct mlx5e_pport_stats {
-	struct sysctl_ctx_list ctx;
+	struct	sysctl_ctx_list ctx;
 	u64	arg [0];
 	MLX5E_PPORT_STATS(MLX5E_STATS_VAR)
 };
 
 struct mlx5e_port_stats_debug {
-	struct sysctl_ctx_list ctx;
+	struct	sysctl_ctx_list ctx;
 	u64	arg [0];
 	MLX5E_PORT_STATS_DEBUG(MLX5E_STATS_VAR)
 };
@@ -320,7 +321,7 @@ struct mlx5e_port_stats_debug {
 #define	MLX5E_RQ_STATS_NUM (0 MLX5E_RQ_STATS(MLX5E_STATS_COUNT))
 
 struct mlx5e_rq_stats {
-	struct sysctl_ctx_list ctx;
+	struct	sysctl_ctx_list ctx;
 	u64	arg [0];
 	MLX5E_RQ_STATS(MLX5E_STATS_VAR)
 };
@@ -337,7 +338,7 @@ struct mlx5e_rq_stats {
 #define	MLX5E_SQ_STATS_NUM (0 MLX5E_SQ_STATS(MLX5E_STATS_COUNT))
 
 struct mlx5e_sq_stats {
-	struct sysctl_ctx_list ctx;
+	struct	sysctl_ctx_list ctx;
 	u64	arg [0];
 	MLX5E_SQ_STATS(MLX5E_STATS_VAR)
 };
@@ -360,8 +361,8 @@ struct mlx5e_params {
 	u16	tx_cq_moderation_usec;
 	u16	tx_cq_moderation_pkts;
 	u16	min_rx_wqes;
-	bool 	hw_lro_en;
-	u32 	lro_wqe_sz;
+	bool	hw_lro_en;
+	u32	lro_wqe_sz;
 	u16	rx_hash_log_tbl_sz;
 };
 
@@ -391,31 +392,31 @@ struct mlx5e_params_ethtool {
 
 /* EEPROM Standards for plug in modules */
 #ifndef MLX5E_ETH_MODULE_SFF_8472
-#define MLX5E_ETH_MODULE_SFF_8472	0x1
-#define MLX5E_ETH_MODULE_SFF_8472_LEN	128
+#define	MLX5E_ETH_MODULE_SFF_8472	0x1
+#define	MLX5E_ETH_MODULE_SFF_8472_LEN	128
 #endif
 
 #ifndef MLX5E_ETH_MODULE_SFF_8636
-#define MLX5E_ETH_MODULE_SFF_8636	0x2
-#define MLX5E_ETH_MODULE_SFF_8636_LEN	256
+#define	MLX5E_ETH_MODULE_SFF_8636	0x2
+#define	MLX5E_ETH_MODULE_SFF_8636_LEN	256
 #endif
 
 #ifndef MLX5E_ETH_MODULE_SFF_8436
-#define MLX5E_ETH_MODULE_SFF_8436	0x3
-#define MLX5E_ETH_MODULE_SFF_8436_LEN	256
+#define	MLX5E_ETH_MODULE_SFF_8436	0x3
+#define	MLX5E_ETH_MODULE_SFF_8436_LEN	256
 #endif
 
 /* EEPROM I2C Addresses */
-#define MLX5E_I2C_ADDR_LOW		0x50
-#define MLX5E_I2C_ADDR_HIGH		0x51
+#define	MLX5E_I2C_ADDR_LOW		0x50
+#define	MLX5E_I2C_ADDR_HIGH		0x51
 
-#define MLX5E_EEPROM_LOW_PAGE		0x0
-#define MLX5E_EEPROM_HIGH_PAGE		0x3
+#define	MLX5E_EEPROM_LOW_PAGE		0x0
+#define	MLX5E_EEPROM_HIGH_PAGE		0x3
 
-#define MLX5E_EEPROM_HIGH_PAGE_OFFSET	128
-#define MLX5E_EEPROM_PAGE_LENGTH	256
+#define	MLX5E_EEPROM_HIGH_PAGE_OFFSET	128
+#define	MLX5E_EEPROM_PAGE_LENGTH	256
 
-#define MLX5E_EEPROM_INFO_BYTES		0x3
+#define	MLX5E_EEPROM_INFO_BYTES		0x3
 
 struct mlx5e_cq {
 	/* data path - accessed per cqe */
@@ -430,9 +431,9 @@ struct mlx5e_cq {
 } __aligned(MLX5E_CACHELINE_SIZE);
 
 struct mlx5e_rq_mbuf {
-	bus_dmamap_t dma_map;
-	caddr_t data;
-	struct mbuf *mbuf;
+	bus_dmamap_t	dma_map;
+	caddr_t		data;
+	struct mbuf	*mbuf;
 };
 
 struct mlx5e_rq {
@@ -474,9 +475,9 @@ enum {
 
 struct mlx5e_sq {
 	/* data path */
-	struct mtx lock;
+	struct	mtx lock;
 	bus_dma_tag_t dma_tag;
-	struct mtx comp_lock;
+	struct	mtx comp_lock;
 
 	/* dirtied @completion */
 	u16	cc;
@@ -484,31 +485,31 @@ struct mlx5e_sq {
 	/* dirtied @xmit */
 	u16	pc __aligned(MLX5E_CACHELINE_SIZE);
 	u16	bf_offset;
-	struct mlx5e_sq_stats stats;
+	struct	mlx5e_sq_stats stats;
 
-	struct mlx5e_cq cq;
-	struct task sq_task;
-	struct taskqueue *sq_tq;
+	struct	mlx5e_cq cq;
+	struct	task sq_task;
+	struct	taskqueue *sq_tq;
 
 	/* pointers to per packet info: write at xmit, read at completion */
-	struct mlx5e_sq_mbuf *mbuf;
-	struct buf_ring *br;
+	struct	mlx5e_sq_mbuf *mbuf;
+	struct	buf_ring *br;
 
 	/* read only */
-	struct mlx5_wq_cyc wq;
-	void __iomem *uar_map;
-	void __iomem *uar_bf_map;
+	struct	mlx5_wq_cyc wq;
+	void	__iomem *uar_map;
+	void	__iomem *uar_bf_map;
 	u32	sqn;
 	u32	bf_buf_size;
-	struct device *pdev;
+	struct  device *pdev;
 	u32	mkey_be;
 
 	/* control path */
-	struct mlx5_wq_ctrl wq_ctrl;
-	struct mlx5_uar uar;
-	struct mlx5e_channel *channel;
+	struct	mlx5_wq_ctrl wq_ctrl;
+	struct	mlx5_uar uar;
+	struct	mlx5e_channel *channel;
 	int	tc;
-	unsigned int	queue_state;
+	unsigned int queue_state;
 } __aligned(MLX5E_CACHELINE_SIZE);
 
 static inline bool
@@ -616,7 +617,7 @@ struct mlx5e_priv {
 	u32	tdn;
 	struct mlx5_core_mr mr;
 
-	struct mlx5e_channel * volatile *channel;
+	struct mlx5e_channel *volatile *channel;
 	u32	tisn[MLX5E_MAX_TX_NUM_TC];
 	u32	rqtn;
 	u32	tirn[MLX5E_NUM_TT];
@@ -663,15 +664,15 @@ struct mlx5e_rx_wqe {
 };
 
 struct mlx5e_eeprom {
-	int lock_bit;
-	int i2c_addr;
-	int page_num;
-	int device_addr;
-	int module_num;
-	int len;
-	int type;
-	int page_valid;
-	u32 *data;
+	int	lock_bit;
+	int	i2c_addr;
+	int	page_num;
+	int	device_addr;
+	int	module_num;
+	int	len;
+	int	type;
+	int	page_valid;
+	u32	*data;
 };
 
 enum mlx5e_link_mode {
@@ -715,7 +716,7 @@ int	mlx5e_close_locked(struct ifnet *);
 void	mlx5e_cq_error_event(struct mlx5_core_cq *mcq, int event);
 void	mlx5e_rx_cq_comp(struct mlx5_core_cq *);
 void	mlx5e_tx_cq_comp(struct mlx5_core_cq *);
-struct	mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
 void	mlx5e_tx_que(void *context, int pending);
 
 int	mlx5e_open_flow_table(struct mlx5e_priv *priv);

Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c	Thu Nov 19 10:23:10 2015	(r291069)
+++ head/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c	Thu Nov 19 10:28:51 2015	(r291070)
@@ -191,8 +191,7 @@ mlx5e_ethtool_handler(SYSCTL_HANDLER_ARG
 			if_printf(priv->ifp, "Can't set HW_LRO to a device with LRO turned off");
 			goto done;
 		}
-	}
-	else {
+	} else {
 		priv->params.hw_lro_en = false;
 	}
 
@@ -243,7 +242,7 @@ mlx5e_get_eeprom_info(struct mlx5e_priv 
 	case SFF_8024_ID_QSFPPLUS:
 	case SFF_8024_ID_QSFP28:
 		if ((data & MLX5_EEPROM_IDENTIFIER_BYTE_MASK) == SFF_8024_ID_QSFP28 ||
-		   ((data & MLX5_EEPROM_REVISION_ID_BYTE_MASK) >> 8) >= 0x3) {
+		    ((data & MLX5_EEPROM_REVISION_ID_BYTE_MASK) >> 8) >= 0x3) {
 			eeprom->type = MLX5E_ETH_MODULE_SFF_8636;
 			eeprom->len = MLX5E_ETH_MODULE_SFF_8636_LEN;
 		} else {
@@ -281,7 +280,7 @@ mlx5e_get_eeprom(struct mlx5e_priv *priv
 	while (ee->device_addr < ee->len) {
 		ret = mlx5_query_eeprom(dev, ee->i2c_addr, ee->page_num, ee->device_addr,
 		    ee->len - ee->device_addr, ee->module_num,
-		    ee->data + (ee->device_addr/4), &size_read);
+		    ee->data + (ee->device_addr / 4), &size_read);
 		if (ret) {
 			if_printf(priv->ifp, "%s:%d: Failed reading eeprom, "
 			    "error = 0x%02x\n", __func__, __LINE__, ret);
@@ -298,8 +297,8 @@ mlx5e_get_eeprom(struct mlx5e_priv *priv
 		while (ee->device_addr < MLX5E_EEPROM_PAGE_LENGTH) {
 			ret = mlx5_query_eeprom(dev, ee->i2c_addr, ee->page_num,
 			    ee->device_addr, MLX5E_EEPROM_PAGE_LENGTH - ee->device_addr,
-			    ee->module_num, ee->data + (ee->len/4) +
-			    ((ee->device_addr - MLX5E_EEPROM_HIGH_PAGE_OFFSET)/4),
+			    ee->module_num, ee->data + (ee->len / 4) +
+			    ((ee->device_addr - MLX5E_EEPROM_HIGH_PAGE_OFFSET) / 4),
 			    &size_read);
 			if (ret) {
 				if_printf(priv->ifp, "%s:%d: Failed reading eeprom, "
@@ -321,9 +320,9 @@ mlx5e_print_eeprom(struct mlx5e_eeprom *
 	printf("\nOffset\t\tValues\n");
 	printf("------\t\t------\n");
 	while (row < eeprom->len) {
-		printf("0x%04x\t\t",row);
+		printf("0x%04x\t\t", row);
 		for (i = 0; i < 16; i++) {
-			printf("%02x ", ((u8*)eeprom->data)[j]);
+			printf("%02x ", ((u8 *)eeprom->data)[j]);
 			j++;
 			row++;
 		}
@@ -336,9 +335,9 @@ mlx5e_print_eeprom(struct mlx5e_eeprom *
 		printf("\nOffset\t\tValues\n");
 		printf("------\t\t------\n");
 		while (row < MLX5E_EEPROM_PAGE_LENGTH) {
-			printf("0x%04x\t\t",row);
+			printf("0x%04x\t\t", row);
 			for (i = 0; i < 16; i++) {
-				printf("%02x ", ((u8*)eeprom->data)[j]);
+				printf("%02x ", ((u8 *)eeprom->data)[j]);
 				j++;
 				row++;
 			}
@@ -385,8 +384,10 @@ mlx5e_read_eeprom(SYSCTL_HANDLER_ARGS)
 			error = 0;
 			goto done;
 		}
-
-		/* Allocate needed length buffer and additional space for the 3rd */
+		/*
+		 * Allocate needed length buffer and additional space for
+		 * page 0x03
+		 */
 		eeprom.data = malloc(eeprom.len + MLX5E_EEPROM_PAGE_LENGTH,
 		    M_MLX5EN, M_WAITOK | M_ZERO);
 
@@ -396,9 +397,11 @@ mlx5e_read_eeprom(SYSCTL_HANDLER_ARGS)
 			if_printf(priv->ifp, "%s:%d: Failed reading eeprom\n",
 			    __func__, __LINE__);
 			error = 0;
-			/* Continue printing partial information in case of an error */
+			/*
+			 * Continue printing partial information in case of
+			 * an error
+			 */
 		}
-
 		mlx5e_print_eeprom(&eeprom);
 		free(eeprom.data, M_MLX5EN);
 	}
@@ -498,4 +501,3 @@ mlx5e_create_ethtool(struct mlx5e_priv *
 	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
 	    mlx5e_read_eeprom, "I", "EEPROM information");
 }
-

Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c	Thu Nov 19 10:23:10 2015	(r291069)
+++ head/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c	Thu Nov 19 10:28:51 2015	(r291070)
@@ -270,7 +270,6 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e
 		}
 		ai->tt_vec |= (1 << MLX5E_TT_ANY);
 	}
-
 	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
 	    outer_headers.ethertype);
@@ -288,7 +287,6 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e
 		}
 		ai->tt_vec |= (1 << MLX5E_TT_IPV4);
 	}
-
 	if (tt_vec & (1 << MLX5E_TT_IPV6)) {
 		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
 		    ETHERTYPE_IPV6);
@@ -381,7 +379,6 @@ mlx5e_add_eth_addr_rule(struct mlx5e_pri
 		err = -ENOMEM;
 		goto add_eth_addr_rule_out;
 	}
-
 	err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, flow_context,
 	    match_criteria);
 	if (err)
@@ -423,12 +420,12 @@ mlx5e_add_vlan_rule(struct mlx5e_priv *p
 	dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
 
 	MLX5_SET(flow_context, flow_context, action,
-		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+	    MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
 	MLX5_SET(flow_context, flow_context, destination_list_size, 1);
 	MLX5_SET(dest_format_struct, dest, destination_type,
-		 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
+	    MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
 	MLX5_SET(dest_format_struct, dest, destination_id,
-		 mlx5_get_flow_table_id(priv->ft.main));
+	    mlx5_get_flow_table_id(priv->ft.main));
 
 	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	MLX5_SET_TO_ONES(fte_match_param, match_criteria,

Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c	Thu Nov 19 10:23:10 2015	(r291069)
+++ head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c	Thu Nov 19 10:28:51 2015	(r291070)
@@ -261,7 +261,6 @@ mlx5e_media_change(struct ifnet *dev)
 		error = EINVAL;
 		goto done;
 	}
-
 	link_mode = mlx5e_find_link_mode(IFM_SUBTYPE(priv->media.ifm_media));
 
 	error = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
@@ -279,7 +278,6 @@ mlx5e_media_change(struct ifnet *dev)
 		error = EINVAL;
 		goto done;
 	}
-
 	mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
 	mlx5_set_port_proto(mdev, link_mode, MLX5_PTYS_EN);
 	mlx5_set_port_status(mdev, MLX5_PORT_UP);
@@ -315,7 +313,7 @@ mlx5e_update_pport_counters(struct mlx5e
 	unsigned x;
 	unsigned y;
 
-	in  = mlx5_vzalloc(sz);
+	in = mlx5_vzalloc(sz);
 	out = mlx5_vzalloc(sz);
 	if (in == NULL || out == NULL)
 		goto free_out;
@@ -334,7 +332,7 @@ mlx5e_update_pport_counters(struct mlx5e
 	for (x = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM; x++, y++)
 		s->arg[y] = be64toh(ptr[x]);
 	for (y = 0; x != MLX5E_PPORT_RFC2819_STATS_NUM +
-	   MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
+	    MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM; x++, y++)
 		s_debug->arg[y] = be64toh(ptr[x]);
 
 	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
@@ -343,8 +341,8 @@ mlx5e_update_pport_counters(struct mlx5e
 		s_debug->arg[y] = be64toh(ptr[x]);
 
 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
-        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-        for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+	for (x = 0; x != MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM; x++, y++)
 		s_debug->arg[y] = be64toh(ptr[x]);
 free_out:
 	kvfree(in);
@@ -364,6 +362,7 @@ mlx5e_update_stats_work(struct work_stru
 #if (__FreeBSD_version < 1100000)
 	struct ifnet *ifp = priv->ifp;
 #endif
+
 	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
 	u32 *out;
 	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
@@ -520,7 +519,7 @@ mlx5e_update_stats_work(struct work_stru
 #if (__FreeBSD_version < 1100000)
 	/* no get_counters interface in fbsd 10 */
 	ifp->if_ipackets = s->rx_packets;
-	ifp->if_ierrors  = s->rx_error_packets;
+	ifp->if_ierrors = s->rx_error_packets;
 	ifp->if_iqdrops = s->rx_out_of_buffer;
 	ifp->if_opackets = s->tx_packets;
 	ifp->if_oerrors = s->tx_error_packets;
@@ -605,18 +604,18 @@ mlx5e_create_rq(struct mlx5e_channel *c,
 
 	/* Create DMA descriptor TAG */
 	if ((err = -bus_dma_tag_create(
-		bus_get_dma_tag(mdev->pdev->dev.bsddev),
-		1,			/* any alignment */
-		0,			/* no boundary */
-	        BUS_SPACE_MAXADDR,	/* lowaddr */
-		BUS_SPACE_MAXADDR,	/* highaddr */
-		NULL, NULL,		/* filter, filterarg */
-		MJUM16BYTES,		/* maxsize */
-		1,			/* nsegments */
-		MJUM16BYTES,		/* maxsegsize */
-		0,			/* flags */
-		NULL, NULL,		/* lockfunc, lockfuncarg */
-		&rq->dma_tag)))
+	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
+	    1,				/* any alignment */
+	    0,				/* no boundary */
+	    BUS_SPACE_MAXADDR,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,		/* highaddr */
+	    NULL, NULL,			/* filter, filterarg */
+	    MJUM16BYTES,		/* maxsize */
+	    1,				/* nsegments */
+	    MJUM16BYTES,		/* maxsegsize */
+	    0,				/* flags */
+	    NULL, NULL,			/* lockfunc, lockfuncarg */
+	    &rq->dma_tag)))
 		goto done;
 
 	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
@@ -626,10 +625,9 @@ mlx5e_create_rq(struct mlx5e_channel *c,
 
 	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
 
-	if (priv->params.hw_lro_en)  {
+	if (priv->params.hw_lro_en) {
 		rq->wqe_sz = priv->params.lro_wqe_sz;
-	}
-	else {
+	} else {
 		rq->wqe_sz = MLX5E_SW2MB_MTU(priv->ifp->if_mtu);
 	}
 	if (rq->wqe_sz > MJUM16BYTES) {
@@ -651,7 +649,6 @@ mlx5e_create_rq(struct mlx5e_channel *c,
 		err = -ENOMEM;
 		goto err_rq_wq_destroy;
 	}
-
 	for (i = 0; i != wq_sz; i++) {
 		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
 		uint32_t byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
@@ -753,7 +750,7 @@ mlx5e_enable_rq(struct mlx5e_rq *rq, str
 	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
 	MLX5_SET(rqc, rqc, flush_in_error_en, 1);
 	if (priv->counter_set_id >= 0)
-		MLX5_SET(rqc,  rqc, counter_set_id, priv->counter_set_id);
+		MLX5_SET(rqc, rqc, counter_set_id, priv->counter_set_id);
 	MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
 	    PAGE_SHIFT);
 	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
@@ -938,18 +935,18 @@ mlx5e_create_sq(struct mlx5e_channel *c,
 
 	/* Create DMA descriptor TAG */
 	if ((err = -bus_dma_tag_create(
-		bus_get_dma_tag(mdev->pdev->dev.bsddev),
-		1,				/* any alignment */
-		0,				/* no boundary */
-	        BUS_SPACE_MAXADDR,		/* lowaddr */
-		BUS_SPACE_MAXADDR,		/* highaddr */
-		NULL, NULL,			/* filter, filterarg */
-		MLX5E_MAX_TX_PAYLOAD_SIZE,	/* maxsize */
-		MLX5E_MAX_TX_MBUF_FRAGS,	/* nsegments */
-		MLX5E_MAX_TX_MBUF_SIZE,		/* maxsegsize */
-		0,				/* flags */
-		NULL, NULL,			/* lockfunc, lockfuncarg */
-		&sq->dma_tag)))
+	    bus_get_dma_tag(mdev->pdev->dev.bsddev),
+	    1,				/* any alignment */
+	    0,				/* no boundary */
+	    BUS_SPACE_MAXADDR,		/* lowaddr */
+	    BUS_SPACE_MAXADDR,		/* highaddr */
+	    NULL, NULL,			/* filter, filterarg */
+	    MLX5E_MAX_TX_PAYLOAD_SIZE,	/* maxsize */
+	    MLX5E_MAX_TX_MBUF_FRAGS,	/* nsegments */
+	    MLX5E_MAX_TX_MBUF_SIZE,	/* maxsegsize */
+	    0,				/* flags */
+	    NULL, NULL,			/* lockfunc, lockfuncarg */
+	    &sq->dma_tag)))
 		goto done;
 
 	err = mlx5_alloc_map_uar(mdev, &sq->uar);
@@ -963,7 +960,7 @@ mlx5e_create_sq(struct mlx5e_channel *c,
 
 	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
 	sq->uar_map = sq->uar.map;
-	sq->uar_bf_map  = sq->uar.bf_map;
+	sq->uar_bf_map = sq->uar.bf_map;
 	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
 
 	err = mlx5e_alloc_sq_db(sq);
@@ -992,10 +989,10 @@ mlx5e_create_sq(struct mlx5e_channel *c,
 		err = -ENOMEM;
 		goto err_free_drbr;
 	}
+
 	TASK_INIT(&sq->sq_task, 0, mlx5e_tx_que, sq);
 	taskqueue_start_threads(&sq->sq_tq, 1, PI_NET, "%s tx sq",
-		c->ifp->if_xname);
-
+	    c->ifp->if_xname);
 
 	snprintf(buffer, sizeof(buffer), "txstat%dtc%d", c->ix, tc);
 	mlx5e_create_stats(&sq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
@@ -1427,7 +1424,7 @@ mlx5e_chan_mtx_destroy(struct mlx5e_chan
 static int
 mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
     struct mlx5e_channel_param *cparam,
-    struct mlx5e_channel * volatile *cp)
+    struct mlx5e_channel *volatile *cp)
 {
 	struct mlx5e_channel *c;
 	u8 rx_moderation_mode;
@@ -1505,7 +1502,7 @@ err_free:
 }
 
 static void
-mlx5e_close_channel(struct mlx5e_channel * volatile *pp)
+mlx5e_close_channel(struct mlx5e_channel *volatile *pp)
 {
 	struct mlx5e_channel *c = *pp;
 
@@ -1517,7 +1514,7 @@ mlx5e_close_channel(struct mlx5e_channel
 }
 
 static void
-mlx5e_close_channel_wait(struct mlx5e_channel * volatile *pp)
+mlx5e_close_channel_wait(struct mlx5e_channel *volatile *pp)
 {
 	struct mlx5e_channel *c = *pp;
 
@@ -1814,19 +1811,17 @@ mlx5e_build_tir_ctx(struct mlx5e_priv *p
 				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
 
 	if (priv->params.hw_lro_en) {
-	    MLX5_SET(tirc, tirc, lro_enable_mask,
-	         MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
-	         MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
-	    MLX5_SET(tirc, tirc, lro_max_msg_sz,
-	         (priv->params.lro_wqe_sz -
-	         ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+		MLX5_SET(tirc, tirc, lro_enable_mask,
+		    MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+		    MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+		MLX5_SET(tirc, tirc, lro_max_msg_sz,
+		    (priv->params.lro_wqe_sz -
+		    ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
 		/* TODO: add the option to choose timer value dynamically */
-	    MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
-	         MLX5_CAP_ETH(priv->mdev,
-	         lro_timer_supported_periods[2]));
+		MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+		    MLX5_CAP_ETH(priv->mdev,
+		    lro_timer_supported_periods[2]));
 	}
-
-
 	switch (tt) {
 	case MLX5E_TT_ANY:
 		MLX5_SET(tirc, tirc, disp_type,
@@ -2020,6 +2015,7 @@ mlx5e_set_dev_port_mtu(struct ifnet *ifp
 	err = mlx5_set_port_mtu(mdev, 0);
 	if (err)
 		return (err);
+
 	err = mlx5_query_port_oper_mtu(mdev, &min_mtu);
 	if (err) {
 		if_printf(ifp, "Query port minimal MTU failed\n");
@@ -2342,7 +2338,6 @@ mlx5e_ioctl(struct ifnet *ifp, u_long co
 			ifp->if_capenable ^= IFCAP_RXCSUM;
 		if (mask & IFCAP_RXCSUM_IPV6)
 			ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
-
 		if (mask & IFCAP_TSO4) {
 			if (!(IFCAP_TSO4 & ifp->if_capenable) &&
 			    !(IFCAP_TXCSUM & ifp->if_capenable)) {
@@ -2363,7 +2358,6 @@ mlx5e_ioctl(struct ifnet *ifp, u_long co
 			ifp->if_capenable ^= IFCAP_TSO6;
 			ifp->if_hwassist ^= CSUM_IP6_TSO;
 		}
-
 		if (mask & IFCAP_VLAN_HWFILTER) {
 			if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
 				mlx5e_disable_vlan_filter(priv);
@@ -2374,13 +2368,12 @@ mlx5e_ioctl(struct ifnet *ifp, u_long co
 		}
 		if (mask & IFCAP_VLAN_HWTAGGING)
 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
-
 		if (mask & IFCAP_WOL_MAGIC)
 			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
 
 		VLAN_CAPABILITIES(ifp);
 		/* turn off LRO means also turn of HW LRO - if it's on */
-		if (mask & IFCAP_LRO ) {
+		if (mask & IFCAP_LRO) {
 			int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
 			bool need_restart = false;
 
@@ -2390,7 +2383,7 @@ mlx5e_ioctl(struct ifnet *ifp, u_long co
 					priv->params.hw_lro_en = false;
 					need_restart = true;
 					/* Not sure this is the correct way */
-					priv->params_ethtool.hw_lro  = priv->params.hw_lro_en;
+					priv->params_ethtool.hw_lro = priv->params.hw_lro_en;
 				}
 			}
 			if (was_opened && need_restart) {
@@ -2405,7 +2398,10 @@ out:
 	case SIOCGI2C:
 		ifr = (struct ifreq *)data;
 
-		/* Copy from the user-space address ifr_data to the kernel-space address i2c */
+		/*
+		 * Copy from the user-space address ifr_data to the
+		 * kernel-space address i2c
+		 */
 		error = copyin(ifr->ifr_data, &i2c, sizeof(i2c));
 		if (error)
 			break;
@@ -2515,11 +2511,10 @@ mlx5e_build_ifp_priv(struct mlx5_core_de
 	priv->params.default_vlan_prio = 0;
 	priv->counter_set_id = -1;
 
-	/* 
-	 * hw lro is currently defaulted to off. 
-	 * when it won't anymore we will consider the 
-	 * HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)"
-	*/ 
+	/*
+	 * hw lro is currently defaulted to off. when it won't anymore we
+	 * will consider the HW capability: "!!MLX5_CAP_ETH(mdev, lro_cap)"
+	 */
 	priv->params.hw_lro_en = false;
 	priv->params.lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
 
@@ -2593,11 +2588,12 @@ mlx5e_priv_mtx_destroy(struct mlx5e_priv
 static int
 sysctl_firmware(SYSCTL_HANDLER_ARGS)
 {
-	/* %d.%d%.d the string format.
+	/*
+	 * %d.%d%.d the string format.
 	 * fw_rev_{maj,min,sub} return u16, 2^16 = 65536.
 	 * We need at most 5 chars to store that.
-	 * it also has: two "." and NULL at the end.
-	 * Which means we need 18 (5*3 + 3) chars at most.
+	 * It also has: two "." and NULL at the end, which means we need 18
+	 * (5*3 + 3) chars at most.
 	 */
 	char fw[18];
 	struct mlx5e_priv *priv = arg1;
@@ -2702,6 +2698,7 @@ mlx5e_create_ifp(struct mlx5_core_dev *m
 		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
 		goto err_free_sysctl;
 	}
+
 	/* HW sysctl tree */
 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(mdev->pdev->dev.bsddev));
 	priv->sysctl_hw = SYSCTL_ADD_NODE(&priv->sysctl_ctx, child,
@@ -2710,9 +2707,7 @@ mlx5e_create_ifp(struct mlx5_core_dev *m
 		mlx5_core_err(mdev, "SYSCTL_ADD_NODE() failed\n");
 		goto err_free_sysctl;
 	}
-
 	mlx5e_build_ifp_priv(mdev, priv, ncv);
-
 	err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
 	if (err) {
 		if_printf(ifp, "%s: mlx5_alloc_map_uar failed, %d\n",
@@ -2725,15 +2720,12 @@ mlx5e_create_ifp(struct mlx5_core_dev *m
 		    __func__, err);
 		goto err_unmap_free_uar;
 	}
-
 	err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
-
 	if (err) {
 		if_printf(ifp, "%s: mlx5_alloc_transport_domain failed, %d\n",
-			  __func__, err);
+		    __func__, err);
 		goto err_dealloc_pd;
 	}
-
 	err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
 	if (err) {
 		if_printf(ifp, "%s: mlx5e_create_mkey failed, %d\n",

Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c	Thu Nov 19 10:23:10 2015	(r291069)
+++ head/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c	Thu Nov 19 10:28:51 2015	(r291070)
@@ -95,7 +95,7 @@ mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
 }
 
 static void
-mlx5e_lro_update_hdr(struct mbuf* mb, struct mlx5_cqe64 *cqe)
+mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe)
 {
 	/* TODO: consider vlans, ip options, ... */
 	struct ether_header *eh;
@@ -109,8 +109,8 @@ mlx5e_lro_update_hdr(struct mbuf* mb, st
 	eh_type = ntohs(eh->ether_type);
 
 	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
-	int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA  == l4_hdr_type) ||
-			(CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
+	int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
+	    (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
 
 	/* TODO: consider vlan */
 	u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETHER_HDR_LEN;
@@ -131,15 +131,16 @@ mlx5e_lro_update_hdr(struct mbuf* mb, st
 	ts_ptr = (uint32_t *)(th + 1);
 
 	if (get_cqe_lro_tcppsh(cqe))
-		th->th_flags	|= TH_PUSH;
+		th->th_flags |= TH_PUSH;
 
 	if (tcp_ack) {
-		th->th_flags	|= TH_ACK;
-		th->th_ack	= cqe->lro_ack_seq_num;
-		th->th_win	= cqe->lro_tcp_win;
-
-		/* FreeBSD handles only 32bit aligned timestamp
-		 * right after the TCP hdr
+		th->th_flags |= TH_ACK;
+		th->th_ack = cqe->lro_ack_seq_num;
+		th->th_win = cqe->lro_tcp_win;
+
+		/*
+		 * FreeBSD handles only 32bit aligned timestamp right after
+		 * the TCP hdr
 		 * +--------+--------+--------+--------+
 		 * |   NOP  |  NOP   |  TSopt |   10   |
 		 * +--------+--------+--------+--------+
@@ -152,7 +153,8 @@ mlx5e_lro_update_hdr(struct mbuf* mb, st
 		    (__predict_true(*ts_ptr) == ntohl(TCPOPT_NOP << 24 |
 		    TCPOPT_NOP << 16 | TCPOPT_TIMESTAMP << 8 |
 		    TCPOLEN_TIMESTAMP))) {
-			/* cqe->timestamp is 64bit long.
+			/*
+			 * cqe->timestamp is 64bit long.
 			 * [0-31] - timestamp.
 			 * [32-64] - timestamp echo replay.
 			 */
@@ -160,15 +162,14 @@ mlx5e_lro_update_hdr(struct mbuf* mb, st
 			ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1);
 		}
 	}
-
 	if (ip4) {
-		ip4->ip_ttl	= cqe->lro_min_ttl;
-		ip4->ip_len	= cpu_to_be16(tot_len);
-		ip4->ip_sum	= 0;
-		ip4->ip_sum	= in_cksum(mb, ip4->ip_hl << 2);
+		ip4->ip_ttl = cqe->lro_min_ttl;
+		ip4->ip_len = cpu_to_be16(tot_len);
+		ip4->ip_sum = 0;
+		ip4->ip_sum = in_cksum(mb, ip4->ip_hl << 2);
 	} else {
-		ip6->ip6_hlim	= cqe->lro_min_ttl;
-		ip6->ip6_plen	= cpu_to_be16(tot_len -
+		ip6->ip6_hlim = cqe->lro_min_ttl;
+		ip6->ip6_plen = cpu_to_be16(tot_len -
 		    sizeof(struct ip6_hdr));
 	}
 	/* TODO: handle tcp checksum */
@@ -180,7 +181,7 @@ mlx5e_build_rx_mbuf(struct mlx5_cqe64 *c
     u32 cqe_bcnt)
 {
 	struct ifnet *ifp = rq->ifp;
-	int lro_num_seg; /* HW LRO session aggregated packets counter */
+	int lro_num_seg;	/* HW LRO session aggregated packets counter */
 
 	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
 	if (lro_num_seg > 1) {
@@ -195,6 +196,7 @@ mlx5e_build_rx_mbuf(struct mlx5_cqe64 *c
 		mb->m_pkthdr.flowid = be32_to_cpu(cqe->rss_hash_result);
 	else
 		mb->m_pkthdr.flowid = rq->ix;
+
 	M_HASHTYPE_SET(mb, M_HASHTYPE_OPAQUE);
 	mb->m_pkthdr.rcvif = ifp;
 
@@ -306,6 +308,7 @@ mlx5e_rx_cq_comp(struct mlx5_core_cq *mc
 
 #ifdef HAVE_PER_CQ_EVENT_PACKET
 	struct mbuf *mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz);
+
 	if (mb != NULL) {
 		/* this code is used for debugging purpose only */
 		mb->m_pkthdr.len = mb->m_len = 15;

Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c	Thu Nov 19 10:23:10 2015	(r291069)
+++ head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c	Thu Nov 19 10:28:51 2015	(r291070)
@@ -166,8 +166,10 @@ mlx5e_get_header_size(struct mbuf *mb)
 	return (eth_hdr_len);
 }
 
-/* The return value is not going back to the stack because of
- * the drbr */
+/*
+ * The return value is not going back to the stack because of
+ * the drbr
+ */
 static int
 mlx5e_sq_xmit(struct mlx5e_sq *sq, struct mbuf **mbp)
 {
@@ -184,8 +186,10 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struc
 	u16 pi;
 	u8 opcode;
 
-	/* Return ENOBUFS if the queue is full, this may trigger reinsertion
-	 * of the mbuf into the drbr (see mlx5e_xmit_locked) */
+	/*
+	 * Return ENOBUFS if the queue is full, this may trigger reinsertion
+	 * of the mbuf into the drbr (see mlx5e_xmit_locked)
+	 */
 	if (unlikely(!mlx5e_sq_has_room_for(sq, 2 * MLX5_SEND_WQE_MAX_WQEBBS))) {
 		return (ENOBUFS);
 	}
@@ -193,7 +197,7 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struc
 	/* Align SQ edge with NOPs to avoid WQE wrap around */
 	pi = ((~sq->pc) & sq->wq.sz_m1);
 	if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
-		/* send one multi NOP message instead of many */
+		/* Send one multi NOP message instead of many */
 		mlx5e_send_nop(sq, (pi + 1) * MLX5_SEND_WQEBB_NUM_DS, false);
 		pi = ((~sq->pc) & sq->wq.sz_m1);
 		if (pi < (MLX5_SEND_WQE_MAX_WQEBBS - 1)) {
@@ -209,7 +213,7 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struc
 
 	memset(wqe, 0, sizeof(*wqe));
 
-	/* send a copy of the frame to the BPF listener, if any */
+	/* Send a copy of the frame to the BPF listener, if any */
 	if (ifp != NULL && ifp->if_bpf != NULL)
 		ETHER_BPF_MTAP(ifp, mb);
 
@@ -219,10 +223,9 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struc
 	if (mb->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) {
 		wqe->eth.cs_flags |= MLX5_ETH_WQE_L4_CSUM;
 	}
-	if ( wqe->eth.cs_flags == 0 ) {
+	if (wqe->eth.cs_flags == 0) {
 		sq->stats.csum_offload_none++;
 	}
-
 	if (mb->m_pkthdr.csum_flags & CSUM_TSO) {
 		u32 payload_len;
 		u32 mss = mb->m_pkthdr.tso_segsz;
@@ -249,7 +252,8 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struc
 	if (mb->m_flags & M_VLANTAG) {
 		struct ether_vlan_header *eh =
 		    (struct ether_vlan_header *)wqe->eth.inline_hdr_start;
-		/* range checks */
+
+		/* Range checks */
 		if (ihs > (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN))
 			ihs = (MLX5E_MAX_TX_INLINE - ETHER_VLAN_ENCAP_LEN);
 		else if (ihs < ETHER_HDR_LEN) {
@@ -258,14 +262,14 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struc
 		}
 		m_copydata(mb, 0, ETHER_HDR_LEN, (caddr_t)eh);
 		m_adj(mb, ETHER_HDR_LEN);
-		/* insert 4 bytes VLAN tag into data stream */
+		/* Insert 4 bytes VLAN tag into data stream */
 		eh->evl_proto = eh->evl_encap_proto;
 		eh->evl_encap_proto = htons(ETHERTYPE_VLAN);
 		eh->evl_tag = htons(mb->m_pkthdr.ether_vtag);
-		/* copy rest of header data, if any */
+		/* Copy rest of header data, if any */
 		m_copydata(mb, 0, ihs - ETHER_HDR_LEN, (caddr_t)(eh + 1));
 		m_adj(mb, ihs - ETHER_HDR_LEN);
-		/* extend header by 4 bytes */
+		/* Extend header by 4 bytes */
 		ihs += ETHER_VLAN_ENCAP_LEN;
 	} else {
 		m_copydata(mb, 0, ihs, wqe->eth.inline_hdr_start);
@@ -281,10 +285,10 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struc
 	}
 	dseg = ((struct mlx5_wqe_data_seg *)&wqe->ctrl) + ds_cnt;
 
-	/* trim off empty mbufs */
+	/* Trim off empty mbufs */
 	while (mb->m_len == 0) {
 		mb = m_free(mb);
-		/* check if all data has been inlined */
+		/* Check if all data has been inlined */
 		if (mb == NULL)
 			goto skip_dma;
 	}
@@ -292,7 +296,10 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struc
 	err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
 	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
 	if (err == EFBIG) {
-		/* Update *mbp before defrag in case it was trimmed in the loop above */
+		/*
+		 * Update *mbp before defrag in case it was trimmed in the
+		 * loop above
+		 */
 		*mbp = mb;
 		/* Update statistics */
 		sq->stats.defragged++;
@@ -306,7 +313,7 @@ mlx5e_sq_xmit(struct mlx5e_sq *sq, struc
 		err = bus_dmamap_load_mbuf_sg(sq->dma_tag, sq->mbuf[pi].dma_map,
 		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
 	}
-	/* catch errors */
+	/* Catch errors */
 	if (err != 0) {
 		goto tx_drop;
 	}
@@ -327,12 +334,12 @@ skip_dma:
 	wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
 	wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
 
-	/* store pointer to mbuf */
+	/* Store pointer to mbuf */
 	sq->mbuf[pi].mbuf = mb;
 	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
 	sq->pc += sq->mbuf[pi].num_wqebbs;
 
-	/* make sure all mbuf data is written to RAM */
+	/* Make sure all mbuf data is written to RAM */
 	if (mb != NULL)
 		bus_dmamap_sync(sq->dma_tag, sq->mbuf[pi].dma_map, BUS_DMASYNC_PREWRITE);
 
@@ -370,7 +377,7 @@ mlx5e_poll_tx_cq(struct mlx5e_sq *sq, in

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

