svn commit: r296382 - in head/sys/ofed/drivers/infiniband: hw/mlx4 ulp/ipoib

Hans Petter Selasky hselasky at FreeBSD.org
Fri Mar 4 09:07:33 UTC 2016


Author: hselasky
Date: Fri Mar  4 09:07:30 2016
New Revision: 296382
URL: https://svnweb.freebsd.org/changeset/base/296382

Log:
  Whitespace fixes.
  
  MFC after:	1 week
  Sponsored by:	Mellanox Technologies

Modified:
  head/sys/ofed/drivers/infiniband/hw/mlx4/cm.c
  head/sys/ofed/drivers/infiniband/hw/mlx4/cq.c
  head/sys/ofed/drivers/infiniband/hw/mlx4/mad.c
  head/sys/ofed/drivers/infiniband/hw/mlx4/main.c
  head/sys/ofed/drivers/infiniband/hw/mlx4/mcg.c
  head/sys/ofed/drivers/infiniband/hw/mlx4/mlx4_ib.h
  head/sys/ofed/drivers/infiniband/hw/mlx4/mr.c
  head/sys/ofed/drivers/infiniband/hw/mlx4/qp.c
  head/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c
  head/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_verbs.c

Modified: head/sys/ofed/drivers/infiniband/hw/mlx4/cm.c
==============================================================================
--- head/sys/ofed/drivers/infiniband/hw/mlx4/cm.c	Fri Mar  4 07:07:42 2016	(r296381)
+++ head/sys/ofed/drivers/infiniband/hw/mlx4/cm.c	Fri Mar  4 09:07:30 2016	(r296382)
@@ -82,8 +82,8 @@ static void set_local_comm_id(struct ib_
 		pr_err("trying to set local_comm_id in SIDR_REP\n");
 		return;
 	} else {
-	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
-	msg->local_comm_id = cpu_to_be32(cm_id);
+		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+		msg->local_comm_id = cpu_to_be32(cm_id);
 	}
 }
 
@@ -97,8 +97,8 @@ static u32 get_local_comm_id(struct ib_m
 		pr_err("trying to set local_comm_id in SIDR_REP\n");
 		return -1;
 	} else {
-	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
-	return be32_to_cpu(msg->local_comm_id);
+		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+		return be32_to_cpu(msg->local_comm_id);
 	}
 }
 
@@ -112,8 +112,8 @@ static void set_remote_comm_id(struct ib
 		pr_err("trying to set remote_comm_id in SIDR_REQ\n");
 		return;
 	} else {
-	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
-	msg->remote_comm_id = cpu_to_be32(cm_id);
+		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+		msg->remote_comm_id = cpu_to_be32(cm_id);
 	}
 }
 
@@ -127,8 +127,8 @@ static u32 get_remote_comm_id(struct ib_
 		pr_err("trying to set remote_comm_id in SIDR_REQ\n");
 		return -1;
 	} else {
-	struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
-	return be32_to_cpu(msg->remote_comm_id);
+		struct cm_generic_msg *msg = (struct cm_generic_msg *)mad;
+		return be32_to_cpu(msg->remote_comm_id);
 	}
 }
 

Modified: head/sys/ofed/drivers/infiniband/hw/mlx4/cq.c
==============================================================================
--- head/sys/ofed/drivers/infiniband/hw/mlx4/cq.c	Fri Mar  4 07:07:42 2016	(r296381)
+++ head/sys/ofed/drivers/infiniband/hw/mlx4/cq.c	Fri Mar  4 09:07:30 2016	(r296382)
@@ -873,10 +873,10 @@ repoll:
 					       + !cqe->timestamp_0_15) << 16)
 					| be16_to_cpu(cqe->timestamp_0_15);
 				wc->wc_flags |= IB_WC_WITH_TIMESTAMP;
-		}
+			}
 		} else {
 			wc->wc_flags |= IB_WC_WITH_SLID;
-		wc->slid	   = be16_to_cpu(cqe->rlid);
+			wc->slid	   = be16_to_cpu(cqe->rlid);
 		}
 		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
 		wc->src_qp	   = g_mlpath_rqpn & 0xffffff;
@@ -886,12 +886,12 @@ repoll:
 		wc->wc_flags	  |= mlx4_ib_ipoib_csum_ok(cqe->status,
 					cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
 		if (!timestamp_en) {
-		if (rdma_port_get_link_layer(wc->qp->device,
+			if (rdma_port_get_link_layer(wc->qp->device,
 						     (*cur_qp)->port) ==
 						      IB_LINK_LAYER_ETHERNET)
-			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
-		else
-			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
+				wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
+			else
+				wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
 			wc->wc_flags	  |= IB_WC_WITH_SL;
 		}
 		if ((be32_to_cpu(cqe->vlan_my_qpn) &

Modified: head/sys/ofed/drivers/infiniband/hw/mlx4/mad.c
==============================================================================
--- head/sys/ofed/drivers/infiniband/hw/mlx4/mad.c	Fri Mar  4 07:07:42 2016	(r296381)
+++ head/sys/ofed/drivers/infiniband/hw/mlx4/mad.c	Fri Mar  4 09:07:30 2016	(r296382)
@@ -1813,8 +1813,8 @@ static int create_pv_sqp(struct mlx4_ib_
 		ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
 					      ctx->port, 0xFFFF, &attr.pkey_index);
 	if (ret || !create_tun)
-	attr.pkey_index =
-		to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
+		attr.pkey_index =
+			to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
 	attr.qkey = IB_QP1_QKEY;
 	attr.port_num = ctx->port;
 	ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);

Modified: head/sys/ofed/drivers/infiniband/hw/mlx4/main.c
==============================================================================
--- head/sys/ofed/drivers/infiniband/hw/mlx4/main.c	Fri Mar  4 07:07:42 2016	(r296381)
+++ head/sys/ofed/drivers/infiniband/hw/mlx4/main.c	Fri Mar  4 09:07:30 2016	(r296382)
@@ -1119,7 +1119,7 @@ static int __mlx4_ib_create_flow(struct 
 	if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
 		pr_err("Invalid priority value.\n");
 		return -EINVAL;
-                    }
+	}
 	if (domain >= IB_FLOW_DOMAIN_NUM) {
 		pr_err("Invalid domain value.\n");
 		return -EINVAL;
@@ -1198,7 +1198,7 @@ static struct ib_flow *mlx4_ib_create_fl
 	switch (flow_attr->type) {
 	case IB_FLOW_ATTR_NORMAL:
 		type[0] = MLX4_FS_REGULAR;
-			break;
+		break;
 
 	case IB_FLOW_ATTR_ALL_DEFAULT:
 		type[0] = MLX4_FS_ALL_DEFAULT;
@@ -1221,7 +1221,7 @@ static struct ib_flow *mlx4_ib_create_fl
 	while (i < ARRAY_SIZE(type) && type[i]) {
 		err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
 					    &mflow->reg_id[i]);
-	if (err)
+		if (err)
 			goto err_free;
 		i++;
 	}
@@ -1605,11 +1605,11 @@ static void update_gids_task(struct work
 					IB_LINK_LAYER_ETHERNET) {
 		err = mlx4_cmd(dev, mailbox->dma,
 			       MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
-		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
-		       MLX4_CMD_WRAPPED);
+			       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+			       MLX4_CMD_WRAPPED);
 
-	if (err)
-		pr_warn("set port command failed\n");
+		if (err)
+			pr_warn("set port command failed\n");
 		else
 			mlx4_ib_dispatch_event(gw->dev, gw->port,
 					       IB_EVENT_GID_CHANGE);
@@ -1686,8 +1686,8 @@ static int update_gid_table(struct mlx4_
 			if (found >= 0) {
 				need_update = 1;
 				dev->iboe.gid_table[port - 1][found] = zgid;
-					break;
-				}
+				break;
+			}
 		} else {
 			if (found >= 0)
 				break;
@@ -1696,22 +1696,22 @@ static int update_gid_table(struct mlx4_
 			    !memcmp(&dev->iboe.gid_table[port - 1][i],
 				    &zgid, sizeof(*gid)))
 				free = i;
-				}
-			}
+		}
+	}
 
 	if (found == -1 && !clear && free < 0) {
 		pr_err("GID table of port %d is full. Can't add "GID_PRINT_FMT"\n",
 		       port, GID_PRINT_ARGS(gid));
 		return -ENOMEM;
-		}
+	}
 	if (found == -1 && clear) {
 		pr_err(GID_PRINT_FMT" is not in GID table of port %d\n", GID_PRINT_ARGS(gid), port);
 		return -EINVAL;
-        }
+	}
 	if (found == -1 && !clear && free >= 0) {
 		dev->iboe.gid_table[port - 1][free] = *gid;
 		need_update = 1;
-        }
+	}
 
 	if (!need_update)
 		return 0;
@@ -1721,10 +1721,10 @@ static int update_gid_table(struct mlx4_
 		return -ENOMEM;
 
 	memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof(work->gids));
-		INIT_WORK(&work->work, update_gids_task);
-		work->port = port;
-		work->dev = dev;
-		queue_work(wq, &work->work);
+	INIT_WORK(&work->work, update_gids_task);
+	work->port = port;
+	work->dev = dev;
+	queue_work(wq, &work->work);
 
 	return 0;
 }
@@ -1773,7 +1773,7 @@ static u8 mlx4_ib_get_dev_port(struct ne
 	for (port = 1; port <= MLX4_MAX_PORTS; ++port)
 		if ((netif_is_bond_master(real_dev) && (real_dev == iboe->masters[port - 1])) ||
 		    (!netif_is_bond_master(real_dev) && (real_dev == iboe->netdevs[port - 1])))
-		break;
+			break;
 
 	return port > MLX4_MAX_PORTS ? 0 : port;
 }
@@ -1809,11 +1809,11 @@ static void mlx4_ib_get_dev_addr(struct 
 		read_lock_bh(&in6_dev->lock);
 		list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
 			pgid = (union ib_gid *)&ifp->addr;
-			update_gid_table(ibdev, port, pgid, 0, 0);
-	}
+				update_gid_table(ibdev, port, pgid, 0, 0);
+			}
 		read_unlock_bh(&in6_dev->lock);
 		in6_dev_put(in6_dev);
-	}
+		}
 #endif
 }
 
@@ -2002,10 +2002,10 @@ static void mlx4_ib_alloc_eqs(struct mlx
 	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
 		for (j = 0; j < eq_per_port; j++) {
 			sprintf(name, "mlx4-ib-%d-%d@%d:%d:%d:%d", i, j,
-			    pci_get_domain(dev->pdev->dev.bsddev),
-			    pci_get_bus(dev->pdev->dev.bsddev),
-			    PCI_SLOT(dev->pdev->devfn),
-			    PCI_FUNC(dev->pdev->devfn));
+						pci_get_domain(dev->pdev->dev.bsddev),
+						pci_get_bus(dev->pdev->dev.bsddev),
+						PCI_SLOT(dev->pdev->devfn),
+						PCI_FUNC(dev->pdev->devfn));
 
 			/* Set IRQ for specific name (per ring) */
 			if (mlx4_assign_eq(dev, name,
@@ -2168,7 +2168,7 @@ static struct attribute_group diag_count
 static void init_dev_assign(void)
 {
 	int i = 1;
-	
+
 	spin_lock_init(&dev_num_str_lock);
 	if (mlx4_fill_dbdf2val_tbl(&dev_assign_str))
 		return;
@@ -2268,7 +2268,7 @@ static void *mlx4_ib_add(struct mlx4_dev
 	if (dev_idx >= 0)
 		sprintf(ibdev->ib_dev.name, "mlx4_%d", dev_idx);
 	else
-	strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
+		strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
 
 	ibdev->ib_dev.owner		= THIS_MODULE;
 	ibdev->ib_dev.node_type		= RDMA_NODE_IB_CA;
@@ -2471,8 +2471,8 @@ static void *mlx4_ib_add(struct mlx4_dev
 
 	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) {
 		if (!iboe->nb.notifier_call) {
-		iboe->nb.notifier_call = mlx4_ib_netdev_event;
-		err = register_netdevice_notifier(&iboe->nb);
+			iboe->nb.notifier_call = mlx4_ib_netdev_event;
+			err = register_netdevice_notifier(&iboe->nb);
 			if (err) {
 				iboe->nb.notifier_call = NULL;
 				goto err_notify;
@@ -2519,8 +2519,8 @@ err_notify:
         }
 
 	if (ibdev->iboe.nb.notifier_call) {
-	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
-		pr_warn("failure unregistering notifier\n");
+		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
+			pr_warn("failure unregistering notifier\n");
 		ibdev->iboe.nb.notifier_call = NULL;
 	}
 	if (ibdev->iboe.nb_inet.notifier_call) {
@@ -2873,12 +2873,12 @@ module_exit(mlx4_ib_cleanup);
 static int
 mlx4ib_evhand(module_t mod, int event, void *arg)
 {
-        return (0);
+	return (0);
 }
 
 static moduledata_t mlx4ib_mod = {
-        .name = "mlx4ib",
-        .evhand = mlx4ib_evhand,
+	.name = "mlx4ib",
+	.evhand = mlx4ib_evhand,
 };
 
 DECLARE_MODULE(mlx4ib, mlx4ib_mod, SI_SUB_SMP, SI_ORDER_ANY);

Modified: head/sys/ofed/drivers/infiniband/hw/mlx4/mcg.c
==============================================================================
--- head/sys/ofed/drivers/infiniband/hw/mlx4/mcg.c	Fri Mar  4 07:07:42 2016	(r296381)
+++ head/sys/ofed/drivers/infiniband/hw/mlx4/mcg.c	Fri Mar  4 09:07:30 2016	(r296382)
@@ -135,8 +135,8 @@ struct mcast_req {
 
 
 #define safe_atomic_dec(ref) \
-        do {\
-                if (atomic_dec_and_test(ref)) \
+	do {\
+		if (atomic_dec_and_test(ref)) \
 			mcg_warn_group(group, "did not expect to reach zero\n"); \
 	} while (0)
 
@@ -570,7 +570,7 @@ static void mlx4_ib_mcg_timeout_handler(
 	group->state = MCAST_IDLE;
 	atomic_inc(&group->refcount);
 	if (!queue_work(group->demux->mcg_wq, &group->work))
-        safe_atomic_dec(&group->refcount);
+		safe_atomic_dec(&group->refcount);
 
 	mutex_unlock(&group->lock);
 }
@@ -877,7 +877,7 @@ static void queue_req(struct mcast_req *
 	list_add_tail(&req->func_list, &group->func[req->func].pending);
 	/* calls mlx4_ib_mcg_work_handler */
 	if (!queue_work(group->demux->mcg_wq, &group->work))
-        safe_atomic_dec(&group->refcount);
+		safe_atomic_dec(&group->refcount);
 }
 
 int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
@@ -913,7 +913,7 @@ int mlx4_ib_mcg_demux_handler(struct ib_
 		/* calls mlx4_ib_mcg_work_handler */
 		atomic_inc(&group->refcount);
 		if (!queue_work(ctx->mcg_wq, &group->work))
-                safe_atomic_dec(&group->refcount);
+			safe_atomic_dec(&group->refcount);
 		mutex_unlock(&group->lock);
 		release_group(group, 0);
 		return 1; /* consumed */

Modified: head/sys/ofed/drivers/infiniband/hw/mlx4/mlx4_ib.h
==============================================================================
--- head/sys/ofed/drivers/infiniband/hw/mlx4/mlx4_ib.h	Fri Mar  4 07:07:42 2016	(r296381)
+++ head/sys/ofed/drivers/infiniband/hw/mlx4/mlx4_ib.h	Fri Mar  4 09:07:30 2016	(r296382)
@@ -507,7 +507,7 @@ struct mlx4_ib_iboe {
 	spinlock_t		lock;
 	struct net_device      *netdevs[MLX4_MAX_PORTS];
 	struct net_device      *masters[MLX4_MAX_PORTS];
-	struct notifier_block 	nb;
+	struct notifier_block	nb;
 	struct notifier_block	nb_inet;
 	union ib_gid		gid_table[MLX4_MAX_PORTS][128];
 };

Modified: head/sys/ofed/drivers/infiniband/hw/mlx4/mr.c
==============================================================================
--- head/sys/ofed/drivers/infiniband/hw/mlx4/mr.c	Fri Mar  4 07:07:42 2016	(r296381)
+++ head/sys/ofed/drivers/infiniband/hw/mlx4/mr.c	Fri Mar  4 09:07:30 2016	(r296382)
@@ -240,7 +240,7 @@ int mlx4_ib_umem_write_mtt(struct mlx4_i
 			cur_start_addr =
 				sg_dma_address(sg);
 			len = sg_dma_len(sg);
-		}
+	}
 
 	/* Handle the last block */
 	if (len > 0) {
@@ -365,40 +365,40 @@ int mlx4_ib_umem_calc_optimal_mtt_size(s
 		next_block_start =
 			sg_dma_address(sg);
 		current_block_end = current_block_start
-				+ current_block_len;
-			/* If we have a split (non-contig.) between two block*/
-			if (current_block_end != next_block_start) {
-				block_shift = mlx4_ib_umem_calc_block_mtt(
-						next_block_start,
-						current_block_end,
-						block_shift);
-
-				/* If we reached the minimum shift for 4k
-				     page we stop the loop.
-				*/
-				if (block_shift <= min_shift)
-					goto end;
-
-				/* If not saved yet we are in first block -
-				     we save the length of first block to
-				     calculate the non_aligned_pages number at
-				*    the end.
-				*/
-				total_len += current_block_len;
-
-				/* Start a new block */
-				current_block_start = next_block_start;
-				current_block_len =
-				sg_dma_len(sg);
-				continue;
-			}
-			/* The scatter entry is another part of
-			     the current block, increase the block size
-			* An entry in the scatter can be larger than
-			4k (page) as of dma mapping
-			which merge some blocks together.
+			+ current_block_len;
+		/* If we have a split (non-contig.) between two block*/
+		if (current_block_end != next_block_start) {
+			block_shift = mlx4_ib_umem_calc_block_mtt(
+					next_block_start,
+					current_block_end,
+					block_shift);
+
+			/* If we reached the minimum shift for 4k
+			     page we stop the loop.
+			*/
+			if (block_shift <= min_shift)
+				goto end;
+
+			/* If not saved yet we are in first block -
+			     we save the length of first block to
+			     calculate the non_aligned_pages number at
+			*    the end.
 			*/
-			current_block_len +=
+			total_len += current_block_len;
+
+			/* Start a new block */
+			current_block_start = next_block_start;
+			current_block_len =
+				sg_dma_len(sg);
+			continue;
+		}
+		/* The scatter entry is another part of
+		     the current block, increase the block size
+		* An entry in the scatter can be larger than
+		4k (page) as of dma mapping
+		which merge some blocks together.
+		*/
+		current_block_len +=
 			sg_dma_len(sg);
 	}
 
@@ -641,7 +641,7 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
 	if (!umem)
 		goto end;
 
-		ib_umem_release(mr->umem);
+	ib_umem_release(mr->umem);
 end:
 
 	kfree(mr);

Modified: head/sys/ofed/drivers/infiniband/hw/mlx4/qp.c
==============================================================================
--- head/sys/ofed/drivers/infiniband/hw/mlx4/qp.c	Fri Mar  4 07:07:42 2016	(r296381)
+++ head/sys/ofed/drivers/infiniband/hw/mlx4/qp.c	Fri Mar  4 09:07:30 2016	(r296382)
@@ -1691,7 +1691,7 @@ static int mlx4_set_path(struct mlx4_ib_
 		 * If one was already assigned, but the new mac differs,
 		 * unregister the old one and register the new one.
 		*/
-			u64_mac = mlx4_mac_to_u64(smac);
+		u64_mac = mlx4_mac_to_u64(smac);
 
 		if (!smac_info->smac || smac_info->smac != u64_mac) {
 			/* register candidate now, unreg if needed, after success */
@@ -2746,7 +2746,7 @@ static __be32 convert_access(int acc)
 		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) |
 	       (acc & IB_ACCESS_REMOTE_READ   ?
 		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ)  : 0) |
-	       (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)  		: 0) |
+	       (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)  : 0) |
 		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);
 }
 

Modified: head/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c
==============================================================================
--- head/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c	Fri Mar  4 07:07:42 2016	(r296381)
+++ head/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_ib.c	Fri Mar  4 09:07:30 2016	(r296382)
@@ -386,7 +386,6 @@ poll_more:
 	spin_lock(&priv->drain_lock);
 	for (;;) {
 		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
-
 		for (i = 0; i < n; i++) {
 			struct ib_wc *wc = priv->ibwc + i;
 

Modified: head/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
==============================================================================
--- head/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_verbs.c	Fri Mar  4 07:07:42 2016	(r296381)
+++ head/sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_verbs.c	Fri Mar  4 09:07:30 2016	(r296382)
@@ -89,6 +89,7 @@ int ipoib_init_qp(struct ipoib_dev_priv 
 	    IB_QP_PORT |
 	    IB_QP_PKEY_INDEX |
 	    IB_QP_STATE;
+
 	ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
 	if (ret) {
 		ipoib_warn(priv, "failed to modify QP to init, ret = %d\n", ret);
