svn commit: r343598 - in head/sys: dev/qlnx/qlnxe dev/qlnx/qlnxr modules/qlnx modules/qlnx/qlnxe modules/qlnx/qlnxr

David C Somayajulu davidcs at FreeBSD.org
Thu Jan 31 00:09:39 UTC 2019


Author: davidcs
Date: Thu Jan 31 00:09:38 2019
New Revision: 343598
URL: https://svnweb.freebsd.org/changeset/base/343598

Log:
  Add RDMA (iWARP and RoCEv1) support
  
  David Somayajulu (davidcs): Overall RDMA Driver infrastructure and iWARP
  Anand Khoje (akhoje at marvell.com): RoCEv1 verbs implementation
  
  MFC after: 5 days

Added:
  head/sys/dev/qlnx/qlnxe/ecore_iwarp.c   (contents, props changed)
  head/sys/dev/qlnx/qlnxe/ecore_ll2.c   (contents, props changed)
  head/sys/dev/qlnx/qlnxe/ecore_ooo.c   (contents, props changed)
  head/sys/dev/qlnx/qlnxe/ecore_rdma.c   (contents, props changed)
  head/sys/dev/qlnx/qlnxe/ecore_roce.c   (contents, props changed)
  head/sys/dev/qlnx/qlnxe/qlnx_rdma.c   (contents, props changed)
  head/sys/dev/qlnx/qlnxe/qlnx_rdma.h   (contents, props changed)
  head/sys/dev/qlnx/qlnxr/
  head/sys/dev/qlnx/qlnxr/qlnxr_cm.c   (contents, props changed)
  head/sys/dev/qlnx/qlnxr/qlnxr_cm.h   (contents, props changed)
  head/sys/dev/qlnx/qlnxr/qlnxr_def.h   (contents, props changed)
  head/sys/dev/qlnx/qlnxr/qlnxr_os.c   (contents, props changed)
  head/sys/dev/qlnx/qlnxr/qlnxr_roce.h   (contents, props changed)
  head/sys/dev/qlnx/qlnxr/qlnxr_user.h   (contents, props changed)
  head/sys/dev/qlnx/qlnxr/qlnxr_verbs.c   (contents, props changed)
  head/sys/dev/qlnx/qlnxr/qlnxr_verbs.h   (contents, props changed)
  head/sys/modules/qlnx/qlnxr/
  head/sys/modules/qlnx/qlnxr/Makefile   (contents, props changed)
Modified:
  head/sys/modules/qlnx/Makefile
  head/sys/modules/qlnx/qlnxe/Makefile

Added: head/sys/dev/qlnx/qlnxe/ecore_iwarp.c
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ head/sys/dev/qlnx/qlnxe/ecore_iwarp.c	Thu Jan 31 00:09:38 2019	(r343598)
@@ -0,0 +1,3970 @@
+/*
+ * Copyright (c) 2018-2019 Cavium, Inc.
+ * All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ *  POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * File : ecore_iwarp.c
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "bcm_osal.h"
+#include "ecore.h"
+#include "ecore_status.h"
+#include "ecore_sp_commands.h"
+#include "ecore_cxt.h"
+#include "ecore_rdma.h"
+#include "reg_addr.h"
+#include "ecore_hw.h"
+#include "ecore_hsi_iwarp.h"
+#include "ecore_ll2.h"
+#include "ecore_ooo.h"
+#ifndef LINUX_REMOVE
+#include "ecore_tcp_ip.h"
+#endif
+
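+/* On Windows (NTDDK) builds, quiet driver code-analysis warnings
+ * (likely IRQL/locking checks) triggered by this file.
+ */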
+#ifdef _NTDDK_
+#pragma warning(push)
+#pragma warning(disable : 28123)
+#pragma warning(disable : 28167)
+#endif
+
+/* Default values used for MPA Rev 1 */
+#define ECORE_IWARP_ORD_DEFAULT 32
+#define ECORE_IWARP_IRD_DEFAULT 32
+
+#define ECORE_IWARP_MAX_FW_MSS  4120
+
+struct mpa_v2_hdr {
+	__be16 ird;
+	__be16 ord;
+};
+
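+/* The top bits of the MPA v2 ird/ord fields carry control flags; the low
+ * 14 bits (MPA_V2_IRD_ORD_MASK) hold the actual IRD/ORD values.
+ */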
+#define MPA_V2_PEER2PEER_MODEL	0x8000
+#define MPA_V2_SEND_RTR		0x4000 /* on ird */
+#define MPA_V2_READ_RTR		0x4000 /* on ord */
+#define MPA_V2_WRITE_RTR	0x8000
+#define MPA_V2_IRD_ORD_MASK	0x3FFF
+
+#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)
+
+#define ECORE_IWARP_INVALID_TCP_CID 0xffffffff
+/* How many times a FIN will be sent before the FW aborts and sends an RST */
+#define ECORE_IWARP_MAX_FIN_RT_DEFAULT 2
+#define ECORE_IWARP_RCV_WND_SIZE_MIN (0xffff)
+/* INTERNAL: These numbers are derived from BRB buffer sizes to obtain optimal performance */
+#define ECORE_IWARP_RCV_WND_SIZE_BB_DEF_2_PORTS (200*1024)
+#define ECORE_IWARP_RCV_WND_SIZE_BB_DEF_4_PORTS (100*1024)
+#define ECORE_IWARP_RCV_WND_SIZE_AH_DEF_2_PORTS (150*1024)
+#define ECORE_IWARP_RCV_WND_SIZE_AH_DEF_4_PORTS (90*1024)
+#define ECORE_IWARP_MAX_WND_SCALE    (14)
+/* The timestamp option is 10 bytes long (kind: 8 bits, length: 8 bits,
+ * timestamp: 32 bits, echo reply: 32 bits), rounded up to a multiple of 4.
+ */
+#define TIMESTAMP_HEADER_SIZE (12)
+
+static enum _ecore_status_t
+ecore_iwarp_async_event(struct ecore_hwfn *p_hwfn,
+			u8 fw_event_code,
+			u16 OSAL_UNUSED echo,
+			union event_ring_data *data,
+			u8 fw_return_code);
+
+static enum _ecore_status_t
+ecore_iwarp_empty_ramrod(struct ecore_hwfn *p_hwfn,
+			 struct ecore_iwarp_listener *listener);
+
+static OSAL_INLINE struct ecore_iwarp_fpdu *
+ecore_iwarp_get_curr_fpdu(struct ecore_hwfn *p_hwfn, u16 cid);
+
+/* Override devinfo with iWARP specific values */
+void
+ecore_iwarp_init_devinfo(struct ecore_hwfn *p_hwfn)
+{
+	struct ecore_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+	dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
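+	/* Part of the cid range is preallocated for passive tcp connections
+	 * (ECORE_IWARP_PREALLOC_CNT), so exclude it from the qp budget.
+	 */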
+	dev->max_qp = OSAL_MIN_T(u64,
+				 IWARP_MAX_QPS,
+				 p_hwfn->p_rdma_info->num_qps) -
+		ECORE_IWARP_PREALLOC_CNT;
+
+	dev->max_cq = dev->max_qp;
+
+	dev->max_qp_resp_rd_atomic_resc = ECORE_IWARP_IRD_DEFAULT;
+	dev->max_qp_req_rd_atomic_resc = ECORE_IWARP_ORD_DEFAULT;
+}
+
+enum _ecore_status_t
+ecore_iwarp_init_hw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+{
+	p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
+	ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
+	p_hwfn->b_rdma_enabled_in_prs = true;
+
+	return ECORE_SUCCESS;
+}
+
+void
+ecore_iwarp_init_fw_ramrod(struct ecore_hwfn *p_hwfn,
+			   struct iwarp_init_func_ramrod_data *p_ramrod)
+{
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
+		   "ooo handle = %d\n",
+		   p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle);
+
+	p_ramrod->iwarp.ll2_ooo_q_index =
+		p_hwfn->hw_info.resc_start[ECORE_LL2_QUEUE] +
+		p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+
+	p_ramrod->tcp.max_fin_rt = ECORE_IWARP_MAX_FIN_RT_DEFAULT;
+	return;
+}
+
+static enum _ecore_status_t
+ecore_iwarp_alloc_cid(struct ecore_hwfn *p_hwfn, u32 *cid)
+{
+	enum _ecore_status_t rc;
+
+	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
+
+	rc = ecore_rdma_bmap_alloc_id(p_hwfn,
+				      &p_hwfn->p_rdma_info->cid_map,
+				      cid);
+
+	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
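+	/* Convert the bitmap index into a global cid by adding the start of
+	 * this protocol's cid range.
+	 */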
+	*cid += ecore_cxt_get_proto_cid_start(p_hwfn,
+					      p_hwfn->p_rdma_info->proto);
+	if (rc != ECORE_SUCCESS) {
+		DP_NOTICE(p_hwfn, false, "Failed in allocating iwarp cid\n");
+		return rc;
+	}
+
+	rc = ecore_cxt_dynamic_ilt_alloc(p_hwfn, ECORE_ELEM_CXT, *cid);
+
+	if (rc != ECORE_SUCCESS) {
+		OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
+		*cid -= ecore_cxt_get_proto_cid_start(p_hwfn,
+					     p_hwfn->p_rdma_info->proto);
+
+		ecore_bmap_release_id(p_hwfn,
+				      &p_hwfn->p_rdma_info->cid_map,
+				      *cid);
+
+		OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
+	}
+
+	return rc;
+}
+
+static void
+ecore_iwarp_set_tcp_cid(struct ecore_hwfn *p_hwfn, u32 cid)
+{
+	cid -= ecore_cxt_get_proto_cid_start(p_hwfn,
+					     p_hwfn->p_rdma_info->proto);
+
+	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
+	ecore_bmap_set_id(p_hwfn,
+			  &p_hwfn->p_rdma_info->tcp_cid_map,
+			  cid);
+	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
+}
+
+/* This function allocates a cid for passive tcp (called from syn receive).
+ * The reason it is separate from the regular cid allocation is that these
+ * cids are assured to already have an ilt allocated. They are preallocated
+ * to ensure that we won't need to allocate memory during syn processing.
+ */
+static enum _ecore_status_t
+ecore_iwarp_alloc_tcp_cid(struct ecore_hwfn *p_hwfn, u32 *cid)
+{
+	enum _ecore_status_t rc;
+
+	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
+
+	rc = ecore_rdma_bmap_alloc_id(p_hwfn,
+				      &p_hwfn->p_rdma_info->tcp_cid_map,
+				      cid);
+
+	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
+
+	*cid += ecore_cxt_get_proto_cid_start(p_hwfn,
+					      p_hwfn->p_rdma_info->proto);
+	if (rc != ECORE_SUCCESS) {
+		DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
+			   "can't allocate iwarp tcp cid max-count=%d\n",
+			   p_hwfn->p_rdma_info->tcp_cid_map.max_count);
+
+		*cid = ECORE_IWARP_INVALID_TCP_CID;
+	}
+
+	return rc;
+}
+
+/* We have two cid maps: one for tcp, which should be used only for passive
+ * syn processing and for replacing a pre-allocated ep in the list; the
+ * second for active tcp and for QPs.
+ */
+static void ecore_iwarp_cid_cleaned(struct ecore_hwfn *p_hwfn, u32 cid)
+{
+	cid -= ecore_cxt_get_proto_cid_start(p_hwfn,
+					     p_hwfn->p_rdma_info->proto);
+
+	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->lock);
+
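+	/* Preallocated (passive tcp) cids occupy the bottom of the range. */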
+	if (cid < ECORE_IWARP_PREALLOC_CNT) {
+		ecore_bmap_release_id(p_hwfn,
+				      &p_hwfn->p_rdma_info->tcp_cid_map,
+				      cid);
+	} else {
+		ecore_bmap_release_id(p_hwfn,
+				      &p_hwfn->p_rdma_info->cid_map,
+				      cid);
+	}
+
+	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->lock);
+}
+
+enum _ecore_status_t
+ecore_iwarp_create_qp(struct ecore_hwfn *p_hwfn,
+		      struct ecore_rdma_qp *qp,
+		      struct ecore_rdma_create_qp_out_params *out_params)
+{
+	struct iwarp_create_qp_ramrod_data *p_ramrod;
+	struct ecore_sp_init_data init_data;
+	struct ecore_spq_entry *p_ent;
+	enum _ecore_status_t rc;
+	u16 physical_queue;
+	u32 cid;
+
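+	/* A single shared queue page holds both the SQ and RQ PBLs at fixed
+	 * offsets; the out_params pointers below are carved out of it.
+	 */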
+	qp->shared_queue =
+		OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
+					&qp->shared_queue_phys_addr,
+					IWARP_SHARED_QUEUE_PAGE_SIZE);
+	if (!qp->shared_queue) {
+		DP_NOTICE(p_hwfn, false,
+			  "ecore iwarp create qp failed: cannot allocate memory (shared queue).\n");
+		return ECORE_NOMEM;
+	} else {
+		out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
+			IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
+		out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
+			IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
+		out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
+			IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
+		out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
+			IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
+	}
+
+	rc = ecore_iwarp_alloc_cid(p_hwfn, &cid);
+	if (rc != ECORE_SUCCESS)
+		goto err1;
+
+	qp->icid = (u16)cid;
+
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+	init_data.cid = qp->icid;
+	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+	rc = ecore_sp_init_request(p_hwfn, &p_ent,
+				   IWARP_RAMROD_CMD_ID_CREATE_QP,
+				   PROTOCOLID_IWARP, &init_data);
+	if (rc != ECORE_SUCCESS)
+		goto err1;
+
+	p_ramrod = &p_ent->ramrod.iwarp_create_qp;
+
+	SET_FIELD(p_ramrod->flags,
+		  IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
+		  qp->fmr_and_reserved_lkey);
+
+	SET_FIELD(p_ramrod->flags,
+		  IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP,
+		  qp->signal_all);
+
+	SET_FIELD(p_ramrod->flags,
+		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
+		  qp->incoming_rdma_read_en);
+
+	SET_FIELD(p_ramrod->flags,
+		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
+		  qp->incoming_rdma_write_en);
+
+	SET_FIELD(p_ramrod->flags,
+		  IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
+		  qp->incoming_atomic_en);
+
+	SET_FIELD(p_ramrod->flags,
+		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG,
+		  qp->use_srq);
+
+	p_ramrod->pd = qp->pd;
+	p_ramrod->sq_num_pages = qp->sq_num_pages;
+	p_ramrod->rq_num_pages = qp->rq_num_pages;
+
+	p_ramrod->qp_handle_for_cqe.hi = OSAL_CPU_TO_LE32(qp->qp_handle.hi);
+	p_ramrod->qp_handle_for_cqe.lo = OSAL_CPU_TO_LE32(qp->qp_handle.lo);
+
+	p_ramrod->cq_cid_for_sq =
+		OSAL_CPU_TO_LE32((p_hwfn->hw_info.opaque_fid << 16) |
+				 qp->sq_cq_id);
+	p_ramrod->cq_cid_for_rq =
+		OSAL_CPU_TO_LE32((p_hwfn->hw_info.opaque_fid << 16) |
+				 qp->rq_cq_id);
+
+	p_ramrod->dpi = OSAL_CPU_TO_LE16(qp->dpi);
+
+	physical_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+	p_ramrod->physical_q0 = OSAL_CPU_TO_LE16(physical_queue);
+	physical_queue = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+	p_ramrod->physical_q1 = OSAL_CPU_TO_LE16(physical_queue);
+
+	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+
+	if (rc != ECORE_SUCCESS)
+		goto err1;
+
+	return rc;
+
+err1:
+	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+			       qp->shared_queue,
+			       qp->shared_queue_phys_addr,
+			       IWARP_SHARED_QUEUE_PAGE_SIZE);
+
+	return rc;
+}
+
+static enum _ecore_status_t
+ecore_iwarp_modify_fw(struct ecore_hwfn *p_hwfn,
+		      struct ecore_rdma_qp *qp)
+{
+	struct iwarp_modify_qp_ramrod_data *p_ramrod;
+	struct ecore_sp_init_data init_data;
+	struct ecore_spq_entry *p_ent;
+	enum _ecore_status_t rc;
+
+	/* Get SPQ entry */
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.cid = qp->icid;
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+	rc = ecore_sp_init_request(p_hwfn, &p_ent,
+				   IWARP_RAMROD_CMD_ID_MODIFY_QP,
+				   p_hwfn->p_rdma_info->proto,
+				   &init_data);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
+	SET_FIELD(p_ramrod->flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN,
+		  0x1);
+	if (qp->iwarp_state == ECORE_IWARP_QP_STATE_CLOSING)
+		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
+	else
+		p_ramrod->transition_to_state = IWARP_MODIFY_QP_STATE_ERROR;
+
+	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x)rc=%d\n",
+		   qp->icid, rc);
+
+	return rc;
+}
+
+enum ecore_iwarp_qp_state
+ecore_roce2iwarp_state(enum ecore_roce_qp_state state)
+{
+	switch (state) {
+	case ECORE_ROCE_QP_STATE_RESET:
+	case ECORE_ROCE_QP_STATE_INIT:
+	case ECORE_ROCE_QP_STATE_RTR:
+		return ECORE_IWARP_QP_STATE_IDLE;
+	case ECORE_ROCE_QP_STATE_RTS:
+		return ECORE_IWARP_QP_STATE_RTS;
+	case ECORE_ROCE_QP_STATE_SQD:
+		return ECORE_IWARP_QP_STATE_CLOSING;
+	case ECORE_ROCE_QP_STATE_ERR:
+		return ECORE_IWARP_QP_STATE_ERROR;
+	case ECORE_ROCE_QP_STATE_SQE:
+		return ECORE_IWARP_QP_STATE_TERMINATE;
+	}
+	return ECORE_IWARP_QP_STATE_ERROR;
+}
+
+static enum ecore_roce_qp_state
+ecore_iwarp2roce_state(enum ecore_iwarp_qp_state state)
+{
+	switch (state) {
+	case ECORE_IWARP_QP_STATE_IDLE:
+		return ECORE_ROCE_QP_STATE_INIT;
+	case ECORE_IWARP_QP_STATE_RTS:
+		return ECORE_ROCE_QP_STATE_RTS;
+	case ECORE_IWARP_QP_STATE_TERMINATE:
+		return ECORE_ROCE_QP_STATE_SQE;
+	case ECORE_IWARP_QP_STATE_CLOSING:
+		return ECORE_ROCE_QP_STATE_SQD;
+	case ECORE_IWARP_QP_STATE_ERROR:
+		return ECORE_ROCE_QP_STATE_ERR;
+	}
+	return ECORE_ROCE_QP_STATE_ERR;
+}
+
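+/* Indexed by enum ecore_iwarp_qp_state; keep the order in sync. */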
+const char *iwarp_state_names[] = {
+	"IDLE",
+	"RTS",
+	"TERMINATE",
+	"CLOSING",
+	"ERROR",
+};
+
+enum _ecore_status_t
+ecore_iwarp_modify_qp(struct ecore_hwfn *p_hwfn,
+		      struct ecore_rdma_qp *qp,
+		      enum ecore_iwarp_qp_state new_state,
+		      bool internal)
+{
+	enum ecore_iwarp_qp_state prev_iw_state;
+	enum _ecore_status_t rc = 0;
+	bool modify_fw = false;
+
+	/* Modify QP can be called from the upper layer or as a result of an
+	 * async RST/FIN, therefore we need to protect the state with a lock.
+	 */
+	OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.qp_lock);
+	prev_iw_state = qp->iwarp_state;
+
+	if (prev_iw_state == new_state) {
+		OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.qp_lock);
+		return ECORE_SUCCESS;
+	}
+
+	switch (prev_iw_state) {
+	case ECORE_IWARP_QP_STATE_IDLE:
+		switch (new_state) {
+		case ECORE_IWARP_QP_STATE_RTS:
+			qp->iwarp_state = ECORE_IWARP_QP_STATE_RTS;
+			break;
+		case ECORE_IWARP_QP_STATE_ERROR:
+			qp->iwarp_state = ECORE_IWARP_QP_STATE_ERROR;
+			if (!internal)
+				modify_fw = true;
+			break;
+		default:
+			break;
+		}
+		break;
+	case ECORE_IWARP_QP_STATE_RTS:
+		switch (new_state) {
+		case ECORE_IWARP_QP_STATE_CLOSING:
+			if (!internal)
+				modify_fw = true;
+
+			qp->iwarp_state = ECORE_IWARP_QP_STATE_CLOSING;
+			break;
+		case ECORE_IWARP_QP_STATE_ERROR:
+			if (!internal)
+				modify_fw = true;
+			qp->iwarp_state = ECORE_IWARP_QP_STATE_ERROR;
+			break;
+		default:
+			break;
+		}
+		break;
+	case ECORE_IWARP_QP_STATE_ERROR:
+		switch (new_state) {
+		case ECORE_IWARP_QP_STATE_IDLE:
+			/* TODO: destroy flow -> need to destroy EP&QP */
+			qp->iwarp_state = new_state;
+			break;
+		case ECORE_IWARP_QP_STATE_CLOSING:
+			/* Could happen due to a race; do nothing. */
+			break;
+		default:
+			rc = ECORE_INVAL;
+		}
+		break;
+	case ECORE_IWARP_QP_STATE_TERMINATE:
+	case ECORE_IWARP_QP_STATE_CLOSING:
+		qp->iwarp_state = new_state;
+		break;
+	default:
+		break;
+	}
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) %s --> %s %s\n",
+		   qp->icid,
+		   iwarp_state_names[prev_iw_state],
+		   iwarp_state_names[qp->iwarp_state],
+		   internal ? "internal" : " ");
+
+	OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.qp_lock);
+
+	if (modify_fw)
+		ecore_iwarp_modify_fw(p_hwfn, qp);
+
+	return rc;
+}
+
+enum _ecore_status_t
+ecore_iwarp_fw_destroy(struct ecore_hwfn *p_hwfn,
+		       struct ecore_rdma_qp *qp)
+{
+	struct ecore_sp_init_data init_data;
+	struct ecore_spq_entry *p_ent;
+	enum _ecore_status_t rc;
+
+	/* Get SPQ entry */
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.cid = qp->icid;
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+	rc = ecore_sp_init_request(p_hwfn, &p_ent,
+				   IWARP_RAMROD_CMD_ID_DESTROY_QP,
+				   p_hwfn->p_rdma_info->proto,
+				   &init_data);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "QP(0x%x) rc = %d\n",  qp->icid, rc);
+
+	return rc;
+}
+
+static void ecore_iwarp_destroy_ep(struct ecore_hwfn *p_hwfn,
+				   struct ecore_iwarp_ep *ep,
+				   bool remove_from_active_list)
+{
+	OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+			       ep->ep_buffer_virt,
+			       ep->ep_buffer_phys,
+			       sizeof(*ep->ep_buffer_virt));
+
+	if (remove_from_active_list) {
+		OSAL_SPIN_LOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+		OSAL_LIST_REMOVE_ENTRY(&ep->list_entry,
+				       &p_hwfn->p_rdma_info->iwarp.ep_list);
+
+		OSAL_SPIN_UNLOCK(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+	}
+
+	if (ep->qp)
+		ep->qp->ep = OSAL_NULL;
+
+	OSAL_FREE(p_hwfn->p_dev, ep);
+}
+
+enum _ecore_status_t
+ecore_iwarp_destroy_qp(struct ecore_hwfn *p_hwfn,
+		       struct ecore_rdma_qp *qp)
+{
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+	struct ecore_iwarp_ep *ep = qp->ep;
+	struct ecore_iwarp_fpdu *fpdu;
+	int wait_count = 0;
+
+	fpdu = ecore_iwarp_get_curr_fpdu(p_hwfn, qp->icid);
+	if (fpdu && fpdu->incomplete_bytes)
+		DP_NOTICE(p_hwfn, false,
+			  "Pending Partial fpdu with incomplete bytes=%d\n",
+			  fpdu->incomplete_bytes);
+
+	if (qp->iwarp_state != ECORE_IWARP_QP_STATE_ERROR) {
+		rc = ecore_iwarp_modify_qp(p_hwfn, qp,
+					   ECORE_IWARP_QP_STATE_ERROR,
+					   false);
+
+		if (rc != ECORE_SUCCESS)
+			return rc;
+	}
+
+	/* Make sure ep is closed before returning and freeing memory. */
+	if (ep) {
+		while (ep->state != ECORE_IWARP_EP_CLOSED) {
+			DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
+				   "Waiting for ep->state to be closed...state=%x\n",
+				   ep->state);
+
+			OSAL_MSLEEP(100);
+			if (wait_count++ > 200) {
+				DP_NOTICE(p_hwfn, false, "ep state close timeout state=%x\n",
+					  ep->state);
+				break;
+			}
+		}
+
+		ecore_iwarp_destroy_ep(p_hwfn, ep, false);
+	}
+
+	rc = ecore_iwarp_fw_destroy(p_hwfn, qp);
+
+	if (qp->shared_queue)
+		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
+				       qp->shared_queue,
+				       qp->shared_queue_phys_addr,
+				       IWARP_SHARED_QUEUE_PAGE_SIZE);
+
+	return rc;
+}
+
+static enum _ecore_status_t
+ecore_iwarp_create_ep(struct ecore_hwfn *p_hwfn,
+		      struct ecore_iwarp_ep **ep_out)
+{
+	struct ecore_iwarp_ep *ep;
+	enum _ecore_status_t rc;
+
+	ep = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*ep));
+	if (!ep) {
+		DP_NOTICE(p_hwfn, false,
+			  "ecore create ep failed: cannot allocate memory (ep). rc = %d\n",
+			  ECORE_NOMEM);
+		return ECORE_NOMEM;
+	}
+
+	ep->state = ECORE_IWARP_EP_INIT;
+
+	/* ep_buffer is allocated once and is structured as follows:
+	 * [MAX_PRIV_DATA_LEN][MAX_PRIV_DATA_LEN][union async_output]
+	 * We could have allocated this in three calls, but since altogether
+	 * it is less than a page, we do one allocation and initialize the
+	 * pointers accordingly.
+	 */
+	ep->ep_buffer_virt = OSAL_DMA_ALLOC_COHERENT(
+		p_hwfn->p_dev,
+		&ep->ep_buffer_phys,
+		sizeof(*ep->ep_buffer_virt));
+
+	if (!ep->ep_buffer_virt) {
+		DP_NOTICE(p_hwfn, false,
+			  "ecore create ep failed: cannot allocate memory (ulp buffer). rc = %d\n",
+			  ECORE_NOMEM);
+		rc = ECORE_NOMEM;
+		goto err;
+	}
+
+	ep->sig = 0xdeadbeef;
+
+	*ep_out = ep;
+
+	return ECORE_SUCCESS;
+
+err:
+	OSAL_FREE(p_hwfn->p_dev, ep);
+	return rc;
+}
+
+static void
+ecore_iwarp_print_tcp_ramrod(struct ecore_hwfn *p_hwfn,
+			     struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
+{
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, ">>> PRINT TCP RAMROD\n");
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_mac=%x %x %x\n",
+		   p_tcp_ramrod->tcp.local_mac_addr_lo,
+		   p_tcp_ramrod->tcp.local_mac_addr_mid,
+		   p_tcp_ramrod->tcp.local_mac_addr_hi);
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_mac=%x %x %x\n",
+		   p_tcp_ramrod->tcp.remote_mac_addr_lo,
+		   p_tcp_ramrod->tcp.remote_mac_addr_mid,
+		   p_tcp_ramrod->tcp.remote_mac_addr_hi);
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "vlan_id=%x\n",
+		   p_tcp_ramrod->tcp.vlan_id);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "flags=%x\n",
+		   p_tcp_ramrod->tcp.flags);
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ip_version=%x\n",
+		   p_tcp_ramrod->tcp.ip_version);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_ip=%x.%x.%x.%x\n",
+		   p_tcp_ramrod->tcp.local_ip[0],
+		   p_tcp_ramrod->tcp.local_ip[1],
+		   p_tcp_ramrod->tcp.local_ip[2],
+		   p_tcp_ramrod->tcp.local_ip[3]);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_ip=%x.%x.%x.%x\n",
+		   p_tcp_ramrod->tcp.remote_ip[0],
+		   p_tcp_ramrod->tcp.remote_ip[1],
+		   p_tcp_ramrod->tcp.remote_ip[2],
+		   p_tcp_ramrod->tcp.remote_ip[3]);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "flow_label=%x\n",
+		   p_tcp_ramrod->tcp.flow_label);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "ttl=%x\n",
+		   p_tcp_ramrod->tcp.ttl);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "tos_or_tc=%x\n",
+		   p_tcp_ramrod->tcp.tos_or_tc);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "local_port=%x\n",
+		   p_tcp_ramrod->tcp.local_port);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "remote_port=%x\n",
+		   p_tcp_ramrod->tcp.remote_port);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "mss=%x\n",
+		   p_tcp_ramrod->tcp.mss);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "rcv_wnd_scale=%x\n",
+		   p_tcp_ramrod->tcp.rcv_wnd_scale);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "connect_mode=%x\n",
+		   p_tcp_ramrod->tcp.connect_mode);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "syn_ip_payload_length=%x\n",
+		   p_tcp_ramrod->tcp.syn_ip_payload_length);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "syn_phy_addr_lo=%x\n",
+		   p_tcp_ramrod->tcp.syn_phy_addr_lo);
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "syn_phy_addr_hi=%x\n",
+		   p_tcp_ramrod->tcp.syn_phy_addr_hi);
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA, "<<<f  PRINT TCP RAMROD\n");
+}
+
+/* Default values for tcp option2 */
+#define ECORE_IWARP_DEF_MAX_RT_TIME (0)
+#define ECORE_IWARP_DEF_CWND_FACTOR (4)
+#define ECORE_IWARP_DEF_KA_MAX_PROBE_CNT (5)
+#define ECORE_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */
+#define ECORE_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */
+
+static enum _ecore_status_t
+ecore_iwarp_tcp_offload(struct ecore_hwfn *p_hwfn,
+			struct ecore_iwarp_ep *ep)
+{
+	struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+	struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
+	struct ecore_sp_init_data init_data;
+	struct ecore_spq_entry *p_ent;
+	dma_addr_t async_output_phys;
+	dma_addr_t in_pdata_phys;
+	enum _ecore_status_t rc;
+	u16 physical_q;
+	u8 tcp_flags;
+	int i;
+
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.cid = ep->tcp_cid;
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
+		init_data.comp_mode = ECORE_SPQ_MODE_CB;
+	} else {
+		init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+	}
+
+	rc = ecore_sp_init_request(p_hwfn, &p_ent,
+				   IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
+				   PROTOCOLID_IWARP, &init_data);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;
+
+	/* Point to the "second half" of the ulp buffer */
+	in_pdata_phys = ep->ep_buffer_phys +
+		OFFSETOF(struct ecore_iwarp_ep_memory, in_pdata);
+	p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr.hi =
+		DMA_HI_LE(in_pdata_phys);
+	p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr.lo =
+		DMA_LO_LE(in_pdata_phys);
+	p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
+		OSAL_CPU_TO_LE16(sizeof(ep->ep_buffer_virt->in_pdata));
+
+	async_output_phys = ep->ep_buffer_phys +
+		OFFSETOF(struct ecore_iwarp_ep_memory, async_output);
+
+	p_tcp_ramrod->iwarp.async_eqe_output_buf.hi =
+		DMA_HI_LE(async_output_phys);
+	p_tcp_ramrod->iwarp.async_eqe_output_buf.lo =
+		DMA_LO_LE(async_output_phys);
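+	/* The ep pointer is handed to the FW as the async handle so the
+	 * async event handler can locate this connection.
+	 */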
+	p_tcp_ramrod->iwarp.handle_for_async.hi = OSAL_CPU_TO_LE32(PTR_HI(ep));
+	p_tcp_ramrod->iwarp.handle_for_async.lo = OSAL_CPU_TO_LE32(PTR_LO(ep));
+
+	physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+	p_tcp_ramrod->iwarp.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
+	physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+	p_tcp_ramrod->iwarp.physical_q1 = OSAL_CPU_TO_LE16(physical_q);
+	p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;
+
+	ecore_set_fw_mac_addr(&p_tcp_ramrod->tcp.remote_mac_addr_hi,
+			      &p_tcp_ramrod->tcp.remote_mac_addr_mid,
+			      &p_tcp_ramrod->tcp.remote_mac_addr_lo,
+			      ep->remote_mac_addr);
+	ecore_set_fw_mac_addr(&p_tcp_ramrod->tcp.local_mac_addr_hi,
+			      &p_tcp_ramrod->tcp.local_mac_addr_mid,
+			      &p_tcp_ramrod->tcp.local_mac_addr_lo,
+			      ep->local_mac_addr);
+
+	p_tcp_ramrod->tcp.vlan_id = OSAL_CPU_TO_LE16(ep->cm_info.vlan);
+
+	tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
+	p_tcp_ramrod->tcp.flags = 0;
+	SET_FIELD(p_tcp_ramrod->tcp.flags,
+		  TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
+		  !!(tcp_flags & ECORE_IWARP_TS_EN));
+
+	SET_FIELD(p_tcp_ramrod->tcp.flags,
+		  TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
+		  !!(tcp_flags & ECORE_IWARP_DA_EN));
+
+	p_tcp_ramrod->tcp.ip_version = ep->cm_info.ip_version;
+
+	for (i = 0; i < 4; i++) {
+		p_tcp_ramrod->tcp.remote_ip[i] =
+			OSAL_CPU_TO_LE32(ep->cm_info.remote_ip[i]);
+		p_tcp_ramrod->tcp.local_ip[i] =
+			OSAL_CPU_TO_LE32(ep->cm_info.local_ip[i]);
+	}
+
+	p_tcp_ramrod->tcp.remote_port =
+		OSAL_CPU_TO_LE16(ep->cm_info.remote_port);
+	p_tcp_ramrod->tcp.local_port = OSAL_CPU_TO_LE16(ep->cm_info.local_port);
+	p_tcp_ramrod->tcp.mss = OSAL_CPU_TO_LE16(ep->mss);
+	p_tcp_ramrod->tcp.flow_label = 0;
+	p_tcp_ramrod->tcp.ttl = 0x40;
+	p_tcp_ramrod->tcp.tos_or_tc = 0;
+
+	p_tcp_ramrod->tcp.max_rt_time = ECORE_IWARP_DEF_MAX_RT_TIME;
+	p_tcp_ramrod->tcp.cwnd = ECORE_IWARP_DEF_CWND_FACTOR *
+				 p_tcp_ramrod->tcp.mss;
+	p_tcp_ramrod->tcp.ka_max_probe_cnt = ECORE_IWARP_DEF_KA_MAX_PROBE_CNT;
+	p_tcp_ramrod->tcp.ka_timeout = ECORE_IWARP_DEF_KA_TIMEOUT;
+	p_tcp_ramrod->tcp.ka_interval = ECORE_IWARP_DEF_KA_INTERVAL;
+
+	p_tcp_ramrod->tcp.rcv_wnd_scale =
+		(u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
+	p_tcp_ramrod->tcp.connect_mode = ep->connect_mode;
+
+	if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
+		p_tcp_ramrod->tcp.syn_ip_payload_length =
+			OSAL_CPU_TO_LE16(ep->syn_ip_payload_length);
+		p_tcp_ramrod->tcp.syn_phy_addr_hi =
+			DMA_HI_LE(ep->syn_phy_addr);
+		p_tcp_ramrod->tcp.syn_phy_addr_lo =
+			DMA_LO_LE(ep->syn_phy_addr);
+	}
+
+	ecore_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);
+
+	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
+		   "EP(0x%x) Offload completed rc=%d\n" , ep->tcp_cid, rc);
+
+	return rc;
+}
+
+/* This function should be called after IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE
+ * is received. It will be called from the DPC context.
+ */
+static enum _ecore_status_t
+ecore_iwarp_mpa_offload(struct ecore_hwfn *p_hwfn,
+			struct ecore_iwarp_ep *ep)
+{
+	struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
+	struct ecore_iwarp_info *iwarp_info;
+	struct ecore_sp_init_data init_data;
+	struct ecore_spq_entry *p_ent;
+	dma_addr_t async_output_phys;
+	dma_addr_t out_pdata_phys;
+	dma_addr_t in_pdata_phys;
+	struct ecore_rdma_qp *qp;
+	bool reject;
+	enum _ecore_status_t rc;
+
+	if (!ep)
+		return ECORE_INVAL;
+
+	qp = ep->qp;
+	reject = (qp == OSAL_NULL);
+
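+	/* On reject there is no QP to work with, so the ramrod is sent on
+	 * the tcp cid rather than a QP icid.
+	 */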
+	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+	init_data.cid = reject ? ep->tcp_cid : qp->icid;
+	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+	if (ep->connect_mode == TCP_CONNECT_ACTIVE || !ep->event_cb)
+		init_data.comp_mode = ECORE_SPQ_MODE_CB;
+	else
+		init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+	rc = ecore_sp_init_request(p_hwfn, &p_ent,
+				   IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
+				   PROTOCOLID_IWARP, &init_data);
+
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
+	out_pdata_phys = ep->ep_buffer_phys +
+		OFFSETOF(struct ecore_iwarp_ep_memory, out_pdata);
+	p_mpa_ramrod->common.outgoing_ulp_buffer.addr.hi =
+		DMA_HI_LE(out_pdata_phys);
+	p_mpa_ramrod->common.outgoing_ulp_buffer.addr.lo =
+		DMA_LO_LE(out_pdata_phys);
+	p_mpa_ramrod->common.outgoing_ulp_buffer.len =
+		ep->cm_info.private_data_len;
+	p_mpa_ramrod->common.crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
+
+	p_mpa_ramrod->common.out_rq.ord = ep->cm_info.ord;
+	p_mpa_ramrod->common.out_rq.ird = ep->cm_info.ird;
+
+	p_mpa_ramrod->tcp_cid = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
+
+	in_pdata_phys = ep->ep_buffer_phys +
+		OFFSETOF(struct ecore_iwarp_ep_memory, in_pdata);
+	p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
+	p_mpa_ramrod->incoming_ulp_buffer.addr.hi =
+		DMA_HI_LE(in_pdata_phys);
+	p_mpa_ramrod->incoming_ulp_buffer.addr.lo =
+		DMA_LO_LE(in_pdata_phys);
+	p_mpa_ramrod->incoming_ulp_buffer.len =
+		OSAL_CPU_TO_LE16(sizeof(ep->ep_buffer_virt->in_pdata));
+	async_output_phys = ep->ep_buffer_phys +
+		OFFSETOF(struct ecore_iwarp_ep_memory, async_output);
+	p_mpa_ramrod->async_eqe_output_buf.hi =
+		DMA_HI_LE(async_output_phys);
+	p_mpa_ramrod->async_eqe_output_buf.lo =
+		DMA_LO_LE(async_output_phys);
+	p_mpa_ramrod->handle_for_async.hi = OSAL_CPU_TO_LE32(PTR_HI(ep));
+	p_mpa_ramrod->handle_for_async.lo = OSAL_CPU_TO_LE32(PTR_LO(ep));
+
+	if (!reject) {
+		p_mpa_ramrod->shared_queue_addr.hi =
+			DMA_HI_LE(qp->shared_queue_phys_addr);
+		p_mpa_ramrod->shared_queue_addr.lo =
+			DMA_LO_LE(qp->shared_queue_phys_addr);
+
+		p_mpa_ramrod->stats_counter_id =
+			RESC_START(p_hwfn, ECORE_RDMA_STATS_QUEUE) +
+			qp->stats_queue;
+	} else {
+		p_mpa_ramrod->common.reject = 1;
+	}
+
+	iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+	p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
+	p_mpa_ramrod->mode = ep->mpa_rev;
+	SET_FIELD(p_mpa_ramrod->rtr_pref,
+		  IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED,
+		  ep->rtr_type);
+
+	ep->state = ECORE_IWARP_EP_MPA_OFFLOADED;
+	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+	if (!reject)
+		ep->cid = qp->icid; /* Now they're migrated. */
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_RDMA,
+		   "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
+		   reject ? 0xffff : qp->icid, ep->tcp_cid, rc, ep->cm_info.ird,
+		   ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
+	return rc;
+}
+
+static void
+ecore_iwarp_mpa_received(struct ecore_hwfn *p_hwfn,
+			 struct ecore_iwarp_ep *ep)
+{
+	struct ecore_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+	struct ecore_iwarp_cm_event_params params;
+	struct mpa_v2_hdr *mpa_v2_params;
+	union async_output *async_data;
+	u16 mpa_ord, mpa_ird;
+	u8 mpa_hdr_size = 0;
+	u8 mpa_rev;
+

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

