PERFORCE change 128761 for review
Kip Macy
kmacy at FreeBSD.org
Tue Nov 6 19:45:27 PST 2007
http://perforce.freebsd.org/chv.cgi?CH=128761
Change 128761 by kmacy at kmacy:storage:toestack on 2007/11/07 03:44:24
import connection accept handling - partially port except for synq handling
Affected files ...
.. //depot/projects/toestack/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c#17 edit
Differences ...
==== //depot/projects/toestack/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c#17 (text+ko) ====
@@ -138,6 +138,12 @@
static void t3_send_reset(struct socket *so);
+static inline
+int is_t3a(const struct toedev *dev)
+{
+ return (dev->ttid == TOE_ID_CHELSIO_T3);
+}
+
/*
* Determine whether to send a CPL message now or defer it. A message is
* deferred if the connection is in SYN_SENT since we don't know the TID yet.
@@ -312,7 +318,7 @@
struct tcpcb *tp = sototcpcb(so);
struct toepcb *toep = tp->t_toe;
unsigned int tid = toep->tp_tid;
-
+
d = TOM_DATA(TOE_DEV(so));
if (tp->t_state != TCPS_SYN_SENT)
@@ -321,17 +327,10 @@
if (toep->tp_flags & TP_FIN_SENT)
return;
- m = m_gethdr(M_NOWAIT, MT_DATA);
- if (m == NULL) {
- /*
- * XXX
- */
- printf("need to defer connection close to taskq thread!!!\n");
- return;
- }
+ m = m_gethdr_nofail(sizeof(*req));
+
toep->tp_flags |= TP_FIN_SENT;
req = mtod(m, struct cpl_close_con_req *);
- m->m_pkthdr.len = m->m_len = sizeof(*req);
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON));
req->wr.wr_lo = htonl(V_WR_TID(tid));
@@ -372,21 +371,11 @@
struct toepcb *toep = tp->t_toe;
struct toedev *tdev = toep->tp_toedev;
- m = m_gethdr(M_NOWAIT, MT_DATA);
-
- if (m == NULL) {
- /*
- * XXX need to cache mbufs for nofail allocation
- */
- if (nofail)
- log(LOG_ERR, "failing nofail t3_send_rx_credits!!!\n");
- return (0);
- }
+ m = m_gethdr_nofail(sizeof(*req));
printf("returning %u credits to HW\n", credits);
req = mtod(m, struct cpl_rx_data_ack *);
- m->m_pkthdr.len = m->m_len = sizeof(*req);
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tp_tid));
req->credit_dack = htonl(dack | V_RX_CREDITS(credits));
@@ -546,12 +535,7 @@
if (tp->t_state == TCPS_CLOSED || (toep->tp_flags & TP_ABORT_SHUTDOWN))
return;
- m = m_gethdr(M_NOWAIT, MT_DATA);
- if (m == NULL) {
- /*
- * XXX need lowmem cache
- */
- }
+ m = m_gethdr_nofail(sizeof(struct cpl_set_tcb_field));
__set_tcb_field(so, m, word, mask, val, 1);
}
@@ -1189,17 +1173,10 @@
/* Purge the send queue so we don't send anything after an abort. */
sbflush(&so->so_snd);
-#ifdef notyet
- if (sock_flag(so, CLOSE_CON_REQUESTED) && is_t3a(TOE_DEV(sk)))
+ if ((toep->tp_flags & TP_CLOSE_CON_REQUESTED) && is_t3a(TOE_DEV(so)))
mode |= CPL_ABORT_POST_CLOSE_REQ;
-#endif
- m = m_gethdr(M_NOWAIT, MT_DATA);
- if (m == NULL) {
- /*
- * XXX add lowmem cache
- */
- }
- m->m_pkthdr.len = m->m_len = sizeof(*req);
+
+ m = m_gethdr_nofail(sizeof(*req));
m_set_priority(m, mkprio(CPL_PRIORITY_DATA, so));
set_arp_failure_handler(m, abort_arp_failure);
@@ -1449,10 +1426,11 @@
#ifdef T3_TRACE
T3_TRACE0(TIDTB(sk),"do_peer_fin:");
#endif
-#ifdef notyet
- if (!is_t3a(TOE_DEV(sk)) && sock_flag(so, ABORT_RPL_PENDING))
+
+ if (!is_t3a(TOE_DEV(so)) && (toep->tp_flags & TP_ABORT_RPL_PENDING))
goto out;
+#ifdef notyet
if (ULP_MODE(tp) == ULP_MODE_TCPDDP) {
keep = handle_peer_close_data(so, skb);
if (keep < 0)
@@ -1503,9 +1481,7 @@
sk_wake_async(so, 1, POLL_IN);
#endif
}
-#ifdef notyet
out:
-#endif
if (!keep)
m_free(m);
}
@@ -1602,8 +1578,679 @@
return (0);
}
+/*
+ * Process abort replies. We only process these messages if we anticipate
+ * them as the coordination between SW and HW in this area is somewhat lacking
+ * and sometimes we get ABORT_RPLs after we are done with the connection that
+ * originated the ABORT_REQ.
+ */
+static void
+process_abort_rpl(struct socket *so, struct mbuf *m)
+{
+ struct tcpcb *tp = sototcpcb(so);
+ struct toepcb *toep = tp->t_toe;
+
+#ifdef T3_TRACE
+ T3_TRACE1(TIDTB(sk),
+ "process_abort_rpl: GTS rpl pending %d",
+ sock_flag(sk, ABORT_RPL_PENDING));
+#endif
+
+ if (toep->tp_flags & TP_ABORT_RPL_PENDING) {
+ if (!(toep->tp_flags & TP_ABORT_RPL_RCVD) && !is_t3a(TOE_DEV(so)))
+ toep->tp_flags |= TP_ABORT_RPL_RCVD;
+ else {
+ toep->tp_flags &= ~(TP_ABORT_RPL_RCVD|TP_ABORT_RPL_PENDING);
+
+ if (!(toep->tp_flags & TP_ABORT_REQ_RCVD) ||
+ !is_t3a(TOE_DEV(so))) {
+ if (toep->tp_flags & TP_ABORT_REQ_RCVD)
+ panic("TP_ABORT_REQ_RCVD set");
+ t3_release_offload_resources(so);
+ tcp_close(tp);
+ }
+ }
+ }
+ m_free(m);
+}
+
+/*
+ * Handle an ABORT_RPL_RSS CPL message.
+ */
+static int
+do_abort_rpl(struct t3cdev *cdev, struct mbuf *m, void *ctx)
+{
+ struct socket *so;
+ struct cpl_abort_rpl_rss *rpl = cplhdr(m);
+ struct toepcb *toep;
+
+ /*
+ * Ignore replies to post-close aborts indicating that the abort was
+ * requested too late. These connections are terminated when we get
+ * PEER_CLOSE or CLOSE_CON_RPL and by the time the abort_rpl_rss
+ * arrives the TID is either no longer used or it has been recycled.
+ */
+ if (rpl->status == CPL_ERR_ABORT_FAILED) {
+discard:
+ m_free(m);
+ return (0);
+ }
+
+ so = (struct socket *)ctx;
+ /*
+ * Sometimes we've already closed the socket, e.g., a post-close
+ * abort races with ABORT_REQ_RSS, the latter frees the socket
+ * expecting the ABORT_REQ will fail with CPL_ERR_ABORT_FAILED,
+ * but FW turns the ABORT_REQ into a regular one and so we get
+ * ABORT_RPL_RSS with status 0 and no socket. Only on T3A.
+ */
+ if (!so)
+ goto discard;
+
+ toep = sototcpcb(so)->t_toe;
+ toepcb_hold(toep);
+ process_abort_rpl(so, m);
+ toepcb_release(toep);
+ return (0);
+}
+
/*
+ * Convert the status code of an ABORT_REQ into an errno value.  Also
+ * indicate whether RST should be sent in response.
+ */
+static int
+abort_status_to_errno(struct socket *so, int abort_reason, int *need_rst)
+{
+ struct tcpcb *tp = sototcpcb(so);
+
+ switch (abort_reason) {
+ case CPL_ERR_BAD_SYN:
+#if 0
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONSYN); // fall through
+#endif
+ case CPL_ERR_CONN_RESET:
+ // XXX need to handle SYN_RECV due to crossed SYNs
+ return (tp->t_state == TCPS_CLOSE_WAIT ? EPIPE : ECONNRESET);
+ case CPL_ERR_XMIT_TIMEDOUT:
+ case CPL_ERR_PERSIST_TIMEDOUT:
+ case CPL_ERR_FINWAIT2_TIMEDOUT:
+ case CPL_ERR_KEEPALIVE_TIMEDOUT:
+#if 0
+ NET_INC_STATS_BH(LINUX_MIB_TCPABORTONTIMEOUT);
+#endif
+ return (ETIMEDOUT);
+ default:
+ return (EIO);
+ }
+}
+
+static inline void
+set_abort_rpl_wr(struct mbuf *m, unsigned int tid, int cmd)
+{
+ struct cpl_abort_rpl *rpl = cplhdr(m);
+
+ rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
+ rpl->wr.wr_lo = htonl(V_WR_TID(tid));
+ OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ rpl->cmd = cmd;
+}
+
+static void
+send_deferred_abort_rpl(struct toedev *tdev, struct mbuf *m)
+{
+ struct mbuf *reply_mbuf;
+ struct cpl_abort_req_rss *req = cplhdr(m);
+
+ reply_mbuf = m_gethdr_nofail(sizeof(struct cpl_abort_rpl));
+ m_set_priority(m, CPL_PRIORITY_DATA);
+ m->m_len = m->m_pkthdr.len = sizeof(struct cpl_abort_rpl);
+ set_abort_rpl_wr(reply_mbuf, GET_TID(req), req->status);
+ cxgb_ofld_send(TOM_DATA(tdev)->cdev, reply_mbuf);
+ m_free(m);
+}
+
+/*
+ * Returns whether an ABORT_REQ_RSS message is a negative advice.
+ */
+static inline int
+is_neg_adv_abort(unsigned int status)
+{
+ return status == CPL_ERR_RTX_NEG_ADVICE ||
+ status == CPL_ERR_PERSIST_NEG_ADVICE;
+}
+
+static void
+send_abort_rpl(struct mbuf *m, struct toedev *tdev, int rst_status)
+{
+ struct mbuf *reply_mbuf;
+ struct cpl_abort_req_rss *req = cplhdr(m);
+
+ reply_mbuf = m_gethdr(M_NOWAIT, MT_DATA);
+
+ if (!reply_mbuf) {
+ /* Defer the reply. Stick rst_status into req->status. */
+ req->status = rst_status;
+ t3_defer_reply(m, tdev, send_deferred_abort_rpl);
+ return;
+ }
+
+ m_set_priority(reply_mbuf, CPL_PRIORITY_DATA);
+ set_abort_rpl_wr(reply_mbuf, GET_TID(req), rst_status);
+ m_free(m);
+
+ /*
+ * XXX need to sync with ARP as for SYN_RECV connections we can send
+ * these messages while ARP is pending. For other connection states
+ * it's not a problem.
+ */
+ cxgb_ofld_send(TOM_DATA(tdev)->cdev, reply_mbuf);
+}
+
+static void
+cleanup_syn_rcv_conn(struct socket *child, struct socket *parent)
+{
+ UNIMPLEMENTED();
+#ifdef notyet
+ struct request_sock *req = child->sk_user_data;
+
+ inet_csk_reqsk_queue_removed(parent, req);
+ synq_remove(tcp_sk(child));
+ __reqsk_free(req);
+ child->sk_user_data = NULL;
+#endif
+}
+
+/*
+ * Performs the actual work to abort a SYN_RECV connection.
+ */
+static void
+do_abort_syn_rcv(struct socket *child, struct socket *parent)
+{
+ struct tcpcb *parenttp = sototcpcb(parent);
+ struct tcpcb *childtp = sototcpcb(child);
+
+ /*
+ * If the server is still open we clean up the child connection,
+ * otherwise the server already did the clean up as it was purging
+ * its SYN queue and the skb was just sitting in its backlog.
+ */
+ if (__predict_false(parenttp->t_state == TCPS_LISTEN)) {
+ cleanup_syn_rcv_conn(child, parent);
+ t3_release_offload_resources(child);
+ tcp_close(childtp);
+ }
+}
+
+
+/*
+ * Handle abort requests for a SYN_RECV connection. These need extra work
+ * because the socket is on its parent's SYN queue.
+ */
+static int
+abort_syn_rcv(struct socket *so, struct mbuf *m)
+{
+ UNIMPLEMENTED();
+#ifdef notyet
+ struct socket *parent;
+ struct toedev *tdev = TOE_DEV(so);
+ struct t3cdev *cdev = TOM_DATA(tdev)->cdev;
+ struct socket *oreq = so->so_incomp;
+ struct t3c_tid_entry *t3c_stid;
+ struct tid_info *t;
+
+ if (!oreq)
+ return -1; /* somehow we are not on the SYN queue */
+
+ t = &(T3C_DATA(cdev))->tid_maps;
+ t3c_stid = lookup_stid(t, oreq->ts_recent);
+ parent = ((struct listen_ctx *)t3c_stid->ctx)->lso;
+
+ SOCK_LOCK(parent);
+ do_abort_syn_rcv(so, parent);
+ send_abort_rpl(m, tdev, CPL_ABORT_NO_RST);
+ SOCK_UNLOCK(parent);
+#endif
+ return (0);
+}
+
+/*
+ * Process abort requests. If we are waiting for an ABORT_RPL we ignore this
+ * request except that we need to reply to it.
+ */
+static void
+process_abort_req(struct socket *so, struct mbuf *m, struct toedev *tdev)
+{
+ int rst_status = CPL_ABORT_NO_RST;
+ const struct cpl_abort_req_rss *req = cplhdr(m);
+ struct tcpcb *tp = sototcpcb(so);
+ struct toepcb *toep = tp->t_toe;
+
+ if ((toep->tp_flags & TP_ABORT_REQ_RCVD) == 0) {
+ toep->tp_flags |= TP_ABORT_REQ_RCVD;
+ toep->tp_flags |= TP_ABORT_SHUTDOWN;
+ m_free(m);
+ return;
+ }
+
+ toep->tp_flags &= ~TP_ABORT_REQ_RCVD;
+ /*
+ * Three cases to consider:
+ * a) We haven't sent an abort_req; close the connection.
+ * b) We have sent a post-close abort_req that will get to TP too late
+ * and will generate a CPL_ERR_ABORT_FAILED reply. The reply will
+ * be ignored and the connection should be closed now.
+ * c) We have sent a regular abort_req that will get to TP too late.
+ * That will generate an abort_rpl with status 0, wait for it.
+ */
+ if (((toep->tp_flags & TP_ABORT_RPL_PENDING) == 0) ||
+ (is_t3a(TOE_DEV(so)) && (toep->tp_flags & TP_CLOSE_CON_REQUESTED))) {
+ so->so_error = abort_status_to_errno(so, req->status,
+ &rst_status);
+#if 0
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
+#endif
+ /*
+ * SYN_RECV needs special processing. If abort_syn_rcv()
+ * returns 0 it has taken care of the abort.
+ */
+ if ((tp->t_state == TCPS_SYN_RECEIVED) && !abort_syn_rcv(so, m))
+ return;
+
+ t3_release_offload_resources(so);
+ tcp_close(tp);
+ }
+
+ send_abort_rpl(m, tdev, rst_status);
+}
+
+/*
+ * Handle an ABORT_REQ_RSS CPL message.
+ */
+static int
+do_abort_req(struct t3cdev *cdev, struct mbuf *m, void *ctx)
+{
+ const struct cpl_abort_req_rss *req = cplhdr(m);
+ struct socket *so = (struct socket *)ctx;
+ struct tcpcb *tp = sototcpcb(so);
+ struct toepcb *toep = tp->t_toe;
+
+ if (is_neg_adv_abort(req->status)) {
+ m_free(m);
+ return (0);
+ }
+
+ VALIDATE_SOCK(so);
+ toepcb_hold(toep);
+ process_abort_req(so, m, TOE_DEV(so));
+ toepcb_release(toep);
+ return (0);
+}
+
+static void
+pass_open_abort(struct socket *child, struct socket *parent, struct mbuf *m)
+{
+ struct toedev *tdev = TOE_DEV(parent);
+
+ do_abort_syn_rcv(child, parent);
+ if (tdev->ttid == TOE_ID_CHELSIO_T3) {
+ struct cpl_pass_accept_rpl *rpl = cplhdr(m);
+
+ rpl->opt0h = htonl(F_TCAM_BYPASS);
+ rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
+ cxgb_ofld_send(TOM_DATA(tdev)->cdev, m);
+ } else
+ m_free(m);
+}
+
+static void
+handle_pass_open_arp_failure(struct socket *so, struct mbuf *m)
+{
+ UNIMPLEMENTED();
+
+#ifdef notyet
+ struct t3cdev *cdev;
+ struct socket *parent;
+ struct socket *oreq;
+ struct t3c_tid_entry *t3c_stid;
+ struct tid_info *t;
+ struct tcpcb *otp, *tp = sototcpcb(so);
+ struct toepcb *toep = tp->t_toe;
+
+ /*
+ * If the connection is being aborted due to the parent listening
+ * socket going away there's nothing to do, the ABORT_REQ will close
+ * the connection.
+ */
+ if (toep->tp_flags & TP_ABORT_RPL_PENDING) {
+ m_free(m);
+ return;
+ }
+
+ oreq = so->so_incomp;
+ otp = sototcpcb(oreq);
+
+ cdev = T3C_DEV(so);
+ t = &(T3C_DATA(cdev))->tid_maps;
+ t3c_stid = lookup_stid(t, otp->ts_recent);
+ parent = ((struct listen_ctx *)t3c_stid->ctx)->lso;
+
+ SOCK_LOCK(parent);
+ pass_open_abort(so, parent, m);
+ SOCK_UNLOCK(parent);
+#endif
+}
+
+/*
+ * Handle an ARP failure for a CPL_PASS_ACCEPT_RPL. This is treated similarly
+ * to an ABORT_REQ_RSS in SYN_RECV as both events need to tear down a SYN_RECV
+ * connection.
+ */
+static void
+pass_accept_rpl_arp_failure(struct t3cdev *cdev, struct mbuf *m)
+{
+
+ printf("%s UNIMPLEMENTED\n", __FUNCTION__);
+#ifdef notyet
+ TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+ BLOG_SKB_CB(skb)->dev = TOE_DEV(skb->sk);
+ process_cpl_msg_ref(handle_pass_open_arp_failure, skb->sk, skb);
+#endif
+}
+
+/*
+ * Create a new socket as a child of the listening socket 'lsk' and initialize
+ * with the information in the supplied PASS_ACCEPT_REQ message.
+ */
+static struct socket *
+mk_pass_sock(struct socket *lso, struct toedev *dev, int tid,
+ struct cpl_pass_accept_req *req)
+{
+ UNIMPLEMENTED();
+
+#ifdef notyet
+ struct sock *newso;
+ struct l2t_entry *e;
+ struct rtentry *dst;
+ struct tcpcb *newtp;
+ struct ifp *egress;
+ struct socket *oreq = reqsk_alloc(&t3_rsk_ops);
+
+ if (!oreq)
+ goto out_err;
+
+ tcp_rsk(oreq)->rcv_isn = ntohl(req->rcv_isn);
+ inet_rsk(oreq)->rmt_port = req->peer_port;
+ t3_set_req_addr(oreq, req->local_ip, req->peer_ip);
+ t3_set_req_opt(oreq, NULL);
+ if (sysctl_tcp_window_scaling) {
+ inet_rsk(oreq)->wscale_ok = 1;
+ inet_rsk(oreq)->snd_wscale = req->tcp_options.wsf;
+ }
+
+ dst = route_req(lsk, oreq);
+ if (!dst)
+ goto free_or;
+
+ newsk = tcp_create_openreq_child(lsk, oreq, tcphdr_skb);
+ if (!newsk)
+ goto free_dst;
+
+ egress = offload_get_phys_egress(dst->neighbour->dev, newsk, TOE_OPEN);
+ if (!egress || TOEDEV(egress) != dev)
+ goto free_dst;
+
+ e = t3_l2t_get(TOM_DATA(dev)->cdev, dst->neighbour, egress);
+ if (!e)
+ goto free_sk;
+
+
+ if (sock_flag(newsk, SOCK_KEEPOPEN))
+ inet_csk_delete_keepalive_timer(newsk);
+ oreq->ts_recent = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+ newsk->sk_user_data = oreq;
+ sk_setup_caps(newsk, dst);
+
+ newtp = tcp_sk(newsk);
+ init_offload_sk(newsk, dev, tid, e, dst);
+ DELACK_SEQ(newtp) = newtp->rcv_nxt;
+ RCV_WSCALE(newtp) = select_rcv_wscale(tcp_full_space(newsk),
+ WSCALE_OK(newtp),
+ newtp->window_clamp);
+
+#ifdef LINUX_2_4
+ newsk->daddr = req->peer_ip;
+ newsk->rcv_saddr = req->local_ip;
+ newsk->saddr = req->local_ip;
+#else
+ inet_sk(newsk)->daddr = req->peer_ip;
+ inet_sk(newsk)->rcv_saddr = req->local_ip;
+ inet_sk(newsk)->saddr = req->local_ip;
+#endif /* LINUX_2_4 */
+
+ lsk->sk_prot->hash(newsk);
+ inet_inherit_port(&tcp_hashinfo, lsk, newsk);
+ install_offload_ops(newsk);
+ bh_unlock_sock(newsk); // counters tcp_create_openreq_child()
+ return newsk;
+
+free_sk:
+ sk_free(newsk);
+free_dst:
+ dst_release(dst);
+free_or:
+ __reqsk_free(oreq);
+out_err:
+#endif
+ return NULL;
+}
+
+/*
+ * Populate a reject CPL_PASS_ACCEPT_RPL WR.
+ */
+static void
+mk_pass_accept_rpl(struct mbuf *reply_mbuf, struct mbuf *req_mbuf)
+{
+ struct cpl_pass_accept_req *req = cplhdr(req_mbuf);
+ struct cpl_pass_accept_rpl *rpl = cplhdr(reply_mbuf);
+ unsigned int tid = GET_TID(req);
+
+ m_set_priority(reply_mbuf, CPL_PRIORITY_SETUP);
+ rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, tid));
+ rpl->peer_ip = req->peer_ip; // req->peer_ip not overwritten yet
+ rpl->opt0h = htonl(F_TCAM_BYPASS);
+ rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT);
+ rpl->opt2 = 0;
+ rpl->rsvd = rpl->opt2; /* workaround for HW bug */
+}
+
+/*
+ * Send a deferred reject to an accept request.
+ */
+static void
+reject_pass_request(struct toedev *tdev, struct mbuf *m)
+{
+ struct mbuf *reply_mbuf;
+
+ reply_mbuf = m_gethdr_nofail(sizeof(struct cpl_pass_accept_rpl));
+ mk_pass_accept_rpl(reply_mbuf, m);
+ cxgb_ofld_send(TOM_DATA(tdev)->cdev, reply_mbuf);
+ m_free(m);
+}
+
+/*
+ * Process a CPL_PASS_ACCEPT_REQ message. Does the part that needs the socket
+ * lock held. Note that the sock here is a listening socket that is not owned
+ * by the TOE.
+ */
+static void
+process_pass_accept_req(struct socket *so, struct mbuf *m, struct toedev *tdev)
+{
+#ifdef notyet
+ int rt_flags;
+#endif
+ struct socket *newso;
+ struct l2t_entry *e;
+ struct iff_mac tim;
+ struct mbuf *reply_mbuf, *ddp_mbuf = NULL;
+ struct cpl_pass_accept_rpl *rpl;
+ struct cpl_pass_accept_req *req = cplhdr(m);
+ unsigned int tid = GET_TID(req);
+ struct tom_data *d = TOM_DATA(tdev);
+ struct t3cdev *cdev = d->cdev;
+ struct tcpcb *tp = sototcpcb(so);
+ struct toepcb *toep, *newtoep;
+
+ UNIMPLEMENTED();
+
+ reply_mbuf = m_gethdr(M_NOWAIT, MT_DATA);
+ if (__predict_false(!reply_mbuf)) {
+ if (tdev->ttid == TOE_ID_CHELSIO_T3)
+ t3_defer_reply(m, tdev, reject_pass_request);
+ else {
+ cxgb_queue_tid_release(cdev, tid);
+ m_free(m);
+ }
+ goto out;
+ }
+
+ if (tp->t_state != TCPS_LISTEN)
+ goto reject;
+#ifdef notyet
+ if (inet_csk_reqsk_queue_is_full(sk))
+ goto reject;
+ if (sk_acceptq_is_full(sk) && d->conf.soft_backlog_limit)
+ goto reject;
+#endif
+ tim.mac_addr = req->dst_mac;
+ tim.vlan_tag = ntohs(req->vlan_tag);
+ if (cdev->ctl(cdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev)
+ goto reject;
+#ifdef notyet
+ if (ip_route_input(skb, req->local_ip, req->peer_ip,
+ G_PASS_OPEN_TOS(ntohl(req->tos_tid)), tim.dev))
+ goto reject;
+ rt_flags = ((struct rtable *)skb->dst)->rt_flags &
+ (RTCF_BROADCAST | RTCF_MULTICAST | RTCF_LOCAL);
+ dst_release(skb->dst); // done with the input route, release it
+ skb->dst = NULL;
+ if (rt_flags != RTCF_LOCAL)
+ goto reject;
+#endif
+ newso = mk_pass_sock(so, tdev, tid, req);
+ if (!newso)
+ goto reject;
+#ifdef notyet
+ inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT);
+ synq_add(sk, newsk);
+#endif
+ /* Don't get a reference, newso starts out with ref count 2 */
+ cxgb_insert_tid(cdev, d->client, newso, tid);
+
+ if (newtoep->tp_ulp_mode) {
+ ddp_mbuf = m_gethdr(M_NOWAIT, MT_DATA);
+
+ if (!ddp_mbuf)
+ newtoep->tp_ulp_mode = 0;
+ }
+#ifdef notyet
+ reply_skb->sk = newsk;
+#endif
+ set_arp_failure_handler(reply_mbuf, pass_accept_rpl_arp_failure);
+
+ e = newtoep->tp_l2t;
+
+ rpl = cplhdr(reply_mbuf);
+ rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
+ OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, tid));
+ rpl->peer_ip = req->peer_ip; // req->peer_ip is not overwritten
+ rpl->opt0h = htonl(calc_opt0h(newso) | V_L2T_IDX(e->idx) |
+ V_TX_CHANNEL(e->smt_idx));
+ rpl->opt0l_status = htonl(calc_opt0l(newso) |
+ CPL_PASS_OPEN_ACCEPT);
+ rpl->opt2 = htonl(calc_opt2(newso));
+
+ rpl->rsvd = rpl->opt2; /* workaround for HW bug */
+ m_set_priority(reply_mbuf, mkprio(CPL_PRIORITY_SETUP, newso));
+ l2t_send(cdev, reply_mbuf, e);
+ m_free(m);
+ if (toep->tp_ulp_mode) {
+ __set_tcb_field(newso, ddp_mbuf, W_TCB_RX_DDP_FLAGS,
+ V_TF_DDP_OFF(1) |
+ TP_DDP_TIMER_WORKAROUND_MASK,
+ V_TF_DDP_OFF(1) |
+ TP_DDP_TIMER_WORKAROUND_VAL, 1);
+
+ return;
+ }
+
+reject:
+ if (tdev->ttid == TOE_ID_CHELSIO_T3)
+ mk_pass_accept_rpl(reply_mbuf, m);
+ else {
+#ifdef notyet
+ __skb_trim(reply_skb, 0);
+ mk_tid_release(reply_mbuf, NULL, tid);
+#endif
+ }
+ cxgb_ofld_send(cdev, reply_mbuf);
+ m_free(m);
+out:
+#if 0
+ TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
+#else
+ return;
+#endif
+}
+
+/*
+ * Handle a CPL_PASS_ACCEPT_REQ message.
+ */
+static int
+do_pass_accept_req(struct t3cdev *cdev, struct mbuf *m, void *ctx)
+{
+ struct listen_ctx *listen_ctx = (struct listen_ctx *)ctx;
+ struct socket *lso = listen_ctx->lso;
+ struct tom_data *d = listen_ctx->tom_data;
+
+#if VALIDATE_TID
+ struct cpl_pass_accept_req *req = cplhdr(m);
+ unsigned int tid = GET_TID(req);
+ struct tid_info *t = &(T3C_DATA(cdev))->tid_maps;
+
+ if (unlikely(!lsk)) {
+ printk(KERN_ERR "%s: PASS_ACCEPT_REQ had unknown STID %lu\n",
+ cdev->name,
+ (unsigned long)((union listen_entry *)ctx -
+ t->stid_tab));
+ return CPL_RET_BUF_DONE;
+ }
+ if (unlikely(tid >= t->ntids)) {
+ printk(KERN_ERR "%s: passive open TID %u too large\n",
+ cdev->name, tid);
+ return CPL_RET_BUF_DONE;
+ }
+ /*
+ * For T3A the current user of the TID may have closed but its last
+ * message(s) may have been backlogged so the TID appears to be still
+ * in use. Just take the TID away, the connection can close at its
+ * own leisure. For T3B this situation is a bug.
+ */
+ if (!valid_new_tid(t, tid) &&
+ cdev->type != T3A) {
+ printk(KERN_ERR "%s: passive open uses existing TID %u\n",
+ cdev->name, tid);
+ return CPL_RET_BUF_DONE;
+ }
+#endif
+
+ process_pass_accept_req(lso, m, &d->tdev);
+ return 0;
+}
+
+/*
* Add a passively open socket to its parent's accept queue. Note that the
* child may be in any state by now, including TCP_CLOSE. We can guarantee
* though that it has not been orphaned yet.
@@ -1621,6 +2268,8 @@
return;
printf("need to move connection from syncache to so_comp for accept XXX\n");
+ UNIMPLEMENTED();
+
#ifdef notyet
oreq = child->sk_user_data;
child->sk_user_data = NULL;
@@ -2093,10 +2742,10 @@
t3tom_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl);
t3tom_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close);
t3tom_register_cpl_handler(CPL_PASS_ESTABLISH, do_pass_establish);
-#ifdef notyet
t3tom_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_pass_accept_req);
t3tom_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req);
t3tom_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl);
+#ifdef notyet
t3tom_register_cpl_handler(CPL_RX_URG_NOTIFY, do_rx_urg_notify);
t3tom_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp);
More information about the p4-projects
mailing list