PERFORCE change 128663 for review

Kip Macy kmacy at FreeBSD.org
Sun Nov 4 18:26:33 PST 2007


http://perforce.freebsd.org/chv.cgi?CH=128663

Change 128663 by kmacy at kmacy:storage:toestack on 2007/11/05 02:26:04

	- add RST send on abort
	- add listen setup and teardown infrastructure
	- reflect changes in notes

Affected files ...

.. //depot/projects/toestack/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c#14 edit
.. //depot/projects/toestack/sys/dev/cxgb/ulp/tom/cxgb_defs.h#5 edit
.. //depot/projects/toestack/sys/dev/cxgb/ulp/tom/cxgb_listen.c#3 edit
.. //depot/projects/toestack/sys/dev/cxgb/ulp/tom/cxgb_tom.c#6 edit
.. //depot/projects/toestack/sys/dev/cxgb/ulp/tom/cxgb_tom.h#5 edit
.. //depot/projects/toestack/sys/dev/cxgb/ulp/tom/notes#2 edit

Differences ...

==== //depot/projects/toestack/sys/dev/cxgb/ulp/tom/cxgb_cpl_io.c#14 (text+ko) ====

@@ -322,7 +322,19 @@
 
 }
 
+/*
+ * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no-RST variant
+ * and send it along.
+ */
+static void
+abort_arp_failure(struct t3cdev *cdev, struct mbuf *m)
+{
+	struct cpl_abort_req *req = cplhdr(m);
 
+	req->cmd = CPL_ABORT_NO_RST;
+	cxgb_ofld_send(cdev, m);
+}
+
 /*
  * Send RX credits through an RX_DATA_ACK CPL message.  If nofail is 0 we are
  * permitted to return without sending the message in case we cannot allocate
@@ -873,7 +885,8 @@
  * check SOCK_DEAD or sk->sk_sock.  Or maybe generate the error here but don't
  * free the atid.  Hmm.
  */
-static void act_open_req_arp_failure(struct t3cdev *dev, struct mbuf *m)
+static void
+act_open_req_arp_failure(struct t3cdev *dev, struct mbuf *m)
 {
 	struct toepcb *toep = m_get_toep(m);
 	struct tcpcb *tp = toep->tp_tp;
@@ -950,46 +963,46 @@
 static void
 t3_send_reset(struct socket *so)
 {
-	printf("t3_send_reset unimplemented\n");
 	
-#ifdef notyet
 	struct cpl_abort_req *req;
-	struct tcp_sock *tp = tcp_sk(sk);
-	unsigned int tid = TID(tp);
+	struct tcpcb *tp = sototcpcb(so);
+	struct toepcb *toep = tp->t_toe;
+	unsigned int tid = toep->tp_tid;
 	int mode = CPL_ABORT_SEND_RST;
+	struct mbuf *m;
 	
-	if (unlikely(sock_flag(sk, ABORT_SHUTDOWN) || !TOE_DEV(sk))) {
-		if (skb)
-			__kfree_skb(skb);
-		return 1;
-	}
+	if (__predict_false((toep->tp_flags & TP_ABORT_SHUTDOWN) || !TOE_DEV(so)))
+		return;
 
-	sock_set_flag(sk, ABORT_RPL_PENDING);
-	sock_set_flag(sk, ABORT_SHUTDOWN);
-
+	toep->tp_flags |= (TP_ABORT_RPL_PENDING|TP_ABORT_SHUTDOWN);
+	
 	/* Purge the send queue so we don't send anything after an abort. */
-	t3_purge_write_queue(sk);
-
+	sbflush(&so->so_snd);
+#ifdef notyet
 	if (sock_flag(sk, CLOSE_CON_REQUESTED) && is_t3a(TOE_DEV(sk)))
 		mode |= CPL_ABORT_POST_CLOSE_REQ;
+#endif
+	m = m_gethdr(M_NOWAIT, MT_DATA);
+	if (m == NULL) {
+		/*
+		 * XXX add lowmem cache
+		 */
+	}
+	m->m_pkthdr.len = m->m_len = sizeof(*req);
+	m_set_priority(m, mkprio(CPL_PRIORITY_DATA, so));
+	set_arp_failure_handler(m, abort_arp_failure);
 
-	if (!skb)
-		skb = alloc_skb_nofail(sizeof(*req));
-	skb->priority = mkprio(CPL_PRIORITY_DATA, sk);
-	set_arp_failure_handler(skb, abort_arp_failure);
-
-	req = (struct cpl_abort_req *)skb_put(skb, sizeof(*req));
+	req = mtod(m, struct cpl_abort_req *);
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ));
 	req->wr.wr_lo = htonl(V_WR_TID(tid));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
 	req->rsvd0 = htonl(tp->snd_nxt);
-	req->rsvd1 = !sock_flag(sk, TX_DATA_SENT);
+	req->rsvd1 = !(toep->tp_flags & TP_DATASENT);
 	req->cmd = mode;
-	if (sk->sk_state == TCP_SYN_SENT)
-		__skb_queue_tail(&tp->out_of_order_queue, skb);	// defer
+	if (tp->t_state == TCPS_SYN_SENT)
+		mbufq_tail(&toep->out_of_order_queue, m);	// defer
 	else
-		l2t_send(T3C_DEV(sk), skb, L2T_ENTRY(sk));
-#endif
+		l2t_send(T3C_DEV(so), m, toep->tp_l2t);
 }
 
 /*
@@ -1256,7 +1269,8 @@
 /*
  * Handler for CLOSE_CON_RPL CPL messages.
  */
-static int do_close_con_rpl(struct t3cdev *cdev, struct mbuf *m,
+static int
+do_close_con_rpl(struct t3cdev *cdev, struct mbuf *m,
 			    void *ctx)
 {
 	struct socket *so = (struct socket *)ctx;
@@ -1267,7 +1281,44 @@
 	return (0);
 }
 
+
 /*
+ * Add a passively open socket to its parent's accept queue.  Note that the
+ * child may be in any state by now, including TCP_CLOSE.  We can guarantee,
+ * though, that it has not been orphaned yet.
+ */
+static void
+add_pass_open_to_parent(struct socket *child, struct socket *lso,
+    struct toedev *dev)
+{
+	struct tcpcb *tp = sototcpcb(lso);
+	/*
+	 * If the server is closed it has already killed its embryonic
+	 * children.  There is nothing further to do about child.
+	 */
+	if (tp->t_state != TCPS_LISTEN)
+		return;
+
+	printf("need to move connection from syncache to so_comp for accept XXX\n");
+#ifdef notyet
+	oreq = child->sk_user_data;
+	child->sk_user_data = NULL;
+
+	inet_csk_reqsk_queue_removed(lsk, oreq);
+	synq_remove(tcp_sk(child));
+
+	if (sk_acceptq_is_full(lsk) && !TOM_TUNABLE(dev, soft_backlog_limit)) {
+		NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
+		NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
+		__reqsk_free(oreq);
+		add_to_reap_list(child);
+	} else {
+		inet_csk_reqsk_queue_add(lsk, oreq, child);
+		lsk->sk_data_ready(lsk, 0);
+	}
+#endif	
+}
+/*
  * Called when a connection is established to translate the TCP options
  * reported by HW to Linux's native format.
  */
@@ -1325,6 +1376,74 @@
 }
 
 /*
+ * Process a CPL_PASS_ESTABLISH message.  XXX a lot of the locking doesn't work
+ * if we are in TCP_SYN_RECV due to crossed SYNs
+ */
+static int
+do_pass_establish(struct t3cdev *cdev, struct mbuf *m, void *ctx)
+{
+	struct cpl_pass_establish *req = cplhdr(m);
+	struct socket *lso, *so = (struct socket *)ctx;
+	struct toedev *tdev = TOE_DEV(so);
+	// Complete socket initialization now that we have the SND_ISN
+	struct tcpcb *tp = sototcpcb(so);
+	struct toepcb *toep = tp->t_toe;
+	struct toe_tid_entry *t3c_stid;
+	struct tid_info *t;
+	unsigned int stid;
+	
+	VALIDATE_SOCK(so);
+
+	SOCK_LOCK(so);
+
+	toep->tp_wr_max = toep->tp_wr_avail = TOM_TUNABLE(tdev, max_wrs);
+	toep->tp_wr_unacked = 0;
+	toep->tp_qset = G_QNUM(ntohl(m->m_pkthdr.csum_data));
+	make_established(so, ntohl(req->snd_isn), ntohs(req->tcp_opt));
+#ifdef notyet
+	/*
+	 * XXX not sure how these checks map to us
+	 */
+	if (unlikely(sk->sk_socket)) {   // simultaneous opens only
+		sk->sk_state_change(sk);
+		sk_wake_async(sk, 0, POLL_OUT);
+	}
+	/*
+	 * The state for the new connection is now up to date.
+	 * Next check if we should add the connection to the parent's
+	 * accept queue.  When the parent closes it resets connections
+	 * on its SYN queue, so check if we are being reset.  If so we
+	 * don't need to do anything more, the coming ABORT_RPL will
+	 * destroy this socket.  Otherwise move the connection to the
+	 * accept queue.
+	 *
+	 * Note that we reset the synq before closing the server so if
+	 * we are not being reset the stid is still open.
+	 */
+	if (unlikely(!tp->forward_skb_hint)) { // removed from synq
+		__kfree_skb(skb);
+		goto unlock;
+	}
+#endif
+
+	stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
+	t = &(T3C_DATA(cdev))->tid_maps;
+	t3c_stid = lookup_stid(t, stid);
+	lso = ((struct listen_ctx *)t3c_stid->ctx)->lso;
+
+	SOCK_LOCK(lso);
+	m_free(m);
+	add_pass_open_to_parent(so, lso, tdev);
+	SOCK_UNLOCK(lso);
+#if 0
+unlock:
+#endif
+	SOCK_UNLOCK(so);
+		
+	return 0;
+}
+
+/*
  * Fill in the right TID for CPL messages waiting in the out-of-order queue
  * and send them to the TOE.
  */
@@ -1544,7 +1663,8 @@
 /*
  * Handler for TX_DATA_ACK CPL messages.
  */
-static int do_wr_ack(struct t3cdev *dev, struct mbuf *m, void *ctx)
+static int
+do_wr_ack(struct t3cdev *dev, struct mbuf *m, void *ctx)
 {
 	struct socket *so = (struct socket *)ctx;
 
@@ -1556,7 +1676,61 @@
 	return 0;
 }
 
+
+/*
+ * Reset a connection that is on a listener's SYN queue or accept queue,
+ * i.e., one that has not had a struct socket associated with it.
+ * Must be called from process context.
+ *
+ * Modeled after code in inet_csk_listen_stop().
+ */
+static void
+t3_reset_listen_child(struct socket *child)
+{
+	SOCK_LOCK(child);
+	t3_send_reset(child);
+	SOCK_UNLOCK(child);
+}
+
+/*
+ * Disconnect offloaded established but not yet accepted connections sitting
+ * on a server's accept_queue.  We just send an ABORT_REQ at this point and
+ * finish off the disconnect later as we may need to wait for the ABORT_RPL.
+ */
 void
+t3_disconnect_acceptq(struct socket *listen_so)
+{
+	struct socket *so;
+	struct tcpcb *tp;
+
+	TAILQ_FOREACH(so, &listen_so->so_comp, so_list) {
+		tp = sototcpcb(so);
+		
+		if (tp->t_flags & TF_TOE)
+			t3_reset_listen_child(so);
+	}
+}
+
+/*
+ * Reset offloaded connections sitting on a server's syn queue.  As above
+ * we send ABORT_REQ and finish off when we get ABORT_RPL.
+ */
+
+void
+t3_reset_synq(struct socket *listen_so)
+{
+	struct socket *so;
+	struct tcpcb *tp;
+
+	TAILQ_FOREACH(so, &listen_so->so_incomp, so_list) {
+		tp = sototcpcb(so);
+		
+		if (tp->t_flags & TF_TOE)
+			t3_reset_listen_child(so);
+	}
+}
+
+void
 t3_init_wr_tab(unsigned int wr_len)
 {
 	int i;
@@ -1596,14 +1770,15 @@
 	t3tom_register_cpl_handler(CPL_RX_DATA, do_rx_data);
 	t3tom_register_cpl_handler(CPL_CLOSE_CON_RPL, do_close_con_rpl);
 	t3tom_register_cpl_handler(CPL_PEER_CLOSE, do_peer_close);
+	t3tom_register_cpl_handler(CPL_PASS_ESTABLISH, do_pass_establish);
 #ifdef notyet	
-	t3tom_register_cpl_handler(CPL_PASS_ESTABLISH, do_pass_establish);
 	t3tom_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_pass_accept_req);
+	t3tom_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req);
+	t3tom_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl);
+
 	t3tom_register_cpl_handler(CPL_RX_URG_NOTIFY, do_rx_urg_notify);
 	t3tom_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp);
 	t3tom_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_rx_ddp_complete);
-	t3tom_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req);
-	t3tom_register_cpl_handler(CPL_ABORT_RPL_RSS, do_abort_rpl);
 	t3tom_register_cpl_handler(CPL_TRACE_PKT, do_trace_pkt);
 	t3tom_register_cpl_handler(CPL_GET_TCB_RPL, do_get_tcb_rpl);
 #endif

==== //depot/projects/toestack/sys/dev/cxgb/ulp/tom/cxgb_defs.h#5 (text+ko) ====

@@ -1,6 +1,8 @@
 #ifndef CXGB_DEFS_H_
 #define CXGB_DEFS_H_
 
+#define VALIDATE_TID 0
+
 #define TOEPCB(so)  ((struct toepcb *)(sototcpcb((so))->t_toe))
 #define TOE_DEV(so) (TOEPCB((so))->tp_toedev)
 #define toeptoso(toep) ((toep)->tp_tp->t_inpcb->inp_socket)
@@ -19,6 +21,8 @@
 uint32_t t3_send_rx_credits(struct tcpcb *tp, uint32_t credits, uint32_t dack, int nofail);
 void t3_cleanup_rbuf(struct tcpcb *tp);
 
+void t3_disconnect_acceptq(struct socket *listen_so);
+void t3_reset_synq(struct socket *listen_so);
 
 void toepcb_hold(struct toepcb *);
 void toepcb_release(struct toepcb *);

==== //depot/projects/toestack/sys/dev/cxgb/ulp/tom/cxgb_listen.c#3 (text+ko) ====

@@ -70,6 +70,9 @@
 #include <dev/cxgb/ulp/tom/cxgb_defs.h>
 #include <dev/cxgb/ulp/tom/cxgb_tom.h>
 
+static struct listen_info *listen_hash_add(struct tom_data *d, struct socket *so, unsigned int stid);
+static int listen_hash_del(struct tom_data *d, struct socket *so);
+
 /*
  * Process a CPL_CLOSE_LISTSRV_RPL message.  If the status is good we release
  * the STID.
@@ -87,7 +90,41 @@
 static int
 do_pass_open_rpl(struct t3cdev *cdev, struct mbuf *m, void *ctx)
 {
-	UNIMPLEMENTED();
+       	struct cpl_pass_open_rpl *rpl = cplhdr(m);
+
+	if (rpl->status != CPL_ERR_NONE) {
+		int stid = GET_TID(rpl);
+		struct listen_ctx *listen_ctx = (struct listen_ctx *)ctx;
+		struct tom_data *d = listen_ctx->tom_data;
+		struct socket *lso = listen_ctx->lso;
+
+#if VALIDATE_TID
+		if (!lso)
+			return (CPL_RET_UNKNOWN_TID | CPL_RET_BUF_DONE);
+#endif
+		/*
+		 * Note: It is safe to unconditionally call listen_hash_del()
+		 * at this point without risking unhashing a reincarnation of
+		 * an already closed socket (i.e., there is no listen, close,
+		 * listen, free the sock for the second listen while processing
+		 * a message for the first race) because we are still holding
+		 * a reference on the socket.  It is possible that the unhash
+		 * will fail because the socket is already closed, but we can't
+		 * unhash the wrong socket because it is impossible for the
+		 * socket to which this message refers to have been reincarnated.
+		 */
+		listen_hash_del(d, lso);
+		cxgb_free_stid(cdev, stid);
+#ifdef notyet
+		/*
+		 * XXX need to unreference the inpcb
+		 * but we have no way of knowing that other TOMs aren't referencing it 
+		 */
+		sock_put(lso);
+#endif
+		free(listen_ctx, M_DEVBUF);
+	}
+	return CPL_RET_BUF_DONE;
 }
 
 void __init
@@ -97,6 +134,82 @@
 	t3tom_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_close_server_rpl);
 }
 
+static inline int
+listen_hashfn(const struct socket *so)
+{
+	return ((unsigned long)so >> 10) & (LISTEN_INFO_HASH_SIZE - 1);
+}
+
+/*
+ * Create and add a listen_info entry to the listen hash table.  This and the
+ * listen hash table functions below cannot be called from softirqs.
+ */
+static struct listen_info *
+listen_hash_add(struct tom_data *d, struct socket *so, unsigned int stid)
+{
+	struct listen_info *p;
+
+	p = malloc(sizeof(*p), M_DEVBUF, M_NOWAIT|M_ZERO);
+	if (p) {
+		int bucket = listen_hashfn(so);
+
+		p->so = so;	/* just a key, no need to take a reference */
+		p->stid = stid;
+		mtx_lock(&d->listen_lock);		
+		p->next = d->listen_hash_tab[bucket];
+		d->listen_hash_tab[bucket] = p;
+		mtx_unlock(&d->listen_lock);
+	}
+	return p;
+}
+
+#if 0
+/*
+ * Given a pointer to a listening socket return its server TID by consulting
+ * the socket->stid map.  Returns -1 if the socket is not in the map.
+ */
+static int
+listen_hash_find(struct tom_data *d, struct socket *so)
+{
+	int stid = -1, bucket = listen_hashfn(so);
+	struct listen_info *p;
+
+	spin_lock(&d->listen_lock);
+	for (p = d->listen_hash_tab[bucket]; p; p = p->next)
+		if (p->sk == sk) {
+			stid = p->stid;
+			break;
+		}
+	spin_unlock(&d->listen_lock);
+	return stid;
+}
+#endif
+
+/*
+ * Delete the listen_info structure for a listening socket.  Returns the server
+ * TID for the socket if it is present in the socket->stid map, or -1.
+ */
+static int
+listen_hash_del(struct tom_data *d, struct socket *so)
+{
+	int bucket, stid = -1;
+	struct listen_info *p, **prev;
+
+	bucket = listen_hashfn(so);
+	prev  = &d->listen_hash_tab[bucket];
+
+	mtx_lock(&d->listen_lock);
+	for (p = *prev; p; prev = &p->next, p = p->next)
+		if (p->so == so) {
+			stid = p->stid;
+			*prev = p->next;
+			free(p, M_DEVBUF);
+			break;
+		}
+	mtx_unlock(&d->listen_lock);
+	
+	return (stid);
+}
 
 /*
  * Start a listening server by sending a passive open request to HW.
@@ -104,47 +217,51 @@
 void
 t3_listen_start(struct toedev *dev, struct socket *so, struct t3cdev *cdev)
 {
-	printf("start listen\n");
-#if 0
 	int stid;
-	struct sk_buff *skb;
+	struct mbuf *m;
 	struct cpl_pass_open_req *req;
 	struct tom_data *d = TOM_DATA(dev);
+	struct inpcb *inp = sotoinpcb(so);
+	struct tcpcb *tp = sototcpcb(so);
 	struct listen_ctx *ctx;
 
 	if (!TOM_TUNABLE(dev, activated))
 		return;
 
-	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+	printf("start listen\n");
+	
+	ctx = malloc(sizeof(*ctx), M_DEVBUF, M_NOWAIT);
+	
 	if (!ctx)
 		return;
 
 	ctx->tom_data = d;
-	ctx->lsk = sk;
+	ctx->lso = so;
 
-	stid = cxgb3_alloc_stid(d->cdev, d->client, ctx);
+	stid = cxgb_alloc_stid(d->cdev, d->client, ctx);
 	if (stid < 0)
 		goto free_ctx;
-	
+
+#ifdef notyet
+	/*
+	 * XXX need to mark inpcb as referenced
+	 */
 	sock_hold(sk);
-
-	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
-	if (!skb)
+#endif
+	m = m_gethdr(M_NOWAIT, MT_DATA);
+	if (m == NULL)
 		goto free_stid;
-
-	if (!listen_hash_add(d, sk, stid))
+	m->m_pkthdr.len = m->m_len = sizeof(*req);
+	
+	if (!listen_hash_add(d, so, stid))
 		goto free_all;
 
-	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
+	tp->t_flags |= TF_TOE;
+	req = mtod(m, struct cpl_pass_open_req *);
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
-#ifdef	LINUX_2_4
-	req->local_port = sk->sport;
-	req->local_ip = sk->rcv_saddr;
-#else
-	req->local_port = inet_sk(sk)->sport;
-	req->local_ip = inet_sk(sk)->rcv_saddr;
-#endif	/* LINUX_2_4 */
+	req->local_port = inp->inp_lport; 
+	memcpy(&req->local_ip, &inp->inp_laddr, 4);
 	req->peer_port = 0;
 	req->peer_ip = 0;
 	req->peer_netmask = 0;
@@ -152,18 +269,19 @@
 	req->opt0l = htonl(V_RCV_BUFSIZ(16));
 	req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK));
 
-	skb->priority = CPL_PRIORITY_LISTEN;
-	cxgb3_ofld_send(cdev, skb);
+	m_set_priority(m, CPL_PRIORITY_LISTEN); 
+	cxgb_ofld_send(cdev, m);
 	return;
 
 free_all:
-	__kfree_skb(skb);
+	m_free(m);
 free_stid:
-	cxgb3_free_stid(cdev, stid);
+	cxgb_free_stid(cdev, stid);
+#if 0	
 	sock_put(sk);
+#endif	
 free_ctx:
-	kfree(ctx);
-#endif
+	free(ctx, M_DEVBUF);
 }
 
 /*
@@ -173,12 +291,11 @@
 void
 t3_listen_stop(struct toedev *dev, struct socket *so, struct t3cdev *cdev)
 {
+	struct mbuf *m;
+	struct cpl_close_listserv_req *req;
+	int stid = listen_hash_del(TOM_DATA(dev), so);
 	printf("stop listen\n");
-#if 0
-	struct sk_buff *skb;
-	struct cpl_close_listserv_req *req;
 
-	int stid = listen_hash_del(TOM_DATA(dev), sk);
 	if (stid < 0)
 		return;
 
@@ -188,20 +305,23 @@
 	 * that arrive while we are closing the server will be able to locate
 	 * the listening socket.
 	 */
-	t3_reset_synq(sk);
+	t3_reset_synq(so);
 
 	/* Send the close ASAP to stop further passive opens */
-	skb = alloc_skb_nofail(sizeof(*req));
-	req = (struct cpl_close_listserv_req *)__skb_put(skb, sizeof(*req));
+	m = m_gethdr(M_NOWAIT, MT_DATA);
+	if (m == NULL) {
+		/*
+		 * XXX allocate from lowmem cache
+		 */
+	}
+	m->m_pkthdr.len = m->m_len = sizeof(*req);
+
+	req = mtod(m, struct cpl_close_listserv_req *);
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
 	req->cpu_idx = 0;
-	skb->priority = CPL_PRIORITY_LISTEN;
-	cxgb3_ofld_send(cdev, skb);
+	m_set_priority(m, CPL_PRIORITY_LISTEN);
+	cxgb_ofld_send(cdev, m);
 
-	t3_disconnect_acceptq(sk);
-#endif
+	t3_disconnect_acceptq(so);
 }
-
-
-

==== //depot/projects/toestack/sys/dev/cxgb/ulp/tom/cxgb_tom.c#6 (text+ko) ====

@@ -301,9 +301,9 @@
 	skb_queue_head_init(&t->deferq);
 	T3_INIT_WORK(&t->deferq_task, process_deferq, t);
 	spin_lock_init(&t->listen_lock);
-	spin_lock_init(&t->synq_lock);
 #endif
 	t3_init_tunables(t);
+	mtx_init(&t->listen_lock, "tom data listeners", NULL, MTX_DEF);
 
 	/* Adjust TOE activation for this module */
 	t->conf.activated = activated;

==== //depot/projects/toestack/sys/dev/cxgb/ulp/tom/cxgb_tom.h#5 (text+ko) ====

@@ -4,6 +4,13 @@
 
 #define LISTEN_INFO_HASH_SIZE 32 
 
+struct listen_info {
+	struct listen_info *next;  /* Link to next entry */
+	struct socket *so;         /* The listening socket */
+	unsigned int stid;         /* The server TID */
+};
+
+
 /*
  * TOM tunable parameters.  They can be manipulated through sysctl(2) or /proc.
  */
@@ -49,13 +56,13 @@
          */
 
         struct listen_info *listen_hash_tab[LISTEN_INFO_HASH_SIZE];
-        spinlock_t listen_lock;
+        struct mtx listen_lock;
 
         struct mbuf_head deferq;
         struct task deferq_task;
 
-        struct sock **tid_release_list;
-        spinlock_t tid_release_lock;
+        struct socket **tid_release_list;
+        struct mtx tid_release_lock;
         struct task tid_release_task;
 
 #if defined(CONFIG_T3_ZCOPY_SENDMSG) || defined(CONFIG_T3_ZCOPY_SENDMSG_MODULE)
@@ -69,9 +76,15 @@
 
         u8 *ppod_map;
         unsigned int nppods;
-        spinlock_t ppod_map_lock;
+        struct mtx ppod_map_lock;
+	
+        struct adap_ports *ports;
+};
+
 
-        struct adap_ports *ports;
+struct listen_ctx {
+	struct socket *lso;
+	struct tom_data *tom_data;
 };
 
 #define TOM_DATA(dev) (*(struct tom_data **)&(dev)->l4opt)
@@ -82,6 +95,7 @@
 #define TP_TX_WAIT_IDLE      (1 << 1)
 #define TP_FIN_SENT          (1 << 2)
 #define TP_ABORT_RPL_PENDING (1 << 3)
+#define TP_ABORT_SHUTDOWN    (1 << 4)
 
 struct toepcb {
 	struct toedev *tp_toedev;

==== //depot/projects/toestack/sys/dev/cxgb/ulp/tom/notes#2 (text+ko) ====

@@ -1,8 +1,9 @@
+Currently untested:
+ - abort
 
 Currently unimplemented:
  - correct credit return accounting
- - listen
- - abort
+ - complete listen handling
  - close for a subset of states
  - correct ARP failure handling
  - urgent data


More information about the p4-projects mailing list