PERFORCE change 118927 for review

Kip Macy kmacy at FreeBSD.org
Sun Apr 29 03:03:17 UTC 2007


http://perforce.freebsd.org/chv.cgi?CH=118927

Change 118927 by kmacy at kmacy_vt-x:opentoe_init on 2007/04/29 03:02:47

	more mindless BSD-ification

Affected files ...

.. //depot/projects/opentoe/sys/dev/cxgb/ulp/t3_tom/t3_cpl_io.c#3 edit
.. //depot/projects/opentoe/sys/dev/cxgb/ulp/t3_tom/t3_cpl_socket.c#1 add
.. //depot/projects/opentoe/sys/dev/cxgb/ulp/t3_tom/t3_defs.h#2 edit
.. //depot/projects/opentoe/sys/dev/cxgb/ulp/toecore/toedev.c#3 edit

Differences ...

==== //depot/projects/opentoe/sys/dev/cxgb/ulp/t3_tom/t3_cpl_io.c#3 (text+ko) ====

@@ -1,39 +1,51 @@
-/*
- * This file implements the Chelsio CPL5 message processing.
- *
- * Copyright (C) 2003-2006 Chelsio Communications.  All rights reserved.
- *
- * Written by Dimitris Michailidis (dm at chelsio.com)
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
- * release for licensing terms and conditions.
- */
+
+/**************************************************************************
+
+Copyright (c) 2007, Chelsio Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+    this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Chelsio Corporation nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
 
-#include "defs.h"
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/ip.h>
-#include <linux/netdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/toedev.h>
-#include <net/tcp.h>
-#include <net/offload.h>
+***************************************************************************/
+#include <net/tcp_var.h>
 #include <net/route.h>
-#include <asm/atomic.h>
-#include "tom.h"
-#include "t3_ddp.h"
-#include "t3cdev.h"
-#include "l2t.h"
-#include "tcb.h"
-#include "cxgb3_defs.h"
-#include "cxgb3_ctl_defs.h"
-#include "firmware_exports.h"
+
+#include <dev/cxgb/common/firmware_exports.h>
+#include <dev/cxgb/common/cxgb_defs.h>
+#include <dev/cxgb/common/cxgb_tcb.h>
+#include <dev/cxgb/common/cxgb_ctl_defs.h>
+#include <dev/cxgb/cxgb_l2t.h>
+#include <dev/cxgb/ulp/t3_tom/t3_tom.h> 
+#include <dev/cxgb/ulp/t3_tom/t3_ddp.h> 
+#include <dev/cxgb/ulp/toedev/toedev.h> 
 
 #define DEBUG_WR 0
 
-extern struct proto t3_tcp_prot;
+extern struct protosw t3_tcp_proto;
 extern struct request_sock_ops t3_rsk_ops;
 
 /*
@@ -53,14 +65,14 @@
  * tcp_create_openreq_child().  It's a RO buffer that may be used by multiple
  * CPUs without locking.
  */
-static struct mbuf *tcphdr_skb __read_mostly;
+static struct mbuf *tcphdr_mbuf __read_mostly;
 
 /*
- * The number of WRs needed for an skb depends on the number of page fragments
- * in the skb and whether it has any payload in its main body.  This maps the
- * length of the gather list represented by an skb into the # of necessary WRs.
+ * The number of WRs needed for an mbuf depends on the number of page fragments
+ * in the mbuf and whether it has any payload in its main body.  This maps the
+ * length of the gather list represented by an mbuf into the # of necessary WRs.
  */
-static unsigned int skb_wrs[MAX_SKB_FRAGS + 2] __read_mostly;
+static unsigned int mbuf_wrs[MAX_MBUF_IOV + 2] __read_mostly;
 
 /*
  * Socket filter that drops everything by specifying a 0-length filter program.
@@ -71,11 +83,11 @@
  * TOE information returned through inet_diag for offloaded connections.
  */
 struct t3_inet_diag_info {
-	u32 toe_id;    /* determines how to interpret the rest of the fields */
-	u32 tid;
-	u16 wrs;
-	u8  ulp_mode;
-	u8  ddp_enabled;
+	uint32_t toe_id;    /* determines how to interpret the rest of the fields */
+	uint32_t tid;
+	uint16_t wrs;
+	uint8_t  ulp_mode;
+	uint8_t  ddp_enabled;
 	char dev_name[TOENAMSIZ];
 };
 
@@ -88,9 +100,9 @@
 process_cpl_msg_ref(void (*fn)(struct socket *, struct mbuf *),
     struct socket *so, struct mbuf *m)
 {
-	sock_hold(sk);
-	process_cpl_msg(fn, sk, skb);
-	sock_put(sk);
+	sock_hold(so);
+	process_cpl_msg(fn, so, m);
+	sock_put(so);
 }
 
 static inline int
@@ -102,14 +114,14 @@
 /*
  * Returns an mbuf for a reply CPL message of size len.  If the input
  * mbuf has no other users it is trimmed and reused, otherwise a new buffer
- * is allocated.  The input skb must be of size at least len.  Note that this
- * operation does not destroy the original skb data even if it decides to reuse
+ * is allocated.  The input mbuf must be of size at least len.  Note that this
+ * operation does not destroy the original mbuf data even if it decides to reuse
  * the buffer.
  */
 static struct mbuf *
-get_cpl_reply_mbuf(struct mbuf *m, size_t len, int gfp)
+get_cpl_reply_mbuf(struct mbuf *m, size_t len)
 {
-	if (likely(!skb_cloned(skb))) {
+	if (__predict_true(!skb_cloned(skb))) {
 		BUG_ON(skb->len < len);
 		__skb_trim(skb, len);
 		skb_get(skb);
@@ -122,12 +134,12 @@
 }
 
 /*
- * Like get_cpl_reply_skb() but the returned buffer starts out empty.
+ * Like get_cpl_reply_mbuf() but the returned buffer starts out empty.
  */
 static struct mbuf *
 __get_cpl_reply_mbuf(struct mbuf *m, size_t len)
 {
-	if (likely(!skb_cloned(skb) && !skb->data_len)) {
+	if (__predict_true(!skb_cloned(skb) && !skb->data_len)) {
 		__skb_trim(skb, 0);
 		skb_get(skb);
 	} else
@@ -143,15 +155,15 @@
  * it is sent directly.
  */
 static inline void
-send_or_defer(struct socket *so, struct tcp_sock *tp, struct mbuf *skb,
+send_or_defer(struct socket *so, struct tcpcb *tp, struct mbuf *m,
     int through_l2t)
 {
-	if (unlikely(sk->sk_state == TCP_SYN_SENT))
-		__skb_queue_tail(&tp->out_of_order_queue, skb);  // defer
+	if (__predict_false(sk->sk_state == TCP_SYN_SENT))
+		__skb_queue_tail(&tp->out_of_order_queue, m);  // defer
 	else if (through_l2t)
-		l2t_send(T3C_DEV(sk), skb, L2T_ENTRY(sk));  // send through L2T
+		l2t_send(TOE_DEV(so), m, L2T_ENTRY(so));  // send through L2T
 	else
-		cxgb3_ofld_send(T3C_DEV(sk), skb);          // send directly
+		cxgb_ofld_send(TOE_DEV(so), m);          // send directly
 }
 
 /*
@@ -169,12 +181,13 @@
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
  */
 static inline void
-mk_tid_release(struct mbuf *skb, const struct socket *so, unsigned int tid)
+mk_tid_release(struct mbuf *m, const struct socket *so, unsigned int tid)
 {
 	struct cpl_tid_release *req;
 
-	skb->priority = mkprio(CPL_PRIORITY_SETUP, sk);
-	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+	m->m_priority = mkprio(CPL_PRIORITY_SETUP, so);
+	MH_ALIGN(m, sizeof(*req));
+	req = mtod(m, struct cpl_tid_release *);
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
 }
@@ -186,7 +199,7 @@
 sk_insert_tid(struct tom_data *d, struct socket *so, unsigned int tid)
 {
 	sock_hold(sk);
-	cxgb3_insert_tid(d->cdev, d->client, sk, tid);
+	cxgb_insert_tid(d->cdev, d->client, sk, tid);
 }
 
 /**
@@ -198,7 +211,7 @@
  *	does not exceed the target MTU.
  */
 static unsigned int
-find_best_mtu(const struct t3c_data *d, unsigned short mtu)
+find_best_mtu(const struct toe_data *d, unsigned short mtu)
 {
 	int i = 0;
 
@@ -211,10 +224,10 @@
 select_mss(struct socket *so, unsigned int pmtu)
 {
 	unsigned int idx;
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 	struct dst_entry *dst = __sk_dst_get(sk);
 	struct tom_data *d = TOM_DATA(TOE_DEV(sk));
-	const struct t3c_data *td = T3C_DATA(d->cdev);
+	const struct t3_data *td = TOE_DATA(d->cdev);
 
 	tp->advmss = dst_metric(dst, RTAX_ADVMSS);
 	if (USER_MSS(tp) && tp->advmss > USER_MSS(tp))
@@ -241,7 +254,7 @@
 
 #define VALIDATE_SOCK(sk) \
 	do { \
-		if (unlikely(!(sk))) \
+		if (__predict_false(!(sk))) \
 			return CPL_RET_UNKNOWN_TID | CPL_RET_BUF_DONE; \
 	} while (0)
 #else
@@ -259,7 +272,7 @@
 {
 #if 0
 	printk("connection_done: TID: %u, state: %d, dead %d, refs %d\n",
-	       TID(tcp_sk(sk)), sk->sk_state, sock_flag(sk, SOCK_DEAD),
+	       TID(sototcpcb(so)), sk->sk_state, sock_flag(sk, SOCK_DEAD),
 	       atomic_read(&sk->sk_refcnt));
 //	dump_stack();
 #endif
@@ -315,20 +328,20 @@
 static inline unsigned int
 calc_opt0h(struct socket *so)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 
 	return V_NAGLE((tp->nonagle & TCP_NAGLE_OFF) == 0) |
-	    V_KEEP_ALIVE(sock_flag(sk, SOCK_KEEPOPEN) != 0) | F_TCAM_BYPASS |
-	    V_WND_SCALE(RCV_WSCALE(tp)) | V_MSS_IDX(MTU_IDX(tp));
+	    V_KEEP_ALIVE(sock_flag(so, SOCK_KEEPOPEN) != 0) | F_TCAM_BYPASS |
+	    V_WND_SCALE(RCV_WSCALE(so)) | V_MSS_IDX(MTU_IDX(so));
 }
 
 static inline unsigned int
 calc_opt0l(const struct socket *so)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 
-	return V_TOS(SK_TOS(sk)) | V_ULP_MODE(ULP_MODE(tp)) |
-	       V_RCV_BUFSIZ(min(tp->rcv_wnd >> 10, (u32)M_RCV_BUFSIZ));
+	return V_TOS(SK_TOS(so)) | V_ULP_MODE(ULP_MODE(so)) |
+	       V_RCV_BUFSIZ(min(tp->rcv_wnd >> 10, (uint32_t)M_RCV_BUFSIZ));
 }
 
 static inline unsigned int
@@ -349,11 +362,11 @@
  * it is available (has a user count of 1).  Otherwise we get a fresh buffer.
  */
 static struct mbuf *
-alloc_ctrl_skb(const struct tcp_sock *tp, int len)
+alloc_ctrl_mbuf(const struct tcpcb *tp, int len)
 {
-	struct mbuf *skb = CTRL_SKB_CACHE(tp);
+	struct mbuf *m = CTRL_MBUF_CACHE(so);
 
-	if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
+	if (__predict_true(m && !skb_shared(skb) && !skb_cloned(skb))) {
 		__skb_trim(skb, 0);
 		atomic_set(&skb->users, 2);
 	} else
@@ -374,34 +387,34 @@
  * WRs so that the two uses do not overlap.
  */
 static void
-synq_add(struct sock *parent, struct sock *child)
+synq_add(struct socket *parent, struct socket *child)
 {
-	struct tcp_sock *p = tcp_sk(parent);
-	struct tcp_sock *c = tcp_sk(child);
+	struct tcpcb *p = sototcpcb(parent);
+	struct tcpcb *c = sototcpcb(child);
 
 	if (synq_empty(parent)) {			/* this is the first child */
 		c->forward_skb_hint  = (void *)parent;
 		p->fastpath_skb_hint = (void *)child;
 	} else {
-		struct sock *first = (struct sock *)p->forward_skb_hint;
+		struct socket *first = (struct socket *)p->forward_skb_hint;
 		c->forward_skb_hint = p->forward_skb_hint;
-		tcp_sk(first)->fastpath_skb_hint = (void *)child;
+		sototcpcb(first)->fastpath_skb_hint = (void *)child;
 	}
 	p->forward_skb_hint  = (void *)child;
 	c->fastpath_skb_hint = (void *)parent;
 }
 
 static void
-synq_remove(struct tcp_sock *child)
+synq_remove(struct tcpcb *child)
 {
-	struct sock *next = (struct sock *)child->forward_skb_hint;
-	struct sock *prev = (struct sock *)child->fastpath_skb_hint;
+	struct socket *next = (struct socket *)child->forward_skb_hint;
+	struct socket *prev = (struct socket *)child->fastpath_skb_hint;
 
 	if (next == prev)                     /* sole child */
-		reset_synq(tcp_sk(next));
+		reset_synq(sototcpcb(next));
 	else {
-		tcp_sk(prev)->forward_skb_hint  = child->forward_skb_hint;
-		tcp_sk(next)->fastpath_skb_hint = child->fastpath_skb_hint;
+		sototcpcb(prev)->forward_skb_hint  = child->forward_skb_hint;
+		sototcpcb(next)->fastpath_skb_hint = child->fastpath_skb_hint;
 	}
 	reset_synq(child);
 }
@@ -417,7 +430,7 @@
 }
 
 static void
-purge_wr_queue(struct tcp_sock *tp)
+purge_wr_queue(struct tcpcb *tp)
 {
 	struct mbuf *skb;
 	while ((skb = dequeue_wr(tp)) != NULL)
@@ -440,7 +453,7 @@
  * Generic ARP failure handler that discards the buffer.
  */
 static void
-arp_failure_discard(struct t3cdev *cdev, struct mbuf *m)
+arp_failure_discard(struct toedev *cdev, struct mbuf *m)
 {
 	m_freem(m);
 }
@@ -449,7 +462,7 @@
 make_tx_data_wr(struct socket *so, struct mbuf *m, int len)
 {
 	struct tx_data_wr *req;
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 
 	skb->h.raw = skb->data;
 	req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
@@ -490,19 +503,19 @@
 t3_push_frames(struct socket *so, int req_completion)
 {
 	int total_size = 0;
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 	struct mbuf *m;
-	struct t3cdev *cdev;
+	struct toedev *cdev;
 	struct tom_data *d;
 
-	if (unlikely(sk_in_state(sk, TCPF_SYN_SENT | TCPF_CLOSE)))
+	if (__predict_false(sk_in_state(sk, TCPF_SYN_SENT | TCPF_CLOSE)))
 		return 0;
 
 	/*
 	 * We shouldn't really be called at all after an abort but check just
 	 * in case.
 	 */
-	if (unlikely(sock_flag(sk, ABORT_SHUTDOWN)))
+	if (__predict_false(sock_flag(sk, ABORT_SHUTDOWN)))
 		return 0;
 
 	d = TOM_DATA(TOE_DEV(sk));
@@ -528,7 +541,7 @@
 		WR_UNACKED(tp) += wrs_needed;
 		enqueue_wr(tp, skb);
 
-		if (likely(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_NEED_HDR)) {
+		if (__predict_true(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_NEED_HDR)) {
 			len += ulp_extra_len(skb);
 			make_tx_data_wr(sk, skb, len);
 			tp->snd_nxt += len;
@@ -568,9 +581,9 @@
 };
 #endif
 
-static inline void free_atid(struct t3cdev *cdev, unsigned int tid)
+static inline void free_atid(struct toedev *cdev, unsigned int tid)
 {
-	struct socket *so = cxgb3_free_atid(cdev, tid);
+	struct socket *so = cxgb_free_atid(cdev, tid);
 	if (sk)
 		sock_put(sk);
 }
@@ -580,15 +593,15 @@
 static void
 t3_release_offload_resources(struct socket *so)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 	struct toedev *tdev = TOE_DEV(sk);
-	struct t3cdev *cdev;
+	struct toedev *cdev;
 	unsigned int tid = TID(tp);
 
 	if (!tdev)
 		return;
 
-	cdev = T3C_DEV(sk);
+	cdev = TOE_DEV(sk);
 	if (!cdev)
 		return;
 
@@ -614,7 +627,7 @@
 		free_atid(cdev, tid);
 		__skb_queue_purge(&tp->out_of_order_queue);
 	} else {                                          // we have TID
-		cxgb3_remove_tid(cdev, (void *)sk, tid);
+		cxgb_remove_tid(cdev, (void *)sk, tid);
 		sock_put(sk);
 	}
 
@@ -637,34 +650,6 @@
 }
 
 /*
- * Called for each mbuf in a socket's receive backlog during
- * backlog processing.
- */
-static int
-t3_backlog_rcv(struct socket *so, struct mbuf *skb)
-{
-#if VALIDATE_TID
-	unsigned int opcode = ntohl(skb->csum) >> 24;
-
-	if (unlikely(sk->sk_state == TCP_CLOSE && bad_backlog_msg(opcode))) {
-		printk(KERN_ERR "unexpected CPL message with opcode %x for "
-		       "closed TID %u\n", opcode, TID(tcp_sk(sk)));
-		kfree_skb(skb);
-		return 0;
-	}
-#endif
-
-	BLOG_SKB_CB(skb)->backlog_rcv(sk, skb);
-	return 0;
-}
-
-#ifdef CONFIG_TCP_OFFLOAD_MODULE
-static void dummy_tcp_keepalive_timer(unsigned long data)
-{
-}
-#endif
-
-/*
  * Switch a socket to the offload protocol operations.  Note that the offload
  * operations do not contain the offload backlog handler, we install that
  * directly to the socket.
@@ -672,28 +657,17 @@
 static inline void
 install_offload_ops(struct socket *so)
 {
-	sk->sk_prot = &t3_tcp_prot;
-	sk->sk_backlog_rcv = t3_backlog_rcv;
-	sk->sk_write_space = t3_write_space;
-
-	if (sk->sk_filter)
-		sk_filter_release(sk, sk->sk_filter);
-	sk->sk_filter = &drop_all;
-	sk_filter_charge(sk, sk->sk_filter);
-
-#ifdef CONFIG_TCP_OFFLOAD_MODULE
-	sk->sk_timer.function = dummy_tcp_keepalive_timer;
-#endif
-	sock_set_flag(sk, SOCK_OFFLOADED);
+	so->so_proto = &t3_tcp_proto;
+	so->so_rcv.sb_flags |= SB_TOE;
 }
 
 #if DEBUG_WR
 static void
 dump_wrs(struct socket *so)
 {
-	u64 *d;
+	uint64_t *d;
 	struct mbuf *p;
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 
 	printk("TID %u info:\n", TID(tp));
 	skb_queue_walk(&sk->sk_write_queue, p) {
@@ -714,7 +688,7 @@
 }
 
 static int
-count_pending_wrs(const struct tcp_sock *tp)
+count_pending_wrs(const struct tcpcb *tp)
 {
 	int n = 0;
 	const struct mbuf *p;
@@ -725,11 +699,11 @@
 }
 
 static void
-check_wr_invariants(const struct tcp_sock *tp)
+check_wr_invariants(const struct tcpcb *tp)
 {
 	int pending = count_pending_wrs(tp);
 
-	if (unlikely(WR_AVAIL(tp) + pending != WR_MAX(tp)))
+	if (__predict_false(WR_AVAIL(tp) + pending != WR_MAX(tp)))
 		printk(KERN_ERR "TID %u: credit imbalance: avail %u, "
 		       "pending %u, total should be %u\n", TID(tp),
 		       WR_AVAIL(tp), pending, WR_MAX(tp));
@@ -737,7 +711,7 @@
 #endif
 
 static void
-t3_idiag_get_info(struct socket *so, u32 ext, struct mbuf *skb)
+t3_idiag_get_info(struct socket *so, uint32_t ext, struct mbuf *skb)
 {
 #if DEBUG_WR
 	if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
@@ -751,7 +725,7 @@
 	if (ext & (1 << INET_DIAG_MAX)) {
 		struct rtattr *rta;
 		struct t3_inet_diag_info *info;
-		const struct tcp_sock *tp = tcp_sk(sk);
+		const struct tcpcb *tp = sototcpcb(so);
 
 		rta = __RTA_PUT(skb, INET_DIAG_MAX + 1, sizeof(*info));
 		info = RTA_DATA(rta);
@@ -817,7 +791,7 @@
 	}
 }
 
-static void act_open_req_arp_failure(struct t3cdev *dev, struct mbuf *skb);
+static void act_open_req_arp_failure(struct toedev *dev, struct mbuf *skb);
 
 static void
 fail_act_open(struct socket *so, int errno)
@@ -833,7 +807,7 @@
 act_open_retry_timer(unsigned long data)
 {
 	struct mbuf *skb;
-	struct socket *so = (struct sock *)data;
+	struct socket *so = (struct socket *)data;
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	bh_lock_sock(sk);
@@ -847,9 +821,9 @@
 		else {
 			skb->sk = sk;
 			set_arp_failure_handler(skb, act_open_req_arp_failure);
-			mk_act_open_req(sk, skb, TID(tcp_sk(sk)),
+			mk_act_open_req(sk, skb, TID(sototcpcb(so)),
 					L2T_ENTRY(sk));
-			l2t_send(T3C_DEV(sk), skb, L2T_ENTRY(sk));
+			l2t_send(TOE_DEV(sk), skb, L2T_ENTRY(sk));
 		}
 	}
 	bh_unlock_sock(sk);
@@ -889,7 +863,7 @@
  * Process an ACT_OPEN_RPL CPL message.
  */
 static int
-do_act_open_rpl(struct t3cdev *cdev, struct mbuf *m, void *ctx)
+do_act_open_rpl(struct toedev *cdev, struct mbuf *m, void *ctx)
 {
 	struct socket *so = (struct socket *)ctx;
 	struct cpl_act_open_rpl *rpl = cplhdr(m);
@@ -897,7 +871,7 @@
 	VALIDATE_SOCK(so);
 
 	if (cdev->type != T3A && act_open_has_tid(rpl->status))
-		cxgb3_queue_tid_release(cdev, GET_TID(rpl));
+		cxgb_queue_tid_release(cdev, GET_TID(rpl));
 
 	process_cpl_msg_ref(active_open_failed, sk, skb);
 	return 0;
@@ -911,7 +885,7 @@
  * check SOCK_DEAD or sk->sk_sock.  Or maybe generate the error here but don't
  * free the atid.  Hmm.
  */
-static void act_open_req_arp_failure(struct t3cdev *dev, struct mbuf *skb)
+static void act_open_req_arp_failure(struct toedev *dev, struct mbuf *skb)
 {
 	struct socket *so = skb->sk;
 
@@ -986,7 +960,7 @@
 				   unsigned int tid, struct l2t_entry *e,
 				   struct dst_entry *dst)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 
 	TOE_DEV(sk) = dev;
 	TID(tp) = tid;
@@ -1020,10 +994,10 @@
 	struct mbuf *skb;
 	struct l2t_entry *e;
 	struct tom_data *d = TOM_DATA(tdev);
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 	struct dst_entry *dst = __sk_dst_get(sk);
 
-	int atid = cxgb3_alloc_atid(d->cdev, d->client, sk);
+	int atid = cxgb_alloc_atid(d->cdev, d->client, sk);
 	if (atid < 0)
 		goto out_err;
 
@@ -1063,12 +1037,12 @@
  * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
  * and send it along.
  */
-static void abort_arp_failure(struct t3cdev *cdev, struct mbuf *skb)
+static void abort_arp_failure(struct toedev *cdev, struct mbuf *skb)
 {
 	struct cpl_abort_req *req = cplhdr(skb);
 
 	req->cmd = CPL_ABORT_NO_RST;
-	cxgb3_ofld_send(cdev, skb);
+	cxgb_ofld_send(cdev, skb);
 }
 
 /*
@@ -1080,10 +1054,10 @@
 int t3_send_reset(struct socket *so, int mode, struct mbuf *skb)
 {
 	struct cpl_abort_req *req;
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 	unsigned int tid = TID(tp);
 
-	if (unlikely(sock_flag(sk, ABORT_SHUTDOWN) || !TOE_DEV(sk))) {
+	if (__predict_false(sock_flag(sk, ABORT_SHUTDOWN) || !TOE_DEV(sk))) {
 		if (skb)
 			__kfree_skb(skb);
 		return 1;
@@ -1113,7 +1087,7 @@
 	if (sk->sk_state == TCP_SYN_SENT)
 		__skb_queue_tail(&tp->out_of_order_queue, skb);	// defer
 	else
-		l2t_send(T3C_DEV(sk), skb, L2T_ENTRY(sk));
+		l2t_send(TOE_DEV(sk), skb, L2T_ENTRY(sk));
 	return 0;
 }
 EXPORT_SYMBOL(t3_send_reset);
@@ -1125,7 +1099,7 @@
  *
  * Modeled after code in inet_csk_listen_stop().
  */
-static void reset_listen_child(struct sock *child)
+static void reset_listen_child(struct socket *child)
 {
 	struct mbuf *skb = alloc_skb_nofail(sizeof(struct cpl_abort_req));
 
@@ -1193,11 +1167,11 @@
 	mtx_unlock(&reap_list_lock);
 }
 
-static void __set_tcb_field(struct socket *so, struct mbuf *skb, u16 word,
-			    u64 mask, u64 val, int no_reply)
+static void __set_tcb_field(struct socket *so, struct mbuf *skb, uint16_t word,
+			    uint64_t mask, uint64_t val, int no_reply)
 {
 	struct cpl_set_tcb_field *req;
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 
 	req = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*req));
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
@@ -1212,14 +1186,14 @@
 	send_or_defer(sk, tp, skb, 0);
 }
 
-void t3_set_tcb_field(struct socket *so, u16 word, u64 mask, u64 val)
+void t3_set_tcb_field(struct socket *so, uint16_t word, uint64_t mask, uint64_t val)
 {
 	struct mbuf *skb;
 
 	if (sk->sk_state == TCP_CLOSE || sock_flag(sk, ABORT_SHUTDOWN))
 		return;
 
-	skb = alloc_ctrl_skb(tcp_sk(sk), sizeof(struct cpl_set_tcb_field));
+	skb = alloc_ctrl_skb(sototcpcb(so), sizeof(struct cpl_set_tcb_field));
 	__set_tcb_field(sk, skb, word, mask, val, 1);
 }
 
@@ -1236,7 +1210,7 @@
  */
 void t3_set_nagle(struct socket *so)
 {
-	set_tcb_tflag(sk, S_TF_NAGLE, !(tcp_sk(sk)->nonagle & TCP_NAGLE_OFF));
+	set_tcb_tflag(sk, S_TF_NAGLE, !(sototcpcb(so)->nonagle & TCP_NAGLE_OFF));
 }
 
 /*
@@ -1277,7 +1251,7 @@
        V_TCB_RX_DDP_BUF0_LEN(3)) << 32))
 #define TP_DDP_TIMER_WORKAROUND_VAL\
     (V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_ACTIVE_BUF(0) |\
-     ((V_TCB_RX_DDP_BUF0_OFFSET((u64)1) | V_TCB_RX_DDP_BUF0_LEN((u64)2)) <<\
+     ((V_TCB_RX_DDP_BUF0_OFFSET((uint64_t)1) | V_TCB_RX_DDP_BUF0_LEN((uint64_t)2)) <<\
       32))
 
 void
@@ -1310,14 +1284,14 @@
 		t3_set_tcb_field(sk, W_TCB_RX_DDP_BUF0_OFFSET,
 			 V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) |
 			 V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN),
-			 V_TCB_RX_DDP_BUF0_OFFSET((u64)offset) |
-			 V_TCB_RX_DDP_BUF0_LEN((u64)len));
+			 V_TCB_RX_DDP_BUF0_OFFSET((uint64_t)offset) |
+			 V_TCB_RX_DDP_BUF0_LEN((uint64_t)len));
 	else
 		t3_set_tcb_field(sk, W_TCB_RX_DDP_BUF1_OFFSET,
 			 V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) |
 			 V_TCB_RX_DDP_BUF1_LEN(M_TCB_RX_DDP_BUF1_LEN << 32),
-			 V_TCB_RX_DDP_BUF1_OFFSET((u64)offset) |
-			 V_TCB_RX_DDP_BUF1_LEN(((u64)len) << 32));
+			 V_TCB_RX_DDP_BUF1_OFFSET((uint64_t)offset) |
+			 V_TCB_RX_DDP_BUF1_LEN(((uint64_t)len) << 32));
 }
 
 int
@@ -1338,7 +1312,7 @@
 t3_get_tcb(struct socket *so)
 {
 	struct cpl_get_tcb *req;
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 	struct mbuf *skb = alloc_skb(sizeof(*req), gfp_any());
 
 	if (!skb)
@@ -1348,11 +1322,11 @@
 	req = (struct cpl_get_tcb *)__skb_put(skb, sizeof(*req));
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_GET_TCB, TID(tp)));
-	req->cpuno = htons(qset(tcp_sk(sk)));
+	req->cpuno = htons(qset(sototcpcb(so)));
 	if (sk->sk_state == TCP_SYN_SENT)
 		__skb_queue_tail(&tp->out_of_order_queue, skb);	// defer
 	else
-		cxgb3_ofld_send(T3C_DEV(sk), skb);
+		cxgb_ofld_send(TOE_DEV(sk), skb);
 	return 0;
 }
 
@@ -1362,8 +1336,8 @@
  * permitted to return without sending the message in case we cannot allocate
  * an mbuf.  Returns the number of credits sent.
  */
-u32
-t3_send_rx_credits(struct socket *so, u32 credits, u32 dack, int nofail)
+uint32_t
+t3_send_rx_credits(struct socket *so, uint32_t credits, uint32_t dack, int nofail)
 {
 	struct cpl_rx_data_ack *req;
 	struct mbuf *skb;
@@ -1380,11 +1354,11 @@
 	req = (struct cpl_rx_data_ack *)__skb_put(skb, sizeof(*req));
 	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK,
-				TID(tcp_sk(sk))));
+				TID(sototcpcb(so))));
 	req->credit_dack = htonl(dack | V_RX_CREDITS(credits) |
 				 V_RX_FORCE_ACK(nofail));
 	skb->priority = mkprio(CPL_PRIORITY_ACK, sk);
-	cxgb3_ofld_send(T3C_DEV(sk), skb);
+	cxgb_ofld_send(TOE_DEV(sk), skb);
 	return credits;
 }
 
@@ -1398,7 +1372,7 @@
 {
 	struct mbuf *skb;
 	struct cpl_rx_data_ack *req;
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 
 	skb = alloc_ctrl_skb(tp, sizeof(*req));
 
@@ -1409,7 +1383,7 @@
 				 V_RX_DACK_MODE(1) |
 				 V_RX_CREDITS(tp->copied_seq - tp->rcv_wup));
 	skb->priority = mkprio(CPL_PRIORITY_CONTROL, sk);
-	cxgb3_ofld_send(T3C_DEV(sk), skb);
+	cxgb_ofld_send(TOE_DEV(sk), skb);
 	tp->rcv_wup = tp->copied_seq;
 }
 
@@ -1417,9 +1391,9 @@
  * Handle receipt of an urgent pointer.
  */
 static void
-handle_urg_ptr(struct socket *so, u32 urg_seq)
+handle_urg_ptr(struct socket *so, uint32_t urg_seq)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 
 	urg_seq--;   /* initially points past the urgent data, per BSD */
 
@@ -1524,15 +1498,15 @@
 static void
 tcb_rpl_as_ddp_complete(struct socket *so, struct mbuf *m)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 	struct ddp_state *q = DDP_STATE(tp);
 	struct ddp_buf_state *bsp;
 	struct cpl_get_tcb_rpl *hdr;
 	unsigned int ddp_offset;
-	u64 t;
+	uint64_t t;
 	__be64 *tcb;
 
-	if (unlikely(!(tp = tcp_sk(sk)) || !(q = DDP_STATE(tp)))) {
+	if (__predict_false(!(tp = sototcpcb(so)) || !(q = DDP_STATE(tp)))) {
 		kfree_skb(skb);
 		return;
 	}
@@ -1609,7 +1583,7 @@
 	bsp->cur_offset = ddp_offset;
 	skb->len = ddp_offset - TCP_SKB_CB(skb)->when;
 
-	if (unlikely(sk_no_receive(sk) && skb->len)) {
+	if (__predict_false(sk_no_receive(sk) && skb->len)) {
 		handle_excess_rx(sk, skb);
 		return;
 	}
@@ -1687,7 +1661,7 @@
 static int
 do_get_tcb_rpl(struct toedev *cdev, struct mbuf *m, void *ctx)
 {
-	struct socket *so = (struct sock *)ctx;
+	struct socket *so = (struct socket *)ctx;
 
 	/* OK if socket doesn't exist */
 	if (!sk)
@@ -1700,7 +1674,7 @@
 static void
 handle_ddp_data(struct socket *so, struct mbuf *m)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 	struct ddp_state *q;
 	struct ddp_buf_state *bsp;
 	struct cpl_rx_data *hdr = cplhdr(m);
@@ -1746,9 +1720,9 @@
 new_rx_data(struct socket *so, struct mbuf *m)
 {
 	struct cpl_rx_data *hdr = cplhdr(skb);
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 
-	if (unlikely(sk_no_receive(sk))) {
+	if (__predict_false(sk_no_receive(sk))) {
 		handle_excess_rx(sk, skb);
 		return;
 	}
@@ -1761,7 +1735,7 @@
 	skb_ulp_mode(skb) = 0;                    /* for iSCSI */
 
 #if VALIDATE_SEQ
-	if (unlikely(TCP_SKB_CB(skb)->seq != tp->rcv_nxt)) {
+	if (__predict_false(TCP_SKB_CB(skb)->seq != tp->rcv_nxt)) {
 		printk(KERN_ERR
 		       "%s: TID %u: Bad sequence number %u, expected %u\n",
 		       TOE_DEV(sk)->name, TID(tp), TCP_SKB_CB(skb)->seq,
@@ -1774,14 +1748,14 @@
 	if (!skb->data_len)
 		__skb_trim(skb, ntohs(hdr->len));
 
-	if (unlikely(hdr->urg))
+	if (__predict_false(hdr->urg))
 		handle_urg_ptr(sk, tp->rcv_nxt + ntohs(hdr->urg));
-	if (unlikely(tp->urg_data == TCP_URG_NOTYET &&
+	if (__predict_false(tp->urg_data == TCP_URG_NOTYET &&
 		     tp->urg_seq - tp->rcv_nxt < skb->len))
 		tp->urg_data = TCP_URG_VALID | skb->data[tp->urg_seq -
 							 tp->rcv_nxt];
 
-	if (unlikely(hdr->dack_mode != DELACK_MODE(sk))) {
+	if (__predict_false(hdr->dack_mode != DELACK_MODE(sk))) {
 		DELACK_MODE(sk) = hdr->dack_mode;
 		DELACK_SEQ(tp) = tp->rcv_nxt;
 	}
@@ -1804,9 +1778,9 @@
  * Handler for RX_DATA CPL messages.
  */
 static int
-do_rx_data(struct t3cdev *cdev, struct mbuf *skb, void *ctx)
+do_rx_data(struct toedev *cdev, struct mbuf *skb, void *ctx)
 {
-	struct socket *so = (struct sock *)ctx;
+	struct socket *so = (struct socket *)ctx;
 
 	VALIDATE_SOCK(sk);
 
@@ -1820,18 +1794,18 @@
 static void
 new_rx_data_ddp(struct socket *so, struct mbuf *skb)
 {
-	struct tcp_sock *tp;
+	struct tcpcb *tp;
 	struct ddp_state *q;
 	struct ddp_buf_state *bsp;
 	struct cpl_rx_data_ddp *hdr;
 	unsigned int ddp_len, rcv_nxt, ddp_report, end_offset, buf_idx;
 
-	if (unlikely(sk_no_receive(sk))) {
+	if (__predict_false(sk_no_receive(sk))) {
 		handle_excess_rx(sk, skb);
 		return;
 	}
 
-	tp = tcp_sk(sk);
+	tp = sototcpcb(so);
 	q = DDP_STATE(tp);
 	hdr = cplhdr(skb);
 	ddp_report = ntohl(hdr->ddp_report);
@@ -1913,14 +1887,14 @@
  * Handler for RX_DATA_DDP CPL messages.
  */
 static int
-do_rx_data_ddp(struct t3cdev *cdev, struct mbuf *m, void *ctx)
+do_rx_data_ddp(struct toedev *cdev, struct mbuf *m, void *ctx)
 {
 	struct socket *so = ctx;
 	const struct cpl_rx_data_ddp *hdr = cplhdr(m);
 
 	VALIDATE_SOCK(sk);
 
-	if (unlikely(ntohl(hdr->ddpvld_status) & DDP_ERR)) {
+	if (__predict_false(ntohl(hdr->ddpvld_status) & DDP_ERR)) {
 		printk(KERN_ERR "RX_DATA_DDP for TID %u reported error 0x%x\n",
 		       GET_TID(hdr), G_DDP_VALID(ntohl(hdr->ddpvld_status)));
 		return CPL_RET_BUF_DONE;
@@ -1934,18 +1908,18 @@
 static void
 process_ddp_complete(struct socket *so, struct mbuf *m)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	struct tcpcb *tp = sototcpcb(so);
 	struct ddp_state *q;
 	struct ddp_buf_state *bsp;
 	struct cpl_rx_ddp_complete *hdr;
 	unsigned int ddp_report, buf_idx;
 
-	if (unlikely(sk_no_receive(sk))) {
+	if (__predict_false(sk_no_receive(sk))) {
 		handle_excess_rx(sk, skb);
 		return;
 	}
 
-	tp = tcp_sk(sk);
+	tp = sototcpcb(so);
 	q = DDP_STATE(tp);
 	hdr = cplhdr(skb);
 	ddp_report = ntohl(hdr->ddp_report);
@@ -2011,7 +1985,7 @@
 static void
 enter_timewait(struct socket *so)
 {

>>> TRUNCATED FOR MAIL (1000 lines) <<<


More information about the p4-projects mailing list