svn commit: r245274 - in head/sys/dev/cxgbe: . tom

Navdeep Parhar np at FreeBSD.org
Thu Jan 10 23:56:51 UTC 2013


Author: np
Date: Thu Jan 10 23:56:50 2013
New Revision: 245274
URL: http://svnweb.freebsd.org/changeset/base/245274

Log:
  cxgbe(4): Add functions to help synchronize "slow" operations (those not
  on the fast data path) and use them instead of frobbing the adapter lock
  and busy flag directly.
  
  Other changes made while reworking all slow operations:
  - Wait for the reply to a filter request (add/delete).  This guarantees
    that the operation is complete by the time the ioctl returns.
  - Tidy up the tid_info structure.
  - Do not allow the tx queue size to be set to something that's not a
    power of 2.
  
  MFC after:	1 week
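
For reference, a minimal sketch (not part of this commit) of how a slow
operation is expected to use the new helpers.  The handler t4_example_op is
hypothetical; begin_synchronized_op, end_synchronized_op, SLEEP_OK, INTR_OK,
and ASSERT_SYNCHRONIZED_OP are the names introduced in the diff below.

	static int
	t4_example_op(struct port_info *pi)
	{
		struct adapter *sc = pi->adapter;
		int rc;

		/*
		 * Serialize with all other slow operations on this adapter.
		 * SLEEP_OK: ok to sleep while waiting for our turn.
		 * INTR_OK: a signal may interrupt the wait (returns EINTR).
		 */
		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4exmp");
		if (rc)
			return (rc);	/* EINTR, EBUSY, or ENXIO (port doomed) */

		/*
		 * The adapter lock is not held here; routines called from this
		 * point may use ASSERT_SYNCHRONIZED_OP(sc) instead of asserting
		 * the lock, and the slow work is free to sleep.
		 */

		end_synchronized_op(sc, 0);	/* clear BUSY, wake up waiters */
		return (rc);
	}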

Modified:
  head/sys/dev/cxgbe/adapter.h
  head/sys/dev/cxgbe/offload.h
  head/sys/dev/cxgbe/t4_main.c
  head/sys/dev/cxgbe/tom/t4_tom.c

Modified: head/sys/dev/cxgbe/adapter.h
==============================================================================
--- head/sys/dev/cxgbe/adapter.h	Thu Jan 10 23:39:28 2013	(r245273)
+++ head/sys/dev/cxgbe/adapter.h	Thu Jan 10 23:56:50 2013	(r245274)
@@ -158,6 +158,16 @@ enum {
 };
 
 enum {
+	/* flags understood by begin_synchronized_op */
+	HOLD_LOCK	= (1 << 0),
+	SLEEP_OK	= (1 << 1),
+	INTR_OK		= (1 << 2),
+
+	/* flags understood by end_synchronized_op */
+	LOCK_HELD	= HOLD_LOCK,
+};
+
+enum {
 	/* adapter flags */
 	FULL_INIT_DONE	= (1 << 0),
 	FW_OK		= (1 << 1),
@@ -174,11 +184,11 @@ enum {
 	PORT_SYSCTL_CTX	= (1 << 2),
 };
 
-#define IS_DOOMED(pi)	(pi->flags & DOOMED)
-#define SET_DOOMED(pi)	do {pi->flags |= DOOMED;} while (0)
-#define IS_BUSY(sc)	(sc->flags & CXGBE_BUSY)
-#define SET_BUSY(sc)	do {sc->flags |= CXGBE_BUSY;} while (0)
-#define CLR_BUSY(sc)	do {sc->flags &= ~CXGBE_BUSY;} while (0)
+#define IS_DOOMED(pi)	((pi)->flags & DOOMED)
+#define SET_DOOMED(pi)	do {(pi)->flags |= DOOMED;} while (0)
+#define IS_BUSY(sc)	((sc)->flags & CXGBE_BUSY)
+#define SET_BUSY(sc)	do {(sc)->flags |= CXGBE_BUSY;} while (0)
+#define CLR_BUSY(sc)	do {(sc)->flags &= ~CXGBE_BUSY;} while (0)
 
 struct port_info {
 	device_t dev;
@@ -591,6 +601,11 @@ struct adapter {
 	an_handler_t an_handler __aligned(CACHE_LINE_SIZE);
 	fw_msg_handler_t fw_msg_handler[4];	/* NUM_FW6_TYPES */
 	cpl_handler_t cpl_handler[0xef];	/* NUM_CPL_CMDS */
+
+#ifdef INVARIANTS
+	const char *last_op;
+	const void *last_op_thr;
+#endif
 };
 
 #define ADAPTER_LOCK(sc)		mtx_lock(&(sc)->sc_lock)
@@ -598,6 +613,12 @@ struct adapter {
 #define ADAPTER_LOCK_ASSERT_OWNED(sc)	mtx_assert(&(sc)->sc_lock, MA_OWNED)
 #define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)
 
+/* XXX: not bulletproof, but much better than nothing */
+#define ASSERT_SYNCHRONIZED_OP(sc)	\
+    KASSERT(IS_BUSY(sc) && \
+	(mtx_owned(&(sc)->sc_lock) || sc->last_op_thr == curthread), \
+	("%s: operation not synchronized.", __func__))
+
 #define PORT_LOCK(pi)			mtx_lock(&(pi)->pi_lock)
 #define PORT_UNLOCK(pi)			mtx_unlock(&(pi)->pi_lock)
 #define PORT_LOCK_ASSERT_OWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_OWNED)
@@ -751,6 +772,8 @@ int t4_register_cpl_handler(struct adapt
 int t4_register_an_handler(struct adapter *, an_handler_t);
 int t4_register_fw_msg_handler(struct adapter *, int, fw_msg_handler_t);
 int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
+int begin_synchronized_op(struct adapter *, struct port_info *, int, char *);
+void end_synchronized_op(struct adapter *, int);
 
 /* t4_sge.c */
 void t4_sge_modload(void);
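
The HOLD_LOCK/LOCK_HELD pair above is meant for callers that cannot sleep or
that want the adapter lock held across the operation (the SIOCADDMULTI/
SIOCDELMULTI path in the t4_main.c diff below is one such caller).  A
hypothetical sketch, using only the names declared in this header:

	static int
	t4_example_nosleep_op(struct adapter *sc, struct port_info *pi)
	{
		int rc;

		/*
		 * No SLEEP_OK: fail with EBUSY instead of sleeping if another
		 * slow operation is in progress.  HOLD_LOCK: on success the
		 * adapter lock is still held when this returns.
		 */
		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4exnp");
		if (rc)
			return (rc);

		/* ... work that needs the adapter lock and must not sleep ... */

		/* LOCK_HELD tells the helper that the lock is already held. */
		end_synchronized_op(sc, LOCK_HELD);
		return (0);
	}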

Modified: head/sys/dev/cxgbe/offload.h
==============================================================================
--- head/sys/dev/cxgbe/offload.h	Thu Jan 10 23:39:28 2013	(r245273)
+++ head/sys/dev/cxgbe/offload.h	Thu Jan 10 23:56:50 2013	(r245274)
@@ -75,29 +75,27 @@ union aopen_entry {
  */
 struct tid_info {
 	void **tid_tab;
-	unsigned int ntids;
+	u_int ntids;
+	u_int tids_in_use;
 
+	struct mtx stid_lock __aligned(CACHE_LINE_SIZE);
 	union serv_entry *stid_tab;
-	unsigned int nstids;
-	unsigned int stid_base;
+	u_int nstids;
+	u_int stid_base;
+	union serv_entry *sfree;
+	u_int stids_in_use;
 
+	struct mtx atid_lock __aligned(CACHE_LINE_SIZE);
 	union aopen_entry *atid_tab;
-	unsigned int natids;
-
-	struct filter_entry *ftid_tab;
-	unsigned int nftids;
-	unsigned int ftid_base;
-	unsigned int ftids_in_use;
-
-	struct mtx atid_lock;
+	u_int natids;
 	union aopen_entry *afree;
-	unsigned int atids_in_use;
+	u_int atids_in_use;
 
-	struct mtx stid_lock;
-	union serv_entry *sfree;
-	unsigned int stids_in_use;
-
-	unsigned int tids_in_use;
+	struct mtx ftid_lock __aligned(CACHE_LINE_SIZE);
+	struct filter_entry *ftid_tab;
+	u_int nftids;
+	u_int ftid_base;
+	u_int ftids_in_use;
 };
 
 struct t4_range {

Modified: head/sys/dev/cxgbe/t4_main.c
==============================================================================
--- head/sys/dev/cxgbe/t4_main.c	Thu Jan 10 23:39:28 2013	(r245273)
+++ head/sys/dev/cxgbe/t4_main.c	Thu Jan 10 23:56:50 2013	(r245274)
@@ -284,9 +284,7 @@ static int get_params__post_init(struct 
 static void t4_set_desc(struct adapter *);
 static void build_medialist(struct port_info *);
 static int update_mac_settings(struct port_info *, int);
-static int cxgbe_init_locked(struct port_info *);
 static int cxgbe_init_synchronized(struct port_info *);
-static int cxgbe_uninit_locked(struct port_info *);
 static int cxgbe_uninit_synchronized(struct port_info *);
 static int setup_intr_handlers(struct adapter *);
 static int adapter_full_init(struct adapter *);
@@ -348,6 +346,7 @@ static void clear_filter(struct filter_e
 static int set_filter_wr(struct adapter *, int);
 static int del_filter_wr(struct adapter *, int);
 static int get_sge_context(struct adapter *, struct t4_sge_context *);
+static int load_fw(struct adapter *, struct t4_data *);
 static int read_card_mem(struct adapter *, struct t4_mem_range *);
 static int read_i2c(struct adapter *, struct t4_i2c_data *);
 #ifdef TCP_OFFLOAD
@@ -820,6 +819,8 @@ t4_detach(device_t dev)
 		mtx_destroy(&sc->sc_lock);
 	}
 
+	if (mtx_initialized(&sc->tids.ftid_lock))
+		mtx_destroy(&sc->tids.ftid_lock);
 	if (mtx_initialized(&sc->sfl_lock))
 		mtx_destroy(&sc->sfl_lock);
 
@@ -918,6 +919,10 @@ cxgbe_detach(device_t dev)
 	while (IS_BUSY(sc))
 		mtx_sleep(&sc->flags, &sc->sc_lock, 0, "t4detach", 0);
 	SET_BUSY(sc);
+#ifdef INVARIANTS
+	sc->last_op = "t4detach";
+	sc->last_op_thr = curthread;
+#endif
 	ADAPTER_UNLOCK(sc);
 
 	if (pi->vlan_c)
@@ -939,7 +944,7 @@ cxgbe_detach(device_t dev)
 
 	ADAPTER_LOCK(sc);
 	CLR_BUSY(sc);
-	wakeup_one(&sc->flags);
+	wakeup(&sc->flags);
 	ADAPTER_UNLOCK(sc);
 
 	return (0);
@@ -951,9 +956,10 @@ cxgbe_init(void *arg)
 	struct port_info *pi = arg;
 	struct adapter *sc = pi->adapter;
 
-	ADAPTER_LOCK(sc);
-	cxgbe_init_locked(pi); /* releases adapter lock */
-	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
+	if (begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4init") != 0)
+		return;
+	cxgbe_init_synchronized(pi);
+	end_synchronized_op(sc, 0);
 }
 
 static int
@@ -967,81 +973,56 @@ cxgbe_ioctl(struct ifnet *ifp, unsigned 
 
 	switch (cmd) {
 	case SIOCSIFMTU:
-		ADAPTER_LOCK(sc);
-		rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
-		if (rc) {
-fail:
-			ADAPTER_UNLOCK(sc);
-			return (rc);
-		}
-
 		mtu = ifr->ifr_mtu;
-		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) {
-			rc = EINVAL;
-		} else {
-			ifp->if_mtu = mtu;
-			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-				t4_update_fl_bufsize(ifp);
-				PORT_LOCK(pi);
-				rc = update_mac_settings(pi, XGMAC_MTU);
-				PORT_UNLOCK(pi);
-			}
+		if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO))
+			return (EINVAL);
+
+		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4mtu");
+		if (rc)
+			return (rc);
+		ifp->if_mtu = mtu;
+		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+			t4_update_fl_bufsize(ifp);
+			rc = update_mac_settings(pi, XGMAC_MTU);
 		}
-		ADAPTER_UNLOCK(sc);
+		end_synchronized_op(sc, 0);
 		break;
 
 	case SIOCSIFFLAGS:
-		ADAPTER_LOCK(sc);
-		if (IS_DOOMED(pi)) {
-			rc = ENXIO;
-			goto fail;
-		}
+		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4flg");
+		if (rc)
+			return (rc);
+
 		if (ifp->if_flags & IFF_UP) {
 			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
 				flags = pi->if_flags;
 				if ((ifp->if_flags ^ flags) &
 				    (IFF_PROMISC | IFF_ALLMULTI)) {
-					if (IS_BUSY(sc)) {
-						rc = EBUSY;
-						goto fail;
-					}
-					PORT_LOCK(pi);
 					rc = update_mac_settings(pi,
 					    XGMAC_PROMISC | XGMAC_ALLMULTI);
-					PORT_UNLOCK(pi);
 				}
-				ADAPTER_UNLOCK(sc);
 			} else
-				rc = cxgbe_init_locked(pi);
+				rc = cxgbe_init_synchronized(pi);
 			pi->if_flags = ifp->if_flags;
 		} else if (ifp->if_drv_flags & IFF_DRV_RUNNING)
-			rc = cxgbe_uninit_locked(pi);
-		else
-			ADAPTER_UNLOCK(sc);
-
-		ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
+			rc = cxgbe_uninit_synchronized(pi);
+		end_synchronized_op(sc, 0);
 		break;
 
 	case SIOCADDMULTI:	
-	case SIOCDELMULTI: /* these two can be called with a mutex held :-( */
-		ADAPTER_LOCK(sc);
-		rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
+	case SIOCDELMULTI: /* these two are called with a mutex held :-( */
+		rc = begin_synchronized_op(sc, pi, HOLD_LOCK, "t4multi");
 		if (rc)
-			goto fail;
-
-		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-			PORT_LOCK(pi);
+			return (rc);
+		if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 			rc = update_mac_settings(pi, XGMAC_MCADDRS);
-			PORT_UNLOCK(pi);
-		}
-		ADAPTER_UNLOCK(sc);
+		end_synchronized_op(sc, LOCK_HELD);
 		break;
 
 	case SIOCSIFCAP:
-		ADAPTER_LOCK(sc);
-		rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
+		rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4cap");
 		if (rc)
-			goto fail;
+			return (rc);
 
 		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
 		if (mask & IFCAP_TXCSUM) {
@@ -1122,11 +1103,8 @@ fail:
 #endif
 		if (mask & IFCAP_VLAN_HWTAGGING) {
 			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
-			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
-				PORT_LOCK(pi);
+			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
 				rc = update_mac_settings(pi, XGMAC_VLANEX);
-				PORT_UNLOCK(pi);
-			}
 		}
 		if (mask & IFCAP_VLAN_MTU) {
 			ifp->if_capenable ^= IFCAP_VLAN_MTU;
@@ -1141,7 +1119,8 @@ fail:
 #ifdef VLAN_CAPABILITIES
 		VLAN_CAPABILITIES(ifp);
 #endif
-		ADAPTER_UNLOCK(sc);
+fail:
+		end_synchronized_op(sc, 0);
 		break;
 
 	case SIOCSIFMEDIA:
@@ -2111,7 +2090,7 @@ update_mac_settings(struct port_info *pi
 	struct adapter *sc = pi->adapter;
 	int mtu = -1, promisc = -1, allmulti = -1, vlanex = -1;
 
-	PORT_LOCK_ASSERT_OWNED(pi);
+	ASSERT_SYNCHRONIZED_OP(sc);
 	KASSERT(flags, ("%s: not told what to update.", __func__));
 
 	if (flags & XGMAC_MTU)
@@ -2213,39 +2192,74 @@ mcfail:
 	return (rc);
 }
 
-static int
-cxgbe_init_locked(struct port_info *pi)
+int
+begin_synchronized_op(struct adapter *sc, struct port_info *pi, int flags,
+    char *wmesg)
 {
-	struct adapter *sc = pi->adapter;
-	int rc = 0;
+	int rc, pri;
+
+#ifdef WITNESS
+	/* the caller thinks it's ok to sleep, but is it really? */
+	if (flags & SLEEP_OK)
+		pause("t4slptst", 1);
+#endif
+
+	if (flags & INTR_OK)
+		pri = PCATCH;
+	else
+		pri = 0;
+
+	ADAPTER_LOCK(sc);
+	for (;;) {
+
+		if (pi && IS_DOOMED(pi)) {
+			rc = ENXIO;
+			goto done;
+		}
+
+		if (!IS_BUSY(sc)) {
+			rc = 0;
+			break;
+		}
 
-	ADAPTER_LOCK_ASSERT_OWNED(sc);
+		if (!(flags & SLEEP_OK)) {
+			rc = EBUSY;
+			goto done;
+		}
 
-	while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
-		if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4init", 0)) {
+		if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
 			rc = EINTR;
 			goto done;
 		}
 	}
-	if (IS_DOOMED(pi)) {
-		rc = ENXIO;
-		goto done;
-	}
-	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
 
-	/* Give up the adapter lock, port init code can sleep. */
+	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
 	SET_BUSY(sc);
-	ADAPTER_UNLOCK(sc);
-
-	rc = cxgbe_init_synchronized(pi);
+#ifdef INVARIANTS
+	sc->last_op = wmesg;
+	sc->last_op_thr = curthread;
+#endif
 
 done:
-	ADAPTER_LOCK(sc);
+	if (!(flags & HOLD_LOCK) || rc)
+		ADAPTER_UNLOCK(sc);
+
+	return (rc);
+}
+
+void
+end_synchronized_op(struct adapter *sc, int flags)
+{
+
+	if (flags & LOCK_HELD)
+		ADAPTER_LOCK_ASSERT_OWNED(sc);
+	else
+		ADAPTER_LOCK(sc);
+
 	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
 	CLR_BUSY(sc);
-	wakeup_one(&sc->flags);
+	wakeup(&sc->flags);
 	ADAPTER_UNLOCK(sc);
-	return (rc);
 }
 
 static int
@@ -2255,7 +2269,7 @@ cxgbe_init_synchronized(struct port_info
 	struct ifnet *ifp = pi->ifp;
 	int rc = 0;
 
-	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
+	ASSERT_SYNCHRONIZED_OP(sc);
 
 	if (isset(&sc->open_device_map, pi->port_id)) {
 		KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING,
@@ -2271,9 +2285,7 @@ cxgbe_init_synchronized(struct port_info
 	    ((rc = port_full_init(pi)) != 0))
 		return (rc); /* error message displayed already */
 
-	PORT_LOCK(pi);
 	rc = update_mac_settings(pi, XGMAC_ALL);
-	PORT_UNLOCK(pi);
 	if (rc)
 		goto done;	/* error message displayed already */
 
@@ -2291,7 +2303,9 @@ cxgbe_init_synchronized(struct port_info
 
 	/* all ok */
 	setbit(&sc->open_device_map, pi->port_id);
+	PORT_LOCK(pi);
 	ifp->if_drv_flags |= IFF_DRV_RUNNING;
+	PORT_UNLOCK(pi);
 
 	callout_reset(&pi->tick, hz, cxgbe_tick, pi);
 done:
@@ -2301,39 +2315,6 @@ done:
 	return (rc);
 }
 
-static int
-cxgbe_uninit_locked(struct port_info *pi)
-{
-	struct adapter *sc = pi->adapter;
-	int rc;
-
-	ADAPTER_LOCK_ASSERT_OWNED(sc);
-
-	while (!IS_DOOMED(pi) && IS_BUSY(sc)) {
-		if (mtx_sleep(&sc->flags, &sc->sc_lock, PCATCH, "t4uninit", 0)) {
-			rc = EINTR;
-			goto done;
-		}
-	}
-	if (IS_DOOMED(pi)) {
-		rc = ENXIO;
-		goto done;
-	}
-	KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__));
-	SET_BUSY(sc);
-	ADAPTER_UNLOCK(sc);
-
-	rc = cxgbe_uninit_synchronized(pi);
-
-	ADAPTER_LOCK(sc);
-	KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__));
-	CLR_BUSY(sc);
-	wakeup_one(&sc->flags);
-done:
-	ADAPTER_UNLOCK(sc);
-	return (rc);
-}
-
 /*
  * Idempotent.
  */
@@ -2344,7 +2325,7 @@ cxgbe_uninit_synchronized(struct port_in
 	struct ifnet *ifp = pi->ifp;
 	int rc;
 
-	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
+	ASSERT_SYNCHRONIZED_OP(sc);
 
 	/*
 	 * Disable the VI so that all its data in either direction is discarded
@@ -2360,7 +2341,9 @@ cxgbe_uninit_synchronized(struct port_in
 	}
 
 	clrbit(&sc->open_device_map, pi->port_id);
+	PORT_LOCK(pi);
 	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+	PORT_UNLOCK(pi);
 
 	pi->link_cfg.link_ok = 0;
 	pi->link_cfg.speed = 0;
@@ -2539,7 +2522,7 @@ port_full_init(struct port_info *pi)
 	struct sge_rxq *rxq;
 	int rc, i;
 
-	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
+	ASSERT_SYNCHRONIZED_OP(sc);
 	KASSERT((pi->flags & PORT_INIT_DONE) == 0,
 	    ("%s: PORT_INIT_DONE already", __func__));
 
@@ -3524,6 +3507,8 @@ sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_AR
 	struct port_info *pi = arg1;
 	struct adapter *sc = pi->adapter;
 	int idx, rc, i;
+	struct sge_rxq *rxq;
+	uint8_t v;
 
 	idx = pi->tmr_idx;
 
@@ -3534,25 +3519,23 @@ sysctl_holdoff_tmr_idx(SYSCTL_HANDLER_AR
 	if (idx < 0 || idx >= SGE_NTIMERS)
 		return (EINVAL);
 
-	ADAPTER_LOCK(sc);
-	rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
-	if (rc == 0) {
-		struct sge_rxq *rxq;
-		uint8_t v;
+	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
+	    "t4tmr");
+	if (rc)
+		return (rc);
 
-		v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
-		for_each_rxq(pi, i, rxq) {
+	v = V_QINTR_TIMER_IDX(idx) | V_QINTR_CNT_EN(pi->pktc_idx != -1);
+	for_each_rxq(pi, i, rxq) {
 #ifdef atomic_store_rel_8
-			atomic_store_rel_8(&rxq->iq.intr_params, v);
+		atomic_store_rel_8(&rxq->iq.intr_params, v);
 #else
-			rxq->iq.intr_params = v;
+		rxq->iq.intr_params = v;
 #endif
-		}
-		pi->tmr_idx = idx;
 	}
+	pi->tmr_idx = idx;
 
-	ADAPTER_UNLOCK(sc);
-	return (rc);
+	end_synchronized_op(sc, LOCK_HELD);
+	return (0);
 }
 
 static int
@@ -3571,15 +3554,17 @@ sysctl_holdoff_pktc_idx(SYSCTL_HANDLER_A
 	if (idx < -1 || idx >= SGE_NCOUNTERS)
 		return (EINVAL);
 
-	ADAPTER_LOCK(sc);
-	rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
-	if (rc == 0 && pi->flags & PORT_INIT_DONE)
-		rc = EBUSY; /* cannot be changed once the queues are created */
+	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
+	    "t4pktc");
+	if (rc)
+		return (rc);
 
-	if (rc == 0)
+	if (pi->flags & PORT_INIT_DONE)
+		rc = EBUSY; /* cannot be changed once the queues are created */
+	else
 		pi->pktc_idx = idx;
 
-	ADAPTER_UNLOCK(sc);
+	end_synchronized_op(sc, LOCK_HELD);
 	return (rc);
 }
 
@@ -3599,15 +3584,17 @@ sysctl_qsize_rxq(SYSCTL_HANDLER_ARGS)
 	if (qsize < 128 || (qsize & 7))
 		return (EINVAL);
 
-	ADAPTER_LOCK(sc);
-	rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
-	if (rc == 0 && pi->flags & PORT_INIT_DONE)
-		rc = EBUSY; /* cannot be changed once the queues are created */
+	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
+	    "t4rxqs");
+	if (rc)
+		return (rc);
 
-	if (rc == 0)
+	if (pi->flags & PORT_INIT_DONE)
+		rc = EBUSY; /* cannot be changed once the queues are created */
+	else
 		pi->qsize_rxq = qsize;
 
-	ADAPTER_UNLOCK(sc);
+	end_synchronized_op(sc, LOCK_HELD);
 	return (rc);
 }
 
@@ -3624,18 +3611,21 @@ sysctl_qsize_txq(SYSCTL_HANDLER_ARGS)
 	if (rc != 0 || req->newptr == NULL)
 		return (rc);
 
-	if (qsize < 128)
+	/* bufring size must be powerof2 */
+	if (qsize < 128 || !powerof2(qsize))
 		return (EINVAL);
 
-	ADAPTER_LOCK(sc);
-	rc = IS_DOOMED(pi) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0);
-	if (rc == 0 && pi->flags & PORT_INIT_DONE)
-		rc = EBUSY; /* cannot be changed once the queues are created */
+	rc = begin_synchronized_op(sc, pi, HOLD_LOCK | SLEEP_OK | INTR_OK,
+	    "t4txqs");
+	if (rc)
+		return (rc);
 
-	if (rc == 0)
+	if (pi->flags & PORT_INIT_DONE)
+		rc = EBUSY; /* cannot be changed once the queues are created */
+	else
 		pi->qsize_txq = qsize;
 
-	ADAPTER_UNLOCK(sc);
+	end_synchronized_op(sc, LOCK_HELD);
 	return (rc);
 }
 
@@ -4674,8 +4664,14 @@ fspec_to_fconf(struct t4_filter_specific
 static int
 get_filter_mode(struct adapter *sc, uint32_t *mode)
 {
+	int rc;
 	uint32_t fconf;
 
+	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
+	    "t4getfm");
+	if (rc)
+		return (rc);
+
 	t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &fconf, 1,
 	    A_TP_VLAN_PRI_MAP);
 
@@ -4687,6 +4683,7 @@ get_filter_mode(struct adapter *sc, uint
 
 	*mode = fconf_to_mode(sc->filter_mode);
 
+	end_synchronized_op(sc, LOCK_HELD);
 	return (0);
 }
 
@@ -4698,11 +4695,10 @@ set_filter_mode(struct adapter *sc, uint
 
 	fconf = mode_to_fconf(mode);
 
-	ADAPTER_LOCK(sc);
-	if (IS_BUSY(sc)) {
-		rc = EAGAIN;
-		goto done;
-	}
+	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
+	    "t4setfm");
+	if (rc)
+		return (rc);
 
 	if (sc->tids.ftids_in_use > 0) {
 		rc = EBUSY;
@@ -4725,7 +4721,7 @@ set_filter_mode(struct adapter *sc, uint
 #endif
 
 done:
-	ADAPTER_UNLOCK(sc);
+	end_synchronized_op(sc, LOCK_HELD);
 	return (rc);
 }
 
@@ -4746,18 +4742,18 @@ get_filter_hits(struct adapter *sc, uint
 static int
 get_filter(struct adapter *sc, struct t4_filter *t)
 {
-	int i, nfilters = sc->tids.nftids;
+	int i, rc, nfilters = sc->tids.nftids;
 	struct filter_entry *f;
 
-	ADAPTER_LOCK_ASSERT_OWNED(sc);
-
-	if (IS_BUSY(sc))
-		return (EAGAIN);
+	rc = begin_synchronized_op(sc, NULL, HOLD_LOCK | SLEEP_OK | INTR_OK,
+	    "t4getf");
+	if (rc)
+		return (rc);
 
 	if (sc->tids.ftids_in_use == 0 || sc->tids.ftid_tab == NULL ||
 	    t->idx >= nfilters) {
 		t->idx = 0xffffffff;
-		return (0);
+		goto done;
 	}
 
 	f = &sc->tids.ftid_tab[t->idx];
@@ -4772,11 +4768,13 @@ get_filter(struct adapter *sc, struct t4
 				t->hits = UINT64_MAX;
 			t->fs = f->fs;
 
-			return (0);
+			goto done;
 		}
 	}
 
 	t->idx = 0xffffffff;
+done:
+	end_synchronized_op(sc, LOCK_HELD);
 	return (0);
 }
 
@@ -4785,40 +4783,58 @@ set_filter(struct adapter *sc, struct t4
 {
 	unsigned int nfilters, nports;
 	struct filter_entry *f;
-	int i;
+	int i, rc;
 
-	ADAPTER_LOCK_ASSERT_OWNED(sc);
+	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
+	if (rc)
+		return (rc);
 
 	nfilters = sc->tids.nftids;
 	nports = sc->params.nports;
 
-	if (nfilters == 0)
-		return (ENOTSUP);
+	if (nfilters == 0) {
+		rc = ENOTSUP;
+		goto done;
+	}
 
-	if (!(sc->flags & FULL_INIT_DONE))
-		return (EAGAIN);
+	if (!(sc->flags & FULL_INIT_DONE)) {
+		rc = EAGAIN;
+		goto done;
+	}
 
-	if (t->idx >= nfilters)
-		return (EINVAL);
+	if (t->idx >= nfilters) {
+		rc = EINVAL;
+		goto done;
+	}
 
 	/* Validate against the global filter mode */
-	if ((sc->filter_mode | fspec_to_fconf(&t->fs)) != sc->filter_mode)
-		return (E2BIG);
+	if ((sc->filter_mode | fspec_to_fconf(&t->fs)) != sc->filter_mode) {
+		rc = E2BIG;
+		goto done;
+	}
 
-	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports)
-		return (EINVAL);
+	if (t->fs.action == FILTER_SWITCH && t->fs.eport >= nports) {
+		rc = EINVAL;
+		goto done;
+	}
 
-	if (t->fs.val.iport >= nports)
-		return (EINVAL);
+	if (t->fs.val.iport >= nports) {
+		rc = EINVAL;
+		goto done;
+	}
 
 	/* Can't specify an iq if not steering to it */
-	if (!t->fs.dirsteer && t->fs.iq)
-		return (EINVAL);
+	if (!t->fs.dirsteer && t->fs.iq) {
+		rc = EINVAL;
+		goto done;
+	}
 
 	/* IPv6 filter idx must be 4 aligned */
 	if (t->fs.type == 1 &&
-	    ((t->idx & 0x3) || t->idx + 4 >= nfilters))
-		return (EINVAL);
+	    ((t->idx & 0x3) || t->idx + 4 >= nfilters)) {
+		rc = EINVAL;
+		goto done;
+	}
 
 	if (sc->tids.ftid_tab == NULL) {
 		KASSERT(sc->tids.ftids_in_use == 0,
@@ -4827,17 +4843,24 @@ set_filter(struct adapter *sc, struct t4
 
 		sc->tids.ftid_tab = malloc(sizeof (struct filter_entry) *
 		    nfilters, M_CXGBE, M_NOWAIT | M_ZERO);
-		if (sc->tids.ftid_tab == NULL)
-			return (ENOMEM);
+		if (sc->tids.ftid_tab == NULL) {
+			rc = ENOMEM;
+			goto done;
+		}
+		mtx_init(&sc->tids.ftid_lock, "T4 filters", 0, MTX_DEF);
 	}
 
 	for (i = 0; i < 4; i++) {
 		f = &sc->tids.ftid_tab[t->idx + i];
 
-		if (f->pending || f->valid)
-			return (EBUSY);
-		if (f->locked)
-			return (EPERM);
+		if (f->pending || f->valid) {
+			rc = EBUSY;
+			goto done;
+		}
+		if (f->locked) {
+			rc = EPERM;
+			goto done;
+		}
 
 		if (t->fs.type == 0)
 			break;
@@ -4846,7 +4869,27 @@ set_filter(struct adapter *sc, struct t4
 	f = &sc->tids.ftid_tab[t->idx];
 	f->fs = t->fs;
 
-	return set_filter_wr(sc, t->idx);
+	rc = set_filter_wr(sc, t->idx);
+done:
+	end_synchronized_op(sc, 0);
+
+	if (rc == 0) {
+		mtx_lock(&sc->tids.ftid_lock);
+		for (;;) {
+			if (f->pending == 0) {
+				rc = f->valid ? 0 : EIO;
+				break;
+			}
+
+			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
+			    PCATCH, "t4setfw", 0)) {
+				rc = EINPROGRESS;
+				break;
+			}
+		}
+		mtx_unlock(&sc->tids.ftid_lock);
+	}
+	return (rc);
 }
 
 static int
@@ -4854,37 +4897,67 @@ del_filter(struct adapter *sc, struct t4
 {
 	unsigned int nfilters;
 	struct filter_entry *f;
+	int rc;
 
-	ADAPTER_LOCK_ASSERT_OWNED(sc);
-
-	if (IS_BUSY(sc))
-		return (EAGAIN);
+	rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4delf");
+	if (rc)
+		return (rc);
 
 	nfilters = sc->tids.nftids;
 
-	if (nfilters == 0)
-		return (ENOTSUP);
+	if (nfilters == 0) {
+		rc = ENOTSUP;
+		goto done;
+	}
 
 	if (sc->tids.ftid_tab == NULL || sc->tids.ftids_in_use == 0 ||
-	    t->idx >= nfilters)
-		return (EINVAL);
+	    t->idx >= nfilters) {
+		rc = EINVAL;
+		goto done;
+	}
 
-	if (!(sc->flags & FULL_INIT_DONE))
-		return (EAGAIN);
+	if (!(sc->flags & FULL_INIT_DONE)) {
+		rc = EAGAIN;
+		goto done;
+	}
 
 	f = &sc->tids.ftid_tab[t->idx];
 
-	if (f->pending)
-		return (EBUSY);
-	if (f->locked)
-		return (EPERM);
+	if (f->pending) {
+		rc = EBUSY;
+		goto done;
+	}
+	if (f->locked) {
+		rc = EPERM;
+		goto done;
+	}
 
 	if (f->valid) {
 		t->fs = f->fs;	/* extra info for the caller */
-		return del_filter_wr(sc, t->idx);
+		rc = del_filter_wr(sc, t->idx);
 	}
 
-	return (0);
+done:
+	end_synchronized_op(sc, 0);
+
+	if (rc == 0) {
+		mtx_lock(&sc->tids.ftid_lock);
+		for (;;) {
+			if (f->pending == 0) {
+				rc = f->valid ? EIO : 0;
+				break;
+			}
+
+			if (mtx_sleep(&sc->tids.ftid_tab, &sc->tids.ftid_lock,
+			    PCATCH, "t4delfw", 0)) {
+				rc = EINPROGRESS;
+				break;
+			}
+		}
+		mtx_unlock(&sc->tids.ftid_lock);
+	}
+
+	return (rc);
 }
 
 static void
@@ -4904,7 +4977,7 @@ set_filter_wr(struct adapter *sc, int fi
 	struct fw_filter_wr *fwr;
 	unsigned int ftid;
 
-	ADAPTER_LOCK_ASSERT_OWNED(sc);
+	ASSERT_SYNCHRONIZED_OP(sc);
 
 	if (f->fs.newdmac || f->fs.newvlan) {
 		/* This filter needs an L2T entry; allocate one. */
@@ -5007,8 +5080,6 @@ del_filter_wr(struct adapter *sc, int fi
 	struct fw_filter_wr *fwr;
 	unsigned int ftid;
 
-	ADAPTER_LOCK_ASSERT_OWNED(sc);
-
 	ftid = sc->tids.ftid_base + fidx;
 
 	wr = alloc_wrqe(sizeof(*fwr), &sc->sge.mgmtq);
@@ -5039,8 +5110,10 @@ t4_filter_rpl(struct sge_iq *iq, const s
 		unsigned int rc = G_COOKIE(rpl->cookie);
 		struct filter_entry *f = &sc->tids.ftid_tab[idx];
 
-		ADAPTER_LOCK(sc);
+		mtx_lock(&sc->tids.ftid_lock);
 		if (rc == FW_FILTER_WR_FLT_ADDED) {
+			KASSERT(f->pending, ("%s: filter[%u] isn't pending.",
+			    __func__, idx));
 			f->smtidx = (be64toh(rpl->oldval) >> 24) & 0xff;
 			f->pending = 0;  /* asynchronous setup completed */
 			f->valid = 1;
@@ -5055,7 +5128,8 @@ t4_filter_rpl(struct sge_iq *iq, const s
 			clear_filter(f);
 			sc->tids.ftids_in_use--;
 		}
-		ADAPTER_UNLOCK(sc);
+		wakeup(&sc->tids.ftid_tab);
+		mtx_unlock(&sc->tids.ftid_lock);
 	}
 
 	return (0);
@@ -5064,29 +5138,63 @@ t4_filter_rpl(struct sge_iq *iq, const s
 static int
 get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
 {
-	int rc = EINVAL;
+	int rc;
 
 	if (cntxt->cid > M_CTXTQID)
-		return (rc);
+		return (EINVAL);
 
 	if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
 	    cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
-		return (rc);
+		return (EINVAL);
 
 	if (sc->flags & FW_OK) {
-		ADAPTER_LOCK(sc);	/* Avoid parallel t4_wr_mbox */

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

