PERFORCE change 137288 for review

Steve Wise swise at FreeBSD.org
Mon Mar 10 03:19:49 UTC 2008


http://perforce.freebsd.org/chv.cgi?CH=137288

Change 137288 by swise at swise:vic10:iwarp on 2008/03/10 03:19:01

	Move back to MTX_DEF locking.
	
	This gets us part-way there.  WITNESS still complains about
	locking hierarchy and other issues.
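
	For reference, a minimal sketch of what the conversion means
	(the names below are illustrative, not from this change): an
	MTX_DEF mutex may be held across msleep(9), which drops the
	lock for the duration of the sleep, while an MTX_SPIN mutex
	never sleeps and may only pair with msleep_spin(9).
	
		#include <sys/param.h>
		#include <sys/lock.h>
		#include <sys/mutex.h>
	
		static struct mtx example_lock;
	
		static void
		example_init(void)
		{
			/* Was: mtx_init(..., MTX_DUPOK|MTX_SPIN); */
			mtx_init(&example_lock, "example mtx", NULL, MTX_DEF);
		}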

Affected files ...

.. //depot/projects/iwarp/sys/contrib/rdma/krping/krping.c#12 edit
.. //depot/projects/iwarp/sys/contrib/rdma/rdma_addr.c#7 edit
.. //depot/projects/iwarp/sys/contrib/rdma/rdma_cache.c#4 edit
.. //depot/projects/iwarp/sys/contrib/rdma/rdma_cma.c#12 edit
.. //depot/projects/iwarp/sys/contrib/rdma/rdma_device.c#6 edit
.. //depot/projects/iwarp/sys/contrib/rdma/rdma_iwcm.c#10 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb.c#3 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb.h#2 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_cm.c#7 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_cm.h#2 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_cq.c#2 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_ev.c#2 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_hal.c#3 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_hal.h#3 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_mem.c#2 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.c#2 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_provider.h#2 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_qp.c#4 edit
.. //depot/projects/iwarp/sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_resource.c#2 edit
.. //depot/projects/iwarp/sys/kern/subr_blist.c#2 edit
.. //depot/projects/iwarp/sys/modules/cxgb/iw_cxgb/Makefile#4 edit
.. //depot/projects/iwarp/sys/sys/blist.h#2 edit
.. //depot/projects/iwarp/sys/sys/linux_compat.h#12 edit
.. //depot/projects/iwarp/sys/sys/linux_pci.h#4 edit
.. //depot/projects/iwarp/sys/vm/swap_pager.c#5 edit

Differences ...

==== //depot/projects/iwarp/sys/contrib/rdma/krping/krping.c#12 (text+ko) ====

@@ -214,16 +214,16 @@
 
 static void krping_wait(struct krping_cb *cb, int state)
 {
-	mtx_lock_spin(&cb->lock);
+	mtx_lock(&cb->lock);
 	while (cb->state < state) {
-		msleep_spin(cb, &cb->lock, "krping", hz);
+		msleep(cb, &cb->lock, 0, "krping", hz);
 		if (SIGPENDING(curthread)) {
 			cb->state = ERROR;
 			DEBUG_LOG(PFX "interrupt!\n");
 			break;
 		}
 	}
-	mtx_unlock_spin(&cb->lock);
+	mtx_unlock(&cb->lock);
 }
 
 static int krping_cma_event_handler(struct rdma_cm_id *cma_id,
@@ -235,7 +235,7 @@
 	DEBUG_LOG(PFX "cma_event type %d cma_id %p (%s)\n", event->event, cma_id,
 		  (cma_id == cb->cm_id) ? "parent" : "child");
 
-	mtx_lock_spin(&cb->lock);
+	mtx_lock(&cb->lock);
 	switch (event->event) {
 	case RDMA_CM_EVENT_ADDR_RESOLVED:
 		cb->state = ADDR_RESOLVED;
@@ -293,7 +293,7 @@
 		wakeup(cb);
 		break;
 	}
-	mtx_unlock_spin(&cb->lock);
+	mtx_unlock(&cb->lock);
 	return 0;
 }
 
@@ -343,11 +343,11 @@
 	struct ib_recv_wr *bad_wr;
 	int ret;
 
-	mtx_lock_spin(&cb->lock);
+	mtx_lock(&cb->lock);
 	BUG_ON(cb->cq != cq);
 	if (cb->state == ERROR) {
 		DEBUG_LOG(PFX "cq completion in ERROR state\n");
-		mtx_unlock_spin(&cb->lock);
+		mtx_unlock(&cb->lock);
 		return;
 	}
 	if (!cb->wlat && !cb->rlat && !cb->bw)
@@ -414,12 +414,12 @@
 		DEBUG_LOG(PFX "poll error %d\n", ret);
 		goto error;
 	}
-	mtx_unlock_spin(&cb->lock);
+	mtx_unlock(&cb->lock);
 	return;
 error:
 	cb->state = ERROR;
 	wakeup(cb);
-	mtx_unlock_spin(&cb->lock);
+	mtx_unlock(&cb->lock);
 }
 
 static int krping_accept(struct krping_cb *cb)
@@ -1719,7 +1719,7 @@
 	cb->state = IDLE;
 	cb->size = 64;
 	cb->txdepth = RPING_SQ_DEPTH;
-	mtx_init(&cb->lock, "krping mtx", NULL, MTX_DUPOK|MTX_SPIN);
+	mtx_init(&cb->lock, "krping mtx", NULL, MTX_DUPOK|MTX_DEF);
 
 	while ((op = krping_getopt("krping", &cmd, krping_opts, NULL, &optarg,
 			      &optint)) != 0) {
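
Condensed sketch of the resulting wait pattern (from the krping_wait()
hunk above): msleep(9) atomically releases the DEF mutex while asleep
and reacquires it before returning, and unlike msleep_spin(9) it takes
a priority argument (0 here).

	mtx_lock(&cb->lock);
	while (cb->state < state)
		msleep(cb, &cb->lock, 0, "krping", hz);	/* woken by wakeup(cb) */
	mtx_unlock(&cb->lock);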

==== //depot/projects/iwarp/sys/contrib/rdma/rdma_addr.c#7 (text+ko) ====

@@ -89,7 +89,7 @@
 
 static void addr_timeout(void *arg)
 {
-	taskqueue_enqueue(addr_taskq, &addr_task);
+	taskqueue_enqueue_fast(addr_taskq, &addr_task);
 }
 
 void rdma_addr_register_client(struct rdma_addr_client *client)
@@ -134,22 +134,13 @@
 
 int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 {
-	int ret;
-	struct route iproute;
-	struct sockaddr_in *dst = (struct sockaddr_in *)&iproute.ro_dst;
+	struct ifaddr *ifa;
 
-	bzero(&iproute, sizeof iproute);
-	dst->sin_family = AF_INET;
-	dst->sin_len = sizeof dst;
-	dst->sin_addr.s_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
-
-	rtalloc(&iproute);
-	if (iproute.ro_rt == NULL);
+	printf("addr->sa_len %d sa_family %d\n", addr->sa_len, addr->sa_family);
+	ifa = ifa_ifwithaddr(addr);
+	if (!ifa)
 		return (EADDRNOTAVAIL);
-
-	ret = rdma_copy_addr(dev_addr, iproute.ro_rt->rt_ifp, NULL);
-	RTFREE(iproute.ro_rt);
-	return ret;
+	return rdma_copy_addr(dev_addr, ifa->ifa_ifp, NULL);
 }
 EXPORT_SYMBOL(rdma_translate_ip);
 
@@ -286,7 +277,7 @@
 	struct addr_req *req;
 	int ret = 0;
 
-	req = malloc(sizeof *req, M_DEVBUF, M_WAITOK);
+	req = malloc(sizeof *req, M_DEVBUF, M_NOWAIT);
 	if (!req)
 		return (ENOMEM);
 	memset(req, 0, sizeof *req);
@@ -355,7 +346,7 @@
 	struct sockaddr *sa)
 {
 		callout_stop(&addr_ch);
-		taskqueue_enqueue(addr_taskq, &addr_task);
+		taskqueue_enqueue_fast(addr_taskq, &addr_task);
 }
 
 static int addr_init(void)
@@ -363,7 +354,7 @@
 	TAILQ_INIT(&req_list);
 	mtx_init(&lock, "rdma_addr req_list lock", NULL, MTX_DEF);
 
-	addr_taskq = taskqueue_create("rdma_addr_taskq", M_WAITOK,
+	addr_taskq = taskqueue_create_fast("rdma_addr_taskq", M_NOWAIT,
 		taskqueue_thread_enqueue, &addr_taskq);
         if (addr_taskq == NULL) {
                 printf("failed to allocate rdma_addr taskqueue\n");

==== //depot/projects/iwarp/sys/contrib/rdma/rdma_cache.c#4 (text+ko) ====

@@ -96,7 +96,7 @@
 	if (port_num < start_port(device) || port_num > end_port(device))
 		return -EINVAL;
 
-	mtx_lock_spin(&device->cache.lock);
+	mtx_lock(&device->cache.lock);
 
 	cache = device->cache.gid_cache[port_num - start_port(device)];
 
@@ -105,7 +105,7 @@
 	else
 		*gid = cache->table[index];
 
-	mtx_unlock_spin(&device->cache.lock);
+	mtx_unlock(&device->cache.lock);
 
 	return ret;
 }
@@ -124,7 +124,7 @@
 	if (index)
 		*index = -1;
 
-	mtx_lock_spin(&device->cache.lock);
+	mtx_lock(&device->cache.lock);
 
 	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
 		cache = device->cache.gid_cache[p];
@@ -139,7 +139,7 @@
 		}
 	}
 found:
-	mtx_unlock_spin(&device->cache.lock);
+	mtx_unlock(&device->cache.lock);
 
 	return ret;
 }
@@ -156,7 +156,7 @@
 	if (port_num < start_port(device) || port_num > end_port(device))
 		return -EINVAL;
 
-	mtx_lock_spin(&device->cache.lock);
+	mtx_lock(&device->cache.lock);
 
 	cache = device->cache.pkey_cache[port_num - start_port(device)];
 
@@ -165,7 +165,7 @@
 	else
 		*pkey = cache->table[index];
 
-	mtx_unlock_spin(&device->cache.lock);
+	mtx_unlock(&device->cache.lock);
 
 	return ret;
 }
@@ -183,7 +183,7 @@
 	if (port_num < start_port(device) || port_num > end_port(device))
 		return -EINVAL;
 
-	mtx_lock_spin(&device->cache.lock);
+	mtx_lock(&device->cache.lock);
 
 	cache = device->cache.pkey_cache[port_num - start_port(device)];
 
@@ -196,7 +196,7 @@
 			break;
 		}
 
-	mtx_unlock_spin(&device->cache.lock);
+	mtx_unlock(&device->cache.lock);
 
 	return ret;
 }
@@ -211,9 +211,9 @@
 	if (port_num < start_port(device) || port_num > end_port(device))
 		return -EINVAL;
 
-	mtx_lock_spin(&device->cache.lock);
+	mtx_lock(&device->cache.lock);
 	*lmc = device->cache.lmc_cache[port_num - start_port(device)];
-	mtx_unlock_spin(&device->cache.lock);
+	mtx_unlock(&device->cache.lock);
 
 	return ret;
 }
@@ -228,7 +228,7 @@
 	int                        i;
 	int                        ret;
 
-	tprops = malloc(sizeof *tprops, M_DEVBUF, M_WAITOK);
+	tprops = malloc(sizeof *tprops, M_DEVBUF, M_NOWAIT);
 	if (!tprops)
 		return;
 
@@ -240,14 +240,14 @@
 	}
 
 	pkey_cache = malloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
-			     sizeof *pkey_cache->table, M_DEVBUF, M_WAITOK);
+			     sizeof *pkey_cache->table, M_DEVBUF, M_NOWAIT);
 	if (!pkey_cache)
 		goto err;
 
 	pkey_cache->table_len = tprops->pkey_tbl_len;
 
 	gid_cache = malloc(sizeof *gid_cache + tprops->gid_tbl_len *
-			    sizeof *gid_cache->table, M_DEVBUF, M_WAITOK);
+			    sizeof *gid_cache->table, M_DEVBUF, M_NOWAIT);
 	if (!gid_cache)
 		goto err;
 
@@ -271,7 +271,7 @@
 		}
 	}
 
-	mtx_lock_spin(&device->cache.lock);
+	mtx_lock(&device->cache.lock);
 
 	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
 	old_gid_cache  = device->cache.gid_cache [port - start_port(device)];
@@ -281,7 +281,7 @@
 
 	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;
 
-	mtx_unlock_spin(&device->cache.lock);
+	mtx_unlock(&device->cache.lock);
 
 	free(old_pkey_cache, M_DEVBUF);
 	free(old_gid_cache, M_DEVBUF);
@@ -313,7 +313,7 @@
 	    event->event == IB_EVENT_PKEY_CHANGE ||
 	    event->event == IB_EVENT_SM_CHANGE   ||
 	    event->event == IB_EVENT_CLIENT_REREGISTER) {
-		work = malloc(sizeof *work, M_DEVBUF, M_WAITOK);
+		work = malloc(sizeof *work, M_DEVBUF, M_NOWAIT);
 		if (work) {
 			TASK_INIT(&work->task, 0, ib_cache_task, work);
 			work->device   = event->device;
@@ -328,21 +328,21 @@
 	int p;
 
 	mtx_init(&device->cache.lock, "ib device cache", NULL, 
-		MTX_DUPOK|MTX_SPIN);
+		MTX_DUPOK|MTX_DEF);
 
 	device->cache.pkey_cache =
 		malloc(sizeof *device->cache.pkey_cache *
 			(end_port(device) - start_port(device) + 1), M_DEVBUF, 
-			M_WAITOK);
+			M_NOWAIT);
 	device->cache.gid_cache =
 		malloc(sizeof *device->cache.gid_cache *
 			(end_port(device) - start_port(device) + 1), M_DEVBUF, 
-			M_WAITOK);
+			M_NOWAIT);
 
 	device->cache.lmc_cache = malloc(sizeof *device->cache.lmc_cache *
 					  (end_port(device) -
 					  start_port(device) + 1),
-					  M_DEVBUF, M_WAITOK);
+					  M_DEVBUF, M_NOWAIT);
 
 	if (!device->cache.pkey_cache || !device->cache.gid_cache ||
 	    !device->cache.lmc_cache) {
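
The M_WAITOK to M_NOWAIT switch follows from the locking change:
malloc(9) with M_WAITOK may sleep, which WITNESS flags when any mutex
is held, while M_NOWAIT returns NULL instead of sleeping.  Condensed
sketch of the failure handling every converted call site now needs:

	tprops = malloc(sizeof *tprops, M_DEVBUF, M_NOWAIT);
	if (tprops == NULL)
		return;		/* allocation can now fail */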

==== //depot/projects/iwarp/sys/contrib/rdma/rdma_cma.c#12 (text+ko) ====

@@ -217,9 +217,9 @@
 {
 	int ret;
 
-	mtx_lock_spin(&id_priv->lock);
+	mtx_lock(&id_priv->lock);
 	ret = (id_priv->state == comp);
-	mtx_unlock_spin(&id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 	return ret;
 }
 
@@ -228,10 +228,10 @@
 {
 	int ret;
 
-	mtx_lock_spin(&id_priv->lock);
+	mtx_lock(&id_priv->lock);
 	if ((ret = (id_priv->state == comp)))
 		id_priv->state = exch;
-	mtx_unlock_spin(&id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 	return ret;
 }
 
@@ -240,10 +240,10 @@
 {
 	enum cma_state old;
 
-	mtx_lock_spin(&id_priv->lock);
+	mtx_lock(&id_priv->lock);
 	old = id_priv->state;
 	id_priv->state = exch;
-	mtx_unlock_spin(&id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 	return old;
 }
 
@@ -280,9 +280,9 @@
 static void cma_attach_to_dev(struct rdma_id_private *id_priv,
 			      struct cma_device *cma_dev)
 {
-	mtx_lock_spin(&cma_dev->lock);
+	mtx_lock(&cma_dev->lock);
 	cma_dev->refcount++;
-	mtx_unlock_spin(&cma_dev->lock);
+	mtx_unlock(&cma_dev->lock);
 	id_priv->cma_dev = cma_dev;
 	id_priv->id.device = cma_dev->device;
 	LIST_INSERT_HEAD(&cma_dev->id_list, id_priv, list);
@@ -290,10 +290,10 @@
 
 static inline void cma_deref_dev(struct cma_device *cma_dev)
 {
-	mtx_lock_spin(&cma_dev->lock);
+	mtx_lock(&cma_dev->lock);
 	if (--cma_dev->refcount == 0)
 		cv_broadcast(&cma_dev->comp);
-	mtx_unlock_spin(&cma_dev->lock);
+	mtx_unlock(&cma_dev->lock);
 }
 
 static void cma_detach_from_dev(struct rdma_id_private *id_priv)
@@ -367,11 +367,11 @@
 
 static void cma_deref_id(struct rdma_id_private *id_priv)
 {
-	mtx_lock_spin(&id_priv->lock);
+	mtx_lock(&id_priv->lock);
 	if (--id_priv->refcount == 0) {
 		cv_broadcast(&id_priv->comp);
 	}
-	mtx_unlock_spin(&id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 }
 
 static int cma_disable_remove(struct rdma_id_private *id_priv,
@@ -379,22 +379,22 @@
 {
 	int ret;
 
-	mtx_lock_spin(&id_priv->lock);
+	mtx_lock(&id_priv->lock);
 	if (id_priv->state == state) {
 		id_priv->dev_remove++;
 		ret = 0;
 	} else
 		ret = EINVAL;
-	mtx_unlock_spin(&id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 	return ret;
 }
 
 static void cma_enable_remove(struct rdma_id_private *id_priv)
 {
-	mtx_lock_spin(&id_priv->lock);
+	mtx_lock(&id_priv->lock);
 	if (--id_priv->dev_remove == 0)
 		cv_broadcast(&id_priv->wait_remove);
-	mtx_unlock_spin(&id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 }
 
 static int cma_has_cm_dev(struct rdma_id_private *id_priv)
@@ -407,7 +407,7 @@
 {
 	struct rdma_id_private *id_priv;
 
-	id_priv = malloc(sizeof *id_priv, M_DEVBUF, M_WAITOK);
+	id_priv = malloc(sizeof *id_priv, M_DEVBUF, M_NOWAIT);
 	if (!id_priv)
 		return ERR_PTR(-ENOMEM);
 	bzero(id_priv, sizeof *id_priv);
@@ -416,7 +416,7 @@
 	id_priv->id.context = context;
 	id_priv->id.event_handler = event_handler;
 	id_priv->id.ps = ps;
-	mtx_init(&id_priv->lock, "rdma_cm_id_priv", NULL, MTX_DUPOK|MTX_SPIN);
+	mtx_init(&id_priv->lock, "rdma_cm_id_priv", NULL, MTX_DUPOK|MTX_DEF);
 	cv_init(&id_priv->comp, "rdma_cm_id_priv");
 	id_priv->refcount = 1;
 	cv_init(&id_priv->wait_remove, "id priv wait remove");
@@ -786,10 +786,10 @@
 	LIST_REMOVE(id_priv, listen_entry);
 
 	cma_deref_id(id_priv);
-	mtx_lock_spin(&id_priv->lock);
+	mtx_lock(&id_priv->lock);
 	if (id_priv->refcount)
 		cv_wait(&id_priv->comp, &id_priv->lock);
-	mtx_unlock_spin(&id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 
 	free(id_priv, M_DEVBUF);
 }
@@ -895,11 +895,11 @@
 	mtx_unlock(&lock);
 	cma_release_port(id_priv);
 	cma_deref_id(id_priv);
-	mtx_lock_spin(&id_priv->lock);
+	mtx_lock(&id_priv->lock);
 	BUG_ON(id_priv->refcount < 0);
 	if (id_priv->refcount)
 		cv_wait(&id_priv->comp, &id_priv->lock);
-	mtx_unlock_spin(&id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 	free(id_priv->id.route.path_rec, M_DEVBUF);
 	free(id_priv, M_DEVBUF);
 }
@@ -1051,7 +1051,7 @@
 	rt = &id->route;
 	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
 	rt->path_rec = malloc(sizeof *rt->path_rec * rt->num_paths,
-			       M_DEVBUF, M_WAITOK);
+			       M_DEVBUF, M_NOWAIT);
 	if (!rt->path_rec)
 		goto destroy_id;
 
@@ -1153,9 +1153,9 @@
 		goto out;
 	}
 
-	mtx_lock_spin(&conn_id->lock);
+	mtx_lock(&conn_id->lock);
 	conn_id->dev_remove++;
-	mtx_unlock_spin(&conn_id->lock);
+	mtx_unlock(&conn_id->lock);
 	mtx_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
 	mtx_unlock(&lock);
@@ -1325,9 +1325,9 @@
 		goto out;
 	}
 	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
-	mtx_lock_spin(&conn_id->lock);
+	mtx_lock(&conn_id->lock);
 	++conn_id->dev_remove;
-	mtx_unlock_spin(&conn_id->lock);
+	mtx_unlock(&conn_id->lock);
 	conn_id->state = CMA_CONNECT;
 
 	port = iw_event->local_addr.sin_port;
@@ -1566,7 +1566,7 @@
 		work->event.status = status;
 	}
 
-	taskqueue_enqueue(cma_wq, &work->task);
+	taskqueue_enqueue_fast(cma_wq, &work->task);
 }
 
 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
@@ -1587,7 +1587,7 @@
 				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
 				IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
 				IB_SA_PATH_REC_REVERSIBLE,
-				timeout_ms, M_WAITOK,
+				timeout_ms, M_NOWAIT,
 				cma_query_handler, work, &id_priv->query);
 
 	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
@@ -1600,9 +1600,9 @@
 	struct rdma_id_private *id_priv = work->id;
 	int destroy = 0;
 
-	mtx_lock_spin(&id_priv->lock);
+	mtx_lock(&id_priv->lock);
 	++id_priv->dev_remove;
-	mtx_unlock_spin(&id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
 		goto out;
 
@@ -1625,7 +1625,7 @@
 	struct cma_work *work;
 	int ret;
 
-	work = malloc(sizeof *work, M_DEVBUF, M_WAITOK);
+	work = malloc(sizeof *work, M_DEVBUF, M_NOWAIT);
 	if (!work)
 		return (ENOMEM);
 	bzero(work, sizeof *work);
@@ -1636,7 +1636,7 @@
 	work->new_state = CMA_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 
-	route->path_rec = malloc(sizeof *route->path_rec, M_DEVBUF, M_WAITOK);
+	route->path_rec = malloc(sizeof *route->path_rec, M_DEVBUF, M_NOWAIT);
 	if (!route->path_rec) {
 		ret = ENOMEM;
 		goto err1;
@@ -1665,7 +1665,7 @@
 	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
 		return (EINVAL);
 
-	id->route.path_rec = malloc(sizeof *path_rec * num_paths, M_DEVBUF, M_WAITOK);
+	id->route.path_rec = malloc(sizeof *path_rec * num_paths, M_DEVBUF, M_NOWAIT);
 	if (!id->route.path_rec) {
 		ret = ENOMEM;
 		goto err;
@@ -1684,7 +1684,7 @@
 {
 	struct cma_work *work;
 
-	work = malloc(sizeof *work, M_DEVBUF, M_WAITOK);
+	work = malloc(sizeof *work, M_DEVBUF, M_NOWAIT);
 	if (!work)
 		return (ENOMEM);
 	bzero(work, sizeof *work);
@@ -1694,7 +1694,7 @@
 	work->old_state = CMA_ROUTE_QUERY;
 	work->new_state = CMA_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
-	taskqueue_enqueue(cma_wq, &work->task);
+	taskqueue_enqueue_fast(cma_wq, &work->task);
 	return 0;
 }
 
@@ -1707,9 +1707,9 @@
 	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
 		return (EINVAL);
 
-	mtx_lock_spin(&id_priv->lock);
+	mtx_lock(&id_priv->lock);
 	id_priv->refcount++;
-	mtx_unlock_spin(&id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 #ifdef IB_SUPPORTED
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
@@ -1784,9 +1784,9 @@
 	struct rdma_cm_event event;
 
 	memset(&event, 0, sizeof event);
-	mtx_lock_spin(&id_priv->lock);
+	mtx_lock(&id_priv->lock);
 	++id_priv->dev_remove;
-	mtx_unlock_spin(&id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 
 	/*
 	 * Grab mutex to block rdma_destroy_id() from removing the device while
@@ -1832,7 +1832,7 @@
 	union ib_gid gid;
 	int ret;
 
-	work = malloc(sizeof *work, M_DEVBUF, M_WAITOK);
+	work = malloc(sizeof *work, M_DEVBUF, M_NOWAIT);
 	if (!work)
 		return (ENOMEM);
 	bzero(work, sizeof *work);
@@ -1858,7 +1858,7 @@
 	work->old_state = CMA_ADDR_QUERY;
 	work->new_state = CMA_ADDR_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
-	taskqueue_enqueue(cma_wq, &work->task);
+	taskqueue_enqueue_fast(cma_wq, &work->task);
 	return 0;
 err:
 	free(work, M_DEVBUF);
@@ -1890,9 +1890,9 @@
 	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
 		return (EINVAL);
 
-	mtx_lock_spin(&id_priv->lock);
+	mtx_lock(&id_priv->lock);
 	id_priv->refcount++;
-	mtx_unlock_spin(&id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 	memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
 	if (cma_any_addr(dst_addr))
 		ret = cma_resolve_loopback(id_priv);
@@ -1928,14 +1928,14 @@
 	struct rdma_bind_list *bind_list;
 	int port, ret;
 
-	bind_list = malloc(sizeof *bind_list, M_DEVBUF, M_WAITOK);
+	bind_list = malloc(sizeof *bind_list, M_DEVBUF, M_NOWAIT);
 	if (!bind_list)
 		return (ENOMEM);
 	bzero(bind_list, sizeof *bind_list);
 
 	do {
 		ret = idr_get_new_above(ps, bind_list, snum, &port);
-	} while ((ret == EAGAIN) && idr_pre_get(ps, M_WAITOK));
+	} while ((ret == EAGAIN) && idr_pre_get(ps, M_NOWAIT));
 
 	if (ret)
 		goto err1;
@@ -1961,7 +1961,7 @@
 	struct rdma_bind_list *bind_list;
 	int port, ret;
 
-	bind_list = malloc(sizeof *bind_list, M_DEVBUF, M_WAITOK);
+	bind_list = malloc(sizeof *bind_list, M_DEVBUF, M_NOWAIT);
 	if (!bind_list)
 		return (ENOMEM);
 	bzero(bind_list, sizeof *bind_list);
@@ -1969,7 +1969,7 @@
 retry:
 	do {
 		ret = idr_get_new_above(ps, bind_list, next_port, &port);
-	} while ((ret == EAGAIN) && idr_pre_get(ps, M_WAITOK));
+	} while ((ret == EAGAIN) && idr_pre_get(ps, M_NOWAIT));
 
 	if (ret)
 		goto err1;
@@ -2749,7 +2749,7 @@
 
 	mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
 						id_priv->id.port_num, &rec,
-						comp_mask, M_WAITOK,
+						comp_mask, M_NOWAIT,
 						cma_ib_mc_handler, mc);
 	if (IS_ERR(mc->multicast.ib))
 		return PTR_ERR(mc->multicast.ib);
@@ -2769,7 +2769,7 @@
 	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
 		return (EINVAL);
 
-	mc = malloc(sizeof *mc, M_DEVBUF, M_WAITOK);
+	mc = malloc(sizeof *mc, M_DEVBUF, M_NOWAIT);
 	if (!mc)
 		return (ENOMEM);
 
@@ -2777,9 +2777,9 @@
 	mc->context = context;
 	mc->id_priv = id_priv;
 
-	mtx_lock_spin(&id_priv->lock);
+	mtx_lock(&id_priv->lock);
 	LIST_INSERT_HEAD(&id_priv->mc_list, mc, list);
-	mtx_unlock_spin(&id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
@@ -2791,9 +2791,9 @@
 	}
 
 	if (ret) {
-		mtx_lock_spin(&id_priv->lock);
+		mtx_lock(&id_priv->lock);
 		list_del(&mc->list);
-		mtx_unlock_spin(&id_priv->lock);
+		mtx_unlock(&id_priv->lock);
 		free(mc, M_DEVBUF);
 	}
 	return ret;
@@ -2806,11 +2806,11 @@
 	struct cma_multicast *mc;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	mtx_lock_spin(&id_priv->lock);
+	mtx_lock(&id_priv->lock);
 	LIST_FOREACH(mc, &id_priv->mc_list, list) {
 		if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
 			list_del(&mc->list);
-			mtx_unlock_spin(&id_priv->lock);
+			mtx_unlock(&id_priv->lock);
 
 			if (id->qp)
 				ib_detach_mcast(id->qp,
@@ -2821,7 +2821,7 @@
 			return;
 		}
 	}
-	mtx_unlock_spin(&id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 }
 EXPORT_SYMBOL(rdma_leave_multicast);
 #endif
@@ -2831,14 +2831,14 @@
 	struct cma_device *cma_dev;
 	struct rdma_id_private *id_priv;
 
-	cma_dev = malloc(sizeof *cma_dev, M_DEVBUF, M_WAITOK|M_ZERO);
+	cma_dev = malloc(sizeof *cma_dev, M_DEVBUF, M_NOWAIT|M_ZERO);
 	if (!cma_dev)
 		return;
 
 	cma_dev->device = device;
 
 	cv_init(&cma_dev->comp, "cma_device");
-	mtx_init(&cma_dev->lock, "cma_device", NULL, MTX_DUPOK|MTX_SPIN);
+	mtx_init(&cma_dev->lock, "cma_device", NULL, MTX_DUPOK|MTX_DEF);
 	cma_dev->refcount = 1;
 	LIST_INIT(&cma_dev->id_list);
 	ib_set_client_data(device, &cma_client, cma_dev);
@@ -2861,11 +2861,11 @@
 		return 0;
 
 	cma_cancel_operation(id_priv, state);
-	mtx_lock_spin(&id_priv->lock);
+	mtx_lock(&id_priv->lock);
 	BUG_ON(id_priv->dev_remove < 0);
 	if (id_priv->dev_remove)
 		cv_wait(&id_priv->wait_remove, &id_priv->lock);
-	mtx_unlock_spin(&id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 
 	/* Check for destruction from another callback. */
 	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
@@ -2891,9 +2891,9 @@
 		}
 
 		LIST_REMOVE(id_priv, list);
-		mtx_lock_spin(&id_priv->lock);
+		mtx_lock(&id_priv->lock);
 		id_priv->refcount++;
-		mtx_unlock_spin(&id_priv->lock);
+		mtx_unlock(&id_priv->lock);
 		mtx_unlock(&lock);
 
 		ret = cma_remove_id_dev(id_priv);
@@ -2906,11 +2906,11 @@
 	mtx_unlock(&lock);
 
 	cma_deref_dev(cma_dev);
-	mtx_lock_spin(&cma_dev->lock);
+	mtx_lock(&cma_dev->lock);
 	BUG_ON(cma_dev->refcount < 0);
 	if (cma_dev->refcount)
 		cv_wait(&cma_dev->comp, &cma_dev->lock);
-	mtx_unlock_spin(&cma_dev->lock);
+	mtx_unlock(&cma_dev->lock);
 }
 
 static void cma_remove_one(struct ib_device *device)
@@ -2941,7 +2941,7 @@
 	next_port = ((unsigned int) next_port %
 		    (ipport_lastauto - ipport_firstauto)) +
 		    ipport_firstauto;
-	cma_wq = taskqueue_create("rdma_cm", M_WAITOK, taskqueue_thread_enqueue,
+	cma_wq = taskqueue_create_fast("rdma_cm", M_NOWAIT, taskqueue_thread_enqueue,
 		&cma_wq);
 
 	if (!cma_wq)
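
One reason the spin mutexes had to go: the teardown paths above wait
on condition variables, and cv_wait(9) drops and reacquires its mutex
across a true sleep, which requires a sleepable MTX_DEF mutex.
Condensed sketch (the canonical form re-checks the predicate in a
loop, as below):

	mtx_lock(&id_priv->lock);
	while (id_priv->refcount > 0)
		cv_wait(&id_priv->comp, &id_priv->lock);
	mtx_unlock(&id_priv->lock);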

==== //depot/projects/iwarp/sys/contrib/rdma/rdma_device.c#6 (text+ko) ====

@@ -135,7 +135,7 @@
 	struct ib_device *device;
 	int i;
 
-	inuse = malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK);
+	inuse = malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT);
 	if (!inuse)
 		return (ENOMEM);
 
@@ -188,7 +188,7 @@
 
 	BUG_ON(size < sizeof (struct ib_device));
 
-	dev = malloc(size, M_DEVBUF, M_WAITOK);
+	dev = malloc(size, M_DEVBUF, M_NOWAIT);
 	if (dev)
 		bzero(dev, size);
 	return dev;
@@ -219,7 +219,7 @@
 {
 	struct ib_client_data *context;
 
-	context = malloc(sizeof *context, M_DEVBUF, M_WAITOK);
+	context = malloc(sizeof *context, M_DEVBUF, M_NOWAIT);
 	if (!context) {
 		log(LOG_WARNING, "Couldn't allocate client context for %s/%s\n",
 		       device->name, client->name);
@@ -229,9 +229,9 @@
 	context->client = client;
 	context->data   = NULL;
 
-	mtx_lock_spin(&device->client_data_lock);
+	mtx_lock(&device->client_data_lock);
 	TAILQ_INSERT_TAIL(&device->client_data_list, context, list);
-	mtx_unlock_spin(&device->client_data_lock);
+	mtx_unlock(&device->client_data_lock);
 
 	return 0;
 }
@@ -242,16 +242,16 @@
 	int num_ports, ret = ENOMEM;
 	u8 port_index;
 
-	tprops = malloc(sizeof *tprops, M_DEVBUF, M_WAITOK);
+	tprops = malloc(sizeof *tprops, M_DEVBUF, M_NOWAIT);
 	if (!tprops)
 		goto out;
 
 	num_ports = end_port(device) - start_port(device) + 1;
 
 	device->pkey_tbl_len = malloc(sizeof *device->pkey_tbl_len * num_ports,
-				       M_DEVBUF, M_WAITOK);
+				       M_DEVBUF, M_NOWAIT);
 	device->gid_tbl_len = malloc(sizeof *device->gid_tbl_len * num_ports,
-				      M_DEVBUF, M_WAITOK);
+				      M_DEVBUF, M_NOWAIT);
 	if (!device->pkey_tbl_len || !device->gid_tbl_len)
 		goto err;
 
@@ -303,8 +303,10 @@
 
 	TAILQ_INIT(&device->event_handler_list);
 	TAILQ_INIT(&device->client_data_list);
-	mtx_init(&device->event_handler_lock, "ib event handler", NULL, MTX_DUPOK|MTX_SPIN);
-	mtx_init(&device->client_data_lock, "ib client data", NULL, MTX_DUPOK|MTX_SPIN);
+	mtx_init(&device->event_handler_lock, "ib event handler", NULL, 
+		 MTX_DUPOK|MTX_DEF);
+	mtx_init(&device->client_data_lock, "ib client data", NULL, 
+		 MTX_DUPOK|MTX_DEF);
 
 	ret = read_port_table_lengths(device);
 	if (ret) {
@@ -366,10 +368,10 @@
 
 	mtx_unlock(&device_mutex);
 
-	mtx_lock_spin(&device->client_data_lock);
+	mtx_lock(&device->client_data_lock);
 	TAILQ_FOREACH_SAFE(context, &device->client_data_list, list, tmp)
 		free(context, M_DEVBUF);
-	mtx_unlock_spin(&device->client_data_lock);
+	mtx_unlock(&device->client_data_lock);
 
 	device->reg_state = IB_DEV_UNREGISTERED;
 }
@@ -424,14 +426,14 @@
 		if (client->remove)
 			client->remove(device);
 
-		mtx_lock_spin(&device->client_data_lock);
+		mtx_lock(&device->client_data_lock);
 		TAILQ_FOREACH_SAFE(context, &device->client_data_list, list,tmp)
 			if (context->client == client) {
 				TAILQ_REMOVE(&device->client_data_list, context,
 					list);
 				free(context, M_DEVBUF);
 			}
-		mtx_unlock_spin(&device->client_data_lock);
+		mtx_unlock(&device->client_data_lock);
 	}
 	TAILQ_REMOVE(&client_list, client, list);
 
@@ -452,13 +454,13 @@
 	struct ib_client_data *context;
 	void *ret = NULL;
 
-	mtx_lock_spin(&device->client_data_lock);
+	mtx_lock(&device->client_data_lock);
 	TAILQ_FOREACH(context, &device->client_data_list, list)
 		if (context->client == client) {
 			ret = context->data;
 			break;
 		}
-	mtx_unlock_spin(&device->client_data_lock);
+	mtx_unlock(&device->client_data_lock);
 
 	return ret;
 }
@@ -478,7 +480,7 @@
 {
 	struct ib_client_data *context;
 
-	mtx_lock_spin(&device->client_data_lock);
+	mtx_lock(&device->client_data_lock);
 	TAILQ_FOREACH(context, &device->client_data_list, list)
 		if (context->client == client) {
 			context->data = data;
@@ -489,7 +491,7 @@
 	       device->name, client->name);
 
 out:
-	mtx_unlock_spin(&device->client_data_lock);
+	mtx_unlock(&device->client_data_lock);
 }
 EXPORT_SYMBOL(ib_set_client_data);
 
@@ -504,10 +506,10 @@
  */
 int ib_register_event_handler  (struct ib_event_handler *event_handler)
 {
-	mtx_lock_spin(&event_handler->device->event_handler_lock);
+	mtx_lock(&event_handler->device->event_handler_lock);
 	TAILQ_INSERT_TAIL(&event_handler->device->event_handler_list, 
 		event_handler, list);
-	mtx_unlock_spin(&event_handler->device->event_handler_lock);
+	mtx_unlock(&event_handler->device->event_handler_lock);
 
 	return 0;
 }
@@ -522,10 +524,10 @@
  */
 int ib_unregister_event_handler(struct ib_event_handler *event_handler)
 {
-	mtx_lock_spin(&event_handler->device->event_handler_lock);
+	mtx_lock(&event_handler->device->event_handler_lock);
 	TAILQ_REMOVE(&event_handler->device->event_handler_list, event_handler,
 		list);
-	mtx_unlock_spin(&event_handler->device->event_handler_lock);
+	mtx_unlock(&event_handler->device->event_handler_lock);
 
 	return 0;
 }
@@ -543,12 +545,12 @@
 {
 	struct ib_event_handler *handler;
 
-	mtx_lock_spin(&event->device->event_handler_lock);
+	mtx_lock(&event->device->event_handler_lock);
 

>>> TRUNCATED FOR MAIL (1000 lines) <<<

