PERFORCE change 128743 for review

Steve Wise swise at FreeBSD.org
Tue Nov 6 07:03:48 PST 2007


http://perforce.freebsd.org/chv.cgi?CH=128743

Change 128743 by swise at swise:vic10:iwarp on 2007/11/06 15:02:56

	Get rdma_cma.c to compile.
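
	The port swaps Linux kernel primitives for their FreeBSD
	counterparts: list_head becomes the queue(3) LIST/TAILQ macros,
	kzalloc/kfree become malloc(9)/free(9) with M_DEVBUF,
	work_struct/queue_work become task/taskqueue(9), and struct
	completion becomes a cv(9) paired with an mtx(9).  A minimal
	sketch of the completion replacement, using hypothetical names
	that are not part of this change:

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>
	#include <sys/condvar.h>

	struct foo {
		struct mtx	lock;
		struct cv	comp;
		int		refcount;
	};

	static void
	foo_init(struct foo *f)
	{
		mtx_init(&f->lock, "foo", NULL, MTX_DEF);
		cv_init(&f->comp, "foo");
		f->refcount = 1;
	}

	/* Stands in for complete(): drop a reference, wake any waiter. */
	static void
	foo_deref(struct foo *f)
	{
		mtx_lock(&f->lock);
		if (--f->refcount == 0)
			cv_broadcast(&f->comp);
		mtx_unlock(&f->lock);
	}

	/* Stands in for wait_for_completion(). */
	static void
	foo_wait(struct foo *f)
	{
		mtx_lock(&f->lock);
		while (f->refcount != 0)
			cv_wait(&f->comp, &f->lock);
		mtx_unlock(&f->lock);
	}

	The predicate loop matters: cv_wait(9) may return on a spurious
	wakeup, and the last reference may already have been dropped
	before the waiting thread takes the lock.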

Affected files ...

.. //depot/projects/iwarp/sys/contrib/rdma/ib_cache.h#2 edit
.. //depot/projects/iwarp/sys/contrib/rdma/rdma_cm.h#2 edit
.. //depot/projects/iwarp/sys/contrib/rdma/rdma_cma.c#2 edit
.. //depot/projects/iwarp/sys/modules/rdma/Makefile#2 edit
.. //depot/projects/iwarp/sys/sys/linux_compat.h#3 edit

Differences ...

==== //depot/projects/iwarp/sys/contrib/rdma/ib_cache.h#2 (text+ko) ====

@@ -37,7 +37,7 @@
 #ifndef _IB_CACHE_H
 #define _IB_CACHE_H
 
-#include <rdma/ib_verbs.h>
+#include <contrib/rdma/ib_verbs.h>
 
 /**
  * ib_get_cached_gid - Returns a cached GID table entry

==== //depot/projects/iwarp/sys/contrib/rdma/rdma_cm.h#2 (text+ko) ====

@@ -30,10 +30,10 @@
 #if !defined(RDMA_CM_H)
 #define RDMA_CM_H
 
-#include <linux/socket.h>
-#include <linux/in6.h>
-#include <rdma/ib_addr.h>
-#include <rdma/ib_sa.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <contrib/rdma/ib_addr.h>
+#include <contrib/rdma/ib_sa.h>
 
 /*
  * Upon receiving a device removal event, users must destroy the associated

==== //depot/projects/iwarp/sys/contrib/rdma/rdma_cma.c#2 (text+ko) ====

@@ -28,23 +28,32 @@
  * and/or other materials provided with the distribution.
  *
  */
+#include <sys/cdefs.h>
 
-#include <linux/completion.h>
-#include <linux/in.h>
-#include <linux/in6.h>
-#include <linux/mutex.h>
-#include <linux/random.h>
-#include <linux/idr.h>
-#include <linux/inetdevice.h>
+#include <sys/param.h>
+#include <sys/condvar.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/libkern.h>
+#include <sys/socket.h>
+#include <sys/module.h>
+#include <sys/linux_compat.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/rwlock.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+#include <sys/priv.h>
+#include <sys/syslog.h>
 
-#include <net/tcp.h>
+#include <netinet/in.h>
+#include <netinet/in_pcb.h>
 
-#include <rdma/rdma_cm.h>
-#include <rdma/rdma_cm_ib.h>
-#include <rdma/ib_cache.h>
-#include <rdma/ib_cm.h>
-#include <rdma/ib_sa.h>
-#include <rdma/iw_cm.h>
+#include <contrib/rdma/rdma_cm.h>
+#include <contrib/rdma/ib_cache.h>
+#include <contrib/rdma/ib_cm.h>
+#include <contrib/rdma/ib_sa.h>
+#include <contrib/rdma/iw_cm.h>
 
 MODULE_AUTHOR("Sean Hefty");
 MODULE_DESCRIPTION("Generic RDMA CM Agent");
@@ -64,9 +73,9 @@
 
 static struct ib_sa_client sa_client;
 static struct rdma_addr_client addr_client;
-static LIST_HEAD(dev_list);
-static LIST_HEAD(listen_any_list);
-static DEFINE_MUTEX(lock);
+static TAILQ_HEAD(, cma_device) dev_list;
+static LIST_HEAD(, rdma_id_private) listen_any_list;
+static struct mtx lock;
 static struct taskqueue *cma_wq;
 static DEFINE_IDR(sdp_ps);
 static DEFINE_IDR(tcp_ps);
@@ -76,11 +85,12 @@
 
 struct cma_device {
 	struct ib_device	*device;
-	struct completion	comp;
+	struct mtx		lock;
+	struct cv		comp;
 	atomic_t		refcount;
 
-	TAILQ_HEAD(, rdma_id_private) id_list;
-	TAILQ_ENTRY(cma_device) entry;
+	LIST_HEAD(, rdma_id_private) id_list;
+	TAILQ_ENTRY(cma_device) list;
 };
 
 enum cma_state {
@@ -99,7 +109,7 @@
 
 struct rdma_bind_list {
 	struct idr		*ps;
-	struct hlist_head	owners;
+	TAILQ_HEAD(, rdma_id_private) owners;
 	unsigned short		port;
 };
 
@@ -113,16 +123,17 @@
 	struct rdma_cm_id	id;
 
 	struct rdma_bind_list	*bind_list;
-	struct hlist_node	node;
-	struct list_head	list;
-	struct list_head	listen_list;
+	TAILQ_ENTRY(rdma_id_private) node;
+	LIST_ENTRY(rdma_id_private) list; /* listen_any_list or cma_dev.list */
+	LIST_HEAD(, rdma_id_private) listen_list; /* per-device listens */
+	LIST_ENTRY(rdma_id_private) listen_entry;
 	struct cma_device	*cma_dev;
 #ifdef IB_SUPPORTED	
-	struct list_head	mc_list;
+	LIST_HEAD(, cma_multicast) mc_list;
 #endif
 	enum cma_state		state;
 	struct mtx		lock;
-	struct completion	comp;
+	struct cv		comp;
 	atomic_t		refcount;
 	struct cv		wait_remove;
 	atomic_t		dev_remove;
@@ -142,6 +153,7 @@
 	u8			srq;
 };
 
+#ifdef IB_SUPPORTED
 struct cma_multicast {
 	struct rdma_id_private *id_priv;
 	union {
@@ -153,9 +165,10 @@
 	u8			pad[sizeof(struct sockaddr_in6) -
 				    sizeof(struct sockaddr)];
 };
+#endif
 
 struct cma_work {
-	struct work_struct	work;
+	struct task		task;
 	struct rdma_id_private	*id;
 	enum cma_state		old_state;
 	enum cma_state		new_state;
@@ -267,22 +280,25 @@
 	atomic_inc(&cma_dev->refcount);
 	id_priv->cma_dev = cma_dev;
 	id_priv->id.device = cma_dev->device;
-	list_add_tail(&id_priv->list, &cma_dev->id_list);
+	LIST_INSERT_HEAD(&cma_dev->id_list, id_priv, list);
 }
 
 static inline void cma_deref_dev(struct cma_device *cma_dev)
 {
+	mtx_lock(&cma_dev->lock);
 	if (atomic_dec_and_test(&cma_dev->refcount))
-		complete(&cma_dev->comp);
+		cv_broadcast(&cma_dev->comp);
+	mtx_unlock(&cma_dev->lock);
 }
 
 static void cma_detach_from_dev(struct rdma_id_private *id_priv)
 {
-	list_del(&id_priv->list);
+	LIST_REMOVE(id_priv, list);
 	cma_deref_dev(id_priv->cma_dev);
 	id_priv->cma_dev = NULL;
 }
 
+#ifdef IB_SUPPORTED
 static int cma_set_qkey(struct ib_device *device, u8 port_num,
 			enum rdma_port_space ps,
 			struct rdma_dev_addr *dev_addr, u32 *qkey)
@@ -304,6 +320,7 @@
 	}
 	return ret;
 }
+#endif
 
 static int cma_acquire_dev(struct rdma_id_private *id_priv)
 {
@@ -313,9 +330,11 @@
 	int ret = ENODEV;
 
 	switch (rdma_node_get_transport(dev_addr->dev_type)) {
+#ifdef IB_SUPPORTED
 	case RDMA_TRANSPORT_IB:
 		ib_addr_get_sgid(dev_addr, &gid);
 		break;
+#endif
 	case RDMA_TRANSPORT_IWARP:
 		iw_addr_get_sgid(dev_addr, &gid);
 		break;
@@ -323,15 +342,17 @@
 		return (ENODEV);
 	}
 
-	list_for_each_entry(cma_dev, &dev_list, list) {
+	TAILQ_FOREACH(cma_dev, &dev_list, list) {
 		ret = ib_find_cached_gid(cma_dev->device, &gid,
 					 &id_priv->id.port_num, NULL);
 		if (!ret) {
+#ifdef IB_SUPPORTED
 			ret = cma_set_qkey(cma_dev->device,
 					   id_priv->id.port_num,
 					   id_priv->id.ps, dev_addr,
 					   &id_priv->qkey);
 			if (!ret)
+#endif
 				cma_attach_to_dev(id_priv, cma_dev);
 			break;
 		}
@@ -341,8 +362,10 @@
 
 static void cma_deref_id(struct rdma_id_private *id_priv)
 {
+	mtx_lock(&id_priv->lock);
 	if (atomic_dec_and_test(&id_priv->refcount))
-		complete(&id_priv->comp);
+		cv_broadcast(&id_priv->comp);
+	mtx_unlock(&id_priv->lock);
 }
 
 static int cma_disable_remove(struct rdma_id_private *id_priv,
@@ -362,8 +385,10 @@
 
 static void cma_enable_remove(struct rdma_id_private *id_priv)
 {
+	mtx_lock(&id_priv->lock);
 	if (atomic_dec_and_test(&id_priv->dev_remove))
-		wake_up(&id_priv->wait_remove, &id_priv->lock);
+		cv_broadcast(&id_priv->wait_remove);
+	mtx_unlock(&id_priv->lock);
 }
 
 static int cma_has_cm_dev(struct rdma_id_private *id_priv)
@@ -376,22 +401,22 @@
 {
 	struct rdma_id_private *id_priv;
 
-	id_priv = kzalloc(sizeof *id_priv, M_WAITOK);
+	id_priv = malloc(sizeof *id_priv, M_DEVBUF, M_WAITOK);
 	if (!id_priv)
 		return ERR_PTR(ENOMEM);
+	bzero(id_priv, sizeof *id_priv);
 
 	id_priv->state = CMA_IDLE;
 	id_priv->id.context = context;
 	id_priv->id.event_handler = event_handler;
 	id_priv->id.ps = ps;
-	mtx_init(&id_priv->lock, "rdma_id_priv", NULL, MTX_DUPOK|MTX_DEF);
-	init_completion(&id_priv->comp);
+	mtx_init(&id_priv->lock, "rdma_cm_id_priv", NULL, MTX_DUPOK|MTX_DEF);
+	cv_init(&id_priv->comp, "rdma_cm_id_priv");
 	atomic_set(&id_priv->refcount, 1);
 	cv_init(&id_priv->wait_remove, "id priv wait remove");
 	atomic_set(&id_priv->dev_remove, 0);
-	TAILQ_INIT(&id_priv->listen_list);
-	TAILQ_INIT(&id_priv->mc_list);
-	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
+	LIST_INIT(&id_priv->listen_list);
+	arc4rand(&id_priv->seq_num, sizeof id_priv->seq_num, 0);
 
 	return &id_priv->id;
 }
@@ -450,7 +475,6 @@
 	qp = ib_create_qp(pd, qp_init_attr);
 	if (IS_ERR(qp))
 		return PTR_ERR(qp);
-
 	if (cma_is_ud_ps(id_priv->id.ps))
 		ret = cma_init_ud_qp(id_priv, qp);
 	else
@@ -500,6 +524,7 @@
 	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
 }
 
+#ifdef IB_SUPPORTED
 static int cma_modify_qp_rts(struct rdma_cm_id *id)
 {
 	struct ib_qp_attr qp_attr;
@@ -515,6 +540,7 @@
 
 	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
 }
+#endif
 
 static int cma_modify_qp_err(struct rdma_cm_id *id)
 {
@@ -591,7 +617,7 @@
 	struct in6_addr *ip6;
 
 	if (addr->sa_family == AF_INET)
-		return ZERONET(((struct sockaddr_in *) addr)->sin_addr.s_addr);
+		return in_nullhost(((struct sockaddr_in *) addr)->sin_addr);
 	else {
 		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
 		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
@@ -601,7 +627,7 @@
 
 static inline int cma_loopback_addr(struct sockaddr *addr)
 {
-	return LOOPBACK(((struct sockaddr_in *) addr)->sin_addr.s_addr);
+	return ((struct sockaddr_in *) addr)->sin_addr.s_addr == htonl(INADDR_LOOPBACK);
 }
 
 static inline int cma_any_addr(struct sockaddr *addr)
@@ -622,6 +648,7 @@
 	return !cma_port(addr);
 }
 
+#ifdef IB_SUPPORTED
 static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
 			    u8 *ip_ver, __u16 *port,
 			    union cma_ip_addr **src, union cma_ip_addr **dst)
@@ -690,6 +717,7 @@
 		break;
 	}
 }
+#endif
 
 static inline int cma_user_data_offset(enum rdma_port_space ps)
 {
@@ -738,12 +766,13 @@
 		}
 		cma_detach_from_dev(id_priv);
 	}
-	list_del(&id_priv->listen_list);
+	LIST_REMOVE(id_priv, listen_entry);
 
 	cma_deref_id(id_priv);
-	wait_for_completion(&id_priv->comp);
+	mtx_lock(&id_priv->lock);
+	while (atomic_read(&id_priv->refcount))
+		cv_wait(&id_priv->comp, &id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 
-	kfree(id_priv);
+	free(id_priv, M_DEVBUF);
 }
 
 static void cma_cancel_listens(struct rdma_id_private *id_priv)
@@ -751,11 +780,10 @@
 	struct rdma_id_private *dev_id_priv;
 
 	mtx_lock(&lock);
-	list_del(&id_priv->list);
+	LIST_REMOVE(id_priv, list);
 
-	while (!list_empty(&id_priv->listen_list)) {
-		dev_id_priv = list_entry(id_priv->listen_list.next,
-					 struct rdma_id_private, listen_list);
+	while (!LIST_EMPTY(&id_priv->listen_list)) {
+		dev_id_priv = LIST_FIRST(&id_priv->listen_list);
 		cma_destroy_listen(dev_id_priv);
 	}
 	mtx_unlock(&lock);
@@ -789,26 +817,27 @@
 		return;
 
 	mtx_lock(&lock);
-	hlist_del(&id_priv->node);
-	if (hlist_empty(&bind_list->owners)) {
+	TAILQ_REMOVE(&bind_list->owners, id_priv, node);
+	if (TAILQ_EMPTY(&bind_list->owners)) {
 		idr_remove(bind_list->ps, bind_list->port);
-		kfree(bind_list);
+		free(bind_list, M_DEVBUF);
 	}
 	mtx_unlock(&lock);
 }
 
+#ifdef IB_SUPPORTED
 static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
 {
 	struct cma_multicast *mc;
 
-	while (!list_empty(&id_priv->mc_list)) {
-		mc = container_of(id_priv->mc_list.next,
-				  struct cma_multicast, list);
-		list_del(&mc->list);
+	while (!LIST_EMPTY(&id_priv->mc_list)) {
+		mc = LIST_FIRST(&id_priv->mc_list);
+		LIST_REMOVE(mc, list);
 		ib_sa_free_multicast(mc->multicast.ib);
-		kfree(mc);
+		free(mc, M_DEVBUF);
 	}
 }
+#endif
 
 void rdma_destroy_id(struct rdma_cm_id *id)
 {
@@ -822,19 +851,23 @@
 	mtx_lock(&lock);
 	if (id_priv->cma_dev) {
 		mtx_unlock(&lock);
+#ifdef IB_SUPPORTED
 		switch (rdma_node_get_transport(id->device->node_type)) {
 		case RDMA_TRANSPORT_IB:
 			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
 				ib_destroy_cm_id(id_priv->cm_id.ib);
 			break;
 		case RDMA_TRANSPORT_IWARP:
+#endif
 			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
 				iw_destroy_cm_id(id_priv->cm_id.iw);
+#ifdef IB_SUPPORTED
 			break;
 		default:
 			break;
 		}
 		cma_leave_mc_groups(id_priv);
+#endif
 		mtx_lock(&lock);
 		cma_detach_from_dev(id_priv);
 	}
@@ -842,13 +875,15 @@
 
 	cma_release_port(id_priv);
 	cma_deref_id(id_priv);
-	wait_for_completion(&id_priv->comp);
+	mtx_lock(&id_priv->lock);
+	while (atomic_read(&id_priv->refcount))
+		cv_wait(&id_priv->comp, &id_priv->lock);
+	mtx_unlock(&id_priv->lock);
 
-	kfree(id_priv->id.route.path_rec);
-	kfree(id_priv);
+	free(id_priv->id.route.path_rec, M_DEVBUF);
+	free(id_priv, M_DEVBUF);
 }
 EXPORT_SYMBOL(rdma_destroy_id);
 
+#ifdef IB_SUPPORTED
 static int cma_rep_recv(struct rdma_id_private *id_priv)
 {
 	int ret;
@@ -993,8 +1028,8 @@
 
 	rt = &id->route;
 	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
-	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
-			       M_WAITOK);
+	rt->path_rec = malloc(sizeof *rt->path_rec * rt->num_paths,
+			       M_DEVBUF, M_WAITOK);
 	if (!rt->path_rec)
 		goto destroy_id;
 
@@ -1178,6 +1213,7 @@
 		break;
 	}
 }
+#endif /* IB_SUPPORTED */
 
 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 {
@@ -1246,9 +1282,10 @@
 	struct rdma_cm_id *new_cm_id;
 	struct rdma_id_private *listen_id, *conn_id;
 	struct sockaddr_in *sin;
-	struct net_device *dev = NULL;
+	struct ifnet *dev;
 	struct rdma_cm_event event;
 	int ret;
+	struct ifaddr *ifa;
 
 	listen_id = cm_id->context;
 	if (cma_disable_remove(listen_id, CMA_LISTEN))
@@ -1266,13 +1303,14 @@
 	atomic_inc(&conn_id->dev_remove);
 	conn_id->state = CMA_CONNECT;
 
-	dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
-	if (!dev) {
+	ifa = ifa_ifwithaddr((struct sockaddr *)&iw_event->local_addr);
+	if (!ifa) {
 		ret = EADDRNOTAVAIL;
 		cma_enable_remove(conn_id);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
+	dev = ifa->ifa_ifp;
 	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
 	if (ret) {
 		cma_enable_remove(conn_id);
@@ -1312,12 +1350,11 @@
 	}
 
 out:
-	if (dev)
-		dev_put(dev);
 	cma_enable_remove(listen_id);
 	return ret;
 }
 
+#ifdef IB_SUPPORTED
 static int cma_ib_listen(struct rdma_id_private *id_priv)
 {
 	struct ib_cm_compare_data compare_data;
@@ -1346,6 +1383,7 @@
 
 	return ret;
 }
+#endif
 
 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
 {
@@ -1399,7 +1437,7 @@
 	       ip_addr_size(&id_priv->id.route.addr.src_addr));
 
 	cma_attach_to_dev(dev_id_priv, cma_dev);
-	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
+	LIST_INSERT_HEAD(&id_priv->listen_list, dev_id_priv, listen_entry);
 
 	ret = rdma_listen(id, id_priv->backlog);
 	if (ret)
@@ -1415,8 +1453,8 @@
 	struct cma_device *cma_dev;
 
 	mtx_lock(&lock);
-	list_add_tail(&id_priv->list, &listen_any_list);
-	list_for_each_entry(cma_dev, &dev_list, list)
+	LIST_INSERT_HEAD(&listen_any_list, id_priv, list);
+	TAILQ_FOREACH(cma_dev, &dev_list, list)
 		cma_listen_on_dev(id_priv, cma_dev);
 	mtx_unlock(&lock);
 }
@@ -1447,6 +1485,7 @@
 
 	id_priv->backlog = backlog;
 	if (id->device) {
+#ifdef IB_SUPPORTED
 		switch (rdma_node_get_transport(id->device->node_type)) {
 		case RDMA_TRANSPORT_IB:
 			ret = cma_ib_listen(id_priv);
@@ -1454,14 +1493,17 @@
 				goto err;
 			break;
 		case RDMA_TRANSPORT_IWARP:
+#endif
 			ret = cma_iw_listen(id_priv, backlog);
 			if (ret)
 				goto err;
+#ifdef IB_SUPPORTED
 			break;
 		default:
 			ret = ENOSYS;
 			goto err;
 		}
+#endif
 	} else
 		cma_listen_on_all(id_priv);
 
@@ -1473,6 +1515,7 @@
 }
 EXPORT_SYMBOL(rdma_listen);
 
+#ifdef IB_SUPPORTED
 static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
 			      void *context)
 {
@@ -1491,7 +1534,7 @@
 		work->event.status = status;
 	}
 
-	queue_work(cma_wq, &work->work);
+	taskqueue_enqueue(cma_wq, &work->task);
 }
 
 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
@@ -1517,10 +1560,11 @@
 
 	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
 }
+#endif
 
-static void cma_work_handler(struct work_struct *_work)
+static void cma_work_handler(void *context, int pending)
 {
-	struct cma_work *work = container_of(_work, struct cma_work, work);
+	struct cma_work *work = context;
 	struct rdma_id_private *id_priv = work->id;
 	int destroy = 0;
 
@@ -1537,26 +1581,28 @@
 	cma_deref_id(id_priv);
 	if (destroy)
 		rdma_destroy_id(&id_priv->id);
-	kfree(work);
+	free(work, M_DEVBUF);
 }
 
+#ifdef IB_SUPPORTED
 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
 {
 	struct rdma_route *route = &id_priv->id.route;
 	struct cma_work *work;
 	int ret;
 
-	work = kzalloc(sizeof *work, M_WAITOK);
+	work = malloc(sizeof *work, M_DEVBUF, M_WAITOK);
 	if (!work)
 		return (ENOMEM);
+	bzero(work, sizeof *work);
 
 	work->id = id_priv;
-	INIT_WORK(&work->work, cma_work_handler);
+	TASK_INIT(&work->task, 0, cma_work_handler, work);
 	work->old_state = CMA_ROUTE_QUERY;
 	work->new_state = CMA_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 
-	route->path_rec = kmalloc(sizeof *route->path_rec, M_WAITOK);
+	route->path_rec = malloc(sizeof *route->path_rec, M_DEVBUF, M_WAITOK);
 	if (!route->path_rec) {
 		ret = ENOMEM;
 		goto err1;
@@ -1568,10 +1614,10 @@
 
 	return 0;
 err2:
-	kfree(route->path_rec);
+	free(route->path_rec, M_DEVBUF);
 	route->path_rec = NULL;
 err1:
-	kfree(work);
+	free(work, M_DEVBUF);
 	return ret;
 }
 
@@ -1585,7 +1631,7 @@
 	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
 		return (EINVAL);
 
-	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, M_WAITOK);
+	id->route.path_rec = malloc(sizeof *path_rec * num_paths, M_DEVBUF, M_WAITOK);
 	if (!id->route.path_rec) {
 		ret = ENOMEM;
 		goto err;
@@ -1598,21 +1644,23 @@
 	return ret;
 }
 EXPORT_SYMBOL(rdma_set_ib_paths);
+#endif
 
 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
 {
 	struct cma_work *work;
 
-	work = kzalloc(sizeof *work, M_WAITOK);
+	work = malloc(sizeof *work, M_DEVBUF, M_WAITOK);
 	if (!work)
 		return (ENOMEM);
+	bzero(work, sizeof *work);
 
 	work->id = id_priv;
-	INIT_WORK(&work->work, cma_work_handler);
+	TASK_INIT(&work->task, 0, cma_work_handler, work);
 	work->old_state = CMA_ROUTE_QUERY;
 	work->new_state = CMA_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
-	queue_work(cma_wq, &work->work);
+	taskqueue_enqueue(cma_wq, &work->task);
 	return 0;
 }
 
@@ -1626,17 +1674,21 @@
 		return (EINVAL);
 
 	atomic_inc(&id_priv->refcount);
+#ifdef IB_SUPPORTED
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
 		ret = cma_resolve_ib_route(id_priv, timeout_ms);
 		break;
 	case RDMA_TRANSPORT_IWARP:
+#endif
 		ret = cma_resolve_iw_route(id_priv, timeout_ms);
+#ifdef IB_SUPPORTED
 		break;
 	default:
 		ret = ENOSYS;
 		break;
 	}
+#endif
 	if (ret)
 		goto err;
 
@@ -1658,18 +1710,18 @@
 	u8 p;
 
 	mtx_lock(&lock);
-	if (list_empty(&dev_list)) {
+	if (TAILQ_EMPTY(&dev_list)) {
 		ret = ENODEV;
 		goto out;
 	}
-	list_for_each_entry(cma_dev, &dev_list, list)
+	TAILQ_FOREACH(cma_dev, &dev_list, list)
 		for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
 			if (!ib_query_port(cma_dev->device, p, &port_attr) &&
 			    port_attr.state == IB_PORT_ACTIVE)
 				goto port_found;
 
 	p = 1;
-	cma_dev = list_entry(dev_list.next, struct cma_device, list);
+	cma_dev = TAILQ_FIRST(&dev_list);
 
 port_found:
 	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
@@ -1742,9 +1794,10 @@
 	union ib_gid gid;
 	int ret;
 
-	work = kzalloc(sizeof *work, M_WAITOK);
+	work = malloc(sizeof *work, M_DEVBUF, M_WAITOK);
 	if (!work)
 		return (ENOMEM);
+	bzero(work, sizeof *work);
 
 	if (!id_priv->cma_dev) {
 		ret = cma_bind_loopback(id_priv);
@@ -1763,14 +1816,14 @@
 	}
 
 	work->id = id_priv;
-	INIT_WORK(&work->work, cma_work_handler);
+	TASK_INIT(&work->task, 0, cma_work_handler, work);
 	work->old_state = CMA_ADDR_QUERY;
 	work->new_state = CMA_ADDR_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
-	queue_work(cma_wq, &work->work);
+	taskqueue_enqueue(cma_wq, &work->task);
 	return 0;
 err:
-	kfree(work);
+	free(work, M_DEVBUF);
 	return ret;
 }
 
@@ -1826,7 +1879,7 @@
 	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
 	sin->sin_port = htons(bind_list->port);
 	id_priv->bind_list = bind_list;
-	hlist_add_head(&id_priv->node, &bind_list->owners);
+	TAILQ_INSERT_HEAD(&bind_list->owners, id_priv, node);
 }
 
 static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
@@ -1835,9 +1888,10 @@
 	struct rdma_bind_list *bind_list;
 	int port, ret;
 
-	bind_list = kzalloc(sizeof *bind_list, M_WAITOK);
+	bind_list = malloc(sizeof *bind_list, M_DEVBUF, M_WAITOK);
 	if (!bind_list)
 		return (ENOMEM);
+	bzero(bind_list, sizeof *bind_list);
 
 	do {
 		ret = idr_get_new_above(ps, bind_list, snum, &port);
@@ -1858,7 +1912,7 @@
 err2:
 	idr_remove(ps, port);
 err1:
-	kfree(bind_list);
+	free(bind_list, M_DEVBUF);
 	return ret;
 }
 
@@ -1867,9 +1921,10 @@
 	struct rdma_bind_list *bind_list;
 	int port, ret;
 
-	bind_list = kzalloc(sizeof *bind_list, M_WAITOK);
+	bind_list = malloc(sizeof *bind_list, M_DEVBUF, M_WAITOK);
 	if (!bind_list)
 		return (ENOMEM);
+	bzero(bind_list, sizeof *bind_list);
 
 retry:
 	do {
@@ -1879,18 +1934,18 @@
 	if (ret)
 		goto err1;
 
-	if (port > sysctl_local_port_range[1]) {
-		if (next_port != sysctl_local_port_range[0]) {
+	if (port > ipport_lastauto) {
+		if (next_port != ipport_firstauto) {
 			idr_remove(ps, port);
-			next_port = sysctl_local_port_range[0];
+			next_port = ipport_firstauto;
 			goto retry;
 		}
 		ret = EADDRNOTAVAIL;
 		goto err2;
 	}
 
-	if (port == sysctl_local_port_range[1])
-		next_port = sysctl_local_port_range[0];
+	if (port == ipport_lastauto)
+		next_port = ipport_firstauto;
 	else
 		next_port = port + 1;
 
@@ -1901,7 +1956,7 @@
 err2:
 	idr_remove(ps, port);
 err1:
-	kfree(bind_list);
+	free(bind_list, M_DEVBUF);
 	return ret;
 }
 
@@ -1910,12 +1965,12 @@
 	struct rdma_id_private *cur_id;
 	struct sockaddr_in *sin, *cur_sin;
 	struct rdma_bind_list *bind_list;
-	struct hlist_node *node;
 	unsigned short snum;
 
 	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
 	snum = ntohs(sin->sin_port);
-	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
+	if (snum <= ipport_reservedhigh && snum >= ipport_reservedlow &&
+	    priv_check(curthread, PRIV_NETINET_RESERVEDPORT))
 		return (EACCES);
 
 	bind_list = idr_find(ps, snum);
@@ -1929,7 +1984,7 @@
 	if (cma_any_addr(&id_priv->id.route.addr.src_addr))
 		return (EADDRNOTAVAIL);
 
-	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
+	TAILQ_FOREACH(cur_id, &bind_list->owners, node) {
 		if (cma_any_addr(&cur_id->id.route.addr.src_addr))
 			return (EADDRNOTAVAIL);
 
@@ -2016,6 +2071,7 @@
 }
 EXPORT_SYMBOL(rdma_bind_addr);
 
+#ifdef IB_SUPPORTED
 static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
 			  struct rdma_route *route)
 {
@@ -2115,12 +2171,13 @@
 
 	req.private_data_len = sizeof(struct cma_hdr) +
 			       conn_param->private_data_len;
-	req.private_data = kzalloc(req.private_data_len, M_NOWAIT);
+	req.private_data = malloc(req.private_data_len, M_DEVBUF, M_NOWAIT);
 	if (!req.private_data)
 		return (ENOMEM);
+	bzero((void *)req.private_data, req.private_data_len);
 
 	if (conn_param->private_data && conn_param->private_data_len)
-		memcpy((void *) req.private_data + sizeof(struct cma_hdr),
+		memcpy((caddr_t) req.private_data + sizeof(struct cma_hdr),
 		       conn_param->private_data, conn_param->private_data_len);
 
 	route = &id_priv->id.route;
@@ -2147,7 +2204,7 @@
 		id_priv->cm_id.ib = NULL;
 	}
 out:
-	kfree(req.private_data);
+	free(req.private_data, M_DEVBUF);
 	return ret;
 }
 
@@ -2162,9 +2219,10 @@
 	memset(&req, 0, sizeof req);
 	offset = cma_user_data_offset(id_priv->id.ps);
 	req.private_data_len = offset + conn_param->private_data_len;
-	private_data = kzalloc(req.private_data_len, M_NOWAIT);
+	private_data = malloc(req.private_data_len, M_DEVBUF, M_NOWAIT);
 	if (!private_data)
 		return (ENOMEM);
+	bzero(private_data, req.private_data_len);
 
 	if (conn_param->private_data && conn_param->private_data_len)
 		memcpy(private_data + offset, conn_param->private_data,
@@ -2209,9 +2267,10 @@
 		id_priv->cm_id.ib = NULL;
 	}
 
-	kfree(private_data);
+	free(private_data, M_DEVBUF);
 	return ret;
 }
+#endif
 
 static int cma_connect_iw(struct rdma_id_private *id_priv,
 			  struct rdma_conn_param *conn_param)
@@ -2270,6 +2329,7 @@
 		id_priv->srq = conn_param->srq;
 	}
 
+#ifdef IB_SUPPORTED
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
 		if (cma_is_ud_ps(id->ps))
@@ -2278,12 +2338,15 @@
 			ret = cma_connect_ib(id_priv, conn_param);
 		break;
 	case RDMA_TRANSPORT_IWARP:
+#endif
 		ret = cma_connect_iw(id_priv, conn_param);
+#ifdef IB_SUPPORTED
 		break;
 	default:
 		ret = ENOSYS;
 		break;
 	}
+#endif
 	if (ret)
 		goto err;
 
@@ -2294,6 +2357,7 @@
 }
 EXPORT_SYMBOL(rdma_connect);
 
+#ifdef IB_SUPPORTED
 static int cma_accept_ib(struct rdma_id_private *id_priv,
 			 struct rdma_conn_param *conn_param)
 {
@@ -2335,6 +2399,7 @@
 out:
 	return ret;
 }
+#endif
 
 static int cma_accept_iw(struct rdma_id_private *id_priv,
 		  struct rdma_conn_param *conn_param)
@@ -2358,6 +2423,7 @@
 	return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
 }
 
+#ifdef IB_SUPPORTED
 static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
 			     enum ib_cm_sidr_status status,
 			     const void *private_data, int private_data_len)
@@ -2375,6 +2441,7 @@
 
 	return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
 }
+#endif
 
 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 {
@@ -2390,6 +2457,7 @@
 		id_priv->srq = conn_param->srq;
 	}
 
+#ifdef IB_SUPPORTED
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
 		if (cma_is_ud_ps(id->ps))
@@ -2402,12 +2470,15 @@
 			ret = cma_rep_recv(id_priv);
 		break;
 	case RDMA_TRANSPORT_IWARP:
+#endif
 		ret = cma_accept_iw(id_priv, conn_param);
+#ifdef IB_SUPPORTED
 		break;
 	default:
 		ret = ENOSYS;
 		break;
 	}
+#endif

>>> TRUNCATED FOR MAIL (1000 lines) <<<

