git: c7d8a572acb2 - stable/12 - libc/libc/rpc: refactor some global variables

From: Alan Somers <asomers_at_FreeBSD.org>
Date: Fri, 01 Dec 2023 21:17:34 UTC
The branch stable/12 has been updated by asomers:

URL: https://cgit.FreeBSD.org/src/commit/?id=c7d8a572acb2bcdf824a75af3e97b24e36463a34

commit c7d8a572acb2bcdf824a75af3e97b24e36463a34
Author:     Alan Somers <asomers@FreeBSD.org>
AuthorDate: 2023-11-09 22:58:56 +0000
Commit:     Alan Somers <asomers@FreeBSD.org>
CommitDate: 2023-12-01 21:15:45 +0000

    libc/libc/rpc: refactor some global variables
    
    * Combine dg_fd_locks and dg_cv into one array.
    * Similarly for vc_fd_locks and vc_cv
    * Turn some macros into inline functions
    
    This is a mostly cosmetic change to make refactoring these structures in
    a future commit easier.
    
    Sponsored by:   Axcient
    Reviewed by:    kib
    Differential Revision: https://reviews.freebsd.org/D42597
    
    (cherry picked from commit a5c2f4e939430f0048136c39fb9fa6093d401905)
    
    lib/libc/rpc: switch the per-fd structs in clnt_{dg,vc}.c to RB Trees
    
    This saves oodles of memory, especially when "ulimit -n" is large.  It
    also prevents a buffer overflow if getrlimit should fail.
    
    Also replace per-fd condvars with mutexes to simplify the code.
    
    PR:             274968
    Sponsored by:   Axcient
    Reviewed by:    kib
    Differential Revision: https://reviews.freebsd.org/D42597
    
    (cherry picked from commit 24938f9311c9c9acc1ce747f4e6a088c2dbc967d)
---
 include/rpc/rpc_com.h      |   1 -
 lib/libc/rpc/clnt_dg.c     | 170 +++++++++++++++++++++---------------------
 lib/libc/rpc/clnt_vc.c     | 179 ++++++++++++++++++++++-----------------------
 lib/libc/rpc/rpc_com.h     |   1 -
 lib/libc/rpc/rpc_generic.c |  23 ------
 sys/rpc/rpc_com.h          |   1 -
 6 files changed, 168 insertions(+), 207 deletions(-)

diff --git a/include/rpc/rpc_com.h b/include/rpc/rpc_com.h
index dd331e2601f6..a1f51171c314 100644
--- a/include/rpc/rpc_com.h
+++ b/include/rpc/rpc_com.h
@@ -59,7 +59,6 @@
 
 __BEGIN_DECLS
 extern u_int __rpc_get_a_size(int);
-extern int __rpc_dtbsize(void);
 extern int _rpc_dtablesize(void);
 extern struct netconfig * __rpcgettp(int);
 extern  int  __rpc_get_default_domain(char **);
diff --git a/lib/libc/rpc/clnt_dg.c b/lib/libc/rpc/clnt_dg.c
index 26bf34633623..006ba3009d97 100644
--- a/lib/libc/rpc/clnt_dg.c
+++ b/lib/libc/rpc/clnt_dg.c
@@ -51,13 +51,17 @@ __FBSDID("$FreeBSD$");
 #include <sys/time.h>
 #include <sys/socket.h>
 #include <sys/ioctl.h>
+#include <sys/tree.h>
 #include <arpa/inet.h>
 #include <rpc/rpc.h>
 #include <rpc/rpcsec_gss.h>
+#include <assert.h>
 #include <errno.h>
+#include <pthread.h>
 #include <stdlib.h>
 #include <string.h>
 #include <signal.h>
+#include <stdbool.h>
 #include <unistd.h>
 #include <err.h>
 #include "un-namespace.h"
@@ -92,28 +96,65 @@ static void clnt_dg_destroy(CLIENT *);
  *	This machinery implements per-fd locks for MT-safety.  It is not
  *	sufficient to do per-CLIENT handle locks for MT-safety because a
  *	user may create more than one CLIENT handle with the same fd behind
- *	it.  Therefore, we allocate an array of flags (dg_fd_locks), protected
- *	by the clnt_fd_lock mutex, and an array (dg_cv) of condition variables
- *	similarly protected.  Dg_fd_lock[fd] == 1 => a call is active on some
- *	CLIENT handle created for that fd.
- *	The current implementation holds locks across the entire RPC and reply,
- *	including retransmissions.  Yes, this is silly, and as soon as this
- *	code is proven to work, this should be the first thing fixed.  One step
- *	at a time.
+ *	it.  Therefore, we allocate an associative array of flags and condition
+ *	variables (dg_fd).  The flags and the array are protected by the
+ *	clnt_fd_lock mutex.  dg_fd[fd].lock == 1 => a call is active on some
+ *	CLIENT handle created for that fd.  The current implementation holds
+ *	locks across the entire RPC and reply, including retransmissions.  Yes,
+ *	this is silly, and as soon as this code is proven to work, this should
+ *	be the first thing fixed.  One step at a time.
  */
-static int	*dg_fd_locks;
-static cond_t	*dg_cv;
-#define	release_fd_lock(fd, mask) {		\
-	mutex_lock(&clnt_fd_lock);	\
-	dg_fd_locks[fd] = 0;		\
-	mutex_unlock(&clnt_fd_lock);	\
-	thr_sigsetmask(SIG_SETMASK, &(mask), NULL); \
-	cond_signal(&dg_cv[fd]);	\
+struct dg_fd {
+	RB_ENTRY(dg_fd) dg_link;
+	int fd;
+	mutex_t mtx;
+};
+static inline int
+cmp_dg_fd(struct dg_fd *a, struct dg_fd *b)
+{
+	if (a->fd > b->fd) {
+		return (1);
+	} else if (a->fd < b->fd) {
+		return (-1);
+	} else {
+		return (0);
+	}
+}
+RB_HEAD(dg_fd_list, dg_fd);
+RB_PROTOTYPE(dg_fd_list, dg_fd, dg_link, cmp_dg_fd);
+RB_GENERATE(dg_fd_list, dg_fd, dg_link, cmp_dg_fd);
+struct dg_fd_list dg_fd_head = RB_INITIALIZER(&dg_fd_head);
+
+/*
+ * Find the lock structure for the given file descriptor, or initialize it if
+ * it does not already exist.  The clnt_fd_lock mutex must be held.
+ */
+static struct dg_fd *
+dg_fd_find(int fd)
+{
+	struct dg_fd key, *elem;
+
+	key.fd = fd;
+	elem = RB_FIND(dg_fd_list, &dg_fd_head, &key);
+	if (elem == NULL) {
+		elem = calloc(1, sizeof(*elem));
+		elem->fd = fd;
+		mutex_init(&elem->mtx, NULL);
+		RB_INSERT(dg_fd_list, &dg_fd_head, elem);
+	}
+	return (elem);
+}
+
+static void
+release_fd_lock(struct dg_fd *elem, sigset_t mask)
+{
+	mutex_unlock(&elem->mtx);
+	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
 }
 
 static const char mem_err_clnt_dg[] = "clnt_dg_create: out of memory";
 
-/* VARIABLES PROTECTED BY clnt_fd_lock: dg_fd_locks, dg_cv */
+/* VARIABLES PROTECTED BY clnt_fd_lock: dg_fd */
 
 #define	MCALL_MSG_SIZE 24
 
@@ -171,47 +212,9 @@ clnt_dg_create(int fd, const struct netbuf *svcaddr, rpcprog_t program,
 	struct cu_data *cu = NULL;	/* private data */
 	struct timeval now;
 	struct rpc_msg call_msg;
-	sigset_t mask;
-	sigset_t newmask;
 	struct __rpc_sockinfo si;
 	int one = 1;
 
-	sigfillset(&newmask);
-	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
-	mutex_lock(&clnt_fd_lock);
-	if (dg_fd_locks == (int *) NULL) {
-		int cv_allocsz;
-		size_t fd_allocsz;
-		int dtbsize = __rpc_dtbsize();
-
-		fd_allocsz = dtbsize * sizeof (int);
-		dg_fd_locks = (int *) mem_alloc(fd_allocsz);
-		if (dg_fd_locks == (int *) NULL) {
-			mutex_unlock(&clnt_fd_lock);
-			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
-			goto err1;
-		} else
-			memset(dg_fd_locks, '\0', fd_allocsz);
-
-		cv_allocsz = dtbsize * sizeof (cond_t);
-		dg_cv = (cond_t *) mem_alloc(cv_allocsz);
-		if (dg_cv == (cond_t *) NULL) {
-			mem_free(dg_fd_locks, fd_allocsz);
-			dg_fd_locks = (int *) NULL;
-			mutex_unlock(&clnt_fd_lock);
-			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
-			goto err1;
-		} else {
-			int i;
-
-			for (i = 0; i < dtbsize; i++)
-				cond_init(&dg_cv[i], 0, (void *) 0);
-		}
-	}
-
-	mutex_unlock(&clnt_fd_lock);
-	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
-
 	if (svcaddr == NULL) {
 		rpc_createerr.cf_stat = RPC_UNKNOWNADDR;
 		return (NULL);
@@ -332,25 +335,21 @@ clnt_dg_call(CLIENT *cl, rpcproc_t proc, xdrproc_t xargs, void *argsp,
 	struct timespec ts;
 	struct kevent kv;
 	struct sockaddr *sa;
+	struct dg_fd *elem;
 	sigset_t mask;
 	sigset_t newmask;
 	socklen_t salen;
 	ssize_t recvlen = 0;
-	int kin_len, n, rpc_lock_value;
+	int kin_len, n;
 	u_int32_t xid;
 
 	outlen = 0;
 	sigfillset(&newmask);
 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 	mutex_lock(&clnt_fd_lock);
-	while (dg_fd_locks[cu->cu_fd])
-		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
-	if (__isthreaded)
-		rpc_lock_value = 1;
-	else
-		rpc_lock_value = 0;
-	dg_fd_locks[cu->cu_fd] = rpc_lock_value;
+	elem = dg_fd_find(cu->cu_fd);
 	mutex_unlock(&clnt_fd_lock);
+	mutex_lock(&elem->mtx);
 	if (cu->cu_total.tv_usec == -1) {
 		timeout = utimeout;	/* use supplied timeout */
 	} else {
@@ -604,7 +603,7 @@ out:
 	if (cu->cu_kq >= 0)
 		_close(cu->cu_kq);
 	cu->cu_kq = -1;
-	release_fd_lock(cu->cu_fd, mask);
+	release_fd_lock(elem, mask);
 	return (cu->cu_error.re_status);
 }
 
@@ -620,6 +619,7 @@ static bool_t
 clnt_dg_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
 {
 	struct cu_data *cu = (struct cu_data *)cl->cl_private;
+	struct dg_fd *elem;
 	XDR *xdrs = &(cu->cu_outxdrs);
 	bool_t dummy;
 	sigset_t mask;
@@ -628,13 +628,12 @@ clnt_dg_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
 	sigfillset(&newmask);
 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 	mutex_lock(&clnt_fd_lock);
-	while (dg_fd_locks[cu->cu_fd])
-		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
+	elem = dg_fd_find(cu->cu_fd);
+	mutex_lock(&elem->mtx);
 	xdrs->x_op = XDR_FREE;
 	dummy = (*xdr_res)(xdrs, res_ptr);
 	mutex_unlock(&clnt_fd_lock);
-	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
-	cond_signal(&dg_cv[cu->cu_fd]);
+	release_fd_lock(elem, mask);
 	return (dummy);
 }
 
@@ -649,41 +648,36 @@ clnt_dg_control(CLIENT *cl, u_int request, void *info)
 {
 	struct cu_data *cu = (struct cu_data *)cl->cl_private;
 	struct netbuf *addr;
+	struct dg_fd *elem;
 	sigset_t mask;
 	sigset_t newmask;
-	int rpc_lock_value;
 
 	sigfillset(&newmask);
 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 	mutex_lock(&clnt_fd_lock);
-	while (dg_fd_locks[cu->cu_fd])
-		cond_wait(&dg_cv[cu->cu_fd], &clnt_fd_lock);
-	if (__isthreaded)
-                rpc_lock_value = 1;
-        else
-                rpc_lock_value = 0;
-	dg_fd_locks[cu->cu_fd] = rpc_lock_value;
+	elem = dg_fd_find(cu->cu_fd);
 	mutex_unlock(&clnt_fd_lock);
+	mutex_lock(&elem->mtx);
 	switch (request) {
 	case CLSET_FD_CLOSE:
 		cu->cu_closeit = TRUE;
-		release_fd_lock(cu->cu_fd, mask);
+		release_fd_lock(elem, mask);
 		return (TRUE);
 	case CLSET_FD_NCLOSE:
 		cu->cu_closeit = FALSE;
-		release_fd_lock(cu->cu_fd, mask);
+		release_fd_lock(elem, mask);
 		return (TRUE);
 	}
 
 	/* for other requests which use info */
 	if (info == NULL) {
-		release_fd_lock(cu->cu_fd, mask);
+		release_fd_lock(elem, mask);
 		return (FALSE);
 	}
 	switch (request) {
 	case CLSET_TIMEOUT:
 		if (time_not_ok((struct timeval *)info)) {
-			release_fd_lock(cu->cu_fd, mask);
+			release_fd_lock(elem, mask);
 			return (FALSE);
 		}
 		cu->cu_total = *(struct timeval *)info;
@@ -697,7 +691,7 @@ clnt_dg_control(CLIENT *cl, u_int request, void *info)
 		break;
 	case CLSET_RETRY_TIMEOUT:
 		if (time_not_ok((struct timeval *)info)) {
-			release_fd_lock(cu->cu_fd, mask);
+			release_fd_lock(elem, mask);
 			return (FALSE);
 		}
 		cu->cu_wait = *(struct timeval *)info;
@@ -717,7 +711,7 @@ clnt_dg_control(CLIENT *cl, u_int request, void *info)
 	case CLSET_SVC_ADDR:		/* set to new address */
 		addr = (struct netbuf *)info;
 		if (addr->len < sizeof cu->cu_raddr) {
-			release_fd_lock(cu->cu_fd, mask);
+			release_fd_lock(elem, mask);
 			return (FALSE);
 		}
 		(void) memcpy(&cu->cu_raddr, addr->buf, addr->len);
@@ -780,10 +774,10 @@ clnt_dg_control(CLIENT *cl, u_int request, void *info)
 		cu->cu_connect = *(int *)info;
 		break;
 	default:
-		release_fd_lock(cu->cu_fd, mask);
+		release_fd_lock(elem, mask);
 		return (FALSE);
 	}
-	release_fd_lock(cu->cu_fd, mask);
+	release_fd_lock(elem, mask);
 	return (TRUE);
 }
 
@@ -791,6 +785,7 @@ static void
 clnt_dg_destroy(CLIENT *cl)
 {
 	struct cu_data *cu = (struct cu_data *)cl->cl_private;
+	struct dg_fd *elem;
 	int cu_fd = cu->cu_fd;
 	sigset_t mask;
 	sigset_t newmask;
@@ -798,8 +793,8 @@ clnt_dg_destroy(CLIENT *cl)
 	sigfillset(&newmask);
 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 	mutex_lock(&clnt_fd_lock);
-	while (dg_fd_locks[cu_fd])
-		cond_wait(&dg_cv[cu_fd], &clnt_fd_lock);
+	elem = dg_fd_find(cu_fd);
+	mutex_lock(&elem->mtx);
 	if (cu->cu_closeit)
 		(void)_close(cu_fd);
 	if (cu->cu_kq >= 0)
@@ -812,8 +807,7 @@ clnt_dg_destroy(CLIENT *cl)
 		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
 	mem_free(cl, sizeof (CLIENT));
 	mutex_unlock(&clnt_fd_lock);
-	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
-	cond_signal(&dg_cv[cu_fd]);
+	release_fd_lock(elem, mask);
 }
 
 static struct clnt_ops *
diff --git a/lib/libc/rpc/clnt_vc.c b/lib/libc/rpc/clnt_vc.c
index ebd4e303916f..0c8ef1f73141 100644
--- a/lib/libc/rpc/clnt_vc.c
+++ b/lib/libc/rpc/clnt_vc.c
@@ -63,6 +63,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/poll.h>
 #include <sys/syslog.h>
 #include <sys/socket.h>
+#include <sys/tree.h>
 #include <sys/un.h>
 #include <sys/uio.h>
 
@@ -71,7 +72,9 @@ __FBSDID("$FreeBSD$");
 #include <err.h>
 #include <errno.h>
 #include <netdb.h>
+#include <pthread.h>
 #include <stdio.h>
+#include <stdbool.h>
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
@@ -123,22 +126,60 @@ struct ct_data {
  *      This machinery implements per-fd locks for MT-safety.  It is not
  *      sufficient to do per-CLIENT handle locks for MT-safety because a
  *      user may create more than one CLIENT handle with the same fd behind
- *      it.  Therefore, we allocate an array of flags (vc_fd_locks), protected
- *      by the clnt_fd_lock mutex, and an array (vc_cv) of condition variables
- *      similarly protected.  Vc_fd_lock[fd] == 1 => a call is active on some
- *      CLIENT handle created for that fd.
- *      The current implementation holds locks across the entire RPC and reply.
- *      Yes, this is silly, and as soon as this code is proven to work, this
- *      should be the first thing fixed.  One step at a time.
+ *      it.  Therefore, we allocate an associative array of flags and condition
+ *      variables (vc_fd).  The flags and the array are protected by the
+ *      clnt_fd_lock mutex.  vc_fd_lock[fd] == 1 => a call is active on some
+ *      CLIENT handle created for that fd.  The current implementation holds
+ *      locks across the entire RPC and reply.  Yes, this is silly, and as soon
+ *      as this code is proven to work, this should be the first thing fixed.
+ *      One step at a time.
  */
-static int      *vc_fd_locks;
-static cond_t   *vc_cv;
-#define release_fd_lock(fd, mask) {	\
-	mutex_lock(&clnt_fd_lock);	\
-	vc_fd_locks[fd] = 0;		\
-	mutex_unlock(&clnt_fd_lock);	\
-	thr_sigsetmask(SIG_SETMASK, &(mask), (sigset_t *) NULL);	\
-	cond_signal(&vc_cv[fd]);	\
+struct vc_fd {
+	RB_ENTRY(vc_fd) vc_link;
+	int fd;
+	mutex_t mtx;
+};
+static inline int
+cmp_vc_fd(struct vc_fd *a, struct vc_fd *b)
+{
+       if (a->fd > b->fd) {
+               return (1);
+       } else if (a->fd < b->fd) {
+               return (-1);
+       } else {
+               return (0);
+       }
+}
+RB_HEAD(vc_fd_list, vc_fd);
+RB_PROTOTYPE(vc_fd_list, vc_fd, vc_link, cmp_vc_fd);
+RB_GENERATE(vc_fd_list, vc_fd, vc_link, cmp_vc_fd);
+struct vc_fd_list vc_fd_head = RB_INITIALIZER(&vc_fd_head);
+
+/*
+ * Find the lock structure for the given file descriptor, or initialize it if
+ * it does not already exist.  The clnt_fd_lock mutex must be held.
+ */
+static struct vc_fd *
+vc_fd_find(int fd)
+{
+	struct vc_fd key, *elem;
+
+	key.fd = fd;
+	elem = RB_FIND(vc_fd_list, &vc_fd_head, &key);
+	if (elem == NULL) {
+		elem = calloc(1, sizeof(*elem));
+		elem->fd = fd;
+		mutex_init(&elem->mtx, NULL);
+		RB_INSERT(vc_fd_list, &vc_fd_head, elem);
+	}
+	return (elem);
+}
+
+static void
+release_fd_lock(struct vc_fd *elem, sigset_t mask)
+{
+	mutex_unlock(&elem->mtx);
+	thr_sigsetmask(SIG_SETMASK, &mask, NULL);
 }
 
 static const char clnt_vc_errstr[] = "%s : %s";
@@ -172,8 +213,6 @@ clnt_vc_create(int fd, const struct netbuf *raddr, const rpcprog_t prog,
 	struct timeval now;
 	struct rpc_msg call_msg;
 	static u_int32_t disrupt;
-	sigset_t mask;
-	sigset_t newmask;
 	struct sockaddr_storage ss;
 	socklen_t slen;
 	struct __rpc_sockinfo si;
@@ -191,39 +230,6 @@ clnt_vc_create(int fd, const struct netbuf *raddr, const rpcprog_t prog,
 		goto err;
 	}
 	ct->ct_addr.buf = NULL;
-	sigfillset(&newmask);
-	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
-	mutex_lock(&clnt_fd_lock);
-	if (vc_fd_locks == (int *) NULL) {
-		int cv_allocsz, fd_allocsz;
-		int dtbsize = __rpc_dtbsize();
-
-		fd_allocsz = dtbsize * sizeof (int);
-		vc_fd_locks = (int *) mem_alloc(fd_allocsz);
-		if (vc_fd_locks == (int *) NULL) {
-			mutex_unlock(&clnt_fd_lock);
-			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
-			goto err;
-		} else
-			memset(vc_fd_locks, '\0', fd_allocsz);
-
-		assert(vc_cv == (cond_t *) NULL);
-		cv_allocsz = dtbsize * sizeof (cond_t);
-		vc_cv = (cond_t *) mem_alloc(cv_allocsz);
-		if (vc_cv == (cond_t *) NULL) {
-			mem_free(vc_fd_locks, fd_allocsz);
-			vc_fd_locks = (int *) NULL;
-			mutex_unlock(&clnt_fd_lock);
-			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
-			goto err;
-		} else {
-			int i;
-
-			for (i = 0; i < dtbsize; i++)
-				cond_init(&vc_cv[i], 0, (void *) 0);
-		}
-	} else
-		assert(vc_cv != (cond_t *) NULL);
 
 	/*
 	 * XXX - fvdl connecting while holding a mutex?
@@ -234,19 +240,16 @@ clnt_vc_create(int fd, const struct netbuf *raddr, const rpcprog_t prog,
 			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
 			rpc_createerr.cf_error.re_errno = errno;
 			mutex_unlock(&clnt_fd_lock);
-			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
 			goto err;
 		}
 		if (_connect(fd, (struct sockaddr *)raddr->buf, raddr->len) < 0){
 			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
 			rpc_createerr.cf_error.re_errno = errno;
 			mutex_unlock(&clnt_fd_lock);
-			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
 			goto err;
 		}
 	}
 	mutex_unlock(&clnt_fd_lock);
-	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
 	if (!__rpc_fd2sockinfo(fd, &si))
 		goto err;
 
@@ -321,12 +324,12 @@ clnt_vc_call(CLIENT *cl, rpcproc_t proc, xdrproc_t xdr_args, void *args_ptr,
 	struct ct_data *ct = (struct ct_data *) cl->cl_private;
 	XDR *xdrs = &(ct->ct_xdrs);
 	struct rpc_msg reply_msg;
+	struct vc_fd *elem;
 	u_int32_t x_id;
 	u_int32_t *msg_x_id = &ct->ct_u.ct_mcalli;    /* yuk */
 	bool_t shipnow;
 	int refreshes = 2;
 	sigset_t mask, newmask;
-	int rpc_lock_value;
 	bool_t reply_stat;
 
 	assert(cl != NULL);
@@ -334,14 +337,9 @@ clnt_vc_call(CLIENT *cl, rpcproc_t proc, xdrproc_t xdr_args, void *args_ptr,
 	sigfillset(&newmask);
 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 	mutex_lock(&clnt_fd_lock);
-	while (vc_fd_locks[ct->ct_fd])
-		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
-	if (__isthreaded)
-                rpc_lock_value = 1;
-        else
-                rpc_lock_value = 0;
-	vc_fd_locks[ct->ct_fd] = rpc_lock_value;
+	elem = vc_fd_find(ct->ct_fd);
 	mutex_unlock(&clnt_fd_lock);
+	mutex_lock(&elem->mtx);
 	if (!ct->ct_waitset) {
 		/* If time is not within limits, we ignore it. */
 		if (time_not_ok(&timeout) == FALSE)
@@ -365,7 +363,7 @@ call_again:
 			if (ct->ct_error.re_status == RPC_SUCCESS)
 				ct->ct_error.re_status = RPC_CANTENCODEARGS;
 			(void)xdrrec_endofrecord(xdrs, TRUE);
-			release_fd_lock(ct->ct_fd, mask);
+			release_fd_lock(elem, mask);
 			return (ct->ct_error.re_status);
 		}
 	} else {
@@ -376,23 +374,23 @@ call_again:
 			if (ct->ct_error.re_status == RPC_SUCCESS)
 				ct->ct_error.re_status = RPC_CANTENCODEARGS;
 			(void)xdrrec_endofrecord(xdrs, TRUE);
-			release_fd_lock(ct->ct_fd, mask);
+			release_fd_lock(elem, mask);
 			return (ct->ct_error.re_status);
 		}
 	}
 	if (! xdrrec_endofrecord(xdrs, shipnow)) {
-		release_fd_lock(ct->ct_fd, mask);
+		release_fd_lock(elem, mask);
 		return (ct->ct_error.re_status = RPC_CANTSEND);
 	}
 	if (! shipnow) {
-		release_fd_lock(ct->ct_fd, mask);
+		release_fd_lock(elem, mask);
 		return (RPC_SUCCESS);
 	}
 	/*
 	 * Hack to provide rpc-based message passing
 	 */
 	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
-		release_fd_lock(ct->ct_fd, mask);
+		release_fd_lock(elem, mask);
 		return(ct->ct_error.re_status = RPC_TIMEDOUT);
 	}
 
@@ -406,14 +404,14 @@ call_again:
 		reply_msg.acpted_rply.ar_results.where = NULL;
 		reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
 		if (! xdrrec_skiprecord(xdrs)) {
-			release_fd_lock(ct->ct_fd, mask);
+			release_fd_lock(elem, mask);
 			return (ct->ct_error.re_status);
 		}
 		/* now decode and validate the response header */
 		if (! xdr_replymsg(xdrs, &reply_msg)) {
 			if (ct->ct_error.re_status == RPC_SUCCESS)
 				continue;
-			release_fd_lock(ct->ct_fd, mask);
+			release_fd_lock(elem, mask);
 			return (ct->ct_error.re_status);
 		}
 		if (reply_msg.rm_xid == x_id)
@@ -454,7 +452,7 @@ call_again:
 		if (refreshes-- && AUTH_REFRESH(cl->cl_auth, &reply_msg))
 			goto call_again;
 	}  /* end of unsuccessful completion */
-	release_fd_lock(ct->ct_fd, mask);
+	release_fd_lock(elem, mask);
 	return (ct->ct_error.re_status);
 }
 
@@ -474,6 +472,7 @@ static bool_t
 clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
 {
 	struct ct_data *ct;
+	struct vc_fd *elem;
 	XDR *xdrs;
 	bool_t dummy;
 	sigset_t mask;
@@ -487,14 +486,13 @@ clnt_vc_freeres(CLIENT *cl, xdrproc_t xdr_res, void *res_ptr)
 	sigfillset(&newmask);
 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 	mutex_lock(&clnt_fd_lock);
-	while (vc_fd_locks[ct->ct_fd])
-		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
+	elem = vc_fd_find(ct->ct_fd);
+	mutex_lock(&elem->mtx);
 	xdrs->x_op = XDR_FREE;
 	dummy = (*xdr_res)(xdrs, res_ptr);
-	mutex_unlock(&clnt_fd_lock);
-	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
-	cond_signal(&vc_cv[ct->ct_fd]);
 
+	mutex_unlock(&clnt_fd_lock);
+	release_fd_lock(elem, mask);
 	return dummy;
 }
 
@@ -522,10 +520,10 @@ static bool_t
 clnt_vc_control(CLIENT *cl, u_int request, void *info)
 {
 	struct ct_data *ct;
+	struct vc_fd *elem;
 	void *infop = info;
 	sigset_t mask;
 	sigset_t newmask;
-	int rpc_lock_value;
 
 	assert(cl != NULL);
 
@@ -534,23 +532,18 @@ clnt_vc_control(CLIENT *cl, u_int request, void *info)
 	sigfillset(&newmask);
 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 	mutex_lock(&clnt_fd_lock);
-	while (vc_fd_locks[ct->ct_fd])
-		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
-	if (__isthreaded)
-                rpc_lock_value = 1;
-        else
-                rpc_lock_value = 0;
-	vc_fd_locks[ct->ct_fd] = rpc_lock_value;
+	elem = vc_fd_find(ct->ct_fd);
 	mutex_unlock(&clnt_fd_lock);
+	mutex_lock(&elem->mtx);
 
 	switch (request) {
 	case CLSET_FD_CLOSE:
 		ct->ct_closeit = TRUE;
-		release_fd_lock(ct->ct_fd, mask);
+		release_fd_lock(elem, mask);
 		return (TRUE);
 	case CLSET_FD_NCLOSE:
 		ct->ct_closeit = FALSE;
-		release_fd_lock(ct->ct_fd, mask);
+		release_fd_lock(elem, mask);
 		return (TRUE);
 	default:
 		break;
@@ -558,13 +551,13 @@ clnt_vc_control(CLIENT *cl, u_int request, void *info)
 
 	/* for other requests which use info */
 	if (info == NULL) {
-		release_fd_lock(ct->ct_fd, mask);
+		release_fd_lock(elem, mask);
 		return (FALSE);
 	}
 	switch (request) {
 	case CLSET_TIMEOUT:
 		if (time_not_ok((struct timeval *)info)) {
-			release_fd_lock(ct->ct_fd, mask);
+			release_fd_lock(elem, mask);
 			return (FALSE);
 		}
 		ct->ct_wait = *(struct timeval *)infop;
@@ -584,7 +577,7 @@ clnt_vc_control(CLIENT *cl, u_int request, void *info)
 		*(struct netbuf *)info = ct->ct_addr;
 		break;
 	case CLSET_SVC_ADDR:		/* set to new address */
-		release_fd_lock(ct->ct_fd, mask);
+		release_fd_lock(elem, mask);
 		return (FALSE);
 	case CLGET_XID:
 		/*
@@ -628,10 +621,10 @@ clnt_vc_control(CLIENT *cl, u_int request, void *info)
 		break;
 
 	default:
-		release_fd_lock(ct->ct_fd, mask);
+		release_fd_lock(elem, mask);
 		return (FALSE);
 	}
-	release_fd_lock(ct->ct_fd, mask);
+	release_fd_lock(elem, mask);
 	return (TRUE);
 }
 
@@ -640,6 +633,7 @@ static void
 clnt_vc_destroy(CLIENT *cl)
 {
 	struct ct_data *ct = (struct ct_data *) cl->cl_private;
+	struct vc_fd *elem;
 	int ct_fd = ct->ct_fd;
 	sigset_t mask;
 	sigset_t newmask;
@@ -651,8 +645,8 @@ clnt_vc_destroy(CLIENT *cl)
 	sigfillset(&newmask);
 	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 	mutex_lock(&clnt_fd_lock);
-	while (vc_fd_locks[ct_fd])
-		cond_wait(&vc_cv[ct_fd], &clnt_fd_lock);
+	elem = vc_fd_find(ct_fd);
+	mutex_lock(&elem->mtx);
 	if (ct->ct_closeit && ct->ct_fd != -1) {
 		(void)_close(ct->ct_fd);
 	}
@@ -665,8 +659,7 @@ clnt_vc_destroy(CLIENT *cl)
 		mem_free(cl->cl_tp, strlen(cl->cl_tp) +1);
 	mem_free(cl, sizeof(CLIENT));
 	mutex_unlock(&clnt_fd_lock);
-	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
-	cond_signal(&vc_cv[ct_fd]);
+	release_fd_lock(elem, mask);
 }
 
 /*
diff --git a/lib/libc/rpc/rpc_com.h b/lib/libc/rpc/rpc_com.h
index c0c144fc589d..a8b6092f4e9d 100644
--- a/lib/libc/rpc/rpc_com.h
+++ b/lib/libc/rpc/rpc_com.h
@@ -62,7 +62,6 @@
 
 __BEGIN_DECLS
 extern u_int __rpc_get_a_size(int);
-extern int __rpc_dtbsize(void);
 extern struct netconfig * __rpcgettp(int);
 extern  int  __rpc_get_default_domain(char **);
 
diff --git a/lib/libc/rpc/rpc_generic.c b/lib/libc/rpc/rpc_generic.c
index 6fbeaf77a7e7..e884eb2febe2 100644
--- a/lib/libc/rpc/rpc_generic.c
+++ b/lib/libc/rpc/rpc_generic.c
@@ -107,29 +107,6 @@ static char *strlocase(char *);
 #endif
 static int getnettype(const char *);
 
-/*
- * Cache the result of getrlimit(), so we don't have to do an
- * expensive call every time.
- */
-int
-__rpc_dtbsize(void)
-{
-	static int tbsize;
-	struct rlimit rl;
-
-	if (tbsize) {
-		return (tbsize);
-	}
-	if (getrlimit(RLIMIT_NOFILE, &rl) == 0) {
-		return (tbsize = (int)rl.rlim_max);
-	}
-	/*
-	 * Something wrong.  I'll try to save face by returning a
-	 * pessimistic number.
-	 */
-	return (32);
-}
-
 
 /*
  * Find the appropriate buffer size
diff --git a/sys/rpc/rpc_com.h b/sys/rpc/rpc_com.h
index 70d4214f54e8..439d9764c833 100644
--- a/sys/rpc/rpc_com.h
+++ b/sys/rpc/rpc_com.h
@@ -72,7 +72,6 @@
 __BEGIN_DECLS
 #ifndef _KERNEL
 extern u_int __rpc_get_a_size(int);
-extern int __rpc_dtbsize(void);
 extern struct netconfig * __rpcgettp(int);
 extern  int  __rpc_get_default_domain(char **);