svn commit: r201475 - user/kmacy/releng_8_rump/lib/libunet
Kip Macy
kmacy at FreeBSD.org
Mon Jan 4 08:33:33 UTC 2010
Author: kmacy
Date: Mon Jan 4 08:33:33 2010
New Revision: 201475
URL: http://svn.freebsd.org/changeset/base/201475
Log:
shim or stub all remaining functions except kmem, critical, and spinlock
Added:
user/kmacy/releng_8_rump/lib/libunet/unet_in_cksum.c (contents, props changed)
user/kmacy/releng_8_rump/lib/libunet/unet_kern_condvar.c (contents, props changed)
user/kmacy/releng_8_rump/lib/libunet/unet_kern_descrip.c (contents, props changed)
user/kmacy/releng_8_rump/lib/libunet/unet_kern_timeout.c (contents, props changed)
Modified:
user/kmacy/releng_8_rump/lib/libunet/Makefile
user/kmacy/releng_8_rump/lib/libunet/unet_glue.c
user/kmacy/releng_8_rump/lib/libunet/unet_kern_subr.c
user/kmacy/releng_8_rump/lib/libunet/unet_kern_synch.c
user/kmacy/releng_8_rump/lib/libunet/unet_subr_taskqueue.c
Modified: user/kmacy/releng_8_rump/lib/libunet/Makefile
==============================================================================
--- user/kmacy/releng_8_rump/lib/libunet/Makefile Mon Jan 4 08:26:34 2010 (r201474)
+++ user/kmacy/releng_8_rump/lib/libunet/Makefile Mon Jan 4 08:33:33 2010 (r201475)
@@ -15,12 +15,15 @@ UNET_KERN_COMMON_OBJS += \
kern_malloc.o \
kern_mbuf.o \
kern_module.o \
+ kern_mtxpool.o \
kern_sysctl.o \
md5c.o \
subr_eventhandler.o \
subr_param.o \
subr_pcpu.o \
subr_sbuf.o \
+ sys_generic.o \
+ sys_socket.o \
uipc_accf.o \
uipc_mbuf.o \
uipc_mbuf2.o \
@@ -90,18 +93,22 @@ UNET_LIBKERN_COMMON_OBJS = \
strncmp.o \
strtoul.o
-UNET_RANDOM_COMMON_OBJS = \
+UNET_OTHER_COMMON_OBJS = \
harvest.o
UNET_GLUE_COMMON_OBJS = \
unet_compat.o \
unet_glue.o \
- unet_init_main.c \
+ unet_in_cksum.o \
+ unet_init_main.o \
unet_lock.o \
unet_uma_core.c \
+ unet_kern_condvar.o \
+ unet_kern_descrip.o \
unet_kern_intr.o \
unet_kern_synch.o \
unet_kern_subr.o \
+ unet_kern_timeout.o \
unet_subr_taskqueue.o
# unet_init.o \
@@ -113,7 +120,7 @@ UNET_COMMON_OBJS = \
${UNET_LIBKERN_COMMON_OBJS} \
${UNET_NET_COMMON_OBJS} \
${UNET_NETINET_COMMON_OBJS} \
- ${UNET_RANDOM_COMMON_OBJS} \
+ ${UNET_OTHER_COMMON_OBJS} \
${UNET_GLUE_COMMON_OBJS}
UNET_COMMON_SRCS= ${UNET_COMMON_OBJS:C/.o$/.c/}
Modified: user/kmacy/releng_8_rump/lib/libunet/unet_glue.c
==============================================================================
--- user/kmacy/releng_8_rump/lib/libunet/unet_glue.c Mon Jan 4 08:26:34 2010 (r201474)
+++ user/kmacy/releng_8_rump/lib/libunet/unet_glue.c Mon Jan 4 08:33:33 2010 (r201475)
@@ -34,8 +34,6 @@ SYSCTL_NODE(, CTL_NET, net, CTLFLAG
SYSCTL_NODE(, CTL_VM, vm, CTLFLAG_RW, 0,
"Virtual memory");
-MALLOC_DEFINE(M_IOV, "iov", "large iov's");
-
int ticks;
@@ -596,8 +594,6 @@ securelevel_gt(struct ucred *cr, int lev
}
-
-
/**
* @brief Send a 'notification' to userland, using standard ways
*/
@@ -614,3 +610,22 @@ cpu_pcpu_init(struct pcpu *pcpu, int cpu
;
}
+/*
+ * Send a SIGIO or SIGURG signal to a process or process group using stored
+ * credentials rather than those of the current process.
+ */
+void
+pgsigio(sigiop, sig, checkctty)
+ struct sigio **sigiop;
+ int sig, checkctty;
+{
+
+ panic("");
+}
+
+void
+kproc_exit(int ecode)
+{
+ panic("");
+}
+
Added: user/kmacy/releng_8_rump/lib/libunet/unet_in_cksum.c
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ user/kmacy/releng_8_rump/lib/libunet/unet_in_cksum.c Mon Jan 4 08:33:33 2010 (r201475)
@@ -0,0 +1,491 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from tahoe: in_cksum.c 1.2 86/01/05
+ * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/ip.h>
+
+#include <machine/in_cksum.h>
+
+/*
+ * Checksum routine for Internet Protocol family headers.
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ *
+ * This implementation is the 386 version.
+ */
+
+#undef ADDCARRY
+#define ADDCARRY(x) if ((x) > 0xffff) (x) -= 0xffff
+/*
+ * icc needs to be special cased here, as the asm code below results
+ * in broken code if compiled with icc.
+ */
+#if !defined(__GNUCLIKE_ASM) || defined(__INTEL_COMPILER) || defined(UNET)
+/* non gcc parts stolen from sys/alpha/alpha/in_cksum.c */
+#define REDUCE32 \
+ { \
+ q_util.q = sum; \
+ sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ }
+#define REDUCE16 \
+ { \
+ q_util.q = sum; \
+ l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ sum = l_util.s[0] + l_util.s[1]; \
+ ADDCARRY(sum); \
+ }
+#endif
+#define REDUCE {sum = (sum & 0xffff) + (sum >> 16); ADDCARRY(sum);}
+
+#if !defined(__GNUCLIKE_ASM) || defined(__INTEL_COMPILER) || defined(UNET)
+static const u_int32_t in_masks[] = {
+ /*0 bytes*/ /*1 byte*/ /*2 bytes*/ /*3 bytes*/
+ 0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF, /* offset 0 */
+ 0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00, /* offset 1 */
+ 0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000, /* offset 2 */
+ 0x00000000, 0xFF000000, 0xFF000000, 0xFF000000, /* offset 3 */
+};
+
+union l_util {
+ u_int16_t s[2];
+ u_int32_t l;
+};
+union q_util {
+ u_int16_t s[4];
+ u_int32_t l[2];
+ u_int64_t q;
+};
+
+static u_int64_t
+in_cksumdata(const u_int32_t *lw, int len)
+{
+ u_int64_t sum = 0;
+ u_int64_t prefilled;
+ int offset;
+ union q_util q_util;
+
+ if ((3 & (long) lw) == 0 && len == 20) {
+ sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
+ REDUCE32;
+ return sum;
+ }
+
+ if ((offset = 3 & (long) lw) != 0) {
+ const u_int32_t *masks = in_masks + (offset << 2);
+ lw = (u_int32_t *) (((long) lw) - offset);
+ sum = *lw++ & masks[len >= 3 ? 3 : len];
+ len -= 4 - offset;
+ if (len <= 0) {
+ REDUCE32;
+ return sum;
+ }
+ }
+#if 0
+ /*
+ * Force to cache line boundary.
+ */
+ offset = 32 - (0x1f & (long) lw);
+ if (offset < 32 && len > offset) {
+ len -= offset;
+ if (4 & offset) {
+ sum += (u_int64_t) lw[0];
+ lw += 1;
+ }
+ if (8 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1];
+ lw += 2;
+ }
+ if (16 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ }
+#endif
+ /*
+ * access prefilling to start load of next cache line.
+ * then add current cache line
+ * save result of prefilling for loop iteration.
+ */
+ prefilled = lw[0];
+ while ((len -= 32) >= 4) {
+ u_int64_t prefilling = lw[8];
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ prefilled = prefilling;
+ }
+ if (len >= 0) {
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ } else {
+ len += 32;
+ }
+ while ((len -= 16) >= 0) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ len += 16;
+ while ((len -= 4) >= 0) {
+ sum += (u_int64_t) *lw++;
+ }
+ len += 4;
+ if (len > 0)
+ sum += (u_int64_t) (in_masks[len] & *lw);
+ REDUCE32;
+ return sum;
+}
+
+u_short
+in_addword(u_short a, u_short b)
+{
+ u_int64_t sum = a + b;
+
+ ADDCARRY(sum);
+ return (sum);
+}
+
+u_short
+in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
+{
+ u_int64_t sum;
+ union q_util q_util;
+ union l_util l_util;
+
+ sum = (u_int64_t) a + b + c;
+ REDUCE16;
+ return (sum);
+}
+
+u_short
+in_cksum_skip(struct mbuf *m, int len, int skip)
+{
+ u_int64_t sum = 0;
+ int mlen = 0;
+ int clen = 0;
+ caddr_t addr;
+ union q_util q_util;
+ union l_util l_util;
+
+ len -= skip;
+ for (; skip && m; m = m->m_next) {
+ if (m->m_len > skip) {
+ mlen = m->m_len - skip;
+ addr = mtod(m, caddr_t) + skip;
+ goto skip_start;
+ } else {
+ skip -= m->m_len;
+ }
+ }
+
+ for (; m && len; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ mlen = m->m_len;
+ addr = mtod(m, caddr_t);
+skip_start:
+ if (len < mlen)
+ mlen = len;
+ if ((clen ^ (long) addr) & 1)
+ sum += in_cksumdata((const u_int32_t *)addr, mlen) << 8;
+ else
+ sum += in_cksumdata((const u_int32_t *)addr, mlen);
+
+ clen += mlen;
+ len -= mlen;
+ }
+ REDUCE16;
+ return (~sum & 0xffff);
+}
+
+u_int in_cksum_hdr(const struct ip *ip)
+{
+ u_int64_t sum = in_cksumdata((const u_int32_t *)ip, sizeof(struct ip));
+ union q_util q_util;
+ union l_util l_util;
+
+ REDUCE16;
+ return (~sum & 0xffff);
+}
+#else
+
+/*
+ * These asm statements require __volatile because they pass information
+ * via the condition codes. GCC does not currently provide a way to specify
+ * the condition codes as an input or output operand.
+ *
+ * The LOAD macro below is effectively a prefetch into cache. GCC will
+ * load the value into a register but will not use it. Since modern CPUs
+ * reorder operations, this will generally take place in parallel with
+ * other calculations.
+ */
+u_short
+in_cksum_skip(m, len, skip)
+ struct mbuf *m;
+ int len;
+ int skip;
+{
+ register u_short *w;
+ register unsigned sum = 0;
+ register int mlen = 0;
+ int byte_swapped = 0;
+ union { char c[2]; u_short s; } su;
+
+ len -= skip;
+ for (; skip && m; m = m->m_next) {
+ if (m->m_len > skip) {
+ mlen = m->m_len - skip;
+ w = (u_short *)(mtod(m, u_char *) + skip);
+ goto skip_start;
+ } else {
+ skip -= m->m_len;
+ }
+ }
+
+ for (;m && len; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ w = mtod(m, u_short *);
+ if (mlen == -1) {
+ /*
+ * The first byte of this mbuf is the continuation
+ * of a word spanning between this mbuf and the
+ * last mbuf.
+ */
+
+ /* su.c[0] is already saved when scanning previous
+ * mbuf. sum was REDUCEd when we found mlen == -1
+ */
+ su.c[1] = *(u_char *)w;
+ sum += su.s;
+ w = (u_short *)((char *)w + 1);
+ mlen = m->m_len - 1;
+ len--;
+ } else
+ mlen = m->m_len;
+skip_start:
+ if (len < mlen)
+ mlen = len;
+ len -= mlen;
+ /*
+ * Force to long boundary so we do longword aligned
+ * memory operations
+ */
+ if (3 & (int) w) {
+ REDUCE;
+ if ((1 & (int) w) && (mlen > 0)) {
+ sum <<= 8;
+ su.c[0] = *(char *)w;
+ w = (u_short *)((char *)w + 1);
+ mlen--;
+ byte_swapped = 1;
+ }
+ if ((2 & (int) w) && (mlen >= 2)) {
+ sum += *w++;
+ mlen -= 2;
+ }
+ }
+ /*
+ * Advance to a 486 cache line boundary.
+ */
+ if (4 & (int) w && mlen >= 4) {
+ __asm __volatile (
+ "addl %1, %0\n"
+ "adcl $0, %0"
+ : "+r" (sum)
+ : "g" (((const u_int32_t *)w)[0])
+ );
+ w += 2;
+ mlen -= 4;
+ }
+ if (8 & (int) w && mlen >= 8) {
+ __asm __volatile (
+ "addl %1, %0\n"
+ "adcl %2, %0\n"
+ "adcl $0, %0"
+ : "+r" (sum)
+ : "g" (((const u_int32_t *)w)[0]),
+ "g" (((const u_int32_t *)w)[1])
+ );
+ w += 4;
+ mlen -= 8;
+ }
+ /*
+ * Do as much of the checksum as possible 32 bits at a time.
+ * In fact, this loop is unrolled to make overhead from
+ * branches &c small.
+ */
+ mlen -= 1;
+ while ((mlen -= 32) >= 0) {
+ /*
+ * Add with carry 16 words and fold in the last
+ * carry by adding a 0 with carry.
+ *
+ * The early ADD(16) and the LOAD(32) are to load
+ * the next 2 cache lines in advance on 486's. The
+ * 486 has a penalty of 2 clock cycles for loading
+ * a cache line, plus whatever time the external
+ * memory takes to load the first word(s) addressed.
+ * These penalties are unavoidable. Subsequent
+ * accesses to a cache line being loaded (and to
+ * other external memory?) are delayed until the
+ * whole load finishes. These penalties are mostly
+ * avoided by not accessing external memory for
+ * 8 cycles after the ADD(16) and 12 cycles after
+ * the LOAD(32). The loop terminates when mlen
+ * is initially 33 (not 32) to guarantee that
+ * the LOAD(32) is within bounds.
+ */
+ __asm __volatile (
+ "addl %1, %0\n"
+ "adcl %2, %0\n"
+ "adcl %3, %0\n"
+ "adcl %4, %0\n"
+ "adcl %5, %0\n"
+ "mov %6, %%eax\n"
+ "adcl %7, %0\n"
+ "adcl %8, %0\n"
+ "adcl %9, %0\n"
+ "adcl $0, %0"
+ : "+r" (sum)
+ : "g" (((const u_int32_t *)w)[4]),
+ "g" (((const u_int32_t *)w)[0]),
+ "g" (((const u_int32_t *)w)[1]),
+ "g" (((const u_int32_t *)w)[2]),
+ "g" (((const u_int32_t *)w)[3]),
+ "g" (((const u_int32_t *)w)[8]),
+ "g" (((const u_int32_t *)w)[5]),
+ "g" (((const u_int32_t *)w)[6]),
+ "g" (((const u_int32_t *)w)[7])
+ : "eax"
+ );
+ w += 16;
+ }
+ mlen += 32 + 1;
+ if (mlen >= 32) {
+ __asm __volatile (
+ "addl %1, %0\n"
+ "adcl %2, %0\n"
+ "adcl %3, %0\n"
+ "adcl %4, %0\n"
+ "adcl %5, %0\n"
+ "adcl %6, %0\n"
+ "adcl %7, %0\n"
+ "adcl %8, %0\n"
+ "adcl $0, %0"
+ : "+r" (sum)
+ : "g" (((const u_int32_t *)w)[4]),
+ "g" (((const u_int32_t *)w)[0]),
+ "g" (((const u_int32_t *)w)[1]),
+ "g" (((const u_int32_t *)w)[2]),
+ "g" (((const u_int32_t *)w)[3]),
+ "g" (((const u_int32_t *)w)[5]),
+ "g" (((const u_int32_t *)w)[6]),
+ "g" (((const u_int32_t *)w)[7])
+ );
+ w += 16;
+ mlen -= 32;
+ }
+ if (mlen >= 16) {
+ __asm __volatile (
+ "addl %1, %0\n"
+ "adcl %2, %0\n"
+ "adcl %3, %0\n"
+ "adcl %4, %0\n"
+ "adcl $0, %0"
+ : "+r" (sum)
+ : "g" (((const u_int32_t *)w)[0]),
+ "g" (((const u_int32_t *)w)[1]),
+ "g" (((const u_int32_t *)w)[2]),
+ "g" (((const u_int32_t *)w)[3])
+ );
+ w += 8;
+ mlen -= 16;
+ }
+ if (mlen >= 8) {
+ __asm __volatile (
+ "addl %1, %0\n"
+ "adcl %2, %0\n"
+ "adcl $0, %0"
+ : "+r" (sum)
+ : "g" (((const u_int32_t *)w)[0]),
+ "g" (((const u_int32_t *)w)[1])
+ );
+ w += 4;
+ mlen -= 8;
+ }
+ if (mlen == 0 && byte_swapped == 0)
+ continue; /* worth 1% maybe ?? */
+ REDUCE;
+ while ((mlen -= 2) >= 0) {
+ sum += *w++;
+ }
+ if (byte_swapped) {
+ sum <<= 8;
+ byte_swapped = 0;
+ if (mlen == -1) {
+ su.c[1] = *(char *)w;
+ sum += su.s;
+ mlen = 0;
+ } else
+ mlen = -1;
+ } else if (mlen == -1)
+ /*
+ * This mbuf has odd number of bytes.
+ * There could be a word split between
+ * this mbuf and the next mbuf.
+ * Save the last byte (to prepend to next mbuf).
+ */
+ su.c[0] = *(char *)w;
+ }
+
+ if (len)
+ printf("%s: out of data by %d\n", __func__, len);
+ if (mlen == -1) {
+ /* The last mbuf has odd # of bytes. Follow the
+ standard (the odd byte is shifted left by 8 bits) */
+ su.c[1] = 0;
+ sum += su.s;
+ }
+ REDUCE;
+ return (~sum & 0xffff);
+}
+#endif
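The portable (non-asm) path above accumulates 32-bit words into a 64-bit sum and then folds it back down with the REDUCE32/REDUCE16 macros. A minimal standalone sketch of the same folding technique, assuming a flat byte buffer rather than an mbuf chain (cksum_buf and its arguments are illustrative names, not part of this commit):

#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper: RFC 1071-style ones-complement checksum of a buffer. */
static uint16_t
cksum_buf(const void *buf, size_t len)
{
	const uint16_t *w = buf;
	uint64_t sum = 0;

	while (len > 1) {			/* accumulate 16-bit words */
		sum += *w++;
		len -= 2;
	}
	if (len == 1) {				/* pad a trailing odd byte with zero */
		uint16_t last = 0;
		*(uint8_t *)&last = *(const uint8_t *)w;
		sum += last;
	}
	while (sum >> 16)			/* fold carries, cf. REDUCE32/REDUCE16 */
		sum = (sum & 0xffff) + (sum >> 16);
	return ((uint16_t)~sum);
}

The committed version differs mainly in walking the mbuf chain, tracking the byte-swap state when an mbuf ends on an odd byte, and using the in_masks table to handle unaligned starting addresses.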
Added: user/kmacy/releng_8_rump/lib/libunet/unet_kern_condvar.c
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ user/kmacy/releng_8_rump/lib/libunet/unet_kern_condvar.c Mon Jan 4 08:33:33 2010 (r201475)
@@ -0,0 +1,111 @@
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_ktrace.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/condvar.h>
+#include <sys/sched.h>
+#include <sys/signalvar.h>
+#include <sys/sleepqueue.h>
+#include <sys/resourcevar.h>
+
+/*
+ * Initialize a condition variable. Must be called before use.
+ */
+void
+cv_init(struct cv *cvp, const char *desc)
+{
+
+ cvp->cv_description = desc;
+ cvp->cv_waiters = 0;
+}
+
+/*
+ * Destroy a condition variable. The condition variable must be re-initialized
+ * in order to be re-used.
+ */
+void
+cv_destroy(struct cv *cvp)
+{
+#ifdef INVARIANTS
+ struct sleepqueue *sq;
+
+ sleepq_lock(cvp);
+ sq = sleepq_lookup(cvp);
+ sleepq_release(cvp);
+ KASSERT(sq == NULL, ("%s: associated sleep queue non-empty", __func__));
+#endif
+}
+
+/*
+ * Wait on a condition variable. The current thread is placed on the condition
+ * variable's wait queue and suspended. A cv_signal or cv_broadcast on the same
+ * condition variable will resume the thread. The mutex is released before
+ * sleeping and will be held on return. It is recommended that the mutex be
+ * held when cv_signal or cv_broadcast are called.
+ */
+void
+_cv_wait(struct cv *cvp, struct lock_object *lock)
+{
+ panic("");
+
+}
+
+/*
+ * Wait on a condition variable, allowing interruption by signals. Return 0 if
+ * the thread was resumed with cv_signal or cv_broadcast, EINTR or ERESTART if
+ * a signal was caught. If ERESTART is returned the system call should be
+ * restarted if possible.
+ */
+int
+_cv_wait_sig(struct cv *cvp, struct lock_object *lock)
+{
+ panic("");
+
+ return (0);
+}
+
+/*
+ * Wait on a condition variable for at most timo/hz seconds. Returns 0 if the
+ * process was resumed by cv_signal or cv_broadcast, EWOULDBLOCK if the timeout
+ * expires.
+ */
+int
+_cv_timedwait(struct cv *cvp, struct lock_object *lock, int timo)
+{
+ panic("");
+
+ return (0);
+}
+
+/*
+ * Wait on a condition variable for at most timo/hz seconds, allowing
+ * interruption by signals. Returns 0 if the thread was resumed by cv_signal
+ * or cv_broadcast, EWOULDBLOCK if the timeout expires, and EINTR or ERESTART if
+ * a signal was caught.
+ */
+int
+_cv_timedwait_sig(struct cv *cvp, struct lock_object *lock, int timo)
+{
+ panic("");
+
+ return (0);
+}
+
+/*
+ * Broadcast a signal to a condition variable. Wakes up all waiting threads.
+ * Should be called with the same mutex as was passed to cv_wait held.
+ */
+void
+cv_broadcastpri(struct cv *cvp, int pri)
+{
+ panic("");
+
+}
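The condition-variable entry points above are stubs that panic for now; the comments record the contract they must eventually honor (the interlocking mutex is released while sleeping and held again on return). A minimal sketch of how a consumer of this API is expected to use it once the stubs are filled in (the mailbox structure and names are purely illustrative):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

/* Illustrative single-slot mailbox guarded by a mutex and a condvar. */
struct mailbox {
	struct mtx	 mb_lock;
	struct cv	 mb_cv;
	void		*mb_item;
};

static void
mailbox_init(struct mailbox *mb)
{
	mtx_init(&mb->mb_lock, "mb lock", NULL, MTX_DEF);
	cv_init(&mb->mb_cv, "mb cv");
	mb->mb_item = NULL;
}

static void *
mailbox_take(struct mailbox *mb)
{
	void *item;

	mtx_lock(&mb->mb_lock);
	while (mb->mb_item == NULL)
		cv_wait(&mb->mb_cv, &mb->mb_lock);	/* drops and reacquires mb_lock */
	item = mb->mb_item;
	mb->mb_item = NULL;
	mtx_unlock(&mb->mb_lock);
	return (item);
}

static void
mailbox_put(struct mailbox *mb, void *item)
{
	mtx_lock(&mb->mb_lock);
	mb->mb_item = item;
	cv_broadcast(&mb->mb_cv);		/* cv_signal() would wake just one waiter */
	mtx_unlock(&mb->mb_lock);
}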
Added: user/kmacy/releng_8_rump/lib/libunet/unet_kern_descrip.c
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ user/kmacy/releng_8_rump/lib/libunet/unet_kern_descrip.c Mon Jan 4 08:33:33 2010 (r201475)
@@ -0,0 +1,308 @@
+
+
+/*-
+ * Copyright (c) 1982, 1986, 1989, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_compat.h"
+#include "opt_ddb.h"
+#include "opt_ktrace.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <sys/conf.h>
+#include <sys/domain.h>
+#include <sys/fcntl.h>
+#include <sys/file.h>
+#include <sys/filedesc.h>
+#include <sys/filio.h>
+#include <sys/jail.h>
+#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mount.h>
+#include <sys/mqueue.h>
+#include <sys/mutex.h>
+#include <sys/namei.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/protosw.h>
+#include <sys/resourcevar.h>
+#include <sys/signalvar.h>
+#include <sys/socketvar.h>
+#include <sys/stat.h>
+#include <sys/sx.h>
+#include <sys/syscallsubr.h>
+#include <sys/sysctl.h>
+#include <sys/sysproto.h>
+#include <sys/tty.h>
+#include <sys/unistd.h>
+#include <sys/user.h>
+
+
+/*
+ * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
+ *
+ * After permission checking, add a sigio structure to the sigio list for
+ * the process or process group.
+ */
+int
+fsetown(pid_t pgid, struct sigio **sigiop)
+{
+
+ panic("");
+
+ return (0);
+}
+
+pid_t
+fgetown(struct sigio **sigiop)
+{
+
+ panic("");
+ return (0);
+}
+
+
+void
+funsetown(struct sigio **sigiop)
+{
+
+ panic("");
+}
+
+
+/*
+ * Create a new open file structure and allocate a file descriptor for the
+ * process that refers to it. We add one reference to the file for the
+ * descriptor table and one reference for resultfp. This is to prevent us
+ * being preempted and the entry in the descriptor table closed after we
+ * release the FILEDESC lock.
+ */
+int
+falloc(struct thread *td, struct file **resultfp, int *resultfd)
+{
+ panic("");
+
+ return (0);
+}
+
+
+/*
+ * Handle the last reference to a file being closed.
+ */
+int
+_fdrop(struct file *fp, struct thread *td)
+{
+
+ panic("");
+ return (0);
+}
+
+void
+finit(struct file *fp, u_int flag, short type, void *data, struct fileops *ops)
+{
+ fp->f_data = data;
+ fp->f_flag = flag;
+ fp->f_type = type;
+ atomic_store_rel_ptr((volatile uintptr_t *)&fp->f_ops, (uintptr_t)ops);
+}
+
+struct file *
+fget_unlocked(struct filedesc *fdp, int fd)
+{
+ struct file *fp;
+ u_int count;
+
+ if (fd < 0 || fd >= fdp->fd_nfiles)
+ return (NULL);
+ /*
+ * Fetch the descriptor locklessly. We avoid fdrop() races by
+ * never raising a refcount above 0. To accomplish this we have
+ * to use a cmpset loop rather than an atomic_add. The descriptor
+ * must be re-verified once we acquire a reference to be certain
+ * that the identity is still correct and we did not lose a race
+ * due to preemption.
+ */
+ for (;;) {
+ fp = fdp->fd_ofiles[fd];
+ if (fp == NULL)
+ break;
+ count = fp->f_count;
+ if (count == 0)
+ continue;
+ /*
+ * Use an acquire barrier to prevent caching of fd_ofiles
+ * so it is refreshed for verification.
+ */
+ if (atomic_cmpset_acq_int(&fp->f_count, count, count + 1) != 1)
+ continue;
+ if (fp == fdp->fd_ofiles[fd])
+ break;
+ fdrop(fp, curthread);
+ }
+
+ return (fp);
+}
+
+
+/*
+ * Extract the file pointer associated with the specified descriptor for the
+ * current user process.
+ *
+ * If the descriptor doesn't exist or doesn't match 'flags', EBADF is
+ * returned.
+ *
+ * If an error occurred, the non-zero error is returned and *fpp is set to
+ * NULL. Otherwise *fpp is held and set and zero is returned. Caller is
+ * responsible for fdrop().
+ */
+static __inline int
+_fget(struct thread *td, int fd, struct file **fpp, int flags)
+{
+ struct filedesc *fdp;
+ struct file *fp;
+
+ *fpp = NULL;
+ if (td == NULL || (fdp = td->td_proc->p_fd) == NULL)
+ return (EBADF);
+ if ((fp = fget_unlocked(fdp, fd)) == NULL)
+ return (EBADF);
+ if (fp->f_ops == &badfileops) {
+ fdrop(fp, td);
+ return (EBADF);
+ }
+ /*
+ * FREAD and FWRITE failure return EBADF as per POSIX.
+ *
+ * Only one flag, or 0, may be specified.
+ */
+ if ((flags == FREAD && (fp->f_flag & FREAD) == 0) ||
+ (flags == FWRITE && (fp->f_flag & FWRITE) == 0)) {
+ fdrop(fp, td);
+ return (EBADF);
+ }
+ *fpp = fp;
+ return (0);
+}
+
+int
+fget(struct thread *td, int fd, struct file **fpp)
+{
+
+ return(_fget(td, fd, fpp, 0));
+}
+
+int
+fget_read(struct thread *td, int fd, struct file **fpp)
+{
+
+ return(_fget(td, fd, fpp, FREAD));
+}
+
+int
+fget_write(struct thread *td, int fd, struct file **fpp)
+{
+
+ return(_fget(td, fd, fpp, FWRITE));
+}
+
+
+/*-------------------------------------------------------------------*/
+
+static int
+badfo_readwrite(struct file *fp, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td)
+{
+
+ return (EBADF);
+}
+
+static int
+badfo_truncate(struct file *fp, off_t length, struct ucred *active_cred, struct thread *td)
+{
+
+ return (EINVAL);
+}
+
+static int
+badfo_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred, struct thread *td)
+{
+
+ return (EBADF);
+}
+
+static int
+badfo_poll(struct file *fp, int events, struct ucred *active_cred, struct thread *td)
+{
+
+ return (0);
+}
+
+static int
+badfo_kqfilter(struct file *fp, struct knote *kn)
+{
+
+ return (EBADF);
+}
+
+static int
+badfo_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, struct thread *td)
+{
+
+ return (EBADF);
+}
+
+static int
+badfo_close(struct file *fp, struct thread *td)
+{
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
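fget_unlocked() above takes a reference on a file without holding the filedesc lock by looping on a compare-and-swap of f_count, refusing to raise a zero count, and then re-checking the descriptor slot. A stripped-down sketch of that lockless acquire pattern, using C11 atomics and a hypothetical refcounted object instead of struct file (the slot re-validation step is omitted for brevity):

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical refcounted object found via a lockless table lookup. */
struct obj {
	atomic_uint	refcount;	/* 0 means the object is being torn down */
};

/*
 * Try to take a reference without a lock: never raise a zero refcount,
 * and retry if the count changed underneath us (cf. fget_unlocked()).
 */
static bool
obj_try_ref(struct obj *o)
{
	unsigned int count;

	for (;;) {
		count = atomic_load_explicit(&o->refcount, memory_order_acquire);
		if (count == 0)
			return (false);	/* lost the race with the final drop */
		if (atomic_compare_exchange_weak_explicit(&o->refcount,
		    &count, count + 1,
		    memory_order_acquire, memory_order_relaxed))
			return (true);
	}
}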