svn commit: r267353 - in projects/rrs_mqueue/sys: amd64/conf amd64/include arm/include conf i386/include ia64/include kern mips/include net powerpc/include sparc64/include sys
Randall Stewart
rrs at FreeBSD.org
Wed Jun 11 09:31:12 UTC 2014
Author: rrs
Date: Wed Jun 11 09:31:09 2014
New Revision: 267353
URL: http://svnweb.freebsd.org/changeset/base/267353
Log:
Add my Lock-Less-Often changes to this project branch
Added:
projects/rrs_mqueue/sys/kern/garbage_collector.c (contents, props changed)
projects/rrs_mqueue/sys/kern/llo_hash.c (contents, props changed)
projects/rrs_mqueue/sys/sys/garbage_collector.h (contents, props changed)
projects/rrs_mqueue/sys/sys/llo_hash.h (contents, props changed)
Modified:
projects/rrs_mqueue/sys/amd64/conf/GENERIC
projects/rrs_mqueue/sys/amd64/include/counter.h
projects/rrs_mqueue/sys/arm/include/counter.h
projects/rrs_mqueue/sys/conf/files
projects/rrs_mqueue/sys/i386/include/counter.h
projects/rrs_mqueue/sys/ia64/include/counter.h
projects/rrs_mqueue/sys/kern/subr_counter.c
projects/rrs_mqueue/sys/mips/include/counter.h
projects/rrs_mqueue/sys/net/drbr.c
projects/rrs_mqueue/sys/net/drbr.h
projects/rrs_mqueue/sys/powerpc/include/counter.h
projects/rrs_mqueue/sys/sparc64/include/counter.h
projects/rrs_mqueue/sys/sys/counter.h
projects/rrs_mqueue/sys/sys/queue.h
Modified: projects/rrs_mqueue/sys/amd64/conf/GENERIC
==============================================================================
--- projects/rrs_mqueue/sys/amd64/conf/GENERIC Wed Jun 11 09:24:35 2014 (r267352)
+++ projects/rrs_mqueue/sys/amd64/conf/GENERIC Wed Jun 11 09:31:09 2014 (r267353)
@@ -64,8 +64,8 @@ options PRINTF_BUFR_SIZE=128 # Prevent
options KBD_INSTALL_CDEV # install a CDEV entry in /dev
options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4)
options AUDIT # Security event auditing
-options CAPABILITY_MODE # Capsicum capability mode
-options CAPABILITIES # Capsicum capabilities
+#options CAPABILITY_MODE # Capsicum capability mode
+#options CAPABILITIES # Capsicum capabilities
options MAC # TrustedBSD MAC Framework
options KDTRACE_FRAME # Ensure frames are compiled in
options KDTRACE_HOOKS # Kernel DTrace hooks
@@ -78,11 +78,11 @@ options KDB_TRACE # Print a stack trac
# For full debugger support use (turn off in stable branch):
options DDB # Support DDB.
options GDB # Support remote GDB.
-options DEADLKRES # Enable the deadlock resolver
-options INVARIANTS # Enable calls of extra sanity checking
-options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS
-options WITNESS # Enable checks to detect deadlocks and cycles
-options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed
+#options DEADLKRES # Enable the deadlock resolver
+#options INVARIANTS # Enable calls of extra sanity checking
+#options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS
+#options WITNESS # Enable checks to detect deadlocks and cycles
+#options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed
options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones
# Make an SMP-capable kernel by default
Modified: projects/rrs_mqueue/sys/amd64/include/counter.h
==============================================================================
--- projects/rrs_mqueue/sys/amd64/include/counter.h Wed Jun 11 09:24:35 2014 (r267352)
+++ projects/rrs_mqueue/sys/amd64/include/counter.h Wed Jun 11 09:31:09 2014 (r267353)
@@ -57,6 +57,35 @@ counter_u64_fetch_inline(uint64_t *p)
return (r);
}
+/*
+ * Copy a per-CPU counter: for each CPU, read one 64-bit slot of src
+ * and store it into the corresponding per-CPU slot of dest.  The copy
+ * is per-slot, not atomic across CPUs: slots read earlier may be stale
+ * relative to ones read later.  dest must be a counter laid out the
+ * same way (one slot per struct pcpu) as src.
+ */
+static inline void
+counter_u64_copy_inline(uint64_t *dest, uint64_t *src)
+{
+ uint64_t r;
+ int i;
+
+ r = 0;
+ for (i = 0; i < mp_ncpus; i++) {
+ r = counter_u64_read_one((uint64_t *)src, i);
+ /* Store directly into dest's slot for CPU i. */
+ *((uint64_t *)((char *)dest + sizeof(struct pcpu) *i)) = r;
+ }
+}
+
+/*
+ * Return 1 if, for EVERY per-CPU slot, s1's value is >= s2's value;
+ * return 0 as soon as any slot of s1 is behind the matching slot of s2.
+ * Note this compares slot-by-slot, not the summed totals — the intended
+ * use (epoch bookkeeping) relies on the per-slot property.  The two
+ * reads per CPU are not taken atomically as a pair.
+ */
+static inline int
+counter_u64_is_gte_inline(uint64_t *s1, uint64_t *s2)
+{
+ uint64_t s1v, s2v;
+ int i;
+
+ for (i = 0; i < mp_ncpus; i++) {
+ s1v = counter_u64_read_one((uint64_t *)s1, i);
+ s2v = counter_u64_read_one((uint64_t *)s2, i);
+ if (COUNTER_LT(s1v, s2v)) {
+ return(0);
+ }
+ }
+ return(1);
+}
+
static void
counter_u64_zero_one_cpu(void *arg)
{
Modified: projects/rrs_mqueue/sys/arm/include/counter.h
==============================================================================
--- projects/rrs_mqueue/sys/arm/include/counter.h Wed Jun 11 09:24:35 2014 (r267352)
+++ projects/rrs_mqueue/sys/arm/include/counter.h Wed Jun 11 09:31:09 2014 (r267353)
@@ -59,6 +59,35 @@ counter_u64_fetch_inline(uint64_t *p)
return (r);
}
+static inline void
+counter_u64_copy_inline(uint64_t *dest, uint64_t *src)
+{
+ uint64_t r;
+ int i;
+
+ r = 0;
+ for (i = 0; i < mp_ncpus; i++) {
+ r = counter_u64_read_one((uint64_t *)src, i);
+ *((uint64_t *)((char *)dest+ sizeof(struct pcpu) * i)) = r;
+ }
+}
+
+static inline int
+counter_u64_is_gte_inline(uint64_t *s1, uint64_t *s2)
+{
+ uint64_t s1v, s2v;
+ int i;
+
+ for (i = 0; i < mp_ncpus; i++) {
+ s1v = counter_u64_read_one((uint64_t *)s1, i);
+ s2v = counter_u64_read_one((uint64_t *)s2, i);
+ if (COUNTER_LT(s1v, s2v)) {
+ return(0);
+ }
+ }
+ return(1);
+}
+
/* XXXKIB non-atomic 64bit store, might interrupt increment */
static void
counter_u64_zero_one_cpu(void *arg)
Modified: projects/rrs_mqueue/sys/conf/files
==============================================================================
--- projects/rrs_mqueue/sys/conf/files Wed Jun 11 09:24:35 2014 (r267352)
+++ projects/rrs_mqueue/sys/conf/files Wed Jun 11 09:31:09 2014 (r267353)
@@ -2856,6 +2856,8 @@ kern/bus_if.m standard
kern/clock_if.m standard
kern/cpufreq_if.m standard
kern/device_if.m standard
+kern/garbage_collector.c standard
+kern/llo_hash.c standard
kern/imgact_binmisc.c optional imagact_binmisc
kern/imgact_elf.c standard
kern/imgact_elf32.c optional compat_freebsd32
Modified: projects/rrs_mqueue/sys/i386/include/counter.h
==============================================================================
--- projects/rrs_mqueue/sys/i386/include/counter.h Wed Jun 11 09:24:35 2014 (r267352)
+++ projects/rrs_mqueue/sys/i386/include/counter.h Wed Jun 11 09:31:09 2014 (r267353)
@@ -112,6 +112,74 @@ counter_u64_fetch_inline(uint64_t *p)
}
static inline void
+counter_u64_copy_inline(uint64_t *dest, uint64_t *src)
+{
+ uint64_t res;
+ uint64_t *p;
+ int i;
+
+ res = 0;
+ if ((cpu_feature & CPUID_CX8) == 0) {
+ /*
+ * The machines without cmpxchg8b are not SMP.
+ * Disabling the preemption provides atomicity of the
+ * counter reading, since update is done in the
+ * critical section as well.
+ */
+ critical_enter();
+ for (i = 0; i < mp_ncpus; i++) {
+ res = *(uint64_t *)((char *)src +
+ sizeof(struct pcpu) * i);
+ p = (uint64_t *)((char *)dest + sizeof(struct pcpu) * i);
+ *p = res;
+ }
+ critical_exit();
+ } else {
+ for (i = 0; i < mp_ncpus; i++) {
+ res = counter_u64_read_one_8b((uint64_t *)((char *)src +
+ sizeof(struct pcpu) * i));
+ p = (uint64_t *)((char *)dest + sizeof(struct pcpu) * i);
+ *p = res;
+ }
+ }
+}
+
+/*
+ * Return 1 if every per-CPU slot of s1 is >= the matching slot of s2,
+ * else 0.  Mirrors the amd64/arm/ia64/mips implementations and the
+ * int-returning counter_u64_is_gte() wrapper in kern/subr_counter.c.
+ * (Was declared void with (dest, src) parameters and read s1v from the
+ * second argument — inverted semantics and a void function returning
+ * values; fixed to int (s1, s2) with matching reads.)
+ */
+static inline int
+counter_u64_is_gte_inline(uint64_t *s1, uint64_t *s2)
+{
+ uint64_t s1v, s2v;
+ int i;
+
+ if ((cpu_feature & CPUID_CX8) == 0) {
+ /*
+ * The machines without cmpxchg8b are not SMP.
+ * Disabling the preemption provides atomicity of the
+ * counter reading, since update is done in the
+ * critical section as well.
+ */
+ critical_enter();
+ for (i = 0; i < mp_ncpus; i++) {
+ s1v = *(uint64_t *)((char *)s1 + sizeof(struct pcpu) * i);
+ s2v = *(uint64_t *)((char *)s2 + sizeof(struct pcpu) * i);
+ if (COUNTER_LT(s1v, s2v)) {
+ critical_exit();
+ return(0);
+ }
+ }
+ critical_exit();
+ } else {
+ for (i = 0; i < mp_ncpus; i++) {
+ s1v = counter_u64_read_one_8b((uint64_t *)((char *)s1 + sizeof(struct pcpu) * i));
+ s2v = counter_u64_read_one_8b((uint64_t *)((char *)s2 + sizeof(struct pcpu) * i));
+ if (COUNTER_LT(s1v, s2v)) {
+ return(0);
+ }
+ }
+ }
+ return(1);
+}
+
+static inline void
counter_u64_zero_one_8b(uint64_t *p)
{
Modified: projects/rrs_mqueue/sys/ia64/include/counter.h
==============================================================================
--- projects/rrs_mqueue/sys/ia64/include/counter.h Wed Jun 11 09:24:35 2014 (r267352)
+++ projects/rrs_mqueue/sys/ia64/include/counter.h Wed Jun 11 09:31:09 2014 (r267353)
@@ -58,6 +58,35 @@ counter_u64_fetch_inline(uint64_t *p)
return (r);
}
+static inline void
+counter_u64_copy_inline(uint64_t *dest, uint64_t *src)
+{
+ uint64_t r;
+ int i;
+
+ r = 0;
+ for (i = 0; i < mp_ncpus; i++) {
+ r = counter_u64_read_one((uint64_t *)src, i);
+ *((uint64_t *)((char *)dest+ sizeof(struct pcpu) * i)) = r;
+ }
+}
+
+static inline int
+counter_u64_is_gte_inline(uint64_t *s1, uint64_t *s2)
+{
+ uint64_t s1v, s2v;
+ int i;
+
+ for (i = 0; i < mp_ncpus; i++) {
+ s1v = counter_u64_read_one((uint64_t *)s1, i);
+ s2v = counter_u64_read_one((uint64_t *)s2, i);
+ if (COUNTER_LT(s1v, s2v)) {
+ return(0);
+ }
+ }
+ return(1);
+}
+
/* XXXKIB might interrupt increment */
static void
counter_u64_zero_one_cpu(void *arg)
Added: projects/rrs_mqueue/sys/kern/garbage_collector.c
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ projects/rrs_mqueue/sys/kern/garbage_collector.c Wed Jun 11 09:31:09 2014 (r267353)
@@ -0,0 +1,211 @@
+/*-
+ * * Copyright (c) 2012, by Adara Networks. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * a) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * b) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the distribution.
+ *
+ * c) Neither the name of Adara Networks,nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/callout.h>
+#include <sys/malloc.h>
+#include <sys/time.h>
+#include <sys/kernel.h>
+#include <sys/queue.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/garbage_collector.h>
+
+
+static struct mtx gc_mtx;
+static struct garbage_list gc_list;
+static struct callout gc_timer;
+static uint8_t gc_running = 0;
+static uint8_t gc_inited = 0;
+
+
+#define GC_LOCK() mtx_lock(&gc_mtx)
+#define GC_UNLOCK() mtx_unlock(&gc_mtx)
+
+
+MALLOC_DEFINE(M_GARBAGE, "gc_temp_mem", "Space for garbage before deleting");
+
+static void
+garbage_init(void)
+{
+ TAILQ_INIT(&gc_list);
+ mtx_init(&gc_mtx, "garbage_collector_lock", "gc_lock", MTX_DEF);
+ gc_running = 0;
+ gc_inited = 1;
+ callout_init(&gc_timer, 1);
+}
+
+
+SYSINIT(garbage_collect,
+ SI_SUB_PROTO_END,
+ SI_ORDER_ANY, garbage_init, NULL);
+
+
+static void
+gc_time_out(void *notused)
+{
+ struct garbage *entry, *tentry;
+ struct timeval now;
+ struct garbage_list loc_gc_list;
+
+ TAILQ_INIT(&loc_gc_list);
+ GC_LOCK();
+ if (callout_pending(&gc_timer)) {
+ /* Callout has been rescheduled */
+ GC_UNLOCK();
+ return;
+ }
+ if (!callout_active(&gc_timer)) {
+ /* The callout has been stopped */
+ GC_UNLOCK();
+ return;
+ }
+ callout_deactivate(&gc_timer);
+ getmicrouptime(&now);
+ gc_running = 0;
+ TAILQ_FOREACH_SAFE(entry, &gc_list, next, tentry) {
+ if (timevalcmp(&now, &entry->purge_time, >)) {
+ /* Ok we can run the purge on this one */
+ TAILQ_REMOVE(&gc_list, entry, next);
+ TAILQ_INSERT_TAIL(&loc_gc_list, entry, next);
+ } else {
+ /* We will find no more */
+ break;
+ }
+ }
+ GC_UNLOCK();
+ TAILQ_FOREACH_SAFE(entry, &loc_gc_list, next, tentry) {
+ garbage_func gf;
+
+ TAILQ_REMOVE(&loc_gc_list, entry, next);
+ entry->purged_at = now; /* for debugging */
+ gf = entry->gf;
+ if (gf) {
+ /* It should not be 0 */
+#ifdef INVARIANTS
+ if ((void *)gf != (void *)0xdeadc0dedeadc0de) {
+ entry->gf = NULL;
+ gf(entry->junk);
+ } else {
+ printf("gc found deleted entry dead-code\n");
+ }
+#else
+ entry->gf = NULL;
+ gf(entry->junk);
+#endif
+ } else {
+ printf("gc finds NULL in gf:%p placed by f_line %s:%d?\n",
+ gf,
+ entry->func,
+ entry->line);
+ }
+ }
+ GC_LOCK();
+ if ((!TAILQ_EMPTY(&gc_list)) && (gc_running == 0)) {
+ struct timeval nxttm;
+ entry = TAILQ_FIRST(&gc_list);
+ if (timevalcmp(&entry->purge_time, &now, >)) {
+ nxttm = entry->purge_time;
+ timevalsub(&nxttm, &now);
+ } else {
+ /* Huh? TSNH */
+ nxttm.tv_sec = 0;
+ nxttm.tv_usec = 0; /* 1 tick I guess */
+ }
+ callout_reset(&gc_timer,
+ (TV_TO_TICKS(&nxttm) + 1),
+ gc_time_out, NULL);
+ gc_running = 1;
+ }
+ GC_UNLOCK();
+}
+
+/*
+ * Queue *m for deferred destruction: after *expire has elapsed, the
+ * gc callout will invoke f(gar).  func/line record the caller for
+ * diagnostics.  Returns 0 on success, EAGAIN before garbage_init()
+ * has run, EINVAL on NULL arguments or a corrupted queued entry.
+ * If m is already queued, the shared timer is re-armed instead.
+ */
+int
+garbage_collect_add(struct garbage *m, garbage_func f,
+ void *gar, struct timeval *expire, const char *func,
+ int line)
+{
+ /* sanity */
+ if (gc_inited == 0) {
+ /* Sorry, too early in init process */
+ return (EAGAIN);
+ }
+ if ((f == NULL) ||
+ (gar == NULL) ||
+ (expire == NULL)) {
+ return (EINVAL);
+ }
+ if (m == NULL) {
+ return(EINVAL);
+ }
+ GC_LOCK();
+ if (m->on_gc_list) {
+ if (m->gf == NULL) {
+#ifdef INVARIANTS
+ printf("gc finds NULL gf, caller func:%s:%d func:%s:%d\n",
+ func, line,
+ m->func, m->line);
+#endif
+ GC_UNLOCK();
+ return (EINVAL);
+ }
+ /* Normal case -- extend the time */
+ /*
+ * NOTE(review): this re-arms the single shared gc_timer but does
+ * not update m->purge_time, so gc_time_out may still skip this
+ * entry and other queued entries' expiry is shortened — confirm
+ * this "extend" behavior is intended.
+ */
+ callout_reset(&gc_timer,
+ (TV_TO_TICKS(expire) + 1),
+ gc_time_out, NULL);
+ GC_UNLOCK();
+ return(0);
+ }
+ /* First time on the list: stamp the absolute purge deadline. */
+ getmicrouptime(&m->purge_time);
+ timevaladd(&m->purge_time, expire);
+ m->gf = f;
+ m->func = ((char *)(__uintptr_t)(const void *)(func));
+ m->line = line;
+ m->junk = gar;
+ m->on_gc_list = 1;
+ TAILQ_INSERT_TAIL(&gc_list, m, next);
+ if (gc_running == 0) {
+ /* Timer not armed yet; start it for this entry. */
+ gc_running = 1;
+ callout_reset(&gc_timer,
+ (TV_TO_TICKS(expire) + 1),
+ gc_time_out, NULL);
+ }
+ GC_UNLOCK();
+ return (0);
+}
+
+int
+garbage_collect_init(struct garbage *m)
+{
+ memset(m, 0, sizeof(struct garbage));
+ return (0);
+}
Added: projects/rrs_mqueue/sys/kern/llo_hash.c
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ projects/rrs_mqueue/sys/kern/llo_hash.c Wed Jun 11 09:31:09 2014 (r267353)
@@ -0,0 +1,415 @@
+#include <sys/llo_hash.h>
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/time.h>
+
+struct llo_hash *
+llo_hash_init(int nelements,
+ struct malloc_type *typ,
+ llo_hashfunc hf,
+ llo_comparefunc cf,
+ llo_freefunc ff,
+ size_t keysz,
+ int llo_flags)
+{
+ struct llo_hash *tbl=NULL;
+
+ tbl = malloc(sizeof(struct llo_hash), typ,
+ ((llo_flags & LLO_FLAGS_NOWAIT) ? M_NOWAIT : M_WAITOK));
+ if (tbl == NULL) {
+ goto out_err;
+ }
+ memset(tbl, 0, sizeof(struct llo_hash));
+ tbl->llo_hf = hf;
+ tbl->llo_cf = cf;
+ tbl->llo_ff = ff;
+ tbl->llo_typ = typ;
+ callout_init(&tbl->lazy_clist_tmr, 1);
+ LIST_INIT(&tbl->lazy_clist);
+ LLO_MMTX_INIT(tbl);
+ /* Setup our flags */
+ if (llo_flags & LLO_FLAGS_NOWAIT) {
+ tbl->table_flags |= LLO_IFLAG_NOWAIT;
+ }
+ if (llo_flags & LLO_FLAGS_MULTI_MTX) {
+ tbl->table_flags |= LLO_IFLAG_MMTX;
+ }
+ if (llo_flags & LLO_FLAGS_MIN_U64) {
+ tbl->table_flags |= LLO_IFLAG_MINU64;
+ }
+ /* Now the mallocs */
+ tbl->llo_epoch_start = counter_u64_alloc(((llo_flags & LLO_FLAGS_NOWAIT) ? M_NOWAIT : M_WAITOK));
+ if (tbl->llo_epoch_start == NULL) {
+ goto out_err;
+ }
+ tbl->llo_epoch_end = counter_u64_alloc(((llo_flags & LLO_FLAGS_NOWAIT) ? M_NOWAIT : M_WAITOK));
+ if (tbl->llo_epoch_end == NULL) {
+ goto out_err;
+ }
+ tbl->llo_ht = hashinit_flags(nelements, tbl->llo_typ, &tbl->llo_hashmod,
+ ((llo_flags & LLO_FLAGS_NOWAIT) ? M_NOWAIT : M_WAITOK));
+ if (tbl->llo_ht == NULL) {
+ goto out_err;
+ }
+ /* Now what about the mutex 1 or many? */
+ if (tbl->table_flags & LLO_IFLAG_MMTX) {
+ tbl->llo_hmtx = malloc(sizeof(struct mtx), tbl->llo_typ,
+ ((llo_flags & LLO_FLAGS_NOWAIT) ? M_NOWAIT : M_WAITOK));
+ if (tbl->llo_hmtx == NULL) {
+ goto out_err;
+ }
+ LLO_MTX_INIT(tbl->llo_hmtx, 0);
+ } else {
+ size_t sz;
+ int i;
+ sz = (sizeof(struct mtx) * (tbl->llo_hashmod+1));
+ tbl->llo_hmtx = malloc(sz, tbl->llo_typ,
+ ((llo_flags & LLO_FLAGS_NOWAIT) ? M_NOWAIT : M_WAITOK));
+ if (tbl->llo_hmtx == NULL) {
+ goto out_err;
+ }
+ for(i=0; i<=tbl->llo_hashmod; i++) {
+ LLO_MTX_INIT(tbl->llo_hmtx, i);
+ }
+ }
+ return(tbl);
+out_err:
+ if (tbl) {
+ if (tbl->llo_epoch_start) {
+ counter_u64_free(tbl->llo_epoch_start);
+ }
+ if (tbl->llo_epoch_end) {
+ counter_u64_free(tbl->llo_epoch_end);
+ }
+ if (tbl->llo_ht) {
+ hashdestroy(tbl->llo_ht, typ, tbl->llo_hashmod);
+ }
+ if (tbl->llo_hmtx) {
+ free(tbl->llo_hmtx, typ);
+ }
+ free(tbl, typ);
+ }
+ return(NULL);
+}
+
+static void
+llo_do_destroy_table(struct llo_hash *llo)
+{
+ /*
+ * All entries are properly gone at
+ * this point so we can purge the table.
+ */
+ struct malloc_type *typ;
+
+ /* Probably not needed. */
+
+ callout_stop(&llo->lazy_clist_tmr);
+ /* Now the purging */
+ if (llo->llo_ht) {
+ free(llo->llo_ht, llo->llo_typ);
+ llo->llo_ht = NULL;
+ }
+ if (llo->llo_epoch_start) {
+ counter_u64_free(llo->llo_epoch_start);
+ llo->llo_epoch_start = NULL;
+ }
+ if (llo->llo_epoch_end) {
+ counter_u64_free(llo->llo_epoch_end);
+ llo->llo_epoch_end = NULL;
+ }
+ if (llo->llo_hmtx) {
+ if (llo->table_flags & LLO_IFLAG_MMTX) {
+ int i;
+ for(i=0; i<= llo->llo_hashmod; i++) {
+ LLO_MTX_DESTROY(llo->llo_hmtx, i);
+ }
+ } else {
+ LLO_MTX_DESTROY(llo->llo_hmtx, 0);
+ }
+ llo->llo_hmtx = NULL;
+ }
+ LLO_MMTX_DESTROY(llo);
+ typ = llo->llo_typ;
+ free(llo, typ);
+}
+
+/*
+ * Tear down a table created by llo_hash_init().  Fails with EINVAL
+ * while entries remain.  If deletions are still in flight, the table
+ * is only flagged LLO_IFLAG_PURGING and the last llo_garfc() callback
+ * completes the destroy; otherwise it is destroyed here.
+ */
+int
+llo_hash_destroy(struct llo_hash *llo)
+{
+ LLO_MMTX_LOCK(llo);
+ if (llo->entries) {
+ LLO_MMTX_UNLOCK(llo);
+ return(EINVAL);
+ }
+ llo->table_flags |= LLO_IFLAG_PURGING;
+ if (llo->being_deleted) {
+ /* In-flight gc callbacks will finish the destroy. */
+ LLO_MMTX_UNLOCK(llo);
+ return(0);
+ }
+ /* ok we can destroy it */
+ /*
+ * NOTE(review): LLO_MMTX is still held here, and
+ * llo_do_destroy_table() ends with LLO_MMTX_DESTROY — i.e. the
+ * mutex is destroyed while locked and never unlocked on this
+ * path.  Verify the LLO_MMTX_* macros tolerate this.
+ */
+ llo_do_destroy_table(llo);
+ return(0);
+}
+
+int
+llo_add_to_hash(struct llo_hash *llo, void *entry, void *key)
+{
+ struct llo_hash_head *bucket;
+ struct llo_hash_entry *he, *ne;
+ u_long hkey;
+ uint32_t hidx;
+ int mtx_idx;
+
+ if (llo->table_flags & LLO_IFLAG_PURGING) {
+ return (EINVAL);
+ }
+ /* Establish the bucket */
+ hkey = (llo->llo_hf)(key);
+ hidx = hkey % llo->llo_hashmod;
+ bucket = &llo->llo_ht[hidx];
+ /* Now what type of lock? */
+ if (llo->table_flags & LLO_IFLAG_MMTX) {
+ mtx_idx = hidx;
+ } else {
+ mtx_idx = 0;
+ }
+ /* Get space for a new entry */
+ ne = malloc(sizeof(struct llo_hash_entry), llo->llo_typ,
+ ((llo->table_flags & LLO_IFLAG_NOWAIT) ? M_NOWAIT : M_WAITOK));
+ if (ne == NULL) {
+ /* No memory */
+ return (-1);
+ }
+ ne->entry = entry;
+ ne->key = key;
+ ne->parent = llo;
+ garbage_collect_init(&ne->gar);
+ if (llo->table_flags & LLO_IFLAG_MINU64) {
+ ne->delete_epoch = NULL;
+ } else {
+ ne->delete_epoch = counter_u64_alloc(((llo->table_flags & LLO_IFLAG_NOWAIT) ? M_NOWAIT : M_WAITOK));
+ if (ne->delete_epoch == NULL) {
+ free(ne, llo->llo_typ);
+ return (-1);
+ }
+ }
+ LLO_MTX_LOCK(llo->llo_hmtx, mtx_idx);
+ /* Does it exist? */
+ LIST_FOREACH(he, bucket, next) {
+ if((llo->llo_cf)(he->key, ne->key) == 0) {
+ /* Already exists */
+ LLO_MTX_UNLOCK(llo->llo_hmtx, mtx_idx);
+ return(EEXIST);
+ }
+ }
+ /* Ok lets add it */
+ atomic_add_int(&llo->entries, 1);
+ LIST_INSERT_HEAD(bucket, ne, next);
+ LLO_MTX_UNLOCK(llo->llo_hmtx, mtx_idx);
+ return (0);
+}
+
+/*
+ * Lock-less lookup.  Bumps llo_epoch_start on entry; on a MISS it also
+ * bumps llo_epoch_end and returns NULL.  On a HIT it deliberately does
+ * NOT bump llo_epoch_end — the caller holds the "epoch" open until it
+ * calls llo_release(), which is what lets deferred deletion know no
+ * reader can still see the entry.
+ * NOTE(review): this returns the internal struct llo_hash_entry
+ * wrapper, not he->entry — confirm callers expect the wrapper.
+ * NOTE(review): hashinit_flags() hands back a mask (llo_hashmod);
+ * indexing with '%' instead of '&' never uses the last bucket —
+ * consistent across add/lookup/del here, so correct but uneven.
+ */
+void *
+llo_hash_lookup(struct llo_hash *llo, void *key)
+{
+ struct llo_hash_head *bucket;
+ struct llo_hash_entry *he;
+ u_long hkey;
+ uint32_t hidx;
+
+ counter_u64_add(llo->llo_epoch_start, 1);
+ hkey = (llo->llo_hf)(key);
+ hidx = hkey % llo->llo_hashmod;
+ bucket = &llo->llo_ht[hidx];
+ LIST_FOREACH(he, bucket, next) {
+ if((llo->llo_cf)(he->key, key) == 0) {
+ return((void *)he);
+ }
+ }
+ counter_u64_add(llo->llo_epoch_end, 1);
+ return ((void *)NULL);
+}
+
+/*
+ * Close the read epoch opened by a successful llo_hash_lookup():
+ * bump llo_epoch_end and NULL the caller's reference so it cannot
+ * be used after release.  Must be called exactly once per hit.
+ */
+void
+llo_release(struct llo_hash *llo, void **entry, void *key)
+{
+ counter_u64_add(llo->llo_epoch_end, 1);
+ *entry = NULL;
+}
+
+static void
+llo_garfc(void *arg)
+{
+ struct llo_hash_entry *he;
+ struct llo_hash *llo;
+ struct timeval nxttm;
+ void *e;
+
+ he = (struct llo_hash_entry *)arg;
+ llo = he->parent;
+ /* Now we have all the pointers, can we delete? */
+ if (counter_u64_is_gte(he->delete_epoch, llo->llo_epoch_end)) {
+ /* Yes, lets do the delete's */
+ e = he->entry;
+ he->entry = NULL;
+ he->key = NULL;
+ he->parent = NULL;
+ counter_u64_free(he->delete_epoch);
+ free(he, llo->llo_typ);
+ (llo->llo_ff)(e);
+ atomic_subtract_int(&llo->being_deleted, 1);
+ LLO_MMTX_LOCK(llo);
+ if (llo->table_flags & LLO_IFLAG_PURGING) {
+ /* This table is scheduled for deletion, can we yet? */
+ if ((llo->being_deleted) || (llo->entries)) {
+ /*
+ * No, we check for entries too since there
+ * is a race that we ignore where an add happens
+ * at the same time as a destroy.
+ */
+ LLO_MMTX_UNLOCK(llo);
+ return;
+ }
+ /* Ok do the deed */
+ llo_do_destroy_table(llo);
+ return;
+ }
+ LLO_MMTX_UNLOCK(llo);
+ return;
+ }
+ /* nope, we need to restart gc */
+ garbage_collect_init(&he->gar);
+ nxttm.tv_sec = LLO_CALLOUT_SEC;
+ nxttm.tv_usec = LLO_CALLOUT_USEC;
+ garbage_collect_add(&he->gar, llo_garfc, (void *)he, &nxttm, __FUNCTION__, __LINE__);
+}
+
+static void
+llo_lazy_clist_to(void *arg)
+{
+ struct llo_hash *llo;
+ struct llo_hash_entry *he;
+ struct timeval nxttm;
+ int need_lock;
+
+ llo = (struct llo_hash *)arg;
+ LLO_MMTX_LOCK(llo);
+ if (callout_pending(&llo->lazy_clist_tmr)) {
+ /* Callout has been rescheduled */
+ LLO_MMTX_UNLOCK(llo);
+ return;
+ }
+ if (!callout_active(&llo->lazy_clist_tmr)) {
+ /* The callout has been stopped */
+ LLO_MMTX_UNLOCK(llo);
+ return;
+ }
+ callout_deactivate(&llo->lazy_clist_tmr);
+ /* Now can we get any of our guys off the lazy_clist? */
+ while ((he = LIST_FIRST(&llo->lazy_clist)) != NULL) {
+ if ((llo->table_flags & LLO_IFLAG_NOWAIT) == 0) {
+ /* We have to unlock */
+ need_lock = 1;
+ LLO_MMTX_UNLOCK(llo);
+ } else {
+ need_lock = 0;
+ }
+ he->delete_epoch = counter_u64_alloc(((llo->table_flags & LLO_IFLAG_NOWAIT) ? M_NOWAIT : M_WAITOK));
+ if (need_lock) {
+ LLO_MMTX_LOCK(llo);
+ }
+ if (he->delete_epoch == NULL) {
+ break;
+ }
+ LIST_REMOVE(he, cnext);
+ counter_u64_copy(he->delete_epoch, llo->llo_epoch_start);
+ /* Start the GC */
+ nxttm.tv_sec = LLO_CALLOUT_SEC;
+ nxttm.tv_usec = LLO_CALLOUT_USEC;
+ /* This won't fail unless the system is not init'd yet */
+ garbage_collect_add(&he->gar, llo_garfc, (void *)he, &nxttm, __FUNCTION__, __LINE__);
+ }
+ /* Now do we need to start a new timer? */
+ if (LIST_EMPTY(&llo->lazy_clist) == 0) {
+ /* Yes */
+ nxttm.tv_sec = LLO_CALLOUT_SEC;
+ nxttm.tv_usec = LLO_CALLOUT_USEC;
+ callout_reset(&llo->lazy_clist_tmr,
+ (TV_TO_TICKS(&nxttm) + 1),
+ llo_lazy_clist_to, llo);
+ } else {
+ llo->table_flags &= ~LLO_IFLAG_CALLUP;
+ }
+ LLO_MMTX_UNLOCK(llo);
+}
+
+int
+llo_del_from_hash(struct llo_hash *llo, void *entry, void *key)
+{
+ struct llo_hash_head *bucket;
+ struct llo_hash_entry *he;
+ struct timeval nxttm;
+ u_long hkey;
+ uint32_t hidx;
+ int mtx_idx;
+ int retval=-1;
+ int locked;
+ /* Establish the bucket */
+ hkey = (llo->llo_hf)(key);
+ hidx = hkey % llo->llo_hashmod;
+ bucket = &llo->llo_ht[hidx];
+ /* Now what type of lock? */
+ if (llo->table_flags & LLO_IFLAG_MMTX) {
+ mtx_idx = hidx;
+ } else {
+ mtx_idx = 0;
+ }
+ LLO_MTX_LOCK(llo->llo_hmtx, mtx_idx);
+ locked = 1;
+ /* Does it exist? */
+ LIST_FOREACH(he, bucket, next) {
+ if((llo->llo_cf)(he->key, key) == 0) {
+ /* Found it */
+ LIST_REMOVE_STALE(he, next);
+ atomic_add_int(&llo->being_deleted, 1);
+ atomic_subtract_int(&llo->entries, 1);
+ LLO_MTX_UNLOCK(llo->llo_hmtx, mtx_idx);
+ retval = locked = 0;
+ if (llo->table_flags & LLO_IFLAG_MINU64) {
+ he->delete_epoch = counter_u64_alloc(((llo->table_flags & LLO_IFLAG_NOWAIT) ? M_NOWAIT : M_WAITOK));
+ LLO_MMTX_LOCK(llo);
+ if (he->delete_epoch == NULL) {
+ /* We have an issue, no memory for out count.. postpone it */
+ LIST_INSERT_HEAD(&llo->lazy_clist, he, cnext);
+ if ((llo->table_flags & LLO_IFLAG_CALLUP) == 0) {
+ /* Start a retry timer since nothing is up */
+ nxttm.tv_sec = LLO_CALLOUT_SEC;
+ nxttm.tv_usec = LLO_CALLOUT_USEC;
+ callout_reset(&llo->lazy_clist_tmr,
+ (TV_TO_TICKS(&nxttm) + 1),
+ llo_lazy_clist_to, llo);
+ llo->table_flags |= LLO_IFLAG_CALLUP;
+ }
+ LLO_MMTX_UNLOCK(llo);
+ return(0);
+ }
+ LLO_MMTX_UNLOCK(llo);
+ }
+ /* Now copy out the current start epoch to the delete epoch */
+ counter_u64_copy(he->delete_epoch, llo->llo_epoch_start);
+ /* Start the GC */
+ nxttm.tv_sec = LLO_CALLOUT_SEC;
+ nxttm.tv_usec = LLO_CALLOUT_USEC;
+ /* This won't fail unless the system is not init'd yet */
+ garbage_collect_add(&he->gar, llo_garfc, (void *)he, &nxttm, __FUNCTION__, __LINE__);
+ break;
+ }
+ }
+ if (locked) {
+ LLO_MTX_UNLOCK(llo->llo_hmtx, mtx_idx);
+ }
+ if (retval == -1) {
+ retval = ENOENT;
+ }
+ return(retval);
+}
Modified: projects/rrs_mqueue/sys/kern/subr_counter.c
==============================================================================
--- projects/rrs_mqueue/sys/kern/subr_counter.c Wed Jun 11 09:24:35 2014 (r267352)
+++ projects/rrs_mqueue/sys/kern/subr_counter.c Wed Jun 11 09:31:09 2014 (r267353)
@@ -55,6 +55,18 @@ counter_u64_fetch(counter_u64_t c)
return (counter_u64_fetch_inline(c));
}
+/* Copy src's per-CPU slots into dest (see counter_u64_copy_inline). */
+void
+counter_u64_copy(counter_u64_t dest, counter_u64_t src)
+{
+ counter_u64_copy_inline(dest, src);
+}
+
+/* Return 1 if every per-CPU slot of s1 >= the matching slot of s2. */
+int
+counter_u64_is_gte(counter_u64_t s1, counter_u64_t s2)
+{
+ return(counter_u64_is_gte_inline(s1, s2));
+}
+
counter_u64_t
counter_u64_alloc(int flags)
{
Modified: projects/rrs_mqueue/sys/mips/include/counter.h
==============================================================================
--- projects/rrs_mqueue/sys/mips/include/counter.h Wed Jun 11 09:24:35 2014 (r267352)
+++ projects/rrs_mqueue/sys/mips/include/counter.h Wed Jun 11 09:31:09 2014 (r267353)
@@ -59,6 +59,35 @@ counter_u64_fetch_inline(uint64_t *p)
return (r);
}
+static inline void
+counter_u64_copy_inline(uint64_t *dest, uint64_t *src)
+{
+ uint64_t r;
+ int i;
+
+ r = 0;
+ for (i = 0; i < mp_ncpus; i++) {
+ r = counter_u64_read_one((uint64_t *)src, i);
+ *((uint64_t *)((char *)dest + sizeof(struct pcpu) * i)) = r;
+ }
+}
+
+static inline int
+counter_u64_is_gte_inline(uint64_t *s1, uint64_t *s2)
+{
+ uint64_t s1v, s2v;
+ int i;
+
+ for (i = 0; i < mp_ncpus; i++) {
+ s1v = counter_u64_read_one((uint64_t *)s1, i);
+ s2v = counter_u64_read_one((uint64_t *)s2, i);
+ if (COUNTER_LT(s1v, s2v)) {
+ return(0);
+ }
+ }
+ return(1);
+}
+
/* XXXKIB non-atomic 64bit store on 32bit, might interrupt increment */
static void
counter_u64_zero_one_cpu(void *arg)
Modified: projects/rrs_mqueue/sys/net/drbr.c
==============================================================================
--- projects/rrs_mqueue/sys/net/drbr.c Wed Jun 11 09:24:35 2014 (r267352)
+++ projects/rrs_mqueue/sys/net/drbr.c Wed Jun 11 09:31:09 2014 (r267353)
@@ -1,6 +1,415 @@
#include <net/drbr.h>
SYSCTL_DECL(_net_link);
+
+uint8_t set_up_drbr_depth=0;
+uint32_t drbr_max_priority=DRBR_MAXQ_DEFAULT-1;
+uint32_t drbr_queue_depth=DRBR_MIN_DEPTH;
+uint32_t drbr_maxq=DRBR_MAXQ_DEFAULT;
+
+TUNABLE_INT("net.link.drbr_maxq", &drbr_maxq);
+
+SYSCTL_NODE(_net, OID_AUTO, drbr, CTLFLAG_RD, 0, "DRBR Parameters");
+
+SYSCTL_INT(_net_drbr, OID_AUTO, drbr_maxq, CTLFLAG_RDTUN,
+ &drbr_maxq, 0, "max number of priority queues per interface");
+SYSCTL_INT(_net_drbr, OID_AUTO, drbr_queue_depth, CTLFLAG_RD,
+ &drbr_queue_depth, 0, "Queue length configured via ifqmaxlen");
+/* Was a copy-paste of the queue-depth description; this OID reports
+ * the highest usable priority (drbr_maxq - 1), not a queue length. */
+SYSCTL_INT(_net_drbr, OID_AUTO, drbr_max_priority, CTLFLAG_RD,
+ &drbr_max_priority, 0, "Highest priority queue index (drbr_maxq - 1)");
+
+/*
+ * Allocate a drbr ring set: one buf_ring per priority queue.  type and
+ * flags are passed through to malloc(9); tmtx is handed to
+ * buf_ring_alloc() for its debug lock.  First caller also derives the
+ * global queue depth from ifqmaxlen (rounded down to a power of two,
+ * floored at DRBR_MIN_DEPTH).  Returns NULL on allocation failure.
+ */
+struct drbr_ring *
+drbr_alloc(struct malloc_type *type, int flags, struct mtx *tmtx)
+{
+ struct drbr_ring *rng;
+ int i;
+ if (set_up_drbr_depth == 0) {
+ drbr_max_priority = drbr_maxq-1;
+ set_up_drbr_depth = 1;
+ /* buf_ring depths must be a power of two. */
+ drbr_queue_depth = 1 << ((fls(ifqmaxlen)-1));
+ if (drbr_queue_depth < DRBR_MIN_DEPTH) {
+ drbr_queue_depth = DRBR_MIN_DEPTH;
+ }
+ }
+ rng = (struct drbr_ring *)malloc(sizeof(struct drbr_ring), type, flags);
+ if (rng == NULL) {
+ return(NULL);
+ }
+ memset(rng, 0, sizeof(struct drbr_ring));
+ DRBR_LOCK_INIT(rng);
+ rng->re = (struct drbr_ring_entry *)malloc((sizeof(struct drbr_ring_entry)*drbr_maxq),
+ type, flags);
+ if (rng->re == NULL) {
+ free(rng, type);
+ return(NULL);
+ }
+ memset(rng->re, 0, (sizeof(struct drbr_ring_entry) * drbr_maxq));
+ /* Ok get the queues */
+ for (i=0; i<drbr_maxq; i++) {
+ rng->re[i].re_qs = buf_ring_alloc(drbr_queue_depth, type, flags, tmtx);
+ if (rng->re[i].re_qs == NULL) {
+ goto out_err;
+ }
+ }
+ rng->lowq_with_data = 0xffffffff;
+ return(rng);
+out_err:
+ /*
+ * Release with buf_ring_free(), the documented counterpart of
+ * buf_ring_alloc() (was a raw free()).  re[] was zeroed above, so
+ * unallocated slots are NULL and safely skipped.
+ * NOTE(review): DRBR_LOCK_INIT has no matching destroy here —
+ * confirm whether the lock macro needs teardown on this path.
+ */
+ for(i=0; i<drbr_maxq; i++) {
+ if (rng->re[i].re_qs) {
+ buf_ring_free(rng->re[i].re_qs, type);
+ }
+ }
+ free(rng->re, type);
+ free(rng, type);
+ return (NULL);
+}
+
+#define PRIO_NAME_LEN 32
+void
+drbr_add_sysctl_stats(device_t dev, struct sysctl_oid_list *queue_list,
+ struct drbr_ring *rng)
+{
+ int i;
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid *prio_node;
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
More information about the svn-src-projects
mailing list