PERFORCE change 123570 for review
Fredrik Lindberg
fli at FreeBSD.org
Mon Jul 16 01:22:12 UTC 2007
http://perforce.freebsd.org/chv.cgi?CH=123570
Change 123570 by fli at fli_nexus on 2007/07/16 01:22:06
Style fixes (long lines) and debugging output tweaks only.
Affected files ...
.. //depot/projects/soc2007/fli-mdns_sd/mdnsd/event.c#4 edit
.. //depot/projects/soc2007/fli-mdns_sd/mdnsd/hash.c#6 edit
.. //depot/projects/soc2007/fli-mdns_sd/mdnsd/hash.h#6 edit
.. //depot/projects/soc2007/fli-mdns_sd/mdnsd/stack_buf.c#8 edit
.. //depot/projects/soc2007/fli-mdns_sd/mdnsd/stack_mdns.h#5 edit
.. //depot/projects/soc2007/fli-mdns_sd/mdnsd/wqueue.c#3 edit
Differences ...
==== //depot/projects/soc2007/fli-mdns_sd/mdnsd/event.c#4 (text+ko) ====
@@ -140,13 +140,16 @@
again:
switch (ev->ev_type) {
case EVENT_TYPE_IO:
- ret = ev->ev_cb.ev_handler.io(&ev->ev_data.io, ev->ev_handler_arg);
+ ret = ev->ev_cb.ev_handler.io(&ev->ev_data.io,
+ ev->ev_handler_arg);
break;
case EVENT_TYPE_TMR:
- ret = ev->ev_cb.ev_handler.tmr(&ev->ev_data.tmr, ev->ev_handler_arg);
+ ret = ev->ev_cb.ev_handler.tmr(&ev->ev_data.tmr,
+ ev->ev_handler_arg);
break;
case EVENT_TYPE_SIG:
- ret = ev->ev_cb.ev_handler.sig(&ev->ev_data.sig, ev->ev_handler_arg);
+ ret = ev->ev_cb.ev_handler.sig(&ev->ev_data.sig,
+ ev->ev_handler_arg);
break;
}
@@ -235,7 +238,7 @@
/* Exclusive event already in progress */
if ((ev->ev_flags & EVENT_FLAG_EX) && ev->ev_refcnt > 0) {
dprintf(DEBUG_EVENT,
- "Exclusive event already in progress ev=%x", ev);
+ "Exclusive event already in progress ev=%x", ev);
ev->ev_redo++;
EV_UNLOCK(ev);
continue;
@@ -243,8 +246,8 @@
ev->ev_refcnt++;
EV_UNLOCK(ev);
- dprintf(DEBUG_EVENT, "Event fired, dispatched to queue=%x, ev=%x",
- wq, ev);
+ dprintf(DEBUG_EVENT,
+ "Event fired, dispatched to queue=%x, ev=%x", wq, ev);
wa.ptr = ev;
#ifdef HAVE_PTHREAD
error = wq_enqueue(wq, event_engine, &wa);
@@ -293,7 +296,7 @@
*/
int
event_add(struct eventlist *evl, int type, void *handler, ev_arg *handler_arg,
- void *init, ev_arg *init_arg)
+ void *init, ev_arg *init_arg)
{
int ret;
struct event *ev;
@@ -333,8 +336,8 @@
switch (type) {
case EVENT_TYPE_IO:
if (init != NULL)
- ret = ev->ev_cb.ev_init.io(EVENT_INIT_OPEN, &ev->ev_data.io,
- ev_arg_init);
+ ret = ev->ev_cb.ev_init.io(EVENT_INIT_OPEN,
+ &ev->ev_data.io, ev_arg_init);
if (ev->ev_data.io.evio_dir == EVENT_IO_READ)
kev.filter = EVFILT_READ;
else if (ev->ev_data.io.evio_dir == EVENT_IO_WRITE)
@@ -345,8 +348,8 @@
break;
case EVENT_TYPE_TMR:
if (init != NULL)
- ret = ev->ev_cb.ev_init.tmr(EVENT_INIT_OPEN, &ev->ev_data.tmr,
- ev_arg_init);
+ ret = ev->ev_cb.ev_init.tmr(EVENT_INIT_OPEN,
+ &ev->ev_data.tmr, ev_arg_init);
kev.filter = EVFILT_TIMER;
if (ev->ev_data.tmr.evtmr_oneshot)
kev.flags |= EV_ONESHOT;
@@ -356,8 +359,8 @@
break;
case EVENT_TYPE_SIG:
if (init != NULL)
- ret = ev->ev_cb.ev_init.sig(EVENT_INIT_OPEN, &ev->ev_data.sig,
- ev_arg_init);
+ ret = ev->ev_cb.ev_init.sig(EVENT_INIT_OPEN,
+ &ev->ev_data.sig, ev_arg_init);
kev.filter = EVFILT_SIGNAL;
signal(ev->ev_data.sig.evsig_signo, SIG_IGN);
kev.ident = ev->ev_data.sig.evsig_signo;
@@ -445,7 +448,8 @@
MDNS_INIT_ASSERT(ev, ev_magic);
if (ev->ev_refcnt > 0) {
- dprintf(DEBUG_EVENT, "Event busy ev=%x, refcnt=%d", ev, ev->ev_refcnt);
+ dprintf(DEBUG_EVENT, "Event busy ev=%x, refcnt=%d",
+ ev, ev->ev_refcnt);
ev->ev_flags |= EVENT_FLAG_DYING;
return (1);
}
==== //depot/projects/soc2007/fli-mdns_sd/mdnsd/hash.c#6 (text+ko) ====
@@ -80,16 +80,20 @@
hash(const void *key, size_t length, uint32_t initval)
{
uint32_t a, b, c; /* internal state */
- union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
-
+ /* needed for Mac Powerbook G4 */
+ union { const void *ptr; size_t i; } u;
/* Set up the internal state */
a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
u.ptr = key;
if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
- const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+ /* read 32-bit chunks */
+ const uint32_t *k = (const uint32_t *)key;
- /* all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ /*
+ * all but last block: aligned reads and
+ * affect 32 bits of (a,b,c)
+ */
while (length > 12) {
a += k[0];
b += k[1];
@@ -100,13 +104,14 @@
}
/*
- * handle the last (probably partial) block
- * "k[2]&0xffffff" actually reads beyond the end of the string, but
- * then masks off the part it's not allowed to read. Because the
- * string is aligned, the masked-off tail is in the same word as the
- * rest of the string. Every machine with memory protection I've seen
- * does it on word boundaries, so is OK with this. But VALGRIND will
- * still catch it and complain. The masking trick does make the hash
+	 * handle the last (probably partial) block. "k[2]&0xffffff"
+ * actually reads beyond the end of the string, but
+ * then masks off the part it's not allowed to read.
+ * Because the string is aligned, the masked-off tail is in the
+ * same word as the rest of the string. Every machine with
+ * memory protection I've seen does it on word boundaries,
+ * so is OK with this. But VALGRIND will still catch it and
+ * complain. The masking trick does make the hash
	 * noticeably faster for short strings (like English words).
*/
@@ -127,7 +132,8 @@
}
} else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
- const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
+ /* read 16-bit chunks */
+ const uint16_t *k = (const uint16_t *)key;
const uint8_t *k8;
/* all but last block: aligned reads and different mixing */
@@ -240,7 +246,8 @@
hep = SLIST_FIRST(&ht->ht_new);
if (hep == NULL || hep->hep_pos >= hep->hep_size) {
len = 1 << ht->ht_alloc_size;
- hep = malloc(sizeof(struct he_pool) + (sizeof(struct hashentry) * len));
+ hep = malloc(sizeof(struct he_pool) +
+ (sizeof(struct hashentry) * len));
hep->hep_size = len;
hep->hep_pos = 0;
SLIST_INSERT_HEAD(&ht->ht_new, hep, hep_next);
@@ -274,7 +281,7 @@
static void
grow(struct hashtbl *ht)
{
- struct hashbkt *buckets;
+ struct hashbkt *bkts;
struct hashentry *he, *he2;
size_t i, len;
uint32_t hval;
@@ -283,23 +290,23 @@
ht->ht_tblsz *= 2;
ht->ht_mask = ht->ht_tblsz - 1;
- buckets = malloc(sizeof(struct hashbkt) * ht->ht_tblsz);
+ bkts = malloc(sizeof(struct hashbkt) * ht->ht_tblsz);
for (i = 0; i < ht->ht_tblsz; i++) {
- TAILQ_INIT(&buckets[i].hb_table);
- buckets[i].hb_len = 0;
+ TAILQ_INIT(&bkts[i].hb_table);
+ bkts[i].hb_len = 0;
}
for (i = 0; i < len; i++) {
- TAILQ_FOREACH_SAFE(he, &ht->ht_buckets[i].hb_table, he_next, he2) {
+ TAILQ_FOREACH_SAFE(he, &ht->ht_bkts[i].hb_table, he_next, he2) {
hval = he->he_hash & ht->ht_mask;
- TAILQ_REMOVE(&ht->ht_buckets[i].hb_table, he, he_next);
- TAILQ_INSERT_TAIL(&buckets[hval].hb_table, he, he_next);
- buckets[hval].hb_len++;
+ TAILQ_REMOVE(&ht->ht_bkts[i].hb_table, he, he_next);
+ TAILQ_INSERT_TAIL(&bkts[hval].hb_table, he, he_next);
+ bkts[hval].hb_len++;
}
}
- free(ht->ht_buckets);
- ht->ht_buckets = buckets;
+ free(ht->ht_bkts);
+ ht->ht_bkts = bkts;
}
/*
@@ -312,7 +319,7 @@
{
size_t i;
- ht->ht_buckets = malloc(sizeof(struct hashbkt) * len);
+ ht->ht_bkts = malloc(sizeof(struct hashbkt) * len);
ht->ht_tblsz = len;
ht->ht_grow = growsz;
ht->ht_col = col;
@@ -322,8 +329,8 @@
SLIST_INIT(&ht->ht_free);
for (i = 0; i < len; i++) {
- TAILQ_INIT(&ht->ht_buckets[i].hb_table);
- ht->ht_buckets[i].hb_len = 0;
+ TAILQ_INIT(&ht->ht_bkts[i].hb_table);
+ ht->ht_bkts[i].hb_len = 0;
}
return (0);
@@ -340,7 +347,7 @@
size_t i;
for (i = 0; i < ht->ht_tblsz; i++) {
- TAILQ_FOREACH(he, &ht->ht_buckets[i].hb_table, he_next) {
+ TAILQ_FOREACH(he, &ht->ht_bkts[i].hb_table, he_next) {
if (he->he_flags & HASHTBL_KEYDUP)
free(he->he_key.vol);
}
@@ -348,7 +355,7 @@
SLIST_FOREACH_SAFE(hep, &ht->ht_new, hep_next, hep2) {
free(hep);
}
- free(ht->ht_buckets);
+ free(ht->ht_bkts);
}
/*
@@ -363,7 +370,7 @@
{
struct hashentry *he;
- TAILQ_FOREACH(he, &ht->ht_buckets[hval].hb_table, he_next) {
+ TAILQ_FOREACH(he, &ht->ht_bkts[hval].hb_table, he_next) {
if (keylen == he->he_keylen) {
if (memcmp(key, he->he_key.con, keylen) == 0)
break;
@@ -408,12 +415,12 @@
}
he->he_keylen = keylen;
he->he_data = data;
- TAILQ_INSERT_TAIL(&ht->ht_buckets[hval].hb_table, he, he_next);
- ht->ht_buckets[hval].hb_len++;
+ TAILQ_INSERT_TAIL(&ht->ht_bkts[hval].hb_table, he, he_next);
+ ht->ht_bkts[hval].hb_len++;
/* Attempt to grow table if needed */
if ((ht->ht_grow > ht->ht_tblsz) &&
- (ht->ht_buckets[hval].hb_len >= ht->ht_col))
+ (ht->ht_bkts[hval].hb_len >= ht->ht_col))
grow(ht);
return (0);
@@ -438,7 +445,7 @@
he = find(ht, hval, key, keylen);
if (he != NULL) {
- TAILQ_REMOVE(&ht->ht_buckets[hval].hb_table, he, he_next);
+ TAILQ_REMOVE(&ht->ht_bkts[hval].hb_table, he, he_next);
if (he->he_flags & HASHTBL_KEYDUP)
free(he->he_key.vol);
free_he(ht, he);
@@ -482,20 +489,22 @@
size_t i;
for (i = 0; i < ht->ht_tblsz; i++) {
- he = he_head = TAILQ_FIRST(&ht->ht_buckets[i].hb_table);
+ he = he_head = TAILQ_FIRST(&ht->ht_bkts[i].hb_table);
while (he != NULL) {
he_next = TAILQ_NEXT(he, he_next);
he_prev = TAILQ_PREV(he, hashbkt_head, he_next);
cb(ht, he->he_key.con, he->he_keylen, he->he_data, arg);
- if (he_head != TAILQ_FIRST(&ht->ht_buckets[i].hb_table)) {
- he = he_head = TAILQ_FIRST(&ht->ht_buckets[i].hb_table);
+ if (he_head != TAILQ_FIRST(&ht->ht_bkts[i].hb_table)) {
+ he = TAILQ_FIRST(&ht->ht_bkts[i].hb_table);
+ he_head = he;
}
else {
he = he_next;
if (he_prev != NULL) {
- he_tmp = TAILQ_NEXT(TAILQ_NEXT(he_prev, he_next), he_next);
+ he_tmp = TAILQ_NEXT(TAILQ_NEXT(he_prev,
+ he_next), he_next);
if (he_tmp != NULL && he_tmp != he_next)
he = he_tmp;
}
==== //depot/projects/soc2007/fli-mdns_sd/mdnsd/hash.h#6 (text+ko) ====
@@ -66,10 +66,10 @@
* Hash table
*/
struct hashtbl {
- struct hashbkt *ht_buckets; /* Bucket array */
+ struct hashbkt *ht_bkts; /* Bucket array */
size_t ht_tblsz; /* Size of table */
- size_t ht_grow; /* Allowed grow size */
- size_t ht_col; /* Allowed collisions */
+ size_t ht_grow; /* Allowed grow size */
+ size_t ht_col; /* Allowed collisions */
uint32_t ht_mask;
uint8_t ht_alloc_size;
SLIST_HEAD(, he_pool) ht_new; /* new hashentry objs */
==== //depot/projects/soc2007/fli-mdns_sd/mdnsd/stack_buf.c#8 (text+ko) ====
@@ -154,7 +154,8 @@
buf->b_flags = 0;
buf->b_sz = (len == 0) ?
- ((flags & MDNS_BP_HUGE) ? MDNS_PKG_MAX_LEN : bp->bp_defsz) : len;
+ ((flags & MDNS_BP_HUGE) ?
+ MDNS_PKG_MAX_LEN : bp->bp_defsz) : len;
buf->b_buf = malloc(buf->b_sz);
if (buf->b_buf == NULL) {
free(buf);
@@ -183,8 +184,9 @@
TAILQ_INSERT_TAIL(&bh->bh_list, buf, b_next);
bh->bh_size++;
- dprintf(DEBUG_BUF, "Blocks in use %d, free normal=%d, huge=%d",
- bp->bp_allocs, bp->bp_buffree[0], bp->bp_buffree[1]);
+ dprintf(DEBUG_BUF,
+ "Alloced buf=%x, blocks in use %d, free normal=%d, huge=%d",
+ buf, bp->bp_allocs, bp->bp_buffree[0], bp->bp_buffree[1]);
out:
if (!(flags & MDNS_BP_LOCKED))
==== //depot/projects/soc2007/fli-mdns_sd/mdnsd/stack_mdns.h#5 (text+ko) ====
@@ -31,7 +31,7 @@
#include "stack_buf.h"
/* Multicast DNS constants */
-#define MDNS_MCAST_INET "224.0.0.251" /* IPv4 multicast address */
+#define MDNS_MCAST_INET "224.0.0.251" /* IPv4 multicast address */
#define MDNS_MCAST_INET6 "ff02::fb" /* IPv6 multicast address */
#define MDNS_MCAST_PORT 5353 /* Port number */
@@ -58,7 +58,7 @@
* r=<value> specifies the value for this field in response messages
*/
struct mdns_header {
- unsigned int h_id:16; /* 0 for multicast query/resp, unicast as normal */
+ unsigned int h_id:16; /* 0 for multicast, unicast as normal */
#if BYTE_ORDER == LITTLE_ENDIAN
/* byte 3 */
unsigned int h_rd:1; /* Recursion desired, q/r=0 ignore on recv */
==== //depot/projects/soc2007/fli-mdns_sd/mdnsd/wqueue.c#3 (text+ko) ====
@@ -78,14 +78,14 @@
bzero(wqt, sizeof(struct wq_tag));
wq->wq_tags++;
}
-
else {
wqt = TAILQ_FIRST(&wq->wq_free);
TAILQ_REMOVE(&wq->wq_free, wqt, wqt_list);
}
MDNS_INIT_SET(wqt, wqt_magic);
wq->wq_tags_used++;
- dprintf(DEBUG_WQUEUE, "%d tags, used %d", wq->wq_tags, wq->wq_tags_used);
+ dprintf(DEBUG_WQUEUE, "%d tags, used %d",
+ wq->wq_tags, wq->wq_tags_used);
return (wqt);
}
#endif
@@ -121,8 +121,8 @@
}
wq->wq_tags_used--;
-
- dprintf(DEBUG_WQUEUE, "%d tags, used %d", wq->wq_tags, wq->wq_tags_used);
+ dprintf(DEBUG_WQUEUE, "%d tags, used %d", wq->wq_tags,
+ wq->wq_tags_used);
}
#endif
@@ -248,11 +248,12 @@
*/
thr = new_worker(wq);
if (thr == NULL)
- logger(LOG_WARNING, "Failed to create new worker thread");
+ logger(LOG_WARNING,
+ "Failed to create new worker thread");
}
dprintf(DEBUG_WQUEUE, "Enqueued func=%x on queue=%x",
- func, wq);
+ func, wq);
WQ_UNLOCK(wq);
#else
@@ -286,7 +287,7 @@
return (NULL);
dprintf(DEBUG_WQUEUE, "New queue worker created, thr=%x, workers=%d",
- thr->wqthr_id, wq->wq_workers);
+ thr->wqthr_id, wq->wq_workers);
return (thr);
}
@@ -307,7 +308,7 @@
TAILQ_REMOVE(&wq->wq_threads, wqthr, wqthr_list);
wq->wq_workers--;
dprintf(DEBUG_WQUEUE, "Queue worker destroyed, thr=%x, workers=%d",
- wqthr->wqthr_id, wq->wq_workers);
+ wqthr->wqthr_id, wq->wq_workers);
free(wqthr);
}
#endif
@@ -340,10 +341,10 @@
wqt = TAILQ_FIRST(&wq->wq_queue);
TAILQ_REMOVE(&wq->wq_queue, wqt, wqt_list);
- dprintf(DEBUG_WQUEUE,
- "Worker executing, thr=%x, func=%x, workers=%d, idle=%d",
- wqthr->wqthr_id, wqt->wqt_func, wq->wq_workers,
- wq->wq_workers_idle);
+		dprintf(DEBUG_WQUEUE, "Worker executing, "
+		    "thr=%x, func=%x, workers=%d, idle=%d",
+ wqthr->wqthr_id, wqt->wqt_func, wq->wq_workers,
+ wq->wq_workers_idle);
WQ_UNLOCK(wq);
/* Execute job function */
@@ -363,13 +364,15 @@
clock_gettime(CLOCK_REALTIME, &timeout);
timeout.tv_sec += 3;
- error = pthread_cond_timedwait(&wq->wq_cond, &wq->wq_mtx, &timeout);
+ error = pthread_cond_timedwait(&wq->wq_cond, &wq->wq_mtx,
+ &timeout);
/*
- * Commit suicide if we have been idle too long and there are enough
- * other workers around.
+ * Commit suicide if we have been idle too long and there
+ * are enough other workers around.
*/
- if (error == ETIMEDOUT && wq->wq_workers >= wq->wq_workers_min) {
+ if (error == ETIMEDOUT &&
+ wq->wq_workers >= wq->wq_workers_min) {
wq->wq_workers_idle--;
break;
}
More information about the p4-projects
mailing list