socsvn commit: r240550 - in soc2012/gmiller/locking-head: .
lib/libwitness
gmiller at FreeBSD.org
gmiller at FreeBSD.org
Mon Aug 20 04:26:53 UTC 2012
Author: gmiller
Date: Mon Aug 20 04:26:50 2012
New Revision: 240550
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=240550
Log:
r240561 at FreeBSD-dev: root | 2012-08-17 20:12:46 -0500
Add comments to libwitness.
Modified:
soc2012/gmiller/locking-head/ (props changed)
soc2012/gmiller/locking-head/lib/libwitness/api.c
soc2012/gmiller/locking-head/lib/libwitness/graph.c
soc2012/gmiller/locking-head/lib/libwitness/lockinfo.c
soc2012/gmiller/locking-head/lib/libwitness/locklist.c
soc2012/gmiller/locking-head/lib/libwitness/unwind.c
soc2012/gmiller/locking-head/lib/libwitness/witness.h
Modified: soc2012/gmiller/locking-head/lib/libwitness/api.c
==============================================================================
--- soc2012/gmiller/locking-head/lib/libwitness/api.c Mon Aug 20 02:28:07 2012 (r240549)
+++ soc2012/gmiller/locking-head/lib/libwitness/api.c Mon Aug 20 04:26:50 2012 (r240550)
@@ -48,6 +48,16 @@
int _pthread_spin_unlock(pthread_spinlock_t *spin);
int _pthread_spin_destroy(pthread_spinlock_t *spin);
+/*
+ Keep witness from including any lock usage that occurs as a result of
+ witness itself.
+
+ All API functions should call enter_witness() before doing anything that
+ could acquire a lock or use shared data that may experience race
+ conditions. Before returning, every function that calls enter_witness()
+ should call leave_witness().
+*/
+
pthread_mutex_t witness_mtx = PTHREAD_MUTEX_INITIALIZER;
static int witness_depth = 0;
@@ -73,6 +83,12 @@
return (witness_depth);
}
+/*
+ Each of these wrapper functions ensures that the default lock name has been
+ set and calls add_lock() or remove_lock() to update the thread-local list
+ of locks that are currently held and to adjust the lock order graph and
+ LoR logs.
+*/
+
int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
@@ -378,6 +394,10 @@
return (ret);
}
+/*
+ The pthread_lockorder_set_np() function attempts to insert the specified
+ pair into the lock order graph.
+*/
int
pthread_lockorder_set_np(void *first, void *second)
{
@@ -395,6 +415,10 @@
return (ret);
}
+/*
+ The pthread_lockorder_bless_np() function prevents LoRs from being generated
+ for the given pair of locks.
+*/
int
pthread_lockorder_bless_np(void *first_addr, void *second_addr)
{
@@ -438,6 +462,9 @@
return (ret);
}
+/*
+ pthread_lockorder_reset_np() resets all lock order information.
+*/
void
pthread_lockorder_reset_np(void)
{
@@ -450,6 +477,11 @@
leave_witness();
}
+/*
+ The pthread_*_setname_np() functions set lock names and ensure that all
+ locks with the same name are treated as the same lock.
+*/
+
int
pthread_mutex_setname_np(pthread_mutex_t *mutex, const char *name)
{
@@ -491,6 +523,20 @@
return (ret);
}
+
+/*
+ The pthread_lor_*_np() functions retrieve LoR log information.
+
+ pthread_lor_begin_np() must be called first to initialize the
+ pthread_lor_np structure.
+
+ pthread_lor_next_np() is used to retrieve one LoR entry per call.
+ Returns 0 if no LoR records remain.
+
+ pthread_lor_end_np() should be called after the final call to
+ pthread_lor_next_np().
+*/
+
int
pthread_lor_begin_np(struct pthread_lor_np *lor)
{
@@ -597,6 +643,16 @@
leave_witness();
}
+/*
+ The pthread_lockorder_*_np() functions are used to retrieve the contents
+ of the lock order graph.
+
+ pthread_lockorder_begin_np() is called to initialize the
+ pthread_lockorder_np structure, pthread_lockorder_next_np() is then called
+ until it returns 0, and pthread_lockorder_end_np() is called to clean up the
+ pthread_lockorder_np structure.
+*/
+
int
pthread_lockorder_begin_np(struct pthread_lockorder_np *node)
{
Modified: soc2012/gmiller/locking-head/lib/libwitness/graph.c
==============================================================================
--- soc2012/gmiller/locking-head/lib/libwitness/graph.c Mon Aug 20 02:28:07 2012 (r240549)
+++ soc2012/gmiller/locking-head/lib/libwitness/graph.c Mon Aug 20 04:26:50 2012 (r240550)
@@ -27,6 +27,10 @@
#include "witness.h"
+/*
+ scan_graph() returns 1 if "lock" is reachable from "graph" in the lock
+ order graph, or 0 if it isn't.
+*/
static int
scan_graph(struct lock_info *graph, struct lock_info *lock)
{
@@ -41,6 +45,7 @@
return (1);
}
+ /* Scan all children of the current node. */
ret = 0;
child = graph->child;
while (ret == 0 && child != NULL) {
@@ -51,6 +56,10 @@
return (ret);
}
+/*
+ Remove the edge from "from" to "to" if "to" would still be reachable from
+ "from" without the direct edge.
+*/
static void
optimize_path(struct lock_info *from, struct lock_info *to)
{
@@ -83,6 +92,10 @@
}
}
+/*
+ Insert an edge from "from" to "to" in the graph, while maintaining an
+ optimal lock order graph. Returns 0 if successful.
+*/
int
insert_lock(struct lock_info *from, struct lock_info *to)
{
@@ -92,15 +105,27 @@
return (0);
}
+ /*
+ If "from" is reachable from "to," a lock order reversal has been
+ detected.
+ */
if (scan_graph(to, from) && !blessed(from, to)) {
return (-1);
}
+ /* Insert the edge if it's not already present. */
if (to != from->child) {
to->sibling = from->child;
from->child = to;
}
+ /*
+ Redundant edges may be present, in which case they fall into two
+ categories. Either an edge from "from" to one of its
+ children will be redundant (in which case it will be handled by the
+ following loop) or the newly inserted edge will be (in which case
+ the final optimize_path() call will take care of it).
+ */
child = to->sibling;
while (child != NULL) {
optimize_path(from, child);
Modified: soc2012/gmiller/locking-head/lib/libwitness/lockinfo.c
==============================================================================
--- soc2012/gmiller/locking-head/lib/libwitness/lockinfo.c Mon Aug 20 02:28:07 2012 (r240549)
+++ soc2012/gmiller/locking-head/lib/libwitness/lockinfo.c Mon Aug 20 04:26:50 2012 (r240550)
@@ -32,6 +32,10 @@
static SLIST_HEAD(lock_instance_head, lock_instance) lock_instance_head =
SLIST_HEAD_INITIALIZER(lock_instance_head);
+/*
+ lookup_lock() returns the lock_instance structure associated with the
+ POSIX lock object, creating a new one if needed.
+*/
struct lock_instance *
lookup_lock(void *lock)
{
@@ -59,6 +63,9 @@
return (inst);
}
+/*
+ Free the lock_instance structure associated with a POSIX lock object.
+*/
void
destroy_lock(void *lock)
{
@@ -72,6 +79,9 @@
}
}
+/*
+ Returns 1 if first and second have been blessed, or 0 if not.
+*/
int
blessed(struct lock_info *first, struct lock_info *second)
{
@@ -86,6 +96,9 @@
return (0);
}
+/*
+ Free all lock_info data.
+*/
void
reset_lock_info(void)
{
@@ -111,6 +124,9 @@
}
}
+/*
+ Free all lock_instance data.
+*/
void
reset_lock_instance(void)
{
@@ -125,6 +141,10 @@
}
}
+/*
+ Returns the lock_info associated with a lock_instance, creating a new one if
+ needed.
+*/
struct lock_info *
get_lock_info(struct lock_instance *inst)
{
@@ -152,6 +172,9 @@
return (info);
}
+/*
+ Return the lock_info associated with a particular lock name.
+*/
static struct lock_info *
lookup_info(const char *name)
{
@@ -180,6 +203,10 @@
return (info);
}
+/*
+ Set the name associated with a lock. If multiple locks share the same name,
+ set_lock_name() will ensure that they share the same lock_info.
+*/
int
set_lock_name(void *lock, const char *name)
{
@@ -208,6 +235,9 @@
return "";
}
+/*
+ Ensure that a lock has a default name if no name has been previously set.
+*/
void
check_default_name(void *lock, const char *prefix)
{
Modified: soc2012/gmiller/locking-head/lib/libwitness/locklist.c
==============================================================================
--- soc2012/gmiller/locking-head/lib/libwitness/locklist.c Mon Aug 20 02:28:07 2012 (r240549)
+++ soc2012/gmiller/locking-head/lib/libwitness/locklist.c Mon Aug 20 04:26:50 2012 (r240550)
@@ -27,6 +27,9 @@
#include "witness.h"
+/*
+ lock_entry is used to maintain the list of locks held by the current thread.
+*/
struct lock_entry {
SLIST_ENTRY(lock_entry) lock_next;
struct lock_info *lock;
@@ -40,6 +43,9 @@
static int exit_set = 0;
struct lor_head lor_head = STAILQ_HEAD_INITIALIZER(lor_head);
+/*
+ Record a lock order reversal in the lor log.
+*/
static void
log_reversal(struct lock_info *lock, struct backtrace *lock_trace,
struct lock_info *previous, struct backtrace *previous_trace)
@@ -78,6 +84,11 @@
free(entry);
}
+/*
+ Add a lock to the list of locks held by the current thread and insert
+ it into the lock order graph. If a lock order reversal is detected,
+ log it.
+*/
void
add_lock(void *lock)
{
@@ -86,11 +97,13 @@
struct lock_instance *inst;
struct lock_info *info;
+ /* Call write_xml() on exit to generate the XML data file. */
if (exit_set == 0) {
atexit(write_xml);
exit_set = 1;
}
+ /* Don't track lock acquisitions that occur within witness. */
if (in_witness() > 1 || lock == NULL) {
return;
}
@@ -109,6 +122,10 @@
record_backtrace(&entry->trace);
entry->lock = info;
+ /*
+ If a reset has been requested since the last time this thread's
+ lock list was reset, clear the list and update the reset count.
+ */
if (reset_count > thread_reset_count) {
thread_reset_count = reset_count;
@@ -125,12 +142,19 @@
SLIST_INSERT_HEAD(&lock_head, entry, lock_next);
+ /*
+ Insert the lock into the lock order graph and log the reversal if
+ one is detected.
+ */
if (next != NULL && insert_lock(next->lock, entry->lock) < 0) {
log_reversal(entry->lock, &entry->trace, next->lock,
&next->trace);
}
}
+/*
+ Remove a lock from the list of locks currently held.
+*/
void
remove_lock(void *lock)
{
@@ -152,6 +176,9 @@
}
}
+/*
+ Request a reset of the list of locks held by each thread.
+*/
void
reset_lists(void)
{
Modified: soc2012/gmiller/locking-head/lib/libwitness/unwind.c
==============================================================================
--- soc2012/gmiller/locking-head/lib/libwitness/unwind.c Mon Aug 20 02:28:07 2012 (r240549)
+++ soc2012/gmiller/locking-head/lib/libwitness/unwind.c Mon Aug 20 04:26:50 2012 (r240550)
@@ -27,6 +27,10 @@
#include "witness.h"
+/*
+ Return a string containing a description of the given backtrace. The string
+ must be freed by the caller using free().
+*/
char *
trace_str(struct backtrace *trace)
{
@@ -41,6 +45,10 @@
buffer[0] = '\0';
SLIST_FOREACH(frame, trace, frame_next) {
+ /*
+ Ignore pthread_* API functions and any calls made
+ from within them.
+ */
if (strncmp(frame->id, "pthread_", 8) == 0) {
break;
}
@@ -52,6 +60,7 @@
strcat(buffer, line_buf);
+ /* Include no more than 10 calls. */
if (++count == 10) {
break;
}
@@ -61,6 +70,11 @@
return (buffer);
}
+/*
+ If USE_LIBUNWIND is set in src.conf, use devel/libunwind to record
+ backtrace information.
+*/
+
#ifdef USE_LIBUNWIND
#include <libunwind.h>
Modified: soc2012/gmiller/locking-head/lib/libwitness/witness.h
==============================================================================
--- soc2012/gmiller/locking-head/lib/libwitness/witness.h Mon Aug 20 02:28:07 2012 (r240549)
+++ soc2012/gmiller/locking-head/lib/libwitness/witness.h Mon Aug 20 04:26:50 2012 (r240550)
@@ -37,11 +37,22 @@
#define MAX_FRAME_ID_LENGTH (80)
#define MAX_DEFAULT_NAME_LENGTH (80)
+/*
+ Each lock_info structure has a list of blessing structures that records the
+ list of locks whose ordering relative to the given lock can be safely
+ ignored.
+*/
struct blessing {
SLIST_ENTRY(blessing) bless_next;
struct lock_info *lock;
};
+/*
+ One lock_info structure exists for each lock in the ordering graph. The
+ child pointer points to the first node that has an outbound edge from the
+ current node. Sibling points to the next child of the current node's
+ parent(s).
+*/
struct lock_info {
SLIST_ENTRY(lock_info) lock_info_next;
struct lock_info *child;
@@ -52,12 +63,21 @@
SLIST_HEAD(lock_info_head, lock_info);
+/*
+ lock_instance holds data about a pthread_*_t object. Multiple locks may
+ have the same name, in which case multiple lock_instance structures will
+ point to a single lock_info structure.
+*/
struct lock_instance {
SLIST_ENTRY(lock_instance) lock_instance_next;
struct lock_info *info;
void *lock;
};
+/*
+ Each stack_frame entry represents one frame in the backtrace data that is
+ recorded whenever a lock is acquired.
+*/
struct stack_frame {
SLIST_ENTRY(stack_frame) frame_next;
char id[MAX_FRAME_ID_LENGTH + 1];
@@ -65,6 +85,10 @@
SLIST_HEAD(backtrace, stack_frame);
+/*
+ lor_entry stores information about the locks and call sequences for a
+ lock order reversal.
+*/
struct lor_entry {
STAILQ_ENTRY(lor_entry) lor_next;
struct lock_info *lock_first;
More information about the svn-soc-all
mailing list