svn commit: r277586 - in stable/10: cddl/contrib/opensolaris/cmd/zdb sys/cddl/contrib/opensolaris/uts/common/fs/zfs sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys

Xin LI delphij at FreeBSD.org
Fri Jan 23 18:33:52 UTC 2015


Author: delphij
Date: Fri Jan 23 18:33:50 2015
New Revision: 277586
URL: https://svnweb.freebsd.org/changeset/base/277586

Log:
  MFC r275811: MFV r275783:
  
  Convert the ARC flags to an enum.  Previously, public flags were defined in
  arc.h and private flags in arc.c, which could lead to confusion and
  programming errors.
  
  Consistently use 'hdr' (when referencing an arc_buf_hdr_t) instead of 'buf'
  or 'ab', because arc_buf_t variables are often named 'buf' as well.
  
  Illumos issue:
      5369 arc flags should be an enum
      5370 consistent arc_buf_hdr_t naming scheme
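  
  As a rough sketch of the resulting layout (the sys/arc.h hunk itself falls
  past the truncation point of the diff below), the old public ARC_* defines
  from arc.h and the private ARC_* defines removed from arc.c are folded into
  a single arc_flags_t enum.  The private bit positions (1 << 9 through
  1 << 18) are those of the defines deleted in the arc.c hunk; the public
  flag names appear throughout the diff, but the values shown for them here
  are assumed for illustration only:
  
      typedef enum arc_flags {
          /* Public flags, passed in by callers of arc_read() and friends. */
          ARC_FLAG_WAIT              = 1 << 1,   /* perform sync I/O */
          ARC_FLAG_NOWAIT            = 1 << 2,   /* perform async I/O */
          ARC_FLAG_PREFETCH          = 1 << 3,   /* I/O is a prefetch */
          ARC_FLAG_CACHED            = 1 << 4,   /* I/O was already in cache */
          ARC_FLAG_L2CACHE           = 1 << 5,   /* cache in L2ARC */
          ARC_FLAG_L2COMPRESS        = 1 << 6,   /* compress in L2ARC */
  
          /* Private flags, set only by ARC code in hdr->b_flags. */
          ARC_FLAG_IN_HASH_TABLE     = 1 << 9,   /* this buffer is hashed */
          ARC_FLAG_IO_IN_PROGRESS    = 1 << 10,  /* I/O in progress for buf */
          ARC_FLAG_IO_ERROR          = 1 << 11,  /* I/O failed for buf */
          ARC_FLAG_FREED_IN_READ     = 1 << 12,  /* buf freed while in read */
          ARC_FLAG_BUF_AVAILABLE     = 1 << 13,  /* block not in active use */
          ARC_FLAG_INDIRECT          = 1 << 14,  /* this is an indirect block */
          ARC_FLAG_FREE_IN_PROGRESS  = 1 << 15,  /* hdr about to be freed */
          ARC_FLAG_L2_WRITING        = 1 << 16,  /* L2ARC write in progress */
          ARC_FLAG_L2_EVICTED        = 1 << 17,  /* evicted during I/O */
          ARC_FLAG_L2_WRITE_HEAD     = 1 << 18,  /* head of write list */
      } arc_flags_t;
  
  Callers that previously declared "uint32_t flags = ARC_WAIT;" now declare
  "arc_flags_t flags = ARC_FLAG_WAIT;", as in the zdb.c hunk below.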

Modified:
  stable/10/cddl/contrib/opensolaris/cmd/zdb/zdb.c
  stable/10/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
  stable/10/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dbuf.c
  stable/10/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_diff.c
  stable/10/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_objset.c
  stable/10/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_send.c
  stable/10/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dmu_traverse.c
  stable/10/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/dsl_scan.c
  stable/10/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/sys/arc.h
  stable/10/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zil.c
  stable/10/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/cddl/contrib/opensolaris/cmd/zdb/zdb.c
==============================================================================
--- stable/10/cddl/contrib/opensolaris/cmd/zdb/zdb.c	Fri Jan 23 18:30:32 2015	(r277585)
+++ stable/10/cddl/contrib/opensolaris/cmd/zdb/zdb.c	Fri Jan 23 18:33:50 2015	(r277586)
@@ -1184,7 +1184,7 @@ visit_indirect(spa_t *spa, const dnode_p
 	print_indirect(bp, zb, dnp);
 
 	if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) {
-		uint32_t flags = ARC_WAIT;
+		arc_flags_t flags = ARC_FLAG_WAIT;
 		int i;
 		blkptr_t *cbp;
 		int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;

Modified: stable/10/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c
==============================================================================
--- stable/10/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c	Fri Jan 23 18:30:32 2015	(r277585)
+++ stable/10/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c	Fri Jan 23 18:33:50 2015	(r277586)
@@ -616,7 +616,7 @@ struct arc_buf_hdr {
 
 	arc_buf_hdr_t		*b_hash_next;
 	arc_buf_t		*b_buf;
-	uint32_t		b_flags;
+	arc_flags_t		b_flags;
 	uint32_t		b_datacnt;
 
 	arc_callback_t		*b_acb;
@@ -664,52 +664,26 @@ sysctl_vfs_zfs_arc_meta_limit(SYSCTL_HAN
 static arc_buf_t *arc_eviction_list;
 static kmutex_t arc_eviction_mtx;
 static arc_buf_hdr_t arc_eviction_hdr;
-static void arc_get_data_buf(arc_buf_t *buf);
-static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
-static int arc_evict_needed(arc_buf_contents_t type);
-static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
-#ifdef illumos
-static void arc_buf_watch(arc_buf_t *buf);
-#endif /* illumos */
-
-static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
 
 #define	GHOST_STATE(state)	\
 	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
 	(state) == arc_l2c_only)
 
-/*
- * Private ARC flags.  These flags are private ARC only flags that will show up
- * in b_flags in the arc_hdr_buf_t.  Some flags are publicly declared, and can
- * be passed in as arc_flags in things like arc_read.  However, these flags
- * should never be passed and should only be set by ARC code.  When adding new
- * public flags, make sure not to smash the private ones.
- */
-
-#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
-#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
-#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
-#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
-#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
-#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
-#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
-#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
-#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
-#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */
-
-#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
-#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
-#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
-#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_PREFETCH)
-#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
-#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
-#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
-#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
-#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
-				    (hdr)->b_l2hdr != NULL)
-#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
-#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
-#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)
+#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
+#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
+#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_FLAG_IO_ERROR)
+#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_FLAG_PREFETCH)
+#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FLAG_FREED_IN_READ)
+#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_FLAG_BUF_AVAILABLE)
+#define	HDR_FREE_IN_PROGRESS(hdr)	\
+	((hdr)->b_flags & ARC_FLAG_FREE_IN_PROGRESS)
+#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_FLAG_L2CACHE)
+#define	HDR_L2_READING(hdr)	\
+	((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS &&	\
+	    (hdr)->b_l2hdr != NULL)
+#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITING)
+#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
+#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
 
 /*
  * Other sizes
@@ -901,14 +875,20 @@ static kmutex_t l2arc_feed_thr_lock;
 static kcondvar_t l2arc_feed_thr_cv;
 static uint8_t l2arc_thread_exit;
 
-static void l2arc_read_done(zio_t *zio);
+static void arc_get_data_buf(arc_buf_t *);
+static void arc_access(arc_buf_hdr_t *, kmutex_t *);
+static int arc_evict_needed(arc_buf_contents_t);
+static void arc_evict_ghost(arc_state_t *, uint64_t, int64_t);
+static void arc_buf_watch(arc_buf_t *);
+
+static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
+static void l2arc_read_done(zio_t *);
 static void l2arc_hdr_stat_add(void);
 static void l2arc_hdr_stat_remove(void);
 
-static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr);
-static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr,
-    enum zio_compress c);
-static void l2arc_release_cdata_buf(arc_buf_hdr_t *ab);
+static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *);
+static void l2arc_decompress_zio(zio_t *, arc_buf_hdr_t *, enum zio_compress);
+static void l2arc_release_cdata_buf(arc_buf_hdr_t *);
 
 static uint64_t
 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
@@ -953,14 +933,14 @@ buf_hash_find(uint64_t spa, const blkptr
 	uint64_t birth = BP_PHYSICAL_BIRTH(bp);
 	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
-	arc_buf_hdr_t *buf;
+	arc_buf_hdr_t *hdr;
 
 	mutex_enter(hash_lock);
-	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
-	    buf = buf->b_hash_next) {
-		if (BUF_EQUAL(spa, dva, birth, buf)) {
+	for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
+	    hdr = hdr->b_hash_next) {
+		if (BUF_EQUAL(spa, dva, birth, hdr)) {
 			*lockp = hash_lock;
-			return (buf);
+			return (hdr);
 		}
 	}
 	mutex_exit(hash_lock);
@@ -975,27 +955,27 @@ buf_hash_find(uint64_t spa, const blkptr
  * Otherwise returns NULL.
  */
 static arc_buf_hdr_t *
-buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
+buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
 {
-	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
+	uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
 	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
-	arc_buf_hdr_t *fbuf;
+	arc_buf_hdr_t *fhdr;
 	uint32_t i;
 
-	ASSERT(!DVA_IS_EMPTY(&buf->b_dva));
-	ASSERT(buf->b_birth != 0);
-	ASSERT(!HDR_IN_HASH_TABLE(buf));
+	ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
+	ASSERT(hdr->b_birth != 0);
+	ASSERT(!HDR_IN_HASH_TABLE(hdr));
 	*lockp = hash_lock;
 	mutex_enter(hash_lock);
-	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
-	    fbuf = fbuf->b_hash_next, i++) {
-		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
-			return (fbuf);
+	for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
+	    fhdr = fhdr->b_hash_next, i++) {
+		if (BUF_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
+			return (fhdr);
 	}
 
-	buf->b_hash_next = buf_hash_table.ht_table[idx];
-	buf_hash_table.ht_table[idx] = buf;
-	buf->b_flags |= ARC_IN_HASH_TABLE;
+	hdr->b_hash_next = buf_hash_table.ht_table[idx];
+	buf_hash_table.ht_table[idx] = hdr;
+	hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE;
 
 	/* collect some hash table performance data */
 	if (i > 0) {
@@ -1013,22 +993,22 @@ buf_hash_insert(arc_buf_hdr_t *buf, kmut
 }
 
 static void
-buf_hash_remove(arc_buf_hdr_t *buf)
+buf_hash_remove(arc_buf_hdr_t *hdr)
 {
-	arc_buf_hdr_t *fbuf, **bufp;
-	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
+	arc_buf_hdr_t *fhdr, **hdrp;
+	uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
 
 	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
-	ASSERT(HDR_IN_HASH_TABLE(buf));
+	ASSERT(HDR_IN_HASH_TABLE(hdr));
 
-	bufp = &buf_hash_table.ht_table[idx];
-	while ((fbuf = *bufp) != buf) {
-		ASSERT(fbuf != NULL);
-		bufp = &fbuf->b_hash_next;
-	}
-	*bufp = buf->b_hash_next;
-	buf->b_hash_next = NULL;
-	buf->b_flags &= ~ARC_IN_HASH_TABLE;
+	hdrp = &buf_hash_table.ht_table[idx];
+	while ((fhdr = *hdrp) != hdr) {
+		ASSERT(fhdr != NULL);
+		hdrp = &fhdr->b_hash_next;
+	}
+	*hdrp = hdr->b_hash_next;
+	hdr->b_hash_next = NULL;
+	hdr->b_flags &= ~ARC_FLAG_IN_HASH_TABLE;
 
 	/* collect some hash table performance data */
 	ARCSTAT_BUMPDOWN(arcstat_hash_elements);
@@ -1065,12 +1045,12 @@ buf_fini(void)
 static int
 hdr_cons(void *vbuf, void *unused, int kmflag)
 {
-	arc_buf_hdr_t *buf = vbuf;
+	arc_buf_hdr_t *hdr = vbuf;
 
-	bzero(buf, sizeof (arc_buf_hdr_t));
-	refcount_create(&buf->b_refcnt);
-	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
-	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
+	bzero(hdr, sizeof (arc_buf_hdr_t));
+	refcount_create(&hdr->b_refcnt);
+	cv_init(&hdr->b_cv, NULL, CV_DEFAULT, NULL);
+	mutex_init(&hdr->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
 	arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
 
 	return (0);
@@ -1097,12 +1077,12 @@ buf_cons(void *vbuf, void *unused, int k
 static void
 hdr_dest(void *vbuf, void *unused)
 {
-	arc_buf_hdr_t *buf = vbuf;
+	arc_buf_hdr_t *hdr = vbuf;
 
-	ASSERT(BUF_EMPTY(buf));
-	refcount_destroy(&buf->b_refcnt);
-	cv_destroy(&buf->b_cv);
-	mutex_destroy(&buf->b_freeze_lock);
+	ASSERT(BUF_EMPTY(hdr));
+	refcount_destroy(&hdr->b_refcnt);
+	cv_destroy(&hdr->b_cv);
+	mutex_destroy(&hdr->b_freeze_lock);
 	arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
 }
 
@@ -1184,7 +1164,7 @@ arc_cksum_verify(arc_buf_t *buf)
 
 	mutex_enter(&buf->b_hdr->b_freeze_lock);
 	if (buf->b_hdr->b_freeze_cksum == NULL ||
-	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
+	    (buf->b_hdr->b_flags & ARC_FLAG_IO_ERROR)) {
 		mutex_exit(&buf->b_hdr->b_freeze_lock);
 		return;
 	}
@@ -1279,7 +1259,7 @@ arc_buf_thaw(arc_buf_t *buf)
 	if (zfs_flags & ZFS_DEBUG_MODIFY) {
 		if (buf->b_hdr->b_state != arc_anon)
 			panic("modifying non-anon buffer!");
-		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
+		if (buf->b_hdr->b_flags & ARC_FLAG_IO_IN_PROGRESS)
 			panic("modifying buffer while i/o in progress!");
 		arc_cksum_verify(buf);
 	}
@@ -1322,11 +1302,11 @@ arc_buf_freeze(arc_buf_t *buf)
 }
 
 static void
-get_buf_info(arc_buf_hdr_t *ab, arc_state_t *state, list_t **list, kmutex_t **lock)
+get_buf_info(arc_buf_hdr_t *hdr, arc_state_t *state, list_t **list, kmutex_t **lock)
 {
-	uint64_t buf_hashid = buf_hash(ab->b_spa, &ab->b_dva, ab->b_birth);
+	uint64_t buf_hashid = buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
 
-	if (ab->b_type == ARC_BUFC_METADATA)
+	if (hdr->b_type == ARC_BUFC_METADATA)
 		buf_hashid &= (ARC_BUFC_NUMMETADATALISTS - 1);
 	else {
 		buf_hashid &= (ARC_BUFC_NUMDATALISTS - 1);
@@ -1339,59 +1319,59 @@ get_buf_info(arc_buf_hdr_t *ab, arc_stat
 
 
 static void
-add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
+add_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
 {
 	ASSERT(MUTEX_HELD(hash_lock));
 
-	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
-	    (ab->b_state != arc_anon)) {
-		uint64_t delta = ab->b_size * ab->b_datacnt;
-		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
+	if ((refcount_add(&hdr->b_refcnt, tag) == 1) &&
+	    (hdr->b_state != arc_anon)) {
+		uint64_t delta = hdr->b_size * hdr->b_datacnt;
+		uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
 		list_t *list;
 		kmutex_t *lock;
 
-		get_buf_info(ab, ab->b_state, &list, &lock);
+		get_buf_info(hdr, hdr->b_state, &list, &lock);
 		ASSERT(!MUTEX_HELD(lock));
 		mutex_enter(lock);
-		ASSERT(list_link_active(&ab->b_arc_node));
-		list_remove(list, ab);
-		if (GHOST_STATE(ab->b_state)) {
-			ASSERT0(ab->b_datacnt);
-			ASSERT3P(ab->b_buf, ==, NULL);
-			delta = ab->b_size;
+		ASSERT(list_link_active(&hdr->b_arc_node));
+		list_remove(list, hdr);
+		if (GHOST_STATE(hdr->b_state)) {
+			ASSERT0(hdr->b_datacnt);
+			ASSERT3P(hdr->b_buf, ==, NULL);
+			delta = hdr->b_size;
 		}
 		ASSERT(delta > 0);
 		ASSERT3U(*size, >=, delta);
 		atomic_add_64(size, -delta);
 		mutex_exit(lock);
 		/* remove the prefetch flag if we get a reference */
-		if (ab->b_flags & ARC_PREFETCH)
-			ab->b_flags &= ~ARC_PREFETCH;
+		if (hdr->b_flags & ARC_FLAG_PREFETCH)
+			hdr->b_flags &= ~ARC_FLAG_PREFETCH;
 	}
 }
 
 static int
-remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
+remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
 {
 	int cnt;
-	arc_state_t *state = ab->b_state;
+	arc_state_t *state = hdr->b_state;
 
 	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
 	ASSERT(!GHOST_STATE(state));
 
-	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
+	if (((cnt = refcount_remove(&hdr->b_refcnt, tag)) == 0) &&
 	    (state != arc_anon)) {
-		uint64_t *size = &state->arcs_lsize[ab->b_type];
+		uint64_t *size = &state->arcs_lsize[hdr->b_type];
 		list_t *list;
 		kmutex_t *lock;
 
-		get_buf_info(ab, state, &list, &lock);
+		get_buf_info(hdr, state, &list, &lock);
 		ASSERT(!MUTEX_HELD(lock));
 		mutex_enter(lock);
-		ASSERT(!list_link_active(&ab->b_arc_node));
-		list_insert_head(list, ab);
-		ASSERT(ab->b_datacnt > 0);
-		atomic_add_64(size, ab->b_size * ab->b_datacnt);
+		ASSERT(!list_link_active(&hdr->b_arc_node));
+		list_insert_head(list, hdr);
+		ASSERT(hdr->b_datacnt > 0);
+		atomic_add_64(size, hdr->b_size * hdr->b_datacnt);
 		mutex_exit(lock);
 	}
 	return (cnt);
@@ -1402,21 +1382,22 @@ remove_reference(arc_buf_hdr_t *ab, kmut
  * for the buffer must be held by the caller.
  */
 static void
-arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
+arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
+    kmutex_t *hash_lock)
 {
-	arc_state_t *old_state = ab->b_state;
-	int64_t refcnt = refcount_count(&ab->b_refcnt);
+	arc_state_t *old_state = hdr->b_state;
+	int64_t refcnt = refcount_count(&hdr->b_refcnt);
 	uint64_t from_delta, to_delta;
 	list_t *list;
 	kmutex_t *lock;
 
 	ASSERT(MUTEX_HELD(hash_lock));
 	ASSERT3P(new_state, !=, old_state);
-	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
-	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
-	ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
+	ASSERT(refcnt == 0 || hdr->b_datacnt > 0);
+	ASSERT(hdr->b_datacnt == 0 || !GHOST_STATE(new_state));
+	ASSERT(hdr->b_datacnt <= 1 || old_state != arc_anon);
 
-	from_delta = to_delta = ab->b_datacnt * ab->b_size;
+	from_delta = to_delta = hdr->b_datacnt * hdr->b_size;
 
 	/*
 	 * If this buffer is evictable, transfer it from the
@@ -1425,24 +1406,24 @@ arc_change_state(arc_state_t *new_state,
 	if (refcnt == 0) {
 		if (old_state != arc_anon) {
 			int use_mutex;
-			uint64_t *size = &old_state->arcs_lsize[ab->b_type];
+			uint64_t *size = &old_state->arcs_lsize[hdr->b_type];
 
-			get_buf_info(ab, old_state, &list, &lock);
+			get_buf_info(hdr, old_state, &list, &lock);
 			use_mutex = !MUTEX_HELD(lock);
 			if (use_mutex)
 				mutex_enter(lock);
 
-			ASSERT(list_link_active(&ab->b_arc_node));
-			list_remove(list, ab);
+			ASSERT(list_link_active(&hdr->b_arc_node));
+			list_remove(list, hdr);
 
 			/*
 			 * If prefetching out of the ghost cache,
 			 * we will have a non-zero datacnt.
 			 */
-			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
+			if (GHOST_STATE(old_state) && hdr->b_datacnt == 0) {
 				/* ghost elements have a ghost size */
-				ASSERT(ab->b_buf == NULL);
-				from_delta = ab->b_size;
+				ASSERT(hdr->b_buf == NULL);
+				from_delta = hdr->b_size;
 			}
 			ASSERT3U(*size, >=, from_delta);
 			atomic_add_64(size, -from_delta);
@@ -1452,20 +1433,20 @@ arc_change_state(arc_state_t *new_state,
 		}
 		if (new_state != arc_anon) {
 			int use_mutex;
-			uint64_t *size = &new_state->arcs_lsize[ab->b_type];
+			uint64_t *size = &new_state->arcs_lsize[hdr->b_type];
 
-			get_buf_info(ab, new_state, &list, &lock);
+			get_buf_info(hdr, new_state, &list, &lock);
 			use_mutex = !MUTEX_HELD(lock);
 			if (use_mutex)
 				mutex_enter(lock);
 
-			list_insert_head(list, ab);
+			list_insert_head(list, hdr);
 
 			/* ghost elements have a ghost size */
 			if (GHOST_STATE(new_state)) {
-				ASSERT(ab->b_datacnt == 0);
-				ASSERT(ab->b_buf == NULL);
-				to_delta = ab->b_size;
+				ASSERT(hdr->b_datacnt == 0);
+				ASSERT(hdr->b_buf == NULL);
+				to_delta = hdr->b_size;
 			}
 			atomic_add_64(size, to_delta);
 
@@ -1474,9 +1455,9 @@ arc_change_state(arc_state_t *new_state,
 		}
 	}
 
-	ASSERT(!BUF_EMPTY(ab));
-	if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
-		buf_hash_remove(ab);
+	ASSERT(!BUF_EMPTY(hdr));
+	if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
+		buf_hash_remove(hdr);
 
 	/* adjust state sizes */
 	if (to_delta)
@@ -1485,7 +1466,7 @@ arc_change_state(arc_state_t *new_state,
 		ASSERT3U(old_state->arcs_size, >=, from_delta);
 		atomic_add_64(&old_state->arcs_size, -from_delta);
 	}
-	ab->b_state = new_state;
+	hdr->b_state = new_state;
 
 	/* adjust l2arc hdr stats */
 	if (new_state == arc_l2c_only)
@@ -1687,7 +1668,7 @@ arc_buf_add_ref(arc_buf_t *buf, void* ta
 	arc_access(hdr, hash_lock);
 	mutex_exit(hash_lock);
 	ARCSTAT_BUMP(arcstat_hits);
-	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
+	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_FLAG_PREFETCH),
 	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 	    data, metadata, hits);
 }
@@ -1918,7 +1899,7 @@ arc_buf_free(arc_buf_t *buf, void *tag)
 		} else {
 			ASSERT(buf == hdr->b_buf);
 			ASSERT(buf->b_efunc == NULL);
-			hdr->b_flags |= ARC_BUF_AVAILABLE;
+			hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
 		}
 		mutex_exit(hash_lock);
 	} else if (HDR_IO_IN_PROGRESS(hdr)) {
@@ -1969,7 +1950,7 @@ arc_buf_remove_ref(arc_buf_t *buf, void*
 	} else if (no_callback) {
 		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
 		ASSERT(buf->b_efunc == NULL);
-		hdr->b_flags |= ARC_BUF_AVAILABLE;
+		hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
 	}
 	ASSERT(no_callback || hdr->b_datacnt > 1 ||
 	    refcount_is_zero(&hdr->b_refcnt));
@@ -2045,7 +2026,7 @@ arc_evict(arc_state_t *state, uint64_t s
 	arc_state_t *evicted_state;
 	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
 	int64_t bytes_remaining;
-	arc_buf_hdr_t *ab, *ab_prev = NULL;
+	arc_buf_hdr_t *hdr, *hdr_prev = NULL;
 	list_t *evicted_list, *list, *evicted_list_start, *list_start;
 	kmutex_t *lock, *evicted_lock;
 	kmutex_t *hash_lock;
@@ -2085,25 +2066,25 @@ evict_start:
 	mutex_enter(lock);
 	mutex_enter(evicted_lock);
 
-	for (ab = list_tail(list); ab; ab = ab_prev) {
-		ab_prev = list_prev(list, ab);
-		bytes_remaining -= (ab->b_size * ab->b_datacnt);
+	for (hdr = list_tail(list); hdr; hdr = hdr_prev) {
+		hdr_prev = list_prev(list, hdr);
+		bytes_remaining -= (hdr->b_size * hdr->b_datacnt);
 		/* prefetch buffers have a minimum lifespan */
-		if (HDR_IO_IN_PROGRESS(ab) ||
-		    (spa && ab->b_spa != spa) ||
-		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
-		    ddi_get_lbolt() - ab->b_arc_access <
+		if (HDR_IO_IN_PROGRESS(hdr) ||
+		    (spa && hdr->b_spa != spa) ||
+		    (hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT) &&
+		    ddi_get_lbolt() - hdr->b_arc_access <
 		    arc_min_prefetch_lifespan)) {
 			skipped++;
 			continue;
 		}
 		/* "lookahead" for better eviction candidate */
-		if (recycle && ab->b_size != bytes &&
-		    ab_prev && ab_prev->b_size == bytes)
+		if (recycle && hdr->b_size != bytes &&
+		    hdr_prev && hdr_prev->b_size == bytes)
 			continue;
 
 		/* ignore markers */
-		if (ab->b_spa == 0)
+		if (hdr->b_spa == 0)
 			continue;
 
 		/*
@@ -2116,34 +2097,34 @@ evict_start:
 		 * the hot code path, so don't sleep.
 		 */
 		if (!recycle && count++ > arc_evict_iterations) {
-			list_insert_after(list, ab, &marker);
+			list_insert_after(list, hdr, &marker);
 			mutex_exit(evicted_lock);
 			mutex_exit(lock);
 			kpreempt(KPREEMPT_SYNC);
 			mutex_enter(lock);
 			mutex_enter(evicted_lock);
-			ab_prev = list_prev(list, &marker);
+			hdr_prev = list_prev(list, &marker);
 			list_remove(list, &marker);
 			count = 0;
 			continue;
 		}
 
-		hash_lock = HDR_LOCK(ab);
+		hash_lock = HDR_LOCK(hdr);
 		have_lock = MUTEX_HELD(hash_lock);
 		if (have_lock || mutex_tryenter(hash_lock)) {
-			ASSERT0(refcount_count(&ab->b_refcnt));
-			ASSERT(ab->b_datacnt > 0);
-			while (ab->b_buf) {
-				arc_buf_t *buf = ab->b_buf;
+			ASSERT0(refcount_count(&hdr->b_refcnt));
+			ASSERT(hdr->b_datacnt > 0);
+			while (hdr->b_buf) {
+				arc_buf_t *buf = hdr->b_buf;
 				if (!mutex_tryenter(&buf->b_evict_lock)) {
 					missed += 1;
 					break;
 				}
 				if (buf->b_data) {
-					bytes_evicted += ab->b_size;
-					if (recycle && ab->b_type == type &&
-					    ab->b_size == bytes &&
-					    !HDR_L2_WRITING(ab)) {
+					bytes_evicted += hdr->b_size;
+					if (recycle && hdr->b_type == type &&
+					    hdr->b_size == bytes &&
+					    !HDR_L2_WRITING(hdr)) {
 						stolen = buf->b_data;
 						recycle = FALSE;
 					}
@@ -2152,7 +2133,7 @@ evict_start:
 					mutex_enter(&arc_eviction_mtx);
 					arc_buf_destroy(buf,
 					    buf->b_data == stolen, FALSE);
-					ab->b_buf = buf->b_next;
+					hdr->b_buf = buf->b_next;
 					buf->b_hdr = &arc_eviction_hdr;
 					buf->b_next = arc_eviction_list;
 					arc_eviction_list = buf;
@@ -2165,26 +2146,26 @@ evict_start:
 				}
 			}
 
-			if (ab->b_l2hdr) {
+			if (hdr->b_l2hdr) {
 				ARCSTAT_INCR(arcstat_evict_l2_cached,
-				    ab->b_size);
+				    hdr->b_size);
 			} else {
-				if (l2arc_write_eligible(ab->b_spa, ab)) {
+				if (l2arc_write_eligible(hdr->b_spa, hdr)) {
 					ARCSTAT_INCR(arcstat_evict_l2_eligible,
-					    ab->b_size);
+					    hdr->b_size);
 				} else {
 					ARCSTAT_INCR(
 					    arcstat_evict_l2_ineligible,
-					    ab->b_size);
+					    hdr->b_size);
 				}
 			}
 
-			if (ab->b_datacnt == 0) {
-				arc_change_state(evicted_state, ab, hash_lock);
-				ASSERT(HDR_IN_HASH_TABLE(ab));
-				ab->b_flags |= ARC_IN_HASH_TABLE;
-				ab->b_flags &= ~ARC_BUF_AVAILABLE;
-				DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
+			if (hdr->b_datacnt == 0) {
+				arc_change_state(evicted_state, hdr, hash_lock);
+				ASSERT(HDR_IN_HASH_TABLE(hdr));
+				hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE;
+				hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
+				DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
 			}
 			if (!have_lock)
 				mutex_exit(hash_lock);
@@ -2245,7 +2226,7 @@ evict_start:
 static void
 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
 {
-	arc_buf_hdr_t *ab, *ab_prev;
+	arc_buf_hdr_t *hdr, *hdr_prev;
 	arc_buf_hdr_t marker = { 0 };
 	list_t *list, *list_start;
 	kmutex_t *hash_lock, *lock;
@@ -2270,18 +2251,18 @@ evict_start:
 	lock = ARCS_LOCK(state, idx + offset);
 
 	mutex_enter(lock);
-	for (ab = list_tail(list); ab; ab = ab_prev) {
-		ab_prev = list_prev(list, ab);
-		if (ab->b_type > ARC_BUFC_NUMTYPES)
-			panic("invalid ab=%p", (void *)ab);
-		if (spa && ab->b_spa != spa)
+	for (hdr = list_tail(list); hdr; hdr = hdr_prev) {
+		hdr_prev = list_prev(list, hdr);
+		if (hdr->b_type > ARC_BUFC_NUMTYPES)
+			panic("invalid hdr=%p", (void *)hdr);
+		if (spa && hdr->b_spa != spa)
 			continue;
 
 		/* ignore markers */
-		if (ab->b_spa == 0)
+		if (hdr->b_spa == 0)
 			continue;
 
-		hash_lock = HDR_LOCK(ab);
+		hash_lock = HDR_LOCK(hdr);
 		/* caller may be trying to modify this buffer, skip it */
 		if (MUTEX_HELD(hash_lock))
 			continue;
@@ -2293,35 +2274,35 @@ evict_start:
 		 * before reacquiring the lock.
 		 */
 		if (count++ > arc_evict_iterations) {
-			list_insert_after(list, ab, &marker);
+			list_insert_after(list, hdr, &marker);
 			mutex_exit(lock);
 			kpreempt(KPREEMPT_SYNC);
 			mutex_enter(lock);
-			ab_prev = list_prev(list, &marker);
+			hdr_prev = list_prev(list, &marker);
 			list_remove(list, &marker);
 			count = 0;
 			continue;
 		}
 		if (mutex_tryenter(hash_lock)) {
-			ASSERT(!HDR_IO_IN_PROGRESS(ab));
-			ASSERT(ab->b_buf == NULL);
+			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
+			ASSERT(hdr->b_buf == NULL);
 			ARCSTAT_BUMP(arcstat_deleted);
-			bytes_deleted += ab->b_size;
+			bytes_deleted += hdr->b_size;
 
-			if (ab->b_l2hdr != NULL) {
+			if (hdr->b_l2hdr != NULL) {
 				/*
 				 * This buffer is cached on the 2nd Level ARC;
 				 * don't destroy the header.
 				 */
-				arc_change_state(arc_l2c_only, ab, hash_lock);
+				arc_change_state(arc_l2c_only, hdr, hash_lock);
 				mutex_exit(hash_lock);
 			} else {
-				arc_change_state(arc_anon, ab, hash_lock);
+				arc_change_state(arc_anon, hdr, hash_lock);
 				mutex_exit(hash_lock);
-				arc_hdr_destroy(ab);
+				arc_hdr_destroy(hdr);
 			}
 
-			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
+			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
 			if (bytes >= 0 && bytes_deleted >= bytes)
 				break;
 		} else if (bytes < 0) {
@@ -2330,12 +2311,12 @@ evict_start:
 			 * hash lock to become available. Once its
 			 * available, restart from where we left off.
 			 */
-			list_insert_after(list, ab, &marker);
+			list_insert_after(list, hdr, &marker);
 			mutex_exit(lock);
 			mutex_enter(hash_lock);
 			mutex_exit(hash_lock);
 			mutex_enter(lock);
-			ab_prev = list_prev(list, &marker);
+			hdr_prev = list_prev(list, &marker);
 			list_remove(list, &marker);
 		} else {
 			bufs_skipped += 1;
@@ -2920,7 +2901,8 @@ arc_get_data_buf(arc_buf_t *buf)
 	 * will end up on the mru list; so steal space from there.
 	 */
 	if (state == arc_mfu_ghost)
-		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
+		state = buf->b_hdr->b_flags & ARC_FLAG_PREFETCH ?
+		    arc_mru : arc_mfu;
 	else if (state == arc_mru_ghost)
 		state = arc_mru;
 
@@ -2976,25 +2958,25 @@ out:
  * NOTE: the hash lock is dropped in this function.
  */
 static void
-arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
+arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
 {
 	clock_t now;
 
 	ASSERT(MUTEX_HELD(hash_lock));
 
-	if (buf->b_state == arc_anon) {
+	if (hdr->b_state == arc_anon) {
 		/*
 		 * This buffer is not in the cache, and does not
 		 * appear in our "ghost" list.  Add the new buffer
 		 * to the MRU state.
 		 */
 
-		ASSERT(buf->b_arc_access == 0);
-		buf->b_arc_access = ddi_get_lbolt();
-		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
-		arc_change_state(arc_mru, buf, hash_lock);
+		ASSERT(hdr->b_arc_access == 0);
+		hdr->b_arc_access = ddi_get_lbolt();
+		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
+		arc_change_state(arc_mru, hdr, hash_lock);
 
-	} else if (buf->b_state == arc_mru) {
+	} else if (hdr->b_state == arc_mru) {
 		now = ddi_get_lbolt();
 
 		/*
@@ -3005,14 +2987,14 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t 
 		 * - move the buffer to the head of the list if this is
 		 *   another prefetch (to make it less likely to be evicted).
 		 */
-		if ((buf->b_flags & ARC_PREFETCH) != 0) {
-			if (refcount_count(&buf->b_refcnt) == 0) {
-				ASSERT(list_link_active(&buf->b_arc_node));
+		if ((hdr->b_flags & ARC_FLAG_PREFETCH) != 0) {
+			if (refcount_count(&hdr->b_refcnt) == 0) {
+				ASSERT(list_link_active(&hdr->b_arc_node));
 			} else {
-				buf->b_flags &= ~ARC_PREFETCH;
+				hdr->b_flags &= ~ARC_FLAG_PREFETCH;
 				ARCSTAT_BUMP(arcstat_mru_hits);
 			}
-			buf->b_arc_access = now;
+			hdr->b_arc_access = now;
 			return;
 		}
 
@@ -3021,18 +3003,18 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t 
 		 * but it is still in the cache. Move it to the MFU
 		 * state.
 		 */
-		if (now > buf->b_arc_access + ARC_MINTIME) {
+		if (now > hdr->b_arc_access + ARC_MINTIME) {
 			/*
 			 * More than 125ms have passed since we
 			 * instantiated this buffer.  Move it to the
 			 * most frequently used state.
 			 */
-			buf->b_arc_access = now;
-			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
-			arc_change_state(arc_mfu, buf, hash_lock);
+			hdr->b_arc_access = now;
+			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
+			arc_change_state(arc_mfu, hdr, hash_lock);
 		}
 		ARCSTAT_BUMP(arcstat_mru_hits);
-	} else if (buf->b_state == arc_mru_ghost) {
+	} else if (hdr->b_state == arc_mru_ghost) {
 		arc_state_t	*new_state;
 		/*
 		 * This buffer has been "accessed" recently, but
@@ -3040,21 +3022,21 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t 
 		 * MFU state.
 		 */
 
-		if (buf->b_flags & ARC_PREFETCH) {
+		if (hdr->b_flags & ARC_FLAG_PREFETCH) {
 			new_state = arc_mru;
-			if (refcount_count(&buf->b_refcnt) > 0)
-				buf->b_flags &= ~ARC_PREFETCH;
-			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
+			if (refcount_count(&hdr->b_refcnt) > 0)
+				hdr->b_flags &= ~ARC_FLAG_PREFETCH;
+			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
 		} else {
 			new_state = arc_mfu;
-			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
+			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
 		}
 
-		buf->b_arc_access = ddi_get_lbolt();
-		arc_change_state(new_state, buf, hash_lock);
+		hdr->b_arc_access = ddi_get_lbolt();
+		arc_change_state(new_state, hdr, hash_lock);
 
 		ARCSTAT_BUMP(arcstat_mru_ghost_hits);
-	} else if (buf->b_state == arc_mfu) {
+	} else if (hdr->b_state == arc_mfu) {
 		/*
 		 * This buffer has been accessed more than once and is
 		 * still in the cache.  Keep it in the MFU state.
@@ -3064,13 +3046,13 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t 
 		 * If it was a prefetch, we will explicitly move it to
 		 * the head of the list now.
 		 */
-		if ((buf->b_flags & ARC_PREFETCH) != 0) {
-			ASSERT(refcount_count(&buf->b_refcnt) == 0);
-			ASSERT(list_link_active(&buf->b_arc_node));
+		if ((hdr->b_flags & ARC_FLAG_PREFETCH) != 0) {
+			ASSERT(refcount_count(&hdr->b_refcnt) == 0);
+			ASSERT(list_link_active(&hdr->b_arc_node));
 		}
 		ARCSTAT_BUMP(arcstat_mfu_hits);
-		buf->b_arc_access = ddi_get_lbolt();
-	} else if (buf->b_state == arc_mfu_ghost) {
+		hdr->b_arc_access = ddi_get_lbolt();
+	} else if (hdr->b_state == arc_mfu_ghost) {
 		arc_state_t	*new_state = arc_mfu;
 		/*
 		 * This buffer has been accessed more than once but has
@@ -3078,28 +3060,28 @@ arc_access(arc_buf_hdr_t *buf, kmutex_t 
 		 * MFU state.
 		 */
 
-		if (buf->b_flags & ARC_PREFETCH) {
+		if (hdr->b_flags & ARC_FLAG_PREFETCH) {
 			/*
 			 * This is a prefetch access...
 			 * move this block back to the MRU state.
 			 */
-			ASSERT0(refcount_count(&buf->b_refcnt));
+			ASSERT0(refcount_count(&hdr->b_refcnt));
 			new_state = arc_mru;
 		}
 
-		buf->b_arc_access = ddi_get_lbolt();
-		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
-		arc_change_state(new_state, buf, hash_lock);
+		hdr->b_arc_access = ddi_get_lbolt();
+		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
+		arc_change_state(new_state, hdr, hash_lock);
 
 		ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
-	} else if (buf->b_state == arc_l2c_only) {
+	} else if (hdr->b_state == arc_l2c_only) {
 		/*
 		 * This buffer is on the 2nd Level ARC.
 		 */
 
-		buf->b_arc_access = ddi_get_lbolt();
-		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
-		arc_change_state(arc_mfu, buf, hash_lock);
+		hdr->b_arc_access = ddi_get_lbolt();
+		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
+		arc_change_state(arc_mfu, hdr, hash_lock);
 	} else {
 		ASSERT(!"invalid arc state");
 	}
@@ -3167,9 +3149,9 @@ arc_read_done(zio_t *zio)
 		    (found == hdr && HDR_L2_READING(hdr)));
 	}
 
-	hdr->b_flags &= ~ARC_L2_EVICTED;
-	if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
-		hdr->b_flags &= ~ARC_L2CACHE;
+	hdr->b_flags &= ~ARC_FLAG_L2_EVICTED;
+	if (l2arc_noprefetch && (hdr->b_flags & ARC_FLAG_PREFETCH))
+		hdr->b_flags &= ~ARC_FLAG_L2CACHE;
 
 	/* byteswap if necessary */
 	callback_list = hdr->b_acb;
@@ -3211,18 +3193,18 @@ arc_read_done(zio_t *zio)
 		}
 	}
 	hdr->b_acb = NULL;
-	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
+	hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
 	ASSERT(!HDR_BUF_AVAILABLE(hdr));
 	if (abuf == buf) {
 		ASSERT(buf->b_efunc == NULL);
 		ASSERT(hdr->b_datacnt == 1);
-		hdr->b_flags |= ARC_BUF_AVAILABLE;
+		hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
 	}
 
 	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
 
 	if (zio->io_error != 0) {
-		hdr->b_flags |= ARC_IO_ERROR;
+		hdr->b_flags |= ARC_FLAG_IO_ERROR;
 		if (hdr->b_state != arc_anon)
 			arc_change_state(arc_anon, hdr, hash_lock);
 		if (HDR_IN_HASH_TABLE(hdr))
@@ -3288,8 +3270,8 @@ arc_read_done(zio_t *zio)
  */
 int
 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
-    void *private, zio_priority_t priority, int zio_flags, uint32_t *arc_flags,
-    const zbookmark_phys_t *zb)
+    void *private, zio_priority_t priority, int zio_flags,
+    arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
 {
 	arc_buf_hdr_t *hdr = NULL;
 	arc_buf_t *buf = NULL;
@@ -3311,16 +3293,16 @@ top:
 
 	if (hdr != NULL && hdr->b_datacnt > 0) {
 
-		*arc_flags |= ARC_CACHED;
+		*arc_flags |= ARC_FLAG_CACHED;
 
 		if (HDR_IO_IN_PROGRESS(hdr)) {
 
-			if (*arc_flags & ARC_WAIT) {
+			if (*arc_flags & ARC_FLAG_WAIT) {
 				cv_wait(&hdr->b_cv, hash_lock);
 				mutex_exit(hash_lock);
 				goto top;
 			}
-			ASSERT(*arc_flags & ARC_NOWAIT);
+			ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
 
 			if (done) {
 				arc_callback_t	*acb = NULL;
@@ -3358,24 +3340,24 @@ top:
 			ASSERT(buf->b_data);
 			if (HDR_BUF_AVAILABLE(hdr)) {
 				ASSERT(buf->b_efunc == NULL);
-				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
+				hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
 			} else {
 				buf = arc_buf_clone(buf);
 			}
 
-		} else if (*arc_flags & ARC_PREFETCH &&
+		} else if (*arc_flags & ARC_FLAG_PREFETCH &&
 		    refcount_count(&hdr->b_refcnt) == 0) {
-			hdr->b_flags |= ARC_PREFETCH;
+			hdr->b_flags |= ARC_FLAG_PREFETCH;
 		}
 		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
 		arc_access(hdr, hash_lock);
-		if (*arc_flags & ARC_L2CACHE)
-			hdr->b_flags |= ARC_L2CACHE;
-		if (*arc_flags & ARC_L2COMPRESS)
-			hdr->b_flags |= ARC_L2COMPRESS;
+		if (*arc_flags & ARC_FLAG_L2CACHE)
+			hdr->b_flags |= ARC_FLAG_L2CACHE;

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

