svn commit: r353561 - in vendor-sys/illumos/dist/uts/common/fs/zfs: . sys

Andriy Gapon avg@FreeBSD.org
Tue Oct 15 14:33:44 UTC 2019


Author: avg
Date: Tue Oct 15 14:33:42 2019
New Revision: 353561
URL: https://svnweb.freebsd.org/changeset/base/353561

Log:
  10343 ZoL: Prefix all refcount functions with zfs_
  
  illumos/illumos-gate at e914ace2e9d9bf2dbf9a1f1ce81cb776022096f5
  https://github.com/illumos/illumos-gate/commit/e914ace2e9d9bf2dbf9a1f1ce81cb776022096f5
  
  https://www.illumos.org/issues/10343
    On the openzfs feature/porting matrix, this is listed as:
    prefix to refcount funcs/types
    Having these changes will make it easier to share other work across the
    different ZFS operating systems.
    PR 7963 424fd7c3e Prefix all refcount functions with zfs_
    PR 7885 & 7932 c13060e47 Linux 4.19-rc3+ compat: Remove refcount_t compat
    PR 5823 & 5842 4859fe796 Linux 4.11 compat: avoid refcount_t name conflict
  
  Author: Tim Schumacher <timschumi@gmx.de>

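The rename is purely mechanical: the refcount_t type becomes zfs_refcount_t,
and every refcount_*() function gains a zfs_ prefix with its signature
otherwise unchanged (including the _many() and transfer-ownership variants
seen in the hunks below). The following is a minimal sketch of a converted
call site; example_t and the example_*() helpers are hypothetical, only the
zfs_refcount_*() calls and their signatures are taken from the diff:

#include <sys/refcount.h>

typedef struct example {
	zfs_refcount_t	ex_holds;		/* was: refcount_t */
} example_t;

static void
example_init(example_t *ex)
{
	zfs_refcount_create(&ex->ex_holds);	/* was: refcount_create() */
}

static void
example_hold(example_t *ex, void *tag)
{
	(void) zfs_refcount_add(&ex->ex_holds, tag);
}

static void
example_rele(example_t *ex, void *tag)
{
	/* zfs_refcount_remove() returns the remaining hold count. */
	if (zfs_refcount_remove(&ex->ex_holds, tag) == 0)
		zfs_refcount_destroy(&ex->ex_holds);
}
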
Modified:
  vendor-sys/illumos/dist/uts/common/fs/zfs/abd.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/arc.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/dbuf.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/dmu.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/dmu_tx.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/dnode.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/dnode_sync.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/dsl_dataset.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/dsl_destroy.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/metaslab.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/refcount.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/rrwlock.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/sa.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/spa.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/spa_misc.c
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/abd.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/dbuf.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/dmu_tx.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/dnode.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/dsl_dataset.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/metaslab_impl.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/refcount.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/rrwlock.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/sa_impl.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/spa_impl.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/sys/zap.h
  vendor-sys/illumos/dist/uts/common/fs/zfs/zio.c

Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/abd.c
==============================================================================
--- vendor-sys/illumos/dist/uts/common/fs/zfs/abd.c	Tue Oct 15 14:29:30 2019	(r353560)
+++ vendor-sys/illumos/dist/uts/common/fs/zfs/abd.c	Tue Oct 15 14:33:42 2019	(r353561)
@@ -291,7 +291,7 @@ abd_alloc(size_t size, boolean_t is_metadata)
 	}
 	abd->abd_size = size;
 	abd->abd_parent = NULL;
-	refcount_create(&abd->abd_children);
+	zfs_refcount_create(&abd->abd_children);
 
 	abd->abd_u.abd_scatter.abd_offset = 0;
 	abd->abd_u.abd_scatter.abd_chunk_size = zfs_abd_chunk_size;
@@ -318,7 +318,7 @@ abd_free_scatter(abd_t *abd)
 		abd_free_chunk(abd->abd_u.abd_scatter.abd_chunks[i]);
 	}
 
-	refcount_destroy(&abd->abd_children);
+	zfs_refcount_destroy(&abd->abd_children);
 	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
 	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
 	ABDSTAT_INCR(abdstat_scatter_chunk_waste,
@@ -345,7 +345,7 @@ abd_alloc_linear(size_t size, boolean_t is_metadata)
 	}
 	abd->abd_size = size;
 	abd->abd_parent = NULL;
-	refcount_create(&abd->abd_children);
+	zfs_refcount_create(&abd->abd_children);
 
 	if (is_metadata) {
 		abd->abd_u.abd_linear.abd_buf = zio_buf_alloc(size);
@@ -368,7 +368,7 @@ abd_free_linear(abd_t *abd)
 		zio_data_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
 	}
 
-	refcount_destroy(&abd->abd_children);
+	zfs_refcount_destroy(&abd->abd_children);
 	ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
 	ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);
 
@@ -474,8 +474,8 @@ abd_get_offset(abd_t *sabd, size_t off)
 
 	abd->abd_size = sabd->abd_size - off;
 	abd->abd_parent = sabd;
-	refcount_create(&abd->abd_children);
-	(void) refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
+	zfs_refcount_create(&abd->abd_children);
+	(void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
 
 	return (abd);
 }
@@ -499,7 +499,7 @@ abd_get_from_buf(void *buf, size_t size)
 	abd->abd_flags = ABD_FLAG_LINEAR;
 	abd->abd_size = size;
 	abd->abd_parent = NULL;
-	refcount_create(&abd->abd_children);
+	zfs_refcount_create(&abd->abd_children);
 
 	abd->abd_u.abd_linear.abd_buf = buf;
 
@@ -517,11 +517,11 @@ abd_put(abd_t *abd)
 	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
 
 	if (abd->abd_parent != NULL) {
-		(void) refcount_remove_many(&abd->abd_parent->abd_children,
+		(void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
 		    abd->abd_size, abd);
 	}
 
-	refcount_destroy(&abd->abd_children);
+	zfs_refcount_destroy(&abd->abd_children);
 	abd_free_struct(abd);
 }
 
@@ -553,7 +553,7 @@ abd_borrow_buf(abd_t *abd, size_t n)
 	} else {
 		buf = zio_buf_alloc(n);
 	}
-	(void) refcount_add_many(&abd->abd_children, n, buf);
+	(void) zfs_refcount_add_many(&abd->abd_children, n, buf);
 
 	return (buf);
 }
@@ -585,7 +585,7 @@ abd_return_buf(abd_t *abd, void *buf, size_t n)
 		ASSERT0(abd_cmp_buf(abd, buf, n));
 		zio_buf_free(buf, n);
 	}
-	(void) refcount_remove_many(&abd->abd_children, n, buf);
+	(void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
 }
 
 void

Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/arc.c
==============================================================================
--- vendor-sys/illumos/dist/uts/common/fs/zfs/arc.c	Tue Oct 15 14:29:30 2019	(r353560)
+++ vendor-sys/illumos/dist/uts/common/fs/zfs/arc.c	Tue Oct 15 14:33:42 2019	(r353561)
@@ -428,12 +428,12 @@ typedef struct arc_state {
 	/*
 	 * total amount of evictable data in this state
 	 */
-	refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
+	zfs_refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
 	/*
 	 * total amount of data in this state; this includes: evictable,
 	 * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
 	 */
-	refcount_t arcs_size;
+	zfs_refcount_t arcs_size;
 } arc_state_t;
 
 /* The 6 states: */
@@ -947,7 +947,7 @@ typedef struct l1arc_buf_hdr {
 	clock_t			b_arc_access;
 
 	/* self protecting */
-	refcount_t		b_refcnt;
+	zfs_refcount_t		b_refcnt;
 
 	arc_callback_t		*b_acb;
 	abd_t			*b_pabd;
@@ -1124,7 +1124,7 @@ struct l2arc_dev {
 	kmutex_t		l2ad_mtx;	/* lock for buffer list */
 	list_t			l2ad_buflist;	/* buffer list */
 	list_node_t		l2ad_node;	/* device list node */
-	refcount_t		l2ad_alloc;	/* allocated bytes */
+	zfs_refcount_t		l2ad_alloc;	/* allocated bytes */
 };
 
 static list_t L2ARC_dev_list;			/* device list */
@@ -1341,7 +1341,7 @@ hdr_full_cons(void *vbuf, void *unused, int kmflag)
 
 	bzero(hdr, HDR_FULL_SIZE);
 	cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
-	refcount_create(&hdr->b_l1hdr.b_refcnt);
+	zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
 	mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
 	multilist_link_init(&hdr->b_l1hdr.b_arc_node);
 	arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
@@ -1386,7 +1386,7 @@ hdr_full_dest(void *vbuf, void *unused)
 
 	ASSERT(HDR_EMPTY(hdr));
 	cv_destroy(&hdr->b_l1hdr.b_cv);
-	refcount_destroy(&hdr->b_l1hdr.b_refcnt);
+	zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt);
 	mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
 	ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
 	arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
@@ -2065,21 +2065,21 @@ arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_
 		ASSERT0(hdr->b_l1hdr.b_bufcnt);
 		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
 		ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
-		(void) refcount_add_many(&state->arcs_esize[type],
+		(void) zfs_refcount_add_many(&state->arcs_esize[type],
 		    HDR_GET_LSIZE(hdr), hdr);
 		return;
 	}
 
 	ASSERT(!GHOST_STATE(state));
 	if (hdr->b_l1hdr.b_pabd != NULL) {
-		(void) refcount_add_many(&state->arcs_esize[type],
+		(void) zfs_refcount_add_many(&state->arcs_esize[type],
 		    arc_hdr_size(hdr), hdr);
 	}
 	for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
 	    buf = buf->b_next) {
 		if (arc_buf_is_shared(buf))
 			continue;
-		(void) refcount_add_many(&state->arcs_esize[type],
+		(void) zfs_refcount_add_many(&state->arcs_esize[type],
 		    arc_buf_size(buf), buf);
 	}
 }
@@ -2100,21 +2100,21 @@ arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_
 		ASSERT0(hdr->b_l1hdr.b_bufcnt);
 		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
 		ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
-		(void) refcount_remove_many(&state->arcs_esize[type],
+		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 		    HDR_GET_LSIZE(hdr), hdr);
 		return;
 	}
 
 	ASSERT(!GHOST_STATE(state));
 	if (hdr->b_l1hdr.b_pabd != NULL) {
-		(void) refcount_remove_many(&state->arcs_esize[type],
+		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 		    arc_hdr_size(hdr), hdr);
 	}
 	for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
 	    buf = buf->b_next) {
 		if (arc_buf_is_shared(buf))
 			continue;
-		(void) refcount_remove_many(&state->arcs_esize[type],
+		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 		    arc_buf_size(buf), buf);
 	}
 }
@@ -2131,13 +2131,13 @@ add_reference(arc_buf_hdr_t *hdr, void *tag)
 	ASSERT(HDR_HAS_L1HDR(hdr));
 	if (!MUTEX_HELD(HDR_LOCK(hdr))) {
 		ASSERT(hdr->b_l1hdr.b_state == arc_anon);
-		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
 	}
 
 	arc_state_t *state = hdr->b_l1hdr.b_state;
 
-	if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
+	if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
 	    (state != arc_anon)) {
 		/* We don't use the L2-only state list. */
 		if (state != arc_l2c_only) {
@@ -2169,7 +2169,7 @@ remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lo
 	 * arc_l2c_only counts as a ghost state so we don't need to explicitly
 	 * check to prevent usage of the arc_l2c_only list.
 	 */
-	if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
+	if (((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
 	    (state != arc_anon)) {
 		multilist_insert(state->arcs_list[arc_buf_type(hdr)], hdr);
 		ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
@@ -2201,7 +2201,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t
 	 */
 	if (HDR_HAS_L1HDR(hdr)) {
 		old_state = hdr->b_l1hdr.b_state;
-		refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
+		refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
 		bufcnt = hdr->b_l1hdr.b_bufcnt;
 		update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL);
 	} else {
@@ -2271,7 +2271,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t
 			 * the reference. As a result, we use the arc
 			 * header pointer for the reference.
 			 */
-			(void) refcount_add_many(&new_state->arcs_size,
+			(void) zfs_refcount_add_many(&new_state->arcs_size,
 			    HDR_GET_LSIZE(hdr), hdr);
 			ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
 		} else {
@@ -2297,13 +2297,15 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t
 				if (arc_buf_is_shared(buf))
 					continue;
 
-				(void) refcount_add_many(&new_state->arcs_size,
+				(void) zfs_refcount_add_many(
+				    &new_state->arcs_size,
 				    arc_buf_size(buf), buf);
 			}
 			ASSERT3U(bufcnt, ==, buffers);
 
 			if (hdr->b_l1hdr.b_pabd != NULL) {
-				(void) refcount_add_many(&new_state->arcs_size,
+				(void) zfs_refcount_add_many(
+				    &new_state->arcs_size,
 				    arc_hdr_size(hdr), hdr);
 			} else {
 				ASSERT(GHOST_STATE(old_state));
@@ -2325,7 +2327,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t
 			 * header on the ghost state.
 			 */
 
-			(void) refcount_remove_many(&old_state->arcs_size,
+			(void) zfs_refcount_remove_many(&old_state->arcs_size,
 			    HDR_GET_LSIZE(hdr), hdr);
 		} else {
 			uint32_t buffers = 0;
@@ -2350,13 +2352,13 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t
 				if (arc_buf_is_shared(buf))
 					continue;
 
-				(void) refcount_remove_many(
+				(void) zfs_refcount_remove_many(
 				    &old_state->arcs_size, arc_buf_size(buf),
 				    buf);
 			}
 			ASSERT3U(bufcnt, ==, buffers);
 			ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
-			(void) refcount_remove_many(
+			(void) zfs_refcount_remove_many(
 			    &old_state->arcs_size, arc_hdr_size(hdr), hdr);
 		}
 	}
@@ -2610,8 +2612,8 @@ arc_return_buf(arc_buf_t *buf, void *tag)
 
 	ASSERT3P(buf->b_data, !=, NULL);
 	ASSERT(HDR_HAS_L1HDR(hdr));
-	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
-	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
+	(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
+	(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
 
 	arc_loaned_bytes_update(-arc_buf_size(buf));
 }
@@ -2624,8 +2626,8 @@ arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
 
 	ASSERT3P(buf->b_data, !=, NULL);
 	ASSERT(HDR_HAS_L1HDR(hdr));
-	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
-	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
+	(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
+	(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
 
 	arc_loaned_bytes_update(arc_buf_size(buf));
 }
@@ -2652,13 +2654,13 @@ arc_hdr_free_on_write(arc_buf_hdr_t *hdr)
 
 	/* protected by hash lock, if in the hash table */
 	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
-		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 		ASSERT(state != arc_anon && state != arc_l2c_only);
 
-		(void) refcount_remove_many(&state->arcs_esize[type],
+		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 		    size, hdr);
 	}
-	(void) refcount_remove_many(&state->arcs_size, size, hdr);
+	(void) zfs_refcount_remove_many(&state->arcs_size, size, hdr);
 	if (type == ARC_BUFC_METADATA) {
 		arc_space_return(size, ARC_SPACE_META);
 	} else {
@@ -2688,7 +2690,7 @@ arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
 	 * refcount ownership to the hdr since it always owns
 	 * the refcount whenever an arc_buf_t is shared.
 	 */
-	refcount_transfer_ownership(&state->arcs_size, buf, hdr);
+	zfs_refcount_transfer_ownership(&state->arcs_size, buf, hdr);
 	hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
 	abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
 	    HDR_ISTYPE_METADATA(hdr));
@@ -2718,7 +2720,7 @@ arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
 	 * We are no longer sharing this buffer so we need
 	 * to transfer its ownership to the rightful owner.
 	 */
-	refcount_transfer_ownership(&state->arcs_size, hdr, buf);
+	zfs_refcount_transfer_ownership(&state->arcs_size, hdr, buf);
 	arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
 	abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
 	abd_put(hdr->b_l1hdr.b_pabd);
@@ -2943,7 +2945,7 @@ arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsi
 	 * it references and compressed arc enablement.
 	 */
 	arc_hdr_alloc_pabd(hdr);
-	ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+	ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 
 	return (hdr);
 }
@@ -3045,8 +3047,10 @@ arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old,
 	 * the wrong pointer address when calling arc_hdr_destroy() later.
 	 */
 
-	(void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
-	(void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr);
+	(void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr),
+	    hdr);
+	(void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr),
+	    nhdr);
 
 	buf_discard_identity(hdr);
 	kmem_cache_free(old, hdr);
@@ -3126,7 +3130,7 @@ arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
 
 	vdev_space_update(dev->l2ad_vdev, -psize, 0, 0);
 
-	(void) refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
+	(void) zfs_refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
 	arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
 }
 
@@ -3136,7 +3140,7 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
 	if (HDR_HAS_L1HDR(hdr)) {
 		ASSERT(hdr->b_l1hdr.b_buf == NULL ||
 		    hdr->b_l1hdr.b_bufcnt > 0);
-		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 		ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
 	}
 	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
@@ -3297,7 +3301,7 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
 		return (bytes_evicted);
 	}
 
-	ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
+	ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
 	while (hdr->b_l1hdr.b_buf) {
 		arc_buf_t *buf = hdr->b_l1hdr.b_buf;
 		if (!mutex_tryenter(&buf->b_evict_lock)) {
@@ -3599,7 +3603,7 @@ arc_flush_state(arc_state_t *state, uint64_t spa, arc_
 {
 	uint64_t evicted = 0;
 
-	while (refcount_count(&state->arcs_esize[type]) != 0) {
+	while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
 		evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);
 
 		if (!retry)
@@ -3623,8 +3627,9 @@ arc_adjust_impl(arc_state_t *state, uint64_t spa, int6
 {
 	int64_t delta;
 
-	if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) {
-		delta = MIN(refcount_count(&state->arcs_esize[type]), bytes);
+	if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
+		delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
+		    bytes);
 		return (arc_evict_state(state, spa, delta, type));
 	}
 
@@ -3649,8 +3654,8 @@ arc_adjust_meta(uint64_t meta_used)
 	 * evict some from the MRU here, and some from the MFU below.
 	 */
 	target = MIN((int64_t)(meta_used - arc_meta_limit),
-	    (int64_t)(refcount_count(&arc_anon->arcs_size) +
-	    refcount_count(&arc_mru->arcs_size) - arc_p));
+	    (int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
+	    zfs_refcount_count(&arc_mru->arcs_size) - arc_p));
 
 	total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
 
@@ -3660,7 +3665,7 @@ arc_adjust_meta(uint64_t meta_used)
 	 * space allotted to the MFU (which is defined as arc_c - arc_p).
 	 */
 	target = MIN((int64_t)(meta_used - arc_meta_limit),
-	    (int64_t)(refcount_count(&arc_mfu->arcs_size) -
+	    (int64_t)(zfs_refcount_count(&arc_mfu->arcs_size) -
 	    (arc_c - arc_p)));
 
 	total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
@@ -3772,8 +3777,8 @@ arc_adjust(void)
 	 * arc_p here, and then evict more from the MFU below.
 	 */
 	target = MIN((int64_t)(asize - arc_c),
-	    (int64_t)(refcount_count(&arc_anon->arcs_size) +
-	    refcount_count(&arc_mru->arcs_size) + ameta - arc_p));
+	    (int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
+	    zfs_refcount_count(&arc_mru->arcs_size) + ameta - arc_p));
 
 	/*
 	 * If we're below arc_meta_min, always prefer to evict data.
@@ -3857,8 +3862,8 @@ arc_adjust(void)
 	 * cache. The following logic enforces these limits on the ghost
 	 * caches, and evicts from them as needed.
 	 */
-	target = refcount_count(&arc_mru->arcs_size) +
-	    refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
+	target = zfs_refcount_count(&arc_mru->arcs_size) +
+	    zfs_refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
 
 	bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
 	total_evicted += bytes;
@@ -3876,8 +3881,8 @@ arc_adjust(void)
 	 *	mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
 	 *		    mru ghost + mfu ghost <= arc_c
 	 */
-	target = refcount_count(&arc_mru_ghost->arcs_size) +
-	    refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
+	target = zfs_refcount_count(&arc_mru_ghost->arcs_size) +
+	    zfs_refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
 
 	bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
 	total_evicted += bytes;
@@ -4315,8 +4320,8 @@ arc_adapt(int bytes, arc_state_t *state)
 {
 	int mult;
 	uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
-	int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size);
-	int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size);
+	int64_t mrug_size = zfs_refcount_count(&arc_mru_ghost->arcs_size);
+	int64_t mfug_size = zfs_refcount_count(&arc_mfu_ghost->arcs_size);
 
 	if (state == arc_l2c_only)
 		return;
@@ -4493,7 +4498,7 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, v
 	 */
 	if (!GHOST_STATE(state)) {
 
-		(void) refcount_add_many(&state->arcs_size, size, tag);
+		(void) zfs_refcount_add_many(&state->arcs_size, size, tag);
 
 		/*
 		 * If this is reached via arc_read, the link is
@@ -4505,8 +4510,8 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, v
 		 * trying to [add|remove]_reference it.
 		 */
 		if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
-			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
-			(void) refcount_add_many(&state->arcs_esize[type],
+			ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+			(void) zfs_refcount_add_many(&state->arcs_esize[type],
 			    size, tag);
 		}
 
@@ -4516,8 +4521,8 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, v
 		 */
 		if (aggsum_compare(&arc_size, arc_c) < 0 &&
 		    hdr->b_l1hdr.b_state == arc_anon &&
-		    (refcount_count(&arc_anon->arcs_size) +
-		    refcount_count(&arc_mru->arcs_size) > arc_p))
+		    (zfs_refcount_count(&arc_anon->arcs_size) +
+		    zfs_refcount_count(&arc_mru->arcs_size) > arc_p))
 			arc_p = MIN(arc_c, arc_p + size);
 	}
 }
@@ -4554,13 +4559,13 @@ arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, 
 
 	/* protected by hash lock, if in the hash table */
 	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
-		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 		ASSERT(state != arc_anon && state != arc_l2c_only);
 
-		(void) refcount_remove_many(&state->arcs_esize[type],
+		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 		    size, tag);
 	}
-	(void) refcount_remove_many(&state->arcs_size, size, tag);
+	(void) zfs_refcount_remove_many(&state->arcs_size, size, tag);
 
 	VERIFY3U(hdr->b_type, ==, type);
 	if (type == ARC_BUFC_METADATA) {
@@ -4607,7 +4612,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
 		 *   another prefetch (to make it less likely to be evicted).
 		 */
 		if (HDR_PREFETCH(hdr)) {
-			if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
+			if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
 				/* link protected by hash lock */
 				ASSERT(multilist_link_active(
 				    &hdr->b_l1hdr.b_arc_node));
@@ -4645,7 +4650,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
 
 		if (HDR_PREFETCH(hdr)) {
 			new_state = arc_mru;
-			if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
+			if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
 				arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
 			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
 		} else {
@@ -4668,7 +4673,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
 		 * the head of the list now.
 		 */
 		if ((HDR_PREFETCH(hdr)) != 0) {
-			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+			ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 			/* link protected by hash_lock */
 			ASSERT(multilist_link_active(&hdr->b_l1hdr.b_arc_node));
 		}
@@ -4687,7 +4692,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
 			 * This is a prefetch access...
 			 * move this block back to the MRU state.
 			 */
-			ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
+			ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
 			new_state = arc_mru;
 		}
 
@@ -4869,7 +4874,7 @@ arc_read_done(zio_t *zio)
 		ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
 	}
 
-	ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
+	ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
 	    callback_list != NULL);
 
 	if (no_zio_error) {
@@ -4880,7 +4885,7 @@ arc_read_done(zio_t *zio)
 			arc_change_state(arc_anon, hdr, hash_lock);
 		if (HDR_IN_HASH_TABLE(hdr))
 			buf_hash_remove(hdr);
-		freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
+		freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
 	}
 
 	/*
@@ -4900,7 +4905,7 @@ arc_read_done(zio_t *zio)
 		 * in the cache).
 		 */
 		ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
-		freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
+		freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
 	}
 
 	/* execute each callback and free its structure */
@@ -5062,7 +5067,7 @@ top:
 			VERIFY0(arc_buf_alloc_impl(hdr, private,
 			    compressed_read, B_TRUE, &buf));
 		} else if (*arc_flags & ARC_FLAG_PREFETCH &&
-		    refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
+		    zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
 			arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
 		}
 		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
@@ -5118,7 +5123,7 @@ top:
 			ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
 			ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state));
 			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
-			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+			ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 			ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
 			ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
 
@@ -5339,7 +5344,7 @@ arc_freed(spa_t *spa, const blkptr_t *bp)
 	 * this hdr, then we don't destroy the hdr.
 	 */
 	if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
-	    refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
+	    zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
 		arc_change_state(arc_anon, hdr, hash_lock);
 		arc_hdr_destroy(hdr);
 		mutex_exit(hash_lock);
@@ -5383,7 +5388,7 @@ arc_release(arc_buf_t *buf, void *tag)
 		ASSERT(HDR_EMPTY(hdr));
 
 		ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
-		ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
+		ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
 		ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
 
 		hdr->b_l1hdr.b_arc_access = 0;
@@ -5411,7 +5416,7 @@ arc_release(arc_buf_t *buf, void *tag)
 	ASSERT3P(state, !=, arc_anon);
 
 	/* this buffer is not on any list */
-	ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
+	ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
 
 	if (HDR_HAS_L2HDR(hdr)) {
 		mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
@@ -5501,12 +5506,13 @@ arc_release(arc_buf_t *buf, void *tag)
 		ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
 		ASSERT3P(state, !=, arc_l2c_only);
 
-		(void) refcount_remove_many(&state->arcs_size,
+		(void) zfs_refcount_remove_many(&state->arcs_size,
 		    arc_buf_size(buf), buf);
 
-		if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
+		if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
 			ASSERT3P(state, !=, arc_l2c_only);
-			(void) refcount_remove_many(&state->arcs_esize[type],
+			(void) zfs_refcount_remove_many(
+			    &state->arcs_esize[type],
 			    arc_buf_size(buf), buf);
 		}
 
@@ -5523,21 +5529,21 @@ arc_release(arc_buf_t *buf, void *tag)
 		nhdr = arc_hdr_alloc(spa, psize, lsize, compress, type);
 		ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
 		ASSERT0(nhdr->b_l1hdr.b_bufcnt);
-		ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt));
+		ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
 		VERIFY3U(nhdr->b_type, ==, type);
 		ASSERT(!HDR_SHARED_DATA(nhdr));
 
 		nhdr->b_l1hdr.b_buf = buf;
 		nhdr->b_l1hdr.b_bufcnt = 1;
-		(void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
+		(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
 		buf->b_hdr = nhdr;
 
 		mutex_exit(&buf->b_evict_lock);
-		(void) refcount_add_many(&arc_anon->arcs_size,
+		(void) zfs_refcount_add_many(&arc_anon->arcs_size,
 		    arc_buf_size(buf), buf);
 	} else {
 		mutex_exit(&buf->b_evict_lock);
-		ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
+		ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
 		/* protected by hash lock, or hdr is on arc_anon */
 		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
@@ -5569,7 +5575,7 @@ arc_referenced(arc_buf_t *buf)
 	int referenced;
 
 	mutex_enter(&buf->b_evict_lock);
-	referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
+	referenced = (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
 	mutex_exit(&buf->b_evict_lock);
 	return (referenced);
 }
@@ -5584,7 +5590,7 @@ arc_write_ready(zio_t *zio)
 	uint64_t psize = BP_IS_HOLE(zio->io_bp) ? 0 : BP_GET_PSIZE(zio->io_bp);
 
 	ASSERT(HDR_HAS_L1HDR(hdr));
-	ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
+	ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
 	ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
 
 	/*
@@ -5737,7 +5743,7 @@ arc_write_done(zio_t *zio)
 				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
 					panic("bad overwrite, hdr=%p exists=%p",
 					    (void *)hdr, (void *)exists);
-				ASSERT(refcount_is_zero(
+				ASSERT(zfs_refcount_is_zero(
 				    &exists->b_l1hdr.b_refcnt));
 				arc_change_state(arc_anon, exists, hash_lock);
 				mutex_exit(hash_lock);
@@ -5767,7 +5773,7 @@ arc_write_done(zio_t *zio)
 		arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
 	}
 
-	ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+	ASSERT(!zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 	callback->awcb_done(zio, buf, callback->awcb_private);
 
 	abd_put(zio->io_abd);
@@ -5912,7 +5918,7 @@ arc_tempreserve_space(spa_t *spa, uint64_t reserve, ui
 	/* assert that it has not wrapped around */
 	ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
 
-	anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
+	anon_size = MAX((int64_t)(zfs_refcount_count(&arc_anon->arcs_size) -
 	    arc_loaned_bytes), 0);
 
 	/*
@@ -5947,9 +5953,10 @@ arc_tempreserve_space(spa_t *spa, uint64_t reserve, ui
 	    anon_size > arc_c * zfs_arc_anon_limit_percent / 100 &&
 	    spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) {
 		uint64_t meta_esize =
-		    refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+		    zfs_refcount_count(
+		    &arc_anon->arcs_esize[ARC_BUFC_METADATA]);
 		uint64_t data_esize =
-		    refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+		    zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
 		dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
 		    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
 		    arc_tempreserve >> 10, meta_esize >> 10,
@@ -5964,11 +5971,11 @@ static void
 arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
     kstat_named_t *evict_data, kstat_named_t *evict_metadata)
 {
-	size->value.ui64 = refcount_count(&state->arcs_size);
+	size->value.ui64 = zfs_refcount_count(&state->arcs_size);
 	evict_data->value.ui64 =
-	    refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
+	    zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
 	evict_metadata->value.ui64 =
-	    refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
+	    zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
 }
 
 static int
@@ -6099,25 +6106,25 @@ arc_state_init(void)
 	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
 	    arc_state_multilist_index_func);
 
-	refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
-	refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
-	refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
-	refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
-	refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
-	refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
 
-	refcount_create(&arc_anon->arcs_size);
-	refcount_create(&arc_mru->arcs_size);
-	refcount_create(&arc_mru_ghost->arcs_size);
-	refcount_create(&arc_mfu->arcs_size);
-	refcount_create(&arc_mfu_ghost->arcs_size);
-	refcount_create(&arc_l2c_only->arcs_size);
+	zfs_refcount_create(&arc_anon->arcs_size);
+	zfs_refcount_create(&arc_mru->arcs_size);
+	zfs_refcount_create(&arc_mru_ghost->arcs_size);
+	zfs_refcount_create(&arc_mfu->arcs_size);
+	zfs_refcount_create(&arc_mfu_ghost->arcs_size);
+	zfs_refcount_create(&arc_l2c_only->arcs_size);
 
 	aggsum_init(&arc_meta_used, 0);
 	aggsum_init(&arc_size, 0);
@@ -6131,25 +6138,25 @@ arc_state_init(void)
 static void
 arc_state_fini(void)
 {
-	refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
-	refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
-	refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
-	refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
-	refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
-	refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
 
-	refcount_destroy(&arc_anon->arcs_size);
-	refcount_destroy(&arc_mru->arcs_size);
-	refcount_destroy(&arc_mru_ghost->arcs_size);
-	refcount_destroy(&arc_mfu->arcs_size);
-	refcount_destroy(&arc_mfu_ghost->arcs_size);
-	refcount_destroy(&arc_l2c_only->arcs_size);
+	zfs_refcount_destroy(&arc_anon->arcs_size);
+	zfs_refcount_destroy(&arc_mru->arcs_size);
+	zfs_refcount_destroy(&arc_mru_ghost->arcs_size);
+	zfs_refcount_destroy(&arc_mfu->arcs_size);
+	zfs_refcount_destroy(&arc_mfu_ghost->arcs_size);
+	zfs_refcount_destroy(&arc_l2c_only->arcs_size);
 
 	multilist_destroy(arc_mru->arcs_list[ARC_BUFC_METADATA]);
 	multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
@@ -6729,7 +6736,7 @@ top:
 			ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));
 
 			bytes_dropped += arc_hdr_size(hdr);
-			(void) refcount_remove_many(&dev->l2ad_alloc,
+			(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
 			    arc_hdr_size(hdr), hdr);
 		}
 
@@ -7135,7 +7142,8 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint
 			list_insert_head(&dev->l2ad_buflist, hdr);
 			mutex_exit(&dev->l2ad_mtx);
 
-			(void) refcount_add_many(&dev->l2ad_alloc, psize, hdr);
+			(void) zfs_refcount_add_many(&dev->l2ad_alloc, psize,
+			    hdr);
 
 			/*
 			 * Normally the L2ARC can use the hdr's data, but if
@@ -7367,7 +7375,7 @@ l2arc_add_vdev(spa_t *spa, vdev_t *vd)
 	    offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
 
 	vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
-	refcount_create(&adddev->l2ad_alloc);
+	zfs_refcount_create(&adddev->l2ad_alloc);
 
 	/*
 	 * Add device to global list
@@ -7413,7 +7421,7 @@ l2arc_remove_vdev(vdev_t *vd)
 	l2arc_evict(remdev, 0, B_TRUE);
 	list_destroy(&remdev->l2ad_buflist);
 	mutex_destroy(&remdev->l2ad_mtx);
-	refcount_destroy(&remdev->l2ad_alloc);
+	zfs_refcount_destroy(&remdev->l2ad_alloc);
 	kmem_free(remdev, sizeof (l2arc_dev_t));
 }
 

Modified: vendor-sys/illumos/dist/uts/common/fs/zfs/dbuf.c
==============================================================================
--- vendor-sys/illumos/dist/uts/common/fs/zfs/dbuf.c	Tue Oct 15 14:29:30 2019	(r353560)
+++ vendor-sys/illumos/dist/uts/common/fs/zfs/dbuf.c	Tue Oct 15 14:33:42 2019	(r353561)
@@ -105,7 +105,7 @@ static boolean_t dbuf_evict_thread_exit;
  */
 typedef struct dbuf_cache {
 	multilist_t *cache;
-	refcount_t size;
+	zfs_refcount_t size;
 } dbuf_cache_t;
 dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
 
@@ -178,7 +178,7 @@ dbuf_cons(void *vdb, void *unused, int kmflag)
 	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
 	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
 	multilist_link_init(&db->db_cache_link);
-	refcount_create(&db->db_holds);
+	zfs_refcount_create(&db->db_holds);
 
 	return (0);
 }
@@ -191,7 +191,7 @@ dbuf_dest(void *vdb, void *unused)
 	mutex_destroy(&db->db_mtx);
 	cv_destroy(&db->db_changed);
 	ASSERT(!multilist_link_active(&db->db_cache_link));
-	refcount_destroy(&db->db_holds);
+	zfs_refcount_destroy(&db->db_holds);
 }
 
 /*
@@ -313,7 +313,7 @@ dbuf_hash_remove(dmu_buf_impl_t *db)
 	 * We musn't hold db_mtx to maintain lock ordering:
 	 * DBUF_HASH_MUTEX > db_mtx.
 	 */
-	ASSERT(refcount_is_zero(&db->db_holds));
+	ASSERT(zfs_refcount_is_zero(&db->db_holds));
 	ASSERT(db->db_state == DB_EVICTING);
 	ASSERT(!MUTEX_HELD(&db->db_mtx));
 
@@ -350,7 +350,7 @@ dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_
 	ASSERT(db->db.db_data != NULL);
 	ASSERT3U(db->db_state, ==, DB_CACHED);
 
-	holds = refcount_count(&db->db_holds);
+	holds = zfs_refcount_count(&db->db_holds);
 	if (verify_type == DBVU_EVICTING) {
 		/*
 		 * Immediate eviction occurs when holds == dirtycnt.
@@ -447,7 +447,8 @@ dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
 		 * Sanity check for small-memory systems: don't allocate too
 		 * much memory for this purpose.
 		 */
-		if (refcount_count(&dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
+		if (zfs_refcount_count(
+		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
 		    dbuf_metadata_cache_max_bytes) {
 			dbuf_metadata_cache_overflow++;
 			DTRACE_PROBE1(dbuf__metadata__cache__overflow,
@@ -496,7 +497,7 @@ dbuf_cache_above_hiwater(void)
 	uint64_t dbuf_cache_hiwater_bytes =
 	    (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100;
 
-	return (refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
+	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
 	    dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes);
 }
 
@@ -506,7 +507,7 @@ dbuf_cache_above_lowater(void)
 	uint64_t dbuf_cache_lowater_bytes =
 	    (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100;
 
-	return (refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
+	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
 	    dbuf_cache_max_bytes - dbuf_cache_lowater_bytes);
 }
 
@@ -533,7 +534,8 @@ dbuf_evict_one(void)
 	if (db != NULL) {
 		multilist_sublist_remove(mls, db);
 		multilist_sublist_unlock(mls);
-		(void) refcount_remove_many(&dbuf_caches[DB_DBUF_CACHE].size,
+		(void) zfs_refcount_remove_many(
+		    &dbuf_caches[DB_DBUF_CACHE].size,
 		    db->db.db_size, db);
 		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
 		db->db_caching_status = DB_NO_CACHE;
@@ -599,7 +601,7 @@ dbuf_evict_notify(void)
 	 * because it's OK to occasionally make the wrong decision here,
 	 * and grabbing the lock results in massive lock contention.
 	 */
-	if (refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
+	if (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
 	    dbuf_cache_max_bytes) {
 		if (dbuf_cache_above_hiwater())
 			dbuf_evict_one();
@@ -667,7 +669,7 @@ retry:
 		    multilist_create(sizeof (dmu_buf_impl_t),
 		    offsetof(dmu_buf_impl_t, db_cache_link),
 		    dbuf_cache_multilist_index_func);
-		refcount_create(&dbuf_caches[dcs].size);
+		zfs_refcount_create(&dbuf_caches[dcs].size);
 	}
 
 	dbuf_evict_thread_exit = B_FALSE;
@@ -701,7 +703,7 @@ dbuf_fini(void)
 	cv_destroy(&dbuf_evict_cv);
 
 	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
-		refcount_destroy(&dbuf_caches[dcs].size);
+		zfs_refcount_destroy(&dbuf_caches[dcs].size);
 		multilist_destroy(dbuf_caches[dcs].cache);
 	}
 }
@@ -887,7 +889,7 @@ dbuf_loan_arcbuf(dmu_buf_impl_t *db)
 
 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
 	mutex_enter(&db->db_mtx);
-	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
+	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
 		int blksz = db->db.db_size;
 		spa_t *spa = db->db_objset->os_spa;
 
@@ -949,7 +951,7 @@ dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
 	/*
 	 * All reads are synchronous, so we must have a hold on the dbuf
 	 */
-	ASSERT(refcount_count(&db->db_holds) > 0);
+	ASSERT(zfs_refcount_count(&db->db_holds) > 0);
 	ASSERT(db->db_buf == NULL);
 	ASSERT(db->db.db_data == NULL);
 	if (buf == NULL) {
@@ -986,7 +988,7 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_
 
 	DB_DNODE_ENTER(db);

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

