git: e9ca38abc79b - stable/12 - tcp: For hostcache performance, use atomics instead of counters

Richard Scheffenegger rscheff at FreeBSD.org
Fri Apr 16 21:32:28 UTC 2021


The branch stable/12 has been updated by rscheff:

URL: https://cgit.FreeBSD.org/src/commit/?id=e9ca38abc79b5ee0cc0052fb4e213b3dd5720716

commit e9ca38abc79b5ee0cc0052fb4e213b3dd5720716
Author:     Richard Scheffenegger <rscheff at FreeBSD.org>
AuthorDate: 2021-04-01 08:00:32 +0000
Commit:     Richard Scheffenegger <rscheff at FreeBSD.org>
CommitDate: 2021-04-16 20:52:37 +0000

    tcp: For hostcache performance, use atomics instead of counters
    
    As the tcp hostcache is accessed frequently on some classes of
    servers, it was recommended to use atomic_add/subtract rather
    than (per-CPU distributed) counters, whose per-CPU values must be
    summed on every read at a high cost to cache efficiency (see the
    illustrative sketch before the diff below).
    
    PR: 254333
    MFC after: 2 weeks
    Sponsored by: NetApp, Inc.
    Reviewed By: #transport, tuexen, jtl
    Differential Revision: https://reviews.freebsd.org/D29522
    
    (cherry picked from commit 529a2a0f2765f6c57c50a5af6be242c03bf714e3)
---
 sys/netinet/tcp_hostcache.c | 24 +++++++++++-------------
 sys/netinet/tcp_hostcache.h |  2 +-
 2 files changed, 12 insertions(+), 14 deletions(-)
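
For readers unfamiliar with the two kernel facilities the commit message
contrasts, here is a minimal userland sketch using C11 atomics. It is an
illustration only: the CPU count, helper names, and padding are assumptions
for the example and are not code from the FreeBSD tree. A counter(9)-style
distributed counter keeps one slot per CPU, so updates are cheap, but reading
the total must touch one cache line per CPU; a single atomic counter (as in
atomic(9)) makes every update an atomic read-modify-write but yields the
current value with one plain load, which is what the hostcache needs when it
compares cache_count against cache_limit on every insert.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NCPU 8				/* illustrative CPU count */

/* counter(9)-like: one cache-line-padded slot per CPU; updates stay local. */
static struct { _Alignas(64) uint64_t val; } percpu_counts[NCPU];

static void
percpu_add(int cpu, uint64_t delta)
{
	percpu_counts[cpu].val += delta;	/* no shared cache-line bouncing */
}

static uint64_t
percpu_fetch(void)
{
	uint64_t sum = 0;

	/* Reading the total walks every per-CPU slot: one cache line each. */
	for (int i = 0; i < NCPU; i++)
		sum += percpu_counts[i].val;
	return (sum);
}

/* atomic(9)-like: one shared word, cheap to read, contended on update. */
static _Atomic unsigned int atomic_count;

int
main(void)
{
	percpu_add(0, 1);
	percpu_add(3, 1);
	atomic_fetch_add(&atomic_count, 2);

	printf("per-CPU total: %ju\n", (uintmax_t)percpu_fetch());
	printf("atomic total:  %u\n",
	    atomic_load_explicit(&atomic_count, memory_order_relaxed));
	return (0);
}
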

diff --git a/sys/netinet/tcp_hostcache.c b/sys/netinet/tcp_hostcache.c
index 5a7e3fb63c7e..a92a12109bc4 100644
--- a/sys/netinet/tcp_hostcache.c
+++ b/sys/netinet/tcp_hostcache.c
@@ -146,8 +146,8 @@ SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, bucketlimit,
     CTLFLAG_VNET | CTLFLAG_RDTUN, &VNET_NAME(tcp_hostcache.bucket_limit), 0,
     "Per-bucket hash limit for hostcache");
 
-SYSCTL_COUNTER_U64(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
-     &VNET_NAME(tcp_hostcache.cache_count),
+SYSCTL_UINT(_net_inet_tcp_hostcache, OID_AUTO, count, CTLFLAG_VNET | CTLFLAG_RD,
+     &VNET_NAME(tcp_hostcache.cache_count), 0,
     "Current number of entries in hostcache");
 
 SYSCTL_INT(_net_inet_tcp_hostcache, OID_AUTO, expire, CTLFLAG_VNET | CTLFLAG_RW,
@@ -196,8 +196,7 @@ tcp_hc_init(void)
 	/*
 	 * Initialize hostcache structures.
 	 */
-	V_tcp_hostcache.cache_count = counter_u64_alloc(M_WAITOK);
-	counter_u64_zero(V_tcp_hostcache.cache_count);
+	atomic_store_int(&V_tcp_hostcache.cache_count, 0);
 	V_tcp_hostcache.hashsize = TCP_HOSTCACHE_HASHSIZE;
 	V_tcp_hostcache.bucket_limit = TCP_HOSTCACHE_BUCKETLIMIT;
 	V_tcp_hostcache.expire = TCP_HOSTCACHE_EXPIRE;
@@ -265,9 +264,6 @@ tcp_hc_destroy(void)
 	/* Purge all hc entries. */
 	tcp_hc_purge_internal(1);
 
-	/* Release the counter */
-	counter_u64_free(V_tcp_hostcache.cache_count);
-
 	/* Free the uma zone and the allocated hash table. */
 	uma_zdestroy(V_tcp_hostcache.zone);
 
@@ -375,7 +371,7 @@ tcp_hc_insert(struct in_conninfo *inc)
 	 * If the bucket limit is reached, reuse the least-used element.
 	 */
 	if (hc_head->hch_length >= V_tcp_hostcache.bucket_limit ||
-	    counter_u64_fetch(V_tcp_hostcache.cache_count) >= V_tcp_hostcache.cache_limit) {
+	    atomic_load_int(&V_tcp_hostcache.cache_count) >= V_tcp_hostcache.cache_limit) {
 		hc_entry = TAILQ_LAST(&hc_head->hch_bucket, hc_qhead);
 		/*
 		 * At first we were dropping the last element, just to
@@ -392,7 +388,7 @@ tcp_hc_insert(struct in_conninfo *inc)
 		}
 		TAILQ_REMOVE(&hc_head->hch_bucket, hc_entry, rmx_q);
 		V_tcp_hostcache.hashbase[hash].hch_length--;
-		counter_u64_add(V_tcp_hostcache.cache_count, -1);
+		atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
 		TCPSTAT_INC(tcps_hc_bucketoverflow);
 #if 0
 		uma_zfree(V_tcp_hostcache.zone, hc_entry);
@@ -425,7 +421,7 @@ tcp_hc_insert(struct in_conninfo *inc)
 	 */
 	TAILQ_INSERT_HEAD(&hc_head->hch_bucket, hc_entry, rmx_q);
 	V_tcp_hostcache.hashbase[hash].hch_length++;
-	counter_u64_add(V_tcp_hostcache.cache_count, 1);
+	atomic_add_int(&V_tcp_hostcache.cache_count, 1);
 	TCPSTAT_INC(tcps_hc_added);
 
 	return hc_entry;
@@ -641,7 +637,8 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
 
 	/* Optimize Buffer length query by sbin/sysctl */
 	if (req->oldptr == NULL) {
-		len = (counter_u64_fetch(V_tcp_hostcache.cache_count) + 1) * linesize;
+		len = (atomic_load_int(&V_tcp_hostcache.cache_count) + 1) *
+			linesize;
 		return (SYSCTL_OUT(req, NULL, len));
 	}
 
@@ -651,7 +648,8 @@ sysctl_tcp_hc_list(SYSCTL_HANDLER_ARGS)
 	}
 
 	/* Use a buffer sized for one full bucket */
-	sbuf_new_for_sysctl(&sb, NULL, V_tcp_hostcache.bucket_limit * linesize, req);
+	sbuf_new_for_sysctl(&sb, NULL, V_tcp_hostcache.bucket_limit *
+		linesize, req);
 
 	sbuf_printf(&sb,
 		"\nIP address        MTU  SSTRESH      RTT   RTTVAR "
@@ -713,7 +711,7 @@ tcp_hc_purge_internal(int all)
 					      hc_entry, rmx_q);
 				uma_zfree(V_tcp_hostcache.zone, hc_entry);
 				V_tcp_hostcache.hashbase[i].hch_length--;
-				counter_u64_add(V_tcp_hostcache.cache_count, -1);
+				atomic_subtract_int(&V_tcp_hostcache.cache_count, 1);
 			} else
 				hc_entry->rmx_expire -= V_tcp_hostcache.prune;
 		}
diff --git a/sys/netinet/tcp_hostcache.h b/sys/netinet/tcp_hostcache.h
index b5237392acc2..2f7035c0c6af 100644
--- a/sys/netinet/tcp_hostcache.h
+++ b/sys/netinet/tcp_hostcache.h
@@ -74,7 +74,7 @@ struct tcp_hostcache {
 	u_int		hashsize;
 	u_int		hashmask;
 	u_int		bucket_limit;
-	counter_u64_t	cache_count;
+	u_int		cache_count;
 	u_int		cache_limit;
 	int		expire;
 	int		prune;

