git: 731c145612dd - main - e1000: Fix some issues in em_newitr()

From: Mark Johnston <markj@FreeBSD.org>
Date: Tue, 27 May 2025 17:54:36 UTC
The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=731c145612dd6ffe457a562959a5c027acf13334

commit 731c145612dd6ffe457a562959a5c027acf13334
Author:     Mark Johnston <markj@FreeBSD.org>
AuthorDate: 2025-05-27 17:08:07 +0000
Commit:     Mark Johnston <markj@FreeBSD.org>
CommitDate: 2025-05-27 17:33:39 +0000

    e1000: Fix some issues in em_newitr()
    
    - Load packet and byte counters exactly once, as they can be
      concurrently mutated (see the sketch after the diffstat).
    - Rename bytes_packets to bytes_per_packet, which seems clearer.
    - Use local variables that have the same types as the counter values,
      rather than truncating unsigned long to u32 (see the note after the
      diff).
    
    Reviewed by:    kbowling
    MFC after:      2 weeks
    Differential Revision:  https://reviews.freebsd.org/D50416
---
 sys/dev/e1000/if_em.c | 40 ++++++++++++++++++++++------------------
 1 file changed, 22 insertions(+), 18 deletions(-)
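
A minimal sketch of the load-once idea from the first bullet, as a
standalone C11 program (atomic_load_explicit() stands in for the kernel's
atomic_load_long(); the ring structure and names are illustrative, not
the driver's):

	#include <stdatomic.h>
	#include <stdio.h>

	struct ring {
		_Atomic unsigned long bytes;
		_Atomic unsigned long packets;
	};

	static unsigned long
	avg_packet_size(struct ring *r)
	{
		/* Snapshot each concurrently updated counter exactly once. */
		unsigned long bytes =
		    atomic_load_explicit(&r->bytes, memory_order_relaxed);
		unsigned long packets =
		    atomic_load_explicit(&r->packets, memory_order_relaxed);

		/*
		 * The zero check and the division use the same snapshot;
		 * re-reading r->packets could divide by a different value
		 * than the one just tested.
		 */
		return (packets != 0 ? bytes / packets : 0);
	}

	int
	main(void)
	{
		struct ring r = { 9000, 6 };

		printf("%lu\n", avg_packet_size(&r));	/* prints 1500 */
		return (0);
	}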

diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index 27500f936c25..f0ef6051fab1 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -1644,14 +1644,16 @@ em_newitr(struct e1000_softc *sc, struct em_rx_queue *que,
     struct tx_ring *txr, struct rx_ring *rxr)
 {
 	struct e1000_hw *hw = &sc->hw;
+	unsigned long bytes, bytes_per_packet, packets;
+	unsigned long rxbytes, rxpackets, txbytes, txpackets;
 	u32 newitr;
-	u32 bytes;
-	u32 bytes_packets;
-	u32 packets;
 	u8 nextlatency;
 
+	rxbytes = atomic_load_long(&rxr->rx_bytes);
+	txbytes = atomic_load_long(&txr->tx_bytes);
+
 	/* Idle, do nothing */
-	if ((txr->tx_bytes == 0) && (rxr->rx_bytes == 0))
+	if (txbytes == 0 && rxbytes == 0)
 		return;
 
 	newitr = 0;
@@ -1671,18 +1673,20 @@ em_newitr(struct e1000_softc *sc, struct em_rx_queue *que,
 			goto em_set_next_itr;
 		}
 
-		bytes = bytes_packets = 0;
+		bytes = bytes_per_packet = 0;
 		/* Get largest values from the associated tx and rx ring */
-		if (txr->tx_bytes && txr->tx_packets) {
-			bytes = txr->tx_bytes;
-			bytes_packets = txr->tx_bytes/txr->tx_packets;
-			packets = txr->tx_packets;
+		txpackets = atomic_load_long(&txr->tx_packets);
+		if (txpackets != 0) {
+			bytes = txbytes;
+			bytes_per_packet = txbytes / txpackets;
+			packets = txpackets;
 		}
-		if (rxr->rx_bytes && rxr->rx_packets) {
-			bytes = max(bytes, rxr->rx_bytes);
-			bytes_packets =
-			    max(bytes_packets, rxr->rx_bytes/rxr->rx_packets);
-			packets = max(packets, rxr->rx_packets);
+		rxpackets = atomic_load_long(&rxr->rx_packets);
+		if (rxpackets != 0) {
+			bytes = lmax(bytes, rxbytes);
+			bytes_per_packet =
+			    lmax(bytes_per_packet, rxbytes / rxpackets);
+			packets = lmax(packets, rxpackets);
 		}
 
 		/* Latency state machine */
@@ -1692,7 +1696,7 @@ em_newitr(struct e1000_softc *sc, struct em_rx_queue *que,
 			break;
 		case itr_latency_lowest: /* 70k ints/s */
 			/* TSO and jumbo frames */
-			if (bytes_packets > 8000)
+			if (bytes_per_packet > 8000)
 				nextlatency = itr_latency_bulk;
 			else if ((packets < 5) && (bytes > 512))
 				nextlatency = itr_latency_low;
@@ -1700,14 +1704,14 @@ em_newitr(struct e1000_softc *sc, struct em_rx_queue *que,
 		case itr_latency_low: /* 20k ints/s */
 			if (bytes > 10000) {
 				/* Handle TSO */
-				if (bytes_packets > 8000)
+				if (bytes_per_packet > 8000)
 					nextlatency = itr_latency_bulk;
 				else if ((packets < 10) ||
-				    (bytes_packets > 1200))
+				    (bytes_per_packet > 1200))
 					nextlatency = itr_latency_bulk;
 				else if (packets > 35)
 					nextlatency = itr_latency_lowest;
-			} else if (bytes_packets > 2000) {
+			} else if (bytes_per_packet > 2000) {
 				nextlatency = itr_latency_bulk;
 			} else if (packets < 3 && bytes < 512) {
 				nextlatency = itr_latency_lowest;
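
A note on the type change in the last bullet: the ring counters are
unsigned long (64-bit on LP64 platforms), and sys/libkern.h defines max()
on u_int, which is why the patch also switches to lmax(), the long-typed
variant. A standalone demonstration of the truncation hazard (max_u32()
is a hypothetical stand-in modelled on libkern's max(), not the kernel
function itself):

	#include <stdio.h>

	/* Modelled on sys/libkern.h's max(), which takes u_int arguments. */
	static unsigned int
	max_u32(unsigned int a, unsigned int b)
	{
		return (a > b ? a : b);
	}

	int
	main(void)
	{
		/* 4 GiB of traffic: representable in a 64-bit counter... */
		unsigned long bytes = 0x100000000UL;

		/* ...but silently truncated to 0 by the u_int argument. */
		printf("%u\n", max_u32(bytes, 0));	/* prints 0 */
		printf("%lu\n", bytes);			/* prints 4294967296 */
		return (0);
	}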