git: f0d33323feab - stable/13 - LinuxKPI: implement mul_u64_u64_div_u64()

From: Bjoern A. Zeeb <bz@FreeBSD.org>
Date: Wed, 29 Nov 2023 16:38:00 UTC
The branch stable/13 has been updated by bz:

URL: https://cgit.FreeBSD.org/src/commit/?id=f0d33323feab22b2550437132b8ee89f2086b310

commit f0d33323feab22b2550437132b8ee89f2086b310
Author:     Bjoern A. Zeeb <bz@FreeBSD.org>
AuthorDate: 2023-05-16 20:55:00 +0000
Commit:     Bjoern A. Zeeb <bz@FreeBSD.org>
CommitDate: 2023-11-29 16:36:04 +0000

    LinuxKPI: implement mul_u64_u64_div_u64()
    
    Implement mul_u64_u64_div_u64() for an updated iwlwifi driver (though
    we do not yet use it there; it is used for in-kernel ptp on wifi).
    
    Sponsored by:   The FreeBSD Foundation
    
    (cherry picked from commit b80ea452375f52a3ab7d82a9aef10da0d89985d9)
    
    Unbalanced parentheses broke the build; re-balance.
    
    Fixes:  b80ea452375f ("LinuxKPI: implement mul_u64_u64_div_u64()")
    (cherry picked from commit dabbbebcb0f5ced163454cd08b78e551b4a365b5)
    
    linuxkpi math: fix kassert in math64.h
    
    Include <sys/systm.h> in math64.h, so that KASSERT and bool are
    defined, to allow compilation to succeed after
    b80ea452375f52a3ab7d82a9aef10da0d89985d9 and
    dabbbebcb0f5ced163454cd08b78e551b4a365b5.
    
    (cherry picked from commit b6a61d6836d90ff2756d804eb981a02b0828f496)
---
 sys/compat/linuxkpi/common/include/linux/math64.h | 49 +++++++++++++++++++++++
 1 file changed, 49 insertions(+)
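
A minimal usage sketch (not part of this commit; the ticks_to_ns() wrapper,
its parameters, and the literal nanosecond constant are illustrative): the new
helper returns (x * y) / z computed with a full-width intermediate product, so
a PTP-style timestamp conversion does not overflow uint64_t even when x * y
exceeds 2^64.

    #include <linux/math64.h>

    /* Convert device clock ticks to nanoseconds without 64-bit overflow. */
    static inline uint64_t
    ticks_to_ns(uint64_t ticks, uint64_t clock_rate_hz)
    {
	    /* ns = ticks * 10^9 / rate; the product may not fit in 64 bits. */
	    return (mul_u64_u64_div_u64(ticks, 1000000000ULL, clock_rate_hz));
    }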

diff --git a/sys/compat/linuxkpi/common/include/linux/math64.h b/sys/compat/linuxkpi/common/include/linux/math64.h
index 1b00fd71e69f..cae5e30b08df 100644
--- a/sys/compat/linuxkpi/common/include/linux/math64.h
+++ b/sys/compat/linuxkpi/common/include/linux/math64.h
@@ -29,6 +29,7 @@
 #define	_LINUXKPI_LINUX_MATH64_H
 
 #include <sys/stdint.h>
+#include <sys/systm.h>
 
 #define	do_div(n, base) ({			\
 	uint32_t __base = (base);		\
@@ -106,6 +107,54 @@ mul_u64_u32_div(uint64_t x, uint32_t y, uint32_t div)
 	return ((x / div) * y + (rem * y) / div);
 }
 
+static inline uint64_t
+mul_u64_u64_div_u64(uint64_t x, uint64_t y, uint64_t z)
+{
+	uint64_t res, rem;
+	uint64_t x1, y1, y1z;
+
+	res = rem = 0;
+	x1 = x;
+	y1z = y / z;
+	y1 = y - y1z * z;
+
+	/*
+	 * INVARIANT: x * y = res * z + rem + (y1 + y1z * z) * x1
+	 * INVARIANT: y1 < z
+	 * INVARIANT: rem < z
+	 */
+	while (x1 > 0) {
+		/* Handle low bit. */
+		if (x1 & 1) {
+			x1 &= ~1;
+			res += y1z;
+			rem += y1;
+			if ((rem < y1) || (rem >= z)) {
+				res += 1;
+				rem -= z;
+			}
+		}
+
+		/* Shift x1 right and (y1 + y1z * z) left */
+		x1 >>= 1;
+		if ((y1 * 2 < y1) || (y1 * 2 >= z)) {
+			y1z = y1z * 2 + 1;
+			y1 = y1 * 2 - z;
+		} else {
+			y1z *= 2;
+			y1 *= 2;
+		}
+	}
+
+	KASSERT(res * z + rem == x * y, ("%s: res %ju * z %ju + rem %ju != "
+	    "x %ju * y %ju", __func__, (uintmax_t)res, (uintmax_t)z,
+	    (uintmax_t)rem, (uintmax_t)x, (uintmax_t)y));
+	KASSERT(rem < z, ("%s: rem %ju >= z %ju\n", __func__,
+	    (uintmax_t)rem, (uintmax_t)z));
+
+	return (res);
+}
+
 static inline uint64_t
 mul_u64_u32_shr(uint64_t x, uint32_t y, unsigned int shift)
 {