git: ee4506105171 - main - cc_cubic: use newreno to emulate AIMD in TCP-friendly region
Date: Tue, 17 Sep 2024 14:37:47 UTC
The branch main has been updated by cc:
URL: https://cgit.FreeBSD.org/src/commit/?id=ee45061051715be4704ba22d2fcd1c373e29079d
commit ee45061051715be4704ba22d2fcd1c373e29079d
Author: Cheng Cui <cc@FreeBSD.org>
AuthorDate: 2024-09-05 18:52:57 +0000
Commit: Cheng Cui <cc@FreeBSD.org>
CommitDate: 2024-09-17 14:37:00 +0000
cc_cubic: use newreno to emulate AIMD in TCP-friendly region
Reviewed by: rscheff, tuexen
Differential Revision: https://reviews.freebsd.org/D46546
---
sys/netinet/cc/cc_cubic.c | 44 ++++++++++++++++++++++----------------------
sys/netinet/cc/cc_cubic.h | 18 +++++-------------
2 files changed, 27 insertions(+), 35 deletions(-)
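
Background for the first hunk below, summarized from RFC 8312 rather than taken from the commit itself (C, K, W_max and beta_cubic are the RFC's symbols): cubic_ack_received() compares the CUBIC window W_cubic against a Reno-equivalent estimate W_est, and whenever W_cubic < W_est the connection is in the TCP-friendly region and uses W_est as its cwnd. In LaTeX notation, with t the time since the start of the congestion epoch:

    W_{cubic}(t) = C \, (t - K)^3 + W_{max}, \qquad
    K = \sqrt[3]{W_{max} \, (1 - \beta_{cubic}) / C}

What this commit changes is where W_est comes from: instead of evaluating the closed-form Equation 4 of the I-D (reconstructed after the cc_cubic.h hunk below), tf_cwnd() now returns the cwnd that newreno's congestion-avoidance code computes, so the TCP-friendly window tracks newreno's actual AIMD behaviour; that is what the subject line means by "use newreno to emulate AIMD".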
diff --git a/sys/netinet/cc/cc_cubic.c b/sys/netinet/cc/cc_cubic.c
index 45f75efe5ae7..c4b44d5c3660 100644
--- a/sys/netinet/cc/cc_cubic.c
+++ b/sys/netinet/cc/cc_cubic.c
@@ -288,31 +288,26 @@ cubic_ack_received(struct cc_var *ccv, ccsignal_t type)
usecs_since_epoch = INT_MAX;
cubic_data->t_epoch = ticks - INT_MAX;
}
+
+ W_est = tf_cwnd(ccv);
+
/*
* The mean RTT is used to best reflect the equations in
- * the I-D. Using min_rtt in the tf_cwnd calculation
- * causes W_est to grow much faster than it should if the
- * RTT is dominated by network buffering rather than
- * propagation delay.
+ * the I-D.
*/
- W_est = tf_cwnd(usecs_since_epoch, cubic_data->mean_rtt_usecs,
- cubic_data->W_max, CCV(ccv, t_maxseg));
-
W_cubic = cubic_cwnd(usecs_since_epoch +
cubic_data->mean_rtt_usecs,
cubic_data->W_max,
CCV(ccv, t_maxseg),
cubic_data->K);
- ccv->flags &= ~CCF_ABC_SENTAWND;
-
if (W_cubic < W_est) {
/*
* TCP-friendly region, follow tf
* cwnd growth.
*/
- if (CCV(ccv, snd_cwnd) < W_est)
- CCV(ccv, snd_cwnd) = ulmin(W_est, INT_MAX);
+ CCV(ccv, snd_cwnd) = ulmin(W_est, INT_MAX);
+ cubic_data->flags |= CUBICFLAG_IN_TF;
} else if (CCV(ccv, snd_cwnd) < W_cubic) {
/*
* Concave or convex region, follow CUBIC
@@ -320,6 +315,7 @@ cubic_ack_received(struct cc_var *ccv, ccsignal_t type)
* Only update snd_cwnd, if it doesn't shrink.
*/
CCV(ccv, snd_cwnd) = ulmin(W_cubic, INT_MAX);
+ cubic_data->flags &= ~CUBICFLAG_IN_TF;
}
/*
@@ -644,19 +640,23 @@ cubic_ssthresh_update(struct cc_var *ccv, uint32_t maxseg)
cubic_data->undo_W_max = cubic_data->W_max;
cubic_data->W_max = cwnd;
- /*
- * On the first congestion event, set ssthresh to cwnd * 0.5
- * and reduce W_max to cwnd * beta. This aligns the cubic concave
- * region appropriately. On subsequent congestion events, set
- * ssthresh to cwnd * beta.
- */
- if ((cubic_data->flags & CUBICFLAG_CONG_EVENT) == 0) {
+ if (cubic_data->flags & CUBICFLAG_IN_TF) {
+ /* If in the TCP friendly region, follow what newreno does */
+ ssthresh = newreno_cc_cwnd_on_multiplicative_decrease(ccv, maxseg);
+
+ } else if ((cubic_data->flags & CUBICFLAG_CONG_EVENT) == 0) {
+ /*
+ * On the first congestion event, set ssthresh to cwnd * 0.5
+ * and reduce W_max to cwnd * beta. This aligns the cubic
+ * concave region appropriately.
+ */
ssthresh = cwnd >> 1;
- cubic_data->W_max = ((uint64_t)cwnd *
- CUBIC_BETA) >> CUBIC_SHIFT;
+ cubic_data->W_max = ((uint64_t)cwnd * CUBIC_BETA) >> CUBIC_SHIFT;
} else {
- ssthresh = ((uint64_t)cwnd *
- CUBIC_BETA) >> CUBIC_SHIFT;
+ /*
+ * On subsequent congestion events, set ssthresh to cwnd * beta.
+ */
+ ssthresh = ((uint64_t)cwnd * CUBIC_BETA) >> CUBIC_SHIFT;
}
CCV(ccv, snd_ssthresh) = max(ssthresh, 2 * maxseg);
}
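
With the new ssthresh logic, a flow that has been tracking newreno in the TCP-friendly region also backs off the way newreno does (ssthresh from newreno_cc_cwnd_on_multiplicative_decrease(), i.e. roughly half of cwnd), while a flow in the concave/convex region keeps CUBIC's milder beta = 0.7 reduction. A minimal standalone sketch of the numerical difference; the helper names and the 100-segment / 1460-byte figures below are made up for illustration, and plain integer math stands in for the kernel's fixed-point CUBIC_BETA:

/*
 * Illustrative only, not the in-tree helpers: compares the two window
 * reductions that cubic_ssthresh_update() now chooses between.
 */
#include <stdint.h>
#include <stdio.h>

/* NewReno-style multiplicative decrease: cut cwnd roughly in half. */
static uint32_t
reno_style_ssthresh(uint32_t cwnd, uint32_t maxseg)
{
        uint32_t ssthresh = cwnd / 2;

        return (ssthresh > 2 * maxseg ? ssthresh : 2 * maxseg);
}

/* CUBIC-style multiplicative decrease: cut cwnd to beta = 0.7. */
static uint32_t
cubic_style_ssthresh(uint32_t cwnd, uint32_t maxseg)
{
        uint32_t ssthresh = (uint32_t)(((uint64_t)cwnd * 7) / 10);

        return (ssthresh > 2 * maxseg ? ssthresh : 2 * maxseg);
}

int
main(void)
{
        uint32_t maxseg = 1460;
        uint32_t cwnd = 100 * maxseg;   /* 100 segments in flight */

        printf("TCP-friendly region (newreno): %u bytes (%u segments)\n",
            reno_style_ssthresh(cwnd, maxseg),
            reno_style_ssthresh(cwnd, maxseg) / maxseg);
        printf("CUBIC region (beta = 0.7):     %u bytes (%u segments)\n",
            cubic_style_ssthresh(cwnd, maxseg),
            cubic_style_ssthresh(cwnd, maxseg) / maxseg);
        return (0);
}

For a 100-segment window this prints 50 versus 70 segments, which is the gap the CUBICFLAG_IN_TF test is there to resolve consistently with how the window was grown.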
diff --git a/sys/netinet/cc/cc_cubic.h b/sys/netinet/cc/cc_cubic.h
index 592703906d1a..b4773618e6f8 100644
--- a/sys/netinet/cc/cc_cubic.h
+++ b/sys/netinet/cc/cc_cubic.h
@@ -83,6 +83,7 @@
#define CUBICFLAG_RTO_EVENT 0x00000008 /* RTO experienced */
#define CUBICFLAG_HYSTART_ENABLED 0x00000010 /* Hystart++ is enabled */
#define CUBICFLAG_HYSTART_IN_CSS 0x00000020 /* We are in Hystart++ CSS */
+#define CUBICFLAG_IN_TF 0x00000040 /* We are in TCP friendly region */
/* Kernel only bits */
#ifdef _KERNEL
@@ -286,22 +287,13 @@ reno_cwnd(int usecs_since_epoch, int rtt_usecs, unsigned long wmax,
}
/*
- * Compute an approximation of the "TCP friendly" cwnd some number of usecs
- * after a congestion event that is designed to yield the same average cwnd as
- * NewReno while using CUBIC's beta of 0.7. RTT should be the average RTT
- * estimate for the path measured over the previous congestion epoch and wmax is
- * the value of cwnd at the last congestion event.
+ * Compute the "TCP friendly" cwnd by newreno in congestion avoidance state.
*/
static __inline unsigned long
-tf_cwnd(int usecs_since_epoch, int rtt_usecs, unsigned long wmax,
- uint32_t smss)
+tf_cwnd(struct cc_var *ccv)
{
-
- /* Equation 4 of I-D. */
- return (((wmax * CUBIC_BETA) +
- (((THREE_X_PT3 * (unsigned long)usecs_since_epoch *
- (unsigned long)smss) << CUBIC_SHIFT) / (TWO_SUB_PT3 * rtt_usecs)))
- >> CUBIC_SHIFT);
+ /* newreno is "TCP friendly" */
+ return newreno_cc_cwnd_in_cong_avoid(ccv);
}
#endif /* _NETINET_CC_CUBIC_H_ */
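
For reference, the deleted expression above is a fixed-point rendering of Equation 4 of the I-D (RFC 8312). With t = usecs_since_epoch, RTT = rtt_usecs, the result in bytes, and THREE_X_PT3 / TWO_SUB_PT3 presumably encoding 3 * 0.3 and (2 - 0.3) = 1 + beta_cubic, the old tf_cwnd() computed, in LaTeX notation:

    W_{est}(t) = \beta_{cubic} \, W_{max}
        + \frac{3 \, (1 - \beta_{cubic})}{1 + \beta_{cubic}} \cdot \frac{t}{RTT} \cdot smss
        \approx 0.7 \, W_{max} + 0.53 \cdot \frac{t}{RTT} \cdot smss

The replacement instead asks the shared newreno code for its congestion-avoidance cwnd (classic AIMD growth of about one smss per RTT), so the TCP-friendly window now follows newreno's real trajectory rather than this closed-form approximation of it.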