git: 156dfc3e6e53 - main - tcp: use time instead of slots in the HPTS API
Date: Mon, 13 Oct 2025 20:37:44 UTC
The branch main has been updated by tuexen:
URL: https://cgit.FreeBSD.org/src/commit/?id=156dfc3e6e53a07bfa2d1e9d6e1ec37871cb887a
commit 156dfc3e6e53a07bfa2d1e9d6e1ec37871cb887a
Author: Nick Banks <nickbanks@netflix.com>
AuthorDate: 2025-10-13 20:35:29 +0000
Commit: Michael Tuexen <tuexen@FreeBSD.org>
CommitDate: 2025-10-13 20:35:29 +0000
tcp: use time instead of slots in the HPTS API
This makes slots an internal concept of HPTS.
Reviewed by: tuexen
Sponsored by: Netflix, Inc.
---
sys/netinet/tcp_hpts.c | 21 ++--
sys/netinet/tcp_hpts.h | 16 ++-
sys/netinet/tcp_hpts_test.c | 59 +++++-----
sys/netinet/tcp_stacks/bbr.c | 125 +++++++++++-----------
sys/netinet/tcp_stacks/rack.c | 244 +++++++++++++++++++++---------------------
5 files changed, 232 insertions(+), 233 deletions(-)
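In short, callers of tcp_hpts_insert() now hand HPTS a delay directly in
microseconds, and the wheel performs the slot conversion itself. A minimal
before/after sketch of a call site (tp is any tcpcb the caller has
write-locked):

  /* Before this commit: the caller converted time to wheel slots. */
  tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550), NULL);

  /* After: the caller passes microseconds; slots stay internal to HPTS. */
  tcp_hpts_insert(tp, 550, NULL);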
diff --git a/sys/netinet/tcp_hpts.c b/sys/netinet/tcp_hpts.c
index 2631e79ab034..a50ad3fa6b33 100644
--- a/sys/netinet/tcp_hpts.c
+++ b/sys/netinet/tcp_hpts.c
@@ -39,15 +39,14 @@
* First, and probably the main thing it is used for by Rack and BBR, it can
* be used to call tcp_output() of a transport stack at some time in the future.
* The normal way this is done is that tcp_output() of the stack schedules
- * itself to be called again by calling tcp_hpts_insert(tcpcb, slot). The
- * slot is the time from now that the stack wants to be called but it
- * must be converted to tcp_hpts's notion of slot. This is done with
- * one of the macros HPTS_MS_TO_SLOTS or HPTS_USEC_TO_SLOTS. So a typical
+ * itself to be called again by calling tcp_hpts_insert(tcpcb, usecs). The
+ * usecs argument is the time from now, in microseconds, at which the
+ * stack wants to be called. So a typical
* call from the tcp_output() routine might look like:
*
- * tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(550), NULL);
+ * tcp_hpts_insert(tp, 550, NULL);
*
- * The above would schedule tcp_output() to be called in 550 useconds.
+ * The above would schedule tcp_output() to be called in 550 microseconds.
* Note that if using this mechanism the stack will want to add near
* its top a check to prevent unwanted calls (from user land or the
* arrival of incoming ack's). So it would add something like:
@@ -832,16 +831,16 @@ check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct tcpcb *tp,
void
#ifdef INVARIANTS
-__tcp_hpts_insert(struct tcp_hptsi *pace, struct tcpcb *tp, uint32_t slot,
+__tcp_hpts_insert(struct tcp_hptsi *pace, struct tcpcb *tp, uint32_t usecs,
int32_t line, struct hpts_diag *diag)
#else
-__tcp_hpts_insert(struct tcp_hptsi *pace, struct tcpcb *tp, uint32_t slot,
+__tcp_hpts_insert(struct tcp_hptsi *pace, struct tcpcb *tp, uint32_t usecs,
struct hpts_diag *diag)
#endif
{
struct tcp_hpts_entry *hpts;
struct timeval tv;
- uint32_t wheel_cts, last_slot, need_new_to = 0;
+ uint32_t slot, wheel_cts, last_slot, need_new_to = 0;
int32_t wheel_slot, maxslots;
bool need_wakeup = false;
@@ -850,10 +849,12 @@ __tcp_hpts_insert(struct tcp_hptsi *pace, struct tcpcb *tp, uint32_t slot,
MPASS(!(tp->t_in_hpts == IHPTS_ONQUEUE));
/*
+ * Convert microseconds to slots for internal use.
* We now return the next-slot the hpts will be on, beyond its
* current run (if up) or where it was when it stopped if it is
* sleeping.
*/
+ slot = HPTS_USEC_TO_SLOTS(usecs);
hpts = tcp_hpts_lock(pace, tp);
microuptime(&tv);
if (diag) {
@@ -929,7 +930,7 @@ __tcp_hpts_insert(struct tcp_hptsi *pace, struct tcpcb *tp, uint32_t slot,
tp->t_hpts_slot = last_slot;
}
if (diag) {
- diag->slot_remaining = tp->t_hpts_request;
+ diag->time_remaining = tp->t_hpts_request;
diag->inp_hptsslot = tp->t_hpts_slot;
}
#ifdef INVARIANTS
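The conversion HPTS now performs internally is the same round-up division
the removed HPTS_USEC_TO_SLOTS() macro did, at HPTS_USECS_PER_SLOT (10)
microseconds per slot, so a request never fires earlier than asked. A
hypothetical standalone rendering of that arithmetic (usecs_to_slots is an
illustrative name, not a symbol from the tree):

  /* Sketch only: equivalent to the old ((x + 9) / 10) macro. */
  static inline uint32_t
  usecs_to_slots(uint32_t usecs)
  {
          return ((usecs + HPTS_USECS_PER_SLOT - 1) / HPTS_USECS_PER_SLOT);
  }
  /* usecs_to_slots(1) == 1, usecs_to_slots(550) == 55 */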
diff --git a/sys/netinet/tcp_hpts.h b/sys/netinet/tcp_hpts.h
index 6183e6646133..8b69e6af35ed 100644
--- a/sys/netinet/tcp_hpts.h
+++ b/sys/netinet/tcp_hpts.h
@@ -28,8 +28,6 @@
/* Number of useconds represented by an hpts slot */
#define HPTS_USECS_PER_SLOT 10
-#define HPTS_MS_TO_SLOTS(x) ((x * 100) + 1)
-#define HPTS_USEC_TO_SLOTS(x) ((x+9) /10)
#define HPTS_USEC_IN_SEC 1000000
#define HPTS_MSEC_IN_SEC 1000
#define HPTS_USEC_IN_MSEC 1000
@@ -60,7 +58,7 @@ struct hpts_diag {
uint32_t p_runningslot; /* bbr->inflight */
uint32_t slot_req; /* bbr->flex3 x */
uint32_t inp_hptsslot; /* bbr->flex4 x */
- uint32_t slot_remaining; /* bbr->flex5 x */
+ uint32_t time_remaining; /* bbr->flex5 x */
uint32_t have_slept; /* bbr->epoch x */
uint32_t hpts_sleep_time; /* bbr->applimited x */
uint32_t yet_to_sleep; /* bbr->lt_epoch x */
@@ -130,15 +128,15 @@ tcp_in_hpts(struct tcpcb *tp)
* you should already have the INP_WLOCK().
*/
#ifdef INVARIANTS
-void __tcp_hpts_insert(struct tcp_hptsi *pace, struct tcpcb *tp, uint32_t slot,
+void __tcp_hpts_insert(struct tcp_hptsi *pace, struct tcpcb *tp, uint32_t usecs,
int32_t line, struct hpts_diag *diag);
-#define tcp_hpts_insert(tp, slot, diag) \
- __tcp_hpts_insert(tcp_hptsi_pace, (tp), (slot), __LINE__, (diag))
+#define tcp_hpts_insert(tp, usecs, diag) \
+ __tcp_hpts_insert(tcp_hptsi_pace, (tp), (usecs), __LINE__, (diag))
#else
-void __tcp_hpts_insert(struct tcp_hptsi *pace, struct tcpcb *tp, uint32_t slot,
+void __tcp_hpts_insert(struct tcp_hptsi *pace, struct tcpcb *tp, uint32_t usecs,
struct hpts_diag *diag);
-#define tcp_hpts_insert(tp, slot, diag) \
- __tcp_hpts_insert(tcp_hptsi_pace, (tp), (slot), (diag))
+#define tcp_hpts_insert(tp, usecs, diag) \
+ __tcp_hpts_insert(tcp_hptsi_pace, (tp), (usecs), (diag))
#endif
void __tcp_set_hpts(struct tcp_hptsi *pace, struct tcpcb *tp);
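The wrapper macro keeps its shape; under INVARIANTS it additionally records
the calling line for diagnostics. Expanded by hand, the sketch from above:

  tcp_hpts_insert(tp, 550, NULL);
  /* expands, with INVARIANTS, to: */
  __tcp_hpts_insert(tcp_hptsi_pace, (tp), (550), __LINE__, (NULL));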
diff --git a/sys/netinet/tcp_hpts_test.c b/sys/netinet/tcp_hpts_test.c
index fb2d9c0c7024..9217c3205587 100644
--- a/sys/netinet/tcp_hpts_test.c
+++ b/sys/netinet/tcp_hpts_test.c
@@ -57,11 +57,11 @@
#define tcp_hpts_init(pace, tp) __tcp_hpts_init((pace), (tp))
#define tcp_hpts_remove(pace, tp) __tcp_hpts_remove((pace), (tp))
#ifdef INVARIANTS
-#define tcp_hpts_insert(pace, tp, slot, diag) \
- __tcp_hpts_insert((pace), (tp), (slot), __LINE__, (diag))
+#define tcp_hpts_insert(pace, tp, usecs, diag) \
+ __tcp_hpts_insert((pace), (tp), (usecs), __LINE__, (diag))
#else
-#define tcp_hpts_insert(pace, tp, slot, diag) \
- __tcp_hpts_insert((pace), (tp), (slot), (diag))
+#define tcp_hpts_insert(pace, tp, usecs, diag) \
+ __tcp_hpts_insert((pace), (tp), (usecs), (diag))
#endif
#define tcp_set_hpts(pace, tp) __tcp_set_hpts((pace), (tp))
@@ -607,7 +607,7 @@ KTEST_FUNC(tcpcb_insertion)
struct tcp_hptsi *pace;
struct tcpcb *tp;
struct tcp_hpts_entry *hpts;
- uint32_t slot = 10;
+ uint32_t timeout_usecs = 10;
test_hpts_init();
@@ -623,7 +623,7 @@ KTEST_FUNC(tcpcb_insertion)
INP_WLOCK(&tp->t_inpcb);
tp->t_flags2 |= TF2_HPTS_CALLS;
KTEST_EQUAL(call_counts[CCNT_SWI_SCHED], 0);
- tcp_hpts_insert(pace, tp, slot, NULL);
+ tcp_hpts_insert(pace, tp, timeout_usecs, NULL);
KTEST_EQUAL(tp->t_in_hpts, IHPTS_ONQUEUE);
INP_WUNLOCK(&tp->t_inpcb);
KTEST_EQUAL(call_counts[CCNT_TCP_OUTPUT], 0);
@@ -635,7 +635,7 @@ KTEST_FUNC(tcpcb_insertion)
hpts = pace->rp_ent[tp->t_hpts_cpu];
KTEST_EQUAL(hpts->p_on_queue_cnt, 1);
KTEST_EQUAL(tp->t_hpts_request, 0);
- KTEST_EQUAL(tp->t_hpts_slot, slot);
+ KTEST_EQUAL(tp->t_hpts_slot, HPTS_USEC_TO_SLOTS(timeout_usecs));
//KTEST_EQUAL(tp->t_hpts_gencnt, 1);
INP_WLOCK(&tp->t_inpcb);
@@ -686,7 +686,7 @@ KTEST_FUNC(timer_functionality)
KTEST_LOG(ctx, "=> tcp_hpts_insert(%p)", tp);
INP_WLOCK(&tp->t_inpcb);
tp->t_flags2 |= TF2_HPTS_CALLS; /* Mark as needing HPTS processing */
- tcp_hpts_insert(pace, tp, HPTS_USEC_TO_SLOTS(500), NULL);
+ tcp_hpts_insert(pace, tp, 500, NULL);
INP_WUNLOCK(&tp->t_inpcb);
dump_tcpcb(tp);
@@ -814,7 +814,7 @@ KTEST_FUNC(scalability_tcpcbs)
INP_WLOCK(&tcpcbs[i]->t_inpcb);
tcpcbs[i]->t_flags2 |= TF2_HPTS_CALLS;
/* Insert with varying future timeouts to distribute across slots */
- tcp_hpts_insert(pace, tcpcbs[i], HPTS_USEC_TO_SLOTS(100 + (i % 1000)), NULL);
+ tcp_hpts_insert(pace, tcpcbs[i], 100 + (i % 1000), NULL);
INP_WUNLOCK(&tcpcbs[i]->t_inpcb);
}
@@ -864,7 +864,7 @@ KTEST_FUNC(wheel_wrap_recovery)
struct epoch_tracker et;
struct tcp_hptsi *pace;
struct tcpcb **tcpcbs;
- uint32_t i, slot, num_tcpcbs = 500;
+ uint32_t i, timeout_usecs, num_tcpcbs = 500;
int32_t slots_ran;
test_hpts_init();
@@ -883,11 +883,11 @@ KTEST_FUNC(wheel_wrap_recovery)
KTEST_NEQUAL(tcpcbs[i], NULL);
TP_REMOVE_FROM_HPTS(tcpcbs[i]) = 1;
- slot = (i * NUM_OF_HPTSI_SLOTS) / num_tcpcbs; /* Spread across slots */
+ timeout_usecs = ((i * NUM_OF_HPTSI_SLOTS) / num_tcpcbs) * HPTS_USECS_PER_SLOT; /* Spread across slots */
INP_WLOCK(&tcpcbs[i]->t_inpcb);
tcpcbs[i]->t_flags2 |= TF2_HPTS_CALLS;
- tcp_hpts_insert(pace, tcpcbs[i], slot, NULL);
+ tcp_hpts_insert(pace, tcpcbs[i], timeout_usecs, NULL);
INP_WUNLOCK(&tcpcbs[i]->t_inpcb);
}
@@ -957,12 +957,12 @@ KTEST_FUNC(tcpcb_moving_state)
/* Insert both into the same slot */
INP_WLOCK(&tp1->t_inpcb);
tp1->t_flags2 |= TF2_HPTS_CALLS;
- tcp_hpts_insert(pace, tp1, HPTS_USEC_TO_SLOTS(100), NULL);
+ tcp_hpts_insert(pace, tp1, 100, NULL);
INP_WUNLOCK(&tp1->t_inpcb);
INP_WLOCK(&tp2->t_inpcb);
tp2->t_flags2 |= TF2_HPTS_CALLS;
- tcp_hpts_insert(pace, tp2, HPTS_USEC_TO_SLOTS(100), NULL);
+ tcp_hpts_insert(pace, tp2, 100, NULL);
INP_WUNLOCK(&tp2->t_inpcb);
hpts = pace->rp_ent[0];
@@ -1008,8 +1008,8 @@ KTEST_FUNC(deferred_requests)
struct tcp_hptsi *pace;
struct tcpcb *tp, *tp2;
struct tcp_hpts_entry *hpts;
- uint32_t large_slot = NUM_OF_HPTSI_SLOTS + 5000; /* Beyond wheel capacity */
- uint32_t huge_slot = NUM_OF_HPTSI_SLOTS * 3; /* 3x wheel capacity */
+ uint32_t large_timeout_usecs = (NUM_OF_HPTSI_SLOTS + 5000) * HPTS_USECS_PER_SLOT; /* Beyond wheel capacity */
+ uint32_t huge_timeout_usecs = (NUM_OF_HPTSI_SLOTS * 3) * HPTS_USECS_PER_SLOT; /* 3x wheel capacity */
uint32_t initial_request;
int32_t slots_ran;
@@ -1025,7 +1025,7 @@ KTEST_FUNC(deferred_requests)
/* Insert with a request that exceeds current wheel capacity */
INP_WLOCK(&tp->t_inpcb);
tp->t_flags2 |= TF2_HPTS_CALLS;
- tcp_hpts_insert(pace, tp, large_slot, NULL);
+ tcp_hpts_insert(pace, tp, large_timeout_usecs, NULL);
INP_WUNLOCK(&tp->t_inpcb);
/* Verify it was inserted with a deferred request */
@@ -1061,7 +1061,7 @@ KTEST_FUNC(deferred_requests)
INP_WLOCK(&tp2->t_inpcb);
tp2->t_flags2 |= TF2_HPTS_CALLS;
- tcp_hpts_insert(pace, tp2, huge_slot, NULL);
+ tcp_hpts_insert(pace, tp2, huge_timeout_usecs, NULL);
INP_WUNLOCK(&tp2->t_inpcb);
/* Verify initial deferred request */
@@ -1081,7 +1081,7 @@ KTEST_FUNC(deferred_requests)
KTEST_VERIFY(tp2->t_hpts_request > 0);
KTEST_EQUAL(tp2->t_in_hpts, IHPTS_ONQUEUE); /* Still queued */
- /* For huge_slot = NUM_OF_HPTSI_SLOTS * 3, we need ~3 cycles to complete.
+ /* For huge_timeout_usecs = NUM_OF_HPTSI_SLOTS * 3 * HPTS_USECS_PER_SLOT, we need ~3 cycles to complete.
* Each cycle can reduce the request by at most NUM_OF_HPTSI_SLOTS. */
test_time_usec += NUM_OF_HPTSI_SLOTS * HPTS_USECS_PER_SLOT;
HPTS_LOCK(hpts);
@@ -1200,7 +1200,7 @@ KTEST_FUNC(slot_boundary_conditions)
KTEST_NEQUAL(tp, NULL);
INP_WLOCK(&tp->t_inpcb);
tp->t_flags2 |= TF2_HPTS_CALLS;
- tcp_hpts_insert(pace, tp, 0, NULL); /* Should insert in next available slot */
+ tcp_hpts_insert(pace, tp, 0, NULL); /* Should insert immediately (0 timeout) */
INP_WUNLOCK(&tp->t_inpcb);
KTEST_EQUAL(tp->t_in_hpts, IHPTS_ONQUEUE);
KTEST_VERIFY(tp->t_hpts_slot < NUM_OF_HPTSI_SLOTS);
@@ -1212,7 +1212,7 @@ KTEST_FUNC(slot_boundary_conditions)
/* Test insertion at maximum slot value */
INP_WLOCK(&tp->t_inpcb);
tp->t_flags2 |= TF2_HPTS_CALLS;
- tcp_hpts_insert(pace, tp, NUM_OF_HPTSI_SLOTS - 1, NULL);
+ tcp_hpts_insert(pace, tp, (NUM_OF_HPTSI_SLOTS - 1) * HPTS_USECS_PER_SLOT, NULL);
INP_WUNLOCK(&tp->t_inpcb);
KTEST_EQUAL(tp->t_in_hpts, IHPTS_ONQUEUE);
@@ -1220,13 +1220,13 @@ KTEST_FUNC(slot_boundary_conditions)
tcp_hpts_remove(pace, tp);
INP_WUNLOCK(&tp->t_inpcb);
- /* Test very small slot values */
+ /* Test very small timeout values */
INP_WLOCK(&tp->t_inpcb);
tp->t_flags2 |= TF2_HPTS_CALLS;
tcp_hpts_insert(pace, tp, 1, NULL);
INP_WUNLOCK(&tp->t_inpcb);
KTEST_EQUAL(tp->t_in_hpts, IHPTS_ONQUEUE);
- KTEST_EQUAL(tp->t_hpts_slot, 1); /* Should be exactly slot 1 */
+ KTEST_EQUAL(tp->t_hpts_slot, HPTS_USEC_TO_SLOTS(1)); /* Should convert 1 usec to slot */
INP_WLOCK(&tp->t_inpcb);
tcp_hpts_remove(pace, tp);
@@ -1269,7 +1269,7 @@ KTEST_FUNC(dynamic_sleep_adjustment)
INP_WLOCK(&tcpcbs[i]->t_inpcb);
tcpcbs[i]->t_flags2 |= TF2_HPTS_CALLS;
TP_REMOVE_FROM_HPTS(tcpcbs[i]) = 1; /* Will be removed after output */
- tcp_hpts_insert(pace, tcpcbs[i], HPTS_USEC_TO_SLOTS(100), NULL);
+ tcp_hpts_insert(pace, tcpcbs[i], 100, NULL);
INP_WUNLOCK(&tcpcbs[i]->t_inpcb);
}
@@ -1333,13 +1333,13 @@ KTEST_FUNC(concurrent_operations)
/* Insert tp1 */
INP_WLOCK(&tp1->t_inpcb);
tp1->t_flags2 |= TF2_HPTS_CALLS;
- tcp_hpts_insert(pace, tp1, HPTS_USEC_TO_SLOTS(100), NULL);
+ tcp_hpts_insert(pace, tp1, 100, NULL);
INP_WUNLOCK(&tp1->t_inpcb);
/* Insert tp2 into same slot */
INP_WLOCK(&tp2->t_inpcb);
tp2->t_flags2 |= TF2_HPTS_CALLS;
- tcp_hpts_insert(pace, tp2, HPTS_USEC_TO_SLOTS(100), NULL);
+ tcp_hpts_insert(pace, tp2, 100, NULL);
INP_WUNLOCK(&tp2->t_inpcb);
/* Verify both are inserted */
@@ -1415,7 +1415,7 @@ KTEST_FUNC(queued_segments_processing)
STAILQ_INSERT_TAIL(&tp->t_inqueue, fake_mbuf, m_stailqpkt);
INP_WLOCK(&tp->t_inpcb);
- tcp_hpts_insert(pace, tp, HPTS_USEC_TO_SLOTS(100), NULL);
+ tcp_hpts_insert(pace, tp, 100, NULL);
INP_WUNLOCK(&tp->t_inpcb);
hpts = pace->rp_ent[tp->t_hpts_cpu];
@@ -1549,6 +1549,7 @@ KTEST_FUNC(generation_count_validation)
struct tcp_hpts_entry *hpts;
struct tcpcb *tp1, *tp2;
uint32_t initial_gencnt, slot_to_test = 10;
+ uint32_t timeout_usecs = slot_to_test * HPTS_USECS_PER_SLOT;
uint32_t tp2_original_gencnt;
int32_t slots_ran;
@@ -1570,7 +1571,7 @@ KTEST_FUNC(generation_count_validation)
INP_WLOCK(&tp1->t_inpcb);
tp1->t_flags2 |= TF2_HPTS_CALLS;
- tcp_hpts_insert(pace, tp1, slot_to_test, NULL);
+ tcp_hpts_insert(pace, tp1, timeout_usecs, NULL);
INP_WUNLOCK(&tp1->t_inpcb);
/* Verify connection stored the generation count */
@@ -1604,7 +1605,7 @@ KTEST_FUNC(generation_count_validation)
/* Insert second connection and record its generation count */
INP_WLOCK(&tp2->t_inpcb);
tp2->t_flags2 |= TF2_HPTS_CALLS;
- tcp_hpts_insert(pace, tp2, slot_to_test, NULL);
+ tcp_hpts_insert(pace, tp2, timeout_usecs, NULL);
INP_WUNLOCK(&tp2->t_inpcb);
/* Verify connection was inserted successfully */
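The test updates all apply the inverse mapping: a test that formerly
targeted slot N now requests N * HPTS_USECS_PER_SLOT microseconds, and
assertions on t_hpts_slot convert back. Condensed from the generation-count
test above:

  uint32_t slot_to_test = 10;
  uint32_t timeout_usecs = slot_to_test * HPTS_USECS_PER_SLOT; /* 100 usecs */

  tcp_hpts_insert(pace, tp1, timeout_usecs, NULL);
  /* The connection lands on the expected wheel slot: */
  KTEST_EQUAL(tp1->t_hpts_slot, HPTS_USEC_TO_SLOTS(timeout_usecs)); /* 10 */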
diff --git a/sys/netinet/tcp_stacks/bbr.c b/sys/netinet/tcp_stacks/bbr.c
index d035766d895d..66983edcdd73 100644
--- a/sys/netinet/tcp_stacks/bbr.c
+++ b/sys/netinet/tcp_stacks/bbr.c
@@ -480,7 +480,7 @@ bbr_find_lowest_rsm(struct tcp_bbr *bbr);
static __inline uint32_t
bbr_get_rtt(struct tcp_bbr *bbr, int32_t rtt_type);
static void
-bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t slot,
+bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t pacing_delay,
uint8_t which);
static void
bbr_log_timer_var(struct tcp_bbr *bbr, int mode, uint32_t cts,
@@ -489,7 +489,7 @@ bbr_log_timer_var(struct tcp_bbr *bbr, int mode, uint32_t cts,
static void
bbr_log_hpts_diag(struct tcp_bbr *bbr, uint32_t cts, struct hpts_diag *diag);
static void
-bbr_log_type_bbrsnd(struct tcp_bbr *bbr, uint32_t len, uint32_t slot,
+bbr_log_type_bbrsnd(struct tcp_bbr *bbr, uint32_t len, uint32_t pacing_delay,
uint32_t del_by, uint32_t cts, uint32_t sloton,
uint32_t prev_delay);
static void
@@ -724,7 +724,7 @@ bbr_minseg(struct tcp_bbr *bbr)
}
static void
-bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_t frm, int32_t slot, uint32_t tot_len)
+bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_t frm, int32_t pacing_delay, uint32_t tot_len)
{
struct inpcb *inp = tptoinpcb(tp);
struct hpts_diag diag;
@@ -751,40 +751,40 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
bbr->r_ctl.rc_timer_exp = 0;
prev_delay = bbr->r_ctl.rc_last_delay_val;
if (bbr->r_ctl.rc_last_delay_val &&
- (slot == 0)) {
+ (pacing_delay == 0)) {
/*
* If a previous pacer delay was in place we
* are not coming from the output side (where
* we calculate a delay, more likely a timer).
*/
- slot = bbr->r_ctl.rc_last_delay_val;
+ pacing_delay = bbr->r_ctl.rc_last_delay_val;
if (TSTMP_GT(cts, bbr->rc_pacer_started)) {
/* Compensate for time passed */
delay_calc = cts - bbr->rc_pacer_started;
- if (delay_calc <= slot)
- slot -= delay_calc;
+ if (delay_calc <= pacing_delay)
+ pacing_delay -= delay_calc;
}
}
/* Do we have early to make up for by pushing out the pacing time? */
if (bbr->r_agg_early_set) {
- bbr_log_pacing_delay_calc(bbr, 0, bbr->r_ctl.rc_agg_early, cts, slot, 0, bbr->r_agg_early_set, 2);
- slot += bbr->r_ctl.rc_agg_early;
+ bbr_log_pacing_delay_calc(bbr, 0, bbr->r_ctl.rc_agg_early, cts, pacing_delay, 0, bbr->r_agg_early_set, 2);
+ pacing_delay += bbr->r_ctl.rc_agg_early;
bbr->r_ctl.rc_agg_early = 0;
bbr->r_agg_early_set = 0;
}
/* Are we running a total debt that needs to be compensated for? */
if (bbr->r_ctl.rc_hptsi_agg_delay) {
- if (slot > bbr->r_ctl.rc_hptsi_agg_delay) {
+ if (pacing_delay > bbr->r_ctl.rc_hptsi_agg_delay) {
/* We nuke the delay */
- slot -= bbr->r_ctl.rc_hptsi_agg_delay;
+ pacing_delay -= bbr->r_ctl.rc_hptsi_agg_delay;
bbr->r_ctl.rc_hptsi_agg_delay = 0;
} else {
/* We nuke some of the delay, put in a minimal 100usecs */
- bbr->r_ctl.rc_hptsi_agg_delay -= slot;
- bbr->r_ctl.rc_last_delay_val = slot = 100;
+ bbr->r_ctl.rc_hptsi_agg_delay -= pacing_delay;
+ bbr->r_ctl.rc_last_delay_val = pacing_delay = 100;
}
}
- bbr->r_ctl.rc_last_delay_val = slot;
+ bbr->r_ctl.rc_last_delay_val = pacing_delay;
hpts_timeout = bbr_timer_start(tp, bbr, cts);
if (tp->t_flags & TF_DELACK) {
if (bbr->rc_in_persist == 0) {
@@ -810,7 +810,7 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
bbr->r_ctl.rc_hpts_flags = PACE_TMR_DELACK;
hpts_timeout = delayed_ack;
}
- if (slot) {
+ if (pacing_delay) {
/* Mark that we have a pacing timer up */
BBR_STAT_INC(bbr_paced_segments);
bbr->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
@@ -820,7 +820,7 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
* wheel, we resort to a keep-alive timer if its configured.
*/
if ((hpts_timeout == 0) &&
- (slot == 0)) {
+ (pacing_delay == 0)) {
if ((V_tcp_always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
(tp->t_state <= TCPS_CLOSING)) {
/*
@@ -849,7 +849,7 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
if (left < hpts_timeout)
hpts_timeout = left;
}
- if (bbr->r_ctl.rc_incr_tmrs && slot &&
+ if (bbr->r_ctl.rc_incr_tmrs && pacing_delay &&
(bbr->r_ctl.rc_hpts_flags & (PACE_TMR_TLP|PACE_TMR_RXT))) {
/*
* If configured to do so, and the timer is either
@@ -867,7 +867,7 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
* this extra delay but this is easier and being more
* conservative is probably better.
*/
- hpts_timeout += slot;
+ hpts_timeout += pacing_delay;
}
if (hpts_timeout) {
/*
@@ -879,10 +879,10 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
bbr->r_ctl.rc_timer_exp = cts + hpts_timeout;
} else
bbr->r_ctl.rc_timer_exp = 0;
- if ((slot) &&
+ if ((pacing_delay) &&
(bbr->rc_use_google ||
bbr->output_error_seen ||
- (slot <= hpts_timeout)) ) {
+ (pacing_delay <= hpts_timeout)) ) {
/*
* Tell LRO that it can queue packets while
* we pace.
@@ -900,15 +900,15 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
tp->t_flags2 &= ~TF2_DONT_SACK_QUEUE;
bbr->rc_pacer_started = cts;
- tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(slot), &diag);
+ tcp_hpts_insert(tp, pacing_delay, &diag);
bbr->rc_timer_first = 0;
bbr->bbr_timer_src = frm;
- bbr_log_to_start(bbr, cts, hpts_timeout, slot, 1);
+ bbr_log_to_start(bbr, cts, hpts_timeout, pacing_delay, 1);
bbr_log_hpts_diag(bbr, cts, &diag);
} else if (hpts_timeout) {
- tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(hpts_timeout), &diag);
+ tcp_hpts_insert(tp, hpts_timeout, &diag);
/*
- * We add the flag here as well if the slot is set,
+ * We add the flag here as well if the pacing delay is set,
* since hpts will call in to clear the queue first before
* calling the output routine (which does our timers).
* We don't want to set the flag if its just a timer
@@ -917,7 +917,7 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
* on a keep-alive timer and a request comes in for
* more data.
*/
- if (slot)
+ if (pacing_delay)
bbr->rc_pacer_started = cts;
if ((bbr->r_ctl.rc_hpts_flags & PACE_TMR_RACK) &&
(bbr->rc_cwnd_limited == 0)) {
@@ -934,12 +934,12 @@ bbr_start_hpts_timer(struct tcp_bbr *bbr, struct tcpcb *tp, uint32_t cts, int32_
TF2_DONT_SACK_QUEUE);
}
bbr->bbr_timer_src = frm;
- bbr_log_to_start(bbr, cts, hpts_timeout, slot, 0);
+ bbr_log_to_start(bbr, cts, hpts_timeout, pacing_delay, 0);
bbr_log_hpts_diag(bbr, cts, &diag);
bbr->rc_timer_first = 1;
}
bbr->rc_tmr_stopped = 0;
- bbr_log_type_bbrsnd(bbr, tot_len, slot, delay_calc, cts, frm, prev_delay);
+ bbr_log_type_bbrsnd(bbr, tot_len, pacing_delay, delay_calc, cts, frm, prev_delay);
}
static void
@@ -1031,8 +1031,8 @@ bbr_timer_audit(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, struct sock
}
/*
* Ok the timer originally started is not what we want now. We will
- * force the hpts to be stopped if any, and restart with the slot
- * set to what was in the saved slot.
+ * force the hpts to be stopped if any, and restart with the pacing
+ * delay set to what was in the saved delay.
*/
wrong_timer:
if ((bbr->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) == 0) {
@@ -2395,7 +2395,7 @@ bbr_log_hpts_diag(struct tcp_bbr *bbr, uint32_t cts, struct hpts_diag *diag)
log.u_bbr.flex2 = diag->p_cur_slot;
log.u_bbr.flex3 = diag->slot_req;
log.u_bbr.flex4 = diag->inp_hptsslot;
- log.u_bbr.flex5 = diag->slot_remaining;
+ log.u_bbr.flex5 = diag->time_remaining;
log.u_bbr.flex6 = diag->need_new_to;
log.u_bbr.flex7 = diag->p_hpts_active;
log.u_bbr.flex8 = diag->p_on_min_sleep;
@@ -2468,7 +2468,7 @@ bbr_log_pacing_delay_calc(struct tcp_bbr *bbr, uint16_t gain, uint32_t len,
}
static void
-bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
+bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t pacing_delay, uint8_t which)
{
if (tcp_bblogging_on(bbr->rc_tp)) {
union tcp_log_stackspecific log;
@@ -2478,7 +2478,7 @@ bbr_log_to_start(struct tcp_bbr *bbr, uint32_t cts, uint32_t to, int32_t slot, u
log.u_bbr.flex1 = bbr->bbr_timer_src;
log.u_bbr.flex2 = to;
log.u_bbr.flex3 = bbr->r_ctl.rc_hpts_flags;
- log.u_bbr.flex4 = slot;
+ log.u_bbr.flex4 = pacing_delay;
log.u_bbr.flex5 = bbr->rc_tp->t_hpts_slot;
log.u_bbr.flex6 = TICKS_2_USEC(bbr->rc_tp->t_rxtcur);
log.u_bbr.pkts_out = bbr->rc_tp->t_flags2;
@@ -2728,13 +2728,13 @@ bbr_type_log_hdwr_pacing(struct tcp_bbr *bbr, const struct ifnet *ifp,
}
static void
-bbr_log_type_bbrsnd(struct tcp_bbr *bbr, uint32_t len, uint32_t slot, uint32_t del_by, uint32_t cts, uint32_t line, uint32_t prev_delay)
+bbr_log_type_bbrsnd(struct tcp_bbr *bbr, uint32_t len, uint32_t pacing_delay, uint32_t del_by, uint32_t cts, uint32_t line, uint32_t prev_delay)
{
if (tcp_bblogging_on(bbr->rc_tp)) {
union tcp_log_stackspecific log;
bbr_fill_in_logging_data(bbr, &log.u_bbr, cts);
- log.u_bbr.flex1 = slot;
+ log.u_bbr.flex1 = pacing_delay;
log.u_bbr.flex2 = del_by;
log.u_bbr.flex3 = prev_delay;
log.u_bbr.flex4 = line;
@@ -5200,7 +5200,7 @@ bbr_process_timers(struct tcpcb *tp, struct tcp_bbr *bbr, uint32_t cts, uint8_t
left = bbr->r_ctl.rc_timer_exp - cts;
ret = -3;
bbr_log_to_processing(bbr, cts, ret, left, hpts_calling);
- tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(left), NULL);
+ tcp_hpts_insert(tp, left, NULL);
return (1);
}
bbr->rc_tmr_stopped = 0;
@@ -5249,7 +5249,7 @@ bbr_timer_cancel(struct tcp_bbr *bbr, int32_t line, uint32_t cts)
else
time_since_send = 0;
if (bbr->r_ctl.rc_last_delay_val > time_since_send) {
- /* Cut down our slot time */
+ /* Cut down our pacing delay */
bbr->r_ctl.rc_last_delay_val -= time_since_send;
} else {
bbr->r_ctl.rc_last_delay_val = 0;
@@ -5883,7 +5883,7 @@ bbr_log_output(struct tcp_bbr *bbr, struct tcpcb *tp, struct tcpopt *to, int32_t
* sequence 1 for 10 bytes. In such an example the r_start would be
* 1 (starting sequence) but the r_end would be r_start+len i.e. 11.
* This means that r_end is actually the first sequence for the next
- * slot (11).
+ * block (11).
*
*/
INP_WLOCK_ASSERT(tptoinpcb(tp));
@@ -11851,7 +11851,7 @@ bbr_output_wtime(struct tcpcb *tp, const struct timeval *tv)
struct bbr_sendmap *rsm = NULL;
int32_t tso, mtu;
struct tcpopt to;
- int32_t slot = 0;
+ int32_t pacing_delay = 0;
struct inpcb *inp;
struct sockbuf *sb;
bool hpts_calling;
@@ -11981,8 +11981,7 @@ bbr_output_wtime(struct tcpcb *tp, const struct timeval *tv)
delay_calc -= bbr->r_ctl.rc_last_delay_val;
else {
/*
- * We are early setup to adjust
- * our slot time.
+ * We are early setup to adjust our pacing delay.
*/
uint64_t merged_val;
@@ -12099,7 +12098,7 @@ again:
#endif
error = 0;
tso = 0;
- slot = 0;
+ pacing_delay = 0;
mtu = 0;
sendwin = min(tp->snd_wnd, tp->snd_cwnd);
sb_offset = tp->snd_max - tp->snd_una;
@@ -12121,7 +12120,7 @@ recheck_resend:
tot_len = tp->t_maxseg;
if (hpts_calling)
/* Retry in a ms */
- slot = 1001;
+ pacing_delay = 1001;
goto just_return_nolock;
}
TAILQ_INSERT_TAIL(&bbr->r_ctl.rc_free, rsm, r_next);
@@ -12694,9 +12693,9 @@ just_return:
SOCK_SENDBUF_UNLOCK(so);
just_return_nolock:
if (tot_len)
- slot = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0);
+ pacing_delay = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0);
if (bbr->rc_no_pacing)
- slot = 0;
+ pacing_delay = 0;
if (tot_len == 0) {
if ((ctf_outstanding(tp) + min((bbr->r_ctl.rc_high_rwnd/2), bbr_minseg(bbr))) >=
tp->snd_wnd) {
@@ -12746,7 +12745,7 @@ just_return_nolock:
/* Dont update the time if we did not send */
bbr->r_ctl.rc_last_delay_val = 0;
bbr->rc_output_starts_timer = 1;
- bbr_start_hpts_timer(bbr, tp, cts, 9, slot, tot_len);
+ bbr_start_hpts_timer(bbr, tp, cts, 9, pacing_delay, tot_len);
bbr_log_type_just_return(bbr, cts, tot_len, hpts_calling, app_limited, p_maxseg, len);
if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
/* Make sure snd_nxt is drug up */
@@ -12782,7 +12781,7 @@ send:
flags &= ~TH_FIN;
if ((len == 0) && ((tp->t_flags & TF_ACKNOW) == 0)) {
/* Lets not send this */
- slot = 0;
+ pacing_delay = 0;
goto just_return;
}
}
@@ -13048,7 +13047,7 @@ send:
/*
* We have outstanding data, don't send a fin by itself!.
*/
- slot = 0;
+ pacing_delay = 0;
goto just_return;
}
/*
@@ -13758,7 +13757,7 @@ nomore:
if (tp->snd_cwnd < maxseg)
tp->snd_cwnd = maxseg;
}
- slot = (bbr_error_base_paceout + 1) << bbr->oerror_cnt;
+ pacing_delay = (bbr_error_base_paceout + 1) << bbr->oerror_cnt;
BBR_STAT_INC(bbr_saw_enobuf);
if (bbr->bbr_hdrw_pacing)
counter_u64_add(bbr_hdwr_pacing_enobuf, 1);
@@ -13807,18 +13806,18 @@ nomore:
}
/*
* Nuke all other things that can interfere
- * with slot
+ * with pacing delay
*/
if ((tot_len + len) && (len >= tp->t_maxseg)) {
- slot = bbr_get_pacing_delay(bbr,
+ pacing_delay = bbr_get_pacing_delay(bbr,
bbr->r_ctl.rc_bbr_hptsi_gain,
(tot_len + len), cts, 0);
- if (slot < bbr_error_base_paceout)
- slot = (bbr_error_base_paceout + 2) << bbr->oerror_cnt;
+ if (pacing_delay < bbr_error_base_paceout)
+ pacing_delay = (bbr_error_base_paceout + 2) << bbr->oerror_cnt;
} else
- slot = (bbr_error_base_paceout + 2) << bbr->oerror_cnt;
+ pacing_delay = (bbr_error_base_paceout + 2) << bbr->oerror_cnt;
bbr->rc_output_starts_timer = 1;
- bbr_start_hpts_timer(bbr, tp, cts, 10, slot,
+ bbr_start_hpts_timer(bbr, tp, cts, 10, pacing_delay,
tot_len);
return (error);
}
@@ -13836,9 +13835,9 @@ nomore:
}
/* FALLTHROUGH */
default:
- slot = (bbr_error_base_paceout + 3) << bbr->oerror_cnt;
+ pacing_delay = (bbr_error_base_paceout + 3) << bbr->oerror_cnt;
bbr->rc_output_starts_timer = 1;
- bbr_start_hpts_timer(bbr, tp, cts, 11, slot, 0);
+ bbr_start_hpts_timer(bbr, tp, cts, 11, pacing_delay, 0);
return (error);
}
#ifdef STATS
@@ -13976,12 +13975,12 @@ skip_again:
tcp_log_end_status(tp, TCP_EI_STATUS_SERVER_RST);
if (((flags & (TH_RST | TH_SYN | TH_FIN)) == 0) && tot_len) {
/*
- * Calculate/Re-Calculate the hptsi slot in usecs based on
+ * Calculate/Re-Calculate the hptsi timeout in usecs based on
* what we have sent so far
*/
- slot = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0);
+ pacing_delay = bbr_get_pacing_delay(bbr, bbr->r_ctl.rc_bbr_hptsi_gain, tot_len, cts, 0);
if (bbr->rc_no_pacing)
- slot = 0;
+ pacing_delay = 0;
}
tp->t_flags &= ~(TF_ACKNOW | TF_DELACK);
enobufs:
@@ -13994,8 +13993,8 @@ enobufs:
(more_to_rxt ||
((bbr->r_ctl.rc_resend = bbr_check_recovery_mode(tp, bbr, cts)) != NULL))) {
/* Rack cheats and shotguns out all rxt's 1ms apart */
- if (slot > 1000)
- slot = 1000;
+ if (pacing_delay > 1000)
+ pacing_delay = 1000;
}
if (bbr->bbr_hdrw_pacing && (bbr->hw_pacing_set == 0)) {
/*
@@ -14009,7 +14008,7 @@ enobufs:
tcp_bbr_tso_size_check(bbr, cts);
}
}
- bbr_start_hpts_timer(bbr, tp, cts, 12, slot, tot_len);
+ bbr_start_hpts_timer(bbr, tp, cts, 12, pacing_delay, tot_len);
if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
/* Make sure snd_nxt is drug up */
tp->snd_nxt = tp->snd_max;
@@ -14127,7 +14126,7 @@ bbr_switch_failed(struct tcpcb *tp)
}
} else
toval = HPTS_USECS_PER_SLOT;
- tcp_hpts_insert(tp, HPTS_USEC_TO_SLOTS(toval), &diag);
+ tcp_hpts_insert(tp, toval, &diag);
bbr_log_hpts_diag(bbr, cts, &diag);
}
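The bbr.c side is the same rename carried through every call site: the
values handed to tcp_hpts_insert() were already microseconds, so only the
HPTS_USEC_TO_SLOTS() wrapping goes away. Condensed (and with the guard
conditions simplified) from bbr_start_hpts_timer(), the two insertion paths:

  if (pacing_delay) {
          /* Pacing a future send; remember when the pacer started. */
          bbr->rc_pacer_started = cts;
          tcp_hpts_insert(tp, pacing_delay, &diag);
  } else if (hpts_timeout) {
          /* No pacing, just a timer (RXT, TLP, keep-alive, ...). */
          tcp_hpts_insert(tp, hpts_timeout, &diag);
  }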
diff --git a/sys/netinet/tcp_stacks/rack.c b/sys/netinet/tcp_stacks/rack.c
index cff07b4932d9..c7962b57a69e 100644
--- a/sys/netinet/tcp_stacks/rack.c
+++ b/sys/netinet/tcp_stacks/rack.c
@@ -250,11 +250,11 @@ static int32_t rack_non_rxt_use_cr = 0; /* does a non-rxt in recovery use the co
static int32_t rack_persist_min = 250000; /* 250usec */
static int32_t rack_persist_max = 2000000; /* 2 Second in usec's */
static int32_t rack_honors_hpts_min_to = 1; /* Do we honor the hpts minimum time out for pacing timers */
-static uint32_t rack_max_reduce = 10; /* Percent we can reduce slot by */
+static uint32_t rack_max_reduce = 10; /* Percent we can reduce pacing delay by */
static int32_t rack_sack_not_required = 1; /* set to one to allow non-sack to use rack */
static int32_t rack_limit_time_with_srtt = 0;
static int32_t rack_autosndbuf_inc = 20; /* In percentage form */
-static int32_t rack_enobuf_hw_boost_mult = 0; /* How many times the hw rate we boost slot using time_between */
+static int32_t rack_enobuf_hw_boost_mult = 0; /* How many times the hw rate we boost pacing delay using time_between */
static int32_t rack_enobuf_hw_max = 12000; /* 12 ms in usecs */
static int32_t rack_enobuf_hw_min = 10000; /* 10 ms in usecs */
static int32_t rack_hw_rwnd_factor = 2; /* How many max_segs the rwnd must be before we hold off sending */
@@ -278,7 +278,7 @@ static int32_t rack_hptsi_segments = 40;
static int32_t rack_rate_sample_method = USE_RTT_LOW;
static int32_t rack_pace_every_seg = 0;
static int32_t rack_delayed_ack_time = 40000; /* 40ms in usecs */
-static int32_t rack_slot_reduction = 4;
+static int32_t rack_pacing_delay_reduction = 4;
static int32_t rack_wma_divisor = 8; /* For WMA calculation */
static int32_t rack_cwnd_block_ends_measure = 0;
static int32_t rack_rwnd_block_ends_measure = 0;
@@ -478,7 +478,7 @@ rack_log_alt_to_to_cancel(struct tcp_rack *rack,
uint16_t flex7, uint8_t mod);
static void
-rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t slot,
+rack_log_pacing_delay_calc(struct tcp_rack *rack, uint32_t len, uint32_t pacing_delay,
uint64_t bw_est, uint64_t bw, uint64_t len_time, int method, int line,
struct rack_sendmap *rsm, uint8_t quality);
static struct rack_sendmap *
@@ -1107,7 +1107,7 @@ rack_init_sysctls(void)
SYSCTL_ADD_S32(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_pacing),
OID_AUTO, "burst_reduces", CTLFLAG_RW,
- &rack_slot_reduction, 4,
+ &rack_pacing_delay_reduction, 4,
"When doing only burst mitigation what is the reduce divisor");
SYSCTL_ADD_S32(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_sysctl_root),
@@ -1399,7 +1399,7 @@ rack_init_sysctls(void)
SYSCTL_CHILDREN(rack_timers),
OID_AUTO, "hpts_max_reduce", CTLFLAG_RW,
&rack_max_reduce, 10,
- "Max percentage we will reduce slot by for pacing when we are behind");
+ "Max percentage we will reduce pacing delay by for pacing when we are behind");
SYSCTL_ADD_U32(&rack_sysctl_ctx,
SYSCTL_CHILDREN(rack_timers),
OID_AUTO, "persmin", CTLFLAG_RW,
@@ -2700,7 +2700,7 @@ rack_log_retran_reason(struct tcp_rack *rack, struct rack_sendmap *rsm, uint32_t
}
static void
-rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot, uint8_t which)
+rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t pacing_delay, uint8_t which)
{
if (tcp_bblogging_on(rack->rc_tp)) {
union tcp_log_stackspecific log;
@@ -2710,7 +2710,7 @@ rack_log_to_start(struct tcp_rack *rack, uint32_t cts, uint32_t to, int32_t slot
log.u_bbr.flex1 = rack->rc_tp->t_srtt;
log.u_bbr.flex2 = to;
log.u_bbr.flex3 = rack->r_ctl.rc_hpts_flags;
- log.u_bbr.flex4 = slot;
+ log.u_bbr.flex4 = pacing_delay;
log.u_bbr.flex5 = rack->rc_tp->t_hpts_slot;
log.u_bbr.flex6 = rack->rc_tp->t_rxtcur;
log.u_bbr.flex7 = rack->rc_in_persist;
@@ -3034,14 +3034,14 @@ rack_log_progress_event(struct tcp_rack *rack, struct tcpcb *tp, uint32_t tick,
}
static void
-rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t slot, uint32_t cts, struct timeval *tv, int line)
+rack_log_type_bbrsnd(struct tcp_rack *rack, uint32_t len, uint32_t pacing_delay, uint32_t cts, struct timeval *tv, int line)
{
if (rack_verbose_logging && tcp_bblogging_on(rack->rc_tp)) {
union tcp_log_stackspecific log;
memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
- log.u_bbr.flex1 = slot;
+ log.u_bbr.flex1 = pacing_delay;
if (rack->rack_no_prr)
log.u_bbr.flex2 = 0;
else
@@ -3139,7 +3139,7 @@ rack_log_type_pacing_sizes(struct tcpcb *tp, struct tcp_rack *rack, uint32_t arg
}
static void
-rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot,
+rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t pacing_delay,
uint8_t hpts_calling, int reason, uint32_t cwnd_to_use)
{
if (tcp_bblogging_on(rack->rc_tp)) {
@@ -3148,7 +3148,7 @@ rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, ui
memset(&log, 0, sizeof(log));
log.u_bbr.inhpts = tcp_in_hpts(rack->rc_tp);
- log.u_bbr.flex1 = slot;
+ log.u_bbr.flex1 = pacing_delay;
log.u_bbr.flex2 = rack->r_ctl.rc_hpts_flags;
log.u_bbr.flex4 = reason;
if (rack->rack_no_prr)
@@ -6482,7 +6482,7 @@ rack_log_hpts_diag(struct tcp_rack *rack, uint32_t cts,
log.u_bbr.flex2 = diag->p_cur_slot;
log.u_bbr.flex3 = diag->slot_req;
log.u_bbr.flex4 = diag->inp_hptsslot;
- log.u_bbr.flex5 = diag->slot_remaining;
+ log.u_bbr.flex5 = diag->time_remaining;
log.u_bbr.flex6 = diag->need_new_to;
log.u_bbr.flex7 = diag->p_hpts_active;
log.u_bbr.flex8 = diag->p_on_min_sleep;
@@ -6529,14 +6529,14 @@ rack_log_wakeup(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb, uin
static void
rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
- int32_t slot, uint32_t tot_len_this_send, int sup_rack)
+ int32_t usecs, uint32_t tot_len_this_send, int sup_rack)
{
struct hpts_diag diag;
struct inpcb *inp = tptoinpcb(tp);
struct timeval tv;
uint32_t delayed_ack = 0;
uint32_t hpts_timeout;
- uint32_t entry_slot = slot;
+ uint32_t entry_usecs = usecs;
uint8_t stopped;
uint32_t left = 0;
uint32_t us_cts;
@@ -6557,7 +6557,7 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
rack->r_ctl.rc_hpts_flags = 0;
us_cts = tcp_get_usecs(&tv);
/* Now early/late accounting */
- rack_log_pacing_delay_calc(rack, entry_slot, slot, 0, 0, 0, 26, __LINE__, NULL, 0);
+ rack_log_pacing_delay_calc(rack, entry_usecs, usecs, 0, 0, 0, 26, __LINE__, NULL, 0);
if (rack->r_early && (rack->rc_ack_can_sendout_data == 0)) {
/*
* We have a early carry over set,
@@ -6568,7 +6568,7 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
* penalize the next timer for being awoke
* by an ack aka the rc_agg_early (non-paced mode).
*/
- slot += rack->r_ctl.rc_agg_early;
+ usecs += rack->r_ctl.rc_agg_early;
rack->r_early = 0;
rack->r_ctl.rc_agg_early = 0;
}
@@ -6580,29 +6580,29 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
* really depends on what
* the current pacing time is.
*/
- if (rack->r_ctl.rc_agg_delayed >= slot) {
+ if (rack->r_ctl.rc_agg_delayed >= usecs) {
/*
* We can't compensate for it all.
* And we have to have some time
* on the clock. We always have a min
- * 10 slots (10 x 10 i.e. 100 usecs).
+ * of HPTS_USECS_PER_SLOT (i.e. 10 usecs).
*/
- if (slot <= HPTS_USECS_PER_SLOT) {
+ if (usecs <= HPTS_USECS_PER_SLOT) {
/* We gain delay */
- rack->r_ctl.rc_agg_delayed += (HPTS_USECS_PER_SLOT - slot);
- slot = HPTS_USECS_PER_SLOT;
+ rack->r_ctl.rc_agg_delayed += (HPTS_USECS_PER_SLOT - usecs);
+ usecs = HPTS_USECS_PER_SLOT;
} else {
/* We take off some */
- rack->r_ctl.rc_agg_delayed -= (slot - HPTS_USECS_PER_SLOT);
- slot = HPTS_USECS_PER_SLOT;
+ rack->r_ctl.rc_agg_delayed -= (usecs - HPTS_USECS_PER_SLOT);
+ usecs = HPTS_USECS_PER_SLOT;
}
} else {
- slot -= rack->r_ctl.rc_agg_delayed;
+ usecs -= rack->r_ctl.rc_agg_delayed;
rack->r_ctl.rc_agg_delayed = 0;
/* Make sure we have 100 useconds at minimum */
- if (slot < HPTS_USECS_PER_SLOT) {
- rack->r_ctl.rc_agg_delayed = HPTS_USECS_PER_SLOT - slot;
- slot = HPTS_USECS_PER_SLOT;
+ if (usecs < HPTS_USECS_PER_SLOT) {
+ rack->r_ctl.rc_agg_delayed = HPTS_USECS_PER_SLOT - usecs;
+ usecs = HPTS_USECS_PER_SLOT;
}
if (rack->r_ctl.rc_agg_delayed == 0)
rack->r_late = 0;
*** 561 LINES SKIPPED ***