svn commit: r307901 - in head/sys/netinet: . cc tcp_stacks
Hiren Panchasara
hiren at FreeBSD.org
Tue Oct 25 05:45:50 UTC 2016
Author: hiren
Date: Tue Oct 25 05:45:47 2016
New Revision: 307901
URL: https://svnweb.freebsd.org/changeset/base/307901
Log:
FreeBSD tcp stack used to inform respective congestion control module about the
loss event but not use or obey the recommendations, i.e. values set by it in some
cases.
Here is an attempt to solve that confusion by following relevant RFCs/drafts.
Stack only sets congestion window/slow start threshold values when there is no
CC module available to take that action. All CC modules are inspected and
updated when needed to take appropriate action on loss.
tcp_stacks/fastpath module has been updated to adapt these changes.
Note: Probably, the most significant change would be to not bring congestion
window down to 1MSS on a loss signaled by 3-duplicate acks and letting
respective CC decide that value.
In collaboration with: Matt Macy <mmacy at nextbsd dot org>
Discussed on: transport@ mailing list
Reviewed by: jtl
MFC after: 1 month
Sponsored by: Limelight Networks
Differential Revision: https://reviews.freebsd.org/D8225
Modified:
head/sys/netinet/cc/cc_cdg.c
head/sys/netinet/cc/cc_chd.c
head/sys/netinet/cc/cc_cubic.c
head/sys/netinet/cc/cc_dctcp.c
head/sys/netinet/cc/cc_htcp.c
head/sys/netinet/cc/cc_newreno.c
head/sys/netinet/tcp_input.c
head/sys/netinet/tcp_stacks/fastpath.c
Modified: head/sys/netinet/cc/cc_cdg.c
==============================================================================
--- head/sys/netinet/cc/cc_cdg.c Tue Oct 25 05:07:51 2016 (r307900)
+++ head/sys/netinet/cc/cc_cdg.c Tue Oct 25 05:45:47 2016 (r307901)
@@ -431,6 +431,11 @@ static void
cdg_cong_signal(struct cc_var *ccv, uint32_t signal_type)
{
struct cdg *cdg_data = ccv->cc_data;
+ uint32_t cwin;
+ u_int mss;
+
+ cwin = CCV(ccv, snd_cwnd);
+ mss = CCV(ccv, t_maxseg);
switch(signal_type) {
case CC_CDG_DELAY:
@@ -448,7 +453,7 @@ cdg_cong_signal(struct cc_var *ccv, uint
*/
if (IN_CONGRECOVERY(CCV(ccv, t_flags)) ||
cdg_data->queue_state < CDG_Q_FULL) {
- CCV(ccv, snd_ssthresh) = CCV(ccv, snd_cwnd);
+ CCV(ccv, snd_ssthresh) = cwin;
CCV(ccv, snd_recover) = CCV(ccv, snd_max);
} else {
/*
@@ -461,13 +466,17 @@ cdg_cong_signal(struct cc_var *ccv, uint
cdg_data->shadow_w, RENO_BETA);
CCV(ccv, snd_ssthresh) = max(cdg_data->shadow_w,
- cdg_window_decrease(ccv, CCV(ccv, snd_cwnd),
- V_cdg_beta_loss));
+ cdg_window_decrease(ccv, cwin, V_cdg_beta_loss));
+ CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
cdg_data->window_incr = cdg_data->rtt_count = 0;
}
ENTER_RECOVERY(CCV(ccv, t_flags));
break;
+ case CC_RTO:
+ CCV(ccv, snd_ssthresh) = max(2*mss, cwin/2);
+ CCV(ccv, snd_cwnd) = mss;
+ break;
default:
newreno_cc_algo.cong_signal(ccv, signal_type);
break;
Modified: head/sys/netinet/cc/cc_chd.c
==============================================================================
--- head/sys/netinet/cc/cc_chd.c Tue Oct 25 05:07:51 2016 (r307900)
+++ head/sys/netinet/cc/cc_chd.c Tue Oct 25 05:45:47 2016 (r307901)
@@ -330,10 +330,14 @@ chd_cong_signal(struct cc_var *ccv, uint
struct ertt *e_t;
struct chd *chd_data;
int qdly;
+ uint32_t cwin;
+ u_int mss;
e_t = khelp_get_osd(CCV(ccv, osd), ertt_id);
chd_data = ccv->cc_data;
qdly = imax(e_t->rtt, chd_data->maxrtt_in_rtt) - e_t->minrtt;
+ cwin = CCV(ccv, snd_cwnd);
+ mss = CCV(ccv, t_maxseg);
switch(signal_type) {
case CC_CHD_DELAY:
@@ -373,6 +377,10 @@ chd_cong_signal(struct cc_var *ccv, uint
}
ENTER_FASTRECOVERY(CCV(ccv, t_flags));
break;
+ case CC_RTO:
+ CCV(ccv, snd_ssthresh) = max(2*mss, cwin/2);
+ CCV(ccv, snd_cwnd) = mss;
+ break;
default:
newreno_cc_algo.cong_signal(ccv, signal_type);
Modified: head/sys/netinet/cc/cc_cubic.c
==============================================================================
--- head/sys/netinet/cc/cc_cubic.c Tue Oct 25 05:07:51 2016 (r307900)
+++ head/sys/netinet/cc/cc_cubic.c Tue Oct 25 05:45:47 2016 (r307901)
@@ -225,8 +225,12 @@ static void
cubic_cong_signal(struct cc_var *ccv, uint32_t type)
{
struct cubic *cubic_data;
+ uint32_t cwin;
+ u_int mss;
cubic_data = ccv->cc_data;
+ cwin = CCV(ccv, snd_cwnd);
+ mss = CCV(ccv, t_maxseg);
switch (type) {
case CC_NDUPACK:
@@ -235,7 +239,8 @@ cubic_cong_signal(struct cc_var *ccv, ui
cubic_ssthresh_update(ccv);
cubic_data->num_cong_events++;
cubic_data->prev_max_cwnd = cubic_data->max_cwnd;
- cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
+ cubic_data->max_cwnd = cwin;
+ CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
}
ENTER_RECOVERY(CCV(ccv, t_flags));
}
@@ -246,7 +251,7 @@ cubic_cong_signal(struct cc_var *ccv, ui
cubic_ssthresh_update(ccv);
cubic_data->num_cong_events++;
cubic_data->prev_max_cwnd = cubic_data->max_cwnd;
- cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
+ cubic_data->max_cwnd = cwin;
cubic_data->t_last_cong = ticks;
CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
ENTER_CONGRECOVERY(CCV(ccv, t_flags));
@@ -261,9 +266,13 @@ cubic_cong_signal(struct cc_var *ccv, ui
* chance the first one is a false alarm and may not indicate
* congestion.
*/
- if (CCV(ccv, t_rxtshift) >= 2)
+ if (CCV(ccv, t_rxtshift) >= 2) {
cubic_data->num_cong_events++;
cubic_data->t_last_cong = ticks;
+ cubic_ssthresh_update(ccv);
+ cubic_data->max_cwnd = cwin;
+ CCV(ccv, snd_cwnd) = mss;
+ }
break;
}
}
Modified: head/sys/netinet/cc/cc_dctcp.c
==============================================================================
--- head/sys/netinet/cc/cc_dctcp.c Tue Oct 25 05:07:51 2016 (r307900)
+++ head/sys/netinet/cc/cc_dctcp.c Tue Oct 25 05:45:47 2016 (r307901)
@@ -230,10 +230,11 @@ static void
dctcp_cong_signal(struct cc_var *ccv, uint32_t type)
{
struct dctcp *dctcp_data;
- u_int win, mss;
+ uint32_t cwin;
+ u_int mss;
dctcp_data = ccv->cc_data;
- win = CCV(ccv, snd_cwnd);
+ cwin = CCV(ccv, snd_cwnd);
mss = CCV(ccv, t_maxseg);
switch (type) {
@@ -241,16 +242,16 @@ dctcp_cong_signal(struct cc_var *ccv, ui
if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
CCV(ccv, snd_ssthresh) = mss *
- max(win / 2 / mss, 2);
+ max(cwin / 2 / mss, 2);
dctcp_data->num_cong_events++;
} else {
/* cwnd has already updated as congestion
* recovery. Reverse cwnd value using
* snd_cwnd_prev and recalculate snd_ssthresh
*/
- win = CCV(ccv, snd_cwnd_prev);
+ cwin = CCV(ccv, snd_cwnd_prev);
CCV(ccv, snd_ssthresh) =
- max(win / 2 / mss, 2) * mss;
+ max(cwin / 2 / mss, 2) * mss;
}
ENTER_RECOVERY(CCV(ccv, t_flags));
}
@@ -260,18 +261,18 @@ dctcp_cong_signal(struct cc_var *ccv, ui
* Save current snd_cwnd when the host encounters both
* congestion recovery and fast recovery.
*/
- CCV(ccv, snd_cwnd_prev) = win;
+ CCV(ccv, snd_cwnd_prev) = cwin;
if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
if (V_dctcp_slowstart &&
dctcp_data->num_cong_events++ == 0) {
CCV(ccv, snd_ssthresh) =
- mss * max(win / 2 / mss, 2);
+ mss * max(cwin / 2 / mss, 2);
dctcp_data->alpha = MAX_ALPHA_VALUE;
dctcp_data->bytes_ecn = 0;
dctcp_data->bytes_total = 0;
dctcp_data->save_sndnxt = CCV(ccv, snd_nxt);
} else
- CCV(ccv, snd_ssthresh) = max((win - ((win *
+ CCV(ccv, snd_ssthresh) = max((cwin - ((cwin *
dctcp_data->alpha) >> 11)) / mss, 2) * mss;
CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
ENTER_CONGRECOVERY(CCV(ccv, t_flags));
@@ -284,6 +285,8 @@ dctcp_cong_signal(struct cc_var *ccv, ui
dctcp_update_alpha(ccv);
dctcp_data->save_sndnxt += CCV(ccv, t_maxseg);
dctcp_data->num_cong_events++;
+ CCV(ccv, snd_ssthresh) = max(2 * mss, cwin / 2);
+ CCV(ccv, snd_cwnd) = mss;
}
break;
}
Modified: head/sys/netinet/cc/cc_htcp.c
==============================================================================
--- head/sys/netinet/cc/cc_htcp.c Tue Oct 25 05:07:51 2016 (r307900)
+++ head/sys/netinet/cc/cc_htcp.c Tue Oct 25 05:45:47 2016 (r307901)
@@ -271,8 +271,12 @@ static void
htcp_cong_signal(struct cc_var *ccv, uint32_t type)
{
struct htcp *htcp_data;
+ uint32_t cwin;
+ u_int mss;
htcp_data = ccv->cc_data;
+ cwin = CCV(ccv, snd_cwnd);
+ mss = CCV(ccv, t_maxseg);
switch (type) {
case CC_NDUPACK:
@@ -287,8 +291,9 @@ htcp_cong_signal(struct cc_var *ccv, uin
(htcp_data->maxrtt - htcp_data->minrtt) *
95) / 100;
htcp_ssthresh_update(ccv);
+ CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
htcp_data->t_last_cong = ticks;
- htcp_data->prev_cwnd = CCV(ccv, snd_cwnd);
+ htcp_data->prev_cwnd = cwin;
}
ENTER_RECOVERY(CCV(ccv, t_flags));
}
@@ -305,7 +310,7 @@ htcp_cong_signal(struct cc_var *ccv, uin
htcp_ssthresh_update(ccv);
CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
htcp_data->t_last_cong = ticks;
- htcp_data->prev_cwnd = CCV(ccv, snd_cwnd);
+ htcp_data->prev_cwnd = cwin;
ENTER_CONGRECOVERY(CCV(ccv, t_flags));
}
break;
@@ -320,6 +325,8 @@ htcp_cong_signal(struct cc_var *ccv, uin
*/
if (CCV(ccv, t_rxtshift) >= 2)
htcp_data->t_last_cong = ticks;
+ CCV(ccv, snd_ssthresh) = max(2 * mss, cwin / 2);
+ CCV(ccv, snd_cwnd) = mss;
break;
}
}
Modified: head/sys/netinet/cc/cc_newreno.c
==============================================================================
--- head/sys/netinet/cc/cc_newreno.c Tue Oct 25 05:07:51 2016 (r307900)
+++ head/sys/netinet/cc/cc_newreno.c Tue Oct 25 05:45:47 2016 (r307901)
@@ -182,30 +182,39 @@ newreno_after_idle(struct cc_var *ccv)
static void
newreno_cong_signal(struct cc_var *ccv, uint32_t type)
{
- u_int win;
+ uint32_t cwin;
+ u_int mss;
+
+ cwin = CCV(ccv, snd_cwnd);
+ mss = CCV(ccv, t_maxseg);
/* Catch algos which mistakenly leak private signal types. */
KASSERT((type & CC_SIGPRIVMASK) == 0,
("%s: congestion signal type 0x%08x is private\n", __func__, type));
- win = max(CCV(ccv, snd_cwnd) / 2 / CCV(ccv, t_maxseg), 2) *
- CCV(ccv, t_maxseg);
+ cwin = max(2*mss, cwin/2);
switch (type) {
case CC_NDUPACK:
if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
- if (!IN_CONGRECOVERY(CCV(ccv, t_flags)))
- CCV(ccv, snd_ssthresh) = win;
+ if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
+ CCV(ccv, snd_ssthresh) = cwin;
+ CCV(ccv, snd_cwnd) = cwin;
+ }
ENTER_RECOVERY(CCV(ccv, t_flags));
}
break;
case CC_ECN:
if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
- CCV(ccv, snd_ssthresh) = win;
- CCV(ccv, snd_cwnd) = win;
+ CCV(ccv, snd_ssthresh) = cwin;
+ CCV(ccv, snd_cwnd) = cwin;
ENTER_CONGRECOVERY(CCV(ccv, t_flags));
}
break;
+ case CC_RTO:
+ CCV(ccv, snd_ssthresh) = cwin;
+ CCV(ccv, snd_cwnd) = mss;
+ break;
}
}
Modified: head/sys/netinet/tcp_input.c
==============================================================================
--- head/sys/netinet/tcp_input.c Tue Oct 25 05:07:51 2016 (r307900)
+++ head/sys/netinet/tcp_input.c Tue Oct 25 05:45:47 2016 (r307901)
@@ -438,9 +438,15 @@ cc_cong_signal(struct tcpcb *tp, struct
tp->t_dupacks = 0;
tp->t_bytes_acked = 0;
EXIT_RECOVERY(tp->t_flags);
- tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
- maxseg) * maxseg;
- tp->snd_cwnd = maxseg;
+ if (CC_ALGO(tp)->cong_signal == NULL) {
+ /*
+ * RFC5681 Section 3.1
+ * ssthresh = max (FlightSize / 2, 2*SMSS) eq (4)
+ */
+ tp->snd_ssthresh =
+ max((tp->snd_max - tp->snd_una) / 2, 2 * maxseg);
+ tp->snd_cwnd = maxseg;
+ }
break;
case CC_RTO_ERR:
TCPSTAT_INC(tcps_sndrexmitbad);
@@ -2613,6 +2619,15 @@ tcp_do_segment(struct mbuf *m, struct tc
if (awnd < tp->snd_ssthresh) {
tp->snd_cwnd += maxseg;
+ /*
+ * RFC5681 Section 3.2 talks about cwnd
+ * inflation on additional dupacks and
+ * deflation on recovering from loss.
+ *
+ * We keep cwnd into check so that
+ * we don't have to 'deflate' it when we
+ * get out of recovery.
+ */
if (tp->snd_cwnd > tp->snd_ssthresh)
tp->snd_cwnd = tp->snd_ssthresh;
}
@@ -2652,19 +2667,22 @@ tcp_do_segment(struct mbuf *m, struct tc
TCPSTAT_INC(
tcps_sack_recovery_episode);
tp->sack_newdata = tp->snd_nxt;
- tp->snd_cwnd = maxseg;
+ if (CC_ALGO(tp)->cong_signal == NULL)
+ tp->snd_cwnd = maxseg;
(void) tp->t_fb->tfb_tcp_output(tp);
goto drop;
}
tp->snd_nxt = th->th_ack;
- tp->snd_cwnd = maxseg;
+ if (CC_ALGO(tp)->cong_signal == NULL)
+ tp->snd_cwnd = maxseg;
(void) tp->t_fb->tfb_tcp_output(tp);
KASSERT(tp->snd_limited <= 2,
("%s: tp->snd_limited too big",
__func__));
- tp->snd_cwnd = tp->snd_ssthresh +
- maxseg *
- (tp->t_dupacks - tp->snd_limited);
+ if (CC_ALGO(tp)->cong_signal == NULL)
+ tp->snd_cwnd = tp->snd_ssthresh +
+ maxseg *
+ (tp->t_dupacks - tp->snd_limited);
if (SEQ_GT(onxt, tp->snd_nxt))
tp->snd_nxt = onxt;
goto drop;
Modified: head/sys/netinet/tcp_stacks/fastpath.c
==============================================================================
--- head/sys/netinet/tcp_stacks/fastpath.c Tue Oct 25 05:07:51 2016 (r307900)
+++ head/sys/netinet/tcp_stacks/fastpath.c Tue Oct 25 05:45:47 2016 (r307901)
@@ -1119,6 +1119,15 @@ tcp_do_slowpath(struct mbuf *m, struct t
if (awnd < tp->snd_ssthresh) {
tp->snd_cwnd += tp->t_maxseg;
+ /*
+ * RFC5681 Section 3.2 talks about cwnd
+ * inflation on additional dupacks and
+ * deflation on recovering from loss.
+ *
+ * We keep cwnd into check so that
+ * we don't have to 'deflate' it when we
+ * get out of recovery.
+ */
if (tp->snd_cwnd > tp->snd_ssthresh)
tp->snd_cwnd = tp->snd_ssthresh;
}
@@ -1158,19 +1167,22 @@ tcp_do_slowpath(struct mbuf *m, struct t
TCPSTAT_INC(
tcps_sack_recovery_episode);
tp->sack_newdata = tp->snd_nxt;
- tp->snd_cwnd = tp->t_maxseg;
+ if (CC_ALGO(tp)->cong_signal == NULL)
+ tp->snd_cwnd = tp->t_maxseg;
(void) tp->t_fb->tfb_tcp_output(tp);
goto drop;
}
tp->snd_nxt = th->th_ack;
- tp->snd_cwnd = tp->t_maxseg;
+ if (CC_ALGO(tp)->cong_signal == NULL)
+ tp->snd_cwnd = tp->t_maxseg;
(void) tp->t_fb->tfb_tcp_output(tp);
KASSERT(tp->snd_limited <= 2,
("%s: tp->snd_limited too big",
__func__));
- tp->snd_cwnd = tp->snd_ssthresh +
- tp->t_maxseg *
- (tp->t_dupacks - tp->snd_limited);
+ if (CC_ALGO(tp)->cong_signal == NULL)
+ tp->snd_cwnd = tp->snd_ssthresh +
+ tp->t_maxseg *
+ (tp->t_dupacks - tp->snd_limited);
if (SEQ_GT(onxt, tp->snd_nxt))
tp->snd_nxt = onxt;
goto drop;
More information about the svn-src-head
mailing list