BUG/MINOR: quic: ensure cwnd limits are always enforced

The congestion window is limited by minimal and maximum values which can
never be exceeded. The minimal value is hardcoded to 2 datagrams, as
recommended by the specification. The maximum value is set via the
haproxy configuration.

These limits must be respected each time the congestion window size is
adjusted. However, on some rare occasions, they were not enforced. Fix
this by implementing wrappers to set or increment the congestion
window, which ensure the limits are always applied after the operation.

Additionally, the wrappers ensure that if the window reaches a new
maximum value, it is saved in the <cwnd_last_max> field.

This should be backported up to 2.6, after a brief period of
observation.
Amaury Denoyelle 2025-01-20 16:24:21 +01:00
parent c01d455288
commit 7bad88c35c
5 changed files with 46 additions and 37 deletions
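
As an illustration of the clamping rule the new wrappers apply, here is a
minimal, standalone sketch. The struct and the limit values below are
simplified stand-ins chosen for this example, not HAProxy's actual
quic_cc_path definition or its configured limits:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the cwnd-related fields of quic_cc_path. */
struct path_sketch {
	uint64_t cwnd;          /* current congestion window, in bytes */
	uint64_t cwnd_last_max; /* highest window value reached so far */
	uint64_t limit_min;     /* floor: 2 datagrams, per the RFC recommendation */
	uint64_t limit_max;     /* ceiling: taken from the configuration */
};

/* Mirrors the logic of quic_cc_path_set(): clamp <val> into
 * [limit_min, limit_max], then record a new maximum if one was reached.
 */
static void path_set(struct path_sketch *p, uint64_t val)
{
	p->cwnd = val < p->limit_max ? val : p->limit_max;
	if (p->cwnd < p->limit_min)
		p->cwnd = p->limit_min;
	if (p->cwnd > p->cwnd_last_max)
		p->cwnd_last_max = p->cwnd;
}

int main(void)
{
	/* Illustrative limits: 1252-byte datagrams and a 480k ceiling. */
	struct path_sketch p = {
		.cwnd = 0, .cwnd_last_max = 0,
		.limit_min = 2 * 1252, .limit_max = 480 * 1024,
	};

	path_set(&p, 16384);            /* in range: taken as-is */
	printf("cwnd=%llu\n", (unsigned long long)p.cwnd);

	path_set(&p, 10 * 1024 * 1024); /* above the ceiling: clamped down */
	printf("cwnd=%llu last_max=%llu\n",
	       (unsigned long long)p.cwnd,
	       (unsigned long long)p.cwnd_last_max);

	path_set(&p, 0);                /* below the floor: raised to it */
	printf("cwnd=%llu last_max=%llu\n",
	       (unsigned long long)p.cwnd,
	       (unsigned long long)p.cwnd_last_max);
	return 0;
}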

include/haproxy/quic_cc.h

@@ -115,7 +115,9 @@ static inline size_t quic_cc_path_prep_data(struct quic_cc_path *path)
 	return path->cwnd - path->prep_in_flight;
 }
 
-int quic_cwnd_may_increase(const struct quic_cc_path *path);
+void quic_cc_path_reset(struct quic_cc_path *path);
+void quic_cc_path_set(struct quic_cc_path *path, uint64_t val);
+void quic_cc_path_inc(struct quic_cc_path *path, uint64_t val);
 
 #endif /* USE_QUIC */
 #endif /* _PROTO_QUIC_CC_H */

src/quic_cc.c

@@ -58,7 +58,7 @@ uint quic_cc_default_pacing_inter(const struct quic_cc *cc)
 }
 
 /* Returns true if congestion window on path ought to be increased. */
-int quic_cwnd_may_increase(const struct quic_cc_path *path)
+static int quic_cwnd_may_increase(const struct quic_cc_path *path)
 {
 	/* RFC 9002 7.8. Underutilizing the Congestion Window
 	 *
@@ -75,3 +75,32 @@ int quic_cwnd_may_increase(const struct quic_cc_path *path)
 	 */
 	return 2 * path->in_flight >= path->cwnd || path->cwnd < 16384;
 }
+
+/* Restore congestion window for <path> to its minimal value. */
+void quic_cc_path_reset(struct quic_cc_path *path)
+{
+	path->cwnd = path->limit_min;
+}
+
+/* Set congestion window for <path> to <val>. Min and max limits are enforced. */
+void quic_cc_path_set(struct quic_cc_path *path, uint64_t val)
+{
+	path->cwnd = QUIC_MIN(val, path->limit_max);
+	path->cwnd = QUIC_MAX(path->cwnd, path->limit_min);
+
+	path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
+}
+
+/* Increment congestion window for <path> with <val>. Min and max limits are
+ * enforced. Contrary to quic_cc_path_set(), increase is performed only if a
+ * certain minimal level of the window was already filled.
+ */
+void quic_cc_path_inc(struct quic_cc_path *path, uint64_t val)
+{
+	if (quic_cwnd_may_increase(path)) {
+		path->cwnd = QUIC_MIN(path->cwnd + val, path->limit_max);
+		path->cwnd = QUIC_MAX(path->cwnd, path->limit_min);
+
+		path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
+	}
+}
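
A note on the split between the two wrappers above: quic_cc_path_set()
clamps unconditionally, which fits recovery paths where the window must be
forced down to a computed value, while quic_cc_path_inc() additionally gates
growth behind quic_cwnd_may_increase(), the RFC 9002 7.8 check that keeps an
underutilized window from growing further. Both wrappers record any new
high-water mark in <cwnd_last_max>, which each algorithm previously had to
update by hand.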

src/quic_cc_bbr.c

@@ -419,7 +419,7 @@ static void bbr_save_cwnd(struct bbr *bbr, struct quic_cc_path *p)
 
 static void bbr_restore_cwnd(struct bbr *bbr, struct quic_cc_path *p)
 {
-	p->cwnd = MAX(p->cwnd, bbr->prior_cwnd);
+	quic_cc_path_set(p, MAX(p->cwnd, bbr->prior_cwnd));
 }
 
 /* <gain> must be provided in percents. */
@@ -560,7 +560,7 @@ static void bbr_set_cwnd(struct bbr *bbr, struct quic_cc_path *p, uint32_t acked
 	cwnd = bbr_bound_cwnd_for_probe_rtt(bbr, p, cwnd);
 	cwnd = bbr_bound_cwnd_for_model(bbr, p, cwnd);
 	/* Limitation by configuration (not in BBR RFC). */
-	p->cwnd = MIN(cwnd, p->limit_max);
+	quic_cc_path_set(p, cwnd);
 }
 
 static int bbr_init(struct quic_cc *cc)
@@ -1316,7 +1316,7 @@ static void bbr_handle_recovery(struct bbr *bbr, struct quic_cc_path *p,
 	bbr->round_count_at_recovery =
 		bbr->round_start ? bbr->round_count : bbr->round_count + 1;
 	bbr_save_cwnd(bbr, p);
-	p->cwnd = p->in_flight + MAX(acked, p->mtu);
+	quic_cc_path_set(p, p->in_flight + MAX(acked, p->mtu));
 	p->recovery_start_ts = bbr->recovery_start_ts;
 	bbr->recovery_start_ts = TICK_ETERNITY;
 }

src/quic_cc_cubic.c

@@ -383,11 +383,7 @@ static inline void quic_cubic_update(struct quic_cc *cc, uint32_t acked)
 		inc = W_est_inc;
 	}
 
-	if (quic_cwnd_may_increase(path)) {
-		path->cwnd += inc;
-		path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
-		path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
-	}
+	quic_cc_path_inc(path, inc);
  leave:
 	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
 }
@@ -434,7 +430,7 @@ static void quic_enter_recovery(struct quic_cc *cc)
 	}
 
 	c->ssthresh = (CUBIC_BETA_SCALED * path->cwnd) >> CUBIC_SCALE_FACTOR_SHIFT;
-	path->cwnd = QUIC_MAX(c->ssthresh, (uint32_t)path->limit_min);
+	quic_cc_path_set(path, c->ssthresh);
 	c->state = QUIC_CC_ST_RP;
 	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc, NULL, cc);
 }
@@ -456,10 +452,7 @@ static void quic_cc_cubic_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 			if (path->cwnd >= QUIC_CC_INFINITE_SSTHESH - acked)
 				goto out;
 
-			if (quic_cwnd_may_increase(path)) {
-				path->cwnd += acked;
-				path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
-			}
+			quic_cc_path_inc(path, acked);
 			quic_cc_hystart_track_min_rtt(cc, h, path->loss.latest_rtt);
 			if (ev->ack.pn >= h->wnd_end)
 				h->wnd_end = UINT64_MAX;
@@ -470,15 +463,11 @@ static void quic_cc_cubic_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 			}
 		}
 		else if (path->cwnd < QUIC_CC_INFINITE_SSTHESH - ev->ack.acked) {
-			if (quic_cwnd_may_increase(path)) {
-				path->cwnd += ev->ack.acked;
-				path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
-			}
+			quic_cc_path_inc(path, ev->ack.acked);
 		}
 		/* Exit to congestion avoidance if slow start threshold is reached. */
 		if (path->cwnd >= c->ssthresh)
 			c->state = QUIC_CC_ST_CA;
-		path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
 		break;
 
 	case QUIC_CC_EVT_LOSS:
@@ -551,10 +540,7 @@ static void quic_cc_cubic_cs_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 			if (path->cwnd >= QUIC_CC_INFINITE_SSTHESH - acked)
 				goto out;
 
-			if (quic_cwnd_may_increase(path)) {
-				path->cwnd += acked;
-				path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
-			}
+			quic_cc_path_inc(path, acked);
 			quic_cc_hystart_track_min_rtt(cc, h, path->loss.latest_rtt);
 			if (quic_cc_hystart_may_reenter_ss(h)) {
 				/* Exit to slow start */

src/quic_cc_newreno.c

@@ -55,7 +55,7 @@ static void quic_cc_nr_slow_start(struct quic_cc *cc)
 	struct nr *nr = quic_cc_priv(cc);
 
 	path = container_of(cc, struct quic_cc_path, cc);
-	path->cwnd = path->limit_min;
+	quic_cc_path_reset(path);
 	/* Re-entering slow start state. */
 	nr->state = QUIC_CC_ST_SS;
 	/* Recovery start time reset */
@@ -71,7 +71,7 @@ static void quic_cc_nr_enter_recovery(struct quic_cc *cc)
 	path = container_of(cc, struct quic_cc_path, cc);
 	nr->recovery_start_time = now_ms;
 	nr->ssthresh = path->cwnd >> 1;
-	path->cwnd = QUIC_MAX(nr->ssthresh, (uint32_t)path->limit_min);
+	quic_cc_path_set(path, nr->ssthresh);
 	nr->state = QUIC_CC_ST_RP;
 }
 
@@ -86,11 +86,7 @@ static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 	path = container_of(cc, struct quic_cc_path, cc);
 	switch (ev->type) {
 	case QUIC_CC_EVT_ACK:
-		if (quic_cwnd_may_increase(path)) {
-			path->cwnd += ev->ack.acked;
-			path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
-			path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
-		}
+		quic_cc_path_inc(path, ev->ack.acked);
 		/* Exit to congestion avoidance if slow start threshold is reached. */
 		if (path->cwnd > nr->ssthresh)
 			nr->state = QUIC_CC_ST_CA;
@@ -126,11 +122,7 @@ static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 		 */
 		acked = ev->ack.acked * path->mtu + nr->remain_acked;
 		nr->remain_acked = acked % path->cwnd;
-		if (quic_cwnd_may_increase(path)) {
-			path->cwnd += acked / path->cwnd;
-			path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
-			path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
-		}
+		quic_cc_path_inc(path, acked / path->cwnd);
 		break;
 	}
 
@@ -170,7 +162,7 @@ static void quic_cc_nr_rp_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 		nr->state = QUIC_CC_ST_CA;
 		nr->recovery_start_time = TICK_ETERNITY;
-		path->cwnd = nr->ssthresh;
+		quic_cc_path_set(path, nr->ssthresh);
 		break;
 
 	case QUIC_CC_EVT_LOSS:
 		/* Do nothing */