MINOR: quic: rename min/max fields for congestion window algo
There was some possible confusion between the fields storing the min and max limits which the congestion window size cannot exceed, and the field storing the maximum value previously reached by the window. Fix this by adopting a new naming scheme: the enforced limits are now named <limit_max>/<limit_min>, while the previously reached maximum value is renamed <cwnd_last_max>.

This should be backported up to 3.1.
parent 62dfe1fc87
commit 2eb1b0cd96
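To make the intent of the new names concrete, here is a minimal, self-contained sketch (not part of this patch; the struct and helper names below are hypothetical, only the renamed field names mirror the patch): <limit_min>/<limit_max> are hard bounds enforced on the window by clamping, while <cwnd_last_max> is a high-water mark that only records the largest window value seen so far.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct quic_cc_path, reduced to the fields
 * touched by this patch. */
struct cc_path_sketch {
	uint64_t cwnd;          /* current congestion window (bytes) */
	uint64_t cwnd_last_max; /* highest value <cwnd> has reached so far */
	uint64_t limit_max;     /* enforced upper bound on <cwnd> */
	uint64_t limit_min;     /* enforced lower bound on <cwnd> */
};

/* Window growth: clamp to <limit_max>, then refresh the high-water mark. */
static void sketch_cwnd_grow(struct cc_path_sketch *p, uint64_t inc)
{
	p->cwnd += inc;
	if (p->cwnd > p->limit_max)
		p->cwnd = p->limit_max;
	if (p->cwnd > p->cwnd_last_max)
		p->cwnd_last_max = p->cwnd;
}

/* Window reduction on loss: never shrink below <limit_min>;
 * <cwnd_last_max> is deliberately left untouched. */
static void sketch_cwnd_shrink(struct cc_path_sketch *p, uint64_t ssthresh)
{
	p->cwnd = ssthresh > p->limit_min ? ssthresh : p->limit_min;
}

int main(void)
{
	struct cc_path_sketch p = {
		.cwnd = 14720, .cwnd_last_max = 14720, /* illustrative values */
		.limit_max = 480 * 1024,               /* arbitrary configured max */
		.limit_min = 2 * 1252,                 /* two 1252-byte datagrams */
	};

	sketch_cwnd_grow(&p, 10000);
	sketch_cwnd_shrink(&p, p.cwnd / 2);
	printf("cwnd=%llu cwnd_last_max=%llu\n",
	       (unsigned long long)p.cwnd,
	       (unsigned long long)p.cwnd_last_max);
	return 0;
}

The same pattern appears throughout the hunks below: growth paths clamp with QUIC_MIN(path->limit_max, ...), recovery paths floor with QUIC_MAX(..., path->limit_min), and cwnd_last_max is only ever raised with QUIC_MAX().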
@@ -107,11 +107,11 @@ struct quic_cc_path {
 	/* Congestion window. */
 	uint64_t cwnd;
 	/* The current maximum congestion window value reached. */
-	uint64_t mcwnd;
-	/* The maximum congestion window value which can be reached. */
-	uint64_t max_cwnd;
-	/* Minimum congestion window. */
-	uint64_t min_cwnd;
+	uint64_t cwnd_last_max;
+	/* Max limit on congestion window size. */
+	uint64_t limit_max;
+	/* Min limit on congestion window size. */
+	uint64_t limit_min;
 	/* Prepared data to be sent (in bytes). */
 	uint64_t prep_in_flight;
 	/* Outstanding data (in bytes). */
@@ -91,9 +91,9 @@ static inline void quic_cc_path_init(struct quic_cc_path *path, int ipv4, unsign
 	*(size_t *)&path->mtu = max_dgram_sz;
 	path->initial_wnd = QUIC_MIN(10 * max_dgram_sz, QUIC_MAX(max_dgram_sz << 1, 14720U));
 	path->cwnd = path->initial_wnd;
-	path->mcwnd = path->cwnd;
-	path->max_cwnd = max_cwnd;
-	path->min_cwnd = max_dgram_sz << 1;
+	path->cwnd_last_max = path->cwnd;
+	path->limit_max = max_cwnd;
+	path->limit_min = max_dgram_sz << 1;
 	path->prep_in_flight = 0;
 	path->in_flight = 0;
 	path->ifae_pkts = 0;
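As a quick worked example of the initialization above (assuming a typical QUIC maximum datagram size of 1252 bytes, which is not stated in this patch): initial_wnd = QUIC_MIN(10 * 1252, QUIC_MAX(2 * 1252, 14720)) = QUIC_MIN(12520, 14720) = 12520 bytes, so cwnd and cwnd_last_max both start at 12520, limit_min is set to 2 * 1252 = 2504 bytes, and limit_max simply keeps the configured max_cwnd.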
@@ -552,7 +552,7 @@ static void bbr_set_cwnd(struct bbr *bbr, struct quic_cc_path *p, uint32_t acked
 	bbr_bound_cwnd_for_probe_rtt(bbr, p);
 	bbr_bound_cwnd_for_model(bbr, p);
 	/* Limitation by configuration (not in BBR RFC). */
-	p->cwnd = MIN(p->cwnd, p->max_cwnd);
+	p->cwnd = MIN(p->cwnd, p->limit_max);
 }
 
 static int bbr_init(struct quic_cc *cc)
@@ -385,8 +385,8 @@ static inline void quic_cubic_update(struct quic_cc *cc, uint32_t acked)
 
 	if (quic_cwnd_may_increase(path)) {
 		path->cwnd += inc;
-		path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
-		path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+		path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
+		path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
 	}
  leave:
 	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc);
@@ -434,7 +434,7 @@ static void quic_enter_recovery(struct quic_cc *cc)
 	}
 
 	c->ssthresh = (CUBIC_BETA_SCALED * path->cwnd) >> CUBIC_SCALE_FACTOR_SHIFT;
-	path->cwnd = QUIC_MAX(c->ssthresh, (uint32_t)path->min_cwnd);
+	path->cwnd = QUIC_MAX(c->ssthresh, (uint32_t)path->limit_min);
 	c->state = QUIC_CC_ST_RP;
 	TRACE_LEAVE(QUIC_EV_CONN_CC, cc->qc, NULL, cc);
 }
@@ -458,7 +458,7 @@ static void quic_cc_cubic_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 
 		if (quic_cwnd_may_increase(path)) {
 			path->cwnd += acked;
-			path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+			path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
 		}
 		quic_cc_hystart_track_min_rtt(cc, h, path->loss.latest_rtt);
 		if (ev->ack.pn >= h->wnd_end)
@@ -472,13 +472,13 @@ static void quic_cc_cubic_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 		else if (path->cwnd < QUIC_CC_INFINITE_SSTHESH - ev->ack.acked) {
 			if (quic_cwnd_may_increase(path)) {
 				path->cwnd += ev->ack.acked;
-				path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
+				path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
 			}
 		}
 		/* Exit to congestion avoidance if slow start threshold is reached. */
 		if (path->cwnd >= c->ssthresh)
 			c->state = QUIC_CC_ST_CA;
-		path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+		path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
 		break;
 
 	case QUIC_CC_EVT_LOSS:
@@ -553,7 +553,7 @@ static void quic_cc_cubic_cs_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 
 		if (quic_cwnd_may_increase(path)) {
 			path->cwnd += acked;
-			path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+			path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
 		}
 		quic_cc_hystart_track_min_rtt(cc, h, path->loss.latest_rtt);
 		if (quic_cc_hystart_may_reenter_ss(h)) {
@@ -663,10 +663,10 @@ static void quic_cc_cubic_state_trace(struct buffer *buf, const struct quic_cc *
 	struct cubic *c = quic_cc_priv(cc);
 
 	path = container_of(cc, struct quic_cc_path, cc);
-	chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%d rpst=%dms",
+	chunk_appendf(buf, " state=%s cwnd=%llu cwnd_last_max=%llu ssthresh=%d rpst=%dms",
 	              quic_cc_state_str(c->state),
 	              (unsigned long long)path->cwnd,
-	              (unsigned long long)path->mcwnd,
+	              (unsigned long long)path->cwnd_last_max,
 	              (int)c->ssthresh,
 	              !tick_isset(c->recovery_start_time) ? -1 :
 	              TICKS_TO_MS(tick_remain(c->recovery_start_time, now_ms)));
@@ -55,7 +55,7 @@ static void quic_cc_nr_slow_start(struct quic_cc *cc)
 	struct nr *nr = quic_cc_priv(cc);
 
 	path = container_of(cc, struct quic_cc_path, cc);
-	path->cwnd = path->min_cwnd;
+	path->cwnd = path->limit_min;
 	/* Re-entering slow start state. */
 	nr->state = QUIC_CC_ST_SS;
 	/* Recovery start time reset */
@@ -71,7 +71,7 @@ static void quic_cc_nr_enter_recovery(struct quic_cc *cc)
 	path = container_of(cc, struct quic_cc_path, cc);
 	nr->recovery_start_time = now_ms;
 	nr->ssthresh = path->cwnd >> 1;
-	path->cwnd = QUIC_MAX(nr->ssthresh, (uint32_t)path->min_cwnd);
+	path->cwnd = QUIC_MAX(nr->ssthresh, (uint32_t)path->limit_min);
 	nr->state = QUIC_CC_ST_RP;
 }
 
@@ -88,8 +88,8 @@ static void quic_cc_nr_ss_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 	case QUIC_CC_EVT_ACK:
 		if (quic_cwnd_may_increase(path)) {
 			path->cwnd += ev->ack.acked;
-			path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
-			path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+			path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
+			path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
 		}
 		/* Exit to congestion avoidance if slow start threshold is reached. */
 		if (path->cwnd > nr->ssthresh)
@@ -128,8 +128,8 @@ static void quic_cc_nr_ca_cb(struct quic_cc *cc, struct quic_cc_event *ev)
 		nr->remain_acked = acked % path->cwnd;
 		if (quic_cwnd_may_increase(path)) {
 			path->cwnd += acked / path->cwnd;
-			path->cwnd = QUIC_MIN(path->max_cwnd, path->cwnd);
-			path->mcwnd = QUIC_MAX(path->cwnd, path->mcwnd);
+			path->cwnd = QUIC_MIN(path->limit_max, path->cwnd);
+			path->cwnd_last_max = QUIC_MAX(path->cwnd, path->cwnd_last_max);
 		}
 		break;
 	}
@@ -190,10 +190,10 @@ static void quic_cc_nr_state_trace(struct buffer *buf, const struct quic_cc *cc)
 	struct nr *nr = quic_cc_priv(cc);
 
 	path = container_of(cc, struct quic_cc_path, cc);
-	chunk_appendf(buf, " state=%s cwnd=%llu mcwnd=%llu ssthresh=%ld rpst=%dms pktloss=%llu",
+	chunk_appendf(buf, " state=%s cwnd=%llu cwnd_last_max=%llu ssthresh=%ld rpst=%dms pktloss=%llu",
 	              quic_cc_state_str(nr->state),
 	              (unsigned long long)path->cwnd,
-	              (unsigned long long)path->mcwnd,
+	              (unsigned long long)path->cwnd_last_max,
 	              (long)nr->ssthresh,
 	              !tick_isset(nr->recovery_start_time) ? -1 :
 	              TICKS_TO_MS(tick_remain(nr->recovery_start_time, now_ms)),
@@ -15,7 +15,7 @@ static int quic_cc_nocc_init(struct quic_cc *cc)
 	struct quic_cc_path *path;
 
 	path = container_of(cc, struct quic_cc_path, cc);
-	path->cwnd = path->max_cwnd;
+	path->cwnd = path->limit_max;
 	return 1;
 }
 
@@ -322,12 +322,12 @@ static void dump_quic_full(struct show_quic_ctx *ctx, struct quic_conn *qc)
 	qc->path->cc.algo->state_cli(&trash, qc->path);
 
 	chunk_appendf(&trash, " srtt=%-4u rttvar=%-4u rttmin=%-4u ptoc=%-4u\n"
-	              " cwnd=%-6llu mcwnd=%-6llu\n"
+	              " cwnd=%-6llu cwnd_last_max=%-6llu\n"
 	              " sentbytes=%-12llu sentbytesgso=%-12llu sentpkts=%-6llu\n"
 	              " lostpkts=%-6llu reorderedpkts=%-6llu\n",
 	              qc->path->loss.srtt, qc->path->loss.rtt_var,
 	              qc->path->loss.rtt_min, qc->path->loss.pto_count, (ullong)qc->path->cwnd,
-	              (ullong)qc->path->mcwnd, (ullong)qc->cntrs.sent_bytes, (ullong)qc->cntrs.sent_bytes_gso,
+	              (ullong)qc->path->cwnd_last_max, (ullong)qc->cntrs.sent_bytes, (ullong)qc->cntrs.sent_bytes_gso,
 	              (ullong)qc->cntrs.sent_pkt, (ullong)qc->path->loss.nb_lost_pkt, (ullong)qc->path->loss.nb_reordered_pkt);
 }
 