Skip to content

Commit d4fc13c

Browse files
committed
feature(transport/cubic): dynamic changing of alpha value
This implements changing CUBIC_ALPHA to `1.0` when `w_est >= cwnd_prior`, as per <https://datatracker.ietf.org/doc/html/rfc9438#section-4.3-11>. Changes: added the `self.alpha` field; added the `self.cwnd_prior` field; existing functions now use `self.alpha` instead of the constant value; `self.alpha` changes and resets as per the RFC; added test helpers; added a test for alpha; made the existing tests pass.
1 parent 737d97e commit d4fc13c

2 files changed

Lines changed: 202 additions & 37 deletions

File tree

neqo-transport/src/cc/cubic.rs

Lines changed: 52 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,17 @@ pub fn convert_to_f64(v: usize) -> f64 {
9292

9393
#[derive(Debug, Default)]
9494
pub struct Cubic {
95+
/// > CUBIC additive increase factor used in the Reno-friendly region \[to achieve
96+
/// > approximately the same average congestion window size as Reno\].
97+
///
98+
/// <https://datatracker.ietf.org/doc/html/rfc9438#name-constants-of-interest>
99+
alpha: f64,
100+
/// > Size of cwnd in \[bytes\] at the time of setting `ssthresh` most recently, either upon
101+
/// > exiting the first slow start or just before `cwnd` was reduced in the last congestion
102+
/// > event.
103+
///
104+
/// <https://datatracker.ietf.org/doc/html/rfc9438#section-4.1.2-2.4>
105+
cwnd_prior: f64,
95106
/// > An estimate for the congestion window \[...\] in the Reno-friendly region -- that
96107
/// > is, an estimate for the congestion window of Reno.
97108
///
@@ -205,24 +216,50 @@ impl Cubic {
205216
max_datagram_size: f64,
206217
now: Instant,
207218
) {
219+
self.alpha = CUBIC_ALPHA;
208220
self.t_epoch = Some(now);
209221
self.reno_acked_bytes = new_acked_bytes;
210222
self.w_est = curr_cwnd;
211223
// If `w_max < cwnd_epoch` we take the cubic root from a negative value in `calc_k()`. That
212224
// could only happen if somehow `cwnd` gets increased between calling `reduce_cwnd()` and
213225
// `start_epoch()`. This could happen if we exit slow start without packet loss, thus never
214226
// had a congestion event and called `reduce_cwnd()` which means `w_max` was never set and
215-
// is still it's default `0.0` value. For those cases we reset/initialize `w_max` here and
216-
// appropiately set `k` to `0.0` (`k` is the time for `cwnd` to reach `w_max`).
227+
// is still its default `0.0` value. For those cases we reset/initialize `w_max` and
228+
// `cwnd_prior` here and appropriately set `k` to `0.0` (`k` is the time for `cwnd`
229+
// to reach `w_max`). We also set `alpha` to `1.0` as per the below RFC section,
230+
// since `w_est >= cwnd_prior` is true here.
231+
//
232+
// <https://datatracker.ietf.org/doc/html/rfc9438#section-4.3-11>
217233
self.k = if self.w_max <= curr_cwnd {
234+
self.cwnd_prior = curr_cwnd;
218235
self.w_max = curr_cwnd;
236+
debug_assert!(
237+
self.w_est >= self.cwnd_prior,
238+
"w_est < cwnd_prior, so we are not allowed to set alpha = 1 (w_est: {}, cwnd_prior: {})", self.w_est, self.cwnd_prior
239+
);
240+
self.alpha = 1.0;
219241
0.0
220242
} else {
221243
self.calc_k(curr_cwnd, max_datagram_size)
222244
};
223245
qtrace!("[{self}] New epoch");
224246
}
225247

248+
#[cfg(test)]
249+
pub const fn w_est(&self) -> f64 {
250+
self.w_est
251+
}
252+
253+
#[cfg(test)]
254+
pub const fn cwnd_prior(&self) -> f64 {
255+
self.cwnd_prior
256+
}
257+
258+
#[cfg(test)]
259+
pub const fn alpha(&self) -> f64 {
260+
self.alpha
261+
}
262+
226263
#[cfg(test)]
227264
pub const fn w_max(&self) -> f64 {
228265
self.w_max
@@ -301,15 +338,24 @@ impl WindowAdjustment for Cubic {
301338
// <https://datatracker.ietf.org/doc/html/rfc9438#section-4.3-9>
302339

303340
// We first calculate the increase in segments and floor it to only include whole segments.
304-
let increase = (CUBIC_ALPHA * self.reno_acked_bytes / curr_cwnd).floor();
305-
341+
let increase = (self.alpha * self.reno_acked_bytes / curr_cwnd).floor();
306342
// Only apply the increase if it is at least by one segment.
307343
if increase > 0.0 {
308344
self.w_est += increase * max_datagram_size;
309345
// Because we floored the increase to whole segments we cannot just zero
310346
// `reno_acked_bytes` but have to calculate the actual bytes used.
311-
let acked_bytes_used = increase * curr_cwnd / CUBIC_ALPHA;
347+
let acked_bytes_used = increase * curr_cwnd / self.alpha;
312348
self.reno_acked_bytes -= acked_bytes_used;
349+
350+
// > Once w_est has grown to reach the cwnd at the time of most recently setting
351+
// > ssthresh -- that is, w_est >= cwnd_prior -- the sender SHOULD set CUBIC_ALPHA to
352+
// > 1 to ensure that it can achieve the same congestion window increment rate
353+
// > as Reno, which uses AIMD(1, 0.5).
354+
//
355+
// <https://datatracker.ietf.org/doc/html/rfc9438#section-4.3-11>
356+
if self.w_est >= self.cwnd_prior {
357+
self.alpha = 1.0;
358+
}
313359
}
314360

315361
// > When receiving a new ACK in congestion avoidance (where cwnd could be greater than
@@ -381,6 +427,7 @@ impl WindowAdjustment for Cubic {
381427
max_datagram_size: usize,
382428
) -> (usize, usize) {
383429
let curr_cwnd_f64 = convert_to_f64(curr_cwnd);
430+
self.cwnd_prior = curr_cwnd_f64;
384431
// Fast Convergence
385432
//
386433
// > During a congestion event, if the current cwnd is less than w_max, this indicates

neqo-transport/src/cc/tests/cubic.rs

Lines changed: 150 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -15,15 +15,16 @@ use std::{
1515
time::{Duration, Instant},
1616
};
1717

18+
use neqo_common::qinfo;
1819
use test_fixture::now;
1920

2021
use super::{IP_ADDR, MTU, RTT};
2122
use crate::{
2223
cc::{
2324
classic_cc::ClassicCongestionControl,
2425
cubic::{
25-
convert_to_f64, Cubic, CUBIC_ALPHA, CUBIC_BETA_USIZE_DIVIDEND,
26-
CUBIC_BETA_USIZE_DIVISOR, CUBIC_C, CUBIC_FAST_CONVERGENCE_FACTOR,
26+
convert_to_f64, Cubic, CUBIC_BETA_USIZE_DIVIDEND, CUBIC_BETA_USIZE_DIVISOR, CUBIC_C,
27+
CUBIC_FAST_CONVERGENCE_FACTOR,
2728
},
2829
CongestionControl as _,
2930
},
@@ -82,13 +83,34 @@ fn packet_lost(cc: &mut ClassicCongestionControl<Cubic>, pn: u64) {
8283
cc.on_packets_lost(None, None, PTO, &[p_lost], now());
8384
}
8485

85-
fn expected_tcp_acks(cwnd_rtt_start: usize, mtu: usize) -> u64 {
86+
fn expected_tcp_acks(cwnd_rtt_start: usize, mtu: usize, alpha: f64) -> u64 {
8687
(f64::from(i32::try_from(cwnd_rtt_start).unwrap())
8788
/ f64::from(i32::try_from(mtu).unwrap())
88-
/ CUBIC_ALPHA)
89+
/ alpha)
8990
.round() as u64
9091
}
9192

93+
/// Calculates the expected increase to `w_est` given the number of acked bytes, the current
94+
/// congestion window and the state of `alpha`.
95+
///
96+
/// Returns the expected increase to `w_est` and the acked bytes used for it as `(w_est_increase,
97+
/// acked_bytes_used)`.
98+
fn expected_w_est_increase(
99+
curr_cwnd: f64,
100+
max_datagram_size: f64,
101+
acked_bytes: f64,
102+
alpha: f64,
103+
) -> (f64, f64) {
104+
let increase = (alpha * (acked_bytes / curr_cwnd)).floor();
105+
if increase > 0.0 {
106+
let w_est_increase = increase * max_datagram_size;
107+
let acked_bytes_used = increase * curr_cwnd / alpha;
108+
(w_est_increase, acked_bytes_used)
109+
} else {
110+
(0.0, 0.0)
111+
}
112+
}
113+
92114
#[test]
93115
fn tcp_phase() {
94116
let mut cubic = ClassicCongestionControl::new(Cubic::default(), Pmtud::new(IP_ADDR, MTU));
@@ -97,7 +119,6 @@ fn tcp_phase() {
97119
cubic.set_ssthresh(1);
98120

99121
let mut now = now();
100-
let start_time = now;
101122
// helper variables to remember the next packet number to be sent/acked.
102123
let mut next_pn_send = 0;
103124
let mut next_pn_ack = 0;
@@ -110,23 +131,33 @@ fn tcp_phase() {
110131
// The phase will end when cwnd calculated with cubic equation is equal to TCP estimate:
111132
// CUBIC_C * (n * RTT / CUBIC_ALPHA)^3 * MAX_DATAGRAM_SIZE = n * MAX_DATAGRAM_SIZE
112133
// from this n = sqrt(CUBIC_ALPHA^3/ (CUBIC_C * RTT^3)).
113-
let num_tcp_increases = (CUBIC_ALPHA.powi(3) / (CUBIC_C * RTT.as_secs_f64().powi(3)))
134+
135+
// Because `cubic::Cubic::alpha` is uninitialized here (it's initialized in
136+
// `cubic::Cubic::start_epoch` and we never had an ack yet) we set it to `1.0` which would be
137+
// the value it'd be initialized to after the first ack under this test's conditions.
138+
let alpha: f64 = 1.0;
139+
let num_tcp_increases = (alpha.powi(3) / (CUBIC_C * RTT.as_secs_f64().powi(3)))
114140
.sqrt()
115141
.floor() as u64;
116142

117-
for _ in 0..num_tcp_increases {
143+
for i in 0..num_tcp_increases {
118144
let cwnd_rtt_start = cubic.cwnd();
119145
// Expected acks during a period of RTT / CUBIC_ALPHA.
120-
let acks = expected_tcp_acks(cwnd_rtt_start, cubic.max_datagram_size());
146+
let acks = expected_tcp_acks(cwnd_rtt_start, cubic.max_datagram_size(), alpha);
121147
// The time between acks if they are ideally paced over a RTT.
122148
let time_increase =
123149
RTT / u32::try_from(cwnd_rtt_start / cubic.max_datagram_size()).unwrap();
124150

125-
for _ in 0..acks {
151+
for j in 0..acks {
126152
now += time_increase;
127153
ack_packet(&mut cubic, next_pn_ack, now);
128154
next_pn_ack += 1;
129155
next_pn_send = fill_cwnd(&mut cubic, next_pn_send, now);
156+
qinfo!(
157+
"round {i}, ACK {j}, cwnd: {}, alpha: {}",
158+
cubic.cwnd(),
159+
cubic.cc_algorithm().alpha(),
160+
);
130161
}
131162

132163
assert_eq!(cubic.cwnd() - cwnd_rtt_start, cubic.max_datagram_size());
@@ -150,16 +181,19 @@ fn tcp_phase() {
150181

151182
// Make sure that the increase is not according to TCP equation, i.e., that it took
152183
// less than RTT / CUBIC_ALPHA.
153-
let expected_ack_tcp_increase = expected_tcp_acks(cwnd_rtt_start, cubic.max_datagram_size());
184+
let expected_ack_tcp_increase = expected_tcp_acks(
185+
cwnd_rtt_start,
186+
cubic.max_datagram_size(),
187+
cubic.cc_algorithm().alpha(),
188+
);
154189
assert!(num_acks < expected_ack_tcp_increase);
155190

156191
// This first increase after a TCP phase may be shorter than what it would take by a regular
157192
// cubic phase, because of the proper byte counting and the credit it already had before
158-
// entering this phase. Therefore We will perform another round and compare it to expected
159-
// increase using the cubic equation.
193+
// entering this phase. Therefore we will perform another round and compare it to the expected
194+
// number of acks needed for TCP.
160195

161196
let cwnd_rtt_start_after_tcp = cubic.cwnd();
162-
let elapsed_time = now - start_time;
163197

164198
// calculate new time_increase.
165199
let time_increase =
@@ -174,26 +208,12 @@ fn tcp_phase() {
174208
next_pn_send = fill_cwnd(&mut cubic, next_pn_send, now);
175209
}
176210

177-
let expected_ack_tcp_increase2 =
178-
expected_tcp_acks(cwnd_rtt_start_after_tcp, cubic.max_datagram_size());
211+
let expected_ack_tcp_increase2 = expected_tcp_acks(
212+
cwnd_rtt_start_after_tcp,
213+
cubic.max_datagram_size(),
214+
cubic.cc_algorithm().alpha(),
215+
);
179216
assert!(num_acks2 < expected_ack_tcp_increase2);
180-
181-
// The time needed to increase cwnd by MAX_DATAGRAM_SIZE using the cubic equation will be
182-
// calculated from: W_cubic(elapsed_time + t_to_increase) - W_cubic(elapsed_time) =
183-
// MAX_DATAGRAM_SIZE => CUBIC_C * (elapsed_time + t_to_increase)^3 * MAX_DATAGRAM_SIZE +
184-
// CWND_INITIAL - CUBIC_C * elapsed_time^3 * MAX_DATAGRAM_SIZE + CWND_INITIAL =
185-
// MAX_DATAGRAM_SIZE => t_to_increase = cbrt((1 + CUBIC_C * elapsed_time^3) / CUBIC_C) -
186-
// elapsed_time (t_to_increase is in seconds)
187-
// number of ack needed is t_to_increase / time_increase.
188-
let expected_ack_cubic_increase =
189-
(((CUBIC_C.mul_add((elapsed_time).as_secs_f64().powi(3), 1.0) / CUBIC_C).cbrt()
190-
- elapsed_time.as_secs_f64())
191-
/ time_increase.as_secs_f64())
192-
.ceil() as u64;
193-
// num_acks is very close to the calculated value. The exact value is hard to calculate
194-
// because the proportional increase (i.e. curr_cwnd_f64 / (target - curr_cwnd_f64) *
195-
// MAX_DATAGRAM_SIZE_F64) and the byte counting.
196-
assert_eq!(num_acks2, expected_ack_cubic_increase + 2);
197217
}
198218

199219
#[test]
@@ -366,3 +386,101 @@ fn congestion_event_congestion_avoidance_no_overflow() {
366386
// Now ack packet that was send earlier.
367387
ack_packet(&mut cubic, 0, now().checked_sub(PTO).unwrap());
368388
}
389+
390+
/// This tests the dynamic changing of the `alpha` value outlined in RFC 9438 section 4.3.
391+
///
392+
/// <https://datatracker.ietf.org/doc/html/rfc9438#section-4.3-11>
393+
#[test]
394+
fn alpha_changes_for_high_w_est_values() {
395+
const NORMAL_ALPHA: f64 = 3.0 * (1.0 - 0.7) / (1.0 + 0.7);
396+
const INCREASED_ALPHA: f64 = 1.0;
397+
let mut cc = ClassicCongestionControl::new(Cubic::default(), Pmtud::new(IP_ADDR, MTU));
398+
let mut next_pn_to_send = 0;
399+
let mut last_sent_pn;
400+
let mut first_sent_pn;
401+
let mut w_est_projected = convert_to_f64(cc.cwnd_initial()); // initial value
402+
let max_datagram_size = convert_to_f64(cc.max_datagram_size());
403+
let mut w_est_increase;
404+
let mut acked_bytes_used;
405+
let mut acked_bytes = 0.0;
406+
407+
// Set ssthresh to something small to make sure that cc is in the congestion avoidance phase.
408+
cc.set_ssthresh(1);
409+
410+
// Send enough packets to have at least one congestion window increase
411+
next_pn_to_send = fill_cwnd(&mut cc, next_pn_to_send, now());
412+
last_sent_pn = next_pn_to_send - 1;
413+
for pn in 0..=last_sent_pn {
414+
// Calculate the projected increase of w_est
415+
acked_bytes += max_datagram_size;
416+
(w_est_increase, acked_bytes_used) = expected_w_est_increase(
417+
convert_to_f64(cc.cwnd()),
418+
max_datagram_size,
419+
acked_bytes,
420+
INCREASED_ALPHA,
421+
);
422+
acked_bytes -= acked_bytes_used;
423+
w_est_projected += w_est_increase;
424+
425+
// Actually process the ACK
426+
ack_packet(&mut cc, pn, now());
427+
428+
qinfo!(
429+
"pn acked: {pn}, alpha: {}, w_est: {}, w_est_projected: {w_est_projected}",
430+
cc.cc_algorithm().alpha(),
431+
cc.cc_algorithm().w_est()
432+
);
433+
// Since we never had a congestion event we started with the initial values for `w_est =
434+
// cwnd_prior = current_cwnd`, thus `w_est >= cwnd_prior` should be `true`, `alpha`
435+
// should be set to its increased value and `w_est` should be growing accordingly.
436+
assert!(cc.cc_algorithm().w_est() >= cc.cc_algorithm().cwnd_prior());
437+
assert_within(cc.cc_algorithm().alpha(), INCREASED_ALPHA, f64::EPSILON);
438+
assert_within(cc.cc_algorithm().w_est(), w_est_projected, f64::EPSILON);
439+
}
440+
441+
// Trigger a congestion event, which calls `reduce_cwnd` where `cwnd_prior` is updated to the
442+
// current congestion window before reducing it. The next ACK after a congestion event will call
443+
// `start_epoch` where `alpha` is set to its default value and `w_est` is set to the newly
444+
// reduced congestion window. Thus we now have `w_est < cwnd_prior` and `alpha ==
445+
// NORMAL_ALPHA`.
446+
packet_lost(&mut cc, last_sent_pn);
447+
w_est_projected = convert_to_f64(cc.cwnd()); // update the value after the congestion event
448+
acked_bytes = 0.0; // reset the acked bytes counter for the next epoch
449+
450+
// Send and ack packets until the congestion window grew so much that `w_est` is as big as
451+
// `cwnd_prior`.
452+
loop {
453+
first_sent_pn = next_pn_to_send;
454+
next_pn_to_send = fill_cwnd(&mut cc, next_pn_to_send, now());
455+
last_sent_pn = next_pn_to_send - 1;
456+
for pn in first_sent_pn..=last_sent_pn {
457+
// Calculate the projected increase of w_est
458+
acked_bytes += max_datagram_size;
459+
(w_est_increase, acked_bytes_used) = expected_w_est_increase(
460+
convert_to_f64(cc.cwnd()),
461+
max_datagram_size,
462+
acked_bytes,
463+
NORMAL_ALPHA,
464+
);
465+
acked_bytes -= acked_bytes_used;
466+
w_est_projected += w_est_increase;
467+
468+
// Actually process the ACK
469+
ack_packet(&mut cc, pn, now());
470+
qinfo!(
471+
"pn acked: {pn}, alpha: {}, w_est: {}, w_est_projected: {w_est_projected}",
472+
cc.cc_algorithm().alpha(),
473+
cc.cc_algorithm().w_est()
474+
);
475+
}
476+
if cc.cc_algorithm().w_est() >= cc.cc_algorithm().cwnd_prior() {
477+
break;
478+
}
479+
// Make sure `w_est` grew by the amount expected with the normal `alpha` value
480+
assert_within(cc.cc_algorithm().w_est(), w_est_projected, f64::EPSILON);
481+
}
482+
483+
// Now `w_est` should be as big as `cwnd_prior`, thus `alpha` should have its increased value.
484+
assert!(cc.cc_algorithm().w_est() >= cc.cc_algorithm().cwnd_prior());
485+
assert_within(cc.cc_algorithm().alpha(), INCREASED_ALPHA, f64::EPSILON);
486+
}

0 commit comments

Comments
 (0)