@@ -15,15 +15,16 @@ use std::{
1515 time:: { Duration , Instant } ,
1616} ;
1717
18+ use neqo_common:: qinfo;
1819use test_fixture:: now;
1920
2021use super :: { IP_ADDR , MTU , RTT } ;
2122use crate :: {
2223 cc:: {
2324 classic_cc:: ClassicCongestionControl ,
2425 cubic:: {
25- convert_to_f64, Cubic , CUBIC_ALPHA , CUBIC_BETA_USIZE_DIVIDEND ,
26- CUBIC_BETA_USIZE_DIVISOR , CUBIC_C , CUBIC_FAST_CONVERGENCE_FACTOR ,
26+ convert_to_f64, Cubic , CUBIC_BETA_USIZE_DIVIDEND , CUBIC_BETA_USIZE_DIVISOR , CUBIC_C ,
27+ CUBIC_FAST_CONVERGENCE_FACTOR ,
2728 } ,
2829 CongestionControl as _,
2930 } ,
@@ -82,13 +83,34 @@ fn packet_lost(cc: &mut ClassicCongestionControl<Cubic>, pn: u64) {
8283 cc. on_packets_lost ( None , None , PTO , & [ p_lost] , now ( ) ) ;
8384}
8485
85- fn expected_tcp_acks ( cwnd_rtt_start : usize , mtu : usize ) -> u64 {
86+ fn expected_tcp_acks ( cwnd_rtt_start : usize , mtu : usize , alpha : f64 ) -> u64 {
8687 ( f64:: from ( i32:: try_from ( cwnd_rtt_start) . unwrap ( ) )
8788 / f64:: from ( i32:: try_from ( mtu) . unwrap ( ) )
88- / CUBIC_ALPHA )
89+ / alpha )
8990 . round ( ) as u64
9091}
9192
93+ /// Calculates the expected increase to `w_est` given the number of acked bytes, the current
94+ /// congestion window and the state of `alpha`.
95+ ///
96+ /// Returns the expected increase to `w_est` and the acked bytes used for it as `(w_est_increase,
97+ /// acked_bytes_used)`.
98+ fn expected_w_est_increase (
99+ curr_cwnd : f64 ,
100+ max_datagram_size : f64 ,
101+ acked_bytes : f64 ,
102+ alpha : f64 ,
103+ ) -> ( f64 , f64 ) {
104+ let increase = ( alpha * ( acked_bytes / curr_cwnd) ) . floor ( ) ;
105+ if increase > 0.0 {
106+ let w_est_increase = increase * max_datagram_size;
107+ let acked_bytes_used = increase * curr_cwnd / alpha;
108+ ( w_est_increase, acked_bytes_used)
109+ } else {
110+ ( 0.0 , 0.0 )
111+ }
112+ }
113+
92114#[ test]
93115fn tcp_phase ( ) {
94116 let mut cubic = ClassicCongestionControl :: new ( Cubic :: default ( ) , Pmtud :: new ( IP_ADDR , MTU ) ) ;
@@ -97,7 +119,6 @@ fn tcp_phase() {
97119 cubic. set_ssthresh ( 1 ) ;
98120
99121 let mut now = now ( ) ;
100- let start_time = now;
101122 // helper variables to remember the next packet number to be sent/acked.
102123 let mut next_pn_send = 0 ;
103124 let mut next_pn_ack = 0 ;
@@ -110,23 +131,33 @@ fn tcp_phase() {
110131 // The phase will end when cwnd calculated with cubic equation is equal to TCP estimate:
111132 // CUBIC_C * (n * RTT / CUBIC_ALPHA)^3 * MAX_DATAGRAM_SIZE = n * MAX_DATAGRAM_SIZE
112133 // from this n = sqrt(CUBIC_ALPHA^3/ (CUBIC_C * RTT^3)).
113- let num_tcp_increases = ( CUBIC_ALPHA . powi ( 3 ) / ( CUBIC_C * RTT . as_secs_f64 ( ) . powi ( 3 ) ) )
134+
 135+ // Because `cubic::Cubic::alpha` is uninitialized here (it's initialized in
136+ // `cubic::Cubic::start_epoch` and we never had an ack yet) we set it to `1.0` which would be
137+ // the value it'd be initialized to after the first ack under this test's conditions.
138+ let alpha: f64 = 1.0 ;
139+ let num_tcp_increases = ( alpha. powi ( 3 ) / ( CUBIC_C * RTT . as_secs_f64 ( ) . powi ( 3 ) ) )
114140 . sqrt ( )
115141 . floor ( ) as u64 ;
116142
117- for _ in 0 ..num_tcp_increases {
143+ for i in 0 ..num_tcp_increases {
118144 let cwnd_rtt_start = cubic. cwnd ( ) ;
119145 // Expected acks during a period of RTT / CUBIC_ALPHA.
120- let acks = expected_tcp_acks ( cwnd_rtt_start, cubic. max_datagram_size ( ) ) ;
146+ let acks = expected_tcp_acks ( cwnd_rtt_start, cubic. max_datagram_size ( ) , alpha ) ;
121147 // The time between acks if they are ideally paced over a RTT.
122148 let time_increase =
123149 RTT / u32:: try_from ( cwnd_rtt_start / cubic. max_datagram_size ( ) ) . unwrap ( ) ;
124150
125- for _ in 0 ..acks {
151+ for j in 0 ..acks {
126152 now += time_increase;
127153 ack_packet ( & mut cubic, next_pn_ack, now) ;
128154 next_pn_ack += 1 ;
129155 next_pn_send = fill_cwnd ( & mut cubic, next_pn_send, now) ;
156+ qinfo ! (
157+ "round {i}, ACK {j}, cwnd: {}, alpha: {}" ,
158+ cubic. cwnd( ) ,
159+ cubic. cc_algorithm( ) . alpha( ) ,
160+ ) ;
130161 }
131162
132163 assert_eq ! ( cubic. cwnd( ) - cwnd_rtt_start, cubic. max_datagram_size( ) ) ;
@@ -150,16 +181,19 @@ fn tcp_phase() {
150181
151182 // Make sure that the increase is not according to TCP equation, i.e., that it took
152183 // less than RTT / CUBIC_ALPHA.
153- let expected_ack_tcp_increase = expected_tcp_acks ( cwnd_rtt_start, cubic. max_datagram_size ( ) ) ;
184+ let expected_ack_tcp_increase = expected_tcp_acks (
185+ cwnd_rtt_start,
186+ cubic. max_datagram_size ( ) ,
187+ cubic. cc_algorithm ( ) . alpha ( ) ,
188+ ) ;
154189 assert ! ( num_acks < expected_ack_tcp_increase) ;
155190
156191 // This first increase after a TCP phase may be shorter than what it would take by a regular
157192 // cubic phase, because of the proper byte counting and the credit it already had before
158- // entering this phase. Therefore We will perform another round and compare it to expected
159- // increase using the cubic equation .
 193+ // entering this phase. Therefore we will perform another round and compare it to the expected
194+ // number of acks needed for TCP .
160195
161196 let cwnd_rtt_start_after_tcp = cubic. cwnd ( ) ;
162- let elapsed_time = now - start_time;
163197
164198 // calculate new time_increase.
165199 let time_increase =
@@ -174,26 +208,12 @@ fn tcp_phase() {
174208 next_pn_send = fill_cwnd ( & mut cubic, next_pn_send, now) ;
175209 }
176210
177- let expected_ack_tcp_increase2 =
178- expected_tcp_acks ( cwnd_rtt_start_after_tcp, cubic. max_datagram_size ( ) ) ;
211+ let expected_ack_tcp_increase2 = expected_tcp_acks (
212+ cwnd_rtt_start_after_tcp,
213+ cubic. max_datagram_size ( ) ,
214+ cubic. cc_algorithm ( ) . alpha ( ) ,
215+ ) ;
179216 assert ! ( num_acks2 < expected_ack_tcp_increase2) ;
180-
181- // The time needed to increase cwnd by MAX_DATAGRAM_SIZE using the cubic equation will be
182- // calculated from: W_cubic(elapsed_time + t_to_increase) - W_cubic(elapsed_time) =
183- // MAX_DATAGRAM_SIZE => CUBIC_C * (elapsed_time + t_to_increase)^3 * MAX_DATAGRAM_SIZE +
184- // CWND_INITIAL - CUBIC_C * elapsed_time^3 * MAX_DATAGRAM_SIZE + CWND_INITIAL =
185- // MAX_DATAGRAM_SIZE => t_to_increase = cbrt((1 + CUBIC_C * elapsed_time^3) / CUBIC_C) -
186- // elapsed_time (t_to_increase is in seconds)
187- // number of ack needed is t_to_increase / time_increase.
188- let expected_ack_cubic_increase =
189- ( ( ( CUBIC_C . mul_add ( ( elapsed_time) . as_secs_f64 ( ) . powi ( 3 ) , 1.0 ) / CUBIC_C ) . cbrt ( )
190- - elapsed_time. as_secs_f64 ( ) )
191- / time_increase. as_secs_f64 ( ) )
192- . ceil ( ) as u64 ;
193- // num_acks is very close to the calculated value. The exact value is hard to calculate
194- // because the proportional increase (i.e. curr_cwnd_f64 / (target - curr_cwnd_f64) *
195- // MAX_DATAGRAM_SIZE_F64) and the byte counting.
196- assert_eq ! ( num_acks2, expected_ack_cubic_increase + 2 ) ;
197217}
198218
199219#[ test]
@@ -366,3 +386,101 @@ fn congestion_event_congestion_avoidance_no_overflow() {
366386 // Now ack packet that was send earlier.
367387 ack_packet ( & mut cubic, 0 , now ( ) . checked_sub ( PTO ) . unwrap ( ) ) ;
368388}
389+
390+ /// This tests the dynamic changing of the `alpha` value outlined in RFC 9438 section 4.3.
391+ ///
392+ /// <https://datatracker.ietf.org/doc/html/rfc9438#section-4.3-11>
393+ #[ test]
394+ fn alpha_changes_for_high_w_est_values ( ) {
395+ const NORMAL_ALPHA : f64 = 3.0 * ( 1.0 - 0.7 ) / ( 1.0 + 0.7 ) ;
396+ const INCREASED_ALPHA : f64 = 1.0 ;
397+ let mut cc = ClassicCongestionControl :: new ( Cubic :: default ( ) , Pmtud :: new ( IP_ADDR , MTU ) ) ;
398+ let mut next_pn_to_send = 0 ;
399+ let mut last_sent_pn;
400+ let mut first_sent_pn;
401+ let mut w_est_projected = convert_to_f64 ( cc. cwnd_initial ( ) ) ; // initial value
402+ let max_datagram_size = convert_to_f64 ( cc. max_datagram_size ( ) ) ;
403+ let mut w_est_increase;
404+ let mut acked_bytes_used;
405+ let mut acked_bytes = 0.0 ;
406+
 407+ // Set ssthresh to something small to make sure that cc is in the congestion avoidance phase.
408+ cc. set_ssthresh ( 1 ) ;
409+
410+ // Send enough packets to have at least one congestion window increase
411+ next_pn_to_send = fill_cwnd ( & mut cc, next_pn_to_send, now ( ) ) ;
412+ last_sent_pn = next_pn_to_send - 1 ;
413+ for pn in 0 ..=last_sent_pn {
414+ // Calculate the projected increase of w_est
415+ acked_bytes += max_datagram_size;
416+ ( w_est_increase, acked_bytes_used) = expected_w_est_increase (
417+ convert_to_f64 ( cc. cwnd ( ) ) ,
418+ max_datagram_size,
419+ acked_bytes,
420+ INCREASED_ALPHA ,
421+ ) ;
422+ acked_bytes -= acked_bytes_used;
423+ w_est_projected += w_est_increase;
424+
425+ // Actually process the ACK
426+ ack_packet ( & mut cc, pn, now ( ) ) ;
427+
428+ qinfo ! (
429+ "pn acked: {pn}, alpha: {}, w_est: {}, w_est_projected: {w_est_projected}" ,
430+ cc. cc_algorithm( ) . alpha( ) ,
431+ cc. cc_algorithm( ) . w_est( )
432+ ) ;
433+ // Since we never had a congestion event we started with the initial values for `w_est =
434+ // cwnd_prior = current_cwnd`, thus `w_est >= cwnd_prior` should be `true`, `alpha`
 435+ // should be set to its increased value and `w_est` should be growing accordingly.
436+ assert ! ( cc. cc_algorithm( ) . w_est( ) >= cc. cc_algorithm( ) . cwnd_prior( ) ) ;
437+ assert_within ( cc. cc_algorithm ( ) . alpha ( ) , INCREASED_ALPHA , f64:: EPSILON ) ;
438+ assert_within ( cc. cc_algorithm ( ) . w_est ( ) , w_est_projected, f64:: EPSILON ) ;
439+ }
440+
441+ // Trigger a congestion event, which calls `reduce_cwnd` where `cwnd_prior` is updated to the
442+ // current congestion window before reducing it. The next ACK after a congestion event will call
 443+ // `start_epoch` where `alpha` is set to its default value and `w_est` is set to the newly
444+ // reduced congestion window. Thus we now have `w_est < cwnd_prior` and `alpha ==
445+ // NORMAL_ALPHA`.
446+ packet_lost ( & mut cc, last_sent_pn) ;
447+ w_est_projected = convert_to_f64 ( cc. cwnd ( ) ) ; // update the value after the congestion event
448+ acked_bytes = 0.0 ; // reset the acked bytes counter for the next epoch
449+
450+ // Send and ack packets until the congestion window grew so much that `w_est` is as big as
451+ // `cwnd_prior`.
452+ loop {
453+ first_sent_pn = next_pn_to_send;
454+ next_pn_to_send = fill_cwnd ( & mut cc, next_pn_to_send, now ( ) ) ;
455+ last_sent_pn = next_pn_to_send - 1 ;
456+ for pn in first_sent_pn..=last_sent_pn {
457+ // Calculate the projected increase of w_est
458+ acked_bytes += max_datagram_size;
459+ ( w_est_increase, acked_bytes_used) = expected_w_est_increase (
460+ convert_to_f64 ( cc. cwnd ( ) ) ,
461+ max_datagram_size,
462+ acked_bytes,
463+ NORMAL_ALPHA ,
464+ ) ;
465+ acked_bytes -= acked_bytes_used;
466+ w_est_projected += w_est_increase;
467+
468+ // Actually process the ACK
469+ ack_packet ( & mut cc, pn, now ( ) ) ;
470+ qinfo ! (
471+ "pn acked: {pn}, alpha: {}, w_est: {}, w_est_projected: {w_est_projected}" ,
472+ cc. cc_algorithm( ) . alpha( ) ,
473+ cc. cc_algorithm( ) . w_est( )
474+ ) ;
475+ }
476+ if cc. cc_algorithm ( ) . w_est ( ) >= cc. cc_algorithm ( ) . cwnd_prior ( ) {
477+ break ;
478+ }
479+ // Make sure `w_est` grew by the amount expected with the normal `alpha` value
480+ assert_within ( cc. cc_algorithm ( ) . w_est ( ) , w_est_projected, f64:: EPSILON ) ;
481+ }
482+
 483+ // Now `w_est` should be as big as `cwnd_prior`, thus `alpha` should have its increased value.
484+ assert ! ( cc. cc_algorithm( ) . w_est( ) >= cc. cc_algorithm( ) . cwnd_prior( ) ) ;
485+ assert_within ( cc. cc_algorithm ( ) . alpha ( ) , INCREASED_ALPHA , f64:: EPSILON ) ;
486+ }
0 commit comments