author     Deepti Raghavan          2018-07-09 19:53:39 +0200
committer  David S. Miller          2018-07-12 08:01:56 +0200
commit     4929c9428a171145f82f81aae0c3c25ef7d82837 (patch)
tree       06b40ce94924ece01ded4e949be9ba05f977faef
parent     net: sched: fix unprotected access to rcu cookie pointer (diff)
tcp: expose both send and receive intervals for rate sample
Congestion control algorithms, which access the rate sample through the tcp_cong_control function, only have access to the maximum of the send and receive interval, for cases where the acknowledgment rate may be inaccurate due to ACK compression or decimation. Algorithms may want to use send rates and receive rates as separate signals.

Signed-off-by: Deepti Raghavan <deeptir@mit.edu>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/net/tcp.h      2
-rw-r--r--  net/ipv4/tcp_rate.c    4
2 files changed, 6 insertions(+), 0 deletions(-)
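For illustration only, not part of this commit: a minimal sketch of how a congestion control module could consume the two new fields from its cong_control hook, the path the commit message refers to via tcp_cong_control. The function name example_cong_control is hypothetical; the rate_sample fields, tcp_sk(), div64_u64(), USEC_PER_SEC and pr_debug() are existing mainline kernel interfaces.

#include <linux/math64.h>
#include <net/tcp.h>

/* Hypothetical cong_control callback: derive separate send-side and
 * receive-side bandwidth estimates from a single rate sample.
 */
static void example_cong_control(struct sock *sk, const struct rate_sample *rs)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u64 bytes, snd_bw = 0, rcv_bw = 0;

        if (rs->delivered <= 0 || rs->interval_us <= 0)
                return;         /* no usable sample on this ACK */

        /* Approximate bytes delivered over the interval via the MSS. */
        bytes = (u64)rs->delivered * tp->mss_cache;

        /* Rate at which the sampled packets left the sender. */
        if (rs->snd_interval_us)
                snd_bw = div64_u64(bytes * USEC_PER_SEC, rs->snd_interval_us);

        /* Rate implied by the ACK arrivals; may be skewed by ACK
         * compression or decimation.
         */
        if (rs->rcv_interval_us)
                rcv_bw = div64_u64(bytes * USEC_PER_SEC, rs->rcv_interval_us);

        pr_debug("snd_bw=%llu rcv_bw=%llu bytes/s\n", snd_bw, rcv_bw);
        /* ... feed snd_bw and rcv_bw into the algorithm's model ... */
}

A module would typically lean on the send-side estimate when the two disagree sharply, since the ACK clock is the side that compression and decimation distort; that is the same reasoning behind the existing max(snd_us, ack_us) in interval_us.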
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cce37694776e..f6cb20e6e524 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -954,6 +954,8 @@ struct rate_sample {
u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
s32 delivered; /* number of packets delivered over interval */
long interval_us; /* time for tp->delivered to incr "delivered" */
+ u32 snd_interval_us; /* snd interval for delivered packets */
+ u32 rcv_interval_us; /* rcv interval for delivered packets */
long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
int losses; /* number of packets marked lost upon ACK */
u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index c61240e43923..4dff40dad4dc 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -146,6 +146,10 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
rs->prior_mstamp); /* ack phase */
rs->interval_us = max(snd_us, ack_us);

+ /* Record both segment send and ack receive intervals */
+ rs->snd_interval_us = snd_us;
+ rs->rcv_interval_us = ack_us;
+
/* Normally we expect interval_us >= min-rtt.
* Note that rate may still be over-estimated when a spuriously
* retransmistted skb was first (s)acked because "interval_us"
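Also illustrative, not defined by this patch: with both intervals recorded, a module can at least flag samples where the ACK-derived signal looks distorted. The helper name and the factor-of-two threshold are arbitrary placeholders.

#include <net/tcp.h>

/* If the ACKs arrived over a much shorter span than it took to send the
 * data, they were likely compressed, so the receive-side rate is inflated
 * and the send-side rate is the safer signal.
 */
static bool example_acks_look_compressed(const struct rate_sample *rs)
{
        if (!rs->snd_interval_us || !rs->rcv_interval_us)
                return false;

        return rs->rcv_interval_us < rs->snd_interval_us / 2;
}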