path: root/net/ipv4/tcp_input.c
author      Yuchung Cheng            2012-09-02 19:38:02 +0200
committer   David S. Miller          2012-09-03 20:34:02 +0200
commit      09484d1f6edb3b798c5926a6755f1badb77ece9f (patch)
tree        217059c32218b27c177293a75cbedfeb78e4a6fa /net/ipv4/tcp_input.c
parent      net: sierra_net: rx_urb_size is constant (diff)
tcp: move tcp_enter_cwr()
To prepare for replacing rate halving with the PRR algorithm in CWR state.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
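For context, the PRR approach this move prepares for (Proportional Rate Reduction, later specified in RFC 6937) computes, on each ACK during recovery, how many packets may be sent so that cwnd converges smoothly toward ssthresh instead of being halved one packet per ACK. The sketch below is only an illustration of that computation, not the code added by the follow-up patches; the names (pipe, recover_fs, prr_delivered, prr_out, delivered_now) follow the RFC pseudocode, are assumptions for this example, and count whole packets.

/*
 * Simplified, hypothetical sketch of the PRR sndcnt/cwnd computation
 * (per-packet accounting, PRR-SSRB variant from RFC 6937). Not the
 * kernel implementation introduced by the later patches in this series.
 */
static unsigned int prr_cwnd_sketch(unsigned int pipe,          /* packets in flight */
				    unsigned int ssthresh,      /* target cwnd at end of recovery */
				    unsigned int recover_fs,    /* flight size at start of recovery */
				    unsigned int prr_delivered, /* packets delivered so far in recovery */
				    unsigned int prr_out,       /* packets sent so far in recovery */
				    unsigned int delivered_now) /* packets newly delivered by this ACK */
{
	int sndcnt;

	if (pipe > ssthresh) {
		/* Proportional reduction: spread the cwnd cut over the whole recovery. */
		sndcnt = (int)((prr_delivered * ssthresh + recover_fs - 1) / recover_fs)
			 - (int)prr_out;
	} else {
		/* Pipe drained below ssthresh: slow-start back up, but never beyond it. */
		int limit = (int)prr_delivered - (int)prr_out;

		if (limit < (int)delivered_now)
			limit = (int)delivered_now;
		limit += 1;	/* + 1 MSS safeguard, as in the RFC */
		sndcnt = (int)(ssthresh - pipe);
		if (sndcnt > limit)
			sndcnt = limit;
	}
	if (sndcnt < 0)
		sndcnt = 0;
	return pipe + (unsigned int)sndcnt;	/* cwnd to use for this ACK */
}

Because tcp_enter_cwr() sets the state that such a per-ACK computation would run in (ssthresh, high_seq, the CWR congestion-avoidance state), moving it next to the recovery-time helpers like tcp_complete_cwr() keeps the related logic together for the PRR patches that follow.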
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--    net/ipv4/tcp_input.c    46
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8c304a400798..3ab0c7573a0b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -743,29 +743,6 @@ __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}
-/* Set slow start threshold and cwnd not falling to slow start */
-void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- const struct inet_connection_sock *icsk = inet_csk(sk);
-
- tp->prior_ssthresh = 0;
- tp->bytes_acked = 0;
- if (icsk->icsk_ca_state < TCP_CA_CWR) {
- tp->undo_marker = 0;
- if (set_ssthresh)
- tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
- tp->snd_cwnd = min(tp->snd_cwnd,
- tcp_packets_in_flight(tp) + 1U);
- tp->snd_cwnd_cnt = 0;
- tp->high_seq = tp->snd_nxt;
- tp->snd_cwnd_stamp = tcp_time_stamp;
- TCP_ECN_queue_cwr(tp);
-
- tcp_set_ca_state(sk, TCP_CA_CWR);
- }
-}
-
/*
* Packet counting of FACK is based on in-order assumptions, therefore TCP
* disables it when reordering is detected
@@ -2741,6 +2718,29 @@ static inline void tcp_complete_cwr(struct sock *sk)
tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
}
+/* Set slow start threshold and cwnd not falling to slow start */
+void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+
+ tp->prior_ssthresh = 0;
+ tp->bytes_acked = 0;
+ if (icsk->icsk_ca_state < TCP_CA_CWR) {
+ tp->undo_marker = 0;
+ if (set_ssthresh)
+ tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+ tp->snd_cwnd = min(tp->snd_cwnd,
+ tcp_packets_in_flight(tp) + 1U);
+ tp->snd_cwnd_cnt = 0;
+ tp->high_seq = tp->snd_nxt;
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+ TCP_ECN_queue_cwr(tp);
+
+ tcp_set_ca_state(sk, TCP_CA_CWR);
+ }
+}
+
static void tcp_try_keep_open(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);