/* net/ipv4/tcp_recovery.c */
#include <linux/tcp.h>
#include <net/tcp.h>

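/* net.ipv4.tcp_recovery: the TCP_RACK_LOSS_DETECTION bit enables RACK
 * loss detection; the initializer below turns it on by default.
 */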
int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOSS_DETECTION;

static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
			      tcp_skb_pcount(skb));
	}
}

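/* Returns true if the packet stamped t1/seq1 was sent after the one
 * stamped t2/seq2: the later send time wins, and the sequence numbers
 * break ties between skbs sent within the same timestamp tick.
 */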
static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
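 * For example, if P1 and P2 are sent back to back and only P2 is
 * SACKed, dupthresh still needs more out-of-order deliveries before it
 * can act, while RACK declares P1 lost once P1 has been outstanding
 * longer than P2's RTT plus the reordering window.
 *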
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and is therefore robust against tail losses.
 * Another advantage is that it is more resilient to reordering: it
 * simply allows some "settling delay" instead of tweaking the
 * dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	u32 reo_wnd;

	*reo_timeout = 0;
	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000us). We use min_rtt instead of the smoothed
	 * RTT because reordering is often a path property and less related
	 * to queuing or delayed ACKs.
	 */
	reo_wnd = 1000;
	if ((tp->rack.reord || !tp->lost_out) && tcp_min_rtt(tp) != ~0U)
		reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
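	/* E.g. a measured min RTT of 40000 us yields reo_wnd = 10000 us.
	 * When no reordering has been seen and losses are already marked
	 * (rack.reord clear, lost_out set), the 1000 us floor is kept to
	 * avoid delaying recovery further.
	 */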

	tcp_for_write_queue(skb, sk) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);

		if (skb == tcp_send_head(sk))
			break;

		/* Skip ones already (s)acked */
		if (!after(scb->end_seq, tp->snd_una) ||
		    scb->sacked & TCPCB_SACKED_ACKED)
			continue;

		if (tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
					tp->rack.end_seq, scb->end_seq)) {
			/* Step 3 in draft-cheng-tcpm-rack-00.txt:
			 * A packet is lost if its elapsed time is beyond
			 * the recent RTT plus the reordering window.
			 */
			u32 elapsed = tcp_stamp_us_delta(tp->tcp_mstamp,
							 skb->skb_mstamp);
			s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;
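			/* E.g. rtt_us = 50000, reo_wnd = 10000 and
			 * elapsed = 65000 give remaining = -5000, so the
			 * skb is past its deadline and marked lost now;
			 * with elapsed = 55000, the remaining 5000 us
			 * feeds the reordering timeout recorded below.
			 */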

			if (remaining < 0) {
				tcp_rack_mark_skb_lost(sk, skb);
				continue;
			}

			/* Skip ones marked lost but not yet retransmitted */
			if ((scb->sacked & TCPCB_LOST) &&
			    !(scb->sacked & TCPCB_SACKED_RETRANS))
				continue;

			/* Record maximum wait time (+1 to avoid 0) */
			*reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);

		} else if (!(scb->sacked & TCPCB_RETRANS)) {
			/* Original data are sent sequentially so stop early
			 * because the rest were all sent after tp->rack.mstamp
			 */
			break;
		}
	}
}

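/* Re-run RACK loss detection after incoming ACKs have (s)acked data.
 * Returns early unless tcp_rack_advance() has moved the RACK state
 * forward since the last scan, so the write queue is not walked
 * needlessly. If some packets have not yet expired, arm the reordering
 * timer for the longest remaining wait.
 */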
void tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
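		/* Pad the wait by TCP_REO_TIMEOUT_MIN so the timer never
		 * fires before the minimum reordering timeout has passed.
		 */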
		timeout = usecs_to_jiffies(timeout + TCP_REO_TIMEOUT_MIN);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	if (tp->rack.mstamp &&
	    !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				 end_seq, tp->rack.end_seq))
		return;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (sacked & TCPCB_RETRANS) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or a prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., the retransmission is at
		 * least an RTT later).
		 */
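		/* E.g. with a 40 ms min RTT, a 5 ms sample measured from
		 * the retransmit time almost certainly means the (s)ack
		 * was for the original skb, so the bogus sample is
		 * discarded instead of advancing RACK.
		 */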
		if (rtt_us < tcp_min_rtt(tp))
			return;
	}
	tp->rack.rtt_us = rtt_us;
	tp->rack.mstamp = xmit_time;
	tp->rack.end_seq = end_seq;
	tp->rack.advanced = 1;
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
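	/* Marking packets lost shrinks tcp_packets_in_flight(), so a
	 * changed count means tcp_rack_detect_loss() expired some skbs
	 * that now need to be retransmitted.
	 */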
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}