net-tcp: Fast Open client - receiving SYN-ACK
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b224eb8bce8b3c6644eb003fd1346dde30e77c83..38b6a811edfc11f796bc75d76c1d49bc12c0d8f7 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -88,12 +88,14 @@ int sysctl_tcp_app_win __read_mostly = 31;
 int sysctl_tcp_adv_win_scale __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 
+/* RFC 5961 challenge ACK rate limiting */
+int sysctl_tcp_challenge_ack_limit = 100;
+
 int sysctl_tcp_stdurg __read_mostly;
 int sysctl_tcp_rfc1337 __read_mostly;
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 int sysctl_tcp_frto __read_mostly = 2;
 int sysctl_tcp_frto_response __read_mostly;
-int sysctl_tcp_nometrics_save __read_mostly;
 
 int sysctl_tcp_thin_dupack __read_mostly;
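The new sysctl_tcp_challenge_ack_limit caps how many RFC 5961 challenge
ACKs the stack may send per second. Note that this is one global budget
shared by all sockets; it is consumed by tcp_send_challenge_ack(), added
further down. The vanishing sysctl_tcp_nometrics_save leaves with the
metrics code (see the removed functions below). A sketch of the matching
knob registration; in the tree the actual entry lives in
net/ipv4/sysctl_net_ipv4.c:

    {
            .procname     = "tcp_challenge_ack_limit",
            .data         = &sysctl_tcp_challenge_ack_limit,
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = proc_dointvec
    },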
 
@@ -701,7 +703,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 /* Calculate rto without backoff.  This is the second half of Van Jacobson's
  * routine referred to above.
  */
-static inline void tcp_set_rto(struct sock *sk)
+void tcp_set_rto(struct sock *sk)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        /* Old crap is replaced with new one. 8)
@@ -728,109 +730,6 @@ static inline void tcp_set_rto(struct sock *sk)
        tcp_bound_rto(sk);
 }
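tcp_set_rto() loses its static inline linkage so that the relocated
metrics code can call it when seeding a new socket from cached values.
For reference, a standalone model of the Van Jacobson RTO computed here,
using the kernel's fixed-point convention (tp->srtt holds 8*RTT, and
tp->rttvar is pre-scaled so it already stands in for the 4*mdev term,
per the scaling comments in tcp_rtt_estimator()):

    #include <stdio.h>

    /* rto = srtt/8 + rttvar, clamped as tcp_bound_rto() would do */
    static unsigned int model_rto(unsigned int srtt8, unsigned int rttvar,
                                  unsigned int rto_max)
    {
            unsigned int rto = (srtt8 >> 3) + rttvar;

            return rto > rto_max ? rto_max : rto;
    }

    int main(void)
    {
            /* srtt = 100ms (stored as 800), rttvar = 50ms, max 120s */
            printf("rto = %u ms\n", model_rto(800, 50, 120000));
            return 0;
    }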
 
-/* Save metrics learned by this TCP session.
-   This function is called only, when TCP finishes successfully
-   i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
- */
-void tcp_update_metrics(struct sock *sk)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       struct dst_entry *dst = __sk_dst_get(sk);
-
-       if (sysctl_tcp_nometrics_save)
-               return;
-
-       dst_confirm(dst);
-
-       if (dst && (dst->flags & DST_HOST)) {
-               const struct inet_connection_sock *icsk = inet_csk(sk);
-               int m;
-               unsigned long rtt;
-
-               if (icsk->icsk_backoff || !tp->srtt) {
-                       /* This session failed to estimate rtt. Why?
-                        * Probably, no packets returned in time.
-                        * Reset our results.
-                        */
-                       if (!(dst_metric_locked(dst, RTAX_RTT)))
-                               dst_metric_set(dst, RTAX_RTT, 0);
-                       return;
-               }
-
-               rtt = dst_metric_rtt(dst, RTAX_RTT);
-               m = rtt - tp->srtt;
-
-               /* If newly calculated rtt larger than stored one,
-                * store new one. Otherwise, use EWMA. Remember,
-                * rtt overestimation is always better than underestimation.
-                */
-               if (!(dst_metric_locked(dst, RTAX_RTT))) {
-                       if (m <= 0)
-                               set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
-                       else
-                               set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
-               }
-
-               if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
-                       unsigned long var;
-                       if (m < 0)
-                               m = -m;
-
-                       /* Scale deviation to rttvar fixed point */
-                       m >>= 1;
-                       if (m < tp->mdev)
-                               m = tp->mdev;
-
-                       var = dst_metric_rtt(dst, RTAX_RTTVAR);
-                       if (m >= var)
-                               var = m;
-                       else
-                               var -= (var - m) >> 2;
-
-                       set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
-               }
-
-               if (tcp_in_initial_slowstart(tp)) {
-                       /* Slow start still did not finish. */
-                       if (dst_metric(dst, RTAX_SSTHRESH) &&
-                           !dst_metric_locked(dst, RTAX_SSTHRESH) &&
-                           (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
-                               dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
-                       if (!dst_metric_locked(dst, RTAX_CWND) &&
-                           tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
-                               dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
-               } else if (tp->snd_cwnd > tp->snd_ssthresh &&
-                          icsk->icsk_ca_state == TCP_CA_Open) {
-                       /* Cong. avoidance phase, cwnd is reliable. */
-                       if (!dst_metric_locked(dst, RTAX_SSTHRESH))
-                               dst_metric_set(dst, RTAX_SSTHRESH,
-                                              max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
-                       if (!dst_metric_locked(dst, RTAX_CWND))
-                               dst_metric_set(dst, RTAX_CWND,
-                                              (dst_metric(dst, RTAX_CWND) +
-                                               tp->snd_cwnd) >> 1);
-               } else {
-                       /* Else slow start did not finish, cwnd is non-sense,
-                          ssthresh may be also invalid.
-                        */
-                       if (!dst_metric_locked(dst, RTAX_CWND))
-                               dst_metric_set(dst, RTAX_CWND,
-                                              (dst_metric(dst, RTAX_CWND) +
-                                               tp->snd_ssthresh) >> 1);
-                       if (dst_metric(dst, RTAX_SSTHRESH) &&
-                           !dst_metric_locked(dst, RTAX_SSTHRESH) &&
-                           tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
-                               dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
-               }
-
-               if (!dst_metric_locked(dst, RTAX_REORDERING)) {
-                       if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
-                           tp->reordering != sysctl_tcp_reordering)
-                               dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
-               }
-       }
-}
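tcp_update_metrics() is not dropped: it moves, together with
tcp_init_metrics() below, into the new net/ipv4/tcp_metrics.c (this
blobdiff aggregates several series from the same merge window). Its
overestimate-friendly RTT merge deserves a worked example; a userspace
model of the rule removed above:

    /* If the session srtt is at least the cached RTT, take it verbatim;
     * otherwise decay the cached value by 1/8 of the difference, i.e.
     * the m = rtt - tp->srtt branch above. */
    static unsigned long merge_rtt(unsigned long cached, unsigned long srtt)
    {
            long m = (long)cached - (long)srtt;

            return m <= 0 ? srtt : cached - (unsigned long)(m >> 3);
    }
    /* merge_rtt(200, 120) == 190, merge_rtt(100, 150) == 150 */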
-
 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
 {
        __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
@@ -867,7 +766,7 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
  * Packet counting of FACK is based on in-order assumptions, therefore TCP
  * disables it when reordering is detected
  */
-static void tcp_disable_fack(struct tcp_sock *tp)
+void tcp_disable_fack(struct tcp_sock *tp)
 {
        /* RFC3517 uses different metric in lost marker => reset on change */
        if (tcp_is_fack(tp))
@@ -881,86 +780,6 @@ static void tcp_dsack_seen(struct tcp_sock *tp)
        tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
 }
 
-/* Initialize metrics on socket. */
-
-static void tcp_init_metrics(struct sock *sk)
-{
-       struct tcp_sock *tp = tcp_sk(sk);
-       struct dst_entry *dst = __sk_dst_get(sk);
-
-       if (dst == NULL)
-               goto reset;
-
-       dst_confirm(dst);
-
-       if (dst_metric_locked(dst, RTAX_CWND))
-               tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
-       if (dst_metric(dst, RTAX_SSTHRESH)) {
-               tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
-               if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
-                       tp->snd_ssthresh = tp->snd_cwnd_clamp;
-       } else {
-               /* ssthresh may have been reduced unnecessarily during.
-                * 3WHS. Restore it back to its initial default.
-                */
-               tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-       }
-       if (dst_metric(dst, RTAX_REORDERING) &&
-           tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
-               tcp_disable_fack(tp);
-               tcp_disable_early_retrans(tp);
-               tp->reordering = dst_metric(dst, RTAX_REORDERING);
-       }
-
-       if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
-               goto reset;
-
-       /* Initial rtt is determined from SYN,SYN-ACK.
-        * The segment is small and rtt may appear much
-        * less than real one. Use per-dst memory
-        * to make it more realistic.
-        *
-        * A bit of theory. RTT is time passed after "normal" sized packet
-        * is sent until it is ACKed. In normal circumstances sending small
-        * packets force peer to delay ACKs and calculation is correct too.
-        * The algorithm is adaptive and, provided we follow specs, it
-        * NEVER underestimate RTT. BUT! If peer tries to make some clever
-        * tricks sort of "quick acks" for time long enough to decrease RTT
-        * to low value, and then abruptly stops to do it and starts to delay
-        * ACKs, wait for troubles.
-        */
-       if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
-               tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
-               tp->rtt_seq = tp->snd_nxt;
-       }
-       if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
-               tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
-               tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
-       }
-       tcp_set_rto(sk);
-reset:
-       if (tp->srtt == 0) {
-               /* RFC6298: 5.7 We've failed to get a valid RTT sample from
-                * 3WHS. This is most likely due to retransmission,
-                * including spurious one. Reset the RTO back to 3secs
-                * from the more aggressive 1sec to avoid more spurious
-                * retransmission.
-                */
-               tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
-               inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
-       }
-       /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
-        * retransmitted. In light of RFC6298 more aggressive 1sec
-        * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
-        * retransmission has occurred.
-        */
-       if (tp->total_retrans > 1)
-               tp->snd_cwnd = 1;
-       else
-               tp->snd_cwnd = tcp_init_cwnd(tp, dst);
-       tp->snd_cwnd_stamp = tcp_time_stamp;
-}
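tcp_init_metrics() moves to net/ipv4/tcp_metrics.c as well, unchanged:
it still restores ssthresh/cwnd/RTT state from the destination cache,
still falls back to the conservative 3s RTO (TCP_TIMEOUT_FALLBACK) when
the handshake produced no usable RTT sample, and still cuts cwnd to 1
only after more than one SYN or SYN-ACK retransmission.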
-
 static void tcp_update_reordering(struct sock *sk, const int metric,
                                  const int ts)
 {
@@ -3869,9 +3688,11 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                        tcp_cong_avoid(sk, ack, prior_in_flight);
        }
 
-       if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
-               dst_confirm(__sk_dst_get(sk));
-
+       if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
+               struct dst_entry *dst = __sk_dst_get(sk);
+               if (dst)
+                       dst_confirm(dst);
+       }
        return 1;
 
 no_queue:
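The open-coded NULL check here (and in the FIN_WAIT1 path at the end of
this diff) appears to accompany the delayed-neigh-confirmation rework
from the same window, after which dst_confirm() merely sets
dst->pending_confirm and would dereference a NULL dst.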
@@ -3911,7 +3732,8 @@ old_ack:
  * the fast version below fails.
  */
 void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx,
-                      const u8 **hvpp, int estab)
+                      const u8 **hvpp, int estab,
+                      struct tcp_fastopen_cookie *foc)
 {
        const unsigned char *ptr;
        const struct tcphdr *th = tcp_hdr(skb);
@@ -4018,8 +3840,25 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
                                        break;
                                }
                                break;
-                       }
 
+                       case TCPOPT_EXP:
+                               /* The Fast Open option shares experimental
+                                * kind 254 with a 16-bit magic number. It is
+                                * valid only in a SYN or SYN-ACK and must
+                                * have an even length.
+                                */
+                               if (opsize < TCPOLEN_EXP_FASTOPEN_BASE ||
+                                   get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC ||
+                                   foc == NULL || !th->syn || (opsize & 1))
+                                       break;
+                               foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE;
+                               if (foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
+                                   foc->len <= TCP_FASTOPEN_COOKIE_MAX)
+                                       memcpy(foc->val, ptr + 2, foc->len);
+                               else if (foc->len != 0)
+                                       foc->len = -1;
+                               break;
+
+                       }
                        ptr += opsize-2;
                        length -= opsize;
                }
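Wire format of the experimental option handled above: kind 254
(TCPOPT_EXP), one length byte, the 16-bit magic 0xF989
(TCPOPT_FASTOPEN_MAGIC; kind + length + magic give the
TCPOLEN_EXP_FASTOPEN_BASE of 4), then a cookie of 4..16 bytes. A
standalone sketch of the same validation:

    #include <stdint.h>
    #include <string.h>

    #define TCPOPT_EXP              254
    #define TCPOPT_FASTOPEN_MAGIC   0xF989  /* bytes 0xF9 0x89 on the wire */
    #define TCP_FASTOPEN_COOKIE_MIN 4
    #define TCP_FASTOPEN_COOKIE_MAX 16

    /* 'opt' points at the kind byte; 'in_syn' mirrors th->syn. Returns
     * the cookie length, 0 for a bare cookie request, or -1 when the
     * option is malformed (the foc->len = -1 case above). */
    static int parse_fastopen_exp(const uint8_t *opt, int in_syn,
                                  uint8_t cookie[TCP_FASTOPEN_COOKIE_MAX])
    {
            int opsize = opt[1];
            int len = opsize - 4;

            if (opt[0] != TCPOPT_EXP || opsize < 4 || !in_syn ||
                (opsize & 1) ||
                (((uint16_t)opt[2] << 8) | opt[3]) != TCPOPT_FASTOPEN_MAGIC)
                    return -1;
            if (len == 0)
                    return 0;       /* bare cookie request in a SYN */
            if (len < TCP_FASTOPEN_COOKIE_MIN || len > TCP_FASTOPEN_COOKIE_MAX)
                    return -1;
            memcpy(cookie, opt + 4, len);
            return len;
    }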
@@ -4061,7 +3900,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
                if (tcp_parse_aligned_timestamp(tp, th))
                        return true;
        }
-       tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
+       tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
        return true;
 }
 
@@ -4579,8 +4418,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 
        TCP_ECN_check_ce(tp, skb);
 
-       if (tcp_try_rmem_schedule(sk, skb->truesize)) {
-               /* TODO: should increment a counter */
+       if (unlikely(tcp_try_rmem_schedule(sk, skb->truesize))) {
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
                __kfree_skb(skb);
                return;
        }
@@ -4589,6 +4428,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
        tp->pred_flags = 0;
        inet_csk_schedule_ack(sk);
 
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
        SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
                   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 
@@ -4642,6 +4482,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
        if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
                if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
                        /* All the bits are present. Drop. */
+                       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
                        __kfree_skb(skb);
                        skb = NULL;
                        tcp_dsack_set(sk, seq, end_seq);
@@ -4680,6 +4521,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
                __skb_unlink(skb1, &tp->out_of_order_queue);
                tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
                                 TCP_SKB_CB(skb1)->end_seq);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
                __kfree_skb(skb1);
        }
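The three new MIB counters surface in /proc/net/netstat as TCPOFODrop
(out-of-order segment dropped for lack of receive memory), TCPOFOQueue
(segment queued out of order) and TCPOFOMerge (segment freed because the
queue already covered its bytes), replacing the old TODO.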
 
@@ -5426,11 +5268,28 @@ out:
 }
 #endif /* CONFIG_NET_DMA */
 
+static void tcp_send_challenge_ack(struct sock *sk)
+{
+       /* unprotected statics; we don't care about racy overwrites */
+       static u32 challenge_timestamp;
+       static unsigned int challenge_count;
+       u32 now = jiffies / HZ;
+
+       if (now != challenge_timestamp) {
+               challenge_timestamp = now;
+               challenge_count = 0;
+       }
+       if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+               tcp_send_ack(sk);
+       }
+}
+
 /* Does PAWS and seqno-based validation of an incoming segment; flags
  * play a significant role here.
  */
-static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
-                             const struct tcphdr *th, int syn_inerr)
+static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+                                 const struct tcphdr *th, int syn_inerr)
 {
        const u8 *hash_location;
        struct tcp_sock *tp = tcp_sk(sk);
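tcp_send_challenge_ack() enforces the per-second budget in coarse
one-second buckets (jiffies / HZ); the unlocked statics are deliberate,
since a racy overwrite only loosens the limit slightly. The int-to-bool
conversion of tcp_validate_incoming() is cleanup that lets both callers
drop their res temporaries, as the hunks further down show.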
@@ -5455,14 +5314,26 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
                 * an acknowledgment should be sent in reply (unless the RST
                 * bit is set, if so drop the segment and return)".
                 */
-               if (!th->rst)
+               if (!th->rst) {
+                       if (th->syn)
+                               goto syn_challenge;
                        tcp_send_dupack(sk, skb);
+               }
                goto discard;
        }
 
        /* Step 2: check RST bit */
        if (th->rst) {
-               tcp_reset(sk);
+               /* RFC 5961 3.2 :
+                * If sequence number exactly matches RCV.NXT, then
+                *     RESET the connection
+                * else
+                *     Send a challenge ACK
+                */
+               if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt)
+                       tcp_reset(sk);
+               else
+                       tcp_send_challenge_ack(sk);
                goto discard;
        }
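This is RFC 5961 3.2 against blind reset attacks: an in-window RST whose
sequence number is not exactly RCV.NXT no longer tears down the
connection. The challenge ACK tells a genuine peer our precise expected
sequence number so it can send an acceptable RST, while an off-path
attacker must now hit RCV.NXT exactly instead of merely landing anywhere
in the window.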
 
@@ -5473,20 +5344,23 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 
        /* step 3: check security and precedence [ignored] */
 
-       /* step 4: Check for a SYN in window. */
-       if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
+       /* step 4: Check for a SYN
+        * RFC 5961 4.2: Send a challenge ACK
+        */
+       if (th->syn) {
+syn_challenge:
                if (syn_inerr)
                        TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
-               tcp_reset(sk);
-               return -1;
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
+               tcp_send_challenge_ack(sk);
+               goto discard;
        }
 
-       return 1;
+       return true;
 
 discard:
        __kfree_skb(skb);
-       return 0;
+       return false;
 }
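Likewise, per RFC 5961 4.2 a SYN received on an established connection
no longer resets it (the old LINUX_MIB_TCPABORTONSYN path); it draws a
rate-limited challenge ACK, counted as LINUX_MIB_TCPSYNCHALLENGE. The
syn_challenge label also lets step 1 funnel out-of-window SYNs here
instead of answering them with a duplicate ACK.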
 
 /*
@@ -5516,7 +5390,18 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                        const struct tcphdr *th, unsigned int len)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       int res;
+
+       if (sk->sk_rx_dst) {
+               struct dst_entry *dst = sk->sk_rx_dst;
+               if (unlikely(dst->obsolete)) {
+                       if (dst->ops->check(dst, 0) == NULL) {
+                               dst_release(dst);
+                               sk->sk_rx_dst = NULL;
+                       }
+               }
+       }
+       if (unlikely(sk->sk_rx_dst == NULL))
+               sk->sk_rx_dst = dst_clone(skb_dst(skb));
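This block maintains the early-demux input route cache introduced in the
same window: a cached sk->sk_rx_dst that has gone obsolete is
revalidated through its ->check() op and dropped if dead, and a missing
entry is repopulated from the current skb, sparing the established fast
path a full route lookup per packet.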
 
        /*
         *      Header prediction.
@@ -5693,9 +5578,8 @@ slow_path:
         *      Standard slow path.
         */
 
-       res = tcp_validate_incoming(sk, skb, th, 1);
-       if (res <= 0)
-               return -res;
+       if (!tcp_validate_incoming(sk, skb, th, 1))
+               return 0;
 
 step5:
        if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
@@ -5729,8 +5613,10 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 
        tcp_set_state(sk, TCP_ESTABLISHED);
 
-       if (skb != NULL)
+       if (skb != NULL) {
+               sk->sk_rx_dst = dst_clone(skb_dst(skb));
                security_inet_conn_established(sk, skb);
+       }
 
        /* Make sure socket is routed, for correct metrics.  */
        icsk->icsk_af_ops->rebuild_header(sk);
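The active-open side primes the same cache: on finishing the handshake,
the input route of the SYN-ACK is cloned into sk->sk_rx_dst, so the
first data segments already take the early-demux fast path.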
@@ -5760,6 +5646,34 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
        }
 }
 
+static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
+                                   struct tcp_fastopen_cookie *cookie)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct sk_buff *data = tcp_write_queue_head(sk);
+       u16 mss = tp->rx_opt.mss_clamp;
+
+       if (mss == tp->rx_opt.user_mss) {
+               struct tcp_options_received opt;
+               const u8 *hash_location;
+
+               /* Get original SYNACK MSS value if user MSS sets mss_clamp */
+               tcp_clear_options(&opt);
+               opt.user_mss = opt.mss_clamp = 0;
+               tcp_parse_options(synack, &opt, &hash_location, 0, NULL);
+               mss = opt.mss_clamp;
+       }
+
+       tcp_fastopen_cache_set(sk, mss, cookie);
+
+       if (data) { /* Retransmit unacked data in SYN */
+               tcp_retransmit_skb(sk, data);
+               tcp_rearm_rto(sk);
+               return true;
+       }
+       return false;
+}
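This helper is the heart of the patch named in the subject: on SYN-ACK
receipt the client caches the server's MSS and Fast Open cookie via
tcp_fastopen_cache_set(), and if data sent on the SYN is still
unacknowledged (the server ignored or dropped it), that data is
retransmitted at once and the RTO rearmed. A hypothetical userspace
client exercising this path; MSG_FASTOPEN arrived with the client-side
series, and net.ipv4.tcp_fastopen must have the client bit set:

    #include <stddef.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef MSG_FASTOPEN
    #define MSG_FASTOPEN 0x20000000
    #endif

    /* Combines connect() with the first write: with a cached cookie the
     * data rides on the SYN; without one the kernel sends a plain SYN
     * carrying a cookie request and the data after the handshake. */
    static int tfo_send(const struct sockaddr_in *srv,
                        const void *buf, size_t len)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            if (fd < 0)
                    return -1;
            if (sendto(fd, buf, len, MSG_FASTOPEN,
                       (const struct sockaddr *)srv, sizeof(*srv)) < 0)
                    return -1;      /* caller closes fd on error */
            return fd;
    }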
+
 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                                         const struct tcphdr *th, unsigned int len)
 {
@@ -5767,9 +5681,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_cookie_values *cvp = tp->cookie_values;
+       struct tcp_fastopen_cookie foc = { .len = -1 };
        int saved_clamp = tp->rx_opt.mss_clamp;
 
-       tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
+       tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc);
 
        if (th->ack) {
                /* rfc793:
@@ -5779,11 +5694,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                 *        If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
                 *        a reset (unless the RST bit is set, if so drop
                 *        the segment and return)"
-                *
-                *  We do not send data with SYN, so that RFC-correct
-                *  test reduces to:
                 */
-               if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
+               if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
+                   after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt))
                        goto reset_and_undo;
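The old equality test assumed the SYN carries no data; with Fast Open it
may, so snd_nxt can run ahead of what the SYN-ACK acknowledges. The
check therefore becomes the full RFC 793 acceptability test: accept any
ACK in the range (SND.UNA, SND.NXT].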
 
                if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -5895,6 +5808,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 
                tcp_finish_connect(sk, skb);
 
+               if (tp->syn_fastopen && tcp_rcv_fastopen_synack(sk, skb, &foc))
+                       return -1;
+
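When the Fast Open helper reports that it retransmitted SYN data, the
function returns -1 so the caller frees the skb without sending a pure
ACK; the retransmitted data segment already acknowledges the SYN-ACK.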
                if (sk->sk_write_pending ||
                    icsk->icsk_accept_queue.rskq_defer_accept ||
                    icsk->icsk_ack.pingpong) {
@@ -6013,7 +5929,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int queued = 0;
-       int res;
 
        tp->rx_opt.saw_tstamp = 0;
 
@@ -6068,9 +5983,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                return 0;
        }
 
-       res = tcp_validate_incoming(sk, skb, th, 0);
-       if (res <= 0)
-               return -res;
+       if (!tcp_validate_incoming(sk, skb, th, 0))
+               return 0;
 
        /* step 5: check the ACK field */
        if (th->ack) {
@@ -6126,9 +6040,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
                case TCP_FIN_WAIT1:
                        if (tp->snd_una == tp->write_seq) {
+                               struct dst_entry *dst;
+
                                tcp_set_state(sk, TCP_FIN_WAIT2);
                                sk->sk_shutdown |= SEND_SHUTDOWN;
-                               dst_confirm(__sk_dst_get(sk));
+
+                               dst = __sk_dst_get(sk);
+                               if (dst)
+                                       dst_confirm(dst);
 
                                if (!sock_flag(sk, SOCK_DEAD))
                                        /* Wake up lingering close() */