[TCP] Allow len == skb->len in tcp_fragment
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 75b68116682ae2912ca8e0dd4faf84eb4993e6bd..b907456a79f46373bb81aac30ff9123910901952 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -190,7 +190,7 @@ void tcp_select_initial_window(int __space, __u32 mss,
        }
 
        /* Set initial window to value enough for senders,
-        * following RFC1414. Senders, not following this RFC,
+        * following RFC2414. Senders, not following this RFC,
         * will be satisfied with 2.
         */
        if (mss > (1<<*rcv_wscale)) {
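Note: the reference fixed above is RFC 2414 ("Increasing TCP's Initial
Window"), which permits an initial window of min(4*MSS, max(2*MSS, 4380
bytes)); the clamp that follows sizes the advertised receive window so
such senders are not held back. A minimal standalone sketch of that
formula (illustration only, not kernel code; the function name is made
up):

	#include <stdio.h>

	/* RFC 2414 initial window: min(4*MSS, max(2*MSS, 4380 bytes)). */
	static unsigned int rfc2414_initial_window(unsigned int mss)
	{
		unsigned int iw = (2 * mss > 4380) ? 2 * mss : 4380;

		if (iw > 4 * mss)
			iw = 4 * mss;
		return iw;
	}

	int main(void)
	{
		/* For the common Ethernet MSS of 1460 this yields 4380 bytes,
		 * i.e. three full-sized segments.
		 */
		printf("%u\n", rfc2414_initial_window(1460));
		return 0;
	}
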
@@ -428,13 +428,14 @@ static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned
  * packet to the list.  This won't be called frequently, I hope. 
  * Remember, these are still headerless SKBs at this point.
  */
-static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
+int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *buff;
-       int nsize;
+       int nsize, old_factor;
        u16 flags;
 
+       BUG_ON(len > skb->len);
        nsize = skb_headlen(skb) - len;
        if (nsize < 0)
                nsize = 0;
@@ -459,9 +460,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned
        flags = TCP_SKB_CB(skb)->flags;
        TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
        TCP_SKB_CB(buff)->flags = flags;
-       TCP_SKB_CB(buff)->sacked =
-               (TCP_SKB_CB(skb)->sacked &
-                (TCPCB_LOST | TCPCB_EVER_RETRANS | TCPCB_AT_TAIL));
+       TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
        TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;
 
        if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_HW) {
@@ -485,23 +484,44 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned
        TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
        buff->tstamp = skb->tstamp;
 
-       if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
-               tp->lost_out -= tcp_skb_pcount(skb);
-               tp->left_out -= tcp_skb_pcount(skb);
-       }
+       old_factor = tcp_skb_pcount(skb);
 
        /* Fix up tso_factor for both original and new SKB.  */
        tcp_set_skb_tso_segs(sk, skb, mss_now);
        tcp_set_skb_tso_segs(sk, buff, mss_now);
 
-       if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
-               tp->lost_out += tcp_skb_pcount(skb);
-               tp->left_out += tcp_skb_pcount(skb);
-       }
+       /* If this packet has been sent out already, we must
+        * adjust the various packet counters.
+        */
+       if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
+               int diff = old_factor - tcp_skb_pcount(skb) -
+                       tcp_skb_pcount(buff);
+
+               tp->packets_out -= diff;
+
+               if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
+                       tp->sacked_out -= diff;
+               if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
+                       tp->retrans_out -= diff;
+
+               if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
+                       tp->lost_out -= diff;
+                       tp->left_out -= diff;
+               }
+
+               if (diff > 0) {
+                       /* Adjust Reno SACK estimate. */
+                       if (!tp->rx_opt.sack_ok) {
+                               tp->sacked_out -= diff;
+                               if ((int)tp->sacked_out < 0)
+                                       tp->sacked_out = 0;
+                               tcp_sync_left_out(tp);
+                       }
 
-       if (TCP_SKB_CB(buff)->sacked&TCPCB_LOST) {
-               tp->lost_out += tcp_skb_pcount(buff);
-               tp->left_out += tcp_skb_pcount(buff);
+                       tp->fackets_out -= diff;
+                       if ((int)tp->fackets_out < 0)
+                               tp->fackets_out = 0;
+               }
        }
 
        /* Link BUFF into the send queue. */
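Note: the block added above moves the packet-counter fixup into
tcp_fragment() itself. When an skb that has already been sent is split
at a point that is not a multiple of the MSS, the two halves can cover
more segments than the original did, so packets_out (and, where the
relevant SACK bits are set, sacked_out/retrans_out/lost_out) must be
adjusted by diff = old_factor - pcount(skb) - pcount(buff). A
standalone sketch of that arithmetic, modelling tcp_skb_pcount() as
ceil(len / mss) purely for illustration:

	#include <stdio.h>

	/* Segment count of a buffer of 'len' bytes at the given MSS. */
	static int pcount(unsigned int len, unsigned int mss)
	{
		return (len + mss - 1) / mss;
	}

	int main(void)
	{
		unsigned int mss = 1460, len = 4000, split = 1000;
		int old_factor = pcount(len, mss);		/* 3 */
		int new_total = pcount(split, mss)		/* 1 */
			      + pcount(len - split, mss);	/* 3 */
		int diff = old_factor - new_total;		/* -1 */

		/* A negative diff means the split created an extra segment,
		 * so packets_out must grow by -diff; a zero diff (an
		 * MSS-aligned split) leaves the counters untouched.
		 */
		printf("old=%d new=%d diff=%d\n", old_factor, new_total, diff);
		return 0;
	}
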
@@ -1350,12 +1370,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
        if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
                if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
                        BUG();
-
-               if (sk->sk_route_caps & NETIF_F_TSO) {
-                       sk->sk_route_caps &= ~NETIF_F_TSO;
-                       sock_set_flag(sk, SOCK_NO_LARGESEND);
-               }
-
                if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
                        return -ENOMEM;
        }
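Note: the hunk above only drops the workaround that disabled TSO when a
partially acked skb had to be trimmed; the wraparound-safe sequence
checks (the before()/end_seq comparisons) are unchanged. A standalone
sketch of that comparison idiom, which as far as I know mirrors the
kernel's before() (signed 32-bit difference):

	#include <stdint.h>
	#include <stdio.h>

	/* True if seq1 precedes seq2 in 32-bit sequence space. */
	static int seq_before(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) < 0;
	}

	int main(void)
	{
		/* Plain case, and a case where the sequence space has
		 * wrapped: 0xfffffff0 still counts as "before" 0x10.
		 */
		printf("%d %d\n", seq_before(10, 20),
		       seq_before(0xfffffff0u, 0x10u));
		return 0;
	}
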
@@ -1370,22 +1384,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                return -EAGAIN;
 
        if (skb->len > cur_mss) {
-               int old_factor = tcp_skb_pcount(skb);
-               int diff;
-
                if (tcp_fragment(sk, skb, cur_mss, cur_mss))
                        return -ENOMEM; /* We'll try again later. */
-
-               /* New SKB created, account for it. */
-               diff = old_factor - tcp_skb_pcount(skb) -
-                      tcp_skb_pcount(skb->next);
-               tp->packets_out -= diff;
-
-               if (diff > 0) {
-                       tp->fackets_out -= diff;
-                       if ((int)tp->fackets_out < 0)
-                               tp->fackets_out = 0;
-               }
        }
 
        /* Collapse two adjacent packets if worthwhile and we can. */
@@ -1609,7 +1609,7 @@ void tcp_send_fin(struct sock *sk)
  * was unread data in the receive queue.  This behavior is recommended
  * by draft-ietf-tcpimpl-prob-03.txt section 3.10.  -DaveM
  */
-void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority)
+void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
@@ -1993,12 +1993,6 @@ int tcp_write_wakeup(struct sock *sk)
                                TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
                                if (tcp_fragment(sk, skb, seg_size, mss))
                                        return -1;
-                               /* SWS override triggered forced fragmentation.
-                                * Disable TSO, the connection is too sick. */
-                               if (sk->sk_route_caps & NETIF_F_TSO) {
-                                       sock_set_flag(sk, SOCK_NO_LARGESEND);
-                                       sk->sk_route_caps &= ~NETIF_F_TSO;
-                               }
                        } else if (!tcp_skb_pcount(skb))
                                tcp_set_skb_tso_segs(sk, skb, mss);