tcp: Apply device TSO segment limit earlier
author Ben Hutchings <bhutchings@solarflare.com>
Mon, 30 Jul 2012 16:11:42 +0000 (16:11 +0000)
committer Ben Hutchings <ben@decadent.org.uk>
Wed, 19 Sep 2012 14:04:46 +0000 (15:04 +0100)
[ Upstream commit 1485348d2424e1131ea42efc033cbd9366462b01 ]

Cache the device gso_max_segs in sock::sk_gso_max_segs and use it to
limit the size of TSO skbs.  This avoids the need to fall back to
software GSO for local TCP senders.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
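
For context (not part of the patch): the device-side limit consumed here is net_device::gso_max_segs, which a driver lowers when its hardware can only chain a bounded number of descriptors per TSO packet. A minimal sketch of that driver side, with a hypothetical driver name and limit:

#include <linux/netdevice.h>

/* Hypothetical bound on descriptors the NIC can chain per TSO packet */
#define MYDRV_TSO_MAX_SEGS	100

static void mydrv_init_netdev(struct net_device *net_dev)
{
	/* Default is GSO_MAX_SEGS (65535), i.e. effectively unlimited.
	 * Lowering it lets sk_setup_caps() below cap sk->sk_gso_max_segs,
	 * so the stack builds TSO skbs the NIC accepts without falling
	 * back to software GSO.
	 */
	net_dev->gso_max_segs = MYDRV_TSO_MAX_SEGS;
}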
include/net/sock.h
net/core/sock.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_output.c

diff --git a/include/net/sock.h b/include/net/sock.h
index 32e39371fba627ebfaab66713e645889ccb7ba47..ddf523c6158bab6f7f7965ff1f85b1807477db2a 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -194,6 +194,7 @@ struct sock_common {
   *    @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
   *    @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
   *    @sk_gso_max_size: Maximum GSO segment size to build
+  *    @sk_gso_max_segs: Maximum number of GSO segments
   *    @sk_lingertime: %SO_LINGER l_linger setting
   *    @sk_backlog: always used with the per-socket spinlock held
   *    @sk_callback_lock: used with the callbacks in the end of this struct
@@ -310,6 +311,7 @@ struct sock {
        int                     sk_route_nocaps;
        int                     sk_gso_type;
        unsigned int            sk_gso_max_size;
+       u16                     sk_gso_max_segs;
        int                     sk_rcvlowat;
        unsigned long           sk_lingertime;
        struct sk_buff_head     sk_error_queue;
diff --git a/net/core/sock.c b/net/core/sock.c
index 8d095b918e8cf44894d236f7b52bcf71d6643ccc..018fd41cfb0d8309e5449bc3c85df831f05b5fee 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1308,6 +1308,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
                } else {
                        sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
                        sk->sk_gso_max_size = dst->dev->gso_max_size;
+                       sk->sk_gso_max_segs = dst->dev->gso_max_segs;
                }
        }
 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ad466a7f8bc606dea6d947ecd6806e71ac269fd3..043d49b39c09811751c75deeb845f516f25fb4ce 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -740,7 +740,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
                           old_size_goal + mss_now > xmit_size_goal)) {
                        xmit_size_goal = old_size_goal;
                } else {
-                       tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+                       tp->xmit_size_goal_segs =
+                               min_t(u16, xmit_size_goal / mss_now,
+                                     sk->sk_gso_max_segs);
                        xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
                }
        }
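
A worked example of the clamp above (illustrative numbers, not from the patch): with mss_now = 1448 and a device limit of sk_gso_max_segs = 24, xmit_size_goal / mss_now could reach roughly 45 segments under the default 64 KiB sk_gso_max_size, but xmit_size_goal_segs is now clamped to 24, giving xmit_size_goal = 24 * 1448 = 34752 bytes.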
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 850c737e08e2a4a9185c64c8776711d83b8c7b08..6cebfd2df6154056345964bc37f62d4fde201530 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -290,7 +290,8 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
        left = tp->snd_cwnd - in_flight;
        if (sk_can_gso(sk) &&
            left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
-           left * tp->mss_cache < sk->sk_gso_max_size)
+           left * tp->mss_cache < sk->sk_gso_max_size &&
+           left < sk->sk_gso_max_segs)
                return 1;
        return left <= tcp_max_burst(tp);
 }
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index c51dd5b2cbd5c9992a7807a9e1966a3cf768a5a2..921cbac22684912fdf29744fd7caada656834b87 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1318,21 +1318,21 @@ static void tcp_cwnd_validate(struct sock *sk)
  * when we would be allowed to send the split-due-to-Nagle skb fully.
  */
 static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb,
-                                       unsigned int mss_now, unsigned int cwnd)
+                                       unsigned int mss_now, unsigned int max_segs)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
-       u32 needed, window, cwnd_len;
+       u32 needed, window, max_len;
 
        window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
-       cwnd_len = mss_now * cwnd;
+       max_len = mss_now * max_segs;
 
-       if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
-               return cwnd_len;
+       if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
+               return max_len;
 
        needed = min(skb->len, window);
 
-       if (cwnd_len <= needed)
-               return cwnd_len;
+       if (max_len <= needed)
+               return max_len;
 
        return needed - needed % mss_now;
 }
@@ -1560,7 +1560,8 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
        limit = min(send_win, cong_win);
 
        /* If a full-sized TSO skb can be sent, do it. */
-       if (limit >= sk->sk_gso_max_size)
+       if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
+                          sk->sk_gso_max_segs * tp->mss_cache))
                goto send_now;
 
        /* Middle in queue won't get any more data, full sendable already? */
@@ -1786,7 +1787,9 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                limit = mss_now;
                if (tso_segs > 1 && !tcp_urg_mode(tp))
                        limit = tcp_mss_split_point(sk, skb, mss_now,
-                                                   cwnd_quota);
+                                                   min_t(unsigned int,
+                                                         cwnd_quota,
+                                                         sk->sk_gso_max_segs));
 
                if (skb->len > limit &&
                    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))