git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
net: convert syn_wait_lock to a spinlock
authorEric Dumazet <edumazet@google.com>
Sun, 22 Mar 2015 17:22:21 +0000 (10:22 -0700)
committerDavid S. Miller <davem@davemloft.net>
Mon, 23 Mar 2015 20:52:26 +0000 (16:52 -0400)
This is a low-hanging fruit, as we'll get rid of syn_wait_lock eventually.

We hold syn_wait_lock for such small sections, that it makes no sense to use
a read/write lock. A spin lock is simply faster.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/request_sock.h
net/core/request_sock.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/tcp_ipv4.c
net/ipv6/inet6_connection_sock.c

index 8603c350fad0ac66c06201bd9ac77247c6dde85a..fe41f3ceb008d767d594de6a042393ba463b509b 100644 (file)
@@ -173,11 +173,6 @@ struct fastopen_queue {
  * %syn_wait_lock is necessary only to avoid proc interface having to grab the main
  * lock sock while browsing the listening hash (otherwise it's deadlock prone).
  *
- * This lock is acquired in read mode only from listening_get_next() seq_file
- * op and it's acquired in write mode _only_ from code that is actively
- * changing rskq_accept_head. All readers that are holding the master sock lock
- * don't need to grab this lock in read mode too as rskq_accept_head. writes
- * are always protected from the main sock lock.
  */
 struct request_sock_queue {
        struct request_sock     *rskq_accept_head;
@@ -192,7 +187,7 @@ struct request_sock_queue {
                                             */
 
        /* temporary alignment, our goal is to get rid of this lock */
-       rwlock_t                syn_wait_lock ____cacheline_aligned_in_smp;
+       spinlock_t              syn_wait_lock ____cacheline_aligned_in_smp;
 };
 
 int reqsk_queue_alloc(struct request_sock_queue *queue,
@@ -223,14 +218,14 @@ static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
        struct listen_sock *lopt = queue->listen_opt;
        struct request_sock **prev;
 
-       write_lock(&queue->syn_wait_lock);
+       spin_lock(&queue->syn_wait_lock);
 
        prev = &lopt->syn_table[req->rsk_hash];
        while (*prev != req)
                prev = &(*prev)->dl_next;
        *prev = req->dl_next;
 
-       write_unlock(&queue->syn_wait_lock);
+       spin_unlock(&queue->syn_wait_lock);
        if (del_timer(&req->rsk_timer))
                reqsk_put(req);
 }
index cdc0ddd9ac9f7c1768c1d6b7ed30a09ed476137d..87b22c0bc08c2f33fa31948b8b2604f48b8009bc 100644 (file)
@@ -58,14 +58,14 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
                return -ENOMEM;
 
        get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
-       rwlock_init(&queue->syn_wait_lock);
+       spin_lock_init(&queue->syn_wait_lock);
        queue->rskq_accept_head = NULL;
        lopt->nr_table_entries = nr_table_entries;
        lopt->max_qlen_log = ilog2(nr_table_entries);
 
-       write_lock_bh(&queue->syn_wait_lock);
+       spin_lock_bh(&queue->syn_wait_lock);
        queue->listen_opt = lopt;
-       write_unlock_bh(&queue->syn_wait_lock);
+       spin_unlock_bh(&queue->syn_wait_lock);
 
        return 0;
 }
@@ -81,10 +81,10 @@ static inline struct listen_sock *reqsk_queue_yank_listen_sk(
 {
        struct listen_sock *lopt;
 
-       write_lock_bh(&queue->syn_wait_lock);
+       spin_lock_bh(&queue->syn_wait_lock);
        lopt = queue->listen_opt;
        queue->listen_opt = NULL;
-       write_unlock_bh(&queue->syn_wait_lock);
+       spin_unlock_bh(&queue->syn_wait_lock);
 
        return lopt;
 }
@@ -100,7 +100,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
                for (i = 0; i < lopt->nr_table_entries; i++) {
                        struct request_sock *req;
 
-                       write_lock_bh(&queue->syn_wait_lock);
+                       spin_lock_bh(&queue->syn_wait_lock);
                        while ((req = lopt->syn_table[i]) != NULL) {
                                lopt->syn_table[i] = req->dl_next;
                                atomic_inc(&lopt->qlen_dec);
@@ -108,7 +108,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
                                        reqsk_put(req);
                                reqsk_put(req);
                        }
-                       write_unlock_bh(&queue->syn_wait_lock);
+                       spin_unlock_bh(&queue->syn_wait_lock);
                }
        }
 
index 711ab143d4cb1cb5b26b30d71e209b4c832c9aad..79c0c9439fdc7dd0b68421a6b229c869f37f7a01 100644 (file)
@@ -495,7 +495,7 @@ struct request_sock *inet_csk_search_req(struct sock *sk,
        u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
                                  lopt->nr_table_entries);
 
-       write_lock(&icsk->icsk_accept_queue.syn_wait_lock);
+       spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
        for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
                const struct inet_request_sock *ireq = inet_rsk(req);
 
@@ -508,7 +508,7 @@ struct request_sock *inet_csk_search_req(struct sock *sk,
                        break;
                }
        }
-       write_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
+       spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
 
        return req;
 }
@@ -650,10 +650,10 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue,
        setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
        req->rsk_hash = hash;
 
-       write_lock(&queue->syn_wait_lock);
+       spin_lock(&queue->syn_wait_lock);
        req->dl_next = lopt->syn_table[hash];
        lopt->syn_table[hash] = req;
-       write_unlock(&queue->syn_wait_lock);
+       spin_unlock(&queue->syn_wait_lock);
 
        mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
 }
index f984b2001d0acf5191f59b56157a0c29f0db1316..76322c9867d5eb1ffe7808c908e42208046888a7 100644 (file)
@@ -728,7 +728,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
 
        entry.family = sk->sk_family;
 
-       read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+       spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
        lopt = icsk->icsk_accept_queue.listen_opt;
        if (!lopt || !listen_sock_qlen(lopt))
@@ -776,7 +776,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
        }
 
 out:
-       read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+       spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
        return err;
 }
index 5554b8f33d41b43dc4ccf3b95322e362bbe3844a..8028ad5920a43d0f9784612ee3bea61017a667ae 100644 (file)
@@ -1909,13 +1909,13 @@ get_req:
                }
                sk        = sk_nulls_next(st->syn_wait_sk);
                st->state = TCP_SEQ_STATE_LISTENING;
-               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        } else {
                icsk = inet_csk(sk);
-               read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                if (reqsk_queue_len(&icsk->icsk_accept_queue))
                        goto start_req;
-               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                sk = sk_nulls_next(sk);
        }
 get_sk:
@@ -1927,7 +1927,7 @@ get_sk:
                        goto out;
                }
                icsk = inet_csk(sk);
-               read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               spin_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
 start_req:
                        st->uid         = sock_i_uid(sk);
@@ -1936,7 +1936,7 @@ start_req:
                        st->sbucket     = 0;
                        goto get_req;
                }
-               read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+               spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
        }
        spin_unlock_bh(&ilb->lock);
        st->offset = 0;
@@ -2155,7 +2155,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
        case TCP_SEQ_STATE_OPENREQ:
                if (v) {
                        struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
-                       read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
+                       spin_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
                }
        case TCP_SEQ_STATE_LISTENING:
                if (v != SEQ_START_TOKEN)
index 2f3bbe569e8f751b2229305eefce9d2110d1f8c7..6927f3fb5597fd2013b885cddb35bed852b950d5 100644 (file)
@@ -124,7 +124,7 @@ struct request_sock *inet6_csk_search_req(struct sock *sk,
        u32 hash = inet6_synq_hash(raddr, rport, lopt->hash_rnd,
                                   lopt->nr_table_entries);
 
-       write_lock(&icsk->icsk_accept_queue.syn_wait_lock);
+       spin_lock(&icsk->icsk_accept_queue.syn_wait_lock);
        for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
                const struct inet_request_sock *ireq = inet_rsk(req);
 
@@ -138,7 +138,7 @@ struct request_sock *inet6_csk_search_req(struct sock *sk,
                        break;
                }
        }
-       write_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
+       spin_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
 
        return req;
 }