git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Bluetooth: Use event-driven approach for handling ERTM receive buffer
author Mat Martineau <mathewm@codeaurora.org>
Thu, 7 Jul 2011 16:39:02 +0000 (09:39 -0700)
committer Gustavo F. Padovan <padovan@profusion.mobi>
Thu, 7 Jul 2011 18:28:56 +0000 (15:28 -0300)
This change moves most L2CAP ERTM receive buffer handling out of the
L2CAP core and in to the socket code.  It's up to the higher layer
(the socket code, in this case) to tell the core when its buffer is
full or has space available.  The recv op should always accept
incoming ERTM data or else the connection will go down.

Within the socket layer, an skb that does not fit in the socket
receive buffer will be temporarily stored.  When the socket is read
from, that skb will be placed in the receive buffer if possible.  Once
adequate buffer space becomes available, the L2CAP core is informed
and the ERTM local busy state is cleared.

Receive buffer management for non-ERTM modes is unchanged.

Signed-off-by: Mat Martineau <mathewm@codeaurora.org>
Signed-off-by: Gustavo F. Padovan <padovan@profusion.mobi>
include/net/bluetooth/l2cap.h
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c

index 9c18e555b6ed0c302552f2676f36a04f7ddc5389..66b8d9688d9e3ff5e1dd79168fdaa770151eb738 100644 (file)
@@ -422,6 +422,7 @@ struct l2cap_conn {
 struct l2cap_pinfo {
        struct bt_sock  bt;
        struct l2cap_chan       *chan;
+       struct sk_buff  *rx_busy_skb;
 };
 
 enum {
@@ -498,5 +499,6 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason);
 void l2cap_chan_destroy(struct l2cap_chan *chan);
 int l2cap_chan_connect(struct l2cap_chan *chan);
 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
+void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
 
 #endif /* __L2CAP_H */
index f7ada4a2cc5d9878d2d13b5c5bda89f49a87bb08..ea9c7d06104617c3df9eb234b090eda1b4d5437b 100644 (file)
@@ -3350,21 +3350,21 @@ static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 c
        }
 
        err = l2cap_ertm_reassembly_sdu(chan, skb, control);
-       if (err >= 0) {
-               chan->buffer_seq = (chan->buffer_seq + 1) % 64;
-               return err;
-       }
-
-       l2cap_ertm_enter_local_busy(chan);
-
-       bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
-       __skb_queue_tail(&chan->busy_q, skb);
-
-       queue_work(_busy_wq, &chan->busy_work);
+       chan->buffer_seq = (chan->buffer_seq + 1) % 64;
 
        return err;
 }
 
+void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
+{
+       if (chan->mode == L2CAP_MODE_ERTM) {
+               if (busy)
+                       l2cap_ertm_enter_local_busy(chan);
+               else
+                       l2cap_ertm_exit_local_busy(chan);
+       }
+}
+
 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
 {
        struct sk_buff *_skb;
@@ -3463,13 +3463,22 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
        struct sk_buff *skb;
        u16 control;
 
-       while ((skb = skb_peek(&chan->srej_q))) {
+       while ((skb = skb_peek(&chan->srej_q)) &&
+                       !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+               int err;
+
                if (bt_cb(skb)->tx_seq != tx_seq)
                        break;
 
                skb = skb_dequeue(&chan->srej_q);
                control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
-               l2cap_ertm_reassembly_sdu(chan, skb, control);
+               err = l2cap_ertm_reassembly_sdu(chan, skb, control);
+
+               if (err < 0) {
+                       l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+                       break;
+               }
+
                chan->buffer_seq_srej =
                        (chan->buffer_seq_srej + 1) % 64;
                tx_seq = (tx_seq + 1) % 64;
@@ -3625,8 +3634,10 @@ expected:
        }
 
        err = l2cap_push_rx_skb(chan, skb, rx_control);
-       if (err < 0)
-               return 0;
+       if (err < 0) {
+               l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+               return err;
+       }
 
        if (rx_control & L2CAP_CTRL_FINAL) {
                if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
index 39082d4e77cef96b6a99370b3bb1792eb4ce7f22..146b614d10ed1ed72b521165eec3907ce9fbd0f1 100644 (file)
@@ -711,13 +711,15 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
 {
        struct sock *sk = sock->sk;
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
+       int err;
 
        lock_sock(sk);
 
        if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
                sk->sk_state = BT_CONFIG;
 
-               __l2cap_connect_rsp_defer(l2cap_pi(sk)->chan);
+               __l2cap_connect_rsp_defer(pi->chan);
                release_sock(sk);
                return 0;
        }
@@ -725,9 +727,37 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
        release_sock(sk);
 
        if (sock->type == SOCK_STREAM)
-               return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+               err = bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
+       else
+               err = bt_sock_recvmsg(iocb, sock, msg, len, flags);
+
+       if (pi->chan->mode != L2CAP_MODE_ERTM)
+               return err;
+
+       /* Attempt to put pending rx data in the socket buffer */
+
+       lock_sock(sk);
+
+       if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
+               goto done;
+
+       if (pi->rx_busy_skb) {
+               if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb))
+                       pi->rx_busy_skb = NULL;
+               else
+                       goto done;
+       }
 
-       return bt_sock_recvmsg(iocb, sock, msg, len, flags);
+       /* Restore data flow when half of the receive buffer is
+        * available.  This avoids resending large numbers of
+        * frames.
+        */
+       if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
+               l2cap_chan_busy(pi->chan, 0);
+
+done:
+       release_sock(sk);
+       return err;
 }
 
 /* Kill socket (only if zapped and orphan)
@@ -811,9 +841,31 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
 
 static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
 {
+       int err;
        struct sock *sk = data;
+       struct l2cap_pinfo *pi = l2cap_pi(sk);
 
-       return sock_queue_rcv_skb(sk, skb);
+       if (pi->rx_busy_skb)
+               return -ENOMEM;
+
+       err = sock_queue_rcv_skb(sk, skb);
+
+       /* For ERTM, handle one skb that doesn't fit into the recv
+        * buffer.  This is important to do because the data frames
+        * have already been acked, so the skb cannot be discarded.
+        *
+        * Notify the l2cap core that the buffer is full, so the
+        * LOCAL_BUSY state is entered and no more frames are
+        * acked and reassembled until there is buffer space
+        * available.
+        */
+       if (err < 0 && pi->chan->mode == L2CAP_MODE_ERTM) {
+               pi->rx_busy_skb = skb;
+               l2cap_chan_busy(pi->chan, 1);
+               err = 0;
+       }
+
+       return err;
 }
 
 static void l2cap_sock_close_cb(void *data)
@@ -842,6 +894,11 @@ static void l2cap_sock_destruct(struct sock *sk)
 {
        BT_DBG("sk %p", sk);
 
+       if (l2cap_pi(sk)->rx_busy_skb) {
+               kfree_skb(l2cap_pi(sk)->rx_busy_skb);
+               l2cap_pi(sk)->rx_busy_skb = NULL;
+       }
+
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_write_queue);
 }