2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
54 #define VERSION "2.14"
/* Module-wide state and forward declarations.
 * NOTE(review): this dump is line-sampled — brace/blank lines are missing
 * throughout (e.g. the closing "};" of l2cap_sk_list below). Code bytes
 * are left untouched. */
56 static int enable_ertm = 0;
/* Feature mask advertised in L2CAP information responses. */
58 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Supported fixed-channel bitmap; 0x02 presumably flags the signalling
 * channel per the Fixed Channels info response — confirm against spec. */
59 static u8 l2cap_fixed_chan[8] = { 0x02, };
61 static const struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, guarded by its own rwlock. */
63 static struct bt_sock_list l2cap_sk_list = {
64 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for the close/kill paths used before definition. */
67 static void __l2cap_sock_close(struct sock *sk, int reason);
68 static void l2cap_sock_close(struct sock *sk);
69 static void l2cap_sock_kill(struct sock *sk);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
74 /* ---- L2CAP timers ---- */
/* sk_timer callback: a pending operation on the socket timed out.
 * Chooses an errno from the socket state, then closes the channel.
 * NOTE(review): the default 'reason' initialization (presumably ETIMEDOUT)
 * and the lock/unlock lines are among the lines missing from this dump. */
75 static void l2cap_sock_timeout(unsigned long arg)
77 struct sock *sk = (struct sock *) arg;
80 BT_DBG("sock %p state %d", sk, sk->sk_state);
84 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
85 reason = ECONNREFUSED;
86 else if (sk->sk_state == BT_CONNECT &&
87 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
88 reason = ECONNREFUSED;
92 __l2cap_sock_close(sk, reason);
/* Arm sk_timer to fire 'timeout' jiffies from now. */
100 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
102 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
103 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk_timer, dropping its socket reference. */
106 static void l2cap_sock_clear_timer(struct sock *sk)
108 BT_DBG("sock %p state %d", sk, sk->sk_state);
109 sk_stop_timer(sk, &sk->sk_timer);
112 /* ---- L2CAP channels ---- */
/* Linear scan of the per-connection channel list for a destination CID.
 * Caller must hold l->lock (the "__" prefix marks the unlocked variant). */
113 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->dcid == cid)
/* As above, but matching the source CID. Caller holds l->lock. */
123 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->scid == cid)
133 /* Find channel with given SCID.
134 * Returns locked socket */
135 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
139 s = __l2cap_get_chan_by_scid(l, cid)
142 read_unlock(&l->lock);
/* Find the channel whose pending signalling request used 'ident'.
 * Caller holds l->lock. */
146 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
149 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
150 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around the ident lookup; takes the list read lock. */
156 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
160 s = __l2cap_get_chan_by_ident(l, ident);
163 read_unlock(&l->lock);
/* Allocate the first unused CID in the dynamic range.
 * NOTE(review): the exhausted-range return value is in missing lines. */
167 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
169 u16 cid = L2CAP_CID_DYN_START;
171 for (; cid < L2CAP_CID_DYN_END; cid++) {
172 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk at the head of the doubly linked channel list (next_c/prev_c).
 * Caller must hold the list write lock (see l2cap_chan_add). */
179 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
184 l2cap_pi(l->head)->prev_c = sk;
186 l2cap_pi(sk)->next_c = l->head;
187 l2cap_pi(sk)->prev_c = NULL;
/* Unlink sk from the channel list under the write lock (bh-safe). */
191 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
193 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
195 write_lock_bh(&l->lock);
200 l2cap_pi(next)->prev_c = prev;
202 l2cap_pi(prev)->next_c = next;
203 write_unlock_bh(&l->lock);
/* Attach sk to an L2CAP connection: assign CIDs according to the socket
 * type, link it into the connection's channel list, and (if this is an
 * incoming channel) queue it on the listening parent's accept queue.
 * Caller holds the channel-list write lock (see l2cap_chan_add). */
208 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
210 struct l2cap_chan_list *l = &conn->chan_list;
212 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
213 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: presumably HCI "remote user terminated connection" default
 * disconnect reason — confirm against HCI error code table. */
215 conn->disc_reason = 0x13;
217 l2cap_pi(sk)->conn = conn;
219 if (sk->sk_type == SOCK_SEQPACKET) {
220 /* Alloc CID for connection-oriented socket */
221 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
222 } else if (sk->sk_type == SOCK_DGRAM) {
223 /* Connectionless socket */
224 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
225 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
226 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
228 /* Raw socket can send/recv signalling messages only */
229 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
230 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
231 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 __l2cap_chan_link(l, sk);
237 bt_accept_enqueue(parent, sk);
/* Detach sk from its connection and mark it closed/zapped.
 * Drops the hci_conn reference taken when the channel was added. */
241 * Must be called on the locked socket. */
242 static void l2cap_chan_del(struct sock *sk, int err)
244 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
245 struct sock *parent = bt_sk(sk)->parent;
247 l2cap_sock_clear_timer(sk);
249 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
252 /* Unlink from channel list */
253 l2cap_chan_unlink(&conn->chan_list, sk);
254 l2cap_pi(sk)->conn = NULL;
255 hci_conn_put(conn->hcon);
258 sk->sk_state = BT_CLOSED;
259 sock_set_flag(sk, SOCK_ZAPPED);
/* Not-yet-accepted child: remove from the parent's accept queue and
 * wake the listener so it can reap the dead socket. */
265 bt_accept_unlink(sk);
266 parent->sk_data_ready(parent, 0);
268 sk->sk_state_change(sk);
271 /* Service level security */
/* Map the channel's security level to an HCI authentication type and ask
 * the HCI layer to enforce it. PSM 0x0001 (SDP) never requires bonding. */
272 static inline int l2cap_check_security(struct sock *sk)
274 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
277 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
278 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
279 auth_type = HCI_AT_NO_BONDING_MITM;
281 auth_type = HCI_AT_NO_BONDING;
/* SDP connections are tracked with their own pseudo security level. */
283 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
284 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
286 switch (l2cap_pi(sk)->sec_level) {
287 case BT_SECURITY_HIGH:
288 auth_type = HCI_AT_GENERAL_BONDING_MITM;
290 case BT_SECURITY_MEDIUM:
291 auth_type = HCI_AT_GENERAL_BONDING;
294 auth_type = HCI_AT_NO_BONDING;
299 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * cycling within the kernel-reserved 1-128 range under conn->lock. */
303 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
313 spin_lock_bh(&conn->lock);
315 if (++conn->tx_ident > 128)
320 spin_unlock_bh(&conn->lock);
/* Build a signalling command skb and push it out on the ACL link. */
325 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
327 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
329 BT_DBG("code 0x%2.2x", code);
334 return hci_send_acl(conn->hcon, skb, 0);
/* Build and send an ERTM supervisory frame (S-frame) carrying 'control'.
 * Adds a CRC16 FCS when the channel negotiated L2CAP_FCS_CRC16. */
337 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
340 struct l2cap_hdr *lh;
341 struct l2cap_conn *conn = pi->conn;
342 int count, hlen = L2CAP_HDR_SIZE + 2;
/* FCS adds 2 more bytes to the header length (increment in missing line). */
344 if (pi->fcs == L2CAP_FCS_CRC16)
347 BT_DBG("pi %p, control 0x%2.2x", pi, control);
349 count = min_t(unsigned int, conn->mtu, hlen);
350 control |= L2CAP_CTRL_FRAME_TYPE;
352 skb = bt_skb_alloc(count, GFP_ATOMIC);
356 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
357 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
358 lh->cid = cpu_to_le16(pi->dcid);
359 put_unaligned_le16(control, skb_put(skb, 2));
361 if (pi->fcs == L2CAP_FCS_CRC16) {
/* FCS covers header + control, i.e. everything before the FCS itself. */
362 u16 fcs = crc16(0, (u8 *)lh, count - 2);
363 put_unaligned_le16(fcs, skb_put(skb, 2));
366 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RR, or RNR while the local receiver is busy. */
369 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
371 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
372 control |= L2CAP_SUPER_RCV_NOT_READY;
374 control |= L2CAP_SUPER_RCV_READY;
376 return l2cap_send_sframe(pi, control);
/* Kick off channel establishment: if the remote feature mask is already
 * known (or being fetched), send CONN_REQ once security allows;
 * otherwise start the INFO_REQ feature exchange with its timeout. */
379 static void l2cap_do_start(struct sock *sk)
381 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
383 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
384 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
387 if (l2cap_check_security(sk)) {
388 struct l2cap_conn_req req;
389 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
390 req.psm = l2cap_pi(sk)->psm;
392 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
394 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
395 L2CAP_CONN_REQ, sizeof(req), &req);
398 struct l2cap_info_req req;
399 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
401 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
402 conn->info_ident = l2cap_get_ident(conn);
404 mod_timer(&conn->info_timer, jiffies +
405 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
407 l2cap_send_cmd(conn, conn->info_ident,
408 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send a DISCONN_REQ for this channel's CID pair. */
412 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
414 struct l2cap_disconn_req req;
416 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
417 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
418 l2cap_send_cmd(conn, l2cap_get_ident(conn),
419 L2CAP_DISCONN_REQ, sizeof(req), &req);
422 /* ---- L2CAP connections ---- */
/* Walk all channels of the connection and push forward any that are
 * waiting to connect (send CONN_REQ) or to answer an incoming request
 * (send CONN_RSP, possibly deferring via the defer_setup mechanism). */
423 static void l2cap_conn_start(struct l2cap_conn *conn)
425 struct l2cap_chan_list *l = &conn->chan_list;
428 BT_DBG("conn %p", conn);
432 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented channels take part in the handshake. */
435 if (sk->sk_type != SOCK_SEQPACKET) {
440 if (sk->sk_state == BT_CONNECT) {
441 if (l2cap_check_security(sk)) {
442 struct l2cap_conn_req req;
443 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
444 req.psm = l2cap_pi(sk)->psm;
446 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
448 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
449 L2CAP_CONN_REQ, sizeof(req), &req);
451 } else if (sk->sk_state == BT_CONNECT2) {
452 struct l2cap_conn_rsp rsp;
453 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
454 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
456 if (l2cap_check_security(sk)) {
457 if (bt_sk(sk)->defer_setup) {
/* Userspace must accept() before we answer: report "pending". */
458 struct sock *parent = bt_sk(sk)->parent;
459 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
460 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
461 parent->sk_data_ready(parent, 0);
464 sk->sk_state = BT_CONFIG;
465 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
466 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
469 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
470 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
473 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
474 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
480 read_unlock(&l->lock);
/* ACL link came up: mark raw/dgram sockets connected immediately and
 * start the L2CAP handshake for connection-oriented ones. */
483 static void l2cap_conn_ready(struct l2cap_conn *conn)
485 struct l2cap_chan_list *l = &conn->chan_list;
488 BT_DBG("conn %p", conn);
492 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
495 if (sk->sk_type != SOCK_SEQPACKET) {
496 l2cap_sock_clear_timer(sk);
497 sk->sk_state = BT_CONNECTED;
498 sk->sk_state_change(sk);
499 } else if (sk->sk_state == BT_CONNECT)
505 read_unlock(&l->lock);
508 /* Notify sockets that we cannot guaranty reliability anymore */
/* NOTE(review): error delivery to matching sockets is in missing lines;
 * presumably sets sk_err for force_reliable channels. */
509 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
511 struct l2cap_chan_list *l = &conn->chan_list;
514 BT_DBG("conn %p", conn);
518 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
519 if (l2cap_pi(sk)->force_reliable)
523 read_unlock(&l->lock);
/* Feature-mask INFO_REQ timed out: proceed as if the exchange finished. */
526 static void l2cap_info_timeout(unsigned long arg)
528 struct l2cap_conn *conn = (void *) arg;
530 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
531 conn->info_ident = 0;
533 l2cap_conn_start(conn);
/* Allocate and initialize the per-ACL-link L2CAP connection object,
 * returning the existing one if already attached to the hci_conn. */
536 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
538 struct l2cap_conn *conn = hcon->l2cap_data;
543 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
547 hcon->l2cap_data = conn;
550 BT_DBG("hcon %p conn %p", hcon, conn);
552 conn->mtu = hcon->hdev->acl_mtu;
553 conn->src = &hcon->hdev->bdaddr;
554 conn->dst = &hcon->dst;
558 setup_timer(&conn->info_timer, l2cap_info_timeout,
559 (unsigned long) conn);
561 spin_lock_init(&conn->lock);
562 rwlock_init(&conn->chan_list.lock);
/* Default disconnect reason, same 0x13 code as in __l2cap_chan_add. */
564 conn->disc_reason = 0x13;
/* Tear down the connection: free any partial reassembly skb, delete
 * every remaining channel with 'err', stop the info timer and detach
 * from the hci_conn. */
569 static void l2cap_conn_del(struct hci_conn *hcon, int err)
571 struct l2cap_conn *conn = hcon->l2cap_data;
577 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
579 kfree_skb(conn->rx_skb);
582 while ((sk = conn->chan_list.head)) {
584 l2cap_chan_del(sk, err);
589 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
590 del_timer_sync(&conn->info_timer);
592 hcon->l2cap_data = NULL;
/* Locked wrapper: add a channel under the list write lock (bh-safe). */
596 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
598 struct l2cap_chan_list *l = &conn->chan_list;
599 write_lock_bh(&l->lock);
600 __l2cap_chan_add(conn, sk, parent);
601 write_unlock_bh(&l->lock);
604 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to (psm, src bdaddr).
 * Caller must hold l2cap_sk_list.lock. */
605 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
608 struct hlist_node *node;
609 sk_for_each(sk, node, &l2cap_sk_list.head)
610 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
617 /* Find socket with psm and source bdaddr.
618 * Returns closest match.
/* Prefers an exact source-address match; falls back to a socket bound
 * to BDADDR_ANY (kept in sk1). state==0 matches any socket state. */
620 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
622 struct sock *sk = NULL, *sk1 = NULL;
623 struct hlist_node *node;
625 sk_for_each(sk, node, &l2cap_sk_list.head) {
626 if (state && sk->sk_state != state)
629 if (l2cap_pi(sk)->psm == psm) {
631 if (!bacmp(&bt_sk(sk)->src, src))
635 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node is non-NULL only if the loop broke on an exact match. */
639 return node ? sk : sk1;
642 /* Find socket with given address (psm, src).
643 * Returns locked socket */
644 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
647 read_lock(&l2cap_sk_list.lock);
648 s = __l2cap_get_sock_by_psm(state, psm, src);
651 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct hook: drop any queued receive/transmit skbs. */
655 static void l2cap_sock_destruct(struct sock *sk)
659 skb_queue_purge(&sk->sk_receive_queue);
660 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
663 static void l2cap_sock_cleanup_listen(struct sock *parent)
667 BT_DBG("parent %p", parent);
669 /* Close not yet accepted channels */
670 while ((sk = bt_accept_dequeue(parent, NULL)))
671 l2cap_sock_close(sk);
673 parent->sk_state = BT_CLOSED;
674 sock_set_flag(parent, SOCK_ZAPPED);
677 /* Kill socket (only if zapped and orphan)
678 * Must be called on unlocked socket.
680 static void l2cap_sock_kill(struct sock *sk)
682 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
685 BT_DBG("sk %p state %d", sk, sk->sk_state);
687 /* Kill poor orphan */
688 bt_sock_unlink(&l2cap_sk_list, sk);
689 sock_set_flag(sk, SOCK_DEAD);
/* State machine for closing a channel; behavior depends on sk_state:
 * listening sockets reap children, connected SEQPACKET channels send
 * DISCONN_REQ and wait, half-open ones answer the pending CONN_REQ. */
693 static void __l2cap_sock_close(struct sock *sk, int reason)
695 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
697 switch (sk->sk_state) {
699 l2cap_sock_cleanup_listen(sk);
704 if (sk->sk_type == SOCK_SEQPACKET) {
705 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Graceful disconnect: request it and arm the sndtimeo timer. */
707 sk->sk_state = BT_DISCONN;
708 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
709 l2cap_send_disconn_req(conn, sk);
711 l2cap_chan_del(sk, reason);
/* BT_CONNECT2: refuse the pending incoming connection request. */
715 if (sk->sk_type == SOCK_SEQPACKET) {
716 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
717 struct l2cap_conn_rsp rsp;
720 if (bt_sk(sk)->defer_setup)
721 result = L2CAP_CR_SEC_BLOCK;
723 result = L2CAP_CR_BAD_PSM;
725 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
726 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
727 rsp.result = cpu_to_le16(result);
728 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
729 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
730 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
732 l2cap_chan_del(sk, reason);
737 l2cap_chan_del(sk, reason);
741 sock_set_flag(sk, SOCK_ZAPPED);
746 /* Must be called on unlocked socket. */
/* Public close: cancel timers, then run the close state machine with
 * ECONNRESET. NOTE(review): lock/unlock lines are missing in this dump. */
747 static void l2cap_sock_close(struct sock *sk)
749 l2cap_sock_clear_timer(sk);
751 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a new L2CAP socket: inherit channel options from the
 * listening parent if present, otherwise apply module defaults. */
756 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
758 struct l2cap_pinfo *pi = l2cap_pi(sk);
763 sk->sk_type = parent->sk_type;
764 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
766 pi->imtu = l2cap_pi(parent)->imtu;
767 pi->omtu = l2cap_pi(parent)->omtu;
768 pi->mode = l2cap_pi(parent)->mode;
769 pi->fcs = l2cap_pi(parent)->fcs;
770 pi->sec_level = l2cap_pi(parent)->sec_level;
771 pi->role_switch = l2cap_pi(parent)->role_switch;
772 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: fresh socket gets the defaults. */
774 pi->imtu = L2CAP_DEFAULT_MTU;
776 pi->mode = L2CAP_MODE_BASIC;
777 pi->fcs = L2CAP_FCS_CRC16;
778 pi->sec_level = BT_SECURITY_LOW;
780 pi->force_reliable = 0;
783 /* Default config options */
785 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor registered with the Bluetooth socket layer. */
788 static struct proto l2cap_proto = {
790 .owner = THIS_MODULE,
791 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate an L2CAP sock, wire up destructor/timeout and link it into
 * the global socket list. */
794 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
798 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
802 sock_init_data(sock, sk);
803 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
805 sk->sk_destruct = l2cap_sock_destruct;
806 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
808 sock_reset_flag(sk, SOCK_ZAPPED);
810 sk->sk_protocol = proto;
811 sk->sk_state = BT_OPEN;
813 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
815 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate type (SEQPACKET/DGRAM/RAW only; RAW needs
 * CAP_NET_RAW) and create + init the sock. */
819 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
823 BT_DBG("sock %p", sock);
825 sock->state = SS_UNCONNECTED;
827 if (sock->type != SOCK_SEQPACKET &&
828 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
829 return -ESOCKTNOSUPPORT;
831 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
834 sock->ops = &l2cap_sock_ops;
836 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
840 l2cap_sock_init(sk, NULL);
/* bind(2) backend: copy the sockaddr_l2, enforce CAP_NET_BIND_SERVICE
 * for privileged PSMs (< 0x1001), reject duplicate (psm, bdaddr)
 * bindings under the socket-list write lock, then record the address. */
844 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
846 struct sock *sk = sock->sk;
847 struct sockaddr_l2 la;
852 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Copy at most sizeof(la); zero-fill guards against short sockaddrs. */
855 memset(&la, 0, sizeof(la));
856 len = min_t(unsigned int, sizeof(la), alen);
857 memcpy(&la, addr, len);
864 if (sk->sk_state != BT_OPEN) {
869 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
870 !capable(CAP_NET_BIND_SERVICE)) {
875 write_lock_bh(&l2cap_sk_list.lock);
877 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
880 /* Save source address */
881 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
882 l2cap_pi(sk)->psm = la.l2_psm;
883 l2cap_pi(sk)->sport = la.l2_psm;
884 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) PSMs never require security. */
886 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
887 __le16_to_cpu(la.l2_psm) == 0x0003)
888 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
891 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve an HCI route to the destination, pick the HCI authentication
 * type from socket type / PSM / security level, create the ACL link and
 * attach this channel to it. Returns 0 or a negative errno. */
898 static int l2cap_do_connect(struct sock *sk)
900 bdaddr_t *src = &bt_sk(sk)->src;
901 bdaddr_t *dst = &bt_sk(sk)->dst;
902 struct l2cap_conn *conn;
903 struct hci_conn *hcon;
904 struct hci_dev *hdev;
908 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
911 hdev = hci_get_route(dst, src);
913 return -EHOSTUNREACH;
915 hci_dev_lock_bh(hdev);
/* Raw sockets use dedicated bonding; PSM 0x0001 (SDP) no bonding;
 * everything else maps sec_level to general bonding. */
919 if (sk->sk_type == SOCK_RAW) {
920 switch (l2cap_pi(sk)->sec_level) {
921 case BT_SECURITY_HIGH:
922 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
924 case BT_SECURITY_MEDIUM:
925 auth_type = HCI_AT_DEDICATED_BONDING;
928 auth_type = HCI_AT_NO_BONDING;
931 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
932 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
933 auth_type = HCI_AT_NO_BONDING_MITM;
935 auth_type = HCI_AT_NO_BONDING;
937 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
938 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
940 switch (l2cap_pi(sk)->sec_level) {
941 case BT_SECURITY_HIGH:
942 auth_type = HCI_AT_GENERAL_BONDING_MITM;
944 case BT_SECURITY_MEDIUM:
945 auth_type = HCI_AT_GENERAL_BONDING;
948 auth_type = HCI_AT_NO_BONDING;
953 hcon = hci_connect(hdev, ACL_LINK, dst,
954 l2cap_pi(sk)->sec_level, auth_type);
958 conn = l2cap_conn_add(hcon, 0);
966 /* Update source addr of the socket */
967 bacpy(src, conn->src);
969 l2cap_chan_add(conn, sk, NULL);
971 sk->sk_state = BT_CONNECT;
972 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already up: short-circuit non-SEQPACKET sockets to CONNECTED. */
974 if (hcon->state == BT_CONNECTED) {
975 if (sk->sk_type != SOCK_SEQPACKET) {
976 l2cap_sock_clear_timer(sk);
977 sk->sk_state = BT_CONNECTED;
983 hci_dev_unlock_bh(hdev);
/* connect(2) backend: validate address/PSM/mode, record destination,
 * start the connection and wait for BT_CONNECTED (honoring O_NONBLOCK).
 * NOTE(review): ERTM/STREAMING gating (enable_ertm check) is presumably
 * in the lines missing after the mode switch — confirm in full source. */
988 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
990 struct sock *sk = sock->sk;
991 struct sockaddr_l2 la;
996 if (!addr || addr->sa_family != AF_BLUETOOTH)
999 memset(&la, 0, sizeof(la));
1000 len = min_t(unsigned int, sizeof(la), alen);
1001 memcpy(&la, addr, len);
/* Connection-oriented sockets must name a PSM. */
1008 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1013 switch (l2cap_pi(sk)->mode) {
1014 case L2CAP_MODE_BASIC:
1016 case L2CAP_MODE_ERTM:
1017 case L2CAP_MODE_STREAMING:
1026 switch (sk->sk_state) {
1030 /* Already connecting */
1034 /* Already connected */
1047 /* Set destination address and psm */
1048 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1049 l2cap_pi(sk)->psm = la.l2_psm;
1051 err = l2cap_do_connect(sk);
1056 err = bt_sock_wait_state(sk, BT_CONNECTED,
1057 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2) backend: only bound SEQPACKET sockets may listen; if no PSM
 * was bound, autobind the first free dynamic PSM (0x1001 upward, step 2
 * so the low octet stays odd as the spec requires). */
1063 static int l2cap_sock_listen(struct socket *sock, int backlog)
1065 struct sock *sk = sock->sk;
1068 BT_DBG("sk %p backlog %d", sk, backlog);
1072 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1077 switch (l2cap_pi(sk)->mode) {
1078 case L2CAP_MODE_BASIC:
1080 case L2CAP_MODE_ERTM:
1081 case L2CAP_MODE_STREAMING:
1090 if (!l2cap_pi(sk)->psm) {
1091 bdaddr_t *src = &bt_sk(sk)->src;
1096 write_lock_bh(&l2cap_sk_list.lock);
1098 for (psm = 0x1001; psm < 0x1100; psm += 2)
1099 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1100 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1101 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1106 write_unlock_bh(&l2cap_sk_list.lock);
1112 sk->sk_max_ack_backlog = backlog;
1113 sk->sk_ack_backlog = 0;
1114 sk->sk_state = BT_LISTEN;
/* accept(2) backend: classic wake-one wait loop on the accept queue;
 * drops the socket lock around schedule_timeout() and revalidates the
 * listen state after reacquiring it. */
1121 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1123 DECLARE_WAITQUEUE(wait, current);
1124 struct sock *sk = sock->sk, *nsk;
1128 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1130 if (sk->sk_state != BT_LISTEN) {
1135 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1137 BT_DBG("sk %p timeo %ld", sk, timeo);
1139 /* Wait for an incoming connection. (wake-one). */
1140 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1141 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1142 set_current_state(TASK_INTERRUPTIBLE);
1149 timeo = schedule_timeout(timeo);
1150 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1152 if (sk->sk_state != BT_LISTEN) {
1157 if (signal_pending(current)) {
1158 err = sock_intr_errno(timeo);
1162 set_current_state(TASK_RUNNING);
1163 remove_wait_queue(sk->sk_sleep, &wait);
1168 newsock->state = SS_CONNECTED;
1170 BT_DBG("new socket %p", nsk);
/* getname/getpeername backend: fill a sockaddr_l2 with either the peer
 * (psm/dst/dcid) or the local (sport/src/scid) endpoint. */
1177 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1179 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1180 struct sock *sk = sock->sk;
1182 BT_DBG("sock %p, sk %p", sock, sk);
1184 addr->sa_family = AF_BLUETOOTH;
1185 *len = sizeof(struct sockaddr_l2);
1188 la->l2_psm = l2cap_pi(sk)->psm;
1189 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1190 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1192 la->l2_psm = l2cap_pi(sk)->sport;
1193 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1194 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: after too many unanswered polls, give up and
 * disconnect; otherwise re-arm and poll the peer with RR/RNR. */
1200 static void l2cap_monitor_timeout(unsigned long arg)
1202 struct sock *sk = (void *) arg;
1206 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1207 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1211 l2cap_pi(sk)->retry_count++;
1212 __mod_monitor_timer();
1214 control = L2CAP_CTRL_POLL;
1215 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* ERTM retransmission timer: start the monitor cycle, raise WAIT_F and
 * poll the peer for an acknowledgement. */
1219 static void l2cap_retrans_timeout(unsigned long arg)
1221 struct sock *sk = (void *) arg;
1225 l2cap_pi(sk)->retry_count = 1;
1226 __mod_monitor_timer();
1228 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1230 control = L2CAP_CTRL_POLL;
1231 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* Pop acknowledged frames off the tx queue until expected_ack_seq is at
 * the head; stop the retransmission timer once nothing is unacked. */
1235 static void l2cap_drop_acked_frames(struct sock *sk)
1237 struct sk_buff *skb;
1239 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1240 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1243 skb = skb_dequeue(TX_QUEUE(sk));
1246 l2cap_pi(sk)->unacked_frames--;
1249 if (!l2cap_pi(sk)->unacked_frames)
1250 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Push one skb out on the ACL link for this channel. */
1255 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1257 struct l2cap_pinfo *pi = l2cap_pi(sk);
1260 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1262 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming-mode transmit: clone each queued frame, stamp the tx_seq
 * into its control field, recompute the FCS on the clone, send, and
 * advance the sequence number modulo 64. The original is then dequeued
 * (streaming mode never retransmits). */
1269 static int l2cap_streaming_send(struct sock *sk)
1271 struct sk_buff *skb, *tx_skb;
1272 struct l2cap_pinfo *pi = l2cap_pi(sk);
1276 while ((skb = sk->sk_send_head)) {
1277 tx_skb = skb_clone(skb, GFP_ATOMIC);
1279 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1280 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1281 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1283 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1284 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1285 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1288 err = l2cap_do_send(sk, tx_skb);
1290 l2cap_send_disconn_req(pi->conn, sk);
1294 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1296 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1297 sk->sk_send_head = NULL;
1299 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1301 skb = skb_dequeue(TX_QUEUE(sk));
/* ERTM retransmission of a single frame identified by tx_seq: walk the
 * tx queue to find it, give up (disconnect) once remote_max_tx retries
 * are exhausted, otherwise resend a clone with the current req_seq. */
1307 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1309 struct l2cap_pinfo *pi = l2cap_pi(sk);
1310 struct sk_buff *skb, *tx_skb;
1314 skb = skb_peek(TX_QUEUE(sk));
1316 if (bt_cb(skb)->tx_seq != tx_seq) {
1317 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1319 skb = skb_queue_next(TX_QUEUE(sk), skb);
1323 if (pi->remote_max_tx &&
1324 bt_cb(skb)->retries == pi->remote_max_tx) {
1325 l2cap_send_disconn_req(pi->conn, sk);
1329 tx_skb = skb_clone(skb, GFP_ATOMIC);
1330 bt_cb(skb)->retries++;
1331 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1332 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1333 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1334 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1336 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1337 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1338 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1341 err = l2cap_do_send(sk, tx_skb);
1343 l2cap_send_disconn_req(pi->conn, sk);
/* ERTM transmit path: while frames are queued, the tx window has room
 * and the peer is not busy, clone the head frame, stamp req_seq/tx_seq
 * into its control field, recompute the FCS and send it. The original
 * skb stays on the tx queue for possible retransmission.
 * Returns 0 on success or a negative error from l2cap_do_send().
 *
 * Fix: the FCS was computed over and written into skb->data (the queued
 * original) instead of the transmitted clone tx_skb->data — corrupting
 * the retained copy and leaving the clone's FCS stale. The sibling paths
 * l2cap_streaming_send()/l2cap_retransmit_frame() already use tx_skb.
 * NOTE(review): brace/blank lines are missing from this sampled dump;
 * only the two FCS lines deviate from the original bytes. */
1351 static int l2cap_ertm_send(struct sock *sk)
1353 struct sk_buff *skb, *tx_skb;
1354 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* While waiting for the F-bit response, transmission is frozen. */
1358 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1361 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))
1362 && !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1363 tx_skb = skb_clone(skb, GFP_ATOMIC);
1365 if (pi->remote_max_tx &&
1366 bt_cb(skb)->retries == pi->remote_max_tx) {
1367 l2cap_send_disconn_req(pi->conn, sk);
1371 bt_cb(skb)->retries++;
1373 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1374 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1375 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1376 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1379 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1380 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1381 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1384 err = l2cap_do_send(sk, tx_skb);
1386 l2cap_send_disconn_req(pi->conn, sk);
1389 __mod_retrans_timer();
1391 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1392 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1394 pi->unacked_frames++;
1396 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1397 sk->sk_send_head = NULL;
1399 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Copy 'len' bytes of the user iovec into skb: 'count' bytes into the
 * linear part, the remainder as continuation fragments chained on
 * skb_shinfo(skb)->frag_list (each at most conn->mtu bytes). */
1405 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1407 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1408 struct sk_buff **frag;
1411 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1418 /* Continuation fragments (no L2CAP header) */
1419 frag = &skb_shinfo(skb)->frag_list;
1421 count = min_t(unsigned int, conn->mtu, len);
1423 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1426 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1432 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic header + 2-byte PSM,
 * payload fragmented via l2cap_skbuff_fromiovec(). */
1438 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1440 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1441 struct sk_buff *skb;
1442 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1443 struct l2cap_hdr *lh;
1445 BT_DBG("sk %p len %d", sk, (int)len);
1447 count = min_t(unsigned int, (conn->mtu - hlen), len);
1448 skb = bt_skb_send_alloc(sk, count + hlen,
1449 msg->msg_flags & MSG_DONTWAIT, &err);
1451 return ERR_PTR(-ENOMEM);
1453 /* Create L2CAP header */
1454 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1455 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1456 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1457 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1459 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1460 if (unlikely(err < 0)) {
1462 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header, no control. */
1467 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1469 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1470 struct sk_buff *skb;
1471 int err, count, hlen = L2CAP_HDR_SIZE;
1472 struct l2cap_hdr *lh;
1474 BT_DBG("sk %p len %d", sk, (int)len);
1476 count = min_t(unsigned int, (conn->mtu - hlen), len);
1477 skb = bt_skb_send_alloc(sk, count + hlen,
1478 msg->msg_flags & MSG_DONTWAIT, &err);
1480 return ERR_PTR(-ENOMEM);
1482 /* Create L2CAP header */
1483 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1484 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1485 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1487 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1488 if (unlikely(err < 0)) {
1490 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: header + 2-byte control, optional
 * 2-byte SDU length (SAR start frames), optional 2-byte FCS placeholder
 * (value 0 here; real FCS is stamped at transmit time). */
1495 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1497 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1498 struct sk_buff *skb;
1499 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1500 struct l2cap_hdr *lh;
1502 BT_DBG("sk %p len %d", sk, (int)len);
1507 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1510 count = min_t(unsigned int, (conn->mtu - hlen), len);
1511 skb = bt_skb_send_alloc(sk, count + hlen,
1512 msg->msg_flags & MSG_DONTWAIT, &err);
1514 return ERR_PTR(-ENOMEM);
1516 /* Create L2CAP header */
1517 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1518 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1519 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1520 put_unaligned_le16(control, skb_put(skb, 2));
1522 put_unaligned_le16(sdulen, skb_put(skb, 2));
1524 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1525 if (unlikely(err < 0)) {
1527 return ERR_PTR(err);
1530 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1531 put_unaligned_le16(0, skb_put(skb, 2));
1533 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than max_pdu_size into a chain of I-frames:
 * one SDU-start frame (carrying the total SDU length), zero or more
 * continuation frames, and a final SDU-end frame.  Frames are staged
 * on a local queue and only spliced onto the socket's TX queue once
 * every segment was built, so a mid-stream allocation failure leaves
 * the TX queue untouched (the partial chain is purged).
 * NOTE(review): excerpt elides the loop construct and some
 * declarations (control/size/buflen) — verify against the full file.
 */
1537 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1539 struct l2cap_pinfo *pi = l2cap_pi(sk);
1540 struct sk_buff *skb;
1541 struct sk_buff_head sar_queue;
1545 __skb_queue_head_init(&sar_queue);
/* First segment: SDU-start, sdulen field carries the full SDU length */
1546 control = L2CAP_SDU_START;
1547 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1549 return PTR_ERR(skb);
1551 __skb_queue_tail(&sar_queue, skb);
1552 len -= pi->max_pdu_size;
1553 size +=pi->max_pdu_size;
/* More than one PDU still to go: mark as continuation, else as end */
1559 if (len > pi->max_pdu_size) {
1560 control |= L2CAP_SDU_CONTINUE;
1561 buflen = pi->max_pdu_size;
1563 control |= L2CAP_SDU_END;
1567 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Failure mid-SDU: throw away all segments built so far */
1569 skb_queue_purge(&sar_queue);
1570 return PTR_ERR(skb);
1573 __skb_queue_tail(&sar_queue, skb);
/* Commit the whole segmented SDU to the TX queue atomically */
1578 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1579 if (sk->sk_send_head == NULL)
1580 sk->sk_send_head = sar_queue.next;
/*
 * sendmsg() entry point for L2CAP sockets.
 *
 * Rejects MSG_OOB, checks the outgoing MTU for basic-mode SEQPACKET
 * sockets and requires BT_CONNECTED state, then dispatches on socket
 * type / channel mode:
 *  - SOCK_DGRAM: build and send a connectionless PDU;
 *  - basic mode: build and send a single basic PDU;
 *  - ERTM/streaming: build one I-frame if the SDU fits in a PDU,
 *    otherwise segment it, then kick the mode-specific transmitter.
 * NOTE(review): excerpt elides lock/unlock, several error branches
 * and returns — verify against the full file.
 */
1585 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1587 struct sock *sk = sock->sk;
1588 struct l2cap_pinfo *pi = l2cap_pi(sk);
1589 struct sk_buff *skb;
1593 BT_DBG("sock %p, sk %p", sock, sk);
1595 err = sock_error(sk);
/* Out-of-band data is not supported by L2CAP */
1599 if (msg->msg_flags & MSG_OOB)
1602 /* Check outgoing MTU */
1603 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
1609 if (sk->sk_state != BT_CONNECTED) {
1614 /* Connectionless channel */
1615 if (sk->sk_type == SOCK_DGRAM) {
1616 skb = l2cap_create_connless_pdu(sk, msg, len);
1617 err = l2cap_do_send(sk, skb);
1622 case L2CAP_MODE_BASIC:
1623 /* Create a basic PDU */
1624 skb = l2cap_create_basic_pdu(sk, msg, len);
1630 err = l2cap_do_send(sk, skb);
1635 case L2CAP_MODE_ERTM:
1636 case L2CAP_MODE_STREAMING:
1637 /* Entire SDU fits into one PDU */
1638 if (len <= pi->max_pdu_size) {
1639 control = L2CAP_SDU_UNSEGMENTED;
1640 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1645 __skb_queue_tail(TX_QUEUE(sk), skb);
1646 if (sk->sk_send_head == NULL)
1647 sk->sk_send_head = skb;
1649 /* Segment SDU into multiples PDUs */
1650 err = l2cap_sar_segment_sdu(sk, msg, len);
/* Streaming mode transmits immediately; ERTM obeys its tx window */
1655 if (pi->mode == L2CAP_MODE_STREAMING)
1656 err = l2cap_streaming_send(sk);
1658 err = l2cap_ertm_send(sk);
1665 BT_DBG("bad state %1.1x", pi->mode);
/*
 * recvmsg() entry point.  With deferred setup, a socket sits in
 * BT_CONNECT2 until the application shows interest by reading; the
 * first recvmsg therefore completes the connection by sending the
 * pending L2CAP Connection Response before delegating the actual
 * data path to the generic bt_sock_recvmsg().
 */
1674 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1676 struct sock *sk = sock->sk;
1680 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1681 struct l2cap_conn_rsp rsp;
1683 sk->sk_state = BT_CONFIG;
/* In the response our scid/dcid are the peer's dcid/scid */
1685 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1686 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1687 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1688 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1689 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1690 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Normal data reception handled by the common Bluetooth socket code */
1698 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM, ...).
 *
 * For L2CAP_OPTIONS the current values are loaded into 'opts' first so
 * that a short copy_from_user only overrides the fields the caller
 * provided.  For L2CAP_LM the link-mode bits are mapped onto the newer
 * sec_level plus role_switch/force_reliable flags.
 * NOTE(review): excerpt elides the switch statement, lock handling and
 * error returns — verify against the full file.
 */
1701 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1703 struct sock *sk = sock->sk;
1704 struct l2cap_options opts;
1708 BT_DBG("sk %p", sk);
/* Pre-load current values; a short user buffer keeps the rest */
1714 opts.imtu = l2cap_pi(sk)->imtu;
1715 opts.omtu = l2cap_pi(sk)->omtu;
1716 opts.flush_to = l2cap_pi(sk)->flush_to;
1717 opts.mode = l2cap_pi(sk)->mode;
1718 opts.fcs = l2cap_pi(sk)->fcs;
1720 len = min_t(unsigned int, sizeof(opts), optlen);
1721 if (copy_from_user((char *) &opts, optval, len)) {
1726 l2cap_pi(sk)->imtu = opts.imtu;
1727 l2cap_pi(sk)->omtu = opts.omtu;
1728 l2cap_pi(sk)->mode = opts.mode;
1729 l2cap_pi(sk)->fcs = opts.fcs;
1733 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode flags to security levels (highest bit wins) */
1738 if (opt & L2CAP_LM_AUTH)
1739 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1740 if (opt & L2CAP_LM_ENCRYPT)
1741 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1742 if (opt & L2CAP_LM_SECURE)
1743 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1745 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1746 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * setsockopt() dispatcher: SOL_L2CAP goes to the legacy handler,
 * everything but SOL_BLUETOOTH is rejected.  Handles BT_SECURITY
 * (SEQPACKET/RAW only, level range-checked) and BT_DEFER_SETUP
 * (only meaningful before the channel is connected, i.e. in
 * BT_BOUND/BT_LISTEN state).
 * NOTE(review): excerpt elides the switch statement and error
 * returns — verify against the full file.
 */
1758 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1760 struct sock *sk = sock->sk;
1761 struct bt_security sec;
1765 BT_DBG("sk %p", sk);
1767 if (level == SOL_L2CAP)
1768 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1770 if (level != SOL_BLUETOOTH)
1771 return -ENOPROTOOPT;
1777 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
/* Default if the user buffer is shorter than struct bt_security */
1782 sec.level = BT_SECURITY_LOW;
1784 len = min_t(unsigned int, sizeof(sec), optlen);
1785 if (copy_from_user((char *) &sec, optval, len)) {
1790 if (sec.level < BT_SECURITY_LOW ||
1791 sec.level > BT_SECURITY_HIGH) {
1796 l2cap_pi(sk)->sec_level = sec.level;
1799 case BT_DEFER_SETUP:
1800 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1805 if (get_user(opt, (u32 __user *) optval)) {
1810 bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt handler.
 *
 * L2CAP_OPTIONS returns the current channel parameters; L2CAP_LM maps
 * sec_level/role_switch/force_reliable back onto the legacy link-mode
 * bit mask; L2CAP_CONNINFO exposes the ACL handle and remote device
 * class, and is valid while connected (or in deferred-setup
 * BT_CONNECT2, where the ACL already exists).
 * NOTE(review): excerpt elides the switch statement, 'break's and
 * error returns — verify against the full file.
 */
1822 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1824 struct sock *sk = sock->sk;
1825 struct l2cap_options opts;
1826 struct l2cap_conninfo cinfo;
1830 BT_DBG("sk %p", sk);
1832 if (get_user(len, optlen))
1839 opts.imtu = l2cap_pi(sk)->imtu;
1840 opts.omtu = l2cap_pi(sk)->omtu;
1841 opts.flush_to = l2cap_pi(sk)->flush_to;
1842 opts.mode = l2cap_pi(sk)->mode;
1843 opts.fcs = l2cap_pi(sk)->fcs;
/* Truncate to the caller-supplied buffer length */
1845 len = min_t(unsigned int, len, sizeof(opts));
1846 if (copy_to_user(optval, (char *) &opts, len))
/* Translate sec_level back into cumulative legacy LM flags */
1852 switch (l2cap_pi(sk)->sec_level) {
1853 case BT_SECURITY_LOW:
1854 opt = L2CAP_LM_AUTH;
1856 case BT_SECURITY_MEDIUM:
1857 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1859 case BT_SECURITY_HIGH:
1860 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1868 if (l2cap_pi(sk)->role_switch)
1869 opt |= L2CAP_LM_MASTER;
1871 if (l2cap_pi(sk)->force_reliable)
1872 opt |= L2CAP_LM_RELIABLE;
1874 if (put_user(opt, (u32 __user *) optval))
1878 case L2CAP_CONNINFO:
/* Connection info needs a live ACL: connected, or deferred accept */
1879 if (sk->sk_state != BT_CONNECTED &&
1880 !(sk->sk_state == BT_CONNECT2 &&
1881 bt_sk(sk)->defer_setup)) {
1886 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1887 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1889 len = min_t(unsigned int, len, sizeof(cinfo));
1890 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * getsockopt() dispatcher: mirrors l2cap_sock_setsockopt.  SOL_L2CAP
 * goes to the legacy handler; SOL_BLUETOOTH supports BT_SECURITY
 * (SEQPACKET/RAW only) and BT_DEFER_SETUP (BT_BOUND/BT_LISTEN only).
 * NOTE(review): excerpt elides the switch statement and error
 * returns — verify against the full file.
 */
1904 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1906 struct sock *sk = sock->sk;
1907 struct bt_security sec;
1910 BT_DBG("sk %p", sk);
1912 if (level == SOL_L2CAP)
1913 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1915 if (level != SOL_BLUETOOTH)
1916 return -ENOPROTOOPT;
1918 if (get_user(len, optlen))
1925 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1930 sec.level = l2cap_pi(sk)->sec_level;
1932 len = min_t(unsigned int, len, sizeof(sec));
1933 if (copy_to_user(optval, (char *) &sec, len))
1938 case BT_DEFER_SETUP:
1939 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1944 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * shutdown() entry point.  First shutdown marks both directions
 * closed, cancels the channel timer and starts the L2CAP disconnect
 * sequence; with SO_LINGER set it then blocks (up to sk_lingertime)
 * until the channel reaches BT_CLOSED.
 */
1958 static int l2cap_sock_shutdown(struct socket *sock, int how)
1960 struct sock *sk = sock->sk;
1963 BT_DBG("sock %p, sk %p", sock, sk);
1969 if (!sk->sk_shutdown) {
1970 sk->sk_shutdown = SHUTDOWN_MASK;
1971 l2cap_sock_clear_timer(sk);
1972 __l2cap_sock_close(sk, 0);
1974 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1975 err = bt_sock_wait_state(sk, BT_CLOSED,
/*
 * release() entry point (close()).  Performs a full shutdown of the
 * channel and then kills the socket, releasing the last reference.
 */
1982 static int l2cap_sock_release(struct socket *sock)
1984 struct sock *sk = sock->sk;
1987 BT_DBG("sock %p, sk %p", sock, sk);
1992 err = l2cap_sock_shutdown(sock, 2);
1995 l2cap_sock_kill(sk);
/*
 * Mark a channel as fully configured and wake whoever is waiting on
 * it: the connect()ing thread for outgoing channels, or the listening
 * parent's accept() for incoming channels.
 */
1999 static void l2cap_chan_ready(struct sock *sk)
2001 struct sock *parent = bt_sk(sk)->parent;
2003 BT_DBG("sk %p, parent %p", sk, parent);
/* Configuration finished: reset state and stop the config timer */
2005 l2cap_pi(sk)->conf_state = 0;
2006 l2cap_sock_clear_timer(sk);
2009 /* Outgoing channel.
2010 * Wake up socket sleeping on connect.
2012 sk->sk_state = BT_CONNECTED;
2013 sk->sk_state_change(sk);
2015 /* Incoming channel.
2016 * Wake up socket sleeping on accept.
2018 parent->sk_data_ready(parent, 0);
/* Copy frame to all raw sockets on that connection */
2023 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2025 struct l2cap_chan_list *l = &conn->chan_list;
2026 struct sk_buff *nskb;
2029 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock and hand a
 * clone of the frame to every SOCK_RAW socket (sniffers). */
2031 read_lock(&l->lock);
2032 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2033 if (sk->sk_type != SOCK_RAW)
2036 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: may run in softirq context; clone failure is skipped */
2039 nskb = skb_clone(skb, GFP_ATOMIC);
2043 if (sock_queue_rcv_skb(sk, nskb))
2046 read_unlock(&l->lock);
/* ---- L2CAP signalling commands ---- */
/*
 * Allocate and fill an skb carrying one signalling command on the
 * L2CAP signalling channel (CID 0x0001): L2CAP header + command
 * header + 'dlen' bytes of payload.  Payloads larger than the
 * connection MTU continue in frag_list fragments without headers.
 * Returns NULL on allocation failure (error paths elided in this
 * excerpt).
 */
2050 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2051 u8 code, u8 ident, u16 dlen, void *data)
2053 struct sk_buff *skb, **frag;
2054 struct l2cap_cmd_hdr *cmd;
2055 struct l2cap_hdr *lh;
2058 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2059 conn, code, ident, dlen);
2061 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2062 count = min_t(unsigned int, conn->mtu, len);
2064 skb = bt_skb_alloc(count, GFP_ATOMIC);
2068 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2069 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2070 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2072 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2075 cmd->len = cpu_to_le16(dlen);
/* Copy as much payload as fits in the first fragment */
2078 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2079 memcpy(skb_put(skb, count), data, count);
2085 /* Continuation fragments (no L2CAP header) */
2086 frag = &skb_shinfo(skb)->frag_list;
2088 count = min_t(unsigned int, conn->mtu, len);
2090 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2094 memcpy(skb_put(*frag, count), data, count);
2099 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr.  Returns the consumed
 * length (header + value) and reports type/olen/val; 1/2/4-byte
 * values are fetched little-endian, anything else is returned as a
 * pointer to the raw value bytes.  Advancing of *ptr happens in lines
 * elided from this excerpt.
 */
2109 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2111 struct l2cap_conf_opt *opt = *ptr;
2114 len = L2CAP_CONF_OPT_SIZE + opt->len;
2122 *val = *((u8 *) opt->val);
2126 *val = __le16_to_cpu(*((__le16 *) opt->val));
2130 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Larger options: hand back a pointer to the value bytes */
2134 *val = (unsigned long) opt->val;
2138 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option (type/len/value) at *ptr and
 * advance *ptr past it.  1/2/4-byte values are stored little-endian;
 * for other lengths 'val' is treated as a pointer to the value.
 */
2142 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2144 struct l2cap_conf_opt *opt = *ptr;
2146 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2153 *((u8 *) opt->val) = val;
2157 *((__le16 *) opt->val) = cpu_to_le16(val);
2161 *((__le32 *) opt->val) = cpu_to_le32(val);
/* Arbitrary-length option: 'val' holds a pointer to the data */
2165 memcpy(opt->val, (void *) val, len);
2169 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Check whether 'mode' (ERTM or streaming) is usable: the feature bit
 * must be set both locally and in the peer's feature mask.  Local
 * ERTM/streaming support is gated on a condition elided from this
 * excerpt (presumably the enable_ertm module parameter — confirm).
 */
2172 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2174 u32 local_feat_mask = l2cap_feat_mask;
2176 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2179 case L2CAP_MODE_ERTM:
2180 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2181 case L2CAP_MODE_STREAMING:
2182 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/*
 * Pick the channel mode to negotiate: keep the requested
 * ERTM/streaming mode if both sides support it, otherwise fall back
 * to basic mode.
 */
2188 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2191 case L2CAP_MODE_STREAMING:
2192 case L2CAP_MODE_ERTM:
2193 if (l2cap_mode_supported(mode, remote_feat_mask))
2197 return L2CAP_MODE_BASIC;
/*
 * Build our outgoing Configuration Request into 'data'.
 *
 * On the first request (no prior conf req/rsp exchanged) a desired
 * ERTM/streaming mode is validated against the peer's feature mask —
 * falling back via l2cap_select_mode() or disconnecting when an
 * explicitly required mode is unsupported.  Then mode-specific
 * options are appended: MTU for basic mode (only when non-default),
 * or an RFC option plus an FCS option (when the peer supports FCS and
 * we want it off) for ERTM/streaming.  Returns the request length
 * (return elided in this excerpt).
 */
2201 static int l2cap_build_conf_req(struct sock *sk, void *data)
2203 struct l2cap_pinfo *pi = l2cap_pi(sk);
2204 struct l2cap_conf_req *req = data;
2205 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
2206 void *ptr = req->data;
2208 BT_DBG("sk %p", sk);
/* Mode selection only on the very first configuration round */
2210 if (pi->num_conf_req || pi->num_conf_rsp)
2214 case L2CAP_MODE_STREAMING:
2215 case L2CAP_MODE_ERTM:
2216 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2217 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2218 l2cap_send_disconn_req(pi->conn, sk);
2221 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2227 case L2CAP_MODE_BASIC:
/* Only advertise MTU when it differs from the spec default */
2228 if (pi->imtu != L2CAP_DEFAULT_MTU)
2229 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2232 case L2CAP_MODE_ERTM:
2233 rfc.mode = L2CAP_MODE_ERTM;
2234 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2235 rfc.max_transmit = L2CAP_DEFAULT_MAX_TX;
/* Timeouts are set by the responder; request with zero */
2236 rfc.retrans_timeout = 0;
2237 rfc.monitor_timeout = 0;
2238 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2240 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2241 sizeof(rfc), (unsigned long) &rfc);
2243 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2246 if (pi->fcs == L2CAP_FCS_NONE ||
2247 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2248 pi->fcs = L2CAP_FCS_NONE;
2249 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2253 case L2CAP_MODE_STREAMING:
2254 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming mode: no retransmissions, hence no tx/timeout params */
2256 rfc.max_transmit = 0;
2257 rfc.retrans_timeout = 0;
2258 rfc.monitor_timeout = 0;
2259 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2261 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2262 sizeof(rfc), (unsigned long) &rfc);
2264 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2267 if (pi->fcs == L2CAP_FCS_NONE ||
2268 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2269 pi->fcs = L2CAP_FCS_NONE;
2270 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2275 /* FIXME: Need actual value of the flush timeout */
2276 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2277 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2279 req->dcid = cpu_to_le16(pi->dcid);
2280 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated Configuration Request (pi->conf_req /
 * pi->conf_len) and build our Configuration Response into 'data'.
 *
 * Pass 1 decodes each option (MTU, flush timeout, QoS hint, RFC,
 * FCS), recording unknown non-hint options as L2CAP_CONF_UNKNOWN.
 * Then the requested mode is reconciled with ours — refusing the
 * connection when an explicitly required mode cannot be honoured or
 * when the second response still disagrees.  On success the accepted
 * output options (MTU echo, RFC with our retrans/monitor timeouts)
 * are appended and OUTPUT_DONE is set.  Returns the response length
 * (return elided in this excerpt).
 */
2285 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2287 struct l2cap_pinfo *pi = l2cap_pi(sk);
2288 struct l2cap_conf_rsp *rsp = data;
2289 void *ptr = rsp->data;
2290 void *req = pi->conf_req;
2291 int len = pi->conf_len;
2292 int type, hint, olen;
2294 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2295 u16 mtu = L2CAP_DEFAULT_MTU;
2296 u16 result = L2CAP_CONF_SUCCESS;
2298 BT_DBG("sk %p", sk);
2300 while (len >= L2CAP_CONF_OPT_SIZE) {
2301 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hints must be understood */
2303 hint = type & L2CAP_CONF_HINT;
2304 type &= L2CAP_CONF_MASK;
2307 case L2CAP_CONF_MTU:
2311 case L2CAP_CONF_FLUSH_TO:
2315 case L2CAP_CONF_QOS:
2318 case L2CAP_CONF_RFC:
2319 if (olen == sizeof(rfc))
2320 memcpy(&rfc, (void *) val, olen);
2323 case L2CAP_CONF_FCS:
2324 if (val == L2CAP_FCS_NONE)
2325 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN */
2333 result = L2CAP_CONF_UNKNOWN;
2334 *((u8 *) ptr++) = type;
/* Mode reconciliation only on the first configuration round */
2339 if (pi->num_conf_rsp || pi->num_conf_req)
2343 case L2CAP_MODE_STREAMING:
2344 case L2CAP_MODE_ERTM:
2345 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2346 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2347 return -ECONNREFUSED;
2350 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2355 if (pi->mode != rfc.mode) {
2356 result = L2CAP_CONF_UNACCEPT;
2357 rfc.mode = pi->mode;
/* Second disagreement on mode: give up on the connection */
2359 if (pi->num_conf_rsp == 1)
2360 return -ECONNREFUSED;
2362 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2363 sizeof(rfc), (unsigned long) &rfc);
2367 if (result == L2CAP_CONF_SUCCESS) {
2368 /* Configure output options and let the other side know
2369 * which ones we don't like. */
2371 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2372 result = L2CAP_CONF_UNACCEPT;
2375 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2377 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2380 case L2CAP_MODE_BASIC:
2381 pi->fcs = L2CAP_FCS_NONE;
2382 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2385 case L2CAP_MODE_ERTM:
/* Adopt the peer's window/retry/PDU parameters for our TX side */
2386 pi->remote_tx_win = rfc.txwin_size;
2387 pi->remote_max_tx = rfc.max_transmit;
2388 pi->max_pdu_size = rfc.max_pdu_size;
/* As the responder we dictate the retrans/monitor timeouts */
2390 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2391 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2393 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2396 case L2CAP_MODE_STREAMING:
2397 pi->remote_tx_win = rfc.txwin_size;
2398 pi->max_pdu_size = rfc.max_pdu_size;
2400 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2404 result = L2CAP_CONF_UNACCEPT;
2406 memset(&rfc, 0, sizeof(rfc));
2407 rfc.mode = pi->mode;
2410 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2411 sizeof(rfc), (unsigned long) &rfc);
2413 if (result == L2CAP_CONF_SUCCESS)
2414 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2416 rsp->scid = cpu_to_le16(pi->dcid);
2417 rsp->result = cpu_to_le16(result);
2418 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's Configuration Response and build an adjusted
 * Configuration Request into 'data' for renegotiation.
 *
 * Options the peer proposed back (MTU, flush timeout, RFC) are
 * examined; too-small MTUs flip *result to UNACCEPT with our minimum.
 * A returned RFC mode that contradicts a mode this device requires
 * (STATE2_DEVICE) refuses the connection.  On overall success the
 * negotiated ERTM/streaming parameters are committed to the pinfo.
 * Returns the new request length (return elided in this excerpt).
 */
2423 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2425 struct l2cap_pinfo *pi = l2cap_pi(sk);
2426 struct l2cap_conf_req *req = data;
2427 void *ptr = req->data;
2430 struct l2cap_conf_rfc rfc;
2432 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2434 while (len >= L2CAP_CONF_OPT_SIZE) {
2435 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2438 case L2CAP_CONF_MTU:
2439 if (val < L2CAP_DEFAULT_MIN_MTU) {
2440 *result = L2CAP_CONF_UNACCEPT;
2441 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2444 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2447 case L2CAP_CONF_FLUSH_TO:
2449 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2453 case L2CAP_CONF_RFC:
2454 if (olen == sizeof(rfc))
2455 memcpy(&rfc, (void *)val, olen);
/* This device insisted on a mode; the peer must not change it */
2457 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2458 rfc.mode != pi->mode)
2459 return -ECONNREFUSED;
2461 pi->mode = rfc.mode;
2464 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2465 sizeof(rfc), (unsigned long) &rfc);
/* Negotiation succeeded: latch the agreed channel parameters */
2470 if (*result == L2CAP_CONF_SUCCESS) {
2472 case L2CAP_MODE_ERTM:
2473 pi->remote_tx_win = rfc.txwin_size;
2474 pi->retrans_timeout = rfc.retrans_timeout;
2475 pi->monitor_timeout = rfc.monitor_timeout;
2476 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2478 case L2CAP_MODE_STREAMING:
2479 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2484 req->dcid = cpu_to_le16(pi->dcid);
2485 req->flags = cpu_to_le16(0x0000);
/*
 * Build a minimal Configuration Response (no options) with the given
 * result and flags.  Returns the response length (return elided in
 * this excerpt).
 */
2490 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2492 struct l2cap_conf_rsp *rsp = data;
2493 void *ptr = rsp->data;
2495 BT_DBG("sk %p", sk);
2497 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2498 rsp->result = cpu_to_le16(result);
2499 rsp->flags = cpu_to_le16(flags);
/*
 * Handle an incoming Command Reject.  If it matches our outstanding
 * Information Request (same ident, request-sent state), treat the
 * peer as not supporting the extended feature mask: stop the info
 * timer, mark the feature exchange done and start any pending
 * channels.
 */
2504 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2506 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored here */
2508 if (rej->reason != 0x0000)
2511 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2512 cmd->ident == conn->info_ident) {
2513 del_timer(&conn->info_timer);
2515 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2516 conn->info_ident = 0;
2518 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request.
 *
 * Finds a listening socket for the PSM, enforces link security for
 * non-SDP PSMs, checks the accept backlog, allocates and initializes
 * a child socket, rejects duplicate remote CIDs, then decides the
 * response: success / authorization-pending (deferred setup) /
 * authentication-pending / info-pending, and sends the Connection
 * Response.  If the response is PEND with no info yet exchanged, it
 * also kicks off the feature-mask Information Request.
 * NOTE(review): excerpt elides lock/unlock pairing, gotos and labels —
 * verify against the full file.
 */
2524 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2526 struct l2cap_chan_list *list = &conn->chan_list;
2527 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2528 struct l2cap_conn_rsp rsp;
2529 struct sock *sk, *parent;
2530 int result, status = L2CAP_CS_NO_INFO;
2532 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2533 __le16 psm = req->psm;
2535 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2537 /* Check if we have socket listening on psm */
2538 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2540 result = L2CAP_CR_BAD_PSM;
2544 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check */
2545 if (psm != cpu_to_le16(0x0001) &&
2546 !hci_conn_check_link_mode(conn->hcon)) {
2547 conn->disc_reason = 0x05;
2548 result = L2CAP_CR_SEC_BLOCK;
2552 result = L2CAP_CR_NO_MEM;
2554 /* Check for backlog size */
2555 if (sk_acceptq_is_full(parent)) {
2556 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2560 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2564 write_lock_bh(&list->lock);
2566 /* Check if we already have channel with that dcid */
2567 if (__l2cap_get_chan_by_dcid(list, scid)) {
2568 write_unlock_bh(&list->lock);
2569 sock_set_flag(sk, SOCK_ZAPPED);
2570 l2cap_sock_kill(sk);
2574 hci_conn_hold(conn->hcon);
2576 l2cap_sock_init(sk, parent);
2577 bacpy(&bt_sk(sk)->src, conn->src);
2578 bacpy(&bt_sk(sk)->dst, conn->dst);
2579 l2cap_pi(sk)->psm = psm;
2580 l2cap_pi(sk)->dcid = scid;
2582 __l2cap_chan_add(conn, sk, parent);
/* Our local source CID becomes the peer's destination CID */
2583 dcid = l2cap_pi(sk)->scid;
2585 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2587 l2cap_pi(sk)->ident = cmd->ident;
2589 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2590 if (l2cap_check_security(sk)) {
/* Deferred setup: stay pending until the app calls recvmsg */
2591 if (bt_sk(sk)->defer_setup) {
2592 sk->sk_state = BT_CONNECT2;
2593 result = L2CAP_CR_PEND;
2594 status = L2CAP_CS_AUTHOR_PEND;
2595 parent->sk_data_ready(parent, 0);
2597 sk->sk_state = BT_CONFIG;
2598 result = L2CAP_CR_SUCCESS;
2599 status = L2CAP_CS_NO_INFO;
2602 sk->sk_state = BT_CONNECT2;
2603 result = L2CAP_CR_PEND;
2604 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange not finished yet: answer pending for now */
2607 sk->sk_state = BT_CONNECT2;
2608 result = L2CAP_CR_PEND;
2609 status = L2CAP_CS_NO_INFO;
2612 write_unlock_bh(&list->lock);
2615 bh_unlock_sock(parent);
2618 rsp.scid = cpu_to_le16(scid);
2619 rsp.dcid = cpu_to_le16(dcid);
2620 rsp.result = cpu_to_le16(result);
2621 rsp.status = cpu_to_le16(status);
2622 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2624 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2625 struct l2cap_info_req info;
2626 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2628 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2629 conn->info_ident = l2cap_get_ident(conn);
2631 mod_timer(&conn->info_timer, jiffies +
2632 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2634 l2cap_send_cmd(conn, conn->info_ident,
2635 L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * Handle an incoming Connection Response.  The channel is looked up
 * by our source CID, falling back to the command ident (response to a
 * still-pending request).  Success moves the channel to BT_CONFIG and
 * immediately sends our Configuration Request; PEND keeps the
 * connect-pending flag; anything else tears the channel down with
 * ECONNREFUSED.
 * NOTE(review): excerpt elides the switch header and some branches —
 * verify against the full file.
 */
2641 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2643 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2644 u16 scid, dcid, result, status;
2648 scid = __le16_to_cpu(rsp->scid);
2649 dcid = __le16_to_cpu(rsp->dcid);
2650 result = __le16_to_cpu(rsp->result);
2651 status = __le16_to_cpu(rsp->status);
2653 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2656 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* No scid match: maybe the peer answered before assigning one */
2660 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2666 case L2CAP_CR_SUCCESS:
2667 sk->sk_state = BT_CONFIG;
2668 l2cap_pi(sk)->ident = 0;
2669 l2cap_pi(sk)->dcid = dcid;
2670 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2672 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2674 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2675 l2cap_build_conf_req(sk, req), req);
2676 l2cap_pi(sk)->num_conf_req++;
2680 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
/* Any other result: connection refused, tear down the channel */
2684 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle an incoming Configuration Request.
 *
 * Request data is accumulated in pi->conf_req across fragments (flag
 * bit 0 = more to come; each fragment gets an empty SUCCESS
 * response).  A complete request is parsed and answered; failure
 * disconnects.  Once both directions are configured (OUTPUT_DONE +
 * INPUT_DONE) the FCS setting is finalized (CRC16 unless both sides
 * opted out), ERTM sequence state and timers are initialized and the
 * channel is made ready.  If we haven't sent our own request yet, it
 * is sent now.
 * NOTE(review): excerpt elides lock/unlock, gotos and some error
 * paths — verify against the full file.
 */
2692 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2694 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2700 dcid = __le16_to_cpu(req->dcid);
2701 flags = __le16_to_cpu(req->flags);
2703 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2705 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Ignore config on a channel already being torn down */
2709 if (sk->sk_state == BT_DISCONN)
2712 /* Reject if config buffer is too small. */
2713 len = cmd_len - sizeof(*req);
2714 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2715 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2716 l2cap_build_conf_rsp(sk, rsp,
2717 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment into the per-channel config buffer */
2722 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2723 l2cap_pi(sk)->conf_len += len;
2725 if (flags & 0x0001) {
2726 /* Incomplete config. Send empty response. */
2727 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2728 l2cap_build_conf_rsp(sk, rsp,
2729 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2733 /* Complete config. */
2734 len = l2cap_parse_conf_req(sk, rsp);
2736 l2cap_send_disconn_req(conn, sk);
2740 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2741 l2cap_pi(sk)->num_conf_rsp++;
2743 /* Reset config buffer. */
2744 l2cap_pi(sk)->conf_len = 0;
2746 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2749 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* FCS defaults to CRC16 unless both sides requested none */
2750 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2751 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2752 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2754 sk->sk_state = BT_CONNECTED;
/* Fresh ERTM transmit-side state for the new channel */
2755 l2cap_pi(sk)->next_tx_seq = 0;
2756 l2cap_pi(sk)->expected_ack_seq = 0;
2757 l2cap_pi(sk)->unacked_frames = 0;
2759 setup_timer(&l2cap_pi(sk)->retrans_timer,
2760 l2cap_retrans_timeout, (unsigned long) sk);
2761 setup_timer(&l2cap_pi(sk)->monitor_timer,
2762 l2cap_monitor_timeout, (unsigned long) sk);
2764 __skb_queue_head_init(TX_QUEUE(sk));
2765 __skb_queue_head_init(SREJ_QUEUE(sk));
2766 l2cap_chan_ready(sk);
2770 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2772 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2773 l2cap_build_conf_req(sk, buf), buf);
2774 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle an incoming Configuration Response.
 *
 * UNACCEPT triggers a renegotiation round (bounded by
 * L2CAP_CONF_MAX_CONF_RSP) via l2cap_parse_conf_rsp; any other
 * failure puts the channel into BT_DISCONN and sends a Disconnect
 * Request.  On success INPUT_DONE is set, and once OUTPUT_DONE is
 * also set the FCS choice is finalized, ERTM receive-side state is
 * reset and the channel is made ready.
 * NOTE(review): excerpt elides lock/unlock, gotos and 'break's —
 * verify against the full file.
 */
2782 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2784 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2785 u16 scid, flags, result;
2788 scid = __le16_to_cpu(rsp->scid);
2789 flags = __le16_to_cpu(rsp->flags);
2790 result = __le16_to_cpu(rsp->result);
2792 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2793 scid, flags, result);
2795 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2800 case L2CAP_CONF_SUCCESS:
2803 case L2CAP_CONF_UNACCEPT:
/* Retry negotiation with an adjusted request, bounded in rounds */
2804 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2805 int len = cmd->len - sizeof(*rsp);
2808 /* throw out any old stored conf requests */
2809 result = L2CAP_CONF_SUCCESS;
2810 len = l2cap_parse_conf_rsp(sk, rsp->data,
2813 l2cap_send_disconn_req(conn, sk);
2817 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2818 L2CAP_CONF_REQ, len, req);
2819 l2cap_pi(sk)->num_conf_req++;
2820 if (result != L2CAP_CONF_SUCCESS)
/* Rejected outright: disconnect with a grace timer */
2826 sk->sk_state = BT_DISCONN;
2827 sk->sk_err = ECONNRESET;
2828 l2cap_sock_set_timer(sk, HZ * 5);
2829 l2cap_send_disconn_req(conn, sk);
2836 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2838 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
/* FCS defaults to CRC16 unless both sides requested none */
2839 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2840 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2841 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2843 sk->sk_state = BT_CONNECTED;
/* Fresh ERTM receive-side state for the new channel */
2844 l2cap_pi(sk)->expected_tx_seq = 0;
2845 l2cap_pi(sk)->buffer_seq = 0;
2846 l2cap_pi(sk)->num_to_ack = 0;
2847 __skb_queue_head_init(TX_QUEUE(sk));
2848 __skb_queue_head_init(SREJ_QUEUE(sk));
2849 l2cap_chan_ready(sk);
/*
 * Handle an incoming Disconnection Request: acknowledge with a
 * Disconnection Response, mark the socket shut down, drop all queued
 * TX/SREJ frames, stop the ERTM timers, remove the channel and kill
 * the socket.
 */
2857 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2859 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2860 struct l2cap_disconn_rsp rsp;
2864 scid = __le16_to_cpu(req->scid);
2865 dcid = __le16_to_cpu(req->dcid);
2867 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our local scid */
2869 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2873 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2874 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2875 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2877 sk->sk_shutdown = SHUTDOWN_MASK;
/* Flush pending ERTM state before deleting the channel */
2879 skb_queue_purge(TX_QUEUE(sk));
2880 skb_queue_purge(SREJ_QUEUE(sk));
2881 del_timer(&l2cap_pi(sk)->retrans_timer);
2882 del_timer(&l2cap_pi(sk)->monitor_timer);
2884 l2cap_chan_del(sk, ECONNRESET);
2887 l2cap_sock_kill(sk);
/*
 * Handle an incoming Disconnection Response to our own request:
 * drop queued TX/SREJ frames, stop the ERTM timers, remove the
 * channel (no error — we initiated) and kill the socket.
 */
2891 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2893 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2897 scid = __le16_to_cpu(rsp->scid);
2898 dcid = __le16_to_cpu(rsp->dcid);
2900 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2902 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2906 skb_queue_purge(TX_QUEUE(sk));
2907 skb_queue_purge(SREJ_QUEUE(sk));
2908 del_timer(&l2cap_pi(sk)->retrans_timer);
2909 del_timer(&l2cap_pi(sk)->monitor_timer);
2911 l2cap_chan_del(sk, 0);
2914 l2cap_sock_kill(sk);
/*
 * Handle an incoming Information Request.  Answers the feature-mask
 * query with our capabilities (ERTM/streaming bits added under a
 * condition elided from this excerpt), the fixed-channels query with
 * the l2cap_fixed_chan table, and anything else with NOTSUPP.
 */
2918 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2920 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2923 type = __le16_to_cpu(req->type);
2925 BT_DBG("type 0x%4.4x", type);
2927 if (type == L2CAP_IT_FEAT_MASK) {
2929 u32 feat_mask = l2cap_feat_mask;
2930 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2931 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2932 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2934 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2936 put_unaligned_le32(feat_mask, rsp->data);
2937 l2cap_send_cmd(conn, cmd->ident,
2938 L2CAP_INFO_RSP, sizeof(buf), buf);
2939 } else if (type == L2CAP_IT_FIXED_CHAN) {
2941 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2942 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2943 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel bitmap follows the 4-byte rsp header */
2944 memcpy(buf + 4, l2cap_fixed_chan, 8);
2945 l2cap_send_cmd(conn, cmd->ident,
2946 L2CAP_INFO_RSP, sizeof(buf), buf);
2948 struct l2cap_info_rsp rsp;
2949 rsp.type = cpu_to_le16(type);
2950 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2951 l2cap_send_cmd(conn, cmd->ident,
2952 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an incoming Information Response.  A feature-mask answer is
 * stored in conn->feat_mask; if the peer advertises fixed channels a
 * follow-up fixed-channel query is issued, otherwise (and after the
 * fixed-channel answer) the info exchange is marked done and pending
 * channels are started.
 */
2958 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2960 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2963 type = __le16_to_cpu(rsp->type);
2964 result = __le16_to_cpu(rsp->result);
2966 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2968 del_timer(&conn->info_timer);
2970 if (type == L2CAP_IT_FEAT_MASK) {
2971 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Chain a fixed-channel query before declaring the exchange done */
2973 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2974 struct l2cap_info_req req;
2975 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2977 conn->info_ident = l2cap_get_ident(conn);
2979 l2cap_send_cmd(conn, conn->info_ident,
2980 L2CAP_INFO_REQ, sizeof(req), &req);
2982 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2983 conn->info_ident = 0;
2985 l2cap_conn_start(conn);
2987 } else if (type == L2CAP_IT_FIXED_CHAN) {
2988 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2989 conn->info_ident = 0;
2991 l2cap_conn_start(conn);
/*
 * Demultiplex the signalling channel (CID 0x0001).  The frame is
 * first mirrored to raw sockets, then parsed as a sequence of
 * signalling commands, each dispatched to its handler.  Commands with
 * a bogus length or a zero ident are dropped as corrupted; a handler
 * error produces a Command Reject.
 * NOTE(review): excerpt elides the per-iteration data/len advance
 * past each command's payload — verify against the full file.
 */
2997 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2999 u8 *data = skb->data;
3001 struct l2cap_cmd_hdr cmd;
3004 l2cap_raw_recv(conn, skb);
3006 while (len >= L2CAP_CMD_HDR_SIZE) {
3008 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3009 data += L2CAP_CMD_HDR_SIZE;
3010 len -= L2CAP_CMD_HDR_SIZE;
3012 cmd_len = le16_to_cpu(cmd.len);
3014 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject truncated payloads and the reserved ident 0 */
3016 if (cmd_len > len || !cmd.ident) {
3017 BT_DBG("corrupted command");
3022 case L2CAP_COMMAND_REJ:
3023 l2cap_command_rej(conn, &cmd, data);
3026 case L2CAP_CONN_REQ:
3027 err = l2cap_connect_req(conn, &cmd, data);
3030 case L2CAP_CONN_RSP:
3031 err = l2cap_connect_rsp(conn, &cmd, data);
3034 case L2CAP_CONF_REQ:
3035 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3038 case L2CAP_CONF_RSP:
3039 err = l2cap_config_rsp(conn, &cmd, data);
3042 case L2CAP_DISCONN_REQ:
3043 err = l2cap_disconnect_req(conn, &cmd, data);
3046 case L2CAP_DISCONN_RSP:
3047 err = l2cap_disconnect_rsp(conn, &cmd, data);
3050 case L2CAP_ECHO_REQ:
/* Echo request: bounce the payload straight back */
3051 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3054 case L2CAP_ECHO_RSP:
3057 case L2CAP_INFO_REQ:
3058 err = l2cap_information_req(conn, &cmd, data);
3061 case L2CAP_INFO_RSP:
3062 err = l2cap_information_rsp(conn, &cmd, data);
3066 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3072 struct l2cap_cmd_rej rej;
3073 BT_DBG("error %d", err);
3075 /* FIXME: Map err to a valid reason */
3076 rej.reason = cpu_to_le16(0);
3077 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Verify and strip the 2-byte FCS trailer of a received ERTM frame
 * when CRC16 is negotiated.  The CRC covers the L2CAP header and
 * control field (hdr_size bytes before skb->data) plus the remaining
 * payload.  Returns nonzero on mismatch (exact returns elided in
 * this excerpt).
 */
3087 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3089 u16 our_fcs, rcv_fcs;
3090 int hdr_size = L2CAP_HDR_SIZE + 2;
3092 if (pi->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off, then read it from just past the new tail */
3093 skb_trim(skb, skb->len - 2);
3094 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3095 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3097 if (our_fcs != rcv_fcs)
/*
 * Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq: walk from the head until a frame with a
 * larger tx_seq is found and insert before it; otherwise append at
 * the tail.
 */
3103 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3105 struct sk_buff *next_skb;
/* Stash sequence number and SAR bits in the skb control block */
3107 bt_cb(skb)->tx_seq = tx_seq;
3108 bt_cb(skb)->sar = sar;
3110 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: trivial append */
3112 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3117 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3118 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3122 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3125 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3127 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/*
 * Reassemble received I-frames into SDUs according to their SAR bits.
 *
 * Unsegmented frames go straight to the socket queue.  An SDU-start
 * frame reads the 2-byte SDU length and allocates pi->sdu; CONTINUE
 * and END frames append to it, tracking partial_sdu_len against the
 * announced sdu_len (overflow and out-of-order SAR states are error
 * paths elided in this excerpt).  A complete SDU is cloned and queued
 * to the socket.
 */
3130 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3132 struct l2cap_pinfo *pi = l2cap_pi(sk);
3133 struct sk_buff *_skb;
3136 switch (control & L2CAP_CTRL_SAR) {
3137 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame in the middle of a SAR sequence is invalid */
3138 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3143 err = sock_queue_rcv_skb(sk, skb);
3149 case L2CAP_SDU_START:
3150 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes carry the total SDU length */
3155 pi->sdu_len = get_unaligned_le16(skb->data);
3158 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3164 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3166 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3167 pi->partial_sdu_len = skb->len;
3171 case L2CAP_SDU_CONTINUE:
3172 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3175 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3177 pi->partial_sdu_len += skb->len;
3178 if (pi->partial_sdu_len > pi->sdu_len)
/* SDU-end frame (default case of the SAR switch) */
3186 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3189 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3191 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3192 pi->partial_sdu_len += skb->len;
3194 if (pi->partial_sdu_len == pi->sdu_len) {
3195 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3196 err = sock_queue_rcv_skb(sk, _skb);
/* After a missing frame arrives, drain every queued SREJ frame that is now
 * in sequence, pushing each through SAR reassembly and advancing the
 * SREJ-side buffer sequence number (mod 64). */
3210 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3212 struct sk_buff *skb;
/* Stop at the first queued frame that is still out of sequence. */
3215 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
3216 if (bt_cb(skb)->tx_seq != tx_seq)
3219 skb = skb_dequeue(SREJ_QUEUE(sk));
/* Rebuild the SAR bits of the control field from the saved cb state. */
3220 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3221 l2cap_sar_reassembly_sdu(sk, skb, control);
3222 l2cap_pi(sk)->buffer_seq_srej =
3223 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/* Re-send SREJ S-frames for entries up to and including tx_seq on the
 * pending SREJ list, moving each re-requested entry to the list tail. */
3228 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3230 struct l2cap_pinfo *pi = l2cap_pi(sk);
3231 struct srej_list *l, *tmp;
/* _safe iteration: entries are unlinked/re-linked inside the loop. */
3234 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
/* Reached the target sequence number: nothing further to resend. */
3235 if (l->tx_seq == tx_seq) {
3240 control = L2CAP_SUPER_SELECT_REJECT;
3241 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3242 l2cap_send_sframe(pi, control);
/* Keep the entry queued (at the tail) until the frame actually arrives. */
3244 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send a SELECT_REJECT S-frame for every missing sequence number between
 * the expected tx_seq and the tx_seq that actually arrived, recording each
 * requested number on SREJ_LIST so retransmissions can be matched later.
 * Runs in atomic context (GFP_ATOMIC allocation below). */
3248 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3250 struct l2cap_pinfo *pi = l2cap_pi(sk);
3251 struct srej_list *new;
3254 while (tx_seq != pi->expected_tx_seq) {
3255 control = L2CAP_SUPER_SELECT_REJECT;
3256 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
/* Only the first SREJ of a recovery round carries the Poll bit. */
3257 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3258 control |= L2CAP_CTRL_POLL;
3259 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3261 l2cap_send_sframe(pi, control);
3263 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
/* Fix: GFP_ATOMIC allocation can fail; bail out instead of
 * dereferencing NULL below.  The un-listed gap will be re-requested
 * by the normal SREJ machinery on the next out-of-sequence frame. */
if (!new)
return;
3264 new->tx_seq = pi->expected_tx_seq++;
3265 list_add_tail(&new->list, SREJ_LIST(sk));
/* Account for the frame that actually arrived (tx_seq itself). */
3267 pi->expected_tx_seq++;
/* Process a received ERTM I-frame: handle in-sequence delivery, SREJ-based
 * recovery of out-of-sequence frames, and periodic RR acknowledgements. */
3270 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3272 struct l2cap_pinfo *pi = l2cap_pi(sk);
3273 u8 tx_seq = __get_txseq(rx_control);
3275 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3278 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* Fast path: the frame is exactly the one we were waiting for. */
3280 if (tx_seq == pi->expected_tx_seq)
/* Out of sequence while a SREJ recovery is already in progress. */
3283 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3284 struct srej_list *first;
3286 first = list_first_entry(SREJ_LIST(sk),
3287 struct srej_list, list);
/* This is the oldest outstanding SREJ'd frame: queue it and try to
 * close the gap with any frames buffered behind it. */
3288 if (tx_seq == first->tx_seq) {
3289 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3290 l2cap_check_srej_gap(sk, tx_seq);
3292 list_del(&first->list);
/* All requested retransmissions received: leave SREJ recovery and
 * resynchronize buffer_seq from the SREJ-side counter. */
3295 if (list_empty(SREJ_LIST(sk))) {
3296 pi->buffer_seq = pi->buffer_seq_srej;
3297 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3300 struct srej_list *l;
/* Not the head of the SREJ list: buffer it, and if it was one of the
 * requested frames, re-request everything still missing before it. */
3301 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3303 list_for_each_entry(l, SREJ_LIST(sk), list) {
3304 if (l->tx_seq == tx_seq) {
3305 l2cap_resend_srejframe(sk, tx_seq);
3309 l2cap_send_srejframe(sk, tx_seq);
/* First out-of-sequence frame: enter SREJ recovery from scratch. */
3312 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3314 INIT_LIST_HEAD(SREJ_LIST(sk));
3315 pi->buffer_seq_srej = pi->buffer_seq;
3317 __skb_queue_head_init(SREJ_QUEUE(sk));
3318 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
/* Request the Poll bit on the first SREJ sent for this recovery. */
3320 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3322 l2cap_send_srejframe(sk, tx_seq);
/* In-sequence path: advance the expected sequence number (mod 64). */
3327 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3329 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3330 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3334 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3336 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* Send an RR acknowledgement once every L2CAP_DEFAULT_NUM_TO_ACK frames
 * instead of per-frame, to limit S-frame traffic. */
3340 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3341 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3342 tx_control |= L2CAP_SUPER_RCV_READY;
3343 tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3344 l2cap_send_sframe(pi, tx_control);
/* Process a received ERTM S-frame (RR / REJ / SREJ / RNR): acknowledge
 * transmitted I-frames, trigger retransmission, and track the peer's
 * busy state.  tx_seq here is the ReqSeq field acknowledging our frames. */
3349 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3351 struct l2cap_pinfo *pi = l2cap_pi(sk);
3352 u8 tx_seq = __get_reqseq(rx_control);
3354 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3356 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3357 case L2CAP_SUPER_RCV_READY:
/* RR with Poll: answer immediately with an RR carrying the Final bit. */
3358 if (rx_control & L2CAP_CTRL_POLL) {
3359 u16 control = L2CAP_CTRL_FINAL;
3360 control |= L2CAP_SUPER_RCV_READY |
3361 (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
3362 l2cap_send_sframe(l2cap_pi(sk), control);
3363 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* RR with Final: response to our Poll — stop the monitor timer. */
3365 } else if (rx_control & L2CAP_CTRL_FINAL) {
3366 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3367 pi->expected_ack_seq = tx_seq;
3368 l2cap_drop_acked_frames(sk);
3370 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3373 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3374 del_timer(&pi->monitor_timer);
/* Re-arm the retransmission timer while frames remain unacked. */
3376 if (pi->unacked_frames > 0)
3377 __mod_retrans_timer();
/* Plain RR: acknowledge and resume sending. */
3379 pi->expected_ack_seq = tx_seq;
3380 l2cap_drop_acked_frames(sk);
3382 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3383 && (pi->unacked_frames > 0))
3384 __mod_retrans_timer();
3386 l2cap_ertm_send(sk);
3387 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* REJ: go-back-N — rewind the TX queue to the rejected sequence number. */
3391 case L2CAP_SUPER_REJECT:
3392 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3394 pi->expected_ack_seq = __get_reqseq(rx_control);
3395 l2cap_drop_acked_frames(sk);
3397 sk->sk_send_head = TX_QUEUE(sk)->next;
3398 pi->next_tx_seq = pi->expected_ack_seq;
3400 l2cap_ertm_send(sk);
/* SREJ: selectively retransmit one frame. */
3404 case L2CAP_SUPER_SELECT_REJECT:
3405 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3407 if (rx_control & L2CAP_CTRL_POLL) {
3408 l2cap_retransmit_frame(sk, tx_seq);
3409 pi->expected_ack_seq = tx_seq;
3410 l2cap_drop_acked_frames(sk);
3411 l2cap_ertm_send(sk);
/* Remember which ReqSeq the SREJ answered while we wait for Final. */
3412 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3413 pi->srej_save_reqseq = tx_seq;
3414 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3416 } else if (rx_control & L2CAP_CTRL_FINAL) {
3417 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3418 pi->srej_save_reqseq == tx_seq)
/* Fix: clear the SREJ_ACT flag from conn_state (where it is set
 * above at 3414/3427), not from the srej_save_reqseq sequence
 * number — the original line corrupted srej_save_reqseq and
 * never cleared the flag. */
3419 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3421 l2cap_retransmit_frame(sk, tx_seq);
3424 l2cap_retransmit_frame(sk, tx_seq);
3425 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3426 pi->srej_save_reqseq = tx_seq;
3427 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* RNR: peer is busy — stop retransmitting until it reports ready. */
3432 case L2CAP_SUPER_RCV_NOT_READY:
3433 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3434 pi->expected_ack_seq = tx_seq;
3435 l2cap_drop_acked_frames(sk);
3437 del_timer(&l2cap_pi(sk)->retrans_timer);
3438 if (rx_control & L2CAP_CTRL_POLL) {
3439 u16 control = L2CAP_CTRL_FINAL;
3440 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* Dispatch an incoming data PDU on a connection-oriented channel according
 * to the channel mode (basic / ERTM / streaming). */
3448 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3451 struct l2cap_pinfo *pi;
3456 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3458 BT_DBG("unknown cid 0x%4.4x", cid);
3464 BT_DBG("sk %p, len %d", sk, skb->len);
3466 if (sk->sk_state != BT_CONNECTED)
3470 case L2CAP_MODE_BASIC:
3471 /* If socket recv buffers overflows we drop data here
3472 * which is *bad* because L2CAP has to be reliable.
3473 * But we don't have any other choice. L2CAP doesn't
3474 * provide flow control mechanism. */
/* Drop frames larger than the negotiated incoming MTU. */
3476 if (pi->imtu < skb->len)
3479 if (!sock_queue_rcv_skb(sk, skb))
3483 case L2CAP_MODE_ERTM:
/* ERTM frames start with a 2-byte little-endian control field. */
3484 control = get_unaligned_le16(skb->data)
3488 if (__is_sar_start(control))
3491 if (pi->fcs == L2CAP_FCS_CRC16)
3495 * We can just drop the corrupted I-frame here.
3496 * Receiver will miss it and start proper recovery
3497 * procedures and ask retransmission.
3499 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3502 if (l2cap_check_fcs(pi, skb))
/* Route by frame type: I-frame (data) vs S-frame (supervisory). */
3505 if (__is_iframe(control))
3506 err = l2cap_data_channel_iframe(sk, control, skb);
3508 err = l2cap_data_channel_sframe(sk, control, skb);
3514 case L2CAP_MODE_STREAMING:
3515 control = get_unaligned_le16(skb->data);
3519 if (__is_sar_start(control))
3521 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming mode carries no S-frames; oversized or supervisory
 * frames are simply dropped. */
3525 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3528 if (l2cap_check_fcs(pi, skb))
3531 tx_seq = __get_txseq(control);
/* Streaming mode never retransmits: on a sequence gap just resync
 * expected_tx_seq past the received frame. */
3533 if (pi->expected_tx_seq == tx_seq)
3534 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3536 pi->expected_tx_seq = tx_seq + 1;
3538 err = l2cap_sar_reassembly_sdu(sk, skb, control);
3543 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
/* Deliver a connectionless (G-frame) PDU to the socket bound to its PSM. */
3557 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3561 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3565 BT_DBG("sk %p, len %d", sk, skb->len);
/* Connectionless data is accepted on bound as well as connected sockets. */
3567 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
/* Enforce the incoming MTU before queueing to the socket. */
3570 if (l2cap_pi(sk)->imtu < skb->len)
3573 if (!sock_queue_rcv_skb(sk, skb))
/* Top-level L2CAP demultiplexer: strip the basic header and route the
 * frame by destination CID (signaling / connectionless / data channel). */
3585 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3587 struct l2cap_hdr *lh = (void *) skb->data;
3591 skb_pull(skb, L2CAP_HDR_SIZE);
3592 cid = __le16_to_cpu(lh->cid);
3593 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length after reassembly. */
3595 if (len != skb->len) {
3600 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3603 case L2CAP_CID_SIGNALING:
3604 l2cap_sig_channel(conn, skb);
3607 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM as the first payload bytes. */
3608 psm = get_unaligned_le16(skb->data);
3610 l2cap_conless_channel(conn, psm, skb);
/* Any other CID is a connection-oriented data channel. */
3614 l2cap_data_channel(conn, cid, skb);
3619 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being indicated.  Scan the
 * listening L2CAP sockets and build the link-mode mask (accept and,
 * optionally, role switch) to return to the HCI core. */
3621 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3623 int exact = 0, lm1 = 0, lm2 = 0;
3624 register struct sock *sk;
3625 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
3627 if (type != ACL_LINK)
3630 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3632 /* Find listening sockets and check their link_mode */
3633 read_lock(&l2cap_sk_list.lock);
3634 sk_for_each(sk, node, &l2cap_sk_list.head) {
3635 if (sk->sk_state != BT_LISTEN)
/* lm1 collects flags from sockets bound to this exact adapter address;
 * lm2 from wildcard (BDADDR_ANY) listeners.  'exact' is set in lines
 * elided from this excerpt when an exact-address match is found. */
3638 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3639 lm1 |= HCI_LM_ACCEPT;
3640 if (l2cap_pi(sk)->role_switch)
3641 lm1 |= HCI_LM_MASTER;
3643 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3644 lm2 |= HCI_LM_ACCEPT;
3645 if (l2cap_pi(sk)->role_switch)
3646 lm2 |= HCI_LM_MASTER;
3649 read_unlock(&l2cap_sk_list.lock);
/* Prefer the exact-address listener's mask over the wildcard one. */
3651 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed.  On success create the
 * L2CAP connection state and kick off channel setup; on failure tear
 * everything down with the mapped errno. */
3654 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3656 struct l2cap_conn *conn;
3658 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3660 if (hcon->type != ACL_LINK)
3664 conn = l2cap_conn_add(hcon, status);
3666 l2cap_conn_ready(conn);
/* Failure path: bt_err() maps the HCI status to a POSIX error code. */
3668 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the disconnect reason previously recorded on the
 * L2CAP connection (used when the remote side initiates the disconnect). */
3673 static int l2cap_disconn_ind(struct hci_conn *hcon)
3675 struct l2cap_conn *conn = hcon->l2cap_data;
3677 BT_DBG("hcon %p", hcon);
3679 if (hcon->type != ACL_LINK || !conn)
3682 return conn->disc_reason;
/* HCI callback: ACL link went down — destroy the L2CAP connection and
 * error out all of its channels with the mapped reason. */
3685 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3687 BT_DBG("hcon %p reason %d", hcon, reason);
3689 if (hcon->type != ACL_LINK)
3692 l2cap_conn_del(hcon, bt_err(reason));
/* React to a change of link encryption for one channel: give MEDIUM
 * security channels a grace timer when encryption drops, close HIGH
 * security channels outright, and cancel the timer when encryption
 * comes back. */
3697 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
/* Only connection-oriented (SOCK_SEQPACKET) channels are policed here. */
3699 if (sk->sk_type != SOCK_SEQPACKET)
3702 if (encrypt == 0x00) {
3703 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
/* Encryption lost on a MEDIUM channel: allow 5 seconds for it to
 * be re-established before the timer closes the channel. */
3704 l2cap_sock_clear_timer(sk);
3705 l2cap_sock_set_timer(sk, HZ * 5);
3706 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3707 __l2cap_sock_close(sk, ECONNREFUSED);
/* Encryption (re)enabled: stop any pending grace timer. */
3709 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3710 l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption request completed with
 * 'status' and new 'encrypt' state.  Walk every channel on the connection
 * and advance its state machine accordingly. */
3714 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3716 struct l2cap_chan_list *l;
3717 struct l2cap_conn *conn = hcon->l2cap_data;
3723 l = &conn->chan_list;
3725 BT_DBG("conn %p", conn);
3727 read_lock(&l->lock);
3729 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Channels still waiting on this security procedure are handled by
 * the state transitions below, not by the encryption check. */
3732 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
/* Already-established channels only need their encryption policy
 * re-evaluated. */
3737 if (!status && (sk->sk_state == BT_CONNECTED ||
3738 sk->sk_state == BT_CONFIG)) {
3739 l2cap_check_encryption(sk, encrypt);
/* Outgoing channel that was waiting for security: now send the
 * deferred Connect Request. */
3744 if (sk->sk_state == BT_CONNECT) {
3746 struct l2cap_conn_req req;
3747 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3748 req.psm = l2cap_pi(sk)->psm;
3750 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3752 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3753 L2CAP_CONN_REQ, sizeof(req), &req);
3755 l2cap_sock_clear_timer(sk);
3756 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming channel held in CONNECT2: answer the peer's Connect
 * Request with success or a security-block result. */
3758 } else if (sk->sk_state == BT_CONNECT2) {
3759 struct l2cap_conn_rsp rsp;
3763 sk->sk_state = BT_CONFIG;
3764 result = L2CAP_CR_SUCCESS;
3766 sk->sk_state = BT_DISCONN;
3767 l2cap_sock_set_timer(sk, HZ / 10);
3768 result = L2CAP_CR_SEC_BLOCK;
3771 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3772 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3773 rsp.result = cpu_to_le16(result);
3774 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3775 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3776 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3782 read_unlock(&l->lock);
/* HCI callback: raw ACL data arrived.  Reassemble L2CAP frames that span
 * multiple ACL fragments (ACL_START begins a frame, continuation packets
 * fill conn->rx_skb) and hand each complete frame to l2cap_recv_frame(). */
3787 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3789 struct l2cap_conn *conn = hcon->l2cap_data;
3791 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3794 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3796 if (flags & ACL_START) {
3797 struct l2cap_hdr *hdr;
/* A new start while a reassembly is pending means the previous frame
 * was truncated: discard it and mark the connection unreliable. */
3801 BT_ERR("Unexpected start frame (len %d)", skb->len);
3802 kfree_skb(conn->rx_skb);
3803 conn->rx_skb = NULL;
3805 l2cap_conn_unreliable(conn, ECOMM);
/* A start fragment must contain at least the L2CAP basic header. */
3809 BT_ERR("Frame is too short (len %d)", skb->len);
3810 l2cap_conn_unreliable(conn, ECOMM);
3814 hdr = (struct l2cap_hdr *) skb->data;
3815 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3817 if (len == skb->len) {
3818 /* Complete frame received */
3819 l2cap_recv_frame(conn, skb);
3823 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3825 if (skb->len > len) {
3826 BT_ERR("Frame is too long (len %d, expected len %d)",
3828 l2cap_conn_unreliable(conn, ECOMM);
3832 /* Allocate skb for the complete frame (with header) */
3833 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3837 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still missing. */
3839 conn->rx_len = len - skb->len;
3841 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress is a protocol error. */
3843 if (!conn->rx_len) {
3844 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3845 l2cap_conn_unreliable(conn, ECOMM);
3849 if (skb->len > conn->rx_len) {
3850 BT_ERR("Fragment is too long (len %d, expected %d)",
3851 skb->len, conn->rx_len);
3852 kfree_skb(conn->rx_skb);
3853 conn->rx_skb = NULL;
3855 l2cap_conn_unreliable(conn, ECOMM);
3859 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3861 conn->rx_len -= skb->len;
3863 if (!conn->rx_len) {
3864 /* Complete frame received */
3865 l2cap_recv_frame(conn, conn->rx_skb);
3866 conn->rx_skb = NULL;
/* sysfs 'l2cap' class attribute: dump one line per L2CAP socket with its
 * addresses, state, PSM, CIDs, MTUs and security level. */
3875 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
3878 struct hlist_node *node;
/* _bh variant: the socket list is also touched from softirq context. */
3881 read_lock_bh(&l2cap_sk_list.lock);
3883 sk_for_each(sk, node, &l2cap_sk_list.head) {
3884 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* NOTE(review): unbounded sprintf into the PAGE_SIZE sysfs buffer —
 * with many sockets this can overrun; confirm against the page limit. */
3886 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3887 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
3888 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
3889 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
3892 read_unlock_bh(&l2cap_sk_list.lock);
/* Read-only class attribute /sys/class/bluetooth/l2cap. */
3897 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets.  Generic
 * bt_sock_* helpers handle poll/ioctl; mmap and socketpair are
 * unsupported. */
3899 static const struct proto_ops l2cap_sock_ops = {
3900 .family = PF_BLUETOOTH,
3901 .owner = THIS_MODULE,
3902 .release = l2cap_sock_release,
3903 .bind = l2cap_sock_bind,
3904 .connect = l2cap_sock_connect,
3905 .listen = l2cap_sock_listen,
3906 .accept = l2cap_sock_accept,
3907 .getname = l2cap_sock_getname,
3908 .sendmsg = l2cap_sock_sendmsg,
3909 .recvmsg = l2cap_sock_recvmsg,
3910 .poll = bt_sock_poll,
3911 .ioctl = bt_sock_ioctl,
3912 .mmap = sock_no_mmap,
3913 .socketpair = sock_no_socketpair,
3914 .shutdown = l2cap_sock_shutdown,
3915 .setsockopt = l2cap_sock_setsockopt,
3916 .getsockopt = l2cap_sock_getsockopt
/* Family registration hook: creates L2CAP sockets for PF_BLUETOOTH. */
3919 static struct net_proto_family l2cap_sock_family_ops = {
3920 .family = PF_BLUETOOTH,
3921 .owner = THIS_MODULE,
3922 .create = l2cap_sock_create,
/* HCI protocol descriptor: callbacks the HCI core invokes to deliver
 * connection events and ACL data to L2CAP. */
3925 static struct hci_proto l2cap_hci_proto = {
3927 .id = HCI_PROTO_L2CAP,
3928 .connect_ind = l2cap_connect_ind,
3929 .connect_cfm = l2cap_connect_cfm,
3930 .disconn_ind = l2cap_disconn_ind,
3931 .disconn_cfm = l2cap_disconn_cfm,
3932 .security_cfm = l2cap_security_cfm,
3933 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the Bluetooth socket family and the
 * HCI protocol hooks, unwinding in reverse order on failure. */
3936 static int __init l2cap_init(void)
3940 err = proto_register(&l2cap_proto, 0);
3944 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
3946 BT_ERR("L2CAP socket registration failed");
3950 err = hci_register_proto(&l2cap_hci_proto);
3952 BT_ERR("L2CAP protocol registration failed");
/* Roll back the socket-family registration before bailing out. */
3953 bt_sock_unregister(BTPROTO_L2CAP);
/* The sysfs file is best-effort: failure is logged but non-fatal. */
3957 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
3958 BT_ERR("Failed to create L2CAP info file");
3960 BT_INFO("L2CAP ver %s", VERSION);
3961 BT_INFO("L2CAP socket layer initialized");
/* Common error path: undo proto_register(). */
3966 proto_unregister(&l2cap_proto);
/* Module exit: tear down in the reverse order of l2cap_init(). */
3970 static void __exit l2cap_exit(void)
3972 class_remove_file(bt_class, &class_attr_l2cap);
3974 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
3975 BT_ERR("L2CAP socket unregistration failed");
3977 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
3978 BT_ERR("L2CAP protocol unregistration failed");
3980 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol lets dependent modules (e.g.
 * RFCOMM) pull in l2cap.ko via the module loader without using any other
 * L2CAP symbol. */
3983 void l2cap_load(void)
3985 /* Dummy function to trigger automatic L2CAP module loading by
3986 * other modules that use L2CAP sockets but don't use any other
3987 * symbols from it. */
3990 EXPORT_SYMBOL(l2cap_load);
/* Module entry/exit points and metadata. */
3992 module_init(l2cap_init);
3993 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared 'static int' near the top of the
 * file but registered here with param type 'bool' — confirm this matches
 * module_param's expectations for the targeted kernel version. */
3995 module_param(enable_ertm, bool, 0644);
3996 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
3998 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
3999 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4000 MODULE_VERSION(VERSION);
4001 MODULE_LICENSE("GPL");
/* Lets the kernel auto-load this module via the bt-proto-0 alias. */
4002 MODULE_ALIAS("bt-proto-0");