/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
44 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
45 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
47 static LIST_HEAD(chan_list);
48 static DEFINE_RWLOCK(chan_list_lock);
50 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
51 u8 code, u8 ident, u16 dlen, void *data);
52 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
54 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
55 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
56 struct l2cap_chan *chan, int err);
58 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
59 struct sk_buff_head *skbs, u8 event);
61 /* ---- L2CAP channels ---- */
63 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
67 list_for_each_entry(c, &conn->chan_l, list) {
74 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
78 list_for_each_entry(c, &conn->chan_l, list) {
85 /* Find channel with given SCID.
86 * Returns locked channel. */
87 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
91 mutex_lock(&conn->chan_lock);
92 c = __l2cap_get_chan_by_scid(conn, cid);
95 mutex_unlock(&conn->chan_lock);
100 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
102 struct l2cap_chan *c;
104 list_for_each_entry(c, &conn->chan_l, list) {
105 if (c->ident == ident)
111 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
113 struct l2cap_chan *c;
115 list_for_each_entry(c, &chan_list, global_l) {
116 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
122 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
126 write_lock(&chan_list_lock);
128 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
141 for (p = 0x1001; p < 0x1100; p += 2)
142 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
143 chan->psm = cpu_to_le16(p);
144 chan->sport = cpu_to_le16(p);
151 write_unlock(&chan_list_lock);
155 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
157 write_lock(&chan_list_lock);
161 write_unlock(&chan_list_lock);
166 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
168 u16 cid = L2CAP_CID_DYN_START;
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(conn, cid))
178 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
180 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
181 state_to_string(state));
184 chan->ops->state_change(chan, state);
187 static void l2cap_state_change(struct l2cap_chan *chan, int state)
189 struct sock *sk = chan->sk;
192 __l2cap_state_change(chan, state);
196 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
198 struct sock *sk = chan->sk;
203 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
205 struct sock *sk = chan->sk;
208 __l2cap_chan_set_err(chan, err);
212 static void __set_retrans_timer(struct l2cap_chan *chan)
214 if (!delayed_work_pending(&chan->monitor_timer) &&
215 chan->retrans_timeout) {
216 l2cap_set_timer(chan, &chan->retrans_timer,
217 msecs_to_jiffies(chan->retrans_timeout));
221 static void __set_monitor_timer(struct l2cap_chan *chan)
223 __clear_retrans_timer(chan);
224 if (chan->monitor_timeout) {
225 l2cap_set_timer(chan, &chan->monitor_timer,
226 msecs_to_jiffies(chan->monitor_timeout));
230 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
235 skb_queue_walk(head, skb) {
236 if (bt_cb(skb)->control.txseq == seq)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
254 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
256 size_t alloc_size, i;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size = roundup_pow_of_two(size);
264 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
268 seq_list->mask = alloc_size - 1;
269 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
270 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
271 for (i = 0; i < alloc_size; i++)
272 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
279 kfree(seq_list->list);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
285 /* Constant-time check for list membership */
286 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
289 static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
291 u16 mask = seq_list->mask;
293 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR;
296 } else if (seq_list->head == seq) {
297 /* Head can be removed in constant time */
298 seq_list->head = seq_list->list[seq & mask];
299 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
301 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
302 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
303 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
306 /* Walk the list to find the sequence number */
307 u16 prev = seq_list->head;
308 while (seq_list->list[prev & mask] != seq) {
309 prev = seq_list->list[prev & mask];
310 if (prev == L2CAP_SEQ_LIST_TAIL)
311 return L2CAP_SEQ_LIST_CLEAR;
314 /* Unlink the number from the list and clear it */
315 seq_list->list[prev & mask] = seq_list->list[seq & mask];
316 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
317 if (seq_list->tail == seq)
318 seq_list->tail = prev;
323 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list, seq_list->head);
329 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
333 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
336 for (i = 0; i <= seq_list->mask; i++)
337 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
343 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
345 u16 mask = seq_list->mask;
347 /* All appends happen in constant time */
349 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
352 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
353 seq_list->head = seq;
355 seq_list->list[seq_list->tail & mask] = seq;
357 seq_list->tail = seq;
358 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
/* Channel timer expiry: close the channel with an error that reflects
 * the state it was stuck in, then drop the timer's channel reference.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
391 struct l2cap_chan *l2cap_chan_create(void)
393 struct l2cap_chan *chan;
395 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
399 mutex_init(&chan->lock);
401 write_lock(&chan_list_lock);
402 list_add(&chan->global_l, &chan_list);
403 write_unlock(&chan_list_lock);
405 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
407 chan->state = BT_OPEN;
409 kref_init(&chan->kref);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
414 BT_DBG("chan %p", chan);
419 static void l2cap_chan_destroy(struct kref *kref)
421 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
423 BT_DBG("chan %p", chan);
425 write_lock(&chan_list_lock);
426 list_del(&chan->global_l);
427 write_unlock(&chan_list_lock);
432 void l2cap_chan_hold(struct l2cap_chan *c)
434 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
439 void l2cap_chan_put(struct l2cap_chan *c)
441 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
443 kref_put(&c->kref, l2cap_chan_destroy);
446 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
448 chan->fcs = L2CAP_FCS_CRC16;
449 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
450 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
451 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
452 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
453 chan->sec_level = BT_SECURITY_LOW;
455 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
458 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
460 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
461 __le16_to_cpu(chan->psm), chan->dcid);
463 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
467 switch (chan->chan_type) {
468 case L2CAP_CHAN_CONN_ORIENTED:
469 if (conn->hcon->type == LE_LINK) {
471 chan->omtu = L2CAP_DEFAULT_MTU;
472 chan->scid = L2CAP_CID_LE_DATA;
473 chan->dcid = L2CAP_CID_LE_DATA;
475 /* Alloc CID for connection-oriented socket */
476 chan->scid = l2cap_alloc_cid(conn);
477 chan->omtu = L2CAP_DEFAULT_MTU;
481 case L2CAP_CHAN_CONN_LESS:
482 /* Connectionless socket */
483 chan->scid = L2CAP_CID_CONN_LESS;
484 chan->dcid = L2CAP_CID_CONN_LESS;
485 chan->omtu = L2CAP_DEFAULT_MTU;
488 case L2CAP_CHAN_CONN_FIX_A2MP:
489 chan->scid = L2CAP_CID_A2MP;
490 chan->dcid = L2CAP_CID_A2MP;
491 chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
492 chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
496 /* Raw socket can send/recv signalling messages only */
497 chan->scid = L2CAP_CID_SIGNALING;
498 chan->dcid = L2CAP_CID_SIGNALING;
499 chan->omtu = L2CAP_DEFAULT_MTU;
502 chan->local_id = L2CAP_BESTEFFORT_ID;
503 chan->local_stype = L2CAP_SERV_BESTEFFORT;
504 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
505 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
506 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
507 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
509 l2cap_chan_hold(chan);
511 list_add(&chan->list, &conn->chan_l);
514 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
516 mutex_lock(&conn->chan_lock);
517 __l2cap_chan_add(conn, chan);
518 mutex_unlock(&conn->chan_lock);
521 void l2cap_chan_del(struct l2cap_chan *chan, int err)
523 struct l2cap_conn *conn = chan->conn;
525 __clear_chan_timer(chan);
527 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
530 /* Delete from channel list */
531 list_del(&chan->list);
533 l2cap_chan_put(chan);
537 if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
538 hci_conn_put(conn->hcon);
541 if (chan->ops->teardown)
542 chan->ops->teardown(chan, err);
544 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
548 case L2CAP_MODE_BASIC:
551 case L2CAP_MODE_ERTM:
552 __clear_retrans_timer(chan);
553 __clear_monitor_timer(chan);
554 __clear_ack_timer(chan);
556 skb_queue_purge(&chan->srej_q);
558 l2cap_seq_list_free(&chan->srej_list);
559 l2cap_seq_list_free(&chan->retrans_list);
563 case L2CAP_MODE_STREAMING:
564 skb_queue_purge(&chan->tx_q);
571 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
573 struct l2cap_conn *conn = chan->conn;
574 struct sock *sk = chan->sk;
576 BT_DBG("chan %p state %s sk %p", chan,
577 state_to_string(chan->state), sk);
579 switch (chan->state) {
581 if (chan->ops->teardown)
582 chan->ops->teardown(chan, 0);
587 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
588 conn->hcon->type == ACL_LINK) {
589 __set_chan_timer(chan, sk->sk_sndtimeo);
590 l2cap_send_disconn_req(conn, chan, reason);
592 l2cap_chan_del(chan, reason);
596 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
597 conn->hcon->type == ACL_LINK) {
598 struct l2cap_conn_rsp rsp;
601 if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
602 result = L2CAP_CR_SEC_BLOCK;
604 result = L2CAP_CR_BAD_PSM;
605 l2cap_state_change(chan, BT_DISCONN);
607 rsp.scid = cpu_to_le16(chan->dcid);
608 rsp.dcid = cpu_to_le16(chan->scid);
609 rsp.result = cpu_to_le16(result);
610 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
611 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
615 l2cap_chan_del(chan, reason);
620 l2cap_chan_del(chan, reason);
624 if (chan->ops->teardown)
625 chan->ops->teardown(chan, 0);
630 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
632 if (chan->chan_type == L2CAP_CHAN_RAW) {
633 switch (chan->sec_level) {
634 case BT_SECURITY_HIGH:
635 return HCI_AT_DEDICATED_BONDING_MITM;
636 case BT_SECURITY_MEDIUM:
637 return HCI_AT_DEDICATED_BONDING;
639 return HCI_AT_NO_BONDING;
641 } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
642 if (chan->sec_level == BT_SECURITY_LOW)
643 chan->sec_level = BT_SECURITY_SDP;
645 if (chan->sec_level == BT_SECURITY_HIGH)
646 return HCI_AT_NO_BONDING_MITM;
648 return HCI_AT_NO_BONDING;
650 switch (chan->sec_level) {
651 case BT_SECURITY_HIGH:
652 return HCI_AT_GENERAL_BONDING_MITM;
653 case BT_SECURITY_MEDIUM:
654 return HCI_AT_GENERAL_BONDING;
656 return HCI_AT_NO_BONDING;
661 /* Service level security */
662 int l2cap_chan_check_security(struct l2cap_chan *chan)
664 struct l2cap_conn *conn = chan->conn;
667 auth_type = l2cap_get_auth_type(chan);
669 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
672 static u8 l2cap_get_ident(struct l2cap_conn *conn)
676 /* Get next available identificator.
677 * 1 - 128 are used by kernel.
678 * 129 - 199 are reserved.
679 * 200 - 254 are used by utilities like l2ping, etc.
682 spin_lock(&conn->lock);
684 if (++conn->tx_ident > 128)
689 spin_unlock(&conn->lock);
694 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
696 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
699 BT_DBG("code 0x%2.2x", code);
704 if (lmp_no_flush_capable(conn->hcon->hdev))
705 flags = ACL_START_NO_FLUSH;
709 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
710 skb->priority = HCI_PRIO_MAX;
712 hci_send_acl(conn->hchan, skb, flags);
715 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
717 struct hci_conn *hcon = chan->conn->hcon;
720 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
723 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
724 lmp_no_flush_capable(hcon->hdev))
725 flags = ACL_START_NO_FLUSH;
729 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
730 hci_send_acl(chan->conn->hchan, skb, flags);
733 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
735 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
736 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
738 if (enh & L2CAP_CTRL_FRAME_TYPE) {
741 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
742 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
749 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
750 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
757 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
759 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
760 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
762 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
765 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
766 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
773 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
774 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
781 static inline void __unpack_control(struct l2cap_chan *chan,
784 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
785 __unpack_extended_control(get_unaligned_le32(skb->data),
786 &bt_cb(skb)->control);
787 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
789 __unpack_enhanced_control(get_unaligned_le16(skb->data),
790 &bt_cb(skb)->control);
791 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
795 static u32 __pack_extended_control(struct l2cap_ctrl *control)
799 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
800 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
802 if (control->sframe) {
803 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
804 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
805 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
807 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
808 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
814 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
818 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
819 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
821 if (control->sframe) {
822 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
823 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
824 packed |= L2CAP_CTRL_FRAME_TYPE;
826 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
827 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
833 static inline void __pack_control(struct l2cap_chan *chan,
834 struct l2cap_ctrl *control,
837 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
838 put_unaligned_le32(__pack_extended_control(control),
839 skb->data + L2CAP_HDR_SIZE);
841 put_unaligned_le16(__pack_enhanced_control(control),
842 skb->data + L2CAP_HDR_SIZE);
846 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
848 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
849 return L2CAP_EXT_HDR_SIZE;
851 return L2CAP_ENH_HDR_SIZE;
854 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
858 struct l2cap_hdr *lh;
859 int hlen = __ertm_hdr_size(chan);
861 if (chan->fcs == L2CAP_FCS_CRC16)
862 hlen += L2CAP_FCS_SIZE;
864 skb = bt_skb_alloc(hlen, GFP_KERNEL);
867 return ERR_PTR(-ENOMEM);
869 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
870 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
871 lh->cid = cpu_to_le16(chan->dcid);
873 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
874 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
876 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
878 if (chan->fcs == L2CAP_FCS_CRC16) {
879 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
880 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
883 skb->priority = HCI_PRIO_MAX;
887 static void l2cap_send_sframe(struct l2cap_chan *chan,
888 struct l2cap_ctrl *control)
893 BT_DBG("chan %p, control %p", chan, control);
895 if (!control->sframe)
898 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
902 if (control->super == L2CAP_SUPER_RR)
903 clear_bit(CONN_RNR_SENT, &chan->conn_state);
904 else if (control->super == L2CAP_SUPER_RNR)
905 set_bit(CONN_RNR_SENT, &chan->conn_state);
907 if (control->super != L2CAP_SUPER_SREJ) {
908 chan->last_acked_seq = control->reqseq;
909 __clear_ack_timer(chan);
912 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
913 control->final, control->poll, control->super);
915 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
916 control_field = __pack_extended_control(control);
918 control_field = __pack_enhanced_control(control);
920 skb = l2cap_create_sframe_pdu(chan, control_field);
922 l2cap_do_send(chan, skb);
925 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
927 struct l2cap_ctrl control;
929 BT_DBG("chan %p, poll %d", chan, poll);
931 memset(&control, 0, sizeof(control));
935 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
936 control.super = L2CAP_SUPER_RNR;
938 control.super = L2CAP_SUPER_RR;
940 control.reqseq = chan->buffer_seq;
941 l2cap_send_sframe(chan, &control);
944 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
946 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
949 static void l2cap_send_conn_req(struct l2cap_chan *chan)
951 struct l2cap_conn *conn = chan->conn;
952 struct l2cap_conn_req req;
954 req.scid = cpu_to_le16(chan->scid);
957 chan->ident = l2cap_get_ident(conn);
959 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
961 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
964 static void l2cap_chan_ready(struct l2cap_chan *chan)
966 /* This clears all conf flags, including CONF_NOT_COMPLETE */
967 chan->conf_state = 0;
968 __clear_chan_timer(chan);
970 chan->state = BT_CONNECTED;
972 chan->ops->ready(chan);
975 static void l2cap_do_start(struct l2cap_chan *chan)
977 struct l2cap_conn *conn = chan->conn;
979 if (conn->hcon->type == LE_LINK) {
980 l2cap_chan_ready(chan);
984 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
985 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
988 if (l2cap_chan_check_security(chan) &&
989 __l2cap_no_conn_pending(chan))
990 l2cap_send_conn_req(chan);
992 struct l2cap_info_req req;
993 req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
995 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
996 conn->info_ident = l2cap_get_ident(conn);
998 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1000 l2cap_send_cmd(conn, conn->info_ident,
1001 L2CAP_INFO_REQ, sizeof(req), &req);
1005 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1007 u32 local_feat_mask = l2cap_feat_mask;
1009 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1012 case L2CAP_MODE_ERTM:
1013 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1014 case L2CAP_MODE_STREAMING:
1015 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1021 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
1023 struct sock *sk = chan->sk;
1024 struct l2cap_disconn_req req;
1029 if (chan->mode == L2CAP_MODE_ERTM) {
1030 __clear_retrans_timer(chan);
1031 __clear_monitor_timer(chan);
1032 __clear_ack_timer(chan);
1035 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1036 __l2cap_state_change(chan, BT_DISCONN);
1040 req.dcid = cpu_to_le16(chan->dcid);
1041 req.scid = cpu_to_le16(chan->scid);
1042 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1043 L2CAP_DISCONN_REQ, sizeof(req), &req);
1046 __l2cap_state_change(chan, BT_DISCONN);
1047 __l2cap_chan_set_err(chan, err);
1051 /* ---- L2CAP connections ---- */
1052 static void l2cap_conn_start(struct l2cap_conn *conn)
1054 struct l2cap_chan *chan, *tmp;
1056 BT_DBG("conn %p", conn);
1058 mutex_lock(&conn->chan_lock);
1060 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1061 struct sock *sk = chan->sk;
1063 l2cap_chan_lock(chan);
1065 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1066 l2cap_chan_unlock(chan);
1070 if (chan->state == BT_CONNECT) {
1071 if (!l2cap_chan_check_security(chan) ||
1072 !__l2cap_no_conn_pending(chan)) {
1073 l2cap_chan_unlock(chan);
1077 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1078 && test_bit(CONF_STATE2_DEVICE,
1079 &chan->conf_state)) {
1080 l2cap_chan_close(chan, ECONNRESET);
1081 l2cap_chan_unlock(chan);
1085 l2cap_send_conn_req(chan);
1087 } else if (chan->state == BT_CONNECT2) {
1088 struct l2cap_conn_rsp rsp;
1090 rsp.scid = cpu_to_le16(chan->dcid);
1091 rsp.dcid = cpu_to_le16(chan->scid);
1093 if (l2cap_chan_check_security(chan)) {
1095 if (test_bit(BT_SK_DEFER_SETUP,
1096 &bt_sk(sk)->flags)) {
1097 struct sock *parent = bt_sk(sk)->parent;
1098 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1099 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1101 parent->sk_data_ready(parent, 0);
1104 __l2cap_state_change(chan, BT_CONFIG);
1105 rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
1106 rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
1110 rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
1111 rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1114 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1117 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1118 rsp.result != L2CAP_CR_SUCCESS) {
1119 l2cap_chan_unlock(chan);
1123 set_bit(CONF_REQ_SENT, &chan->conf_state);
1124 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1125 l2cap_build_conf_req(chan, buf), buf);
1126 chan->num_conf_req++;
1129 l2cap_chan_unlock(chan);
1132 mutex_unlock(&conn->chan_lock);
1135 /* Find socket with cid and source/destination bdaddr.
1136 * Returns closest match, locked.
1138 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, u16 cid,
1142 struct l2cap_chan *c, *c1 = NULL;
1144 read_lock(&chan_list_lock);
1146 list_for_each_entry(c, &chan_list, global_l) {
1147 struct sock *sk = c->sk;
1149 if (state && c->state != state)
1152 if (c->scid == cid) {
1153 int src_match, dst_match;
1154 int src_any, dst_any;
1157 src_match = !bacmp(&bt_sk(sk)->src, src);
1158 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1159 if (src_match && dst_match) {
1160 read_unlock(&chan_list_lock);
1165 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1166 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1167 if ((src_match && dst_any) || (src_any && dst_match) ||
1168 (src_any && dst_any))
1173 read_unlock(&chan_list_lock);
1178 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1180 struct sock *parent, *sk;
1181 struct l2cap_chan *chan, *pchan;
1185 /* Check if we have socket listening on cid */
1186 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
1187 conn->src, conn->dst);
1195 chan = pchan->ops->new_connection(pchan);
1201 hci_conn_hold(conn->hcon);
1202 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
1204 bacpy(&bt_sk(sk)->src, conn->src);
1205 bacpy(&bt_sk(sk)->dst, conn->dst);
1207 bt_accept_enqueue(parent, sk);
1209 l2cap_chan_add(conn, chan);
1211 l2cap_chan_ready(chan);
1214 release_sock(parent);
1217 static void l2cap_conn_ready(struct l2cap_conn *conn)
1219 struct l2cap_chan *chan;
1220 struct hci_conn *hcon = conn->hcon;
1222 BT_DBG("conn %p", conn);
1224 if (!hcon->out && hcon->type == LE_LINK)
1225 l2cap_le_conn_ready(conn);
1227 if (hcon->out && hcon->type == LE_LINK)
1228 smp_conn_security(hcon, hcon->pending_sec_level);
1230 mutex_lock(&conn->chan_lock);
1232 list_for_each_entry(chan, &conn->chan_l, list) {
1234 l2cap_chan_lock(chan);
1236 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
1237 l2cap_chan_unlock(chan);
1241 if (hcon->type == LE_LINK) {
1242 if (smp_conn_security(hcon, chan->sec_level))
1243 l2cap_chan_ready(chan);
1245 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1246 struct sock *sk = chan->sk;
1247 __clear_chan_timer(chan);
1249 __l2cap_state_change(chan, BT_CONNECTED);
1250 sk->sk_state_change(sk);
1253 } else if (chan->state == BT_CONNECT)
1254 l2cap_do_start(chan);
1256 l2cap_chan_unlock(chan);
1259 mutex_unlock(&conn->chan_lock);
1262 /* Notify sockets that we cannot guaranty reliability anymore */
1263 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1265 struct l2cap_chan *chan;
1267 BT_DBG("conn %p", conn);
1269 mutex_lock(&conn->chan_lock);
1271 list_for_each_entry(chan, &conn->chan_l, list) {
1272 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1273 __l2cap_chan_set_err(chan, err);
1276 mutex_unlock(&conn->chan_lock);
1279 static void l2cap_info_timeout(struct work_struct *work)
1281 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1284 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1285 conn->info_ident = 0;
1287 l2cap_conn_start(conn);
1290 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1292 struct l2cap_conn *conn = hcon->l2cap_data;
1293 struct l2cap_chan *chan, *l;
1298 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1300 kfree_skb(conn->rx_skb);
1302 mutex_lock(&conn->chan_lock);
1305 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1306 l2cap_chan_hold(chan);
1307 l2cap_chan_lock(chan);
1309 l2cap_chan_del(chan, err);
1311 l2cap_chan_unlock(chan);
1313 chan->ops->close(chan);
1314 l2cap_chan_put(chan);
1317 mutex_unlock(&conn->chan_lock);
1319 hci_chan_del(conn->hchan);
1321 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1322 cancel_delayed_work_sync(&conn->info_timer);
1324 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1325 cancel_delayed_work_sync(&conn->security_timer);
1326 smp_chan_destroy(conn);
1329 hcon->l2cap_data = NULL;
1333 static void security_timeout(struct work_struct *work)
1335 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1336 security_timer.work);
1338 BT_DBG("conn %p", conn);
1340 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) {
1341 smp_chan_destroy(conn);
1342 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1346 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1348 struct l2cap_conn *conn = hcon->l2cap_data;
1349 struct hci_chan *hchan;
1354 hchan = hci_chan_create(hcon);
1358 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1360 hci_chan_del(hchan);
1364 hcon->l2cap_data = conn;
1366 conn->hchan = hchan;
1368 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1370 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1371 conn->mtu = hcon->hdev->le_mtu;
1373 conn->mtu = hcon->hdev->acl_mtu;
1375 conn->src = &hcon->hdev->bdaddr;
1376 conn->dst = &hcon->dst;
1378 conn->feat_mask = 0;
1380 spin_lock_init(&conn->lock);
1381 mutex_init(&conn->chan_lock);
1383 INIT_LIST_HEAD(&conn->chan_l);
1385 if (hcon->type == LE_LINK)
1386 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1388 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1390 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1395 /* ---- Socket interface ---- */
1397 /* Find socket with psm and source / destination bdaddr.
1398 * Returns closest match.
1400 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1404 struct l2cap_chan *c, *c1 = NULL;
1406 read_lock(&chan_list_lock);
1408 list_for_each_entry(c, &chan_list, global_l) {
1409 struct sock *sk = c->sk;
1411 if (state && c->state != state)
1414 if (c->psm == psm) {
1415 int src_match, dst_match;
1416 int src_any, dst_any;
1419 src_match = !bacmp(&bt_sk(sk)->src, src);
1420 dst_match = !bacmp(&bt_sk(sk)->dst, dst);
1421 if (src_match && dst_match) {
1422 read_unlock(&chan_list_lock);
1427 src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
1428 dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
1429 if ((src_match && dst_any) || (src_any && dst_match) ||
1430 (src_any && dst_any))
1435 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on @chan towards @dst.
 * Validates PSM/CID and channel mode, creates (or reuses) the HCI link,
 * attaches the channel to the l2cap_conn and kicks off the connect state
 * machine. Returns 0 or a negative errno.
 * NOTE(review): error-path gotos and several validation bodies are missing
 * from this extract — confirm against the full source.
 */
1440 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
1441 bdaddr_t *dst, u8 dst_type)
1443 struct sock *sk = chan->sk;
1444 bdaddr_t *src = &bt_sk(sk)->src;
1445 struct l2cap_conn *conn;
1446 struct hci_conn *hcon;
1447 struct hci_dev *hdev;
1451 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
1452 dst_type, __le16_to_cpu(psm));
/* Find the local adapter that routes to @dst. */
1454 hdev = hci_get_route(dst, src);
1456 return -EHOSTUNREACH;
1460 l2cap_chan_lock(chan);
1462 /* PSM must be odd and lsb of upper byte must be 0 */
1463 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1464 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a fixed CID. */
1469 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1474 switch (chan->mode) {
1475 case L2CAP_MODE_BASIC:
1477 case L2CAP_MODE_ERTM:
1478 case L2CAP_MODE_STREAMING:
1487 switch (chan->state) {
1491 /* Already connecting */
1496 /* Already connected */
1510 /* Set destination address and psm */
1512 bacpy(&bt_sk(sk)->dst, dst);
1518 auth_type = l2cap_get_auth_type(chan);
/* LE data channels connect over LE_LINK, everything else over ACL. */
1520 if (chan->dcid == L2CAP_CID_LE_DATA)
1521 hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
1522 chan->sec_level, auth_type);
1524 hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
1525 chan->sec_level, auth_type);
1528 err = PTR_ERR(hcon);
1532 conn = l2cap_conn_add(hcon, 0);
/* LE links carry at most one L2CAP channel here. */
1539 if (hcon->type == LE_LINK) {
1542 if (!list_empty(&conn->chan_l)) {
1551 /* Update source addr of the socket */
1552 bacpy(src, conn->src)
1554 l2cap_chan_unlock(chan);
1555 l2cap_chan_add(conn, chan);
1556 l2cap_chan_lock(chan);
1558 l2cap_state_change(chan, BT_CONNECT);
1559 __set_chan_timer(chan, sk->sk_sndtimeo);
/* If the ACL/LE link is already up, start channel setup immediately. */
1561 if (hcon->state == BT_CONNECTED) {
1562 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1563 __clear_chan_timer(chan);
1564 if (l2cap_chan_check_security(chan))
1565 l2cap_state_change(chan, BT_CONNECTED);
1567 l2cap_do_start(chan);
1573 l2cap_chan_unlock(chan);
1574 hci_dev_unlock(hdev);
/* Block until all outstanding ERTM I-frames on the socket's channel have
 * been acked (unacked_frames reaches 0) or the wait is interrupted by a
 * signal, timeout, or socket error. Classic wait-queue sleep loop.
 */
1579 int __l2cap_wait_ack(struct sock *sk)
1581 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1582 DECLARE_WAITQUEUE(wait, current);
1586 add_wait_queue(sk_sleep(sk), &wait);
1587 set_current_state(TASK_INTERRUPTIBLE);
1588 while (chan->unacked_frames > 0 && chan->conn) {
/* Bail out on pending signals with the conventional errno. */
1592 if (signal_pending(current)) {
1593 err = sock_intr_errno(timeo);
1598 timeo = schedule_timeout(timeo);
/* Re-arm the sleep state before re-checking the condition. */
1600 set_current_state(TASK_INTERRUPTIBLE);
1602 err = sock_error(sk);
1606 set_current_state(TASK_RUNNING);
1607 remove_wait_queue(sk_sleep(sk), &wait);
/* Delayed-work handler for the ERTM monitor timer: feeds a MONITOR_TO
 * event into the TX state machine. The early unlock/put pair (1621/1622)
 * is an early-exit path — presumably when the channel is no longer
 * connected; the guarding condition is missing from this extract.
 */
1611 static void l2cap_monitor_timeout(struct work_struct *work)
1613 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1614 monitor_timer.work);
1616 BT_DBG("chan %p", chan);
1618 l2cap_chan_lock(chan);
1621 l2cap_chan_unlock(chan);
/* Drop the reference taken when the timer was armed. */
1622 l2cap_chan_put(chan);
1626 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1628 l2cap_chan_unlock(chan);
1629 l2cap_chan_put(chan);
/* Delayed-work handler for the ERTM retransmission timer: feeds a
 * RETRANS_TO event into the TX state machine. Mirrors
 * l2cap_monitor_timeout(); the early unlock/put (1642/1643) is an
 * early-exit path whose guard is missing from this extract.
 */
1632 static void l2cap_retrans_timeout(struct work_struct *work)
1634 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1635 retrans_timer.work);
1637 BT_DBG("chan %p", chan);
1639 l2cap_chan_lock(chan);
1642 l2cap_chan_unlock(chan);
/* Drop the reference taken when the timer was armed. */
1643 l2cap_chan_put(chan);
1647 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1648 l2cap_chan_unlock(chan);
1649 l2cap_chan_put(chan);
/* Transmit all queued PDUs in Streaming mode: every frame is stamped with
 * the next txseq and sent immediately — streaming mode has no
 * retransmission, so frames are dequeued and not kept.
 */
1652 static void l2cap_streaming_send(struct l2cap_chan *chan,
1653 struct sk_buff_head *skbs)
1655 struct sk_buff *skb;
1656 struct l2cap_ctrl *control;
1658 BT_DBG("chan %p, skbs %p", chan, skbs);
/* Append the caller's segments to the channel TX queue, then drain it. */
1660 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1662 while (!skb_queue_empty(&chan->tx_q)) {
1664 skb = skb_dequeue(&chan->tx_q);
1666 bt_cb(skb)->control.retries = 1;
1667 control = &bt_cb(skb)->control;
1669 control->reqseq = 0;
1670 control->txseq = chan->next_tx_seq;
1672 __pack_control(chan, control, skb);
/* Append the FCS over the frame built so far, if negotiated. */
1674 if (chan->fcs == L2CAP_FCS_CRC16) {
1675 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1676 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1679 l2cap_do_send(chan, skb);
1681 BT_DBG("Sent txseq %u", control->txseq);
1683 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1684 chan->frames_sent++;
/* Transmit as many pending ERTM I-frames as the remote TX window allows.
 * Unlike streaming mode, originals stay on tx_q for retransmission and a
 * clone is handed to the lower layer. Returns the number of frames sent
 * (per the final BT_DBG; the return statement is outside this extract).
 */
1688 static int l2cap_ertm_send(struct l2cap_chan *chan)
1690 struct sk_buff *skb, *tx_skb;
1691 struct l2cap_ctrl *control;
1694 BT_DBG("chan %p", chan);
1696 if (chan->state != BT_CONNECTED)
/* Remote receiver busy (RNR received): hold off transmission. */
1699 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1702 while (chan->tx_send_head &&
1703 chan->unacked_frames < chan->remote_tx_win &&
1704 chan->tx_state == L2CAP_TX_STATE_XMIT) {
1706 skb = chan->tx_send_head;
1708 bt_cb(skb)->control.retries = 1;
1709 control = &bt_cb(skb)->control;
/* Piggyback the F-bit if one is owed to the peer. */
1711 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
/* Every I-frame also acks everything up to buffer_seq. */
1714 control->reqseq = chan->buffer_seq;
1715 chan->last_acked_seq = chan->buffer_seq;
1716 control->txseq = chan->next_tx_seq;
1718 __pack_control(chan, control, skb);
1720 if (chan->fcs == L2CAP_FCS_CRC16) {
1721 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1722 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1725 /* Clone after data has been modified. Data is assumed to be
1726 read-only (for locking purposes) on cloned sk_buffs.
1728 tx_skb = skb_clone(skb, GFP_KERNEL);
1733 __set_retrans_timer(chan);
1735 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1736 chan->unacked_frames++;
1737 chan->frames_sent++;
/* Advance tx_send_head; NULL once the queue tail has been sent. */
1740 if (skb_queue_is_last(&chan->tx_q, skb))
1741 chan->tx_send_head = NULL;
1743 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1745 l2cap_do_send(chan, tx_skb);
1746 BT_DBG("Sent txseq %u", control->txseq);
1749 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1750 chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on retrans_list. Each frame's
 * control field is rebuilt with the current reqseq (and F-bit if owed),
 * the FCS is recomputed, and the retry counter is checked against max_tx
 * — exceeding it tears the connection down with ECONNRESET.
 */
1757 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1759 struct l2cap_ctrl control;
1760 struct sk_buff *skb;
1761 struct sk_buff *tx_skb;
1762 BT_DBG("chan %p", chan);
1764 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1767 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1768 seq = l2cap_seq_list_pop(&chan->retrans_list);
/* Locate the original frame for this seq in the TX queue. */
1770 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1772 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1777 bt_cb(skb)->control.retries++;
1778 control = bt_cb(skb)->control;
1780 if (chan->max_tx != 0 &&
1781 bt_cb(skb)->control.retries > chan->max_tx) {
1782 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1783 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
1784 l2cap_seq_list_clear(&chan->retrans_list);
1788 control.reqseq = chan->buffer_seq;
1789 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1794 if (skb_cloned(skb)) {
1795 /* Cloned sk_buffs are read-only, so we need a
1798 tx_skb = skb_copy(skb, GFP_ATOMIC);
1800 tx_skb = skb_clone(skb, GFP_ATOMIC);
/* Allocation failure: give up on the whole retransmit batch. */
1804 l2cap_seq_list_clear(&chan->retrans_list);
1808 /* Update skb contents */
1809 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1810 put_unaligned_le32(__pack_extended_control(&control),
1811 tx_skb->data + L2CAP_HDR_SIZE);
1813 put_unaligned_le16(__pack_enhanced_control(&control),
1814 tx_skb->data + L2CAP_HDR_SIZE);
/* Control field changed, so the FCS must be recomputed too.
 * NOTE(review): upstream strips/rewrites the old FCS here; the exact
 * handling is partly outside this extract.
 */
1817 if (chan->fcs == L2CAP_FCS_CRC16) {
1818 u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
1819 put_unaligned_le16(fcs, skb_put(tx_skb,
1823 l2cap_do_send(chan, tx_skb);
1825 BT_DBG("Resent txseq %d", control.txseq);
1827 chan->last_acked_seq = chan->buffer_seq;
/* Retransmit the single frame identified by control->reqseq: queue the
 * seq on retrans_list and let l2cap_ertm_resend() do the work.
 */
1831 static void l2cap_retransmit(struct l2cap_chan *chan,
1832 struct l2cap_ctrl *control)
1834 BT_DBG("chan %p, control %p", chan, control);
1836 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1837 l2cap_ertm_resend(chan);
/* Retransmit every unacked frame starting at control->reqseq (typically
 * in response to a REJ or a poll): walk tx_q to the first frame with that
 * txseq, then append all subsequent unacked seqs to retrans_list and
 * resend them.
 */
1840 static void l2cap_retransmit_all(struct l2cap_chan *chan,
1841 struct l2cap_ctrl *control)
1843 struct sk_buff *skb;
1845 BT_DBG("chan %p, control %p", chan, control);
/* Presumably guarded by control->poll — the condition line is missing
 * from this extract; an F-bit response is owed after a poll. */
1848 set_bit(CONN_SEND_FBIT, &chan->conn_state);
1850 l2cap_seq_list_clear(&chan->retrans_list);
1852 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1855 if (chan->unacked_frames) {
/* Skip forward to reqseq (or to the unsent region boundary). */
1856 skb_queue_walk(&chan->tx_q, skb) {
1857 if (bt_cb(skb)->control.txseq == control->reqseq ||
1858 skb == chan->tx_send_head)
1862 skb_queue_walk_from(&chan->tx_q, skb) {
1863 if (skb == chan->tx_send_head)
1866 l2cap_seq_list_append(&chan->retrans_list,
1867 bt_cb(skb)->control.txseq);
1870 l2cap_ertm_resend(chan);
/* Acknowledge received I-frames. If locally busy, send RNR. Otherwise
 * try to piggyback the ack on outgoing I-frames; if none went out, send
 * an explicit RR once the unacked window is 3/4 full, else (re)arm the
 * ack timer to batch acks.
 */
1876 static void l2cap_send_ack(struct l2cap_chan *chan)
1878 struct l2cap_ctrl control;
1879 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
1880 chan->last_acked_seq);
1881 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1882 chan, chan->last_acked_seq, chan->buffer_seq);
1884 memset(&control, 0, sizeof(control));
/* Local busy while in normal receive state: tell the peer via RNR. */
1887 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
1888 chan->rx_state == L2CAP_RX_STATE_RECV) {
1889 __clear_ack_timer(chan);
1890 control.super = L2CAP_SUPER_RNR;
1891 control.reqseq = chan->buffer_seq;
1892 l2cap_send_sframe(chan, &control);
1894 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
1895 l2cap_ertm_send(chan);
1896 /* If any i-frames were sent, they included an ack */
1897 if (chan->buffer_seq == chan->last_acked_seq)
1901 /* Ack now if the window is 3/4ths full.
1902 * Calculate without mul or div
1904 threshold = chan->ack_win;
/* threshold = ack_win * 3; a later shift (outside this extract)
 * divides by 4 to get the 3/4 mark. */
1905 threshold += threshold << 1;
1908 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
1911 if (frames_to_ack >= threshold) {
1912 __clear_ack_timer(chan);
1913 control.super = L2CAP_SUPER_RR;
1914 control.reqseq = chan->buffer_seq;
1915 l2cap_send_sframe(chan, &control);
/* Below threshold: defer the ack to the ack timer. */
1920 __set_ack_timer(chan);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the head skb, the remainder into a chain of continuation
 * fragments (frag_list), each at most conn->mtu bytes.
 */
1924 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1925 struct msghdr *msg, int len,
1926 int count, struct sk_buff *skb)
1928 struct l2cap_conn *conn = chan->conn;
1929 struct sk_buff **frag;
/* Fill the head skb first; failure here means a bad user iovec. */
1932 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1938 /* Continuation fragments (no L2CAP header) */
1939 frag = &skb_shinfo(skb)->frag_list;
1941 struct sk_buff *tmp;
1943 count = min_t(unsigned int, conn->mtu, len);
/* Allocation is delegated to the channel's alloc_skb callback so the
 * owner (socket/A2MP) controls accounting and blocking behaviour. */
1945 tmp = chan->ops->alloc_skb(chan, count,
1946 msg->msg_flags & MSG_DONTWAIT);
1948 return PTR_ERR(tmp);
1952 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1955 (*frag)->priority = skb->priority;
/* Keep the head skb's bookkeeping consistent with the chain. */
1960 skb->len += (*frag)->len;
1961 skb->data_len += (*frag)->len;
1963 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload from @msg. Returns the skb or an
 * ERR_PTR on failure.
 */
1969 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1970 struct msghdr *msg, size_t len,
1973 struct l2cap_conn *conn = chan->conn;
1974 struct sk_buff *skb;
1975 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1976 struct l2cap_hdr *lh;
1978 BT_DBG("chan %p len %zu priority %u", chan, len, priority);
/* First fragment carries the header; cap it at the HCI MTU. */
1980 count = min_t(unsigned int, (conn->mtu - hlen), len);
1982 skb = chan->ops->alloc_skb(chan, count + hlen,
1983 msg->msg_flags & MSG_DONTWAIT);
1987 skb->priority = priority;
1989 /* Create L2CAP header */
1990 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1991 lh->cid = cpu_to_le16(chan->dcid);
/* Length covers payload plus the embedded PSM field. */
1992 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
1993 put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));
1995 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1996 if (unlikely(err < 0)) {
1998 return ERR_PTR(err);
/* Build a Basic-mode B-frame PDU: plain L2CAP header plus the user
 * payload from @msg. Returns the skb or an ERR_PTR on failure.
 */
2003 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2004 struct msghdr *msg, size_t len,
2007 struct l2cap_conn *conn = chan->conn;
2008 struct sk_buff *skb;
2010 struct l2cap_hdr *lh;
2012 BT_DBG("chan %p len %zu", chan, len);
/* Head skb holds the header plus as much payload as fits in the MTU. */
2014 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2016 skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
2017 msg->msg_flags & MSG_DONTWAIT);
2021 skb->priority = priority;
2023 /* Create L2CAP header */
2024 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2025 lh->cid = cpu_to_le16(chan->dcid);
2026 lh->len = cpu_to_le16(len);
2028 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2029 if (unlikely(err < 0)) {
2031 return ERR_PTR(err);
/* Build one ERTM/Streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at send time), an optional SDU-length field for the
 * first segment of a segmented SDU, the payload, and room for the FCS.
 * Returns the skb or an ERR_PTR on failure.
 */
2036 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2037 struct msghdr *msg, size_t len,
2040 struct l2cap_conn *conn = chan->conn;
2041 struct sk_buff *skb;
2042 int err, count, hlen;
2043 struct l2cap_hdr *lh;
2045 BT_DBG("chan %p len %zu", chan, len);
2048 return ERR_PTR(-ENOTCONN);
/* Header size depends on enhanced vs extended control field. */
2050 hlen = __ertm_hdr_size(chan);
/* Presumably guarded by sdulen != 0 (SAR start frame) — the condition
 * line is missing from this extract. */
2053 hlen += L2CAP_SDULEN_SIZE;
2055 if (chan->fcs == L2CAP_FCS_CRC16)
2056 hlen += L2CAP_FCS_SIZE;
2058 count = min_t(unsigned int, (conn->mtu - hlen), len);
2060 skb = chan->ops->alloc_skb(chan, count + hlen,
2061 msg->msg_flags & MSG_DONTWAIT);
2065 /* Create L2CAP header */
2066 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2067 lh->cid = cpu_to_le16(chan->dcid);
2068 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2070 /* Control header is populated later */
2071 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2072 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2074 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2077 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2079 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2080 if (unlikely(err < 0)) {
2082 return ERR_PTR(err);
/* Stash per-frame metadata for the ERTM state machine. */
2085 bt_cb(skb)->control.fcs = chan->fcs;
2086 bt_cb(skb)->control.retries = 0;
/* Segment an SDU from @msg into I-frame PDUs queued on @seg_queue.
 * PDU size is bounded by the HCI MTU (ERTM PDUs must fit one HCI
 * fragment) and the remote's MPS; SAR fields mark START/CONTINUE/END or
 * UNSEGMENTED. Returns 0 or a negative errno (purging @seg_queue).
 */
2090 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2091 struct sk_buff_head *seg_queue,
2092 struct msghdr *msg, size_t len)
2094 struct sk_buff *skb;
2099 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2101 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2102 * so fragmented skbs are not used. The HCI layer's handling
2103 * of fragmented skbs is not compatible with ERTM's queueing.
2106 /* PDU size is derived from the HCI MTU */
2107 pdu_len = chan->conn->mtu;
2109 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2111 /* Adjust for largest possible L2CAP overhead. */
2113 pdu_len -= L2CAP_FCS_SIZE;
2115 pdu_len -= __ertm_hdr_size(chan);
2117 /* Remote device may have requested smaller PDUs */
2118 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2120 if (len <= pdu_len) {
2121 sar = L2CAP_SAR_UNSEGMENTED;
/* Segmented SDU: first PDU carries the SDU length field. */
2125 sar = L2CAP_SAR_START;
2127 pdu_len -= L2CAP_SDULEN_SIZE;
2131 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
/* On failure drop everything segmented so far. */
2134 __skb_queue_purge(seg_queue);
2135 return PTR_ERR(skb);
2138 bt_cb(skb)->control.sar = sar;
2139 __skb_queue_tail(seg_queue, skb);
/* Only the START frame carried the SDU length; reclaim the space. */
2144 pdu_len += L2CAP_SDULEN_SIZE;
2147 if (len <= pdu_len) {
2148 sar = L2CAP_SAR_END;
2151 sar = L2CAP_SAR_CONTINUE;
/* Top-level send entry point: dispatch on channel type/mode.
 * Connectionless channels send a single G-frame; Basic mode a single
 * B-frame; ERTM/Streaming segment the SDU first, then hand the segment
 * queue to the TX state machine (ERTM) or send directly (Streaming).
 */
2158 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
2161 struct sk_buff *skb;
2163 struct sk_buff_head seg_queue;
2165 /* Connectionless channel */
2166 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2167 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
2169 return PTR_ERR(skb);
2171 l2cap_do_send(chan, skb);
2175 switch (chan->mode) {
2176 case L2CAP_MODE_BASIC:
2177 /* Check outgoing MTU */
2178 if (len > chan->omtu)
2181 /* Create a basic PDU */
2182 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
2184 return PTR_ERR(skb);
2186 l2cap_do_send(chan, skb);
2190 case L2CAP_MODE_ERTM:
2191 case L2CAP_MODE_STREAMING:
2192 /* Check outgoing MTU */
2193 if (len > chan->omtu) {
2198 __skb_queue_head_init(&seg_queue);
2200 /* Do segmentation before calling in to the state machine,
2201 * since it's possible to block while waiting for memory
2204 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2206 /* The channel could have been closed while segmenting,
2207 * check that it is still connected.
2209 if (chan->state != BT_CONNECTED) {
2210 __skb_queue_purge(&seg_queue);
2217 if (chan->mode == L2CAP_MODE_ERTM)
2218 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2220 l2cap_streaming_send(chan, &seg_queue);
2224 /* If the skbs were not queued for sending, they'll still be in
2225 * seg_queue and need to be purged.
2227 __skb_queue_purge(&seg_queue);
2231 BT_DBG("bad state %1.1x", chan->mode);
/* Send SREJ S-frames for every sequence number between expected_tx_seq
 * and @txseq that has not already been buffered in srej_q, recording each
 * requested seq on srej_list.
 */
2238 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2240 struct l2cap_ctrl control;
2243 BT_DBG("chan %p, txseq %u", chan, txseq);
2245 memset(&control, 0, sizeof(control));
2247 control.super = L2CAP_SUPER_SREJ;
2249 for (seq = chan->expected_tx_seq; seq != txseq;
2250 seq = __next_seq(chan, seq)) {
/* Only request frames we haven't already received out of order. */
2251 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2252 control.reqseq = seq;
2253 l2cap_send_sframe(chan, &control);
2254 l2cap_seq_list_append(&chan->srej_list, seq);
/* The frame that triggered the SREJ is now the newest received. */
2258 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recently requested (tail) sequence number
 * on srej_list; no-op if the list is empty.
 */
2261 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2263 struct l2cap_ctrl control;
2265 BT_DBG("chan %p", chan);
2267 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2270 memset(&control, 0, sizeof(control));
2272 control.super = L2CAP_SUPER_SREJ;
2273 control.reqseq = chan->srej_list.tail;
2274 l2cap_send_sframe(chan, &control);
/* Re-send SREJs for every outstanding seq on srej_list except @txseq
 * (which has just arrived). Each popped seq is re-appended, so a single
 * pass is bounded by remembering the initial list head.
 */
2277 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2279 struct l2cap_ctrl control;
2283 BT_DBG("chan %p, txseq %u", chan, txseq);
2285 memset(&control, 0, sizeof(control));
2287 control.super = L2CAP_SUPER_SREJ;
2289 /* Capture initial list head to allow only one pass through the list. */
2290 initial_head = chan->srej_list.head;
2293 seq = l2cap_seq_list_pop(&chan->srej_list);
/* Stop once we hit the just-received seq or exhaust the list. */
2294 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2297 control.reqseq = seq;
2298 l2cap_send_sframe(chan, &control);
2299 l2cap_seq_list_append(&chan->srej_list, seq);
2300 } while (chan->srej_list.head != initial_head);
/* Process an incoming ack (reqseq): free every TX-queue frame with a
 * txseq in [expected_ack_seq, reqseq), decrement unacked_frames, and stop
 * the retransmission timer once everything is acked.
 */
2303 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2305 struct sk_buff *acked_skb;
2308 BT_DBG("chan %p, reqseq %u", chan, reqseq);
/* Nothing outstanding, or duplicate ack: nothing to do. */
2310 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2313 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2314 chan->expected_ack_seq, chan->unacked_frames);
2316 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2317 ackseq = __next_seq(chan, ackseq)) {
2319 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2321 skb_unlink(acked_skb, &chan->tx_q);
2322 kfree_skb(acked_skb);
2323 chan->unacked_frames--;
2327 chan->expected_ack_seq = reqseq;
2329 if (chan->unacked_frames == 0)
2330 __clear_retrans_timer(chan);
2332 BT_DBG("unacked_frames %u", chan->unacked_frames);
/* Abandon the SREJ_SENT receive state: discard out-of-order buffered
 * frames and pending SREJ requests, reset expected_tx_seq to buffer_seq,
 * and return the RX state machine to normal RECV.
 */
2335 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2337 BT_DBG("chan %p", chan);
2339 chan->expected_tx_seq = chan->buffer_seq;
2340 l2cap_seq_list_clear(&chan->srej_list);
2341 skb_queue_purge(&chan->srej_q);
2342 chan->rx_state = L2CAP_RX_STATE_RECV;
/* ERTM TX state machine handler for the XMIT state. Handles data
 * requests, local-busy transitions, incoming acks, explicit polls and
 * retransmission timeouts; polls move the machine to WAIT_F.
 * NOTE(review): break statements between cases are missing from this
 * extract — each case is terminal in the upstream source.
 */
2345 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2346 struct l2cap_ctrl *control,
2347 struct sk_buff_head *skbs, u8 event)
2349 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2353 case L2CAP_EV_DATA_REQUEST:
/* New data: queue it and transmit as the window allows. */
2354 if (chan->tx_send_head == NULL)
2355 chan->tx_send_head = skb_peek(skbs);
2357 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2358 l2cap_ertm_send(chan);
2360 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2361 BT_DBG("Enter LOCAL_BUSY");
2362 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2364 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2365 /* The SREJ_SENT state must be aborted if we are to
2366 * enter the LOCAL_BUSY state.
2368 l2cap_abort_rx_srej_sent(chan);
2371 l2cap_send_ack(chan);
2374 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2375 BT_DBG("Exit LOCAL_BUSY");
2376 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
/* If we sent RNR earlier, poll the peer with RR(P=1) and wait for
 * the final bit in WAIT_F. */
2378 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2379 struct l2cap_ctrl local_control;
2381 memset(&local_control, 0, sizeof(local_control));
2382 local_control.sframe = 1;
2383 local_control.super = L2CAP_SUPER_RR;
2384 local_control.poll = 1;
2385 local_control.reqseq = chan->buffer_seq;
2386 l2cap_send_sframe(chan, &local_control);
2388 chan->retry_count = 1;
2389 __set_monitor_timer(chan);
2390 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2393 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2394 l2cap_process_reqseq(chan, control->reqseq);
2396 case L2CAP_EV_EXPLICIT_POLL:
2397 l2cap_send_rr_or_rnr(chan, 1);
2398 chan->retry_count = 1;
2399 __set_monitor_timer(chan);
2400 __clear_ack_timer(chan);
2401 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2403 case L2CAP_EV_RETRANS_TO:
/* Retransmission timer fired: poll the peer and await F-bit. */
2404 l2cap_send_rr_or_rnr(chan, 1);
2405 chan->retry_count = 1;
2406 __set_monitor_timer(chan);
2407 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2409 case L2CAP_EV_RECV_FBIT:
2410 /* Nothing to process */
/* ERTM TX state machine handler for the WAIT_F state (a poll is
 * outstanding). Data is queued but not sent; receiving the F-bit returns
 * the machine to XMIT; monitor timeouts re-poll until max_tx retries are
 * exhausted, then the link is torn down with ECONNABORTED.
 * NOTE(review): break statements between cases are missing from this
 * extract — each case is terminal in the upstream source.
 */
2417 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2418 struct l2cap_ctrl *control,
2419 struct sk_buff_head *skbs, u8 event)
2421 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2425 case L2CAP_EV_DATA_REQUEST:
2426 if (chan->tx_send_head == NULL)
2427 chan->tx_send_head = skb_peek(skbs);
2428 /* Queue data, but don't send. */
2429 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2431 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2432 BT_DBG("Enter LOCAL_BUSY");
2433 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2435 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2436 /* The SREJ_SENT state must be aborted if we are to
2437 * enter the LOCAL_BUSY state.
2439 l2cap_abort_rx_srej_sent(chan);
2442 l2cap_send_ack(chan);
2445 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2446 BT_DBG("Exit LOCAL_BUSY");
2447 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2449 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2450 struct l2cap_ctrl local_control;
2451 memset(&local_control, 0, sizeof(local_control));
2452 local_control.sframe = 1;
2453 local_control.super = L2CAP_SUPER_RR;
2454 local_control.poll = 1;
2455 local_control.reqseq = chan->buffer_seq;
2456 l2cap_send_sframe(chan, &local_control);
2458 chan->retry_count = 1;
2459 __set_monitor_timer(chan);
2460 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2463 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2464 l2cap_process_reqseq(chan, control->reqseq);
2468 case L2CAP_EV_RECV_FBIT:
/* Final bit received: the poll is answered, go back to XMIT. */
2469 if (control && control->final) {
2470 __clear_monitor_timer(chan);
2471 if (chan->unacked_frames > 0)
2472 __set_retrans_timer(chan);
2473 chan->retry_count = 0;
2474 chan->tx_state = L2CAP_TX_STATE_XMIT;
2475 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2478 case L2CAP_EV_EXPLICIT_POLL:
/* A poll is already outstanding: ignore. */
2481 case L2CAP_EV_MONITOR_TO:
2482 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2483 l2cap_send_rr_or_rnr(chan, 1);
2484 __set_monitor_timer(chan);
2485 chan->retry_count++;
/* Retry budget exhausted: abort the connection. */
2487 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Dispatch an event to the ERTM TX state machine based on the current
 * tx_state (XMIT or WAIT_F).
 */
2495 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2496 struct sk_buff_head *skbs, u8 event)
2498 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2499 chan, control, skbs, event, chan->tx_state);
2501 switch (chan->tx_state) {
2502 case L2CAP_TX_STATE_XMIT:
2503 l2cap_tx_state_xmit(chan, control, skbs, event);
2505 case L2CAP_TX_STATE_WAIT_F:
2506 l2cap_tx_state_wait_f(chan, control, skbs, event);
/* Forward a received frame's reqseq (+F-bit) to the TX state machine so
 * acked frames can be released.
 */
2514 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2515 struct l2cap_ctrl *control)
2517 BT_DBG("chan %p, control %p", chan, control);
2518 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
/* Forward only the F-bit of a received frame to the TX state machine
 * (used when the reqseq must not be processed).
 */
2521 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2522 struct l2cap_ctrl *control)
2524 BT_DBG("chan %p, control %p", chan, control);
2525 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2528 /* Copy frame to all raw sockets on that connection */
/* Deliver a clone of @skb to every raw-type channel on @conn (see the
 * "Copy frame to all raw sockets" comment above). Clones that the
 * channel's recv op rejects are presumably freed on the missing line.
 */
2529 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2531 struct sk_buff *nskb;
2532 struct l2cap_chan *chan;
2534 BT_DBG("conn %p", conn);
2536 mutex_lock(&conn->chan_lock);
2538 list_for_each_entry(chan, &conn->chan_l, list) {
2539 struct sock *sk = chan->sk;
/* Only raw (sniffer-style) channels get a copy. */
2540 if (chan->chan_type != L2CAP_CHAN_RAW)
2543 /* Don't send frame to the socket it came from */
2546 nskb = skb_clone(skb, GFP_ATOMIC);
2550 if (chan->ops->recv(chan, nskb))
2554 mutex_unlock(&conn->chan_lock);
2557 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID depends on
 * LE vs BR/EDR link), command header (code/ident/len), then @dlen bytes
 * of @data split into continuation fragments at the HCI MTU.
 */
2558 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2559 u8 ident, u16 dlen, void *data)
2561 struct sk_buff *skb, **frag;
2562 struct l2cap_cmd_hdr *cmd;
2563 struct l2cap_hdr *lh;
2566 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2567 conn, code, ident, dlen);
2569 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2570 count = min_t(unsigned int, conn->mtu, len);
2572 skb = bt_skb_alloc(count, GFP_ATOMIC);
2576 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2577 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links. */
2579 if (conn->hcon->type == LE_LINK)
2580 lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2582 lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
2584 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2587 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever payload fits after the two headers. */
2590 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2591 memcpy(skb_put(skb, count), data, count);
2597 /* Continuation fragments (no L2CAP header) */
2598 frag = &skb_shinfo(skb)->frag_list;
2600 count = min_t(unsigned int, conn->mtu, len);
2602 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2606 memcpy(skb_put(*frag, count), data, count);
2611 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: report its type and length,
 * decode 1/2/4-byte values inline (by switch on opt->len — the case
 * labels are outside this extract) or return a pointer for larger
 * options. Returns the total option size consumed.
 */
2621 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2623 struct l2cap_conf_opt *opt = *ptr;
2626 len = L2CAP_CONF_OPT_SIZE + opt->len;
2634 *val = *((u8 *) opt->val);
2638 *val = get_unaligned_le16(opt->val);
2642 *val = get_unaligned_le32(opt->val);
/* Larger options are returned by reference, not by value. */
2646 *val = (unsigned long) opt->val;
2650 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * the cursor. Values of 1/2/4 bytes are written inline; anything larger
 * is memcpy'd from the pointer passed in @val.
 */
2654 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2656 struct l2cap_conf_opt *opt = *ptr;
2658 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2665 *((u8 *) opt->val) = val;
2669 put_unaligned_le16(val, opt->val);
2673 put_unaligned_le32(val, opt->val);
2677 memcpy(opt->val, (void *) val, len);
/* Advance the caller's write cursor past this option. */
2681 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters; the fields filled depend on ERTM vs
 * Streaming mode.
 */
2684 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2686 struct l2cap_conf_efs efs;
2688 switch (chan->mode) {
2689 case L2CAP_MODE_ERTM:
2690 efs.id = chan->local_id;
2691 efs.stype = chan->local_stype;
2692 efs.msdu = cpu_to_le16(chan->local_msdu);
2693 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2694 efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2695 efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2698 case L2CAP_MODE_STREAMING:
/* Streaming mode always advertises best-effort service. */
2700 efs.stype = L2CAP_SERV_BESTEFFORT;
2701 efs.msdu = cpu_to_le16(chan->local_msdu);
2702 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2711 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2712 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: if any received frames
 * are still unacknowledged, send an RR/RNR now rather than waiting for
 * more piggyback opportunities.
 */
2715 static void l2cap_ack_timeout(struct work_struct *work)
2717 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2721 BT_DBG("chan %p", chan);
2723 l2cap_chan_lock(chan);
2725 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2726 chan->last_acked_seq);
2729 l2cap_send_rr_or_rnr(chan, 0);
2731 l2cap_chan_unlock(chan);
/* Drop the reference taken when the timer was armed. */
2732 l2cap_chan_put(chan);
/* Reset all ERTM/Streaming sequence counters and, for ERTM channels,
 * initialise state machines, timers and the srej/retrans sequence lists.
 * Returns 0 or a negative errno from seq-list allocation.
 */
2735 int l2cap_ertm_init(struct l2cap_chan *chan)
2739 chan->next_tx_seq = 0;
2740 chan->expected_tx_seq = 0;
2741 chan->expected_ack_seq = 0;
2742 chan->unacked_frames = 0;
2743 chan->buffer_seq = 0;
2744 chan->frames_sent = 0;
2745 chan->last_acked_seq = 0;
2747 chan->sdu_last_frag = NULL;
2750 skb_queue_head_init(&chan->tx_q);
/* Streaming mode needs only the counters and TX queue above. */
2752 if (chan->mode != L2CAP_MODE_ERTM)
2755 chan->rx_state = L2CAP_RX_STATE_RECV;
2756 chan->tx_state = L2CAP_TX_STATE_XMIT;
2758 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2759 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2760 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2762 skb_queue_head_init(&chan->srej_q);
2764 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
/* If the second list fails, unwind the first. */
2768 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
2770 l2cap_seq_list_free(&chan->srej_list);
/* Select a channel mode: keep ERTM/Streaming only if the remote's
 * feature mask supports it, otherwise fall back to Basic mode.
 */
2775 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2778 case L2CAP_MODE_STREAMING:
2779 case L2CAP_MODE_ERTM:
2780 if (l2cap_mode_supported(mode, remote_feat_mask))
2784 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the peer advertising the EXT_WINDOW feature. */
2788 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2790 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the peer advertising the EXT_FLOW feature. */
2793 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2795 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Configure the TX window: enable the extended control field when the
 * requested window exceeds the default and EWS is supported; otherwise
 * clamp to the standard window size. ack_win tracks tx_win either way.
 */
2798 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2800 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2801 __l2cap_ews_supported(chan)) {
2802 /* use extended control field */
2803 set_bit(FLAG_EXT_CTRL, &chan->flags);
2804 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2806 chan->tx_win = min_t(u16, chan->tx_win,
2807 L2CAP_DEFAULT_TX_WINDOW);
2808 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2810 chan->ack_win = chan->tx_win;
/* Build a Configure Request for @chan into @data: MTU option (when not
 * default), then mode-specific RFC/EFS/FCS/EWS options for Basic, ERTM
 * or Streaming mode. Returns the request length (the final return is
 * outside this extract).
 */
2813 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2815 struct l2cap_conf_req *req = data;
2816 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2817 void *ptr = req->data;
2820 BT_DBG("chan %p", chan);
/* Only negotiate mode on the first request/response exchange. */
2822 if (chan->num_conf_req || chan->num_conf_rsp)
2825 switch (chan->mode) {
2826 case L2CAP_MODE_STREAMING:
2827 case L2CAP_MODE_ERTM:
2828 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2831 if (__l2cap_efs_supported(chan))
2832 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back to a mode the remote supports, if needed. */
2836 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2841 if (chan->imtu != L2CAP_DEFAULT_MTU)
2842 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2844 switch (chan->mode) {
2845 case L2CAP_MODE_BASIC:
/* Peers without ERTM/Streaming need no RFC option at all. */
2846 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2847 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2850 rfc.mode = L2CAP_MODE_BASIC;
2852 rfc.max_transmit = 0;
2853 rfc.retrans_timeout = 0;
2854 rfc.monitor_timeout = 0;
2855 rfc.max_pdu_size = 0;
2857 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2858 (unsigned long) &rfc);
2861 case L2CAP_MODE_ERTM:
2862 rfc.mode = L2CAP_MODE_ERTM;
2863 rfc.max_transmit = chan->max_tx;
2864 rfc.retrans_timeout = 0;
2865 rfc.monitor_timeout = 0;
/* MPS bounded by the HCI MTU minus worst-case header overhead. */
2867 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2868 L2CAP_EXT_HDR_SIZE -
2871 rfc.max_pdu_size = cpu_to_le16(size);
2873 l2cap_txwin_setup(chan);
2875 rfc.txwin_size = min_t(u16, chan->tx_win,
2876 L2CAP_DEFAULT_TX_WINDOW);
2878 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2879 (unsigned long) &rfc);
2881 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2882 l2cap_add_opt_efs(&ptr, chan);
2884 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to disable the FCS if both sides allow it. */
2887 if (chan->fcs == L2CAP_FCS_NONE ||
2888 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2889 chan->fcs = L2CAP_FCS_NONE;
2890 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2893 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2894 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2898 case L2CAP_MODE_STREAMING:
2899 l2cap_txwin_setup(chan);
2900 rfc.mode = L2CAP_MODE_STREAMING;
2902 rfc.max_transmit = 0;
2903 rfc.retrans_timeout = 0;
2904 rfc.monitor_timeout = 0;
2906 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2907 L2CAP_EXT_HDR_SIZE -
2910 rfc.max_pdu_size = cpu_to_le16(size);
2912 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2913 (unsigned long) &rfc);
2915 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2916 l2cap_add_opt_efs(&ptr, chan);
2918 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2921 if (chan->fcs == L2CAP_FCS_NONE ||
2922 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2923 chan->fcs = L2CAP_FCS_NONE;
2924 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2929 req->dcid = cpu_to_le16(chan->dcid);
2930 req->flags = __constant_cpu_to_le16(0);
2935 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2937 struct l2cap_conf_rsp *rsp = data;
2938 void *ptr = rsp->data;
2939 void *req = chan->conf_req;
2940 int len = chan->conf_len;
2941 int type, hint, olen;
2943 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2944 struct l2cap_conf_efs efs;
2946 u16 mtu = L2CAP_DEFAULT_MTU;
2947 u16 result = L2CAP_CONF_SUCCESS;
2950 BT_DBG("chan %p", chan);
2952 while (len >= L2CAP_CONF_OPT_SIZE) {
2953 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2955 hint = type & L2CAP_CONF_HINT;
2956 type &= L2CAP_CONF_MASK;
2959 case L2CAP_CONF_MTU:
2963 case L2CAP_CONF_FLUSH_TO:
2964 chan->flush_to = val;
2967 case L2CAP_CONF_QOS:
2970 case L2CAP_CONF_RFC:
2971 if (olen == sizeof(rfc))
2972 memcpy(&rfc, (void *) val, olen);
2975 case L2CAP_CONF_FCS:
2976 if (val == L2CAP_FCS_NONE)
2977 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2980 case L2CAP_CONF_EFS:
2982 if (olen == sizeof(efs))
2983 memcpy(&efs, (void *) val, olen);
2986 case L2CAP_CONF_EWS:
2988 return -ECONNREFUSED;
2990 set_bit(FLAG_EXT_CTRL, &chan->flags);
2991 set_bit(CONF_EWS_RECV, &chan->conf_state);
2992 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2993 chan->remote_tx_win = val;
3000 result = L2CAP_CONF_UNKNOWN;
3001 *((u8 *) ptr++) = type;
3006 if (chan->num_conf_rsp || chan->num_conf_req > 1)
3009 switch (chan->mode) {
3010 case L2CAP_MODE_STREAMING:
3011 case L2CAP_MODE_ERTM:
3012 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3013 chan->mode = l2cap_select_mode(rfc.mode,
3014 chan->conn->feat_mask);
3019 if (__l2cap_efs_supported(chan))
3020 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3022 return -ECONNREFUSED;
3025 if (chan->mode != rfc.mode)
3026 return -ECONNREFUSED;
3032 if (chan->mode != rfc.mode) {
3033 result = L2CAP_CONF_UNACCEPT;
3034 rfc.mode = chan->mode;
3036 if (chan->num_conf_rsp == 1)
3037 return -ECONNREFUSED;
3039 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3040 sizeof(rfc), (unsigned long) &rfc);
3043 if (result == L2CAP_CONF_SUCCESS) {
3044 /* Configure output options and let the other side know
3045 * which ones we don't like. */
3047 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3048 result = L2CAP_CONF_UNACCEPT;
3051 set_bit(CONF_MTU_DONE, &chan->conf_state);
3053 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
3056 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3057 efs.stype != L2CAP_SERV_NOTRAFIC &&
3058 efs.stype != chan->local_stype) {
3060 result = L2CAP_CONF_UNACCEPT;
3062 if (chan->num_conf_req >= 1)
3063 return -ECONNREFUSED;
3065 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3067 (unsigned long) &efs);
3069 /* Send PENDING Conf Rsp */
3070 result = L2CAP_CONF_PENDING;
3071 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3076 case L2CAP_MODE_BASIC:
3077 chan->fcs = L2CAP_FCS_NONE;
3078 set_bit(CONF_MODE_DONE, &chan->conf_state);
3081 case L2CAP_MODE_ERTM:
3082 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3083 chan->remote_tx_win = rfc.txwin_size;
3085 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3087 chan->remote_max_tx = rfc.max_transmit;
3089 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3091 L2CAP_EXT_HDR_SIZE -
3094 rfc.max_pdu_size = cpu_to_le16(size);
3095 chan->remote_mps = size;
3097 rfc.retrans_timeout =
3098 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3099 rfc.monitor_timeout =
3100 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3102 set_bit(CONF_MODE_DONE, &chan->conf_state);
3104 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3105 sizeof(rfc), (unsigned long) &rfc);
3107 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3108 chan->remote_id = efs.id;
3109 chan->remote_stype = efs.stype;
3110 chan->remote_msdu = le16_to_cpu(efs.msdu);
3111 chan->remote_flush_to =
3112 le32_to_cpu(efs.flush_to);
3113 chan->remote_acc_lat =
3114 le32_to_cpu(efs.acc_lat);
3115 chan->remote_sdu_itime =
3116 le32_to_cpu(efs.sdu_itime);
3117 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3118 sizeof(efs), (unsigned long) &efs);
3122 case L2CAP_MODE_STREAMING:
3123 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3125 L2CAP_EXT_HDR_SIZE -
3128 rfc.max_pdu_size = cpu_to_le16(size);
3129 chan->remote_mps = size;
3131 set_bit(CONF_MODE_DONE, &chan->conf_state);
3133 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3134 sizeof(rfc), (unsigned long) &rfc);
3139 result = L2CAP_CONF_UNACCEPT;
3141 memset(&rfc, 0, sizeof(rfc));
3142 rfc.mode = chan->mode;
3145 if (result == L2CAP_CONF_SUCCESS)
3146 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3148 rsp->scid = cpu_to_le16(chan->dcid);
3149 rsp->result = cpu_to_le16(result);
3150 rsp->flags = __constant_cpu_to_le16(0);
/* Parse a Configure Response from the peer ('rsp'/'len') and build the
 * follow-up Configure Request into 'data', updating *result as needed.
 * Returns -ECONNREFUSED when the peer's parameters are unacceptable.
 * NOTE(review): elided listing — option-loop switch framing and some
 * expressions are missing from this view.
 */
3155 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
3157 	struct l2cap_conf_req *req = data;
3158 	void *ptr = req->data;
3161 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3162 	struct l2cap_conf_efs efs;
3164 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3166 	while (len >= L2CAP_CONF_OPT_SIZE) {
3167 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3170 		case L2CAP_CONF_MTU:
			/* Enforce the spec minimum MTU; anything smaller is
			 * countered with L2CAP_DEFAULT_MIN_MTU. */
3171 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3172 				*result = L2CAP_CONF_UNACCEPT;
3173 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3176 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
3179 		case L2CAP_CONF_FLUSH_TO:
3180 			chan->flush_to = val;
3181 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3185 		case L2CAP_CONF_RFC:
3186 			if (olen == sizeof(rfc))
3187 				memcpy(&rfc, (void *)val, olen);
			/* State-2 devices may not change mode mid-negotiation. */
3189 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3190 			    rfc.mode != chan->mode)
3191 				return -ECONNREFUSED;
3195 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3196 					   sizeof(rfc), (unsigned long) &rfc);
3199 		case L2CAP_CONF_EWS:
			/* Never grow our ack window beyond what we offered. */
3200 			chan->ack_win = min_t(u16, val, chan->ack_win);
3201 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3205 		case L2CAP_CONF_EFS:
3206 			if (olen == sizeof(efs))
3207 				memcpy(&efs, (void *)val, olen);
3209 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3210 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3211 			    efs.stype != chan->local_stype)
3212 				return -ECONNREFUSED;
3214 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3215 					   sizeof(efs), (unsigned long) &efs);
	/* Basic mode cannot be renegotiated into another mode. */
3220 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3221 		return -ECONNREFUSED;
3223 	chan->mode = rfc.mode;
	/* On success/pending, latch the negotiated ERTM/streaming timers
	 * and PDU sizes into the channel. */
3225 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3227 		case L2CAP_MODE_ERTM:
3228 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3229 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3230 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3231 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3232 				chan->ack_win = min_t(u16, chan->ack_win,
3235 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3236 				chan->local_msdu = le16_to_cpu(efs.msdu);
3237 				chan->local_sdu_itime =
3238 					le32_to_cpu(efs.sdu_itime);
3239 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3240 				chan->local_flush_to =
3241 					le32_to_cpu(efs.flush_to);
3245 		case L2CAP_MODE_STREAMING:
3246 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3250 	req->dcid   = cpu_to_le16(chan->dcid);
3251 	req->flags  = __constant_cpu_to_le16(0);
/* Build a minimal Configure Response header (scid = our dcid, plus the
 * given result and flags) into 'data'. The return value (elided from this
 * view) is presumably the response length — TODO confirm. */
3256 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
3258 	struct l2cap_conf_rsp *rsp = data;
3259 	void *ptr = rsp->data;
3261 	BT_DBG("chan %p", chan);
3263 	rsp->scid   = cpu_to_le16(chan->dcid);
3264 	rsp->result = cpu_to_le16(result);
3265 	rsp->flags  = cpu_to_le16(flags);
/* Send the deferred Connect Response (CR_SUCCESS / CS_NO_INFO) for a
 * channel whose acceptance was delayed, then kick off configuration with
 * our first Configure Request unless one was already sent
 * (CONF_REQ_SENT test-and-set guards against duplicates). */
3270 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3272 	struct l2cap_conn_rsp rsp;
3273 	struct l2cap_conn *conn = chan->conn;
3276 	rsp.scid = cpu_to_le16(chan->dcid);
3277 	rsp.dcid = cpu_to_le16(chan->scid);
3278 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
3279 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3280 	l2cap_send_cmd(conn, chan->ident,
3281 		       L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3283 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3286 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3287 		       l2cap_build_conf_req(chan, buf), buf);
3288 	chan->num_conf_req++;
/* Extract final RFC (and EWS) parameters from a successful Configure
 * Response and commit them to the channel. Only meaningful for ERTM and
 * streaming modes; basic mode returns early. */
3291 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3295 	/* Use sane default values in case a misbehaving remote device
3296 	 * did not send an RFC or extended window size option.
3298 	u16 txwin_ext = chan->ack_win;
3299 	struct l2cap_conf_rfc rfc = {
3301 		.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3302 		.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3303 		.max_pdu_size = cpu_to_le16(chan->imtu),
3304 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3307 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3309 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
	/* Overwrite the defaults with whatever options the peer supplied. */
3312 	while (len >= L2CAP_CONF_OPT_SIZE) {
3313 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3316 		case L2CAP_CONF_RFC:
3317 			if (olen == sizeof(rfc))
3318 				memcpy(&rfc, (void *)val, olen);
3320 		case L2CAP_CONF_EWS:
	/* Commit negotiated timers/MPS; the ack window source depends on
	 * whether extended control fields are in use. */
3327 	case L2CAP_MODE_ERTM:
3328 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3329 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3330 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3331 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3332 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3334 			chan->ack_win = min_t(u16, chan->ack_win,
3337 	case L2CAP_MODE_STREAMING:
3338 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it answers our outstanding
 * Information Request (matching ident), abandon the feature-mask probe,
 * mark the info exchange done and resume connection setup. */
3342 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3344 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3346 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3349 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3350 	    cmd->ident == conn->info_ident) {
3351 		cancel_delayed_work(&conn->info_timer);
3353 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3354 		conn->info_ident = 0;
3356 		l2cap_conn_start(conn);
/* Handle an incoming Connect Request: look up a listening channel for the
 * PSM, apply security checks, create and register the new child channel,
 * and answer with a Connect Response whose result/status reflect the
 * security/defer state. May also trigger the feature-mask Information
 * exchange and the first Configure Request.
 * NOTE(review): elided listing — several lock acquisitions, error labels
 * and sk setup lines are missing from this view.
 */
3362 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3364 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3365 	struct l2cap_conn_rsp rsp;
3366 	struct l2cap_chan *chan = NULL, *pchan;
3367 	struct sock *parent, *sk = NULL;
3368 	int result, status = L2CAP_CS_NO_INFO;
3370 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3371 	__le16 psm = req->psm;
3373 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3375 	/* Check if we have socket listening on psm */
3376 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src, conn->dst);
3378 		result = L2CAP_CR_BAD_PSM;
3384 	mutex_lock(&conn->chan_lock);
3387 	/* Check if the ACL is secure enough (if not SDP) */
3388 	if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
3389 	    !hci_conn_check_link_mode(conn->hcon)) {
3390 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3391 		result = L2CAP_CR_SEC_BLOCK;
3395 	result = L2CAP_CR_NO_MEM;
3397 	/* Check if we already have channel with that dcid */
3398 	if (__l2cap_get_chan_by_dcid(conn, scid))
3401 	chan = pchan->ops->new_connection(pchan);
3407 	hci_conn_hold(conn->hcon);
3409 	bacpy(&bt_sk(sk)->src, conn->src);
3410 	bacpy(&bt_sk(sk)->dst, conn->dst);
3414 	bt_accept_enqueue(parent, sk);
3416 	__l2cap_chan_add(conn, chan);
3420 	__set_chan_timer(chan, sk->sk_sndtimeo);
3422 	chan->ident = cmd->ident;
	/* Decide result/status: only proceed to BT_CONFIG if the info
	 * exchange is done AND security passes AND setup is not deferred. */
3424 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3425 		if (l2cap_chan_check_security(chan)) {
3426 			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
3427 				__l2cap_state_change(chan, BT_CONNECT2);
3428 				result = L2CAP_CR_PEND;
3429 				status = L2CAP_CS_AUTHOR_PEND;
3430 				parent->sk_data_ready(parent, 0);
3432 				__l2cap_state_change(chan, BT_CONFIG);
3433 				result = L2CAP_CR_SUCCESS;
3434 				status = L2CAP_CS_NO_INFO;
3437 			__l2cap_state_change(chan, BT_CONNECT2);
3438 			result = L2CAP_CR_PEND;
3439 			status = L2CAP_CS_AUTHEN_PEND;
3442 		__l2cap_state_change(chan, BT_CONNECT2);
3443 		result = L2CAP_CR_PEND;
3444 		status = L2CAP_CS_NO_INFO;
3448 	release_sock(parent);
3449 	mutex_unlock(&conn->chan_lock);
3452 	rsp.scid   = cpu_to_le16(scid);
3453 	rsp.dcid   = cpu_to_le16(dcid);
3454 	rsp.result = cpu_to_le16(result);
3455 	rsp.status = cpu_to_le16(status);
3456 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
	/* Pending with no info: we still need the peer's feature mask, so
	 * start the Information Request exchange now. */
3458 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3459 		struct l2cap_info_req info;
3460 		info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3462 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3463 		conn->info_ident = l2cap_get_ident(conn);
3465 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3467 		l2cap_send_cmd(conn, conn->info_ident,
3468 			       L2CAP_INFO_REQ, sizeof(info), &info);
	/* On immediate success, begin configuration right away. */
3471 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3472 	    result == L2CAP_CR_SUCCESS) {
3474 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3475 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3476 			       l2cap_build_conf_req(chan, buf), buf);
3477 		chan->num_conf_req++;
/* Handle an incoming Connect Response. Locate the channel by scid (or by
 * ident when scid is absent), then: SUCCESS -> enter BT_CONFIG and send
 * our first Configure Request; PEND -> mark CONF_CONNECT_PEND; any other
 * result -> tear the channel down with ECONNREFUSED. */
3485 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3486 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3487 	u16 scid, dcid, result, status;
3487 	struct l2cap_chan *chan;
3491 	scid   = __le16_to_cpu(rsp->scid);
3492 	dcid   = __le16_to_cpu(rsp->dcid);
3493 	result = __le16_to_cpu(rsp->result);
3494 	status = __le16_to_cpu(rsp->status);
3496 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3497 	       dcid, scid, result, status);
3499 	mutex_lock(&conn->chan_lock);
3502 	chan = __l2cap_get_chan_by_scid(conn, scid);
3508 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3517 	l2cap_chan_lock(chan);
3520 	case L2CAP_CR_SUCCESS:
3521 		l2cap_state_change(chan, BT_CONFIG);
3524 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
		/* Avoid sending a duplicate Configure Request. */
3526 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3529 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3530 			       l2cap_build_conf_req(chan, req), req);
3531 		chan->num_conf_req++;
3535 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3539 		l2cap_chan_del(chan, ECONNREFUSED);
3543 	l2cap_chan_unlock(chan);
3546 	mutex_unlock(&conn->chan_lock);
/* Choose the channel FCS setting after configuration: FCS is only used in
 * ERTM/streaming, and defaults to CRC16 unless the peer explicitly sent
 * FCS_NONE (CONF_NO_FCS_RECV). */
3551 static inline void set_default_fcs(struct l2cap_chan *chan)
3553 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3556 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3557 		chan->fcs = L2CAP_FCS_NONE;
3558 	else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
3559 		chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request. Accumulates (possibly
 * continuation-flagged) option data into chan->conf_req, and once the
 * request is complete, parses it, sends the Configure Response, and if
 * both directions are configured, initializes ERTM and marks the channel
 * ready.
 * NOTE(review): elided listing — error/unlock paths and some length
 * checks are missing from this view.
 */
3562 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3564 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3567 	struct l2cap_chan *chan;
3570 	dcid  = __le16_to_cpu(req->dcid);
3571 	flags = __le16_to_cpu(req->flags);
3573 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3575 	chan = l2cap_get_chan_by_scid(conn, dcid);
	/* Config is only valid in BT_CONFIG/BT_CONNECT2; otherwise reject
	 * with an invalid-CID Command Reject. */
3579 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
3580 		struct l2cap_cmd_rej_cid rej;
3582 		rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
3583 		rej.scid = cpu_to_le16(chan->scid);
3584 		rej.dcid = cpu_to_le16(chan->dcid);
3586 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3591 	/* Reject if config buffer is too small. */
3592 	len = cmd_len - sizeof(*req);
3593 	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
3594 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3595 			       l2cap_build_conf_rsp(chan, rsp,
3596 			       L2CAP_CONF_REJECT, flags), rsp);
3601 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
3602 	chan->conf_len += len;
3604 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
3605 		/* Incomplete config. Send empty response. */
3606 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3607 			       l2cap_build_conf_rsp(chan, rsp,
3608 			       L2CAP_CONF_SUCCESS, flags), rsp);
3612 	/* Complete config. */
3613 	len = l2cap_parse_conf_req(chan, rsp);
3615 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
3619 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3620 	chan->num_conf_rsp++;
3622 	/* Reset config buffer. */
3625 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
	/* Both input and output configured: finish channel bring-up. */
3628 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
3629 		set_default_fcs(chan);
3631 		if (chan->mode == L2CAP_MODE_ERTM ||
3632 		    chan->mode == L2CAP_MODE_STREAMING)
3633 			err = l2cap_ertm_init(chan);
3636 			l2cap_send_disconn_req(chan->conn, chan, -err);
3638 			l2cap_chan_ready(chan);
3643 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
3645 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3646 			       l2cap_build_conf_req(chan, buf), buf);
3647 		chan->num_conf_req++;
3650 	/* Got Conf Rsp PENDING from remote side and asume we sent
3651 	   Conf Rsp PENDING in the code above */
3652 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
3653 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3655 		/* check compatibility */
3657 		clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3658 		set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3660 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3661 			       l2cap_build_conf_rsp(chan, rsp,
3662 			       L2CAP_CONF_SUCCESS, flags), rsp);
3666 	l2cap_chan_unlock(chan);
/* Handle an incoming Configure Response. SUCCESS commits the negotiated
 * RFC parameters; PENDING may trigger a compatibility re-check; UNACCEPT
 * retries with adjusted options up to L2CAP_CONF_MAX_CONF_RSP times;
 * anything else disconnects. When both sides are done, finishes channel
 * setup (FCS, ERTM init, ready).
 * NOTE(review): elided listing — some error paths/labels missing here.
 */
3672 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3673 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3674 	u16 scid, flags, result;
3674 	struct l2cap_chan *chan;
3675 	int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
3678 	scid   = __le16_to_cpu(rsp->scid);
3679 	flags  = __le16_to_cpu(rsp->flags);
3680 	result = __le16_to_cpu(rsp->result);
3682 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
3685 	chan = l2cap_get_chan_by_scid(conn, scid);
3690 	case L2CAP_CONF_SUCCESS:
3691 		l2cap_conf_rfc_get(chan, rsp->data, len);
3692 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3695 	case L2CAP_CONF_PENDING:
3696 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
3698 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
3701 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3704 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
3708 			/* check compatibility */
3710 			clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3711 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3713 			l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3714 				       l2cap_build_conf_rsp(chan, buf,
3715 				       L2CAP_CONF_SUCCESS, 0x0000), buf);
3719 	case L2CAP_CONF_UNACCEPT:
3720 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			/* Refuse over-length rejection payloads. */
3723 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3724 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
3728 			/* throw out any old stored conf requests */
3729 			result = L2CAP_CONF_SUCCESS;
3730 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3733 				l2cap_send_disconn_req(conn, chan, ECONNRESET);
3737 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
3738 				       L2CAP_CONF_REQ, len, req);
3739 			chan->num_conf_req++;
3740 			if (result != L2CAP_CONF_SUCCESS)
		/* default: hard failure — error out and disconnect. */
3746 		l2cap_chan_set_err(chan, ECONNRESET);
3748 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3749 		l2cap_send_disconn_req(conn, chan, ECONNRESET);
	/* Continuation-flagged responses are not final; wait for more. */
3753 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
3756 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
3758 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3759 		set_default_fcs(chan);
3761 		if (chan->mode == L2CAP_MODE_ERTM ||
3762 		    chan->mode == L2CAP_MODE_STREAMING)
3763 			err = l2cap_ertm_init(chan);
3766 			l2cap_send_disconn_req(chan->conn, chan, -err);
3768 			l2cap_chan_ready(chan);
3772 	l2cap_chan_unlock(chan);
/* Handle an incoming Disconnect Request: acknowledge with a Disconnect
 * Response, shut the socket down, and delete the channel. The hold/put
 * pair keeps the channel alive across l2cap_chan_del() so ops->close()
 * can still be called safely after unlocking. */
3776 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3778 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3779 	struct l2cap_disconn_rsp rsp;
3781 	struct l2cap_chan *chan;
3784 	scid = __le16_to_cpu(req->scid);
3785 	dcid = __le16_to_cpu(req->dcid);
3787 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3789 	mutex_lock(&conn->chan_lock);
3791 	chan = __l2cap_get_chan_by_scid(conn, dcid);
3793 		mutex_unlock(&conn->chan_lock);
3797 	l2cap_chan_lock(chan);
3801 	rsp.dcid = cpu_to_le16(chan->scid);
3802 	rsp.scid = cpu_to_le16(chan->dcid);
3803 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3806 	sk->sk_shutdown = SHUTDOWN_MASK;
3809 	l2cap_chan_hold(chan);
3810 	l2cap_chan_del(chan, ECONNRESET);
3812 	l2cap_chan_unlock(chan);
3814 	chan->ops->close(chan);
3815 	l2cap_chan_put(chan);
3817 	mutex_unlock(&conn->chan_lock);
/* Handle an incoming Disconnect Response: the peer confirmed our
 * disconnect, so delete the channel with no error (0). Same hold/put
 * lifetime dance as the request handler above. */
3822 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3824 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3826 	struct l2cap_chan *chan;
3828 	scid = __le16_to_cpu(rsp->scid);
3829 	dcid = __le16_to_cpu(rsp->dcid);
3831 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3833 	mutex_lock(&conn->chan_lock);
3835 	chan = __l2cap_get_chan_by_scid(conn, scid);
3837 		mutex_unlock(&conn->chan_lock);
3841 	l2cap_chan_lock(chan);
3843 	l2cap_chan_hold(chan);
3844 	l2cap_chan_del(chan, 0);
3846 	l2cap_chan_unlock(chan);
3848 	chan->ops->close(chan);
3849 	l2cap_chan_put(chan);
3851 	mutex_unlock(&conn->chan_lock);
/* Handle an incoming Information Request. Answers FEAT_MASK with our
 * feature mask (ERTM/streaming always; extended flow/window flags appear
 * conditional on elided code), FIXED_CHAN with the fixed-channel bitmap
 * (A2MP bit toggled by an elided condition), and anything else with
 * IR_NOTSUPP. */
3856 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3858 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3861 	type = __le16_to_cpu(req->type);
3863 	BT_DBG("type 0x%4.4x", type);
3865 	if (type == L2CAP_IT_FEAT_MASK) {
3867 		u32 feat_mask = l2cap_feat_mask;
3868 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3869 		rsp->type   = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
3870 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3872 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3875 			feat_mask |= L2CAP_FEAT_EXT_FLOW
3876 				| L2CAP_FEAT_EXT_WINDOW;
3878 		put_unaligned_le32(feat_mask, rsp->data);
3879 		l2cap_send_cmd(conn, cmd->ident,
3880 			       L2CAP_INFO_RSP, sizeof(buf), buf);
3881 	} else if (type == L2CAP_IT_FIXED_CHAN) {
3883 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3886 			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3888 			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3890 		rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3891 		rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
3892 		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3893 		l2cap_send_cmd(conn, cmd->ident,
3894 			       L2CAP_INFO_RSP, sizeof(buf), buf);
3896 		struct l2cap_info_rsp rsp;
3897 		rsp.type   = cpu_to_le16(type);
3898 		rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
3899 		l2cap_send_cmd(conn, cmd->ident,
3900 			       L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response. Validates the ident against
 * our outstanding request, cancels the info timer, and either chains into
 * a FIXED_CHAN request (when the peer advertises FEAT_FIXED_CHAN) or
 * marks the info exchange done and resumes pending connections. */
3906 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3908 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3911 	type   = __le16_to_cpu(rsp->type);
3912 	result = __le16_to_cpu(rsp->result);
3914 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3916 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
3917 	if (cmd->ident != conn->info_ident ||
3918 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3921 	cancel_delayed_work(&conn->info_timer);
3923 	if (result != L2CAP_IR_SUCCESS) {
3924 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3925 		conn->info_ident = 0;
3927 		l2cap_conn_start(conn);
3933 	case L2CAP_IT_FEAT_MASK:
3934 		conn->feat_mask = get_unaligned_le32(rsp->data);
		/* Peer supports fixed channels: ask which ones before
		 * declaring the info exchange complete. */
3936 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3937 			struct l2cap_info_req req;
3938 			req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3940 			conn->info_ident = l2cap_get_ident(conn);
3942 			l2cap_send_cmd(conn, conn->info_ident,
3943 				       L2CAP_INFO_REQ, sizeof(req), &req);
3945 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3946 			conn->info_ident = 0;
3948 			l2cap_conn_start(conn);
3952 	case L2CAP_IT_FIXED_CHAN:
3953 		conn->fixed_chan_mask = rsp->data[0];
3954 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3955 		conn->info_ident = 0;
3957 		l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request. Currently a placeholder that
 * validates the command length and always rejects with CR_NO_MEM. */
3964 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3965 					   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3968 	struct l2cap_create_chan_req *req = data;
3969 	struct l2cap_create_chan_rsp rsp;
3972 	if (cmd_len != sizeof(*req))
3978 	psm = le16_to_cpu(req->psm);
3979 	scid = le16_to_cpu(req->scid);
3981 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
3983 	/* Placeholder: Always reject */
3985 	rsp.scid = cpu_to_le16(scid);
3986 	rsp.result = __constant_cpu_to_le16(L2CAP_CR_NO_MEM);
3987 	rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
3989 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response is handled identically to a regular
 * Connect Response. */
3995 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3996 					   struct l2cap_cmd_hdr *cmd, void *data)
3998 	BT_DBG("conn %p", conn);
4000 	return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response with the given icid/result, reusing the
 * request's ident. */
4003 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4004 				     u16 icid, u16 result)
4006 	struct l2cap_move_chan_rsp rsp;
4008 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4010 	rsp.icid = cpu_to_le16(icid);
4011 	rsp.result = cpu_to_le16(result);
4013 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm with a fresh ident, remembering that ident
 * on the channel so the Confirm Response can be matched back to it. */
4016 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4017 				     struct l2cap_chan *chan,
4018 				     u16 icid, u16 result)
4020 	struct l2cap_move_chan_cfm cfm;
4023 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4025 	ident = l2cap_get_ident(conn);
4027 		chan->ident = ident;
4029 	cfm.icid = cpu_to_le16(icid);
4030 	cfm.result = cpu_to_le16(result);
4032 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirm with its Confirm Response. */
4035 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4038 	struct l2cap_move_chan_cfm_rsp rsp;
4040 	BT_DBG("icid 0x%4.4x", icid);
4042 	rsp.icid = cpu_to_le16(icid);
4043 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request. Placeholder implementation: after a
 * length check it always refuses with MR_NOT_ALLOWED. */
4046 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4047 					 struct l2cap_cmd_hdr *cmd,
4048 					 u16 cmd_len, void *data)
4050 	struct l2cap_move_chan_req *req = data;
4052 	u16 result = L2CAP_MR_NOT_ALLOWED;
4054 	if (cmd_len != sizeof(*req))
4057 	icid = le16_to_cpu(req->icid);
4059 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4064 	/* Placeholder: Always refuse */
4065 	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response. Placeholder: always answers with an
 * UNCONFIRMED Move Channel Confirm. */
4070 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
4071 					 struct l2cap_cmd_hdr *cmd,
4072 					 u16 cmd_len, void *data)
4074 	struct l2cap_move_chan_rsp *rsp = data;
4077 	if (cmd_len != sizeof(*rsp))
4080 	icid = le16_to_cpu(rsp->icid);
4081 	result = le16_to_cpu(rsp->result);
4083 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4085 	/* Placeholder: Always unconfirmed */
4086 	l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm: just acknowledge it with the Confirm
 * Response (no state tracking in this placeholder). */
4091 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
4092 					     struct l2cap_cmd_hdr *cmd,
4093 					     u16 cmd_len, void *data)
4095 	struct l2cap_move_chan_cfm *cfm = data;
4098 	if (cmd_len != sizeof(*cfm))
4101 	icid = le16_to_cpu(cfm->icid);
4102 	result = le16_to_cpu(cfm->result);
4104 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
4106 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response: length-check and log only. */
4111 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
4112 						 struct l2cap_cmd_hdr *cmd,
4113 						 u16 cmd_len, void *data)
4115 	struct l2cap_move_chan_cfm_rsp *rsp = data;
4118 	if (cmd_len != sizeof(*rsp))
4121 	icid = le16_to_cpu(rsp->icid);
4123 	BT_DBG("icid 0x%4.4x", icid);
/* Validate LE connection parameters against the spec-mandated ranges:
 * interval 6..3200 with min <= max, supervision timeout 10..3200, timeout
 * strictly greater than interval*8, and latency <= 499 and below the
 * derived maximum. */
4128 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
4133 	if (min > max || min < 6 || max > 3200)
4136 	if (to_multiplier < 10 || to_multiplier > 3200)
4139 	if (max >= to_multiplier * 8)
4142 	max_latency = (to_multiplier * 8 / max) - 1;
4143 	if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request: only the master may
 * process it; validate the parameters, send ACCEPTED or REJECTED, and on
 * acceptance push the update to the controller via hci_le_conn_update. */
4149 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
4150 					      struct l2cap_cmd_hdr *cmd, u8 *data)
4152 	struct hci_conn *hcon = conn->hcon;
4153 	struct l2cap_conn_param_update_req *req;
4154 	struct l2cap_conn_param_update_rsp rsp;
4155 	u16 min, max, latency, to_multiplier, cmd_len;
4158 	if (!(hcon->link_mode & HCI_LM_MASTER))
4161 	cmd_len = __le16_to_cpu(cmd->len);
4162 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
4165 	req = (struct l2cap_conn_param_update_req *) data;
4166 	min		= __le16_to_cpu(req->min);
4167 	max		= __le16_to_cpu(req->max);
4168 	latency		= __le16_to_cpu(req->latency);
4169 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
4171 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4172 	       min, max, latency, to_multiplier);
4174 	memset(&rsp, 0, sizeof(rsp));
4176 	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
4178 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
4180 		rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
4182 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
4186 		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler by opcode. Echo
 * Requests are answered inline; unknown opcodes are logged (and, per the
 * elided default branch, presumably yield an error for the caller's
 * Command Reject path — TODO confirm). */
4191 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
4192 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4196 	switch (cmd->code) {
4197 	case L2CAP_COMMAND_REJ:
4198 		l2cap_command_rej(conn, cmd, data);
4201 	case L2CAP_CONN_REQ:
4202 		err = l2cap_connect_req(conn, cmd, data);
4205 	case L2CAP_CONN_RSP:
4206 		err = l2cap_connect_rsp(conn, cmd, data);
4209 	case L2CAP_CONF_REQ:
4210 		err = l2cap_config_req(conn, cmd, cmd_len, data);
4213 	case L2CAP_CONF_RSP:
4214 		err = l2cap_config_rsp(conn, cmd, data);
4217 	case L2CAP_DISCONN_REQ:
4218 		err = l2cap_disconnect_req(conn, cmd, data);
4221 	case L2CAP_DISCONN_RSP:
4222 		err = l2cap_disconnect_rsp(conn, cmd, data);
4225 	case L2CAP_ECHO_REQ:
4226 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
4229 	case L2CAP_ECHO_RSP:
4232 	case L2CAP_INFO_REQ:
4233 		err = l2cap_information_req(conn, cmd, data);
4236 	case L2CAP_INFO_RSP:
4237 		err = l2cap_information_rsp(conn, cmd, data);
4240 	case L2CAP_CREATE_CHAN_REQ:
4241 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
4244 	case L2CAP_CREATE_CHAN_RSP:
4245 		err = l2cap_create_channel_rsp(conn, cmd, data);
4248 	case L2CAP_MOVE_CHAN_REQ:
4249 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
4252 	case L2CAP_MOVE_CHAN_RSP:
4253 		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
4256 	case L2CAP_MOVE_CHAN_CFM:
4257 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
4260 	case L2CAP_MOVE_CHAN_CFM_RSP:
4261 		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
4265 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command. Only the Connection Parameter
 * Update Request is actively handled; Command Reject and Update Response
 * fall through, and unknown opcodes are logged. */
4273 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
4274 				   struct l2cap_cmd_hdr *cmd, u8 *data)
4276 	switch (cmd->code) {
4277 	case L2CAP_COMMAND_REJ:
4280 	case L2CAP_CONN_PARAM_UPDATE_REQ:
4281 		return l2cap_conn_param_update_req(conn, cmd, data);
4283 	case L2CAP_CONN_PARAM_UPDATE_RSP:
4287 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process the signaling channel payload of an incoming frame: iterate
 * over packed command headers, sanity-check each command's length and
 * ident, route to the LE or BR/EDR dispatcher based on link type, and
 * send a Command Reject when a handler fails. */
4292 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
4293 				     struct sk_buff *skb)
4295 	u8 *data = skb->data;
4297 	struct l2cap_cmd_hdr cmd;
4300 	l2cap_raw_recv(conn, skb);
4302 	while (len >= L2CAP_CMD_HDR_SIZE) {
4304 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
4305 		data += L2CAP_CMD_HDR_SIZE;
4306 		len  -= L2CAP_CMD_HDR_SIZE;
4308 		cmd_len = le16_to_cpu(cmd.len);
4310 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
		/* A command longer than the remaining payload, or with a
		 * zero ident, means the frame is corrupt — stop parsing. */
4312 		if (cmd_len > len || !cmd.ident) {
4313 			BT_DBG("corrupted command");
4317 		if (conn->hcon->type == LE_LINK)
4318 			err = l2cap_le_sig_cmd(conn, &cmd, data);
4320 			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
4323 			struct l2cap_cmd_rej_unk rej;
4325 			BT_ERR("Wrong link type (%d)", err);
4327 			/* FIXME: Map err to a valid reason */
4328 			rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
4329 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of a received frame when the channel uses
 * FCS_CRC16: trim the FCS from the skb, then compare the received value
 * against a CRC computed over the (enhanced or extended) header plus
 * payload. Non-matching FCS presumably returns an error via the elided
 * tail — TODO confirm. */
4339 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
4341 	u16 our_fcs, rcv_fcs;
4344 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4345 		hdr_size = L2CAP_EXT_HDR_SIZE;
4347 		hdr_size = L2CAP_ENH_HDR_SIZE;
4349 	if (chan->fcs == L2CAP_FCS_CRC16) {
4350 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
4351 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
4352 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
4354 		if (our_fcs != rcv_fcs)
/* Poll the peer with the F-bit: send RNR when we are locally busy,
 * otherwise flush pending I-frames (restarting the retransmission timer
 * if frames are unacked), and fall back to an RR S-frame if nothing else
 * carried the F-bit. */
4360 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
4362 	struct l2cap_ctrl control;
4364 	BT_DBG("chan %p", chan);
4366 	memset(&control, 0, sizeof(control));
4369 	control.reqseq = chan->buffer_seq;
4370 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
4372 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4373 		control.super = L2CAP_SUPER_RNR;
4374 		l2cap_send_sframe(chan, &control);
4377 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4378 	    chan->unacked_frames > 0)
4379 		__set_retrans_timer(chan);
4381 	/* Send pending iframes */
4382 	l2cap_ertm_send(chan);
4384 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
4385 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
4386 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
4389 		control.super = L2CAP_SUPER_RR;
4390 		l2cap_send_sframe(chan, &control);
/* Append 'new_frag' to skb's frag_list, updating the cached tail pointer
 * and the parent skb's length accounting. */
4394 static void append_skb_frag(struct sk_buff *skb,
4395 			    struct sk_buff *new_frag, struct sk_buff **last_frag)
4397 	/* skb->len reflects data in skb as well as all fragments
4398 	 * skb->data_len reflects only data in fragments
4400 	if (!skb_has_frag_list(skb))
4401 		skb_shinfo(skb)->frag_list = new_frag;
4403 	new_frag->next = NULL;
4405 	(*last_frag)->next = new_frag;
4406 	*last_frag = new_frag;
4408 	skb->len += new_frag->len;
4409 	skb->data_len += new_frag->len;
4410 	skb->truesize += new_frag->truesize;
/* Reassemble a (possibly segmented) SDU from received I-frames based on
 * the SAR bits and deliver it to the channel owner via chan->ops->recv().
 * chan->sdu accumulates fragments between SAR_START and SAR_END;
 * chan->sdu_len is the total length announced in the SAR_START frame.
 * NOTE(review): several error/cleanup lines are elided in this extract;
 * comments below describe only the visible code.
 */
4413 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
4414 struct l2cap_ctrl *control)
4418 switch (control->sar) {
4419 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame — hand it straight up */
4423 err = chan->ops->recv(chan, skb);
4426 case L2CAP_SAR_START:
/* First fragment carries the total SDU length prefix */
4430 chan->sdu_len = get_unaligned_le16(skb->data);
4431 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Announced SDU larger than our MTU — reject (error path elided) */
4433 if (chan->sdu_len > chan->imtu) {
4438 if (skb->len >= chan->sdu_len)
4442 chan->sdu_last_frag = skb;
4448 case L2CAP_SAR_CONTINUE:
4452 append_skb_frag(chan->sdu, skb,
4453 &chan->sdu_last_frag);
/* Middle fragment must not complete the SDU yet */
4456 if (chan->sdu->len >= chan->sdu_len)
4466 append_skb_frag(chan->sdu, skb,
4467 &chan->sdu_last_frag);
/* Final fragment: accumulated length must match the announced length */
4470 if (chan->sdu->len != chan->sdu_len)
4473 err = chan->ops->recv(chan, chan->sdu);
4476 /* Reassembly complete */
4478 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU and reset reassembly state */
4486 kfree_skb(chan->sdu);
4488 chan->sdu_last_frag = NULL;
/* Feed a local-busy transition (e.g. receive buffer filled or drained)
 * into the ERTM TX state machine.  No-op for non-ERTM modes.
 */
4495 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
4499 if (chan->mode != L2CAP_MODE_ERTM)
4502 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
4503 l2cap_tx(chan, NULL, NULL, event);
/* Drain the SREJ hold queue: deliver consecutively-sequenced frames to
 * reassembly, advancing buffer_seq, until a gap is found or local busy
 * is asserted.  When the queue empties, return to the RECV state and
 * ack the peer.
 */
4506 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
4509 /* Pass sequential frames to l2cap_reassemble_sdu()
4510 * until a gap is encountered.
4513 BT_DBG("chan %p", chan);
4515 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4516 struct sk_buff *skb;
4517 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4518 chan->buffer_seq, skb_queue_len(&chan->srej_q));
/* Next in-order frame; loop exit on miss is elided in this extract */
4520 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
4525 skb_unlink(skb, &chan->srej_q);
4526 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4527 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
/* All out-of-order frames consumed: resume normal reception */
4532 if (skb_queue_empty(&chan->srej_q)) {
4533 chan->rx_state = L2CAP_RX_STATE_RECV;
4534 l2cap_send_ack(chan);
/* Handle a received SREJ S-frame: retransmit the single frame the peer
 * asked for (control->reqseq), enforcing the max_tx retry limit and the
 * P/F-bit bookkeeping.  An SREJ for a never-sent sequence number, or a
 * frame past its retry limit, forces a disconnect.
 */
4540 static void l2cap_handle_srej(struct l2cap_chan *chan,
4541 struct l2cap_ctrl *control)
4543 struct sk_buff *skb;
4545 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq refers to a frame never transmitted */
4547 if (control->reqseq == chan->next_tx_seq) {
4548 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4549 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4553 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
4556 BT_DBG("Seq %d not available for retransmission",
/* Retry limit reached for this frame (max_tx == 0 means unlimited) */
4561 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
4562 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4563 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4567 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4569 if (control->poll) {
4570 l2cap_pass_to_tx(chan, control);
/* Poll set: retransmission must carry the F-bit in response */
4572 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4573 l2cap_retransmit(chan, control);
4574 l2cap_ertm_send(chan);
4576 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4577 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4578 chan->srej_save_reqseq = control->reqseq;
4581 l2cap_pass_to_tx_fbit(chan, control);
/* F-bit set: only retransmit if this is not the SREJ we already
 * acted on (srej_save_reqseq / CONN_SREJ_ACT pair tracks that).
 */
4583 if (control->final) {
4584 if (chan->srej_save_reqseq != control->reqseq ||
4585 !test_and_clear_bit(CONN_SREJ_ACT,
4587 l2cap_retransmit(chan, control);
4589 l2cap_retransmit(chan, control);
4590 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
4591 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4592 chan->srej_save_reqseq = control->reqseq;
/* Handle a received REJ S-frame: retransmit everything from
 * control->reqseq onward, with the same sanity checks as SREJ handling
 * (never-sent reqseq or exhausted retry budget → disconnect).
 */
4598 static void l2cap_handle_rej(struct l2cap_chan *chan,
4599 struct l2cap_ctrl *control)
4601 struct sk_buff *skb;
4603 BT_DBG("chan %p, control %p", chan, control);
/* reqseq equal to next_tx_seq refers to a frame never transmitted */
4605 if (control->reqseq == chan->next_tx_seq) {
4606 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
4607 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4611 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
/* Retry limit reached for the first frame to be resent */
4613 if (chan->max_tx && skb &&
4614 bt_cb(skb)->control.retries >= chan->max_tx) {
4615 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
4616 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4620 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4622 l2cap_pass_to_tx(chan, control);
/* With F-bit: skip the retransmit if this REJ was already acted on
 * (CONN_REJ_ACT guards against double retransmission).
 */
4624 if (control->final) {
4625 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4626 l2cap_retransmit_all(chan, control);
4628 l2cap_retransmit_all(chan, control);
4629 l2cap_ertm_send(chan);
4630 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
4631 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Classify an incoming I-frame's txseq relative to the receive window
 * and any outstanding SREJ state.  Returns one of the L2CAP_TXSEQ_*
 * classifications (EXPECTED, DUPLICATE, UNEXPECTED, INVALID, the *_SREJ
 * variants, or INVALID_IGNORE) which drives the RX state machine's
 * decision to deliver, queue, SREJ, drop, or disconnect.
 */
4635 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
4637 BT_DBG("chan %p, txseq %d", chan, txseq);
4639 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
4640 chan->expected_tx_seq);
/* While an SREJ is outstanding, first weed out frames falling outside
 * the window, then match against the SREJ request list / hold queue.
 */
4642 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
4643 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4645 /* See notes below regarding "double poll" and
4648 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4649 BT_DBG("Invalid/Ignore - after SREJ");
4650 return L2CAP_TXSEQ_INVALID_IGNORE;
4652 BT_DBG("Invalid - in window after SREJ sent");
4653 return L2CAP_TXSEQ_INVALID;
4657 if (chan->srej_list.head == txseq) {
4658 BT_DBG("Expected SREJ");
4659 return L2CAP_TXSEQ_EXPECTED_SREJ;
4662 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
4663 BT_DBG("Duplicate SREJ - txseq already stored");
4664 return L2CAP_TXSEQ_DUPLICATE_SREJ;
4667 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
4668 BT_DBG("Unexpected SREJ - not requested");
4669 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
4673 if (chan->expected_tx_seq == txseq) {
4674 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
4676 BT_DBG("Invalid - txseq outside tx window");
4677 return L2CAP_TXSEQ_INVALID;
4680 return L2CAP_TXSEQ_EXPECTED;
/* Sequence numbers wrap, so "before expected" is detected by comparing
 * offsets from last_acked_seq rather than raw values.
 */
4684 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
4685 __seq_offset(chan, chan->expected_tx_seq,
4686 chan->last_acked_seq)){
4687 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4688 return L2CAP_TXSEQ_DUPLICATE;
4691 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
4692 /* A source of invalid packets is a "double poll" condition,
4693 * where delays cause us to send multiple poll packets. If
4694 * the remote stack receives and processes both polls,
4695 * sequence numbers can wrap around in such a way that a
4696 * resent frame has a sequence number that looks like new data
4697 * with a sequence gap. This would trigger an erroneous SREJ
4700 * Fortunately, this is impossible with a tx window that's
4701 * less than half of the maximum sequence number, which allows
4702 * invalid frames to be safely ignored.
4704 * With tx window sizes greater than half of the tx window
4705 * maximum, the frame is invalid and cannot be ignored. This
4706 * causes a disconnect.
4709 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
4710 BT_DBG("Invalid/Ignore - txseq outside tx window");
4711 return L2CAP_TXSEQ_INVALID_IGNORE;
4713 BT_DBG("Invalid - txseq outside tx window");
4714 return L2CAP_TXSEQ_INVALID;
/* In window, after expected: one or more frames were lost */
4717 BT_DBG("Unexpected - txseq indicates missing frames");
4718 return L2CAP_TXSEQ_UNEXPECTED;
/* ERTM RX state machine handler for the normal RECV state.  Dispatches
 * on the event (I-frame, RR, RNR, REJ, SREJ): in-order I-frames are
 * reassembled and acked, a sequence gap triggers SREJ recovery and a
 * transition to SREJ_SENT, and S-frames update the TX side (final/poll
 * bit handling, retransmission, busy tracking).  skb_in_use tracks
 * whether the skb was consumed or queued; otherwise it is freed at the
 * end.  NOTE(review): break statements and some branch lines are elided
 * in this extract.
 */
4722 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
4723 struct l2cap_ctrl *control,
4724 struct sk_buff *skb, u8 event)
4727 bool skb_in_use = 0;
4729 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4733 case L2CAP_EV_RECV_IFRAME:
4734 switch (l2cap_classify_txseq(chan, control->txseq)) {
4735 case L2CAP_TXSEQ_EXPECTED:
4736 l2cap_pass_to_tx(chan, control);
/* When locally busy we cannot accept data; the dropped frame will be
 * recovered via SREJ once busy clears.
 */
4738 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4739 BT_DBG("Busy, discarding expected seq %d",
4744 chan->expected_tx_seq = __next_seq(chan,
4747 chan->buffer_seq = chan->expected_tx_seq;
4750 err = l2cap_reassemble_sdu(chan, skb, control);
/* F-bit acknowledges an earlier REJ: retransmit unless already done */
4754 if (control->final) {
4755 if (!test_and_clear_bit(CONN_REJ_ACT,
4756 &chan->conn_state)) {
4758 l2cap_retransmit_all(chan, control);
4759 l2cap_ertm_send(chan);
4763 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
4764 l2cap_send_ack(chan);
4766 case L2CAP_TXSEQ_UNEXPECTED:
4767 l2cap_pass_to_tx(chan, control);
4769 /* Can't issue SREJ frames in the local busy state.
4770 * Drop this frame, it will be seen as missing
4771 * when local busy is exited.
4773 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4774 BT_DBG("Busy, discarding unexpected seq %d",
4779 /* There was a gap in the sequence, so an SREJ
4780 * must be sent for each missing frame. The
4781 * current frame is stored for later use.
4783 skb_queue_tail(&chan->srej_q, skb);
4785 BT_DBG("Queued %p (queue len %d)", skb,
4786 skb_queue_len(&chan->srej_q));
4788 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4789 l2cap_seq_list_clear(&chan->srej_list);
4790 l2cap_send_srej(chan, control->txseq);
4792 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
/* Duplicates still carry a valid reqseq, so pass to the TX side */
4794 case L2CAP_TXSEQ_DUPLICATE:
4795 l2cap_pass_to_tx(chan, control);
4797 case L2CAP_TXSEQ_INVALID_IGNORE:
4799 case L2CAP_TXSEQ_INVALID:
4801 l2cap_send_disconn_req(chan->conn, chan,
4806 case L2CAP_EV_RECV_RR:
4807 l2cap_pass_to_tx(chan, control);
4808 if (control->final) {
4809 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4811 if (!test_and_clear_bit(CONN_REJ_ACT,
4812 &chan->conn_state)) {
4814 l2cap_retransmit_all(chan, control);
4817 l2cap_ertm_send(chan);
4818 } else if (control->poll) {
4819 l2cap_send_i_or_rr_or_rnr(chan);
/* Plain RR: remote left busy state; restart retransmission timer if
 * frames are still outstanding, then resume sending.
 */
4821 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4822 &chan->conn_state) &&
4823 chan->unacked_frames)
4824 __set_retrans_timer(chan);
4826 l2cap_ertm_send(chan);
4829 case L2CAP_EV_RECV_RNR:
4830 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4831 l2cap_pass_to_tx(chan, control);
4832 if (control && control->poll) {
4833 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4834 l2cap_send_rr_or_rnr(chan, 0);
/* Remote is busy: stop retransmitting until it recovers */
4836 __clear_retrans_timer(chan);
4837 l2cap_seq_list_clear(&chan->retrans_list);
4839 case L2CAP_EV_RECV_REJ:
4840 l2cap_handle_rej(chan, control);
4842 case L2CAP_EV_RECV_SREJ:
4843 l2cap_handle_srej(chan, control);
/* Free the skb unless it was delivered or queued above */
4849 if (skb && !skb_in_use) {
4850 BT_DBG("Freeing %p", skb);
/* ERTM RX state machine handler while SREJ recovery is in progress.
 * All arriving I-frames are parked on srej_q until the requested
 * retransmissions fill the gaps; l2cap_rx_queued_iframes() then drains
 * the queue in order.  S-frame events mirror the RECV-state handling
 * but answer polls with the SREJ tail instead of a plain RR.
 * NOTE(review): break statements and some branch lines are elided in
 * this extract.
 */
4857 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
4858 struct l2cap_ctrl *control,
4859 struct sk_buff *skb, u8 event)
4862 u16 txseq = control->txseq;
4863 bool skb_in_use = 0;
4865 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
4869 case L2CAP_EV_RECV_IFRAME:
4870 switch (l2cap_classify_txseq(chan, txseq)) {
4871 case L2CAP_TXSEQ_EXPECTED:
4872 /* Keep frame for reassembly later */
4873 l2cap_pass_to_tx(chan, control);
4874 skb_queue_tail(&chan->srej_q, skb);
4876 BT_DBG("Queued %p (queue len %d)", skb,
4877 skb_queue_len(&chan->srej_q));
4879 chan->expected_tx_seq = __next_seq(chan, txseq);
/* Head of the SREJ list arrived: pop it, queue the frame, and try to
 * drain any now-contiguous frames up to the application.
 */
4881 case L2CAP_TXSEQ_EXPECTED_SREJ:
4882 l2cap_seq_list_pop(&chan->srej_list);
4884 l2cap_pass_to_tx(chan, control);
4885 skb_queue_tail(&chan->srej_q, skb);
4887 BT_DBG("Queued %p (queue len %d)", skb,
4888 skb_queue_len(&chan->srej_q));
4890 err = l2cap_rx_queued_iframes(chan);
4895 case L2CAP_TXSEQ_UNEXPECTED:
4896 /* Got a frame that can't be reassembled yet.
4897 * Save it for later, and send SREJs to cover
4898 * the missing frames.
4900 skb_queue_tail(&chan->srej_q, skb);
4902 BT_DBG("Queued %p (queue len %d)", skb,
4903 skb_queue_len(&chan->srej_q));
4905 l2cap_pass_to_tx(chan, control);
4906 l2cap_send_srej(chan, control->txseq);
4908 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
4909 /* This frame was requested with an SREJ, but
4910 * some expected retransmitted frames are
4911 * missing. Request retransmission of missing
4914 skb_queue_tail(&chan->srej_q, skb);
4916 BT_DBG("Queued %p (queue len %d)", skb,
4917 skb_queue_len(&chan->srej_q));
4919 l2cap_pass_to_tx(chan, control);
4920 l2cap_send_srej_list(chan, control->txseq);
4922 case L2CAP_TXSEQ_DUPLICATE_SREJ:
4923 /* We've already queued this frame. Drop this copy. */
4924 l2cap_pass_to_tx(chan, control);
4926 case L2CAP_TXSEQ_DUPLICATE:
4927 /* Expecting a later sequence number, so this frame
4928 * was already received. Ignore it completely.
4931 case L2CAP_TXSEQ_INVALID_IGNORE:
4933 case L2CAP_TXSEQ_INVALID:
4935 l2cap_send_disconn_req(chan->conn, chan,
4940 case L2CAP_EV_RECV_RR:
4941 l2cap_pass_to_tx(chan, control);
4942 if (control->final) {
4943 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4945 if (!test_and_clear_bit(CONN_REJ_ACT,
4946 &chan->conn_state)) {
4948 l2cap_retransmit_all(chan, control);
4951 l2cap_ertm_send(chan);
4952 } else if (control->poll) {
4953 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4954 &chan->conn_state) &&
4955 chan->unacked_frames) {
4956 __set_retrans_timer(chan);
/* Answer the poll by re-requesting the last outstanding SREJ */
4959 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4960 l2cap_send_srej_tail(chan);
4962 if (test_and_clear_bit(CONN_REMOTE_BUSY,
4963 &chan->conn_state) &&
4964 chan->unacked_frames)
4965 __set_retrans_timer(chan);
4967 l2cap_send_ack(chan);
4970 case L2CAP_EV_RECV_RNR:
4971 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4972 l2cap_pass_to_tx(chan, control);
4973 if (control->poll) {
4974 l2cap_send_srej_tail(chan);
/* RNR without poll: acknowledge with a plain RR built locally */
4976 struct l2cap_ctrl rr_control;
4977 memset(&rr_control, 0, sizeof(rr_control));
4978 rr_control.sframe = 1;
4979 rr_control.super = L2CAP_SUPER_RR;
4980 rr_control.reqseq = chan->buffer_seq;
4981 l2cap_send_sframe(chan, &rr_control);
4985 case L2CAP_EV_RECV_REJ:
4986 l2cap_handle_rej(chan, control);
4988 case L2CAP_EV_RECV_SREJ:
4989 l2cap_handle_srej(chan, control);
/* Free the skb unless it was queued above */
4993 if (skb && !skb_in_use) {
4994 BT_DBG("Freeing %p", skb);
/* Validate a received reqseq: it must acknowledge a frame that has been
 * transmitted but not yet acked, i.e. lie within [expected_ack_seq,
 * next_tx_seq] modulo the sequence space.
 */
5001 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
5003 /* Make sure reqseq is for a packet that has been sent but not acked */
5006 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
5007 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
/* Top-level ERTM receive dispatch: after validating the reqseq, route
 * the event to the handler for the current RX state (RECV or
 * SREJ_SENT).  An out-of-range reqseq is a protocol violation and
 * tears the channel down.
 */
5010 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5011 struct sk_buff *skb, u8 event)
5015 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
5016 control, skb, event, chan->rx_state);
5018 if (__valid_reqseq(chan, control->reqseq)) {
5019 switch (chan->rx_state) {
5020 case L2CAP_RX_STATE_RECV:
5021 err = l2cap_rx_state_recv(chan, control, skb, event);
5023 case L2CAP_RX_STATE_SREJ_SENT:
5024 err = l2cap_rx_state_srej_sent(chan, control, skb,
5032 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5033 control->reqseq, chan->next_tx_seq,
5034 chan->expected_ack_seq);
5035 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Streaming-mode receive: deliver only in-sequence frames; anything
 * else just resynchronizes the expected sequence numbers (streaming
 * mode has no retransmission).  A reassembly error discards the
 * partially built SDU.
 */
5041 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
5042 struct sk_buff *skb)
5046 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
5049 if (l2cap_classify_txseq(chan, control->txseq) ==
5050 L2CAP_TXSEQ_EXPECTED) {
5051 l2cap_pass_to_tx(chan, control);
5053 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
5054 __next_seq(chan, chan->buffer_seq));
5056 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5058 l2cap_reassemble_sdu(chan, skb, control);
/* Reassembly failed: drop the partial SDU and reset state */
5061 kfree_skb(chan->sdu);
5064 chan->sdu_last_frag = NULL;
/* Out-of-sequence frame in streaming mode is simply dropped */
5068 BT_DBG("Freeing %p", skb);
/* Track the peer's sequence numbers even across dropped frames */
5073 chan->last_acked_seq = control->txseq;
5074 chan->expected_tx_seq = __next_seq(chan, control->txseq);
/* Entry point for ERTM/streaming data frames on a channel: unpack the
 * control field, verify the FCS, validate lengths and the F/P bits,
 * then dispatch I-frames to l2cap_rx()/l2cap_stream_rx() and S-frames
 * to l2cap_rx() via the super-type → event lookup table.
 * NOTE(review): several drop/error lines are elided in this extract.
 */
5079 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
5081 struct l2cap_ctrl *control = &bt_cb(skb)->control;
5085 __unpack_control(chan, skb);
5090 * We can just drop the corrupted I-frame here.
5091 * Receiver will miss it and start proper recovery
5092 * procedures and ask for retransmission.
5094 if (l2cap_check_fcs(chan, skb))
/* Payload-length accounting: SAR_START frames carry an SDU-length
 * prefix, and FCS (if enabled) adds a trailer; neither counts toward
 * the MPS check below.
 */
5097 if (!control->sframe && control->sar == L2CAP_SAR_START)
5098 len -= L2CAP_SDULEN_SIZE;
5100 if (chan->fcs == L2CAP_FCS_CRC16)
5101 len -= L2CAP_FCS_SIZE;
5103 if (len > chan->mps) {
5104 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5108 if (!control->sframe) {
5111 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5112 control->sar, control->reqseq, control->final,
5115 /* Validate F-bit - F=0 always valid, F=1 only
5116 * valid in TX WAIT_F
5118 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
5121 if (chan->mode != L2CAP_MODE_STREAMING) {
5122 event = L2CAP_EV_RECV_IFRAME;
5123 err = l2cap_rx(chan, control, skb, event);
5125 err = l2cap_stream_rx(chan, control, skb);
5129 l2cap_send_disconn_req(chan->conn, chan,
/* Maps the 2-bit S-frame super field to the RX event code */
5132 const u8 rx_func_to_event[4] = {
5133 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
5134 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
5137 /* Only I-frames are expected in streaming mode */
5138 if (chan->mode == L2CAP_MODE_STREAMING)
5141 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5142 control->reqseq, control->final, control->poll,
5147 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
5151 /* Validate F and P bits */
5152 if (control->final && (control->poll ||
5153 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
5156 event = rx_func_to_event[control->super];
5157 if (l2cap_rx(chan, control, skb, event))
5158 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Route an incoming data packet to the channel matching the given CID
 * and process it according to the channel's mode (basic / ERTM /
 * streaming).  Unknown CIDs (other than the A2MP create path) and
 * packets for non-connected channels are dropped.
 */
5168 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
5169 struct sk_buff *skb)
5171 struct l2cap_chan *chan;
5173 chan = l2cap_get_chan_by_scid(conn, cid);
/* No channel yet: the A2MP fixed channel may be created on demand */
5175 if (cid == L2CAP_CID_A2MP) {
5176 chan = a2mp_channel_create(conn, skb);
5182 l2cap_chan_lock(chan);
5184 BT_DBG("unknown cid 0x%4.4x", cid);
5185 /* Drop packet and return */
5191 BT_DBG("chan %p, len %d", chan, skb->len);
5193 if (chan->state != BT_CONNECTED)
5196 switch (chan->mode) {
5197 case L2CAP_MODE_BASIC:
5198 /* If socket recv buffers overflows we drop data here
5199 * which is *bad* because L2CAP has to be reliable.
5200 * But we don't have any other choice. L2CAP doesn't
5201 * provide flow control mechanism. */
5203 if (chan->imtu < skb->len)
5206 if (!chan->ops->recv(chan, skb))
5210 case L2CAP_MODE_ERTM:
5211 case L2CAP_MODE_STREAMING:
5212 l2cap_data_rcv(chan, skb);
5216 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
5224 l2cap_chan_unlock(chan);
/* Deliver a connectionless (G-frame) packet to the global channel
 * listening on the given PSM for this address pair; drop if no suitable
 * channel, wrong state, or packet exceeds the channel's MTU.
 */
5227 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
5228 struct sk_buff *skb)
5230 struct l2cap_chan *chan;
5232 chan = l2cap_global_chan_by_psm(0, psm, conn->src, conn->dst);
5236 BT_DBG("chan %p, len %d", chan, skb->len);
5238 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5241 if (chan->imtu < skb->len)
5244 if (!chan->ops->recv(chan, skb))
/* Deliver an LE ATT fixed-channel packet to the matching global channel
 * for this address pair; same state/MTU gating as the connectionless
 * path above.
 */
5251 static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
5252 struct sk_buff *skb)
5254 struct l2cap_chan *chan;
5256 chan = l2cap_global_chan_by_scid(0, cid, conn->src, conn->dst);
5260 BT_DBG("chan %p, len %d", chan, skb->len);
5262 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
5265 if (chan->imtu < skb->len)
5268 if (!chan->ops->recv(chan, skb))
/* Demultiplex a complete L2CAP frame by destination CID: signaling
 * (BR/EDR and LE), connectionless, LE ATT, SMP, or a regular data
 * channel.  The basic L2CAP header (length + CID) is stripped first
 * and its length field is cross-checked against the skb.
 */
5275 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
5277 struct l2cap_hdr *lh = (void *) skb->data;
5281 skb_pull(skb, L2CAP_HDR_SIZE);
5282 cid = __le16_to_cpu(lh->cid);
5283 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload; mismatch path elided */
5285 if (len != skb->len) {
5290 BT_DBG("len %d, cid 0x%4.4x", len, cid);
5293 case L2CAP_CID_LE_SIGNALING:
5294 case L2CAP_CID_SIGNALING:
5295 l2cap_sig_channel(conn, skb);
5298 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM prefix before the payload */
5299 psm = get_unaligned((__le16 *) skb->data);
5300 skb_pull(skb, L2CAP_PSMLEN_SIZE);
5301 l2cap_conless_channel(conn, psm, skb);
5304 case L2CAP_CID_LE_DATA:
5305 l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel: a failure here kills the whole connection */
5309 if (smp_sig_channel(conn, skb))
5310 l2cap_conn_del(conn->hcon, EACCES);
5314 l2cap_data_channel(conn, cid, skb);
5319 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: incoming ACL connection request.  Scan listening
 * channels for an exact local-address match (preferred) or a wildcard
 * (BDADDR_ANY) match, and return the accumulated link-mode flags
 * (accept / master) for whichever set applies.
 */
5321 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
5323 int exact = 0, lm1 = 0, lm2 = 0;
5324 struct l2cap_chan *c;
5326 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
5328 /* Find listening sockets and check their link_mode */
5329 read_lock(&chan_list_lock);
5330 list_for_each_entry(c, &chan_list, global_l) {
5331 struct sock *sk = c->sk;
5333 if (c->state != BT_LISTEN)
/* lm1 accumulates exact-address matches, lm2 wildcard matches */
5336 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
5337 lm1 |= HCI_LM_ACCEPT;
5338 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5339 lm1 |= HCI_LM_MASTER;
5341 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
5342 lm2 |= HCI_LM_ACCEPT;
5343 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
5344 lm2 |= HCI_LM_MASTER;
5347 read_unlock(&chan_list_lock);
5349 return exact ? lm1 : lm2;
/* HCI callback: ACL connection complete.  On success, create/look up
 * the L2CAP connection and kick it into the ready state; on failure,
 * tear down any L2CAP state with the mapped errno.
 */
5352 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
5354 struct l2cap_conn *conn;
5356 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
5359 conn = l2cap_conn_add(hcon, status);
5361 l2cap_conn_ready(conn);
5363 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the reason L2CAP wants this link disconnected;
 * defaults to remote-user-termination when no L2CAP state exists.
 */
5367 int l2cap_disconn_ind(struct hci_conn *hcon)
5369 struct l2cap_conn *conn = hcon->l2cap_data;
5371 BT_DBG("hcon %p", hcon);
5374 return HCI_ERROR_REMOTE_USER_TERM;
5375 return conn->disc_reason;
/* HCI callback: link disconnected — tear down all L2CAP state on it */
5378 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
5380 BT_DBG("hcon %p reason %d", hcon, reason);
5382 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a grace timer (medium security) or closes the
 * channel outright (high security); regaining it cancels the timer.
 */
5385 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
5387 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
5390 if (encrypt == 0x00) {
5391 if (chan->sec_level == BT_SECURITY_MEDIUM) {
5392 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
5393 } else if (chan->sec_level == BT_SECURITY_HIGH)
5394 l2cap_chan_close(chan, ECONNREFUSED);
5396 if (chan->sec_level == BT_SECURITY_MEDIUM)
5397 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption procedure completed.  Walk
 * every channel on the connection and advance it according to its
 * state: LE channels become ready, pending BR/EDR connects are sent or
 * timed out, and deferred CONNECT2 channels get their connect response
 * (and first config request on success).
 * NOTE(review): break/continue and some branch lines are elided in this
 * extract.
 */
5401 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
5403 struct l2cap_conn *conn = hcon->l2cap_data;
5404 struct l2cap_chan *chan;
5409 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
/* LE links: on successful encryption, start SMP key distribution */
5411 if (hcon->type == LE_LINK) {
5412 if (!status && encrypt)
5413 smp_distribute_keys(conn, 0);
5414 cancel_delayed_work(&conn->security_timer);
5417 mutex_lock(&conn->chan_lock);
5419 list_for_each_entry(chan, &conn->chan_l, list) {
5420 l2cap_chan_lock(chan);
5422 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
5423 state_to_string(chan->state));
/* A2MP fixed channels are not subject to link-level security */
5425 if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
5426 l2cap_chan_unlock(chan);
5430 if (chan->scid == L2CAP_CID_LE_DATA) {
5431 if (!status && encrypt) {
5432 chan->sec_level = hcon->sec_level;
5433 l2cap_chan_ready(chan);
5436 l2cap_chan_unlock(chan);
/* Channels not waiting on this security procedure only need the
 * encryption-change bookkeeping.
 */
5440 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
5441 l2cap_chan_unlock(chan);
5445 if (!status && (chan->state == BT_CONNECTED ||
5446 chan->state == BT_CONFIG)) {
5447 struct sock *sk = chan->sk;
5449 clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags);
5450 sk->sk_state_change(sk);
5452 l2cap_check_encryption(chan, encrypt);
5453 l2cap_chan_unlock(chan);
/* Outgoing connect was waiting on security: send it or time out */
5457 if (chan->state == BT_CONNECT) {
5459 l2cap_send_conn_req(chan);
5461 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5463 } else if (chan->state == BT_CONNECT2) {
5464 struct sock *sk = chan->sk;
5465 struct l2cap_conn_rsp rsp;
/* Deferred accept: report authorization-pending to the peer and wake
 * the listening parent socket so userspace can decide.
 */
5471 if (test_bit(BT_SK_DEFER_SETUP,
5472 &bt_sk(sk)->flags)) {
5473 struct sock *parent = bt_sk(sk)->parent;
5474 res = L2CAP_CR_PEND;
5475 stat = L2CAP_CS_AUTHOR_PEND;
5477 parent->sk_data_ready(parent, 0);
5479 __l2cap_state_change(chan, BT_CONFIG);
5480 res = L2CAP_CR_SUCCESS;
5481 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the connection and start disconnect timer */
5484 __l2cap_state_change(chan, BT_DISCONN);
5485 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
5486 res = L2CAP_CR_SEC_BLOCK;
5487 stat = L2CAP_CS_NO_INFO;
5492 rsp.scid = cpu_to_le16(chan->dcid);
5493 rsp.dcid = cpu_to_le16(chan->scid);
5494 rsp.result = cpu_to_le16(res);
5495 rsp.status = cpu_to_le16(stat);
5496 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On success, immediately kick off configuration if not yet started */
5499 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
5500 res == L2CAP_CR_SUCCESS) {
5502 set_bit(CONF_REQ_SENT, &chan->conf_state);
5503 l2cap_send_cmd(conn, l2cap_get_ident(conn),
5505 l2cap_build_conf_req(chan, buf),
5507 chan->num_conf_req++;
5511 l2cap_chan_unlock(chan);
5514 mutex_unlock(&conn->chan_lock);
/* HCI callback: ACL data received.  Reassembles L2CAP frames from ACL
 * start/continuation fragments: a start fragment either completes a
 * frame immediately or allocates conn->rx_skb sized from the L2CAP
 * header length; continuation fragments are appended until rx_len
 * reaches zero, then the full frame is dispatched.  Any framing
 * inconsistency marks the connection unreliable (ECOMM).
 */
5519 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
5521 struct l2cap_conn *conn = hcon->l2cap_data;
5524 conn = l2cap_conn_add(hcon, 0);
5529 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
5531 if (!(flags & ACL_CONT)) {
5532 struct l2cap_hdr *hdr;
/* A start fragment while reassembly is in progress means the previous
 * frame was truncated: discard it and flag the connection.
 */
5536 BT_ERR("Unexpected start frame (len %d)", skb->len);
5537 kfree_skb(conn->rx_skb);
5538 conn->rx_skb = NULL;
5540 l2cap_conn_unreliable(conn, ECOMM);
5543 /* Start fragment always begin with Basic L2CAP header */
5544 if (skb->len < L2CAP_HDR_SIZE) {
5545 BT_ERR("Frame is too short (len %d)", skb->len);
5546 l2cap_conn_unreliable(conn, ECOMM);
5550 hdr = (struct l2cap_hdr *) skb->data;
5551 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
5553 if (len == skb->len) {
5554 /* Complete frame received */
5555 l2cap_recv_frame(conn, skb);
5559 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
5561 if (skb->len > len) {
5562 BT_ERR("Frame is too long (len %d, expected len %d)",
5564 l2cap_conn_unreliable(conn, ECOMM);
5568 /* Allocate skb for the complete frame (with header) */
5569 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
5573 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len counts the bytes still missing from the frame */
5575 conn->rx_len = len - skb->len;
5577 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
5579 if (!conn->rx_len) {
5580 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
5581 l2cap_conn_unreliable(conn, ECOMM);
5585 if (skb->len > conn->rx_len) {
5586 BT_ERR("Fragment is too long (len %d, expected %d)",
5587 skb->len, conn->rx_len);
5588 kfree_skb(conn->rx_skb);
5589 conn->rx_skb = NULL;
5591 l2cap_conn_unreliable(conn, ECOMM);
5595 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
5597 conn->rx_len -= skb->len;
5599 if (!conn->rx_len) {
5600 /* Complete frame received */
5601 l2cap_recv_frame(conn, conn->rx_skb);
5602 conn->rx_skb = NULL;
/* debugfs seq_file show: dump one line per registered L2CAP channel
 * (addresses, state, PSM, CIDs, MTUs, security level, mode).
 */
5611 static int l2cap_debugfs_show(struct seq_file *f, void *p)
5613 struct l2cap_chan *c;
5615 read_lock(&chan_list_lock);
5617 list_for_each_entry(c, &chan_list, global_l) {
5618 struct sock *sk = c->sk;
5620 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5621 batostr(&bt_sk(sk)->src),
5622 batostr(&bt_sk(sk)->dst),
5623 c->state, __le16_to_cpu(c->psm),
5624 c->scid, c->dcid, c->imtu, c->omtu,
5625 c->sec_level, c->mode);
5628 read_unlock(&chan_list_lock);
/* debugfs open: wire the file up to the single-shot seq_file show fn */
5633 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
5635 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (single_open seq_file;
 * the .read line is elided in this extract).
 */
5638 static const struct file_operations l2cap_debugfs_fops = {
5639 .open = l2cap_debugfs_open,
5641 .llseek = seq_lseek,
5642 .release = single_release,
/* Dentry for the "l2cap" debugfs file; created in l2cap_init() */
5645 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket layer and create the debugfs
 * dump file (debugfs failure is logged but non-fatal).
 */
5647 int __init l2cap_init(void)
5651 err = l2cap_init_sockets();
5656 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
5657 bt_debugfs, NULL, &l2cap_debugfs_fops);
5659 BT_ERR("Failed to create L2CAP debug file");
/* Module exit: remove the debugfs entry and unregister the sockets */
5665 void l2cap_exit(void)
5667 debugfs_remove(l2cap_debugfs)
5668 l2cap_cleanup_sockets();
/* Load-time switch to disable ERTM; 0644 also makes it writable via
 * sysfs by root.
 */
5671 module_param(disable_ertm, bool, 0644);
5672 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");