/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);
/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
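
/* Illustrative sketch, not part of the original file: how a driver's
 * setup callback might use __hci_cmd_sync() to send one command and
 * inspect the Command Complete parameters. The vendor opcode 0xfc0f
 * and the function name are made-up example values.
 */
static int example_read_vendor_info(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc0f, 0, NULL, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* skb->data now points at the command's return parameters */
	BT_DBG("%s vendor response %d bytes", hdev->name, skb->len);

	kfree_skb(skb);
	return 0;
}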
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
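
/* Illustrative sketch, not part of the original file: a request builder
 * plus a synchronous caller, mirroring how hci_scan_req() and friends
 * below are driven through hci_req_sync(). Both example_* names are
 * hypothetical.
 */
static void example_voice_req(struct hci_request *req, unsigned long opt)
{
	__le16 setting = cpu_to_le16(opt);

	hci_req_add(req, HCI_OP_WRITE_VOICE_SETTING, sizeof(setting),
		    &setting);
}

static int example_set_voice(struct hci_dev *hdev, u16 setting)
{
	/* Blocks until the controller answers or the timeout expires */
	return hci_req_sync(hdev, example_voice_req, setting,
			    HCI_INIT_TIMEOUT);
}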
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * INQUIRY_RSSI command if it is set.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0xff;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
}
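
/* Illustrative sketch, not part of the original file: every successful
 * hci_dev_get() must be balanced with hci_dev_put() once the caller is
 * done with the reference. The function name is hypothetical.
 */
static void example_dump_dev_name(int index)
{
	struct hci_dev *hdev = hci_dev_get(index);

	if (!hdev)
		return;

	BT_DBG("%s", hdev->name);

	hci_dev_put(hdev);
}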
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
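
/* Worked example, not part of the original file: the buffer built by
 * create_ad() is a sequence of AD structures, each encoded as
 * { length, type, data... } where the length byte covers the type byte
 * plus the data. The flags field written above therefore occupies
 * exactly 3 bytes:
 *
 *	ptr[0] = 2;         length: type byte + 1 data byte
 *	ptr[1] = EIR_FLAGS; AD type 0x01
 *	ptr[2] = flags;     the flags bitmap itself
 *
 * The device name entry works the same way, which is why its length
 * byte is name_len + 1 and it consumes name_len + 2 bytes in total.
 */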
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   timeout);

	return 0;
}
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
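
/* Illustrative sketch, not part of the original file: kicking off a
 * passive LE scan. The scan type value 0x00 (passive), the 10 ms
 * interval/window values and the function name are example choices;
 * the timeout is in jiffies because hci_do_le_scan() hands it straight
 * to queue_delayed_work().
 */
static int example_start_le_scan(struct hci_dev *hdev)
{
	/* type 0x00 = passive scan, interval/window 0x0010 = 10 ms */
	return hci_le_scan(hdev, 0x00, 0x0010, 0x0010,
			   msecs_to_jiffies(10000));
}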
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
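
/* Illustrative sketch, not part of the original file: the minimal shape
 * of a transport driver registering a controller with the core. All
 * example_* callbacks are hypothetical driver code; a real driver would
 * talk to its hardware in open/close/send.
 */
static int example_open(struct hci_dev *hdev) { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }
static int example_flush(struct hci_dev *hdev) { return 0; }
static int example_send(struct sk_buff *skb) { kfree_skb(skb); return 0; }

static int example_probe(void)
{
	struct hci_dev *hdev = hci_alloc_dev();

	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_VIRTUAL;
	hdev->open  = example_open;
	hdev->close = example_close;
	hdev->flush = example_flush;
	hdev->send  = example_send;

	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		return -EBUSY;
	}

	return 0;
}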
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
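
/* Illustrative sketch, not part of the original file: how a transport
 * driver hands a complete packet to the core. The driver sets skb->dev
 * and the packet type before calling hci_recv_frame(). The function
 * name is hypothetical.
 */
static int example_deliver_event(struct hci_dev *hdev, void *buf, int len)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(skb);
}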
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			skb = NULL;
			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
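
/* Illustrative sketch, not part of the original file: upper protocols
 * such as L2CAP register an hci_cb to be notified about connection
 * events. The callback and its name are hypothetical; register with
 * hci_register_cb(&example_cb) at init and unregister on exit.
 */
static void example_security_cfm(struct hci_conn *conn, __u8 status,
				 __u8 encrypt)
{
	BT_DBG("conn %p status %u encrypt %u", conn, status, encrypt);
}

static struct hci_cb example_cb = {
	.name         = "example",
	.security_cfm = example_security_cfm,
};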
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
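
/* Illustrative sketch, not part of the original file: building a small
 * asynchronous request out of the primitives above. Both example_*
 * names are hypothetical.
 */
static void example_scan_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_enable_page_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	/* Queues the command and returns immediately; the completion
	 * callback runs when the last command in the request finishes.
	 */
	return hci_req_run(&req, example_scan_complete);
}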
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
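/* Block-based flow control accounts for buffer usage in fixed-size
 * blocks rather than whole packets; the ACL header is not counted
 * against the payload. For example, with block_len = 64, a 300 byte
 * frame (296 bytes of payload after the 4 byte ACL header) occupies
 * DIV_ROUND_UP(296, 64) = 5 blocks.
 */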
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
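/* Dispatch ACL scheduling according to the controller's flow control
 * mode: classic packet-based accounting, or the block-based accounting
 * used by AMP controllers, where the controller reports its buffer
 * capacity in fixed-size data blocks rather than in packets.
 */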
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
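/* LE scheduling note: controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL buffers, which is why the count below
 * falls back to acl_cnt and is written back to acl_cnt after the run.
 */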
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
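/* Single TX worker: queued on hdev->workqueue whenever new data is
 * submitted or the controller returns buffer credits. Since all
 * scheduling runs from this one work item, the per-connection and
 * per-channel counters above are not expected to be updated
 * concurrently by two scheduling passes.
 */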
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
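/* The first 16 bits of an ACL header pack the 12 bit connection handle
 * together with the packet boundary and broadcast flags in the upper
 * 4 bits; hci_handle() and hci_flags() split them apart. For example,
 * a raw value of 0x2001 yields handle 0x001 with flags 0x2 (ACL_START).
 */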
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
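/* Requests are chains of commands in cmd_q: the first command of each
 * request has bt_cb(skb)->req.start set, and the completion callback
 * for the whole chain lives in bt_cb(skb)->req.complete. A request is
 * finished either when its last command completes successfully or as
 * soon as any command in it fails.
 */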
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
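/* RX worker: drains hdev->rx_q and dispatches each frame by packet
 * type. Every frame is first mirrored to the monitor socket (and to
 * raw sockets when a promiscuous listener is present) before normal
 * processing, so sniffers see traffic even for frames the core later
 * drops.
 */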
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
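/* Command worker: sends a command to the driver only while cmd_cnt
 * credits remain, and keeps a clone of the in-flight command in
 * hdev->sent_cmd so replies can be matched and the command resent if
 * necessary. The cmd_timer watchdog is re-armed for each command,
 * except while a reset is in progress, when it is disabled instead.
 */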
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
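/* The LAP used below is the General Inquiry Access Code 0x9e8b33,
 * stored in the little-endian byte order expected on the wire.
 */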
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}