1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30
31 #include <linux/rfkill.h>
32
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
35
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
39
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
43
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
47
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
50
51 /* ---- HCI notifications ---- */
52
53 static void hci_notify(struct hci_dev *hdev, int event)
54 {
55         hci_sock_dev_event(hdev, event);
56 }
57
58 /* ---- HCI requests ---- */
59
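/* Synchronous requests park the caller on hdev->req_wait_q with
 * req_status set to HCI_REQ_PEND; the completion and cancellation
 * handlers below flip the status, record the result and wake the waiter.
 */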
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
61 {
62         BT_DBG("%s result 0x%2.2x", hdev->name, result);
63
64         if (hdev->req_status == HCI_REQ_PEND) {
65                 hdev->req_result = result;
66                 hdev->req_status = HCI_REQ_DONE;
67                 wake_up_interruptible(&hdev->req_wait_q);
68         }
69 }
70
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
72 {
73         BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75         if (hdev->req_status == HCI_REQ_PEND) {
76                 hdev->req_result = err;
77                 hdev->req_status = HCI_REQ_CANCELED;
78                 wake_up_interruptible(&hdev->req_wait_q);
79         }
80 }
81
82 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
83                                             u8 event)
84 {
85         struct hci_ev_cmd_complete *ev;
86         struct hci_event_hdr *hdr;
87         struct sk_buff *skb;
88
89         hci_dev_lock(hdev);
90
91         skb = hdev->recv_evt;
92         hdev->recv_evt = NULL;
93
94         hci_dev_unlock(hdev);
95
96         if (!skb)
97                 return ERR_PTR(-ENODATA);
98
99         if (skb->len < sizeof(*hdr)) {
100                 BT_ERR("Too short HCI event");
101                 goto failed;
102         }
103
104         hdr = (void *) skb->data;
105         skb_pull(skb, HCI_EVENT_HDR_SIZE);
106
107         if (event) {
108                 if (hdr->evt != event)
109                         goto failed;
110                 return skb;
111         }
112
113         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114                 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
115                 goto failed;
116         }
117
118         if (skb->len < sizeof(*ev)) {
119                 BT_ERR("Too short cmd_complete event");
120                 goto failed;
121         }
122
123         ev = (void *) skb->data;
124         skb_pull(skb, sizeof(*ev));
125
126         if (opcode == __le16_to_cpu(ev->opcode))
127                 return skb;
128
129         BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130                __le16_to_cpu(ev->opcode));
131
132 failed:
133         kfree_skb(skb);
134         return ERR_PTR(-ENODATA);
135 }
136
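/* Send a single HCI command and wait for the result.  When @event is
 * non-zero the received event must match it; otherwise a Command Complete
 * for @opcode is expected.  Returns the event skb on success or an
 * ERR_PTR() on failure.  The double-underscore prefix signals that
 * callers are expected to hold the request lock.
 */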
137 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
138                                   const void *param, u8 event, u32 timeout)
139 {
140         DECLARE_WAITQUEUE(wait, current);
141         struct hci_request req;
142         int err = 0;
143
144         BT_DBG("%s", hdev->name);
145
146         hci_req_init(&req, hdev);
147
148         hci_req_add_ev(&req, opcode, plen, param, event);
149
150         hdev->req_status = HCI_REQ_PEND;
151
152         err = hci_req_run(&req, hci_req_sync_complete);
153         if (err < 0)
154                 return ERR_PTR(err);
155
156         add_wait_queue(&hdev->req_wait_q, &wait);
157         set_current_state(TASK_INTERRUPTIBLE);
158
159         schedule_timeout(timeout);
160
161         remove_wait_queue(&hdev->req_wait_q, &wait);
162
163         if (signal_pending(current))
164                 return ERR_PTR(-EINTR);
165
166         switch (hdev->req_status) {
167         case HCI_REQ_DONE:
168                 err = -bt_to_errno(hdev->req_result);
169                 break;
170
171         case HCI_REQ_CANCELED:
172                 err = -hdev->req_result;
173                 break;
174
175         default:
176                 err = -ETIMEDOUT;
177                 break;
178         }
179
180         hdev->req_status = hdev->req_result = 0;
181
182         BT_DBG("%s end: err %d", hdev->name, err);
183
184         if (err < 0)
185                 return ERR_PTR(err);
186
187         return hci_get_cmd_complete(hdev, opcode, event);
188 }
189 EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
191 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
192                                const void *param, u32 timeout)
193 {
194         return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
195 }
196 EXPORT_SYMBOL(__hci_cmd_sync);
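
/* Illustrative use of __hci_cmd_sync() (a sketch, not code from this
 * file; the reply layout comes from struct hci_rp_read_bd_addr):
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *                           HCI_INIT_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      rp = (struct hci_rp_read_bd_addr *) skb->data;
 *      ...
 *      kfree_skb(skb);
 */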
197
198 /* Execute request and wait for completion. */
199 static int __hci_req_sync(struct hci_dev *hdev,
200                           void (*func)(struct hci_request *req,
201                                       unsigned long opt),
202                           unsigned long opt, __u32 timeout)
203 {
204         struct hci_request req;
205         DECLARE_WAITQUEUE(wait, current);
206         int err = 0;
207
208         BT_DBG("%s start", hdev->name);
209
210         hci_req_init(&req, hdev);
211
212         hdev->req_status = HCI_REQ_PEND;
213
214         func(&req, opt);
215
216         err = hci_req_run(&req, hci_req_sync_complete);
217         if (err < 0) {
218                 hdev->req_status = 0;
219
220                 /* ENODATA means the HCI request command queue is empty.
221                  * This can happen when a request with conditionals doesn't
222                  * trigger any commands to be sent. This is normal behavior
223                  * and should not trigger an error return.
224                  */
225                 if (err == -ENODATA)
226                         return 0;
227
228                 return err;
229         }
230
231         add_wait_queue(&hdev->req_wait_q, &wait);
232         set_current_state(TASK_INTERRUPTIBLE);
233
234         schedule_timeout(timeout);
235
236         remove_wait_queue(&hdev->req_wait_q, &wait);
237
238         if (signal_pending(current))
239                 return -EINTR;
240
241         switch (hdev->req_status) {
242         case HCI_REQ_DONE:
243                 err = -bt_to_errno(hdev->req_result);
244                 break;
245
246         case HCI_REQ_CANCELED:
247                 err = -hdev->req_result;
248                 break;
249
250         default:
251                 err = -ETIMEDOUT;
252                 break;
253         }
254
255         hdev->req_status = hdev->req_result = 0;
256
257         BT_DBG("%s end: err %d", hdev->name, err);
258
259         return err;
260 }
261
262 static int hci_req_sync(struct hci_dev *hdev,
263                         void (*req)(struct hci_request *req,
264                                     unsigned long opt),
265                         unsigned long opt, __u32 timeout)
266 {
267         int ret;
268
269         if (!test_bit(HCI_UP, &hdev->flags))
270                 return -ENETDOWN;
271
272         /* Serialize all requests */
273         hci_req_lock(hdev);
274         ret = __hci_req_sync(hdev, req, opt, timeout);
275         hci_req_unlock(hdev);
276
277         return ret;
278 }
279
280 static void hci_reset_req(struct hci_request *req, unsigned long opt)
281 {
282         BT_DBG("%s %ld", req->hdev->name, opt);
283
284         /* Reset device */
285         set_bit(HCI_RESET, &req->hdev->flags);
286         hci_req_add(req, HCI_OP_RESET, 0, NULL);
287 }
288
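/* Request builders such as hci_reset_req() above are handed to
 * __hci_req_sync()/hci_req_sync(), which run them and block until the
 * queued commands complete.  For example (illustrative):
 *
 *      ret = hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
 */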
289 static void bredr_init(struct hci_request *req)
290 {
291         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
292
293         /* Read Local Supported Features */
294         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
295
296         /* Read Local Version */
297         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
298
299         /* Read BD Address */
300         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
301 }
302
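/* AMP controllers use block-based rather than packet-based flow control,
 * which is why the data block size is read here instead of the ACL
 * buffer size.
 */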
303 static void amp_init(struct hci_request *req)
304 {
305         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
306
307         /* Read Local Version */
308         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
309
310         /* Read Local AMP Info */
311         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
312
313         /* Read Data Blk size */
314         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
315 }
316
317 static void hci_init1_req(struct hci_request *req, unsigned long opt)
318 {
319         struct hci_dev *hdev = req->hdev;
320
321         BT_DBG("%s %ld", hdev->name, opt);
322
323         /* Reset */
324         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
325                 hci_reset_req(req, 0);
326
327         switch (hdev->dev_type) {
328         case HCI_BREDR:
329                 bredr_init(req);
330                 break;
331
332         case HCI_AMP:
333                 amp_init(req);
334                 break;
335
336         default:
337                 BT_ERR("Unknown device type %d", hdev->dev_type);
338                 break;
339         }
340 }
341
342 static void bredr_setup(struct hci_request *req)
343 {
344         struct hci_cp_delete_stored_link_key cp;
345         __le16 param;
346         __u8 flt_type;
347
348         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
349         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
350
351         /* Read Class of Device */
352         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
353
354         /* Read Local Name */
355         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
356
357         /* Read Voice Setting */
358         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
359
360         /* Clear Event Filters */
361         flt_type = HCI_FLT_CLEAR_ALL;
362         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
363
364         /* Connection accept timeout ~20 secs */
365         param = __constant_cpu_to_le16(0x7d00);
366         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
367
368         bacpy(&cp.bdaddr, BDADDR_ANY);
369         cp.delete_all = 0x01;
370         hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
371
372         /* Read page scan parameters */
373         if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
374                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
375                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
376         }
377 }
378
379 static void le_setup(struct hci_request *req)
380 {
381         struct hci_dev *hdev = req->hdev;
382
383         /* Read LE Buffer Size */
384         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
385
386         /* Read LE Local Supported Features */
387         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
388
389         /* Read LE Advertising Channel TX Power */
390         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
391
392         /* Read LE White List Size */
393         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
394
395         /* Read LE Supported States */
396         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
397
398         /* LE-only controllers have LE implicitly enabled */
399         if (!lmp_bredr_capable(hdev))
400                 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
401 }
402
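/* Choose the richest inquiry result format the controller can deliver:
 * 0x02 for inquiry with extended results, 0x01 for results with RSSI,
 * 0x00 for standard results.  The manufacturer/revision checks below
 * appear to cover controllers that return RSSI results without
 * advertising the LMP feature bit.
 */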
403 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
404 {
405         if (lmp_ext_inq_capable(hdev))
406                 return 0x02;
407
408         if (lmp_inq_rssi_capable(hdev))
409                 return 0x01;
410
411         if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
412             hdev->lmp_subver == 0x0757)
413                 return 0x01;
414
415         if (hdev->manufacturer == 15) {
416                 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
417                         return 0x01;
418                 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
419                         return 0x01;
420                 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
421                         return 0x01;
422         }
423
424         if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
425             hdev->lmp_subver == 0x1805)
426                 return 0x01;
427
428         return 0x00;
429 }
430
431 static void hci_setup_inquiry_mode(struct hci_request *req)
432 {
433         u8 mode;
434
435         mode = hci_get_inquiry_mode(req->hdev);
436
437         hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
438 }
439
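/* Build the eight-byte (little-endian) masks for the Set Event Mask and,
 * on LE controllers, LE Set Event Mask commands, enabling only events
 * the controller can actually generate.
 */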
440 static void hci_setup_event_mask(struct hci_request *req)
441 {
442         struct hci_dev *hdev = req->hdev;
443
444         /* The second byte is 0xff instead of 0x9f (two reserved bits
445          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
446          * command otherwise.
447          */
448         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
449
450         /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
451          * any event mask for pre-1.2 devices.
452          */
453         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
454                 return;
455
456         if (lmp_bredr_capable(hdev)) {
457                 events[4] |= 0x01; /* Flow Specification Complete */
458                 events[4] |= 0x02; /* Inquiry Result with RSSI */
459                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
460                 events[5] |= 0x08; /* Synchronous Connection Complete */
461                 events[5] |= 0x10; /* Synchronous Connection Changed */
462         }
463
464         if (lmp_inq_rssi_capable(hdev))
465                 events[4] |= 0x02; /* Inquiry Result with RSSI */
466
467         if (lmp_sniffsubr_capable(hdev))
468                 events[5] |= 0x20; /* Sniff Subrating */
469
470         if (lmp_pause_enc_capable(hdev))
471                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
472
473         if (lmp_ext_inq_capable(hdev))
474                 events[5] |= 0x40; /* Extended Inquiry Result */
475
476         if (lmp_no_flush_capable(hdev))
477                 events[7] |= 0x01; /* Enhanced Flush Complete */
478
479         if (lmp_lsto_capable(hdev))
480                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
481
482         if (lmp_ssp_capable(hdev)) {
483                 events[6] |= 0x01;      /* IO Capability Request */
484                 events[6] |= 0x02;      /* IO Capability Response */
485                 events[6] |= 0x04;      /* User Confirmation Request */
486                 events[6] |= 0x08;      /* User Passkey Request */
487                 events[6] |= 0x10;      /* Remote OOB Data Request */
488                 events[6] |= 0x20;      /* Simple Pairing Complete */
489                 events[7] |= 0x04;      /* User Passkey Notification */
490                 events[7] |= 0x08;      /* Keypress Notification */
491                 events[7] |= 0x10;      /* Remote Host Supported
492                                          * Features Notification
493                                          */
494         }
495
496         if (lmp_le_capable(hdev))
497                 events[7] |= 0x20;      /* LE Meta-Event */
498
499         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
500
501         if (lmp_le_capable(hdev)) {
502                 memset(events, 0, sizeof(events));
503                 events[0] = 0x1f;
504                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
505                             sizeof(events), events);
506         }
507 }
508
509 static void hci_init2_req(struct hci_request *req, unsigned long opt)
510 {
511         struct hci_dev *hdev = req->hdev;
512
513         if (lmp_bredr_capable(hdev))
514                 bredr_setup(req);
515
516         if (lmp_le_capable(hdev))
517                 le_setup(req);
518
519         hci_setup_event_mask(req);
520
521         if (hdev->hci_ver > BLUETOOTH_VER_1_1)
522                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
523
524         if (lmp_ssp_capable(hdev)) {
525                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
526                         u8 mode = 0x01;
527                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
528                                     sizeof(mode), &mode);
529                 } else {
530                         struct hci_cp_write_eir cp;
531
532                         memset(hdev->eir, 0, sizeof(hdev->eir));
533                         memset(&cp, 0, sizeof(cp));
534
535                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
536                 }
537         }
538
539         if (lmp_inq_rssi_capable(hdev))
540                 hci_setup_inquiry_mode(req);
541
542         if (lmp_inq_tx_pwr_capable(hdev))
543                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
544
545         if (lmp_ext_feat_capable(hdev)) {
546                 struct hci_cp_read_local_ext_features cp;
547
548                 cp.page = 0x01;
549                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
550                             sizeof(cp), &cp);
551         }
552
553         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
554                 u8 enable = 1;
555                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
556                             &enable);
557         }
558 }
559
560 static void hci_setup_link_policy(struct hci_request *req)
561 {
562         struct hci_dev *hdev = req->hdev;
563         struct hci_cp_write_def_link_policy cp;
564         u16 link_policy = 0;
565
566         if (lmp_rswitch_capable(hdev))
567                 link_policy |= HCI_LP_RSWITCH;
568         if (lmp_hold_capable(hdev))
569                 link_policy |= HCI_LP_HOLD;
570         if (lmp_sniff_capable(hdev))
571                 link_policy |= HCI_LP_SNIFF;
572         if (lmp_park_capable(hdev))
573                 link_policy |= HCI_LP_PARK;
574
575         cp.policy = cpu_to_le16(link_policy);
576         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
577 }
578
579 static void hci_set_le_support(struct hci_request *req)
580 {
581         struct hci_dev *hdev = req->hdev;
582         struct hci_cp_write_le_host_supported cp;
583
584         /* LE-only devices do not support explicit enablement */
585         if (!lmp_bredr_capable(hdev))
586                 return;
587
588         memset(&cp, 0, sizeof(cp));
589
590         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
591                 cp.le = 0x01;
592                 cp.simul = lmp_le_br_capable(hdev);
593         }
594
595         if (cp.le != lmp_host_le_capable(hdev))
596                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
597                             &cp);
598 }
599
600 static void hci_init3_req(struct hci_request *req, unsigned long opt)
601 {
602         struct hci_dev *hdev = req->hdev;
603         u8 p;
604
605         if (hdev->commands[5] & 0x10)
606                 hci_setup_link_policy(req);
607
608         if (lmp_le_capable(hdev)) {
609                 hci_set_le_support(req);
610                 hci_update_ad(req);
611         }
612
613         /* Read features beyond page 1 if available */
614         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
615                 struct hci_cp_read_local_ext_features cp;
616
617                 cp.page = p;
618                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
619                             sizeof(cp), &cp);
620         }
621 }
622
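/* Controller bring-up runs in up to three synchronous stages: init1
 * resets the controller and reads its basic identity, init2 configures
 * transport parameters and the event mask, and init3 applies policy
 * (default link policy, LE host support, advertising data, extended
 * feature pages).
 */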
623 static int __hci_init(struct hci_dev *hdev)
624 {
625         int err;
626
627         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
628         if (err < 0)
629                 return err;
630
631         /* The HCI_BREDR device type covers single-mode LE, single-mode
632          * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
633          * only need the first stage init.
634          */
635         if (hdev->dev_type != HCI_BREDR)
636                 return 0;
637
638         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
639         if (err < 0)
640                 return err;
641
642         return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
643 }
644
645 static void hci_scan_req(struct hci_request *req, unsigned long opt)
646 {
647         __u8 scan = opt;
648
649         BT_DBG("%s %x", req->hdev->name, scan);
650
651         /* Inquiry and Page scans */
652         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
653 }
654
655 static void hci_auth_req(struct hci_request *req, unsigned long opt)
656 {
657         __u8 auth = opt;
658
659         BT_DBG("%s %x", req->hdev->name, auth);
660
661         /* Authentication */
662         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
663 }
664
665 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
666 {
667         __u8 encrypt = opt;
668
669         BT_DBG("%s %x", req->hdev->name, encrypt);
670
671         /* Encryption */
672         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
673 }
674
675 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
676 {
677         __le16 policy = cpu_to_le16(opt);
678
679         BT_DBG("%s %x", req->hdev->name, policy);
680
681         /* Default link policy */
682         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
683 }
684
685 /* Get an HCI device by index.
686  * The device is held on return. */
687 struct hci_dev *hci_dev_get(int index)
688 {
689         struct hci_dev *hdev = NULL, *d;
690
691         BT_DBG("%d", index);
692
693         if (index < 0)
694                 return NULL;
695
696         read_lock(&hci_dev_list_lock);
697         list_for_each_entry(d, &hci_dev_list, list) {
698                 if (d->id == index) {
699                         hdev = hci_dev_hold(d);
700                         break;
701                 }
702         }
703         read_unlock(&hci_dev_list_lock);
704         return hdev;
705 }
706
707 /* ---- Inquiry support ---- */
708
709 bool hci_discovery_active(struct hci_dev *hdev)
710 {
711         struct discovery_state *discov = &hdev->discovery;
712
713         switch (discov->state) {
714         case DISCOVERY_FINDING:
715         case DISCOVERY_RESOLVING:
716                 return true;
717
718         default:
719                 return false;
720         }
721 }
722
723 void hci_discovery_set_state(struct hci_dev *hdev, int state)
724 {
725         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
726
727         if (hdev->discovery.state == state)
728                 return;
729
730         switch (state) {
731         case DISCOVERY_STOPPED:
732                 if (hdev->discovery.state != DISCOVERY_STARTING)
733                         mgmt_discovering(hdev, 0);
734                 break;
735         case DISCOVERY_STARTING:
736                 break;
737         case DISCOVERY_FINDING:
738                 mgmt_discovering(hdev, 1);
739                 break;
740         case DISCOVERY_RESOLVING:
741                 break;
742         case DISCOVERY_STOPPING:
743                 break;
744         }
745
746         hdev->discovery.state = state;
747 }
748
749 static void inquiry_cache_flush(struct hci_dev *hdev)
750 {
751         struct discovery_state *cache = &hdev->discovery;
752         struct inquiry_entry *p, *n;
753
754         list_for_each_entry_safe(p, n, &cache->all, all) {
755                 list_del(&p->all);
756                 kfree(p);
757         }
758
759         INIT_LIST_HEAD(&cache->unknown);
760         INIT_LIST_HEAD(&cache->resolve);
761 }
762
763 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
764                                                bdaddr_t *bdaddr)
765 {
766         struct discovery_state *cache = &hdev->discovery;
767         struct inquiry_entry *e;
768
769         BT_DBG("cache %p, %pMR", cache, bdaddr);
770
771         list_for_each_entry(e, &cache->all, all) {
772                 if (!bacmp(&e->data.bdaddr, bdaddr))
773                         return e;
774         }
775
776         return NULL;
777 }
778
779 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
780                                                        bdaddr_t *bdaddr)
781 {
782         struct discovery_state *cache = &hdev->discovery;
783         struct inquiry_entry *e;
784
785         BT_DBG("cache %p, %pMR", cache, bdaddr);
786
787         list_for_each_entry(e, &cache->unknown, list) {
788                 if (!bacmp(&e->data.bdaddr, bdaddr))
789                         return e;
790         }
791
792         return NULL;
793 }
794
795 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
796                                                        bdaddr_t *bdaddr,
797                                                        int state)
798 {
799         struct discovery_state *cache = &hdev->discovery;
800         struct inquiry_entry *e;
801
802         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
803
804         list_for_each_entry(e, &cache->resolve, list) {
805                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
806                         return e;
807                 if (!bacmp(&e->data.bdaddr, bdaddr))
808                         return e;
809         }
810
811         return NULL;
812 }
813
814 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
815                                       struct inquiry_entry *ie)
816 {
817         struct discovery_state *cache = &hdev->discovery;
818         struct list_head *pos = &cache->resolve;
819         struct inquiry_entry *p;
820
821         list_del(&ie->list);
822
823         list_for_each_entry(p, &cache->resolve, list) {
824                 if (p->name_state != NAME_PENDING &&
825                     abs(p->data.rssi) >= abs(ie->data.rssi))
826                         break;
827                 pos = &p->list;
828         }
829
830         list_add(&ie->list, pos);
831 }
832
833 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
834                               bool name_known, bool *ssp)
835 {
836         struct discovery_state *cache = &hdev->discovery;
837         struct inquiry_entry *ie;
838
839         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
840
841         hci_remove_remote_oob_data(hdev, &data->bdaddr);
842
843         if (ssp)
844                 *ssp = data->ssp_mode;
845
846         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
847         if (ie) {
848                 if (ie->data.ssp_mode && ssp)
849                         *ssp = true;
850
851                 if (ie->name_state == NAME_NEEDED &&
852                     data->rssi != ie->data.rssi) {
853                         ie->data.rssi = data->rssi;
854                         hci_inquiry_cache_update_resolve(hdev, ie);
855                 }
856
857                 goto update;
858         }
859
860         /* Entry not in the cache. Add a new one. */
861         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
862         if (!ie)
863                 return false;
864
865         list_add(&ie->all, &cache->all);
866
867         if (name_known) {
868                 ie->name_state = NAME_KNOWN;
869         } else {
870                 ie->name_state = NAME_NOT_KNOWN;
871                 list_add(&ie->list, &cache->unknown);
872         }
873
874 update:
875         if (name_known && ie->name_state != NAME_KNOWN &&
876             ie->name_state != NAME_PENDING) {
877                 ie->name_state = NAME_KNOWN;
878                 list_del(&ie->list);
879         }
880
881         memcpy(&ie->data, data, sizeof(*data));
882         ie->timestamp = jiffies;
883         cache->timestamp = jiffies;
884
885         if (ie->name_state == NAME_NOT_KNOWN)
886                 return false;
887
888         return true;
889 }
890
891 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
892 {
893         struct discovery_state *cache = &hdev->discovery;
894         struct inquiry_info *info = (struct inquiry_info *) buf;
895         struct inquiry_entry *e;
896         int copied = 0;
897
898         list_for_each_entry(e, &cache->all, all) {
899                 struct inquiry_data *data = &e->data;
900
901                 if (copied >= num)
902                         break;
903
904                 bacpy(&info->bdaddr, &data->bdaddr);
905                 info->pscan_rep_mode    = data->pscan_rep_mode;
906                 info->pscan_period_mode = data->pscan_period_mode;
907                 info->pscan_mode        = data->pscan_mode;
908                 memcpy(info->dev_class, data->dev_class, 3);
909                 info->clock_offset      = data->clock_offset;
910
911                 info++;
912                 copied++;
913         }
914
915         BT_DBG("cache %p, copied %d", cache, copied);
916         return copied;
917 }
918
919 static void hci_inq_req(struct hci_request *req, unsigned long opt)
920 {
921         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
922         struct hci_dev *hdev = req->hdev;
923         struct hci_cp_inquiry cp;
924
925         BT_DBG("%s", hdev->name);
926
927         if (test_bit(HCI_INQUIRY, &hdev->flags))
928                 return;
929
930         /* Start Inquiry */
931         memcpy(&cp.lap, &ir->lap, 3);
932         cp.length  = ir->length;
933         cp.num_rsp = ir->num_rsp;
934         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
935 }
936
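/* Action function for wait_on_bit(): reschedule until the HCI_INQUIRY
 * bit is cleared or a signal is pending.
 */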
937 static int wait_inquiry(void *word)
938 {
939         schedule();
940         return signal_pending(current);
941 }
942
943 int hci_inquiry(void __user *arg)
944 {
945         __u8 __user *ptr = arg;
946         struct hci_inquiry_req ir;
947         struct hci_dev *hdev;
948         int err = 0, do_inquiry = 0, max_rsp;
949         long timeo;
950         __u8 *buf;
951
952         if (copy_from_user(&ir, ptr, sizeof(ir)))
953                 return -EFAULT;
954
955         hdev = hci_dev_get(ir.dev_id);
956         if (!hdev)
957                 return -ENODEV;
958
959         hci_dev_lock(hdev);
960         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
961             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
962                 inquiry_cache_flush(hdev);
963                 do_inquiry = 1;
964         }
965         hci_dev_unlock(hdev);
966
967         timeo = ir.length * msecs_to_jiffies(2000);
968
969         if (do_inquiry) {
970                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
971                                    timeo);
972                 if (err < 0)
973                         goto done;
974
975                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
976                  * cleared). If it is interrupted by a signal, return -EINTR.
977                  */
978                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
979                                 TASK_INTERRUPTIBLE))
980                         return -EINTR;
981         }
982
983         /* For an unlimited number of responses, use a buffer with
984          * 255 entries.
985          */
986         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
987
988         /* cache_dump can't sleep, so allocate a temporary buffer and then
989          * copy it to user space.
990          */
991         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
992         if (!buf) {
993                 err = -ENOMEM;
994                 goto done;
995         }
996
997         hci_dev_lock(hdev);
998         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
999         hci_dev_unlock(hdev);
1000
1001         BT_DBG("num_rsp %d", ir.num_rsp);
1002
1003         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1004                 ptr += sizeof(ir);
1005                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1006                                  ir.num_rsp))
1007                         err = -EFAULT;
1008         } else
1009                 err = -EFAULT;
1010
1011         kfree(buf);
1012
1013 done:
1014         hci_dev_put(hdev);
1015         return err;
1016 }
1017
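/* Assemble LE advertising data as (length, type, value) structures:
 * optional Flags and TX Power Level fields followed by the local name,
 * shortened if it doesn't fit into the remaining HCI_MAX_AD_LENGTH
 * bytes.  Returns the number of bytes written to @ptr.
 */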
1018 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1019 {
1020         u8 ad_len = 0, flags = 0;
1021         size_t name_len;
1022
1023         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1024                 flags |= LE_AD_GENERAL;
1025
1026         if (!lmp_bredr_capable(hdev))
1027                 flags |= LE_AD_NO_BREDR;
1028
1029         if (lmp_le_br_capable(hdev))
1030                 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1031
1032         if (lmp_host_le_br_capable(hdev))
1033                 flags |= LE_AD_SIM_LE_BREDR_HOST;
1034
1035         if (flags) {
1036                 BT_DBG("adv flags 0x%02x", flags);
1037
1038                 ptr[0] = 2;
1039                 ptr[1] = EIR_FLAGS;
1040                 ptr[2] = flags;
1041
1042                 ad_len += 3;
1043                 ptr += 3;
1044         }
1045
1046         if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1047                 ptr[0] = 2;
1048                 ptr[1] = EIR_TX_POWER;
1049                 ptr[2] = (u8) hdev->adv_tx_power;
1050
1051                 ad_len += 3;
1052                 ptr += 3;
1053         }
1054
1055         name_len = strlen(hdev->dev_name);
1056         if (name_len > 0) {
1057                 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1058
1059                 if (name_len > max_len) {
1060                         name_len = max_len;
1061                         ptr[1] = EIR_NAME_SHORT;
1062                 } else
1063                         ptr[1] = EIR_NAME_COMPLETE;
1064
1065                 ptr[0] = name_len + 1;
1066
1067                 memcpy(ptr + 2, hdev->dev_name, name_len);
1068
1069                 ad_len += (name_len + 2);
1070                 ptr += (name_len + 2);
1071         }
1072
1073         return ad_len;
1074 }
1075
1076 void hci_update_ad(struct hci_request *req)
1077 {
1078         struct hci_dev *hdev = req->hdev;
1079         struct hci_cp_le_set_adv_data cp;
1080         u8 len;
1081
1082         if (!lmp_le_capable(hdev))
1083                 return;
1084
1085         memset(&cp, 0, sizeof(cp));
1086
1087         len = create_ad(hdev, cp.data);
1088
1089         if (hdev->adv_data_len == len &&
1090             memcmp(cp.data, hdev->adv_data, len) == 0)
1091                 return;
1092
1093         memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1094         hdev->adv_data_len = len;
1095
1096         cp.length = len;
1097
1098         hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1099 }
1100
1101 /* ---- HCI ioctl helpers ---- */
1102
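/* Power on an HCI device: call the driver's open(), run the optional
 * hardware setup() hook, then perform the staged HCI init unless the
 * device is marked raw.  On failure the device is fully torn down again
 * before returning.
 */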
1103 int hci_dev_open(__u16 dev)
1104 {
1105         struct hci_dev *hdev;
1106         int ret = 0;
1107
1108         hdev = hci_dev_get(dev);
1109         if (!hdev)
1110                 return -ENODEV;
1111
1112         BT_DBG("%s %p", hdev->name, hdev);
1113
1114         hci_req_lock(hdev);
1115
1116         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1117                 ret = -ENODEV;
1118                 goto done;
1119         }
1120
1121         if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1122                 ret = -ERFKILL;
1123                 goto done;
1124         }
1125
1126         if (test_bit(HCI_UP, &hdev->flags)) {
1127                 ret = -EALREADY;
1128                 goto done;
1129         }
1130
1131         if (hdev->open(hdev)) {
1132                 ret = -EIO;
1133                 goto done;
1134         }
1135
1136         atomic_set(&hdev->cmd_cnt, 1);
1137         set_bit(HCI_INIT, &hdev->flags);
1138
1139         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1140                 ret = hdev->setup(hdev);
1141
1142         if (!ret) {
1143                 /* Treat all non-BR/EDR controllers as raw devices if
1144                  * enable_hs is not set.
1145                  */
1146                 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1147                         set_bit(HCI_RAW, &hdev->flags);
1148
1149                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1150                         set_bit(HCI_RAW, &hdev->flags);
1151
1152                 if (!test_bit(HCI_RAW, &hdev->flags))
1153                         ret = __hci_init(hdev);
1154         }
1155
1156         clear_bit(HCI_INIT, &hdev->flags);
1157
1158         if (!ret) {
1159                 hci_dev_hold(hdev);
1160                 set_bit(HCI_UP, &hdev->flags);
1161                 hci_notify(hdev, HCI_DEV_UP);
1162                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1163                     mgmt_valid_hdev(hdev)) {
1164                         hci_dev_lock(hdev);
1165                         mgmt_powered(hdev, 1);
1166                         hci_dev_unlock(hdev);
1167                 }
1168         } else {
1169                 /* Init failed, cleanup */
1170                 flush_work(&hdev->tx_work);
1171                 flush_work(&hdev->cmd_work);
1172                 flush_work(&hdev->rx_work);
1173
1174                 skb_queue_purge(&hdev->cmd_q);
1175                 skb_queue_purge(&hdev->rx_q);
1176
1177                 if (hdev->flush)
1178                         hdev->flush(hdev);
1179
1180                 if (hdev->sent_cmd) {
1181                         kfree_skb(hdev->sent_cmd);
1182                         hdev->sent_cmd = NULL;
1183                 }
1184
1185                 hdev->close(hdev);
1186                 hdev->flags = 0;
1187         }
1188
1189 done:
1190         hci_req_unlock(hdev);
1191         hci_dev_put(hdev);
1192         return ret;
1193 }
1194
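/* Common power-down path shared by hci_dev_close(), rfkill and the
 * power-off work: cancel pending work, flush queues and connections,
 * optionally issue a final HCI_Reset, then call the driver's close().
 */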
1195 static int hci_dev_do_close(struct hci_dev *hdev)
1196 {
1197         BT_DBG("%s %p", hdev->name, hdev);
1198
1199         cancel_work_sync(&hdev->le_scan);
1200
1201         cancel_delayed_work(&hdev->power_off);
1202
1203         hci_req_cancel(hdev, ENODEV);
1204         hci_req_lock(hdev);
1205
1206         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1207                 del_timer_sync(&hdev->cmd_timer);
1208                 hci_req_unlock(hdev);
1209                 return 0;
1210         }
1211
1212         /* Flush RX and TX works */
1213         flush_work(&hdev->tx_work);
1214         flush_work(&hdev->rx_work);
1215
1216         if (hdev->discov_timeout > 0) {
1217                 cancel_delayed_work(&hdev->discov_off);
1218                 hdev->discov_timeout = 0;
1219                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1220         }
1221
1222         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1223                 cancel_delayed_work(&hdev->service_cache);
1224
1225         cancel_delayed_work_sync(&hdev->le_scan_disable);
1226
1227         hci_dev_lock(hdev);
1228         inquiry_cache_flush(hdev);
1229         hci_conn_hash_flush(hdev);
1230         hci_dev_unlock(hdev);
1231
1232         hci_notify(hdev, HCI_DEV_DOWN);
1233
1234         if (hdev->flush)
1235                 hdev->flush(hdev);
1236
1237         /* Reset device */
1238         skb_queue_purge(&hdev->cmd_q);
1239         atomic_set(&hdev->cmd_cnt, 1);
1240         if (!test_bit(HCI_RAW, &hdev->flags) &&
1241             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1242                 set_bit(HCI_INIT, &hdev->flags);
1243                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1244                 clear_bit(HCI_INIT, &hdev->flags);
1245         }
1246
1247         /* Flush cmd work */
1248         flush_work(&hdev->cmd_work);
1249
1250         /* Drop queues */
1251         skb_queue_purge(&hdev->rx_q);
1252         skb_queue_purge(&hdev->cmd_q);
1253         skb_queue_purge(&hdev->raw_q);
1254
1255         /* Drop last sent command */
1256         if (hdev->sent_cmd) {
1257                 del_timer_sync(&hdev->cmd_timer);
1258                 kfree_skb(hdev->sent_cmd);
1259                 hdev->sent_cmd = NULL;
1260         }
1261
1262         kfree_skb(hdev->recv_evt);
1263         hdev->recv_evt = NULL;
1264
1265         /* After this point our queues are empty
1266          * and no tasks are scheduled. */
1267         hdev->close(hdev);
1268
1269         /* Clear flags */
1270         hdev->flags = 0;
1271         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1272
1273         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1274             mgmt_valid_hdev(hdev)) {
1275                 hci_dev_lock(hdev);
1276                 mgmt_powered(hdev, 0);
1277                 hci_dev_unlock(hdev);
1278         }
1279
1280         /* Controller radio is available but is currently powered down */
1281         hdev->amp_status = 0;
1282
1283         memset(hdev->eir, 0, sizeof(hdev->eir));
1284         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1285
1286         hci_req_unlock(hdev);
1287
1288         hci_dev_put(hdev);
1289         return 0;
1290 }
1291
1292 int hci_dev_close(__u16 dev)
1293 {
1294         struct hci_dev *hdev;
1295         int err;
1296
1297         hdev = hci_dev_get(dev);
1298         if (!hdev)
1299                 return -ENODEV;
1300
1301         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1302                 cancel_delayed_work(&hdev->power_off);
1303
1304         err = hci_dev_do_close(hdev);
1305
1306         hci_dev_put(hdev);
1307         return err;
1308 }
1309
1310 int hci_dev_reset(__u16 dev)
1311 {
1312         struct hci_dev *hdev;
1313         int ret = 0;
1314
1315         hdev = hci_dev_get(dev);
1316         if (!hdev)
1317                 return -ENODEV;
1318
1319         hci_req_lock(hdev);
1320
1321         if (!test_bit(HCI_UP, &hdev->flags))
1322                 goto done;
1323
1324         /* Drop queues */
1325         skb_queue_purge(&hdev->rx_q);
1326         skb_queue_purge(&hdev->cmd_q);
1327
1328         hci_dev_lock(hdev);
1329         inquiry_cache_flush(hdev);
1330         hci_conn_hash_flush(hdev);
1331         hci_dev_unlock(hdev);
1332
1333         if (hdev->flush)
1334                 hdev->flush(hdev);
1335
1336         atomic_set(&hdev->cmd_cnt, 1);
1337         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1338
1339         if (!test_bit(HCI_RAW, &hdev->flags))
1340                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1341
1342 done:
1343         hci_req_unlock(hdev);
1344         hci_dev_put(hdev);
1345         return ret;
1346 }
1347
1348 int hci_dev_reset_stat(__u16 dev)
1349 {
1350         struct hci_dev *hdev;
1351         int ret = 0;
1352
1353         hdev = hci_dev_get(dev);
1354         if (!hdev)
1355                 return -ENODEV;
1356
1357         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1358
1359         hci_dev_put(hdev);
1360
1361         return ret;
1362 }
1363
1364 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1365 {
1366         struct hci_dev *hdev;
1367         struct hci_dev_req dr;
1368         int err = 0;
1369
1370         if (copy_from_user(&dr, arg, sizeof(dr)))
1371                 return -EFAULT;
1372
1373         hdev = hci_dev_get(dr.dev_id);
1374         if (!hdev)
1375                 return -ENODEV;
1376
1377         switch (cmd) {
1378         case HCISETAUTH:
1379                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1380                                    HCI_INIT_TIMEOUT);
1381                 break;
1382
1383         case HCISETENCRYPT:
1384                 if (!lmp_encrypt_capable(hdev)) {
1385                         err = -EOPNOTSUPP;
1386                         break;
1387                 }
1388
1389                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1390                         /* Auth must be enabled first */
1391                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1392                                            HCI_INIT_TIMEOUT);
1393                         if (err)
1394                                 break;
1395                 }
1396
1397                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1398                                    HCI_INIT_TIMEOUT);
1399                 break;
1400
1401         case HCISETSCAN:
1402                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1403                                    HCI_INIT_TIMEOUT);
1404                 break;
1405
1406         case HCISETLINKPOL:
1407                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1408                                    HCI_INIT_TIMEOUT);
1409                 break;
1410
1411         case HCISETLINKMODE:
1412                 hdev->link_mode = ((__u16) dr.dev_opt) &
1413                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1414                 break;
1415
1416         case HCISETPTYPE:
1417                 hdev->pkt_type = (__u16) dr.dev_opt;
1418                 break;
1419
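        /* For the two MTU ioctls, dev_opt packs a pair of __u16 values:
         * the packet count in the first halfword and the MTU in the
         * second.
         */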
1420         case HCISETACLMTU:
1421                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1422                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1423                 break;
1424
1425         case HCISETSCOMTU:
1426                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1427                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1428                 break;
1429
1430         default:
1431                 err = -EINVAL;
1432                 break;
1433         }
1434
1435         hci_dev_put(hdev);
1436         return err;
1437 }
1438
1439 int hci_get_dev_list(void __user *arg)
1440 {
1441         struct hci_dev *hdev;
1442         struct hci_dev_list_req *dl;
1443         struct hci_dev_req *dr;
1444         int n = 0, size, err;
1445         __u16 dev_num;
1446
1447         if (get_user(dev_num, (__u16 __user *) arg))
1448                 return -EFAULT;
1449
1450         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1451                 return -EINVAL;
1452
1453         size = sizeof(*dl) + dev_num * sizeof(*dr);
1454
1455         dl = kzalloc(size, GFP_KERNEL);
1456         if (!dl)
1457                 return -ENOMEM;
1458
1459         dr = dl->dev_req;
1460
1461         read_lock(&hci_dev_list_lock);
1462         list_for_each_entry(hdev, &hci_dev_list, list) {
1463                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1464                         cancel_delayed_work(&hdev->power_off);
1465
1466                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1467                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1468
1469                 (dr + n)->dev_id  = hdev->id;
1470                 (dr + n)->dev_opt = hdev->flags;
1471
1472                 if (++n >= dev_num)
1473                         break;
1474         }
1475         read_unlock(&hci_dev_list_lock);
1476
1477         dl->dev_num = n;
1478         size = sizeof(*dl) + n * sizeof(*dr);
1479
1480         err = copy_to_user(arg, dl, size);
1481         kfree(dl);
1482
1483         return err ? -EFAULT : 0;
1484 }
1485
1486 int hci_get_dev_info(void __user *arg)
1487 {
1488         struct hci_dev *hdev;
1489         struct hci_dev_info di;
1490         int err = 0;
1491
1492         if (copy_from_user(&di, arg, sizeof(di)))
1493                 return -EFAULT;
1494
1495         hdev = hci_dev_get(di.dev_id);
1496         if (!hdev)
1497                 return -ENODEV;
1498
1499         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1500                 cancel_delayed_work_sync(&hdev->power_off);
1501
1502         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1503                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1504
1505         strcpy(di.name, hdev->name);
1506         di.bdaddr   = hdev->bdaddr;
1507         di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1508         di.flags    = hdev->flags;
1509         di.pkt_type = hdev->pkt_type;
1510         if (lmp_bredr_capable(hdev)) {
1511                 di.acl_mtu  = hdev->acl_mtu;
1512                 di.acl_pkts = hdev->acl_pkts;
1513                 di.sco_mtu  = hdev->sco_mtu;
1514                 di.sco_pkts = hdev->sco_pkts;
1515         } else {
1516                 di.acl_mtu  = hdev->le_mtu;
1517                 di.acl_pkts = hdev->le_pkts;
1518                 di.sco_mtu  = 0;
1519                 di.sco_pkts = 0;
1520         }
1521         di.link_policy = hdev->link_policy;
1522         di.link_mode   = hdev->link_mode;
1523
1524         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1525         memcpy(&di.features, &hdev->features, sizeof(di.features));
1526
1527         if (copy_to_user(arg, &di, sizeof(di)))
1528                 err = -EFAULT;
1529
1530         hci_dev_put(hdev);
1531
1532         return err;
1533 }
1534
1535 /* ---- Interface to HCI drivers ---- */
1536
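/* rfkill only needs to act on "block": the device is closed immediately,
 * while unblocking is left to a later explicit power-on.
 */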
1537 static int hci_rfkill_set_block(void *data, bool blocked)
1538 {
1539         struct hci_dev *hdev = data;
1540
1541         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1542
1543         if (!blocked)
1544                 return 0;
1545
1546         hci_dev_do_close(hdev);
1547
1548         return 0;
1549 }
1550
1551 static const struct rfkill_ops hci_rfkill_ops = {
1552         .set_block = hci_rfkill_set_block,
1553 };
1554
1555 static void hci_power_on(struct work_struct *work)
1556 {
1557         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1558         int err;
1559
1560         BT_DBG("%s", hdev->name);
1561
1562         err = hci_dev_open(hdev->id);
1563         if (err < 0) {
1564                 mgmt_set_powered_failed(hdev, err);
1565                 return;
1566         }
1567
1568         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1569                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1570                                    HCI_AUTO_OFF_TIMEOUT);
1571
1572         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1573                 mgmt_index_added(hdev);
1574 }
1575
1576 static void hci_power_off(struct work_struct *work)
1577 {
1578         struct hci_dev *hdev = container_of(work, struct hci_dev,
1579                                             power_off.work);
1580
1581         BT_DBG("%s", hdev->name);
1582
1583         hci_dev_do_close(hdev);
1584 }
1585
1586 static void hci_discov_off(struct work_struct *work)
1587 {
1588         struct hci_dev *hdev;
1589         u8 scan = SCAN_PAGE;
1590
1591         hdev = container_of(work, struct hci_dev, discov_off.work);
1592
1593         BT_DBG("%s", hdev->name);
1594
1595         hci_dev_lock(hdev);
1596
1597         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1598
1599         hdev->discov_timeout = 0;
1600
1601         hci_dev_unlock(hdev);
1602 }
1603
1604 int hci_uuids_clear(struct hci_dev *hdev)
1605 {
1606         struct bt_uuid *uuid, *tmp;
1607
1608         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1609                 list_del(&uuid->list);
1610                 kfree(uuid);
1611         }
1612
1613         return 0;
1614 }
1615
1616 int hci_link_keys_clear(struct hci_dev *hdev)
1617 {
1618         struct list_head *p, *n;
1619
1620         list_for_each_safe(p, n, &hdev->link_keys) {
1621                 struct link_key *key;
1622
1623                 key = list_entry(p, struct link_key, list);
1624
1625                 list_del(p);
1626                 kfree(key);
1627         }
1628
1629         return 0;
1630 }
1631
1632 int hci_smp_ltks_clear(struct hci_dev *hdev)
1633 {
1634         struct smp_ltk *k, *tmp;
1635
1636         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1637                 list_del(&k->list);
1638                 kfree(k);
1639         }
1640
1641         return 0;
1642 }
1643
1644 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1645 {
1646         struct link_key *k;
1647
1648         list_for_each_entry(k, &hdev->link_keys, list)
1649                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1650                         return k;
1651
1652         return NULL;
1653 }
1654
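/* Decide whether a new link key should outlive the connection.  Legacy
 * keys always do and debug keys never do; otherwise a key persists when
 * both sides asked for some form of bonding, or either side asked for
 * dedicated bonding.
 */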
1655 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1656                                u8 key_type, u8 old_key_type)
1657 {
1658         /* Legacy key */
1659         if (key_type < 0x03)
1660                 return true;
1661
1662         /* Debug keys are insecure so don't store them persistently */
1663         if (key_type == HCI_LK_DEBUG_COMBINATION)
1664                 return false;
1665
1666         /* Changed combination key and there's no previous one */
1667         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1668                 return false;
1669
1670         /* Security mode 3 case */
1671         if (!conn)
1672                 return true;
1673
1674         /* Neither local nor remote side had no-bonding as a requirement */
1675         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1676                 return true;
1677
1678         /* Local side had dedicated bonding as a requirement */
1679         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1680                 return true;
1681
1682         /* Remote side had dedicated bonding as a requirement */
1683         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1684                 return true;
1685
1686         /* If none of the above criteria match, then don't store the key
1687          * persistently */
1688         return false;
1689 }
1690
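/* An LTK distributed to a peer is later requested back via the EDIV/Rand
 * pair carried in the LE Long Term Key Request event; that pair is what
 * this lookup matches on.
 */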
1691 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1692 {
1693         struct smp_ltk *k;
1694
1695         list_for_each_entry(k, &hdev->long_term_keys, list) {
1696                 if (k->ediv != ediv ||
1697                     memcmp(rand, k->rand, sizeof(k->rand)))
1698                         continue;
1699
1700                 return k;
1701         }
1702
1703         return NULL;
1704 }
1705
1706 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1707                                      u8 addr_type)
1708 {
1709         struct smp_ltk *k;
1710
1711         list_for_each_entry(k, &hdev->long_term_keys, list)
1712                 if (addr_type == k->bdaddr_type &&
1713                     bacmp(bdaddr, &k->bdaddr) == 0)
1714                         return k;
1715
1716         return NULL;
1717 }
1718
1719 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1720                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1721 {
1722         struct link_key *key, *old_key;
1723         u8 old_key_type;
1724         bool persistent;
1725
1726         old_key = hci_find_link_key(hdev, bdaddr);
1727         if (old_key) {
1728                 old_key_type = old_key->type;
1729                 key = old_key;
1730         } else {
1731                 old_key_type = conn ? conn->key_type : 0xff;
1732                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1733                 if (!key)
1734                         return -ENOMEM;
1735                 list_add(&key->list, &hdev->link_keys);
1736         }
1737
1738         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1739
1740         /* Some buggy controller combinations generate a changed
1741          * combination key for legacy pairing even when there's no
1742          * previous key */
1743         if (type == HCI_LK_CHANGED_COMBINATION &&
1744             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1745                 type = HCI_LK_COMBINATION;
1746                 if (conn)
1747                         conn->key_type = type;
1748         }
1749
1750         bacpy(&key->bdaddr, bdaddr);
1751         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1752         key->pin_len = pin_len;
1753
1754         if (type == HCI_LK_CHANGED_COMBINATION)
1755                 key->type = old_key_type;
1756         else
1757                 key->type = type;
1758
1759         if (!new_key)
1760                 return 0;
1761
1762         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1763
1764         mgmt_new_link_key(hdev, key, persistent);
1765
1766         if (conn)
1767                 conn->flush_key = !persistent;
1768
1769         return 0;
1770 }
1771
1772 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1773                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1774                 ediv, u8 rand[8])
1775 {
1776         struct smp_ltk *key, *old_key;
1777
1778         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1779                 return 0;
1780
1781         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1782         if (old_key)
1783                 key = old_key;
1784         else {
1785                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1786                 if (!key)
1787                         return -ENOMEM;
1788                 list_add(&key->list, &hdev->long_term_keys);
1789         }
1790
1791         bacpy(&key->bdaddr, bdaddr);
1792         key->bdaddr_type = addr_type;
1793         memcpy(key->val, tk, sizeof(key->val));
1794         key->authenticated = authenticated;
1795         key->ediv = ediv;
1796         key->enc_size = enc_size;
1797         key->type = type;
1798         memcpy(key->rand, rand, sizeof(key->rand));
1799
1800         if (!new_key)
1801                 return 0;
1802
1803         if (type & HCI_SMP_LTK)
1804                 mgmt_new_ltk(hdev, key, 1);
1805
1806         return 0;
1807 }
1808
1809 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1810 {
1811         struct link_key *key;
1812
1813         key = hci_find_link_key(hdev, bdaddr);
1814         if (!key)
1815                 return -ENOENT;
1816
1817         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1818
1819         list_del(&key->list);
1820         kfree(key);
1821
1822         return 0;
1823 }
1824
1825 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1826 {
1827         struct smp_ltk *k, *tmp;
1828
1829         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1830                 if (bacmp(bdaddr, &k->bdaddr))
1831                         continue;
1832
1833                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1834
1835                 list_del(&k->list);
1836                 kfree(k);
1837         }
1838
1839         return 0;
1840 }
1841
1842 /* HCI command timeout: the controller failed to answer the last command */
1843 static void hci_cmd_timeout(unsigned long arg)
1844 {
1845         struct hci_dev *hdev = (void *) arg;
1846
1847         if (hdev->sent_cmd) {
1848                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1849                 u16 opcode = __le16_to_cpu(sent->opcode);
1850
1851                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1852         } else {
1853                 BT_ERR("%s command tx timeout", hdev->name);
1854         }
1855
1856         atomic_set(&hdev->cmd_cnt, 1);
1857         queue_work(hdev->workqueue, &hdev->cmd_work);
1858 }
1859
1860 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1861                                           bdaddr_t *bdaddr)
1862 {
1863         struct oob_data *data;
1864
1865         list_for_each_entry(data, &hdev->remote_oob_data, list)
1866                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1867                         return data;
1868
1869         return NULL;
1870 }
1871
1872 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1873 {
1874         struct oob_data *data;
1875
1876         data = hci_find_remote_oob_data(hdev, bdaddr);
1877         if (!data)
1878                 return -ENOENT;
1879
1880         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1881
1882         list_del(&data->list);
1883         kfree(data);
1884
1885         return 0;
1886 }
1887
1888 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1889 {
1890         struct oob_data *data, *n;
1891
1892         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1893                 list_del(&data->list);
1894                 kfree(data);
1895         }
1896
1897         return 0;
1898 }
1899
1900 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1901                             u8 *randomizer)
1902 {
1903         struct oob_data *data;
1904
1905         data = hci_find_remote_oob_data(hdev, bdaddr);
1906
1907         if (!data) {
1908                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1909                 if (!data)
1910                         return -ENOMEM;
1911
1912                 bacpy(&data->bdaddr, bdaddr);
1913                 list_add(&data->list, &hdev->remote_oob_data);
1914         }
1915
1916         memcpy(data->hash, hash, sizeof(data->hash));
1917         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1918
1919         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1920
1921         return 0;
1922 }
1923
1924 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1925 {
1926         struct bdaddr_list *b;
1927
1928         list_for_each_entry(b, &hdev->blacklist, list)
1929                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1930                         return b;
1931
1932         return NULL;
1933 }
1934
1935 int hci_blacklist_clear(struct hci_dev *hdev)
1936 {
1937         struct list_head *p, *n;
1938
1939         list_for_each_safe(p, n, &hdev->blacklist) {
1940                 struct bdaddr_list *b;
1941
1942                 b = list_entry(p, struct bdaddr_list, list);
1943
1944                 list_del(p);
1945                 kfree(b);
1946         }
1947
1948         return 0;
1949 }
1950
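/* Add bdaddr to the blacklist consulted when incoming connection
 * requests are filtered. BDADDR_ANY is rejected as an entry, and user
 * space is notified through mgmt.
 */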
1951 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1952 {
1953         struct bdaddr_list *entry;
1954
1955         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1956                 return -EBADF;
1957
1958         if (hci_blacklist_lookup(hdev, bdaddr))
1959                 return -EEXIST;
1960
1961         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1962         if (!entry)
1963                 return -ENOMEM;
1964
1965         bacpy(&entry->bdaddr, bdaddr);
1966
1967         list_add(&entry->list, &hdev->blacklist);
1968
1969         return mgmt_device_blocked(hdev, bdaddr, type);
1970 }
1971
1972 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1973 {
1974         struct bdaddr_list *entry;
1975
1976         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1977                 return hci_blacklist_clear(hdev);
1978
1979         entry = hci_blacklist_lookup(hdev, bdaddr);
1980         if (!entry)
1981                 return -ENOENT;
1982
1983         list_del(&entry->list);
1984         kfree(entry);
1985
1986         return mgmt_device_unblocked(hdev, bdaddr, type);
1987 }
1988
1989 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1990 {
1991         struct le_scan_params *param = (struct le_scan_params *) opt;
1992         struct hci_cp_le_set_scan_param cp;
1993
1994         memset(&cp, 0, sizeof(cp));
1995         cp.type = param->type;
1996         cp.interval = cpu_to_le16(param->interval);
1997         cp.window = cpu_to_le16(param->window);
1998
1999         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
2000 }
2001
2002 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
2003 {
2004         struct hci_cp_le_set_scan_enable cp;
2005
2006         memset(&cp, 0, sizeof(cp));
2007         cp.enable = LE_SCAN_ENABLE;
2008         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2009
2010         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2011 }
2012
2013 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
2014                           u16 window, int timeout)
2015 {
2016         long timeo = msecs_to_jiffies(3000);
2017         struct le_scan_params param;
2018         int err;
2019
2020         BT_DBG("%s", hdev->name);
2021
2022         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2023                 return -EINPROGRESS;
2024
2025         param.type = type;
2026         param.interval = interval;
2027         param.window = window;
2028
2029         hci_req_lock(hdev);
2030
2031         err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2032                              timeo);
2033         if (!err)
2034                 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
2035
2036         hci_req_unlock(hdev);
2037
2038         if (err < 0)
2039                 return err;
2040
2041         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2042                            timeout);
2043
2044         return 0;
2045 }
2046
2047 int hci_cancel_le_scan(struct hci_dev *hdev)
2048 {
2049         BT_DBG("%s", hdev->name);
2050
2051         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2052                 return -EALREADY;
2053
2054         if (cancel_delayed_work(&hdev->le_scan_disable)) {
2055                 struct hci_cp_le_set_scan_enable cp;
2056
2057                 /* Send HCI command to disable LE Scan */
2058                 memset(&cp, 0, sizeof(cp));
2059                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2060         }
2061
2062         return 0;
2063 }
2064
2065 static void le_scan_disable_work(struct work_struct *work)
2066 {
2067         struct hci_dev *hdev = container_of(work, struct hci_dev,
2068                                             le_scan_disable.work);
2069         struct hci_cp_le_set_scan_enable cp;
2070
2071         BT_DBG("%s", hdev->name);
2072
2073         memset(&cp, 0, sizeof(cp));
2074
2075         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2076 }
2077
2078 static void le_scan_work(struct work_struct *work)
2079 {
2080         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2081         struct le_scan_params *param = &hdev->le_scan_params;
2082
2083         BT_DBG("%s", hdev->name);
2084
2085         hci_do_le_scan(hdev, param->type, param->interval, param->window,
2086                        param->timeout);
2087 }
2088
2089 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
2090                 int timeout)
2091 {
2092         struct le_scan_params *param = &hdev->le_scan_params;
2093
2094         BT_DBG("%s", hdev->name);
2095
2096         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2097                 return -ENOTSUPP;
2098
2099         if (work_busy(&hdev->le_scan))
2100                 return -EINPROGRESS;
2101
2102         param->type = type;
2103         param->interval = interval;
2104         param->window = window;
2105         param->timeout = timeout;
2106
2107         queue_work(system_long_wq, &hdev->le_scan);
2108
2109         return 0;
2110 }
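/* An illustrative call, with interval and window given in 0.625 ms
 * units as defined by the LE Set Scan Parameters command, and the
 * timeout in jiffies (the values below are an example, not a
 * recommendation):
 *
 *	err = hci_le_scan(hdev, LE_SCAN_ACTIVE, 0x0010, 0x0010,
 *			  msecs_to_jiffies(10240));
 */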
2111
2112 /* Alloc HCI device */
2113 struct hci_dev *hci_alloc_dev(void)
2114 {
2115         struct hci_dev *hdev;
2116
2117         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2118         if (!hdev)
2119                 return NULL;
2120
2121         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2122         hdev->esco_type = (ESCO_HV1);
2123         hdev->link_mode = (HCI_LM_ACCEPT);
2124         hdev->io_capability = 0x03; /* No Input No Output */
2125         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2126         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2127
2128         hdev->sniff_max_interval = 800;
2129         hdev->sniff_min_interval = 80;
2130
2131         mutex_init(&hdev->lock);
2132         mutex_init(&hdev->req_lock);
2133
2134         INIT_LIST_HEAD(&hdev->mgmt_pending);
2135         INIT_LIST_HEAD(&hdev->blacklist);
2136         INIT_LIST_HEAD(&hdev->uuids);
2137         INIT_LIST_HEAD(&hdev->link_keys);
2138         INIT_LIST_HEAD(&hdev->long_term_keys);
2139         INIT_LIST_HEAD(&hdev->remote_oob_data);
2140         INIT_LIST_HEAD(&hdev->conn_hash.list);
2141
2142         INIT_WORK(&hdev->rx_work, hci_rx_work);
2143         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2144         INIT_WORK(&hdev->tx_work, hci_tx_work);
2145         INIT_WORK(&hdev->power_on, hci_power_on);
2146         INIT_WORK(&hdev->le_scan, le_scan_work);
2147
2148         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2149         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2150         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2151
2152         skb_queue_head_init(&hdev->rx_q);
2153         skb_queue_head_init(&hdev->cmd_q);
2154         skb_queue_head_init(&hdev->raw_q);
2155
2156         init_waitqueue_head(&hdev->req_wait_q);
2157
2158         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2159
2160         hci_init_sysfs(hdev);
2161         discovery_init(hdev);
2162
2163         return hdev;
2164 }
2165 EXPORT_SYMBOL(hci_alloc_dev);
2166
2167 /* Free HCI device */
2168 void hci_free_dev(struct hci_dev *hdev)
2169 {
2170         /* hdev will be freed via the device release callback */
2171         put_device(&hdev->dev);
2172 }
2173 EXPORT_SYMBOL(hci_free_dev);
2174
2175 /* Register HCI device */
2176 int hci_register_dev(struct hci_dev *hdev)
2177 {
2178         int id, error;
2179
2180         if (!hdev->open || !hdev->close)
2181                 return -EINVAL;
2182
2183         /* Do not allow HCI_AMP devices to register at index 0,
2184          * so the index can be used as the AMP controller ID.
2185          */
2186         switch (hdev->dev_type) {
2187         case HCI_BREDR:
2188                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2189                 break;
2190         case HCI_AMP:
2191                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2192                 break;
2193         default:
2194                 return -EINVAL;
2195         }
2196
2197         if (id < 0)
2198                 return id;
2199
2200         sprintf(hdev->name, "hci%d", id);
2201         hdev->id = id;
2202
2203         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2204
2205         write_lock(&hci_dev_list_lock);
2206         list_add(&hdev->list, &hci_dev_list);
2207         write_unlock(&hci_dev_list_lock);
2208
2209         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2210                                           WQ_MEM_RECLAIM, 1);
2211         if (!hdev->workqueue) {
2212                 error = -ENOMEM;
2213                 goto err;
2214         }
2215
2216         hdev->req_workqueue = alloc_workqueue(hdev->name,
2217                                               WQ_HIGHPRI | WQ_UNBOUND |
2218                                               WQ_MEM_RECLAIM, 1);
2219         if (!hdev->req_workqueue) {
2220                 destroy_workqueue(hdev->workqueue);
2221                 error = -ENOMEM;
2222                 goto err;
2223         }
2224
2225         error = hci_add_sysfs(hdev);
2226         if (error < 0)
2227                 goto err_wqueue;
2228
2229         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2230                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2231                                     hdev);
2232         if (hdev->rfkill) {
2233                 if (rfkill_register(hdev->rfkill) < 0) {
2234                         rfkill_destroy(hdev->rfkill);
2235                         hdev->rfkill = NULL;
2236                 }
2237         }
2238
2239         set_bit(HCI_SETUP, &hdev->dev_flags);
2240
2241         if (hdev->dev_type != HCI_AMP)
2242                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2243
2244         hci_notify(hdev, HCI_DEV_REG);
2245         hci_dev_hold(hdev);
2246
2247         queue_work(hdev->req_workqueue, &hdev->power_on);
2248
2249         return id;
2250
2251 err_wqueue:
2252         destroy_workqueue(hdev->workqueue);
2253         destroy_workqueue(hdev->req_workqueue);
2254 err:
2255         ida_simple_remove(&hci_index_ida, hdev->id);
2256         write_lock(&hci_dev_list_lock);
2257         list_del(&hdev->list);
2258         write_unlock(&hci_dev_list_lock);
2259
2260         return error;
2261 }
2262 EXPORT_SYMBOL(hci_register_dev);
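/* Sketch of how a transport driver typically allocates and registers a
 * controller (xx_open, xx_close and xx_send are hypothetical driver
 * callbacks; open and close are mandatory, send is used by the TX path):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = xx_open;
 *	hdev->close = xx_close;
 *	hdev->send  = xx_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */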
2263
2264 /* Unregister HCI device */
2265 void hci_unregister_dev(struct hci_dev *hdev)
2266 {
2267         int i, id;
2268
2269         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2270
2271         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2272
2273         id = hdev->id;
2274
2275         write_lock(&hci_dev_list_lock);
2276         list_del(&hdev->list);
2277         write_unlock(&hci_dev_list_lock);
2278
2279         hci_dev_do_close(hdev);
2280
2281         for (i = 0; i < NUM_REASSEMBLY; i++)
2282                 kfree_skb(hdev->reassembly[i]);
2283
2284         cancel_work_sync(&hdev->power_on);
2285
2286         if (!test_bit(HCI_INIT, &hdev->flags) &&
2287             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2288                 hci_dev_lock(hdev);
2289                 mgmt_index_removed(hdev);
2290                 hci_dev_unlock(hdev);
2291         }
2292
2293         /* mgmt_index_removed should take care of emptying the
2294          * pending list */
2295         BUG_ON(!list_empty(&hdev->mgmt_pending));
2296
2297         hci_notify(hdev, HCI_DEV_UNREG);
2298
2299         if (hdev->rfkill) {
2300                 rfkill_unregister(hdev->rfkill);
2301                 rfkill_destroy(hdev->rfkill);
2302         }
2303
2304         hci_del_sysfs(hdev);
2305
2306         destroy_workqueue(hdev->workqueue);
2307         destroy_workqueue(hdev->req_workqueue);
2308
2309         hci_dev_lock(hdev);
2310         hci_blacklist_clear(hdev);
2311         hci_uuids_clear(hdev);
2312         hci_link_keys_clear(hdev);
2313         hci_smp_ltks_clear(hdev);
2314         hci_remote_oob_data_clear(hdev);
2315         hci_dev_unlock(hdev);
2316
2317         hci_dev_put(hdev);
2318
2319         ida_simple_remove(&hci_index_ida, id);
2320 }
2321 EXPORT_SYMBOL(hci_unregister_dev);
2322
2323 /* Suspend HCI device */
2324 int hci_suspend_dev(struct hci_dev *hdev)
2325 {
2326         hci_notify(hdev, HCI_DEV_SUSPEND);
2327         return 0;
2328 }
2329 EXPORT_SYMBOL(hci_suspend_dev);
2330
2331 /* Resume HCI device */
2332 int hci_resume_dev(struct hci_dev *hdev)
2333 {
2334         hci_notify(hdev, HCI_DEV_RESUME);
2335         return 0;
2336 }
2337 EXPORT_SYMBOL(hci_resume_dev);
2338
2339 /* Receive frame from HCI drivers */
2340 int hci_recv_frame(struct sk_buff *skb)
2341 {
2342         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2343         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2344                       && !test_bit(HCI_INIT, &hdev->flags))) {
2345                 kfree_skb(skb);
2346                 return -ENXIO;
2347         }
2348
2349         /* Incoming skb */
2350         bt_cb(skb)->incoming = 1;
2351
2352         /* Time stamp */
2353         __net_timestamp(skb);
2354
2355         skb_queue_tail(&hdev->rx_q, skb);
2356         queue_work(hdev->workqueue, &hdev->rx_work);
2357
2358         return 0;
2359 }
2360 EXPORT_SYMBOL(hci_recv_frame);
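/* Sketch of how a driver hands a complete frame to the core; the driver
 * is expected to have set the packet type and the owning device first:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 */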
2361
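/* Incrementally rebuild one HCI packet from an arbitrary stream of
 * bytes. Returns how many input bytes were not consumed (non-zero only
 * when a frame completed mid-buffer and was handed to hci_recv_frame()),
 * or a negative error.
 */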
2362 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2363                           int count, __u8 index)
2364 {
2365         int len = 0;
2366         int hlen = 0;
2367         int remain = count;
2368         struct sk_buff *skb;
2369         struct bt_skb_cb *scb;
2370
2371         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2372             index >= NUM_REASSEMBLY)
2373                 return -EILSEQ;
2374
2375         skb = hdev->reassembly[index];
2376
2377         if (!skb) {
2378                 switch (type) {
2379                 case HCI_ACLDATA_PKT:
2380                         len = HCI_MAX_FRAME_SIZE;
2381                         hlen = HCI_ACL_HDR_SIZE;
2382                         break;
2383                 case HCI_EVENT_PKT:
2384                         len = HCI_MAX_EVENT_SIZE;
2385                         hlen = HCI_EVENT_HDR_SIZE;
2386                         break;
2387                 case HCI_SCODATA_PKT:
2388                         len = HCI_MAX_SCO_SIZE;
2389                         hlen = HCI_SCO_HDR_SIZE;
2390                         break;
2391                 }
2392
2393                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2394                 if (!skb)
2395                         return -ENOMEM;
2396
2397                 scb = (void *) skb->cb;
2398                 scb->expect = hlen;
2399                 scb->pkt_type = type;
2400
2401                 skb->dev = (void *) hdev;
2402                 hdev->reassembly[index] = skb;
2403         }
2404
2405         while (count) {
2406                 scb = (void *) skb->cb;
2407                 len = min_t(uint, scb->expect, count);
2408
2409                 memcpy(skb_put(skb, len), data, len);
2410
2411                 count -= len;
2412                 data += len;
2413                 scb->expect -= len;
2414                 remain = count;
2415
2416                 switch (type) {
2417                 case HCI_EVENT_PKT:
2418                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2419                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2420                                 scb->expect = h->plen;
2421
2422                                 if (skb_tailroom(skb) < scb->expect) {
2423                                         kfree_skb(skb);
2424                                         hdev->reassembly[index] = NULL;
2425                                         return -ENOMEM;
2426                                 }
2427                         }
2428                         break;
2429
2430                 case HCI_ACLDATA_PKT:
2431                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2432                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2433                                 scb->expect = __le16_to_cpu(h->dlen);
2434
2435                                 if (skb_tailroom(skb) < scb->expect) {
2436                                         kfree_skb(skb);
2437                                         hdev->reassembly[index] = NULL;
2438                                         return -ENOMEM;
2439                                 }
2440                         }
2441                         break;
2442
2443                 case HCI_SCODATA_PKT:
2444                         if (skb->len == HCI_SCO_HDR_SIZE) {
2445                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2446                                 scb->expect = h->dlen;
2447
2448                                 if (skb_tailroom(skb) < scb->expect) {
2449                                         kfree_skb(skb);
2450                                         hdev->reassembly[index] = NULL;
2451                                         return -ENOMEM;
2452                                 }
2453                         }
2454                         break;
2455                 }
2456
2457                 if (scb->expect == 0) {
2458                         /* Complete frame */
2459
2460                         bt_cb(skb)->pkt_type = type;
2461                         hci_recv_frame(skb);
2462
2463                         hdev->reassembly[index] = NULL;
2464                         return remain;
2465                 }
2466         }
2467
2468         return remain;
2469 }
2470
2471 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2472 {
2473         int rem = 0;
2474
2475         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2476                 return -EILSEQ;
2477
2478         while (count) {
2479                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2480                 if (rem < 0)
2481                         return rem;
2482
2483                 data += (count - rem);
2484                 count = rem;
2485         }
2486
2487         return rem;
2488 }
2489 EXPORT_SYMBOL(hci_recv_fragment);
2490
2491 #define STREAM_REASSEMBLY 0
2492
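/* Reassemble packets from a raw byte stream in which each frame is
 * prefixed with its packet type, as in UART (H4) style transports.
 */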
2493 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2494 {
2495         int type;
2496         int rem = 0;
2497
2498         while (count) {
2499                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2500
2501                 if (!skb) {
2502                         struct { char type; } *pkt;
2503
2504                         /* Start of the frame */
2505                         pkt = data;
2506                         type = pkt->type;
2507
2508                         data++;
2509                         count--;
2510                 } else
2511                         type = bt_cb(skb)->pkt_type;
2512
2513                 rem = hci_reassembly(hdev, type, data, count,
2514                                      STREAM_REASSEMBLY);
2515                 if (rem < 0)
2516                         return rem;
2517
2518                 data += (count - rem);
2519                 count = rem;
2520         }
2521
2522         return rem;
2523 }
2524 EXPORT_SYMBOL(hci_recv_stream_fragment);
2525
2526 /* ---- Interface to upper protocols ---- */
2527
2528 int hci_register_cb(struct hci_cb *cb)
2529 {
2530         BT_DBG("%p name %s", cb, cb->name);
2531
2532         write_lock(&hci_cb_list_lock);
2533         list_add(&cb->list, &hci_cb_list);
2534         write_unlock(&hci_cb_list_lock);
2535
2536         return 0;
2537 }
2538 EXPORT_SYMBOL(hci_register_cb);
2539
2540 int hci_unregister_cb(struct hci_cb *cb)
2541 {
2542         BT_DBG("%p name %s", cb, cb->name);
2543
2544         write_lock(&hci_cb_list_lock);
2545         list_del(&cb->list);
2546         write_unlock(&hci_cb_list_lock);
2547
2548         return 0;
2549 }
2550 EXPORT_SYMBOL(hci_unregister_cb);
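/* Sketch of a registration from an upper protocol (the xx_* names are
 * hypothetical; struct hci_cb also offers key_change_cfm and
 * role_switch_cfm hooks):
 *
 *	static struct hci_cb xx_cb = {
 *		.name         = "xx",
 *		.security_cfm = xx_security_cfm,
 *	};
 *
 *	hci_register_cb(&xx_cb);
 */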
2551
2552 static int hci_send_frame(struct sk_buff *skb)
2553 {
2554         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2555
2556         if (!hdev) {
2557                 kfree_skb(skb);
2558                 return -ENODEV;
2559         }
2560
2561         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2562
2563         /* Time stamp */
2564         __net_timestamp(skb);
2565
2566         /* Send copy to monitor */
2567         hci_send_to_monitor(hdev, skb);
2568
2569         if (atomic_read(&hdev->promisc)) {
2570                 /* Send copy to the sockets */
2571                 hci_send_to_sock(hdev, skb);
2572         }
2573
2574         /* Get rid of skb owner, prior to sending to the driver. */
2575         skb_orphan(skb);
2576
2577         return hdev->send(skb);
2578 }
2579
2580 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2581 {
2582         skb_queue_head_init(&req->cmd_q);
2583         req->hdev = hdev;
2584         req->err = 0;
2585 }
2586
2587 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2588 {
2589         struct hci_dev *hdev = req->hdev;
2590         struct sk_buff *skb;
2591         unsigned long flags;
2592
2593         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2594
2595         /* If an error occurred during request building, remove all HCI
2596          * commands queued on the HCI request queue.
2597          */
2598         if (req->err) {
2599                 skb_queue_purge(&req->cmd_q);
2600                 return req->err;
2601         }
2602
2603         /* Do not allow empty requests */
2604         if (skb_queue_empty(&req->cmd_q))
2605                 return -ENODATA;
2606
2607         skb = skb_peek_tail(&req->cmd_q);
2608         bt_cb(skb)->req.complete = complete;
2609
2610         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2611         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2612         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2613
2614         queue_work(hdev->workqueue, &hdev->cmd_work);
2615
2616         return 0;
2617 }
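/* Sketch of a request built with this API (xx_complete is a
 * hypothetical hci_req_complete_t callback):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, xx_complete);
 */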
2618
2619 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2620                                        u32 plen, const void *param)
2621 {
2622         int len = HCI_COMMAND_HDR_SIZE + plen;
2623         struct hci_command_hdr *hdr;
2624         struct sk_buff *skb;
2625
2626         skb = bt_skb_alloc(len, GFP_ATOMIC);
2627         if (!skb)
2628                 return NULL;
2629
2630         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2631         hdr->opcode = cpu_to_le16(opcode);
2632         hdr->plen   = plen;
2633
2634         if (plen)
2635                 memcpy(skb_put(skb, plen), param, plen);
2636
2637         BT_DBG("skb len %d", skb->len);
2638
2639         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2640         skb->dev = (void *) hdev;
2641
2642         return skb;
2643 }
2644
2645 /* Send HCI command */
2646 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2647                  const void *param)
2648 {
2649         struct sk_buff *skb;
2650
2651         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2652
2653         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2654         if (!skb) {
2655                 BT_ERR("%s no memory for command", hdev->name);
2656                 return -ENOMEM;
2657         }
2658
2659         /* Stand-alone HCI commands must be flagged as
2660          * single-command requests.
2661          */
2662         bt_cb(skb)->req.start = true;
2663
2664         skb_queue_tail(&hdev->cmd_q, skb);
2665         queue_work(hdev->workqueue, &hdev->cmd_work);
2666
2667         return 0;
2668 }
2669
2670 /* Queue a command to an asynchronous HCI request */
2671 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2672                     const void *param, u8 event)
2673 {
2674         struct hci_dev *hdev = req->hdev;
2675         struct sk_buff *skb;
2676
2677         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2678
2679         /* If an error occurred during request building, there is no point in
2680          * queueing the HCI command. We can simply return.
2681          */
2682         if (req->err)
2683                 return;
2684
2685         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2686         if (!skb) {
2687                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2688                        hdev->name, opcode);
2689                 req->err = -ENOMEM;
2690                 return;
2691         }
2692
2693         if (skb_queue_empty(&req->cmd_q))
2694                 bt_cb(skb)->req.start = true;
2695
2696         bt_cb(skb)->req.event = event;
2697
2698         skb_queue_tail(&req->cmd_q, skb);
2699 }
2700
2701 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2702                  const void *param)
2703 {
2704         hci_req_add_ev(req, opcode, plen, param, 0);
2705 }
2706
2707 /* Get data from the previously sent command */
2708 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2709 {
2710         struct hci_command_hdr *hdr;
2711
2712         if (!hdev->sent_cmd)
2713                 return NULL;
2714
2715         hdr = (void *) hdev->sent_cmd->data;
2716
2717         if (hdr->opcode != cpu_to_le16(opcode))
2718                 return NULL;
2719
2720         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2721
2722         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2723 }
2724
2725 /* Send ACL data */
2726 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2727 {
2728         struct hci_acl_hdr *hdr;
2729         int len = skb->len;
2730
2731         skb_push(skb, HCI_ACL_HDR_SIZE);
2732         skb_reset_transport_header(skb);
2733         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2734         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2735         hdr->dlen   = cpu_to_le16(len);
2736 }
2737
2738 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2739                           struct sk_buff *skb, __u16 flags)
2740 {
2741         struct hci_conn *conn = chan->conn;
2742         struct hci_dev *hdev = conn->hdev;
2743         struct sk_buff *list;
2744
2745         skb->len = skb_headlen(skb);
2746         skb->data_len = 0;
2747
2748         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2749
2750         switch (hdev->dev_type) {
2751         case HCI_BREDR:
2752                 hci_add_acl_hdr(skb, conn->handle, flags);
2753                 break;
2754         case HCI_AMP:
2755                 hci_add_acl_hdr(skb, chan->handle, flags);
2756                 break;
2757         default:
2758                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2759                 return;
2760         }
2761
2762         list = skb_shinfo(skb)->frag_list;
2763         if (!list) {
2764                 /* Non-fragmented */
2765                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2766
2767                 skb_queue_tail(queue, skb);
2768         } else {
2769                 /* Fragmented */
2770                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2771
2772                 skb_shinfo(skb)->frag_list = NULL;
2773
2774                 /* Queue all fragments atomically */
2775                 spin_lock(&queue->lock);
2776
2777                 __skb_queue_tail(queue, skb);
2778
2779                 flags &= ~ACL_START;
2780                 flags |= ACL_CONT;
2781                 do {
2782                         skb = list; list = list->next;
2783
2784                         skb->dev = (void *) hdev;
2785                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2786                         hci_add_acl_hdr(skb, conn->handle, flags);
2787
2788                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2789
2790                         __skb_queue_tail(queue, skb);
2791                 } while (list);
2792
2793                 spin_unlock(&queue->lock);
2794         }
2795 }
2796
2797 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2798 {
2799         struct hci_dev *hdev = chan->conn->hdev;
2800
2801         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2802
2803         skb->dev = (void *) hdev;
2804
2805         hci_queue_acl(chan, &chan->data_q, skb, flags);
2806
2807         queue_work(hdev->workqueue, &hdev->tx_work);
2808 }
2809
2810 /* Send SCO data */
2811 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2812 {
2813         struct hci_dev *hdev = conn->hdev;
2814         struct hci_sco_hdr hdr;
2815
2816         BT_DBG("%s len %d", hdev->name, skb->len);
2817
2818         hdr.handle = cpu_to_le16(conn->handle);
2819         hdr.dlen   = skb->len;
2820
2821         skb_push(skb, HCI_SCO_HDR_SIZE);
2822         skb_reset_transport_header(skb);
2823         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2824
2825         skb->dev = (void *) hdev;
2826         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2827
2828         skb_queue_tail(&conn->data_q, skb);
2829         queue_work(hdev->workqueue, &hdev->tx_work);
2830 }
2831
2832 /* ---- HCI TX task (outgoing data) ---- */
2833
2834 /* HCI Connection scheduler */
2835 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2836                                      int *quote)
2837 {
2838         struct hci_conn_hash *h = &hdev->conn_hash;
2839         struct hci_conn *conn = NULL, *c;
2840         unsigned int num = 0, min = ~0;
2841
2842         /* We don't have to lock the device here. Connections are always
2843          * added and removed with the TX task disabled. */
2844
2845         rcu_read_lock();
2846
2847         list_for_each_entry_rcu(c, &h->list, list) {
2848                 if (c->type != type || skb_queue_empty(&c->data_q))
2849                         continue;
2850
2851                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2852                         continue;
2853
2854                 num++;
2855
2856                 if (c->sent < min) {
2857                         min  = c->sent;
2858                         conn = c;
2859                 }
2860
2861                 if (hci_conn_num(hdev, type) == num)
2862                         break;
2863         }
2864
2865         rcu_read_unlock();
2866
2867         if (conn) {
2868                 int cnt, q;
2869
2870                 switch (conn->type) {
2871                 case ACL_LINK:
2872                         cnt = hdev->acl_cnt;
2873                         break;
2874                 case SCO_LINK:
2875                 case ESCO_LINK:
2876                         cnt = hdev->sco_cnt;
2877                         break;
2878                 case LE_LINK:
2879                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2880                         break;
2881                 default:
2882                         cnt = 0;
2883                         BT_ERR("Unknown link type");
2884                 }
2885
2886                 q = cnt / num;
2887                 *quote = q ? q : 1;
2888         } else
2889                 *quote = 0;
2890
2891         BT_DBG("conn %p quote %d", conn, *quote);
2892         return conn;
2893 }
2894
2895 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2896 {
2897         struct hci_conn_hash *h = &hdev->conn_hash;
2898         struct hci_conn *c;
2899
2900         BT_ERR("%s link tx timeout", hdev->name);
2901
2902         rcu_read_lock();
2903
2904         /* Kill stalled connections */
2905         list_for_each_entry_rcu(c, &h->list, list) {
2906                 if (c->type == type && c->sent) {
2907                         BT_ERR("%s killing stalled connection %pMR",
2908                                hdev->name, &c->dst);
2909                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2910                 }
2911         }
2912
2913         rcu_read_unlock();
2914 }
2915
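/* Channel scheduler: among the channels whose queued data has the
 * highest priority, pick the connection with the fewest packets in
 * flight and grant it a quota of the free controller buffers.
 */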
2916 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2917                                       int *quote)
2918 {
2919         struct hci_conn_hash *h = &hdev->conn_hash;
2920         struct hci_chan *chan = NULL;
2921         unsigned int num = 0, min = ~0, cur_prio = 0;
2922         struct hci_conn *conn;
2923         int cnt, q, conn_num = 0;
2924
2925         BT_DBG("%s", hdev->name);
2926
2927         rcu_read_lock();
2928
2929         list_for_each_entry_rcu(conn, &h->list, list) {
2930                 struct hci_chan *tmp;
2931
2932                 if (conn->type != type)
2933                         continue;
2934
2935                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2936                         continue;
2937
2938                 conn_num++;
2939
2940                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2941                         struct sk_buff *skb;
2942
2943                         if (skb_queue_empty(&tmp->data_q))
2944                                 continue;
2945
2946                         skb = skb_peek(&tmp->data_q);
2947                         if (skb->priority < cur_prio)
2948                                 continue;
2949
2950                         if (skb->priority > cur_prio) {
2951                                 num = 0;
2952                                 min = ~0;
2953                                 cur_prio = skb->priority;
2954                         }
2955
2956                         num++;
2957
2958                         if (conn->sent < min) {
2959                                 min  = conn->sent;
2960                                 chan = tmp;
2961                         }
2962                 }
2963
2964                 if (hci_conn_num(hdev, type) == conn_num)
2965                         break;
2966         }
2967
2968         rcu_read_unlock();
2969
2970         if (!chan)
2971                 return NULL;
2972
2973         switch (chan->conn->type) {
2974         case ACL_LINK:
2975                 cnt = hdev->acl_cnt;
2976                 break;
2977         case AMP_LINK:
2978                 cnt = hdev->block_cnt;
2979                 break;
2980         case SCO_LINK:
2981         case ESCO_LINK:
2982                 cnt = hdev->sco_cnt;
2983                 break;
2984         case LE_LINK:
2985                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2986                 break;
2987         default:
2988                 cnt = 0;
2989                 BT_ERR("Unknown link type");
2990         }
2991
2992         q = cnt / num;
2993         *quote = q ? q : 1;
2994         BT_DBG("chan %p quote %d", chan, *quote);
2995         return chan;
2996 }
2997
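/* Anti-starvation pass: reset the sent counter of channels that were
 * just serviced and promote the head of every idle channel's queue
 * toward the highest priority so it gets picked in the next round.
 */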
2998 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2999 {
3000         struct hci_conn_hash *h = &hdev->conn_hash;
3001         struct hci_conn *conn;
3002         int num = 0;
3003
3004         BT_DBG("%s", hdev->name);
3005
3006         rcu_read_lock();
3007
3008         list_for_each_entry_rcu(conn, &h->list, list) {
3009                 struct hci_chan *chan;
3010
3011                 if (conn->type != type)
3012                         continue;
3013
3014                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3015                         continue;
3016
3017                 num++;
3018
3019                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3020                         struct sk_buff *skb;
3021
3022                         if (chan->sent) {
3023                                 chan->sent = 0;
3024                                 continue;
3025                         }
3026
3027                         if (skb_queue_empty(&chan->data_q))
3028                                 continue;
3029
3030                         skb = skb_peek(&chan->data_q);
3031                         if (skb->priority >= HCI_PRIO_MAX - 1)
3032                                 continue;
3033
3034                         skb->priority = HCI_PRIO_MAX - 1;
3035
3036                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3037                                skb->priority);
3038                 }
3039
3040                 if (hci_conn_num(hdev, type) == num)
3041                         break;
3042         }
3043
3044         rcu_read_unlock();
3045
3046 }
3047
3048 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3049 {
3050         /* Calculate count of blocks used by this packet */
3051         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3052 }
3053
3054 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3055 {
3056         if (!test_bit(HCI_RAW, &hdev->flags)) {
3057                 /* ACL tx timeout must be longer than the maximum
3058                  * link supervision timeout (40.9 seconds) */
3059                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3060                                        HCI_ACL_TX_TIMEOUT))
3061                         hci_link_tx_to(hdev, ACL_LINK);
3062         }
3063 }
3064
3065 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3066 {
3067         unsigned int cnt = hdev->acl_cnt;
3068         struct hci_chan *chan;
3069         struct sk_buff *skb;
3070         int quote;
3071
3072         __check_timeout(hdev, cnt);
3073
3074         while (hdev->acl_cnt &&
3075                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3076                 u32 priority = (skb_peek(&chan->data_q))->priority;
3077                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3078                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3079                                skb->len, skb->priority);
3080
3081                         /* Stop if priority has changed */
3082                         if (skb->priority < priority)
3083                                 break;
3084
3085                         skb = skb_dequeue(&chan->data_q);
3086
3087                         hci_conn_enter_active_mode(chan->conn,
3088                                                    bt_cb(skb)->force_active);
3089
3090                         hci_send_frame(skb);
3091                         hdev->acl_last_tx = jiffies;
3092
3093                         hdev->acl_cnt--;
3094                         chan->sent++;
3095                         chan->conn->sent++;
3096                 }
3097         }
3098
3099         if (cnt != hdev->acl_cnt)
3100                 hci_prio_recalculate(hdev, ACL_LINK);
3101 }
3102
3103 static void hci_sched_acl_blk(struct hci_dev *hdev)
3104 {
3105         unsigned int cnt = hdev->block_cnt;
3106         struct hci_chan *chan;
3107         struct sk_buff *skb;
3108         int quote;
3109         u8 type;
3110
3111         __check_timeout(hdev, cnt);
3112
3113         BT_DBG("%s", hdev->name);
3114
3115         if (hdev->dev_type == HCI_AMP)
3116                 type = AMP_LINK;
3117         else
3118                 type = ACL_LINK;
3119
3120         while (hdev->block_cnt > 0 &&
3121                (chan = hci_chan_sent(hdev, type, &quote))) {
3122                 u32 priority = (skb_peek(&chan->data_q))->priority;
3123                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3124                         int blocks;
3125
3126                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3127                                skb->len, skb->priority);
3128
3129                         /* Stop if priority has changed */
3130                         if (skb->priority < priority)
3131                                 break;
3132
3133                         skb = skb_dequeue(&chan->data_q);
3134
3135                         blocks = __get_blocks(hdev, skb);
3136                         if (blocks > hdev->block_cnt)
3137                                 return;
3138
3139                         hci_conn_enter_active_mode(chan->conn,
3140                                                    bt_cb(skb)->force_active);
3141
3142                         hci_send_frame(skb);
3143                         hdev->acl_last_tx = jiffies;
3144
3145                         hdev->block_cnt -= blocks;
3146                         quote -= blocks;
3147
3148                         chan->sent += blocks;
3149                         chan->conn->sent += blocks;
3150                 }
3151         }
3152
3153         if (cnt != hdev->block_cnt)
3154                 hci_prio_recalculate(hdev, type);
3155 }
3156
3157 static void hci_sched_acl(struct hci_dev *hdev)
3158 {
3159         BT_DBG("%s", hdev->name);
3160
3161         /* No ACL links on a BR/EDR controller: nothing to schedule */
3162         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3163                 return;
3164
3165         /* No AMP links on an AMP controller: nothing to schedule */
3166         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3167                 return;
3168
3169         switch (hdev->flow_ctl_mode) {
3170         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3171                 hci_sched_acl_pkt(hdev);
3172                 break;
3173
3174         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3175                 hci_sched_acl_blk(hdev);
3176                 break;
3177         }
3178 }
3179
3180 /* Schedule SCO */
3181 static void hci_sched_sco(struct hci_dev *hdev)
3182 {
3183         struct hci_conn *conn;
3184         struct sk_buff *skb;
3185         int quote;
3186
3187         BT_DBG("%s", hdev->name);
3188
3189         if (!hci_conn_num(hdev, SCO_LINK))
3190                 return;
3191
3192         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3193                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3194                         BT_DBG("skb %p len %d", skb, skb->len);
3195                         hci_send_frame(skb);
3196
3197                         conn->sent++;
3198                         if (conn->sent == ~0)
3199                                 conn->sent = 0;
3200                 }
3201         }
3202 }
3203
3204 static void hci_sched_esco(struct hci_dev *hdev)
3205 {
3206         struct hci_conn *conn;
3207         struct sk_buff *skb;
3208         int quote;
3209
3210         BT_DBG("%s", hdev->name);
3211
3212         if (!hci_conn_num(hdev, ESCO_LINK))
3213                 return;
3214
3215         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3216                                                      &quote))) {
3217                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3218                         BT_DBG("skb %p len %d", skb, skb->len);
3219                         hci_send_frame(skb);
3220
3221                         conn->sent++;
3222                         if (conn->sent == ~0)
3223                                 conn->sent = 0;
3224                 }
3225         }
3226 }
3227
3228 static void hci_sched_le(struct hci_dev *hdev)
3229 {
3230         struct hci_chan *chan;
3231         struct sk_buff *skb;
3232         int quote, cnt, tmp;
3233
3234         BT_DBG("%s", hdev->name);
3235
3236         if (!hci_conn_num(hdev, LE_LINK))
3237                 return;
3238
3239         if (!test_bit(HCI_RAW, &hdev->flags)) {
3240                 /* LE tx timeout must be longer than the maximum
3241                  * link supervision timeout (40.9 seconds) */
3242                 if (!hdev->le_cnt && hdev->le_pkts &&
3243                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3244                         hci_link_tx_to(hdev, LE_LINK);
3245         }
3246
3247         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3248         tmp = cnt;
3249         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3250                 u32 priority = (skb_peek(&chan->data_q))->priority;
3251                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3252                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3253                                skb->len, skb->priority);
3254
3255                         /* Stop if priority has changed */
3256                         if (skb->priority < priority)
3257                                 break;
3258
3259                         skb = skb_dequeue(&chan->data_q);
3260
3261                         hci_send_frame(skb);
3262                         hdev->le_last_tx = jiffies;
3263
3264                         cnt--;
3265                         chan->sent++;
3266                         chan->conn->sent++;
3267                 }
3268         }
3269
3270         if (hdev->le_pkts)
3271                 hdev->le_cnt = cnt;
3272         else
3273                 hdev->acl_cnt = cnt;
3274
3275         if (cnt != tmp)
3276                 hci_prio_recalculate(hdev, LE_LINK);
3277 }
3278
3279 static void hci_tx_work(struct work_struct *work)
3280 {
3281         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3282         struct sk_buff *skb;
3283
3284         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3285                hdev->sco_cnt, hdev->le_cnt);
3286
3287         /* Schedule queues and send pending frames to the HCI driver */
3288
3289         hci_sched_acl(hdev);
3290
3291         hci_sched_sco(hdev);
3292
3293         hci_sched_esco(hdev);
3294
3295         hci_sched_le(hdev);
3296
3297         /* Send any queued raw (unknown type) packets */
3298         while ((skb = skb_dequeue(&hdev->raw_q)))
3299                 hci_send_frame(skb);
3300 }
3301
3302 /* ----- HCI RX task (incoming data processing) ----- */
3303
3304 /* ACL data packet */
3305 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3306 {
3307         struct hci_acl_hdr *hdr = (void *) skb->data;
3308         struct hci_conn *conn;
3309         __u16 handle, flags;
3310
3311         skb_pull(skb, HCI_ACL_HDR_SIZE);
3312
3313         handle = __le16_to_cpu(hdr->handle);
3314         flags  = hci_flags(handle);
3315         handle = hci_handle(handle);
3316
3317         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3318                handle, flags);
3319
3320         hdev->stat.acl_rx++;
3321
3322         hci_dev_lock(hdev);
3323         conn = hci_conn_hash_lookup_handle(hdev, handle);
3324         hci_dev_unlock(hdev);
3325
3326         if (conn) {
3327                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3328
3329                 /* Send to upper protocol */
3330                 l2cap_recv_acldata(conn, skb, flags);
3331                 return;
3332         } else {
3333                 BT_ERR("%s ACL packet for unknown connection handle %d",
3334                        hdev->name, handle);
3335         }
3336
3337         kfree_skb(skb);
3338 }
3339
3340 /* SCO data packet */
3341 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3342 {
3343         struct hci_sco_hdr *hdr = (void *) skb->data;
3344         struct hci_conn *conn;
3345         __u16 handle;
3346
3347         skb_pull(skb, HCI_SCO_HDR_SIZE);
3348
3349         handle = __le16_to_cpu(hdr->handle);
3350
3351         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3352
3353         hdev->stat.sco_rx++;
3354
3355         hci_dev_lock(hdev);
3356         conn = hci_conn_hash_lookup_handle(hdev, handle);
3357         hci_dev_unlock(hdev);
3358
3359         if (conn) {
3360                 /* Send to upper protocol */
3361                 sco_recv_scodata(conn, skb);
3362                 return;
3363         } else {
3364                 BT_ERR("%s SCO packet for unknown connection handle %d",
3365                        hdev->name, handle);
3366         }
3367
3368         kfree_skb(skb);
3369 }
3370
3371 static bool hci_req_is_complete(struct hci_dev *hdev)
3372 {
3373         struct sk_buff *skb;
3374
3375         skb = skb_peek(&hdev->cmd_q);
3376         if (!skb)
3377                 return true;
3378
3379         return bt_cb(skb)->req.start;
3380 }
3381
3382 static void hci_resend_last(struct hci_dev *hdev)
3383 {
3384         struct hci_command_hdr *sent;
3385         struct sk_buff *skb;
3386         u16 opcode;
3387
3388         if (!hdev->sent_cmd)
3389                 return;
3390
3391         sent = (void *) hdev->sent_cmd->data;
3392         opcode = __le16_to_cpu(sent->opcode);
3393         if (opcode == HCI_OP_RESET)
3394                 return;
3395
3396         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3397         if (!skb)
3398                 return;
3399
3400         skb_queue_head(&hdev->cmd_q, skb);
3401         queue_work(hdev->workqueue, &hdev->cmd_work);
3402 }
3403
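/* Invoked from the event path for every Command Complete or Command
 * Status; decides whether the request the command belonged to is now
 * finished and, if so, runs its complete callback.
 */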
3404 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3405 {
3406         hci_req_complete_t req_complete = NULL;
3407         struct sk_buff *skb;
3408         unsigned long flags;
3409
3410         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3411
3412         /* If the completed command doesn't match the last one that was
3413          * sent we need to do special handling of it.
3414          */
3415         if (!hci_sent_cmd_data(hdev, opcode)) {
3416                 /* Some CSR based controllers generate a spontaneous
3417                  * reset complete event during init and any pending
3418                  * command will never be completed. In such a case we
3419                  * need to resend whatever was the last sent
3420                  * command.
3421                  */
3422                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3423                         hci_resend_last(hdev);
3424
3425                 return;
3426         }
3427
3428         /* If the command succeeded and there are still more commands in
3429          * this request, the request is not yet complete.
3430          */
3431         if (!status && !hci_req_is_complete(hdev))
3432                 return;
3433
3434         /* If this was the last command in a request, the complete
3435          * callback would be found in hdev->sent_cmd instead of the
3436          * command queue (hdev->cmd_q).
3437          */
3438         if (hdev->sent_cmd) {
3439                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3440                 if (req_complete)
3441                         goto call_complete;
3442         }
3443
3444         /* Remove all pending commands belonging to this request */
3445         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3446         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3447                 if (bt_cb(skb)->req.start) {
3448                         __skb_queue_head(&hdev->cmd_q, skb);
3449                         break;
3450                 }
3451
3452                 req_complete = bt_cb(skb)->req.complete;
3453                 kfree_skb(skb);
3454         }
3455         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3456
3457 call_complete:
3458         if (req_complete)
3459                 req_complete(hdev, status);
3460 }
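Putting the request machinery together: a caller batches commands with hci_req_add() and attaches a single completion callback via hci_req_run(); hci_req_cmd_complete() above then invokes that callback once the last command completes, or immediately with a failure status if any command in the batch fails (flushing the rest of the request from cmd_q). An illustrative, hypothetical caller (not from the tree):

	static void example_req_complete(struct hci_dev *hdev, u8 status)
	{
		BT_DBG("%s status 0x%2.2x", hdev->name, status);
	}

	static int example_send_request(struct hci_dev *hdev)
	{
		struct hci_request req;

		hci_req_init(&req, hdev);
		hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
		hci_req_add(&req, HCI_OP_READ_BD_ADDR, 0, NULL);

		return hci_req_run(&req, example_req_complete);
	}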
3461
3462 static void hci_rx_work(struct work_struct *work)
3463 {
3464         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3465         struct sk_buff *skb;
3466
3467         BT_DBG("%s", hdev->name);
3468
3469         while ((skb = skb_dequeue(&hdev->rx_q))) {
3470                 /* Send copy to monitor */
3471                 hci_send_to_monitor(hdev, skb);
3472
3473                 if (atomic_read(&hdev->promisc)) {
3474                         /* Send copy to the sockets */
3475                         hci_send_to_sock(hdev, skb);
3476                 }
3477
3478                 if (test_bit(HCI_RAW, &hdev->flags)) {
3479                         kfree_skb(skb);
3480                         continue;
3481                 }
3482
3483                 if (test_bit(HCI_INIT, &hdev->flags)) {
3484                         /* Don't process data packets in this state. */
3485                         switch (bt_cb(skb)->pkt_type) {
3486                         case HCI_ACLDATA_PKT:
3487                         case HCI_SCODATA_PKT:
3488                                 kfree_skb(skb);
3489                                 continue;
3490                         }
3491                 }
3492
3493                 /* Process frame */
3494                 switch (bt_cb(skb)->pkt_type) {
3495                 case HCI_EVENT_PKT:
3496                         BT_DBG("%s Event packet", hdev->name);
3497                         hci_event_packet(hdev, skb);
3498                         break;
3499
3500                 case HCI_ACLDATA_PKT:
3501                         BT_DBG("%s ACL data packet", hdev->name);
3502                         hci_acldata_packet(hdev, skb);
3503                         break;
3504
3505                 case HCI_SCODATA_PKT:
3506                         BT_DBG("%s SCO data packet", hdev->name);
3507                         hci_scodata_packet(hdev, skb);
3508                         break;
3509
3510                 default:
3511                         kfree_skb(skb);
3512                         break;
3513                 }
3514         }
3515 }
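The frames drained here are queued by the driver-facing entry point hci_recv_frame() earlier in this file, whose core (condensed) amounts to:

	bt_cb(skb)->incoming = 1;	/* mark direction for monitor/sockets */
	__net_timestamp(skb);		/* timestamp for socket delivery */

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);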
3516
3517 static void hci_cmd_work(struct work_struct *work)
3518 {
3519         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3520         struct sk_buff *skb;
3521
3522         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3523                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3524
3525         /* Send queued commands */
3526         if (atomic_read(&hdev->cmd_cnt)) {
3527                 skb = skb_dequeue(&hdev->cmd_q);
3528                 if (!skb)
3529                         return;
3530
3531                 kfree_skb(hdev->sent_cmd);
3532
3533                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3534                 if (hdev->sent_cmd) {
3535                         atomic_dec(&hdev->cmd_cnt);
3536                         hci_send_frame(skb);
3537                         if (test_bit(HCI_RESET, &hdev->flags))
3538                                 del_timer(&hdev->cmd_timer);
3539                         else
3540                                 mod_timer(&hdev->cmd_timer,
3541                                           jiffies + HCI_CMD_TIMEOUT);
3542                 } else {
3543                         skb_queue_head(&hdev->cmd_q, skb);
3544                         queue_work(hdev->workqueue, &hdev->cmd_work);
3545                 }
3546         }
3547 }
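hdev->cmd_cnt implements the HCI command flow-control window: hci_cmd_work() transmits only while the count is non-zero, and the count is replenished when the controller reports Num_HCI_Command_Packets in a Command Complete or Command Status event. A condensed sketch of the refill side as handled in net/bluetooth/hci_event.c (the stack deliberately clamps the window to one outstanding command):

	if (ev->ncmd) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}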
3548
3549 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3550 {
3551         /* General inquiry access code (GIAC) */
3552         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3553         struct hci_cp_inquiry cp;
3554
3555         BT_DBG("%s", hdev->name);
3556
3557         if (test_bit(HCI_INQUIRY, &hdev->flags))
3558                 return -EINPROGRESS;
3559
3560         inquiry_cache_flush(hdev);
3561
3562         memset(&cp, 0, sizeof(cp));
3563         memcpy(&cp.lap, lap, sizeof(cp.lap));
3564         cp.length  = length;
3565
3566         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3567 }
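Two details worth noting: the LAP above is the General Inquiry Access Code 0x9e8b33 stored little-endian, and the length parameter is expressed in units of 1.28 seconds, so a length of 8 yields roughly a 10.24-second inquiry. A hypothetical caller:

	/* Illustrative only: run a general inquiry for 8 * 1.28 s */
	int err = hci_do_inquiry(hdev, 0x08);

	if (err == -EINPROGRESS)
		BT_DBG("%s inquiry already in progress", hdev->name);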
3568
3569 int hci_cancel_inquiry(struct hci_dev *hdev)
3570 {
3571         BT_DBG("%s", hdev->name);
3572
3573         if (!test_bit(HCI_INQUIRY, &hdev->flags))
3574                 return -EALREADY;
3575
3576         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3577 }
3578
3579 u8 bdaddr_to_le(u8 bdaddr_type)
3580 {
3581         switch (bdaddr_type) {
3582         case BDADDR_LE_PUBLIC:
3583                 return ADDR_LE_DEV_PUBLIC;
3584
3585         default:
3586                 /* Fallback to LE Random address type */
3587                 return ADDR_LE_DEV_RANDOM;
3588         }
3589 }
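bdaddr_to_le() translates the address-type constants exposed by the management interface (BDADDR_LE_PUBLIC and BDADDR_LE_RANDOM from include/net/bluetooth/bluetooth.h) into the HCI-level ADDR_LE_DEV_* values, falling back to the random type for anything unrecognized. For illustration:

	u8 own_type  = bdaddr_to_le(BDADDR_LE_PUBLIC);	/* ADDR_LE_DEV_PUBLIC */
	u8 peer_type = bdaddr_to_le(0xff);		/* ADDR_LE_DEV_RANDOM (fallback) */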