/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

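/* Completion callback for synchronous requests: record the result and
 * wake up the task sleeping in __hci_req_sync() or __hci_cmd_sync_ev().
 */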
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

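/* Detach the last received event from hdev->recv_evt and hand it to the
 * caller, but only if it matches the requested @event (or, when @event is
 * 0, if it is a Command Complete for @opcode). Anything else is dropped.
 */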
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

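/* Send a single HCI command and block for up to @timeout until the
 * controller answers with the requested @event (or a Command Complete when
 * @event is 0); returns the event skb on success or an ERR_PTR on failure.
 */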
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

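/* Pick the strongest inquiry mode the controller can handle: 0x02 for
 * inquiry with extended inquiry result, 0x01 for inquiry with RSSI, 0x00
 * for standard inquiry. The manufacturer/revision checks below cover
 * controllers that support RSSI results without advertising the feature.
 */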
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Only send HCI_Delete_Stored_Link_Key if it is supported */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

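/* Drop every entry from the inquiry cache. The unknown and resolve lists
 * only chain entries that also live on the all list, so after freeing the
 * latter it is enough to reinitialize the two list heads.
 */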
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

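/* Reposition @ie in the resolve list, which is kept ordered by descending
 * signal strength (smallest abs(rssi) first) so that the closest devices
 * get their names resolved first. Entries whose name resolution is already
 * pending keep their place at the front.
 */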
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

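/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info and return how many were copied. Runs under
 * hci_dev_lock() and therefore must not sleep.
 */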
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE)) {
                        /* Exit through done so that the reference taken by
                         * hci_dev_get() above is released.
                         */
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so allocate a temporary
         * buffer and copy the results to user space afterwards.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

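/* Assemble the LE advertising data payload at @ptr: an optional Flags
 * field, the advertising TX power if known, and as much of the local name
 * as still fits (marked as shortened when it has to be truncated).
 * Returns the number of bytes written.
 */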
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

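/* Bring an HCI device up: run the driver's open() and setup() hooks and,
 * unless the controller is treated as a raw device, the staged __hci_init()
 * sequence. On any failure the device is torn back down before returning.
 */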
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                /* Treat all non BR/EDR controllers as raw devices if
                 * enable_hs is not set.
                 */
                if (hdev->dev_type != HCI_BREDR && !enable_hs)
                        set_bit(HCI_RAW, &hdev->flags);

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

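/* Common power-down path used by hci_dev_close(), rfkill and the delayed
 * power-off work: cancel outstanding work and requests, flush all queues,
 * optionally reset the controller and call the driver's close() hook.
 */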
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

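/* HCIGETDEVLIST ioctl helper: report the id and flags of up to dev_num
 * registered controllers back to user space.
 */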
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        err = hci_dev_open(hdev->id);
        if (err < 0) {
                mgmt_set_powered_failed(hdev, err);
                return;
        }

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

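/* Decide whether a link key should be stored persistently, based on the
 * key type and the bonding requirements both sides declared during
 * pairing.
 */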
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side requested no-bonding */
1680         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1681                 return true;
1682
1683         /* Local side had dedicated bonding as requirement */
1684         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1685                 return true;
1686
1687         /* Remote side had dedicated bonding as requirement */
1688         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1689                 return true;
1690
1691         /* If none of the above criteria match, then don't store the key
1692          * persistently */
1693         return false;
1694 }
1695
1696 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1697 {
1698         struct smp_ltk *k;
1699
1700         list_for_each_entry(k, &hdev->long_term_keys, list) {
1701                 if (k->ediv != ediv ||
1702                     memcmp(rand, k->rand, sizeof(k->rand)))
1703                         continue;
1704
1705                 return k;
1706         }
1707
1708         return NULL;
1709 }
1710
1711 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1712                                      u8 addr_type)
1713 {
1714         struct smp_ltk *k;
1715
1716         list_for_each_entry(k, &hdev->long_term_keys, list)
1717                 if (addr_type == k->bdaddr_type &&
1718                     bacmp(bdaddr, &k->bdaddr) == 0)
1719                         return k;
1720
1721         return NULL;
1722 }
1723
1724 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1725                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1726 {
1727         struct link_key *key, *old_key;
1728         u8 old_key_type;
1729         bool persistent;
1730
1731         old_key = hci_find_link_key(hdev, bdaddr);
1732         if (old_key) {
1733                 old_key_type = old_key->type;
1734                 key = old_key;
1735         } else {
1736                 old_key_type = conn ? conn->key_type : 0xff;
1737                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1738                 if (!key)
1739                         return -ENOMEM;
1740                 list_add(&key->list, &hdev->link_keys);
1741         }
1742
1743         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1744
1745         /* Some buggy controller combinations generate a changed
1746          * combination key for legacy pairing even when there's no
1747          * previous key */
1748         if (type == HCI_LK_CHANGED_COMBINATION &&
1749             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1750                 type = HCI_LK_COMBINATION;
1751                 if (conn)
1752                         conn->key_type = type;
1753         }
1754
1755         bacpy(&key->bdaddr, bdaddr);
1756         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1757         key->pin_len = pin_len;
1758
1759         if (type == HCI_LK_CHANGED_COMBINATION)
1760                 key->type = old_key_type;
1761         else
1762                 key->type = type;
1763
1764         if (!new_key)
1765                 return 0;
1766
1767         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1768
1769         mgmt_new_link_key(hdev, key, persistent);
1770
1771         if (conn)
1772                 conn->flush_key = !persistent;
1773
1774         return 0;
1775 }
1776
1777 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1778                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1779                 ediv, u8 rand[8])
1780 {
1781         struct smp_ltk *key, *old_key;
1782
1783         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1784                 return 0;
1785
1786         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1787         if (old_key) {
1788                 key = old_key;
1789         } else {
1790                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1791                 if (!key)
1792                         return -ENOMEM;
1793                 list_add(&key->list, &hdev->long_term_keys);
1794         }
1795
1796         bacpy(&key->bdaddr, bdaddr);
1797         key->bdaddr_type = addr_type;
1798         memcpy(key->val, tk, sizeof(key->val));
1799         key->authenticated = authenticated;
1800         key->ediv = ediv;
1801         key->enc_size = enc_size;
1802         key->type = type;
1803         memcpy(key->rand, rand, sizeof(key->rand));
1804
1805         if (!new_key)
1806                 return 0;
1807
1808         if (type & HCI_SMP_LTK)
1809                 mgmt_new_ltk(hdev, key, 1);
1810
1811         return 0;
1812 }
1813
1814 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1815 {
1816         struct link_key *key;
1817
1818         key = hci_find_link_key(hdev, bdaddr);
1819         if (!key)
1820                 return -ENOENT;
1821
1822         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1823
1824         list_del(&key->list);
1825         kfree(key);
1826
1827         return 0;
1828 }
1829
1830 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1831 {
1832         struct smp_ltk *k, *tmp;
1833
1834         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1835                 if (bacmp(bdaddr, &k->bdaddr))
1836                         continue;
1837
1838                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1839
1840                 list_del(&k->list);
1841                 kfree(k);
1842         }
1843
1844         return 0;
1845 }
1846
1847 /* HCI command timer function */
1848 static void hci_cmd_timeout(unsigned long arg)
1849 {
1850         struct hci_dev *hdev = (void *) arg;
1851
1852         if (hdev->sent_cmd) {
1853                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1854                 u16 opcode = __le16_to_cpu(sent->opcode);
1855
1856                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1857         } else {
1858                 BT_ERR("%s command tx timeout", hdev->name);
1859         }
1860
1861         atomic_set(&hdev->cmd_cnt, 1);
1862         queue_work(hdev->workqueue, &hdev->cmd_work);
1863 }
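
/* Note on the recovery above: a lost Command Complete/Status event would
 * otherwise leave cmd_cnt at 0 forever; forcing it back to 1 and
 * requeueing cmd_work lets hci_cmd_work() transmit the next queued
 * command instead of wedging the whole command queue behind the lost one.
 */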
1864
1865 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1866                                           bdaddr_t *bdaddr)
1867 {
1868         struct oob_data *data;
1869
1870         list_for_each_entry(data, &hdev->remote_oob_data, list)
1871                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1872                         return data;
1873
1874         return NULL;
1875 }
1876
1877 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1878 {
1879         struct oob_data *data;
1880
1881         data = hci_find_remote_oob_data(hdev, bdaddr);
1882         if (!data)
1883                 return -ENOENT;
1884
1885         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1886
1887         list_del(&data->list);
1888         kfree(data);
1889
1890         return 0;
1891 }
1892
1893 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1894 {
1895         struct oob_data *data, *n;
1896
1897         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1898                 list_del(&data->list);
1899                 kfree(data);
1900         }
1901
1902         return 0;
1903 }
1904
1905 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1906                             u8 *randomizer)
1907 {
1908         struct oob_data *data;
1909
1910         data = hci_find_remote_oob_data(hdev, bdaddr);
1911
1912         if (!data) {
1913                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1914                 if (!data)
1915                         return -ENOMEM;
1916
1917                 bacpy(&data->bdaddr, bdaddr);
1918                 list_add(&data->list, &hdev->remote_oob_data);
1919         }
1920
1921         memcpy(data->hash, hash, sizeof(data->hash));
1922         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1923
1924         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1925
1926         return 0;
1927 }
1928
1929 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1930 {
1931         struct bdaddr_list *b;
1932
1933         list_for_each_entry(b, &hdev->blacklist, list)
1934                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1935                         return b;
1936
1937         return NULL;
1938 }
1939
1940 int hci_blacklist_clear(struct hci_dev *hdev)
1941 {
1942         struct list_head *p, *n;
1943
1944         list_for_each_safe(p, n, &hdev->blacklist) {
1945                 struct bdaddr_list *b;
1946
1947                 b = list_entry(p, struct bdaddr_list, list);
1948
1949                 list_del(p);
1950                 kfree(b);
1951         }
1952
1953         return 0;
1954 }
1955
1956 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1957 {
1958         struct bdaddr_list *entry;
1959
1960         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1961                 return -EBADF;
1962
1963         if (hci_blacklist_lookup(hdev, bdaddr))
1964                 return -EEXIST;
1965
1966         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1967         if (!entry)
1968                 return -ENOMEM;
1969
1970         bacpy(&entry->bdaddr, bdaddr);
1971
1972         list_add(&entry->list, &hdev->blacklist);
1973
1974         return mgmt_device_blocked(hdev, bdaddr, type);
1975 }
1976
1977 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1978 {
1979         struct bdaddr_list *entry;
1980
1981         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1982                 return hci_blacklist_clear(hdev);
1983
1984         entry = hci_blacklist_lookup(hdev, bdaddr);
1985         if (!entry)
1986                 return -ENOENT;
1987
1988         list_del(&entry->list);
1989         kfree(entry);
1990
1991         return mgmt_device_unblocked(hdev, bdaddr, type);
1992 }
1993
1994 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1995 {
1996         struct le_scan_params *param = (struct le_scan_params *) opt;
1997         struct hci_cp_le_set_scan_param cp;
1998
1999         memset(&cp, 0, sizeof(cp));
2000         cp.type = param->type;
2001         cp.interval = cpu_to_le16(param->interval);
2002         cp.window = cpu_to_le16(param->window);
2003
2004         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
2005 }
2006
2007 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
2008 {
2009         struct hci_cp_le_set_scan_enable cp;
2010
2011         memset(&cp, 0, sizeof(cp));
2012         cp.enable = LE_SCAN_ENABLE;
2013         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2014
2015         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2016 }
2017
2018 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
2019                           u16 window, int timeout)
2020 {
2021         long timeo = msecs_to_jiffies(3000);
2022         struct le_scan_params param;
2023         int err;
2024
2025         BT_DBG("%s", hdev->name);
2026
2027         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2028                 return -EINPROGRESS;
2029
2030         param.type = type;
2031         param.interval = interval;
2032         param.window = window;
2033
2034         hci_req_lock(hdev);
2035
2036         err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2037                              timeo);
2038         if (!err)
2039                 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
2040
2041         hci_req_unlock(hdev);
2042
2043         if (err < 0)
2044                 return err;
2045
2046         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2047                            timeout);
2048
2049         return 0;
2050 }
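
/* For reference, interval and window above feed HCI_OP_LE_SET_SCAN_PARAM
 * and are therefore in 0.625 ms units, while timeout is in jiffies. A
 * hypothetical 10 second scan with an 11.25 ms interval and 10 ms window:
 *
 *         err = hci_do_le_scan(hdev, LE_SCAN_ACTIVE, 0x0012, 0x0010,
 *                              msecs_to_jiffies(10000));
 */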
2051
2052 int hci_cancel_le_scan(struct hci_dev *hdev)
2053 {
2054         BT_DBG("%s", hdev->name);
2055
2056         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2057                 return -EALREADY;
2058
2059         if (cancel_delayed_work(&hdev->le_scan_disable)) {
2060                 struct hci_cp_le_set_scan_enable cp;
2061
2062                 /* Send HCI command to disable LE Scan */
2063                 memset(&cp, 0, sizeof(cp));
2064                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2065         }
2066
2067         return 0;
2068 }
2069
2070 static void le_scan_disable_work(struct work_struct *work)
2071 {
2072         struct hci_dev *hdev = container_of(work, struct hci_dev,
2073                                             le_scan_disable.work);
2074         struct hci_cp_le_set_scan_enable cp;
2075
2076         BT_DBG("%s", hdev->name);
2077
2078         memset(&cp, 0, sizeof(cp));
2079
2080         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2081 }
2082
2083 static void le_scan_work(struct work_struct *work)
2084 {
2085         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2086         struct le_scan_params *param = &hdev->le_scan_params;
2087
2088         BT_DBG("%s", hdev->name);
2089
2090         hci_do_le_scan(hdev, param->type, param->interval, param->window,
2091                        param->timeout);
2092 }
2093
2094 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
2095                 int timeout)
2096 {
2097         struct le_scan_params *param = &hdev->le_scan_params;
2098
2099         BT_DBG("%s", hdev->name);
2100
2101         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2102                 return -ENOTSUPP;
2103
2104         if (work_busy(&hdev->le_scan))
2105                 return -EINPROGRESS;
2106
2107         param->type = type;
2108         param->interval = interval;
2109         param->window = window;
2110         param->timeout = timeout;
2111
2112         queue_work(system_long_wq, &hdev->le_scan);
2113
2114         return 0;
2115 }
2116
2117 /* Alloc HCI device */
2118 struct hci_dev *hci_alloc_dev(void)
2119 {
2120         struct hci_dev *hdev;
2121
2122         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2123         if (!hdev)
2124                 return NULL;
2125
2126         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2127         hdev->esco_type = (ESCO_HV1);
2128         hdev->link_mode = (HCI_LM_ACCEPT);
2129         hdev->io_capability = 0x03; /* No Input No Output */
2130         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2131         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2132
2133         hdev->sniff_max_interval = 800;
2134         hdev->sniff_min_interval = 80;
2135
2136         mutex_init(&hdev->lock);
2137         mutex_init(&hdev->req_lock);
2138
2139         INIT_LIST_HEAD(&hdev->mgmt_pending);
2140         INIT_LIST_HEAD(&hdev->blacklist);
2141         INIT_LIST_HEAD(&hdev->uuids);
2142         INIT_LIST_HEAD(&hdev->link_keys);
2143         INIT_LIST_HEAD(&hdev->long_term_keys);
2144         INIT_LIST_HEAD(&hdev->remote_oob_data);
2145         INIT_LIST_HEAD(&hdev->conn_hash.list);
2146
2147         INIT_WORK(&hdev->rx_work, hci_rx_work);
2148         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2149         INIT_WORK(&hdev->tx_work, hci_tx_work);
2150         INIT_WORK(&hdev->power_on, hci_power_on);
2151         INIT_WORK(&hdev->le_scan, le_scan_work);
2152
2153         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2154         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2155         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2156
2157         skb_queue_head_init(&hdev->rx_q);
2158         skb_queue_head_init(&hdev->cmd_q);
2159         skb_queue_head_init(&hdev->raw_q);
2160
2161         init_waitqueue_head(&hdev->req_wait_q);
2162
2163         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2164
2165         hci_init_sysfs(hdev);
2166         discovery_init(hdev);
2167
2168         return hdev;
2169 }
2170 EXPORT_SYMBOL(hci_alloc_dev);
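
/* Sketch of the usual driver-side pairing of alloc/register (the shape
 * used by drivers under drivers/bluetooth/); my_open, my_close and
 * my_send are hypothetical driver callbacks:
 *
 *         hdev = hci_alloc_dev();
 *         if (!hdev)
 *                 return -ENOMEM;
 *
 *         hdev->bus   = HCI_USB;
 *         hdev->open  = my_open;
 *         hdev->close = my_close;
 *         hdev->send  = my_send;
 *
 *         err = hci_register_dev(hdev);
 *         if (err < 0)
 *                 hci_free_dev(hdev);
 */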
2171
2172 /* Free HCI device */
2173 void hci_free_dev(struct hci_dev *hdev)
2174 {
2175         /* will free via device release */
2176         put_device(&hdev->dev);
2177 }
2178 EXPORT_SYMBOL(hci_free_dev);
2179
2180 /* Register HCI device */
2181 int hci_register_dev(struct hci_dev *hdev)
2182 {
2183         int id, error;
2184
2185         if (!hdev->open || !hdev->close)
2186                 return -EINVAL;
2187
2188         /* Do not allow HCI_AMP devices to register at index 0,
2189          * so the index can be used as the AMP controller ID.
2190          */
2191         switch (hdev->dev_type) {
2192         case HCI_BREDR:
2193                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2194                 break;
2195         case HCI_AMP:
2196                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2197                 break;
2198         default:
2199                 return -EINVAL;
2200         }
2201
2202         if (id < 0)
2203                 return id;
2204
2205         sprintf(hdev->name, "hci%d", id);
2206         hdev->id = id;
2207
2208         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2209
2210         write_lock(&hci_dev_list_lock);
2211         list_add(&hdev->list, &hci_dev_list);
2212         write_unlock(&hci_dev_list_lock);
2213
2214         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2215                                           WQ_MEM_RECLAIM, 1, hdev->name);
2216         if (!hdev->workqueue) {
2217                 error = -ENOMEM;
2218                 goto err;
2219         }
2220
2221         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2222                                               WQ_MEM_RECLAIM, 1, hdev->name);
2223         if (!hdev->req_workqueue) {
2224                 destroy_workqueue(hdev->workqueue);
2225                 error = -ENOMEM;
2226                 goto err;
2227         }
2228
2229         error = hci_add_sysfs(hdev);
2230         if (error < 0)
2231                 goto err_wqueue;
2232
2233         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2234                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2235                                     hdev);
2236         if (hdev->rfkill) {
2237                 if (rfkill_register(hdev->rfkill) < 0) {
2238                         rfkill_destroy(hdev->rfkill);
2239                         hdev->rfkill = NULL;
2240                 }
2241         }
2242
2243         set_bit(HCI_SETUP, &hdev->dev_flags);
2244
2245         if (hdev->dev_type != HCI_AMP)
2246                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2247
2248         hci_notify(hdev, HCI_DEV_REG);
2249         hci_dev_hold(hdev);
2250
2251         queue_work(hdev->req_workqueue, &hdev->power_on);
2252
2253         return id;
2254
2255 err_wqueue:
2256         destroy_workqueue(hdev->workqueue);
2257         destroy_workqueue(hdev->req_workqueue);
2258 err:
2259         ida_simple_remove(&hci_index_ida, hdev->id);
2260         write_lock(&hci_dev_list_lock);
2261         list_del(&hdev->list);
2262         write_unlock(&hci_dev_list_lock);
2263
2264         return error;
2265 }
2266 EXPORT_SYMBOL(hci_register_dev);
2267
2268 /* Unregister HCI device */
2269 void hci_unregister_dev(struct hci_dev *hdev)
2270 {
2271         int i, id;
2272
2273         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2274
2275         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2276
2277         id = hdev->id;
2278
2279         write_lock(&hci_dev_list_lock);
2280         list_del(&hdev->list);
2281         write_unlock(&hci_dev_list_lock);
2282
2283         hci_dev_do_close(hdev);
2284
2285         for (i = 0; i < NUM_REASSEMBLY; i++)
2286                 kfree_skb(hdev->reassembly[i]);
2287
2288         cancel_work_sync(&hdev->power_on);
2289
2290         if (!test_bit(HCI_INIT, &hdev->flags) &&
2291             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2292                 hci_dev_lock(hdev);
2293                 mgmt_index_removed(hdev);
2294                 hci_dev_unlock(hdev);
2295         }
2296
2297         /* mgmt_index_removed should take care of emptying the
2298          * pending list */
2299         BUG_ON(!list_empty(&hdev->mgmt_pending));
2300
2301         hci_notify(hdev, HCI_DEV_UNREG);
2302
2303         if (hdev->rfkill) {
2304                 rfkill_unregister(hdev->rfkill);
2305                 rfkill_destroy(hdev->rfkill);
2306         }
2307
2308         hci_del_sysfs(hdev);
2309
2310         destroy_workqueue(hdev->workqueue);
2311         destroy_workqueue(hdev->req_workqueue);
2312
2313         hci_dev_lock(hdev);
2314         hci_blacklist_clear(hdev);
2315         hci_uuids_clear(hdev);
2316         hci_link_keys_clear(hdev);
2317         hci_smp_ltks_clear(hdev);
2318         hci_remote_oob_data_clear(hdev);
2319         hci_dev_unlock(hdev);
2320
2321         hci_dev_put(hdev);
2322
2323         ida_simple_remove(&hci_index_ida, id);
2324 }
2325 EXPORT_SYMBOL(hci_unregister_dev);
2326
2327 /* Suspend HCI device */
2328 int hci_suspend_dev(struct hci_dev *hdev)
2329 {
2330         hci_notify(hdev, HCI_DEV_SUSPEND);
2331         return 0;
2332 }
2333 EXPORT_SYMBOL(hci_suspend_dev);
2334
2335 /* Resume HCI device */
2336 int hci_resume_dev(struct hci_dev *hdev)
2337 {
2338         hci_notify(hdev, HCI_DEV_RESUME);
2339         return 0;
2340 }
2341 EXPORT_SYMBOL(hci_resume_dev);
2342
2343 /* Receive frame from HCI drivers */
2344 int hci_recv_frame(struct sk_buff *skb)
2345 {
2346         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2347         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2348                       && !test_bit(HCI_INIT, &hdev->flags))) {
2349                 kfree_skb(skb);
2350                 return -ENXIO;
2351         }
2352
2353         /* Incoming skb */
2354         bt_cb(skb)->incoming = 1;
2355
2356         /* Time stamp */
2357         __net_timestamp(skb);
2358
2359         skb_queue_tail(&hdev->rx_q, skb);
2360         queue_work(hdev->workqueue, &hdev->rx_work);
2361
2362         return 0;
2363 }
2364 EXPORT_SYMBOL(hci_recv_frame);
2365
2366 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2367                           int count, __u8 index)
2368 {
2369         int len = 0;
2370         int hlen = 0;
2371         int remain = count;
2372         struct sk_buff *skb;
2373         struct bt_skb_cb *scb;
2374
2375         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2376             index >= NUM_REASSEMBLY)
2377                 return -EILSEQ;
2378
2379         skb = hdev->reassembly[index];
2380
2381         if (!skb) {
2382                 switch (type) {
2383                 case HCI_ACLDATA_PKT:
2384                         len = HCI_MAX_FRAME_SIZE;
2385                         hlen = HCI_ACL_HDR_SIZE;
2386                         break;
2387                 case HCI_EVENT_PKT:
2388                         len = HCI_MAX_EVENT_SIZE;
2389                         hlen = HCI_EVENT_HDR_SIZE;
2390                         break;
2391                 case HCI_SCODATA_PKT:
2392                         len = HCI_MAX_SCO_SIZE;
2393                         hlen = HCI_SCO_HDR_SIZE;
2394                         break;
2395                 }
2396
2397                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2398                 if (!skb)
2399                         return -ENOMEM;
2400
2401                 scb = (void *) skb->cb;
2402                 scb->expect = hlen;
2403                 scb->pkt_type = type;
2404
2405                 skb->dev = (void *) hdev;
2406                 hdev->reassembly[index] = skb;
2407         }
2408
2409         while (count) {
2410                 scb = (void *) skb->cb;
2411                 len = min_t(uint, scb->expect, count);
2412
2413                 memcpy(skb_put(skb, len), data, len);
2414
2415                 count -= len;
2416                 data += len;
2417                 scb->expect -= len;
2418                 remain = count;
2419
2420                 switch (type) {
2421                 case HCI_EVENT_PKT:
2422                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2423                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2424                                 scb->expect = h->plen;
2425
2426                                 if (skb_tailroom(skb) < scb->expect) {
2427                                         kfree_skb(skb);
2428                                         hdev->reassembly[index] = NULL;
2429                                         return -ENOMEM;
2430                                 }
2431                         }
2432                         break;
2433
2434                 case HCI_ACLDATA_PKT:
2435                         if (skb->len == HCI_ACL_HDR_SIZE) {
2436                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2437                                 scb->expect = __le16_to_cpu(h->dlen);
2438
2439                                 if (skb_tailroom(skb) < scb->expect) {
2440                                         kfree_skb(skb);
2441                                         hdev->reassembly[index] = NULL;
2442                                         return -ENOMEM;
2443                                 }
2444                         }
2445                         break;
2446
2447                 case HCI_SCODATA_PKT:
2448                         if (skb->len == HCI_SCO_HDR_SIZE) {
2449                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2450                                 scb->expect = h->dlen;
2451
2452                                 if (skb_tailroom(skb) < scb->expect) {
2453                                         kfree_skb(skb);
2454                                         hdev->reassembly[index] = NULL;
2455                                         return -ENOMEM;
2456                                 }
2457                         }
2458                         break;
2459                 }
2460
2461                 if (scb->expect == 0) {
2462                         /* Complete frame */
2463
2464                         bt_cb(skb)->pkt_type = type;
2465                         hci_recv_frame(skb);
2466
2467                         hdev->reassembly[index] = NULL;
2468                         return remain;
2469                 }
2470         }
2471
2472         return remain;
2473 }
2474
2475 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2476 {
2477         int rem = 0;
2478
2479         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2480                 return -EILSEQ;
2481
2482         while (count) {
2483                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2484                 if (rem < 0)
2485                         return rem;
2486
2487                 data += (count - rem);
2488                 count = rem;
2489         }
2490
2491         return rem;
2492 }
2493 EXPORT_SYMBOL(hci_recv_fragment);
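
/* Sketched use from a driver receive path (buffer names hypothetical):
 * the packet type indicator has already been stripped from the stream,
 * and partial frames are carried across calls in hdev->reassembly[]:
 *
 *         err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *         if (err < 0)
 *                 BT_ERR("event reassembly failed (%d)", err);
 */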
2494
2495 #define STREAM_REASSEMBLY 0
2496
2497 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2498 {
2499         int type;
2500         int rem = 0;
2501
2502         while (count) {
2503                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2504
2505                 if (!skb) {
2506                         struct { char type; } *pkt;
2507
2508                         /* Start of the frame */
2509                         pkt = data;
2510                         type = pkt->type;
2511
2512                         data++;
2513                         count--;
2514                 } else {
2515                         type = bt_cb(skb)->pkt_type;
2516                 }
2517                 rem = hci_reassembly(hdev, type, data, count,
2518                                      STREAM_REASSEMBLY);
2519                 if (rem < 0)
2520                         return rem;
2521
2522                 data += (count - rem);
2523                 count = rem;
2524         }
2525
2526         return rem;
2527 }
2528 EXPORT_SYMBOL(hci_recv_stream_fragment);
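
/* Unlike hci_recv_fragment(), the stream variant above expects the H4
 * packet-type byte in-band as the first byte of each frame, so a
 * hypothetical UART driver can push raw tty data straight through:
 *
 *         hci_recv_stream_fragment(hdev, tty_buf, tty_count);
 */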
2529
2530 /* ---- Interface to upper protocols ---- */
2531
2532 int hci_register_cb(struct hci_cb *cb)
2533 {
2534         BT_DBG("%p name %s", cb, cb->name);
2535
2536         write_lock(&hci_cb_list_lock);
2537         list_add(&cb->list, &hci_cb_list);
2538         write_unlock(&hci_cb_list_lock);
2539
2540         return 0;
2541 }
2542 EXPORT_SYMBOL(hci_register_cb);
2543
2544 int hci_unregister_cb(struct hci_cb *cb)
2545 {
2546         BT_DBG("%p name %s", cb, cb->name);
2547
2548         write_lock(&hci_cb_list_lock);
2549         list_del(&cb->list);
2550         write_unlock(&hci_cb_list_lock);
2551
2552         return 0;
2553 }
2554 EXPORT_SYMBOL(hci_unregister_cb);
2555
2556 static int hci_send_frame(struct sk_buff *skb)
2557 {
2558         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2559
2560         if (!hdev) {
2561                 kfree_skb(skb);
2562                 return -ENODEV;
2563         }
2564
2565         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2566
2567         /* Time stamp */
2568         __net_timestamp(skb);
2569
2570         /* Send copy to monitor */
2571         hci_send_to_monitor(hdev, skb);
2572
2573         if (atomic_read(&hdev->promisc)) {
2574                 /* Send copy to the sockets */
2575                 hci_send_to_sock(hdev, skb);
2576         }
2577
2578         /* Get rid of skb owner, prior to sending to the driver. */
2579         skb_orphan(skb);
2580
2581         return hdev->send(skb);
2582 }
2583
2584 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2585 {
2586         skb_queue_head_init(&req->cmd_q);
2587         req->hdev = hdev;
2588         req->err = 0;
2589 }
2590
2591 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2592 {
2593         struct hci_dev *hdev = req->hdev;
2594         struct sk_buff *skb;
2595         unsigned long flags;
2596
2597         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2598
2599         /* If an error occurred during request building, remove all HCI
2600          * commands queued on the HCI request queue.
2601          */
2602         if (req->err) {
2603                 skb_queue_purge(&req->cmd_q);
2604                 return req->err;
2605         }
2606
2607         /* Do not allow empty requests */
2608         if (skb_queue_empty(&req->cmd_q))
2609                 return -ENODATA;
2610
2611         skb = skb_peek_tail(&req->cmd_q);
2612         bt_cb(skb)->req.complete = complete;
2613
2614         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2615         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2616         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2617
2618         queue_work(hdev->workqueue, &hdev->cmd_work);
2619
2620         return 0;
2621 }
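
/* Sketch of the request API used above (the pattern le_scan_param_req
 * and le_scan_enable_req feed into): batch several commands, then run
 * them as one unit with a single completion callback attached to the
 * last command; my_complete is a hypothetical hci_req_complete_t:
 *
 *         struct hci_request req;
 *
 *         hci_req_init(&req, hdev);
 *         hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
 *         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(en), &en);
 *         err = hci_req_run(&req, my_complete);
 */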
2622
2623 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2624                                        u32 plen, const void *param)
2625 {
2626         int len = HCI_COMMAND_HDR_SIZE + plen;
2627         struct hci_command_hdr *hdr;
2628         struct sk_buff *skb;
2629
2630         skb = bt_skb_alloc(len, GFP_ATOMIC);
2631         if (!skb)
2632                 return NULL;
2633
2634         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2635         hdr->opcode = cpu_to_le16(opcode);
2636         hdr->plen   = plen;
2637
2638         if (plen)
2639                 memcpy(skb_put(skb, plen), param, plen);
2640
2641         BT_DBG("skb len %d", skb->len);
2642
2643         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2644         skb->dev = (void *) hdev;
2645
2646         return skb;
2647 }
2648
2649 /* Send HCI command */
2650 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2651                  const void *param)
2652 {
2653         struct sk_buff *skb;
2654
2655         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2656
2657         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2658         if (!skb) {
2659                 BT_ERR("%s no memory for command", hdev->name);
2660                 return -ENOMEM;
2661         }
2662
2663         /* Stand-alone HCI commands must be flagged as
2664          * single-command requests.
2665          */
2666         bt_cb(skb)->req.start = true;
2667
2668         skb_queue_tail(&hdev->cmd_q, skb);
2669         queue_work(hdev->workqueue, &hdev->cmd_work);
2670
2671         return 0;
2672 }
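
/* Sketched one-shot use, in contrast to the batched hci_request path: a
 * parameterless command passes plen 0 and a NULL parameter buffer:
 *
 *         hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */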
2673
2674 /* Queue a command to an asynchronous HCI request */
2675 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2676                     const void *param, u8 event)
2677 {
2678         struct hci_dev *hdev = req->hdev;
2679         struct sk_buff *skb;
2680
2681         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2682
2683         /* If an error occurred during request building, there is no point in
2684          * queueing the HCI command. We can simply return.
2685          */
2686         if (req->err)
2687                 return;
2688
2689         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2690         if (!skb) {
2691                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2692                        hdev->name, opcode);
2693                 req->err = -ENOMEM;
2694                 return;
2695         }
2696
2697         if (skb_queue_empty(&req->cmd_q))
2698                 bt_cb(skb)->req.start = true;
2699
2700         bt_cb(skb)->req.event = event;
2701
2702         skb_queue_tail(&req->cmd_q, skb);
2703 }
2704
2705 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2706                  const void *param)
2707 {
2708         hci_req_add_ev(req, opcode, plen, param, 0);
2709 }
2710
2711 /* Get data from the previously sent command */
2712 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2713 {
2714         struct hci_command_hdr *hdr;
2715
2716         if (!hdev->sent_cmd)
2717                 return NULL;
2718
2719         hdr = (void *) hdev->sent_cmd->data;
2720
2721         if (hdr->opcode != cpu_to_le16(opcode))
2722                 return NULL;
2723
2724         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2725
2726         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2727 }
2728
2729 /* Send ACL data */
2730 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2731 {
2732         struct hci_acl_hdr *hdr;
2733         int len = skb->len;
2734
2735         skb_push(skb, HCI_ACL_HDR_SIZE);
2736         skb_reset_transport_header(skb);
2737         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2738         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2739         hdr->dlen   = cpu_to_le16(len);
2740 }
2741
2742 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2743                           struct sk_buff *skb, __u16 flags)
2744 {
2745         struct hci_conn *conn = chan->conn;
2746         struct hci_dev *hdev = conn->hdev;
2747         struct sk_buff *list;
2748
2749         skb->len = skb_headlen(skb);
2750         skb->data_len = 0;
2751
2752         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2753
2754         switch (hdev->dev_type) {
2755         case HCI_BREDR:
2756                 hci_add_acl_hdr(skb, conn->handle, flags);
2757                 break;
2758         case HCI_AMP:
2759                 hci_add_acl_hdr(skb, chan->handle, flags);
2760                 break;
2761         default:
2762                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2763                 return;
2764         }
2765
2766         list = skb_shinfo(skb)->frag_list;
2767         if (!list) {
2768                 /* Non-fragmented */
2769                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2770
2771                 skb_queue_tail(queue, skb);
2772         } else {
2773                 /* Fragmented */
2774                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2775
2776                 skb_shinfo(skb)->frag_list = NULL;
2777
2778                 /* Queue all fragments atomically */
2779                 spin_lock(&queue->lock);
2780
2781                 __skb_queue_tail(queue, skb);
2782
2783                 flags &= ~ACL_START;
2784                 flags |= ACL_CONT;
2785                 do {
2786                         skb = list; list = list->next;
2787
2788                         skb->dev = (void *) hdev;
2789                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2790                         hci_add_acl_hdr(skb, conn->handle, flags);
2791
2792                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2793
2794                         __skb_queue_tail(queue, skb);
2795                 } while (list);
2796
2797                 spin_unlock(&queue->lock);
2798         }
2799 }
2800
2801 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2802 {
2803         struct hci_dev *hdev = chan->conn->hdev;
2804
2805         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2806
2807         skb->dev = (void *) hdev;
2808
2809         hci_queue_acl(chan, &chan->data_q, skb, flags);
2810
2811         queue_work(hdev->workqueue, &hdev->tx_work);
2812 }
2813
2814 /* Send SCO data */
2815 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2816 {
2817         struct hci_dev *hdev = conn->hdev;
2818         struct hci_sco_hdr hdr;
2819
2820         BT_DBG("%s len %d", hdev->name, skb->len);
2821
2822         hdr.handle = cpu_to_le16(conn->handle);
2823         hdr.dlen   = skb->len;
2824
2825         skb_push(skb, HCI_SCO_HDR_SIZE);
2826         skb_reset_transport_header(skb);
2827         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2828
2829         skb->dev = (void *) hdev;
2830         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2831
2832         skb_queue_tail(&conn->data_q, skb);
2833         queue_work(hdev->workqueue, &hdev->tx_work);
2834 }
2835
2836 /* ---- HCI TX task (outgoing data) ---- */
2837
2838 /* HCI Connection scheduler */
2839 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2840                                      int *quote)
2841 {
2842         struct hci_conn_hash *h = &hdev->conn_hash;
2843         struct hci_conn *conn = NULL, *c;
2844         unsigned int num = 0, min = ~0;
2845
2846         /* We don't have to lock the device here. Connections are always
2847          * added and removed with TX task disabled. */
2848
2849         rcu_read_lock();
2850
2851         list_for_each_entry_rcu(c, &h->list, list) {
2852                 if (c->type != type || skb_queue_empty(&c->data_q))
2853                         continue;
2854
2855                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2856                         continue;
2857
2858                 num++;
2859
2860                 if (c->sent < min) {
2861                         min  = c->sent;
2862                         conn = c;
2863                 }
2864
2865                 if (hci_conn_num(hdev, type) == num)
2866                         break;
2867         }
2868
2869         rcu_read_unlock();
2870
2871         if (conn) {
2872                 int cnt, q;
2873
2874                 switch (conn->type) {
2875                 case ACL_LINK:
2876                         cnt = hdev->acl_cnt;
2877                         break;
2878                 case SCO_LINK:
2879                 case ESCO_LINK:
2880                         cnt = hdev->sco_cnt;
2881                         break;
2882                 case LE_LINK:
2883                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2884                         break;
2885                 default:
2886                         cnt = 0;
2887                         BT_ERR("Unknown link type");
2888                 }
2889
2890                 q = cnt / num;
2891                 *quote = q ? q : 1;
2892         } else {
2893                 *quote = 0;
2894         }
2895         BT_DBG("conn %p quote %d", conn, *quote);
2896         return conn;
2897 }
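
/* Worked example of the quota math above (numbers hypothetical): with
 * hdev->acl_cnt = 8 free buffers and num = 3 connections holding queued
 * data, q = 8 / 3 = 2, so the least-serviced connection may send two
 * frames this round; when q computes to 0 the quote is still 1, so no
 * connection is starved outright.
 */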
2898
2899 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2900 {
2901         struct hci_conn_hash *h = &hdev->conn_hash;
2902         struct hci_conn *c;
2903
2904         BT_ERR("%s link tx timeout", hdev->name);
2905
2906         rcu_read_lock();
2907
2908         /* Kill stalled connections */
2909         list_for_each_entry_rcu(c, &h->list, list) {
2910                 if (c->type == type && c->sent) {
2911                         BT_ERR("%s killing stalled connection %pMR",
2912                                hdev->name, &c->dst);
2913                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2914                 }
2915         }
2916
2917         rcu_read_unlock();
2918 }
2919
2920 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2921                                       int *quote)
2922 {
2923         struct hci_conn_hash *h = &hdev->conn_hash;
2924         struct hci_chan *chan = NULL;
2925         unsigned int num = 0, min = ~0, cur_prio = 0;
2926         struct hci_conn *conn;
2927         int cnt, q, conn_num = 0;
2928
2929         BT_DBG("%s", hdev->name);
2930
2931         rcu_read_lock();
2932
2933         list_for_each_entry_rcu(conn, &h->list, list) {
2934                 struct hci_chan *tmp;
2935
2936                 if (conn->type != type)
2937                         continue;
2938
2939                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2940                         continue;
2941
2942                 conn_num++;
2943
2944                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2945                         struct sk_buff *skb;
2946
2947                         if (skb_queue_empty(&tmp->data_q))
2948                                 continue;
2949
2950                         skb = skb_peek(&tmp->data_q);
2951                         if (skb->priority < cur_prio)
2952                                 continue;
2953
2954                         if (skb->priority > cur_prio) {
2955                                 num = 0;
2956                                 min = ~0;
2957                                 cur_prio = skb->priority;
2958                         }
2959
2960                         num++;
2961
2962                         if (conn->sent < min) {
2963                                 min  = conn->sent;
2964                                 chan = tmp;
2965                         }
2966                 }
2967
2968                 if (hci_conn_num(hdev, type) == conn_num)
2969                         break;
2970         }
2971
2972         rcu_read_unlock();
2973
2974         if (!chan)
2975                 return NULL;
2976
2977         switch (chan->conn->type) {
2978         case ACL_LINK:
2979                 cnt = hdev->acl_cnt;
2980                 break;
2981         case AMP_LINK:
2982                 cnt = hdev->block_cnt;
2983                 break;
2984         case SCO_LINK:
2985         case ESCO_LINK:
2986                 cnt = hdev->sco_cnt;
2987                 break;
2988         case LE_LINK:
2989                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2990                 break;
2991         default:
2992                 cnt = 0;
2993                 BT_ERR("Unknown link type");
2994         }
2995
2996         q = cnt / num;
2997         *quote = q ? q : 1;
2998         BT_DBG("chan %p quote %d", chan, *quote);
2999         return chan;
3000 }
3001
3002 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3003 {
3004         struct hci_conn_hash *h = &hdev->conn_hash;
3005         struct hci_conn *conn;
3006         int num = 0;
3007
3008         BT_DBG("%s", hdev->name);
3009
3010         rcu_read_lock();
3011
3012         list_for_each_entry_rcu(conn, &h->list, list) {
3013                 struct hci_chan *chan;
3014
3015                 if (conn->type != type)
3016                         continue;
3017
3018                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3019                         continue;
3020
3021                 num++;
3022
3023                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3024                         struct sk_buff *skb;
3025
3026                         if (chan->sent) {
3027                                 chan->sent = 0;
3028                                 continue;
3029                         }
3030
3031                         if (skb_queue_empty(&chan->data_q))
3032                                 continue;
3033
3034                         skb = skb_peek(&chan->data_q);
3035                         if (skb->priority >= HCI_PRIO_MAX - 1)
3036                                 continue;
3037
3038                         skb->priority = HCI_PRIO_MAX - 1;
3039
3040                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3041                                skb->priority);
3042                 }
3043
3044                 if (hci_conn_num(hdev, type) == num)
3045                         break;
3046         }
3047
3048         rcu_read_unlock();
3049
3050 }
3051
3052 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3053 {
3054         /* Calculate count of blocks used by this packet */
3055         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3056 }
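
/* Worked example (controller numbers hypothetical): with
 * hdev->block_len = 339 and an ACL frame of skb->len = 682 (4 byte ACL
 * header plus 678 bytes of payload), DIV_ROUND_UP(678, 339) = 2 blocks
 * are charged against hdev->block_cnt.
 */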
3057
3058 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3059 {
3060         if (!test_bit(HCI_RAW, &hdev->flags)) {
3061                 /* ACL tx timeout must be longer than the maximum
3062                  * link supervision timeout (40.9 seconds) */
3063                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3064                                        HCI_ACL_TX_TIMEOUT))
3065                         hci_link_tx_to(hdev, ACL_LINK);
3066         }
3067 }
3068
3069 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3070 {
3071         unsigned int cnt = hdev->acl_cnt;
3072         struct hci_chan *chan;
3073         struct sk_buff *skb;
3074         int quote;
3075
3076         __check_timeout(hdev, cnt);
3077
3078         while (hdev->acl_cnt &&
3079                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3080                 u32 priority = (skb_peek(&chan->data_q))->priority;
3081                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3082                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3083                                skb->len, skb->priority);
3084
3085                         /* Stop if priority has changed */
3086                         if (skb->priority < priority)
3087                                 break;
3088
3089                         skb = skb_dequeue(&chan->data_q);
3090
3091                         hci_conn_enter_active_mode(chan->conn,
3092                                                    bt_cb(skb)->force_active);
3093
3094                         hci_send_frame(skb);
3095                         hdev->acl_last_tx = jiffies;
3096
3097                         hdev->acl_cnt--;
3098                         chan->sent++;
3099                         chan->conn->sent++;
3100                 }
3101         }
3102
3103         if (cnt != hdev->acl_cnt)
3104                 hci_prio_recalculate(hdev, ACL_LINK);
3105 }
3106
3107 static void hci_sched_acl_blk(struct hci_dev *hdev)
3108 {
3109         unsigned int cnt = hdev->block_cnt;
3110         struct hci_chan *chan;
3111         struct sk_buff *skb;
3112         int quote;
3113         u8 type;
3114
3115         __check_timeout(hdev, cnt);
3116
3117         BT_DBG("%s", hdev->name);
3118
3119         if (hdev->dev_type == HCI_AMP)
3120                 type = AMP_LINK;
3121         else
3122                 type = ACL_LINK;
3123
3124         while (hdev->block_cnt > 0 &&
3125                (chan = hci_chan_sent(hdev, type, &quote))) {
3126                 u32 priority = (skb_peek(&chan->data_q))->priority;
3127                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3128                         int blocks;
3129
3130                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3131                                skb->len, skb->priority);
3132
3133                         /* Stop if priority has changed */
3134                         if (skb->priority < priority)
3135                                 break;
3136
3137                         skb = skb_dequeue(&chan->data_q);
3138
3139                         blocks = __get_blocks(hdev, skb);
3140                         if (blocks > hdev->block_cnt)
3141                                 return;
3142
3143                         hci_conn_enter_active_mode(chan->conn,
3144                                                    bt_cb(skb)->force_active);
3145
3146                         hci_send_frame(skb);
3147                         hdev->acl_last_tx = jiffies;
3148
3149                         hdev->block_cnt -= blocks;
3150                         quote -= blocks;
3151
3152                         chan->sent += blocks;
3153                         chan->conn->sent += blocks;
3154                 }
3155         }
3156
3157         if (cnt != hdev->block_cnt)
3158                 hci_prio_recalculate(hdev, type);
3159 }
3160
3161 static void hci_sched_acl(struct hci_dev *hdev)
3162 {
3163         BT_DBG("%s", hdev->name);
3164
3165         /* No ACL link over BR/EDR controller */
3166         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3167                 return;
3168
3169         /* No AMP link over AMP controller */
3170         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3171                 return;
3172
3173         switch (hdev->flow_ctl_mode) {
3174         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3175                 hci_sched_acl_pkt(hdev);
3176                 break;
3177
3178         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3179                 hci_sched_acl_blk(hdev);
3180                 break;
3181         }
3182 }
3183
3184 /* Schedule SCO */
3185 static void hci_sched_sco(struct hci_dev *hdev)
3186 {
3187         struct hci_conn *conn;
3188         struct sk_buff *skb;
3189         int quote;
3190
3191         BT_DBG("%s", hdev->name);
3192
3193         if (!hci_conn_num(hdev, SCO_LINK))
3194                 return;
3195
3196         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3197                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3198                         BT_DBG("skb %p len %d", skb, skb->len);
3199                         hci_send_frame(skb);
3200
3201                         conn->sent++;
3202                         if (conn->sent == ~0)
3203                                 conn->sent = 0;
3204                 }
3205         }
3206 }
3207
3208 static void hci_sched_esco(struct hci_dev *hdev)
3209 {
3210         struct hci_conn *conn;
3211         struct sk_buff *skb;
3212         int quote;
3213
3214         BT_DBG("%s", hdev->name);
3215
3216         if (!hci_conn_num(hdev, ESCO_LINK))
3217                 return;
3218
3219         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3220                                                      &quote))) {
3221                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3222                         BT_DBG("skb %p len %d", skb, skb->len);
3223                         hci_send_frame(skb);
3224
3225                         conn->sent++;
3226                         if (conn->sent == ~0)
3227                                 conn->sent = 0;
3228                 }
3229         }
3230 }
3231
3232 static void hci_sched_le(struct hci_dev *hdev)
3233 {
3234         struct hci_chan *chan;
3235         struct sk_buff *skb;
3236         int quote, cnt, tmp;
3237
3238         BT_DBG("%s", hdev->name);
3239
3240         if (!hci_conn_num(hdev, LE_LINK))
3241                 return;
3242
3243         if (!test_bit(HCI_RAW, &hdev->flags)) {
3244                 /* LE tx timeout must be longer than the maximum
3245                  * link supervision timeout (40.9 seconds) */
3246                 if (!hdev->le_cnt && hdev->le_pkts &&
3247                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3248                         hci_link_tx_to(hdev, LE_LINK);
3249         }
3250
3251         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3252         tmp = cnt;
3253         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3254                 u32 priority = (skb_peek(&chan->data_q))->priority;
3255                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3256                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3257                                skb->len, skb->priority);
3258
3259                         /* Stop if priority has changed */
3260                         if (skb->priority < priority)
3261                                 break;
3262
3263                         skb = skb_dequeue(&chan->data_q);
3264
3265                         hci_send_frame(skb);
3266                         hdev->le_last_tx = jiffies;
3267
3268                         cnt--;
3269                         chan->sent++;
3270                         chan->conn->sent++;
3271                 }
3272         }
3273
3274         if (hdev->le_pkts)
3275                 hdev->le_cnt = cnt;
3276         else
3277                 hdev->acl_cnt = cnt;
3278
3279         if (cnt != tmp)
3280                 hci_prio_recalculate(hdev, LE_LINK);
3281 }
3282
3283 static void hci_tx_work(struct work_struct *work)
3284 {
3285         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3286         struct sk_buff *skb;
3287
3288         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3289                hdev->sco_cnt, hdev->le_cnt);
3290
3291         /* Schedule queues and send stuff to HCI driver */
3292
3293         hci_sched_acl(hdev);
3294
3295         hci_sched_sco(hdev);
3296
3297         hci_sched_esco(hdev);
3298
3299         hci_sched_le(hdev);
3300
3301         /* Send next queued raw (unknown type) packet */
3302         while ((skb = skb_dequeue(&hdev->raw_q)))
3303                 hci_send_frame(skb);
3304 }
3305
3306 /* ----- HCI RX task (incoming data processing) ----- */
3307
3308 /* ACL data packet */
3309 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3310 {
3311         struct hci_acl_hdr *hdr = (void *) skb->data;
3312         struct hci_conn *conn;
3313         __u16 handle, flags;
3314
3315         skb_pull(skb, HCI_ACL_HDR_SIZE);
3316
3317         handle = __le16_to_cpu(hdr->handle);
3318         flags  = hci_flags(handle);
3319         handle = hci_handle(handle);
3320
3321         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3322                handle, flags);
3323
3324         hdev->stat.acl_rx++;
3325
3326         hci_dev_lock(hdev);
3327         conn = hci_conn_hash_lookup_handle(hdev, handle);
3328         hci_dev_unlock(hdev);
3329
3330         if (conn) {
3331                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3332
3333                 /* Send to upper protocol */
3334                 l2cap_recv_acldata(conn, skb, flags);
3335                 return;
3336         } else {
3337                 BT_ERR("%s ACL packet for unknown connection handle %d",
3338                        hdev->name, handle);
3339         }
3340
3341         kfree_skb(skb);
3342 }
3343
3344 /* SCO data packet */
3345 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3346 {
3347         struct hci_sco_hdr *hdr = (void *) skb->data;
3348         struct hci_conn *conn;
3349         __u16 handle;
3350
3351         skb_pull(skb, HCI_SCO_HDR_SIZE);
3352
3353         handle = __le16_to_cpu(hdr->handle);
3354
3355         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3356
3357         hdev->stat.sco_rx++;
3358
3359         hci_dev_lock(hdev);
3360         conn = hci_conn_hash_lookup_handle(hdev, handle);
3361         hci_dev_unlock(hdev);
3362
3363         if (conn) {
3364                 /* Send to upper protocol */
3365                 sco_recv_scodata(conn, skb);
3366                 return;
3367         } else {
3368                 BT_ERR("%s SCO packet for unknown connection handle %d",
3369                        hdev->name, handle);
3370         }
3371
3372         kfree_skb(skb);
3373 }
3374
3375 static bool hci_req_is_complete(struct hci_dev *hdev)
3376 {
3377         struct sk_buff *skb;
3378
3379         skb = skb_peek(&hdev->cmd_q);
3380         if (!skb)
3381                 return true;
3382
3383         return bt_cb(skb)->req.start;
3384 }
3385
3386 static void hci_resend_last(struct hci_dev *hdev)
3387 {
3388         struct hci_command_hdr *sent;
3389         struct sk_buff *skb;
3390         u16 opcode;
3391
3392         if (!hdev->sent_cmd)
3393                 return;
3394
3395         sent = (void *) hdev->sent_cmd->data;
3396         opcode = __le16_to_cpu(sent->opcode);
3397         if (opcode == HCI_OP_RESET)
3398                 return;
3399
3400         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3401         if (!skb)
3402                 return;
3403
3404         skb_queue_head(&hdev->cmd_q, skb);
3405         queue_work(hdev->workqueue, &hdev->cmd_work);
3406 }
3407
3408 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3409 {
3410         hci_req_complete_t req_complete = NULL;
3411         struct sk_buff *skb;
3412         unsigned long flags;
3413
3414         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3415
3416         /* If the completed command doesn't match the last one that was
3417          * sent, we need to do special handling of it.
3418          */
3419         if (!hci_sent_cmd_data(hdev, opcode)) {
3420                 /* Some CSR-based controllers generate a spontaneous
3421                  * reset complete event during init and any pending
3422                  * command will never be completed. In such a case we
3423                  * need to resend whatever was the last sent
3424                  * command.
3425                  */
3426                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3427                         hci_resend_last(hdev);
3428
3429                 return;
3430         }
3431
3432         /* If the command succeeded and there are still more commands
3433          * in this request, the request is not yet complete.
3434          */
3435         if (!status && !hci_req_is_complete(hdev))
3436                 return;
3437
3438         /* If this was the last command in a request, the complete
3439          * callback is found in hdev->sent_cmd instead of the
3440          * command queue (hdev->cmd_q).
3441          */
3442         if (hdev->sent_cmd) {
3443                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3444                 if (req_complete)
3445                         goto call_complete;
3446         }
3447
3448         /* Remove all pending commands belonging to this request */
3449         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3450         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3451                 if (bt_cb(skb)->req.start) {
3452                         __skb_queue_head(&hdev->cmd_q, skb);
3453                         break;
3454                 }
3455
3456                 req_complete = bt_cb(skb)->req.complete;
3457                 kfree_skb(skb);
3458         }
3459         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3460
3461 call_complete:
3462         if (req_complete)
3463                 req_complete(hdev, status);
3464 }
3465
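     /* RX worker: drains hdev->rx_q.  Each frame is mirrored to the
      * monitor channel and, when the device is in promiscuous mode, to
      * raw HCI sockets.  In raw mode nothing is processed further; in
      * init state data packets are discarded; everything else is
      * dispatched by packet type.
      */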
3466 static void hci_rx_work(struct work_struct *work)
3467 {
3468         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3469         struct sk_buff *skb;
3470
3471         BT_DBG("%s", hdev->name);
3472
3473         while ((skb = skb_dequeue(&hdev->rx_q))) {
3474                 /* Send copy to monitor */
3475                 hci_send_to_monitor(hdev, skb);
3476
3477                 if (atomic_read(&hdev->promisc)) {
3478                         /* Send copy to the sockets */
3479                         hci_send_to_sock(hdev, skb);
3480                 }
3481
3482                 if (test_bit(HCI_RAW, &hdev->flags)) {
3483                         kfree_skb(skb);
3484                         continue;
3485                 }
3486
3487                 if (test_bit(HCI_INIT, &hdev->flags)) {
3488                         /* Don't process data packets in this state. */
3489                         switch (bt_cb(skb)->pkt_type) {
3490                         case HCI_ACLDATA_PKT:
3491                         case HCI_SCODATA_PKT:
3492                                 kfree_skb(skb);
3493                                 continue;
3494                         }
3495                 }
3496
3497                 /* Process frame */
3498                 switch (bt_cb(skb)->pkt_type) {
3499                 case HCI_EVENT_PKT:
3500                         BT_DBG("%s Event packet", hdev->name);
3501                         hci_event_packet(hdev, skb);
3502                         break;
3503
3504                 case HCI_ACLDATA_PKT:
3505                         BT_DBG("%s ACL data packet", hdev->name);
3506                         hci_acldata_packet(hdev, skb);
3507                         break;
3508
3509                 case HCI_SCODATA_PKT:
3510                         BT_DBG("%s SCO data packet", hdev->name);
3511                         hci_scodata_packet(hdev, skb);
3512                         break;
3513
3514                 default:
3515                         kfree_skb(skb);
3516                         break;
3517                 }
3518         }
3519 }
3520
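     /* TX worker for the command queue.  hdev->cmd_cnt reflects how
      * many commands the controller is still willing to accept; a
      * clone of each sent frame is kept in hdev->sent_cmd so that the
      * matching Command Complete event can be paired with it, and
      * cmd_timer catches controllers that never answer (the timer is
      * not armed while HCI_RESET is pending).
      */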
3521 static void hci_cmd_work(struct work_struct *work)
3522 {
3523         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3524         struct sk_buff *skb;
3525
3526         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3527                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3528
3529         /* Send queued commands */
3530         if (atomic_read(&hdev->cmd_cnt)) {
3531                 skb = skb_dequeue(&hdev->cmd_q);
3532                 if (!skb)
3533                         return;
3534
3535                 kfree_skb(hdev->sent_cmd);
3536
3537                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3538                 if (hdev->sent_cmd) {
3539                         atomic_dec(&hdev->cmd_cnt);
3540                         hci_send_frame(skb);
3541                         if (test_bit(HCI_RESET, &hdev->flags))
3542                                 del_timer(&hdev->cmd_timer);
3543                         else
3544                                 mod_timer(&hdev->cmd_timer,
3545                                           jiffies + HCI_CMD_TIMEOUT);
3546                 } else {
3547                         skb_queue_head(&hdev->cmd_q, skb);
3548                         queue_work(hdev->workqueue, &hdev->cmd_work);
3549                 }
3550         }
3551 }
3552
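     /* Start a general inquiry (device discovery).  The LAP below is
      * the General Inquiry Access Code 0x9e8b33 in little-endian byte
      * order, and "length" is the inquiry duration in units of 1.28 s,
      * so a hypothetical caller wanting roughly ten seconds of
      * discovery would use hci_do_inquiry(hdev, 0x08).
      */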
3553 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3554 {
3555         /* General inquiry access code (GIAC) */
3556         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3557         struct hci_cp_inquiry cp;
3558
3559         BT_DBG("%s", hdev->name);
3560
3561         if (test_bit(HCI_INQUIRY, &hdev->flags))
3562                 return -EINPROGRESS;
3563
3564         inquiry_cache_flush(hdev);
3565
3566         memset(&cp, 0, sizeof(cp));
3567         memcpy(&cp.lap, lap, sizeof(cp.lap));
3568         cp.length  = length;
3569
3570         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3571 }
3572
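     /* Abort a running inquiry; -EALREADY means none was in progress. */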
3573 int hci_cancel_inquiry(struct hci_dev *hdev)
3574 {
3575         BT_DBG("%s", hdev->name);
3576
3577         if (!test_bit(HCI_INQUIRY, &hdev->flags))
3578                 return -EALREADY;
3579
3580         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3581 }
3582
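     /* Map the address types exposed to userspace (BDADDR_LE_*) onto
      * the HCI-level LE address types (ADDR_LE_DEV_*); anything other
      * than a public address is treated as random.
      */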
3583 u8 bdaddr_to_le(u8 bdaddr_type)
3584 {
3585         switch (bdaddr_type) {
3586         case BDADDR_LE_PUBLIC:
3587                 return ADDR_LE_DEV_PUBLIC;
3588
3589         default:
3590                 /* Fall back to the LE Random address type */
3591                 return ADDR_LE_DEV_RANDOM;
3592         }
3593 }