/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

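/* A synchronous request marks hdev->req_status as HCI_REQ_PEND before it
 * runs; the handlers below flip it to HCI_REQ_DONE or HCI_REQ_CANCELED
 * and wake the waiter sleeping on req_wait_q.
 */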
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

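/* Return the last event received for a synchronous command, stashed in
 * hdev->recv_evt. If @event is non-zero the skb is returned only when the
 * event code matches; otherwise it must be a Command Complete for @opcode.
 * On any mismatch the skb is freed and ERR_PTR(-ENODATA) is returned.
 */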
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
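
/* Usage sketch (hypothetical caller, e.g. a driver setup routine):
 *
 *        skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *                             HCI_INIT_TIMEOUT);
 *        if (IS_ERR(skb))
 *                return PTR_ERR(skb);
 *        ... parse the Command Complete parameters in skb->data ...
 *        kfree_skb(skb);
 */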

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only send the command if it is
         * marked as supported. If it is not supported, assume that the
         * controller has no actual support for stored link keys, which
         * makes this command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

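/* The small request builders below are passed to hci_req_sync() by the
 * ioctl helpers further down (hci_inquiry() and hci_dev_cmd()).
 */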
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

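/* Re-insert @ie into the resolve list so that, among entries whose name
 * resolution is not already pending, those with the strongest signal
 * (smallest RSSI magnitude) are resolved first.
 */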
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

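/* Copy up to @num cached entries into @buf as struct inquiry_info records
 * and return the number of entries actually copied.
 */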
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

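/* Bit-wait action for wait_on_bit(): sleep until HCI_INQUIRY is cleared
 * and report whether a signal interrupted the wait.
 */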
static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

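/* Build the LE advertising data into @ptr and return its length. Each AD
 * element is encoded as [length, type, payload]: an optional Flags element,
 * an optional TX Power element and the (possibly shortened) local name.
 */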
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

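/* Bring an HCI device up: open the transport through the driver's open()
 * callback, run the optional driver setup() hook, perform the staged HCI
 * init sequence unless the device is raw, and on success set HCI_UP and
 * notify the stack and mgmt.
 */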
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                /* Treat all non BR/EDR controllers as raw devices if
                 * enable_hs is not set.
                 */
                if (hdev->dev_type != HCI_BREDR && !enable_hs)
                        set_bit(HCI_RAW, &hdev->flags);

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        err = hci_dev_open(hdev->id);
        if (err < 0) {
                mgmt_set_powered_failed(hdev, err);
                return;
        }

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

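/* Decide whether a link key should be stored persistently, based on the
 * key type and on the bonding requirements both sides declared during
 * pairing.
 */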
1681 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1682                                u8 key_type, u8 old_key_type)
1683 {
1684         /* Legacy key */
1685         if (key_type < 0x03)
1686                 return true;
1687
1688         /* Debug keys are insecure so don't store them persistently */
1689         if (key_type == HCI_LK_DEBUG_COMBINATION)
1690                 return false;
1691
1692         /* Changed combination key and there's no previous one */
1693         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1694                 return false;
1695
1696         /* Security mode 3 case */
1697         if (!conn)
1698                 return true;
1699
1700         /* Neither local nor remote side had no-bonding as requirement */
1701         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1702                 return true;
1703
1704         /* Local side had dedicated bonding as requirement */
1705         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1706                 return true;
1707
1708         /* Remote side had dedicated bonding as requirement */
1709         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1710                 return true;
1711
1712         /* If none of the above criteria match, then don't store the key
1713          * persistently */
1714         return false;
1715 }
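
/* Editor's note, not part of the original file: a worked example of the
 * policy above, using the standard HCI key-type and
 * authentication-requirement values. An authenticated combination key
 * (type 0x05) negotiated with conn->auth_type = 0x04 and
 * conn->remote_auth = 0x05 (general bonding on both sides) passes the
 * "auth_type > 0x01 && remote_auth > 0x01" check and is stored
 * persistently. The same key with conn->remote_auth = 0x00 (remote side
 * requested no-bonding) falls through every check above and is only
 * kept for the duration of the connection. */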
1716
1717 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1718 {
1719         struct smp_ltk *k;
1720
1721         list_for_each_entry(k, &hdev->long_term_keys, list) {
1722                 if (k->ediv != ediv ||
1723                     memcmp(rand, k->rand, sizeof(k->rand)))
1724                         continue;
1725
1726                 return k;
1727         }
1728
1729         return NULL;
1730 }
1731
1732 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1733                                      u8 addr_type)
1734 {
1735         struct smp_ltk *k;
1736
1737         list_for_each_entry(k, &hdev->long_term_keys, list)
1738                 if (addr_type == k->bdaddr_type &&
1739                     bacmp(bdaddr, &k->bdaddr) == 0)
1740                         return k;
1741
1742         return NULL;
1743 }
1744
1745 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1746                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1747 {
1748         struct link_key *key, *old_key;
1749         u8 old_key_type;
1750         bool persistent;
1751
1752         old_key = hci_find_link_key(hdev, bdaddr);
1753         if (old_key) {
1754                 old_key_type = old_key->type;
1755                 key = old_key;
1756         } else {
1757                 old_key_type = conn ? conn->key_type : 0xff;
1758                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1759                 if (!key)
1760                         return -ENOMEM;
1761                 list_add(&key->list, &hdev->link_keys);
1762         }
1763
1764         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1765
1766         /* Some buggy controller combinations generate a changed
1767          * combination key for legacy pairing even when there's no
1768          * previous key */
1769         if (type == HCI_LK_CHANGED_COMBINATION &&
1770             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1771                 type = HCI_LK_COMBINATION;
1772                 if (conn)
1773                         conn->key_type = type;
1774         }
1775
1776         bacpy(&key->bdaddr, bdaddr);
1777         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1778         key->pin_len = pin_len;
1779
1780         if (type == HCI_LK_CHANGED_COMBINATION)
1781                 key->type = old_key_type;
1782         else
1783                 key->type = type;
1784
1785         if (!new_key)
1786                 return 0;
1787
1788         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1789
1790         mgmt_new_link_key(hdev, key, persistent);
1791
1792         if (conn)
1793                 conn->flush_key = !persistent;
1794
1795         return 0;
1796 }
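
/* Editor's sketch (hypothetical handler; the real call site lives in
 * the HCI event code): on a Link Key Notification event the fresh key
 * is handed to hci_add_link_key() with new_key = 1, so that
 * mgmt_new_link_key() is invoked as well. A pin_len of 0 is assumed
 * here purely for illustration. */
static void my_link_key_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 *link_key, u8 key_type)
{
        struct hci_conn *conn;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
        hci_add_link_key(hdev, conn, 1, bdaddr, link_key, key_type, 0);

        hci_dev_unlock(hdev);
}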
1797
1798 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1799                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1800                 ediv, u8 rand[8])
1801 {
1802         struct smp_ltk *key, *old_key;
1803
1804         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1805                 return 0;
1806
1807         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1808         if (old_key)
1809                 key = old_key;
1810         else {
1811                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1812                 if (!key)
1813                         return -ENOMEM;
1814                 list_add(&key->list, &hdev->long_term_keys);
1815         }
1816
1817         bacpy(&key->bdaddr, bdaddr);
1818         key->bdaddr_type = addr_type;
1819         memcpy(key->val, tk, sizeof(key->val));
1820         key->authenticated = authenticated;
1821         key->ediv = ediv;
1822         key->enc_size = enc_size;
1823         key->type = type;
1824         memcpy(key->rand, rand, sizeof(key->rand));
1825
1826         if (!new_key)
1827                 return 0;
1828
1829         if (type & HCI_SMP_LTK)
1830                 mgmt_new_ltk(hdev, key, 1);
1831
1832         return 0;
1833 }
1834
1835 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1836 {
1837         struct link_key *key;
1838
1839         key = hci_find_link_key(hdev, bdaddr);
1840         if (!key)
1841                 return -ENOENT;
1842
1843         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1844
1845         list_del(&key->list);
1846         kfree(key);
1847
1848         return 0;
1849 }
1850
1851 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1852 {
1853         struct smp_ltk *k, *tmp;
1854
1855         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1856                 if (bacmp(bdaddr, &k->bdaddr))
1857                         continue;
1858
1859                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1860
1861                 list_del(&k->list);
1862                 kfree(k);
1863         }
1864
1865         return 0;
1866 }
1867
1868 /* HCI command timer function */
1869 static void hci_cmd_timeout(unsigned long arg)
1870 {
1871         struct hci_dev *hdev = (void *) arg;
1872
1873         if (hdev->sent_cmd) {
1874                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1875                 u16 opcode = __le16_to_cpu(sent->opcode);
1876
1877                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1878         } else {
1879                 BT_ERR("%s command tx timeout", hdev->name);
1880         }
1881
1882         atomic_set(&hdev->cmd_cnt, 1);
1883         queue_work(hdev->workqueue, &hdev->cmd_work);
1884 }
1885
1886 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1887                                           bdaddr_t *bdaddr)
1888 {
1889         struct oob_data *data;
1890
1891         list_for_each_entry(data, &hdev->remote_oob_data, list)
1892                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1893                         return data;
1894
1895         return NULL;
1896 }
1897
1898 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1899 {
1900         struct oob_data *data;
1901
1902         data = hci_find_remote_oob_data(hdev, bdaddr);
1903         if (!data)
1904                 return -ENOENT;
1905
1906         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1907
1908         list_del(&data->list);
1909         kfree(data);
1910
1911         return 0;
1912 }
1913
1914 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1915 {
1916         struct oob_data *data, *n;
1917
1918         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1919                 list_del(&data->list);
1920                 kfree(data);
1921         }
1922
1923         return 0;
1924 }
1925
1926 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1927                             u8 *randomizer)
1928 {
1929         struct oob_data *data;
1930
1931         data = hci_find_remote_oob_data(hdev, bdaddr);
1932
1933         if (!data) {
1934                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1935                 if (!data)
1936                         return -ENOMEM;
1937
1938                 bacpy(&data->bdaddr, bdaddr);
1939                 list_add(&data->list, &hdev->remote_oob_data);
1940         }
1941
1942         memcpy(data->hash, hash, sizeof(data->hash));
1943         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1944
1945         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1946
1947         return 0;
1948 }
1949
1950 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1951 {
1952         struct bdaddr_list *b;
1953
1954         list_for_each_entry(b, &hdev->blacklist, list)
1955                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1956                         return b;
1957
1958         return NULL;
1959 }
1960
1961 int hci_blacklist_clear(struct hci_dev *hdev)
1962 {
1963         struct list_head *p, *n;
1964
1965         list_for_each_safe(p, n, &hdev->blacklist) {
1966                 struct bdaddr_list *b;
1967
1968                 b = list_entry(p, struct bdaddr_list, list);
1969
1970                 list_del(p);
1971                 kfree(b);
1972         }
1973
1974         return 0;
1975 }
1976
1977 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1978 {
1979         struct bdaddr_list *entry;
1980
1981         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1982                 return -EBADF;
1983
1984         if (hci_blacklist_lookup(hdev, bdaddr))
1985                 return -EEXIST;
1986
1987         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1988         if (!entry)
1989                 return -ENOMEM;
1990
1991         bacpy(&entry->bdaddr, bdaddr);
1992
1993         list_add(&entry->list, &hdev->blacklist);
1994
1995         return mgmt_device_blocked(hdev, bdaddr, type);
1996 }
1997
1998 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1999 {
2000         struct bdaddr_list *entry;
2001
2002         if (bacmp(bdaddr, BDADDR_ANY) == 0)
2003                 return hci_blacklist_clear(hdev);
2004
2005         entry = hci_blacklist_lookup(hdev, bdaddr);
2006         if (!entry)
2007                 return -ENOENT;
2008
2009         list_del(&entry->list);
2010         kfree(entry);
2011
2012         return mgmt_device_unblocked(hdev, bdaddr, type);
2013 }
2014
2015 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2016 {
2017         if (status) {
2018                 BT_ERR("Failed to start inquiry: status %d", status);
2019
2020                 hci_dev_lock(hdev);
2021                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2022                 hci_dev_unlock(hdev);
2023                 return;
2024         }
2025 }
2026
2027 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2028 {
2029         /* General inquiry access code (GIAC) */
2030         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2031         struct hci_request req;
2032         struct hci_cp_inquiry cp;
2033         int err;
2034
2035         if (status) {
2036                 BT_ERR("Failed to disable LE scanning: status %d", status);
2037                 return;
2038         }
2039
2040         switch (hdev->discovery.type) {
2041         case DISCOV_TYPE_LE:
2042                 hci_dev_lock(hdev);
2043                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2044                 hci_dev_unlock(hdev);
2045                 break;
2046
2047         case DISCOV_TYPE_INTERLEAVED:
2048                 hci_req_init(&req, hdev);
2049
2050                 memset(&cp, 0, sizeof(cp));
2051                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2052                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2053                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2054
2055                 hci_dev_lock(hdev);
2056
2057                 hci_inquiry_cache_flush(hdev);
2058
2059                 err = hci_req_run(&req, inquiry_complete);
2060                 if (err) {
2061                         BT_ERR("Inquiry request failed: err %d", err);
2062                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2063                 }
2064
2065                 hci_dev_unlock(hdev);
2066                 break;
2067         }
2068 }
2069
2070 static void le_scan_disable_work(struct work_struct *work)
2071 {
2072         struct hci_dev *hdev = container_of(work, struct hci_dev,
2073                                             le_scan_disable.work);
2074         struct hci_cp_le_set_scan_enable cp;
2075         struct hci_request req;
2076         int err;
2077
2078         BT_DBG("%s", hdev->name);
2079
2080         hci_req_init(&req, hdev);
2081
2082         memset(&cp, 0, sizeof(cp));
2083         cp.enable = LE_SCAN_DISABLE;
2084         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2085
2086         err = hci_req_run(&req, le_scan_disable_work_complete);
2087         if (err)
2088                 BT_ERR("Disable LE scanning request failed: err %d", err);
2089 }
2090
2091 /* Alloc HCI device */
2092 struct hci_dev *hci_alloc_dev(void)
2093 {
2094         struct hci_dev *hdev;
2095
2096         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2097         if (!hdev)
2098                 return NULL;
2099
2100         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2101         hdev->esco_type = (ESCO_HV1);
2102         hdev->link_mode = (HCI_LM_ACCEPT);
2103         hdev->io_capability = 0x03; /* No Input No Output */
2104         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2105         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2106
2107         hdev->sniff_max_interval = 800;
2108         hdev->sniff_min_interval = 80;
2109
2110         mutex_init(&hdev->lock);
2111         mutex_init(&hdev->req_lock);
2112
2113         INIT_LIST_HEAD(&hdev->mgmt_pending);
2114         INIT_LIST_HEAD(&hdev->blacklist);
2115         INIT_LIST_HEAD(&hdev->uuids);
2116         INIT_LIST_HEAD(&hdev->link_keys);
2117         INIT_LIST_HEAD(&hdev->long_term_keys);
2118         INIT_LIST_HEAD(&hdev->remote_oob_data);
2119         INIT_LIST_HEAD(&hdev->conn_hash.list);
2120
2121         INIT_WORK(&hdev->rx_work, hci_rx_work);
2122         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2123         INIT_WORK(&hdev->tx_work, hci_tx_work);
2124         INIT_WORK(&hdev->power_on, hci_power_on);
2125
2126         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2127         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2128         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2129
2130         skb_queue_head_init(&hdev->rx_q);
2131         skb_queue_head_init(&hdev->cmd_q);
2132         skb_queue_head_init(&hdev->raw_q);
2133
2134         init_waitqueue_head(&hdev->req_wait_q);
2135
2136         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2137
2138         hci_init_sysfs(hdev);
2139         discovery_init(hdev);
2140
2141         return hdev;
2142 }
2143 EXPORT_SYMBOL(hci_alloc_dev);
2144
2145 /* Free HCI device */
2146 void hci_free_dev(struct hci_dev *hdev)
2147 {
2148         /* will free via device release */
2149         put_device(&hdev->dev);
2150 }
2151 EXPORT_SYMBOL(hci_free_dev);
2152
2153 /* Register HCI device */
2154 int hci_register_dev(struct hci_dev *hdev)
2155 {
2156         int id, error;
2157
2158         if (!hdev->open || !hdev->close)
2159                 return -EINVAL;
2160
2161         /* Do not allow HCI_AMP devices to register at index 0,
2162          * so the index can be used as the AMP controller ID.
2163          */
2164         switch (hdev->dev_type) {
2165         case HCI_BREDR:
2166                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2167                 break;
2168         case HCI_AMP:
2169                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2170                 break;
2171         default:
2172                 return -EINVAL;
2173         }
2174
2175         if (id < 0)
2176                 return id;
2177
2178         sprintf(hdev->name, "hci%d", id);
2179         hdev->id = id;
2180
2181         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2182
2183         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2184                                           WQ_MEM_RECLAIM, 1, hdev->name);
2185         if (!hdev->workqueue) {
2186                 error = -ENOMEM;
2187                 goto err;
2188         }
2189
2190         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2191                                               WQ_MEM_RECLAIM, 1, hdev->name);
2192         if (!hdev->req_workqueue) {
2193                 destroy_workqueue(hdev->workqueue);
2194                 error = -ENOMEM;
2195                 goto err;
2196         }
2197
2198         error = hci_add_sysfs(hdev);
2199         if (error < 0)
2200                 goto err_wqueue;
2201
2202         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2203                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2204                                     hdev);
2205         if (hdev->rfkill) {
2206                 if (rfkill_register(hdev->rfkill) < 0) {
2207                         rfkill_destroy(hdev->rfkill);
2208                         hdev->rfkill = NULL;
2209                 }
2210         }
2211
2212         set_bit(HCI_SETUP, &hdev->dev_flags);
2213
2214         if (hdev->dev_type != HCI_AMP)
2215                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2216
2217         write_lock(&hci_dev_list_lock);
2218         list_add(&hdev->list, &hci_dev_list);
2219         write_unlock(&hci_dev_list_lock);
2220
2221         hci_notify(hdev, HCI_DEV_REG);
2222         hci_dev_hold(hdev);
2223
2224         queue_work(hdev->req_workqueue, &hdev->power_on);
2225
2226         return id;
2227
2228 err_wqueue:
2229         destroy_workqueue(hdev->workqueue);
2230         destroy_workqueue(hdev->req_workqueue);
2231 err:
2232         ida_simple_remove(&hci_index_ida, hdev->id);
2233
2234         return error;
2235 }
2236 EXPORT_SYMBOL(hci_register_dev);
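
/* Editor's illustration, not part of the original file: the minimal
 * shape of a transport driver plugging into the alloc/register routines
 * above. The my_* names are hypothetical; only hci_alloc_dev(),
 * hci_register_dev() and hci_free_dev() come from this file, and
 * ->open/->close are the two callbacks hci_register_dev() insists on. */
static int my_open(struct hci_dev *hdev)  { return 0; }
static int my_close(struct hci_dev *hdev) { return 0; }
static int my_send(struct sk_buff *skb)   { kfree_skb(skb); return 0; }

static int my_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus   = HCI_USB;  /* bus type assumed for illustration */
        hdev->open  = my_open;
        hdev->close = my_close;
        hdev->send  = my_send;  /* called from hci_send_frame() */

        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
                return err;
        }

        return 0;
}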
2237
2238 /* Unregister HCI device */
2239 void hci_unregister_dev(struct hci_dev *hdev)
2240 {
2241         int i, id;
2242
2243         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2244
2245         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2246
2247         id = hdev->id;
2248
2249         write_lock(&hci_dev_list_lock);
2250         list_del(&hdev->list);
2251         write_unlock(&hci_dev_list_lock);
2252
2253         hci_dev_do_close(hdev);
2254
2255         for (i = 0; i < NUM_REASSEMBLY; i++)
2256                 kfree_skb(hdev->reassembly[i]);
2257
2258         cancel_work_sync(&hdev->power_on);
2259
2260         if (!test_bit(HCI_INIT, &hdev->flags) &&
2261             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2262                 hci_dev_lock(hdev);
2263                 mgmt_index_removed(hdev);
2264                 hci_dev_unlock(hdev);
2265         }
2266
2267         /* mgmt_index_removed should take care of emptying the
2268          * pending list */
2269         BUG_ON(!list_empty(&hdev->mgmt_pending));
2270
2271         hci_notify(hdev, HCI_DEV_UNREG);
2272
2273         if (hdev->rfkill) {
2274                 rfkill_unregister(hdev->rfkill);
2275                 rfkill_destroy(hdev->rfkill);
2276         }
2277
2278         hci_del_sysfs(hdev);
2279
2280         destroy_workqueue(hdev->workqueue);
2281         destroy_workqueue(hdev->req_workqueue);
2282
2283         hci_dev_lock(hdev);
2284         hci_blacklist_clear(hdev);
2285         hci_uuids_clear(hdev);
2286         hci_link_keys_clear(hdev);
2287         hci_smp_ltks_clear(hdev);
2288         hci_remote_oob_data_clear(hdev);
2289         hci_dev_unlock(hdev);
2290
2291         hci_dev_put(hdev);
2292
2293         ida_simple_remove(&hci_index_ida, id);
2294 }
2295 EXPORT_SYMBOL(hci_unregister_dev);
2296
2297 /* Suspend HCI device */
2298 int hci_suspend_dev(struct hci_dev *hdev)
2299 {
2300         hci_notify(hdev, HCI_DEV_SUSPEND);
2301         return 0;
2302 }
2303 EXPORT_SYMBOL(hci_suspend_dev);
2304
2305 /* Resume HCI device */
2306 int hci_resume_dev(struct hci_dev *hdev)
2307 {
2308         hci_notify(hdev, HCI_DEV_RESUME);
2309         return 0;
2310 }
2311 EXPORT_SYMBOL(hci_resume_dev);
2312
2313 /* Receive frame from HCI drivers */
2314 int hci_recv_frame(struct sk_buff *skb)
2315 {
2316         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2317         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2318                       && !test_bit(HCI_INIT, &hdev->flags))) {
2319                 kfree_skb(skb);
2320                 return -ENXIO;
2321         }
2322
2323         /* Incoming skb */
2324         bt_cb(skb)->incoming = 1;
2325
2326         /* Time stamp */
2327         __net_timestamp(skb);
2328
2329         skb_queue_tail(&hdev->rx_q, skb);
2330         queue_work(hdev->workqueue, &hdev->rx_work);
2331
2332         return 0;
2333 }
2334 EXPORT_SYMBOL(hci_recv_frame);
2335
2336 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2337                           int count, __u8 index)
2338 {
2339         int len = 0;
2340         int hlen = 0;
2341         int remain = count;
2342         struct sk_buff *skb;
2343         struct bt_skb_cb *scb;
2344
2345         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2346             index >= NUM_REASSEMBLY)
2347                 return -EILSEQ;
2348
2349         skb = hdev->reassembly[index];
2350
2351         if (!skb) {
2352                 switch (type) {
2353                 case HCI_ACLDATA_PKT:
2354                         len = HCI_MAX_FRAME_SIZE;
2355                         hlen = HCI_ACL_HDR_SIZE;
2356                         break;
2357                 case HCI_EVENT_PKT:
2358                         len = HCI_MAX_EVENT_SIZE;
2359                         hlen = HCI_EVENT_HDR_SIZE;
2360                         break;
2361                 case HCI_SCODATA_PKT:
2362                         len = HCI_MAX_SCO_SIZE;
2363                         hlen = HCI_SCO_HDR_SIZE;
2364                         break;
2365                 }
2366
2367                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2368                 if (!skb)
2369                         return -ENOMEM;
2370
2371                 scb = (void *) skb->cb;
2372                 scb->expect = hlen;
2373                 scb->pkt_type = type;
2374
2375                 skb->dev = (void *) hdev;
2376                 hdev->reassembly[index] = skb;
2377         }
2378
2379         while (count) {
2380                 scb = (void *) skb->cb;
2381                 len = min_t(uint, scb->expect, count);
2382
2383                 memcpy(skb_put(skb, len), data, len);
2384
2385                 count -= len;
2386                 data += len;
2387                 scb->expect -= len;
2388                 remain = count;
2389
2390                 switch (type) {
2391                 case HCI_EVENT_PKT:
2392                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2393                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2394                                 scb->expect = h->plen;
2395
2396                                 if (skb_tailroom(skb) < scb->expect) {
2397                                         kfree_skb(skb);
2398                                         hdev->reassembly[index] = NULL;
2399                                         return -ENOMEM;
2400                                 }
2401                         }
2402                         break;
2403
2404                 case HCI_ACLDATA_PKT:
2405                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2406                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2407                                 scb->expect = __le16_to_cpu(h->dlen);
2408
2409                                 if (skb_tailroom(skb) < scb->expect) {
2410                                         kfree_skb(skb);
2411                                         hdev->reassembly[index] = NULL;
2412                                         return -ENOMEM;
2413                                 }
2414                         }
2415                         break;
2416
2417                 case HCI_SCODATA_PKT:
2418                         if (skb->len == HCI_SCO_HDR_SIZE) {
2419                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2420                                 scb->expect = h->dlen;
2421
2422                                 if (skb_tailroom(skb) < scb->expect) {
2423                                         kfree_skb(skb);
2424                                         hdev->reassembly[index] = NULL;
2425                                         return -ENOMEM;
2426                                 }
2427                         }
2428                         break;
2429                 }
2430
2431                 if (scb->expect == 0) {
2432                         /* Complete frame */
2433
2434                         bt_cb(skb)->pkt_type = type;
2435                         hci_recv_frame(skb);
2436
2437                         hdev->reassembly[index] = NULL;
2438                         return remain;
2439                 }
2440         }
2441
2442         return remain;
2443 }
2444
2445 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2446 {
2447         int rem = 0;
2448
2449         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2450                 return -EILSEQ;
2451
2452         while (count) {
2453                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2454                 if (rem < 0)
2455                         return rem;
2456
2457                 data += (count - rem);
2458                 count = rem;
2459         }
2460
2461         return rem;
2462 }
2463 EXPORT_SYMBOL(hci_recv_fragment);
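
/* Editor's sketch (hypothetical helper): a transport driver that
 * already knows the packet type of a received chunk feeds it into the
 * reassembly machinery above; a negative return means the fragment
 * could not be reassembled. */
static void my_rx_chunk(struct hci_dev *hdev, u8 pkt_type, void *buf,
                        int len)
{
        int rem = hci_recv_fragment(hdev, pkt_type, buf, len);

        if (rem < 0)
                BT_ERR("%s reassembly failed: %d", hdev->name, rem);
}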
2464
2465 #define STREAM_REASSEMBLY 0
2466
2467 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2468 {
2469         int type;
2470         int rem = 0;
2471
2472         while (count) {
2473                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2474
2475                 if (!skb) {
2476                         struct { char type; } *pkt;
2477
2478                         /* Start of the frame */
2479                         pkt = data;
2480                         type = pkt->type;
2481
2482                         data++;
2483                         count--;
2484                 } else
2485                         type = bt_cb(skb)->pkt_type;
2486
2487                 rem = hci_reassembly(hdev, type, data, count,
2488                                      STREAM_REASSEMBLY);
2489                 if (rem < 0)
2490                         return rem;
2491
2492                 data += (count - rem);
2493                 count = rem;
2494         }
2495
2496         return rem;
2497 }
2498 EXPORT_SYMBOL(hci_recv_stream_fragment);
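
/* Editor's sketch (hypothetical helper): for H4-style transports the
 * packet type indicator travels in-band as the first byte of each
 * frame, so a UART driver can push raw bytes straight into
 * hci_recv_stream_fragment() above and let it recover the framing. */
static void my_uart_rx(struct hci_dev *hdev, void *buf, int len)
{
        if (hci_recv_stream_fragment(hdev, buf, len) < 0)
                BT_ERR("%s stream reassembly failed", hdev->name);
}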
2499
2500 /* ---- Interface to upper protocols ---- */
2501
2502 int hci_register_cb(struct hci_cb *cb)
2503 {
2504         BT_DBG("%p name %s", cb, cb->name);
2505
2506         write_lock(&hci_cb_list_lock);
2507         list_add(&cb->list, &hci_cb_list);
2508         write_unlock(&hci_cb_list_lock);
2509
2510         return 0;
2511 }
2512 EXPORT_SYMBOL(hci_register_cb);
2513
2514 int hci_unregister_cb(struct hci_cb *cb)
2515 {
2516         BT_DBG("%p name %s", cb, cb->name);
2517
2518         write_lock(&hci_cb_list_lock);
2519         list_del(&cb->list);
2520         write_unlock(&hci_cb_list_lock);
2521
2522         return 0;
2523 }
2524 EXPORT_SYMBOL(hci_unregister_cb);
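
/* Editor's illustration: an upper protocol registers a callback block
 * on module init and removes it symmetrically on exit ("my_proto" is a
 * hypothetical name; .name is the only field these two routines
 * dereference directly). */
static struct hci_cb my_proto_cb = {
        .name = "my_proto",
};

/* on init: hci_register_cb(&my_proto_cb);   */
/* on exit: hci_unregister_cb(&my_proto_cb); */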
2525
2526 static int hci_send_frame(struct sk_buff *skb)
2527 {
2528         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2529
2530         if (!hdev) {
2531                 kfree_skb(skb);
2532                 return -ENODEV;
2533         }
2534
2535         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2536
2537         /* Time stamp */
2538         __net_timestamp(skb);
2539
2540         /* Send copy to monitor */
2541         hci_send_to_monitor(hdev, skb);
2542
2543         if (atomic_read(&hdev->promisc)) {
2544                 /* Send copy to the sockets */
2545                 hci_send_to_sock(hdev, skb);
2546         }
2547
2548         /* Get rid of the skb owner prior to sending it to the driver. */
2549         skb_orphan(skb);
2550
2551         return hdev->send(skb);
2552 }
2553
2554 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2555 {
2556         skb_queue_head_init(&req->cmd_q);
2557         req->hdev = hdev;
2558         req->err = 0;
2559 }
2560
2561 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2562 {
2563         struct hci_dev *hdev = req->hdev;
2564         struct sk_buff *skb;
2565         unsigned long flags;
2566
2567         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2568
2569         /* If an error occurred during request building, remove all HCI
2570          * commands queued on the HCI request queue.
2571          */
2572         if (req->err) {
2573                 skb_queue_purge(&req->cmd_q);
2574                 return req->err;
2575         }
2576
2577         /* Do not allow empty requests */
2578         if (skb_queue_empty(&req->cmd_q))
2579                 return -ENODATA;
2580
2581         skb = skb_peek_tail(&req->cmd_q);
2582         bt_cb(skb)->req.complete = complete;
2583
2584         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2585         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2586         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2587
2588         queue_work(hdev->workqueue, &hdev->cmd_work);
2589
2590         return 0;
2591 }
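
/* Editor's illustration of the request pattern implemented above
 * (my_* names hypothetical): queue one or more commands on a local
 * request, then run them with a single completion callback, which
 * hci_req_run() attaches to the last queued command. */
static void my_complete(struct hci_dev *hdev, u8 status)
{
        BT_DBG("%s request done, status 0x%2.2x", hdev->name, status);
}

static int my_set_scan(struct hci_dev *hdev, u8 scan)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        return hci_req_run(&req, my_complete);
}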
2592
2593 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2594                                        u32 plen, const void *param)
2595 {
2596         int len = HCI_COMMAND_HDR_SIZE + plen;
2597         struct hci_command_hdr *hdr;
2598         struct sk_buff *skb;
2599
2600         skb = bt_skb_alloc(len, GFP_ATOMIC);
2601         if (!skb)
2602                 return NULL;
2603
2604         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2605         hdr->opcode = cpu_to_le16(opcode);
2606         hdr->plen   = plen;
2607
2608         if (plen)
2609                 memcpy(skb_put(skb, plen), param, plen);
2610
2611         BT_DBG("skb len %d", skb->len);
2612
2613         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2614         skb->dev = (void *) hdev;
2615
2616         return skb;
2617 }
2618
2619 /* Send HCI command */
2620 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2621                  const void *param)
2622 {
2623         struct sk_buff *skb;
2624
2625         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2626
2627         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2628         if (!skb) {
2629                 BT_ERR("%s no memory for command", hdev->name);
2630                 return -ENOMEM;
2631         }
2632
2633         /* Stand-alone HCI commands must be flagged as
2634          * single-command requests.
2635          */
2636         bt_cb(skb)->req.start = true;
2637
2638         skb_queue_tail(&hdev->cmd_q, skb);
2639         queue_work(hdev->workqueue, &hdev->cmd_work);
2640
2641         return 0;
2642 }
2643
2644 /* Queue a command to an asynchronous HCI request */
2645 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2646                     const void *param, u8 event)
2647 {
2648         struct hci_dev *hdev = req->hdev;
2649         struct sk_buff *skb;
2650
2651         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2652
2653         /* If an error occurred during request building, there is no point in
2654          * queueing the HCI command. We can simply return.
2655          */
2656         if (req->err)
2657                 return;
2658
2659         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2660         if (!skb) {
2661                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2662                        hdev->name, opcode);
2663                 req->err = -ENOMEM;
2664                 return;
2665         }
2666
2667         if (skb_queue_empty(&req->cmd_q))
2668                 bt_cb(skb)->req.start = true;
2669
2670         bt_cb(skb)->req.event = event;
2671
2672         skb_queue_tail(&req->cmd_q, skb);
2673 }
2674
2675 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2676                  const void *param)
2677 {
2678         hci_req_add_ev(req, opcode, plen, param, 0);
2679 }
2680
2681 /* Get data from the previously sent command */
2682 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2683 {
2684         struct hci_command_hdr *hdr;
2685
2686         if (!hdev->sent_cmd)
2687                 return NULL;
2688
2689         hdr = (void *) hdev->sent_cmd->data;
2690
2691         if (hdr->opcode != cpu_to_le16(opcode))
2692                 return NULL;
2693
2694         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2695
2696         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2697 }
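
/* Editor's sketch of the usual caller pattern (hypothetical handler):
 * a command-complete handler recovers the parameters it sent earlier
 * and bails out if the last sent command does not match the opcode. */
static void my_cc_write_scan_enable(struct hci_dev *hdev)
{
        void *sent;

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
        if (!sent)
                return;

        BT_DBG("%s scan 0x%2.2x", hdev->name, *((u8 *) sent));
}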
2698
2699 /* Send ACL data */
2700 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2701 {
2702         struct hci_acl_hdr *hdr;
2703         int len = skb->len;
2704
2705         skb_push(skb, HCI_ACL_HDR_SIZE);
2706         skb_reset_transport_header(skb);
2707         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2708         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2709         hdr->dlen   = cpu_to_le16(len);
2710 }
2711
2712 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2713                           struct sk_buff *skb, __u16 flags)
2714 {
2715         struct hci_conn *conn = chan->conn;
2716         struct hci_dev *hdev = conn->hdev;
2717         struct sk_buff *list;
2718
2719         skb->len = skb_headlen(skb);
2720         skb->data_len = 0;
2721
2722         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2723
2724         switch (hdev->dev_type) {
2725         case HCI_BREDR:
2726                 hci_add_acl_hdr(skb, conn->handle, flags);
2727                 break;
2728         case HCI_AMP:
2729                 hci_add_acl_hdr(skb, chan->handle, flags);
2730                 break;
2731         default:
2732                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2733                 return;
2734         }
2735
2736         list = skb_shinfo(skb)->frag_list;
2737         if (!list) {
2738                 /* Non-fragmented */
2739                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2740
2741                 skb_queue_tail(queue, skb);
2742         } else {
2743                 /* Fragmented */
2744                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2745
2746                 skb_shinfo(skb)->frag_list = NULL;
2747
2748                 /* Queue all fragments atomically */
2749                 spin_lock(&queue->lock);
2750
2751                 __skb_queue_tail(queue, skb);
2752
2753                 flags &= ~ACL_START;
2754                 flags |= ACL_CONT;
2755                 do {
2756                         skb = list; list = list->next;
2757
2758                         skb->dev = (void *) hdev;
2759                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2760                         hci_add_acl_hdr(skb, conn->handle, flags);
2761
2762                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2763
2764                         __skb_queue_tail(queue, skb);
2765                 } while (list);
2766
2767                 spin_unlock(&queue->lock);
2768         }
2769 }
2770
2771 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2772 {
2773         struct hci_dev *hdev = chan->conn->hdev;
2774
2775         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2776
2777         skb->dev = (void *) hdev;
2778
2779         hci_queue_acl(chan, &chan->data_q, skb, flags);
2780
2781         queue_work(hdev->workqueue, &hdev->tx_work);
2782 }
2783
2784 /* Send SCO data */
2785 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2786 {
2787         struct hci_dev *hdev = conn->hdev;
2788         struct hci_sco_hdr hdr;
2789
2790         BT_DBG("%s len %d", hdev->name, skb->len);
2791
2792         hdr.handle = cpu_to_le16(conn->handle);
2793         hdr.dlen   = skb->len;
2794
2795         skb_push(skb, HCI_SCO_HDR_SIZE);
2796         skb_reset_transport_header(skb);
2797         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2798
2799         skb->dev = (void *) hdev;
2800         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2801
2802         skb_queue_tail(&conn->data_q, skb);
2803         queue_work(hdev->workqueue, &hdev->tx_work);
2804 }
2805
2806 /* ---- HCI TX task (outgoing data) ---- */
2807
2808 /* HCI Connection scheduler */
2809 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2810                                      int *quote)
2811 {
2812         struct hci_conn_hash *h = &hdev->conn_hash;
2813         struct hci_conn *conn = NULL, *c;
2814         unsigned int num = 0, min = ~0;
2815
2816         /* We don't have to lock the device here. Connections are always
2817          * added and removed with the TX task disabled. */
2818
2819         rcu_read_lock();
2820
2821         list_for_each_entry_rcu(c, &h->list, list) {
2822                 if (c->type != type || skb_queue_empty(&c->data_q))
2823                         continue;
2824
2825                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2826                         continue;
2827
2828                 num++;
2829
2830                 if (c->sent < min) {
2831                         min  = c->sent;
2832                         conn = c;
2833                 }
2834
2835                 if (hci_conn_num(hdev, type) == num)
2836                         break;
2837         }
2838
2839         rcu_read_unlock();
2840
2841         if (conn) {
2842                 int cnt, q;
2843
2844                 switch (conn->type) {
2845                 case ACL_LINK:
2846                         cnt = hdev->acl_cnt;
2847                         break;
2848                 case SCO_LINK:
2849                 case ESCO_LINK:
2850                         cnt = hdev->sco_cnt;
2851                         break;
2852                 case LE_LINK:
2853                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2854                         break;
2855                 default:
2856                         cnt = 0;
2857                         BT_ERR("Unknown link type");
2858                 }
2859
2860                 q = cnt / num;
2861                 *quote = q ? q : 1;
2862         } else
2863                 *quote = 0;
2864
2865         BT_DBG("conn %p quote %d", conn, *quote);
2866         return conn;
2867 }
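
/* Editor's worked example of the quota computed above: with
 * hdev->sco_cnt = 5 and three SCO connections that all have queued
 * data, num = 3 and q = 5 / 3 = 1, so each scheduling round lets the
 * selected connection send one frame; picking the connection with the
 * lowest ->sent count keeps the round-robin fair across links of the
 * same type. */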
2868
2869 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2870 {
2871         struct hci_conn_hash *h = &hdev->conn_hash;
2872         struct hci_conn *c;
2873
2874         BT_ERR("%s link tx timeout", hdev->name);
2875
2876         rcu_read_lock();
2877
2878         /* Kill stalled connections */
2879         list_for_each_entry_rcu(c, &h->list, list) {
2880                 if (c->type == type && c->sent) {
2881                         BT_ERR("%s killing stalled connection %pMR",
2882                                hdev->name, &c->dst);
2883                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2884                 }
2885         }
2886
2887         rcu_read_unlock();
2888 }
2889
2890 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2891                                       int *quote)
2892 {
2893         struct hci_conn_hash *h = &hdev->conn_hash;
2894         struct hci_chan *chan = NULL;
2895         unsigned int num = 0, min = ~0, cur_prio = 0;
2896         struct hci_conn *conn;
2897         int cnt, q, conn_num = 0;
2898
2899         BT_DBG("%s", hdev->name);
2900
2901         rcu_read_lock();
2902
2903         list_for_each_entry_rcu(conn, &h->list, list) {
2904                 struct hci_chan *tmp;
2905
2906                 if (conn->type != type)
2907                         continue;
2908
2909                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2910                         continue;
2911
2912                 conn_num++;
2913
2914                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2915                         struct sk_buff *skb;
2916
2917                         if (skb_queue_empty(&tmp->data_q))
2918                                 continue;
2919
2920                         skb = skb_peek(&tmp->data_q);
2921                         if (skb->priority < cur_prio)
2922                                 continue;
2923
2924                         if (skb->priority > cur_prio) {
2925                                 num = 0;
2926                                 min = ~0;
2927                                 cur_prio = skb->priority;
2928                         }
2929
2930                         num++;
2931
2932                         if (conn->sent < min) {
2933                                 min  = conn->sent;
2934                                 chan = tmp;
2935                         }
2936                 }
2937
2938                 if (hci_conn_num(hdev, type) == conn_num)
2939                         break;
2940         }
2941
2942         rcu_read_unlock();
2943
2944         if (!chan)
2945                 return NULL;
2946
2947         switch (chan->conn->type) {
2948         case ACL_LINK:
2949                 cnt = hdev->acl_cnt;
2950                 break;
2951         case AMP_LINK:
2952                 cnt = hdev->block_cnt;
2953                 break;
2954         case SCO_LINK:
2955         case ESCO_LINK:
2956                 cnt = hdev->sco_cnt;
2957                 break;
2958         case LE_LINK:
2959                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2960                 break;
2961         default:
2962                 cnt = 0;
2963                 BT_ERR("Unknown link type");
2964         }
2965
2966         q = cnt / num;
2967         *quote = q ? q : 1;
2968         BT_DBG("chan %p quote %d", chan, *quote);
2969         return chan;
2970 }
2971
2972 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2973 {
2974         struct hci_conn_hash *h = &hdev->conn_hash;
2975         struct hci_conn *conn;
2976         int num = 0;
2977
2978         BT_DBG("%s", hdev->name);
2979
2980         rcu_read_lock();
2981
2982         list_for_each_entry_rcu(conn, &h->list, list) {
2983                 struct hci_chan *chan;
2984
2985                 if (conn->type != type)
2986                         continue;
2987
2988                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2989                         continue;
2990
2991                 num++;
2992
2993                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2994                         struct sk_buff *skb;
2995
2996                         if (chan->sent) {
2997                                 chan->sent = 0;
2998                                 continue;
2999                         }
3000
3001                         if (skb_queue_empty(&chan->data_q))
3002                                 continue;
3003
3004                         skb = skb_peek(&chan->data_q);
3005                         if (skb->priority >= HCI_PRIO_MAX - 1)
3006                                 continue;
3007
3008                         skb->priority = HCI_PRIO_MAX - 1;
3009
3010                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3011                                skb->priority);
3012                 }
3013
3014                 if (hci_conn_num(hdev, type) == num)
3015                         break;
3016         }
3017
3018         rcu_read_unlock();
3019
3020 }
3021
3022 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3023 {
3024         /* Calculate count of blocks used by this packet */
3025         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3026 }
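
/* Editor's worked example (values assumed for illustration): with
 * hdev->block_len = 339 and an ACL frame of skb->len = 1021 bytes, the
 * payload after the 4-byte ACL header is 1017 bytes and
 * DIV_ROUND_UP(1017, 339) = 3 blocks are charged against
 * hdev->block_cnt by hci_sched_acl_blk() below. */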
3027
3028 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3029 {
3030         if (!test_bit(HCI_RAW, &hdev->flags)) {
3031                 /* ACL tx timeout must be longer than the maximum
3032                  * link supervision timeout (40.9 seconds) */
3033                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3034                                        HCI_ACL_TX_TIMEOUT))
3035                         hci_link_tx_to(hdev, ACL_LINK);
3036         }
3037 }
3038
3039 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3040 {
3041         unsigned int cnt = hdev->acl_cnt;
3042         struct hci_chan *chan;
3043         struct sk_buff *skb;
3044         int quote;
3045
3046         __check_timeout(hdev, cnt);
3047
3048         while (hdev->acl_cnt &&
3049                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3050                 u32 priority = (skb_peek(&chan->data_q))->priority;
3051                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3052                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3053                                skb->len, skb->priority);
3054
3055                         /* Stop if priority has changed */
3056                         if (skb->priority < priority)
3057                                 break;
3058
3059                         skb = skb_dequeue(&chan->data_q);
3060
3061                         hci_conn_enter_active_mode(chan->conn,
3062                                                    bt_cb(skb)->force_active);
3063
3064                         hci_send_frame(skb);
3065                         hdev->acl_last_tx = jiffies;
3066
3067                         hdev->acl_cnt--;
3068                         chan->sent++;
3069                         chan->conn->sent++;
3070                 }
3071         }
3072
3073         if (cnt != hdev->acl_cnt)
3074                 hci_prio_recalculate(hdev, ACL_LINK);
3075 }
3076
3077 static void hci_sched_acl_blk(struct hci_dev *hdev)
3078 {
3079         unsigned int cnt = hdev->block_cnt;
3080         struct hci_chan *chan;
3081         struct sk_buff *skb;
3082         int quote;
3083         u8 type;
3084
3085         __check_timeout(hdev, cnt);
3086
3087         BT_DBG("%s", hdev->name);
3088
3089         if (hdev->dev_type == HCI_AMP)
3090                 type = AMP_LINK;
3091         else
3092                 type = ACL_LINK;
3093
3094         while (hdev->block_cnt > 0 &&
3095                (chan = hci_chan_sent(hdev, type, &quote))) {
3096                 u32 priority = (skb_peek(&chan->data_q))->priority;
3097                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3098                         int blocks;
3099
3100                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3101                                skb->len, skb->priority);
3102
3103                         /* Stop if priority has changed */
3104                         if (skb->priority < priority)
3105                                 break;
3106
3107                         skb = skb_dequeue(&chan->data_q);
3108
3109                         blocks = __get_blocks(hdev, skb);
3110                         if (blocks > hdev->block_cnt)
3111                                 return;
3112
3113                         hci_conn_enter_active_mode(chan->conn,
3114                                                    bt_cb(skb)->force_active);
3115
3116                         hci_send_frame(skb);
3117                         hdev->acl_last_tx = jiffies;
3118
3119                         hdev->block_cnt -= blocks;
3120                         quote -= blocks;
3121
3122                         chan->sent += blocks;
3123                         chan->conn->sent += blocks;
3124                 }
3125         }
3126
3127         if (cnt != hdev->block_cnt)
3128                 hci_prio_recalculate(hdev, type);
3129 }
3130
3131 static void hci_sched_acl(struct hci_dev *hdev)
3132 {
3133         BT_DBG("%s", hdev->name);
3134
3135         /* No ACL link over BR/EDR controller */
3136         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3137                 return;
3138
3139         /* No AMP link over AMP controller */
3140         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3141                 return;
3142
3143         switch (hdev->flow_ctl_mode) {
3144         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3145                 hci_sched_acl_pkt(hdev);
3146                 break;
3147
3148         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3149                 hci_sched_acl_blk(hdev);
3150                 break;
3151         }
3152 }
3153
3154 /* Schedule SCO */
3155 static void hci_sched_sco(struct hci_dev *hdev)
3156 {
3157         struct hci_conn *conn;
3158         struct sk_buff *skb;
3159         int quote;
3160
3161         BT_DBG("%s", hdev->name);
3162
3163         if (!hci_conn_num(hdev, SCO_LINK))
3164                 return;
3165
3166         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3167                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3168                         BT_DBG("skb %p len %d", skb, skb->len);
3169                         hci_send_frame(skb);
3170
3171                         conn->sent++;
3172                         if (conn->sent == ~0)
3173                                 conn->sent = 0;
3174                 }
3175         }
3176 }
3177
3178 static void hci_sched_esco(struct hci_dev *hdev)
3179 {
3180         struct hci_conn *conn;
3181         struct sk_buff *skb;
3182         int quote;
3183
3184         BT_DBG("%s", hdev->name);
3185
3186         if (!hci_conn_num(hdev, ESCO_LINK))
3187                 return;
3188
3189         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3190                                                      &quote))) {
3191                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3192                         BT_DBG("skb %p len %d", skb, skb->len);
3193                         hci_send_frame(skb);
3194
3195                         conn->sent++;
3196                         if (conn->sent == ~0)
3197                                 conn->sent = 0;
3198                 }
3199         }
3200 }
3201
3202 static void hci_sched_le(struct hci_dev *hdev)
3203 {
3204         struct hci_chan *chan;
3205         struct sk_buff *skb;
3206         int quote, cnt, tmp;
3207
3208         BT_DBG("%s", hdev->name);
3209
3210         if (!hci_conn_num(hdev, LE_LINK))
3211                 return;
3212
3213         if (!test_bit(HCI_RAW, &hdev->flags)) {
3214                 /* LE tx timeout must be longer than the maximum
3215                  * link supervision timeout (40.9 seconds) */
3216                 if (!hdev->le_cnt && hdev->le_pkts &&
3217                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3218                         hci_link_tx_to(hdev, LE_LINK);
3219         }
3220
3221         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3222         tmp = cnt;
3223         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3224                 u32 priority = (skb_peek(&chan->data_q))->priority;
3225                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3226                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3227                                skb->len, skb->priority);
3228
3229                         /* Stop if priority has changed */
3230                         if (skb->priority < priority)
3231                                 break;
3232
3233                         skb = skb_dequeue(&chan->data_q);
3234
3235                         hci_send_frame(skb);
3236                         hdev->le_last_tx = jiffies;
3237
3238                         cnt--;
3239                         chan->sent++;
3240                         chan->conn->sent++;
3241                 }
3242         }
3243
3244         if (hdev->le_pkts)
3245                 hdev->le_cnt = cnt;
3246         else
3247                 hdev->acl_cnt = cnt;
3248
3249         if (cnt != tmp)
3250                 hci_prio_recalculate(hdev, LE_LINK);
3251 }
3252
3253 static void hci_tx_work(struct work_struct *work)
3254 {
3255         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3256         struct sk_buff *skb;
3257
3258         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3259                hdev->sco_cnt, hdev->le_cnt);
3260
3261         /* Schedule queues and send stuff to HCI driver */
3262
3263         hci_sched_acl(hdev);
3264
3265         hci_sched_sco(hdev);
3266
3267         hci_sched_esco(hdev);
3268
3269         hci_sched_le(hdev);
3270
3271         /* Send next queued raw (unknown type) packet */
3272         while ((skb = skb_dequeue(&hdev->raw_q)))
3273                 hci_send_frame(skb);
3274 }
3275
3276 /* ----- HCI RX task (incoming data processing) ----- */
3277
3278 /* ACL data packet */
3279 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3280 {
3281         struct hci_acl_hdr *hdr = (void *) skb->data;
3282         struct hci_conn *conn;
3283         __u16 handle, flags;
3284
3285         skb_pull(skb, HCI_ACL_HDR_SIZE);
3286
3287         handle = __le16_to_cpu(hdr->handle);
3288         flags  = hci_flags(handle);
3289         handle = hci_handle(handle);
3290
3291         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3292                handle, flags);
3293
3294         hdev->stat.acl_rx++;
3295
3296         hci_dev_lock(hdev);
3297         conn = hci_conn_hash_lookup_handle(hdev, handle);
3298         hci_dev_unlock(hdev);
3299
3300         if (conn) {
3301                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3302
3303                 /* Send to upper protocol */
3304                 l2cap_recv_acldata(conn, skb, flags);
3305                 return;
3306         } else {
3307                 BT_ERR("%s ACL packet for unknown connection handle %d",
3308                        hdev->name, handle);
3309         }
3310
3311         kfree_skb(skb);
3312 }
3313
3314 /* SCO data packet */
3315 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3316 {
3317         struct hci_sco_hdr *hdr = (void *) skb->data;
3318         struct hci_conn *conn;
3319         __u16 handle;
3320
3321         skb_pull(skb, HCI_SCO_HDR_SIZE);
3322
3323         handle = __le16_to_cpu(hdr->handle);
3324
3325         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3326
3327         hdev->stat.sco_rx++;
3328
3329         hci_dev_lock(hdev);
3330         conn = hci_conn_hash_lookup_handle(hdev, handle);
3331         hci_dev_unlock(hdev);
3332
3333         if (conn) {
3334                 /* Send to upper protocol */
3335                 sco_recv_scodata(conn, skb);
3336                 return;
3337         } else {
3338                 BT_ERR("%s SCO packet for unknown connection handle %d",
3339                        hdev->name, handle);
3340         }
3341
3342         kfree_skb(skb);
3343 }
3344
3345 static bool hci_req_is_complete(struct hci_dev *hdev)
3346 {
3347         struct sk_buff *skb;
3348
3349         skb = skb_peek(&hdev->cmd_q);
3350         if (!skb)
3351                 return true;
3352
3353         return bt_cb(skb)->req.start;
3354 }
3355
3356 static void hci_resend_last(struct hci_dev *hdev)
3357 {
3358         struct hci_command_hdr *sent;
3359         struct sk_buff *skb;
3360         u16 opcode;
3361
3362         if (!hdev->sent_cmd)
3363                 return;
3364
3365         sent = (void *) hdev->sent_cmd->data;
3366         opcode = __le16_to_cpu(sent->opcode);
3367         if (opcode == HCI_OP_RESET)
3368                 return;
3369
3370         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3371         if (!skb)
3372                 return;
3373
3374         skb_queue_head(&hdev->cmd_q, skb);
3375         queue_work(hdev->workqueue, &hdev->cmd_work);
3376 }
3377
3378 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3379 {
3380         hci_req_complete_t req_complete = NULL;
3381         struct sk_buff *skb;
3382         unsigned long flags;
3383
3384         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3385
3386         /* If the completed command doesn't match the last one that was
3387          * sent, we need to do special handling of it.
3388          */
3389         if (!hci_sent_cmd_data(hdev, opcode)) {
3390                 /* Some CSR based controllers generate a spontaneous
3391                  * reset complete event during init and any pending
3392                  * command will never be completed. In such a case we
3393                  * need to resend whatever was the last sent
3394                  * command.
3395                  */
3396                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3397                         hci_resend_last(hdev);
3398
3399                 return;
3400         }
3401
3402         /* If the command succeeded and there are still more commands in
3403          * this request, the request is not yet complete.
3404          */
3405         if (!status && !hci_req_is_complete(hdev))
3406                 return;
3407
3408         /* If this was the last command in a request, the complete
3409          * callback is found in hdev->sent_cmd instead of the
3410          * command queue (hdev->cmd_q).
3411          */
3412         if (hdev->sent_cmd) {
3413                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3414
3415                 if (req_complete) {
3416                         /* We must set the complete callback to NULL to
3417                          * avoid calling the callback more than once if
3418                          * this function gets called again.
3419                          */
3420                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
3421
3422                         goto call_complete;
3423                 }
3424         }
3425
3426         /* Remove all pending commands belonging to this request */
3427         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3428         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3429                 if (bt_cb(skb)->req.start) {
3430                         __skb_queue_head(&hdev->cmd_q, skb);
3431                         break;
3432                 }
3433
3434                 req_complete = bt_cb(skb)->req.complete;
3435                 kfree_skb(skb);
3436         }
3437         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3438
3439 call_complete:
3440         if (req_complete)
3441                 req_complete(hdev, status);
3442 }
3443
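/*
 * Editor's note: a minimal, self-contained userspace model of the
 * dequeue loop at the end of hci_req_cmd_complete() above.  Commands
 * sit in a FIFO, the first command of each request carries a start
 * mark, and when a request finishes early the remaining commands of
 * that request are popped until the next start mark, picking up the
 * request's completion callback along the way.  All names below are
 * hypothetical; the block is fenced out with #if 0 so it is never
 * compiled.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef void (*complete_fn)(int status);

struct cmd_example {
	bool start;		/* first command of a request? */
	complete_fn complete;	/* set on the last command of a request */
	struct cmd_example *next;
};

/* Pop the rest of the current request; return its complete callback */
static complete_fn flush_request(struct cmd_example **q)
{
	complete_fn complete = NULL;
	struct cmd_example *cmd;

	while ((cmd = *q)) {
		if (cmd->start)	/* head of the *next* request: stop */
			break;
		complete = cmd->complete;
		*q = cmd->next;	/* drop this command */
	}
	return complete;
}

static void req_done(int status)
{
	printf("request done, status %d\n", status);
}

int main(void)
{
	struct cmd_example next_req = { true, NULL, NULL };
	struct cmd_example last_cmd = { false, req_done, &next_req };
	struct cmd_example *q = &last_cmd;	/* request head already sent */
	complete_fn complete = flush_request(&q);

	if (complete)
		complete(0);	/* like the call_complete label above */
	return 0;
}
#endif
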
3444 static void hci_rx_work(struct work_struct *work)
3445 {
3446         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
3447         struct sk_buff *skb;
3448
3449         BT_DBG("%s", hdev->name);
3450
3451         while ((skb = skb_dequeue(&hdev->rx_q))) {
3452                 /* Send copy to monitor */
3453                 hci_send_to_monitor(hdev, skb);
3454
3455                 if (atomic_read(&hdev->promisc)) {
3456                         /* Send copy to the sockets */
3457                         hci_send_to_sock(hdev, skb);
3458                 }
3459
3460                 if (test_bit(HCI_RAW, &hdev->flags)) {
3461                         kfree_skb(skb);
3462                         continue;
3463                 }
3464
3465                 if (test_bit(HCI_INIT, &hdev->flags)) {
3466                         /* Don't process data packets in this state. */
3467                         switch (bt_cb(skb)->pkt_type) {
3468                         case HCI_ACLDATA_PKT:
3469                         case HCI_SCODATA_PKT:
3470                                 kfree_skb(skb);
3471                                 continue;
3472                         }
3473                 }
3474
3475                 /* Process frame */
3476                 switch (bt_cb(skb)->pkt_type) {
3477                 case HCI_EVENT_PKT:
3478                         BT_DBG("%s Event packet", hdev->name);
3479                         hci_event_packet(hdev, skb);
3480                         break;
3481
3482                 case HCI_ACLDATA_PKT:
3483                         BT_DBG("%s ACL data packet", hdev->name);
3484                         hci_acldata_packet(hdev, skb);
3485                         break;
3486
3487                 case HCI_SCODATA_PKT:
3488                         BT_DBG("%s SCO data packet", hdev->name);
3489                         hci_scodata_packet(hdev, skb);
3490                         break;
3491
3492                 default:
3493                         kfree_skb(skb);
3494                         break;
3495                 }
3496         }
3497 }
3498
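/* hdev->cmd_cnt is the controller's command flow-control credit: it
 * counts how many more commands the controller will accept right now
 * and is replenished from the Num_HCI_Command_Packets field of
 * Command Complete / Command Status events.
 */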
3499 static void hci_cmd_work(struct work_struct *work)
3500 {
3501         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
3502         struct sk_buff *skb;
3503
3504         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3505                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
3506
3507         /* Send queued commands */
3508         if (atomic_read(&hdev->cmd_cnt)) {
3509                 skb = skb_dequeue(&hdev->cmd_q);
3510                 if (!skb)
3511                         return;
3512
3513                 kfree_skb(hdev->sent_cmd);
3514
3515                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3516                 if (hdev->sent_cmd) {
3517                         atomic_dec(&hdev->cmd_cnt);
3518                         hci_send_frame(skb);
3519                         if (test_bit(HCI_RESET, &hdev->flags))
3520                                 del_timer(&hdev->cmd_timer);
3521                         else
3522                                 mod_timer(&hdev->cmd_timer,
3523                                           jiffies + HCI_CMD_TIMEOUT);
3524                 } else {
3525                         skb_queue_head(&hdev->cmd_q, skb);
3526                         queue_work(hdev->workqueue, &hdev->cmd_work);
3527                 }
3528         }
3529 }
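
/*
 * Editor's note: a minimal, self-contained userspace model of the
 * credit check in hci_cmd_work() above: one command may be sent per
 * available credit, the credit stays consumed until the controller
 * replenishes it via an event, and a watchdog is rearmed for every
 * command except a reset.  All names are hypothetical; the block is
 * fenced out with #if 0 so it is never compiled.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static int credits = 1;		/* like atomic_read(&hdev->cmd_cnt) */
static bool resetting;		/* like test_bit(HCI_RESET, &hdev->flags) */

static void send_one(const char *cmd)
{
	if (credits <= 0) {
		printf("no credit, %s stays queued\n", cmd);
		return;
	}
	credits--;		/* consumed until an event returns it */
	printf("sent %s\n", cmd);
	if (resetting)
		printf("reset in flight: watchdog stopped\n");
	else
		printf("watchdog armed for HCI_CMD_TIMEOUT\n");
}

/* Num_HCI_Command_Packets from a Command Complete/Status event */
static void on_event(int ncmd)
{
	credits = ncmd;
}

int main(void)
{
	send_one("HCI_OP_READ_LOCAL_VERSION");
	send_one("HCI_OP_READ_BD_ADDR");	/* blocked: credit spent */
	on_event(1);				/* controller replenishes */
	send_one("HCI_OP_READ_BD_ADDR");
	return 0;
}
#endif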
3530
3531 u8 bdaddr_to_le(u8 bdaddr_type)
3532 {
3533         switch (bdaddr_type) {
3534         case BDADDR_LE_PUBLIC:
3535                 return ADDR_LE_DEV_PUBLIC;
3536
3537         default:
3538                 /* Fall back to the LE Random address type */
3539                 return ADDR_LE_DEV_RANDOM;
3540         }
3541 }
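
/*
 * Editor's note: bdaddr_to_le() translates the BDADDR_LE_* address
 * types exposed through the management interface into the HCI-level
 * ADDR_LE_DEV_* constants, falling back to the random type for
 * anything it does not recognize.  A hypothetical caller might look
 * like this (the function below is illustrative only and fenced out
 * with #if 0 so it is never compiled):
 */
#if 0
static void example_le_connect(struct hci_dev *hdev, bdaddr_t *dst,
			       u8 addr_type)
{
	struct hci_cp_le_create_conn cp;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.peer_addr, dst);
	/* addr_type came from userspace via mgmt; normalize it for HCI */
	cp.peer_addr_type = bdaddr_to_le(addr_type);

	hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}
#endif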