/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
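
/* Illustrative note (not kernel API documentation): once __hci_init()
 * below has created the "dut_mode" entry in the per-controller debugfs
 * directory, writing a boolean toggles Device Under Test mode, e.g.
 * assuming the usual debugfs mount point:
 *
 *      echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * The write lands in dut_mode_write(), which sends the synchronous
 * HCI_OP_ENABLE_DUT_MODE command and flips the HCI_DUT_MODE flag.
 */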

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
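
/* A minimal sketch of the usual calling pattern for the synchronous
 * helpers, mirroring dut_mode_write() above (the opcode is only an
 * example; callers serialize with hci_req_lock()):
 *
 *      hci_req_lock(hdev);
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      hci_req_unlock(hdev);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      err = -bt_to_errno(skb->data[0]);   first byte is the HCI status
 *      kfree_skb(skb);
 */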

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
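
/* A request builder is a callback that queues commands on the given
 * struct hci_request; hci_req_sync() runs the queue and blocks until
 * completion. As a sketch, using hci_reset_req() defined below:
 *
 *      ret = hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
 *
 * which is the same pattern hci_dev_do_close() uses via
 * __hci_req_sync().
 */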

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Block size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
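
/* The event mask is a little-endian 64-bit bitfield, so setting
 * events[i] |= (1 << j) enables the event whose mask bit index is
 * i * 8 + j. For example, events[4] |= 0x02 above sets bit 33, which
 * is the Inquiry Result with RSSI event noted in the inline comment.
 */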

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However some controllers list
                 * the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}
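
/* The hdev->commands[] tests above index the bitmap returned by the
 * Read Local Supported Commands command: octet n, bit b marks support
 * for the command that the core specification assigns to that
 * position. For instance, hdev->commands[29] & 0x20 checks octet 29,
 * bit 5 before issuing HCI_OP_READ_LOCAL_CODECS.
 */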

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The HCI_BREDR device type covers single-mode LE, single-mode
         * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
         * only need the first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}
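
/* To summarize the staged bring-up driven by __hci_init(): stage 1
 * resets the controller and reads its basic identity (features,
 * version, address), stage 2 sets up BR/EDR and LE basics, and stages
 * 3 and 4 (HCI_BREDR-type controllers only) program the event masks
 * and the optional commands guarded by the supported-commands bitmap.
 */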

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
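
/* hci_dev_get() takes a reference through hci_dev_hold(), so every
 * successful lookup must be balanced with hci_dev_put(), as for
 * example hci_inquiry() below does in its done: path.
 */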

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

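/* Re-insert the entry so the resolve list stays ordered by ascending
 * |RSSI| (RSSI is a negative dBm value, so the strongest signals come
 * first), without moving it ahead of entries whose name lookup is
 * already NAME_PENDING.
 */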
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temp buffer and
         * then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
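
/* Userspace reaches this entry point through the HCIINQUIRY ioctl on a
 * raw HCI socket. A hedged sketch of the calling convention (the
 * response buffer follows the request header, mirroring the
 * copy_to_user() layout above):
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[255];
 *      } buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 0,
 *                        .lap = { 0x33, 0x8b, 0x9e } } };
 *
 *      ioctl(sock, HCIINQUIRY, &buf);
 */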

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, clean up */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
        struct hci_conn_params *p;

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                if (p->conn) {
                        hci_conn_drop(p->conn);
                        hci_conn_put(p->conn);
                        p->conn = NULL;
                }
                list_del_init(&p->action);
        }

        BT_DBG("All LE pending actions cleared");
}
1555
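/* Power down a controller. The teardown below is ordered so that no work
 * can re-arm itself: an optional vendor shutdown runs first (unless the
 * device is being unregistered), then the RX/TX works and all delayed
 * works are flushed or canceled, the workqueue is drained, connections
 * and caches are flushed under hdev->lock, and only at the very end is
 * the transport closed via hdev->close().
 */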
1556 static int hci_dev_do_close(struct hci_dev *hdev)
1557 {
1558         BT_DBG("%s %p", hdev->name, hdev);
1559
1560         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1561             test_bit(HCI_UP, &hdev->flags)) {
1562                 /* Execute vendor specific shutdown routine */
1563                 if (hdev->shutdown)
1564                         hdev->shutdown(hdev);
1565         }
1566
1567         cancel_delayed_work(&hdev->power_off);
1568
1569         hci_req_cancel(hdev, ENODEV);
1570         hci_req_lock(hdev);
1571
1572         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1573                 cancel_delayed_work_sync(&hdev->cmd_timer);
1574                 hci_req_unlock(hdev);
1575                 return 0;
1576         }
1577
1578         /* Flush RX and TX works */
1579         flush_work(&hdev->tx_work);
1580         flush_work(&hdev->rx_work);
1581
1582         if (hdev->discov_timeout > 0) {
1583                 cancel_delayed_work(&hdev->discov_off);
1584                 hdev->discov_timeout = 0;
1585                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1586                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1587         }
1588
1589         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1590                 cancel_delayed_work(&hdev->service_cache);
1591
1592         cancel_delayed_work_sync(&hdev->le_scan_disable);
1593         cancel_delayed_work_sync(&hdev->le_scan_restart);
1594
1595         if (hci_dev_test_flag(hdev, HCI_MGMT))
1596                 cancel_delayed_work_sync(&hdev->rpa_expired);
1597
1598         /* Avoid potential lockdep warnings from the *_flush() calls by
1599          * ensuring the workqueue is empty up front.
1600          */
1601         drain_workqueue(hdev->workqueue);
1602
1603         hci_dev_lock(hdev);
1604
1605         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1606
1607         if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1608                 if (hdev->dev_type == HCI_BREDR)
1609                         mgmt_powered(hdev, 0);
1610         }
1611
1612         hci_inquiry_cache_flush(hdev);
1613         hci_pend_le_actions_clear(hdev);
1614         hci_conn_hash_flush(hdev);
1615         hci_dev_unlock(hdev);
1616
1617         smp_unregister(hdev);
1618
1619         hci_notify(hdev, HCI_DEV_DOWN);
1620
1621         if (hdev->flush)
1622                 hdev->flush(hdev);
1623
1624         /* Reset device */
1625         skb_queue_purge(&hdev->cmd_q);
1626         atomic_set(&hdev->cmd_cnt, 1);
1627         if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1628             !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1629             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1630                 set_bit(HCI_INIT, &hdev->flags);
1631                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1632                 clear_bit(HCI_INIT, &hdev->flags);
1633         }
1634
1635         /* Flush cmd work */
1636         flush_work(&hdev->cmd_work);
1637
1638         /* Drop queues */
1639         skb_queue_purge(&hdev->rx_q);
1640         skb_queue_purge(&hdev->cmd_q);
1641         skb_queue_purge(&hdev->raw_q);
1642
1643         /* Drop last sent command */
1644         if (hdev->sent_cmd) {
1645                 cancel_delayed_work_sync(&hdev->cmd_timer);
1646                 kfree_skb(hdev->sent_cmd);
1647                 hdev->sent_cmd = NULL;
1648         }
1649
1650         /* After this point our queues are empty
1651          * and no tasks are scheduled. */
1652         hdev->close(hdev);
1653
1654         /* Clear flags */
1655         hdev->flags &= BIT(HCI_RAW);
1656         hci_dev_clear_volatile_flags(hdev);
1657
1658         /* Controller radio is available but is currently powered down */
1659         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1660
1661         memset(hdev->eir, 0, sizeof(hdev->eir));
1662         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1663         bacpy(&hdev->random_addr, BDADDR_ANY);
1664
1665         hci_req_unlock(hdev);
1666
1667         hci_dev_put(hdev);
1668         return 0;
1669 }
1670
1671 int hci_dev_close(__u16 dev)
1672 {
1673         struct hci_dev *hdev;
1674         int err;
1675
1676         hdev = hci_dev_get(dev);
1677         if (!hdev)
1678                 return -ENODEV;
1679
1680         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1681                 err = -EBUSY;
1682                 goto done;
1683         }
1684
1685         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1686                 cancel_delayed_work(&hdev->power_off);
1687
1688         err = hci_dev_do_close(hdev);
1689
1690 done:
1691         hci_dev_put(hdev);
1692         return err;
1693 }
1694
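/* Soft-reset a running controller. Unlike hci_dev_do_close() this keeps
 * the transport open and HCI_UP set: it only drops the queues, flushes
 * connections and the inquiry cache, and sends HCI_OP_RESET via
 * hci_reset_req().
 */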
1695 static int hci_dev_do_reset(struct hci_dev *hdev)
1696 {
1697         int ret;
1698
1699         BT_DBG("%s %p", hdev->name, hdev);
1700
1701         hci_req_lock(hdev);
1702
1703         /* Drop queues */
1704         skb_queue_purge(&hdev->rx_q);
1705         skb_queue_purge(&hdev->cmd_q);
1706
1707         /* Avoid potential lockdep warnings from the *_flush() calls by
1708          * ensuring the workqueue is empty up front.
1709          */
1710         drain_workqueue(hdev->workqueue);
1711
1712         hci_dev_lock(hdev);
1713         hci_inquiry_cache_flush(hdev);
1714         hci_conn_hash_flush(hdev);
1715         hci_dev_unlock(hdev);
1716
1717         if (hdev->flush)
1718                 hdev->flush(hdev);
1719
1720         atomic_set(&hdev->cmd_cnt, 1);
1721         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1722
1723         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1724
1725         hci_req_unlock(hdev);
1726         return ret;
1727 }
1728
1729 int hci_dev_reset(__u16 dev)
1730 {
1731         struct hci_dev *hdev;
1732         int err;
1733
1734         hdev = hci_dev_get(dev);
1735         if (!hdev)
1736                 return -ENODEV;
1737
1738         if (!test_bit(HCI_UP, &hdev->flags)) {
1739                 err = -ENETDOWN;
1740                 goto done;
1741         }
1742
1743         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1744                 err = -EBUSY;
1745                 goto done;
1746         }
1747
1748         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1749                 err = -EOPNOTSUPP;
1750                 goto done;
1751         }
1752
1753         err = hci_dev_do_reset(hdev);
1754
1755 done:
1756         hci_dev_put(hdev);
1757         return err;
1758 }
1759
1760 int hci_dev_reset_stat(__u16 dev)
1761 {
1762         struct hci_dev *hdev;
1763         int ret = 0;
1764
1765         hdev = hci_dev_get(dev);
1766         if (!hdev)
1767                 return -ENODEV;
1768
1769         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1770                 ret = -EBUSY;
1771                 goto done;
1772         }
1773
1774         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1775                 ret = -EOPNOTSUPP;
1776                 goto done;
1777         }
1778
1779         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1780
1781 done:
1782         hci_dev_put(hdev);
1783         return ret;
1784 }
1785
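/* Keep the CONNECTABLE/DISCOVERABLE flags in sync after a raw scan mode
 * change. The scan argument uses the HCI Write Scan Enable encoding:
 * bit 0 enables inquiry scan (SCAN_INQUIRY) and bit 1 enables page scan
 * (SCAN_PAGE). For example, scan = 0x03 makes the device both
 * discoverable and connectable.
 */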
1786 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1787 {
1788         bool conn_changed, discov_changed;
1789
1790         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1791
1792         if (scan & SCAN_PAGE)
1793                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1794                                                           HCI_CONNECTABLE);
1795         else
1796                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1797                                                            HCI_CONNECTABLE);
1798
1799         if (scan & SCAN_INQUIRY) {
1800                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1801                                                             HCI_DISCOVERABLE);
1802         } else {
1803                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1804                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1805                                                              HCI_DISCOVERABLE);
1806         }
1807
1808         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1809                 return;
1810
1811         if (conn_changed || discov_changed) {
1812                 /* In case this was disabled through mgmt */
1813                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1814
1815                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1816                         mgmt_update_adv_data(hdev);
1817
1818                 mgmt_new_settings(hdev);
1819         }
1820 }
1821
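/* Legacy ioctl dispatcher for BR/EDR-only device settings. Illustrative
 * userspace call (not part of this file), assuming dd is a raw HCI
 * socket:
 *
 *      struct hci_dev_req dr = { .dev_id = 0, .dev_opt = SCAN_PAGE };
 *      ioctl(dd, HCISETSCAN, (unsigned long) &dr); // connectable only
 */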
1822 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1823 {
1824         struct hci_dev *hdev;
1825         struct hci_dev_req dr;
1826         int err = 0;
1827
1828         if (copy_from_user(&dr, arg, sizeof(dr)))
1829                 return -EFAULT;
1830
1831         hdev = hci_dev_get(dr.dev_id);
1832         if (!hdev)
1833                 return -ENODEV;
1834
1835         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1836                 err = -EBUSY;
1837                 goto done;
1838         }
1839
1840         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1841                 err = -EOPNOTSUPP;
1842                 goto done;
1843         }
1844
1845         if (hdev->dev_type != HCI_BREDR) {
1846                 err = -EOPNOTSUPP;
1847                 goto done;
1848         }
1849
1850         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1851                 err = -EOPNOTSUPP;
1852                 goto done;
1853         }
1854
1855         switch (cmd) {
1856         case HCISETAUTH:
1857                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1858                                    HCI_INIT_TIMEOUT);
1859                 break;
1860
1861         case HCISETENCRYPT:
1862                 if (!lmp_encrypt_capable(hdev)) {
1863                         err = -EOPNOTSUPP;
1864                         break;
1865                 }
1866
1867                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1868                         /* Auth must be enabled first */
1869                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1870                                            HCI_INIT_TIMEOUT);
1871                         if (err)
1872                                 break;
1873                 }
1874
1875                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1876                                    HCI_INIT_TIMEOUT);
1877                 break;
1878
1879         case HCISETSCAN:
1880                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1881                                    HCI_INIT_TIMEOUT);
1882
1883                 /* Ensure that the connectable and discoverable states
1884                  * get correctly modified as this was a non-mgmt change.
1885                  */
1886                 if (!err)
1887                         hci_update_scan_state(hdev, dr.dev_opt);
1888                 break;
1889
1890         case HCISETLINKPOL:
1891                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1892                                    HCI_INIT_TIMEOUT);
1893                 break;
1894
1895         case HCISETLINKMODE:
1896                 hdev->link_mode = ((__u16) dr.dev_opt) &
1897                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1898                 break;
1899
1900         case HCISETPTYPE:
1901                 hdev->pkt_type = (__u16) dr.dev_opt;
1902                 break;
1903
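        /* For the two MTU ioctls below, dev_opt carries a pair of __u16
         * values packed into one __u32; the pointer arithmetic splits it
         * back into the MTU and packet-count halves.
         */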
1904         case HCISETACLMTU:
1905                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1906                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1907                 break;
1908
1909         case HCISETSCOMTU:
1910                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1911                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1912                 break;
1913
1914         default:
1915                 err = -EINVAL;
1916                 break;
1917         }
1918
1919 done:
1920         hci_dev_put(hdev);
1921         return err;
1922 }
1923
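/* HCIGETDEVLIST helper. Userspace sends a buffer that starts with the
 * number of hci_dev_req slots it allocated (dev_num); on return, dev_num
 * holds how many entries were actually filled in. The cap of
 * (PAGE_SIZE * 2) / sizeof(*dr) merely bounds the kernel allocation.
 */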
1924 int hci_get_dev_list(void __user *arg)
1925 {
1926         struct hci_dev *hdev;
1927         struct hci_dev_list_req *dl;
1928         struct hci_dev_req *dr;
1929         int n = 0, size, err;
1930         __u16 dev_num;
1931
1932         if (get_user(dev_num, (__u16 __user *) arg))
1933                 return -EFAULT;
1934
1935         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1936                 return -EINVAL;
1937
1938         size = sizeof(*dl) + dev_num * sizeof(*dr);
1939
1940         dl = kzalloc(size, GFP_KERNEL);
1941         if (!dl)
1942                 return -ENOMEM;
1943
1944         dr = dl->dev_req;
1945
1946         read_lock(&hci_dev_list_lock);
1947         list_for_each_entry(hdev, &hci_dev_list, list) {
1948                 unsigned long flags = hdev->flags;
1949
1950                 /* When the auto-off is configured it means the transport
1951                  * is running, but in that case still indicate that the
1952                  * device is actually down.
1953                  */
1954                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1955                         flags &= ~BIT(HCI_UP);
1956
1957                 (dr + n)->dev_id  = hdev->id;
1958                 (dr + n)->dev_opt = flags;
1959
1960                 if (++n >= dev_num)
1961                         break;
1962         }
1963         read_unlock(&hci_dev_list_lock);
1964
1965         dl->dev_num = n;
1966         size = sizeof(*dl) + n * sizeof(*dr);
1967
1968         err = copy_to_user(arg, dl, size);
1969         kfree(dl);
1970
1971         return err ? -EFAULT : 0;
1972 }
1973
1974 int hci_get_dev_info(void __user *arg)
1975 {
1976         struct hci_dev *hdev;
1977         struct hci_dev_info di;
1978         unsigned long flags;
1979         int err = 0;
1980
1981         if (copy_from_user(&di, arg, sizeof(di)))
1982                 return -EFAULT;
1983
1984         hdev = hci_dev_get(di.dev_id);
1985         if (!hdev)
1986                 return -ENODEV;
1987
1988         /* When the auto-off is configured it means the transport
1989          * is running, but in that case still indicate that the
1990          * device is actually down.
1991          */
1992         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1993                 flags = hdev->flags & ~BIT(HCI_UP);
1994         else
1995                 flags = hdev->flags;
1996
1997         strlcpy(di.name, hdev->name, sizeof(di.name));
1998         di.bdaddr   = hdev->bdaddr;
1999         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2000         di.flags    = flags;
2001         di.pkt_type = hdev->pkt_type;
2002         if (lmp_bredr_capable(hdev)) {
2003                 di.acl_mtu  = hdev->acl_mtu;
2004                 di.acl_pkts = hdev->acl_pkts;
2005                 di.sco_mtu  = hdev->sco_mtu;
2006                 di.sco_pkts = hdev->sco_pkts;
2007         } else {
2008                 di.acl_mtu  = hdev->le_mtu;
2009                 di.acl_pkts = hdev->le_pkts;
2010                 di.sco_mtu  = 0;
2011                 di.sco_pkts = 0;
2012         }
2013         di.link_policy = hdev->link_policy;
2014         di.link_mode   = hdev->link_mode;
2015
2016         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2017         memcpy(&di.features, &hdev->features, sizeof(di.features));
2018
2019         if (copy_to_user(arg, &di, sizeof(di)))
2020                 err = -EFAULT;
2021
2022         hci_dev_put(hdev);
2023
2024         return err;
2025 }
2026
2027 /* ---- Interface to HCI drivers ---- */
2028
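/* rfkill callback: a soft or hard block closes a running controller
 * immediately (unless it is still in setup/config), while unblocking
 * only clears HCI_RFKILLED; the device is not reopened automatically.
 */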
2029 static int hci_rfkill_set_block(void *data, bool blocked)
2030 {
2031         struct hci_dev *hdev = data;
2032
2033         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2034
2035         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2036                 return -EBUSY;
2037
2038         if (blocked) {
2039                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2040                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2041                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2042                         hci_dev_do_close(hdev);
2043         } else {
2044                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2045         }
2046
2047         return 0;
2048 }
2049
2050 static const struct rfkill_ops hci_rfkill_ops = {
2051         .set_block = hci_rfkill_set_block,
2052 };
2053
2054 static void hci_power_on(struct work_struct *work)
2055 {
2056         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2057         int err;
2058
2059         BT_DBG("%s", hdev->name);
2060
2061         err = hci_dev_do_open(hdev);
2062         if (err < 0) {
2063                 hci_dev_lock(hdev);
2064                 mgmt_set_powered_failed(hdev, err);
2065                 hci_dev_unlock(hdev);
2066                 return;
2067         }
2068
2069         /* During the HCI setup phase, a few error conditions are
2070          * ignored and they need to be checked now. If they are still
2071          * valid, it is important to turn the device back off.
2072          */
2073         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2074             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2075             (hdev->dev_type == HCI_BREDR &&
2076              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2077              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2078                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2079                 hci_dev_do_close(hdev);
2080         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2081                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2082                                    HCI_AUTO_OFF_TIMEOUT);
2083         }
2084
2085         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2086                 /* For unconfigured devices, set the HCI_RAW flag
2087                  * so that userspace can easily identify them.
2088                  */
2089                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2090                         set_bit(HCI_RAW, &hdev->flags);
2091
2092                 /* For fully configured devices, this will send
2093                  * the Index Added event. For unconfigured devices,
2094                  * it will send the Unconfigured Index Added event.
2095                  *
2096                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2097                  * and no event will be sent.
2098                  */
2099                 mgmt_index_added(hdev);
2100         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2101                 /* Now that the controller is configured, it is
2102                  * important to clear the HCI_RAW flag.
2103                  */
2104                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2105                         clear_bit(HCI_RAW, &hdev->flags);
2106
2107                 /* Powering on the controller with HCI_CONFIG set only
2108                  * happens with the transition from unconfigured to
2109                  * configured. This will send the Index Added event.
2110                  */
2111                 mgmt_index_added(hdev);
2112         }
2113 }
2114
2115 static void hci_power_off(struct work_struct *work)
2116 {
2117         struct hci_dev *hdev = container_of(work, struct hci_dev,
2118                                             power_off.work);
2119
2120         BT_DBG("%s", hdev->name);
2121
2122         hci_dev_do_close(hdev);
2123 }
2124
2125 static void hci_error_reset(struct work_struct *work)
2126 {
2127         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2128
2129         BT_DBG("%s", hdev->name);
2130
2131         if (hdev->hw_error)
2132                 hdev->hw_error(hdev, hdev->hw_error_code);
2133         else
2134                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2135                        hdev->hw_error_code);
2136
2137         if (hci_dev_do_close(hdev))
2138                 return;
2139
2140         hci_dev_do_open(hdev);
2141 }
2142
2143 static void hci_discov_off(struct work_struct *work)
2144 {
2145         struct hci_dev *hdev;
2146
2147         hdev = container_of(work, struct hci_dev, discov_off.work);
2148
2149         BT_DBG("%s", hdev->name);
2150
2151         mgmt_discoverable_timeout(hdev);
2152 }
2153
2154 void hci_uuids_clear(struct hci_dev *hdev)
2155 {
2156         struct bt_uuid *uuid, *tmp;
2157
2158         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2159                 list_del(&uuid->list);
2160                 kfree(uuid);
2161         }
2162 }
2163
2164 void hci_link_keys_clear(struct hci_dev *hdev)
2165 {
2166         struct link_key *key;
2167
2168         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2169                 list_del_rcu(&key->list);
2170                 kfree_rcu(key, rcu);
2171         }
2172 }
2173
2174 void hci_smp_ltks_clear(struct hci_dev *hdev)
2175 {
2176         struct smp_ltk *k;
2177
2178         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2179                 list_del_rcu(&k->list);
2180                 kfree_rcu(k, rcu);
2181         }
2182 }
2183
2184 void hci_smp_irks_clear(struct hci_dev *hdev)
2185 {
2186         struct smp_irk *k;
2187
2188         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2189                 list_del_rcu(&k->list);
2190                 kfree_rcu(k, rcu);
2191         }
2192 }
2193
2194 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2195 {
2196         struct link_key *k;
2197
2198         rcu_read_lock();
2199         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2200                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2201                         rcu_read_unlock();
2202                         return k;
2203                 }
2204         }
2205         rcu_read_unlock();
2206
2207         return NULL;
2208 }
2209
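/* Decide whether a new BR/EDR link key should be stored persistently.
 * key_type follows the HCI Link Key Notification encoding (0x00
 * combination key, 0x01/0x02 unit keys, 0x03 debug combination key),
 * and auth_type/remote_auth follow the IO capability authentication
 * requirements, where 0x00/0x01 mean no-bonding and 0x02/0x03 mean
 * dedicated bonding.
 */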
2210 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2211                                u8 key_type, u8 old_key_type)
2212 {
2213         /* Legacy key */
2214         if (key_type < 0x03)
2215                 return true;
2216
2217         /* Debug keys are insecure so don't store them persistently */
2218         if (key_type == HCI_LK_DEBUG_COMBINATION)
2219                 return false;
2220
2221         /* Changed combination key and there's no previous one */
2222         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2223                 return false;
2224
2225         /* Security mode 3 case */
2226         if (!conn)
2227                 return true;
2228
2229         /* BR/EDR key derived using SC from an LE link */
2230         if (conn->type == LE_LINK)
2231                 return true;
2232
2233         /* Neither local nor remote side requested no-bonding */
2234         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2235                 return true;
2236
2237         /* Local side had dedicated bonding as requirement */
2238         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2239                 return true;
2240
2241         /* Remote side had dedicated bonding as requirement */
2242         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2243                 return true;
2244
2245         /* If none of the above criteria match, then don't store the key
2246          * persistently */
2247         return false;
2248 }
2249
2250 static u8 ltk_role(u8 type)
2251 {
2252         if (type == SMP_LTK)
2253                 return HCI_ROLE_MASTER;
2254
2255         return HCI_ROLE_SLAVE;
2256 }
2257
2258 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2259                              u8 addr_type, u8 role)
2260 {
2261         struct smp_ltk *k;
2262
2263         rcu_read_lock();
2264         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2265                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2266                         continue;
2267
2268                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2269                         rcu_read_unlock();
2270                         return k;
2271                 }
2272         }
2273         rcu_read_unlock();
2274
2275         return NULL;
2276 }
2277
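/* Resolve a Resolvable Private Address to its IRK. The first pass is a
 * cheap comparison against the last RPA each IRK was seen with; only on
 * a miss does the second pass run the SMP address-hash computation
 * (smp_irk_matches) for every stored IRK, caching a hit in irk->rpa.
 */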
2278 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2279 {
2280         struct smp_irk *irk;
2281
2282         rcu_read_lock();
2283         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2284                 if (!bacmp(&irk->rpa, rpa)) {
2285                         rcu_read_unlock();
2286                         return irk;
2287                 }
2288         }
2289
2290         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2291                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2292                         bacpy(&irk->rpa, rpa);
2293                         rcu_read_unlock();
2294                         return irk;
2295                 }
2296         }
2297         rcu_read_unlock();
2298
2299         return NULL;
2300 }
2301
2302 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2303                                      u8 addr_type)
2304 {
2305         struct smp_irk *irk;
2306
2307         /* Identity Address must be public or static random */
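        /* (A static random address must have the two most significant
         * bits of its top byte set to 0b11, hence the 0xc0 mask below.)
         */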
2308         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2309                 return NULL;
2310
2311         rcu_read_lock();
2312         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2313                 if (addr_type == irk->addr_type &&
2314                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2315                         rcu_read_unlock();
2316                         return irk;
2317                 }
2318         }
2319         rcu_read_unlock();
2320
2321         return NULL;
2322 }
2323
2324 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2325                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2326                                   u8 pin_len, bool *persistent)
2327 {
2328         struct link_key *key, *old_key;
2329         u8 old_key_type;
2330
2331         old_key = hci_find_link_key(hdev, bdaddr);
2332         if (old_key) {
2333                 old_key_type = old_key->type;
2334                 key = old_key;
2335         } else {
2336                 old_key_type = conn ? conn->key_type : 0xff;
2337                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2338                 if (!key)
2339                         return NULL;
2340                 list_add_rcu(&key->list, &hdev->link_keys);
2341         }
2342
2343         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2344
2345         /* Some buggy controller combinations generate a changed
2346          * combination key for legacy pairing even when there's no
2347          * previous key */
2348         if (type == HCI_LK_CHANGED_COMBINATION &&
2349             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2350                 type = HCI_LK_COMBINATION;
2351                 if (conn)
2352                         conn->key_type = type;
2353         }
2354
2355         bacpy(&key->bdaddr, bdaddr);
2356         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2357         key->pin_len = pin_len;
2358
2359         if (type == HCI_LK_CHANGED_COMBINATION)
2360                 key->type = old_key_type;
2361         else
2362                 key->type = type;
2363
2364         if (persistent)
2365                 *persistent = hci_persistent_key(hdev, conn, type,
2366                                                  old_key_type);
2367
2368         return key;
2369 }
2370
2371 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2372                             u8 addr_type, u8 type, u8 authenticated,
2373                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2374 {
2375         struct smp_ltk *key, *old_key;
2376         u8 role = ltk_role(type);
2377
2378         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2379         if (old_key)
2380                 key = old_key;
2381         else {
2382                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2383                 if (!key)
2384                         return NULL;
2385                 list_add_rcu(&key->list, &hdev->long_term_keys);
2386         }
2387
2388         bacpy(&key->bdaddr, bdaddr);
2389         key->bdaddr_type = addr_type;
2390         memcpy(key->val, tk, sizeof(key->val));
2391         key->authenticated = authenticated;
2392         key->ediv = ediv;
2393         key->rand = rand;
2394         key->enc_size = enc_size;
2395         key->type = type;
2396
2397         return key;
2398 }
2399
2400 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2401                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2402 {
2403         struct smp_irk *irk;
2404
2405         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2406         if (!irk) {
2407                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2408                 if (!irk)
2409                         return NULL;
2410
2411                 bacpy(&irk->bdaddr, bdaddr);
2412                 irk->addr_type = addr_type;
2413
2414                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2415         }
2416
2417         memcpy(irk->val, val, 16);
2418         bacpy(&irk->rpa, rpa);
2419
2420         return irk;
2421 }
2422
2423 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2424 {
2425         struct link_key *key;
2426
2427         key = hci_find_link_key(hdev, bdaddr);
2428         if (!key)
2429                 return -ENOENT;
2430
2431         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2432
2433         list_del_rcu(&key->list);
2434         kfree_rcu(key, rcu);
2435
2436         return 0;
2437 }
2438
2439 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2440 {
2441         struct smp_ltk *k;
2442         int removed = 0;
2443
2444         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2445                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2446                         continue;
2447
2448                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2449
2450                 list_del_rcu(&k->list);
2451                 kfree_rcu(k, rcu);
2452                 removed++;
2453         }
2454
2455         return removed ? 0 : -ENOENT;
2456 }
2457
2458 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2459 {
2460         struct smp_irk *k;
2461
2462         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2463                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2464                         continue;
2465
2466                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2467
2468                 list_del_rcu(&k->list);
2469                 kfree_rcu(k, rcu);
2470         }
2471 }
2472
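/* Check whether keys are shared with the given address: a stored link
 * key answers for BR/EDR; otherwise the address is first mapped to its
 * identity address through the IRK table (so an RPA seen while scanning
 * still matches) and then looked up in the LTK list.
 */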
2473 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2474 {
2475         struct smp_ltk *k;
2476         struct smp_irk *irk;
2477         u8 addr_type;
2478
2479         if (type == BDADDR_BREDR) {
2480                 if (hci_find_link_key(hdev, bdaddr))
2481                         return true;
2482                 return false;
2483         }
2484
2485         /* Convert to HCI addr type which struct smp_ltk uses */
2486         if (type == BDADDR_LE_PUBLIC)
2487                 addr_type = ADDR_LE_DEV_PUBLIC;
2488         else
2489                 addr_type = ADDR_LE_DEV_RANDOM;
2490
2491         irk = hci_get_irk(hdev, bdaddr, addr_type);
2492         if (irk) {
2493                 bdaddr = &irk->bdaddr;
2494                 addr_type = irk->addr_type;
2495         }
2496
2497         rcu_read_lock();
2498         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2499                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2500                         rcu_read_unlock();
2501                         return true;
2502                 }
2503         }
2504         rcu_read_unlock();
2505
2506         return false;
2507 }
2508
2509 /* HCI command timer function */
2510 static void hci_cmd_timeout(struct work_struct *work)
2511 {
2512         struct hci_dev *hdev = container_of(work, struct hci_dev,
2513                                             cmd_timer.work);
2514
2515         if (hdev->sent_cmd) {
2516                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2517                 u16 opcode = __le16_to_cpu(sent->opcode);
2518
2519                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2520         } else {
2521                 BT_ERR("%s command tx timeout", hdev->name);
2522         }
2523
2524         atomic_set(&hdev->cmd_cnt, 1);
2525         queue_work(hdev->workqueue, &hdev->cmd_work);
2526 }
2527
2528 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2529                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2530 {
2531         struct oob_data *data;
2532
2533         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2534                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2535                         continue;
2536                 if (data->bdaddr_type != bdaddr_type)
2537                         continue;
2538                 return data;
2539         }
2540
2541         return NULL;
2542 }
2543
2544 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2545                                u8 bdaddr_type)
2546 {
2547         struct oob_data *data;
2548
2549         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2550         if (!data)
2551                 return -ENOENT;
2552
2553         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2554
2555         list_del(&data->list);
2556         kfree(data);
2557
2558         return 0;
2559 }
2560
2561 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2562 {
2563         struct oob_data *data, *n;
2564
2565         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2566                 list_del(&data->list);
2567                 kfree(data);
2568         }
2569 }
2570
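/* Store remote OOB pairing data. data->present encodes which key sets
 * are usable: 0x01 = P-192 values only, 0x02 = P-256 values only,
 * 0x03 = both P-192 and P-256 values are valid.
 */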
2571 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2572                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2573                             u8 *hash256, u8 *rand256)
2574 {
2575         struct oob_data *data;
2576
2577         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2578         if (!data) {
2579                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2580                 if (!data)
2581                         return -ENOMEM;
2582
2583                 bacpy(&data->bdaddr, bdaddr);
2584                 data->bdaddr_type = bdaddr_type;
2585                 list_add(&data->list, &hdev->remote_oob_data);
2586         }
2587
2588         if (hash192 && rand192) {
2589                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2590                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2591                 if (hash256 && rand256)
2592                         data->present = 0x03;
2593         } else {
2594                 memset(data->hash192, 0, sizeof(data->hash192));
2595                 memset(data->rand192, 0, sizeof(data->rand192));
2596                 if (hash256 && rand256)
2597                         data->present = 0x02;
2598                 else
2599                         data->present = 0x00;
2600         }
2601
2602         if (hash256 && rand256) {
2603                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2604                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2605         } else {
2606                 memset(data->hash256, 0, sizeof(data->hash256));
2607                 memset(data->rand256, 0, sizeof(data->rand256));
2608                 if (hash192 && rand192)
2609                         data->present = 0x01;
2610         }
2611
2612         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2613
2614         return 0;
2615 }
2616
2617 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2618                                          bdaddr_t *bdaddr, u8 type)
2619 {
2620         struct bdaddr_list *b;
2621
2622         list_for_each_entry(b, bdaddr_list, list) {
2623                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2624                         return b;
2625         }
2626
2627         return NULL;
2628 }
2629
2630 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2631 {
2632         struct list_head *p, *n;
2633
2634         list_for_each_safe(p, n, bdaddr_list) {
2635                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2636
2637                 list_del(p);
2638                 kfree(b);
2639         }
2640 }
2641
2642 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2643 {
2644         struct bdaddr_list *entry;
2645
2646         if (!bacmp(bdaddr, BDADDR_ANY))
2647                 return -EBADF;
2648
2649         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2650                 return -EEXIST;
2651
2652         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2653         if (!entry)
2654                 return -ENOMEM;
2655
2656         bacpy(&entry->bdaddr, bdaddr);
2657         entry->bdaddr_type = type;
2658
2659         list_add(&entry->list, list);
2660
2661         return 0;
2662 }
2663
2664 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2665 {
2666         struct bdaddr_list *entry;
2667
2668         if (!bacmp(bdaddr, BDADDR_ANY)) {
2669                 hci_bdaddr_list_clear(list);
2670                 return 0;
2671         }
2672
2673         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2674         if (!entry)
2675                 return -ENOENT;
2676
2677         list_del(&entry->list);
2678         kfree(entry);
2679
2680         return 0;
2681 }
2682
2683 /* This function requires the caller holds hdev->lock */
2684 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2685                                                bdaddr_t *addr, u8 addr_type)
2686 {
2687         struct hci_conn_params *params;
2688
2689         /* The conn params list only contains identity addresses */
2690         if (!hci_is_identity_address(addr, addr_type))
2691                 return NULL;
2692
2693         list_for_each_entry(params, &hdev->le_conn_params, list) {
2694                 if (bacmp(&params->addr, addr) == 0 &&
2695                     params->addr_type == addr_type) {
2696                         return params;
2697                 }
2698         }
2699
2700         return NULL;
2701 }
2702
2703 /* This function requires the caller holds hdev->lock */
2704 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2705                                                   bdaddr_t *addr, u8 addr_type)
2706 {
2707         struct hci_conn_params *param;
2708
2709         /* The list only contains identity addresses */
2710         if (!hci_is_identity_address(addr, addr_type))
2711                 return NULL;
2712
2713         list_for_each_entry(param, list, action) {
2714                 if (bacmp(&param->addr, addr) == 0 &&
2715                     param->addr_type == addr_type)
2716                         return param;
2717         }
2718
2719         return NULL;
2720 }
2721
2722 /* This function requires the caller holds hdev->lock */
2723 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2724                                             bdaddr_t *addr, u8 addr_type)
2725 {
2726         struct hci_conn_params *params;
2727
2728         if (!hci_is_identity_address(addr, addr_type))
2729                 return NULL;
2730
2731         params = hci_conn_params_lookup(hdev, addr, addr_type);
2732         if (params)
2733                 return params;
2734
2735         params = kzalloc(sizeof(*params), GFP_KERNEL);
2736         if (!params) {
2737                 BT_ERR("Out of memory");
2738                 return NULL;
2739         }
2740
2741         bacpy(&params->addr, addr);
2742         params->addr_type = addr_type;
2743
2744         list_add(&params->list, &hdev->le_conn_params);
2745         INIT_LIST_HEAD(&params->action);
2746
2747         params->conn_min_interval = hdev->le_conn_min_interval;
2748         params->conn_max_interval = hdev->le_conn_max_interval;
2749         params->conn_latency = hdev->le_conn_latency;
2750         params->supervision_timeout = hdev->le_supv_timeout;
2751         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2752
2753         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2754
2755         return params;
2756 }
2757
2758 static void hci_conn_params_free(struct hci_conn_params *params)
2759 {
2760         if (params->conn) {
2761                 hci_conn_drop(params->conn);
2762                 hci_conn_put(params->conn);
2763         }
2764
2765         list_del(&params->action);
2766         list_del(&params->list);
2767         kfree(params);
2768 }
2769
2770 /* This function requires the caller holds hdev->lock */
2771 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2772 {
2773         struct hci_conn_params *params;
2774
2775         params = hci_conn_params_lookup(hdev, addr, addr_type);
2776         if (!params)
2777                 return;
2778
2779         hci_conn_params_free(params);
2780
2781         hci_update_background_scan(hdev);
2782
2783         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2784 }
2785
2786 /* This function requires the caller holds hdev->lock */
2787 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2788 {
2789         struct hci_conn_params *params, *tmp;
2790
2791         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2792                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2793                         continue;
2794                 list_del(&params->list);
2795                 kfree(params);
2796         }
2797
2798         BT_DBG("All disabled LE connection parameters were removed");
2799 }
2800
2801 /* This function requires the caller holds hdev->lock */
2802 void hci_conn_params_clear_all(struct hci_dev *hdev)
2803 {
2804         struct hci_conn_params *params, *tmp;
2805
2806         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2807                 hci_conn_params_free(params);
2808
2809         hci_update_background_scan(hdev);
2810
2811         BT_DBG("All LE connection parameters were removed");
2812 }
2813
2814 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2815 {
2816         if (status) {
2817                 BT_ERR("Failed to start inquiry: status %d", status);
2818
2819                 hci_dev_lock(hdev);
2820                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2821                 hci_dev_unlock(hdev);
2822                 return;
2823         }
2824 }
2825
2826 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2827                                           u16 opcode)
2828 {
2829         /* General inquiry access code (GIAC) */
2830         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2831         struct hci_cp_inquiry cp;
2832         int err;
2833
2834         if (status) {
2835                 BT_ERR("Failed to disable LE scanning: status %d", status);
2836                 return;
2837         }
2838
2839         hdev->discovery.scan_start = 0;
2840
2841         switch (hdev->discovery.type) {
2842         case DISCOV_TYPE_LE:
2843                 hci_dev_lock(hdev);
2844                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2845                 hci_dev_unlock(hdev);
2846                 break;
2847
2848         case DISCOV_TYPE_INTERLEAVED:
2849                 hci_dev_lock(hdev);
2850
2851                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2852                              &hdev->quirks)) {
2853                         /* If we were running LE only scan, change discovery
2854                          * state. If we were running both LE and BR/EDR inquiry
2855                          * simultaneously, and BR/EDR inquiry is already
2856                          * finished, stop discovery, otherwise BR/EDR inquiry
2857                          * will stop discovery when finished.
2858                          */
2859                         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2860                                 hci_discovery_set_state(hdev,
2861                                                         DISCOVERY_STOPPED);
2862                 } else {
2863                         struct hci_request req;
2864
2865                         hci_inquiry_cache_flush(hdev);
2866
2867                         hci_req_init(&req, hdev);
2868
2869                         memset(&cp, 0, sizeof(cp));
2870                         memcpy(&cp.lap, lap, sizeof(cp.lap));
2871                         cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2872                         hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2873
2874                         err = hci_req_run(&req, inquiry_complete);
2875                         if (err) {
2876                                 BT_ERR("Inquiry request failed: err %d", err);
2877                                 hci_discovery_set_state(hdev,
2878                                                         DISCOVERY_STOPPED);
2879                         }
2880                 }
2881
2882                 hci_dev_unlock(hdev);
2883                 break;
2884         }
2885 }
2886
2887 static void le_scan_disable_work(struct work_struct *work)
2888 {
2889         struct hci_dev *hdev = container_of(work, struct hci_dev,
2890                                             le_scan_disable.work);
2891         struct hci_request req;
2892         int err;
2893
2894         BT_DBG("%s", hdev->name);
2895
2896         cancel_delayed_work_sync(&hdev->le_scan_restart);
2897
2898         hci_req_init(&req, hdev);
2899
2900         hci_req_add_le_scan_disable(&req);
2901
2902         err = hci_req_run(&req, le_scan_disable_work_complete);
2903         if (err)
2904                 BT_ERR("Disable LE scanning request failed: err %d", err);
2905 }
2906
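/* After a scan restart, re-arm the le_scan_disable work so the total
 * scan time still adds up to discovery.scan_duration. The elapsed-time
 * computation below is written to cope with jiffies wrapping around
 * ULONG_MAX between scan_start and now.
 */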
2907 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2908                                           u16 opcode)
2909 {
2910         unsigned long timeout, duration, scan_start, now;
2911
2912         BT_DBG("%s", hdev->name);
2913
2914         if (status) {
2915                 BT_ERR("Failed to restart LE scan: status %d", status);
2916                 return;
2917         }
2918
2919         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2920             !hdev->discovery.scan_start)
2921                 return;
2922
2923         /* When the scan was started, hdev->le_scan_disable has been queued
2924          * after duration from scan_start. During scan restart this job
2925          * has been canceled, and we need to queue it again after proper
2926          * timeout, to make sure that scan does not run indefinitely.
2927          */
2928         duration = hdev->discovery.scan_duration;
2929         scan_start = hdev->discovery.scan_start;
2930         now = jiffies;
2931         if (now - scan_start <= duration) {
2932                 int elapsed;
2933
2934                 if (now >= scan_start)
2935                         elapsed = now - scan_start;
2936                 else
2937                         elapsed = ULONG_MAX - scan_start + now;
2938
2939                 timeout = duration - elapsed;
2940         } else {
2941                 timeout = 0;
2942         }
2943         queue_delayed_work(hdev->workqueue,
2944                            &hdev->le_scan_disable, timeout);
2945 }
2946
2947 static void le_scan_restart_work(struct work_struct *work)
2948 {
2949         struct hci_dev *hdev = container_of(work, struct hci_dev,
2950                                             le_scan_restart.work);
2951         struct hci_request req;
2952         struct hci_cp_le_set_scan_enable cp;
2953         int err;
2954
2955         BT_DBG("%s", hdev->name);
2956
2957         /* If controller is not scanning we are done. */
2958         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2959                 return;
2960
2961         hci_req_init(&req, hdev);
2962
2963         hci_req_add_le_scan_disable(&req);
2964
2965         memset(&cp, 0, sizeof(cp));
2966         cp.enable = LE_SCAN_ENABLE;
2967         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2968         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2969
2970         err = hci_req_run(&req, le_scan_restart_work_complete);
2971         if (err)
2972                 BT_ERR("Restart LE scan request failed: err %d", err);
2973 }
2974
2975 /* Copy the Identity Address of the controller.
2976  *
2977  * If the controller has a public BD_ADDR, then by default use that one.
2978  * If this is an LE-only controller without a public address, default to
2979  * the static random address.
2980  *
2981  * For debugging purposes it is possible to force controllers with a
2982  * public address to use the static random address instead.
2983  *
2984  * In case BR/EDR has been disabled on a dual-mode controller and
2985  * userspace has configured a static address, then that address
2986  * becomes the identity address instead of the public BR/EDR address.
2987  */
2988 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2989                                u8 *bdaddr_type)
2990 {
2991         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2992             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2993             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2994              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2995                 bacpy(bdaddr, &hdev->static_addr);
2996                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2997         } else {
2998                 bacpy(bdaddr, &hdev->bdaddr);
2999                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3000         }
3001 }
3002
3003 /* Alloc HCI device */
3004 struct hci_dev *hci_alloc_dev(void)
3005 {
3006         struct hci_dev *hdev;
3007
3008         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3009         if (!hdev)
3010                 return NULL;
3011
3012         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3013         hdev->esco_type = (ESCO_HV1);
3014         hdev->link_mode = (HCI_LM_ACCEPT);
3015         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3016         hdev->io_capability = 0x03;     /* No Input No Output */
3017         hdev->manufacturer = 0xffff;    /* Default to internal use */
3018         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3019         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3020
3021         hdev->sniff_max_interval = 800;
3022         hdev->sniff_min_interval = 80;
3023
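        /* The LE defaults below use Bluetooth spec units: advertising
         * and scan values count 0.625 ms slots (0x0800 = 1.28 s adv
         * interval, 0x0060 = 60 ms scan interval, 0x0030 = 30 ms scan
         * window), connection intervals count 1.25 ms steps (0x0028 =
         * 50 ms, 0x0038 = 70 ms), the supervision timeout counts 10 ms
         * steps (0x002a = 420 ms), and the data length defaults are
         * 27 bytes / 328 us.
         */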
3024         hdev->le_adv_channel_map = 0x07;
3025         hdev->le_adv_min_interval = 0x0800;
3026         hdev->le_adv_max_interval = 0x0800;
3027         hdev->le_scan_interval = 0x0060;
3028         hdev->le_scan_window = 0x0030;
3029         hdev->le_conn_min_interval = 0x0028;
3030         hdev->le_conn_max_interval = 0x0038;
3031         hdev->le_conn_latency = 0x0000;
3032         hdev->le_supv_timeout = 0x002a;
3033         hdev->le_def_tx_len = 0x001b;
3034         hdev->le_def_tx_time = 0x0148;
3035         hdev->le_max_tx_len = 0x001b;
3036         hdev->le_max_tx_time = 0x0148;
3037         hdev->le_max_rx_len = 0x001b;
3038         hdev->le_max_rx_time = 0x0148;
3039
3040         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3041         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3042         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3043         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3044
3045         mutex_init(&hdev->lock);
3046         mutex_init(&hdev->req_lock);
3047
3048         INIT_LIST_HEAD(&hdev->mgmt_pending);
3049         INIT_LIST_HEAD(&hdev->blacklist);
3050         INIT_LIST_HEAD(&hdev->whitelist);
3051         INIT_LIST_HEAD(&hdev->uuids);
3052         INIT_LIST_HEAD(&hdev->link_keys);
3053         INIT_LIST_HEAD(&hdev->long_term_keys);
3054         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3055         INIT_LIST_HEAD(&hdev->remote_oob_data);
3056         INIT_LIST_HEAD(&hdev->le_white_list);
3057         INIT_LIST_HEAD(&hdev->le_conn_params);
3058         INIT_LIST_HEAD(&hdev->pend_le_conns);
3059         INIT_LIST_HEAD(&hdev->pend_le_reports);
3060         INIT_LIST_HEAD(&hdev->conn_hash.list);
3061
3062         INIT_WORK(&hdev->rx_work, hci_rx_work);
3063         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3064         INIT_WORK(&hdev->tx_work, hci_tx_work);
3065         INIT_WORK(&hdev->power_on, hci_power_on);
3066         INIT_WORK(&hdev->error_reset, hci_error_reset);
3067
3068         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3069         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3070         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3071         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3072
3073         skb_queue_head_init(&hdev->rx_q);
3074         skb_queue_head_init(&hdev->cmd_q);
3075         skb_queue_head_init(&hdev->raw_q);
3076
3077         init_waitqueue_head(&hdev->req_wait_q);
3078
3079         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3080
3081         hci_init_sysfs(hdev);
3082         discovery_init(hdev);
3083         adv_info_init(hdev);
3084
3085         return hdev;
3086 }
3087 EXPORT_SYMBOL(hci_alloc_dev);
3088
3089 /* Free HCI device */
3090 void hci_free_dev(struct hci_dev *hdev)
3091 {
3092         /* Will be freed via the device release callback */
3093         put_device(&hdev->dev);
3094 }
3095 EXPORT_SYMBOL(hci_free_dev);
3096
3097 /* Register HCI device */
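/* Minimal driver-side sketch (illustrative; my_open/my_close/my_send
 * are hypothetical callbacks). A transport driver allocates a device,
 * wires up the mandatory callbacks checked below, and registers it:
 *
 *      hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *      hdev->bus   = HCI_VIRTUAL;
 *      hdev->open  = my_open;
 *      hdev->close = my_close;
 *      hdev->send  = my_send;
 *      err = hci_register_dev(hdev);
 *      if (err < 0)
 *              hci_free_dev(hdev);
 */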
3098 int hci_register_dev(struct hci_dev *hdev)
3099 {
3100         int id, error;
3101
3102         if (!hdev->open || !hdev->close || !hdev->send)
3103                 return -EINVAL;
3104
3105         /* Do not allow HCI_AMP devices to register at index 0,
3106          * so the index can be used as the AMP controller ID.
3107          */
3108         switch (hdev->dev_type) {
3109         case HCI_BREDR:
3110                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3111                 break;
3112         case HCI_AMP:
3113                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3114                 break;
3115         default:
3116                 return -EINVAL;
3117         }
3118
3119         if (id < 0)
3120                 return id;
3121
3122         snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3123         hdev->id = id;
3124
3125         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3126
3127         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3128                                           WQ_MEM_RECLAIM, 1, hdev->name);
3129         if (!hdev->workqueue) {
3130                 error = -ENOMEM;
3131                 goto err;
3132         }
3133
3134         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3135                                               WQ_MEM_RECLAIM, 1, hdev->name);
3136         if (!hdev->req_workqueue) {
3137                 destroy_workqueue(hdev->workqueue);
3138                 error = -ENOMEM;
3139                 goto err;
3140         }
3141
3142         if (!IS_ERR_OR_NULL(bt_debugfs))
3143                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3144
3145         dev_set_name(&hdev->dev, "%s", hdev->name);
3146
3147         error = device_add(&hdev->dev);
3148         if (error < 0)
3149                 goto err_wqueue;
3150
3151         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3152                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3153                                     hdev);
3154         if (hdev->rfkill) {
3155                 if (rfkill_register(hdev->rfkill) < 0) {
3156                         rfkill_destroy(hdev->rfkill);
3157                         hdev->rfkill = NULL;
3158                 }
3159         }
3160
3161         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3162                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3163
3164         hci_dev_set_flag(hdev, HCI_SETUP);
3165         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3166
3167         if (hdev->dev_type == HCI_BREDR) {
3168                 /* Assume BR/EDR support until proven otherwise (such as
3169                  * through reading supported features during init).
3170                  */
3171                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3172         }
3173
3174         write_lock(&hci_dev_list_lock);
3175         list_add(&hdev->list, &hci_dev_list);
3176         write_unlock(&hci_dev_list_lock);
3177
3178         /* Devices that are marked for raw-only usage are unconfigured
3179          * and should not be included in normal operation.
3180          */
3181         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3182                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3183
3184         hci_notify(hdev, HCI_DEV_REG);
3185         hci_dev_hold(hdev);
3186
3187         queue_work(hdev->req_workqueue, &hdev->power_on);
3188
3189         return id;
3190
3191 err_wqueue:
3192         destroy_workqueue(hdev->workqueue);
3193         destroy_workqueue(hdev->req_workqueue);
3194 err:
3195         ida_simple_remove(&hci_index_ida, hdev->id);
3196
3197         return error;
3198 }
3199 EXPORT_SYMBOL(hci_register_dev);
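
/* Driver-side registration sketch (illustrative only; the foo_* hooks
 * are hypothetical driver callbacks, not part of this file). The open,
 * close and send hooks are mandatory, as checked above:
 *
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = foo_open;
 *	hdev->close = foo_close;
 *	hdev->send  = foo_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */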
3200
3201 /* Unregister HCI device */
3202 void hci_unregister_dev(struct hci_dev *hdev)
3203 {
3204         int id;
3205
3206         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3207
3208         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3209
3210         id = hdev->id;
3211
3212         write_lock(&hci_dev_list_lock);
3213         list_del(&hdev->list);
3214         write_unlock(&hci_dev_list_lock);
3215
3216         hci_dev_do_close(hdev);
3217
3218         cancel_work_sync(&hdev->power_on);
3219
3220         if (!test_bit(HCI_INIT, &hdev->flags) &&
3221             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3222             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3223                 hci_dev_lock(hdev);
3224                 mgmt_index_removed(hdev);
3225                 hci_dev_unlock(hdev);
3226         }
3227
3228         /* mgmt_index_removed() should take care of emptying the
3229          * pending list. */
3230         BUG_ON(!list_empty(&hdev->mgmt_pending));
3231
3232         hci_notify(hdev, HCI_DEV_UNREG);
3233
3234         if (hdev->rfkill) {
3235                 rfkill_unregister(hdev->rfkill);
3236                 rfkill_destroy(hdev->rfkill);
3237         }
3238
3239         device_del(&hdev->dev);
3240
3241         debugfs_remove_recursive(hdev->debugfs);
3242
3243         destroy_workqueue(hdev->workqueue);
3244         destroy_workqueue(hdev->req_workqueue);
3245
3246         hci_dev_lock(hdev);
3247         hci_bdaddr_list_clear(&hdev->blacklist);
3248         hci_bdaddr_list_clear(&hdev->whitelist);
3249         hci_uuids_clear(hdev);
3250         hci_link_keys_clear(hdev);
3251         hci_smp_ltks_clear(hdev);
3252         hci_smp_irks_clear(hdev);
3253         hci_remote_oob_data_clear(hdev);
3254         hci_bdaddr_list_clear(&hdev->le_white_list);
3255         hci_conn_params_clear_all(hdev);
3256         hci_discovery_filter_clear(hdev);
3257         hci_dev_unlock(hdev);
3258
3259         hci_dev_put(hdev);
3260
3261         ida_simple_remove(&hci_index_ida, id);
3262 }
3263 EXPORT_SYMBOL(hci_unregister_dev);
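
/* Teardown mirrors the registration sketch above: unregister first,
 * then drop the final reference so the device is freed through its
 * release callback:
 *
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */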
3264
3265 /* Suspend HCI device */
3266 int hci_suspend_dev(struct hci_dev *hdev)
3267 {
3268         hci_notify(hdev, HCI_DEV_SUSPEND);
3269         return 0;
3270 }
3271 EXPORT_SYMBOL(hci_suspend_dev);
3272
3273 /* Resume HCI device */
3274 int hci_resume_dev(struct hci_dev *hdev)
3275 {
3276         hci_notify(hdev, HCI_DEV_RESUME);
3277         return 0;
3278 }
3279 EXPORT_SYMBOL(hci_resume_dev);
3280
3281 /* Reset HCI device */
3282 int hci_reset_dev(struct hci_dev *hdev)
3283 {
3284         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3285         struct sk_buff *skb;
3286
3287         skb = bt_skb_alloc(3, GFP_ATOMIC);
3288         if (!skb)
3289                 return -ENOMEM;
3290
3291         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3292         memcpy(skb_put(skb, 3), hw_err, 3);
3293
3294         /* Send Hardware Error to upper stack */
3295         return hci_recv_frame(hdev, skb);
3296 }
3297 EXPORT_SYMBOL(hci_reset_dev);
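
/* Drivers call this when the transport or firmware is wedged; the
 * injected Hardware Error event makes the core tear the device down
 * and bring it back up via the error_reset work. Illustrative sketch
 * (foo_firmware_dead() is a hypothetical driver check):
 *
 *	if (foo_firmware_dead(data))
 *		hci_reset_dev(data->hdev);
 */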
3298
3299 /* Receive frame from HCI drivers */
3300 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3301 {
3302         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3303                       !test_bit(HCI_INIT, &hdev->flags))) {
3304                 kfree_skb(skb);
3305                 return -ENXIO;
3306         }
3307
3308         /* Incoming skb */
3309         bt_cb(skb)->incoming = 1;
3310
3311         /* Time stamp */
3312         __net_timestamp(skb);
3313
3314         skb_queue_tail(&hdev->rx_q, skb);
3315         queue_work(hdev->workqueue, &hdev->rx_work);
3316
3317         return 0;
3318 }
3319 EXPORT_SYMBOL(hci_recv_frame);
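
/* Typical driver RX path (a minimal sketch): allocate an skb for the
 * received packet, tag its HCI packet type and hand it over; note that
 * hci_recv_frame() consumes the skb even on error:
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, len), data, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 */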
3320
3321 /* ---- Interface to upper protocols ---- */
3322
3323 int hci_register_cb(struct hci_cb *cb)
3324 {
3325         BT_DBG("%p name %s", cb, cb->name);
3326
3327         mutex_lock(&hci_cb_list_lock);
3328         list_add_tail(&cb->list, &hci_cb_list);
3329         mutex_unlock(&hci_cb_list_lock);
3330
3331         return 0;
3332 }
3333 EXPORT_SYMBOL(hci_register_cb);
3334
3335 int hci_unregister_cb(struct hci_cb *cb)
3336 {
3337         BT_DBG("%p name %s", cb, cb->name);
3338
3339         mutex_lock(&hci_cb_list_lock);
3340         list_del(&cb->list);
3341         mutex_unlock(&hci_cb_list_lock);
3342
3343         return 0;
3344 }
3345 EXPORT_SYMBOL(hci_unregister_cb);
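
/* Upper protocols hook into HCI events with a statically allocated
 * struct hci_cb, as L2CAP and SCO do; sketch (foo_security_cfm() is a
 * hypothetical callback):
 *
 *	static struct hci_cb foo_cb = {
 *		.name         = "foo",
 *		.security_cfm = foo_security_cfm,
 *	};
 *
 *	hci_register_cb(&foo_cb);
 *	...
 *	hci_unregister_cb(&foo_cb);
 */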
3346
3347 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3348 {
3349         int err;
3350
3351         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3352
3353         /* Time stamp */
3354         __net_timestamp(skb);
3355
3356         /* Send copy to monitor */
3357         hci_send_to_monitor(hdev, skb);
3358
3359         if (atomic_read(&hdev->promisc)) {
3360                 /* Send copy to the sockets */
3361                 hci_send_to_sock(hdev, skb);
3362         }
3363
3364         /* Get rid of skb owner, prior to sending to the driver. */
3365         skb_orphan(skb);
3366
3367         err = hdev->send(hdev, skb);
3368         if (err < 0) {
3369                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3370                 kfree_skb(skb);
3371         }
3372 }
3373
3374 /* Send HCI command */
3375 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3376                  const void *param)
3377 {
3378         struct sk_buff *skb;
3379
3380         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3381
3382         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3383         if (!skb) {
3384                 BT_ERR("%s no memory for command", hdev->name);
3385                 return -ENOMEM;
3386         }
3387
3388         /* Stand-alone HCI commands must be flagged as
3389          * single-command requests.
3390          */
3391         bt_cb(skb)->req.start = true;
3392
3393         skb_queue_tail(&hdev->cmd_q, skb);
3394         queue_work(hdev->workqueue, &hdev->cmd_work);
3395
3396         return 0;
3397 }
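
/* Sketch of a stand-alone command submission: HCI_Reset carries no
 * parameters, while parameterized commands pass a buffer and length,
 * e.g. the single byte of Write Scan Enable:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */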
3398
3399 /* Get data from the previously sent command */
3400 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3401 {
3402         struct hci_command_hdr *hdr;
3403
3404         if (!hdev->sent_cmd)
3405                 return NULL;
3406
3407         hdr = (void *) hdev->sent_cmd->data;
3408
3409         if (hdr->opcode != cpu_to_le16(opcode))
3410                 return NULL;
3411
3412         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3413
3414         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3415 }
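
/* Event handlers use this to recover the parameters of the command
 * that a Command Complete event refers to, in the style of
 * hci_event.c:
 *
 *	__u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *	if (!sent)
 *		return;
 */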
3416
3417 /* Send ACL data */
3418 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3419 {
3420         struct hci_acl_hdr *hdr;
3421         int len = skb->len;
3422
3423         skb_push(skb, HCI_ACL_HDR_SIZE);
3424         skb_reset_transport_header(skb);
3425         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3426         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3427         hdr->dlen   = cpu_to_le16(len);
3428 }
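
/* hci_handle_pack() folds the 12-bit connection handle and the 2-bit
 * packet boundary/broadcast flags into the single 16-bit header field,
 * i.e. (handle & 0x0fff) | (flags << 12); handle 0x002a with ACL_START
 * (0x02), for example, becomes 0x202a before the little-endian
 * conversion.
 */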
3429
3430 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3431                           struct sk_buff *skb, __u16 flags)
3432 {
3433         struct hci_conn *conn = chan->conn;
3434         struct hci_dev *hdev = conn->hdev;
3435         struct sk_buff *list;
3436
3437         skb->len = skb_headlen(skb);
3438         skb->data_len = 0;
3439
3440         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3441
3442         switch (hdev->dev_type) {
3443         case HCI_BREDR:
3444                 hci_add_acl_hdr(skb, conn->handle, flags);
3445                 break;
3446         case HCI_AMP:
3447                 hci_add_acl_hdr(skb, chan->handle, flags);
3448                 break;
3449         default:
3450                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3451                 return;
3452         }
3453
3454         list = skb_shinfo(skb)->frag_list;
3455         if (!list) {
3456                 /* Non-fragmented */
3457                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3458
3459                 skb_queue_tail(queue, skb);
3460         } else {
3461                 /* Fragmented */
3462                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3463
3464                 skb_shinfo(skb)->frag_list = NULL;
3465
3466                 /* Queue all fragments atomically. We need to use spin_lock_bh
3467                  * here because for 6LoWPAN links this function can be called
3468                  * from softirq context, where taking a plain spin lock could
3469                  * deadlock.
3470                  */
3471                 spin_lock_bh(&queue->lock);
3472
3473                 __skb_queue_tail(queue, skb);
3474
3475                 flags &= ~ACL_START;
3476                 flags |= ACL_CONT;
3477                 do {
3478                         skb = list;
                             list = list->next;
3479
3480                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3481                         hci_add_acl_hdr(skb, conn->handle, flags);
3482
3483                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3484
3485                         __skb_queue_tail(queue, skb);
3486                 } while (list);
3487
3488                 spin_unlock_bh(&queue->lock);
3489         }
3490 }
3491
3492 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3493 {
3494         struct hci_dev *hdev = chan->conn->hdev;
3495
3496         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3497
3498         hci_queue_acl(chan, &chan->data_q, skb, flags);
3499
3500         queue_work(hdev->workqueue, &hdev->tx_work);
3501 }
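
/* L2CAP is the primary caller; it hands over a (possibly fragmented)
 * skb together with the packet boundary flags, roughly:
 *
 *	hci_send_acl(conn->hchan, skb, ACL_START);
 *
 * where conn is the owning struct l2cap_conn.
 */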
3502
3503 /* Send SCO data */
3504 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3505 {
3506         struct hci_dev *hdev = conn->hdev;
3507         struct hci_sco_hdr hdr;
3508
3509         BT_DBG("%s len %d", hdev->name, skb->len);
3510
3511         hdr.handle = cpu_to_le16(conn->handle);
3512         hdr.dlen   = skb->len;
3513
3514         skb_push(skb, HCI_SCO_HDR_SIZE);
3515         skb_reset_transport_header(skb);
3516         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3517
3518         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3519
3520         skb_queue_tail(&conn->data_q, skb);
3521         queue_work(hdev->workqueue, &hdev->tx_work);
3522 }
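
/* The SCO socket layer calls this once per outgoing frame (see
 * sco_send_frame() in sco.c); unlike the ACL path above there is no
 * fragmentation here, so the frame must already fit the SCO MTU:
 *
 *	hci_send_sco(conn->hcon, skb);
 */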
3523
3524 /* ---- HCI TX task (outgoing data) ---- */
3525
3526 /* HCI Connection scheduler */
3527 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3528                                      int *quote)
3529 {
3530         struct hci_conn_hash *h = &hdev->conn_hash;
3531         struct hci_conn *conn = NULL, *c;
3532         unsigned int num = 0, min = ~0;
3533
3534         /* We don't have to lock the device here. Connections are always
3535          * added and removed with the TX task disabled. */
3536
3537         rcu_read_lock();
3538
3539         list_for_each_entry_rcu(c, &h->list, list) {
3540                 if (c->type != type || skb_queue_empty(&c->data_q))
3541                         continue;
3542
3543                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3544                         continue;
3545
3546                 num++;
3547
3548                 if (c->sent < min) {
3549                         min  = c->sent;
3550                         conn = c;
3551                 }
3552
3553                 if (hci_conn_num(hdev, type) == num)
3554                         break;
3555         }
3556
3557         rcu_read_unlock();
3558
3559         if (conn) {
3560                 int cnt, q;
3561
3562                 switch (conn->type) {
3563                 case ACL_LINK:
3564                         cnt = hdev->acl_cnt;
3565                         break;
3566                 case SCO_LINK:
3567                 case ESCO_LINK:
3568                         cnt = hdev->sco_cnt;
3569                         break;
3570                 case LE_LINK:
3571                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3572                         break;
3573                 default:
3574                         cnt = 0;
3575                         BT_ERR("Unknown link type");
3576                 }
3577
3578                 q = cnt / num;
3579                 *quote = q ? q : 1;
3580         } else {
3581                 *quote = 0;
3582         }

3583         BT_DBG("conn %p quote %d", conn, *quote);
3584         return conn;
3585 }
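
/* The quote is a fair share of the free controller buffers: with, say,
 * hdev->sco_cnt == 8 free SCO buffers and num == 3 active SCO links,
 * the selected connection may send 8 / 3 = 2 packets this round, and a
 * link is never throttled below one packet.
 */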
3586
3587 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3588 {
3589         struct hci_conn_hash *h = &hdev->conn_hash;
3590         struct hci_conn *c;
3591
3592         BT_ERR("%s link tx timeout", hdev->name);
3593
3594         rcu_read_lock();
3595
3596         /* Kill stalled connections */
3597         list_for_each_entry_rcu(c, &h->list, list) {
3598                 if (c->type == type && c->sent) {
3599                         BT_ERR("%s killing stalled connection %pMR",
3600                                hdev->name, &c->dst);
3601                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3602                 }
3603         }
3604
3605         rcu_read_unlock();
3606 }
3607
3608 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3609                                       int *quote)
3610 {
3611         struct hci_conn_hash *h = &hdev->conn_hash;
3612         struct hci_chan *chan = NULL;
3613         unsigned int num = 0, min = ~0, cur_prio = 0;
3614         struct hci_conn *conn;
3615         int cnt, q, conn_num = 0;
3616
3617         BT_DBG("%s", hdev->name);
3618
3619         rcu_read_lock();
3620
3621         list_for_each_entry_rcu(conn, &h->list, list) {
3622                 struct hci_chan *tmp;
3623
3624                 if (conn->type != type)
3625                         continue;
3626
3627                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3628                         continue;
3629
3630                 conn_num++;
3631
3632                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3633                         struct sk_buff *skb;
3634
3635                         if (skb_queue_empty(&tmp->data_q))
3636                                 continue;
3637
3638                         skb = skb_peek(&tmp->data_q);
3639                         if (skb->priority < cur_prio)
3640                                 continue;
3641
3642                         if (skb->priority > cur_prio) {
3643                                 num = 0;
3644                                 min = ~0;
3645                                 cur_prio = skb->priority;
3646                         }
3647
3648                         num++;
3649
3650                         if (conn->sent < min) {
3651                                 min  = conn->sent;
3652                                 chan = tmp;
3653                         }
3654                 }
3655
3656                 if (hci_conn_num(hdev, type) == conn_num)
3657                         break;
3658         }
3659
3660         rcu_read_unlock();
3661
3662         if (!chan)
3663                 return NULL;
3664
3665         switch (chan->conn->type) {
3666         case ACL_LINK:
3667                 cnt = hdev->acl_cnt;
3668                 break;
3669         case AMP_LINK:
3670                 cnt = hdev->block_cnt;
3671                 break;
3672         case SCO_LINK:
3673         case ESCO_LINK:
3674                 cnt = hdev->sco_cnt;
3675                 break;
3676         case LE_LINK:
3677                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3678                 break;
3679         default:
3680                 cnt = 0;
3681                 BT_ERR("Unknown link type");
3682         }
3683
3684         q = cnt / num;
3685         *quote = q ? q : 1;
3686         BT_DBG("chan %p quote %d", chan, *quote);
3687         return chan;
3688 }
3689
3690 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3691 {
3692         struct hci_conn_hash *h = &hdev->conn_hash;
3693         struct hci_conn *conn;
3694         int num = 0;
3695
3696         BT_DBG("%s", hdev->name);
3697
3698         rcu_read_lock();
3699
3700         list_for_each_entry_rcu(conn, &h->list, list) {
3701                 struct hci_chan *chan;
3702
3703                 if (conn->type != type)
3704                         continue;
3705
3706                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3707                         continue;
3708
3709                 num++;
3710
3711                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3712                         struct sk_buff *skb;
3713
3714                         if (chan->sent) {
3715                                 chan->sent = 0;
3716                                 continue;
3717                         }
3718
3719                         if (skb_queue_empty(&chan->data_q))
3720                                 continue;
3721
3722                         skb = skb_peek(&chan->data_q);
3723                         if (skb->priority >= HCI_PRIO_MAX - 1)
3724                                 continue;
3725
3726                         skb->priority = HCI_PRIO_MAX - 1;
3727
3728                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3729                                skb->priority);
3730                 }
3731
3732                 if (hci_conn_num(hdev, type) == num)
3733                         break;
3734         }
3735
3736         rcu_read_unlock();
3737 }
3739
3740 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3741 {
3742         /* Calculate count of blocks used by this packet */
3743         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3744 }
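
/* Example: with hdev->block_len == 64, a 300 byte ACL packet (296
 * bytes of payload after the 4 byte ACL header) occupies
 * DIV_ROUND_UP(296, 64) = 5 controller data blocks.
 */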
3745
3746 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3747 {
3748         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3749                 /* ACL tx timeout must be longer than the maximum
3750                  * link supervision timeout (40.9 seconds). */
3751                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3752                                        HCI_ACL_TX_TIMEOUT))
3753                         hci_link_tx_to(hdev, ACL_LINK);
3754         }
3755 }
3756
3757 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3758 {
3759         unsigned int cnt = hdev->acl_cnt;
3760         struct hci_chan *chan;
3761         struct sk_buff *skb;
3762         int quote;
3763
3764         __check_timeout(hdev, cnt);
3765
3766         while (hdev->acl_cnt &&
3767                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3768                 u32 priority = (skb_peek(&chan->data_q))->priority;
3769                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3770                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3771                                skb->len, skb->priority);
3772
3773                         /* Stop if priority has changed */
3774                         if (skb->priority < priority)
3775                                 break;
3776
3777                         skb = skb_dequeue(&chan->data_q);
3778
3779                         hci_conn_enter_active_mode(chan->conn,
3780                                                    bt_cb(skb)->force_active);
3781
3782                         hci_send_frame(hdev, skb);
3783                         hdev->acl_last_tx = jiffies;
3784
3785                         hdev->acl_cnt--;
3786                         chan->sent++;
3787                         chan->conn->sent++;
3788                 }
3789         }
3790
3791         if (cnt != hdev->acl_cnt)
3792                 hci_prio_recalculate(hdev, ACL_LINK);
3793 }
3794
3795 static void hci_sched_acl_blk(struct hci_dev *hdev)
3796 {
3797         unsigned int cnt = hdev->block_cnt;
3798         struct hci_chan *chan;
3799         struct sk_buff *skb;
3800         int quote;
3801         u8 type;
3802
3803         __check_timeout(hdev, cnt);
3804
3805         BT_DBG("%s", hdev->name);
3806
3807         if (hdev->dev_type == HCI_AMP)
3808                 type = AMP_LINK;
3809         else
3810                 type = ACL_LINK;
3811
3812         while (hdev->block_cnt > 0 &&
3813                (chan = hci_chan_sent(hdev, type, &quote))) {
3814                 u32 priority = (skb_peek(&chan->data_q))->priority;
3815                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3816                         int blocks;
3817
3818                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3819                                skb->len, skb->priority);
3820
3821                         /* Stop if priority has changed */
3822                         if (skb->priority < priority)
3823                                 break;
3824
3825                         skb = skb_dequeue(&chan->data_q);
3826
3827                         blocks = __get_blocks(hdev, skb);
3828                         if (blocks > hdev->block_cnt)
3829                                 return;
3830
3831                         hci_conn_enter_active_mode(chan->conn,
3832                                                    bt_cb(skb)->force_active);
3833
3834                         hci_send_frame(hdev, skb);
3835                         hdev->acl_last_tx = jiffies;
3836
3837                         hdev->block_cnt -= blocks;
3838                         quote -= blocks;
3839
3840                         chan->sent += blocks;
3841                         chan->conn->sent += blocks;
3842                 }
3843         }
3844
3845         if (cnt != hdev->block_cnt)
3846                 hci_prio_recalculate(hdev, type);
3847 }
3848
3849 static void hci_sched_acl(struct hci_dev *hdev)
3850 {
3851         BT_DBG("%s", hdev->name);
3852
3853         /* No ACL links on a BR/EDR controller, nothing to schedule */
3854         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3855                 return;
3856
3857         /* No AMP links on an AMP controller, nothing to schedule */
3858         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3859                 return;
3860
3861         switch (hdev->flow_ctl_mode) {
3862         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3863                 hci_sched_acl_pkt(hdev);
3864                 break;
3865
3866         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3867                 hci_sched_acl_blk(hdev);
3868                 break;
3869         }
3870 }
3871
3872 /* Schedule SCO */
3873 static void hci_sched_sco(struct hci_dev *hdev)
3874 {
3875         struct hci_conn *conn;
3876         struct sk_buff *skb;
3877         int quote;
3878
3879         BT_DBG("%s", hdev->name);
3880
3881         if (!hci_conn_num(hdev, SCO_LINK))
3882                 return;
3883
3884         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3885                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3886                         BT_DBG("skb %p len %d", skb, skb->len);
3887                         hci_send_frame(hdev, skb);
3888
3889                         conn->sent++;
3890                         if (conn->sent == ~0)
3891                                 conn->sent = 0;
3892                 }
3893         }
3894 }
3895
3896 static void hci_sched_esco(struct hci_dev *hdev)
3897 {
3898         struct hci_conn *conn;
3899         struct sk_buff *skb;
3900         int quote;
3901
3902         BT_DBG("%s", hdev->name);
3903
3904         if (!hci_conn_num(hdev, ESCO_LINK))
3905                 return;
3906
3907         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3908                                                      &quote))) {
3909                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3910                         BT_DBG("skb %p len %d", skb, skb->len);
3911                         hci_send_frame(hdev, skb);
3912
3913                         conn->sent++;
3914                         if (conn->sent == ~0)
3915                                 conn->sent = 0;
3916                 }
3917         }
3918 }
3919
3920 static void hci_sched_le(struct hci_dev *hdev)
3921 {
3922         struct hci_chan *chan;
3923         struct sk_buff *skb;
3924         int quote, cnt, tmp;
3925
3926         BT_DBG("%s", hdev->name);
3927
3928         if (!hci_conn_num(hdev, LE_LINK))
3929                 return;
3930
3931         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3932                 /* LE tx timeout must be longer than the maximum
3933                  * link supervision timeout (40.9 seconds). */
3934                 if (!hdev->le_cnt && hdev->le_pkts &&
3935                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3936                         hci_link_tx_to(hdev, LE_LINK);
3937         }
3938
3939         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3940         tmp = cnt;
3941         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3942                 u32 priority = (skb_peek(&chan->data_q))->priority;
3943                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3944                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3945                                skb->len, skb->priority);
3946
3947                         /* Stop if priority has changed */
3948                         if (skb->priority < priority)
3949                                 break;
3950
3951                         skb = skb_dequeue(&chan->data_q);
3952
3953                         hci_send_frame(hdev, skb);
3954                         hdev->le_last_tx = jiffies;
3955
3956                         cnt--;
3957                         chan->sent++;
3958                         chan->conn->sent++;
3959                 }
3960         }
3961
3962         if (hdev->le_pkts)
3963                 hdev->le_cnt = cnt;
3964         else
3965                 hdev->acl_cnt = cnt;
3966
3967         if (cnt != tmp)
3968                 hci_prio_recalculate(hdev, LE_LINK);
3969 }
3970
3971 static void hci_tx_work(struct work_struct *work)
3972 {
3973         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3974         struct sk_buff *skb;
3975
3976         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3977                hdev->sco_cnt, hdev->le_cnt);
3978
3979         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3980                 /* Schedule queues and send stuff to HCI driver */
3981                 hci_sched_acl(hdev);
3982                 hci_sched_sco(hdev);
3983                 hci_sched_esco(hdev);
3984                 hci_sched_le(hdev);
3985         }
3986
3987         /* Send any queued raw (unknown type) packets */
3988         while ((skb = skb_dequeue(&hdev->raw_q)))
3989                 hci_send_frame(hdev, skb);
3990 }
3991
3992 /* ----- HCI RX task (incoming data processing) ----- */
3993
3994 /* ACL data packet */
3995 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3996 {
3997         struct hci_acl_hdr *hdr = (void *) skb->data;
3998         struct hci_conn *conn;
3999         __u16 handle, flags;
4000
4001         skb_pull(skb, HCI_ACL_HDR_SIZE);
4002
4003         handle = __le16_to_cpu(hdr->handle);
4004         flags  = hci_flags(handle);
4005         handle = hci_handle(handle);
4006
4007         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4008                handle, flags);
4009
4010         hdev->stat.acl_rx++;
4011
4012         hci_dev_lock(hdev);
4013         conn = hci_conn_hash_lookup_handle(hdev, handle);
4014         hci_dev_unlock(hdev);
4015
4016         if (conn) {
4017                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4018
4019                 /* Send to upper protocol */
4020                 l2cap_recv_acldata(conn, skb, flags);
4021                 return;
4022         }
4023
4024         BT_ERR("%s ACL packet for unknown connection handle %d",
4025                hdev->name, handle);
4026         kfree_skb(skb);
4028 }
4029
4030 /* SCO data packet */
4031 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4032 {
4033         struct hci_sco_hdr *hdr = (void *) skb->data;
4034         struct hci_conn *conn;
4035         __u16 handle;
4036
4037         skb_pull(skb, HCI_SCO_HDR_SIZE);
4038
4039         handle = __le16_to_cpu(hdr->handle);
4040
4041         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4042
4043         hdev->stat.sco_rx++;
4044
4045         hci_dev_lock(hdev);
4046         conn = hci_conn_hash_lookup_handle(hdev, handle);
4047         hci_dev_unlock(hdev);
4048
4049         if (conn) {
4050                 /* Send to upper protocol */
4051                 sco_recv_scodata(conn, skb);
4052                 return;
4053         }
4054
4055         BT_ERR("%s SCO packet for unknown connection handle %d",
4056                hdev->name, handle);
4057         kfree_skb(skb);
4059 }
4060
4061 static bool hci_req_is_complete(struct hci_dev *hdev)
4062 {
4063         struct sk_buff *skb;
4064
4065         skb = skb_peek(&hdev->cmd_q);
4066         if (!skb)
4067                 return true;
4068
4069         return bt_cb(skb)->req.start;
4070 }
4071
4072 static void hci_resend_last(struct hci_dev *hdev)
4073 {
4074         struct hci_command_hdr *sent;
4075         struct sk_buff *skb;
4076         u16 opcode;
4077
4078         if (!hdev->sent_cmd)
4079                 return;
4080
4081         sent = (void *) hdev->sent_cmd->data;
4082         opcode = __le16_to_cpu(sent->opcode);
4083         if (opcode == HCI_OP_RESET)
4084                 return;
4085
4086         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4087         if (!skb)
4088                 return;
4089
4090         skb_queue_head(&hdev->cmd_q, skb);
4091         queue_work(hdev->workqueue, &hdev->cmd_work);
4092 }
4093
4094 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4095                           hci_req_complete_t *req_complete,
4096                           hci_req_complete_skb_t *req_complete_skb)
4097 {
4098         struct sk_buff *skb;
4099         unsigned long flags;
4100
4101         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4102
4103         /* If the completed command doesn't match the last one that was
4104          * sent, we need to do special handling of it.
4105          */
4106         if (!hci_sent_cmd_data(hdev, opcode)) {
4107                 /* Some CSR-based controllers generate a spontaneous
4108                  * reset complete event during init and any pending
4109                  * command will never be completed. In such a case we
4110                  * need to resend whatever was the last sent
4111                  * command.
4112                  */
4113                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4114                         hci_resend_last(hdev);
4115
4116                 return;
4117         }
4118
4119         /* If the command succeeded and there are still more commands in
4120          * this request, the request is not yet complete.
4121          */
4122         if (!status && !hci_req_is_complete(hdev))
4123                 return;
4124
4125         /* If this was the last command in a request, the complete
4126          * callback would be found in hdev->sent_cmd instead of the
4127          * command queue (hdev->cmd_q).
4128          */
4129         if (bt_cb(hdev->sent_cmd)->req.complete) {
4130                 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4131                 return;
4132         }
4133
4134         if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4135                 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4136                 return;
4137         }
4138
4139         /* Remove all pending commands belonging to this request */
4140         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4141         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4142                 if (bt_cb(skb)->req.start) {
4143                         __skb_queue_head(&hdev->cmd_q, skb);
4144                         break;
4145                 }
4146
4147                 *req_complete = bt_cb(skb)->req.complete;
4148                 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4149                 kfree_skb(skb);
4150         }
4151         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4152 }
4153
4154 static void hci_rx_work(struct work_struct *work)
4155 {
4156         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4157         struct sk_buff *skb;
4158
4159         BT_DBG("%s", hdev->name);
4160
4161         while ((skb = skb_dequeue(&hdev->rx_q))) {
4162                 /* Send copy to monitor */
4163                 hci_send_to_monitor(hdev, skb);
4164
4165                 if (atomic_read(&hdev->promisc)) {
4166                         /* Send copy to the sockets */
4167                         hci_send_to_sock(hdev, skb);
4168                 }
4169
4170                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4171                         kfree_skb(skb);
4172                         continue;
4173                 }
4174
4175                 if (test_bit(HCI_INIT, &hdev->flags)) {
4176                         /* Don't process data packets in this state. */
4177                         switch (bt_cb(skb)->pkt_type) {
4178                         case HCI_ACLDATA_PKT:
4179                         case HCI_SCODATA_PKT:
4180                                 kfree_skb(skb);
4181                                 continue;
4182                         }
4183                 }
4184
4185                 /* Process frame */
4186                 switch (bt_cb(skb)->pkt_type) {
4187                 case HCI_EVENT_PKT:
4188                         BT_DBG("%s Event packet", hdev->name);
4189                         hci_event_packet(hdev, skb);
4190                         break;
4191
4192                 case HCI_ACLDATA_PKT:
4193                         BT_DBG("%s ACL data packet", hdev->name);
4194                         hci_acldata_packet(hdev, skb);
4195                         break;
4196
4197                 case HCI_SCODATA_PKT:
4198                         BT_DBG("%s SCO data packet", hdev->name);
4199                         hci_scodata_packet(hdev, skb);
4200                         break;
4201
4202                 default:
4203                         kfree_skb(skb);
4204                         break;
4205                 }
4206         }
4207 }
4208
4209 static void hci_cmd_work(struct work_struct *work)
4210 {
4211         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4212         struct sk_buff *skb;
4213
4214         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4215                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4216
4217         /* Send queued commands */
4218         if (atomic_read(&hdev->cmd_cnt)) {
4219                 skb = skb_dequeue(&hdev->cmd_q);
4220                 if (!skb)
4221                         return;
4222
4223                 kfree_skb(hdev->sent_cmd);
4224
4225                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4226                 if (hdev->sent_cmd) {
4227                         atomic_dec(&hdev->cmd_cnt);
4228                         hci_send_frame(hdev, skb);
4229                         if (test_bit(HCI_RESET, &hdev->flags))
4230                                 cancel_delayed_work(&hdev->cmd_timer);
4231                         else
4232                                 schedule_delayed_work(&hdev->cmd_timer,
4233                                                       HCI_CMD_TIMEOUT);
4234                 } else {
4235                         skb_queue_head(&hdev->cmd_q, skb);
4236                         queue_work(hdev->workqueue, &hdev->cmd_work);
4237                 }
4238         }
4239 }