/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
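
/* A minimal userspace sketch for exercising the dut_mode entry above.
 * It assumes debugfs is mounted at /sys/kernel/debug and the controller
 * is hci0 (both are assumptions, not dictated by this file); strtobool()
 * accepts "Y"/"1" and "N"/"0".
 *
 *	int fd = open("/sys/kernel/debug/bluetooth/hci0/dut_mode", O_RDWR);
 *
 *	if (fd >= 0) {
 *		write(fd, "Y", 1);	// enable: issues HCI_OP_ENABLE_DUT_MODE
 *		write(fd, "N", 1);	// disable: issues HCI_OP_RESET
 *		close(fd);
 *	}
 */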

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
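
/* A minimal sketch of a typical __hci_cmd_sync() call, e.g. reading the
 * controller's BD address during setup. Callers serialize through
 * hci_req_lock() (as dut_mode_write() above does); the returned skb holds
 * the Command Complete parameters and must be freed. The bacpy() into
 * hdev->bdaddr is illustrative only.
 *
 *	struct hci_rp_read_bd_addr *rp;
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	rp = (struct hci_rp_read_bd_addr *)skb->data;
 *	if (skb->len == sizeof(*rp) && !rp->status)
 *		bacpy(&hdev->bdaddr, &rp->bdaddr);
 *
 *	kfree_skb(skb);
 */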

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
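
/* A minimal sketch of the pattern hci_req_sync() expects: a request
 * builder that queues one or more commands, then a synchronous run.
 * This mirrors hci_scan_req() further below; the function name here is
 * hypothetical.
 *
 *	static void example_scan_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	int err = hci_req_sync(hdev, example_scan_req,
 *			       SCAN_PAGE | SCAN_INQUIRY, HCI_CMD_TIMEOUT);
 */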

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
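
/* A worked example of the mask layout used above, following the Core
 * spec convention that the event with code N occupies bit (N - 1) of
 * the 64-bit mask, i.e. byte (N - 1) / 8, bit (N - 1) % 8. For the
 * Disconnection Complete event (code 0x05):
 *
 *	bit index   = 0x05 - 1 = 4
 *	byte        = 4 / 8 = 0
 *	bit in byte = 4 % 8 = 4, so the mask value is 1 << 4 = 0x10
 *
 * which matches events[0] |= 0x10 in the LE-only default above.
 */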

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However, some controllers list
                 * max_page as 0 as long as SSP has not been enabled. To
                 * achieve proper debugging output, force the minimum
                 * max_page to at least 1.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
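
/* A worked example of the resulting policy word: for a controller that
 * is role-switch and sniff capable but supports neither hold nor park,
 * the accumulation above evaluates to
 *
 *	link_policy = HCI_LP_RSWITCH | HCI_LP_SNIFF
 *	            = 0x0001 | 0x0004 = 0x0005
 *
 * sent as the little-endian Default Link Policy Settings parameter.
 */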

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If the Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If the Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They clearly indicate its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it
         * is marked as supported. If not supported, assume that the
         * controller does not have actual support for stored link keys,
         * which makes this command redundant anyway.
         *
         * Some controllers indicate that they support deleting stored
         * link keys, but they don't. The quirk lets a driver just
         * disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * This means a controller can run through its setup phase and
         * then discover missing settings. In that case this function is
         * not called during setup; it will only be called later, during
         * the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
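
/* A minimal sketch of the expected reference pairing: every successful
 * hci_dev_get() must be balanced by a hci_dev_put() once the caller is
 * done with the device, as hci_inquiry() below demonstrates. The index
 * 0 here is hypothetical.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (!hdev)
 *		return -ENODEV;
 *	// ... use hdev ...
 *	hci_dev_put(hdev);
 */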

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until the Inquiry procedure finishes (HCI_INQUIRY
                 * flag is cleared). If it is interrupted by a signal,
                 * return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore, allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
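
/* A minimal userspace sketch of driving this ioctl on a raw HCI socket.
 * The buffer layout (request header followed by room for the responses)
 * is an assumption based on the copy_to_user() calls above; the GIAC LAP
 * 0x9e8b33 is stored little-endian.
 *
 *	struct hci_inquiry_req *ir;
 *	char buf[sizeof(*ir) + 255 * sizeof(struct inquiry_info)];
 *
 *	ir = (struct hci_inquiry_req *)buf;
 *	memset(buf, 0, sizeof(buf));
 *	ir->dev_id  = 0;			// hci0 (hypothetical)
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;			// kernel waits length * 2000 ms
 *	ir->num_rsp = 255;
 *
 *	if (ioctl(sock, HCIINQUIRY, buf) < 0)	// sock: AF_BLUETOOTH raw socket
 *		perror("HCIINQUIRY");
 */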

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}
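
/* A minimal userspace sketch of the legacy path into hci_dev_open():
 * the HCIDEVUP ioctl on a raw HCI socket. The device index 0 (hci0)
 * is hypothetical.
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP");
 */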

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
        struct hci_conn_params *p;

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                if (p->conn) {
                        hci_conn_drop(p->conn);
                        hci_conn_put(p->conn);
                        p->conn = NULL;
                }
                list_del_init(&p->action);
        }

        BT_DBG("All LE pending actions cleared");
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            test_bit(HCI_UP, &hdev->flags)) {
                /* Execute vendor specific shutdown routine */
                if (hdev->shutdown)
                        hdev->shutdown(hdev);
        }

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                cancel_delayed_work_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);
        cancel_delayed_work_sync(&hdev->le_scan_restart);

        if (hci_dev_test_flag(hdev, HCI_MGMT))
                cancel_delayed_work_sync(&hdev->rpa_expired);

        if (hdev->adv_instance_timeout) {
                cancel_delayed_work_sync(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;
        }

        /* Avoid potential lockdep warnings from the *_flush() calls by
         * ensuring the workqueue is empty up front.
         */
        drain_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);

        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

        if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
                if (hdev->dev_type == HCI_BREDR)
                        mgmt_powered(hdev, 0);
        }

        hci_inquiry_cache_flush(hdev);
        hci_pend_le_actions_clear(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        smp_unregister(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
            !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }
1635
1636         /* Flush cmd work */
1637         flush_work(&hdev->cmd_work);
1638
1639         /* Drop queues */
1640         skb_queue_purge(&hdev->rx_q);
1641         skb_queue_purge(&hdev->cmd_q);
1642         skb_queue_purge(&hdev->raw_q);
1643
1644         /* Drop last sent command */
1645         if (hdev->sent_cmd) {
1646                 cancel_delayed_work_sync(&hdev->cmd_timer);
1647                 kfree_skb(hdev->sent_cmd);
1648                 hdev->sent_cmd = NULL;
1649         }
1650
1651         /* After this point our queues are empty
1652          * and no tasks are scheduled. */
1653         hdev->close(hdev);
1654
1655         /* Clear flags */
1656         hdev->flags &= BIT(HCI_RAW);
1657         hci_dev_clear_volatile_flags(hdev);
1658
1659         /* Controller radio is available but is currently powered down */
1660         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1661
1662         memset(hdev->eir, 0, sizeof(hdev->eir));
1663         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1664         bacpy(&hdev->random_addr, BDADDR_ANY);
1665
1666         hci_req_unlock(hdev);
1667
1668         hci_dev_put(hdev);
1669         return 0;
1670 }
1671
1672 int hci_dev_close(__u16 dev)
1673 {
1674         struct hci_dev *hdev;
1675         int err;
1676
1677         hdev = hci_dev_get(dev);
1678         if (!hdev)
1679                 return -ENODEV;
1680
1681         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1682                 err = -EBUSY;
1683                 goto done;
1684         }
1685
1686         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1687                 cancel_delayed_work(&hdev->power_off);
1688
1689         err = hci_dev_do_close(hdev);
1690
1691 done:
1692         hci_dev_put(hdev);
1693         return err;
1694 }
1695
1696 static int hci_dev_do_reset(struct hci_dev *hdev)
1697 {
1698         int ret;
1699
1700         BT_DBG("%s %p", hdev->name, hdev);
1701
1702         hci_req_lock(hdev);
1703
1704         /* Drop queues */
1705         skb_queue_purge(&hdev->rx_q);
1706         skb_queue_purge(&hdev->cmd_q);
1707
1708         /* Avoid potential lockdep warnings from the *_flush() calls by
1709          * ensuring the workqueue is empty up front.
1710          */
1711         drain_workqueue(hdev->workqueue);
1712
1713         hci_dev_lock(hdev);
1714         hci_inquiry_cache_flush(hdev);
1715         hci_conn_hash_flush(hdev);
1716         hci_dev_unlock(hdev);
1717
1718         if (hdev->flush)
1719                 hdev->flush(hdev);
1720
1721         atomic_set(&hdev->cmd_cnt, 1);
1722         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1723
1724         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1725
1726         hci_req_unlock(hdev);
1727         return ret;
1728 }
1729
1730 int hci_dev_reset(__u16 dev)
1731 {
1732         struct hci_dev *hdev;
1733         int err;
1734
1735         hdev = hci_dev_get(dev);
1736         if (!hdev)
1737                 return -ENODEV;
1738
1739         if (!test_bit(HCI_UP, &hdev->flags)) {
1740                 err = -ENETDOWN;
1741                 goto done;
1742         }
1743
1744         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1745                 err = -EBUSY;
1746                 goto done;
1747         }
1748
1749         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1750                 err = -EOPNOTSUPP;
1751                 goto done;
1752         }
1753
1754         err = hci_dev_do_reset(hdev);
1755
1756 done:
1757         hci_dev_put(hdev);
1758         return err;
1759 }
1760
1761 int hci_dev_reset_stat(__u16 dev)
1762 {
1763         struct hci_dev *hdev;
1764         int ret = 0;
1765
1766         hdev = hci_dev_get(dev);
1767         if (!hdev)
1768                 return -ENODEV;
1769
1770         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1771                 ret = -EBUSY;
1772                 goto done;
1773         }
1774
1775         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1776                 ret = -EOPNOTSUPP;
1777                 goto done;
1778         }
1779
1780         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1781
1782 done:
1783         hci_dev_put(hdev);
1784         return ret;
1785 }
1786
1787 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1788 {
1789         bool conn_changed, discov_changed;
1790
1791         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1792
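             /* Page scan (SCAN_PAGE) corresponds to the connectable
              * setting and inquiry scan (SCAN_INQUIRY) to the
              * discoverable setting, so mirror the new scan mode into
              * the matching mgmt flags.
              */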
1793         if ((scan & SCAN_PAGE))
1794                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1795                                                           HCI_CONNECTABLE);
1796         else
1797                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1798                                                            HCI_CONNECTABLE);
1799
1800         if ((scan & SCAN_INQUIRY)) {
1801                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1802                                                             HCI_DISCOVERABLE);
1803         } else {
1804                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1805                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1806                                                              HCI_DISCOVERABLE);
1807         }
1808
1809         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1810                 return;
1811
1812         if (conn_changed || discov_changed) {
1813                 /* In case this was disabled through mgmt */
1814                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1815
1816                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1817                         mgmt_update_adv_data(hdev);
1818
1819                 mgmt_new_settings(hdev);
1820         }
1821 }
1822
1823 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1824 {
1825         struct hci_dev *hdev;
1826         struct hci_dev_req dr;
1827         int err = 0;
1828
1829         if (copy_from_user(&dr, arg, sizeof(dr)))
1830                 return -EFAULT;
1831
1832         hdev = hci_dev_get(dr.dev_id);
1833         if (!hdev)
1834                 return -ENODEV;
1835
1836         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1837                 err = -EBUSY;
1838                 goto done;
1839         }
1840
1841         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1842                 err = -EOPNOTSUPP;
1843                 goto done;
1844         }
1845
1846         if (hdev->dev_type != HCI_BREDR) {
1847                 err = -EOPNOTSUPP;
1848                 goto done;
1849         }
1850
1851         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1852                 err = -EOPNOTSUPP;
1853                 goto done;
1854         }
1855
1856         switch (cmd) {
1857         case HCISETAUTH:
1858                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1859                                    HCI_INIT_TIMEOUT);
1860                 break;
1861
1862         case HCISETENCRYPT:
1863                 if (!lmp_encrypt_capable(hdev)) {
1864                         err = -EOPNOTSUPP;
1865                         break;
1866                 }
1867
1868                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1869                         /* Auth must be enabled first */
1870                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1871                                            HCI_INIT_TIMEOUT);
1872                         if (err)
1873                                 break;
1874                 }
1875
1876                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1877                                    HCI_INIT_TIMEOUT);
1878                 break;
1879
1880         case HCISETSCAN:
1881                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1882                                    HCI_INIT_TIMEOUT);
1883
1884                 /* Ensure that the connectable and discoverable states
1885                  * get correctly modified as this was a non-mgmt change.
1886                  */
1887                 if (!err)
1888                         hci_update_scan_state(hdev, dr.dev_opt);
1889                 break;
1890
1891         case HCISETLINKPOL:
1892                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1893                                    HCI_INIT_TIMEOUT);
1894                 break;
1895
1896         case HCISETLINKMODE:
1897                 hdev->link_mode = ((__u16) dr.dev_opt) &
1898                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1899                 break;
1900
1901         case HCISETPTYPE:
1902                 hdev->pkt_type = (__u16) dr.dev_opt;
1903                 break;
1904
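             /* For the two MTU ioctls, dev_opt packs a pair of 16-bit
              * values; on a little-endian CPU the low half carries the
              * packet count and the high half the MTU, which the casts
              * below unpack in place.
              */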
1905         case HCISETACLMTU:
1906                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1907                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1908                 break;
1909
1910         case HCISETSCOMTU:
1911                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1912                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1913                 break;
1914
1915         default:
1916                 err = -EINVAL;
1917                 break;
1918         }
1919
1920 done:
1921         hci_dev_put(hdev);
1922         return err;
1923 }
1924
1925 int hci_get_dev_list(void __user *arg)
1926 {
1927         struct hci_dev *hdev;
1928         struct hci_dev_list_req *dl;
1929         struct hci_dev_req *dr;
1930         int n = 0, size, err;
1931         __u16 dev_num;
1932
1933         if (get_user(dev_num, (__u16 __user *) arg))
1934                 return -EFAULT;
1935
1936         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1937                 return -EINVAL;
1938
1939         size = sizeof(*dl) + dev_num * sizeof(*dr);
1940
1941         dl = kzalloc(size, GFP_KERNEL);
1942         if (!dl)
1943                 return -ENOMEM;
1944
1945         dr = dl->dev_req;
1946
1947         read_lock(&hci_dev_list_lock);
1948         list_for_each_entry(hdev, &hci_dev_list, list) {
1949                 unsigned long flags = hdev->flags;
1950
1951                 /* When auto-off is still pending, the transport is
1952                  * running, but userspace should still see the device
1953                  * as down.
1954                  */
1955                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1956                         flags &= ~BIT(HCI_UP);
1957
1958                 (dr + n)->dev_id  = hdev->id;
1959                 (dr + n)->dev_opt = flags;
1960
1961                 if (++n >= dev_num)
1962                         break;
1963         }
1964         read_unlock(&hci_dev_list_lock);
1965
1966         dl->dev_num = n;
1967         size = sizeof(*dl) + n * sizeof(*dr);
1968
1969         err = copy_to_user(arg, dl, size);
1970         kfree(dl);
1971
1972         return err ? -EFAULT : 0;
1973 }
1974
1975 int hci_get_dev_info(void __user *arg)
1976 {
1977         struct hci_dev *hdev;
1978         struct hci_dev_info di;
1979         unsigned long flags;
1980         int err = 0;
1981
1982         if (copy_from_user(&di, arg, sizeof(di)))
1983                 return -EFAULT;
1984
1985         hdev = hci_dev_get(di.dev_id);
1986         if (!hdev)
1987                 return -ENODEV;
1988
1989         /* When auto-off is still pending, the transport is
1990          * running, but userspace should still see the device
1991          * as down.
1992          */
1993         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1994                 flags = hdev->flags & ~BIT(HCI_UP);
1995         else
1996                 flags = hdev->flags;
1997
1998         strcpy(di.name, hdev->name);
1999         di.bdaddr   = hdev->bdaddr;
2000         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2001         di.flags    = flags;
2002         di.pkt_type = hdev->pkt_type;
2003         if (lmp_bredr_capable(hdev)) {
2004                 di.acl_mtu  = hdev->acl_mtu;
2005                 di.acl_pkts = hdev->acl_pkts;
2006                 di.sco_mtu  = hdev->sco_mtu;
2007                 di.sco_pkts = hdev->sco_pkts;
2008         } else {
2009                 di.acl_mtu  = hdev->le_mtu;
2010                 di.acl_pkts = hdev->le_pkts;
2011                 di.sco_mtu  = 0;
2012                 di.sco_pkts = 0;
2013         }
2014         di.link_policy = hdev->link_policy;
2015         di.link_mode   = hdev->link_mode;
2016
2017         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2018         memcpy(&di.features, &hdev->features, sizeof(di.features));
2019
2020         if (copy_to_user(arg, &di, sizeof(di)))
2021                 err = -EFAULT;
2022
2023         hci_dev_put(hdev);
2024
2025         return err;
2026 }
2027
2028 /* ---- Interface to HCI drivers ---- */
2029
2030 static int hci_rfkill_set_block(void *data, bool blocked)
2031 {
2032         struct hci_dev *hdev = data;
2033
2034         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2035
2036         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2037                 return -EBUSY;
2038
2039         if (blocked) {
2040                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2041                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2042                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2043                         hci_dev_do_close(hdev);
2044         } else {
2045                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2046         }
2047
2048         return 0;
2049 }
2050
2051 static const struct rfkill_ops hci_rfkill_ops = {
2052         .set_block = hci_rfkill_set_block,
2053 };
2054
2055 static void hci_power_on(struct work_struct *work)
2056 {
2057         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2058         int err;
2059
2060         BT_DBG("%s", hdev->name);
2061
2062         err = hci_dev_do_open(hdev);
2063         if (err < 0) {
2064                 hci_dev_lock(hdev);
2065                 mgmt_set_powered_failed(hdev, err);
2066                 hci_dev_unlock(hdev);
2067                 return;
2068         }
2069
2070         /* During the HCI setup phase, a few error conditions are
2071          * ignored and they need to be checked now. If they are still
2072          * valid, it is important to turn the device back off.
2073          */
2074         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2075             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2076             (hdev->dev_type == HCI_BREDR &&
2077              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2078              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2079                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2080                 hci_dev_do_close(hdev);
2081         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2082                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2083                                    HCI_AUTO_OFF_TIMEOUT);
2084         }
2085
2086         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2087                 /* For unconfigured devices, set the HCI_RAW flag
2088                  * so that userspace can easily identify them.
2089                  */
2090                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2091                         set_bit(HCI_RAW, &hdev->flags);
2092
2093                 /* For fully configured devices, this will send
2094                  * the Index Added event. For unconfigured devices,
2095                  * it will send the Unconfigured Index Added event.
2096                  *
2097                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2098                  * and no event will be sent.
2099                  */
2100                 mgmt_index_added(hdev);
2101         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2102                 /* Once the controller has been configured, it is
2103                  * important to clear the HCI_RAW flag.
2104                  */
2105                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2106                         clear_bit(HCI_RAW, &hdev->flags);
2107
2108                 /* Powering on the controller with HCI_CONFIG set only
2109                  * happens with the transition from unconfigured to
2110                  * configured. This will send the Index Added event.
2111                  */
2112                 mgmt_index_added(hdev);
2113         }
2114 }
2115
2116 static void hci_power_off(struct work_struct *work)
2117 {
2118         struct hci_dev *hdev = container_of(work, struct hci_dev,
2119                                             power_off.work);
2120
2121         BT_DBG("%s", hdev->name);
2122
2123         hci_dev_do_close(hdev);
2124 }
2125
2126 static void hci_error_reset(struct work_struct *work)
2127 {
2128         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2129
2130         BT_DBG("%s", hdev->name);
2131
2132         if (hdev->hw_error)
2133                 hdev->hw_error(hdev, hdev->hw_error_code);
2134         else
2135                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2136                        hdev->hw_error_code);
2137
2138         if (hci_dev_do_close(hdev))
2139                 return;
2140
2141         hci_dev_do_open(hdev);
2142 }
2143
2144 static void hci_discov_off(struct work_struct *work)
2145 {
2146         struct hci_dev *hdev;
2147
2148         hdev = container_of(work, struct hci_dev, discov_off.work);
2149
2150         BT_DBG("%s", hdev->name);
2151
2152         mgmt_discoverable_timeout(hdev);
2153 }
2154
2155 static void hci_adv_timeout_expire(struct work_struct *work)
2156 {
2157         struct hci_dev *hdev;
2158
2159         hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2160
2161         BT_DBG("%s", hdev->name);
2162
2163         mgmt_adv_timeout_expired(hdev);
2164 }
2165
2166 void hci_uuids_clear(struct hci_dev *hdev)
2167 {
2168         struct bt_uuid *uuid, *tmp;
2169
2170         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2171                 list_del(&uuid->list);
2172                 kfree(uuid);
2173         }
2174 }
2175
2176 void hci_link_keys_clear(struct hci_dev *hdev)
2177 {
2178         struct link_key *key;
2179
2180         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2181                 list_del_rcu(&key->list);
2182                 kfree_rcu(key, rcu);
2183         }
2184 }
2185
2186 void hci_smp_ltks_clear(struct hci_dev *hdev)
2187 {
2188         struct smp_ltk *k;
2189
2190         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2191                 list_del_rcu(&k->list);
2192                 kfree_rcu(k, rcu);
2193         }
2194 }
2195
2196 void hci_smp_irks_clear(struct hci_dev *hdev)
2197 {
2198         struct smp_irk *k;
2199
2200         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2201                 list_del_rcu(&k->list);
2202                 kfree_rcu(k, rcu);
2203         }
2204 }
2205
2206 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2207 {
2208         struct link_key *k;
2209
2210         rcu_read_lock();
2211         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2212                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2213                         rcu_read_unlock();
2214                         return k;
2215                 }
2216         }
2217         rcu_read_unlock();
2218
2219         return NULL;
2220 }
2221
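     /* Decide whether a BR/EDR link key is worth storing persistently,
      * based on the key type and the bonding requirements that the
      * local and the remote side announced during pairing.
      */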
2222 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2223                                u8 key_type, u8 old_key_type)
2224 {
2225         /* Legacy key */
2226         if (key_type < 0x03)
2227                 return true;
2228
2229         /* Debug keys are insecure so don't store them persistently */
2230         if (key_type == HCI_LK_DEBUG_COMBINATION)
2231                 return false;
2232
2233         /* Changed combination key and there's no previous one */
2234         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2235                 return false;
2236
2237         /* Security mode 3 case */
2238         if (!conn)
2239                 return true;
2240
2241         /* BR/EDR key derived using SC from an LE link */
2242         if (conn->type == LE_LINK)
2243                 return true;
2244
2245         /* Neither the local nor the remote side requested no-bonding */
2246         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2247                 return true;
2248
2249         /* Local side had dedicated bonding as requirement */
2250         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2251                 return true;
2252
2253         /* Remote side had dedicated bonding as requirement */
2254         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2255                 return true;
2256
2257         /* If none of the above criteria match, then don't store the key
2258          * persistently */
2259         return false;
2260 }
2261
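     /* Map an LTK type to the role it is used in: SMP_LTK denotes a
      * key for the master role; every other type is treated as a
      * slave role key.
      */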
2262 static u8 ltk_role(u8 type)
2263 {
2264         if (type == SMP_LTK)
2265                 return HCI_ROLE_MASTER;
2266
2267         return HCI_ROLE_SLAVE;
2268 }
2269
2270 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2271                              u8 addr_type, u8 role)
2272 {
2273         struct smp_ltk *k;
2274
2275         rcu_read_lock();
2276         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2277                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2278                         continue;
2279
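                     /* Keys from Secure Connections pairing are valid
                      * for either role; legacy LTKs are role specific.
                      */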
2280                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2281                         rcu_read_unlock();
2282                         return k;
2283                 }
2284         }
2285         rcu_read_unlock();
2286
2287         return NULL;
2288 }
2289
2290 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2291 {
2292         struct smp_irk *irk;
2293
2294         rcu_read_lock();
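             /* First look for an exact match against a previously
              * cached RPA; only when that fails, run the more expensive
              * AES based smp_irk_matches() check and cache the result.
              */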
2295         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2296                 if (!bacmp(&irk->rpa, rpa)) {
2297                         rcu_read_unlock();
2298                         return irk;
2299                 }
2300         }
2301
2302         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2303                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2304                         bacpy(&irk->rpa, rpa);
2305                         rcu_read_unlock();
2306                         return irk;
2307                 }
2308         }
2309         rcu_read_unlock();
2310
2311         return NULL;
2312 }
2313
2314 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2315                                      u8 addr_type)
2316 {
2317         struct smp_irk *irk;
2318
2319         /* Identity Address must be public or static random */
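         /* Static random addresses have the two most significant bits
          * of the address set to 1, hence the 0xc0 mask check below.
          */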
2320         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2321                 return NULL;
2322
2323         rcu_read_lock();
2324         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2325                 if (addr_type == irk->addr_type &&
2326                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2327                         rcu_read_unlock();
2328                         return irk;
2329                 }
2330         }
2331         rcu_read_unlock();
2332
2333         return NULL;
2334 }
2335
2336 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2337                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2338                                   u8 pin_len, bool *persistent)
2339 {
2340         struct link_key *key, *old_key;
2341         u8 old_key_type;
2342
2343         old_key = hci_find_link_key(hdev, bdaddr);
2344         if (old_key) {
2345                 old_key_type = old_key->type;
2346                 key = old_key;
2347         } else {
2348                 old_key_type = conn ? conn->key_type : 0xff;
2349                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2350                 if (!key)
2351                         return NULL;
2352                 list_add_rcu(&key->list, &hdev->link_keys);
2353         }
2354
2355         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2356
2357         /* Some buggy controller combinations generate a changed
2358          * combination key for legacy pairing even when there's no
2359          * previous key */
2360         if (type == HCI_LK_CHANGED_COMBINATION &&
2361             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2362                 type = HCI_LK_COMBINATION;
2363                 if (conn)
2364                         conn->key_type = type;
2365         }
2366
2367         bacpy(&key->bdaddr, bdaddr);
2368         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2369         key->pin_len = pin_len;
2370
2371         if (type == HCI_LK_CHANGED_COMBINATION)
2372                 key->type = old_key_type;
2373         else
2374                 key->type = type;
2375
2376         if (persistent)
2377                 *persistent = hci_persistent_key(hdev, conn, type,
2378                                                  old_key_type);
2379
2380         return key;
2381 }
2382
2383 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2384                             u8 addr_type, u8 type, u8 authenticated,
2385                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2386 {
2387         struct smp_ltk *key, *old_key;
2388         u8 role = ltk_role(type);
2389
2390         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2391         if (old_key)
2392                 key = old_key;
2393         else {
2394                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2395                 if (!key)
2396                         return NULL;
2397                 list_add_rcu(&key->list, &hdev->long_term_keys);
2398         }
2399
2400         bacpy(&key->bdaddr, bdaddr);
2401         key->bdaddr_type = addr_type;
2402         memcpy(key->val, tk, sizeof(key->val));
2403         key->authenticated = authenticated;
2404         key->ediv = ediv;
2405         key->rand = rand;
2406         key->enc_size = enc_size;
2407         key->type = type;
2408
2409         return key;
2410 }
2411
2412 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2413                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2414 {
2415         struct smp_irk *irk;
2416
2417         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2418         if (!irk) {
2419                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2420                 if (!irk)
2421                         return NULL;
2422
2423                 bacpy(&irk->bdaddr, bdaddr);
2424                 irk->addr_type = addr_type;
2425
2426                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2427         }
2428
2429         memcpy(irk->val, val, 16);
2430         bacpy(&irk->rpa, rpa);
2431
2432         return irk;
2433 }
2434
2435 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2436 {
2437         struct link_key *key;
2438
2439         key = hci_find_link_key(hdev, bdaddr);
2440         if (!key)
2441                 return -ENOENT;
2442
2443         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2444
2445         list_del_rcu(&key->list);
2446         kfree_rcu(key, rcu);
2447
2448         return 0;
2449 }
2450
2451 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2452 {
2453         struct smp_ltk *k;
2454         int removed = 0;
2455
2456         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2457                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2458                         continue;
2459
2460                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2461
2462                 list_del_rcu(&k->list);
2463                 kfree_rcu(k, rcu);
2464                 removed++;
2465         }
2466
2467         return removed ? 0 : -ENOENT;
2468 }
2469
2470 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2471 {
2472         struct smp_irk *k;
2473
2474         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2475                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2476                         continue;
2477
2478                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2479
2480                 list_del_rcu(&k->list);
2481                 kfree_rcu(k, rcu);
2482         }
2483 }
2484
2485 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2486 {
2487         struct smp_ltk *k;
2488         struct smp_irk *irk;
2489         u8 addr_type;
2490
2491         if (type == BDADDR_BREDR) {
2492                 if (hci_find_link_key(hdev, bdaddr))
2493                         return true;
2494                 return false;
2495         }
2496
2497         /* Convert to the HCI address type that struct smp_ltk uses */
2498         if (type == BDADDR_LE_PUBLIC)
2499                 addr_type = ADDR_LE_DEV_PUBLIC;
2500         else
2501                 addr_type = ADDR_LE_DEV_RANDOM;
2502
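         /* If an IRK resolves the address, look up the LTKs under the
          * peer's identity address rather than the random address that
          * was given to us.
          */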
2503         irk = hci_get_irk(hdev, bdaddr, addr_type);
2504         if (irk) {
2505                 bdaddr = &irk->bdaddr;
2506                 addr_type = irk->addr_type;
2507         }
2508
2509         rcu_read_lock();
2510         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2511                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2512                         rcu_read_unlock();
2513                         return true;
2514                 }
2515         }
2516         rcu_read_unlock();
2517
2518         return false;
2519 }
2520
2521 /* HCI command timer function */
2522 static void hci_cmd_timeout(struct work_struct *work)
2523 {
2524         struct hci_dev *hdev = container_of(work, struct hci_dev,
2525                                             cmd_timer.work);
2526
2527         if (hdev->sent_cmd) {
2528                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2529                 u16 opcode = __le16_to_cpu(sent->opcode);
2530
2531                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2532         } else {
2533                 BT_ERR("%s command tx timeout", hdev->name);
2534         }
2535
2536         atomic_set(&hdev->cmd_cnt, 1);
2537         queue_work(hdev->workqueue, &hdev->cmd_work);
2538 }
2539
2540 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2541                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2542 {
2543         struct oob_data *data;
2544
2545         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2546                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2547                         continue;
2548                 if (data->bdaddr_type != bdaddr_type)
2549                         continue;
2550                 return data;
2551         }
2552
2553         return NULL;
2554 }
2555
2556 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2557                                u8 bdaddr_type)
2558 {
2559         struct oob_data *data;
2560
2561         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2562         if (!data)
2563                 return -ENOENT;
2564
2565         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2566
2567         list_del(&data->list);
2568         kfree(data);
2569
2570         return 0;
2571 }
2572
2573 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2574 {
2575         struct oob_data *data, *n;
2576
2577         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2578                 list_del(&data->list);
2579                 kfree(data);
2580         }
2581 }
2582
2583 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2584                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2585                             u8 *hash256, u8 *rand256)
2586 {
2587         struct oob_data *data;
2588
2589         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2590         if (!data) {
2591                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2592                 if (!data)
2593                         return -ENOMEM;
2594
2595                 bacpy(&data->bdaddr, bdaddr);
2596                 data->bdaddr_type = bdaddr_type;
2597                 list_add(&data->list, &hdev->remote_oob_data);
2598         }
2599
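             /* The present field is a bitmask: bit 0 set means P-192
              * OOB data is available and bit 1 set means P-256 data is
              * available, so 0x03 indicates both.
              */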
2600         if (hash192 && rand192) {
2601                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2602                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2603                 if (hash256 && rand256)
2604                         data->present = 0x03;
2605         } else {
2606                 memset(data->hash192, 0, sizeof(data->hash192));
2607                 memset(data->rand192, 0, sizeof(data->rand192));
2608                 if (hash256 && rand256)
2609                         data->present = 0x02;
2610                 else
2611                         data->present = 0x00;
2612         }
2613
2614         if (hash256 && rand256) {
2615                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2616                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2617         } else {
2618                 memset(data->hash256, 0, sizeof(data->hash256));
2619                 memset(data->rand256, 0, sizeof(data->rand256));
2620                 if (hash192 && rand192)
2621                         data->present = 0x01;
2622         }
2623
2624         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2625
2626         return 0;
2627 }
2628
2629 /* This function requires the caller holds hdev->lock */
2630 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2631 {
2632         struct adv_info *adv_instance;
2633
2634         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2635                 if (adv_instance->instance == instance)
2636                         return adv_instance;
2637         }
2638
2639         return NULL;
2640 }
2641
2642 /* This function requires the caller holds hdev->lock */
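     /* Returns the instance following @instance, wrapping around to
      * the first entry after the last one.
      */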
2643 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
     {
2644         struct adv_info *cur_instance;
2645
2646         cur_instance = hci_find_adv_instance(hdev, instance);
2647         if (!cur_instance)
2648                 return NULL;
2649
2650         if (cur_instance == list_last_entry(&hdev->adv_instances,
2651                                             struct adv_info, list))
2652                 return list_first_entry(&hdev->adv_instances,
2653                                         struct adv_info, list);
2654         else
2655                 return list_next_entry(cur_instance, list);
2656 }
2657
2658 /* This function requires the caller holds hdev->lock */
2659 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2660 {
2661         struct adv_info *adv_instance;
2662
2663         adv_instance = hci_find_adv_instance(hdev, instance);
2664         if (!adv_instance)
2665                 return -ENOENT;
2666
2667         BT_DBG("%s removing instance %d", hdev->name, instance);
2668
2669         if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2670                 cancel_delayed_work(&hdev->adv_instance_expire);
2671                 hdev->adv_instance_timeout = 0;
2672         }
2673
2674         list_del(&adv_instance->list);
2675         kfree(adv_instance);
2676
2677         hdev->adv_instance_cnt--;
2678
2679         return 0;
2680 }
2681
2682 /* This function requires the caller holds hdev->lock */
2683 void hci_adv_instances_clear(struct hci_dev *hdev)
2684 {
2685         struct adv_info *adv_instance, *n;
2686
2687         if (hdev->adv_instance_timeout) {
2688                 cancel_delayed_work(&hdev->adv_instance_expire);
2689                 hdev->adv_instance_timeout = 0;
2690         }
2691
2692         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2693                 list_del(&adv_instance->list);
2694                 kfree(adv_instance);
2695         }
2696
2697         hdev->adv_instance_cnt = 0;
2698 }
2699
2700 /* This function requires the caller holds hdev->lock */
2701 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2702                          u16 adv_data_len, u8 *adv_data,
2703                          u16 scan_rsp_len, u8 *scan_rsp_data,
2704                          u16 timeout, u16 duration)
2705 {
2706         struct adv_info *adv_instance;
2707
2708         adv_instance = hci_find_adv_instance(hdev, instance);
2709         if (adv_instance) {
2710                 memset(adv_instance->adv_data, 0,
2711                        sizeof(adv_instance->adv_data));
2712                 memset(adv_instance->scan_rsp_data, 0,
2713                        sizeof(adv_instance->scan_rsp_data));
2714         } else {
2715                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2716                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2717                         return -EOVERFLOW;
2718
2719                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2720                 if (!adv_instance)
2721                         return -ENOMEM;
2722
2723                 adv_instance->pending = true;
2724                 adv_instance->instance = instance;
2725                 list_add(&adv_instance->list, &hdev->adv_instances);
2726                 hdev->adv_instance_cnt++;
2727         }
2728
2729         adv_instance->flags = flags;
2730         adv_instance->adv_data_len = adv_data_len;
2731         adv_instance->scan_rsp_len = scan_rsp_len;
2732
2733         if (adv_data_len)
2734                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2735
2736         if (scan_rsp_len)
2737                 memcpy(adv_instance->scan_rsp_data,
2738                        scan_rsp_data, scan_rsp_len);
2739
2740         adv_instance->timeout = timeout;
2741         adv_instance->remaining_time = timeout;
2742
2743         if (duration == 0)
2744                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2745         else
2746                 adv_instance->duration = duration;
2747
2748         BT_DBG("%s for instance %d", hdev->name, instance);
2749
2750         return 0;
2751 }
2752
2753 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2754                                          bdaddr_t *bdaddr, u8 type)
2755 {
2756         struct bdaddr_list *b;
2757
2758         list_for_each_entry(b, bdaddr_list, list) {
2759                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2760                         return b;
2761         }
2762
2763         return NULL;
2764 }
2765
2766 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2767 {
2768         struct list_head *p, *n;
2769
2770         list_for_each_safe(p, n, bdaddr_list) {
2771                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2772
2773                 list_del(p);
2774                 kfree(b);
2775         }
2776 }
2777
2778 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2779 {
2780         struct bdaddr_list *entry;
2781
2782         if (!bacmp(bdaddr, BDADDR_ANY))
2783                 return -EBADF;
2784
2785         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2786                 return -EEXIST;
2787
2788         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2789         if (!entry)
2790                 return -ENOMEM;
2791
2792         bacpy(&entry->bdaddr, bdaddr);
2793         entry->bdaddr_type = type;
2794
2795         list_add(&entry->list, list);
2796
2797         return 0;
2798 }
2799
2800 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2801 {
2802         struct bdaddr_list *entry;
2803
2804         if (!bacmp(bdaddr, BDADDR_ANY)) {
2805                 hci_bdaddr_list_clear(list);
2806                 return 0;
2807         }
2808
2809         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2810         if (!entry)
2811                 return -ENOENT;
2812
2813         list_del(&entry->list);
2814         kfree(entry);
2815
2816         return 0;
2817 }
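
     /* Usage sketch (hypothetical caller, for illustration only): these
      * helpers back address lists such as hdev->whitelist and
      * hdev->le_white_list, e.g.:
      *
      *	err = hci_bdaddr_list_add(&hdev->le_white_list, &addr,
      *				  BDADDR_LE_PUBLIC);
      *	if (err && err != -EEXIST)
      *		return err;
      */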
2818
2819 /* This function requires the caller holds hdev->lock */
2820 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2821                                                bdaddr_t *addr, u8 addr_type)
2822 {
2823         struct hci_conn_params *params;
2824
2825         list_for_each_entry(params, &hdev->le_conn_params, list) {
2826                 if (bacmp(&params->addr, addr) == 0 &&
2827                     params->addr_type == addr_type) {
2828                         return params;
2829                 }
2830         }
2831
2832         return NULL;
2833 }
2834
2835 /* This function requires the caller holds hdev->lock */
2836 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2837                                                   bdaddr_t *addr, u8 addr_type)
2838 {
2839         struct hci_conn_params *param;
2840
2841         list_for_each_entry(param, list, action) {
2842                 if (bacmp(&param->addr, addr) == 0 &&
2843                     param->addr_type == addr_type)
2844                         return param;
2845         }
2846
2847         return NULL;
2848 }
2849
2850 /* This function requires the caller holds hdev->lock */
2851 struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
2852                                                     bdaddr_t *addr,
2853                                                     u8 addr_type)
2854 {
2855         struct hci_conn_params *param;
2856
2857         list_for_each_entry(param, &hdev->pend_le_conns, action) {
2858                 if (bacmp(&param->addr, addr) == 0 &&
2859                     param->addr_type == addr_type &&
2860                     param->explicit_connect)
2861                         return param;
2862         }
2863
2864         list_for_each_entry(param, &hdev->pend_le_reports, action) {
2865                 if (bacmp(&param->addr, addr) == 0 &&
2866                     param->addr_type == addr_type &&
2867                     param->explicit_connect)
2868                         return param;
2869         }
2870
2871         return NULL;
2872 }
2873
2874 /* This function requires the caller holds hdev->lock */
2875 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2876                                             bdaddr_t *addr, u8 addr_type)
2877 {
2878         struct hci_conn_params *params;
2879
2880         params = hci_conn_params_lookup(hdev, addr, addr_type);
2881         if (params)
2882                 return params;
2883
2884         params = kzalloc(sizeof(*params), GFP_KERNEL);
2885         if (!params) {
2886                 BT_ERR("Out of memory");
2887                 return NULL;
2888         }
2889
2890         bacpy(&params->addr, addr);
2891         params->addr_type = addr_type;
2892
2893         list_add(&params->list, &hdev->le_conn_params);
2894         INIT_LIST_HEAD(&params->action);
2895
2896         params->conn_min_interval = hdev->le_conn_min_interval;
2897         params->conn_max_interval = hdev->le_conn_max_interval;
2898         params->conn_latency = hdev->le_conn_latency;
2899         params->supervision_timeout = hdev->le_supv_timeout;
2900         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2901
2902         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2903
2904         return params;
2905 }
2906
2907 static void hci_conn_params_free(struct hci_conn_params *params)
2908 {
2909         if (params->conn) {
2910                 hci_conn_drop(params->conn);
2911                 hci_conn_put(params->conn);
2912         }
2913
2914         list_del(&params->action);
2915         list_del(&params->list);
2916         kfree(params);
2917 }
2918
2919 /* This function requires the caller holds hdev->lock */
2920 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2921 {
2922         struct hci_conn_params *params;
2923
2924         params = hci_conn_params_lookup(hdev, addr, addr_type);
2925         if (!params)
2926                 return;
2927
2928         hci_conn_params_free(params);
2929
2930         hci_update_background_scan(hdev);
2931
2932         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2933 }
2934
2935 /* This function requires the caller holds hdev->lock */
2936 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2937 {
2938         struct hci_conn_params *params, *tmp;
2939
2940         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2941                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2942                         continue;
2943
2944                 /* When trying to establish a one-time connection to a
2945                  * disabled device, leave the params but mark them for
2946                  * one-time (explicit) use. */
2947                 if (params->explicit_connect) {
2948                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2949                         continue;
2950                 }
2951
2952                 list_del(&params->list);
2953                 kfree(params);
2954         }
2955
2956         BT_DBG("All disabled LE connection parameters were removed");
2957 }
2958
2959 /* This function requires the caller holds hdev->lock */
2960 void hci_conn_params_clear_all(struct hci_dev *hdev)
2961 {
2962         struct hci_conn_params *params, *tmp;
2963
2964         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2965                 hci_conn_params_free(params);
2966
2967         hci_update_background_scan(hdev);
2968
2969         BT_DBG("All LE connection parameters were removed");
2970 }
2971
2972 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2973 {
2974         if (status) {
2975                 BT_ERR("Failed to start inquiry: status %d", status);
2976
2977                 hci_dev_lock(hdev);
2978                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2979                 hci_dev_unlock(hdev);
2980                 return;
2981         }
2982 }
2983
2984 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2985                                           u16 opcode)
2986 {
2987         /* General inquiry access code (GIAC) */
2988         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2989         struct hci_cp_inquiry cp;
2990         int err;
2991
2992         if (status) {
2993                 BT_ERR("Failed to disable LE scanning: status %d", status);
2994                 return;
2995         }
2996
2997         hdev->discovery.scan_start = 0;
2998
2999         switch (hdev->discovery.type) {
3000         case DISCOV_TYPE_LE:
3001                 hci_dev_lock(hdev);
3002                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3003                 hci_dev_unlock(hdev);
3004                 break;
3005
3006         case DISCOV_TYPE_INTERLEAVED:
3007                 hci_dev_lock(hdev);
3008
3009                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3010                              &hdev->quirks)) {
3011                         /* If we were running an LE-only scan, change the
3012                          * discovery state. If we were running LE and BR/EDR
3013                          * inquiry simultaneously and the BR/EDR inquiry has
3014                          * already finished, stop discovery; otherwise it
3015                          * stops discovery when it finishes. If we are
3016                          * resolving a remote device name, do not change it.
3017                          */
3018                         if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3019                             hdev->discovery.state != DISCOVERY_RESOLVING)
3020                                 hci_discovery_set_state(hdev,
3021                                                         DISCOVERY_STOPPED);
3022                 } else {
3023                         struct hci_request req;
3024
3025                         hci_inquiry_cache_flush(hdev);
3026
3027                         hci_req_init(&req, hdev);
3028
3029                         memset(&cp, 0, sizeof(cp));
3030                         memcpy(&cp.lap, lap, sizeof(cp.lap));
3031                         cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3032                         hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3033
3034                         err = hci_req_run(&req, inquiry_complete);
3035                         if (err) {
3036                                 BT_ERR("Inquiry request failed: err %d", err);
3037                                 hci_discovery_set_state(hdev,
3038                                                         DISCOVERY_STOPPED);
3039                         }
3040                 }
3041
3042                 hci_dev_unlock(hdev);
3043                 break;
3044         }
3045 }
3046
3047 static void le_scan_disable_work(struct work_struct *work)
3048 {
3049         struct hci_dev *hdev = container_of(work, struct hci_dev,
3050                                             le_scan_disable.work);
3051         struct hci_request req;
3052         int err;
3053
3054         BT_DBG("%s", hdev->name);
3055
3056         cancel_delayed_work_sync(&hdev->le_scan_restart);
3057
3058         hci_req_init(&req, hdev);
3059
3060         hci_req_add_le_scan_disable(&req);
3061
3062         err = hci_req_run(&req, le_scan_disable_work_complete);
3063         if (err)
3064                 BT_ERR("Disable LE scanning request failed: err %d", err);
3065 }
3066
3067 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3068                                           u16 opcode)
3069 {
3070         unsigned long timeout, duration, scan_start, now;
3071
3072         BT_DBG("%s", hdev->name);
3073
3074         if (status) {
3075                 BT_ERR("Failed to restart LE scan: status %d", status);
3076                 return;
3077         }
3078
3079         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3080             !hdev->discovery.scan_start)
3081                 return;
3082
3083         /* When the scan was started, hdev->le_scan_disable was queued
3084          * to run after the scan duration, counted from scan_start. The
3085          * scan restart canceled that work, so queue it again with the
3086          * remaining timeout to make sure the scan does not run indefinitely.
3087          */
3088         duration = hdev->discovery.scan_duration;
3089         scan_start = hdev->discovery.scan_start;
3090         now = jiffies;
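             /* The elapsed time is computed so that the remaining
              * timeout stays correct even when jiffies has wrapped
              * around since the scan was started.
              */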
3091         if (now - scan_start <= duration) {
3092                 int elapsed;
3093
3094                 if (now >= scan_start)
3095                         elapsed = now - scan_start;
3096                 else
3097                         elapsed = ULONG_MAX - scan_start + now;
3098
3099                 timeout = duration - elapsed;
3100         } else {
3101                 timeout = 0;
3102         }
3103         queue_delayed_work(hdev->workqueue,
3104                            &hdev->le_scan_disable, timeout);
3105 }
3106
3107 static void le_scan_restart_work(struct work_struct *work)
3108 {
3109         struct hci_dev *hdev = container_of(work, struct hci_dev,
3110                                             le_scan_restart.work);
3111         struct hci_request req;
3112         struct hci_cp_le_set_scan_enable cp;
3113         int err;
3114
3115         BT_DBG("%s", hdev->name);
3116
3117         /* If the controller is not scanning, we are done. */
3118         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3119                 return;
3120
3121         hci_req_init(&req, hdev);
3122
3123         hci_req_add_le_scan_disable(&req);
3124
3125         memset(&cp, 0, sizeof(cp));
3126         cp.enable = LE_SCAN_ENABLE;
3127         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3128         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3129
3130         err = hci_req_run(&req, le_scan_restart_work_complete);
3131         if (err)
3132                 BT_ERR("Restart LE scan request failed: err %d", err);
3133 }
3134
3135 /* Copy the Identity Address of the controller.
3136  *
3137  * If the controller has a public BD_ADDR, then by default use that one.
3138  * If this is an LE-only controller without a public address, default to
3139  * the static random address.
3140  *
3141  * For debugging purposes it is possible to force controllers with a
3142  * public address to use the static random address instead.
3143  *
3144  * In case BR/EDR has been disabled on a dual-mode controller and
3145  * userspace has configured a static address, then that address
3146  * becomes the identity address instead of the public BR/EDR address.
3147  */
3148 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3149                                u8 *bdaddr_type)
3150 {
3151         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3152             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3153             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3154              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3155                 bacpy(bdaddr, &hdev->static_addr);
3156                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3157         } else {
3158                 bacpy(bdaddr, &hdev->bdaddr);
3159                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3160         }
3161 }
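
/*
 * Editorial sketch, not part of the original file: a minimal caller of
 * hci_copy_identity_address(). The variable names are illustrative only.
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 *
 * Afterwards id_addr_type is ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM,
 * following the rules described in the comment above.
 */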
3162
3163 /* Alloc HCI device */
3164 struct hci_dev *hci_alloc_dev(void)
3165 {
3166         struct hci_dev *hdev;
3167
3168         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3169         if (!hdev)
3170                 return NULL;
3171
3172         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3173         hdev->esco_type = (ESCO_HV1);
3174         hdev->link_mode = (HCI_LM_ACCEPT);
3175         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3176         hdev->io_capability = 0x03;     /* No Input No Output */
3177         hdev->manufacturer = 0xffff;    /* Default to internal use */
3178         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3179         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3180         hdev->adv_instance_cnt = 0;
3181         hdev->cur_adv_instance = 0x00;
3182         hdev->adv_instance_timeout = 0;
3183
3184         hdev->sniff_max_interval = 800;
3185         hdev->sniff_min_interval = 80;
3186
3187         hdev->le_adv_channel_map = 0x07;
3188         hdev->le_adv_min_interval = 0x0800;
3189         hdev->le_adv_max_interval = 0x0800;
3190         hdev->le_scan_interval = 0x0060;
3191         hdev->le_scan_window = 0x0030;
3192         hdev->le_conn_min_interval = 0x0028;
3193         hdev->le_conn_max_interval = 0x0038;
3194         hdev->le_conn_latency = 0x0000;
3195         hdev->le_supv_timeout = 0x002a;
3196         hdev->le_def_tx_len = 0x001b;
3197         hdev->le_def_tx_time = 0x0148;
3198         hdev->le_max_tx_len = 0x001b;
3199         hdev->le_max_tx_time = 0x0148;
3200         hdev->le_max_rx_len = 0x001b;
3201         hdev->le_max_rx_time = 0x0148;
3202
3203         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3204         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3205         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3206         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3207
3208         mutex_init(&hdev->lock);
3209         mutex_init(&hdev->req_lock);
3210
3211         INIT_LIST_HEAD(&hdev->mgmt_pending);
3212         INIT_LIST_HEAD(&hdev->blacklist);
3213         INIT_LIST_HEAD(&hdev->whitelist);
3214         INIT_LIST_HEAD(&hdev->uuids);
3215         INIT_LIST_HEAD(&hdev->link_keys);
3216         INIT_LIST_HEAD(&hdev->long_term_keys);
3217         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3218         INIT_LIST_HEAD(&hdev->remote_oob_data);
3219         INIT_LIST_HEAD(&hdev->le_white_list);
3220         INIT_LIST_HEAD(&hdev->le_conn_params);
3221         INIT_LIST_HEAD(&hdev->pend_le_conns);
3222         INIT_LIST_HEAD(&hdev->pend_le_reports);
3223         INIT_LIST_HEAD(&hdev->conn_hash.list);
3224         INIT_LIST_HEAD(&hdev->adv_instances);
3225
3226         INIT_WORK(&hdev->rx_work, hci_rx_work);
3227         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3228         INIT_WORK(&hdev->tx_work, hci_tx_work);
3229         INIT_WORK(&hdev->power_on, hci_power_on);
3230         INIT_WORK(&hdev->error_reset, hci_error_reset);
3231
3232         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3233         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3234         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3235         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3236         INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3237
3238         skb_queue_head_init(&hdev->rx_q);
3239         skb_queue_head_init(&hdev->cmd_q);
3240         skb_queue_head_init(&hdev->raw_q);
3241
3242         init_waitqueue_head(&hdev->req_wait_q);
3243
3244         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3245
3246         hci_init_sysfs(hdev);
3247         discovery_init(hdev);
3248
3249         return hdev;
3250 }
3251 EXPORT_SYMBOL(hci_alloc_dev);
3252
3253 /* Free HCI device */
3254 void hci_free_dev(struct hci_dev *hdev)
3255 {
3256         /* will free via device release */
3257         put_device(&hdev->dev);
3258 }
3259 EXPORT_SYMBOL(hci_free_dev);
3260
3261 /* Register HCI device */
3262 int hci_register_dev(struct hci_dev *hdev)
3263 {
3264         int id, error;
3265
3266         if (!hdev->open || !hdev->close || !hdev->send)
3267                 return -EINVAL;
3268
3269         /* Do not allow HCI_AMP devices to register at index 0,
3270          * so the index can be used as the AMP controller ID.
3271          */
3272         switch (hdev->dev_type) {
3273         case HCI_BREDR:
3274                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3275                 break;
3276         case HCI_AMP:
3277                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3278                 break;
3279         default:
3280                 return -EINVAL;
3281         }
3282
3283         if (id < 0)
3284                 return id;
3285
3286         sprintf(hdev->name, "hci%d", id);
3287         hdev->id = id;
3288
3289         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3290
3291         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3292                                           WQ_MEM_RECLAIM, 1, hdev->name);
3293         if (!hdev->workqueue) {
3294                 error = -ENOMEM;
3295                 goto err;
3296         }
3297
3298         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3299                                               WQ_MEM_RECLAIM, 1, hdev->name);
3300         if (!hdev->req_workqueue) {
3301                 destroy_workqueue(hdev->workqueue);
3302                 error = -ENOMEM;
3303                 goto err;
3304         }
3305
3306         if (!IS_ERR_OR_NULL(bt_debugfs))
3307                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3308
3309         dev_set_name(&hdev->dev, "%s", hdev->name);
3310
3311         error = device_add(&hdev->dev);
3312         if (error < 0)
3313                 goto err_wqueue;
3314
3315         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3316                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3317                                     hdev);
3318         if (hdev->rfkill) {
3319                 if (rfkill_register(hdev->rfkill) < 0) {
3320                         rfkill_destroy(hdev->rfkill);
3321                         hdev->rfkill = NULL;
3322                 }
3323         }
3324
3325         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3326                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3327
3328         hci_dev_set_flag(hdev, HCI_SETUP);
3329         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3330
3331         if (hdev->dev_type == HCI_BREDR) {
3332                 /* Assume BR/EDR support until proven otherwise (such as
3333          * through reading supported features during init).
3334                  */
3335                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3336         }
3337
3338         write_lock(&hci_dev_list_lock);
3339         list_add(&hdev->list, &hci_dev_list);
3340         write_unlock(&hci_dev_list_lock);
3341
3342         /* Devices that are marked for raw-only usage are unconfigured
3343          * and should not be included in normal operation.
3344          */
3345         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3346                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3347
3348         hci_notify(hdev, HCI_DEV_REG);
3349         hci_dev_hold(hdev);
3350
3351         queue_work(hdev->req_workqueue, &hdev->power_on);
3352
3353         return id;
3354
3355 err_wqueue:
3356         destroy_workqueue(hdev->workqueue);
3357         destroy_workqueue(hdev->req_workqueue);
3358 err:
3359         ida_simple_remove(&hci_index_ida, hdev->id);
3360
3361         return error;
3362 }
3363 EXPORT_SYMBOL(hci_register_dev);
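
/*
 * Editorial sketch, not part of the original file: how a transport driver
 * typically pairs the allocation, registration and free helpers above.
 * Everything prefixed my_ is hypothetical; open, close and send are the
 * three callbacks hci_register_dev() refuses to do without (see the
 * -EINVAL check above).
 */
static int my_open(struct hci_dev *hdev)
{
        return 0;
}

static int my_close(struct hci_dev *hdev)
{
        return 0;
}

static int my_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* A real driver would hand the skb to its hardware here */
        kfree_skb(skb);
        return 0;
}

static int my_probe_sketch(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus = HCI_VIRTUAL;
        hdev->open = my_open;
        hdev->close = my_close;
        hdev->send = my_send;

        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
                return err;
        }

        return 0;
}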
3364
3365 /* Unregister HCI device */
3366 void hci_unregister_dev(struct hci_dev *hdev)
3367 {
3368         int id;
3369
3370         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3371
3372         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3373
3374         id = hdev->id;
3375
3376         write_lock(&hci_dev_list_lock);
3377         list_del(&hdev->list);
3378         write_unlock(&hci_dev_list_lock);
3379
3380         hci_dev_do_close(hdev);
3381
3382         cancel_work_sync(&hdev->power_on);
3383
3384         if (!test_bit(HCI_INIT, &hdev->flags) &&
3385             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3386             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3387                 hci_dev_lock(hdev);
3388                 mgmt_index_removed(hdev);
3389                 hci_dev_unlock(hdev);
3390         }
3391
3392         /* mgmt_index_removed should take care of emptying the
3393          * pending list */
3394         BUG_ON(!list_empty(&hdev->mgmt_pending));
3395
3396         hci_notify(hdev, HCI_DEV_UNREG);
3397
3398         if (hdev->rfkill) {
3399                 rfkill_unregister(hdev->rfkill);
3400                 rfkill_destroy(hdev->rfkill);
3401         }
3402
3403         device_del(&hdev->dev);
3404
3405         debugfs_remove_recursive(hdev->debugfs);
3406
3407         destroy_workqueue(hdev->workqueue);
3408         destroy_workqueue(hdev->req_workqueue);
3409
3410         hci_dev_lock(hdev);
3411         hci_bdaddr_list_clear(&hdev->blacklist);
3412         hci_bdaddr_list_clear(&hdev->whitelist);
3413         hci_uuids_clear(hdev);
3414         hci_link_keys_clear(hdev);
3415         hci_smp_ltks_clear(hdev);
3416         hci_smp_irks_clear(hdev);
3417         hci_remote_oob_data_clear(hdev);
3418         hci_adv_instances_clear(hdev);
3419         hci_bdaddr_list_clear(&hdev->le_white_list);
3420         hci_conn_params_clear_all(hdev);
3421         hci_discovery_filter_clear(hdev);
3422         hci_dev_unlock(hdev);
3423
3424         hci_dev_put(hdev);
3425
3426         ida_simple_remove(&hci_index_ida, id);
3427 }
3428 EXPORT_SYMBOL(hci_unregister_dev);
3429
3430 /* Suspend HCI device */
3431 int hci_suspend_dev(struct hci_dev *hdev)
3432 {
3433         hci_notify(hdev, HCI_DEV_SUSPEND);
3434         return 0;
3435 }
3436 EXPORT_SYMBOL(hci_suspend_dev);
3437
3438 /* Resume HCI device */
3439 int hci_resume_dev(struct hci_dev *hdev)
3440 {
3441         hci_notify(hdev, HCI_DEV_RESUME);
3442         return 0;
3443 }
3444 EXPORT_SYMBOL(hci_resume_dev);
3445
3446 /* Reset HCI device */
3447 int hci_reset_dev(struct hci_dev *hdev)
3448 {
3449         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3450         struct sk_buff *skb;
3451
3452         skb = bt_skb_alloc(3, GFP_ATOMIC);
3453         if (!skb)
3454                 return -ENOMEM;
3455
3456         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3457         memcpy(skb_put(skb, 3), hw_err, 3);
3458
3459         /* Send Hardware Error to upper stack */
3460         return hci_recv_frame(hdev, skb);
3461 }
3462 EXPORT_SYMBOL(hci_reset_dev);
3463
3464 /* Receive frame from HCI drivers */
3465 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3466 {
3467         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3468                       !test_bit(HCI_INIT, &hdev->flags))) {
3469                 kfree_skb(skb);
3470                 return -ENXIO;
3471         }
3472
3473         /* Incoming skb */
3474         bt_cb(skb)->incoming = 1;
3475
3476         /* Time stamp */
3477         __net_timestamp(skb);
3478
3479         skb_queue_tail(&hdev->rx_q, skb);
3480         queue_work(hdev->workqueue, &hdev->rx_work);
3481
3482         return 0;
3483 }
3484 EXPORT_SYMBOL(hci_recv_frame);
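
/*
 * Editorial sketch, not part of the original file: how a transport driver
 * would feed one received event packet up the stack, mirroring the
 * pattern used by hci_reset_dev() above. 'data' and 'count' are
 * hypothetical parameters.
 */
static int my_rx_sketch(struct hci_dev *hdev, const void *data, size_t count)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(count, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        memcpy(skb_put(skb, count), data, count);

        return hci_recv_frame(hdev, skb);
}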
3485
3486 /* ---- Interface to upper protocols ---- */
3487
3488 int hci_register_cb(struct hci_cb *cb)
3489 {
3490         BT_DBG("%p name %s", cb, cb->name);
3491
3492         mutex_lock(&hci_cb_list_lock);
3493         list_add_tail(&cb->list, &hci_cb_list);
3494         mutex_unlock(&hci_cb_list_lock);
3495
3496         return 0;
3497 }
3498 EXPORT_SYMBOL(hci_register_cb);
3499
3500 int hci_unregister_cb(struct hci_cb *cb)
3501 {
3502         BT_DBG("%p name %s", cb, cb->name);
3503
3504         mutex_lock(&hci_cb_list_lock);
3505         list_del(&cb->list);
3506         mutex_unlock(&hci_cb_list_lock);
3507
3508         return 0;
3509 }
3510 EXPORT_SYMBOL(hci_unregister_cb);
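
/*
 * Editorial sketch, not part of the original file: an upper protocol
 * registers a static struct hci_cb, usually at module init. The my_proto
 * names are hypothetical; hooks that are not needed may be left NULL.
 */
static void my_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
        /* react to a new connection here */
}

static struct hci_cb my_proto_cb = {
        .name           = "my_proto",
        .connect_cfm    = my_proto_connect_cfm,
};

/* Paired calls, e.g. from module init/exit:
 *	hci_register_cb(&my_proto_cb);
 *	hci_unregister_cb(&my_proto_cb);
 */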
3511
3512 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3513 {
3514         int err;
3515
3516         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3517
3518         /* Time stamp */
3519         __net_timestamp(skb);
3520
3521         /* Send copy to monitor */
3522         hci_send_to_monitor(hdev, skb);
3523
3524         if (atomic_read(&hdev->promisc)) {
3525                 /* Send copy to the sockets */
3526                 hci_send_to_sock(hdev, skb);
3527         }
3528
3529         /* Get rid of skb owner, prior to sending to the driver. */
3530         skb_orphan(skb);
3531
3532         err = hdev->send(hdev, skb);
3533         if (err < 0) {
3534                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3535                 kfree_skb(skb);
3536         }
3537 }
3538
3539 /* Send HCI command */
3540 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3541                  const void *param)
3542 {
3543         struct sk_buff *skb;
3544
3545         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3546
3547         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3548         if (!skb) {
3549                 BT_ERR("%s no memory for command", hdev->name);
3550                 return -ENOMEM;
3551         }
3552
3553         /* Stand-alone HCI commands must be flagged as
3554          * single-command requests.
3555          */
3556         bt_cb(skb)->req.start = true;
3557
3558         skb_queue_tail(&hdev->cmd_q, skb);
3559         queue_work(hdev->workqueue, &hdev->cmd_work);
3560
3561         return 0;
3562 }
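
/*
 * Editorial example, not in the original file: a parameter-less command
 * such as HCI Reset goes out as
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * while commands with parameters pass a struct hci_cp_* and its size.
 */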
3563
3564 /* Get data from the previously sent command */
3565 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3566 {
3567         struct hci_command_hdr *hdr;
3568
3569         if (!hdev->sent_cmd)
3570                 return NULL;
3571
3572         hdr = (void *) hdev->sent_cmd->data;
3573
3574         if (hdr->opcode != cpu_to_le16(opcode))
3575                 return NULL;
3576
3577         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3578
3579         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3580 }
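
/*
 * Editorial example, not in the original file: event handlers use this
 * to recover the parameters of the command being completed, e.g.
 *
 *	struct hci_cp_le_set_scan_enable *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
 *	if (!cp)
 *		return;
 *
 * A NULL return means the last sent command carried a different opcode.
 */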
3581
3582 /* Send ACL data */
3583 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3584 {
3585         struct hci_acl_hdr *hdr;
3586         int len = skb->len;
3587
3588         skb_push(skb, HCI_ACL_HDR_SIZE);
3589         skb_reset_transport_header(skb);
3590         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3591         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3592         hdr->dlen   = cpu_to_le16(len);
3593 }
3594
3595 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3596                           struct sk_buff *skb, __u16 flags)
3597 {
3598         struct hci_conn *conn = chan->conn;
3599         struct hci_dev *hdev = conn->hdev;
3600         struct sk_buff *list;
3601
3602         skb->len = skb_headlen(skb);
3603         skb->data_len = 0;
3604
3605         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3606
3607         switch (hdev->dev_type) {
3608         case HCI_BREDR:
3609                 hci_add_acl_hdr(skb, conn->handle, flags);
3610                 break;
3611         case HCI_AMP:
3612                 hci_add_acl_hdr(skb, chan->handle, flags);
3613                 break;
3614         default:
3615                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3616                 return;
3617         }
3618
3619         list = skb_shinfo(skb)->frag_list;
3620         if (!list) {
3621                 /* Non-fragmented */
3622                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3623
3624                 skb_queue_tail(queue, skb);
3625         } else {
3626                 /* Fragmented */
3627                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3628
3629                 skb_shinfo(skb)->frag_list = NULL;
3630
3631                 /* Queue all fragments atomically. We need to use spin_lock_bh
3632                  * here because for 6LoWPAN links this function can be called
3633                  * from softirq context, and taking a plain spin lock there
3634                  * could cause deadlocks.
3635                  */
3636                 spin_lock_bh(&queue->lock);
3637
3638                 __skb_queue_tail(queue, skb);
3639
3640                 flags &= ~ACL_START;
3641                 flags |= ACL_CONT;
3642                 do {
3643                         skb = list; list = list->next;
3644
3645                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3646                         hci_add_acl_hdr(skb, conn->handle, flags);
3647
3648                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3649
3650                         __skb_queue_tail(queue, skb);
3651                 } while (list);
3652
3653                 spin_unlock_bh(&queue->lock);
3654         }
3655 }
3656
3657 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3658 {
3659         struct hci_dev *hdev = chan->conn->hdev;
3660
3661         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3662
3663         hci_queue_acl(chan, &chan->data_q, skb, flags);
3664
3665         queue_work(hdev->workqueue, &hdev->tx_work);
3666 }
3667
3668 /* Send SCO data */
3669 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3670 {
3671         struct hci_dev *hdev = conn->hdev;
3672         struct hci_sco_hdr hdr;
3673
3674         BT_DBG("%s len %d", hdev->name, skb->len);
3675
3676         hdr.handle = cpu_to_le16(conn->handle);
3677         hdr.dlen   = skb->len;
3678
3679         skb_push(skb, HCI_SCO_HDR_SIZE);
3680         skb_reset_transport_header(skb);
3681         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3682
3683         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3684
3685         skb_queue_tail(&conn->data_q, skb);
3686         queue_work(hdev->workqueue, &hdev->tx_work);
3687 }
3688
3689 /* ---- HCI TX task (outgoing data) ---- */
3690
3691 /* HCI Connection scheduler */
3692 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3693                                      int *quote)
3694 {
3695         struct hci_conn_hash *h = &hdev->conn_hash;
3696         struct hci_conn *conn = NULL, *c;
3697         unsigned int num = 0, min = ~0;
3698
3699         /* We don't have to lock the device here. Connections are always
3700          * added and removed with the TX task disabled. */
3701
3702         rcu_read_lock();
3703
3704         list_for_each_entry_rcu(c, &h->list, list) {
3705                 if (c->type != type || skb_queue_empty(&c->data_q))
3706                         continue;
3707
3708                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3709                         continue;
3710
3711                 num++;
3712
3713                 if (c->sent < min) {
3714                         min  = c->sent;
3715                         conn = c;
3716                 }
3717
3718                 if (hci_conn_num(hdev, type) == num)
3719                         break;
3720         }
3721
3722         rcu_read_unlock();
3723
3724         if (conn) {
3725                 int cnt, q;
3726
3727                 switch (conn->type) {
3728                 case ACL_LINK:
3729                         cnt = hdev->acl_cnt;
3730                         break;
3731                 case SCO_LINK:
3732                 case ESCO_LINK:
3733                         cnt = hdev->sco_cnt;
3734                         break;
3735                 case LE_LINK:
3736                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3737                         break;
3738                 default:
3739                         cnt = 0;
3740                         BT_ERR("Unknown link type");
3741                 }
3742
3743                 q = cnt / num;
3744                 *quote = q ? q : 1;
3745         } else
3746                 *quote = 0;
3747
3748         BT_DBG("conn %p quote %d", conn, *quote);
3749         return conn;
3750 }
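
/*
 * Editorial note, not in the original file: a worked example of the
 * fair-share quota above. With hdev->sco_cnt == 6 and three SCO
 * connections holding queued data, the least-busy connection is chosen
 * and granted quote = 6 / 3 = 2 packets; the quote never drops below 1
 * as long as any controller buffer credit remains.
 */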
3751
3752 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3753 {
3754         struct hci_conn_hash *h = &hdev->conn_hash;
3755         struct hci_conn *c;
3756
3757         BT_ERR("%s link tx timeout", hdev->name);
3758
3759         rcu_read_lock();
3760
3761         /* Kill stalled connections */
3762         list_for_each_entry_rcu(c, &h->list, list) {
3763                 if (c->type == type && c->sent) {
3764                         BT_ERR("%s killing stalled connection %pMR",
3765                                hdev->name, &c->dst);
3766                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3767                 }
3768         }
3769
3770         rcu_read_unlock();
3771 }
3772
3773 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3774                                       int *quote)
3775 {
3776         struct hci_conn_hash *h = &hdev->conn_hash;
3777         struct hci_chan *chan = NULL;
3778         unsigned int num = 0, min = ~0, cur_prio = 0;
3779         struct hci_conn *conn;
3780         int cnt, q, conn_num = 0;
3781
3782         BT_DBG("%s", hdev->name);
3783
3784         rcu_read_lock();
3785
3786         list_for_each_entry_rcu(conn, &h->list, list) {
3787                 struct hci_chan *tmp;
3788
3789                 if (conn->type != type)
3790                         continue;
3791
3792                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3793                         continue;
3794
3795                 conn_num++;
3796
3797                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3798                         struct sk_buff *skb;
3799
3800                         if (skb_queue_empty(&tmp->data_q))
3801                                 continue;
3802
3803                         skb = skb_peek(&tmp->data_q);
3804                         if (skb->priority < cur_prio)
3805                                 continue;
3806
3807                         if (skb->priority > cur_prio) {
3808                                 num = 0;
3809                                 min = ~0;
3810                                 cur_prio = skb->priority;
3811                         }
3812
3813                         num++;
3814
3815                         if (conn->sent < min) {
3816                                 min  = conn->sent;
3817                                 chan = tmp;
3818                         }
3819                 }
3820
3821                 if (hci_conn_num(hdev, type) == conn_num)
3822                         break;
3823         }
3824
3825         rcu_read_unlock();
3826
3827         if (!chan)
3828                 return NULL;
3829
3830         switch (chan->conn->type) {
3831         case ACL_LINK:
3832                 cnt = hdev->acl_cnt;
3833                 break;
3834         case AMP_LINK:
3835                 cnt = hdev->block_cnt;
3836                 break;
3837         case SCO_LINK:
3838         case ESCO_LINK:
3839                 cnt = hdev->sco_cnt;
3840                 break;
3841         case LE_LINK:
3842                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3843                 break;
3844         default:
3845                 cnt = 0;
3846                 BT_ERR("Unknown link type");
3847         }
3848
3849         q = cnt / num;
3850         *quote = q ? q : 1;
3851         BT_DBG("chan %p quote %d", chan, *quote);
3852         return chan;
3853 }
3854
3855 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3856 {
3857         struct hci_conn_hash *h = &hdev->conn_hash;
3858         struct hci_conn *conn;
3859         int num = 0;
3860
3861         BT_DBG("%s", hdev->name);
3862
3863         rcu_read_lock();
3864
3865         list_for_each_entry_rcu(conn, &h->list, list) {
3866                 struct hci_chan *chan;
3867
3868                 if (conn->type != type)
3869                         continue;
3870
3871                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3872                         continue;
3873
3874                 num++;
3875
3876                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3877                         struct sk_buff *skb;
3878
3879                         if (chan->sent) {
3880                                 chan->sent = 0;
3881                                 continue;
3882                         }
3883
3884                         if (skb_queue_empty(&chan->data_q))
3885                                 continue;
3886
3887                         skb = skb_peek(&chan->data_q);
3888                         if (skb->priority >= HCI_PRIO_MAX - 1)
3889                                 continue;
3890
3891                         skb->priority = HCI_PRIO_MAX - 1;
3892
3893                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3894                                skb->priority);
3895                 }
3896
3897                 if (hci_conn_num(hdev, type) == num)
3898                         break;
3899         }
3900
3901         rcu_read_unlock();
3902
3903 }
3904
3905 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3906 {
3907         /* Calculate count of blocks used by this packet */
3908         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3909 }
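
/*
 * Editorial note, not in the original file: a worked example of the
 * block arithmetic. With hdev->block_len == 64, a 339-byte ACL packet
 * (335 bytes of payload after the 4-byte ACL header) occupies
 * DIV_ROUND_UP(335, 64) = 6 controller data blocks.
 */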
3910
3911 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3912 {
3913         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3914                 /* ACL tx timeout must be longer than maximum
3915                  * link supervision timeout (40.9 seconds) */
3916                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3917                                        HCI_ACL_TX_TIMEOUT))
3918                         hci_link_tx_to(hdev, ACL_LINK);
3919         }
3920 }
3921
3922 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3923 {
3924         unsigned int cnt = hdev->acl_cnt;
3925         struct hci_chan *chan;
3926         struct sk_buff *skb;
3927         int quote;
3928
3929         __check_timeout(hdev, cnt);
3930
3931         while (hdev->acl_cnt &&
3932                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3933                 u32 priority = (skb_peek(&chan->data_q))->priority;
3934                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3935                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3936                                skb->len, skb->priority);
3937
3938                         /* Stop if priority has changed */
3939                         if (skb->priority < priority)
3940                                 break;
3941
3942                         skb = skb_dequeue(&chan->data_q);
3943
3944                         hci_conn_enter_active_mode(chan->conn,
3945                                                    bt_cb(skb)->force_active);
3946
3947                         hci_send_frame(hdev, skb);
3948                         hdev->acl_last_tx = jiffies;
3949
3950                         hdev->acl_cnt--;
3951                         chan->sent++;
3952                         chan->conn->sent++;
3953                 }
3954         }
3955
3956         if (cnt != hdev->acl_cnt)
3957                 hci_prio_recalculate(hdev, ACL_LINK);
3958 }
3959
3960 static void hci_sched_acl_blk(struct hci_dev *hdev)
3961 {
3962         unsigned int cnt = hdev->block_cnt;
3963         struct hci_chan *chan;
3964         struct sk_buff *skb;
3965         int quote;
3966         u8 type;
3967
3968         __check_timeout(hdev, cnt);
3969
3970         BT_DBG("%s", hdev->name);
3971
3972         if (hdev->dev_type == HCI_AMP)
3973                 type = AMP_LINK;
3974         else
3975                 type = ACL_LINK;
3976
3977         while (hdev->block_cnt > 0 &&
3978                (chan = hci_chan_sent(hdev, type, &quote))) {
3979                 u32 priority = (skb_peek(&chan->data_q))->priority;
3980                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3981                         int blocks;
3982
3983                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3984                                skb->len, skb->priority);
3985
3986                         /* Stop if priority has changed */
3987                         if (skb->priority < priority)
3988                                 break;
3989
3990                         blocks = __get_blocks(hdev, skb);
3991                         if (blocks > hdev->block_cnt)
3992                                 return;
3993
3994                         skb = skb_dequeue(&chan->data_q);
3995
3996                         hci_conn_enter_active_mode(chan->conn,
3997                                                    bt_cb(skb)->force_active);
3998
3999                         hci_send_frame(hdev, skb);
4000                         hdev->acl_last_tx = jiffies;
4001
4002                         hdev->block_cnt -= blocks;
4003                         quote -= blocks;
4004
4005                         chan->sent += blocks;
4006                         chan->conn->sent += blocks;
4007                 }
4008         }
4009
4010         if (cnt != hdev->block_cnt)
4011                 hci_prio_recalculate(hdev, type);
4012 }
4013
4014 static void hci_sched_acl(struct hci_dev *hdev)
4015 {
4016         BT_DBG("%s", hdev->name);
4017
4018         /* A BR/EDR controller with no ACL links has nothing to schedule */
4019         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4020                 return;
4021
4022         /* An AMP controller with no AMP links has nothing to schedule */
4023         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4024                 return;
4025
4026         switch (hdev->flow_ctl_mode) {
4027         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4028                 hci_sched_acl_pkt(hdev);
4029                 break;
4030
4031         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4032                 hci_sched_acl_blk(hdev);
4033                 break;
4034         }
4035 }
4036
4037 /* Schedule SCO */
4038 static void hci_sched_sco(struct hci_dev *hdev)
4039 {
4040         struct hci_conn *conn;
4041         struct sk_buff *skb;
4042         int quote;
4043
4044         BT_DBG("%s", hdev->name);
4045
4046         if (!hci_conn_num(hdev, SCO_LINK))
4047                 return;
4048
4049         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4050                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4051                         BT_DBG("skb %p len %d", skb, skb->len);
4052                         hci_send_frame(hdev, skb);
4053
4054                         conn->sent++;
4055                         if (conn->sent == ~0)
4056                                 conn->sent = 0;
4057                 }
4058         }
4059 }
4060
4061 static void hci_sched_esco(struct hci_dev *hdev)
4062 {
4063         struct hci_conn *conn;
4064         struct sk_buff *skb;
4065         int quote;
4066
4067         BT_DBG("%s", hdev->name);
4068
4069         if (!hci_conn_num(hdev, ESCO_LINK))
4070                 return;
4071
4072         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4073                                                      &quote))) {
4074                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4075                         BT_DBG("skb %p len %d", skb, skb->len);
4076                         hci_send_frame(hdev, skb);
4077
4078                         conn->sent++;
4079                         if (conn->sent == ~0)
4080                                 conn->sent = 0;
4081                 }
4082         }
4083 }
4084
4085 static void hci_sched_le(struct hci_dev *hdev)
4086 {
4087         struct hci_chan *chan;
4088         struct sk_buff *skb;
4089         int quote, cnt, tmp;
4090
4091         BT_DBG("%s", hdev->name);
4092
4093         if (!hci_conn_num(hdev, LE_LINK))
4094                 return;
4095
4096         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4097                 /* LE tx timeout must be longer than maximum
4098                  * link supervision timeout (40.9 seconds) */
4099                 if (!hdev->le_cnt && hdev->le_pkts &&
4100                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4101                         hci_link_tx_to(hdev, LE_LINK);
4102         }
4103
4104         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4105         tmp = cnt;
4106         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4107                 u32 priority = (skb_peek(&chan->data_q))->priority;
4108                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4109                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4110                                skb->len, skb->priority);
4111
4112                         /* Stop if priority has changed */
4113                         if (skb->priority < priority)
4114                                 break;
4115
4116                         skb = skb_dequeue(&chan->data_q);
4117
4118                         hci_send_frame(hdev, skb);
4119                         hdev->le_last_tx = jiffies;
4120
4121                         cnt--;
4122                         chan->sent++;
4123                         chan->conn->sent++;
4124                 }
4125         }
4126
4127         if (hdev->le_pkts)
4128                 hdev->le_cnt = cnt;
4129         else
4130                 hdev->acl_cnt = cnt;
4131
4132         if (cnt != tmp)
4133                 hci_prio_recalculate(hdev, LE_LINK);
4134 }
4135
4136 static void hci_tx_work(struct work_struct *work)
4137 {
4138         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4139         struct sk_buff *skb;
4140
4141         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4142                hdev->sco_cnt, hdev->le_cnt);
4143
4144         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4145                 /* Schedule queues and send stuff to HCI driver */
4146                 hci_sched_acl(hdev);
4147                 hci_sched_sco(hdev);
4148                 hci_sched_esco(hdev);
4149                 hci_sched_le(hdev);
4150         }
4151
4152         /* Send all queued raw (unknown type) packets */
4153         while ((skb = skb_dequeue(&hdev->raw_q)))
4154                 hci_send_frame(hdev, skb);
4155 }
4156
4157 /* ----- HCI RX task (incoming data processing) ----- */
4158
4159 /* ACL data packet */
4160 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4161 {
4162         struct hci_acl_hdr *hdr = (void *) skb->data;
4163         struct hci_conn *conn;
4164         __u16 handle, flags;
4165
4166         skb_pull(skb, HCI_ACL_HDR_SIZE);
4167
4168         handle = __le16_to_cpu(hdr->handle);
4169         flags  = hci_flags(handle);
4170         handle = hci_handle(handle);
4171
4172         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4173                handle, flags);
4174
4175         hdev->stat.acl_rx++;
4176
4177         hci_dev_lock(hdev);
4178         conn = hci_conn_hash_lookup_handle(hdev, handle);
4179         hci_dev_unlock(hdev);
4180
4181         if (conn) {
4182                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4183
4184                 /* Send to upper protocol */
4185                 l2cap_recv_acldata(conn, skb, flags);
4186                 return;
4187         } else {
4188                 BT_ERR("%s ACL packet for unknown connection handle %d",
4189                        hdev->name, handle);
4190         }
4191
4192         kfree_skb(skb);
4193 }
4194
4195 /* SCO data packet */
4196 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4197 {
4198         struct hci_sco_hdr *hdr = (void *) skb->data;
4199         struct hci_conn *conn;
4200         __u16 handle;
4201
4202         skb_pull(skb, HCI_SCO_HDR_SIZE);
4203
4204         handle = __le16_to_cpu(hdr->handle);
4205
4206         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4207
4208         hdev->stat.sco_rx++;
4209
4210         hci_dev_lock(hdev);
4211         conn = hci_conn_hash_lookup_handle(hdev, handle);
4212         hci_dev_unlock(hdev);
4213
4214         if (conn) {
4215                 /* Send to upper protocol */
4216                 sco_recv_scodata(conn, skb);
4217                 return;
4218         } else {
4219                 BT_ERR("%s SCO packet for unknown connection handle %d",
4220                        hdev->name, handle);
4221         }
4222
4223         kfree_skb(skb);
4224 }
4225
4226 static bool hci_req_is_complete(struct hci_dev *hdev)
4227 {
4228         struct sk_buff *skb;
4229
4230         skb = skb_peek(&hdev->cmd_q);
4231         if (!skb)
4232                 return true;
4233
4234         return bt_cb(skb)->req.start;
4235 }
4236
4237 static void hci_resend_last(struct hci_dev *hdev)
4238 {
4239         struct hci_command_hdr *sent;
4240         struct sk_buff *skb;
4241         u16 opcode;
4242
4243         if (!hdev->sent_cmd)
4244                 return;
4245
4246         sent = (void *) hdev->sent_cmd->data;
4247         opcode = __le16_to_cpu(sent->opcode);
4248         if (opcode == HCI_OP_RESET)
4249                 return;
4250
4251         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4252         if (!skb)
4253                 return;
4254
4255         skb_queue_head(&hdev->cmd_q, skb);
4256         queue_work(hdev->workqueue, &hdev->cmd_work);
4257 }
4258
4259 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4260                           hci_req_complete_t *req_complete,
4261                           hci_req_complete_skb_t *req_complete_skb)
4262 {
4263         struct sk_buff *skb;
4264         unsigned long flags;
4265
4266         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4267
4268         /* If the completed command doesn't match the last one that was
4269          * sent we need to do special handling of it.
4270          */
4271         if (!hci_sent_cmd_data(hdev, opcode)) {
4272                 /* Some CSR based controllers generate a spontaneous
4273                  * reset complete event during init and any pending
4274                  * command will never be completed. In such a case we
4275                  * need to resend whatever was the last sent
4276                  * command.
4277                  */
4278                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4279                         hci_resend_last(hdev);
4280
4281                 return;
4282         }
4283
4284         /* If the command succeeded and there are still more commands in
4285          * this request, the request is not yet complete.
4286          */
4287         if (!status && !hci_req_is_complete(hdev))
4288                 return;
4289
4290         /* If this was the last command in a request the complete
4291          * callback would be found in hdev->sent_cmd instead of the
4292          * command queue (hdev->cmd_q).
4293          */
4294         if (bt_cb(hdev->sent_cmd)->req.complete) {
4295                 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4296                 return;
4297         }
4298
4299         if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4300                 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4301                 return;
4302         }
4303
4304         /* Remove all pending commands belonging to this request */
4305         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4306         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4307                 if (bt_cb(skb)->req.start) {
4308                         __skb_queue_head(&hdev->cmd_q, skb);
4309                         break;
4310                 }
4311
4312                 *req_complete = bt_cb(skb)->req.complete;
4313                 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4314                 kfree_skb(skb);
4315         }
4316         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4317 }
4318
4319 static void hci_rx_work(struct work_struct *work)
4320 {
4321         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4322         struct sk_buff *skb;
4323
4324         BT_DBG("%s", hdev->name);
4325
4326         while ((skb = skb_dequeue(&hdev->rx_q))) {
4327                 /* Send copy to monitor */
4328                 hci_send_to_monitor(hdev, skb);
4329
4330                 if (atomic_read(&hdev->promisc)) {
4331                         /* Send copy to the sockets */
4332                         hci_send_to_sock(hdev, skb);
4333                 }
4334
4335                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4336                         kfree_skb(skb);
4337                         continue;
4338                 }
4339
4340                 if (test_bit(HCI_INIT, &hdev->flags)) {
4341                         /* Don't process data packets in this state. */
4342                         switch (bt_cb(skb)->pkt_type) {
4343                         case HCI_ACLDATA_PKT:
4344                         case HCI_SCODATA_PKT:
4345                                 kfree_skb(skb);
4346                                 continue;
4347                         }
4348                 }
4349
4350                 /* Process frame */
4351                 switch (bt_cb(skb)->pkt_type) {
4352                 case HCI_EVENT_PKT:
4353                         BT_DBG("%s Event packet", hdev->name);
4354                         hci_event_packet(hdev, skb);
4355                         break;
4356
4357                 case HCI_ACLDATA_PKT:
4358                         BT_DBG("%s ACL data packet", hdev->name);
4359                         hci_acldata_packet(hdev, skb);
4360                         break;
4361
4362                 case HCI_SCODATA_PKT:
4363                         BT_DBG("%s SCO data packet", hdev->name);
4364                         hci_scodata_packet(hdev, skb);
4365                         break;
4366
4367                 default:
4368                         kfree_skb(skb);
4369                         break;
4370                 }
4371         }
4372 }
4373
4374 static void hci_cmd_work(struct work_struct *work)
4375 {
4376         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4377         struct sk_buff *skb;
4378
4379         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4380                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4381
4382         /* Send queued commands */
4383         if (atomic_read(&hdev->cmd_cnt)) {
4384                 skb = skb_dequeue(&hdev->cmd_q);
4385                 if (!skb)
4386                         return;
4387
4388                 kfree_skb(hdev->sent_cmd);
4389
4390                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4391                 if (hdev->sent_cmd) {
4392                         atomic_dec(&hdev->cmd_cnt);
4393                         hci_send_frame(hdev, skb);
4394                         if (test_bit(HCI_RESET, &hdev->flags))
4395                                 cancel_delayed_work(&hdev->cmd_timer);
4396                         else
4397                                 schedule_delayed_work(&hdev->cmd_timer,
4398                                                       HCI_CMD_TIMEOUT);
4399                 } else {
4400                         skb_queue_head(&hdev->cmd_q, skb);
4401                         queue_work(hdev->workqueue, &hdev->cmd_work);
4402                 }
4403         }
4404 }