/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

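/* Enable or disable Device Under Test mode. Enabling sends the HCI
 * Enable DUT Mode command; disabling issues an HCI Reset instead,
 * since there is no dedicated disable command.
 */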
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        /* When the diagnostic flags are not persistent and the transport
         * is not active, then there is no need for the vendor callback.
         *
         * Instead just store the desired value. If needed the setting
         * will be programmed when the controller gets powered on.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            !test_bit(HCI_RUNNING, &hdev->flags))
                goto done;

        hci_req_lock(hdev);
        err = hdev->set_diag(hdev, enable);
        hci_req_unlock(hdev);

        if (err < 0)
                return err;

done:
        if (enable)
                hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
        else
                hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

        return count;
}

static const struct file_operations vendor_diag_fops = {
        .open           = simple_open,
        .read           = vendor_diag_read,
        .write          = vendor_diag_write,
        .llseek         = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
        debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                            &dut_mode_fops);

        if (hdev->set_diag)
                debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                                    &vendor_diag_fops);
}

/* ---- HCI requests ---- */

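/* Completion handler for synchronous requests. Stores the result (and
 * the response skb, if any) and wakes up the thread waiting on the
 * request wait queue.
 */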
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

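/* Send a single HCI command and block until the matching completion
 * event arrives or the timeout expires. Returns the event skb on
 * success and an ERR_PTR otherwise.
 */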
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

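/* Like __hci_req_sync(), but fails with -ENETDOWN if the device is not
 * up and serializes against other synchronous requests via the request
 * lock.
 */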
static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

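/* Stage 1 init: reset the controller (unless quirked) and run the
 * transport specific basic setup (BR/EDR or AMP).
 */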
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

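/* Build the HCI event mask based on the controller's supported
 * features, so only events the stack can actually handle get
 * delivered.
 */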
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

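/* Stage 2 init: transport specific setup plus commands whose
 * availability depends on the features read during stage 1.
 */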
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available. However some controllers
                 * list the max_page as 0 as long as SSP has not been
                 * enabled. To achieve proper debugging output, force
                 * the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

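/* Stage 3 init: event masks, link policy and LE configuration that
 * depend on the supported commands and features read earlier.
 */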
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40;      /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04;      /* LE Direct Advertising
                                                 * Report
                                                 */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80;      /* LE Read Local P-256
                                                 * Public Key Complete
                                                 */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01;      /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

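/* Stage 4 init: optional commands gated on the supported commands bit
 * mask, plus Secure Connections setup when available.
 */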
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

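/* Run the full four stage init sequence. AMP controllers stop after
 * stage 2; debugfs entries are only created while the controller is in
 * setup or config phase.
 */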
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        if (hci_dev_test_flag(hdev, HCI_SETUP))
                hci_debugfs_create_basic(hdev);

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

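/* Reposition an entry in the resolve list, which is kept ordered by
 * signal strength so that the strongest devices get their names
 * resolved first.
 */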
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

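/* Handler for the HCIINQUIRY ioctl: run an inquiry if the cache is
 * stale and copy the cached results back to user space.
 */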
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

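/* Bring the device up: call the driver's open callback, run the setup
 * and init stages and, on failure, tear everything down again.
 */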
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                hci_sock_dev_event(hdev, HCI_DEV_SETUP);

                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                        ret = __hci_init(hdev);
                        if (!ret && hdev->post_init)
                                ret = hdev->post_init(hdev);
                }
        }

        /* If the HCI Reset command is clearing all diagnostic settings,
         * then they need to be reprogrammed after the init procedure
         * completed.
         */
        if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
            hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
                ret = hdev->set_diag(hdev, true);

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_UP);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                clear_bit(HCI_RUNNING, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

1570         /* Devices that are marked as unconfigured can only be powered
1571          * up as user channel. Trying to bring them up as normal devices
1572          * will result in a failure. Only user channel operation is
1573          * possible.
1574          *
1575          * When this function is called for a user channel, the flag
1576          * HCI_USER_CHANNEL will be set first before attempting to
1577          * open the device.
1578          */
1579         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1580             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1581                 err = -EOPNOTSUPP;
1582                 goto done;
1583         }
1584
1585         /* We need to ensure that no other power on/off work is pending
1586          * before proceeding to call hci_dev_do_open. This is
1587          * particularly important if the setup procedure has not yet
1588          * completed.
1589          */
1590         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1591                 cancel_delayed_work(&hdev->power_off);
1592
1593         /* After this call it is guaranteed that the setup procedure
1594          * has finished. This means that error conditions such as RFKILL
1595          * or the lack of a valid public or static random address now apply.
1596          */
1597         flush_workqueue(hdev->req_workqueue);
1598
1599         /* For controllers not using the management interface and that
1600          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1601          * so that pairing works for them. Once the management interface
1602          * is in use this bit will be cleared again and userspace has
1603          * to explicitly enable it.
1604          */
1605         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1606             !hci_dev_test_flag(hdev, HCI_MGMT))
1607                 hci_dev_set_flag(hdev, HCI_BONDABLE);
1608
1609         err = hci_dev_do_open(hdev);
1610
1611 done:
1612         hci_dev_put(hdev);
1613         return err;
1614 }
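
/* Illustrative sketch (not part of the original file): userspace
 * normally reaches hci_dev_open() through the HCIDEVUP ioctl on a raw
 * HCI socket, roughly:
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(fd, HCIDEVUP, 0) < 0)		// bring up hci0
 *		perror("HCIDEVUP");
 *
 * The error codes above (-ENODEV, -EOPNOTSUPP) come back to the
 * caller through the ioctl return path.
 */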
1615
1616 /* This function requires the caller holds hdev->lock */
1617 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1618 {
1619         struct hci_conn_params *p;
1620
1621         list_for_each_entry(p, &hdev->le_conn_params, list) {
1622                 if (p->conn) {
1623                         hci_conn_drop(p->conn);
1624                         hci_conn_put(p->conn);
1625                         p->conn = NULL;
1626                 }
1627                 list_del_init(&p->action);
1628         }
1629
1630         BT_DBG("All LE pending actions cleared");
1631 }
1632
1633 int hci_dev_do_close(struct hci_dev *hdev)
1634 {
1635         bool auto_off;
1636
1637         BT_DBG("%s %p", hdev->name, hdev);
1638
1639         if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1640             !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1641             test_bit(HCI_UP, &hdev->flags)) {
1642                 /* Execute vendor specific shutdown routine */
1643                 if (hdev->shutdown)
1644                         hdev->shutdown(hdev);
1645         }
1646
1647         cancel_delayed_work(&hdev->power_off);
1648
1649         hci_req_cancel(hdev, ENODEV);
1650         hci_req_lock(hdev);
1651
1652         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1653                 cancel_delayed_work_sync(&hdev->cmd_timer);
1654                 hci_req_unlock(hdev);
1655                 return 0;
1656         }
1657
1658         /* Flush RX and TX works */
1659         flush_work(&hdev->tx_work);
1660         flush_work(&hdev->rx_work);
1661
1662         if (hdev->discov_timeout > 0) {
1663                 cancel_delayed_work(&hdev->discov_off);
1664                 hdev->discov_timeout = 0;
1665                 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1666                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1667         }
1668
1669         if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1670                 cancel_delayed_work(&hdev->service_cache);
1671
1672         cancel_delayed_work_sync(&hdev->le_scan_disable);
1673         cancel_delayed_work_sync(&hdev->le_scan_restart);
1674
1675         if (hci_dev_test_flag(hdev, HCI_MGMT))
1676                 cancel_delayed_work_sync(&hdev->rpa_expired);
1677
1678         if (hdev->adv_instance_timeout) {
1679                 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1680                 hdev->adv_instance_timeout = 0;
1681         }
1682
1683         /* Avoid potential lockdep warnings from the *_flush() calls by
1684          * ensuring the workqueue is empty up front.
1685          */
1686         drain_workqueue(hdev->workqueue);
1687
1688         hci_dev_lock(hdev);
1689
1690         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1691
1692         auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1693
1694         if (!auto_off && hdev->dev_type == HCI_BREDR)
1695                 mgmt_powered(hdev, 0);
1696
1697         hci_inquiry_cache_flush(hdev);
1698         hci_pend_le_actions_clear(hdev);
1699         hci_conn_hash_flush(hdev);
1700         hci_dev_unlock(hdev);
1701
1702         smp_unregister(hdev);
1703
1704         hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1705
1706         if (hdev->flush)
1707                 hdev->flush(hdev);
1708
1709         /* Reset device */
1710         skb_queue_purge(&hdev->cmd_q);
1711         atomic_set(&hdev->cmd_cnt, 1);
1712         if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1713             !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1714                 set_bit(HCI_INIT, &hdev->flags);
1715                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1716                 clear_bit(HCI_INIT, &hdev->flags);
1717         }
1718
1719         /* Flush cmd work */
1720         flush_work(&hdev->cmd_work);
1721
1722         /* Drop queues */
1723         skb_queue_purge(&hdev->rx_q);
1724         skb_queue_purge(&hdev->cmd_q);
1725         skb_queue_purge(&hdev->raw_q);
1726
1727         /* Drop last sent command */
1728         if (hdev->sent_cmd) {
1729                 cancel_delayed_work_sync(&hdev->cmd_timer);
1730                 kfree_skb(hdev->sent_cmd);
1731                 hdev->sent_cmd = NULL;
1732         }
1733
1734         clear_bit(HCI_RUNNING, &hdev->flags);
1735         hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1736
1737         /* After this point our queues are empty
1738          * and no tasks are scheduled. */
1739         hdev->close(hdev);
1740
1741         /* Clear flags */
1742         hdev->flags &= BIT(HCI_RAW);
1743         hci_dev_clear_volatile_flags(hdev);
1744
1745         /* Controller radio is available but is currently powered down */
1746         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1747
1748         memset(hdev->eir, 0, sizeof(hdev->eir));
1749         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1750         bacpy(&hdev->random_addr, BDADDR_ANY);
1751
1752         hci_req_unlock(hdev);
1753
1754         hci_dev_put(hdev);
1755         return 0;
1756 }
1757
1758 int hci_dev_close(__u16 dev)
1759 {
1760         struct hci_dev *hdev;
1761         int err;
1762
1763         hdev = hci_dev_get(dev);
1764         if (!hdev)
1765                 return -ENODEV;
1766
1767         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1768                 err = -EBUSY;
1769                 goto done;
1770         }
1771
1772         if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1773                 cancel_delayed_work(&hdev->power_off);
1774
1775         err = hci_dev_do_close(hdev);
1776
1777 done:
1778         hci_dev_put(hdev);
1779         return err;
1780 }
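
/* Illustrative sketch (not part of the original file): the matching
 * teardown is the HCIDEVDOWN ioctl on the same raw HCI socket, e.g.
 *
 *	ioctl(fd, HCIDEVDOWN, 0);	// power down hci0
 *
 * which fails with -EBUSY while a user channel owns the device, as
 * enforced above.
 */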
1781
1782 static int hci_dev_do_reset(struct hci_dev *hdev)
1783 {
1784         int ret;
1785
1786         BT_DBG("%s %p", hdev->name, hdev);
1787
1788         hci_req_lock(hdev);
1789
1790         /* Drop queues */
1791         skb_queue_purge(&hdev->rx_q);
1792         skb_queue_purge(&hdev->cmd_q);
1793
1794         /* Avoid potential lockdep warnings from the *_flush() calls by
1795          * ensuring the workqueue is empty up front.
1796          */
1797         drain_workqueue(hdev->workqueue);
1798
1799         hci_dev_lock(hdev);
1800         hci_inquiry_cache_flush(hdev);
1801         hci_conn_hash_flush(hdev);
1802         hci_dev_unlock(hdev);
1803
1804         if (hdev->flush)
1805                 hdev->flush(hdev);
1806
1807         atomic_set(&hdev->cmd_cnt, 1);
1808         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1809
1810         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1811
1812         hci_req_unlock(hdev);
1813         return ret;
1814 }
1815
1816 int hci_dev_reset(__u16 dev)
1817 {
1818         struct hci_dev *hdev;
1819         int err;
1820
1821         hdev = hci_dev_get(dev);
1822         if (!hdev)
1823                 return -ENODEV;
1824
1825         if (!test_bit(HCI_UP, &hdev->flags)) {
1826                 err = -ENETDOWN;
1827                 goto done;
1828         }
1829
1830         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1831                 err = -EBUSY;
1832                 goto done;
1833         }
1834
1835         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1836                 err = -EOPNOTSUPP;
1837                 goto done;
1838         }
1839
1840         err = hci_dev_do_reset(hdev);
1841
1842 done:
1843         hci_dev_put(hdev);
1844         return err;
1845 }
1846
1847 int hci_dev_reset_stat(__u16 dev)
1848 {
1849         struct hci_dev *hdev;
1850         int ret = 0;
1851
1852         hdev = hci_dev_get(dev);
1853         if (!hdev)
1854                 return -ENODEV;
1855
1856         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1857                 ret = -EBUSY;
1858                 goto done;
1859         }
1860
1861         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1862                 ret = -EOPNOTSUPP;
1863                 goto done;
1864         }
1865
1866         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1867
1868 done:
1869         hci_dev_put(hdev);
1870         return ret;
1871 }
1872
1873 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1874 {
1875         bool conn_changed, discov_changed;
1876
1877         BT_DBG("%s scan 0x%02x", hdev->name, scan);
1878
1879         if ((scan & SCAN_PAGE))
1880                 conn_changed = !hci_dev_test_and_set_flag(hdev,
1881                                                           HCI_CONNECTABLE);
1882         else
1883                 conn_changed = hci_dev_test_and_clear_flag(hdev,
1884                                                            HCI_CONNECTABLE);
1885
1886         if ((scan & SCAN_INQUIRY)) {
1887                 discov_changed = !hci_dev_test_and_set_flag(hdev,
1888                                                             HCI_DISCOVERABLE);
1889         } else {
1890                 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1891                 discov_changed = hci_dev_test_and_clear_flag(hdev,
1892                                                              HCI_DISCOVERABLE);
1893         }
1894
1895         if (!hci_dev_test_flag(hdev, HCI_MGMT))
1896                 return;
1897
1898         if (conn_changed || discov_changed) {
1899                 /* In case this was disabled through mgmt */
1900                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1901
1902                 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1903                         mgmt_update_adv_data(hdev);
1904
1905                 mgmt_new_settings(hdev);
1906         }
1907 }
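
/* Worked example (illustrative, not from the original file): the scan
 * argument is the classic Write Scan Enable bitmask, so the flag
 * updates above map as:
 *
 *	SCAN_DISABLED            (0x00) -> neither flag set
 *	SCAN_INQUIRY             (0x01) -> HCI_DISCOVERABLE
 *	SCAN_PAGE                (0x02) -> HCI_CONNECTABLE
 *	SCAN_PAGE | SCAN_INQUIRY (0x03) -> both flags set
 *
 * and mgmt is only notified when one of the flags actually changed.
 */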
1908
1909 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1910 {
1911         struct hci_dev *hdev;
1912         struct hci_dev_req dr;
1913         int err = 0;
1914
1915         if (copy_from_user(&dr, arg, sizeof(dr)))
1916                 return -EFAULT;
1917
1918         hdev = hci_dev_get(dr.dev_id);
1919         if (!hdev)
1920                 return -ENODEV;
1921
1922         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1923                 err = -EBUSY;
1924                 goto done;
1925         }
1926
1927         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1928                 err = -EOPNOTSUPP;
1929                 goto done;
1930         }
1931
1932         if (hdev->dev_type != HCI_BREDR) {
1933                 err = -EOPNOTSUPP;
1934                 goto done;
1935         }
1936
1937         if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1938                 err = -EOPNOTSUPP;
1939                 goto done;
1940         }
1941
1942         switch (cmd) {
1943         case HCISETAUTH:
1944                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1945                                    HCI_INIT_TIMEOUT);
1946                 break;
1947
1948         case HCISETENCRYPT:
1949                 if (!lmp_encrypt_capable(hdev)) {
1950                         err = -EOPNOTSUPP;
1951                         break;
1952                 }
1953
1954                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1955                         /* Auth must be enabled first */
1956                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1957                                            HCI_INIT_TIMEOUT);
1958                         if (err)
1959                                 break;
1960                 }
1961
1962                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1963                                    HCI_INIT_TIMEOUT);
1964                 break;
1965
1966         case HCISETSCAN:
1967                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1968                                    HCI_INIT_TIMEOUT);
1969
1970                 /* Ensure that the connectable and discoverable states
1971                  * get correctly modified as this was a non-mgmt change.
1972                  */
1973                 if (!err)
1974                         hci_update_scan_state(hdev, dr.dev_opt);
1975                 break;
1976
1977         case HCISETLINKPOL:
1978                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1979                                    HCI_INIT_TIMEOUT);
1980                 break;
1981
1982         case HCISETLINKMODE:
1983                 hdev->link_mode = ((__u16) dr.dev_opt) &
1984                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1985                 break;
1986
1987         case HCISETPTYPE:
1988                 hdev->pkt_type = (__u16) dr.dev_opt;
1989                 break;
1990
1991         case HCISETACLMTU:
1992                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1993                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1994                 break;
1995
1996         case HCISETSCOMTU:
1997                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1998                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1999                 break;
2000
2001         default:
2002                 err = -EINVAL;
2003                 break;
2004         }
2005
2006 done:
2007         hci_dev_put(hdev);
2008         return err;
2009 }
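
/* Illustrative sketch (not part of the original file): a caller fills
 * struct hci_dev_req and issues one of the ioctls handled above, e.g.
 * enabling page and inquiry scan on hci0:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *
 *	ioctl(fd, HCISETSCAN, (unsigned long) &dr);
 */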
2010
2011 int hci_get_dev_list(void __user *arg)
2012 {
2013         struct hci_dev *hdev;
2014         struct hci_dev_list_req *dl;
2015         struct hci_dev_req *dr;
2016         int n = 0, size, err;
2017         __u16 dev_num;
2018
2019         if (get_user(dev_num, (__u16 __user *) arg))
2020                 return -EFAULT;
2021
2022         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2023                 return -EINVAL;
2024
2025         size = sizeof(*dl) + dev_num * sizeof(*dr);
2026
2027         dl = kzalloc(size, GFP_KERNEL);
2028         if (!dl)
2029                 return -ENOMEM;
2030
2031         dr = dl->dev_req;
2032
2033         read_lock(&hci_dev_list_lock);
2034         list_for_each_entry(hdev, &hci_dev_list, list) {
2035                 unsigned long flags = hdev->flags;
2036
2037                 /* When auto-off is configured, the transport is
2038                  * running, but in that case still indicate that the
2039                  * device is actually down.
2040                  */
2041                 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2042                         flags &= ~BIT(HCI_UP);
2043
2044                 (dr + n)->dev_id  = hdev->id;
2045                 (dr + n)->dev_opt = flags;
2046
2047                 if (++n >= dev_num)
2048                         break;
2049         }
2050         read_unlock(&hci_dev_list_lock);
2051
2052         dl->dev_num = n;
2053         size = sizeof(*dl) + n * sizeof(*dr);
2054
2055         err = copy_to_user(arg, dl, size);
2056         kfree(dl);
2057
2058         return err ? -EFAULT : 0;
2059 }
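
/* Illustrative sketch (not part of the original file): the
 * HCIGETDEVLIST caller allocates the flexible array itself and passes
 * the requested entry count up front, e.g.
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = malloc(sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	dl->dev_num = 16;
 *	ioctl(fd, HCIGETDEVLIST, (unsigned long) dl);
 *
 * On return dl->dev_num holds the number of entries actually filled in.
 */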
2060
2061 int hci_get_dev_info(void __user *arg)
2062 {
2063         struct hci_dev *hdev;
2064         struct hci_dev_info di;
2065         unsigned long flags;
2066         int err = 0;
2067
2068         if (copy_from_user(&di, arg, sizeof(di)))
2069                 return -EFAULT;
2070
2071         hdev = hci_dev_get(di.dev_id);
2072         if (!hdev)
2073                 return -ENODEV;
2074
2075         /* When auto-off is configured, the transport is running,
2076          * but in that case still indicate that the device is
2077          * actually down.
2078          */
2079         if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2080                 flags = hdev->flags & ~BIT(HCI_UP);
2081         else
2082                 flags = hdev->flags;
2083
2084         strcpy(di.name, hdev->name);
2085         di.bdaddr   = hdev->bdaddr;
2086         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2087         di.flags    = flags;
2088         di.pkt_type = hdev->pkt_type;
2089         if (lmp_bredr_capable(hdev)) {
2090                 di.acl_mtu  = hdev->acl_mtu;
2091                 di.acl_pkts = hdev->acl_pkts;
2092                 di.sco_mtu  = hdev->sco_mtu;
2093                 di.sco_pkts = hdev->sco_pkts;
2094         } else {
2095                 di.acl_mtu  = hdev->le_mtu;
2096                 di.acl_pkts = hdev->le_pkts;
2097                 di.sco_mtu  = 0;
2098                 di.sco_pkts = 0;
2099         }
2100         di.link_policy = hdev->link_policy;
2101         di.link_mode   = hdev->link_mode;
2102
2103         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2104         memcpy(&di.features, &hdev->features, sizeof(di.features));
2105
2106         if (copy_to_user(arg, &di, sizeof(di)))
2107                 err = -EFAULT;
2108
2109         hci_dev_put(hdev);
2110
2111         return err;
2112 }
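
/* Illustrative sketch (not part of the original file): the matching
 * single-device query is HCIGETDEVINFO, e.g.
 *
 *	struct hci_dev_info di = { .dev_id = 0 };
 *
 *	ioctl(fd, HCIGETDEVINFO, (unsigned long) &di);
 */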
2113
2114 /* ---- Interface to HCI drivers ---- */
2115
2116 static int hci_rfkill_set_block(void *data, bool blocked)
2117 {
2118         struct hci_dev *hdev = data;
2119
2120         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2121
2122         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2123                 return -EBUSY;
2124
2125         if (blocked) {
2126                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2127                 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2128                     !hci_dev_test_flag(hdev, HCI_CONFIG))
2129                         hci_dev_do_close(hdev);
2130         } else {
2131                 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2132         }
2133
2134         return 0;
2135 }
2136
2137 static const struct rfkill_ops hci_rfkill_ops = {
2138         .set_block = hci_rfkill_set_block,
2139 };
2140
2141 static void hci_power_on(struct work_struct *work)
2142 {
2143         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2144         int err;
2145
2146         BT_DBG("%s", hdev->name);
2147
2148         err = hci_dev_do_open(hdev);
2149         if (err < 0) {
2150                 hci_dev_lock(hdev);
2151                 mgmt_set_powered_failed(hdev, err);
2152                 hci_dev_unlock(hdev);
2153                 return;
2154         }
2155
2156         /* During the HCI setup phase, a few error conditions are
2157          * ignored and they need to be checked now. If they are still
2158          * valid, it is important to turn the device back off.
2159          */
2160         if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2161             hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2162             (hdev->dev_type == HCI_BREDR &&
2163              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2164              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2165                 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2166                 hci_dev_do_close(hdev);
2167         } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2168                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2169                                    HCI_AUTO_OFF_TIMEOUT);
2170         }
2171
2172         if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2173                 /* For unconfigured devices, set the HCI_RAW flag
2174                  * so that userspace can easily identify them.
2175                  */
2176                 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2177                         set_bit(HCI_RAW, &hdev->flags);
2178
2179                 /* For fully configured devices, this will send
2180                  * the Index Added event. For unconfigured devices,
2181                  * it will send the Unconfigured Index Added event.
2182                  *
2183                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2184                  * and no event will be sent.
2185                  */
2186                 mgmt_index_added(hdev);
2187         } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2188                 /* Now that the controller is configured, it is
2189                  * important to clear the HCI_RAW flag.
2190                  */
2191                 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2192                         clear_bit(HCI_RAW, &hdev->flags);
2193
2194                 /* Powering on the controller with HCI_CONFIG set only
2195                  * happens with the transition from unconfigured to
2196                  * configured. This will send the Index Added event.
2197                  */
2198                 mgmt_index_added(hdev);
2199         }
2200 }
2201
2202 static void hci_power_off(struct work_struct *work)
2203 {
2204         struct hci_dev *hdev = container_of(work, struct hci_dev,
2205                                             power_off.work);
2206
2207         BT_DBG("%s", hdev->name);
2208
2209         hci_dev_do_close(hdev);
2210 }
2211
2212 static void hci_error_reset(struct work_struct *work)
2213 {
2214         struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2215
2216         BT_DBG("%s", hdev->name);
2217
2218         if (hdev->hw_error)
2219                 hdev->hw_error(hdev, hdev->hw_error_code);
2220         else
2221                 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2222                        hdev->hw_error_code);
2223
2224         if (hci_dev_do_close(hdev))
2225                 return;
2226
2227         hci_dev_do_open(hdev);
2228 }
2229
2230 static void hci_discov_off(struct work_struct *work)
2231 {
2232         struct hci_dev *hdev;
2233
2234         hdev = container_of(work, struct hci_dev, discov_off.work);
2235
2236         BT_DBG("%s", hdev->name);
2237
2238         mgmt_discoverable_timeout(hdev);
2239 }
2240
2241 static void hci_adv_timeout_expire(struct work_struct *work)
2242 {
2243         struct hci_dev *hdev;
2244
2245         hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2246
2247         BT_DBG("%s", hdev->name);
2248
2249         mgmt_adv_timeout_expired(hdev);
2250 }
2251
2252 void hci_uuids_clear(struct hci_dev *hdev)
2253 {
2254         struct bt_uuid *uuid, *tmp;
2255
2256         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2257                 list_del(&uuid->list);
2258                 kfree(uuid);
2259         }
2260 }
2261
2262 void hci_link_keys_clear(struct hci_dev *hdev)
2263 {
2264         struct link_key *key;
2265
2266         list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2267                 list_del_rcu(&key->list);
2268                 kfree_rcu(key, rcu);
2269         }
2270 }
2271
2272 void hci_smp_ltks_clear(struct hci_dev *hdev)
2273 {
2274         struct smp_ltk *k;
2275
2276         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2277                 list_del_rcu(&k->list);
2278                 kfree_rcu(k, rcu);
2279         }
2280 }
2281
2282 void hci_smp_irks_clear(struct hci_dev *hdev)
2283 {
2284         struct smp_irk *k;
2285
2286         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2287                 list_del_rcu(&k->list);
2288                 kfree_rcu(k, rcu);
2289         }
2290 }
2291
2292 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2293 {
2294         struct link_key *k;
2295
2296         rcu_read_lock();
2297         list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2298                 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2299                         rcu_read_unlock();
2300                         return k;
2301                 }
2302         }
2303         rcu_read_unlock();
2304
2305         return NULL;
2306 }
2307
2308 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2309                                u8 key_type, u8 old_key_type)
2310 {
2311         /* Legacy key */
2312         if (key_type < 0x03)
2313                 return true;
2314
2315         /* Debug keys are insecure so don't store them persistently */
2316         if (key_type == HCI_LK_DEBUG_COMBINATION)
2317                 return false;
2318
2319         /* Changed combination key and there's no previous one */
2320         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2321                 return false;
2322
2323         /* Security mode 3 case */
2324         if (!conn)
2325                 return true;
2326
2327         /* BR/EDR key derived using SC from an LE link */
2328         if (conn->type == LE_LINK)
2329                 return true;
2330
2331         /* Neither local nor remote side had no-bonding as a requirement */
2332         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2333                 return true;
2334
2335         /* Local side had dedicated bonding as requirement */
2336         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2337                 return true;
2338
2339         /* Remote side had dedicated bonding as requirement */
2340         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2341                 return true;
2342
2343         /* If none of the above criteria match, then don't store the key
2344          * persistently */
2345         return false;
2346 }
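
/* Worked example (illustrative, not from the original file): an
 * unauthenticated combination key (type 0x04) created while the local
 * side requested dedicated bonding (auth_type 0x02) is kept, whereas
 * a debug combination key (type 0x03) is always discarded, regardless
 * of the bonding requirements.
 */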
2347
2348 static u8 ltk_role(u8 type)
2349 {
2350         if (type == SMP_LTK)
2351                 return HCI_ROLE_MASTER;
2352
2353         return HCI_ROLE_SLAVE;
2354 }
2355
2356 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2357                              u8 addr_type, u8 role)
2358 {
2359         struct smp_ltk *k;
2360
2361         rcu_read_lock();
2362         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2363                 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2364                         continue;
2365
2366                 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2367                         rcu_read_unlock();
2368                         return k;
2369                 }
2370         }
2371         rcu_read_unlock();
2372
2373         return NULL;
2374 }
2375
2376 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2377 {
2378         struct smp_irk *irk;
2379
2380         rcu_read_lock();
2381         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2382                 if (!bacmp(&irk->rpa, rpa)) {
2383                         rcu_read_unlock();
2384                         return irk;
2385                 }
2386         }
2387
2388         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2389                 if (smp_irk_matches(hdev, irk->val, rpa)) {
2390                         bacpy(&irk->rpa, rpa);
2391                         rcu_read_unlock();
2392                         return irk;
2393                 }
2394         }
2395         rcu_read_unlock();
2396
2397         return NULL;
2398 }
2399
2400 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2401                                      u8 addr_type)
2402 {
2403         struct smp_irk *irk;
2404
2405         /* Identity Address must be public or static random */
2406         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2407                 return NULL;
2408
2409         rcu_read_lock();
2410         list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2411                 if (addr_type == irk->addr_type &&
2412                     bacmp(bdaddr, &irk->bdaddr) == 0) {
2413                         rcu_read_unlock();
2414                         return irk;
2415                 }
2416         }
2417         rcu_read_unlock();
2418
2419         return NULL;
2420 }
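
/* Background note (illustrative, not from the original file): the
 * check above follows the random address sub-types from the Core
 * Specification, encoded in the two most significant bits:
 *
 *	11xxxxxx... -> static random address      (valid identity)
 *	01xxxxxx... -> resolvable private address (never an identity)
 *	00xxxxxx... -> non-resolvable private address
 *
 * hence the (bdaddr->b[5] & 0xc0) != 0xc0 rejection.
 */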
2421
2422 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2423                                   bdaddr_t *bdaddr, u8 *val, u8 type,
2424                                   u8 pin_len, bool *persistent)
2425 {
2426         struct link_key *key, *old_key;
2427         u8 old_key_type;
2428
2429         old_key = hci_find_link_key(hdev, bdaddr);
2430         if (old_key) {
2431                 old_key_type = old_key->type;
2432                 key = old_key;
2433         } else {
2434                 old_key_type = conn ? conn->key_type : 0xff;
2435                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2436                 if (!key)
2437                         return NULL;
2438                 list_add_rcu(&key->list, &hdev->link_keys);
2439         }
2440
2441         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2442
2443         /* Some buggy controller combinations generate a changed
2444          * combination key for legacy pairing even when there's no
2445          * previous key */
2446         if (type == HCI_LK_CHANGED_COMBINATION &&
2447             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2448                 type = HCI_LK_COMBINATION;
2449                 if (conn)
2450                         conn->key_type = type;
2451         }
2452
2453         bacpy(&key->bdaddr, bdaddr);
2454         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2455         key->pin_len = pin_len;
2456
2457         if (type == HCI_LK_CHANGED_COMBINATION)
2458                 key->type = old_key_type;
2459         else
2460                 key->type = type;
2461
2462         if (persistent)
2463                 *persistent = hci_persistent_key(hdev, conn, type,
2464                                                  old_key_type);
2465
2466         return key;
2467 }
2468
2469 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2470                             u8 addr_type, u8 type, u8 authenticated,
2471                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2472 {
2473         struct smp_ltk *key, *old_key;
2474         u8 role = ltk_role(type);
2475
2476         old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2477         if (old_key)
2478                 key = old_key;
2479         else {
2480                 key = kzalloc(sizeof(*key), GFP_KERNEL);
2481                 if (!key)
2482                         return NULL;
2483                 list_add_rcu(&key->list, &hdev->long_term_keys);
2484         }
2485
2486         bacpy(&key->bdaddr, bdaddr);
2487         key->bdaddr_type = addr_type;
2488         memcpy(key->val, tk, sizeof(key->val));
2489         key->authenticated = authenticated;
2490         key->ediv = ediv;
2491         key->rand = rand;
2492         key->enc_size = enc_size;
2493         key->type = type;
2494
2495         return key;
2496 }
2497
2498 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2499                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
2500 {
2501         struct smp_irk *irk;
2502
2503         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2504         if (!irk) {
2505                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2506                 if (!irk)
2507                         return NULL;
2508
2509                 bacpy(&irk->bdaddr, bdaddr);
2510                 irk->addr_type = addr_type;
2511
2512                 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2513         }
2514
2515         memcpy(irk->val, val, 16);
2516         bacpy(&irk->rpa, rpa);
2517
2518         return irk;
2519 }
2520
2521 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2522 {
2523         struct link_key *key;
2524
2525         key = hci_find_link_key(hdev, bdaddr);
2526         if (!key)
2527                 return -ENOENT;
2528
2529         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2530
2531         list_del_rcu(&key->list);
2532         kfree_rcu(key, rcu);
2533
2534         return 0;
2535 }
2536
2537 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2538 {
2539         struct smp_ltk *k;
2540         int removed = 0;
2541
2542         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2543                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2544                         continue;
2545
2546                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2547
2548                 list_del_rcu(&k->list);
2549                 kfree_rcu(k, rcu);
2550                 removed++;
2551         }
2552
2553         return removed ? 0 : -ENOENT;
2554 }
2555
2556 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2557 {
2558         struct smp_irk *k;
2559
2560         list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2561                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2562                         continue;
2563
2564                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2565
2566                 list_del_rcu(&k->list);
2567                 kfree_rcu(k, rcu);
2568         }
2569 }
2570
2571 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2572 {
2573         struct smp_ltk *k;
2574         struct smp_irk *irk;
2575         u8 addr_type;
2576
2577         if (type == BDADDR_BREDR) {
2578                 if (hci_find_link_key(hdev, bdaddr))
2579                         return true;
2580                 return false;
2581         }
2582
2583         /* Convert to HCI addr type which struct smp_ltk uses */
2584         if (type == BDADDR_LE_PUBLIC)
2585                 addr_type = ADDR_LE_DEV_PUBLIC;
2586         else
2587                 addr_type = ADDR_LE_DEV_RANDOM;
2588
2589         irk = hci_get_irk(hdev, bdaddr, addr_type);
2590         if (irk) {
2591                 bdaddr = &irk->bdaddr;
2592                 addr_type = irk->addr_type;
2593         }
2594
2595         rcu_read_lock();
2596         list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2597                 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2598                         rcu_read_unlock();
2599                         return true;
2600                 }
2601         }
2602         rcu_read_unlock();
2603
2604         return false;
2605 }
2606
2607 /* HCI command timer function */
2608 static void hci_cmd_timeout(struct work_struct *work)
2609 {
2610         struct hci_dev *hdev = container_of(work, struct hci_dev,
2611                                             cmd_timer.work);
2612
2613         if (hdev->sent_cmd) {
2614                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2615                 u16 opcode = __le16_to_cpu(sent->opcode);
2616
2617                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2618         } else {
2619                 BT_ERR("%s command tx timeout", hdev->name);
2620         }
2621
2622         atomic_set(&hdev->cmd_cnt, 1);
2623         queue_work(hdev->workqueue, &hdev->cmd_work);
2624 }
2625
2626 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2627                                           bdaddr_t *bdaddr, u8 bdaddr_type)
2628 {
2629         struct oob_data *data;
2630
2631         list_for_each_entry(data, &hdev->remote_oob_data, list) {
2632                 if (bacmp(bdaddr, &data->bdaddr) != 0)
2633                         continue;
2634                 if (data->bdaddr_type != bdaddr_type)
2635                         continue;
2636                 return data;
2637         }
2638
2639         return NULL;
2640 }
2641
2642 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2643                                u8 bdaddr_type)
2644 {
2645         struct oob_data *data;
2646
2647         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2648         if (!data)
2649                 return -ENOENT;
2650
2651         BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2652
2653         list_del(&data->list);
2654         kfree(data);
2655
2656         return 0;
2657 }
2658
2659 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2660 {
2661         struct oob_data *data, *n;
2662
2663         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2664                 list_del(&data->list);
2665                 kfree(data);
2666         }
2667 }
2668
2669 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2670                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
2671                             u8 *hash256, u8 *rand256)
2672 {
2673         struct oob_data *data;
2674
2675         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2676         if (!data) {
2677                 data = kmalloc(sizeof(*data), GFP_KERNEL);
2678                 if (!data)
2679                         return -ENOMEM;
2680
2681                 bacpy(&data->bdaddr, bdaddr);
2682                 data->bdaddr_type = bdaddr_type;
2683                 list_add(&data->list, &hdev->remote_oob_data);
2684         }
2685
2686         if (hash192 && rand192) {
2687                 memcpy(data->hash192, hash192, sizeof(data->hash192));
2688                 memcpy(data->rand192, rand192, sizeof(data->rand192));
2689                 if (hash256 && rand256)
2690                         data->present = 0x03;
2691         } else {
2692                 memset(data->hash192, 0, sizeof(data->hash192));
2693                 memset(data->rand192, 0, sizeof(data->rand192));
2694                 if (hash256 && rand256)
2695                         data->present = 0x02;
2696                 else
2697                         data->present = 0x00;
2698         }
2699
2700         if (hash256 && rand256) {
2701                 memcpy(data->hash256, hash256, sizeof(data->hash256));
2702                 memcpy(data->rand256, rand256, sizeof(data->rand256));
2703         } else {
2704                 memset(data->hash256, 0, sizeof(data->hash256));
2705                 memset(data->rand256, 0, sizeof(data->rand256));
2706                 if (hash192 && rand192)
2707                         data->present = 0x01;
2708         }
2709
2710         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2711
2712         return 0;
2713 }
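
/* Summary (illustrative, not from the original file) of the
 * data->present encoding produced above:
 *
 *	0x00 -> no OOB values available
 *	0x01 -> P-192 values only
 *	0x02 -> P-256 values only
 *	0x03 -> both P-192 and P-256 values
 */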
2714
2715 /* This function requires the caller holds hdev->lock */
2716 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2717 {
2718         struct adv_info *adv_instance;
2719
2720         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2721                 if (adv_instance->instance == instance)
2722                         return adv_instance;
2723         }
2724
2725         return NULL;
2726 }
2727
2728 /* This function requires the caller holds hdev->lock */
2729 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
2730         struct adv_info *cur_instance;
2731
2732         cur_instance = hci_find_adv_instance(hdev, instance);
2733         if (!cur_instance)
2734                 return NULL;
2735
2736         if (cur_instance == list_last_entry(&hdev->adv_instances,
2737                                             struct adv_info, list))
2738                 return list_first_entry(&hdev->adv_instances,
2739                                         struct adv_info, list);
2740         else
2741                 return list_next_entry(cur_instance, list);
2742 }
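
/* Worked example (illustrative, not from the original file): with
 * instances 1, 2 and 3 registered, successive calls rotate
 * 1 -> 2 -> 3 -> 1, which lets the advertising code round-robin
 * through all configured instances.
 */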
2743
2744 /* This function requires the caller holds hdev->lock */
2745 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2746 {
2747         struct adv_info *adv_instance;
2748
2749         adv_instance = hci_find_adv_instance(hdev, instance);
2750         if (!adv_instance)
2751                 return -ENOENT;
2752
2753         BT_DBG("%s removing %d", hdev->name, instance);
2754
2755         if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2756                 cancel_delayed_work(&hdev->adv_instance_expire);
2757                 hdev->adv_instance_timeout = 0;
2758         }
2759
2760         list_del(&adv_instance->list);
2761         kfree(adv_instance);
2762
2763         hdev->adv_instance_cnt--;
2764
2765         return 0;
2766 }
2767
2768 /* This function requires the caller holds hdev->lock */
2769 void hci_adv_instances_clear(struct hci_dev *hdev)
2770 {
2771         struct adv_info *adv_instance, *n;
2772
2773         if (hdev->adv_instance_timeout) {
2774                 cancel_delayed_work(&hdev->adv_instance_expire);
2775                 hdev->adv_instance_timeout = 0;
2776         }
2777
2778         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2779                 list_del(&adv_instance->list);
2780                 kfree(adv_instance);
2781         }
2782
2783         hdev->adv_instance_cnt = 0;
2784 }
2785
2786 /* This function requires the caller holds hdev->lock */
2787 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2788                          u16 adv_data_len, u8 *adv_data,
2789                          u16 scan_rsp_len, u8 *scan_rsp_data,
2790                          u16 timeout, u16 duration)
2791 {
2792         struct adv_info *adv_instance;
2793
2794         adv_instance = hci_find_adv_instance(hdev, instance);
2795         if (adv_instance) {
2796                 memset(adv_instance->adv_data, 0,
2797                        sizeof(adv_instance->adv_data));
2798                 memset(adv_instance->scan_rsp_data, 0,
2799                        sizeof(adv_instance->scan_rsp_data));
2800         } else {
2801                 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2802                     instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2803                         return -EOVERFLOW;
2804
2805                 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2806                 if (!adv_instance)
2807                         return -ENOMEM;
2808
2809                 adv_instance->pending = true;
2810                 adv_instance->instance = instance;
2811                 list_add(&adv_instance->list, &hdev->adv_instances);
2812                 hdev->adv_instance_cnt++;
2813         }
2814
2815         adv_instance->flags = flags;
2816         adv_instance->adv_data_len = adv_data_len;
2817         adv_instance->scan_rsp_len = scan_rsp_len;
2818
2819         if (adv_data_len)
2820                 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2821
2822         if (scan_rsp_len)
2823                 memcpy(adv_instance->scan_rsp_data,
2824                        scan_rsp_data, scan_rsp_len);
2825
2826         adv_instance->timeout = timeout;
2827         adv_instance->remaining_time = timeout;
2828
2829         if (duration == 0)
2830                 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2831         else
2832                 adv_instance->duration = duration;
2833
2834         BT_DBG("%s for %d", hdev->name, instance);
2835
2836         return 0;
2837 }
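
/* Illustrative sketch (not part of the original file): registering a
 * minimal instance carrying a single TX-power AD element, with a 60
 * second timeout and the default duration, might look like:
 *
 *	u8 ad[] = { 0x02, 0x0a, 0x00 };	// len, type (TX power), value
 *
 *	hci_add_adv_instance(hdev, 1, 0, sizeof(ad), ad,
 *			     0, NULL, 60, 0);
 */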
2838
2839 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2840                                          bdaddr_t *bdaddr, u8 type)
2841 {
2842         struct bdaddr_list *b;
2843
2844         list_for_each_entry(b, bdaddr_list, list) {
2845                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2846                         return b;
2847         }
2848
2849         return NULL;
2850 }
2851
2852 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2853 {
2854         struct list_head *p, *n;
2855
2856         list_for_each_safe(p, n, bdaddr_list) {
2857                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2858
2859                 list_del(p);
2860                 kfree(b);
2861         }
2862 }
2863
2864 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2865 {
2866         struct bdaddr_list *entry;
2867
2868         if (!bacmp(bdaddr, BDADDR_ANY))
2869                 return -EBADF;
2870
2871         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2872                 return -EEXIST;
2873
2874         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2875         if (!entry)
2876                 return -ENOMEM;
2877
2878         bacpy(&entry->bdaddr, bdaddr);
2879         entry->bdaddr_type = type;
2880
2881         list_add(&entry->list, list);
2882
2883         return 0;
2884 }
2885
2886 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2887 {
2888         struct bdaddr_list *entry;
2889
2890         if (!bacmp(bdaddr, BDADDR_ANY)) {
2891                 hci_bdaddr_list_clear(list);
2892                 return 0;
2893         }
2894
2895         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2896         if (!entry)
2897                 return -ENOENT;
2898
2899         list_del(&entry->list);
2900         kfree(entry);
2901
2902         return 0;
2903 }
2904
2905 /* This function requires the caller holds hdev->lock */
2906 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2907                                                bdaddr_t *addr, u8 addr_type)
2908 {
2909         struct hci_conn_params *params;
2910
2911         list_for_each_entry(params, &hdev->le_conn_params, list) {
2912                 if (bacmp(&params->addr, addr) == 0 &&
2913                     params->addr_type == addr_type) {
2914                         return params;
2915                 }
2916         }
2917
2918         return NULL;
2919 }
2920
2921 /* This function requires the caller holds hdev->lock */
2922 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2923                                                   bdaddr_t *addr, u8 addr_type)
2924 {
2925         struct hci_conn_params *param;
2926
2927         list_for_each_entry(param, list, action) {
2928                 if (bacmp(&param->addr, addr) == 0 &&
2929                     param->addr_type == addr_type)
2930                         return param;
2931         }
2932
2933         return NULL;
2934 }
2935
2936 /* This function requires the caller holds hdev->lock */
2937 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2938                                             bdaddr_t *addr, u8 addr_type)
2939 {
2940         struct hci_conn_params *params;
2941
2942         params = hci_conn_params_lookup(hdev, addr, addr_type);
2943         if (params)
2944                 return params;
2945
2946         params = kzalloc(sizeof(*params), GFP_KERNEL);
2947         if (!params) {
2948                 BT_ERR("Out of memory");
2949                 return NULL;
2950         }
2951
2952         bacpy(&params->addr, addr);
2953         params->addr_type = addr_type;
2954
2955         list_add(&params->list, &hdev->le_conn_params);
2956         INIT_LIST_HEAD(&params->action);
2957
2958         params->conn_min_interval = hdev->le_conn_min_interval;
2959         params->conn_max_interval = hdev->le_conn_max_interval;
2960         params->conn_latency = hdev->le_conn_latency;
2961         params->supervision_timeout = hdev->le_supv_timeout;
2962         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2963
2964         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2965
2966         return params;
2967 }
2968
2969 static void hci_conn_params_free(struct hci_conn_params *params)
2970 {
2971         if (params->conn) {
2972                 hci_conn_drop(params->conn);
2973                 hci_conn_put(params->conn);
2974         }
2975
2976         list_del(&params->action);
2977         list_del(&params->list);
2978         kfree(params);
2979 }
2980
2981 /* This function requires the caller holds hdev->lock */
2982 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2983 {
2984         struct hci_conn_params *params;
2985
2986         params = hci_conn_params_lookup(hdev, addr, addr_type);
2987         if (!params)
2988                 return;
2989
2990         hci_conn_params_free(params);
2991
2992         hci_update_background_scan(hdev);
2993
2994         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2995 }
2996
2997 /* This function requires the caller holds hdev->lock */
2998 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2999 {
3000         struct hci_conn_params *params, *tmp;
3001
3002         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3003                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3004                         continue;
3005
3006                 /* If trying to establish a one-time connection to a
3007                  * disabled device, leave the params but mark them as just once.
3008                  */
3009                 if (params->explicit_connect) {
3010                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3011                         continue;
3012                 }
3013
3014                 list_del(&params->list);
3015                 kfree(params);
3016         }
3017
3018         BT_DBG("All disabled LE connection parameters were removed");
3019 }
3020
3021 /* This function requires the caller holds hdev->lock */
3022 void hci_conn_params_clear_all(struct hci_dev *hdev)
3023 {
3024         struct hci_conn_params *params, *tmp;
3025
3026         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3027                 hci_conn_params_free(params);
3028
3029         hci_update_background_scan(hdev);
3030
3031         BT_DBG("All LE connection parameters were removed");
3032 }
3033
3034 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3035 {
3036         if (status) {
3037                 BT_ERR("Failed to start inquiry: status %d", status);
3038
3039                 hci_dev_lock(hdev);
3040                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3041                 hci_dev_unlock(hdev);
3042                 return;
3043         }
3044 }
3045
3046 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
3047                                           u16 opcode)
3048 {
3049         /* General inquiry access code (GIAC) */
3050         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3051         struct hci_cp_inquiry cp;
3052         int err;
3053
3054         if (status) {
3055                 BT_ERR("Failed to disable LE scanning: status %d", status);
3056                 return;
3057         }
3058
3059         hdev->discovery.scan_start = 0;
3060
3061         switch (hdev->discovery.type) {
3062         case DISCOV_TYPE_LE:
3063                 hci_dev_lock(hdev);
3064                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3065                 hci_dev_unlock(hdev);
3066                 break;
3067
3068         case DISCOV_TYPE_INTERLEAVED:
3069                 hci_dev_lock(hdev);
3070
3071                 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3072                              &hdev->quirks)) {
3073                         /* If we were running an LE-only scan, change the
3074                          * discovery state. If we were running LE and BR/EDR
3075                          * inquiry simultaneously and the BR/EDR inquiry has
3076                          * already finished, stop discovery; otherwise the
3077                          * BR/EDR inquiry will stop it when done. If we are
3078                          * resolving a remote name, leave the state alone.
3079                          */
3080                         if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3081                             hdev->discovery.state != DISCOVERY_RESOLVING)
3082                                 hci_discovery_set_state(hdev,
3083                                                         DISCOVERY_STOPPED);
3084                 } else {
3085                         struct hci_request req;
3086
3087                         hci_inquiry_cache_flush(hdev);
3088
3089                         hci_req_init(&req, hdev);
3090
3091                         memset(&cp, 0, sizeof(cp));
3092                         memcpy(&cp.lap, lap, sizeof(cp.lap));
3093                         cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3094                         hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3095
3096                         err = hci_req_run(&req, inquiry_complete);
3097                         if (err) {
3098                                 BT_ERR("Inquiry request failed: err %d", err);
3099                                 hci_discovery_set_state(hdev,
3100                                                         DISCOVERY_STOPPED);
3101                         }
3102                 }
3103
3104                 hci_dev_unlock(hdev);
3105                 break;
3106         }
3107 }
3108
3109 static void le_scan_disable_work(struct work_struct *work)
3110 {
3111         struct hci_dev *hdev = container_of(work, struct hci_dev,
3112                                             le_scan_disable.work);
3113         struct hci_request req;
3114         int err;
3115
3116         BT_DBG("%s", hdev->name);
3117
3118         cancel_delayed_work_sync(&hdev->le_scan_restart);
3119
3120         hci_req_init(&req, hdev);
3121
3122         hci_req_add_le_scan_disable(&req);
3123
3124         err = hci_req_run(&req, le_scan_disable_work_complete);
3125         if (err)
3126                 BT_ERR("Disable LE scanning request failed: err %d", err);
3127 }
3128
3129 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3130                                           u16 opcode)
3131 {
3132         unsigned long timeout, duration, scan_start, now;
3133
3134         BT_DBG("%s", hdev->name);
3135
3136         if (status) {
3137                 BT_ERR("Failed to restart LE scan: status %d", status);
3138                 return;
3139         }
3140
3141         if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3142             !hdev->discovery.scan_start)
3143                 return;
3144
3145         /* When the scan was started, hdev->le_scan_disable was queued to
3146          * run 'duration' after scan_start. During scan restart that work
3147          * has been canceled, so we need to queue it again with the proper
3148          * remaining timeout to make sure the scan cannot run indefinitely.
3149          */
3150         duration = hdev->discovery.scan_duration;
3151         scan_start = hdev->discovery.scan_start;
3152         now = jiffies;
3153         if (now - scan_start <= duration) {
3154                 int elapsed;
3155
3156                 if (now >= scan_start)
3157                         elapsed = now - scan_start;
3158                 else
3159                         elapsed = ULONG_MAX - scan_start + now;
3160
3161                 timeout = duration - elapsed;
3162         } else {
3163                 timeout = 0;
3164         }
3165         queue_delayed_work(hdev->workqueue,
3166                            &hdev->le_scan_disable, timeout);
3167 }
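/* Editor's note, illustrative arithmetic for the computation above:
 * with duration = 10 * HZ and a scan started 4 * HZ jiffies ago,
 * elapsed = 4 * HZ and the disable work is re-queued with
 * timeout = 6 * HZ. The else branch of the elapsed calculation only
 * matters when the jiffies counter has wrapped and "now" is
 * numerically smaller than scan_start.
 */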
3168
3169 static void le_scan_restart_work(struct work_struct *work)
3170 {
3171         struct hci_dev *hdev = container_of(work, struct hci_dev,
3172                                             le_scan_restart.work);
3173         struct hci_request req;
3174         struct hci_cp_le_set_scan_enable cp;
3175         int err;
3176
3177         BT_DBG("%s", hdev->name);
3178
3179         /* If the controller is not scanning, we are done. */
3180         if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3181                 return;
3182
3183         hci_req_init(&req, hdev);
3184
3185         hci_req_add_le_scan_disable(&req);
3186
3187         memset(&cp, 0, sizeof(cp));
3188         cp.enable = LE_SCAN_ENABLE;
3189         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3190         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3191
3192         err = hci_req_run(&req, le_scan_restart_work_complete);
3193         if (err)
3194                 BT_ERR("Restart LE scan request failed: err %d", err);
3195 }
3196
3197 /* Copy the Identity Address of the controller.
3198  *
3199  * If the controller has a public BD_ADDR, then by default use that one.
3200  * If this is an LE-only controller without a public address, default to
3201  * the static random address.
3202  *
3203  * For debugging purposes it is possible to force controllers with a
3204  * public address to use the static random address instead.
3205  *
3206  * If BR/EDR has been disabled on a dual-mode controller and
3207  * userspace has configured a static address, then that address
3208  * becomes the identity address instead of the public BR/EDR address.
3209  */
3210 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3211                                u8 *bdaddr_type)
3212 {
3213         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3214             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3215             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3216              bacmp(&hdev->static_addr, BDADDR_ANY))) {
3217                 bacpy(bdaddr, &hdev->static_addr);
3218                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3219         } else {
3220                 bacpy(bdaddr, &hdev->bdaddr);
3221                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3222         }
3223 }
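/* Illustrative usage sketch (editor's note, not part of the original
 * source): a caller needing the identity address would do:
 *
 *      bdaddr_t bdaddr;
 *      u8 bdaddr_type;
 *
 *      hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
 *
 * after which bdaddr_type is either ADDR_LE_DEV_PUBLIC or
 * ADDR_LE_DEV_RANDOM, matching the rules described above.
 */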
3224
3225 /* Alloc HCI device */
3226 struct hci_dev *hci_alloc_dev(void)
3227 {
3228         struct hci_dev *hdev;
3229
3230         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3231         if (!hdev)
3232                 return NULL;
3233
3234         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3235         hdev->esco_type = (ESCO_HV1);
3236         hdev->link_mode = (HCI_LM_ACCEPT);
3237         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3238         hdev->io_capability = 0x03;     /* No Input No Output */
3239         hdev->manufacturer = 0xffff;    /* Default to internal use */
3240         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3241         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3242         hdev->adv_instance_cnt = 0;
3243         hdev->cur_adv_instance = 0x00;
3244         hdev->adv_instance_timeout = 0;
3245
3246         hdev->sniff_max_interval = 800;
3247         hdev->sniff_min_interval = 80;
3248
3249         hdev->le_adv_channel_map = 0x07;
3250         hdev->le_adv_min_interval = 0x0800;
3251         hdev->le_adv_max_interval = 0x0800;
3252         hdev->le_scan_interval = 0x0060;
3253         hdev->le_scan_window = 0x0030;
3254         hdev->le_conn_min_interval = 0x0028;
3255         hdev->le_conn_max_interval = 0x0038;
3256         hdev->le_conn_latency = 0x0000;
3257         hdev->le_supv_timeout = 0x002a;
3258         hdev->le_def_tx_len = 0x001b;
3259         hdev->le_def_tx_time = 0x0148;
3260         hdev->le_max_tx_len = 0x001b;
3261         hdev->le_max_tx_time = 0x0148;
3262         hdev->le_max_rx_len = 0x001b;
3263         hdev->le_max_rx_time = 0x0148;
3264
3265         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3266         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3267         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3268         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3269
3270         mutex_init(&hdev->lock);
3271         mutex_init(&hdev->req_lock);
3272
3273         INIT_LIST_HEAD(&hdev->mgmt_pending);
3274         INIT_LIST_HEAD(&hdev->blacklist);
3275         INIT_LIST_HEAD(&hdev->whitelist);
3276         INIT_LIST_HEAD(&hdev->uuids);
3277         INIT_LIST_HEAD(&hdev->link_keys);
3278         INIT_LIST_HEAD(&hdev->long_term_keys);
3279         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3280         INIT_LIST_HEAD(&hdev->remote_oob_data);
3281         INIT_LIST_HEAD(&hdev->le_white_list);
3282         INIT_LIST_HEAD(&hdev->le_conn_params);
3283         INIT_LIST_HEAD(&hdev->pend_le_conns);
3284         INIT_LIST_HEAD(&hdev->pend_le_reports);
3285         INIT_LIST_HEAD(&hdev->conn_hash.list);
3286         INIT_LIST_HEAD(&hdev->adv_instances);
3287
3288         INIT_WORK(&hdev->rx_work, hci_rx_work);
3289         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3290         INIT_WORK(&hdev->tx_work, hci_tx_work);
3291         INIT_WORK(&hdev->power_on, hci_power_on);
3292         INIT_WORK(&hdev->error_reset, hci_error_reset);
3293
3294         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3295         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3296         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3297         INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3298         INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3299
3300         skb_queue_head_init(&hdev->rx_q);
3301         skb_queue_head_init(&hdev->cmd_q);
3302         skb_queue_head_init(&hdev->raw_q);
3303
3304         init_waitqueue_head(&hdev->req_wait_q);
3305
3306         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3307
3308         hci_init_sysfs(hdev);
3309         discovery_init(hdev);
3310
3311         return hdev;
3312 }
3313 EXPORT_SYMBOL(hci_alloc_dev);
3314
3315 /* Free HCI device */
3316 void hci_free_dev(struct hci_dev *hdev)
3317 {
3318         /* Will be freed via the device release callback */
3319         put_device(&hdev->dev);
3320 }
3321 EXPORT_SYMBOL(hci_free_dev);
3322
3323 /* Register HCI device */
3324 int hci_register_dev(struct hci_dev *hdev)
3325 {
3326         int id, error;
3327
3328         if (!hdev->open || !hdev->close || !hdev->send)
3329                 return -EINVAL;
3330
3331         /* Do not allow HCI_AMP devices to register at index 0,
3332          * so the index can be used as the AMP controller ID.
3333          */
3334         switch (hdev->dev_type) {
3335         case HCI_BREDR:
3336                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3337                 break;
3338         case HCI_AMP:
3339                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3340                 break;
3341         default:
3342                 return -EINVAL;
3343         }
3344
3345         if (id < 0)
3346                 return id;
3347
3348         sprintf(hdev->name, "hci%d", id);
3349         hdev->id = id;
3350
3351         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3352
3353         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3354                                           WQ_MEM_RECLAIM, 1, hdev->name);
3355         if (!hdev->workqueue) {
3356                 error = -ENOMEM;
3357                 goto err;
3358         }
3359
3360         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3361                                               WQ_MEM_RECLAIM, 1, hdev->name);
3362         if (!hdev->req_workqueue) {
3363                 destroy_workqueue(hdev->workqueue);
3364                 error = -ENOMEM;
3365                 goto err;
3366         }
3367
3368         if (!IS_ERR_OR_NULL(bt_debugfs))
3369                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3370
3371         dev_set_name(&hdev->dev, "%s", hdev->name);
3372
3373         error = device_add(&hdev->dev);
3374         if (error < 0)
3375                 goto err_wqueue;
3376
3377         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3378                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3379                                     hdev);
3380         if (hdev->rfkill) {
3381                 if (rfkill_register(hdev->rfkill) < 0) {
3382                         rfkill_destroy(hdev->rfkill);
3383                         hdev->rfkill = NULL;
3384                 }
3385         }
3386
3387         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3388                 hci_dev_set_flag(hdev, HCI_RFKILLED);
3389
3390         hci_dev_set_flag(hdev, HCI_SETUP);
3391         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3392
3393         if (hdev->dev_type == HCI_BREDR) {
3394                 /* Assume BR/EDR support until proven otherwise (such as
3395                  * through reading the supported features during init).
3396                  */
3397                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3398         }
3399
3400         write_lock(&hci_dev_list_lock);
3401         list_add(&hdev->list, &hci_dev_list);
3402         write_unlock(&hci_dev_list_lock);
3403
3404         /* Devices that are marked for raw-only usage are unconfigured
3405          * and should not be included in normal operation.
3406          */
3407         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3408                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3409
3410         hci_sock_dev_event(hdev, HCI_DEV_REG);
3411         hci_dev_hold(hdev);
3412
3413         queue_work(hdev->req_workqueue, &hdev->power_on);
3414
3415         return id;
3416
3417 err_wqueue:
3418         destroy_workqueue(hdev->workqueue);
3419         destroy_workqueue(hdev->req_workqueue);
3420 err:
3421         ida_simple_remove(&hci_index_ida, hdev->id);
3422
3423         return error;
3424 }
3425 EXPORT_SYMBOL(hci_register_dev);
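/* Typical driver lifecycle sketch (editor's note; a minimal outline
 * implied by the checks above, not a complete driver, and
 * my_open/my_close/my_send are hypothetical callbacks). Since
 * hci_register_dev() rejects a device unless open, close and send
 * are all set, a transport driver roughly does:
 *
 *      struct hci_dev *hdev;
 *      int err;
 *
 *      hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *      hdev->open  = my_open;
 *      hdev->close = my_close;
 *      hdev->send  = my_send;
 *      err = hci_register_dev(hdev);
 *      if (err < 0)
 *              hci_free_dev(hdev);
 *
 * and on removal calls hci_unregister_dev() followed by
 * hci_free_dev().
 */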
3426
3427 /* Unregister HCI device */
3428 void hci_unregister_dev(struct hci_dev *hdev)
3429 {
3430         int id;
3431
3432         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3433
3434         hci_dev_set_flag(hdev, HCI_UNREGISTER);
3435
3436         id = hdev->id;
3437
3438         write_lock(&hci_dev_list_lock);
3439         list_del(&hdev->list);
3440         write_unlock(&hci_dev_list_lock);
3441
3442         hci_dev_do_close(hdev);
3443
3444         cancel_work_sync(&hdev->power_on);
3445
3446         if (!test_bit(HCI_INIT, &hdev->flags) &&
3447             !hci_dev_test_flag(hdev, HCI_SETUP) &&
3448             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3449                 hci_dev_lock(hdev);
3450                 mgmt_index_removed(hdev);
3451                 hci_dev_unlock(hdev);
3452         }
3453
3454         /* mgmt_index_removed should take care of emptying the
3455          * pending list */
3456         BUG_ON(!list_empty(&hdev->mgmt_pending));
3457
3458         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3459
3460         if (hdev->rfkill) {
3461                 rfkill_unregister(hdev->rfkill);
3462                 rfkill_destroy(hdev->rfkill);
3463         }
3464
3465         device_del(&hdev->dev);
3466
3467         debugfs_remove_recursive(hdev->debugfs);
3468
3469         destroy_workqueue(hdev->workqueue);
3470         destroy_workqueue(hdev->req_workqueue);
3471
3472         hci_dev_lock(hdev);
3473         hci_bdaddr_list_clear(&hdev->blacklist);
3474         hci_bdaddr_list_clear(&hdev->whitelist);
3475         hci_uuids_clear(hdev);
3476         hci_link_keys_clear(hdev);
3477         hci_smp_ltks_clear(hdev);
3478         hci_smp_irks_clear(hdev);
3479         hci_remote_oob_data_clear(hdev);
3480         hci_adv_instances_clear(hdev);
3481         hci_bdaddr_list_clear(&hdev->le_white_list);
3482         hci_conn_params_clear_all(hdev);
3483         hci_discovery_filter_clear(hdev);
3484         hci_dev_unlock(hdev);
3485
3486         hci_dev_put(hdev);
3487
3488         ida_simple_remove(&hci_index_ida, id);
3489 }
3490 EXPORT_SYMBOL(hci_unregister_dev);
3491
3492 /* Suspend HCI device */
3493 int hci_suspend_dev(struct hci_dev *hdev)
3494 {
3495         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3496         return 0;
3497 }
3498 EXPORT_SYMBOL(hci_suspend_dev);
3499
3500 /* Resume HCI device */
3501 int hci_resume_dev(struct hci_dev *hdev)
3502 {
3503         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3504         return 0;
3505 }
3506 EXPORT_SYMBOL(hci_resume_dev);
3507
3508 /* Reset HCI device */
3509 int hci_reset_dev(struct hci_dev *hdev)
3510 {
3511         const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
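        /* Editor's note: these three bytes form a complete HCI event
         * packet: the event code (HCI_EV_HARDWARE_ERROR), a parameter
         * length of 0x01 and the hardware error code itself (0x00).
         */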
3512         struct sk_buff *skb;
3513
3514         skb = bt_skb_alloc(3, GFP_ATOMIC);
3515         if (!skb)
3516                 return -ENOMEM;
3517
3518         bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3519         memcpy(skb_put(skb, 3), hw_err, 3);
3520
3521         /* Send Hardware Error to upper stack */
3522         return hci_recv_frame(hdev, skb);
3523 }
3524 EXPORT_SYMBOL(hci_reset_dev);
3525
3526 /* Receive frame from HCI drivers */
3527 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3528 {
3529         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3530                       !test_bit(HCI_INIT, &hdev->flags))) {
3531                 kfree_skb(skb);
3532                 return -ENXIO;
3533         }
3534
3535         if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3536             bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3537             bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3538                 kfree_skb(skb);
3539                 return -EINVAL;
3540         }
3541
3542         /* Incoming skb */
3543         bt_cb(skb)->incoming = 1;
3544
3545         /* Time stamp */
3546         __net_timestamp(skb);
3547
3548         skb_queue_tail(&hdev->rx_q, skb);
3549         queue_work(hdev->workqueue, &hdev->rx_work);
3550
3551         return 0;
3552 }
3553 EXPORT_SYMBOL(hci_recv_frame);
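/* Illustrative driver-side sketch (editor's note; buf and len stand
 * for the driver's own receive buffer and are assumptions): a
 * transport driver that has reassembled a complete event hands it up
 * with:
 *
 *      skb = bt_skb_alloc(len, GFP_ATOMIC);
 *      if (!skb)
 *              return -ENOMEM;
 *      bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *      memcpy(skb_put(skb, len), buf, len);
 *      err = hci_recv_frame(hdev, skb);
 *
 * Any pkt_type other than event, ACL data or SCO data is rejected
 * with -EINVAL, as enforced above.
 */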
3554
3555 /* Receive diagnostic message from HCI drivers */
3556 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3557 {
3558         /* Mark as diagnostic packet */
3559         bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3560
3561         /* Time stamp */
3562         __net_timestamp(skb);
3563
3564         skb_queue_tail(&hdev->rx_q, skb);
3565         queue_work(hdev->workqueue, &hdev->rx_work);
3566
3567         return 0;
3568 }
3569 EXPORT_SYMBOL(hci_recv_diag);
3570
3571 /* ---- Interface to upper protocols ---- */
3572
3573 int hci_register_cb(struct hci_cb *cb)
3574 {
3575         BT_DBG("%p name %s", cb, cb->name);
3576
3577         mutex_lock(&hci_cb_list_lock);
3578         list_add_tail(&cb->list, &hci_cb_list);
3579         mutex_unlock(&hci_cb_list_lock);
3580
3581         return 0;
3582 }
3583 EXPORT_SYMBOL(hci_register_cb);
3584
3585 int hci_unregister_cb(struct hci_cb *cb)
3586 {
3587         BT_DBG("%p name %s", cb, cb->name);
3588
3589         mutex_lock(&hci_cb_list_lock);
3590         list_del(&cb->list);
3591         mutex_unlock(&hci_cb_list_lock);
3592
3593         return 0;
3594 }
3595 EXPORT_SYMBOL(hci_unregister_cb);
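/* Minimal registration sketch (editor's note; my_cb and "my_proto"
 * are hypothetical): an upper protocol registers a struct hci_cb
 * once at module init and removes it on exit:
 *
 *      static struct hci_cb my_cb = {
 *              .name = "my_proto",
 *      };
 *
 *      hci_register_cb(&my_cb);
 *      ...
 *      hci_unregister_cb(&my_cb);
 *
 * Real users such as L2CAP also fill in the connection callbacks
 * declared in struct hci_cb.
 */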
3596
3597 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3598 {
3599         int err;
3600
3601         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3602
3603         /* Time stamp */
3604         __net_timestamp(skb);
3605
3606         /* Send copy to monitor */
3607         hci_send_to_monitor(hdev, skb);
3608
3609         if (atomic_read(&hdev->promisc)) {
3610                 /* Send copy to the sockets */
3611                 hci_send_to_sock(hdev, skb);
3612         }
3613
3614         /* Get rid of skb owner, prior to sending to the driver. */
3615         skb_orphan(skb);
3616
3617         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3618                 kfree_skb(skb);
3619                 return;
3620         }
3621
3622         err = hdev->send(hdev, skb);
3623         if (err < 0) {
3624                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3625                 kfree_skb(skb);
3626         }
3627 }
3628
3629 /* Send HCI command */
3630 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3631                  const void *param)
3632 {
3633         struct sk_buff *skb;
3634
3635         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3636
3637         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3638         if (!skb) {
3639                 BT_ERR("%s no memory for command", hdev->name);
3640                 return -ENOMEM;
3641         }
3642
3643         /* Stand-alone HCI commands must be flagged as
3644          * single-command requests.
3645          */
3646         bt_cb(skb)->hci.req_start = true;
3647
3648         skb_queue_tail(&hdev->cmd_q, skb);
3649         queue_work(hdev->workqueue, &hdev->cmd_work);
3650
3651         return 0;
3652 }
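/* Usage sketch (editor's note), mirroring the inquiry setup earlier
 * in this file but as a stand-alone command instead of a request
 * (lap here is the caller's LAP buffer):
 *
 *      struct hci_cp_inquiry cp;
 *
 *      memset(&cp, 0, sizeof(cp));
 *      memcpy(&cp.lap, lap, sizeof(cp.lap));
 *      cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
 *      hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
 */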
3653
3654 /* Get data from the previously sent command */
3655 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3656 {
3657         struct hci_command_hdr *hdr;
3658
3659         if (!hdev->sent_cmd)
3660                 return NULL;
3661
3662         hdr = (void *) hdev->sent_cmd->data;
3663
3664         if (hdr->opcode != cpu_to_le16(opcode))
3665                 return NULL;
3666
3667         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3668
3669         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3670 }
3671
3672 /* Send HCI command and wait for command complete event */
3673 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3674                              const void *param, u32 timeout)
3675 {
3676         struct sk_buff *skb;
3677
3678         if (!test_bit(HCI_UP, &hdev->flags))
3679                 return ERR_PTR(-ENETDOWN);
3680
3681         bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3682
3683         hci_req_lock(hdev);
3684         skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3685         hci_req_unlock(hdev);
3686
3687         return skb;
3688 }
3689 EXPORT_SYMBOL(hci_cmd_sync);
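/* Usage sketch (editor's note): the caller receives either the
 * command complete skb or an ERR_PTR, and owns the former:
 *
 *      skb = hci_cmd_sync(hdev, opcode, plen, param, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      ... inspect skb->data ...
 *      kfree_skb(skb);
 */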
3690
3691 /* Send ACL data */
3692 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3693 {
3694         struct hci_acl_hdr *hdr;
3695         int len = skb->len;
3696
3697         skb_push(skb, HCI_ACL_HDR_SIZE);
3698         skb_reset_transport_header(skb);
3699         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3700         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3701         hdr->dlen   = cpu_to_le16(len);
3702 }
3703
3704 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3705                           struct sk_buff *skb, __u16 flags)
3706 {
3707         struct hci_conn *conn = chan->conn;
3708         struct hci_dev *hdev = conn->hdev;
3709         struct sk_buff *list;
3710
3711         skb->len = skb_headlen(skb);
3712         skb->data_len = 0;
3713
3714         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3715
3716         switch (hdev->dev_type) {
3717         case HCI_BREDR:
3718                 hci_add_acl_hdr(skb, conn->handle, flags);
3719                 break;
3720         case HCI_AMP:
3721                 hci_add_acl_hdr(skb, chan->handle, flags);
3722                 break;
3723         default:
3724                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3725                 return;
3726         }
3727
3728         list = skb_shinfo(skb)->frag_list;
3729         if (!list) {
3730                 /* Non-fragmented */
3731                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3732
3733                 skb_queue_tail(queue, skb);
3734         } else {
3735                 /* Fragmented */
3736                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3737
3738                 skb_shinfo(skb)->frag_list = NULL;
3739
3740                 /* Queue all fragments atomically. We need to use
3741                  * spin_lock_bh here because of 6LoWPAN links: there this
3742                  * function is called from softirq context, and taking a
3743                  * normal spin lock could deadlock.
3744                  */
3745                 spin_lock_bh(&queue->lock);
3746
3747                 __skb_queue_tail(queue, skb);
3748
3749                 flags &= ~ACL_START;
3750                 flags |= ACL_CONT;
3751                 do {
3752                         skb = list; list = list->next;
3753
3754                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3755                         hci_add_acl_hdr(skb, conn->handle, flags);
3756
3757                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3758
3759                         __skb_queue_tail(queue, skb);
3760                 } while (list);
3761
3762                 spin_unlock_bh(&queue->lock);
3763         }
3764 }
3765
3766 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3767 {
3768         struct hci_dev *hdev = chan->conn->hdev;
3769
3770         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3771
3772         hci_queue_acl(chan, &chan->data_q, skb, flags);
3773
3774         queue_work(hdev->workqueue, &hdev->tx_work);
3775 }
3776
3777 /* Send SCO data */
3778 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3779 {
3780         struct hci_dev *hdev = conn->hdev;
3781         struct hci_sco_hdr hdr;
3782
3783         BT_DBG("%s len %d", hdev->name, skb->len);
3784
3785         hdr.handle = cpu_to_le16(conn->handle);
3786         hdr.dlen   = skb->len;
3787
3788         skb_push(skb, HCI_SCO_HDR_SIZE);
3789         skb_reset_transport_header(skb);
3790         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3791
3792         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3793
3794         skb_queue_tail(&conn->data_q, skb);
3795         queue_work(hdev->workqueue, &hdev->tx_work);
3796 }
3797
3798 /* ---- HCI TX task (outgoing data) ---- */
3799
3800 /* HCI Connection scheduler */
3801 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3802                                      int *quote)
3803 {
3804         struct hci_conn_hash *h = &hdev->conn_hash;
3805         struct hci_conn *conn = NULL, *c;
3806         unsigned int num = 0, min = ~0;
3807
3808         /* We don't have to lock the device here. Connections are always
3809          * added and removed with the TX task disabled. */
3810
3811         rcu_read_lock();
3812
3813         list_for_each_entry_rcu(c, &h->list, list) {
3814                 if (c->type != type || skb_queue_empty(&c->data_q))
3815                         continue;
3816
3817                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3818                         continue;
3819
3820                 num++;
3821
3822                 if (c->sent < min) {
3823                         min  = c->sent;
3824                         conn = c;
3825                 }
3826
3827                 if (hci_conn_num(hdev, type) == num)
3828                         break;
3829         }
3830
3831         rcu_read_unlock();
3832
3833         if (conn) {
3834                 int cnt, q;
3835
3836                 switch (conn->type) {
3837                 case ACL_LINK:
3838                         cnt = hdev->acl_cnt;
3839                         break;
3840                 case SCO_LINK:
3841                 case ESCO_LINK:
3842                         cnt = hdev->sco_cnt;
3843                         break;
3844                 case LE_LINK:
3845                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3846                         break;
3847                 default:
3848                         cnt = 0;
3849                         BT_ERR("Unknown link type");
3850                 }
3851
3852                 q = cnt / num;
3853                 *quote = q ? q : 1;
3854         } else {
3855                 *quote = 0;
3856         }
3857         BT_DBG("conn %p quote %d", conn, *quote);
3858         return conn;
3859 }
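/* Editor's note on the quota arithmetic above: with, say, acl_cnt = 8
 * free controller slots and two ACL connections with queued data, the
 * least-busy connection is picked and granted quote = 8 / 2 = 4; the
 * "q ? q : 1" fallback guarantees at least one packet per round even
 * when connections outnumber the available slots.
 */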
3860
3861 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3862 {
3863         struct hci_conn_hash *h = &hdev->conn_hash;
3864         struct hci_conn *c;
3865
3866         BT_ERR("%s link tx timeout", hdev->name);
3867
3868         rcu_read_lock();
3869
3870         /* Kill stalled connections */
3871         list_for_each_entry_rcu(c, &h->list, list) {
3872                 if (c->type == type && c->sent) {
3873                         BT_ERR("%s killing stalled connection %pMR",
3874                                hdev->name, &c->dst);
3875                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3876                 }
3877         }
3878
3879         rcu_read_unlock();
3880 }
3881
3882 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3883                                       int *quote)
3884 {
3885         struct hci_conn_hash *h = &hdev->conn_hash;
3886         struct hci_chan *chan = NULL;
3887         unsigned int num = 0, min = ~0, cur_prio = 0;
3888         struct hci_conn *conn;
3889         int cnt, q, conn_num = 0;
3890
3891         BT_DBG("%s", hdev->name);
3892
3893         rcu_read_lock();
3894
3895         list_for_each_entry_rcu(conn, &h->list, list) {
3896                 struct hci_chan *tmp;
3897
3898                 if (conn->type != type)
3899                         continue;
3900
3901                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3902                         continue;
3903
3904                 conn_num++;
3905
3906                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3907                         struct sk_buff *skb;
3908
3909                         if (skb_queue_empty(&tmp->data_q))
3910                                 continue;
3911
3912                         skb = skb_peek(&tmp->data_q);
3913                         if (skb->priority < cur_prio)
3914                                 continue;
3915
3916                         if (skb->priority > cur_prio) {
3917                                 num = 0;
3918                                 min = ~0;
3919                                 cur_prio = skb->priority;
3920                         }
3921
3922                         num++;
3923
3924                         if (conn->sent < min) {
3925                                 min  = conn->sent;
3926                                 chan = tmp;
3927                         }
3928                 }
3929
3930                 if (hci_conn_num(hdev, type) == conn_num)
3931                         break;
3932         }
3933
3934         rcu_read_unlock();
3935
3936         if (!chan)
3937                 return NULL;
3938
3939         switch (chan->conn->type) {
3940         case ACL_LINK:
3941                 cnt = hdev->acl_cnt;
3942                 break;
3943         case AMP_LINK:
3944                 cnt = hdev->block_cnt;
3945                 break;
3946         case SCO_LINK:
3947         case ESCO_LINK:
3948                 cnt = hdev->sco_cnt;
3949                 break;
3950         case LE_LINK:
3951                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3952                 break;
3953         default:
3954                 cnt = 0;
3955                 BT_ERR("Unknown link type");
3956         }
3957
3958         q = cnt / num;
3959         *quote = q ? q : 1;
3960         BT_DBG("chan %p quote %d", chan, *quote);
3961         return chan;
3962 }
3963
3964 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3965 {
3966         struct hci_conn_hash *h = &hdev->conn_hash;
3967         struct hci_conn *conn;
3968         int num = 0;
3969
3970         BT_DBG("%s", hdev->name);
3971
3972         rcu_read_lock();
3973
3974         list_for_each_entry_rcu(conn, &h->list, list) {
3975                 struct hci_chan *chan;
3976
3977                 if (conn->type != type)
3978                         continue;
3979
3980                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3981                         continue;
3982
3983                 num++;
3984
3985                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3986                         struct sk_buff *skb;
3987
3988                         if (chan->sent) {
3989                                 chan->sent = 0;
3990                                 continue;
3991                         }
3992
3993                         if (skb_queue_empty(&chan->data_q))
3994                                 continue;
3995
3996                         skb = skb_peek(&chan->data_q);
3997                         if (skb->priority >= HCI_PRIO_MAX - 1)
3998                                 continue;
3999
4000                         skb->priority = HCI_PRIO_MAX - 1;
4001
4002                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4003                                skb->priority);
4004                 }
4005
4006                 if (hci_conn_num(hdev, type) == num)
4007                         break;
4008         }
4009
4010         rcu_read_unlock();
4011 }
4013
4014 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4015 {
4016         /* Calculate count of blocks used by this packet */
4017         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4018 }
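/* Editor's note, worked example: HCI_ACL_HDR_SIZE is 4 bytes (handle
 * plus data length), so with hdev->block_len = 32 an skb of 68 bytes
 * carries 64 payload bytes and occupies DIV_ROUND_UP(64, 32) = 2
 * controller buffer blocks.
 */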
4019
4020 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4021 {
4022         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4023                 /* ACL tx timeout must be longer than maximum
4024                  * link supervision timeout (40.9 seconds) */
4025                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4026                                        HCI_ACL_TX_TIMEOUT))
4027                         hci_link_tx_to(hdev, ACL_LINK);
4028         }
4029 }
4030
4031 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4032 {
4033         unsigned int cnt = hdev->acl_cnt;
4034         struct hci_chan *chan;
4035         struct sk_buff *skb;
4036         int quote;
4037
4038         __check_timeout(hdev, cnt);
4039
4040         while (hdev->acl_cnt &&
4041                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4042                 u32 priority = (skb_peek(&chan->data_q))->priority;
4043                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4044                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4045                                skb->len, skb->priority);
4046
4047                         /* Stop if priority has changed */
4048                         if (skb->priority < priority)
4049                                 break;
4050
4051                         skb = skb_dequeue(&chan->data_q);
4052
4053                         hci_conn_enter_active_mode(chan->conn,
4054                                                    bt_cb(skb)->force_active);
4055
4056                         hci_send_frame(hdev, skb);
4057                         hdev->acl_last_tx = jiffies;
4058
4059                         hdev->acl_cnt--;
4060                         chan->sent++;
4061                         chan->conn->sent++;
4062                 }
4063         }
4064
4065         if (cnt != hdev->acl_cnt)
4066                 hci_prio_recalculate(hdev, ACL_LINK);
4067 }
4068
4069 static void hci_sched_acl_blk(struct hci_dev *hdev)
4070 {
4071         unsigned int cnt = hdev->block_cnt;
4072         struct hci_chan *chan;
4073         struct sk_buff *skb;
4074         int quote;
4075         u8 type;
4076
4077         __check_timeout(hdev, cnt);
4078
4079         BT_DBG("%s", hdev->name);
4080
4081         if (hdev->dev_type == HCI_AMP)
4082                 type = AMP_LINK;
4083         else
4084                 type = ACL_LINK;
4085
4086         while (hdev->block_cnt > 0 &&
4087                (chan = hci_chan_sent(hdev, type, &quote))) {
4088                 u32 priority = (skb_peek(&chan->data_q))->priority;
4089                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4090                         int blocks;
4091
4092                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4093                                skb->len, skb->priority);
4094
4095                         /* Stop if priority has changed */
4096                         if (skb->priority < priority)
4097                                 break;
4098
4099                         skb = skb_dequeue(&chan->data_q);
4100
4101                         blocks = __get_blocks(hdev, skb);
4102                         if (blocks > hdev->block_cnt)
4103                                 return;
4104
4105                         hci_conn_enter_active_mode(chan->conn,
4106                                                    bt_cb(skb)->force_active);
4107
4108                         hci_send_frame(hdev, skb);
4109                         hdev->acl_last_tx = jiffies;
4110
4111                         hdev->block_cnt -= blocks;
4112                         quote -= blocks;
4113
4114                         chan->sent += blocks;
4115                         chan->conn->sent += blocks;
4116                 }
4117         }
4118
4119         if (cnt != hdev->block_cnt)
4120                 hci_prio_recalculate(hdev, type);
4121 }
4122
4123 static void hci_sched_acl(struct hci_dev *hdev)
4124 {
4125         BT_DBG("%s", hdev->name);
4126
4127         /* No ACL links on this BR/EDR controller, nothing to schedule */
4128         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4129                 return;
4130
4131         /* No AMP links on this AMP controller, nothing to schedule */
4132         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4133                 return;
4134
4135         switch (hdev->flow_ctl_mode) {
4136         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4137                 hci_sched_acl_pkt(hdev);
4138                 break;
4139
4140         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4141                 hci_sched_acl_blk(hdev);
4142                 break;
4143         }
4144 }
4145
4146 /* Schedule SCO */
4147 static void hci_sched_sco(struct hci_dev *hdev)
4148 {
4149         struct hci_conn *conn;
4150         struct sk_buff *skb;
4151         int quote;
4152
4153         BT_DBG("%s", hdev->name);
4154
4155         if (!hci_conn_num(hdev, SCO_LINK))
4156                 return;
4157
4158         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4159                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4160                         BT_DBG("skb %p len %d", skb, skb->len);
4161                         hci_send_frame(hdev, skb);
4162
4163                         conn->sent++;
4164                         if (conn->sent == ~0)
4165                                 conn->sent = 0;
4166                 }
4167         }
4168 }
4169
4170 static void hci_sched_esco(struct hci_dev *hdev)
4171 {
4172         struct hci_conn *conn;
4173         struct sk_buff *skb;
4174         int quote;
4175
4176         BT_DBG("%s", hdev->name);
4177
4178         if (!hci_conn_num(hdev, ESCO_LINK))
4179                 return;
4180
4181         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4182                                                      &quote))) {
4183                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4184                         BT_DBG("skb %p len %d", skb, skb->len);
4185                         hci_send_frame(hdev, skb);
4186
4187                         conn->sent++;
4188                         if (conn->sent == ~0)
4189                                 conn->sent = 0;
4190                 }
4191         }
4192 }
4193
4194 static void hci_sched_le(struct hci_dev *hdev)
4195 {
4196         struct hci_chan *chan;
4197         struct sk_buff *skb;
4198         int quote, cnt, tmp;
4199
4200         BT_DBG("%s", hdev->name);
4201
4202         if (!hci_conn_num(hdev, LE_LINK))
4203                 return;
4204
4205         if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4206                 /* LE tx timeout must be longer than maximum
4207                  * link supervision timeout (40.9 seconds) */
4208                 if (!hdev->le_cnt && hdev->le_pkts &&
4209                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4210                         hci_link_tx_to(hdev, LE_LINK);
4211         }
4212
4213         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4214         tmp = cnt;
4215         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4216                 u32 priority = (skb_peek(&chan->data_q))->priority;
4217                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4218                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4219                                skb->len, skb->priority);
4220
4221                         /* Stop if priority has changed */
4222                         if (skb->priority < priority)
4223                                 break;
4224
4225                         skb = skb_dequeue(&chan->data_q);
4226
4227                         hci_send_frame(hdev, skb);
4228                         hdev->le_last_tx = jiffies;
4229
4230                         cnt--;
4231                         chan->sent++;
4232                         chan->conn->sent++;
4233                 }
4234         }
4235
4236         if (hdev->le_pkts)
4237                 hdev->le_cnt = cnt;
4238         else
4239                 hdev->acl_cnt = cnt;
4240
4241         if (cnt != tmp)
4242                 hci_prio_recalculate(hdev, LE_LINK);
4243 }
4244
4245 static void hci_tx_work(struct work_struct *work)
4246 {
4247         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4248         struct sk_buff *skb;
4249
4250         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4251                hdev->sco_cnt, hdev->le_cnt);
4252
4253         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4254                 /* Schedule queues and send stuff to HCI driver */
4255                 hci_sched_acl(hdev);
4256                 hci_sched_sco(hdev);
4257                 hci_sched_esco(hdev);
4258                 hci_sched_le(hdev);
4259         }
4260
4261         /* Send queued raw (unknown type) packets */
4262         while ((skb = skb_dequeue(&hdev->raw_q)))
4263                 hci_send_frame(hdev, skb);
4264 }
4265
4266 /* ----- HCI RX task (incoming data processing) ----- */
4267
4268 /* ACL data packet */
4269 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4270 {
4271         struct hci_acl_hdr *hdr = (void *) skb->data;
4272         struct hci_conn *conn;
4273         __u16 handle, flags;
4274
4275         skb_pull(skb, HCI_ACL_HDR_SIZE);
4276
4277         handle = __le16_to_cpu(hdr->handle);
4278         flags  = hci_flags(handle);
4279         handle = hci_handle(handle);
4280
4281         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4282                handle, flags);
4283
4284         hdev->stat.acl_rx++;
4285
4286         hci_dev_lock(hdev);
4287         conn = hci_conn_hash_lookup_handle(hdev, handle);
4288         hci_dev_unlock(hdev);
4289
4290         if (conn) {
4291                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4292
4293                 /* Send to upper protocol */
4294                 l2cap_recv_acldata(conn, skb, flags);
4295                 return;
4296         }
4297
4298         BT_ERR("%s ACL packet for unknown connection handle %d",
4299                hdev->name, handle);
4300
4301         kfree_skb(skb);
4302 }
4303
4304 /* SCO data packet */
4305 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4306 {
4307         struct hci_sco_hdr *hdr = (void *) skb->data;
4308         struct hci_conn *conn;
4309         __u16 handle;
4310
4311         skb_pull(skb, HCI_SCO_HDR_SIZE);
4312
4313         handle = __le16_to_cpu(hdr->handle);
4314
4315         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4316
4317         hdev->stat.sco_rx++;
4318
4319         hci_dev_lock(hdev);
4320         conn = hci_conn_hash_lookup_handle(hdev, handle);
4321         hci_dev_unlock(hdev);
4322
4323         if (conn) {
4324                 /* Send to upper protocol */
4325                 sco_recv_scodata(conn, skb);
4326                 return;
4327         }
4328
4329         BT_ERR("%s SCO packet for unknown connection handle %d",
4330                hdev->name, handle);
4331
4332         kfree_skb(skb);
4333 }
4334
4335 static bool hci_req_is_complete(struct hci_dev *hdev)
4336 {
4337         struct sk_buff *skb;
4338
4339         skb = skb_peek(&hdev->cmd_q);
4340         if (!skb)
4341                 return true;
4342
4343         return bt_cb(skb)->hci.req_start;
4344 }
4345
4346 static void hci_resend_last(struct hci_dev *hdev)
4347 {
4348         struct hci_command_hdr *sent;
4349         struct sk_buff *skb;
4350         u16 opcode;
4351
4352         if (!hdev->sent_cmd)
4353                 return;
4354
4355         sent = (void *) hdev->sent_cmd->data;
4356         opcode = __le16_to_cpu(sent->opcode);
4357         if (opcode == HCI_OP_RESET)
4358                 return;
4359
4360         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4361         if (!skb)
4362                 return;
4363
4364         skb_queue_head(&hdev->cmd_q, skb);
4365         queue_work(hdev->workqueue, &hdev->cmd_work);
4366 }
4367
4368 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4369                           hci_req_complete_t *req_complete,
4370                           hci_req_complete_skb_t *req_complete_skb)
4371 {
4372         struct sk_buff *skb;
4373         unsigned long flags;
4374
4375         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4376
4377         /* If the completed command doesn't match the last one that was
4378          * sent we need to do special handling of it.
4379          */
4380         if (!hci_sent_cmd_data(hdev, opcode)) {
4381                 /* Some CSR based controllers generate a spontaneous
4382                  * reset complete event during init and any pending
4383                  * command will never be completed. In such a case we
4384                  * need to resend whatever was the last sent
4385                  * command.
4386                  */
4387                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4388                         hci_resend_last(hdev);
4389
4390                 return;
4391         }
4392
4393         /* If the command succeeded and there's still more commands in
4394          * this request the request is not yet complete.
4395          */
4396         if (!status && !hci_req_is_complete(hdev))
4397                 return;
4398
4399         /* If this was the last command in a request the complete
4400          * callback would be found in hdev->sent_cmd instead of the
4401          * command queue (hdev->cmd_q).
4402          */
4403         if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4404                 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4405                 return;
4406         }
4407
4408         if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) {
4409                 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4410                 return;
4411         }
4412
4413         /* Remove all pending commands belonging to this request */
4414         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4415         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4416                 if (bt_cb(skb)->hci.req_start) {
4417                         __skb_queue_head(&hdev->cmd_q, skb);
4418                         break;
4419                 }
4420
4421                 *req_complete = bt_cb(skb)->hci.req_complete;
4422                 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4423                 kfree_skb(skb);
4424         }
4425         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4426 }
4427
4428 static void hci_rx_work(struct work_struct *work)
4429 {
4430         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4431         struct sk_buff *skb;
4432
4433         BT_DBG("%s", hdev->name);
4434
4435         while ((skb = skb_dequeue(&hdev->rx_q))) {
4436                 /* Send copy to monitor */
4437                 hci_send_to_monitor(hdev, skb);
4438
4439                 if (atomic_read(&hdev->promisc)) {
4440                         /* Send copy to the sockets */
4441                         hci_send_to_sock(hdev, skb);
4442                 }
4443
4444                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4445                         kfree_skb(skb);
4446                         continue;
4447                 }
4448
4449                 if (test_bit(HCI_INIT, &hdev->flags)) {
4450                         /* Don't process data packets in this state. */
4451                         switch (bt_cb(skb)->pkt_type) {
4452                         case HCI_ACLDATA_PKT:
4453                         case HCI_SCODATA_PKT:
4454                                 kfree_skb(skb);
4455                                 continue;
4456                         }
4457                 }
4458
4459                 /* Process frame */
4460                 switch (bt_cb(skb)->pkt_type) {
4461                 case HCI_EVENT_PKT:
4462                         BT_DBG("%s Event packet", hdev->name);
4463                         hci_event_packet(hdev, skb);
4464                         break;
4465
4466                 case HCI_ACLDATA_PKT:
4467                         BT_DBG("%s ACL data packet", hdev->name);
4468                         hci_acldata_packet(hdev, skb);
4469                         break;
4470
4471                 case HCI_SCODATA_PKT:
4472                         BT_DBG("%s SCO data packet", hdev->name);
4473                         hci_scodata_packet(hdev, skb);
4474                         break;
4475
4476                 default:
4477                         kfree_skb(skb);
4478                         break;
4479                 }
4480         }
4481 }
4482
4483 static void hci_cmd_work(struct work_struct *work)
4484 {
4485         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4486         struct sk_buff *skb;
4487
4488         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4489                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4490
4491         /* Send queued commands */
4492         if (atomic_read(&hdev->cmd_cnt)) {
4493                 skb = skb_dequeue(&hdev->cmd_q);
4494                 if (!skb)
4495                         return;
4496
4497                 kfree_skb(hdev->sent_cmd);
4498
4499                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4500                 if (hdev->sent_cmd) {
4501                         atomic_dec(&hdev->cmd_cnt);
4502                         hci_send_frame(hdev, skb);
4503                         if (test_bit(HCI_RESET, &hdev->flags))
4504                                 cancel_delayed_work(&hdev->cmd_timer);
4505                         else
4506                                 schedule_delayed_work(&hdev->cmd_timer,
4507                                                       HCI_CMD_TIMEOUT);
4508                 } else {
4509                         skb_queue_head(&hdev->cmd_q, skb);
4510                         queue_work(hdev->workqueue, &hdev->cmd_work);
4511                 }
4512         }
4513 }
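/* Editor's summary note on the command path above: cmd_cnt reflects
 * how many commands the controller will currently accept; it is
 * decremented here for every command sent and replenished when the
 * corresponding command complete/status event arrives, while
 * cmd_timer catches controllers that fail to respond within
 * HCI_CMD_TIMEOUT.
 */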