]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - net/bluetooth/hci_core.c
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless
[karo-tx-linux.git] / net / bluetooth / hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "smp.h"
41
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
45
46 /* HCI device list */
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
49
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
53
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
56
57 /* ----- HCI requests ----- */
58
59 #define HCI_REQ_DONE      0
60 #define HCI_REQ_PEND      1
61 #define HCI_REQ_CANCELED  2
62
63 #define hci_req_lock(d)         mutex_lock(&d->req_lock)
64 #define hci_req_unlock(d)       mutex_unlock(&d->req_lock)
65
66 /* ---- HCI notifications ---- */
67
68 static void hci_notify(struct hci_dev *hdev, int event)
69 {
70         hci_sock_dev_event(hdev, event);
71 }
72
73 /* ---- HCI debugfs entries ---- */
74
75 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76                              size_t count, loff_t *ppos)
77 {
78         struct hci_dev *hdev = file->private_data;
79         char buf[3];
80
81         buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
82         buf[1] = '\n';
83         buf[2] = '\0';
84         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
85 }
86
87 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88                               size_t count, loff_t *ppos)
89 {
90         struct hci_dev *hdev = file->private_data;
91         struct sk_buff *skb;
92         char buf[32];
93         size_t buf_size = min(count, (sizeof(buf)-1));
94         bool enable;
95         int err;
96
97         if (!test_bit(HCI_UP, &hdev->flags))
98                 return -ENETDOWN;
99
100         if (copy_from_user(buf, user_buf, buf_size))
101                 return -EFAULT;
102
103         buf[buf_size] = '\0';
104         if (strtobool(buf, &enable))
105                 return -EINVAL;
106
107         if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
108                 return -EALREADY;
109
110         hci_req_lock(hdev);
111         if (enable)
112                 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
113                                      HCI_CMD_TIMEOUT);
114         else
115                 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
116                                      HCI_CMD_TIMEOUT);
117         hci_req_unlock(hdev);
118
119         if (IS_ERR(skb))
120                 return PTR_ERR(skb);
121
122         err = -bt_to_errno(skb->data[0]);
123         kfree_skb(skb);
124
125         if (err < 0)
126                 return err;
127
128         change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
129
130         return count;
131 }
132
133 static const struct file_operations dut_mode_fops = {
134         .open           = simple_open,
135         .read           = dut_mode_read,
136         .write          = dut_mode_write,
137         .llseek         = default_llseek,
138 };
139
140 static int features_show(struct seq_file *f, void *ptr)
141 {
142         struct hci_dev *hdev = f->private;
143         u8 p;
144
145         hci_dev_lock(hdev);
146         for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
147                 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
148                            "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149                            hdev->features[p][0], hdev->features[p][1],
150                            hdev->features[p][2], hdev->features[p][3],
151                            hdev->features[p][4], hdev->features[p][5],
152                            hdev->features[p][6], hdev->features[p][7]);
153         }
154         if (lmp_le_capable(hdev))
155                 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156                            "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157                            hdev->le_features[0], hdev->le_features[1],
158                            hdev->le_features[2], hdev->le_features[3],
159                            hdev->le_features[4], hdev->le_features[5],
160                            hdev->le_features[6], hdev->le_features[7]);
161         hci_dev_unlock(hdev);
162
163         return 0;
164 }
165
166 static int features_open(struct inode *inode, struct file *file)
167 {
168         return single_open(file, features_show, inode->i_private);
169 }
170
171 static const struct file_operations features_fops = {
172         .open           = features_open,
173         .read           = seq_read,
174         .llseek         = seq_lseek,
175         .release        = single_release,
176 };
177
178 static int blacklist_show(struct seq_file *f, void *p)
179 {
180         struct hci_dev *hdev = f->private;
181         struct bdaddr_list *b;
182
183         hci_dev_lock(hdev);
184         list_for_each_entry(b, &hdev->blacklist, list)
185                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
186         hci_dev_unlock(hdev);
187
188         return 0;
189 }
190
191 static int blacklist_open(struct inode *inode, struct file *file)
192 {
193         return single_open(file, blacklist_show, inode->i_private);
194 }
195
196 static const struct file_operations blacklist_fops = {
197         .open           = blacklist_open,
198         .read           = seq_read,
199         .llseek         = seq_lseek,
200         .release        = single_release,
201 };
202
203 static int whitelist_show(struct seq_file *f, void *p)
204 {
205         struct hci_dev *hdev = f->private;
206         struct bdaddr_list *b;
207
208         hci_dev_lock(hdev);
209         list_for_each_entry(b, &hdev->whitelist, list)
210                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
211         hci_dev_unlock(hdev);
212
213         return 0;
214 }
215
216 static int whitelist_open(struct inode *inode, struct file *file)
217 {
218         return single_open(file, whitelist_show, inode->i_private);
219 }
220
221 static const struct file_operations whitelist_fops = {
222         .open           = whitelist_open,
223         .read           = seq_read,
224         .llseek         = seq_lseek,
225         .release        = single_release,
226 };
227
228 static int uuids_show(struct seq_file *f, void *p)
229 {
230         struct hci_dev *hdev = f->private;
231         struct bt_uuid *uuid;
232
233         hci_dev_lock(hdev);
234         list_for_each_entry(uuid, &hdev->uuids, list) {
235                 u8 i, val[16];
236
237                 /* The Bluetooth UUID values are stored in big endian,
238                  * but with reversed byte order. So convert them into
239                  * the right order for the %pUb modifier.
240                  */
241                 for (i = 0; i < 16; i++)
242                         val[i] = uuid->uuid[15 - i];
243
244                 seq_printf(f, "%pUb\n", val);
245         }
246         hci_dev_unlock(hdev);
247
248         return 0;
249 }
250
251 static int uuids_open(struct inode *inode, struct file *file)
252 {
253         return single_open(file, uuids_show, inode->i_private);
254 }
255
256 static const struct file_operations uuids_fops = {
257         .open           = uuids_open,
258         .read           = seq_read,
259         .llseek         = seq_lseek,
260         .release        = single_release,
261 };
262
263 static int inquiry_cache_show(struct seq_file *f, void *p)
264 {
265         struct hci_dev *hdev = f->private;
266         struct discovery_state *cache = &hdev->discovery;
267         struct inquiry_entry *e;
268
269         hci_dev_lock(hdev);
270
271         list_for_each_entry(e, &cache->all, all) {
272                 struct inquiry_data *data = &e->data;
273                 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
274                            &data->bdaddr,
275                            data->pscan_rep_mode, data->pscan_period_mode,
276                            data->pscan_mode, data->dev_class[2],
277                            data->dev_class[1], data->dev_class[0],
278                            __le16_to_cpu(data->clock_offset),
279                            data->rssi, data->ssp_mode, e->timestamp);
280         }
281
282         hci_dev_unlock(hdev);
283
284         return 0;
285 }
286
287 static int inquiry_cache_open(struct inode *inode, struct file *file)
288 {
289         return single_open(file, inquiry_cache_show, inode->i_private);
290 }
291
292 static const struct file_operations inquiry_cache_fops = {
293         .open           = inquiry_cache_open,
294         .read           = seq_read,
295         .llseek         = seq_lseek,
296         .release        = single_release,
297 };
298
299 static int link_keys_show(struct seq_file *f, void *ptr)
300 {
301         struct hci_dev *hdev = f->private;
302         struct list_head *p, *n;
303
304         hci_dev_lock(hdev);
305         list_for_each_safe(p, n, &hdev->link_keys) {
306                 struct link_key *key = list_entry(p, struct link_key, list);
307                 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
308                            HCI_LINK_KEY_SIZE, key->val, key->pin_len);
309         }
310         hci_dev_unlock(hdev);
311
312         return 0;
313 }
314
315 static int link_keys_open(struct inode *inode, struct file *file)
316 {
317         return single_open(file, link_keys_show, inode->i_private);
318 }
319
320 static const struct file_operations link_keys_fops = {
321         .open           = link_keys_open,
322         .read           = seq_read,
323         .llseek         = seq_lseek,
324         .release        = single_release,
325 };
326
327 static int dev_class_show(struct seq_file *f, void *ptr)
328 {
329         struct hci_dev *hdev = f->private;
330
331         hci_dev_lock(hdev);
332         seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
333                    hdev->dev_class[1], hdev->dev_class[0]);
334         hci_dev_unlock(hdev);
335
336         return 0;
337 }
338
339 static int dev_class_open(struct inode *inode, struct file *file)
340 {
341         return single_open(file, dev_class_show, inode->i_private);
342 }
343
344 static const struct file_operations dev_class_fops = {
345         .open           = dev_class_open,
346         .read           = seq_read,
347         .llseek         = seq_lseek,
348         .release        = single_release,
349 };
350
351 static int voice_setting_get(void *data, u64 *val)
352 {
353         struct hci_dev *hdev = data;
354
355         hci_dev_lock(hdev);
356         *val = hdev->voice_setting;
357         hci_dev_unlock(hdev);
358
359         return 0;
360 }
361
362 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
363                         NULL, "0x%4.4llx\n");
364
365 static int auto_accept_delay_set(void *data, u64 val)
366 {
367         struct hci_dev *hdev = data;
368
369         hci_dev_lock(hdev);
370         hdev->auto_accept_delay = val;
371         hci_dev_unlock(hdev);
372
373         return 0;
374 }
375
376 static int auto_accept_delay_get(void *data, u64 *val)
377 {
378         struct hci_dev *hdev = data;
379
380         hci_dev_lock(hdev);
381         *val = hdev->auto_accept_delay;
382         hci_dev_unlock(hdev);
383
384         return 0;
385 }
386
387 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
388                         auto_accept_delay_set, "%llu\n");
389
390 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
391                                      size_t count, loff_t *ppos)
392 {
393         struct hci_dev *hdev = file->private_data;
394         char buf[3];
395
396         buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
397         buf[1] = '\n';
398         buf[2] = '\0';
399         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
400 }
401
402 static ssize_t force_sc_support_write(struct file *file,
403                                       const char __user *user_buf,
404                                       size_t count, loff_t *ppos)
405 {
406         struct hci_dev *hdev = file->private_data;
407         char buf[32];
408         size_t buf_size = min(count, (sizeof(buf)-1));
409         bool enable;
410
411         if (test_bit(HCI_UP, &hdev->flags))
412                 return -EBUSY;
413
414         if (copy_from_user(buf, user_buf, buf_size))
415                 return -EFAULT;
416
417         buf[buf_size] = '\0';
418         if (strtobool(buf, &enable))
419                 return -EINVAL;
420
421         if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
422                 return -EALREADY;
423
424         change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
425
426         return count;
427 }
428
429 static const struct file_operations force_sc_support_fops = {
430         .open           = simple_open,
431         .read           = force_sc_support_read,
432         .write          = force_sc_support_write,
433         .llseek         = default_llseek,
434 };
435
436 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
437                                  size_t count, loff_t *ppos)
438 {
439         struct hci_dev *hdev = file->private_data;
440         char buf[3];
441
442         buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
443         buf[1] = '\n';
444         buf[2] = '\0';
445         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
446 }
447
448 static const struct file_operations sc_only_mode_fops = {
449         .open           = simple_open,
450         .read           = sc_only_mode_read,
451         .llseek         = default_llseek,
452 };
453
454 static int idle_timeout_set(void *data, u64 val)
455 {
456         struct hci_dev *hdev = data;
457
458         if (val != 0 && (val < 500 || val > 3600000))
459                 return -EINVAL;
460
461         hci_dev_lock(hdev);
462         hdev->idle_timeout = val;
463         hci_dev_unlock(hdev);
464
465         return 0;
466 }
467
468 static int idle_timeout_get(void *data, u64 *val)
469 {
470         struct hci_dev *hdev = data;
471
472         hci_dev_lock(hdev);
473         *val = hdev->idle_timeout;
474         hci_dev_unlock(hdev);
475
476         return 0;
477 }
478
479 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
480                         idle_timeout_set, "%llu\n");
481
482 static int rpa_timeout_set(void *data, u64 val)
483 {
484         struct hci_dev *hdev = data;
485
486         /* Require the RPA timeout to be at least 30 seconds and at most
487          * 24 hours.
488          */
489         if (val < 30 || val > (60 * 60 * 24))
490                 return -EINVAL;
491
492         hci_dev_lock(hdev);
493         hdev->rpa_timeout = val;
494         hci_dev_unlock(hdev);
495
496         return 0;
497 }
498
499 static int rpa_timeout_get(void *data, u64 *val)
500 {
501         struct hci_dev *hdev = data;
502
503         hci_dev_lock(hdev);
504         *val = hdev->rpa_timeout;
505         hci_dev_unlock(hdev);
506
507         return 0;
508 }
509
510 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
511                         rpa_timeout_set, "%llu\n");
512
513 static int sniff_min_interval_set(void *data, u64 val)
514 {
515         struct hci_dev *hdev = data;
516
517         if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
518                 return -EINVAL;
519
520         hci_dev_lock(hdev);
521         hdev->sniff_min_interval = val;
522         hci_dev_unlock(hdev);
523
524         return 0;
525 }
526
527 static int sniff_min_interval_get(void *data, u64 *val)
528 {
529         struct hci_dev *hdev = data;
530
531         hci_dev_lock(hdev);
532         *val = hdev->sniff_min_interval;
533         hci_dev_unlock(hdev);
534
535         return 0;
536 }
537
538 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
539                         sniff_min_interval_set, "%llu\n");
540
541 static int sniff_max_interval_set(void *data, u64 val)
542 {
543         struct hci_dev *hdev = data;
544
545         if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
546                 return -EINVAL;
547
548         hci_dev_lock(hdev);
549         hdev->sniff_max_interval = val;
550         hci_dev_unlock(hdev);
551
552         return 0;
553 }
554
555 static int sniff_max_interval_get(void *data, u64 *val)
556 {
557         struct hci_dev *hdev = data;
558
559         hci_dev_lock(hdev);
560         *val = hdev->sniff_max_interval;
561         hci_dev_unlock(hdev);
562
563         return 0;
564 }
565
566 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
567                         sniff_max_interval_set, "%llu\n");
568
569 static int conn_info_min_age_set(void *data, u64 val)
570 {
571         struct hci_dev *hdev = data;
572
573         if (val == 0 || val > hdev->conn_info_max_age)
574                 return -EINVAL;
575
576         hci_dev_lock(hdev);
577         hdev->conn_info_min_age = val;
578         hci_dev_unlock(hdev);
579
580         return 0;
581 }
582
583 static int conn_info_min_age_get(void *data, u64 *val)
584 {
585         struct hci_dev *hdev = data;
586
587         hci_dev_lock(hdev);
588         *val = hdev->conn_info_min_age;
589         hci_dev_unlock(hdev);
590
591         return 0;
592 }
593
594 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
595                         conn_info_min_age_set, "%llu\n");
596
597 static int conn_info_max_age_set(void *data, u64 val)
598 {
599         struct hci_dev *hdev = data;
600
601         if (val == 0 || val < hdev->conn_info_min_age)
602                 return -EINVAL;
603
604         hci_dev_lock(hdev);
605         hdev->conn_info_max_age = val;
606         hci_dev_unlock(hdev);
607
608         return 0;
609 }
610
611 static int conn_info_max_age_get(void *data, u64 *val)
612 {
613         struct hci_dev *hdev = data;
614
615         hci_dev_lock(hdev);
616         *val = hdev->conn_info_max_age;
617         hci_dev_unlock(hdev);
618
619         return 0;
620 }
621
622 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
623                         conn_info_max_age_set, "%llu\n");
624
625 static int identity_show(struct seq_file *f, void *p)
626 {
627         struct hci_dev *hdev = f->private;
628         bdaddr_t addr;
629         u8 addr_type;
630
631         hci_dev_lock(hdev);
632
633         hci_copy_identity_address(hdev, &addr, &addr_type);
634
635         seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
636                    16, hdev->irk, &hdev->rpa);
637
638         hci_dev_unlock(hdev);
639
640         return 0;
641 }
642
643 static int identity_open(struct inode *inode, struct file *file)
644 {
645         return single_open(file, identity_show, inode->i_private);
646 }
647
648 static const struct file_operations identity_fops = {
649         .open           = identity_open,
650         .read           = seq_read,
651         .llseek         = seq_lseek,
652         .release        = single_release,
653 };
654
655 static int random_address_show(struct seq_file *f, void *p)
656 {
657         struct hci_dev *hdev = f->private;
658
659         hci_dev_lock(hdev);
660         seq_printf(f, "%pMR\n", &hdev->random_addr);
661         hci_dev_unlock(hdev);
662
663         return 0;
664 }
665
666 static int random_address_open(struct inode *inode, struct file *file)
667 {
668         return single_open(file, random_address_show, inode->i_private);
669 }
670
671 static const struct file_operations random_address_fops = {
672         .open           = random_address_open,
673         .read           = seq_read,
674         .llseek         = seq_lseek,
675         .release        = single_release,
676 };
677
678 static int static_address_show(struct seq_file *f, void *p)
679 {
680         struct hci_dev *hdev = f->private;
681
682         hci_dev_lock(hdev);
683         seq_printf(f, "%pMR\n", &hdev->static_addr);
684         hci_dev_unlock(hdev);
685
686         return 0;
687 }
688
689 static int static_address_open(struct inode *inode, struct file *file)
690 {
691         return single_open(file, static_address_show, inode->i_private);
692 }
693
694 static const struct file_operations static_address_fops = {
695         .open           = static_address_open,
696         .read           = seq_read,
697         .llseek         = seq_lseek,
698         .release        = single_release,
699 };
700
701 static ssize_t force_static_address_read(struct file *file,
702                                          char __user *user_buf,
703                                          size_t count, loff_t *ppos)
704 {
705         struct hci_dev *hdev = file->private_data;
706         char buf[3];
707
708         buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
709         buf[1] = '\n';
710         buf[2] = '\0';
711         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
712 }
713
714 static ssize_t force_static_address_write(struct file *file,
715                                           const char __user *user_buf,
716                                           size_t count, loff_t *ppos)
717 {
718         struct hci_dev *hdev = file->private_data;
719         char buf[32];
720         size_t buf_size = min(count, (sizeof(buf)-1));
721         bool enable;
722
723         if (test_bit(HCI_UP, &hdev->flags))
724                 return -EBUSY;
725
726         if (copy_from_user(buf, user_buf, buf_size))
727                 return -EFAULT;
728
729         buf[buf_size] = '\0';
730         if (strtobool(buf, &enable))
731                 return -EINVAL;
732
733         if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
734                 return -EALREADY;
735
736         change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
737
738         return count;
739 }
740
741 static const struct file_operations force_static_address_fops = {
742         .open           = simple_open,
743         .read           = force_static_address_read,
744         .write          = force_static_address_write,
745         .llseek         = default_llseek,
746 };
747
748 static int white_list_show(struct seq_file *f, void *ptr)
749 {
750         struct hci_dev *hdev = f->private;
751         struct bdaddr_list *b;
752
753         hci_dev_lock(hdev);
754         list_for_each_entry(b, &hdev->le_white_list, list)
755                 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
756         hci_dev_unlock(hdev);
757
758         return 0;
759 }
760
761 static int white_list_open(struct inode *inode, struct file *file)
762 {
763         return single_open(file, white_list_show, inode->i_private);
764 }
765
766 static const struct file_operations white_list_fops = {
767         .open           = white_list_open,
768         .read           = seq_read,
769         .llseek         = seq_lseek,
770         .release        = single_release,
771 };
772
773 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
774 {
775         struct hci_dev *hdev = f->private;
776         struct list_head *p, *n;
777
778         hci_dev_lock(hdev);
779         list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
780                 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
781                 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
782                            &irk->bdaddr, irk->addr_type,
783                            16, irk->val, &irk->rpa);
784         }
785         hci_dev_unlock(hdev);
786
787         return 0;
788 }
789
790 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
791 {
792         return single_open(file, identity_resolving_keys_show,
793                            inode->i_private);
794 }
795
796 static const struct file_operations identity_resolving_keys_fops = {
797         .open           = identity_resolving_keys_open,
798         .read           = seq_read,
799         .llseek         = seq_lseek,
800         .release        = single_release,
801 };
802
803 static int long_term_keys_show(struct seq_file *f, void *ptr)
804 {
805         struct hci_dev *hdev = f->private;
806         struct list_head *p, *n;
807
808         hci_dev_lock(hdev);
809         list_for_each_safe(p, n, &hdev->long_term_keys) {
810                 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
811                 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
812                            &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
813                            ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
814                            __le64_to_cpu(ltk->rand), 16, ltk->val);
815         }
816         hci_dev_unlock(hdev);
817
818         return 0;
819 }
820
821 static int long_term_keys_open(struct inode *inode, struct file *file)
822 {
823         return single_open(file, long_term_keys_show, inode->i_private);
824 }
825
826 static const struct file_operations long_term_keys_fops = {
827         .open           = long_term_keys_open,
828         .read           = seq_read,
829         .llseek         = seq_lseek,
830         .release        = single_release,
831 };
832
833 static int conn_min_interval_set(void *data, u64 val)
834 {
835         struct hci_dev *hdev = data;
836
837         if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
838                 return -EINVAL;
839
840         hci_dev_lock(hdev);
841         hdev->le_conn_min_interval = val;
842         hci_dev_unlock(hdev);
843
844         return 0;
845 }
846
847 static int conn_min_interval_get(void *data, u64 *val)
848 {
849         struct hci_dev *hdev = data;
850
851         hci_dev_lock(hdev);
852         *val = hdev->le_conn_min_interval;
853         hci_dev_unlock(hdev);
854
855         return 0;
856 }
857
858 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
859                         conn_min_interval_set, "%llu\n");
860
861 static int conn_max_interval_set(void *data, u64 val)
862 {
863         struct hci_dev *hdev = data;
864
865         if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
866                 return -EINVAL;
867
868         hci_dev_lock(hdev);
869         hdev->le_conn_max_interval = val;
870         hci_dev_unlock(hdev);
871
872         return 0;
873 }
874
875 static int conn_max_interval_get(void *data, u64 *val)
876 {
877         struct hci_dev *hdev = data;
878
879         hci_dev_lock(hdev);
880         *val = hdev->le_conn_max_interval;
881         hci_dev_unlock(hdev);
882
883         return 0;
884 }
885
886 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
887                         conn_max_interval_set, "%llu\n");
888
889 static int conn_latency_set(void *data, u64 val)
890 {
891         struct hci_dev *hdev = data;
892
893         if (val > 0x01f3)
894                 return -EINVAL;
895
896         hci_dev_lock(hdev);
897         hdev->le_conn_latency = val;
898         hci_dev_unlock(hdev);
899
900         return 0;
901 }
902
903 static int conn_latency_get(void *data, u64 *val)
904 {
905         struct hci_dev *hdev = data;
906
907         hci_dev_lock(hdev);
908         *val = hdev->le_conn_latency;
909         hci_dev_unlock(hdev);
910
911         return 0;
912 }
913
914 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
915                         conn_latency_set, "%llu\n");
916
917 static int supervision_timeout_set(void *data, u64 val)
918 {
919         struct hci_dev *hdev = data;
920
921         if (val < 0x000a || val > 0x0c80)
922                 return -EINVAL;
923
924         hci_dev_lock(hdev);
925         hdev->le_supv_timeout = val;
926         hci_dev_unlock(hdev);
927
928         return 0;
929 }
930
931 static int supervision_timeout_get(void *data, u64 *val)
932 {
933         struct hci_dev *hdev = data;
934
935         hci_dev_lock(hdev);
936         *val = hdev->le_supv_timeout;
937         hci_dev_unlock(hdev);
938
939         return 0;
940 }
941
942 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
943                         supervision_timeout_set, "%llu\n");
944
945 static int adv_channel_map_set(void *data, u64 val)
946 {
947         struct hci_dev *hdev = data;
948
949         if (val < 0x01 || val > 0x07)
950                 return -EINVAL;
951
952         hci_dev_lock(hdev);
953         hdev->le_adv_channel_map = val;
954         hci_dev_unlock(hdev);
955
956         return 0;
957 }
958
959 static int adv_channel_map_get(void *data, u64 *val)
960 {
961         struct hci_dev *hdev = data;
962
963         hci_dev_lock(hdev);
964         *val = hdev->le_adv_channel_map;
965         hci_dev_unlock(hdev);
966
967         return 0;
968 }
969
970 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
971                         adv_channel_map_set, "%llu\n");
972
973 static int adv_min_interval_set(void *data, u64 val)
974 {
975         struct hci_dev *hdev = data;
976
977         if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
978                 return -EINVAL;
979
980         hci_dev_lock(hdev);
981         hdev->le_adv_min_interval = val;
982         hci_dev_unlock(hdev);
983
984         return 0;
985 }
986
987 static int adv_min_interval_get(void *data, u64 *val)
988 {
989         struct hci_dev *hdev = data;
990
991         hci_dev_lock(hdev);
992         *val = hdev->le_adv_min_interval;
993         hci_dev_unlock(hdev);
994
995         return 0;
996 }
997
998 DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
999                         adv_min_interval_set, "%llu\n");
1000
1001 static int adv_max_interval_set(void *data, u64 val)
1002 {
1003         struct hci_dev *hdev = data;
1004
1005         if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
1006                 return -EINVAL;
1007
1008         hci_dev_lock(hdev);
1009         hdev->le_adv_max_interval = val;
1010         hci_dev_unlock(hdev);
1011
1012         return 0;
1013 }
1014
1015 static int adv_max_interval_get(void *data, u64 *val)
1016 {
1017         struct hci_dev *hdev = data;
1018
1019         hci_dev_lock(hdev);
1020         *val = hdev->le_adv_max_interval;
1021         hci_dev_unlock(hdev);
1022
1023         return 0;
1024 }
1025
1026 DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1027                         adv_max_interval_set, "%llu\n");
1028
1029 static int device_list_show(struct seq_file *f, void *ptr)
1030 {
1031         struct hci_dev *hdev = f->private;
1032         struct hci_conn_params *p;
1033
1034         hci_dev_lock(hdev);
1035         list_for_each_entry(p, &hdev->le_conn_params, list) {
1036                 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
1037                            p->auto_connect);
1038         }
1039         hci_dev_unlock(hdev);
1040
1041         return 0;
1042 }
1043
1044 static int device_list_open(struct inode *inode, struct file *file)
1045 {
1046         return single_open(file, device_list_show, inode->i_private);
1047 }
1048
1049 static const struct file_operations device_list_fops = {
1050         .open           = device_list_open,
1051         .read           = seq_read,
1052         .llseek         = seq_lseek,
1053         .release        = single_release,
1054 };
1055
1056 /* ---- HCI requests ---- */
1057
1058 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1059 {
1060         BT_DBG("%s result 0x%2.2x", hdev->name, result);
1061
1062         if (hdev->req_status == HCI_REQ_PEND) {
1063                 hdev->req_result = result;
1064                 hdev->req_status = HCI_REQ_DONE;
1065                 wake_up_interruptible(&hdev->req_wait_q);
1066         }
1067 }
1068
1069 static void hci_req_cancel(struct hci_dev *hdev, int err)
1070 {
1071         BT_DBG("%s err 0x%2.2x", hdev->name, err);
1072
1073         if (hdev->req_status == HCI_REQ_PEND) {
1074                 hdev->req_result = err;
1075                 hdev->req_status = HCI_REQ_CANCELED;
1076                 wake_up_interruptible(&hdev->req_wait_q);
1077         }
1078 }
1079
1080 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1081                                             u8 event)
1082 {
1083         struct hci_ev_cmd_complete *ev;
1084         struct hci_event_hdr *hdr;
1085         struct sk_buff *skb;
1086
1087         hci_dev_lock(hdev);
1088
1089         skb = hdev->recv_evt;
1090         hdev->recv_evt = NULL;
1091
1092         hci_dev_unlock(hdev);
1093
1094         if (!skb)
1095                 return ERR_PTR(-ENODATA);
1096
1097         if (skb->len < sizeof(*hdr)) {
1098                 BT_ERR("Too short HCI event");
1099                 goto failed;
1100         }
1101
1102         hdr = (void *) skb->data;
1103         skb_pull(skb, HCI_EVENT_HDR_SIZE);
1104
1105         if (event) {
1106                 if (hdr->evt != event)
1107                         goto failed;
1108                 return skb;
1109         }
1110
1111         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1112                 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1113                 goto failed;
1114         }
1115
1116         if (skb->len < sizeof(*ev)) {
1117                 BT_ERR("Too short cmd_complete event");
1118                 goto failed;
1119         }
1120
1121         ev = (void *) skb->data;
1122         skb_pull(skb, sizeof(*ev));
1123
1124         if (opcode == __le16_to_cpu(ev->opcode))
1125                 return skb;
1126
1127         BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1128                __le16_to_cpu(ev->opcode));
1129
1130 failed:
1131         kfree_skb(skb);
1132         return ERR_PTR(-ENODATA);
1133 }
1134
1135 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1136                                   const void *param, u8 event, u32 timeout)
1137 {
1138         DECLARE_WAITQUEUE(wait, current);
1139         struct hci_request req;
1140         int err = 0;
1141
1142         BT_DBG("%s", hdev->name);
1143
1144         hci_req_init(&req, hdev);
1145
1146         hci_req_add_ev(&req, opcode, plen, param, event);
1147
1148         hdev->req_status = HCI_REQ_PEND;
1149
1150         err = hci_req_run(&req, hci_req_sync_complete);
1151         if (err < 0)
1152                 return ERR_PTR(err);
1153
1154         add_wait_queue(&hdev->req_wait_q, &wait);
1155         set_current_state(TASK_INTERRUPTIBLE);
1156
1157         schedule_timeout(timeout);
1158
1159         remove_wait_queue(&hdev->req_wait_q, &wait);
1160
1161         if (signal_pending(current))
1162                 return ERR_PTR(-EINTR);
1163
1164         switch (hdev->req_status) {
1165         case HCI_REQ_DONE:
1166                 err = -bt_to_errno(hdev->req_result);
1167                 break;
1168
1169         case HCI_REQ_CANCELED:
1170                 err = -hdev->req_result;
1171                 break;
1172
1173         default:
1174                 err = -ETIMEDOUT;
1175                 break;
1176         }
1177
1178         hdev->req_status = hdev->req_result = 0;
1179
1180         BT_DBG("%s end: err %d", hdev->name, err);
1181
1182         if (err < 0)
1183                 return ERR_PTR(err);
1184
1185         return hci_get_cmd_complete(hdev, opcode, event);
1186 }
1187 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1188
1189 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1190                                const void *param, u32 timeout)
1191 {
1192         return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1193 }
1194 EXPORT_SYMBOL(__hci_cmd_sync);
1195
1196 /* Execute request and wait for completion. */
1197 static int __hci_req_sync(struct hci_dev *hdev,
1198                           void (*func)(struct hci_request *req,
1199                                       unsigned long opt),
1200                           unsigned long opt, __u32 timeout)
1201 {
1202         struct hci_request req;
1203         DECLARE_WAITQUEUE(wait, current);
1204         int err = 0;
1205
1206         BT_DBG("%s start", hdev->name);
1207
1208         hci_req_init(&req, hdev);
1209
1210         hdev->req_status = HCI_REQ_PEND;
1211
1212         func(&req, opt);
1213
1214         err = hci_req_run(&req, hci_req_sync_complete);
1215         if (err < 0) {
1216                 hdev->req_status = 0;
1217
1218                 /* ENODATA means the HCI request command queue is empty.
1219                  * This can happen when a request with conditionals doesn't
1220                  * trigger any commands to be sent. This is normal behavior
1221                  * and should not trigger an error return.
1222                  */
1223                 if (err == -ENODATA)
1224                         return 0;
1225
1226                 return err;
1227         }
1228
1229         add_wait_queue(&hdev->req_wait_q, &wait);
1230         set_current_state(TASK_INTERRUPTIBLE);
1231
1232         schedule_timeout(timeout);
1233
1234         remove_wait_queue(&hdev->req_wait_q, &wait);
1235
1236         if (signal_pending(current))
1237                 return -EINTR;
1238
1239         switch (hdev->req_status) {
1240         case HCI_REQ_DONE:
1241                 err = -bt_to_errno(hdev->req_result);
1242                 break;
1243
1244         case HCI_REQ_CANCELED:
1245                 err = -hdev->req_result;
1246                 break;
1247
1248         default:
1249                 err = -ETIMEDOUT;
1250                 break;
1251         }
1252
1253         hdev->req_status = hdev->req_result = 0;
1254
1255         BT_DBG("%s end: err %d", hdev->name, err);
1256
1257         return err;
1258 }
1259
1260 static int hci_req_sync(struct hci_dev *hdev,
1261                         void (*req)(struct hci_request *req,
1262                                     unsigned long opt),
1263                         unsigned long opt, __u32 timeout)
1264 {
1265         int ret;
1266
1267         if (!test_bit(HCI_UP, &hdev->flags))
1268                 return -ENETDOWN;
1269
1270         /* Serialize all requests */
1271         hci_req_lock(hdev);
1272         ret = __hci_req_sync(hdev, req, opt, timeout);
1273         hci_req_unlock(hdev);
1274
1275         return ret;
1276 }
1277
1278 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1279 {
1280         BT_DBG("%s %ld", req->hdev->name, opt);
1281
1282         /* Reset device */
1283         set_bit(HCI_RESET, &req->hdev->flags);
1284         hci_req_add(req, HCI_OP_RESET, 0, NULL);
1285 }
1286
1287 static void bredr_init(struct hci_request *req)
1288 {
1289         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1290
1291         /* Read Local Supported Features */
1292         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1293
1294         /* Read Local Version */
1295         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1296
1297         /* Read BD Address */
1298         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1299 }
1300
1301 static void amp_init(struct hci_request *req)
1302 {
1303         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1304
1305         /* Read Local Version */
1306         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1307
1308         /* Read Local Supported Commands */
1309         hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1310
1311         /* Read Local Supported Features */
1312         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1313
1314         /* Read Local AMP Info */
1315         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1316
1317         /* Read Data Blk size */
1318         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1319
1320         /* Read Flow Control Mode */
1321         hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1322
1323         /* Read Location Data */
1324         hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1325 }
1326
1327 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1328 {
1329         struct hci_dev *hdev = req->hdev;
1330
1331         BT_DBG("%s %ld", hdev->name, opt);
1332
1333         /* Reset */
1334         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1335                 hci_reset_req(req, 0);
1336
1337         switch (hdev->dev_type) {
1338         case HCI_BREDR:
1339                 bredr_init(req);
1340                 break;
1341
1342         case HCI_AMP:
1343                 amp_init(req);
1344                 break;
1345
1346         default:
1347                 BT_ERR("Unknown device type %d", hdev->dev_type);
1348                 break;
1349         }
1350 }
1351
1352 static void bredr_setup(struct hci_request *req)
1353 {
1354         struct hci_dev *hdev = req->hdev;
1355
1356         __le16 param;
1357         __u8 flt_type;
1358
1359         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1360         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1361
1362         /* Read Class of Device */
1363         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1364
1365         /* Read Local Name */
1366         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1367
1368         /* Read Voice Setting */
1369         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1370
1371         /* Read Number of Supported IAC */
1372         hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1373
1374         /* Read Current IAC LAP */
1375         hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1376
1377         /* Clear Event Filters */
1378         flt_type = HCI_FLT_CLEAR_ALL;
1379         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1380
1381         /* Connection accept timeout ~20 secs */
1382         param = cpu_to_le16(0x7d00);
1383         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1384
1385         /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1386          * but it does not support page scan related HCI commands.
1387          */
1388         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1389                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1390                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1391         }
1392 }
1393
1394 static void le_setup(struct hci_request *req)
1395 {
1396         struct hci_dev *hdev = req->hdev;
1397
1398         /* Read LE Buffer Size */
1399         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1400
1401         /* Read LE Local Supported Features */
1402         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1403
1404         /* Read LE Supported States */
1405         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1406
1407         /* Read LE White List Size */
1408         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1409
1410         /* Clear LE White List */
1411         hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1412
1413         /* LE-only controllers have LE implicitly enabled */
1414         if (!lmp_bredr_capable(hdev))
1415                 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1416 }
1417
1418 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1419 {
1420         if (lmp_ext_inq_capable(hdev))
1421                 return 0x02;
1422
1423         if (lmp_inq_rssi_capable(hdev))
1424                 return 0x01;
1425
1426         if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1427             hdev->lmp_subver == 0x0757)
1428                 return 0x01;
1429
1430         if (hdev->manufacturer == 15) {
1431                 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1432                         return 0x01;
1433                 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1434                         return 0x01;
1435                 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1436                         return 0x01;
1437         }
1438
1439         if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1440             hdev->lmp_subver == 0x1805)
1441                 return 0x01;
1442
1443         return 0x00;
1444 }
1445
1446 static void hci_setup_inquiry_mode(struct hci_request *req)
1447 {
1448         u8 mode;
1449
1450         mode = hci_get_inquiry_mode(req->hdev);
1451
1452         hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1453 }
1454
1455 static void hci_setup_event_mask(struct hci_request *req)
1456 {
1457         struct hci_dev *hdev = req->hdev;
1458
1459         /* The second byte is 0xff instead of 0x9f (two reserved bits
1460          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1461          * command otherwise.
1462          */
1463         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1464
1465         /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1466          * any event mask for pre 1.2 devices.
1467          */
1468         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1469                 return;
1470
1471         if (lmp_bredr_capable(hdev)) {
1472                 events[4] |= 0x01; /* Flow Specification Complete */
1473                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1474                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1475                 events[5] |= 0x08; /* Synchronous Connection Complete */
1476                 events[5] |= 0x10; /* Synchronous Connection Changed */
1477         } else {
1478                 /* Use a different default for LE-only devices */
1479                 memset(events, 0, sizeof(events));
1480                 events[0] |= 0x10; /* Disconnection Complete */
1481                 events[1] |= 0x08; /* Read Remote Version Information Complete */
1482                 events[1] |= 0x20; /* Command Complete */
1483                 events[1] |= 0x40; /* Command Status */
1484                 events[1] |= 0x80; /* Hardware Error */
1485                 events[2] |= 0x04; /* Number of Completed Packets */
1486                 events[3] |= 0x02; /* Data Buffer Overflow */
1487
1488                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1489                         events[0] |= 0x80; /* Encryption Change */
1490                         events[5] |= 0x80; /* Encryption Key Refresh Complete */
1491                 }
1492         }
1493
1494         if (lmp_inq_rssi_capable(hdev))
1495                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1496
1497         if (lmp_sniffsubr_capable(hdev))
1498                 events[5] |= 0x20; /* Sniff Subrating */
1499
1500         if (lmp_pause_enc_capable(hdev))
1501                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1502
1503         if (lmp_ext_inq_capable(hdev))
1504                 events[5] |= 0x40; /* Extended Inquiry Result */
1505
1506         if (lmp_no_flush_capable(hdev))
1507                 events[7] |= 0x01; /* Enhanced Flush Complete */
1508
1509         if (lmp_lsto_capable(hdev))
1510                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1511
1512         if (lmp_ssp_capable(hdev)) {
1513                 events[6] |= 0x01;      /* IO Capability Request */
1514                 events[6] |= 0x02;      /* IO Capability Response */
1515                 events[6] |= 0x04;      /* User Confirmation Request */
1516                 events[6] |= 0x08;      /* User Passkey Request */
1517                 events[6] |= 0x10;      /* Remote OOB Data Request */
1518                 events[6] |= 0x20;      /* Simple Pairing Complete */
1519                 events[7] |= 0x04;      /* User Passkey Notification */
1520                 events[7] |= 0x08;      /* Keypress Notification */
1521                 events[7] |= 0x10;      /* Remote Host Supported
1522                                          * Features Notification
1523                                          */
1524         }
1525
1526         if (lmp_le_capable(hdev))
1527                 events[7] |= 0x20;      /* LE Meta-Event */
1528
1529         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1530 }
1531
1532 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1533 {
1534         struct hci_dev *hdev = req->hdev;
1535
1536         if (lmp_bredr_capable(hdev))
1537                 bredr_setup(req);
1538         else
1539                 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1540
1541         if (lmp_le_capable(hdev))
1542                 le_setup(req);
1543
1544         /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1545          * local supported commands HCI command.
1546          */
1547         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1548                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1549
1550         if (lmp_ssp_capable(hdev)) {
1551                 /* When SSP is available, then the host features page
1552                  * should also be available as well. However some
1553                  * controllers list the max_page as 0 as long as SSP
1554                  * has not been enabled. To achieve proper debugging
1555                  * output, force the minimum max_page to 1 at least.
1556                  */
1557                 hdev->max_page = 0x01;
1558
1559                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1560                         u8 mode = 0x01;
1561                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1562                                     sizeof(mode), &mode);
1563                 } else {
1564                         struct hci_cp_write_eir cp;
1565
1566                         memset(hdev->eir, 0, sizeof(hdev->eir));
1567                         memset(&cp, 0, sizeof(cp));
1568
1569                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1570                 }
1571         }
1572
1573         if (lmp_inq_rssi_capable(hdev))
1574                 hci_setup_inquiry_mode(req);
1575
1576         if (lmp_inq_tx_pwr_capable(hdev))
1577                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1578
1579         if (lmp_ext_feat_capable(hdev)) {
1580                 struct hci_cp_read_local_ext_features cp;
1581
1582                 cp.page = 0x01;
1583                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1584                             sizeof(cp), &cp);
1585         }
1586
1587         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1588                 u8 enable = 1;
1589                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1590                             &enable);
1591         }
1592 }
1593
1594 static void hci_setup_link_policy(struct hci_request *req)
1595 {
1596         struct hci_dev *hdev = req->hdev;
1597         struct hci_cp_write_def_link_policy cp;
1598         u16 link_policy = 0;
1599
1600         if (lmp_rswitch_capable(hdev))
1601                 link_policy |= HCI_LP_RSWITCH;
1602         if (lmp_hold_capable(hdev))
1603                 link_policy |= HCI_LP_HOLD;
1604         if (lmp_sniff_capable(hdev))
1605                 link_policy |= HCI_LP_SNIFF;
1606         if (lmp_park_capable(hdev))
1607                 link_policy |= HCI_LP_PARK;
1608
1609         cp.policy = cpu_to_le16(link_policy);
1610         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1611 }
1612
1613 static void hci_set_le_support(struct hci_request *req)
1614 {
1615         struct hci_dev *hdev = req->hdev;
1616         struct hci_cp_write_le_host_supported cp;
1617
1618         /* LE-only devices do not support explicit enablement */
1619         if (!lmp_bredr_capable(hdev))
1620                 return;
1621
1622         memset(&cp, 0, sizeof(cp));
1623
1624         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1625                 cp.le = 0x01;
1626                 cp.simul = 0x00;
1627         }
1628
1629         if (cp.le != lmp_host_le_capable(hdev))
1630                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1631                             &cp);
1632 }
1633
1634 static void hci_set_event_mask_page_2(struct hci_request *req)
1635 {
1636         struct hci_dev *hdev = req->hdev;
1637         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1638
1639         /* If Connectionless Slave Broadcast master role is supported
1640          * enable all necessary events for it.
1641          */
1642         if (lmp_csb_master_capable(hdev)) {
1643                 events[1] |= 0x40;      /* Triggered Clock Capture */
1644                 events[1] |= 0x80;      /* Synchronization Train Complete */
1645                 events[2] |= 0x10;      /* Slave Page Response Timeout */
1646                 events[2] |= 0x20;      /* CSB Channel Map Change */
1647         }
1648
1649         /* If Connectionless Slave Broadcast slave role is supported
1650          * enable all necessary events for it.
1651          */
1652         if (lmp_csb_slave_capable(hdev)) {
1653                 events[2] |= 0x01;      /* Synchronization Train Received */
1654                 events[2] |= 0x02;      /* CSB Receive */
1655                 events[2] |= 0x04;      /* CSB Timeout */
1656                 events[2] |= 0x08;      /* Truncated Page Complete */
1657         }
1658
1659         /* Enable Authenticated Payload Timeout Expired event if supported */
1660         if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
1661                 events[2] |= 0x80;
1662
1663         hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1664 }
1665
1666 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1667 {
1668         struct hci_dev *hdev = req->hdev;
1669         u8 p;
1670
1671         hci_setup_event_mask(req);
1672
1673         /* Some Broadcom based Bluetooth controllers do not support the
1674          * Delete Stored Link Key command. They are clearly indicating its
1675          * absence in the bit mask of supported commands.
1676          *
1677          * Check the supported commands and only if the the command is marked
1678          * as supported send it. If not supported assume that the controller
1679          * does not have actual support for stored link keys which makes this
1680          * command redundant anyway.
1681          *
1682          * Some controllers indicate that they support handling deleting
1683          * stored link keys, but they don't. The quirk lets a driver
1684          * just disable this command.
1685          */
1686         if (hdev->commands[6] & 0x80 &&
1687             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1688                 struct hci_cp_delete_stored_link_key cp;
1689
1690                 bacpy(&cp.bdaddr, BDADDR_ANY);
1691                 cp.delete_all = 0x01;
1692                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1693                             sizeof(cp), &cp);
1694         }
1695
1696         if (hdev->commands[5] & 0x10)
1697                 hci_setup_link_policy(req);
1698
1699         if (lmp_le_capable(hdev)) {
1700                 u8 events[8];
1701
1702                 memset(events, 0, sizeof(events));
1703                 events[0] = 0x0f;
1704
1705                 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1706                         events[0] |= 0x10;      /* LE Long Term Key Request */
1707
1708                 /* If controller supports the Connection Parameters Request
1709                  * Link Layer Procedure, enable the corresponding event.
1710                  */
1711                 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1712                         events[0] |= 0x20;      /* LE Remote Connection
1713                                                  * Parameter Request
1714                                                  */
1715
1716                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1717                             events);
1718
1719                 if (hdev->commands[25] & 0x40) {
1720                         /* Read LE Advertising Channel TX Power */
1721                         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1722                 }
1723
1724                 hci_set_le_support(req);
1725         }
1726
1727         /* Read features beyond page 1 if available */
1728         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1729                 struct hci_cp_read_local_ext_features cp;
1730
1731                 cp.page = p;
1732                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1733                             sizeof(cp), &cp);
1734         }
1735 }
1736
1737 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1738 {
1739         struct hci_dev *hdev = req->hdev;
1740
1741         /* Set event mask page 2 if the HCI command for it is supported */
1742         if (hdev->commands[22] & 0x04)
1743                 hci_set_event_mask_page_2(req);
1744
1745         /* Read local codec list if the HCI command is supported */
1746         if (hdev->commands[29] & 0x20)
1747                 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1748
1749         /* Get MWS transport configuration if the HCI command is supported */
1750         if (hdev->commands[30] & 0x08)
1751                 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1752
1753         /* Check for Synchronization Train support */
1754         if (lmp_sync_train_capable(hdev))
1755                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1756
1757         /* Enable Secure Connections if supported and configured */
1758         if ((lmp_sc_capable(hdev) ||
1759              test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1760             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1761                 u8 support = 0x01;
1762                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1763                             sizeof(support), &support);
1764         }
1765 }
1766
1767 static int __hci_init(struct hci_dev *hdev)
1768 {
1769         int err;
1770
1771         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1772         if (err < 0)
1773                 return err;
1774
1775         /* The Device Under Test (DUT) mode is special and available for
1776          * all controller types. So just create it early on.
1777          */
1778         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1779                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1780                                     &dut_mode_fops);
1781         }
1782
1783         /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1784          * BR/EDR/LE type controllers. AMP controllers only need the
1785          * first stage init.
1786          */
1787         if (hdev->dev_type != HCI_BREDR)
1788                 return 0;
1789
1790         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1791         if (err < 0)
1792                 return err;
1793
1794         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1795         if (err < 0)
1796                 return err;
1797
1798         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1799         if (err < 0)
1800                 return err;
1801
1802         /* Only create debugfs entries during the initial setup
1803          * phase and not every time the controller gets powered on.
1804          */
1805         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1806                 return 0;
1807
1808         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1809                             &features_fops);
1810         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1811                            &hdev->manufacturer);
1812         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1813         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1814         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1815                             &blacklist_fops);
1816         debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1817                             &whitelist_fops);
1818         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1819
1820         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1821                             &conn_info_min_age_fops);
1822         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1823                             &conn_info_max_age_fops);
1824
1825         if (lmp_bredr_capable(hdev)) {
1826                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1827                                     hdev, &inquiry_cache_fops);
1828                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1829                                     hdev, &link_keys_fops);
1830                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1831                                     hdev, &dev_class_fops);
1832                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1833                                     hdev, &voice_setting_fops);
1834         }
1835
1836         if (lmp_ssp_capable(hdev)) {
1837                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1838                                     hdev, &auto_accept_delay_fops);
1839                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1840                                     hdev, &force_sc_support_fops);
1841                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1842                                     hdev, &sc_only_mode_fops);
1843         }
1844
1845         if (lmp_sniff_capable(hdev)) {
1846                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1847                                     hdev, &idle_timeout_fops);
1848                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1849                                     hdev, &sniff_min_interval_fops);
1850                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1851                                     hdev, &sniff_max_interval_fops);
1852         }
1853
1854         if (lmp_le_capable(hdev)) {
1855                 debugfs_create_file("identity", 0400, hdev->debugfs,
1856                                     hdev, &identity_fops);
1857                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1858                                     hdev, &rpa_timeout_fops);
1859                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1860                                     hdev, &random_address_fops);
1861                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1862                                     hdev, &static_address_fops);
1863
1864                 /* For controllers with a public address, provide a debug
1865                  * option to force the usage of the configured static
1866                  * address. By default the public address is used.
1867                  */
1868                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1869                         debugfs_create_file("force_static_address", 0644,
1870                                             hdev->debugfs, hdev,
1871                                             &force_static_address_fops);
1872
1873                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1874                                   &hdev->le_white_list_size);
1875                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1876                                     &white_list_fops);
1877                 debugfs_create_file("identity_resolving_keys", 0400,
1878                                     hdev->debugfs, hdev,
1879                                     &identity_resolving_keys_fops);
1880                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1881                                     hdev, &long_term_keys_fops);
1882                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1883                                     hdev, &conn_min_interval_fops);
1884                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1885                                     hdev, &conn_max_interval_fops);
1886                 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1887                                     hdev, &conn_latency_fops);
1888                 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1889                                     hdev, &supervision_timeout_fops);
1890                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1891                                     hdev, &adv_channel_map_fops);
1892                 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1893                                     hdev, &adv_min_interval_fops);
1894                 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1895                                     hdev, &adv_max_interval_fops);
1896                 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1897                                     &device_list_fops);
1898                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1899                                    hdev->debugfs,
1900                                    &hdev->discov_interleaved_timeout);
1901
1902                 smp_register(hdev);
1903         }
1904
1905         return 0;
1906 }
1907
1908 static void hci_init0_req(struct hci_request *req, unsigned long opt)
1909 {
1910         struct hci_dev *hdev = req->hdev;
1911
1912         BT_DBG("%s %ld", hdev->name, opt);
1913
1914         /* Reset */
1915         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1916                 hci_reset_req(req, 0);
1917
1918         /* Read Local Version */
1919         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1920
1921         /* Read BD Address */
1922         if (hdev->set_bdaddr)
1923                 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1924 }
1925
1926 static int __hci_unconf_init(struct hci_dev *hdev)
1927 {
1928         int err;
1929
1930         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1931                 return 0;
1932
1933         err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1934         if (err < 0)
1935                 return err;
1936
1937         return 0;
1938 }
1939
1940 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1941 {
1942         __u8 scan = opt;
1943
1944         BT_DBG("%s %x", req->hdev->name, scan);
1945
1946         /* Inquiry and Page scans */
1947         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1948 }
1949
1950 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1951 {
1952         __u8 auth = opt;
1953
1954         BT_DBG("%s %x", req->hdev->name, auth);
1955
1956         /* Authentication */
1957         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1958 }
1959
1960 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1961 {
1962         __u8 encrypt = opt;
1963
1964         BT_DBG("%s %x", req->hdev->name, encrypt);
1965
1966         /* Encryption */
1967         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1968 }
1969
1970 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1971 {
1972         __le16 policy = cpu_to_le16(opt);
1973
1974         BT_DBG("%s %x", req->hdev->name, policy);
1975
1976         /* Default link policy */
1977         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1978 }
1979
1980 /* Get HCI device by index.
1981  * Device is held on return. */
1982 struct hci_dev *hci_dev_get(int index)
1983 {
1984         struct hci_dev *hdev = NULL, *d;
1985
1986         BT_DBG("%d", index);
1987
1988         if (index < 0)
1989                 return NULL;
1990
1991         read_lock(&hci_dev_list_lock);
1992         list_for_each_entry(d, &hci_dev_list, list) {
1993                 if (d->id == index) {
1994                         hdev = hci_dev_hold(d);
1995                         break;
1996                 }
1997         }
1998         read_unlock(&hci_dev_list_lock);
1999         return hdev;
2000 }
2001
2002 /* ---- Inquiry support ---- */
2003
2004 bool hci_discovery_active(struct hci_dev *hdev)
2005 {
2006         struct discovery_state *discov = &hdev->discovery;
2007
2008         switch (discov->state) {
2009         case DISCOVERY_FINDING:
2010         case DISCOVERY_RESOLVING:
2011                 return true;
2012
2013         default:
2014                 return false;
2015         }
2016 }
2017
2018 void hci_discovery_set_state(struct hci_dev *hdev, int state)
2019 {
2020         int old_state = hdev->discovery.state;
2021
2022         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2023
2024         if (old_state == state)
2025                 return;
2026
2027         hdev->discovery.state = state;
2028
2029         switch (state) {
2030         case DISCOVERY_STOPPED:
2031                 hci_update_background_scan(hdev);
2032
2033                 if (old_state != DISCOVERY_STARTING)
2034                         mgmt_discovering(hdev, 0);
2035                 break;
2036         case DISCOVERY_STARTING:
2037                 break;
2038         case DISCOVERY_FINDING:
2039                 mgmt_discovering(hdev, 1);
2040                 break;
2041         case DISCOVERY_RESOLVING:
2042                 break;
2043         case DISCOVERY_STOPPING:
2044                 break;
2045         }
2046 }
2047
2048 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2049 {
2050         struct discovery_state *cache = &hdev->discovery;
2051         struct inquiry_entry *p, *n;
2052
2053         list_for_each_entry_safe(p, n, &cache->all, all) {
2054                 list_del(&p->all);
2055                 kfree(p);
2056         }
2057
2058         INIT_LIST_HEAD(&cache->unknown);
2059         INIT_LIST_HEAD(&cache->resolve);
2060 }
2061
2062 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2063                                                bdaddr_t *bdaddr)
2064 {
2065         struct discovery_state *cache = &hdev->discovery;
2066         struct inquiry_entry *e;
2067
2068         BT_DBG("cache %p, %pMR", cache, bdaddr);
2069
2070         list_for_each_entry(e, &cache->all, all) {
2071                 if (!bacmp(&e->data.bdaddr, bdaddr))
2072                         return e;
2073         }
2074
2075         return NULL;
2076 }
2077
2078 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2079                                                        bdaddr_t *bdaddr)
2080 {
2081         struct discovery_state *cache = &hdev->discovery;
2082         struct inquiry_entry *e;
2083
2084         BT_DBG("cache %p, %pMR", cache, bdaddr);
2085
2086         list_for_each_entry(e, &cache->unknown, list) {
2087                 if (!bacmp(&e->data.bdaddr, bdaddr))
2088                         return e;
2089         }
2090
2091         return NULL;
2092 }
2093
2094 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2095                                                        bdaddr_t *bdaddr,
2096                                                        int state)
2097 {
2098         struct discovery_state *cache = &hdev->discovery;
2099         struct inquiry_entry *e;
2100
2101         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2102
2103         list_for_each_entry(e, &cache->resolve, list) {
2104                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2105                         return e;
2106                 if (!bacmp(&e->data.bdaddr, bdaddr))
2107                         return e;
2108         }
2109
2110         return NULL;
2111 }
2112
2113 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2114                                       struct inquiry_entry *ie)
2115 {
2116         struct discovery_state *cache = &hdev->discovery;
2117         struct list_head *pos = &cache->resolve;
2118         struct inquiry_entry *p;
2119
2120         list_del(&ie->list);
2121
2122         list_for_each_entry(p, &cache->resolve, list) {
2123                 if (p->name_state != NAME_PENDING &&
2124                     abs(p->data.rssi) >= abs(ie->data.rssi))
2125                         break;
2126                 pos = &p->list;
2127         }
2128
2129         list_add(&ie->list, pos);
2130 }
2131
2132 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2133                              bool name_known)
2134 {
2135         struct discovery_state *cache = &hdev->discovery;
2136         struct inquiry_entry *ie;
2137         u32 flags = 0;
2138
2139         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2140
2141         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2142
2143         if (!data->ssp_mode)
2144                 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2145
2146         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2147         if (ie) {
2148                 if (!ie->data.ssp_mode)
2149                         flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2150
2151                 if (ie->name_state == NAME_NEEDED &&
2152                     data->rssi != ie->data.rssi) {
2153                         ie->data.rssi = data->rssi;
2154                         hci_inquiry_cache_update_resolve(hdev, ie);
2155                 }
2156
2157                 goto update;
2158         }
2159
2160         /* Entry not in the cache. Add new one. */
2161         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2162         if (!ie) {
2163                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2164                 goto done;
2165         }
2166
2167         list_add(&ie->all, &cache->all);
2168
2169         if (name_known) {
2170                 ie->name_state = NAME_KNOWN;
2171         } else {
2172                 ie->name_state = NAME_NOT_KNOWN;
2173                 list_add(&ie->list, &cache->unknown);
2174         }
2175
2176 update:
2177         if (name_known && ie->name_state != NAME_KNOWN &&
2178             ie->name_state != NAME_PENDING) {
2179                 ie->name_state = NAME_KNOWN;
2180                 list_del(&ie->list);
2181         }
2182
2183         memcpy(&ie->data, data, sizeof(*data));
2184         ie->timestamp = jiffies;
2185         cache->timestamp = jiffies;
2186
2187         if (ie->name_state == NAME_NOT_KNOWN)
2188                 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2189
2190 done:
2191         return flags;
2192 }
2193
2194 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2195 {
2196         struct discovery_state *cache = &hdev->discovery;
2197         struct inquiry_info *info = (struct inquiry_info *) buf;
2198         struct inquiry_entry *e;
2199         int copied = 0;
2200
2201         list_for_each_entry(e, &cache->all, all) {
2202                 struct inquiry_data *data = &e->data;
2203
2204                 if (copied >= num)
2205                         break;
2206
2207                 bacpy(&info->bdaddr, &data->bdaddr);
2208                 info->pscan_rep_mode    = data->pscan_rep_mode;
2209                 info->pscan_period_mode = data->pscan_period_mode;
2210                 info->pscan_mode        = data->pscan_mode;
2211                 memcpy(info->dev_class, data->dev_class, 3);
2212                 info->clock_offset      = data->clock_offset;
2213
2214                 info++;
2215                 copied++;
2216         }
2217
2218         BT_DBG("cache %p, copied %d", cache, copied);
2219         return copied;
2220 }
2221
2222 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2223 {
2224         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2225         struct hci_dev *hdev = req->hdev;
2226         struct hci_cp_inquiry cp;
2227
2228         BT_DBG("%s", hdev->name);
2229
2230         if (test_bit(HCI_INQUIRY, &hdev->flags))
2231                 return;
2232
2233         /* Start Inquiry */
2234         memcpy(&cp.lap, &ir->lap, 3);
2235         cp.length  = ir->length;
2236         cp.num_rsp = ir->num_rsp;
2237         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2238 }
2239
2240 int hci_inquiry(void __user *arg)
2241 {
2242         __u8 __user *ptr = arg;
2243         struct hci_inquiry_req ir;
2244         struct hci_dev *hdev;
2245         int err = 0, do_inquiry = 0, max_rsp;
2246         long timeo;
2247         __u8 *buf;
2248
2249         if (copy_from_user(&ir, ptr, sizeof(ir)))
2250                 return -EFAULT;
2251
2252         hdev = hci_dev_get(ir.dev_id);
2253         if (!hdev)
2254                 return -ENODEV;
2255
2256         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2257                 err = -EBUSY;
2258                 goto done;
2259         }
2260
2261         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2262                 err = -EOPNOTSUPP;
2263                 goto done;
2264         }
2265
2266         if (hdev->dev_type != HCI_BREDR) {
2267                 err = -EOPNOTSUPP;
2268                 goto done;
2269         }
2270
2271         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2272                 err = -EOPNOTSUPP;
2273                 goto done;
2274         }
2275
2276         hci_dev_lock(hdev);
2277         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2278             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2279                 hci_inquiry_cache_flush(hdev);
2280                 do_inquiry = 1;
2281         }
2282         hci_dev_unlock(hdev);
2283
2284         timeo = ir.length * msecs_to_jiffies(2000);
2285
2286         if (do_inquiry) {
2287                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2288                                    timeo);
2289                 if (err < 0)
2290                         goto done;
2291
2292                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2293                  * cleared). If it is interrupted by a signal, return -EINTR.
2294                  */
2295                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2296                                 TASK_INTERRUPTIBLE))
2297                         return -EINTR;
2298         }
2299
2300         /* for unlimited number of responses we will use buffer with
2301          * 255 entries
2302          */
2303         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2304
2305         /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2306          * copy it to the user space.
2307          */
2308         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2309         if (!buf) {
2310                 err = -ENOMEM;
2311                 goto done;
2312         }
2313
2314         hci_dev_lock(hdev);
2315         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2316         hci_dev_unlock(hdev);
2317
2318         BT_DBG("num_rsp %d", ir.num_rsp);
2319
2320         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2321                 ptr += sizeof(ir);
2322                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2323                                  ir.num_rsp))
2324                         err = -EFAULT;
2325         } else
2326                 err = -EFAULT;
2327
2328         kfree(buf);
2329
2330 done:
2331         hci_dev_put(hdev);
2332         return err;
2333 }
2334
2335 static int hci_dev_do_open(struct hci_dev *hdev)
2336 {
2337         int ret = 0;
2338
2339         BT_DBG("%s %p", hdev->name, hdev);
2340
2341         hci_req_lock(hdev);
2342
2343         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2344                 ret = -ENODEV;
2345                 goto done;
2346         }
2347
2348         if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2349             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2350                 /* Check for rfkill but allow the HCI setup stage to
2351                  * proceed (which in itself doesn't cause any RF activity).
2352                  */
2353                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2354                         ret = -ERFKILL;
2355                         goto done;
2356                 }
2357
2358                 /* Check for valid public address or a configured static
2359                  * random adddress, but let the HCI setup proceed to
2360                  * be able to determine if there is a public address
2361                  * or not.
2362                  *
2363                  * In case of user channel usage, it is not important
2364                  * if a public address or static random address is
2365                  * available.
2366                  *
2367                  * This check is only valid for BR/EDR controllers
2368                  * since AMP controllers do not have an address.
2369                  */
2370                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2371                     hdev->dev_type == HCI_BREDR &&
2372                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2373                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2374                         ret = -EADDRNOTAVAIL;
2375                         goto done;
2376                 }
2377         }
2378
2379         if (test_bit(HCI_UP, &hdev->flags)) {
2380                 ret = -EALREADY;
2381                 goto done;
2382         }
2383
2384         if (hdev->open(hdev)) {
2385                 ret = -EIO;
2386                 goto done;
2387         }
2388
2389         atomic_set(&hdev->cmd_cnt, 1);
2390         set_bit(HCI_INIT, &hdev->flags);
2391
2392         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2393                 if (hdev->setup)
2394                         ret = hdev->setup(hdev);
2395
2396                 /* The transport driver can set these quirks before
2397                  * creating the HCI device or in its setup callback.
2398                  *
2399                  * In case any of them is set, the controller has to
2400                  * start up as unconfigured.
2401                  */
2402                 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2403                     test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2404                         set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2405
2406                 /* For an unconfigured controller it is required to
2407                  * read at least the version information provided by
2408                  * the Read Local Version Information command.
2409                  *
2410                  * If the set_bdaddr driver callback is provided, then
2411                  * also the original Bluetooth public device address
2412                  * will be read using the Read BD Address command.
2413                  */
2414                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2415                         ret = __hci_unconf_init(hdev);
2416         }
2417
2418         if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2419                 /* If public address change is configured, ensure that
2420                  * the address gets programmed. If the driver does not
2421                  * support changing the public address, fail the power
2422                  * on procedure.
2423                  */
2424                 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2425                     hdev->set_bdaddr)
2426                         ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2427                 else
2428                         ret = -EADDRNOTAVAIL;
2429         }
2430
2431         if (!ret) {
2432                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2433                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2434                         ret = __hci_init(hdev);
2435         }
2436
2437         clear_bit(HCI_INIT, &hdev->flags);
2438
2439         if (!ret) {
2440                 hci_dev_hold(hdev);
2441                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2442                 set_bit(HCI_UP, &hdev->flags);
2443                 hci_notify(hdev, HCI_DEV_UP);
2444                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2445                     !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2446                     !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2447                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2448                     hdev->dev_type == HCI_BREDR) {
2449                         hci_dev_lock(hdev);
2450                         mgmt_powered(hdev, 1);
2451                         hci_dev_unlock(hdev);
2452                 }
2453         } else {
2454                 /* Init failed, cleanup */
2455                 flush_work(&hdev->tx_work);
2456                 flush_work(&hdev->cmd_work);
2457                 flush_work(&hdev->rx_work);
2458
2459                 skb_queue_purge(&hdev->cmd_q);
2460                 skb_queue_purge(&hdev->rx_q);
2461
2462                 if (hdev->flush)
2463                         hdev->flush(hdev);
2464
2465                 if (hdev->sent_cmd) {
2466                         kfree_skb(hdev->sent_cmd);
2467                         hdev->sent_cmd = NULL;
2468                 }
2469
2470                 hdev->close(hdev);
2471                 hdev->flags &= BIT(HCI_RAW);
2472         }
2473
2474 done:
2475         hci_req_unlock(hdev);
2476         return ret;
2477 }
2478
2479 /* ---- HCI ioctl helpers ---- */
2480
2481 int hci_dev_open(__u16 dev)
2482 {
2483         struct hci_dev *hdev;
2484         int err;
2485
2486         hdev = hci_dev_get(dev);
2487         if (!hdev)
2488                 return -ENODEV;
2489
2490         /* Devices that are marked as unconfigured can only be powered
2491          * up as user channel. Trying to bring them up as normal devices
2492          * will result into a failure. Only user channel operation is
2493          * possible.
2494          *
2495          * When this function is called for a user channel, the flag
2496          * HCI_USER_CHANNEL will be set first before attempting to
2497          * open the device.
2498          */
2499         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2500             !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2501                 err = -EOPNOTSUPP;
2502                 goto done;
2503         }
2504
2505         /* We need to ensure that no other power on/off work is pending
2506          * before proceeding to call hci_dev_do_open. This is
2507          * particularly important if the setup procedure has not yet
2508          * completed.
2509          */
2510         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2511                 cancel_delayed_work(&hdev->power_off);
2512
2513         /* After this call it is guaranteed that the setup procedure
2514          * has finished. This means that error conditions like RFKILL
2515          * or no valid public or static random address apply.
2516          */
2517         flush_workqueue(hdev->req_workqueue);
2518
2519         /* For controllers not using the management interface and that
2520          * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2521          * so that pairing works for them. Once the management interface
2522          * is in use this bit will be cleared again and userspace has
2523          * to explicitly enable it.
2524          */
2525         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2526             !test_bit(HCI_MGMT, &hdev->dev_flags))
2527                 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2528
2529         err = hci_dev_do_open(hdev);
2530
2531 done:
2532         hci_dev_put(hdev);
2533         return err;
2534 }
2535
2536 /* This function requires the caller holds hdev->lock */
2537 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2538 {
2539         struct hci_conn_params *p;
2540
2541         list_for_each_entry(p, &hdev->le_conn_params, list) {
2542                 if (p->conn) {
2543                         hci_conn_drop(p->conn);
2544                         p->conn = NULL;
2545                 }
2546                 list_del_init(&p->action);
2547         }
2548
2549         BT_DBG("All LE pending actions cleared");
2550 }
2551
2552 static int hci_dev_do_close(struct hci_dev *hdev)
2553 {
2554         BT_DBG("%s %p", hdev->name, hdev);
2555
2556         cancel_delayed_work(&hdev->power_off);
2557
2558         hci_req_cancel(hdev, ENODEV);
2559         hci_req_lock(hdev);
2560
2561         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2562                 cancel_delayed_work_sync(&hdev->cmd_timer);
2563                 hci_req_unlock(hdev);
2564                 return 0;
2565         }
2566
2567         /* Flush RX and TX works */
2568         flush_work(&hdev->tx_work);
2569         flush_work(&hdev->rx_work);
2570
2571         if (hdev->discov_timeout > 0) {
2572                 cancel_delayed_work(&hdev->discov_off);
2573                 hdev->discov_timeout = 0;
2574                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2575                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2576         }
2577
2578         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2579                 cancel_delayed_work(&hdev->service_cache);
2580
2581         cancel_delayed_work_sync(&hdev->le_scan_disable);
2582
2583         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2584                 cancel_delayed_work_sync(&hdev->rpa_expired);
2585
2586         hci_dev_lock(hdev);
2587         hci_inquiry_cache_flush(hdev);
2588         hci_pend_le_actions_clear(hdev);
2589         hci_conn_hash_flush(hdev);
2590         hci_dev_unlock(hdev);
2591
2592         hci_notify(hdev, HCI_DEV_DOWN);
2593
2594         if (hdev->flush)
2595                 hdev->flush(hdev);
2596
2597         /* Reset device */
2598         skb_queue_purge(&hdev->cmd_q);
2599         atomic_set(&hdev->cmd_cnt, 1);
2600         if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2601             !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2602             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2603                 set_bit(HCI_INIT, &hdev->flags);
2604                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2605                 clear_bit(HCI_INIT, &hdev->flags);
2606         }
2607
2608         /* flush cmd  work */
2609         flush_work(&hdev->cmd_work);
2610
2611         /* Drop queues */
2612         skb_queue_purge(&hdev->rx_q);
2613         skb_queue_purge(&hdev->cmd_q);
2614         skb_queue_purge(&hdev->raw_q);
2615
2616         /* Drop last sent command */
2617         if (hdev->sent_cmd) {
2618                 cancel_delayed_work_sync(&hdev->cmd_timer);
2619                 kfree_skb(hdev->sent_cmd);
2620                 hdev->sent_cmd = NULL;
2621         }
2622
2623         kfree_skb(hdev->recv_evt);
2624         hdev->recv_evt = NULL;
2625
2626         /* After this point our queues are empty
2627          * and no tasks are scheduled. */
2628         hdev->close(hdev);
2629
2630         /* Clear flags */
2631         hdev->flags &= BIT(HCI_RAW);
2632         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2633
2634         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2635                 if (hdev->dev_type == HCI_BREDR) {
2636                         hci_dev_lock(hdev);
2637                         mgmt_powered(hdev, 0);
2638                         hci_dev_unlock(hdev);
2639                 }
2640         }
2641
2642         /* Controller radio is available but is currently powered down */
2643         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2644
2645         memset(hdev->eir, 0, sizeof(hdev->eir));
2646         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2647         bacpy(&hdev->random_addr, BDADDR_ANY);
2648
2649         hci_req_unlock(hdev);
2650
2651         hci_dev_put(hdev);
2652         return 0;
2653 }
2654
2655 int hci_dev_close(__u16 dev)
2656 {
2657         struct hci_dev *hdev;
2658         int err;
2659
2660         hdev = hci_dev_get(dev);
2661         if (!hdev)
2662                 return -ENODEV;
2663
2664         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2665                 err = -EBUSY;
2666                 goto done;
2667         }
2668
2669         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2670                 cancel_delayed_work(&hdev->power_off);
2671
2672         err = hci_dev_do_close(hdev);
2673
2674 done:
2675         hci_dev_put(hdev);
2676         return err;
2677 }
2678
2679 int hci_dev_reset(__u16 dev)
2680 {
2681         struct hci_dev *hdev;
2682         int ret = 0;
2683
2684         hdev = hci_dev_get(dev);
2685         if (!hdev)
2686                 return -ENODEV;
2687
2688         hci_req_lock(hdev);
2689
2690         if (!test_bit(HCI_UP, &hdev->flags)) {
2691                 ret = -ENETDOWN;
2692                 goto done;
2693         }
2694
2695         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2696                 ret = -EBUSY;
2697                 goto done;
2698         }
2699
2700         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2701                 ret = -EOPNOTSUPP;
2702                 goto done;
2703         }
2704
2705         /* Drop queues */
2706         skb_queue_purge(&hdev->rx_q);
2707         skb_queue_purge(&hdev->cmd_q);
2708
2709         hci_dev_lock(hdev);
2710         hci_inquiry_cache_flush(hdev);
2711         hci_conn_hash_flush(hdev);
2712         hci_dev_unlock(hdev);
2713
2714         if (hdev->flush)
2715                 hdev->flush(hdev);
2716
2717         atomic_set(&hdev->cmd_cnt, 1);
2718         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2719
2720         ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2721
2722 done:
2723         hci_req_unlock(hdev);
2724         hci_dev_put(hdev);
2725         return ret;
2726 }
2727
2728 int hci_dev_reset_stat(__u16 dev)
2729 {
2730         struct hci_dev *hdev;
2731         int ret = 0;
2732
2733         hdev = hci_dev_get(dev);
2734         if (!hdev)
2735                 return -ENODEV;
2736
2737         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2738                 ret = -EBUSY;
2739                 goto done;
2740         }
2741
2742         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2743                 ret = -EOPNOTSUPP;
2744                 goto done;
2745         }
2746
2747         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2748
2749 done:
2750         hci_dev_put(hdev);
2751         return ret;
2752 }
2753
2754 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2755 {
2756         bool conn_changed, discov_changed;
2757
2758         BT_DBG("%s scan 0x%02x", hdev->name, scan);
2759
2760         if ((scan & SCAN_PAGE))
2761                 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2762                                                  &hdev->dev_flags);
2763         else
2764                 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2765                                                   &hdev->dev_flags);
2766
2767         if ((scan & SCAN_INQUIRY)) {
2768                 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2769                                                    &hdev->dev_flags);
2770         } else {
2771                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2772                 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2773                                                     &hdev->dev_flags);
2774         }
2775
2776         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2777                 return;
2778
2779         if (conn_changed || discov_changed) {
2780                 /* In case this was disabled through mgmt */
2781                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2782
2783                 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2784                         mgmt_update_adv_data(hdev);
2785
2786                 mgmt_new_settings(hdev);
2787         }
2788 }
2789
2790 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2791 {
2792         struct hci_dev *hdev;
2793         struct hci_dev_req dr;
2794         int err = 0;
2795
2796         if (copy_from_user(&dr, arg, sizeof(dr)))
2797                 return -EFAULT;
2798
2799         hdev = hci_dev_get(dr.dev_id);
2800         if (!hdev)
2801                 return -ENODEV;
2802
2803         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2804                 err = -EBUSY;
2805                 goto done;
2806         }
2807
2808         if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2809                 err = -EOPNOTSUPP;
2810                 goto done;
2811         }
2812
2813         if (hdev->dev_type != HCI_BREDR) {
2814                 err = -EOPNOTSUPP;
2815                 goto done;
2816         }
2817
2818         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2819                 err = -EOPNOTSUPP;
2820                 goto done;
2821         }
2822
2823         switch (cmd) {
2824         case HCISETAUTH:
2825                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2826                                    HCI_INIT_TIMEOUT);
2827                 break;
2828
2829         case HCISETENCRYPT:
2830                 if (!lmp_encrypt_capable(hdev)) {
2831                         err = -EOPNOTSUPP;
2832                         break;
2833                 }
2834
2835                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2836                         /* Auth must be enabled first */
2837                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2838                                            HCI_INIT_TIMEOUT);
2839                         if (err)
2840                                 break;
2841                 }
2842
2843                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2844                                    HCI_INIT_TIMEOUT);
2845                 break;
2846
2847         case HCISETSCAN:
2848                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2849                                    HCI_INIT_TIMEOUT);
2850
2851                 /* Ensure that the connectable and discoverable states
2852                  * get correctly modified as this was a non-mgmt change.
2853                  */
2854                 if (!err)
2855                         hci_update_scan_state(hdev, dr.dev_opt);
2856                 break;
2857
2858         case HCISETLINKPOL:
2859                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2860                                    HCI_INIT_TIMEOUT);
2861                 break;
2862
2863         case HCISETLINKMODE:
2864                 hdev->link_mode = ((__u16) dr.dev_opt) &
2865                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2866                 break;
2867
2868         case HCISETPTYPE:
2869                 hdev->pkt_type = (__u16) dr.dev_opt;
2870                 break;
2871
2872         case HCISETACLMTU:
2873                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2874                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2875                 break;
2876
2877         case HCISETSCOMTU:
2878                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2879                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2880                 break;
2881
2882         default:
2883                 err = -EINVAL;
2884                 break;
2885         }
2886
2887 done:
2888         hci_dev_put(hdev);
2889         return err;
2890 }
2891
2892 int hci_get_dev_list(void __user *arg)
2893 {
2894         struct hci_dev *hdev;
2895         struct hci_dev_list_req *dl;
2896         struct hci_dev_req *dr;
2897         int n = 0, size, err;
2898         __u16 dev_num;
2899
2900         if (get_user(dev_num, (__u16 __user *) arg))
2901                 return -EFAULT;
2902
2903         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2904                 return -EINVAL;
2905
2906         size = sizeof(*dl) + dev_num * sizeof(*dr);
2907
2908         dl = kzalloc(size, GFP_KERNEL);
2909         if (!dl)
2910                 return -ENOMEM;
2911
2912         dr = dl->dev_req;
2913
2914         read_lock(&hci_dev_list_lock);
2915         list_for_each_entry(hdev, &hci_dev_list, list) {
2916                 unsigned long flags = hdev->flags;
2917
2918                 /* When the auto-off is configured it means the transport
2919                  * is running, but in that case still indicate that the
2920                  * device is actually down.
2921                  */
2922                 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2923                         flags &= ~BIT(HCI_UP);
2924
2925                 (dr + n)->dev_id  = hdev->id;
2926                 (dr + n)->dev_opt = flags;
2927
2928                 if (++n >= dev_num)
2929                         break;
2930         }
2931         read_unlock(&hci_dev_list_lock);
2932
2933         dl->dev_num = n;
2934         size = sizeof(*dl) + n * sizeof(*dr);
2935
2936         err = copy_to_user(arg, dl, size);
2937         kfree(dl);
2938
2939         return err ? -EFAULT : 0;
2940 }
2941
2942 int hci_get_dev_info(void __user *arg)
2943 {
2944         struct hci_dev *hdev;
2945         struct hci_dev_info di;
2946         unsigned long flags;
2947         int err = 0;
2948
2949         if (copy_from_user(&di, arg, sizeof(di)))
2950                 return -EFAULT;
2951
2952         hdev = hci_dev_get(di.dev_id);
2953         if (!hdev)
2954                 return -ENODEV;
2955
2956         /* When the auto-off is configured it means the transport
2957          * is running, but in that case still indicate that the
2958          * device is actually down.
2959          */
2960         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2961                 flags = hdev->flags & ~BIT(HCI_UP);
2962         else
2963                 flags = hdev->flags;
2964
2965         strcpy(di.name, hdev->name);
2966         di.bdaddr   = hdev->bdaddr;
2967         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2968         di.flags    = flags;
2969         di.pkt_type = hdev->pkt_type;
2970         if (lmp_bredr_capable(hdev)) {
2971                 di.acl_mtu  = hdev->acl_mtu;
2972                 di.acl_pkts = hdev->acl_pkts;
2973                 di.sco_mtu  = hdev->sco_mtu;
2974                 di.sco_pkts = hdev->sco_pkts;
2975         } else {
2976                 di.acl_mtu  = hdev->le_mtu;
2977                 di.acl_pkts = hdev->le_pkts;
2978                 di.sco_mtu  = 0;
2979                 di.sco_pkts = 0;
2980         }
2981         di.link_policy = hdev->link_policy;
2982         di.link_mode   = hdev->link_mode;
2983
2984         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2985         memcpy(&di.features, &hdev->features, sizeof(di.features));
2986
2987         if (copy_to_user(arg, &di, sizeof(di)))
2988                 err = -EFAULT;
2989
2990         hci_dev_put(hdev);
2991
2992         return err;
2993 }
2994
2995 /* ---- Interface to HCI drivers ---- */
2996
2997 static int hci_rfkill_set_block(void *data, bool blocked)
2998 {
2999         struct hci_dev *hdev = data;
3000
3001         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3002
3003         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3004                 return -EBUSY;
3005
3006         if (blocked) {
3007                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3008                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3009                     !test_bit(HCI_CONFIG, &hdev->dev_flags))
3010                         hci_dev_do_close(hdev);
3011         } else {
3012                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
3013         }
3014
3015         return 0;
3016 }
3017
3018 static const struct rfkill_ops hci_rfkill_ops = {
3019         .set_block = hci_rfkill_set_block,
3020 };
3021
3022 static void hci_power_on(struct work_struct *work)
3023 {
3024         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3025         int err;
3026
3027         BT_DBG("%s", hdev->name);
3028
3029         err = hci_dev_do_open(hdev);
3030         if (err < 0) {
3031                 mgmt_set_powered_failed(hdev, err);
3032                 return;
3033         }
3034
3035         /* During the HCI setup phase, a few error conditions are
3036          * ignored and they need to be checked now. If they are still
3037          * valid, it is important to turn the device back off.
3038          */
3039         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3040             test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3041             (hdev->dev_type == HCI_BREDR &&
3042              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3043              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3044                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3045                 hci_dev_do_close(hdev);
3046         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3047                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3048                                    HCI_AUTO_OFF_TIMEOUT);
3049         }
3050
3051         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3052                 /* For unconfigured devices, set the HCI_RAW flag
3053                  * so that userspace can easily identify them.
3054                  */
3055                 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3056                         set_bit(HCI_RAW, &hdev->flags);
3057
3058                 /* For fully configured devices, this will send
3059                  * the Index Added event. For unconfigured devices,
3060                  * it will send Unconfigued Index Added event.
3061                  *
3062                  * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3063                  * and no event will be send.
3064                  */
3065                 mgmt_index_added(hdev);
3066         } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3067                 /* When the controller is now configured, then it
3068                  * is important to clear the HCI_RAW flag.
3069                  */
3070                 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3071                         clear_bit(HCI_RAW, &hdev->flags);
3072
3073                 /* Powering on the controller with HCI_CONFIG set only
3074                  * happens with the transition from unconfigured to
3075                  * configured. This will send the Index Added event.
3076                  */
3077                 mgmt_index_added(hdev);
3078         }
3079 }
3080
3081 static void hci_power_off(struct work_struct *work)
3082 {
3083         struct hci_dev *hdev = container_of(work, struct hci_dev,
3084                                             power_off.work);
3085
3086         BT_DBG("%s", hdev->name);
3087
3088         hci_dev_do_close(hdev);
3089 }
3090
3091 static void hci_discov_off(struct work_struct *work)
3092 {
3093         struct hci_dev *hdev;
3094
3095         hdev = container_of(work, struct hci_dev, discov_off.work);
3096
3097         BT_DBG("%s", hdev->name);
3098
3099         mgmt_discoverable_timeout(hdev);
3100 }
3101
3102 void hci_uuids_clear(struct hci_dev *hdev)
3103 {
3104         struct bt_uuid *uuid, *tmp;
3105
3106         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3107                 list_del(&uuid->list);
3108                 kfree(uuid);
3109         }
3110 }
3111
3112 void hci_link_keys_clear(struct hci_dev *hdev)
3113 {
3114         struct list_head *p, *n;
3115
3116         list_for_each_safe(p, n, &hdev->link_keys) {
3117                 struct link_key *key;
3118
3119                 key = list_entry(p, struct link_key, list);
3120
3121                 list_del(p);
3122                 kfree(key);
3123         }
3124 }
3125
3126 void hci_smp_ltks_clear(struct hci_dev *hdev)
3127 {
3128         struct smp_ltk *k, *tmp;
3129
3130         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3131                 list_del(&k->list);
3132                 kfree(k);
3133         }
3134 }
3135
3136 void hci_smp_irks_clear(struct hci_dev *hdev)
3137 {
3138         struct smp_irk *k, *tmp;
3139
3140         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3141                 list_del(&k->list);
3142                 kfree(k);
3143         }
3144 }
3145
3146 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3147 {
3148         struct link_key *k;
3149
3150         list_for_each_entry(k, &hdev->link_keys, list)
3151                 if (bacmp(bdaddr, &k->bdaddr) == 0)
3152                         return k;
3153
3154         return NULL;
3155 }
3156
3157 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3158                                u8 key_type, u8 old_key_type)
3159 {
3160         /* Legacy key */
3161         if (key_type < 0x03)
3162                 return true;
3163
3164         /* Debug keys are insecure so don't store them persistently */
3165         if (key_type == HCI_LK_DEBUG_COMBINATION)
3166                 return false;
3167
3168         /* Changed combination key and there's no previous one */
3169         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3170                 return false;
3171
3172         /* Security mode 3 case */
3173         if (!conn)
3174                 return true;
3175
3176         /* Neither local nor remote side had no-bonding as requirement */
3177         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3178                 return true;
3179
3180         /* Local side had dedicated bonding as requirement */
3181         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3182                 return true;
3183
3184         /* Remote side had dedicated bonding as requirement */
3185         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3186                 return true;
3187
3188         /* If none of the above criteria match, then don't store the key
3189          * persistently */
3190         return false;
3191 }
3192
3193 static u8 ltk_role(u8 type)
3194 {
3195         if (type == SMP_LTK)
3196                 return HCI_ROLE_MASTER;
3197
3198         return HCI_ROLE_SLAVE;
3199 }
3200
3201 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3202                              u8 role)
3203 {
3204         struct smp_ltk *k;
3205
3206         list_for_each_entry(k, &hdev->long_term_keys, list) {
3207                 if (k->ediv != ediv || k->rand != rand)
3208                         continue;
3209
3210                 if (ltk_role(k->type) != role)
3211                         continue;
3212
3213                 return k;
3214         }
3215
3216         return NULL;
3217 }
3218
3219 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3220                                      u8 addr_type, u8 role)
3221 {
3222         struct smp_ltk *k;
3223
3224         list_for_each_entry(k, &hdev->long_term_keys, list)
3225                 if (addr_type == k->bdaddr_type &&
3226                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3227                     ltk_role(k->type) == role)
3228                         return k;
3229
3230         return NULL;
3231 }
3232
3233 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3234 {
3235         struct smp_irk *irk;
3236
3237         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3238                 if (!bacmp(&irk->rpa, rpa))
3239                         return irk;
3240         }
3241
3242         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3243                 if (smp_irk_matches(hdev, irk->val, rpa)) {
3244                         bacpy(&irk->rpa, rpa);
3245                         return irk;
3246                 }
3247         }
3248
3249         return NULL;
3250 }
3251
3252 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3253                                      u8 addr_type)
3254 {
3255         struct smp_irk *irk;
3256
3257         /* Identity Address must be public or static random */
3258         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3259                 return NULL;
3260
3261         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3262                 if (addr_type == irk->addr_type &&
3263                     bacmp(bdaddr, &irk->bdaddr) == 0)
3264                         return irk;
3265         }
3266
3267         return NULL;
3268 }
3269
3270 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3271                                   bdaddr_t *bdaddr, u8 *val, u8 type,
3272                                   u8 pin_len, bool *persistent)
3273 {
3274         struct link_key *key, *old_key;
3275         u8 old_key_type;
3276
3277         old_key = hci_find_link_key(hdev, bdaddr);
3278         if (old_key) {
3279                 old_key_type = old_key->type;
3280                 key = old_key;
3281         } else {
3282                 old_key_type = conn ? conn->key_type : 0xff;
3283                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3284                 if (!key)
3285                         return NULL;
3286                 list_add(&key->list, &hdev->link_keys);
3287         }
3288
3289         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3290
3291         /* Some buggy controller combinations generate a changed
3292          * combination key for legacy pairing even when there's no
3293          * previous key */
3294         if (type == HCI_LK_CHANGED_COMBINATION &&
3295             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3296                 type = HCI_LK_COMBINATION;
3297                 if (conn)
3298                         conn->key_type = type;
3299         }
3300
3301         bacpy(&key->bdaddr, bdaddr);
3302         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3303         key->pin_len = pin_len;
3304
3305         if (type == HCI_LK_CHANGED_COMBINATION)
3306                 key->type = old_key_type;
3307         else
3308                 key->type = type;
3309
3310         if (persistent)
3311                 *persistent = hci_persistent_key(hdev, conn, type,
3312                                                  old_key_type);
3313
3314         return key;
3315 }
3316
3317 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3318                             u8 addr_type, u8 type, u8 authenticated,
3319                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3320 {
3321         struct smp_ltk *key, *old_key;
3322         u8 role = ltk_role(type);
3323
3324         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3325         if (old_key)
3326                 key = old_key;
3327         else {
3328                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3329                 if (!key)
3330                         return NULL;
3331                 list_add(&key->list, &hdev->long_term_keys);
3332         }
3333
3334         bacpy(&key->bdaddr, bdaddr);
3335         key->bdaddr_type = addr_type;
3336         memcpy(key->val, tk, sizeof(key->val));
3337         key->authenticated = authenticated;
3338         key->ediv = ediv;
3339         key->rand = rand;
3340         key->enc_size = enc_size;
3341         key->type = type;
3342
3343         return key;
3344 }
3345
3346 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3347                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3348 {
3349         struct smp_irk *irk;
3350
3351         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3352         if (!irk) {
3353                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3354                 if (!irk)
3355                         return NULL;
3356
3357                 bacpy(&irk->bdaddr, bdaddr);
3358                 irk->addr_type = addr_type;
3359
3360                 list_add(&irk->list, &hdev->identity_resolving_keys);
3361         }
3362
3363         memcpy(irk->val, val, 16);
3364         bacpy(&irk->rpa, rpa);
3365
3366         return irk;
3367 }
3368
3369 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3370 {
3371         struct link_key *key;
3372
3373         key = hci_find_link_key(hdev, bdaddr);
3374         if (!key)
3375                 return -ENOENT;
3376
3377         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3378
3379         list_del(&key->list);
3380         kfree(key);
3381
3382         return 0;
3383 }
3384
3385 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3386 {
3387         struct smp_ltk *k, *tmp;
3388         int removed = 0;
3389
3390         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3391                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3392                         continue;
3393
3394                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3395
3396                 list_del(&k->list);
3397                 kfree(k);
3398                 removed++;
3399         }
3400
3401         return removed ? 0 : -ENOENT;
3402 }
3403
3404 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3405 {
3406         struct smp_irk *k, *tmp;
3407
3408         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3409                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3410                         continue;
3411
3412                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3413
3414                 list_del(&k->list);
3415                 kfree(k);
3416         }
3417 }
3418
3419 /* HCI command timer function */
3420 static void hci_cmd_timeout(struct work_struct *work)
3421 {
3422         struct hci_dev *hdev = container_of(work, struct hci_dev,
3423                                             cmd_timer.work);
3424
3425         if (hdev->sent_cmd) {
3426                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3427                 u16 opcode = __le16_to_cpu(sent->opcode);
3428
3429                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3430         } else {
3431                 BT_ERR("%s command tx timeout", hdev->name);
3432         }
3433
3434         atomic_set(&hdev->cmd_cnt, 1);
3435         queue_work(hdev->workqueue, &hdev->cmd_work);
3436 }
3437
3438 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3439                                           bdaddr_t *bdaddr)
3440 {
3441         struct oob_data *data;
3442
3443         list_for_each_entry(data, &hdev->remote_oob_data, list)
3444                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3445                         return data;
3446
3447         return NULL;
3448 }
3449
3450 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3451 {
3452         struct oob_data *data;
3453
3454         data = hci_find_remote_oob_data(hdev, bdaddr);
3455         if (!data)
3456                 return -ENOENT;
3457
3458         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3459
3460         list_del(&data->list);
3461         kfree(data);
3462
3463         return 0;
3464 }
3465
3466 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3467 {
3468         struct oob_data *data, *n;
3469
3470         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3471                 list_del(&data->list);
3472                 kfree(data);
3473         }
3474 }
3475
3476 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3477                             u8 *hash, u8 *randomizer)
3478 {
3479         struct oob_data *data;
3480
3481         data = hci_find_remote_oob_data(hdev, bdaddr);
3482         if (!data) {
3483                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3484                 if (!data)
3485                         return -ENOMEM;
3486
3487                 bacpy(&data->bdaddr, bdaddr);
3488                 list_add(&data->list, &hdev->remote_oob_data);
3489         }
3490
3491         memcpy(data->hash192, hash, sizeof(data->hash192));
3492         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3493
3494         memset(data->hash256, 0, sizeof(data->hash256));
3495         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3496
3497         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3498
3499         return 0;
3500 }
3501
3502 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3503                                 u8 *hash192, u8 *randomizer192,
3504                                 u8 *hash256, u8 *randomizer256)
3505 {
3506         struct oob_data *data;
3507
3508         data = hci_find_remote_oob_data(hdev, bdaddr);
3509         if (!data) {
3510                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3511                 if (!data)
3512                         return -ENOMEM;
3513
3514                 bacpy(&data->bdaddr, bdaddr);
3515                 list_add(&data->list, &hdev->remote_oob_data);
3516         }
3517
3518         memcpy(data->hash192, hash192, sizeof(data->hash192));
3519         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3520
3521         memcpy(data->hash256, hash256, sizeof(data->hash256));
3522         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3523
3524         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3525
3526         return 0;
3527 }
3528
3529 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3530                                          bdaddr_t *bdaddr, u8 type)
3531 {
3532         struct bdaddr_list *b;
3533
3534         list_for_each_entry(b, bdaddr_list, list) {
3535                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3536                         return b;
3537         }
3538
3539         return NULL;
3540 }
3541
3542 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3543 {
3544         struct list_head *p, *n;
3545
3546         list_for_each_safe(p, n, bdaddr_list) {
3547                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3548
3549                 list_del(p);
3550                 kfree(b);
3551         }
3552 }
3553
3554 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3555 {
3556         struct bdaddr_list *entry;
3557
3558         if (!bacmp(bdaddr, BDADDR_ANY))
3559                 return -EBADF;
3560
3561         if (hci_bdaddr_list_lookup(list, bdaddr, type))
3562                 return -EEXIST;
3563
3564         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3565         if (!entry)
3566                 return -ENOMEM;
3567
3568         bacpy(&entry->bdaddr, bdaddr);
3569         entry->bdaddr_type = type;
3570
3571         list_add(&entry->list, list);
3572
3573         return 0;
3574 }
3575
3576 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3577 {
3578         struct bdaddr_list *entry;
3579
3580         if (!bacmp(bdaddr, BDADDR_ANY)) {
3581                 hci_bdaddr_list_clear(list);
3582                 return 0;
3583         }
3584
3585         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3586         if (!entry)
3587                 return -ENOENT;
3588
3589         list_del(&entry->list);
3590         kfree(entry);
3591
3592         return 0;
3593 }
3594
3595 /* This function requires the caller holds hdev->lock */
3596 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3597                                                bdaddr_t *addr, u8 addr_type)
3598 {
3599         struct hci_conn_params *params;
3600
3601         /* The conn params list only contains identity addresses */
3602         if (!hci_is_identity_address(addr, addr_type))
3603                 return NULL;
3604
3605         list_for_each_entry(params, &hdev->le_conn_params, list) {
3606                 if (bacmp(&params->addr, addr) == 0 &&
3607                     params->addr_type == addr_type) {
3608                         return params;
3609                 }
3610         }
3611
3612         return NULL;
3613 }
3614
3615 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3616 {
3617         struct hci_conn *conn;
3618
3619         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3620         if (!conn)
3621                 return false;
3622
3623         if (conn->dst_type != type)
3624                 return false;
3625
3626         if (conn->state != BT_CONNECTED)
3627                 return false;
3628
3629         return true;
3630 }
3631
3632 /* This function requires the caller holds hdev->lock */
3633 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3634                                                   bdaddr_t *addr, u8 addr_type)
3635 {
3636         struct hci_conn_params *param;
3637
3638         /* The list only contains identity addresses */
3639         if (!hci_is_identity_address(addr, addr_type))
3640                 return NULL;
3641
3642         list_for_each_entry(param, list, action) {
3643                 if (bacmp(&param->addr, addr) == 0 &&
3644                     param->addr_type == addr_type)
3645                         return param;
3646         }
3647
3648         return NULL;
3649 }
3650
3651 /* This function requires the caller holds hdev->lock */
3652 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3653                                             bdaddr_t *addr, u8 addr_type)
3654 {
3655         struct hci_conn_params *params;
3656
3657         if (!hci_is_identity_address(addr, addr_type))
3658                 return NULL;
3659
3660         params = hci_conn_params_lookup(hdev, addr, addr_type);
3661         if (params)
3662                 return params;
3663
3664         params = kzalloc(sizeof(*params), GFP_KERNEL);
3665         if (!params) {
3666                 BT_ERR("Out of memory");
3667                 return NULL;
3668         }
3669
3670         bacpy(&params->addr, addr);
3671         params->addr_type = addr_type;
3672
3673         list_add(&params->list, &hdev->le_conn_params);
3674         INIT_LIST_HEAD(&params->action);
3675
3676         params->conn_min_interval = hdev->le_conn_min_interval;
3677         params->conn_max_interval = hdev->le_conn_max_interval;
3678         params->conn_latency = hdev->le_conn_latency;
3679         params->supervision_timeout = hdev->le_supv_timeout;
3680         params->auto_connect = HCI_AUTO_CONN_DISABLED;
3681
3682         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3683
3684         return params;
3685 }
3686
3687 /* This function requires the caller holds hdev->lock */
3688 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3689                         u8 auto_connect)
3690 {
3691         struct hci_conn_params *params;
3692
3693         params = hci_conn_params_add(hdev, addr, addr_type);
3694         if (!params)
3695                 return -EIO;
3696
3697         if (params->auto_connect == auto_connect)
3698                 return 0;
3699
3700         list_del_init(&params->action);
3701
3702         switch (auto_connect) {
3703         case HCI_AUTO_CONN_DISABLED:
3704         case HCI_AUTO_CONN_LINK_LOSS:
3705                 hci_update_background_scan(hdev);
3706                 break;
3707         case HCI_AUTO_CONN_REPORT:
3708                 list_add(&params->action, &hdev->pend_le_reports);
3709                 hci_update_background_scan(hdev);
3710                 break;
3711         case HCI_AUTO_CONN_DIRECT:
3712         case HCI_AUTO_CONN_ALWAYS:
3713                 if (!is_connected(hdev, addr, addr_type)) {
3714                         list_add(&params->action, &hdev->pend_le_conns);
3715                         hci_update_background_scan(hdev);
3716                 }
3717                 break;
3718         }
3719
3720         params->auto_connect = auto_connect;
3721
3722         BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3723                auto_connect);
3724
3725         return 0;
3726 }
3727
3728 /* This function requires the caller holds hdev->lock */
3729 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3730 {
3731         struct hci_conn_params *params;
3732
3733         params = hci_conn_params_lookup(hdev, addr, addr_type);
3734         if (!params)
3735                 return;
3736
3737         if (params->conn)
3738                 hci_conn_drop(params->conn);
3739
3740         list_del(&params->action);
3741         list_del(&params->list);
3742         kfree(params);
3743
3744         hci_update_background_scan(hdev);
3745
3746         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3747 }
3748
3749 /* This function requires the caller holds hdev->lock */
3750 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3751 {
3752         struct hci_conn_params *params, *tmp;
3753
3754         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3755                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3756                         continue;
3757                 list_del(&params->list);
3758                 kfree(params);
3759         }
3760
3761         BT_DBG("All LE disabled connection parameters were removed");
3762 }
3763
3764 /* This function requires the caller holds hdev->lock */
3765 void hci_conn_params_clear_all(struct hci_dev *hdev)
3766 {
3767         struct hci_conn_params *params, *tmp;
3768
3769         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3770                 if (params->conn)
3771                         hci_conn_drop(params->conn);
3772                 list_del(&params->action);
3773                 list_del(&params->list);
3774                 kfree(params);
3775         }
3776
3777         hci_update_background_scan(hdev);
3778
3779         BT_DBG("All LE connection parameters were removed");
3780 }
3781
3782 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3783 {
3784         if (status) {
3785                 BT_ERR("Failed to start inquiry: status %d", status);
3786
3787                 hci_dev_lock(hdev);
3788                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3789                 hci_dev_unlock(hdev);
3790                 return;
3791         }
3792 }
3793
3794 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3795 {
3796         /* General inquiry access code (GIAC) */
3797         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3798         struct hci_request req;
3799         struct hci_cp_inquiry cp;
3800         int err;
3801
3802         if (status) {
3803                 BT_ERR("Failed to disable LE scanning: status %d", status);
3804                 return;
3805         }
3806
3807         switch (hdev->discovery.type) {
3808         case DISCOV_TYPE_LE:
3809                 hci_dev_lock(hdev);
3810                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3811                 hci_dev_unlock(hdev);
3812                 break;
3813
3814         case DISCOV_TYPE_INTERLEAVED:
3815                 hci_req_init(&req, hdev);
3816
3817                 memset(&cp, 0, sizeof(cp));
3818                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3819                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3820                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3821
3822                 hci_dev_lock(hdev);
3823
3824                 hci_inquiry_cache_flush(hdev);
3825
3826                 err = hci_req_run(&req, inquiry_complete);
3827                 if (err) {
3828                         BT_ERR("Inquiry request failed: err %d", err);
3829                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3830                 }
3831
3832                 hci_dev_unlock(hdev);
3833                 break;
3834         }
3835 }
3836
3837 static void le_scan_disable_work(struct work_struct *work)
3838 {
3839         struct hci_dev *hdev = container_of(work, struct hci_dev,
3840                                             le_scan_disable.work);
3841         struct hci_request req;
3842         int err;
3843
3844         BT_DBG("%s", hdev->name);
3845
3846         hci_req_init(&req, hdev);
3847
3848         hci_req_add_le_scan_disable(&req);
3849
3850         err = hci_req_run(&req, le_scan_disable_work_complete);
3851         if (err)
3852                 BT_ERR("Disable LE scanning request failed: err %d", err);
3853 }
3854
3855 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3856 {
3857         struct hci_dev *hdev = req->hdev;
3858
3859         /* If we're advertising or initiating an LE connection we can't
3860          * go ahead and change the random address at this time. This is
3861          * because the eventual initiator address used for the
3862          * subsequently created connection will be undefined (some
3863          * controllers use the new address and others the one we had
3864          * when the operation started).
3865          *
3866          * In this kind of scenario skip the update and let the random
3867          * address be updated at the next cycle.
3868          */
3869         if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3870             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3871                 BT_DBG("Deferring random address update");
3872                 return;
3873         }
3874
3875         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3876 }
3877
3878 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3879                               u8 *own_addr_type)
3880 {
3881         struct hci_dev *hdev = req->hdev;
3882         int err;
3883
3884         /* If privacy is enabled use a resolvable private address. If
3885          * current RPA has expired or there is something else than
3886          * the current RPA in use, then generate a new one.
3887          */
3888         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3889                 int to;
3890
3891                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3892
3893                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3894                     !bacmp(&hdev->random_addr, &hdev->rpa))
3895                         return 0;
3896
3897                 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
3898                 if (err < 0) {
3899                         BT_ERR("%s failed to generate new RPA", hdev->name);
3900                         return err;
3901                 }
3902
3903                 set_random_addr(req, &hdev->rpa);
3904
3905                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3906                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3907
3908                 return 0;
3909         }
3910
3911         /* In case of required privacy without resolvable private address,
3912          * use an unresolvable private address. This is useful for active
3913          * scanning and non-connectable advertising.
3914          */
3915         if (require_privacy) {
3916                 bdaddr_t urpa;
3917
3918                 get_random_bytes(&urpa, 6);
3919                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3920
3921                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3922                 set_random_addr(req, &urpa);
3923                 return 0;
3924         }
3925
3926         /* If forcing static address is in use or there is no public
3927          * address use the static address as random address (but skip
3928          * the HCI command if the current random address is already the
3929          * static one.
3930          */
3931         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3932             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3933                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3934                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3935                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3936                                     &hdev->static_addr);
3937                 return 0;
3938         }
3939
3940         /* Neither privacy nor static address is being used so use a
3941          * public address.
3942          */
3943         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3944
3945         return 0;
3946 }
3947
3948 /* Copy the Identity Address of the controller.
3949  *
3950  * If the controller has a public BD_ADDR, then by default use that one.
3951  * If this is a LE only controller without a public address, default to
3952  * the static random address.
3953  *
3954  * For debugging purposes it is possible to force controllers with a
3955  * public address to use the static random address instead.
3956  */
3957 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3958                                u8 *bdaddr_type)
3959 {
3960         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3961             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3962                 bacpy(bdaddr, &hdev->static_addr);
3963                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3964         } else {
3965                 bacpy(bdaddr, &hdev->bdaddr);
3966                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3967         }
3968 }
3969
3970 /* Alloc HCI device */
3971 struct hci_dev *hci_alloc_dev(void)
3972 {
3973         struct hci_dev *hdev;
3974
3975         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3976         if (!hdev)
3977                 return NULL;
3978
3979         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3980         hdev->esco_type = (ESCO_HV1);
3981         hdev->link_mode = (HCI_LM_ACCEPT);
3982         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3983         hdev->io_capability = 0x03;     /* No Input No Output */
3984         hdev->manufacturer = 0xffff;    /* Default to internal use */
3985         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3986         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3987
3988         hdev->sniff_max_interval = 800;
3989         hdev->sniff_min_interval = 80;
3990
3991         hdev->le_adv_channel_map = 0x07;
3992         hdev->le_adv_min_interval = 0x0800;
3993         hdev->le_adv_max_interval = 0x0800;
3994         hdev->le_scan_interval = 0x0060;
3995         hdev->le_scan_window = 0x0030;
3996         hdev->le_conn_min_interval = 0x0028;
3997         hdev->le_conn_max_interval = 0x0038;
3998         hdev->le_conn_latency = 0x0000;
3999         hdev->le_supv_timeout = 0x002a;
4000
4001         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4002         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
4003         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4004         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
4005
4006         mutex_init(&hdev->lock);
4007         mutex_init(&hdev->req_lock);
4008
4009         INIT_LIST_HEAD(&hdev->mgmt_pending);
4010         INIT_LIST_HEAD(&hdev->blacklist);
4011         INIT_LIST_HEAD(&hdev->whitelist);
4012         INIT_LIST_HEAD(&hdev->uuids);
4013         INIT_LIST_HEAD(&hdev->link_keys);
4014         INIT_LIST_HEAD(&hdev->long_term_keys);
4015         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4016         INIT_LIST_HEAD(&hdev->remote_oob_data);
4017         INIT_LIST_HEAD(&hdev->le_white_list);
4018         INIT_LIST_HEAD(&hdev->le_conn_params);
4019         INIT_LIST_HEAD(&hdev->pend_le_conns);
4020         INIT_LIST_HEAD(&hdev->pend_le_reports);
4021         INIT_LIST_HEAD(&hdev->conn_hash.list);
4022
4023         INIT_WORK(&hdev->rx_work, hci_rx_work);
4024         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4025         INIT_WORK(&hdev->tx_work, hci_tx_work);
4026         INIT_WORK(&hdev->power_on, hci_power_on);
4027
4028         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4029         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4030         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4031
4032         skb_queue_head_init(&hdev->rx_q);
4033         skb_queue_head_init(&hdev->cmd_q);
4034         skb_queue_head_init(&hdev->raw_q);
4035
4036         init_waitqueue_head(&hdev->req_wait_q);
4037
4038         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4039
4040         hci_init_sysfs(hdev);
4041         discovery_init(hdev);
4042
4043         return hdev;
4044 }
4045 EXPORT_SYMBOL(hci_alloc_dev);
4046
4047 /* Free HCI device */
4048 void hci_free_dev(struct hci_dev *hdev)
4049 {
4050         /* will free via device release */
4051         put_device(&hdev->dev);
4052 }
4053 EXPORT_SYMBOL(hci_free_dev);
4054
4055 /* Register HCI device */
4056 int hci_register_dev(struct hci_dev *hdev)
4057 {
4058         int id, error;
4059
4060         if (!hdev->open || !hdev->close || !hdev->send)
4061                 return -EINVAL;
4062
4063         /* Do not allow HCI_AMP devices to register at index 0,
4064          * so the index can be used as the AMP controller ID.
4065          */
4066         switch (hdev->dev_type) {
4067         case HCI_BREDR:
4068                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4069                 break;
4070         case HCI_AMP:
4071                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4072                 break;
4073         default:
4074                 return -EINVAL;
4075         }
4076
4077         if (id < 0)
4078                 return id;
4079
4080         sprintf(hdev->name, "hci%d", id);
4081         hdev->id = id;
4082
4083         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4084
4085         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4086                                           WQ_MEM_RECLAIM, 1, hdev->name);
4087         if (!hdev->workqueue) {
4088                 error = -ENOMEM;
4089                 goto err;
4090         }
4091
4092         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4093                                               WQ_MEM_RECLAIM, 1, hdev->name);
4094         if (!hdev->req_workqueue) {
4095                 destroy_workqueue(hdev->workqueue);
4096                 error = -ENOMEM;
4097                 goto err;
4098         }
4099
4100         if (!IS_ERR_OR_NULL(bt_debugfs))
4101                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4102
4103         dev_set_name(&hdev->dev, "%s", hdev->name);
4104
4105         error = device_add(&hdev->dev);
4106         if (error < 0)
4107                 goto err_wqueue;
4108
4109         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4110                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4111                                     hdev);
4112         if (hdev->rfkill) {
4113                 if (rfkill_register(hdev->rfkill) < 0) {
4114                         rfkill_destroy(hdev->rfkill);
4115                         hdev->rfkill = NULL;
4116                 }
4117         }
4118
4119         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4120                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4121
4122         set_bit(HCI_SETUP, &hdev->dev_flags);
4123         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4124
4125         if (hdev->dev_type == HCI_BREDR) {
4126                 /* Assume BR/EDR support until proven otherwise (such as
4127                  * through reading supported features during init.
4128                  */
4129                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4130         }
4131
4132         write_lock(&hci_dev_list_lock);
4133         list_add(&hdev->list, &hci_dev_list);
4134         write_unlock(&hci_dev_list_lock);
4135
4136         /* Devices that are marked for raw-only usage are unconfigured
4137          * and should not be included in normal operation.
4138          */
4139         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4140                 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4141
4142         hci_notify(hdev, HCI_DEV_REG);
4143         hci_dev_hold(hdev);
4144
4145         queue_work(hdev->req_workqueue, &hdev->power_on);
4146
4147         return id;
4148
4149 err_wqueue:
4150         destroy_workqueue(hdev->workqueue);
4151         destroy_workqueue(hdev->req_workqueue);
4152 err:
4153         ida_simple_remove(&hci_index_ida, hdev->id);
4154
4155         return error;
4156 }
4157 EXPORT_SYMBOL(hci_register_dev);
4158
4159 /* Unregister HCI device */
4160 void hci_unregister_dev(struct hci_dev *hdev)
4161 {
4162         int i, id;
4163
4164         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4165
4166         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4167
4168         id = hdev->id;
4169
4170         write_lock(&hci_dev_list_lock);
4171         list_del(&hdev->list);
4172         write_unlock(&hci_dev_list_lock);
4173
4174         hci_dev_do_close(hdev);
4175
4176         for (i = 0; i < NUM_REASSEMBLY; i++)
4177                 kfree_skb(hdev->reassembly[i]);
4178
4179         cancel_work_sync(&hdev->power_on);
4180
4181         if (!test_bit(HCI_INIT, &hdev->flags) &&
4182             !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4183             !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4184                 hci_dev_lock(hdev);
4185                 mgmt_index_removed(hdev);
4186                 hci_dev_unlock(hdev);
4187         }
4188
4189         /* mgmt_index_removed should take care of emptying the
4190          * pending list */
4191         BUG_ON(!list_empty(&hdev->mgmt_pending));
4192
4193         hci_notify(hdev, HCI_DEV_UNREG);
4194
4195         if (hdev->rfkill) {
4196                 rfkill_unregister(hdev->rfkill);
4197                 rfkill_destroy(hdev->rfkill);
4198         }
4199
4200         smp_unregister(hdev);
4201
4202         device_del(&hdev->dev);
4203
4204         debugfs_remove_recursive(hdev->debugfs);
4205
4206         destroy_workqueue(hdev->workqueue);
4207         destroy_workqueue(hdev->req_workqueue);
4208
4209         hci_dev_lock(hdev);
4210         hci_bdaddr_list_clear(&hdev->blacklist);
4211         hci_bdaddr_list_clear(&hdev->whitelist);
4212         hci_uuids_clear(hdev);
4213         hci_link_keys_clear(hdev);
4214         hci_smp_ltks_clear(hdev);
4215         hci_smp_irks_clear(hdev);
4216         hci_remote_oob_data_clear(hdev);
4217         hci_bdaddr_list_clear(&hdev->le_white_list);
4218         hci_conn_params_clear_all(hdev);
4219         hci_dev_unlock(hdev);
4220
4221         hci_dev_put(hdev);
4222
4223         ida_simple_remove(&hci_index_ida, id);
4224 }
4225 EXPORT_SYMBOL(hci_unregister_dev);
4226
4227 /* Suspend HCI device */
4228 int hci_suspend_dev(struct hci_dev *hdev)
4229 {
4230         hci_notify(hdev, HCI_DEV_SUSPEND);
4231         return 0;
4232 }
4233 EXPORT_SYMBOL(hci_suspend_dev);
4234
4235 /* Resume HCI device */
4236 int hci_resume_dev(struct hci_dev *hdev)
4237 {
4238         hci_notify(hdev, HCI_DEV_RESUME);
4239         return 0;
4240 }
4241 EXPORT_SYMBOL(hci_resume_dev);
4242
4243 /* Receive frame from HCI drivers */
4244 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4245 {
4246         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4247                       && !test_bit(HCI_INIT, &hdev->flags))) {
4248                 kfree_skb(skb);
4249                 return -ENXIO;
4250         }
4251
4252         /* Incoming skb */
4253         bt_cb(skb)->incoming = 1;
4254
4255         /* Time stamp */
4256         __net_timestamp(skb);
4257
4258         skb_queue_tail(&hdev->rx_q, skb);
4259         queue_work(hdev->workqueue, &hdev->rx_work);
4260
4261         return 0;
4262 }
4263 EXPORT_SYMBOL(hci_recv_frame);
4264
4265 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4266                           int count, __u8 index)
4267 {
4268         int len = 0;
4269         int hlen = 0;
4270         int remain = count;
4271         struct sk_buff *skb;
4272         struct bt_skb_cb *scb;
4273
4274         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4275             index >= NUM_REASSEMBLY)
4276                 return -EILSEQ;
4277
4278         skb = hdev->reassembly[index];
4279
4280         if (!skb) {
4281                 switch (type) {
4282                 case HCI_ACLDATA_PKT:
4283                         len = HCI_MAX_FRAME_SIZE;
4284                         hlen = HCI_ACL_HDR_SIZE;
4285                         break;
4286                 case HCI_EVENT_PKT:
4287                         len = HCI_MAX_EVENT_SIZE;
4288                         hlen = HCI_EVENT_HDR_SIZE;
4289                         break;
4290                 case HCI_SCODATA_PKT:
4291                         len = HCI_MAX_SCO_SIZE;
4292                         hlen = HCI_SCO_HDR_SIZE;
4293                         break;
4294                 }
4295
4296                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4297                 if (!skb)
4298                         return -ENOMEM;
4299
4300                 scb = (void *) skb->cb;
4301                 scb->expect = hlen;
4302                 scb->pkt_type = type;
4303
4304                 hdev->reassembly[index] = skb;
4305         }
4306
4307         while (count) {
4308                 scb = (void *) skb->cb;
4309                 len = min_t(uint, scb->expect, count);
4310
4311                 memcpy(skb_put(skb, len), data, len);
4312
4313                 count -= len;
4314                 data += len;
4315                 scb->expect -= len;
4316                 remain = count;
4317
4318                 switch (type) {
4319                 case HCI_EVENT_PKT:
4320                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4321                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4322                                 scb->expect = h->plen;
4323
4324                                 if (skb_tailroom(skb) < scb->expect) {
4325                                         kfree_skb(skb);
4326                                         hdev->reassembly[index] = NULL;
4327                                         return -ENOMEM;
4328                                 }
4329                         }
4330                         break;
4331
4332                 case HCI_ACLDATA_PKT:
4333                         if (skb->len  == HCI_ACL_HDR_SIZE) {
4334                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4335                                 scb->expect = __le16_to_cpu(h->dlen);
4336
4337                                 if (skb_tailroom(skb) < scb->expect) {
4338                                         kfree_skb(skb);
4339                                         hdev->reassembly[index] = NULL;
4340                                         return -ENOMEM;
4341                                 }
4342                         }
4343                         break;
4344
4345                 case HCI_SCODATA_PKT:
4346                         if (skb->len == HCI_SCO_HDR_SIZE) {
4347                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4348                                 scb->expect = h->dlen;
4349
4350                                 if (skb_tailroom(skb) < scb->expect) {
4351                                         kfree_skb(skb);
4352                                         hdev->reassembly[index] = NULL;
4353                                         return -ENOMEM;
4354                                 }
4355                         }
4356                         break;
4357                 }
4358
4359                 if (scb->expect == 0) {
4360                         /* Complete frame */
4361
4362                         bt_cb(skb)->pkt_type = type;
4363                         hci_recv_frame(hdev, skb);
4364
4365                         hdev->reassembly[index] = NULL;
4366                         return remain;
4367                 }
4368         }
4369
4370         return remain;
4371 }
4372
4373 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4374 {
4375         int rem = 0;
4376
4377         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4378                 return -EILSEQ;
4379
4380         while (count) {
4381                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4382                 if (rem < 0)
4383                         return rem;
4384
4385                 data += (count - rem);
4386                 count = rem;
4387         }
4388
4389         return rem;
4390 }
4391 EXPORT_SYMBOL(hci_recv_fragment);
4392
4393 #define STREAM_REASSEMBLY 0
4394
4395 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4396 {
4397         int type;
4398         int rem = 0;
4399
4400         while (count) {
4401                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4402
4403                 if (!skb) {
4404                         struct { char type; } *pkt;
4405
4406                         /* Start of the frame */
4407                         pkt = data;
4408                         type = pkt->type;
4409
4410                         data++;
4411                         count--;
4412                 } else
4413                         type = bt_cb(skb)->pkt_type;
4414
4415                 rem = hci_reassembly(hdev, type, data, count,
4416                                      STREAM_REASSEMBLY);
4417                 if (rem < 0)
4418                         return rem;
4419
4420                 data += (count - rem);
4421                 count = rem;
4422         }
4423
4424         return rem;
4425 }
4426 EXPORT_SYMBOL(hci_recv_stream_fragment);
4427
4428 /* ---- Interface to upper protocols ---- */
4429
4430 int hci_register_cb(struct hci_cb *cb)
4431 {
4432         BT_DBG("%p name %s", cb, cb->name);
4433
4434         write_lock(&hci_cb_list_lock);
4435         list_add(&cb->list, &hci_cb_list);
4436         write_unlock(&hci_cb_list_lock);
4437
4438         return 0;
4439 }
4440 EXPORT_SYMBOL(hci_register_cb);
4441
4442 int hci_unregister_cb(struct hci_cb *cb)
4443 {
4444         BT_DBG("%p name %s", cb, cb->name);
4445
4446         write_lock(&hci_cb_list_lock);
4447         list_del(&cb->list);
4448         write_unlock(&hci_cb_list_lock);
4449
4450         return 0;
4451 }
4452 EXPORT_SYMBOL(hci_unregister_cb);
4453
4454 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4455 {
4456         int err;
4457
4458         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4459
4460         /* Time stamp */
4461         __net_timestamp(skb);
4462
4463         /* Send copy to monitor */
4464         hci_send_to_monitor(hdev, skb);
4465
4466         if (atomic_read(&hdev->promisc)) {
4467                 /* Send copy to the sockets */
4468                 hci_send_to_sock(hdev, skb);
4469         }
4470
4471         /* Get rid of skb owner, prior to sending to the driver. */
4472         skb_orphan(skb);
4473
4474         err = hdev->send(hdev, skb);
4475         if (err < 0) {
4476                 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4477                 kfree_skb(skb);
4478         }
4479 }
4480
4481 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4482 {
4483         skb_queue_head_init(&req->cmd_q);
4484         req->hdev = hdev;
4485         req->err = 0;
4486 }
4487
4488 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4489 {
4490         struct hci_dev *hdev = req->hdev;
4491         struct sk_buff *skb;
4492         unsigned long flags;
4493
4494         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4495
4496         /* If an error occured during request building, remove all HCI
4497          * commands queued on the HCI request queue.
4498          */
4499         if (req->err) {
4500                 skb_queue_purge(&req->cmd_q);
4501                 return req->err;
4502         }
4503
4504         /* Do not allow empty requests */
4505         if (skb_queue_empty(&req->cmd_q))
4506                 return -ENODATA;
4507
4508         skb = skb_peek_tail(&req->cmd_q);
4509         bt_cb(skb)->req.complete = complete;
4510
4511         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4512         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4513         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4514
4515         queue_work(hdev->workqueue, &hdev->cmd_work);
4516
4517         return 0;
4518 }
4519
4520 bool hci_req_pending(struct hci_dev *hdev)
4521 {
4522         return (hdev->req_status == HCI_REQ_PEND);
4523 }
4524
4525 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4526                                        u32 plen, const void *param)
4527 {
4528         int len = HCI_COMMAND_HDR_SIZE + plen;
4529         struct hci_command_hdr *hdr;
4530         struct sk_buff *skb;
4531
4532         skb = bt_skb_alloc(len, GFP_ATOMIC);
4533         if (!skb)
4534                 return NULL;
4535
4536         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4537         hdr->opcode = cpu_to_le16(opcode);
4538         hdr->plen   = plen;
4539
4540         if (plen)
4541                 memcpy(skb_put(skb, plen), param, plen);
4542
4543         BT_DBG("skb len %d", skb->len);
4544
4545         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4546
4547         return skb;
4548 }
4549
4550 /* Send HCI command */
4551 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4552                  const void *param)
4553 {
4554         struct sk_buff *skb;
4555
4556         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4557
4558         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4559         if (!skb) {
4560                 BT_ERR("%s no memory for command", hdev->name);
4561                 return -ENOMEM;
4562         }
4563
4564         /* Stand-alone HCI commands must be flaged as
4565          * single-command requests.
4566          */
4567         bt_cb(skb)->req.start = true;
4568
4569         skb_queue_tail(&hdev->cmd_q, skb);
4570         queue_work(hdev->workqueue, &hdev->cmd_work);
4571
4572         return 0;
4573 }
4574
4575 /* Queue a command to an asynchronous HCI request */
4576 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4577                     const void *param, u8 event)
4578 {
4579         struct hci_dev *hdev = req->hdev;
4580         struct sk_buff *skb;
4581
4582         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4583
4584         /* If an error occured during request building, there is no point in
4585          * queueing the HCI command. We can simply return.
4586          */
4587         if (req->err)
4588                 return;
4589
4590         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4591         if (!skb) {
4592                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4593                        hdev->name, opcode);
4594                 req->err = -ENOMEM;
4595                 return;
4596         }
4597
4598         if (skb_queue_empty(&req->cmd_q))
4599                 bt_cb(skb)->req.start = true;
4600
4601         bt_cb(skb)->req.event = event;
4602
4603         skb_queue_tail(&req->cmd_q, skb);
4604 }
4605
4606 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4607                  const void *param)
4608 {
4609         hci_req_add_ev(req, opcode, plen, param, 0);
4610 }
4611
4612 /* Get data from the previously sent command */
4613 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4614 {
4615         struct hci_command_hdr *hdr;
4616
4617         if (!hdev->sent_cmd)
4618                 return NULL;
4619
4620         hdr = (void *) hdev->sent_cmd->data;
4621
4622         if (hdr->opcode != cpu_to_le16(opcode))
4623                 return NULL;
4624
4625         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4626
4627         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4628 }
4629
4630 /* Send ACL data */
4631 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4632 {
4633         struct hci_acl_hdr *hdr;
4634         int len = skb->len;
4635
4636         skb_push(skb, HCI_ACL_HDR_SIZE);
4637         skb_reset_transport_header(skb);
4638         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4639         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4640         hdr->dlen   = cpu_to_le16(len);
4641 }
4642
4643 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4644                           struct sk_buff *skb, __u16 flags)
4645 {
4646         struct hci_conn *conn = chan->conn;
4647         struct hci_dev *hdev = conn->hdev;
4648         struct sk_buff *list;
4649
4650         skb->len = skb_headlen(skb);
4651         skb->data_len = 0;
4652
4653         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4654
4655         switch (hdev->dev_type) {
4656         case HCI_BREDR:
4657                 hci_add_acl_hdr(skb, conn->handle, flags);
4658                 break;
4659         case HCI_AMP:
4660                 hci_add_acl_hdr(skb, chan->handle, flags);
4661                 break;
4662         default:
4663                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4664                 return;
4665         }
4666
4667         list = skb_shinfo(skb)->frag_list;
4668         if (!list) {
4669                 /* Non fragmented */
4670                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4671
4672                 skb_queue_tail(queue, skb);
4673         } else {
4674                 /* Fragmented */
4675                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4676
4677                 skb_shinfo(skb)->frag_list = NULL;
4678
4679                 /* Queue all fragments atomically */
4680                 spin_lock(&queue->lock);
4681
4682                 __skb_queue_tail(queue, skb);
4683
4684                 flags &= ~ACL_START;
4685                 flags |= ACL_CONT;
4686                 do {
4687                         skb = list; list = list->next;
4688
4689                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4690                         hci_add_acl_hdr(skb, conn->handle, flags);
4691
4692                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4693
4694                         __skb_queue_tail(queue, skb);
4695                 } while (list);
4696
4697                 spin_unlock(&queue->lock);
4698         }
4699 }
4700
4701 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4702 {
4703         struct hci_dev *hdev = chan->conn->hdev;
4704
4705         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4706
4707         hci_queue_acl(chan, &chan->data_q, skb, flags);
4708
4709         queue_work(hdev->workqueue, &hdev->tx_work);
4710 }
4711
4712 /* Send SCO data */
4713 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4714 {
4715         struct hci_dev *hdev = conn->hdev;
4716         struct hci_sco_hdr hdr;
4717
4718         BT_DBG("%s len %d", hdev->name, skb->len);
4719
4720         hdr.handle = cpu_to_le16(conn->handle);
4721         hdr.dlen   = skb->len;
4722
4723         skb_push(skb, HCI_SCO_HDR_SIZE);
4724         skb_reset_transport_header(skb);
4725         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4726
4727         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4728
4729         skb_queue_tail(&conn->data_q, skb);
4730         queue_work(hdev->workqueue, &hdev->tx_work);
4731 }
4732
4733 /* ---- HCI TX task (outgoing data) ---- */
4734
4735 /* HCI Connection scheduler */
4736 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4737                                      int *quote)
4738 {
4739         struct hci_conn_hash *h = &hdev->conn_hash;
4740         struct hci_conn *conn = NULL, *c;
4741         unsigned int num = 0, min = ~0;
4742
4743         /* We don't have to lock device here. Connections are always
4744          * added and removed with TX task disabled. */
4745
4746         rcu_read_lock();
4747
4748         list_for_each_entry_rcu(c, &h->list, list) {
4749                 if (c->type != type || skb_queue_empty(&c->data_q))
4750                         continue;
4751
4752                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4753                         continue;
4754
4755                 num++;
4756
4757                 if (c->sent < min) {
4758                         min  = c->sent;
4759                         conn = c;
4760                 }
4761
4762                 if (hci_conn_num(hdev, type) == num)
4763                         break;
4764         }
4765
4766         rcu_read_unlock();
4767
4768         if (conn) {
4769                 int cnt, q;
4770
4771                 switch (conn->type) {
4772                 case ACL_LINK:
4773                         cnt = hdev->acl_cnt;
4774                         break;
4775                 case SCO_LINK:
4776                 case ESCO_LINK:
4777                         cnt = hdev->sco_cnt;
4778                         break;
4779                 case LE_LINK:
4780                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4781                         break;
4782                 default:
4783                         cnt = 0;
4784                         BT_ERR("Unknown link type");
4785                 }
4786
4787                 q = cnt / num;
4788                 *quote = q ? q : 1;
4789         } else
4790                 *quote = 0;
4791
4792         BT_DBG("conn %p quote %d", conn, *quote);
4793         return conn;
4794 }
4795
4796 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4797 {
4798         struct hci_conn_hash *h = &hdev->conn_hash;
4799         struct hci_conn *c;
4800
4801         BT_ERR("%s link tx timeout", hdev->name);
4802
4803         rcu_read_lock();
4804
4805         /* Kill stalled connections */
4806         list_for_each_entry_rcu(c, &h->list, list) {
4807                 if (c->type == type && c->sent) {
4808                         BT_ERR("%s killing stalled connection %pMR",
4809                                hdev->name, &c->dst);
4810                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4811                 }
4812         }
4813
4814         rcu_read_unlock();
4815 }
4816
4817 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4818                                       int *quote)
4819 {
4820         struct hci_conn_hash *h = &hdev->conn_hash;
4821         struct hci_chan *chan = NULL;
4822         unsigned int num = 0, min = ~0, cur_prio = 0;
4823         struct hci_conn *conn;
4824         int cnt, q, conn_num = 0;
4825
4826         BT_DBG("%s", hdev->name);
4827
4828         rcu_read_lock();
4829
4830         list_for_each_entry_rcu(conn, &h->list, list) {
4831                 struct hci_chan *tmp;
4832
4833                 if (conn->type != type)
4834                         continue;
4835
4836                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4837                         continue;
4838
4839                 conn_num++;
4840
4841                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4842                         struct sk_buff *skb;
4843
4844                         if (skb_queue_empty(&tmp->data_q))
4845                                 continue;
4846
4847                         skb = skb_peek(&tmp->data_q);
4848                         if (skb->priority < cur_prio)
4849                                 continue;
4850
4851                         if (skb->priority > cur_prio) {
4852                                 num = 0;
4853                                 min = ~0;
4854                                 cur_prio = skb->priority;
4855                         }
4856
4857                         num++;
4858
4859                         if (conn->sent < min) {
4860                                 min  = conn->sent;
4861                                 chan = tmp;
4862                         }
4863                 }
4864
4865                 if (hci_conn_num(hdev, type) == conn_num)
4866                         break;
4867         }
4868
4869         rcu_read_unlock();
4870
4871         if (!chan)
4872                 return NULL;
4873
4874         switch (chan->conn->type) {
4875         case ACL_LINK:
4876                 cnt = hdev->acl_cnt;
4877                 break;
4878         case AMP_LINK:
4879                 cnt = hdev->block_cnt;
4880                 break;
4881         case SCO_LINK:
4882         case ESCO_LINK:
4883                 cnt = hdev->sco_cnt;
4884                 break;
4885         case LE_LINK:
4886                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4887                 break;
4888         default:
4889                 cnt = 0;
4890                 BT_ERR("Unknown link type");
4891         }
4892
4893         q = cnt / num;
4894         *quote = q ? q : 1;
4895         BT_DBG("chan %p quote %d", chan, *quote);
4896         return chan;
4897 }
4898
4899 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4900 {
4901         struct hci_conn_hash *h = &hdev->conn_hash;
4902         struct hci_conn *conn;
4903         int num = 0;
4904
4905         BT_DBG("%s", hdev->name);
4906
4907         rcu_read_lock();
4908
4909         list_for_each_entry_rcu(conn, &h->list, list) {
4910                 struct hci_chan *chan;
4911
4912                 if (conn->type != type)
4913                         continue;
4914
4915                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4916                         continue;
4917
4918                 num++;
4919
4920                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4921                         struct sk_buff *skb;
4922
4923                         if (chan->sent) {
4924                                 chan->sent = 0;
4925                                 continue;
4926                         }
4927
4928                         if (skb_queue_empty(&chan->data_q))
4929                                 continue;
4930
4931                         skb = skb_peek(&chan->data_q);
4932                         if (skb->priority >= HCI_PRIO_MAX - 1)
4933                                 continue;
4934
4935                         skb->priority = HCI_PRIO_MAX - 1;
4936
4937                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4938                                skb->priority);
4939                 }
4940
4941                 if (hci_conn_num(hdev, type) == num)
4942                         break;
4943         }
4944
4945         rcu_read_unlock();
4946
4947 }
4948
4949 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4950 {
4951         /* Calculate count of blocks used by this packet */
4952         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4953 }
4954
4955 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4956 {
4957         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4958                 /* ACL tx timeout must be longer than maximum
4959                  * link supervision timeout (40.9 seconds) */
4960                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4961                                        HCI_ACL_TX_TIMEOUT))
4962                         hci_link_tx_to(hdev, ACL_LINK);
4963         }
4964 }
4965
4966 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4967 {
4968         unsigned int cnt = hdev->acl_cnt;
4969         struct hci_chan *chan;
4970         struct sk_buff *skb;
4971         int quote;
4972
4973         __check_timeout(hdev, cnt);
4974
4975         while (hdev->acl_cnt &&
4976                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4977                 u32 priority = (skb_peek(&chan->data_q))->priority;
4978                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4979                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4980                                skb->len, skb->priority);
4981
4982                         /* Stop if priority has changed */
4983                         if (skb->priority < priority)
4984                                 break;
4985
4986                         skb = skb_dequeue(&chan->data_q);
4987
4988                         hci_conn_enter_active_mode(chan->conn,
4989                                                    bt_cb(skb)->force_active);
4990
4991                         hci_send_frame(hdev, skb);
4992                         hdev->acl_last_tx = jiffies;
4993
4994                         hdev->acl_cnt--;
4995                         chan->sent++;
4996                         chan->conn->sent++;
4997                 }
4998         }
4999
5000         if (cnt != hdev->acl_cnt)
5001                 hci_prio_recalculate(hdev, ACL_LINK);
5002 }
5003
5004 static void hci_sched_acl_blk(struct hci_dev *hdev)
5005 {
5006         unsigned int cnt = hdev->block_cnt;
5007         struct hci_chan *chan;
5008         struct sk_buff *skb;
5009         int quote;
5010         u8 type;
5011
5012         __check_timeout(hdev, cnt);
5013
5014         BT_DBG("%s", hdev->name);
5015
5016         if (hdev->dev_type == HCI_AMP)
5017                 type = AMP_LINK;
5018         else
5019                 type = ACL_LINK;
5020
5021         while (hdev->block_cnt > 0 &&
5022                (chan = hci_chan_sent(hdev, type, &quote))) {
5023                 u32 priority = (skb_peek(&chan->data_q))->priority;
5024                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5025                         int blocks;
5026
5027                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5028                                skb->len, skb->priority);
5029
5030                         /* Stop if priority has changed */
5031                         if (skb->priority < priority)
5032                                 break;
5033
5034                         skb = skb_dequeue(&chan->data_q);
5035
5036                         blocks = __get_blocks(hdev, skb);
5037                         if (blocks > hdev->block_cnt)
5038                                 return;
5039
5040                         hci_conn_enter_active_mode(chan->conn,
5041                                                    bt_cb(skb)->force_active);
5042
5043                         hci_send_frame(hdev, skb);
5044                         hdev->acl_last_tx = jiffies;
5045
5046                         hdev->block_cnt -= blocks;
5047                         quote -= blocks;
5048
5049                         chan->sent += blocks;
5050                         chan->conn->sent += blocks;
5051                 }
5052         }
5053
5054         if (cnt != hdev->block_cnt)
5055                 hci_prio_recalculate(hdev, type);
5056 }
5057
5058 static void hci_sched_acl(struct hci_dev *hdev)
5059 {
5060         BT_DBG("%s", hdev->name);
5061
5062         /* No ACL link over BR/EDR controller */
5063         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5064                 return;
5065
5066         /* No AMP link over AMP controller */
5067         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5068                 return;
5069
5070         switch (hdev->flow_ctl_mode) {
5071         case HCI_FLOW_CTL_MODE_PACKET_BASED:
5072                 hci_sched_acl_pkt(hdev);
5073                 break;
5074
5075         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5076                 hci_sched_acl_blk(hdev);
5077                 break;
5078         }
5079 }
5080
5081 /* Schedule SCO */
5082 static void hci_sched_sco(struct hci_dev *hdev)
5083 {
5084         struct hci_conn *conn;
5085         struct sk_buff *skb;
5086         int quote;
5087
5088         BT_DBG("%s", hdev->name);
5089
5090         if (!hci_conn_num(hdev, SCO_LINK))
5091                 return;
5092
5093         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5094                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5095                         BT_DBG("skb %p len %d", skb, skb->len);
5096                         hci_send_frame(hdev, skb);
5097
5098                         conn->sent++;
5099                         if (conn->sent == ~0)
5100                                 conn->sent = 0;
5101                 }
5102         }
5103 }
5104
5105 static void hci_sched_esco(struct hci_dev *hdev)
5106 {
5107         struct hci_conn *conn;
5108         struct sk_buff *skb;
5109         int quote;
5110
5111         BT_DBG("%s", hdev->name);
5112
5113         if (!hci_conn_num(hdev, ESCO_LINK))
5114                 return;
5115
5116         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5117                                                      &quote))) {
5118                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5119                         BT_DBG("skb %p len %d", skb, skb->len);
5120                         hci_send_frame(hdev, skb);
5121
5122                         conn->sent++;
5123                         if (conn->sent == ~0)
5124                                 conn->sent = 0;
5125                 }
5126         }
5127 }
5128
5129 static void hci_sched_le(struct hci_dev *hdev)
5130 {
5131         struct hci_chan *chan;
5132         struct sk_buff *skb;
5133         int quote, cnt, tmp;
5134
5135         BT_DBG("%s", hdev->name);
5136
5137         if (!hci_conn_num(hdev, LE_LINK))
5138                 return;
5139
5140         if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5141                 /* LE tx timeout must be longer than maximum
5142                  * link supervision timeout (40.9 seconds) */
5143                 if (!hdev->le_cnt && hdev->le_pkts &&
5144                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
5145                         hci_link_tx_to(hdev, LE_LINK);
5146         }
5147
5148         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5149         tmp = cnt;
5150         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5151                 u32 priority = (skb_peek(&chan->data_q))->priority;
5152                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5153                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5154                                skb->len, skb->priority);
5155
5156                         /* Stop if priority has changed */
5157                         if (skb->priority < priority)
5158                                 break;
5159
5160                         skb = skb_dequeue(&chan->data_q);
5161
5162                         hci_send_frame(hdev, skb);
5163                         hdev->le_last_tx = jiffies;
5164
5165                         cnt--;
5166                         chan->sent++;
5167                         chan->conn->sent++;
5168                 }
5169         }
5170
5171         if (hdev->le_pkts)
5172                 hdev->le_cnt = cnt;
5173         else
5174                 hdev->acl_cnt = cnt;
5175
5176         if (cnt != tmp)
5177                 hci_prio_recalculate(hdev, LE_LINK);
5178 }
5179
5180 static void hci_tx_work(struct work_struct *work)
5181 {
5182         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5183         struct sk_buff *skb;
5184
5185         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5186                hdev->sco_cnt, hdev->le_cnt);
5187
5188         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5189                 /* Schedule queues and send stuff to HCI driver */
5190                 hci_sched_acl(hdev);
5191                 hci_sched_sco(hdev);
5192                 hci_sched_esco(hdev);
5193                 hci_sched_le(hdev);
5194         }
5195
5196         /* Send next queued raw (unknown type) packet */
5197         while ((skb = skb_dequeue(&hdev->raw_q)))
5198                 hci_send_frame(hdev, skb);
5199 }
5200
5201 /* ----- HCI RX task (incoming data processing) ----- */
5202
5203 /* ACL data packet */
5204 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5205 {
5206         struct hci_acl_hdr *hdr = (void *) skb->data;
5207         struct hci_conn *conn;
5208         __u16 handle, flags;
5209
5210         skb_pull(skb, HCI_ACL_HDR_SIZE);
5211
5212         handle = __le16_to_cpu(hdr->handle);
5213         flags  = hci_flags(handle);
5214         handle = hci_handle(handle);
5215
5216         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5217                handle, flags);
5218
5219         hdev->stat.acl_rx++;
5220
5221         hci_dev_lock(hdev);
5222         conn = hci_conn_hash_lookup_handle(hdev, handle);
5223         hci_dev_unlock(hdev);
5224
5225         if (conn) {
5226                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5227
5228                 /* Send to upper protocol */
5229                 l2cap_recv_acldata(conn, skb, flags);
5230                 return;
5231         } else {
5232                 BT_ERR("%s ACL packet for unknown connection handle %d",
5233                        hdev->name, handle);
5234         }
5235
5236         kfree_skb(skb);
5237 }
5238
5239 /* SCO data packet */
5240 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5241 {
5242         struct hci_sco_hdr *hdr = (void *) skb->data;
5243         struct hci_conn *conn;
5244         __u16 handle;
5245
5246         skb_pull(skb, HCI_SCO_HDR_SIZE);
5247
5248         handle = __le16_to_cpu(hdr->handle);
5249
5250         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5251
5252         hdev->stat.sco_rx++;
5253
5254         hci_dev_lock(hdev);
5255         conn = hci_conn_hash_lookup_handle(hdev, handle);
5256         hci_dev_unlock(hdev);
5257
5258         if (conn) {
5259                 /* Send to upper protocol */
5260                 sco_recv_scodata(conn, skb);
5261                 return;
5262         } else {
5263                 BT_ERR("%s SCO packet for unknown connection handle %d",
5264                        hdev->name, handle);
5265         }
5266
5267         kfree_skb(skb);
5268 }
5269
5270 static bool hci_req_is_complete(struct hci_dev *hdev)
5271 {
5272         struct sk_buff *skb;
5273
5274         skb = skb_peek(&hdev->cmd_q);
5275         if (!skb)
5276                 return true;
5277
5278         return bt_cb(skb)->req.start;
5279 }
5280
5281 static void hci_resend_last(struct hci_dev *hdev)
5282 {
5283         struct hci_command_hdr *sent;
5284         struct sk_buff *skb;
5285         u16 opcode;
5286
5287         if (!hdev->sent_cmd)
5288                 return;
5289
5290         sent = (void *) hdev->sent_cmd->data;
5291         opcode = __le16_to_cpu(sent->opcode);
5292         if (opcode == HCI_OP_RESET)
5293                 return;
5294
5295         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5296         if (!skb)
5297                 return;
5298
5299         skb_queue_head(&hdev->cmd_q, skb);
5300         queue_work(hdev->workqueue, &hdev->cmd_work);
5301 }
5302
5303 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5304 {
5305         hci_req_complete_t req_complete = NULL;
5306         struct sk_buff *skb;
5307         unsigned long flags;
5308
5309         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5310
5311         /* If the completed command doesn't match the last one that was
5312          * sent we need to do special handling of it.
5313          */
5314         if (!hci_sent_cmd_data(hdev, opcode)) {
5315                 /* Some CSR based controllers generate a spontaneous
5316                  * reset complete event during init and any pending
5317                  * command will never be completed. In such a case we
5318                  * need to resend whatever was the last sent
5319                  * command.
5320                  */
5321                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5322                         hci_resend_last(hdev);
5323
5324                 return;
5325         }
5326
5327         /* If the command succeeded and there's still more commands in
5328          * this request the request is not yet complete.
5329          */
5330         if (!status && !hci_req_is_complete(hdev))
5331                 return;
5332
5333         /* If this was the last command in a request the complete
5334          * callback would be found in hdev->sent_cmd instead of the
5335          * command queue (hdev->cmd_q).
5336          */
5337         if (hdev->sent_cmd) {
5338                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5339
5340                 if (req_complete) {
5341                         /* We must set the complete callback to NULL to
5342                          * avoid calling the callback more than once if
5343                          * this function gets called again.
5344                          */
5345                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5346
5347                         goto call_complete;
5348                 }
5349         }
5350
5351         /* Remove all pending commands belonging to this request */
5352         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5353         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5354                 if (bt_cb(skb)->req.start) {
5355                         __skb_queue_head(&hdev->cmd_q, skb);
5356                         break;
5357                 }
5358
5359                 req_complete = bt_cb(skb)->req.complete;
5360                 kfree_skb(skb);
5361         }
5362         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5363
5364 call_complete:
5365         if (req_complete)
5366                 req_complete(hdev, status);
5367 }
5368
5369 static void hci_rx_work(struct work_struct *work)
5370 {
5371         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5372         struct sk_buff *skb;
5373
5374         BT_DBG("%s", hdev->name);
5375
5376         while ((skb = skb_dequeue(&hdev->rx_q))) {
5377                 /* Send copy to monitor */
5378                 hci_send_to_monitor(hdev, skb);
5379
5380                 if (atomic_read(&hdev->promisc)) {
5381                         /* Send copy to the sockets */
5382                         hci_send_to_sock(hdev, skb);
5383                 }
5384
5385                 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5386                         kfree_skb(skb);
5387                         continue;
5388                 }
5389
5390                 if (test_bit(HCI_INIT, &hdev->flags)) {
5391                         /* Don't process data packets in this states. */
5392                         switch (bt_cb(skb)->pkt_type) {
5393                         case HCI_ACLDATA_PKT:
5394                         case HCI_SCODATA_PKT:
5395                                 kfree_skb(skb);
5396                                 continue;
5397                         }
5398                 }
5399
5400                 /* Process frame */
5401                 switch (bt_cb(skb)->pkt_type) {
5402                 case HCI_EVENT_PKT:
5403                         BT_DBG("%s Event packet", hdev->name);
5404                         hci_event_packet(hdev, skb);
5405                         break;
5406
5407                 case HCI_ACLDATA_PKT:
5408                         BT_DBG("%s ACL data packet", hdev->name);
5409                         hci_acldata_packet(hdev, skb);
5410                         break;
5411
5412                 case HCI_SCODATA_PKT:
5413                         BT_DBG("%s SCO data packet", hdev->name);
5414                         hci_scodata_packet(hdev, skb);
5415                         break;
5416
5417                 default:
5418                         kfree_skb(skb);
5419                         break;
5420                 }
5421         }
5422 }
5423
5424 static void hci_cmd_work(struct work_struct *work)
5425 {
5426         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5427         struct sk_buff *skb;
5428
5429         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5430                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5431
5432         /* Send queued commands */
5433         if (atomic_read(&hdev->cmd_cnt)) {
5434                 skb = skb_dequeue(&hdev->cmd_q);
5435                 if (!skb)
5436                         return;
5437
5438                 kfree_skb(hdev->sent_cmd);
5439
5440                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5441                 if (hdev->sent_cmd) {
5442                         atomic_dec(&hdev->cmd_cnt);
5443                         hci_send_frame(hdev, skb);
5444                         if (test_bit(HCI_RESET, &hdev->flags))
5445                                 cancel_delayed_work(&hdev->cmd_timer);
5446                         else
5447                                 schedule_delayed_work(&hdev->cmd_timer,
5448                                                       HCI_CMD_TIMEOUT);
5449                 } else {
5450                         skb_queue_head(&hdev->cmd_q, skb);
5451                         queue_work(hdev->workqueue, &hdev->cmd_work);
5452                 }
5453         }
5454 }
5455
5456 void hci_req_add_le_scan_disable(struct hci_request *req)
5457 {
5458         struct hci_cp_le_set_scan_enable cp;
5459
5460         memset(&cp, 0, sizeof(cp));
5461         cp.enable = LE_SCAN_DISABLE;
5462         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5463 }
5464
5465 static void add_to_white_list(struct hci_request *req,
5466                               struct hci_conn_params *params)
5467 {
5468         struct hci_cp_le_add_to_white_list cp;
5469
5470         cp.bdaddr_type = params->addr_type;
5471         bacpy(&cp.bdaddr, &params->addr);
5472
5473         hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5474 }
5475
5476 static u8 update_white_list(struct hci_request *req)
5477 {
5478         struct hci_dev *hdev = req->hdev;
5479         struct hci_conn_params *params;
5480         struct bdaddr_list *b;
5481         uint8_t white_list_entries = 0;
5482
5483         /* Go through the current white list programmed into the
5484          * controller one by one and check if that address is still
5485          * in the list of pending connections or list of devices to
5486          * report. If not present in either list, then queue the
5487          * command to remove it from the controller.
5488          */
5489         list_for_each_entry(b, &hdev->le_white_list, list) {
5490                 struct hci_cp_le_del_from_white_list cp;
5491
5492                 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5493                                               &b->bdaddr, b->bdaddr_type) ||
5494                     hci_pend_le_action_lookup(&hdev->pend_le_reports,
5495                                               &b->bdaddr, b->bdaddr_type)) {
5496                         white_list_entries++;
5497                         continue;
5498                 }
5499
5500                 cp.bdaddr_type = b->bdaddr_type;
5501                 bacpy(&cp.bdaddr, &b->bdaddr);
5502
5503                 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5504                             sizeof(cp), &cp);
5505         }
5506
5507         /* Since all no longer valid white list entries have been
5508          * removed, walk through the list of pending connections
5509          * and ensure that any new device gets programmed into
5510          * the controller.
5511          *
5512          * If the list of the devices is larger than the list of
5513          * available white list entries in the controller, then
5514          * just abort and return filer policy value to not use the
5515          * white list.
5516          */
5517         list_for_each_entry(params, &hdev->pend_le_conns, action) {
5518                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5519                                            &params->addr, params->addr_type))
5520                         continue;
5521
5522                 if (white_list_entries >= hdev->le_white_list_size) {
5523                         /* Select filter policy to accept all advertising */
5524                         return 0x00;
5525                 }
5526
5527                 if (hci_find_irk_by_addr(hdev, &params->addr,
5528                                          params->addr_type)) {
5529                         /* White list can not be used with RPAs */
5530                         return 0x00;
5531                 }
5532
5533                 white_list_entries++;
5534                 add_to_white_list(req, params);
5535         }
5536
5537         /* After adding all new pending connections, walk through
5538          * the list of pending reports and also add these to the
5539          * white list if there is still space.
5540          */
5541         list_for_each_entry(params, &hdev->pend_le_reports, action) {
5542                 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5543                                            &params->addr, params->addr_type))
5544                         continue;
5545
5546                 if (white_list_entries >= hdev->le_white_list_size) {
5547                         /* Select filter policy to accept all advertising */
5548                         return 0x00;
5549                 }
5550
5551                 if (hci_find_irk_by_addr(hdev, &params->addr,
5552                                          params->addr_type)) {
5553                         /* White list can not be used with RPAs */
5554                         return 0x00;
5555                 }
5556
5557                 white_list_entries++;
5558                 add_to_white_list(req, params);
5559         }
5560
5561         /* Select filter policy to use white list */
5562         return 0x01;
5563 }
5564
5565 void hci_req_add_le_passive_scan(struct hci_request *req)
5566 {
5567         struct hci_cp_le_set_scan_param param_cp;
5568         struct hci_cp_le_set_scan_enable enable_cp;
5569         struct hci_dev *hdev = req->hdev;
5570         u8 own_addr_type;
5571         u8 filter_policy;
5572
5573         /* Set require_privacy to false since no SCAN_REQ are send
5574          * during passive scanning. Not using an unresolvable address
5575          * here is important so that peer devices using direct
5576          * advertising with our address will be correctly reported
5577          * by the controller.
5578          */
5579         if (hci_update_random_address(req, false, &own_addr_type))
5580                 return;
5581
5582         /* Adding or removing entries from the white list must
5583          * happen before enabling scanning. The controller does
5584          * not allow white list modification while scanning.
5585          */
5586         filter_policy = update_white_list(req);
5587
5588         memset(&param_cp, 0, sizeof(param_cp));
5589         param_cp.type = LE_SCAN_PASSIVE;
5590         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5591         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5592         param_cp.own_address_type = own_addr_type;
5593         param_cp.filter_policy = filter_policy;
5594         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5595                     &param_cp);
5596
5597         memset(&enable_cp, 0, sizeof(enable_cp));
5598         enable_cp.enable = LE_SCAN_ENABLE;
5599         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5600         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5601                     &enable_cp);
5602 }
5603
5604 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5605 {
5606         if (status)
5607                 BT_DBG("HCI request failed to update background scanning: "
5608                        "status 0x%2.2x", status);
5609 }
5610
5611 /* This function controls the background scanning based on hdev->pend_le_conns
5612  * list. If there are pending LE connection we start the background scanning,
5613  * otherwise we stop it.
5614  *
5615  * This function requires the caller holds hdev->lock.
5616  */
5617 void hci_update_background_scan(struct hci_dev *hdev)
5618 {
5619         struct hci_request req;
5620         struct hci_conn *conn;
5621         int err;
5622
5623         if (!test_bit(HCI_UP, &hdev->flags) ||
5624             test_bit(HCI_INIT, &hdev->flags) ||
5625             test_bit(HCI_SETUP, &hdev->dev_flags) ||
5626             test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5627             test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5628             test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5629                 return;
5630
5631         /* No point in doing scanning if LE support hasn't been enabled */
5632         if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5633                 return;
5634
5635         /* If discovery is active don't interfere with it */
5636         if (hdev->discovery.state != DISCOVERY_STOPPED)
5637                 return;
5638
5639         hci_req_init(&req, hdev);
5640
5641         if (list_empty(&hdev->pend_le_conns) &&
5642             list_empty(&hdev->pend_le_reports)) {
5643                 /* If there is no pending LE connections or devices
5644                  * to be scanned for, we should stop the background
5645                  * scanning.
5646                  */
5647
5648                 /* If controller is not scanning we are done. */
5649                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5650                         return;
5651
5652                 hci_req_add_le_scan_disable(&req);
5653
5654                 BT_DBG("%s stopping background scanning", hdev->name);
5655         } else {
5656                 /* If there is at least one pending LE connection, we should
5657                  * keep the background scan running.
5658                  */
5659
5660                 /* If controller is connecting, we should not start scanning
5661                  * since some controllers are not able to scan and connect at
5662                  * the same time.
5663                  */
5664                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5665                 if (conn)
5666                         return;
5667
5668                 /* If controller is currently scanning, we stop it to ensure we
5669                  * don't miss any advertising (due to duplicates filter).
5670                  */
5671                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5672                         hci_req_add_le_scan_disable(&req);
5673
5674                 hci_req_add_le_passive_scan(&req);
5675
5676                 BT_DBG("%s starting background scanning", hdev->name);
5677         }
5678
5679         err = hci_req_run(&req, update_background_scan_complete);
5680         if (err)
5681                 BT_ERR("Failed to run HCI request: err %d", err);
5682 }
5683
5684 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5685 {
5686         struct bdaddr_list *b;
5687
5688         list_for_each_entry(b, &hdev->whitelist, list) {
5689                 struct hci_conn *conn;
5690
5691                 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5692                 if (!conn)
5693                         return true;
5694
5695                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5696                         return true;
5697         }
5698
5699         return false;
5700 }
5701
5702 void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5703 {
5704         u8 scan;
5705
5706         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5707                 return;
5708
5709         if (!hdev_is_powered(hdev))
5710                 return;
5711
5712         if (mgmt_powering_down(hdev))
5713                 return;
5714
5715         if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
5716             disconnected_whitelist_entries(hdev))
5717                 scan = SCAN_PAGE;
5718         else
5719                 scan = SCAN_DISABLED;
5720
5721         if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5722                 return;
5723
5724         if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5725                 scan |= SCAN_INQUIRY;
5726
5727         if (req)
5728                 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5729         else
5730                 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5731 }