/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

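/* Forward HCI device events to the HCI socket layer so that monitor
 * sockets get notified about device state changes.
 */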
static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

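/* debugfs read: report whether Device Under Test mode is active ('Y'/'N'). */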
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

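/* debugfs write: parse a boolean and enable Device Under Test mode via
 * HCI_OP_ENABLE_DUT_MODE, or leave it again by resetting the controller.
 * The device has to be up for this to work.
 */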
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dev_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

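/* Dump every supported LMP feature page and, if the controller is LE
 * capable, the LE feature page as well.
 */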
static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

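/* List all service UUIDs registered for this controller, one per line. */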
static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

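/* Dump the inquiry (discovery) cache: address, page scan parameters,
 * device class, clock offset, RSSI, SSP mode and timestamp per entry.
 */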
static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

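/* List all stored BR/EDR link keys with type, value and PIN length. */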
static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

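/* Toggle Secure Simple Pairing debug mode on a running controller and
 * cache the new setting once the command succeeds.
 */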
static int ssp_debug_mode_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;
        struct sk_buff *skb;
        __u8 mode;
        int err;

        if (val != 0 && val != 1)
                return -EINVAL;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        hci_req_lock(hdev);
        mode = val;
        skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
                             &mode, HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_lock(hdev);
        hdev->ssp_debug_mode = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->ssp_debug_mode;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
                        ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

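/* Force-enable Secure Connections support. Only allowed while the device
 * is down, since the flag influences controller initialization.
 */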
static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dev_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

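/* Print the current identity address and type together with the local
 * IRK and the current resolvable private address.
 */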
static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

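/* Set the minimum LE connection interval. The valid range is 0x0006 to
 * 0x0c80 and the value may not exceed the configured maximum interval.
 */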
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

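/* Toggle 6LoWPAN support for this controller from debugfs. */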
static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
                            size_t count, loff_t *position)
{
        struct hci_dev *hdev = fp->private_data;
        bool enable;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));

        if (copy_from_user(buf, user_buffer, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';

        if (strtobool(buf, &enable) < 0)
                return -EINVAL;

        if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

        return count;
}

static const struct file_operations lowpan_debugfs_fops = {
        .open           = simple_open,
        .read           = lowpan_read,
        .write          = lowpan_write,
        .llseek         = default_llseek,
};

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
        struct hci_dev *hdev = sf->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
        return single_open(file, le_auto_conn_show, inode->i_private);
}

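/* Manage LE connection parameters from debugfs. Accepted commands are
 * "add <bdaddr> <addr_type> [auto_connect]", "del <bdaddr> <addr_type>"
 * and "clr" to clear all entries.
 */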
static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
                                  size_t count, loff_t *offset)
{
        struct seq_file *sf = file->private_data;
        struct hci_dev *hdev = sf->private;
        u8 auto_connect = 0;
        bdaddr_t addr;
        u8 addr_type;
        char *buf;
        int err = 0;
        int n;

        /* Don't allow partial write */
        if (*offset != 0)
                return -EINVAL;

        if (count < 3)
                return -EINVAL;

        buf = memdup_user(data, count);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        if (memcmp(buf, "add", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type,
                           &auto_connect);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
                                          hdev->le_conn_min_interval,
                                          hdev->le_conn_max_interval);
                hci_dev_unlock(hdev);

                if (err)
                        goto done;
        } else if (memcmp(buf, "del", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                hci_conn_params_del(hdev, &addr, addr_type);
                hci_dev_unlock(hdev);
        } else if (memcmp(buf, "clr", 3) == 0) {
                hci_dev_lock(hdev);
                hci_conn_params_clear(hdev);
                hci_pend_le_conns_clear(hdev);
                hci_update_background_scan(hdev);
                hci_dev_unlock(hdev);
        } else {
                err = -EINVAL;
        }

done:
        kfree(buf);

        if (err)
                return err;
        else
                return count;
}

static const struct file_operations le_auto_conn_fops = {
        .open           = le_auto_conn_open,
        .read           = seq_read,
        .write          = le_auto_conn_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

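/* Pick up the last received event and hand it to the caller if it is the
 * expected event, or the Command Complete event for the given opcode.
 * Returns ERR_PTR(-ENODATA) if no matching event was received.
 */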
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

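/* Send a single HCI command and sleep until the matching completion
 * event arrives or the timeout expires. Returns the event skb on
 * success or an ERR_PTR on failure.
 */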
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

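/* Stage one of controller initialization: optional reset followed by the
 * basic capability reads for the device type (BR/EDR or AMP).
 */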
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

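/* Pick the best inquiry mode the controller handles: extended (0x02),
 * with RSSI (0x01) or standard (0x00). A few controllers are matched
 * explicitly by manufacturer/revision; these presumably handle inquiry
 * with RSSI without advertising the feature bit.
 */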
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

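/* Build the page 1 event mask, enabling only events the controller can
 * actually generate, plus the LE event mask when LE is supported.
 */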
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If the Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If the Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev))
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

1697 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1698 {
1699         struct hci_dev *hdev = req->hdev;
1700         u8 p;
1701
1702         /* Some Broadcom based Bluetooth controllers do not support the
1703          * Delete Stored Link Key command. They are clearly indicating its
1704          * absence in the bit mask of supported commands.
1705          *
1706          * Check the supported commands and send this command only if it
1707          * is marked as supported. If it is not supported, assume that the
1708          * controller has no actual support for stored link keys, which
1709          * makes this command redundant anyway.
1710          *
1711          * Some controllers indicate that they support deleting stored
1712          * link keys, but they actually do not. The quirk lets a driver
1713          * just disable this command.
1714          */
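        /* (For reference: hdev->commands mirrors the Read Local Supported
         * Commands response, where octet 6 bit 7, the 0x80 mask tested
         * below, corresponds to the Delete Stored Link Key command.)
         */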
1715         if (hdev->commands[6] & 0x80 &&
1716             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1717                 struct hci_cp_delete_stored_link_key cp;
1718
1719                 bacpy(&cp.bdaddr, BDADDR_ANY);
1720                 cp.delete_all = 0x01;
1721                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1722                             sizeof(cp), &cp);
1723         }
1724
1725         if (hdev->commands[5] & 0x10)
1726                 hci_setup_link_policy(req);
1727
1728         if (lmp_le_capable(hdev))
1729                 hci_set_le_support(req);
1730
1731         /* Read features beyond page 1 if available */
1732         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1733                 struct hci_cp_read_local_ext_features cp;
1734
1735                 cp.page = p;
1736                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1737                             sizeof(cp), &cp);
1738         }
1739 }
1740
1741 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1742 {
1743         struct hci_dev *hdev = req->hdev;
1744
1745         /* Set event mask page 2 if the HCI command for it is supported */
1746         if (hdev->commands[22] & 0x04)
1747                 hci_set_event_mask_page_2(req);
1748
1749         /* Check for Synchronization Train support */
1750         if (lmp_sync_train_capable(hdev))
1751                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1752
1753         /* Enable Secure Connections if supported and configured */
1754         if ((lmp_sc_capable(hdev) ||
1755              test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
1756             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1757                 u8 support = 0x01;
1758                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1759                             sizeof(support), &support);
1760         }
1761 }
1762
1763 static int __hci_init(struct hci_dev *hdev)
1764 {
1765         int err;
1766
1767         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1768         if (err < 0)
1769                 return err;
1770
1771         /* The Device Under Test (DUT) mode is special and available for
1772          * all controller types. So just create it early on.
1773          */
1774         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1775                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1776                                     &dut_mode_fops);
1777         }
1778
1779         /* The HCI_BREDR device type covers single-mode LE, single-mode
1780          * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
1781          * only need the first stage init.
1782          */
1783         if (hdev->dev_type != HCI_BREDR)
1784                 return 0;
1785
1786         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1787         if (err < 0)
1788                 return err;
1789
1790         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1791         if (err < 0)
1792                 return err;
1793
1794         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1795         if (err < 0)
1796                 return err;
1797
1798         /* Only create debugfs entries during the initial setup
1799          * phase and not every time the controller gets powered on.
1800          */
1801         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1802                 return 0;
1803
1804         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1805                             &features_fops);
1806         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1807                            &hdev->manufacturer);
1808         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1809         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1810         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1811                             &blacklist_fops);
1812         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1813
1814         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1815                             &conn_info_min_age_fops);
1816         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1817                             &conn_info_max_age_fops);
1818
1819         if (lmp_bredr_capable(hdev)) {
1820                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1821                                     hdev, &inquiry_cache_fops);
1822                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1823                                     hdev, &link_keys_fops);
1824                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1825                                     hdev, &dev_class_fops);
1826                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1827                                     hdev, &voice_setting_fops);
1828         }
1829
1830         if (lmp_ssp_capable(hdev)) {
1831                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1832                                     hdev, &auto_accept_delay_fops);
1833                 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1834                                     hdev, &ssp_debug_mode_fops);
1835                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1836                                     hdev, &force_sc_support_fops);
1837                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1838                                     hdev, &sc_only_mode_fops);
1839         }
1840
1841         if (lmp_sniff_capable(hdev)) {
1842                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1843                                     hdev, &idle_timeout_fops);
1844                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1845                                     hdev, &sniff_min_interval_fops);
1846                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1847                                     hdev, &sniff_max_interval_fops);
1848         }
1849
1850         if (lmp_le_capable(hdev)) {
1851                 debugfs_create_file("identity", 0400, hdev->debugfs,
1852                                     hdev, &identity_fops);
1853                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1854                                     hdev, &rpa_timeout_fops);
1855                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1856                                     hdev, &random_address_fops);
1857                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1858                                     hdev, &static_address_fops);
1859
1860                 /* For controllers with a public address, provide a debug
1861                  * option to force the usage of the configured static
1862                  * address. By default the public address is used.
1863                  */
1864                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1865                         debugfs_create_file("force_static_address", 0644,
1866                                             hdev->debugfs, hdev,
1867                                             &force_static_address_fops);
1868
1869                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1870                                   &hdev->le_white_list_size);
1871                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1872                                     &white_list_fops);
1873                 debugfs_create_file("identity_resolving_keys", 0400,
1874                                     hdev->debugfs, hdev,
1875                                     &identity_resolving_keys_fops);
1876                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1877                                     hdev, &long_term_keys_fops);
1878                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1879                                     hdev, &conn_min_interval_fops);
1880                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1881                                     hdev, &conn_max_interval_fops);
1882                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1883                                     hdev, &adv_channel_map_fops);
1884                 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1885                                     &lowpan_debugfs_fops);
1886                 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1887                                     &le_auto_conn_fops);
1888                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1889                                    hdev->debugfs,
1890                                    &hdev->discov_interleaved_timeout);
1891         }
1892
1893         return 0;
1894 }
1895
1896 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1897 {
1898         __u8 scan = opt;
1899
1900         BT_DBG("%s %x", req->hdev->name, scan);
1901
1902         /* Inquiry and Page scans */
1903         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1904 }
1905
1906 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1907 {
1908         __u8 auth = opt;
1909
1910         BT_DBG("%s %x", req->hdev->name, auth);
1911
1912         /* Authentication */
1913         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1914 }
1915
1916 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1917 {
1918         __u8 encrypt = opt;
1919
1920         BT_DBG("%s %x", req->hdev->name, encrypt);
1921
1922         /* Encryption */
1923         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1924 }
1925
1926 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1927 {
1928         __le16 policy = cpu_to_le16(opt);
1929
1930         BT_DBG("%s %x", req->hdev->name, policy);
1931
1932         /* Default link policy */
1933         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1934 }
1935
1936 /* Get HCI device by index.
1937  * Device is held on return. */
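/* Illustrative usage sketch (hypothetical caller):
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (!hdev)
 *		return -ENODEV;
 *	... use hdev ...
 *	hci_dev_put(hdev);	(drops the reference taken here)
 */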
1938 struct hci_dev *hci_dev_get(int index)
1939 {
1940         struct hci_dev *hdev = NULL, *d;
1941
1942         BT_DBG("%d", index);
1943
1944         if (index < 0)
1945                 return NULL;
1946
1947         read_lock(&hci_dev_list_lock);
1948         list_for_each_entry(d, &hci_dev_list, list) {
1949                 if (d->id == index) {
1950                         hdev = hci_dev_hold(d);
1951                         break;
1952                 }
1953         }
1954         read_unlock(&hci_dev_list_lock);
1955         return hdev;
1956 }
1957
1958 /* ---- Inquiry support ---- */
1959
1960 bool hci_discovery_active(struct hci_dev *hdev)
1961 {
1962         struct discovery_state *discov = &hdev->discovery;
1963
1964         switch (discov->state) {
1965         case DISCOVERY_FINDING:
1966         case DISCOVERY_RESOLVING:
1967                 return true;
1968
1969         default:
1970                 return false;
1971         }
1972 }
1973
1974 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1975 {
1976         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1977
1978         if (hdev->discovery.state == state)
1979                 return;
1980
1981         switch (state) {
1982         case DISCOVERY_STOPPED:
1983                 hci_update_background_scan(hdev);
1984
1985                 if (hdev->discovery.state != DISCOVERY_STARTING)
1986                         mgmt_discovering(hdev, 0);
1987                 break;
1988         case DISCOVERY_STARTING:
1989                 break;
1990         case DISCOVERY_FINDING:
1991                 mgmt_discovering(hdev, 1);
1992                 break;
1993         case DISCOVERY_RESOLVING:
1994                 break;
1995         case DISCOVERY_STOPPING:
1996                 break;
1997         }
1998
1999         hdev->discovery.state = state;
2000 }
2001
2002 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2003 {
2004         struct discovery_state *cache = &hdev->discovery;
2005         struct inquiry_entry *p, *n;
2006
2007         list_for_each_entry_safe(p, n, &cache->all, all) {
2008                 list_del(&p->all);
2009                 kfree(p);
2010         }
2011
2012         INIT_LIST_HEAD(&cache->unknown);
2013         INIT_LIST_HEAD(&cache->resolve);
2014 }
2015
2016 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2017                                                bdaddr_t *bdaddr)
2018 {
2019         struct discovery_state *cache = &hdev->discovery;
2020         struct inquiry_entry *e;
2021
2022         BT_DBG("cache %p, %pMR", cache, bdaddr);
2023
2024         list_for_each_entry(e, &cache->all, all) {
2025                 if (!bacmp(&e->data.bdaddr, bdaddr))
2026                         return e;
2027         }
2028
2029         return NULL;
2030 }
2031
2032 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2033                                                        bdaddr_t *bdaddr)
2034 {
2035         struct discovery_state *cache = &hdev->discovery;
2036         struct inquiry_entry *e;
2037
2038         BT_DBG("cache %p, %pMR", cache, bdaddr);
2039
2040         list_for_each_entry(e, &cache->unknown, list) {
2041                 if (!bacmp(&e->data.bdaddr, bdaddr))
2042                         return e;
2043         }
2044
2045         return NULL;
2046 }
2047
2048 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2049                                                        bdaddr_t *bdaddr,
2050                                                        int state)
2051 {
2052         struct discovery_state *cache = &hdev->discovery;
2053         struct inquiry_entry *e;
2054
2055         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2056
2057         list_for_each_entry(e, &cache->resolve, list) {
2058                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2059                         return e;
2060                 if (!bacmp(&e->data.bdaddr, bdaddr))
2061                         return e;
2062         }
2063
2064         return NULL;
2065 }
2066
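/* Re-insert @ie into the resolve list, which is kept ordered (roughly)
 * by signal strength: entries whose name resolution is already pending
 * are skipped over, and the remaining entries are sorted by ascending
 * abs(rssi), i.e. strongest signal first.
 */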
2067 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2068                                       struct inquiry_entry *ie)
2069 {
2070         struct discovery_state *cache = &hdev->discovery;
2071         struct list_head *pos = &cache->resolve;
2072         struct inquiry_entry *p;
2073
2074         list_del(&ie->list);
2075
2076         list_for_each_entry(p, &cache->resolve, list) {
2077                 if (p->name_state != NAME_PENDING &&
2078                     abs(p->data.rssi) >= abs(ie->data.rssi))
2079                         break;
2080                 pos = &p->list;
2081         }
2082
2083         list_add(&ie->list, pos);
2084 }
2085
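/* Update the inquiry cache with @data; returns false if the entry's
 * remote name is still unknown (NAME_NOT_KNOWN) and true otherwise.
 */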
2086 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2087                               bool name_known, bool *ssp)
2088 {
2089         struct discovery_state *cache = &hdev->discovery;
2090         struct inquiry_entry *ie;
2091
2092         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2093
2094         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2095
2096         *ssp = data->ssp_mode;
2097
2098         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2099         if (ie) {
2100                 if (ie->data.ssp_mode)
2101                         *ssp = true;
2102
2103                 if (ie->name_state == NAME_NEEDED &&
2104                     data->rssi != ie->data.rssi) {
2105                         ie->data.rssi = data->rssi;
2106                         hci_inquiry_cache_update_resolve(hdev, ie);
2107                 }
2108
2109                 goto update;
2110         }
2111
2112         /* Entry not in the cache. Add new one. */
2113         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2114         if (!ie)
2115                 return false;
2116
2117         list_add(&ie->all, &cache->all);
2118
2119         if (name_known) {
2120                 ie->name_state = NAME_KNOWN;
2121         } else {
2122                 ie->name_state = NAME_NOT_KNOWN;
2123                 list_add(&ie->list, &cache->unknown);
2124         }
2125
2126 update:
2127         if (name_known && ie->name_state != NAME_KNOWN &&
2128             ie->name_state != NAME_PENDING) {
2129                 ie->name_state = NAME_KNOWN;
2130                 list_del(&ie->list);
2131         }
2132
2133         memcpy(&ie->data, data, sizeof(*data));
2134         ie->timestamp = jiffies;
2135         cache->timestamp = jiffies;
2136
2137         if (ie->name_state == NAME_NOT_KNOWN)
2138                 return false;
2139
2140         return true;
2141 }
2142
2143 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2144 {
2145         struct discovery_state *cache = &hdev->discovery;
2146         struct inquiry_info *info = (struct inquiry_info *) buf;
2147         struct inquiry_entry *e;
2148         int copied = 0;
2149
2150         list_for_each_entry(e, &cache->all, all) {
2151                 struct inquiry_data *data = &e->data;
2152
2153                 if (copied >= num)
2154                         break;
2155
2156                 bacpy(&info->bdaddr, &data->bdaddr);
2157                 info->pscan_rep_mode    = data->pscan_rep_mode;
2158                 info->pscan_period_mode = data->pscan_period_mode;
2159                 info->pscan_mode        = data->pscan_mode;
2160                 memcpy(info->dev_class, data->dev_class, 3);
2161                 info->clock_offset      = data->clock_offset;
2162
2163                 info++;
2164                 copied++;
2165         }
2166
2167         BT_DBG("cache %p, copied %d", cache, copied);
2168         return copied;
2169 }
2170
2171 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2172 {
2173         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2174         struct hci_dev *hdev = req->hdev;
2175         struct hci_cp_inquiry cp;
2176
2177         BT_DBG("%s", hdev->name);
2178
2179         if (test_bit(HCI_INQUIRY, &hdev->flags))
2180                 return;
2181
2182         /* Start Inquiry */
2183         memcpy(&cp.lap, &ir->lap, 3);
2184         cp.length  = ir->length;
2185         cp.num_rsp = ir->num_rsp;
2186         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2187 }
2188
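/* Bit-wait action used with wait_on_bit() in hci_inquiry() below: yield
 * the CPU and report whether a signal interrupted the wait so that the
 * caller can bail out with -EINTR.
 */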
2189 static int wait_inquiry(void *word)
2190 {
2191         schedule();
2192         return signal_pending(current);
2193 }
2194
2195 int hci_inquiry(void __user *arg)
2196 {
2197         __u8 __user *ptr = arg;
2198         struct hci_inquiry_req ir;
2199         struct hci_dev *hdev;
2200         int err = 0, do_inquiry = 0, max_rsp;
2201         long timeo;
2202         __u8 *buf;
2203
2204         if (copy_from_user(&ir, ptr, sizeof(ir)))
2205                 return -EFAULT;
2206
2207         hdev = hci_dev_get(ir.dev_id);
2208         if (!hdev)
2209                 return -ENODEV;
2210
2211         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2212                 err = -EBUSY;
2213                 goto done;
2214         }
2215
2216         if (hdev->dev_type != HCI_BREDR) {
2217                 err = -EOPNOTSUPP;
2218                 goto done;
2219         }
2220
2221         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2222                 err = -EOPNOTSUPP;
2223                 goto done;
2224         }
2225
2226         hci_dev_lock(hdev);
2227         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2228             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2229                 hci_inquiry_cache_flush(hdev);
2230                 do_inquiry = 1;
2231         }
2232         hci_dev_unlock(hdev);
2233
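        /* Inquiry_Length is specified in units of 1.28 s; budgeting 2 s of
         * wall-clock time per unit gives the request some headroom over
         * the nominal inquiry duration.
         */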
2234         timeo = ir.length * msecs_to_jiffies(2000);
2235
2236         if (do_inquiry) {
2237                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2238                                    timeo);
2239                 if (err < 0)
2240                         goto done;
2241
2242                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2243                  * cleared). If it is interrupted by a signal, return -EINTR.
2244                  */
2245                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2246                                 TASK_INTERRUPTIBLE))
2247                         return -EINTR;
2248         }
2249
2250         /* For an unlimited number of responses, use a buffer with
2251          * 255 entries.
2252          */
2253         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2254
2255         /* inquiry_cache_dump() can't sleep, so allocate a temporary
2256          * buffer first and then copy it to user space.
2257          */
2258         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2259         if (!buf) {
2260                 err = -ENOMEM;
2261                 goto done;
2262         }
2263
2264         hci_dev_lock(hdev);
2265         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2266         hci_dev_unlock(hdev);
2267
2268         BT_DBG("num_rsp %d", ir.num_rsp);
2269
2270         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2271                 ptr += sizeof(ir);
2272                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2273                                  ir.num_rsp))
2274                         err = -EFAULT;
2275         } else
2276                 err = -EFAULT;
2277
2278         kfree(buf);
2279
2280 done:
2281         hci_dev_put(hdev);
2282         return err;
2283 }
2284
2285 static int hci_dev_do_open(struct hci_dev *hdev)
2286 {
2287         int ret = 0;
2288
2289         BT_DBG("%s %p", hdev->name, hdev);
2290
2291         hci_req_lock(hdev);
2292
2293         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2294                 ret = -ENODEV;
2295                 goto done;
2296         }
2297
2298         if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2299                 /* Check for rfkill but allow the HCI setup stage to
2300                  * proceed (which in itself doesn't cause any RF activity).
2301                  */
2302                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2303                         ret = -ERFKILL;
2304                         goto done;
2305                 }
2306
2307                 /* Check for valid public address or a configured static
2308                  * random address, but let the HCI setup proceed to
2309                  * be able to determine if there is a public address
2310                  * or not.
2311                  *
2312                  * In case of user channel usage, it is not important
2313                  * if a public address or static random address is
2314                  * available.
2315                  *
2316                  * This check is only valid for BR/EDR controllers
2317                  * since AMP controllers do not have an address.
2318                  */
2319                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2320                     hdev->dev_type == HCI_BREDR &&
2321                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2322                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2323                         ret = -EADDRNOTAVAIL;
2324                         goto done;
2325                 }
2326         }
2327
2328         if (test_bit(HCI_UP, &hdev->flags)) {
2329                 ret = -EALREADY;
2330                 goto done;
2331         }
2332
2333         if (hdev->open(hdev)) {
2334                 ret = -EIO;
2335                 goto done;
2336         }
2337
2338         atomic_set(&hdev->cmd_cnt, 1);
2339         set_bit(HCI_INIT, &hdev->flags);
2340
2341         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2342                 ret = hdev->setup(hdev);
2343
2344         if (!ret) {
2345                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2346                         set_bit(HCI_RAW, &hdev->flags);
2347
2348                 if (!test_bit(HCI_RAW, &hdev->flags) &&
2349                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2350                         ret = __hci_init(hdev);
2351         }
2352
2353         clear_bit(HCI_INIT, &hdev->flags);
2354
2355         if (!ret) {
2356                 hci_dev_hold(hdev);
2357                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2358                 set_bit(HCI_UP, &hdev->flags);
2359                 hci_notify(hdev, HCI_DEV_UP);
2360                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2361                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2362                     hdev->dev_type == HCI_BREDR) {
2363                         hci_dev_lock(hdev);
2364                         mgmt_powered(hdev, 1);
2365                         hci_dev_unlock(hdev);
2366                 }
2367         } else {
2368                 /* Init failed, cleanup */
2369                 flush_work(&hdev->tx_work);
2370                 flush_work(&hdev->cmd_work);
2371                 flush_work(&hdev->rx_work);
2372
2373                 skb_queue_purge(&hdev->cmd_q);
2374                 skb_queue_purge(&hdev->rx_q);
2375
2376                 if (hdev->flush)
2377                         hdev->flush(hdev);
2378
2379                 if (hdev->sent_cmd) {
2380                         kfree_skb(hdev->sent_cmd);
2381                         hdev->sent_cmd = NULL;
2382                 }
2383
2384                 hdev->close(hdev);
2385                 hdev->flags = 0;
2386         }
2387
2388 done:
2389         hci_req_unlock(hdev);
2390         return ret;
2391 }
2392
2393 /* ---- HCI ioctl helpers ---- */
2394
2395 int hci_dev_open(__u16 dev)
2396 {
2397         struct hci_dev *hdev;
2398         int err;
2399
2400         hdev = hci_dev_get(dev);
2401         if (!hdev)
2402                 return -ENODEV;
2403
2404         /* We need to ensure that no other power on/off work is pending
2405          * before proceeding to call hci_dev_do_open. This is
2406          * particularly important if the setup procedure has not yet
2407          * completed.
2408          */
2409         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2410                 cancel_delayed_work(&hdev->power_off);
2411
2412         /* After this call it is guaranteed that the setup procedure
2413          * has finished. This means that error conditions like RFKILL
2414          * or no valid public or static random address apply.
2415          */
2416         flush_workqueue(hdev->req_workqueue);
2417
2418         err = hci_dev_do_open(hdev);
2419
2420         hci_dev_put(hdev);
2421
2422         return err;
2423 }
2424
2425 static int hci_dev_do_close(struct hci_dev *hdev)
2426 {
2427         BT_DBG("%s %p", hdev->name, hdev);
2428
2429         cancel_delayed_work(&hdev->power_off);
2430
2431         hci_req_cancel(hdev, ENODEV);
2432         hci_req_lock(hdev);
2433
2434         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2435                 del_timer_sync(&hdev->cmd_timer);
2436                 hci_req_unlock(hdev);
2437                 return 0;
2438         }
2439
2440         /* Flush the RX and TX work items */
2441         flush_work(&hdev->tx_work);
2442         flush_work(&hdev->rx_work);
2443
2444         if (hdev->discov_timeout > 0) {
2445                 cancel_delayed_work(&hdev->discov_off);
2446                 hdev->discov_timeout = 0;
2447                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2448                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2449         }
2450
2451         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2452                 cancel_delayed_work(&hdev->service_cache);
2453
2454         cancel_delayed_work_sync(&hdev->le_scan_disable);
2455
2456         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2457                 cancel_delayed_work_sync(&hdev->rpa_expired);
2458
2459         hci_dev_lock(hdev);
2460         hci_inquiry_cache_flush(hdev);
2461         hci_conn_hash_flush(hdev);
2462         hci_pend_le_conns_clear(hdev);
2463         hci_dev_unlock(hdev);
2464
2465         hci_notify(hdev, HCI_DEV_DOWN);
2466
2467         if (hdev->flush)
2468                 hdev->flush(hdev);
2469
2470         /* Reset device */
2471         skb_queue_purge(&hdev->cmd_q);
2472         atomic_set(&hdev->cmd_cnt, 1);
2473         if (!test_bit(HCI_RAW, &hdev->flags) &&
2474             !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2475             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2476                 set_bit(HCI_INIT, &hdev->flags);
2477                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2478                 clear_bit(HCI_INIT, &hdev->flags);
2479         }
2480
2481         /* Flush cmd work */
2482         flush_work(&hdev->cmd_work);
2483
2484         /* Drop queues */
2485         skb_queue_purge(&hdev->rx_q);
2486         skb_queue_purge(&hdev->cmd_q);
2487         skb_queue_purge(&hdev->raw_q);
2488
2489         /* Drop last sent command */
2490         if (hdev->sent_cmd) {
2491                 del_timer_sync(&hdev->cmd_timer);
2492                 kfree_skb(hdev->sent_cmd);
2493                 hdev->sent_cmd = NULL;
2494         }
2495
2496         kfree_skb(hdev->recv_evt);
2497         hdev->recv_evt = NULL;
2498
2499         /* After this point our queues are empty
2500          * and no tasks are scheduled. */
2501         hdev->close(hdev);
2502
2503         /* Clear flags */
2504         hdev->flags = 0;
2505         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2506
2507         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2508                 if (hdev->dev_type == HCI_BREDR) {
2509                         hci_dev_lock(hdev);
2510                         mgmt_powered(hdev, 0);
2511                         hci_dev_unlock(hdev);
2512                 }
2513         }
2514
2515         /* Controller radio is available but is currently powered down */
2516         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2517
2518         memset(hdev->eir, 0, sizeof(hdev->eir));
2519         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2520         bacpy(&hdev->random_addr, BDADDR_ANY);
2521
2522         hci_req_unlock(hdev);
2523
2524         hci_dev_put(hdev);
2525         return 0;
2526 }
2527
2528 int hci_dev_close(__u16 dev)
2529 {
2530         struct hci_dev *hdev;
2531         int err;
2532
2533         hdev = hci_dev_get(dev);
2534         if (!hdev)
2535                 return -ENODEV;
2536
2537         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2538                 err = -EBUSY;
2539                 goto done;
2540         }
2541
2542         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2543                 cancel_delayed_work(&hdev->power_off);
2544
2545         err = hci_dev_do_close(hdev);
2546
2547 done:
2548         hci_dev_put(hdev);
2549         return err;
2550 }
2551
2552 int hci_dev_reset(__u16 dev)
2553 {
2554         struct hci_dev *hdev;
2555         int ret = 0;
2556
2557         hdev = hci_dev_get(dev);
2558         if (!hdev)
2559                 return -ENODEV;
2560
2561         hci_req_lock(hdev);
2562
2563         if (!test_bit(HCI_UP, &hdev->flags)) {
2564                 ret = -ENETDOWN;
2565                 goto done;
2566         }
2567
2568         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2569                 ret = -EBUSY;
2570                 goto done;
2571         }
2572
2573         /* Drop queues */
2574         skb_queue_purge(&hdev->rx_q);
2575         skb_queue_purge(&hdev->cmd_q);
2576
2577         hci_dev_lock(hdev);
2578         hci_inquiry_cache_flush(hdev);
2579         hci_conn_hash_flush(hdev);
2580         hci_dev_unlock(hdev);
2581
2582         if (hdev->flush)
2583                 hdev->flush(hdev);
2584
2585         atomic_set(&hdev->cmd_cnt, 1);
2586         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2587
2588         if (!test_bit(HCI_RAW, &hdev->flags))
2589                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2590
2591 done:
2592         hci_req_unlock(hdev);
2593         hci_dev_put(hdev);
2594         return ret;
2595 }
2596
2597 int hci_dev_reset_stat(__u16 dev)
2598 {
2599         struct hci_dev *hdev;
2600         int ret = 0;
2601
2602         hdev = hci_dev_get(dev);
2603         if (!hdev)
2604                 return -ENODEV;
2605
2606         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2607                 ret = -EBUSY;
2608                 goto done;
2609         }
2610
2611         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2612
2613 done:
2614         hci_dev_put(hdev);
2615         return ret;
2616 }
2617
2618 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2619 {
2620         struct hci_dev *hdev;
2621         struct hci_dev_req dr;
2622         int err = 0;
2623
2624         if (copy_from_user(&dr, arg, sizeof(dr)))
2625                 return -EFAULT;
2626
2627         hdev = hci_dev_get(dr.dev_id);
2628         if (!hdev)
2629                 return -ENODEV;
2630
2631         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2632                 err = -EBUSY;
2633                 goto done;
2634         }
2635
2636         if (hdev->dev_type != HCI_BREDR) {
2637                 err = -EOPNOTSUPP;
2638                 goto done;
2639         }
2640
2641         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2642                 err = -EOPNOTSUPP;
2643                 goto done;
2644         }
2645
2646         switch (cmd) {
2647         case HCISETAUTH:
2648                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2649                                    HCI_INIT_TIMEOUT);
2650                 break;
2651
2652         case HCISETENCRYPT:
2653                 if (!lmp_encrypt_capable(hdev)) {
2654                         err = -EOPNOTSUPP;
2655                         break;
2656                 }
2657
2658                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2659                         /* Auth must be enabled first */
2660                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2661                                            HCI_INIT_TIMEOUT);
2662                         if (err)
2663                                 break;
2664                 }
2665
2666                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2667                                    HCI_INIT_TIMEOUT);
2668                 break;
2669
2670         case HCISETSCAN:
2671                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2672                                    HCI_INIT_TIMEOUT);
2673                 break;
2674
2675         case HCISETLINKPOL:
2676                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2677                                    HCI_INIT_TIMEOUT);
2678                 break;
2679
2680         case HCISETLINKMODE:
2681                 hdev->link_mode = ((__u16) dr.dev_opt) &
2682                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2683                 break;
2684
2685         case HCISETPTYPE:
2686                 hdev->pkt_type = (__u16) dr.dev_opt;
2687                 break;
2688
2689         case HCISETACLMTU:
2690                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2691                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2692                 break;
2693
2694         case HCISETSCOMTU:
2695                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2696                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2697                 break;
2698
2699         default:
2700                 err = -EINVAL;
2701                 break;
2702         }
2703
2704 done:
2705         hci_dev_put(hdev);
2706         return err;
2707 }
2708
2709 int hci_get_dev_list(void __user *arg)
2710 {
2711         struct hci_dev *hdev;
2712         struct hci_dev_list_req *dl;
2713         struct hci_dev_req *dr;
2714         int n = 0, size, err;
2715         __u16 dev_num;
2716
2717         if (get_user(dev_num, (__u16 __user *) arg))
2718                 return -EFAULT;
2719
2720         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2721                 return -EINVAL;
2722
2723         size = sizeof(*dl) + dev_num * sizeof(*dr);
2724
2725         dl = kzalloc(size, GFP_KERNEL);
2726         if (!dl)
2727                 return -ENOMEM;
2728
2729         dr = dl->dev_req;
2730
2731         read_lock(&hci_dev_list_lock);
2732         list_for_each_entry(hdev, &hci_dev_list, list) {
2733                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2734                         cancel_delayed_work(&hdev->power_off);
2735
2736                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2737                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2738
2739                 (dr + n)->dev_id  = hdev->id;
2740                 (dr + n)->dev_opt = hdev->flags;
2741
2742                 if (++n >= dev_num)
2743                         break;
2744         }
2745         read_unlock(&hci_dev_list_lock);
2746
2747         dl->dev_num = n;
2748         size = sizeof(*dl) + n * sizeof(*dr);
2749
2750         err = copy_to_user(arg, dl, size);
2751         kfree(dl);
2752
2753         return err ? -EFAULT : 0;
2754 }
2755
2756 int hci_get_dev_info(void __user *arg)
2757 {
2758         struct hci_dev *hdev;
2759         struct hci_dev_info di;
2760         int err = 0;
2761
2762         if (copy_from_user(&di, arg, sizeof(di)))
2763                 return -EFAULT;
2764
2765         hdev = hci_dev_get(di.dev_id);
2766         if (!hdev)
2767                 return -ENODEV;
2768
2769         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2770                 cancel_delayed_work_sync(&hdev->power_off);
2771
2772         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2773                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2774
2775         strcpy(di.name, hdev->name);
2776         di.bdaddr   = hdev->bdaddr;
2777         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2778         di.flags    = hdev->flags;
2779         di.pkt_type = hdev->pkt_type;
2780         if (lmp_bredr_capable(hdev)) {
2781                 di.acl_mtu  = hdev->acl_mtu;
2782                 di.acl_pkts = hdev->acl_pkts;
2783                 di.sco_mtu  = hdev->sco_mtu;
2784                 di.sco_pkts = hdev->sco_pkts;
2785         } else {
2786                 di.acl_mtu  = hdev->le_mtu;
2787                 di.acl_pkts = hdev->le_pkts;
2788                 di.sco_mtu  = 0;
2789                 di.sco_pkts = 0;
2790         }
2791         di.link_policy = hdev->link_policy;
2792         di.link_mode   = hdev->link_mode;
2793
2794         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2795         memcpy(&di.features, &hdev->features, sizeof(di.features));
2796
2797         if (copy_to_user(arg, &di, sizeof(di)))
2798                 err = -EFAULT;
2799
2800         hci_dev_put(hdev);
2801
2802         return err;
2803 }
2804
2805 /* ---- Interface to HCI drivers ---- */
2806
2807 static int hci_rfkill_set_block(void *data, bool blocked)
2808 {
2809         struct hci_dev *hdev = data;
2810
2811         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2812
2813         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2814                 return -EBUSY;
2815
2816         if (blocked) {
2817                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2818                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2819                         hci_dev_do_close(hdev);
2820         } else {
2821                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2822         }
2823
2824         return 0;
2825 }
2826
2827 static const struct rfkill_ops hci_rfkill_ops = {
2828         .set_block = hci_rfkill_set_block,
2829 };
2830
2831 static void hci_power_on(struct work_struct *work)
2832 {
2833         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2834         int err;
2835
2836         BT_DBG("%s", hdev->name);
2837
2838         err = hci_dev_do_open(hdev);
2839         if (err < 0) {
2840                 mgmt_set_powered_failed(hdev, err);
2841                 return;
2842         }
2843
2844         /* During the HCI setup phase, a few error conditions are
2845          * ignored and they need to be checked now. If they are still
2846          * valid, it is important to turn the device back off.
2847          */
2848         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2849             (hdev->dev_type == HCI_BREDR &&
2850              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2851              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2852                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2853                 hci_dev_do_close(hdev);
2854         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2855                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2856                                    HCI_AUTO_OFF_TIMEOUT);
2857         }
2858
2859         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2860                 mgmt_index_added(hdev);
2861 }
2862
2863 static void hci_power_off(struct work_struct *work)
2864 {
2865         struct hci_dev *hdev = container_of(work, struct hci_dev,
2866                                             power_off.work);
2867
2868         BT_DBG("%s", hdev->name);
2869
2870         hci_dev_do_close(hdev);
2871 }
2872
2873 static void hci_discov_off(struct work_struct *work)
2874 {
2875         struct hci_dev *hdev;
2876
2877         hdev = container_of(work, struct hci_dev, discov_off.work);
2878
2879         BT_DBG("%s", hdev->name);
2880
2881         mgmt_discoverable_timeout(hdev);
2882 }
2883
2884 void hci_uuids_clear(struct hci_dev *hdev)
2885 {
2886         struct bt_uuid *uuid, *tmp;
2887
2888         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2889                 list_del(&uuid->list);
2890                 kfree(uuid);
2891         }
2892 }
2893
2894 void hci_link_keys_clear(struct hci_dev *hdev)
2895 {
2896         struct list_head *p, *n;
2897
2898         list_for_each_safe(p, n, &hdev->link_keys) {
2899                 struct link_key *key;
2900
2901                 key = list_entry(p, struct link_key, list);
2902
2903                 list_del(p);
2904                 kfree(key);
2905         }
2906 }
2907
2908 void hci_smp_ltks_clear(struct hci_dev *hdev)
2909 {
2910         struct smp_ltk *k, *tmp;
2911
2912         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2913                 list_del(&k->list);
2914                 kfree(k);
2915         }
2916 }
2917
2918 void hci_smp_irks_clear(struct hci_dev *hdev)
2919 {
2920         struct smp_irk *k, *tmp;
2921
2922         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2923                 list_del(&k->list);
2924                 kfree(k);
2925         }
2926 }
2927
2928 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2929 {
2930         struct link_key *k;
2931
2932         list_for_each_entry(k, &hdev->link_keys, list)
2933                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2934                         return k;
2935
2936         return NULL;
2937 }
2938
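/* For reference, the authentication requirements values tested below
 * are, per the core specification: 0x00/0x01 no bonding, 0x02/0x03
 * dedicated bonding, 0x04/0x05 general bonding; odd values additionally
 * require MITM protection.
 */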
2939 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2940                                u8 key_type, u8 old_key_type)
2941 {
2942         /* Legacy key */
2943         if (key_type < 0x03)
2944                 return true;
2945
2946         /* Debug keys are insecure so don't store them persistently */
2947         if (key_type == HCI_LK_DEBUG_COMBINATION)
2948                 return false;
2949
2950         /* Changed combination key and there's no previous one */
2951         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2952                 return false;
2953
2954         /* Security mode 3 case */
2955         if (!conn)
2956                 return true;
2957
2958         /* Neither local nor remote side required no-bonding */
2959         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2960                 return true;
2961
2962         /* Local side had dedicated bonding as requirement */
2963         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2964                 return true;
2965
2966         /* Remote side had dedicated bonding as requirement */
2967         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2968                 return true;
2969
2970         /* If none of the above criteria match, then don't store the key
2971          * persistently */
2972         return false;
2973 }
2974
2975 static bool ltk_type_master(u8 type)
2976 {
2977         if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2978                 return true;
2979
2980         return false;
2981 }
2982
2983 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2984                              bool master)
2985 {
2986         struct smp_ltk *k;
2987
2988         list_for_each_entry(k, &hdev->long_term_keys, list) {
2989                 if (k->ediv != ediv || k->rand != rand)
2990                         continue;
2991
2992                 if (ltk_type_master(k->type) != master)
2993                         continue;
2994
2995                 return k;
2996         }
2997
2998         return NULL;
2999 }
3000
3001 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3002                                      u8 addr_type, bool master)
3003 {
3004         struct smp_ltk *k;
3005
3006         list_for_each_entry(k, &hdev->long_term_keys, list)
3007                 if (addr_type == k->bdaddr_type &&
3008                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3009                     ltk_type_master(k->type) == master)
3010                         return k;
3011
3012         return NULL;
3013 }
3014
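/* Resolve @rpa to an IRK in two passes: first a cheap comparison
 * against the RPA cached in each IRK entry, then the actual crypto
 * resolution via smp_irk_matches(), caching the RPA on success.
 */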
3015 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3016 {
3017         struct smp_irk *irk;
3018
3019         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3020                 if (!bacmp(&irk->rpa, rpa))
3021                         return irk;
3022         }
3023
3024         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3025                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3026                         bacpy(&irk->rpa, rpa);
3027                         return irk;
3028                 }
3029         }
3030
3031         return NULL;
3032 }
3033
3034 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3035                                      u8 addr_type)
3036 {
3037         struct smp_irk *irk;
3038
3039         /* Identity Address must be public or static random */
3040         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3041                 return NULL;
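        /* (Static random addresses have the two most significant bits set;
         * bdaddr_t is stored little-endian, so b[5] holds the top octet.)
         */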
3042
3043         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3044                 if (addr_type == irk->addr_type &&
3045                     bacmp(bdaddr, &irk->bdaddr) == 0)
3046                         return irk;
3047         }
3048
3049         return NULL;
3050 }
3051
3052 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
3053                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
3054 {
3055         struct link_key *key, *old_key;
3056         u8 old_key_type;
3057         bool persistent;
3058
3059         old_key = hci_find_link_key(hdev, bdaddr);
3060         if (old_key) {
3061                 old_key_type = old_key->type;
3062                 key = old_key;
3063         } else {
3064                 old_key_type = conn ? conn->key_type : 0xff;
3065                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3066                 if (!key)
3067                         return -ENOMEM;
3068                 list_add(&key->list, &hdev->link_keys);
3069         }
3070
3071         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3072
3073         /* Some buggy controller combinations generate a changed
3074          * combination key for legacy pairing even when there's no
3075          * previous key */
3076         if (type == HCI_LK_CHANGED_COMBINATION &&
3077             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3078                 type = HCI_LK_COMBINATION;
3079                 if (conn)
3080                         conn->key_type = type;
3081         }
3082
3083         bacpy(&key->bdaddr, bdaddr);
3084         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3085         key->pin_len = pin_len;
3086
3087         if (type == HCI_LK_CHANGED_COMBINATION)
3088                 key->type = old_key_type;
3089         else
3090                 key->type = type;
3091
3092         if (!new_key)
3093                 return 0;
3094
3095         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
3096
3097         mgmt_new_link_key(hdev, key, persistent);
3098
3099         if (conn)
3100                 conn->flush_key = !persistent;
3101
3102         return 0;
3103 }
3104
3105 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3106                             u8 addr_type, u8 type, u8 authenticated,
3107                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3108 {
3109         struct smp_ltk *key, *old_key;
3110         bool master = ltk_type_master(type);
3111
3112         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3113         if (old_key)
3114                 key = old_key;
3115         else {
3116                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3117                 if (!key)
3118                         return NULL;
3119                 list_add(&key->list, &hdev->long_term_keys);
3120         }
3121
3122         bacpy(&key->bdaddr, bdaddr);
3123         key->bdaddr_type = addr_type;
3124         memcpy(key->val, tk, sizeof(key->val));
3125         key->authenticated = authenticated;
3126         key->ediv = ediv;
3127         key->rand = rand;
3128         key->enc_size = enc_size;
3129         key->type = type;
3130
3131         return key;
3132 }
3133
3134 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3135                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3136 {
3137         struct smp_irk *irk;
3138
3139         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3140         if (!irk) {
3141                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3142                 if (!irk)
3143                         return NULL;
3144
3145                 bacpy(&irk->bdaddr, bdaddr);
3146                 irk->addr_type = addr_type;
3147
3148                 list_add(&irk->list, &hdev->identity_resolving_keys);
3149         }
3150
3151         memcpy(irk->val, val, 16);
3152         bacpy(&irk->rpa, rpa);
3153
3154         return irk;
3155 }
3156
3157 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3158 {
3159         struct link_key *key;
3160
3161         key = hci_find_link_key(hdev, bdaddr);
3162         if (!key)
3163                 return -ENOENT;
3164
3165         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3166
3167         list_del(&key->list);
3168         kfree(key);
3169
3170         return 0;
3171 }
3172
3173 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3174 {
3175         struct smp_ltk *k, *tmp;
3176         int removed = 0;
3177
3178         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3179                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3180                         continue;
3181
3182                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3183
3184                 list_del(&k->list);
3185                 kfree(k);
3186                 removed++;
3187         }
3188
3189         return removed ? 0 : -ENOENT;
3190 }
3191
3192 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3193 {
3194         struct smp_irk *k, *tmp;
3195
3196         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3197                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3198                         continue;
3199
3200                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3201
3202                 list_del(&k->list);
3203                 kfree(k);
3204         }
3205 }
3206
3207 /* HCI command timer function */
3208 static void hci_cmd_timeout(unsigned long arg)
3209 {
3210         struct hci_dev *hdev = (void *) arg;
3211
3212         if (hdev->sent_cmd) {
3213                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3214                 u16 opcode = __le16_to_cpu(sent->opcode);
3215
3216                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3217         } else {
3218                 BT_ERR("%s command tx timeout", hdev->name);
3219         }
3220
3221         atomic_set(&hdev->cmd_cnt, 1);
3222         queue_work(hdev->workqueue, &hdev->cmd_work);
3223 }
3224
3225 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3226                                           bdaddr_t *bdaddr)
3227 {
3228         struct oob_data *data;
3229
3230         list_for_each_entry(data, &hdev->remote_oob_data, list)
3231                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3232                         return data;
3233
3234         return NULL;
3235 }
3236
3237 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3238 {
3239         struct oob_data *data;
3240
3241         data = hci_find_remote_oob_data(hdev, bdaddr);
3242         if (!data)
3243                 return -ENOENT;
3244
3245         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3246
3247         list_del(&data->list);
3248         kfree(data);
3249
3250         return 0;
3251 }
3252
3253 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3254 {
3255         struct oob_data *data, *n;
3256
3257         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3258                 list_del(&data->list);
3259                 kfree(data);
3260         }
3261 }
3262
3263 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3264                             u8 *hash, u8 *randomizer)
3265 {
3266         struct oob_data *data;
3267
3268         data = hci_find_remote_oob_data(hdev, bdaddr);
3269         if (!data) {
3270                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3271                 if (!data)
3272                         return -ENOMEM;
3273
3274                 bacpy(&data->bdaddr, bdaddr);
3275                 list_add(&data->list, &hdev->remote_oob_data);
3276         }
3277
3278         memcpy(data->hash192, hash, sizeof(data->hash192));
3279         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3280
3281         memset(data->hash256, 0, sizeof(data->hash256));
3282         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3283
3284         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3285
3286         return 0;
3287 }
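
/* Note: the non-ext variant above stores only the P-192 hash/randomizer
 * and zeroes the P-256 values; hci_add_remote_oob_ext_data() below fills
 * in both sets for Secure Connections capable pairings.
 */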
3288
3289 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3290                                 u8 *hash192, u8 *randomizer192,
3291                                 u8 *hash256, u8 *randomizer256)
3292 {
3293         struct oob_data *data;
3294
3295         data = hci_find_remote_oob_data(hdev, bdaddr);
3296         if (!data) {
3297                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3298                 if (!data)
3299                         return -ENOMEM;
3300
3301                 bacpy(&data->bdaddr, bdaddr);
3302                 list_add(&data->list, &hdev->remote_oob_data);
3303         }
3304
3305         memcpy(data->hash192, hash192, sizeof(data->hash192));
3306         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3307
3308         memcpy(data->hash256, hash256, sizeof(data->hash256));
3309         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3310
3311         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3312
3313         return 0;
3314 }
3315
3316 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3317                                          bdaddr_t *bdaddr, u8 type)
3318 {
3319         struct bdaddr_list *b;
3320
3321         list_for_each_entry(b, &hdev->blacklist, list) {
3322                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3323                         return b;
3324         }
3325
3326         return NULL;
3327 }
3328
3329 static void hci_blacklist_clear(struct hci_dev *hdev)
3330 {
3331         struct list_head *p, *n;
3332
3333         list_for_each_safe(p, n, &hdev->blacklist) {
3334                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3335
3336                 list_del(p);
3337                 kfree(b);
3338         }
3339 }
3340
3341 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3342 {
3343         struct bdaddr_list *entry;
3344
3345         if (!bacmp(bdaddr, BDADDR_ANY))
3346                 return -EBADF;
3347
3348         if (hci_blacklist_lookup(hdev, bdaddr, type))
3349                 return -EEXIST;
3350
3351         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3352         if (!entry)
3353                 return -ENOMEM;
3354
3355         bacpy(&entry->bdaddr, bdaddr);
3356         entry->bdaddr_type = type;
3357
3358         list_add(&entry->list, &hdev->blacklist);
3359
3360         return mgmt_device_blocked(hdev, bdaddr, type);
3361 }
3362
3363 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3364 {
3365         struct bdaddr_list *entry;
3366
3367         if (!bacmp(bdaddr, BDADDR_ANY)) {
3368                 hci_blacklist_clear(hdev);
3369                 return 0;
3370         }
3371
3372         entry = hci_blacklist_lookup(hdev, bdaddr, type);
3373         if (!entry)
3374                 return -ENOENT;
3375
3376         list_del(&entry->list);
3377         kfree(entry);
3378
3379         return mgmt_device_unblocked(hdev, bdaddr, type);
3380 }
3381
3382 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3383                                           bdaddr_t *bdaddr, u8 type)
3384 {
3385         struct bdaddr_list *b;
3386
3387         list_for_each_entry(b, &hdev->le_white_list, list) {
3388                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3389                         return b;
3390         }
3391
3392         return NULL;
3393 }
3394
3395 void hci_white_list_clear(struct hci_dev *hdev)
3396 {
3397         struct list_head *p, *n;
3398
3399         list_for_each_safe(p, n, &hdev->le_white_list) {
3400                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3401
3402                 list_del(p);
3403                 kfree(b);
3404         }
3405 }
3406
3407 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3408 {
3409         struct bdaddr_list *entry;
3410
3411         if (!bacmp(bdaddr, BDADDR_ANY))
3412                 return -EBADF;
3413
3414         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3415         if (!entry)
3416                 return -ENOMEM;
3417
3418         bacpy(&entry->bdaddr, bdaddr);
3419         entry->bdaddr_type = type;
3420
3421         list_add(&entry->list, &hdev->le_white_list);
3422
3423         return 0;
3424 }
3425
3426 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3427 {
3428         struct bdaddr_list *entry;
3429
3430         if (!bacmp(bdaddr, BDADDR_ANY))
3431                 return -EBADF;
3432
3433         entry = hci_white_list_lookup(hdev, bdaddr, type);
3434         if (!entry)
3435                 return -ENOENT;
3436
3437         list_del(&entry->list);
3438         kfree(entry);
3439
3440         return 0;
3441 }
3442
3443 /* This function requires the caller holds hdev->lock */
3444 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3445                                                bdaddr_t *addr, u8 addr_type)
3446 {
3447         struct hci_conn_params *params;
3448
3449         list_for_each_entry(params, &hdev->le_conn_params, list) {
3450                 if (bacmp(&params->addr, addr) == 0 &&
3451                     params->addr_type == addr_type) {
3452                         return params;
3453                 }
3454         }
3455
3456         return NULL;
3457 }
3458
3459 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3460 {
3461         struct hci_conn *conn;
3462
3463         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3464         if (!conn)
3465                 return false;
3466
3467         if (conn->dst_type != type)
3468                 return false;
3469
3470         if (conn->state != BT_CONNECTED)
3471                 return false;
3472
3473         return true;
3474 }
3475
3476 static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3477 {
3478         if (addr_type == ADDR_LE_DEV_PUBLIC)
3479                 return true;
3480
3481         /* Check for a static random address (two most significant bits set) */
3482         if ((addr->b[5] & 0xc0) == 0xc0)
3483                 return true;
3484
3485         return false;
3486 }
3487
3488 /* This function requires the caller holds hdev->lock */
3489 int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3490                         u8 auto_connect, u16 conn_min_interval,
3491                         u16 conn_max_interval)
3492 {
3493         struct hci_conn_params *params;
3494
3495         if (!is_identity_address(addr, addr_type))
3496                 return -EINVAL;
3497
3498         params = hci_conn_params_lookup(hdev, addr, addr_type);
3499         if (params)
3500                 goto update;
3501
3502         params = kzalloc(sizeof(*params), GFP_KERNEL);
3503         if (!params) {
3504                 BT_ERR("Out of memory");
3505                 return -ENOMEM;
3506         }
3507
3508         bacpy(&params->addr, addr);
3509         params->addr_type = addr_type;
3510
3511         list_add(&params->list, &hdev->le_conn_params);
3512
3513 update:
3514         params->conn_min_interval = conn_min_interval;
3515         params->conn_max_interval = conn_max_interval;
3516         params->auto_connect = auto_connect;
3517
3518         switch (auto_connect) {
3519         case HCI_AUTO_CONN_DISABLED:
3520         case HCI_AUTO_CONN_LINK_LOSS:
3521                 hci_pend_le_conn_del(hdev, addr, addr_type);
3522                 break;
3523         case HCI_AUTO_CONN_ALWAYS:
3524                 if (!is_connected(hdev, addr, addr_type))
3525                         hci_pend_le_conn_add(hdev, addr, addr_type);
3526                 break;
3527         }
3528
3529         BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3530                "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3531                conn_min_interval, conn_max_interval);
3532
3533         return 0;
3534 }
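
/* Usage sketch (illustrative, not from this file): callers must hold
 * hdev->lock, e.g.
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS, 0x0028, 0x0038);
 *	hci_dev_unlock(hdev);
 *
 * The interval values mirror the defaults set in hci_alloc_dev() below.
 */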
3535
3536 /* This function requires the caller holds hdev->lock */
3537 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3538 {
3539         struct hci_conn_params *params;
3540
3541         params = hci_conn_params_lookup(hdev, addr, addr_type);
3542         if (!params)
3543                 return;
3544
3545         hci_pend_le_conn_del(hdev, addr, addr_type);
3546
3547         list_del(&params->list);
3548         kfree(params);
3549
3550         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3551 }
3552
3553 /* This function requires the caller holds hdev->lock */
3554 void hci_conn_params_clear(struct hci_dev *hdev)
3555 {
3556         struct hci_conn_params *params, *tmp;
3557
3558         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3559                 list_del(&params->list);
3560                 kfree(params);
3561         }
3562
3563         BT_DBG("All LE connection parameters were removed");
3564 }
3565
3566 /* This function requires the caller holds hdev->lock */
3567 struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3568                                             bdaddr_t *addr, u8 addr_type)
3569 {
3570         struct bdaddr_list *entry;
3571
3572         list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3573                 if (bacmp(&entry->bdaddr, addr) == 0 &&
3574                     entry->bdaddr_type == addr_type)
3575                         return entry;
3576         }
3577
3578         return NULL;
3579 }
3580
3581 /* This function requires the caller holds hdev->lock */
3582 void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3583 {
3584         struct bdaddr_list *entry;
3585
3586         entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3587         if (entry)
3588                 goto done;
3589
3590         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3591         if (!entry) {
3592                 BT_ERR("Out of memory");
3593                 return;
3594         }
3595
3596         bacpy(&entry->bdaddr, addr);
3597         entry->bdaddr_type = addr_type;
3598
3599         list_add(&entry->list, &hdev->pend_le_conns);
3600
3601         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3602
3603 done:
3604         hci_update_background_scan(hdev);
3605 }
3606
3607 /* This function requires the caller holds hdev->lock */
3608 void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3609 {
3610         struct bdaddr_list *entry;
3611
3612         entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3613         if (!entry)
3614                 goto done;
3615
3616         list_del(&entry->list);
3617         kfree(entry);
3618
3619         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3620
3621 done:
3622         hci_update_background_scan(hdev);
3623 }
3624
3625 /* This function requires the caller holds hdev->lock */
3626 void hci_pend_le_conns_clear(struct hci_dev *hdev)
3627 {
3628         struct bdaddr_list *entry, *tmp;
3629
3630         list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3631                 list_del(&entry->list);
3632                 kfree(entry);
3633         }
3634
3635         BT_DBG("All LE pending connections cleared");
3636 }
3637
3638 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3639 {
3640         if (status) {
3641                 BT_ERR("Failed to start inquiry: status %d", status);
3642
3643                 hci_dev_lock(hdev);
3644                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3645                 hci_dev_unlock(hdev);
3646                 return;
3647         }
3648 }
3649
3650 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3651 {
3652         /* General inquiry access code (GIAC) */
3653         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3654         struct hci_request req;
3655         struct hci_cp_inquiry cp;
3656         int err;
3657
3658         if (status) {
3659                 BT_ERR("Failed to disable LE scanning: status %d", status);
3660                 return;
3661         }
3662
3663         switch (hdev->discovery.type) {
3664         case DISCOV_TYPE_LE:
3665                 hci_dev_lock(hdev);
3666                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3667                 hci_dev_unlock(hdev);
3668                 break;
3669
3670         case DISCOV_TYPE_INTERLEAVED:
3671                 hci_req_init(&req, hdev);
3672
3673                 memset(&cp, 0, sizeof(cp));
3674                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3675                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3676                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3677
3678                 hci_dev_lock(hdev);
3679
3680                 hci_inquiry_cache_flush(hdev);
3681
3682                 err = hci_req_run(&req, inquiry_complete);
3683                 if (err) {
3684                         BT_ERR("Inquiry request failed: err %d", err);
3685                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3686                 }
3687
3688                 hci_dev_unlock(hdev);
3689                 break;
3690         }
3691 }
3692
3693 static void le_scan_disable_work(struct work_struct *work)
3694 {
3695         struct hci_dev *hdev = container_of(work, struct hci_dev,
3696                                             le_scan_disable.work);
3697         struct hci_request req;
3698         int err;
3699
3700         BT_DBG("%s", hdev->name);
3701
3702         hci_req_init(&req, hdev);
3703
3704         hci_req_add_le_scan_disable(&req);
3705
3706         err = hci_req_run(&req, le_scan_disable_work_complete);
3707         if (err)
3708                 BT_ERR("Disable LE scanning request failed: err %d", err);
3709 }
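
/* le_scan_disable is a delayed work queued elsewhere (e.g. when LE
 * discovery is started with a timeout); when it fires the scan is
 * stopped and, for interleaved discovery, the completion handler above
 * chains into a BR/EDR inquiry.
 */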
3710
3711 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3712 {
3713         struct hci_dev *hdev = req->hdev;
3714
3715         /* If we're advertising or initiating an LE connection we can't
3716          * go ahead and change the random address at this time. This is
3717          * because the eventual initiator address used for the
3718          * subsequently created connection will be undefined (some
3719          * controllers use the new address and others the one we had
3720          * when the operation started).
3721          *
3722          * In this kind of scenario skip the update and let the random
3723          * address be updated at the next cycle.
3724          */
3725         if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3726             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3727                 BT_DBG("Deferring random address update");
3728                 return;
3729         }
3730
3731         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3732 }
3733
3734 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3735                               u8 *own_addr_type)
3736 {
3737         struct hci_dev *hdev = req->hdev;
3738         int err;
3739
3740         /* If privacy is enabled, use a resolvable private address. If the
3741          * current RPA has expired or something other than the current RPA
3742          * is in use, generate a new one.
3743          */
3744         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3745                 int to;
3746
3747                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3748
3749                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3750                     !bacmp(&hdev->random_addr, &hdev->rpa))
3751                         return 0;
3752
3753                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3754                 if (err < 0) {
3755                         BT_ERR("%s failed to generate new RPA", hdev->name);
3756                         return err;
3757                 }
3758
3759                 set_random_addr(req, &hdev->rpa);
3760
3761                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3762                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3763
3764                 return 0;
3765         }
3766
3767         /* If privacy is required but a resolvable private address is not
3768          * available, use an unresolvable private address. This is useful
3769          * for active scanning and non-connectable advertising.
3770          */
3771         if (require_privacy) {
3772                 bdaddr_t urpa;
3773
3774                 get_random_bytes(&urpa, 6);
3775                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3776
3777                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3778                 set_random_addr(req, &urpa);
3779                 return 0;
3780         }
3781
3782         /* If forcing the static address is in use or there is no public
3783          * address, use the static address as the random address (but skip
3784          * the HCI command if the current random address is already the
3785          * static one).
3786          */
3787         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3788             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3789                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3790                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3791                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3792                                     &hdev->static_addr);
3793                 return 0;
3794         }
3795
3796         /* Neither privacy nor static address is being used so use a
3797          * public address.
3798          */
3799         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3800
3801         return 0;
3802 }
3803
3804 /* Copy the Identity Address of the controller.
3805  *
3806  * If the controller has a public BD_ADDR, then by default use that one.
3807  * If this is an LE-only controller without a public address, default to
3808  * the static random address.
3809  *
3810  * For debugging purposes it is possible to force controllers with a
3811  * public address to use the static random address instead.
3812  */
3813 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3814                                u8 *bdaddr_type)
3815 {
3816         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3817             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3818                 bacpy(bdaddr, &hdev->static_addr);
3819                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3820         } else {
3821                 bacpy(bdaddr, &hdev->bdaddr);
3822                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3823         }
3824 }
3825
3826 /* Alloc HCI device */
3827 struct hci_dev *hci_alloc_dev(void)
3828 {
3829         struct hci_dev *hdev;
3830
3831         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3832         if (!hdev)
3833                 return NULL;
3834
3835         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3836         hdev->esco_type = (ESCO_HV1);
3837         hdev->link_mode = (HCI_LM_ACCEPT);
3838         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
3839         hdev->io_capability = 0x03;     /* No Input No Output */
3840         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3841         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3842
3843         hdev->sniff_max_interval = 800;
3844         hdev->sniff_min_interval = 80;
3845
3846         hdev->le_adv_channel_map = 0x07;
3847         hdev->le_scan_interval = 0x0060;
3848         hdev->le_scan_window = 0x0030;
3849         hdev->le_conn_min_interval = 0x0028;
3850         hdev->le_conn_max_interval = 0x0038;
3851
3852         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3853         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3854         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3855         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3856
3857         mutex_init(&hdev->lock);
3858         mutex_init(&hdev->req_lock);
3859
3860         INIT_LIST_HEAD(&hdev->mgmt_pending);
3861         INIT_LIST_HEAD(&hdev->blacklist);
3862         INIT_LIST_HEAD(&hdev->uuids);
3863         INIT_LIST_HEAD(&hdev->link_keys);
3864         INIT_LIST_HEAD(&hdev->long_term_keys);
3865         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3866         INIT_LIST_HEAD(&hdev->remote_oob_data);
3867         INIT_LIST_HEAD(&hdev->le_white_list);
3868         INIT_LIST_HEAD(&hdev->le_conn_params);
3869         INIT_LIST_HEAD(&hdev->pend_le_conns);
3870         INIT_LIST_HEAD(&hdev->conn_hash.list);
3871
3872         INIT_WORK(&hdev->rx_work, hci_rx_work);
3873         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3874         INIT_WORK(&hdev->tx_work, hci_tx_work);
3875         INIT_WORK(&hdev->power_on, hci_power_on);
3876
3877         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3878         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3879         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3880
3881         skb_queue_head_init(&hdev->rx_q);
3882         skb_queue_head_init(&hdev->cmd_q);
3883         skb_queue_head_init(&hdev->raw_q);
3884
3885         init_waitqueue_head(&hdev->req_wait_q);
3886
3887         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
3888
3889         hci_init_sysfs(hdev);
3890         discovery_init(hdev);
3891
3892         return hdev;
3893 }
3894 EXPORT_SYMBOL(hci_alloc_dev);
3895
3896 /* Free HCI device */
3897 void hci_free_dev(struct hci_dev *hdev)
3898 {
3899         /* Will be freed via the device release function */
3900         put_device(&hdev->dev);
3901 }
3902 EXPORT_SYMBOL(hci_free_dev);
3903
3904 /* Register HCI device */
3905 int hci_register_dev(struct hci_dev *hdev)
3906 {
3907         int id, error;
3908
3909         if (!hdev->open || !hdev->close)
3910                 return -EINVAL;
3911
3912         /* Do not allow HCI_AMP devices to register at index 0,
3913          * so the index can be used as the AMP controller ID.
3914          */
3915         switch (hdev->dev_type) {
3916         case HCI_BREDR:
3917                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3918                 break;
3919         case HCI_AMP:
3920                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3921                 break;
3922         default:
3923                 return -EINVAL;
3924         }
3925
3926         if (id < 0)
3927                 return id;
3928
3929         sprintf(hdev->name, "hci%d", id);
3930         hdev->id = id;
3931
3932         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3933
3934         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3935                                           WQ_MEM_RECLAIM, 1, hdev->name);
3936         if (!hdev->workqueue) {
3937                 error = -ENOMEM;
3938                 goto err;
3939         }
3940
3941         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3942                                               WQ_MEM_RECLAIM, 1, hdev->name);
3943         if (!hdev->req_workqueue) {
3944                 destroy_workqueue(hdev->workqueue);
3945                 error = -ENOMEM;
3946                 goto err;
3947         }
3948
3949         if (!IS_ERR_OR_NULL(bt_debugfs))
3950                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3951
3952         dev_set_name(&hdev->dev, "%s", hdev->name);
3953
3954         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3955                                                CRYPTO_ALG_ASYNC);
3956         if (IS_ERR(hdev->tfm_aes)) {
3957                 BT_ERR("Unable to create crypto context");
3958                 error = PTR_ERR(hdev->tfm_aes);
3959                 hdev->tfm_aes = NULL;
3960                 goto err_wqueue;
3961         }
3962
3963         error = device_add(&hdev->dev);
3964         if (error < 0)
3965                 goto err_tfm;
3966
3967         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3968                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3969                                     hdev);
3970         if (hdev->rfkill) {
3971                 if (rfkill_register(hdev->rfkill) < 0) {
3972                         rfkill_destroy(hdev->rfkill);
3973                         hdev->rfkill = NULL;
3974                 }
3975         }
3976
3977         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3978                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3979
3980         set_bit(HCI_SETUP, &hdev->dev_flags);
3981         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3982
3983         if (hdev->dev_type == HCI_BREDR) {
3984                 /* Assume BR/EDR support until proven otherwise (such as
3985                  * through reading supported features during init).
3986                  */
3987                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3988         }
3989
3990         write_lock(&hci_dev_list_lock);
3991         list_add(&hdev->list, &hci_dev_list);
3992         write_unlock(&hci_dev_list_lock);
3993
3994         hci_notify(hdev, HCI_DEV_REG);
3995         hci_dev_hold(hdev);
3996
3997         queue_work(hdev->req_workqueue, &hdev->power_on);
3998
3999         return id;
4000
4001 err_tfm:
4002         crypto_free_blkcipher(hdev->tfm_aes);
4003 err_wqueue:
4004         destroy_workqueue(hdev->workqueue);
4005         destroy_workqueue(hdev->req_workqueue);
4006 err:
4007         ida_simple_remove(&hci_index_ida, hdev->id);
4008
4009         return error;
4010 }
4011 EXPORT_SYMBOL(hci_register_dev);
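
/* Driver-side sketch of the alloc/register life cycle (illustrative;
 * my_open, my_close and my_send are hypothetical transport callbacks):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */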
4012
4013 /* Unregister HCI device */
4014 void hci_unregister_dev(struct hci_dev *hdev)
4015 {
4016         int i, id;
4017
4018         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4019
4020         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4021
4022         id = hdev->id;
4023
4024         write_lock(&hci_dev_list_lock);
4025         list_del(&hdev->list);
4026         write_unlock(&hci_dev_list_lock);
4027
4028         hci_dev_do_close(hdev);
4029
4030         for (i = 0; i < NUM_REASSEMBLY; i++)
4031                 kfree_skb(hdev->reassembly[i]);
4032
4033         cancel_work_sync(&hdev->power_on);
4034
4035         if (!test_bit(HCI_INIT, &hdev->flags) &&
4036             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
4037                 hci_dev_lock(hdev);
4038                 mgmt_index_removed(hdev);
4039                 hci_dev_unlock(hdev);
4040         }
4041
4042         /* mgmt_index_removed should take care of emptying the
4043          * pending list */
4044         BUG_ON(!list_empty(&hdev->mgmt_pending));
4045
4046         hci_notify(hdev, HCI_DEV_UNREG);
4047
4048         if (hdev->rfkill) {
4049                 rfkill_unregister(hdev->rfkill);
4050                 rfkill_destroy(hdev->rfkill);
4051         }
4052
4053         if (hdev->tfm_aes)
4054                 crypto_free_blkcipher(hdev->tfm_aes);
4055
4056         device_del(&hdev->dev);
4057
4058         debugfs_remove_recursive(hdev->debugfs);
4059
4060         destroy_workqueue(hdev->workqueue);
4061         destroy_workqueue(hdev->req_workqueue);
4062
4063         hci_dev_lock(hdev);
4064         hci_blacklist_clear(hdev);
4065         hci_uuids_clear(hdev);
4066         hci_link_keys_clear(hdev);
4067         hci_smp_ltks_clear(hdev);
4068         hci_smp_irks_clear(hdev);
4069         hci_remote_oob_data_clear(hdev);
4070         hci_white_list_clear(hdev);
4071         hci_conn_params_clear(hdev);
4072         hci_pend_le_conns_clear(hdev);
4073         hci_dev_unlock(hdev);
4074
4075         hci_dev_put(hdev);
4076
4077         ida_simple_remove(&hci_index_ida, id);
4078 }
4079 EXPORT_SYMBOL(hci_unregister_dev);
4080
4081 /* Suspend HCI device */
4082 int hci_suspend_dev(struct hci_dev *hdev)
4083 {
4084         hci_notify(hdev, HCI_DEV_SUSPEND);
4085         return 0;
4086 }
4087 EXPORT_SYMBOL(hci_suspend_dev);
4088
4089 /* Resume HCI device */
4090 int hci_resume_dev(struct hci_dev *hdev)
4091 {
4092         hci_notify(hdev, HCI_DEV_RESUME);
4093         return 0;
4094 }
4095 EXPORT_SYMBOL(hci_resume_dev);
4096
4097 /* Receive frame from HCI drivers */
4098 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4099 {
4100         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4101                       !test_bit(HCI_INIT, &hdev->flags))) {
4102                 kfree_skb(skb);
4103                 return -ENXIO;
4104         }
4105
4106         /* Incoming skb */
4107         bt_cb(skb)->incoming = 1;
4108
4109         /* Time stamp */
4110         __net_timestamp(skb);
4111
4112         skb_queue_tail(&hdev->rx_q, skb);
4113         queue_work(hdev->workqueue, &hdev->rx_work);
4114
4115         return 0;
4116 }
4117 EXPORT_SYMBOL(hci_recv_frame);
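
/* Driver-side sketch for handing one complete packet to the core
 * (illustrative; buf/len come from a hypothetical transport and the
 * packet type must be set before queueing):
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, len), buf, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	return hci_recv_frame(hdev, skb);
 */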
4118
4119 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4120                           int count, __u8 index)
4121 {
4122         int len = 0;
4123         int hlen = 0;
4124         int remain = count;
4125         struct sk_buff *skb;
4126         struct bt_skb_cb *scb;
4127
4128         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4129             index >= NUM_REASSEMBLY)
4130                 return -EILSEQ;
4131
4132         skb = hdev->reassembly[index];
4133
4134         if (!skb) {
4135                 switch (type) {
4136                 case HCI_ACLDATA_PKT:
4137                         len = HCI_MAX_FRAME_SIZE;
4138                         hlen = HCI_ACL_HDR_SIZE;
4139                         break;
4140                 case HCI_EVENT_PKT:
4141                         len = HCI_MAX_EVENT_SIZE;
4142                         hlen = HCI_EVENT_HDR_SIZE;
4143                         break;
4144                 case HCI_SCODATA_PKT:
4145                         len = HCI_MAX_SCO_SIZE;
4146                         hlen = HCI_SCO_HDR_SIZE;
4147                         break;
4148                 }
4149
4150                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4151                 if (!skb)
4152                         return -ENOMEM;
4153
4154                 scb = (void *) skb->cb;
4155                 scb->expect = hlen;
4156                 scb->pkt_type = type;
4157
4158                 hdev->reassembly[index] = skb;
4159         }
4160
4161         while (count) {
4162                 scb = (void *) skb->cb;
4163                 len = min_t(uint, scb->expect, count);
4164
4165                 memcpy(skb_put(skb, len), data, len);
4166
4167                 count -= len;
4168                 data += len;
4169                 scb->expect -= len;
4170                 remain = count;
4171
4172                 switch (type) {
4173                 case HCI_EVENT_PKT:
4174                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4175                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4176                                 scb->expect = h->plen;
4177
4178                                 if (skb_tailroom(skb) < scb->expect) {
4179                                         kfree_skb(skb);
4180                                         hdev->reassembly[index] = NULL;
4181                                         return -ENOMEM;
4182                                 }
4183                         }
4184                         break;
4185
4186                 case HCI_ACLDATA_PKT:
4187                         if (skb->len == HCI_ACL_HDR_SIZE) {
4188                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4189                                 scb->expect = __le16_to_cpu(h->dlen);
4190
4191                                 if (skb_tailroom(skb) < scb->expect) {
4192                                         kfree_skb(skb);
4193                                         hdev->reassembly[index] = NULL;
4194                                         return -ENOMEM;
4195                                 }
4196                         }
4197                         break;
4198
4199                 case HCI_SCODATA_PKT:
4200                         if (skb->len == HCI_SCO_HDR_SIZE) {
4201                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4202                                 scb->expect = h->dlen;
4203
4204                                 if (skb_tailroom(skb) < scb->expect) {
4205                                         kfree_skb(skb);
4206                                         hdev->reassembly[index] = NULL;
4207                                         return -ENOMEM;
4208                                 }
4209                         }
4210                         break;
4211                 }
4212
4213                 if (scb->expect == 0) {
4214                         /* Complete frame */
4215
4216                         bt_cb(skb)->pkt_type = type;
4217                         hci_recv_frame(hdev, skb);
4218
4219                         hdev->reassembly[index] = NULL;
4220                         return remain;
4221                 }
4222         }
4223
4224         return remain;
4225 }
4226
4227 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4228 {
4229         int rem = 0;
4230
4231         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4232                 return -EILSEQ;
4233
4234         while (count) {
4235                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4236                 if (rem < 0)
4237                         return rem;
4238
4239                 data += (count - rem);
4240                 count = rem;
4241         }
4242
4243         return rem;
4244 }
4245 EXPORT_SYMBOL(hci_recv_fragment);
4246
4247 #define STREAM_REASSEMBLY 0
4248
4249 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4250 {
4251         int type;
4252         int rem = 0;
4253
4254         while (count) {
4255                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4256
4257                 if (!skb) {
4258                         struct { char type; } *pkt;
4259
4260                         /* Start of the frame */
4261                         pkt = data;
4262                         type = pkt->type;
4263
4264                         data++;
4265                         count--;
4266                 } else
4267                         type = bt_cb(skb)->pkt_type;
4268
4269                 rem = hci_reassembly(hdev, type, data, count,
4270                                      STREAM_REASSEMBLY);
4271                 if (rem < 0)
4272                         return rem;
4273
4274                 data += (count - rem);
4275                 count = rem;
4276         }
4277
4278         return rem;
4279 }
4280 EXPORT_SYMBOL(hci_recv_stream_fragment);
4281
4282 /* ---- Interface to upper protocols ---- */
4283
4284 int hci_register_cb(struct hci_cb *cb)
4285 {
4286         BT_DBG("%p name %s", cb, cb->name);
4287
4288         write_lock(&hci_cb_list_lock);
4289         list_add(&cb->list, &hci_cb_list);
4290         write_unlock(&hci_cb_list_lock);
4291
4292         return 0;
4293 }
4294 EXPORT_SYMBOL(hci_register_cb);
4295
4296 int hci_unregister_cb(struct hci_cb *cb)
4297 {
4298         BT_DBG("%p name %s", cb, cb->name);
4299
4300         write_lock(&hci_cb_list_lock);
4301         list_del(&cb->list);
4302         write_unlock(&hci_cb_list_lock);
4303
4304         return 0;
4305 }
4306 EXPORT_SYMBOL(hci_unregister_cb);
4307
4308 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4309 {
4310         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4311
4312         /* Time stamp */
4313         __net_timestamp(skb);
4314
4315         /* Send copy to monitor */
4316         hci_send_to_monitor(hdev, skb);
4317
4318         if (atomic_read(&hdev->promisc)) {
4319                 /* Send copy to the sockets */
4320                 hci_send_to_sock(hdev, skb);
4321         }
4322
4323         /* Get rid of the skb owner prior to sending it to the driver. */
4324         skb_orphan(skb);
4325
4326         if (hdev->send(hdev, skb) < 0)
4327                 BT_ERR("%s sending frame failed", hdev->name);
4328 }
4329
4330 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4331 {
4332         skb_queue_head_init(&req->cmd_q);
4333         req->hdev = hdev;
4334         req->err = 0;
4335 }
4336
4337 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4338 {
4339         struct hci_dev *hdev = req->hdev;
4340         struct sk_buff *skb;
4341         unsigned long flags;
4342
4343         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4344
4345         /* If an error occurred during request building, remove all HCI
4346          * commands queued on the HCI request queue.
4347          */
4348         if (req->err) {
4349                 skb_queue_purge(&req->cmd_q);
4350                 return req->err;
4351         }
4352
4353         /* Do not allow empty requests */
4354         if (skb_queue_empty(&req->cmd_q))
4355                 return -ENODATA;
4356
4357         skb = skb_peek_tail(&req->cmd_q);
4358         bt_cb(skb)->req.complete = complete;
4359
4360         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4361         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4362         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4363
4364         queue_work(hdev->workqueue, &hdev->cmd_work);
4365
4366         return 0;
4367 }
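
/* Typical request pattern (compare le_scan_disable_work above;
 * my_complete and cp are illustrative names):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);
 */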
4368
4369 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4370                                        u32 plen, const void *param)
4371 {
4372         int len = HCI_COMMAND_HDR_SIZE + plen;
4373         struct hci_command_hdr *hdr;
4374         struct sk_buff *skb;
4375
4376         skb = bt_skb_alloc(len, GFP_ATOMIC);
4377         if (!skb)
4378                 return NULL;
4379
4380         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4381         hdr->opcode = cpu_to_le16(opcode);
4382         hdr->plen   = plen;
4383
4384         if (plen)
4385                 memcpy(skb_put(skb, plen), param, plen);
4386
4387         BT_DBG("skb len %d", skb->len);
4388
4389         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4390
4391         return skb;
4392 }
4393
4394 /* Send HCI command */
4395 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4396                  const void *param)
4397 {
4398         struct sk_buff *skb;
4399
4400         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4401
4402         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4403         if (!skb) {
4404                 BT_ERR("%s no memory for command", hdev->name);
4405                 return -ENOMEM;
4406         }
4407
4408         /* Stand-alone HCI commands must be flagged as
4409          * single-command requests.
4410          */
4411         bt_cb(skb)->req.start = true;
4412
4413         skb_queue_tail(&hdev->cmd_q, skb);
4414         queue_work(hdev->workqueue, &hdev->cmd_work);
4415
4416         return 0;
4417 }
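
/* For example, a stand-alone controller reset carries no parameters:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */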
4418
4419 /* Queue a command to an asynchronous HCI request */
4420 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4421                     const void *param, u8 event)
4422 {
4423         struct hci_dev *hdev = req->hdev;
4424         struct sk_buff *skb;
4425
4426         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4427
4428         /* If an error occurred during request building, there is no point in
4429          * queueing the HCI command. We can simply return.
4430          */
4431         if (req->err)
4432                 return;
4433
4434         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4435         if (!skb) {
4436                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4437                        hdev->name, opcode);
4438                 req->err = -ENOMEM;
4439                 return;
4440         }
4441
4442         if (skb_queue_empty(&req->cmd_q))
4443                 bt_cb(skb)->req.start = true;
4444
4445         bt_cb(skb)->req.event = event;
4446
4447         skb_queue_tail(&req->cmd_q, skb);
4448 }
4449
4450 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4451                  const void *param)
4452 {
4453         hci_req_add_ev(req, opcode, plen, param, 0);
4454 }
4455
4456 /* Get data from the previously sent command */
4457 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4458 {
4459         struct hci_command_hdr *hdr;
4460
4461         if (!hdev->sent_cmd)
4462                 return NULL;
4463
4464         hdr = (void *) hdev->sent_cmd->data;
4465
4466         if (hdr->opcode != cpu_to_le16(opcode))
4467                 return NULL;
4468
4469         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4470
4471         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4472 }
4473
4474 /* Send ACL data */
4475 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4476 {
4477         struct hci_acl_hdr *hdr;
4478         int len = skb->len;
4479
4480         skb_push(skb, HCI_ACL_HDR_SIZE);
4481         skb_reset_transport_header(skb);
4482         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4483         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4484         hdr->dlen   = cpu_to_le16(len);
4485 }
4486
4487 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4488                           struct sk_buff *skb, __u16 flags)
4489 {
4490         struct hci_conn *conn = chan->conn;
4491         struct hci_dev *hdev = conn->hdev;
4492         struct sk_buff *list;
4493
4494         skb->len = skb_headlen(skb);
4495         skb->data_len = 0;
4496
4497         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4498
4499         switch (hdev->dev_type) {
4500         case HCI_BREDR:
4501                 hci_add_acl_hdr(skb, conn->handle, flags);
4502                 break;
4503         case HCI_AMP:
4504                 hci_add_acl_hdr(skb, chan->handle, flags);
4505                 break;
4506         default:
4507                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4508                 return;
4509         }
4510
4511         list = skb_shinfo(skb)->frag_list;
4512         if (!list) {
4513                 /* Non-fragmented */
4514                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4515
4516                 skb_queue_tail(queue, skb);
4517         } else {
4518                 /* Fragmented */
4519                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4520
4521                 skb_shinfo(skb)->frag_list = NULL;
4522
4523                 /* Queue all fragments atomically */
4524                 spin_lock(&queue->lock);
4525
4526                 __skb_queue_tail(queue, skb);
4527
4528                 flags &= ~ACL_START;
4529                 flags |= ACL_CONT;
4530                 do {
4531                         skb = list; list = list->next;
4532
4533                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4534                         hci_add_acl_hdr(skb, conn->handle, flags);
4535
4536                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4537
4538                         __skb_queue_tail(queue, skb);
4539                 } while (list);
4540
4541                 spin_unlock(&queue->lock);
4542         }
4543 }
4544
4545 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4546 {
4547         struct hci_dev *hdev = chan->conn->hdev;
4548
4549         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4550
4551         hci_queue_acl(chan, &chan->data_q, skb, flags);
4552
4553         queue_work(hdev->workqueue, &hdev->tx_work);
4554 }
4555
4556 /* Send SCO data */
4557 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4558 {
4559         struct hci_dev *hdev = conn->hdev;
4560         struct hci_sco_hdr hdr;
4561
4562         BT_DBG("%s len %d", hdev->name, skb->len);
4563
4564         hdr.handle = cpu_to_le16(conn->handle);
4565         hdr.dlen   = skb->len;
4566
4567         skb_push(skb, HCI_SCO_HDR_SIZE);
4568         skb_reset_transport_header(skb);
4569         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4570
4571         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4572
4573         skb_queue_tail(&conn->data_q, skb);
4574         queue_work(hdev->workqueue, &hdev->tx_work);
4575 }
4576
4577 /* ---- HCI TX task (outgoing data) ---- */
4578
4579 /* HCI Connection scheduler */
4580 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4581                                      int *quote)
4582 {
4583         struct hci_conn_hash *h = &hdev->conn_hash;
4584         struct hci_conn *conn = NULL, *c;
4585         unsigned int num = 0, min = ~0;
4586
4587         /* We don't have to lock the device here. Connections are always
4588          * added and removed with the TX task disabled. */
4589
4590         rcu_read_lock();
4591
4592         list_for_each_entry_rcu(c, &h->list, list) {
4593                 if (c->type != type || skb_queue_empty(&c->data_q))
4594                         continue;
4595
4596                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4597                         continue;
4598
4599                 num++;
4600
4601                 if (c->sent < min) {
4602                         min  = c->sent;
4603                         conn = c;
4604                 }
4605
4606                 if (hci_conn_num(hdev, type) == num)
4607                         break;
4608         }
4609
4610         rcu_read_unlock();
4611
4612         if (conn) {
4613                 int cnt, q;
4614
4615                 switch (conn->type) {
4616                 case ACL_LINK:
4617                         cnt = hdev->acl_cnt;
4618                         break;
4619                 case SCO_LINK:
4620                 case ESCO_LINK:
4621                         cnt = hdev->sco_cnt;
4622                         break;
4623                 case LE_LINK:
4624                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4625                         break;
4626                 default:
4627                         cnt = 0;
4628                         BT_ERR("Unknown link type");
4629                 }
4630
4631                 q = cnt / num;
4632                 *quote = q ? q : 1;
4633         } else
4634                 *quote = 0;
4635
4636         BT_DBG("conn %p quote %d", conn, *quote);
4637         return conn;
4638 }
4639
4640 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4641 {
4642         struct hci_conn_hash *h = &hdev->conn_hash;
4643         struct hci_conn *c;
4644
4645         BT_ERR("%s link tx timeout", hdev->name);
4646
4647         rcu_read_lock();
4648
4649         /* Kill stalled connections */
4650         list_for_each_entry_rcu(c, &h->list, list) {
4651                 if (c->type == type && c->sent) {
4652                         BT_ERR("%s killing stalled connection %pMR",
4653                                hdev->name, &c->dst);
4654                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4655                 }
4656         }
4657
4658         rcu_read_unlock();
4659 }
4660
4661 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4662                                       int *quote)
4663 {
4664         struct hci_conn_hash *h = &hdev->conn_hash;
4665         struct hci_chan *chan = NULL;
4666         unsigned int num = 0, min = ~0, cur_prio = 0;
4667         struct hci_conn *conn;
4668         int cnt, q, conn_num = 0;
4669
4670         BT_DBG("%s", hdev->name);
4671
4672         rcu_read_lock();
4673
4674         list_for_each_entry_rcu(conn, &h->list, list) {
4675                 struct hci_chan *tmp;
4676
4677                 if (conn->type != type)
4678                         continue;
4679
4680                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4681                         continue;
4682
4683                 conn_num++;
4684
4685                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4686                         struct sk_buff *skb;
4687
4688                         if (skb_queue_empty(&tmp->data_q))
4689                                 continue;
4690
4691                         skb = skb_peek(&tmp->data_q);
4692                         if (skb->priority < cur_prio)
4693                                 continue;
4694
4695                         if (skb->priority > cur_prio) {
4696                                 num = 0;
4697                                 min = ~0;
4698                                 cur_prio = skb->priority;
4699                         }
4700
4701                         num++;
4702
4703                         if (conn->sent < min) {
4704                                 min  = conn->sent;
4705                                 chan = tmp;
4706                         }
4707                 }
4708
4709                 if (hci_conn_num(hdev, type) == conn_num)
4710                         break;
4711         }
4712
4713         rcu_read_unlock();
4714
4715         if (!chan)
4716                 return NULL;
4717
4718         switch (chan->conn->type) {
4719         case ACL_LINK:
4720                 cnt = hdev->acl_cnt;
4721                 break;
4722         case AMP_LINK:
4723                 cnt = hdev->block_cnt;
4724                 break;
4725         case SCO_LINK:
4726         case ESCO_LINK:
4727                 cnt = hdev->sco_cnt;
4728                 break;
4729         case LE_LINK:
4730                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4731                 break;
4732         default:
4733                 cnt = 0;
4734                 BT_ERR("Unknown link type");
4735         }
4736
4737         q = cnt / num;
4738         *quote = q ? q : 1;
4739         BT_DBG("chan %p quote %d", chan, *quote);
4740         return chan;
4741 }
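
/* Channels are served strictly by the priority of their queued skbs;
 * hci_prio_recalculate() below promotes channels that were left unsent
 * in the last round so lower-priority traffic is not starved forever.
 */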
4742
4743 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4744 {
4745         struct hci_conn_hash *h = &hdev->conn_hash;
4746         struct hci_conn *conn;
4747         int num = 0;
4748
4749         BT_DBG("%s", hdev->name);
4750
4751         rcu_read_lock();
4752
4753         list_for_each_entry_rcu(conn, &h->list, list) {
4754                 struct hci_chan *chan;
4755
4756                 if (conn->type != type)
4757                         continue;
4758
4759                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4760                         continue;
4761
4762                 num++;
4763
4764                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4765                         struct sk_buff *skb;
4766
4767                         if (chan->sent) {
4768                                 chan->sent = 0;
4769                                 continue;
4770                         }
4771
4772                         if (skb_queue_empty(&chan->data_q))
4773                                 continue;
4774
4775                         skb = skb_peek(&chan->data_q);
4776                         if (skb->priority >= HCI_PRIO_MAX - 1)
4777                                 continue;
4778
4779                         skb->priority = HCI_PRIO_MAX - 1;
4780
4781                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4782                                skb->priority);
4783                 }
4784
4785                 if (hci_conn_num(hdev, type) == num)
4786                         break;
4787         }
4788
4789         rcu_read_unlock();
4790
4791 }
4792
4793 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4794 {
4795         /* Calculate count of blocks used by this packet */
4796         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4797 }
4798
4799 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4800 {
4801         if (!test_bit(HCI_RAW, &hdev->flags)) {
4802                 /* ACL tx timeout must be longer than maximum
4803                  * link supervision timeout (40.9 seconds) */
4804                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4805                                        HCI_ACL_TX_TIMEOUT))
4806                         hci_link_tx_to(hdev, ACL_LINK);
4807         }
4808 }
4809
4810 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4811 {
4812         unsigned int cnt = hdev->acl_cnt;
4813         struct hci_chan *chan;
4814         struct sk_buff *skb;
4815         int quote;
4816
4817         __check_timeout(hdev, cnt);
4818
4819         while (hdev->acl_cnt &&
4820                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4821                 u32 priority = (skb_peek(&chan->data_q))->priority;
4822                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4823                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4824                                skb->len, skb->priority);
4825
4826                         /* Stop if priority has changed */
4827                         if (skb->priority < priority)
4828                                 break;
4829
4830                         skb = skb_dequeue(&chan->data_q);
4831
4832                         hci_conn_enter_active_mode(chan->conn,
4833                                                    bt_cb(skb)->force_active);
4834
4835                         hci_send_frame(hdev, skb);
4836                         hdev->acl_last_tx = jiffies;
4837
4838                         hdev->acl_cnt--;
4839                         chan->sent++;
4840                         chan->conn->sent++;
4841                 }
4842         }
4843
4844         if (cnt != hdev->acl_cnt)
4845                 hci_prio_recalculate(hdev, ACL_LINK);
4846 }
4847
4848 static void hci_sched_acl_blk(struct hci_dev *hdev)
4849 {
4850         unsigned int cnt = hdev->block_cnt;
4851         struct hci_chan *chan;
4852         struct sk_buff *skb;
4853         int quote;
4854         u8 type;
4855
4856         __check_timeout(hdev, cnt);
4857
4858         BT_DBG("%s", hdev->name);
4859
4860         if (hdev->dev_type == HCI_AMP)
4861                 type = AMP_LINK;
4862         else
4863                 type = ACL_LINK;
4864
4865         while (hdev->block_cnt > 0 &&
4866                (chan = hci_chan_sent(hdev, type, &quote))) {
4867                 u32 priority = (skb_peek(&chan->data_q))->priority;
4868                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4869                         int blocks;
4870
4871                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4872                                skb->len, skb->priority);
4873
4874                         /* Stop if priority has changed */
4875                         if (skb->priority < priority)
4876                                 break;
4877
4878                         skb = skb_dequeue(&chan->data_q);
4879
4880                         blocks = __get_blocks(hdev, skb);
4881                         if (blocks > hdev->block_cnt)
4882                                 return;
4883
4884                         hci_conn_enter_active_mode(chan->conn,
4885                                                    bt_cb(skb)->force_active);
4886
4887                         hci_send_frame(hdev, skb);
4888                         hdev->acl_last_tx = jiffies;
4889
4890                         hdev->block_cnt -= blocks;
4891                         quote -= blocks;
4892
4893                         chan->sent += blocks;
4894                         chan->conn->sent += blocks;
4895                 }
4896         }
4897
4898         if (cnt != hdev->block_cnt)
4899                 hci_prio_recalculate(hdev, type);
4900 }
4901
4902 static void hci_sched_acl(struct hci_dev *hdev)
4903 {
4904         BT_DBG("%s", hdev->name);
4905
4906         /* No ACL links on a BR/EDR controller, nothing to schedule */
4907         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4908                 return;
4909
4910         /* No AMP links on an AMP controller, nothing to schedule */
4911         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4912                 return;
4913
4914         switch (hdev->flow_ctl_mode) {
4915         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4916                 hci_sched_acl_pkt(hdev);
4917                 break;
4918
4919         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4920                 hci_sched_acl_blk(hdev);
4921                 break;
4922         }
4923 }
4924
4925 /* Schedule SCO */
4926 static void hci_sched_sco(struct hci_dev *hdev)
4927 {
4928         struct hci_conn *conn;
4929         struct sk_buff *skb;
4930         int quote;
4931
4932         BT_DBG("%s", hdev->name);
4933
4934         if (!hci_conn_num(hdev, SCO_LINK))
4935                 return;
4936
4937         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4938                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4939                         BT_DBG("skb %p len %d", skb, skb->len);
4940                         hci_send_frame(hdev, skb);
4941
4942                         conn->sent++;
4943                         if (conn->sent == ~0)
4944                                 conn->sent = 0;
4945                 }
4946         }
4947 }
4948
4949 static void hci_sched_esco(struct hci_dev *hdev)
4950 {
4951         struct hci_conn *conn;
4952         struct sk_buff *skb;
4953         int quote;
4954
4955         BT_DBG("%s", hdev->name);
4956
4957         if (!hci_conn_num(hdev, ESCO_LINK))
4958                 return;
4959
4960         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4961                                                      &quote))) {
4962                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4963                         BT_DBG("skb %p len %d", skb, skb->len);
4964                         hci_send_frame(hdev, skb);
4965
4966                         conn->sent++;
4967                         if (conn->sent == ~0)
4968                                 conn->sent = 0;
4969                 }
4970         }
4971 }
4972
4973 static void hci_sched_le(struct hci_dev *hdev)
4974 {
4975         struct hci_chan *chan;
4976         struct sk_buff *skb;
4977         int quote, cnt, tmp;
4978
4979         BT_DBG("%s", hdev->name);
4980
4981         if (!hci_conn_num(hdev, LE_LINK))
4982                 return;
4983
4984         if (!test_bit(HCI_RAW, &hdev->flags)) {
4985                 /* LE tx timeout must be longer than maximum
4986                  * link supervision timeout (40.9 seconds) */
4987                 if (!hdev->le_cnt && hdev->le_pkts &&
4988                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4989                         hci_link_tx_to(hdev, LE_LINK);
4990         }
4991
4992         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4993         tmp = cnt;
4994         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4995                 u32 priority = (skb_peek(&chan->data_q))->priority;
4996                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4997                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4998                                skb->len, skb->priority);
4999
5000                         /* Stop if priority has changed */
5001                         if (skb->priority < priority)
5002                                 break;
5003
5004                         skb = skb_dequeue(&chan->data_q);
5005
5006                         hci_send_frame(hdev, skb);
5007                         hdev->le_last_tx = jiffies;
5008
5009                         cnt--;
5010                         chan->sent++;
5011                         chan->conn->sent++;
5012                 }
5013         }
5014
5015         if (hdev->le_pkts)
5016                 hdev->le_cnt = cnt;
5017         else
5018                 hdev->acl_cnt = cnt;
5019
5020         if (cnt != tmp)
5021                 hci_prio_recalculate(hdev, LE_LINK);
5022 }
5023
5024 static void hci_tx_work(struct work_struct *work)
5025 {
5026         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5027         struct sk_buff *skb;
5028
5029         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5030                hdev->sco_cnt, hdev->le_cnt);
5031
5032         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5033                 /* Schedule queues and send frames to the HCI driver */
5034                 hci_sched_acl(hdev);
5035                 hci_sched_sco(hdev);
5036                 hci_sched_esco(hdev);
5037                 hci_sched_le(hdev);
5038         }
5039
5040         /* Send next queued raw (unknown type) packet */
5041         while ((skb = skb_dequeue(&hdev->raw_q)))
5042                 hci_send_frame(hdev, skb);
5043 }
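
/* Illustrative sketch, assuming the pattern used by the send helpers
 * earlier in this file: producers never call hci_tx_work() directly.
 * They queue the frame on the relevant data_q and kick the TX work item,
 * roughly:
 *
 *	skb_queue_tail(&conn->data_q, skb);
 *	queue_work(hdev->workqueue, &hdev->tx_work);
 *
 * so all transmission funnels through the schedulers above.
 */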
5044
5045 /* ----- HCI RX task (incoming data processing) ----- */
5046
5047 /* ACL data packet */
5048 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5049 {
5050         struct hci_acl_hdr *hdr = (void *) skb->data;
5051         struct hci_conn *conn;
5052         __u16 handle, flags;
5053
5054         skb_pull(skb, HCI_ACL_HDR_SIZE);
5055
5056         handle = __le16_to_cpu(hdr->handle);
5057         flags  = hci_flags(handle);
5058         handle = hci_handle(handle);
5059
5060         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5061                handle, flags);
5062
5063         hdev->stat.acl_rx++;
5064
5065         hci_dev_lock(hdev);
5066         conn = hci_conn_hash_lookup_handle(hdev, handle);
5067         hci_dev_unlock(hdev);
5068
5069         if (conn) {
5070                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5071
5072                 /* Send to upper protocol */
5073                 l2cap_recv_acldata(conn, skb, flags);
5074                 return;
5075         }
5076
5077         BT_ERR("%s ACL packet for unknown connection handle %d",
5078                hdev->name, handle);
5079
5080         kfree_skb(skb);
5081 }
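
/* Note on the header split above (this matches the hci_handle() and
 * hci_flags() macros from hci.h): the 16-bit handle field of the ACL
 * header packs a 12-bit connection handle together with the
 * packet-boundary and broadcast flag bits:
 *
 *	handle = raw_handle & 0x0fff;
 *	flags  = raw_handle >> 12;
 */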
5082
5083 /* SCO data packet */
5084 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5085 {
5086         struct hci_sco_hdr *hdr = (void *) skb->data;
5087         struct hci_conn *conn;
5088         __u16 handle;
5089
5090         skb_pull(skb, HCI_SCO_HDR_SIZE);
5091
5092         handle = __le16_to_cpu(hdr->handle);
5093
5094         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5095
5096         hdev->stat.sco_rx++;
5097
5098         hci_dev_lock(hdev);
5099         conn = hci_conn_hash_lookup_handle(hdev, handle);
5100         hci_dev_unlock(hdev);
5101
5102         if (conn) {
5103                 /* Send to upper protocol */
5104                 sco_recv_scodata(conn, skb);
5105                 return;
5106         }
5107
5108         BT_ERR("%s SCO packet for unknown connection handle %d",
5109                hdev->name, handle);
5110
5111         kfree_skb(skb);
5112 }
5113
5114 static bool hci_req_is_complete(struct hci_dev *hdev)
5115 {
5116         struct sk_buff *skb;
5117
5118         skb = skb_peek(&hdev->cmd_q);
5119         if (!skb)
5120                 return true;
5121
5122         return bt_cb(skb)->req.start;
5123 }
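
/* Editorial note, assuming the tagging done by hci_req_add() earlier in
 * this file: only the first command queued for a request carries
 * req.start == true, so an empty cmd_q, or a start-marked skb at its
 * head, means the previous request has fully drained.
 */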
5124
5125 static void hci_resend_last(struct hci_dev *hdev)
5126 {
5127         struct hci_command_hdr *sent;
5128         struct sk_buff *skb;
5129         u16 opcode;
5130
5131         if (!hdev->sent_cmd)
5132                 return;
5133
5134         sent = (void *) hdev->sent_cmd->data;
5135         opcode = __le16_to_cpu(sent->opcode);
5136         if (opcode == HCI_OP_RESET)
5137                 return;
5138
5139         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5140         if (!skb)
5141                 return;
5142
5143         skb_queue_head(&hdev->cmd_q, skb);
5144         queue_work(hdev->workqueue, &hdev->cmd_work);
5145 }
5146
5147 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5148 {
5149         hci_req_complete_t req_complete = NULL;
5150         struct sk_buff *skb;
5151         unsigned long flags;
5152
5153         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5154
5155         /* If the completed command doesn't match the last one that was
5156          * sent, we need to do special handling of it.
5157          */
5158         if (!hci_sent_cmd_data(hdev, opcode)) {
5159                 /* Some CSR-based controllers generate a spontaneous
5160                  * reset complete event during init, and any pending
5161                  * command will then never be completed. In such a
5162                  * case we need to resend whatever the last sent
5163                  * command was.
5164                  */
5165                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5166                         hci_resend_last(hdev);
5167
5168                 return;
5169         }
5170
5171         /* If the command succeeded and there are still more commands in
5172          * this request, the request is not yet complete.
5173          */
5174         if (!status && !hci_req_is_complete(hdev))
5175                 return;
5176
5177         /* If this was the last command in a request, the complete
5178          * callback would be found in hdev->sent_cmd instead of the
5179          * command queue (hdev->cmd_q).
5180          */
5181         if (hdev->sent_cmd) {
5182                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5183
5184                 if (req_complete) {
5185                         /* We must set the complete callback to NULL to
5186                          * avoid calling the callback more than once if
5187                          * this function gets called again.
5188                          */
5189                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
5190
5191                         goto call_complete;
5192                 }
5193         }
5194
5195         /* Remove all pending commands belonging to this request */
5196         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5197         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5198                 if (bt_cb(skb)->req.start) {
5199                         __skb_queue_head(&hdev->cmd_q, skb);
5200                         break;
5201                 }
5202
5203                 req_complete = bt_cb(skb)->req.complete;
5204                 kfree_skb(skb);
5205         }
5206         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5207
5208 call_complete:
5209         if (req_complete)
5210                 req_complete(hdev, status);
5211 }
5212
5213 static void hci_rx_work(struct work_struct *work)
5214 {
5215         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5216         struct sk_buff *skb;
5217
5218         BT_DBG("%s", hdev->name);
5219
5220         while ((skb = skb_dequeue(&hdev->rx_q))) {
5221                 /* Send copy to monitor */
5222                 hci_send_to_monitor(hdev, skb);
5223
5224                 if (atomic_read(&hdev->promisc)) {
5225                         /* Send copy to the sockets */
5226                         hci_send_to_sock(hdev, skb);
5227                 }
5228
5229                 if (test_bit(HCI_RAW, &hdev->flags) ||
5230                     test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5231                         kfree_skb(skb);
5232                         continue;
5233                 }
5234
5235                 if (test_bit(HCI_INIT, &hdev->flags)) {
5236                         /* Don't process data packets in this state. */
5237                         switch (bt_cb(skb)->pkt_type) {
5238                         case HCI_ACLDATA_PKT:
5239                         case HCI_SCODATA_PKT:
5240                                 kfree_skb(skb);
5241                                 continue;
5242                         }
5243                 }
5244
5245                 /* Process frame */
5246                 switch (bt_cb(skb)->pkt_type) {
5247                 case HCI_EVENT_PKT:
5248                         BT_DBG("%s Event packet", hdev->name);
5249                         hci_event_packet(hdev, skb);
5250                         break;
5251
5252                 case HCI_ACLDATA_PKT:
5253                         BT_DBG("%s ACL data packet", hdev->name);
5254                         hci_acldata_packet(hdev, skb);
5255                         break;
5256
5257                 case HCI_SCODATA_PKT:
5258                         BT_DBG("%s SCO data packet", hdev->name);
5259                         hci_scodata_packet(hdev, skb);
5260                         break;
5261
5262                 default:
5263                         kfree_skb(skb);
5264                         break;
5265                 }
5266         }
5267 }
5268
5269 static void hci_cmd_work(struct work_struct *work)
5270 {
5271         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5272         struct sk_buff *skb;
5273
5274         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5275                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5276
5277         /* Send queued commands */
5278         if (atomic_read(&hdev->cmd_cnt)) {
5279                 skb = skb_dequeue(&hdev->cmd_q);
5280                 if (!skb)
5281                         return;
5282
5283                 kfree_skb(hdev->sent_cmd);
5284
5285                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5286                 if (hdev->sent_cmd) {
5287                         atomic_dec(&hdev->cmd_cnt);
5288                         hci_send_frame(hdev, skb);
5289                         if (test_bit(HCI_RESET, &hdev->flags))
5290                                 del_timer(&hdev->cmd_timer);
5291                         else
5292                                 mod_timer(&hdev->cmd_timer,
5293                                           jiffies + HCI_CMD_TIMEOUT);
5294                 } else {
5295                         skb_queue_head(&hdev->cmd_q, skb);
5296                         queue_work(hdev->workqueue, &hdev->cmd_work);
5297                 }
5298         }
5299 }
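
/* Illustrative producer sketch, assuming hci_send_cmd() defined earlier
 * in this file: commands enter through hdev->cmd_q and the work item
 * above drains them one at a time, gated by cmd_cnt. For example, a
 * parameterless reset could be queued with:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * which packages the opcode into an skb, queues it on hdev->cmd_q and
 * schedules hdev->cmd_work.
 */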
5300
5301 void hci_req_add_le_scan_disable(struct hci_request *req)
5302 {
5303         struct hci_cp_le_set_scan_enable cp;
5304
5305         memset(&cp, 0, sizeof(cp));
5306         cp.enable = LE_SCAN_DISABLE;
5307         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5308 }
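
/* Illustrative usage sketch (not part of the original file; the function
 * names below are made up): a caller builds a request, adds the
 * scan-disable command and runs it asynchronously, exactly as
 * hci_update_background_scan() does further down.
 *
 *	static void example_scan_off_complete(struct hci_dev *hdev,
 *					      u8 status)
 *	{
 *		if (status)
 *			BT_ERR("LE scan disable failed: status 0x%2.2x",
 *			       status);
 *	}
 *
 *	static int example_disable_le_scan(struct hci_dev *hdev)
 *	{
 *		struct hci_request req;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add_le_scan_disable(&req);
 *		return hci_req_run(&req, example_scan_off_complete);
 *	}
 */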
5309
5310 void hci_req_add_le_passive_scan(struct hci_request *req)
5311 {
5312         struct hci_cp_le_set_scan_param param_cp;
5313         struct hci_cp_le_set_scan_enable enable_cp;
5314         struct hci_dev *hdev = req->hdev;
5315         u8 own_addr_type;
5316
5317         /* Set require_privacy to true to avoid identification from
5318          * unknown peer devices. Since this is passive scanning, no
5319          * SCAN_REQ using the local identity should be sent. Mandating
5320          * privacy is just an extra precaution.
5321          */
5322         if (hci_update_random_address(req, true, &own_addr_type))
5323                 return;
5324
5325         memset(&param_cp, 0, sizeof(param_cp));
5326         param_cp.type = LE_SCAN_PASSIVE;
5327         param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5328         param_cp.window = cpu_to_le16(hdev->le_scan_window);
5329         param_cp.own_address_type = own_addr_type;
5330         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5331                     &param_cp);
5332
5333         memset(&enable_cp, 0, sizeof(enable_cp));
5334         enable_cp.enable = LE_SCAN_ENABLE;
5335         enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5336         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5337                     &enable_cp);
5338 }
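
/* Note (a Bluetooth Core Specification fact, not stated in this file):
 * the interval and window values programmed above are in units of
 * 0.625 ms, so e.g. a stored le_scan_window of 0x0010 corresponds to a
 * 10 ms scan window.
 */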
5339
5340 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5341 {
5342         if (status)
5343                 BT_DBG("HCI request failed to update background scanning: "
5344                        "status 0x%2.2x", status);
5345 }
5346
5347 /* This function controls the background scanning based on the
5348  * hdev->pend_le_conns list. If there are pending LE connections, we start
5349  * the background scanning; otherwise we stop it.
5350  *
5351  * This function requires that the caller holds hdev->lock.
5352  */
5353 void hci_update_background_scan(struct hci_dev *hdev)
5354 {
5355         struct hci_request req;
5356         struct hci_conn *conn;
5357         int err;
5358
5359         hci_req_init(&req, hdev);
5360
5361         if (list_empty(&hdev->pend_le_conns)) {
5362                 /* If there are no pending LE connections, we should
5363                  * stop the background scanning.
5364                  */
5365
5366                 /* If the controller is not scanning, we are done. */
5367                 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5368                         return;
5369
5370                 hci_req_add_le_scan_disable(&req);
5371
5372                 BT_DBG("%s stopping background scanning", hdev->name);
5373         } else {
5374                 /* If there is at least one pending LE connection, we should
5375                  * keep the background scan running.
5376                  */
5377
5378                 /* If the controller is connecting, we should not start
5379                  * scanning, since some controllers are not able to scan
5380                  * and connect at the same time.
5381                  */
5382                 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5383                 if (conn)
5384                         return;
5385
5386                 /* If the controller is currently scanning, we stop it to ensure
5387                  * we don't miss any advertising (due to the duplicates filter).
5388                  */
5389                 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5390                         hci_req_add_le_scan_disable(&req);
5391
5392                 hci_req_add_le_passive_scan(&req);
5393
5394                 BT_DBG("%s starting background scanning", hdev->name);
5395         }
5396
5397         err = hci_req_run(&req, update_background_scan_complete);
5398         if (err)
5399                 BT_ERR("Failed to run HCI request: err %d", err);
5400 }
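
/* Illustrative caller sketch honouring the locking rule documented above
 * hci_update_background_scan() (the function name below is made up):
 *
 *	static void example_trigger_background_scan(struct hci_dev *hdev)
 *	{
 *		hci_dev_lock(hdev);
 *		hci_update_background_scan(hdev);
 *		hci_dev_unlock(hdev);
 *	}
 */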