xref: /linux/net/bluetooth/hci_sync.c (revision 1a9239bb4253f9076b5b4b2a1a4e8d7defd77a95)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * BlueZ - Bluetooth protocol stack for Linux
4   *
5   * Copyright (C) 2021 Intel Corporation
6   * Copyright 2023 NXP
7   */
8  
9  #include <linux/property.h>
10  
11  #include <net/bluetooth/bluetooth.h>
12  #include <net/bluetooth/hci_core.h>
13  #include <net/bluetooth/mgmt.h>
14  
15  #include "hci_codec.h"
16  #include "hci_debugfs.h"
17  #include "smp.h"
18  #include "eir.h"
19  #include "msft.h"
20  #include "aosp.h"
21  #include "leds.h"
22  
23  static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
24  				  struct sk_buff *skb)
25  {
26  	bt_dev_dbg(hdev, "result 0x%2.2x", result);
27  
28  	if (hdev->req_status != HCI_REQ_PEND)
29  		return;
30  
31  	hdev->req_result = result;
32  	hdev->req_status = HCI_REQ_DONE;
33  
34  	/* Free the request command so it is not used as the response */
35  	kfree_skb(hdev->req_skb);
36  	hdev->req_skb = NULL;
37  
38  	if (skb) {
39  		struct sock *sk = hci_skb_sk(skb);
40  
41  		/* Drop sk reference if set */
42  		if (sk)
43  			sock_put(sk);
44  
45  		hdev->req_rsp = skb_get(skb);
46  	}
47  
48  	wake_up_interruptible(&hdev->req_wait_q);
49  }
50  
51  struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
52  				   const void *param, struct sock *sk)
53  {
54  	int len = HCI_COMMAND_HDR_SIZE + plen;
55  	struct hci_command_hdr *hdr;
56  	struct sk_buff *skb;
57  
58  	skb = bt_skb_alloc(len, GFP_ATOMIC);
59  	if (!skb)
60  		return NULL;
61  
62  	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
63  	hdr->opcode = cpu_to_le16(opcode);
64  	hdr->plen   = plen;
65  
66  	if (plen)
67  		skb_put_data(skb, param, plen);
68  
69  	bt_dev_dbg(hdev, "skb len %d", skb->len);
70  
71  	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
72  	hci_skb_opcode(skb) = opcode;
73  
74  	/* Grab a reference if the command needs to be associated with a sock
75  	 * (e.g. the mgmt socket that initiated the command).
76  	 */
77  	if (sk) {
78  		hci_skb_sk(skb) = sk;
79  		sock_hold(sk);
80  	}
81  
82  	return skb;
83  }
84  
85  static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
86  			     const void *param, u8 event, struct sock *sk)
87  {
88  	struct hci_dev *hdev = req->hdev;
89  	struct sk_buff *skb;
90  
91  	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
92  
93  	/* If an error occurred during request building, there is no point in
94  	 * queueing the HCI command. We can simply return.
95  	 */
96  	if (req->err)
97  		return;
98  
99  	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
100  	if (!skb) {
101  		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
102  			   opcode);
103  		req->err = -ENOMEM;
104  		return;
105  	}
106  
107  	if (skb_queue_empty(&req->cmd_q))
108  		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
109  
110  	hci_skb_event(skb) = event;
111  
112  	skb_queue_tail(&req->cmd_q, skb);
113  }
114  
115  static int hci_req_sync_run(struct hci_request *req)
116  {
117  	struct hci_dev *hdev = req->hdev;
118  	struct sk_buff *skb;
119  	unsigned long flags;
120  
121  	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
122  
123  	/* If an error occurred during request building, remove all HCI
124  	 * commands queued on the HCI request queue.
125  	 */
126  	if (req->err) {
127  		skb_queue_purge(&req->cmd_q);
128  		return req->err;
129  	}
130  
131  	/* Do not allow empty requests */
132  	if (skb_queue_empty(&req->cmd_q))
133  		return -ENODATA;
134  
135  	skb = skb_peek_tail(&req->cmd_q);
136  	bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
137  	bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
138  
139  	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
140  	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
141  	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
142  
143  	queue_work(hdev->workqueue, &hdev->cmd_work);
144  
145  	return 0;
146  }
147  
148  static void hci_request_init(struct hci_request *req, struct hci_dev *hdev)
149  {
150  	skb_queue_head_init(&req->cmd_q);
151  	req->hdev = hdev;
152  	req->err = 0;
153  }
154  
155  /* This function requires the caller holds hdev->req_lock. */
156  struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
157  				  const void *param, u8 event, u32 timeout,
158  				  struct sock *sk)
159  {
160  	struct hci_request req;
161  	struct sk_buff *skb;
162  	int err = 0;
163  
164  	bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
165  
166  	hci_request_init(&req, hdev);
167  
168  	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
169  
170  	hdev->req_status = HCI_REQ_PEND;
171  
172  	err = hci_req_sync_run(&req);
173  	if (err < 0)
174  		return ERR_PTR(err);
175  
176  	err = wait_event_interruptible_timeout(hdev->req_wait_q,
177  					       hdev->req_status != HCI_REQ_PEND,
178  					       timeout);
179  
180  	if (err == -ERESTARTSYS)
181  		return ERR_PTR(-EINTR);
182  
183  	switch (hdev->req_status) {
184  	case HCI_REQ_DONE:
185  		err = -bt_to_errno(hdev->req_result);
186  		break;
187  
188  	case HCI_REQ_CANCELED:
189  		err = -hdev->req_result;
190  		break;
191  
192  	default:
193  		err = -ETIMEDOUT;
194  		break;
195  	}
196  
197  	hdev->req_status = 0;
198  	hdev->req_result = 0;
199  	skb = hdev->req_rsp;
200  	hdev->req_rsp = NULL;
201  
202  	bt_dev_dbg(hdev, "end: err %d", err);
203  
204  	if (err < 0) {
205  		kfree_skb(skb);
206  		return ERR_PTR(err);
207  	}
208  
209  	/* If the command returned a status event, skb will be NULL as there
210  	 * are no parameters.
211  	 */
212  	if (!skb)
213  		return ERR_PTR(-ENODATA);
214  
215  	return skb;
216  }
217  EXPORT_SYMBOL(__hci_cmd_sync_sk);
218  
219  /* This function requires the caller holds hdev->req_lock. */
220  struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
221  			       const void *param, u32 timeout)
222  {
223  	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
224  }
225  EXPORT_SYMBOL(__hci_cmd_sync);
226  
227  /* Send HCI command and wait for command complete event */
228  struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
229  			     const void *param, u32 timeout)
230  {
231  	struct sk_buff *skb;
232  
233  	if (!test_bit(HCI_UP, &hdev->flags))
234  		return ERR_PTR(-ENETDOWN);
235  
236  	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
237  
238  	hci_req_sync_lock(hdev);
239  	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
240  	hci_req_sync_unlock(hdev);
241  
242  	return skb;
243  }
244  EXPORT_SYMBOL(hci_cmd_sync);
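
/* Illustrative sketch (not part of the original file): one way a caller
 * might use hci_cmd_sync() to read the local version information. The
 * opcode and struct hci_rp_read_local_version come from
 * <net/bluetooth/hci.h>; the function name example_read_local_version is
 * hypothetical.
 */
static int __maybe_unused example_read_local_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* Guard against a short response before casting */
	if (skb->len < sizeof(*rp)) {
		kfree_skb(skb);
		return -EPROTO;
	}

	rp = (void *)skb->data;
	bt_dev_dbg(hdev, "hci_ver %u manufacturer %u", rp->hci_ver,
		   le16_to_cpu(rp->manufacturer));

	kfree_skb(skb);
	return 0;
}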
245  
246  /* This function requires the caller holds hdev->req_lock. */
247  struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
248  				  const void *param, u8 event, u32 timeout)
249  {
250  	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
251  				 NULL);
252  }
253  EXPORT_SYMBOL(__hci_cmd_sync_ev);
254  
255  /* This function requires the caller holds hdev->req_lock. */
256  int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
257  			     const void *param, u8 event, u32 timeout,
258  			     struct sock *sk)
259  {
260  	struct sk_buff *skb;
261  	u8 status;
262  
263  	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
264  
265  	/* If the command returned a status event, skb is set to ERR_PTR(-ENODATA) */
266  	if (skb == ERR_PTR(-ENODATA))
267  		return 0;
268  
269  	if (IS_ERR(skb)) {
270  		if (!event)
271  			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
272  				   PTR_ERR(skb));
273  		return PTR_ERR(skb);
274  	}
275  
276  	status = skb->data[0];
277  
278  	kfree_skb(skb);
279  
280  	return status;
281  }
282  EXPORT_SYMBOL(__hci_cmd_sync_status_sk);
283  
284  int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
285  			  const void *param, u32 timeout)
286  {
287  	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
288  					NULL);
289  }
290  EXPORT_SYMBOL(__hci_cmd_sync_status);
291  
292  int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
293  			const void *param, u32 timeout)
294  {
295  	int err;
296  
297  	hci_req_sync_lock(hdev);
298  	err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
299  	hci_req_sync_unlock(hdev);
300  
301  	return err;
302  }
303  EXPORT_SYMBOL(hci_cmd_sync_status);
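
/* Illustrative sketch (not part of the original file): when only the command
 * status matters, hci_cmd_sync_status() avoids handling the skb directly.
 * HCI_OP_WRITE_SCAN_ENABLE takes a single byte parameter; the function name
 * below is hypothetical.
 */
static int __maybe_unused example_write_scan_enable(struct hci_dev *hdev,
						    u8 scan)
{
	return hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
				   sizeof(scan), &scan, HCI_CMD_TIMEOUT);
}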
304  
305  static void hci_cmd_sync_work(struct work_struct *work)
306  {
307  	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
308  
309  	bt_dev_dbg(hdev, "");
310  
311  	/* Dequeue all entries and run them */
312  	while (1) {
313  		struct hci_cmd_sync_work_entry *entry;
314  
315  		mutex_lock(&hdev->cmd_sync_work_lock);
316  		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
317  						 struct hci_cmd_sync_work_entry,
318  						 list);
319  		if (entry)
320  			list_del(&entry->list);
321  		mutex_unlock(&hdev->cmd_sync_work_lock);
322  
323  		if (!entry)
324  			break;
325  
326  		bt_dev_dbg(hdev, "entry %p", entry);
327  
328  		if (entry->func) {
329  			int err;
330  
331  			hci_req_sync_lock(hdev);
332  			err = entry->func(hdev, entry->data);
333  			if (entry->destroy)
334  				entry->destroy(hdev, entry->data, err);
335  			hci_req_sync_unlock(hdev);
336  		}
337  
338  		kfree(entry);
339  	}
340  }
341  
342  static void hci_cmd_sync_cancel_work(struct work_struct *work)
343  {
344  	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);
345  
346  	cancel_delayed_work_sync(&hdev->cmd_timer);
347  	cancel_delayed_work_sync(&hdev->ncmd_timer);
348  	atomic_set(&hdev->cmd_cnt, 1);
349  
350  	wake_up_interruptible(&hdev->req_wait_q);
351  }
352  
353  static int hci_scan_disable_sync(struct hci_dev *hdev);
354  static int scan_disable_sync(struct hci_dev *hdev, void *data)
355  {
356  	return hci_scan_disable_sync(hdev);
357  }
358  
359  static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
360  {
361  	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0);
362  }
363  
364  static void le_scan_disable(struct work_struct *work)
365  {
366  	struct hci_dev *hdev = container_of(work, struct hci_dev,
367  					    le_scan_disable.work);
368  	int status;
369  
370  	bt_dev_dbg(hdev, "");
371  	hci_dev_lock(hdev);
372  
373  	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
374  		goto _return;
375  
376  	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
377  	if (status) {
378  		bt_dev_err(hdev, "failed to disable LE scan: %d", status);
379  		goto _return;
380  	}
381  
382  	/* If we were running an LE-only scan, change the discovery state.
383  	 * If we were running both LE and BR/EDR inquiry simultaneously,
384  	 * and the BR/EDR inquiry has already finished, stop discovery;
385  	 * otherwise the BR/EDR inquiry will stop discovery when it
386  	 * finishes. If we are going to resolve a remote device name, do
387  	 * not change the discovery state.
388  	 */
389  
390  	if (hdev->discovery.type == DISCOV_TYPE_LE)
391  		goto discov_stopped;
392  
393  	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
394  		goto _return;
395  
396  	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
397  		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
398  		    hdev->discovery.state != DISCOVERY_RESOLVING)
399  			goto discov_stopped;
400  
401  		goto _return;
402  	}
403  
404  	status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL);
405  	if (status) {
406  		bt_dev_err(hdev, "inquiry failed: status %d", status);
407  		goto discov_stopped;
408  	}
409  
410  	goto _return;
411  
412  discov_stopped:
413  	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
414  
415  _return:
416  	hci_dev_unlock(hdev);
417  }
418  
419  static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
420  				       u8 filter_dup);
421  
422  static int reenable_adv_sync(struct hci_dev *hdev, void *data)
423  {
424  	bt_dev_dbg(hdev, "");
425  
426  	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
427  	    list_empty(&hdev->adv_instances))
428  		return 0;
429  
430  	if (hdev->cur_adv_instance) {
431  		return hci_schedule_adv_instance_sync(hdev,
432  						      hdev->cur_adv_instance,
433  						      true);
434  	} else {
435  		if (ext_adv_capable(hdev)) {
436  			hci_start_ext_adv_sync(hdev, 0x00);
437  		} else {
438  			hci_update_adv_data_sync(hdev, 0x00);
439  			hci_update_scan_rsp_data_sync(hdev, 0x00);
440  			hci_enable_advertising_sync(hdev);
441  		}
442  	}
443  
444  	return 0;
445  }
446  
447  static void reenable_adv(struct work_struct *work)
448  {
449  	struct hci_dev *hdev = container_of(work, struct hci_dev,
450  					    reenable_adv_work);
451  	int status;
452  
453  	bt_dev_dbg(hdev, "");
454  
455  	hci_dev_lock(hdev);
456  
457  	status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL);
458  	if (status)
459  		bt_dev_err(hdev, "failed to reenable ADV: %d", status);
460  
461  	hci_dev_unlock(hdev);
462  }
463  
464  static void cancel_adv_timeout(struct hci_dev *hdev)
465  {
466  	if (hdev->adv_instance_timeout) {
467  		hdev->adv_instance_timeout = 0;
468  		cancel_delayed_work(&hdev->adv_instance_expire);
469  	}
470  }
471  
472  /* For a single instance:
473   * - force == true: The instance will be removed even when its remaining
474   *   lifetime is not zero.
475   * - force == false: the instance will be deactivated but kept stored unless
476   *   the remaining lifetime is zero.
477   *
478   * For instance == 0x00:
479   * - force == true: All instances will be removed regardless of their timeout
480   *   setting.
481   * - force == false: Only instances that have a timeout will be removed.
482   */
483  int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
484  				u8 instance, bool force)
485  {
486  	struct adv_info *adv_instance, *n, *next_instance = NULL;
487  	int err;
488  	u8 rem_inst;
489  
490  	/* Cancel any timeout concerning the removed instance(s). */
491  	if (!instance || hdev->cur_adv_instance == instance)
492  		cancel_adv_timeout(hdev);
493  
494  	/* Get the next instance to advertise BEFORE we remove
495  	 * the current one. This can be the same instance again
496  	 * if there is only one instance.
497  	 */
498  	if (instance && hdev->cur_adv_instance == instance)
499  		next_instance = hci_get_next_instance(hdev, instance);
500  
501  	if (instance == 0x00) {
502  		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
503  					 list) {
504  			if (!(force || adv_instance->timeout))
505  				continue;
506  
507  			rem_inst = adv_instance->instance;
508  			err = hci_remove_adv_instance(hdev, rem_inst);
509  			if (!err)
510  				mgmt_advertising_removed(sk, hdev, rem_inst);
511  		}
512  	} else {
513  		adv_instance = hci_find_adv_instance(hdev, instance);
514  
515  		if (force || (adv_instance && adv_instance->timeout &&
516  			      !adv_instance->remaining_time)) {
517  			/* Don't advertise a removed instance. */
518  			if (next_instance &&
519  			    next_instance->instance == instance)
520  				next_instance = NULL;
521  
522  			err = hci_remove_adv_instance(hdev, instance);
523  			if (!err)
524  				mgmt_advertising_removed(sk, hdev, instance);
525  		}
526  	}
527  
528  	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
529  		return 0;
530  
531  	if (next_instance && !ext_adv_capable(hdev))
532  		return hci_schedule_adv_instance_sync(hdev,
533  						      next_instance->instance,
534  						      false);
535  
536  	return 0;
537  }
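
/* Illustrative note (not part of the original file): typical invocations
 * given the force semantics documented above.
 *
 *	hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);  - remove all
 *	hci_clear_adv_instance_sync(hdev, NULL, 0x00, false); - only instances
 *	                                                        with a timeout
 */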
538  
539  static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data)
540  {
541  	u8 instance = *(u8 *)data;
542  
543  	kfree(data);
544  
545  	hci_clear_adv_instance_sync(hdev, NULL, instance, false);
546  
547  	if (list_empty(&hdev->adv_instances))
548  		return hci_disable_advertising_sync(hdev);
549  
550  	return 0;
551  }
552  
553  static void adv_timeout_expire(struct work_struct *work)
554  {
555  	u8 *inst_ptr;
556  	struct hci_dev *hdev = container_of(work, struct hci_dev,
557  					    adv_instance_expire.work);
558  
559  	bt_dev_dbg(hdev, "");
560  
561  	hci_dev_lock(hdev);
562  
563  	hdev->adv_instance_timeout = 0;
564  
565  	if (hdev->cur_adv_instance == 0x00)
566  		goto unlock;
567  
568  	inst_ptr = kmalloc(1, GFP_KERNEL);
569  	if (!inst_ptr)
570  		goto unlock;
571  
572  	*inst_ptr = hdev->cur_adv_instance;
573  	hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL);
574  
575  unlock:
576  	hci_dev_unlock(hdev);
577  }
578  
579  static bool is_interleave_scanning(struct hci_dev *hdev)
580  {
581  	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
582  }
583  
584  static int hci_passive_scan_sync(struct hci_dev *hdev);
585  
586  static void interleave_scan_work(struct work_struct *work)
587  {
588  	struct hci_dev *hdev = container_of(work, struct hci_dev,
589  					    interleave_scan.work);
590  	unsigned long timeout;
591  
592  	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
593  		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
594  	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
595  		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
596  	} else {
597  		bt_dev_err(hdev, "unexpected error");
598  		return;
599  	}
600  
601  	hci_passive_scan_sync(hdev);
602  
603  	hci_dev_lock(hdev);
604  
605  	switch (hdev->interleave_scan_state) {
606  	case INTERLEAVE_SCAN_ALLOWLIST:
607  		bt_dev_dbg(hdev, "next state: allowlist");
608  		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
609  		break;
610  	case INTERLEAVE_SCAN_NO_FILTER:
611  		bt_dev_dbg(hdev, "next state: no filter");
612  		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
613  		break;
614  	case INTERLEAVE_SCAN_NONE:
615  		bt_dev_err(hdev, "unexpected error");
616  	}
617  
618  	hci_dev_unlock(hdev);
619  
620  	/* Don't continue interleaving if it was canceled */
621  	if (is_interleave_scanning(hdev))
622  		queue_delayed_work(hdev->req_workqueue,
623  				   &hdev->interleave_scan, timeout);
624  }
625  
626  void hci_cmd_sync_init(struct hci_dev *hdev)
627  {
628  	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
629  	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
630  	mutex_init(&hdev->cmd_sync_work_lock);
631  	mutex_init(&hdev->unregister_lock);
632  
633  	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
634  	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
635  	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
636  	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
637  	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
638  }
639  
640  static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
641  				       struct hci_cmd_sync_work_entry *entry,
642  				       int err)
643  {
644  	if (entry->destroy)
645  		entry->destroy(hdev, entry->data, err);
646  
647  	list_del(&entry->list);
648  	kfree(entry);
649  }
650  
651  void hci_cmd_sync_clear(struct hci_dev *hdev)
652  {
653  	struct hci_cmd_sync_work_entry *entry, *tmp;
654  
655  	cancel_work_sync(&hdev->cmd_sync_work);
656  	cancel_work_sync(&hdev->reenable_adv_work);
657  
658  	mutex_lock(&hdev->cmd_sync_work_lock);
659  	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
660  		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
661  	mutex_unlock(&hdev->cmd_sync_work_lock);
662  }
663  
664  void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
665  {
666  	bt_dev_dbg(hdev, "err 0x%2.2x", err);
667  
668  	if (hdev->req_status == HCI_REQ_PEND) {
669  		hdev->req_result = err;
670  		hdev->req_status = HCI_REQ_CANCELED;
671  
672  		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
673  	}
674  }
675  EXPORT_SYMBOL(hci_cmd_sync_cancel);
676  
677  /* Cancel ongoing command request synchronously:
678   *
679   * - Set result and mark status to HCI_REQ_CANCELED
680   * - Wake up the command sync thread
681   */
682  void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
683  {
684  	bt_dev_dbg(hdev, "err 0x%2.2x", err);
685  
686  	if (hdev->req_status == HCI_REQ_PEND) {
687  		/* req_result is __u32 so error must be positive to be properly
688  		 * propagated.
689  		 */
690  		hdev->req_result = err < 0 ? -err : err;
691  		hdev->req_status = HCI_REQ_CANCELED;
692  
693  		wake_up_interruptible(&hdev->req_wait_q);
694  	}
695  }
696  EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
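
/* Illustrative note (not part of the original file): a transport driver that
 * detects the controller is gone could abort a pending request with
 *
 *	hci_cmd_sync_cancel_sync(hdev, -ENODEV);
 *
 * The errno is stored as a positive value in req_result and negated again in
 * __hci_cmd_sync_sk(), so the waiter sees ERR_PTR(-ENODEV).
 */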
697  
698  /* Submit HCI command to be run as cmd_sync_work:
699   *
700   * - hdev must _not_ be unregistered
701   */
702  int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
703  			void *data, hci_cmd_sync_work_destroy_t destroy)
704  {
705  	struct hci_cmd_sync_work_entry *entry;
706  	int err = 0;
707  
708  	mutex_lock(&hdev->unregister_lock);
709  	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
710  		err = -ENODEV;
711  		goto unlock;
712  	}
713  
714  	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
715  	if (!entry) {
716  		err = -ENOMEM;
717  		goto unlock;
718  	}
719  	entry->func = func;
720  	entry->data = data;
721  	entry->destroy = destroy;
722  
723  	mutex_lock(&hdev->cmd_sync_work_lock);
724  	list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
725  	mutex_unlock(&hdev->cmd_sync_work_lock);
726  
727  	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
728  
729  unlock:
730  	mutex_unlock(&hdev->unregister_lock);
731  	return err;
732  }
733  EXPORT_SYMBOL(hci_cmd_sync_submit);
734  
735  /* Queue HCI command:
736   *
737   * - hdev must be running
738   */
739  int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
740  		       void *data, hci_cmd_sync_work_destroy_t destroy)
741  {
742  	/* Only queue the command if hdev is running, which means it has been
743  	 * opened and is either in the init phase or already up.
744  	 */
745  	if (!test_bit(HCI_RUNNING, &hdev->flags))
746  		return -ENETDOWN;
747  
748  	return hci_cmd_sync_submit(hdev, func, data, destroy);
749  }
750  EXPORT_SYMBOL(hci_cmd_sync_queue);
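
/* Illustrative sketch (not part of the original file): queueing a callback
 * with a data payload and a destroy callback that frees it, mirroring the
 * adv_timeout_expire() pattern above. All example_* names are hypothetical.
 */
static int example_func(struct hci_dev *hdev, void *data)
{
	u8 instance = *(u8 *)data;

	return hci_update_adv_data_sync(hdev, instance);
}

static void example_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);	/* err carries the result of example_func() */
}

static int __maybe_unused example_queue(struct hci_dev *hdev, u8 instance)
{
	u8 *inst_ptr = kmalloc(1, GFP_KERNEL);

	if (!inst_ptr)
		return -ENOMEM;

	*inst_ptr = instance;
	return hci_cmd_sync_queue(hdev, example_func, inst_ptr,
				  example_destroy);
}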
751  
752  static struct hci_cmd_sync_work_entry *
753  _hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
754  			   void *data, hci_cmd_sync_work_destroy_t destroy)
755  {
756  	struct hci_cmd_sync_work_entry *entry, *tmp;
757  
758  	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
759  		if (func && entry->func != func)
760  			continue;
761  
762  		if (data && entry->data != data)
763  			continue;
764  
765  		if (destroy && entry->destroy != destroy)
766  			continue;
767  
768  		return entry;
769  	}
770  
771  	return NULL;
772  }
773  
774  /* Queue HCI command entry once:
775   *
776   * - Look up whether an entry already exists and, only if it doesn't,
777   *   create a new entry and queue it.
778   */
779  int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
780  			    void *data, hci_cmd_sync_work_destroy_t destroy)
781  {
782  	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
783  		return 0;
784  
785  	return hci_cmd_sync_queue(hdev, func, data, destroy);
786  }
787  EXPORT_SYMBOL(hci_cmd_sync_queue_once);
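
/* Illustrative note (not part of the original file): because the lookup
 * matches on func/data/destroy, repeated calls such as
 *
 *	hci_cmd_sync_queue_once(hdev, scan_disable_sync, NULL, NULL);
 *
 * coalesce into a single queued entry until the worker consumes it; later
 * calls simply return 0.
 */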
788  
789  /* Run HCI command:
790   *
791   * - hdev must be running
792   * - if on cmd_sync_work then run immediately otherwise queue
793   */
794  int hci_cmd_sync_run(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
795  		     void *data, hci_cmd_sync_work_destroy_t destroy)
796  {
797  	/* Only run or queue the command if hdev is running, which means it
798  	 * has been opened and is either in the init phase or already up.
799  	 */
800  	if (!test_bit(HCI_RUNNING, &hdev->flags))
801  		return -ENETDOWN;
802  
803  	/* If on cmd_sync_work then run immediately otherwise queue */
804  	if (current_work() == &hdev->cmd_sync_work)
805  		return func(hdev, data);
806  
807  	return hci_cmd_sync_submit(hdev, func, data, destroy);
808  }
809  EXPORT_SYMBOL(hci_cmd_sync_run);
810  
811  /* Run HCI command entry once:
812   *
813   * - Look up whether an entry already exists and, only if it doesn't,
814   *   create a new entry and run it.
815   * - if on cmd_sync_work then run immediately otherwise queue
816   */
817  int hci_cmd_sync_run_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
818  			  void *data, hci_cmd_sync_work_destroy_t destroy)
819  {
820  	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
821  		return 0;
822  
823  	return hci_cmd_sync_run(hdev, func, data, destroy);
824  }
825  EXPORT_SYMBOL(hci_cmd_sync_run_once);
826  
827  /* Lookup HCI command entry:
828   *
829   * - Return the first entry that matches the function callback, data or
830   *   destroy callback.
831   */
832  struct hci_cmd_sync_work_entry *
833  hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
834  			  void *data, hci_cmd_sync_work_destroy_t destroy)
835  {
836  	struct hci_cmd_sync_work_entry *entry;
837  
838  	mutex_lock(&hdev->cmd_sync_work_lock);
839  	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
840  	mutex_unlock(&hdev->cmd_sync_work_lock);
841  
842  	return entry;
843  }
844  EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);
845  
846  /* Cancel HCI command entry */
847  void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
848  			       struct hci_cmd_sync_work_entry *entry)
849  {
850  	mutex_lock(&hdev->cmd_sync_work_lock);
851  	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
852  	mutex_unlock(&hdev->cmd_sync_work_lock);
853  }
854  EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);
855  
856  /* Dequeue one HCI command entry:
857   *
858   * - Look up and cancel the first entry that matches.
859   */
860  bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
861  			       hci_cmd_sync_work_func_t func,
862  			       void *data, hci_cmd_sync_work_destroy_t destroy)
863  {
864  	struct hci_cmd_sync_work_entry *entry;
865  
866  	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
867  	if (!entry)
868  		return false;
869  
870  	hci_cmd_sync_cancel_entry(hdev, entry);
871  
872  	return true;
873  }
874  EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
875  
876  /* Dequeue HCI command entry:
877   *
878   * - Look up and cancel any entry that matches the function callback,
879   *   data or destroy callback.
880   */
881  bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
882  			  void *data, hci_cmd_sync_work_destroy_t destroy)
883  {
884  	struct hci_cmd_sync_work_entry *entry;
885  	bool ret = false;
886  
887  	mutex_lock(&hdev->cmd_sync_work_lock);
888  	while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
889  						   destroy))) {
890  		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
891  		ret = true;
892  	}
893  	mutex_unlock(&hdev->cmd_sync_work_lock);
894  
895  	return ret;
896  }
897  EXPORT_SYMBOL(hci_cmd_sync_dequeue);
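
/* Illustrative note (not part of the original file): passing NULL for data
 * and destroy acts as a wildcard, so
 *
 *	hci_cmd_sync_dequeue(hdev, example_func, NULL, NULL);
 *
 * would cancel every pending entry using that callback (example_func being
 * the hypothetical callback from the sketch above).
 */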
898  
899  int hci_update_eir_sync(struct hci_dev *hdev)
900  {
901  	struct hci_cp_write_eir cp;
902  
903  	bt_dev_dbg(hdev, "");
904  
905  	if (!hdev_is_powered(hdev))
906  		return 0;
907  
908  	if (!lmp_ext_inq_capable(hdev))
909  		return 0;
910  
911  	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
912  		return 0;
913  
914  	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
915  		return 0;
916  
917  	memset(&cp, 0, sizeof(cp));
918  
919  	eir_create(hdev, cp.data);
920  
921  	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
922  		return 0;
923  
924  	memcpy(hdev->eir, cp.data, sizeof(cp.data));
925  
926  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
927  				     HCI_CMD_TIMEOUT);
928  }
929  
930  static u8 get_service_classes(struct hci_dev *hdev)
931  {
932  	struct bt_uuid *uuid;
933  	u8 val = 0;
934  
935  	list_for_each_entry(uuid, &hdev->uuids, list)
936  		val |= uuid->svc_hint;
937  
938  	return val;
939  }
940  
941  int hci_update_class_sync(struct hci_dev *hdev)
942  {
943  	u8 cod[3];
944  
945  	bt_dev_dbg(hdev, "");
946  
947  	if (!hdev_is_powered(hdev))
948  		return 0;
949  
950  	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
951  		return 0;
952  
953  	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
954  		return 0;
955  
956  	cod[0] = hdev->minor_class;
957  	cod[1] = hdev->major_class;
958  	cod[2] = get_service_classes(hdev);
959  
960  	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
961  		cod[1] |= 0x20;
962  
963  	if (memcmp(cod, hdev->dev_class, 3) == 0)
964  		return 0;
965  
966  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
967  				     sizeof(cod), cod, HCI_CMD_TIMEOUT);
968  }
969  
970  static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
971  {
972  	/* If there is no connection we are OK to advertise. */
973  	if (hci_conn_num(hdev, LE_LINK) == 0)
974  		return true;
975  
976  	/* Check le_states if there is any connection in peripheral role. */
977  	if (hdev->conn_hash.le_num_peripheral > 0) {
978  		/* Peripheral connection state and non connectable mode
979  		 * bit 20.
980  		 */
981  		if (!connectable && !(hdev->le_states[2] & 0x10))
982  			return false;
983  
984  		/* Peripheral connection state and connectable mode bit 38
985  		 * and scannable bit 21.
986  		 */
987  		if (connectable && (!(hdev->le_states[4] & 0x40) ||
988  				    !(hdev->le_states[2] & 0x20)))
989  			return false;
990  	}
991  
992  	/* Check le_states if there is any connection in central role. */
993  	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
994  		/* Central connection state and non connectable mode bit 18. */
995  		if (!connectable && !(hdev->le_states[2] & 0x02))
996  			return false;
997  
998  		/* Central connection state and connectable mode bit 35 and
999  		 * scannable 19.
1000  		 */
1001  		if (connectable && (!(hdev->le_states[4] & 0x08) ||
1002  				    !(hdev->le_states[2] & 0x08)))
1003  			return false;
1004  	}
1005  
1006  	return true;
1007  }
1008  
1009  static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
1010  {
1011  	/* If privacy is not enabled don't use RPA */
1012  	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
1013  		return false;
1014  
1015  	/* If basic privacy mode is enabled use RPA */
1016  	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1017  		return true;
1018  
1019  	/* If limited privacy mode is enabled don't use RPA if we're
1020  	 * both discoverable and bondable.
1021  	 */
1022  	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
1023  	    hci_dev_test_flag(hdev, HCI_BONDABLE))
1024  		return false;
1025  
1026  	/* We're neither bondable nor discoverable in the limited
1027  	 * privacy mode, therefore use RPA.
1028  	 */
1029  	return true;
1030  }
1031  
1032  static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
1033  {
1034  	/* If a random address has been set and we're advertising or initiating
1035  	 * an LE connection, we can't go ahead and change the random address at
1036  	 * this time. This is because the eventual initiator address used for the
1037  	 * subsequently created connection will be undefined (some
1038  	 * controllers use the new address and others the one we had
1039  	 * when the operation started).
1040  	 *
1041  	 * In this kind of scenario skip the update and let the random
1042  	 * address be updated at the next cycle.
1043  	 */
1044  	if (bacmp(&hdev->random_addr, BDADDR_ANY) &&
1045  	    (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1046  	    hci_lookup_le_connect(hdev))) {
1047  		bt_dev_dbg(hdev, "Deferring random address update");
1048  		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1049  		return 0;
1050  	}
1051  
1052  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
1053  				     6, rpa, HCI_CMD_TIMEOUT);
1054  }
1055  
1056  int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
1057  				   bool rpa, u8 *own_addr_type)
1058  {
1059  	int err;
1060  
1061  	/* If privacy is enabled, use a resolvable private address. If the
1062  	 * current RPA has expired or something other than the current RPA
1063  	 * is in use, then generate a new one.
1064  	 */
1065  	if (rpa) {
1066  		/* If the controller supports LL Privacy, use own address
1067  		 * type 0x03.
1068  		 */
1069  		if (ll_privacy_capable(hdev))
1070  			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1071  		else
1072  			*own_addr_type = ADDR_LE_DEV_RANDOM;
1073  
1074  		/* Check if RPA is valid */
1075  		if (rpa_valid(hdev))
1076  			return 0;
1077  
1078  		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1079  		if (err < 0) {
1080  			bt_dev_err(hdev, "failed to generate new RPA");
1081  			return err;
1082  		}
1083  
1084  		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
1085  		if (err)
1086  			return err;
1087  
1088  		return 0;
1089  	}
1090  
1091  	/* In case of required privacy without resolvable private address,
1092  	 * use a non-resolvable private address. This is useful for active
1093  	 * scanning and non-connectable advertising.
1094  	 */
1095  	if (require_privacy) {
1096  		bdaddr_t nrpa;
1097  
1098  		while (true) {
1099  			/* The non-resolvable private address is generated
1100  			 * from random six bytes with the two most significant
1101  			 * bits cleared.
1102  			 */
1103  			get_random_bytes(&nrpa, 6);
1104  			nrpa.b[5] &= 0x3f;
1105  
1106  			/* The non-resolvable private address shall not be
1107  			 * equal to the public address.
1108  			 */
1109  			if (bacmp(&hdev->bdaddr, &nrpa))
1110  				break;
1111  		}
1112  
1113  		*own_addr_type = ADDR_LE_DEV_RANDOM;
1114  
1115  		return hci_set_random_addr_sync(hdev, &nrpa);
1116  	}
1117  
1118  	/* If forcing static address is in use or there is no public
1119  	 * address use the static address as random address (but skip
1120  	 * the HCI command if the current random address is already the
1121  	 * static one).
1122  	 *
1123  	 * In case BR/EDR has been disabled on a dual-mode controller
1124  	 * and a static address has been configured, then use that
1125  	 * address instead of the public BR/EDR address.
1126  	 */
1127  	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1128  	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1129  	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1130  	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
1131  		*own_addr_type = ADDR_LE_DEV_RANDOM;
1132  		if (bacmp(&hdev->static_addr, &hdev->random_addr))
1133  			return hci_set_random_addr_sync(hdev,
1134  							&hdev->static_addr);
1135  		return 0;
1136  	}
1137  
1138  	/* Neither privacy nor static address is being used so use a
1139  	 * public address.
1140  	 */
1141  	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1142  
1143  	return 0;
1144  }
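
/* Illustrative note (not part of the original file): bdaddr_t is stored
 * little-endian, so nrpa.b[5] above is the most significant byte; masking it
 * with 0x3f clears the two most significant bits of the address, which is
 * what marks it as a non-resolvable private address.
 */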
1145  
1146  static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
1147  {
1148  	struct hci_cp_le_set_ext_adv_enable *cp;
1149  	struct hci_cp_ext_adv_set *set;
1150  	u8 data[sizeof(*cp) + sizeof(*set) * 1];
1151  	u8 size;
1152  	struct adv_info *adv = NULL;
1153  
1154  	/* If request specifies an instance that doesn't exist, fail */
1155  	if (instance > 0) {
1156  		adv = hci_find_adv_instance(hdev, instance);
1157  		if (!adv)
1158  			return -EINVAL;
1159  
1160  		/* If not enabled there is nothing to do */
1161  		if (!adv->enabled)
1162  			return 0;
1163  	}
1164  
1165  	memset(data, 0, sizeof(data));
1166  
1167  	cp = (void *)data;
1168  	set = (void *)cp->data;
1169  
1170  	/* Instance 0x00 indicates all advertising instances will be disabled */
1171  	cp->num_of_sets = !!instance;
1172  	cp->enable = 0x00;
1173  
1174  	set->handle = adv ? adv->handle : instance;
1175  
1176  	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;
1177  
1178  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1179  				     size, data, HCI_CMD_TIMEOUT);
1180  }
1181  
1182  static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
1183  					    bdaddr_t *random_addr)
1184  {
1185  	struct hci_cp_le_set_adv_set_rand_addr cp;
1186  	int err;
1187  
1188  	if (!instance) {
1189  		/* Instance 0x00 doesn't have an adv_info; instead it uses
1190  		 * hdev->random_addr to track its address, so whenever it needs
1191  		 * to be updated this also sets the random address, since
1192  		 * hdev->random_addr is shared with the scan state machine.
1193  		 */
1194  		err = hci_set_random_addr_sync(hdev, random_addr);
1195  		if (err)
1196  			return err;
1197  	}
1198  
1199  	memset(&cp, 0, sizeof(cp));
1200  
1201  	cp.handle = instance;
1202  	bacpy(&cp.bdaddr, random_addr);
1203  
1204  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1205  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1206  }
1207  
1208  int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
1209  {
1210  	struct hci_cp_le_set_ext_adv_params cp;
1211  	bool connectable;
1212  	u32 flags;
1213  	bdaddr_t random_addr;
1214  	u8 own_addr_type;
1215  	int err;
1216  	struct adv_info *adv;
1217  	bool secondary_adv;
1218  
1219  	if (instance > 0) {
1220  		adv = hci_find_adv_instance(hdev, instance);
1221  		if (!adv)
1222  			return -EINVAL;
1223  	} else {
1224  		adv = NULL;
1225  	}
1226  
1227  	/* Updating parameters of an active instance will return a
1228  	 * Command Disallowed error, so we must first disable the
1229  	 * instance if it is active.
1230  	 */
1231  	if (adv && !adv->pending) {
1232  		err = hci_disable_ext_adv_instance_sync(hdev, instance);
1233  		if (err)
1234  			return err;
1235  	}
1236  
1237  	flags = hci_adv_instance_flags(hdev, instance);
1238  
1239  	/* If the "connectable" instance flag was not set, then choose between
1240  	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1241  	 */
1242  	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1243  		      mgmt_get_connectable(hdev);
1244  
1245  	if (!is_advertising_allowed(hdev, connectable))
1246  		return -EPERM;
1247  
1248  	/* Set require_privacy to true only when non-connectable
1249  	 * advertising is used. In that case it is fine to use a
1250  	 * non-resolvable private address.
1251  	 */
1252  	err = hci_get_random_address(hdev, !connectable,
1253  				     adv_use_rpa(hdev, flags), adv,
1254  				     &own_addr_type, &random_addr);
1255  	if (err < 0)
1256  		return err;
1257  
1258  	memset(&cp, 0, sizeof(cp));
1259  
1260  	if (adv) {
1261  		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
1262  		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
1263  		cp.tx_power = adv->tx_power;
1264  	} else {
1265  		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1266  		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1267  		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1268  	}
1269  
1270  	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1271  
1272  	if (connectable) {
1273  		if (secondary_adv)
1274  			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1275  		else
1276  			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1277  	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
1278  		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
1279  		if (secondary_adv)
1280  			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1281  		else
1282  			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1283  	} else {
1284  		if (secondary_adv)
1285  			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1286  		else
1287  			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1288  	}
1289  
1290  	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
1291  	 * contains the peer’s Identity Address and the Peer_Address_Type
1292  	 * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01).
1293  	 * These parameters are used to locate the corresponding local IRK in
1294  	 * the resolving list; this IRK is used to generate their own address
1295  	 * used in the advertisement.
1296  	 */
1297  	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
1298  		hci_copy_identity_address(hdev, &cp.peer_addr,
1299  					  &cp.peer_addr_type);
1300  
1301  	cp.own_addr_type = own_addr_type;
1302  	cp.channel_map = hdev->le_adv_channel_map;
1303  	cp.handle = adv ? adv->handle : instance;
1304  
1305  	if (flags & MGMT_ADV_FLAG_SEC_2M) {
1306  		cp.primary_phy = HCI_ADV_PHY_1M;
1307  		cp.secondary_phy = HCI_ADV_PHY_2M;
1308  	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1309  		cp.primary_phy = HCI_ADV_PHY_CODED;
1310  		cp.secondary_phy = HCI_ADV_PHY_CODED;
1311  	} else {
1312  		/* In all other cases use 1M */
1313  		cp.primary_phy = HCI_ADV_PHY_1M;
1314  		cp.secondary_phy = HCI_ADV_PHY_1M;
1315  	}
1316  
1317  	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
1318  				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1319  	if (err)
1320  		return err;
1321  
1322  	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
1323  	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
1324  	    bacmp(&random_addr, BDADDR_ANY)) {
1325  		/* Check if the random address needs to be updated */
1326  		if (adv) {
1327  			if (!bacmp(&random_addr, &adv->random_addr))
1328  				return 0;
1329  		} else {
1330  			if (!bacmp(&random_addr, &hdev->random_addr))
1331  				return 0;
1332  		}
1333  
1334  		return hci_set_adv_set_random_addr_sync(hdev, instance,
1335  							&random_addr);
1336  	}
1337  
1338  	return 0;
1339  }
1340  
1341  static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1342  {
1343  	DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length,
1344  		    HCI_MAX_EXT_AD_LENGTH);
1345  	u8 len;
1346  	struct adv_info *adv = NULL;
1347  	int err;
1348  
1349  	if (instance) {
1350  		adv = hci_find_adv_instance(hdev, instance);
1351  		if (!adv || !adv->scan_rsp_changed)
1352  			return 0;
1353  	}
1354  
1355  	len = eir_create_scan_rsp(hdev, instance, pdu->data);
1356  
1357  	pdu->handle = adv ? adv->handle : instance;
1358  	pdu->length = len;
1359  	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1360  	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1361  
1362  	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1363  				    struct_size(pdu, data, len), pdu,
1364  				    HCI_CMD_TIMEOUT);
1365  	if (err)
1366  		return err;
1367  
1368  	if (adv) {
1369  		adv->scan_rsp_changed = false;
1370  	} else {
1371  		memcpy(hdev->scan_rsp_data, pdu->data, len);
1372  		hdev->scan_rsp_data_len = len;
1373  	}
1374  
1375  	return 0;
1376  }
1377  
1378  static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1379  {
1380  	struct hci_cp_le_set_scan_rsp_data cp;
1381  	u8 len;
1382  
1383  	memset(&cp, 0, sizeof(cp));
1384  
1385  	len = eir_create_scan_rsp(hdev, instance, cp.data);
1386  
1387  	if (hdev->scan_rsp_data_len == len &&
1388  	    !memcmp(cp.data, hdev->scan_rsp_data, len))
1389  		return 0;
1390  
1391  	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1392  	hdev->scan_rsp_data_len = len;
1393  
1394  	cp.length = len;
1395  
1396  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
1397  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1398  }
1399  
1400  int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1401  {
1402  	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1403  		return 0;
1404  
1405  	if (ext_adv_capable(hdev))
1406  		return hci_set_ext_scan_rsp_data_sync(hdev, instance);
1407  
1408  	return __hci_set_scan_rsp_data_sync(hdev, instance);
1409  }
1410  
1411  int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
1412  {
1413  	struct hci_cp_le_set_ext_adv_enable *cp;
1414  	struct hci_cp_ext_adv_set *set;
1415  	u8 data[sizeof(*cp) + sizeof(*set) * 1];
1416  	struct adv_info *adv;
1417  
1418  	if (instance > 0) {
1419  		adv = hci_find_adv_instance(hdev, instance);
1420  		if (!adv)
1421  			return -EINVAL;
1422  		/* If already enabled there is nothing to do */
1423  		if (adv->enabled)
1424  			return 0;
1425  	} else {
1426  		adv = NULL;
1427  	}
1428  
1429  	cp = (void *)data;
1430  	set = (void *)cp->data;
1431  
1432  	memset(cp, 0, sizeof(*cp));
1433  
1434  	cp->enable = 0x01;
1435  	cp->num_of_sets = 0x01;
1436  
1437  	memset(set, 0, sizeof(*set));
1438  
1439  	set->handle = adv ? adv->handle : instance;
1440  
1441  	/* Set duration per instance since controller is responsible for
1442  	 * scheduling it.
1443  	 */
1444  	if (adv && adv->timeout) {
1445  		u16 duration = adv->timeout * MSEC_PER_SEC;
1446  
1447  		/* Time = N * 10 ms */
1448  		set->duration = cpu_to_le16(duration / 10);
1449  	}
1450  
1451  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1452  				     sizeof(*cp) +
1453  				     sizeof(*set) * cp->num_of_sets,
1454  				     data, HCI_CMD_TIMEOUT);
1455  }
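
/* Illustrative note (not part of the original file): adv->timeout is in
 * seconds, so a 5 second timeout becomes 5 * MSEC_PER_SEC = 5000 ms and a
 * controller duration of 5000 / 10 = 500 (the field is in units of 10 ms).
 */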
1456  
1457  int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
1458  {
1459  	int err;
1460  
1461  	err = hci_setup_ext_adv_instance_sync(hdev, instance);
1462  	if (err)
1463  		return err;
1464  
1465  	err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
1466  	if (err)
1467  		return err;
1468  
1469  	return hci_enable_ext_advertising_sync(hdev, instance);
1470  }
1471  
1472  int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1473  {
1474  	struct hci_cp_le_set_per_adv_enable cp;
1475  	struct adv_info *adv = NULL;
1476  
1477  	/* If periodic advertising is already disabled there is nothing to do. */
1478  	adv = hci_find_adv_instance(hdev, instance);
1479  	if (!adv || !adv->periodic || !adv->enabled)
1480  		return 0;
1481  
1482  	memset(&cp, 0, sizeof(cp));
1483  
1484  	cp.enable = 0x00;
1485  	cp.handle = instance;
1486  
1487  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1488  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1489  }
1490  
1491  static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
1492  				       u16 min_interval, u16 max_interval)
1493  {
1494  	struct hci_cp_le_set_per_adv_params cp;
1495  
1496  	memset(&cp, 0, sizeof(cp));
1497  
1498  	if (!min_interval)
1499  		min_interval = DISCOV_LE_PER_ADV_INT_MIN;
1500  
1501  	if (!max_interval)
1502  		max_interval = DISCOV_LE_PER_ADV_INT_MAX;
1503  
1504  	cp.handle = instance;
1505  	cp.min_interval = cpu_to_le16(min_interval);
1506  	cp.max_interval = cpu_to_le16(max_interval);
1507  	cp.periodic_properties = 0x0000;
1508  
1509  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
1510  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1511  }
1512  
1513  static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
1514  {
1515  	DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length,
1516  		    HCI_MAX_PER_AD_LENGTH);
1517  	u8 len;
1518  	struct adv_info *adv = NULL;
1519  
1520  	if (instance) {
1521  		adv = hci_find_adv_instance(hdev, instance);
1522  		if (!adv || !adv->periodic)
1523  			return 0;
1524  	}
1525  
1526  	len = eir_create_per_adv_data(hdev, instance, pdu->data);
1527  
1528  	pdu->length = len;
1529  	pdu->handle = adv ? adv->handle : instance;
1530  	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1531  
1532  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
1533  				     struct_size(pdu, data, len), pdu,
1534  				     HCI_CMD_TIMEOUT);
1535  }
1536  
1537  static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1538  {
1539  	struct hci_cp_le_set_per_adv_enable cp;
1540  	struct adv_info *adv = NULL;
1541  
1542  	/* If periodic advertising is already enabled there is nothing to do. */
1543  	adv = hci_find_adv_instance(hdev, instance);
1544  	if (adv && adv->periodic && adv->enabled)
1545  		return 0;
1546  
1547  	memset(&cp, 0, sizeof(cp));
1548  
1549  	cp.enable = 0x01;
1550  	cp.handle = instance;
1551  
1552  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1553  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1554  }
1555  
1556  /* Checks if the periodic advertising data contains a Basic Audio Announcement
1557   * and, if it does, generates a Broadcast ID and adds a Broadcast Announcement.
1558   */
1559  static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv)
1560  {
1561  	u8 bid[3];
1562  	u8 ad[4 + 3];
1563  
1564  	/* Skip if adv is NULL, as instance 0x00 is used for general purpose
1565  	 * advertising and so cannot be used for the likes of Broadcast
1566  	 * Announcement, since it can be overwritten at any point.
1567  	 */
1568  	if (!adv)
1569  		return 0;
1570  
1571  	/* If the PA data doesn't contain a Basic Audio Announcement, there
1572  	 * is nothing to do.
1573  	 */
1574  	if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
1575  				  0x1851, NULL))
1576  		return 0;
1577  
1578  	/* Check if the advertising data already has a Broadcast Announcement,
1579  	 * since the process may want to control the Broadcast ID directly and
1580  	 * in that case the kernel shall not interfere.
1581  	 */
1582  	if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852,
1583  				 NULL))
1584  		return 0;
1585  
1586  	/* Generate Broadcast ID */
1587  	get_random_bytes(bid, sizeof(bid));
1588  	eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
1589  	hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL);
1590  
1591  	return hci_update_adv_data_sync(hdev, adv->instance);
1592  }
1593  
1594  int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
1595  			   u8 *data, u32 flags, u16 min_interval,
1596  			   u16 max_interval, u16 sync_interval)
1597  {
1598  	struct adv_info *adv = NULL;
1599  	int err;
1600  	bool added = false;
1601  
1602  	hci_disable_per_advertising_sync(hdev, instance);
1603  
1604  	if (instance) {
1605  		adv = hci_find_adv_instance(hdev, instance);
1606  		/* Create an instance if one could not be found */
1607  		if (!adv) {
1608  			adv = hci_add_per_instance(hdev, instance, flags,
1609  						   data_len, data,
1610  						   sync_interval,
1611  						   sync_interval);
1612  			if (IS_ERR(adv))
1613  				return PTR_ERR(adv);
1614  			adv->pending = false;
1615  			added = true;
1616  		}
1617  	}
1618  
1619  	/* Start advertising */
1620  	err = hci_start_ext_adv_sync(hdev, instance);
1621  	if (err < 0)
1622  		goto fail;
1623  
1624  	err = hci_adv_bcast_annoucement(hdev, adv);
1625  	if (err < 0)
1626  		goto fail;
1627  
1628  	err = hci_set_per_adv_params_sync(hdev, instance, min_interval,
1629  					  max_interval);
1630  	if (err < 0)
1631  		goto fail;
1632  
1633  	err = hci_set_per_adv_data_sync(hdev, instance);
1634  	if (err < 0)
1635  		goto fail;
1636  
1637  	err = hci_enable_per_advertising_sync(hdev, instance);
1638  	if (err < 0)
1639  		goto fail;
1640  
1641  	return 0;
1642  
1643  fail:
1644  	if (added)
1645  		hci_remove_adv_instance(hdev, instance);
1646  
1647  	return err;
1648  }
1649  
1650  static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
1651  {
1652  	int err;
1653  
1654  	if (ext_adv_capable(hdev))
1655  		return hci_start_ext_adv_sync(hdev, instance);
1656  
1657  	err = hci_update_adv_data_sync(hdev, instance);
1658  	if (err)
1659  		return err;
1660  
1661  	err = hci_update_scan_rsp_data_sync(hdev, instance);
1662  	if (err)
1663  		return err;
1664  
1665  	return hci_enable_advertising_sync(hdev);
1666  }
1667  
1668  int hci_enable_advertising_sync(struct hci_dev *hdev)
1669  {
1670  	struct adv_info *adv_instance;
1671  	struct hci_cp_le_set_adv_param cp;
1672  	u8 own_addr_type, enable = 0x01;
1673  	bool connectable;
1674  	u16 adv_min_interval, adv_max_interval;
1675  	u32 flags;
1676  	u8 status;
1677  
1678  	if (ext_adv_capable(hdev))
1679  		return hci_enable_ext_advertising_sync(hdev,
1680  						       hdev->cur_adv_instance);
1681  
1682  	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
1683  	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1684  
1685  	/* If the "connectable" instance flag was not set, then choose between
1686  	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1687  	 */
1688  	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1689  		      mgmt_get_connectable(hdev);
1690  
1691  	if (!is_advertising_allowed(hdev, connectable))
1692  		return -EINVAL;
1693  
1694  	status = hci_disable_advertising_sync(hdev);
1695  	if (status)
1696  		return status;
1697  
1698  	/* Clear the HCI_LE_ADV bit temporarily so that the
1699  	 * hci_update_random_address knows that it's safe to go ahead
1700  	 * and write a new random address. The flag will be set back on
1701  	 * as soon as the SET_ADV_ENABLE HCI command completes.
1702  	 */
1703  	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1704  
1705  	/* Set require_privacy to true only when non-connectable
1706  	 * advertising is used. In that case it is fine to use a
1707  	 * non-resolvable private address.
1708  	 */
1709  	status = hci_update_random_address_sync(hdev, !connectable,
1710  						adv_use_rpa(hdev, flags),
1711  						&own_addr_type);
1712  	if (status)
1713  		return status;
1714  
1715  	memset(&cp, 0, sizeof(cp));
1716  
1717  	if (adv_instance) {
1718  		adv_min_interval = adv_instance->min_interval;
1719  		adv_max_interval = adv_instance->max_interval;
1720  	} else {
1721  		adv_min_interval = hdev->le_adv_min_interval;
1722  		adv_max_interval = hdev->le_adv_max_interval;
1723  	}
1724  
1725  	if (connectable) {
1726  		cp.type = LE_ADV_IND;
1727  	} else {
1728  		if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
1729  			cp.type = LE_ADV_SCAN_IND;
1730  		else
1731  			cp.type = LE_ADV_NONCONN_IND;
1732  
1733  		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1734  		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1735  			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1736  			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1737  		}
1738  	}
1739  
1740  	cp.min_interval = cpu_to_le16(adv_min_interval);
1741  	cp.max_interval = cpu_to_le16(adv_max_interval);
1742  	cp.own_address_type = own_addr_type;
1743  	cp.channel_map = hdev->le_adv_channel_map;
1744  
1745  	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
1746  				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1747  	if (status)
1748  		return status;
1749  
1750  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
1751  				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
1752  }
1753  
1754  static int enable_advertising_sync(struct hci_dev *hdev, void *data)
1755  {
1756  	return hci_enable_advertising_sync(hdev);
1757  }
1758  
1759  int hci_enable_advertising(struct hci_dev *hdev)
1760  {
1761  	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1762  	    list_empty(&hdev->adv_instances))
1763  		return 0;
1764  
1765  	return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
1766  }
1767  
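/* Example (illustrative sketch, not part of the original file): the
 * recurring pattern above -- a thin wrapper queues a *_sync helper on the
 * command-sync machinery so it runs from a context where blocking on HCI
 * command completion is allowed. example_op_sync and example_queue_op are
 * hypothetical names.
 */
static int example_op_sync(struct hci_dev *hdev, void *data)
{
	/* Runs serialized with all other cmd_sync work for this hdev */
	return hci_update_adv_data_sync(hdev, 0x00);
}

static int example_queue_op(struct hci_dev *hdev)
{
	/* NULL data and no destroy callback, as with enable_advertising_sync */
	return hci_cmd_sync_queue(hdev, example_op_sync, NULL, NULL);
}
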
1768  int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1769  				     struct sock *sk)
1770  {
1771  	int err;
1772  
1773  	if (!ext_adv_capable(hdev))
1774  		return 0;
1775  
1776  	err = hci_disable_ext_adv_instance_sync(hdev, instance);
1777  	if (err)
1778  		return err;
1779  
1780  	/* If request specifies an instance that doesn't exist, fail */
1781  	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1782  		return -EINVAL;
1783  
1784  	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
1785  					sizeof(instance), &instance, 0,
1786  					HCI_CMD_TIMEOUT, sk);
1787  }
1788  
1789  int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
1790  {
1791  	struct hci_cp_le_term_big cp;
1792  
1793  	memset(&cp, 0, sizeof(cp));
1794  	cp.handle = handle;
1795  	cp.reason = reason;
1796  
1797  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG,
1798  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1799  }
1800  
1801  static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
1802  {
1803  	DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
1804  		    HCI_MAX_EXT_AD_LENGTH);
1805  	u8 len;
1806  	struct adv_info *adv = NULL;
1807  	int err;
1808  
1809  	if (instance) {
1810  		adv = hci_find_adv_instance(hdev, instance);
1811  		if (!adv || !adv->adv_data_changed)
1812  			return 0;
1813  	}
1814  
1815  	len = eir_create_adv_data(hdev, instance, pdu->data);
1816  
1817  	pdu->length = len;
1818  	pdu->handle = adv ? adv->handle : instance;
1819  	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1820  	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1821  
1822  	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
1823  				    struct_size(pdu, data, len), pdu,
1824  				    HCI_CMD_TIMEOUT);
1825  	if (err)
1826  		return err;
1827  
1828  	/* Update data if the command succeeded */
1829  	if (adv) {
1830  		adv->adv_data_changed = false;
1831  	} else {
1832  		memcpy(hdev->adv_data, pdu->data, len);
1833  		hdev->adv_data_len = len;
1834  	}
1835  
1836  	return 0;
1837  }
1838  
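/* Editorial sketch (not part of the original file) of the DEFINE_FLEX +
 * struct_size() idiom used above: the PDU lives on the stack sized for the
 * flexible array's maximum, while only the populated bytes are sent over
 * HCI. example_flex_pdu is a hypothetical name.
 */
static int example_flex_pdu(struct hci_dev *hdev, const u8 *buf, u8 len)
{
	DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
		    HCI_MAX_EXT_AD_LENGTH);

	if (len > HCI_MAX_EXT_AD_LENGTH)
		return -EINVAL;

	memcpy(pdu->data, buf, len);
	pdu->length = len;
	pdu->handle = 0x00;
	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;

	/* struct_size() = fixed header plus len bytes of the flex array */
	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
				     struct_size(pdu, data, len), pdu,
				     HCI_CMD_TIMEOUT);
}
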
1839  static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
1840  {
1841  	struct hci_cp_le_set_adv_data cp;
1842  	u8 len;
1843  
1844  	memset(&cp, 0, sizeof(cp));
1845  
1846  	len = eir_create_adv_data(hdev, instance, cp.data);
1847  
1848  	/* There's nothing to do if the data hasn't changed */
1849  	if (hdev->adv_data_len == len &&
1850  	    memcmp(cp.data, hdev->adv_data, len) == 0)
1851  		return 0;
1852  
1853  	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1854  	hdev->adv_data_len = len;
1855  
1856  	cp.length = len;
1857  
1858  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
1859  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1860  }
1861  
1862  int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
1863  {
1864  	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1865  		return 0;
1866  
1867  	if (ext_adv_capable(hdev))
1868  		return hci_set_ext_adv_data_sync(hdev, instance);
1869  
1870  	return hci_set_adv_data_sync(hdev, instance);
1871  }
1872  
1873  int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1874  				   bool force)
1875  {
1876  	struct adv_info *adv = NULL;
1877  	u16 timeout;
1878  
1879  	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
1880  		return -EPERM;
1881  
1882  	if (hdev->adv_instance_timeout)
1883  		return -EBUSY;
1884  
1885  	adv = hci_find_adv_instance(hdev, instance);
1886  	if (!adv)
1887  		return -ENOENT;
1888  
1889  	/* A zero timeout means unlimited advertising. As long as there is
1890  	 * only one instance, duration should be ignored. We still set a timeout
1891  	 * in case further instances are being added later on.
1892  	 *
1893  	 * If the remaining lifetime of the instance is more than the duration
1894  	 * then the timeout corresponds to the duration, otherwise it will be
1895  	 * reduced to the remaining instance lifetime.
1896  	 */
1897  	if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
1898  		timeout = adv->duration;
1899  	else
1900  		timeout = adv->remaining_time;
1901  
1902  	/* The remaining time is being reduced unless the instance is being
1903  	 * advertised without time limit.
1904  	 */
1905  	if (adv->timeout)
1906  		adv->remaining_time = adv->remaining_time - timeout;
1907  
1908  	/* Only use work for scheduling instances with legacy advertising */
1909  	if (!ext_adv_capable(hdev)) {
1910  		hdev->adv_instance_timeout = timeout;
1911  		queue_delayed_work(hdev->req_workqueue,
1912  				   &hdev->adv_instance_expire,
1913  				   secs_to_jiffies(timeout));
1914  	}
1915  
1916  	/* If we're just re-scheduling the same instance again then do not
1917  	 * execute any HCI commands. This happens when a single instance is
1918  	 * being advertised.
1919  	 */
1920  	if (!force && hdev->cur_adv_instance == instance &&
1921  	    hci_dev_test_flag(hdev, HCI_LE_ADV))
1922  		return 0;
1923  
1924  	hdev->cur_adv_instance = instance;
1925  
1926  	return hci_start_adv_sync(hdev, instance);
1927  }
1928  
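/* Worked example (editorial, hypothetical numbers): with adv->timeout != 0,
 * duration = 10 s and remaining_time = 4 s, the logic above schedules the
 * instance for 4 s (its remaining lifetime) and drops remaining_time to 0,
 * after which hci_remove_advertising_sync() may remove it. A sketch of just
 * the selection step:
 */
static u16 example_adv_timeout(const struct adv_info *adv)
{
	if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
		return adv->duration;	/* unlimited, or lifetime suffices */

	return adv->remaining_time;	/* clamp to remaining lifetime */
}
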
1929  static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
1930  {
1931  	int err;
1932  
1933  	if (!ext_adv_capable(hdev))
1934  		return 0;
1935  
1936  	/* Disable instance 0x00 to disable all instances */
1937  	err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
1938  	if (err)
1939  		return err;
1940  
1941  	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
1942  					0, NULL, 0, HCI_CMD_TIMEOUT, sk);
1943  }
1944  
1945  static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
1946  {
1947  	struct adv_info *adv, *n;
1948  	int err = 0;
1949  
1950  	if (ext_adv_capable(hdev))
1951  		/* Remove all existing sets */
1952  		err = hci_clear_adv_sets_sync(hdev, sk);
1953  	if (ext_adv_capable(hdev))
1954  		return err;
1955  
1956  	/* This is safe as long as there is no command sent while the lock is
1957  	 * held.
1958  	 */
1959  	hci_dev_lock(hdev);
1960  
1961  	/* Cleanup non-ext instances */
1962  	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1963  		u8 instance = adv->instance;
1964  		int err;
1965  
1966  		if (!(force || adv->timeout))
1967  			continue;
1968  
1969  		err = hci_remove_adv_instance(hdev, instance);
1970  		if (!err)
1971  			mgmt_advertising_removed(sk, hdev, instance);
1972  	}
1973  
1974  	hci_dev_unlock(hdev);
1975  
1976  	return 0;
1977  }
1978  
1979  static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
1980  			       struct sock *sk)
1981  {
1982  	int err = 0;
1983  
1984  	/* If we use extended advertising, instance has to be removed first. */
1985  	if (ext_adv_capable(hdev))
1986  		err = hci_remove_ext_adv_instance_sync(hdev, instance, sk);
1987  	if (ext_adv_capable(hdev))
1988  		return err;
1989  
1990  	/* This is safe as long as there is no command sent while the lock is
1991  	 * held.
1992  	 */
1993  	hci_dev_lock(hdev);
1994  
1995  	err = hci_remove_adv_instance(hdev, instance);
1996  	if (!err)
1997  		mgmt_advertising_removed(sk, hdev, instance);
1998  
1999  	hci_dev_unlock(hdev);
2000  
2001  	return err;
2002  }
2003  
2004  /* For a single instance:
2005   * - force == true: The instance will be removed even when its remaining
2006   *   lifetime is not zero.
2007   * - force == false: the instance will be deactivated but kept stored unless
2008   *   the remaining lifetime is zero.
2009   *
2010   * For instance == 0x00:
2011   * - force == true: All instances will be removed regardless of their timeout
2012   *   setting.
2013   * - force == false: Only instances that have a timeout will be removed.
2014   */
2015  int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
2016  				u8 instance, bool force)
2017  {
2018  	struct adv_info *next = NULL;
2019  	int err;
2020  
2021  	/* Cancel any timeout concerning the removed instance(s). */
2022  	if (!instance || hdev->cur_adv_instance == instance)
2023  		cancel_adv_timeout(hdev);
2024  
2025  	/* Get the next instance to advertise BEFORE we remove
2026  	 * the current one. This can be the same instance again
2027  	 * if there is only one instance.
2028  	 */
2029  	if (hdev->cur_adv_instance == instance)
2030  		next = hci_get_next_instance(hdev, instance);
2031  
2032  	if (!instance) {
2033  		err = hci_clear_adv_sync(hdev, sk, force);
2034  		if (err)
2035  			return err;
2036  	} else {
2037  		struct adv_info *adv = hci_find_adv_instance(hdev, instance);
2038  
2039  		if (force || (adv && adv->timeout && !adv->remaining_time)) {
2040  			/* Don't advertise a removed instance. */
2041  			if (next && next->instance == instance)
2042  				next = NULL;
2043  
2044  			err = hci_remove_adv_sync(hdev, instance, sk);
2045  			if (err)
2046  				return err;
2047  		}
2048  	}
2049  
2050  	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
2051  		return 0;
2052  
2053  	if (next && !ext_adv_capable(hdev))
2054  		hci_schedule_adv_instance_sync(hdev, next->instance, false);
2055  
2056  	return 0;
2057  }
2058  
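/* Example (illustrative, not part of the original file): the force
 * semantics documented above, exercised from a hypothetical caller.
 */
static int example_remove_all_adv(struct hci_dev *hdev, struct sock *sk)
{
	/* instance 0x00 + force == true: remove every instance regardless
	 * of its timeout; force == false would only drop instances that
	 * have a timeout set.
	 */
	return hci_remove_advertising_sync(hdev, sk, 0x00, true);
}
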
2059  int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
2060  {
2061  	struct hci_cp_read_rssi cp;
2062  
2063  	cp.handle = handle;
2064  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
2065  					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2066  }
2067  
2068  int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
2069  {
2070  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
2071  					sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2072  }
2073  
2074  int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
2075  {
2076  	struct hci_cp_read_tx_power cp;
2077  
2078  	cp.handle = handle;
2079  	cp.type = type;
2080  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
2081  					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2082  }
2083  
2084  int hci_disable_advertising_sync(struct hci_dev *hdev)
2085  {
2086  	u8 enable = 0x00;
2087  	int err = 0;
2088  
2089  	/* If controller is not advertising we are done. */
2090  	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
2091  		return 0;
2092  
2093  	if (ext_adv_capable(hdev))
2094  		err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
2095  	if (ext_adv_capable(hdev))
2096  		return err;
2097  
2098  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
2099  				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
2100  }
2101  
2102  static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
2103  					   u8 filter_dup)
2104  {
2105  	struct hci_cp_le_set_ext_scan_enable cp;
2106  
2107  	memset(&cp, 0, sizeof(cp));
2108  	cp.enable = val;
2109  
2110  	if (hci_dev_test_flag(hdev, HCI_MESH))
2111  		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2112  	else
2113  		cp.filter_dup = filter_dup;
2114  
2115  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2116  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2117  }
2118  
2119  static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
2120  				       u8 filter_dup)
2121  {
2122  	struct hci_cp_le_set_scan_enable cp;
2123  
2124  	if (use_ext_scan(hdev))
2125  		return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);
2126  
2127  	memset(&cp, 0, sizeof(cp));
2128  	cp.enable = val;
2129  
2130  	if (val && hci_dev_test_flag(hdev, HCI_MESH))
2131  		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2132  	else
2133  		cp.filter_dup = filter_dup;
2134  
2135  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
2136  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2137  }
2138  
2139  static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
2140  {
2141  	if (!ll_privacy_capable(hdev))
2142  		return 0;
2143  
2144  	/* If the controller is already in the requested resolving state, we are done. */
2145  	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2146  		return 0;
2147  
2148  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
2149  				     sizeof(val), &val, HCI_CMD_TIMEOUT);
2150  }
2151  
2152  static int hci_scan_disable_sync(struct hci_dev *hdev)
2153  {
2154  	int err;
2155  
2156  	/* If controller is not scanning we are done. */
2157  	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2158  		return 0;
2159  
2160  	if (hdev->scanning_paused) {
2161  		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2162  		return 0;
2163  	}
2164  
2165  	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
2166  	if (err) {
2167  		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
2168  		return err;
2169  	}
2170  
2171  	return err;
2172  }
2173  
2174  static bool scan_use_rpa(struct hci_dev *hdev)
2175  {
2176  	return hci_dev_test_flag(hdev, HCI_PRIVACY);
2177  }
2178  
2179  static void hci_start_interleave_scan(struct hci_dev *hdev)
2180  {
2181  	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
2182  	queue_delayed_work(hdev->req_workqueue,
2183  			   &hdev->interleave_scan, 0);
2184  }
2185  
2186  static void cancel_interleave_scan(struct hci_dev *hdev)
2187  {
2188  	bt_dev_dbg(hdev, "cancelling interleave scan");
2189  
2190  	cancel_delayed_work_sync(&hdev->interleave_scan);
2191  
2192  	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
2193  }
2194  
2195  /* Return true if an interleave scan was started within this function,
2196   * otherwise return false.
2197   */
2198  static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
2199  {
2200  	/* Do interleaved scan only if all of the following are true:
2201  	 * - There is at least one ADV monitor
2202  	 * - At least one pending LE connection or one device to be scanned for
2203  	 * - Monitor offloading is not supported
2204  	 * If so, we should alternate between allowlist scan and one without
2205  	 * any filters to save power.
2206  	 */
2207  	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
2208  				!(list_empty(&hdev->pend_le_conns) &&
2209  				  list_empty(&hdev->pend_le_reports)) &&
2210  				hci_get_adv_monitor_offload_ext(hdev) ==
2211  				    HCI_ADV_MONITOR_EXT_NONE;
2212  	bool is_interleaving = is_interleave_scanning(hdev);
2213  
2214  	if (use_interleaving && !is_interleaving) {
2215  		hci_start_interleave_scan(hdev);
2216  		bt_dev_dbg(hdev, "starting interleave scan");
2217  		return true;
2218  	}
2219  
2220  	if (!use_interleaving && is_interleaving)
2221  		cancel_interleave_scan(hdev);
2222  
2223  	return false;
2224  }
2225  
2226  /* Removes a device from the resolving list if needed. */
2227  static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
2228  					bdaddr_t *bdaddr, u8 bdaddr_type)
2229  {
2230  	struct hci_cp_le_del_from_resolv_list cp;
2231  	struct bdaddr_list_with_irk *entry;
2232  
2233  	if (!ll_privacy_capable(hdev))
2234  		return 0;
2235  
2236  	/* Check if the IRK has been programmed */
2237  	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
2238  						bdaddr_type);
2239  	if (!entry)
2240  		return 0;
2241  
2242  	cp.bdaddr_type = bdaddr_type;
2243  	bacpy(&cp.bdaddr, bdaddr);
2244  
2245  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
2246  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2247  }
2248  
2249  static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
2250  				       bdaddr_t *bdaddr, u8 bdaddr_type)
2251  {
2252  	struct hci_cp_le_del_from_accept_list cp;
2253  	int err;
2254  
2255  	/* Check if device is on accept list before removing it */
2256  	if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
2257  		return 0;
2258  
2259  	cp.bdaddr_type = bdaddr_type;
2260  	bacpy(&cp.bdaddr, bdaddr);
2261  
2262  	/* Ignore errors when removing from the resolving list, as it is
2263  	 * likely that the device was never added.
2264  	 */
2265  	hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2266  
2267  	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
2268  				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2269  	if (err) {
2270  		bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
2271  		return err;
2272  	}
2273  
2274  	bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
2275  		   cp.bdaddr_type);
2276  
2277  	return 0;
2278  }
2279  
2280  struct conn_params {
2281  	bdaddr_t addr;
2282  	u8 addr_type;
2283  	hci_conn_flags_t flags;
2284  	u8 privacy_mode;
2285  };
2286  
2287  /* Adds a device to the resolving list if needed.
2288   * Setting params to NULL programs the local hdev->irk.
2289   */
2290  static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
2291  					struct conn_params *params)
2292  {
2293  	struct hci_cp_le_add_to_resolv_list cp;
2294  	struct smp_irk *irk;
2295  	struct bdaddr_list_with_irk *entry;
2296  	struct hci_conn_params *p;
2297  
2298  	if (!ll_privacy_capable(hdev))
2299  		return 0;
2300  
2301  	/* Attempt to program local identity address, type and irk if params is
2302  	 * NULL.
2303  	 */
2304  	if (!params) {
2305  		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
2306  			return 0;
2307  
2308  		hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
2309  		memcpy(cp.peer_irk, hdev->irk, 16);
2310  		goto done;
2311  	} else if (!(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION))
2312  		return 0;
2313  
2314  	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2315  	if (!irk)
2316  		return 0;
2317  
2318  	/* Check if the IRK has _not_ been programmed yet. */
2319  	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
2320  						&params->addr,
2321  						params->addr_type);
2322  	if (entry)
2323  		return 0;
2324  
2325  	cp.bdaddr_type = params->addr_type;
2326  	bacpy(&cp.bdaddr, &params->addr);
2327  	memcpy(cp.peer_irk, irk->val, 16);
2328  
2329  	/* Default privacy mode is always Network */
2330  	params->privacy_mode = HCI_NETWORK_PRIVACY;
2331  
2332  	rcu_read_lock();
2333  	p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2334  				      &params->addr, params->addr_type);
2335  	if (!p)
2336  		p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2337  					      &params->addr, params->addr_type);
2338  	if (p)
2339  		WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY);
2340  	rcu_read_unlock();
2341  
2342  done:
2343  	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
2344  		memcpy(cp.local_irk, hdev->irk, 16);
2345  	else
2346  		memset(cp.local_irk, 0, 16);
2347  
2348  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
2349  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2350  }
2351  
2352  /* Set Device Privacy Mode. */
2353  static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
2354  					struct conn_params *params)
2355  {
2356  	struct hci_cp_le_set_privacy_mode cp;
2357  	struct smp_irk *irk;
2358  
2359  	if (!ll_privacy_capable(hdev) ||
2360  	    !(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION))
2361  		return 0;
2362  
2363  	/* If device privacy mode has already been set there is nothing to do */
2364  	if (params->privacy_mode == HCI_DEVICE_PRIVACY)
2365  		return 0;
2366  
2367  	/* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
2368  	 * indicates that LL Privacy has been enabled and
2369  	 * HCI_OP_LE_SET_PRIVACY_MODE is supported.
2370  	 */
2371  	if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY))
2372  		return 0;
2373  
2374  	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2375  	if (!irk)
2376  		return 0;
2377  
2378  	memset(&cp, 0, sizeof(cp));
2379  	cp.bdaddr_type = irk->addr_type;
2380  	bacpy(&cp.bdaddr, &irk->bdaddr);
2381  	cp.mode = HCI_DEVICE_PRIVACY;
2382  
2383  	/* Note: params->privacy_mode is not updated since it is a copy */
2384  
2385  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
2386  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2387  }
2388  
2389  /* Adds a device to the accept list if needed. If the device uses an RPA
2390   * (has an IRK), this also attempts to program the device in the resolving
2391   * list and to properly set the privacy mode.
2392   */
2393  static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
2394  				       struct conn_params *params,
2395  				       u8 *num_entries)
2396  {
2397  	struct hci_cp_le_add_to_accept_list cp;
2398  	int err;
2399  
2400  	/* During suspend, only wakeable devices can be in acceptlist */
2401  	if (hdev->suspended &&
2402  	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
2403  		hci_le_del_accept_list_sync(hdev, &params->addr,
2404  					    params->addr_type);
2405  		return 0;
2406  	}
2407  
2408  	/* Accept list full: fall back to accepting all advertising */
2409  	if (*num_entries >= hdev->le_accept_list_size)
2410  		return -ENOSPC;
2411  
2412  	/* Attempt to program the device in the resolving list first to avoid
2413  	 * having to roll back if it fails, since the resolving list is
2414  	 * dynamic and can be smaller than the accept list.
2415  	 */
2416  	err = hci_le_add_resolve_list_sync(hdev, params);
2417  	if (err) {
2418  		bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
2419  		return err;
2420  	}
2421  
2422  	/* Set Privacy Mode */
2423  	err = hci_le_set_privacy_mode_sync(hdev, params);
2424  	if (err) {
2425  		bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
2426  		return err;
2427  	}
2428  
2429  	/* Check if already in accept list */
2430  	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
2431  				   params->addr_type))
2432  		return 0;
2433  
2434  	*num_entries += 1;
2435  	cp.bdaddr_type = params->addr_type;
2436  	bacpy(&cp.bdaddr, &params->addr);
2437  
2438  	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
2439  				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2440  	if (err) {
2441  		bt_dev_err(hdev, "Unable to add to allow list: %d", err);
2442  		/* Rollback the device from the resolving list */
2443  		hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2444  		return err;
2445  	}
2446  
2447  	bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
2448  		   cp.bdaddr_type);
2449  
2450  	return 0;
2451  }
2452  
2453  /* This function disables/pauses all advertising instances */
2454  static int hci_pause_advertising_sync(struct hci_dev *hdev)
2455  {
2456  	int err;
2457  	int old_state;
2458  
2459  	/* If advertising has already been paused there is nothing to do. */
2460  	if (hdev->advertising_paused)
2461  		return 0;
2462  
2463  	bt_dev_dbg(hdev, "Pausing directed advertising");
2464  
2465  	/* Stop directed advertising */
2466  	old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
2467  	if (old_state) {
2468  		/* When discoverable timeout triggers, then just make sure
2469  		 * the limited discoverable flag is cleared. Even in the case
2470  		 * of a timeout triggered from general discoverable, it is
2471  		 * safe to unconditionally clear the flag.
2472  		 */
2473  		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2474  		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2475  		hdev->discov_timeout = 0;
2476  	}
2477  
2478  	bt_dev_dbg(hdev, "Pausing advertising instances");
2479  
2480  	/* Call to disable any advertisements active on the controller.
2481  	 * This will succeed even if no advertisements are configured.
2482  	 */
2483  	err = hci_disable_advertising_sync(hdev);
2484  	if (err)
2485  		return err;
2486  
2487  	/* If we are using software rotation, pause the loop */
2488  	if (!ext_adv_capable(hdev))
2489  		cancel_adv_timeout(hdev);
2490  
2491  	hdev->advertising_paused = true;
2492  	hdev->advertising_old_state = old_state;
2493  
2494  	return 0;
2495  }
2496  
2497  /* This function enables all user advertising instances */
2498  static int hci_resume_advertising_sync(struct hci_dev *hdev)
2499  {
2500  	struct adv_info *adv, *tmp;
2501  	int err;
2502  
2503  	/* If advertising has not been paused there is nothing to do. */
2504  	if (!hdev->advertising_paused)
2505  		return 0;
2506  
2507  	/* Resume directed advertising */
2508  	hdev->advertising_paused = false;
2509  	if (hdev->advertising_old_state) {
2510  		hci_dev_set_flag(hdev, HCI_ADVERTISING);
2511  		hdev->advertising_old_state = 0;
2512  	}
2513  
2514  	bt_dev_dbg(hdev, "Resuming advertising instances");
2515  
2516  	if (ext_adv_capable(hdev)) {
2517  		/* Call for each tracked instance to be re-enabled */
2518  		list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
2519  			err = hci_enable_ext_advertising_sync(hdev,
2520  							      adv->instance);
2521  			if (!err)
2522  				continue;
2523  
2524  			/* If the instance cannot be resumed remove it */
2525  			hci_remove_ext_adv_instance_sync(hdev, adv->instance,
2526  							 NULL);
2527  		}
2528  	} else {
2529  		/* Schedule for most recent instance to be restarted and begin
2530  		 * the software rotation loop
2531  		 */
2532  		err = hci_schedule_adv_instance_sync(hdev,
2533  						     hdev->cur_adv_instance,
2534  						     true);
2535  	}
2536  
2537  	hdev->advertising_paused = false;
2538  
2539  	return err;
2540  }
2541  
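/* Example (illustrative sketch, not part of the original file): the
 * pause/resume bracket used whenever the resolving list must be modified,
 * since controllers reject resolving list changes while advertising (see
 * hci_update_accept_list_sync() below for the real usage).
 * example_modify_resolv_list is a hypothetical name.
 */
static int example_modify_resolv_list(struct hci_dev *hdev,
				      struct conn_params *params)
{
	int err;

	err = hci_pause_advertising_sync(hdev);
	if (err)
		return err;

	err = hci_le_add_resolve_list_sync(hdev, params);

	/* Resume even if the modification above failed */
	hci_resume_advertising_sync(hdev);

	return err;
}
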
2542  static int hci_pause_addr_resolution(struct hci_dev *hdev)
2543  {
2544  	int err;
2545  
2546  	if (!ll_privacy_capable(hdev))
2547  		return 0;
2548  
2549  	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2550  		return 0;
2551  
2552  	/* Cannot disable addr resolution if scanning is enabled or
2553  	 * when initiating an LE connection.
2554  	 */
2555  	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2556  	    hci_lookup_le_connect(hdev)) {
2557  		bt_dev_err(hdev, "Command not allowed when scan/LE connect");
2558  		return -EPERM;
2559  	}
2560  
2561  	/* Cannot disable addr resolution if advertising is enabled. */
2562  	err = hci_pause_advertising_sync(hdev);
2563  	if (err) {
2564  		bt_dev_err(hdev, "Pause advertising failed: %d", err);
2565  		return err;
2566  	}
2567  
2568  	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2569  	if (err)
2570  		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
2571  			   err);
2572  
2573  	/* Return if address resolution was disabled and an RPA is being used. */
2574  	if (!err && scan_use_rpa(hdev))
2575  		return 0;
2576  
2577  	hci_resume_advertising_sync(hdev);
2578  	return err;
2579  }
2580  
2581  struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
2582  					     bool extended, struct sock *sk)
2583  {
2584  	u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
2585  					HCI_OP_READ_LOCAL_OOB_DATA;
2586  
2587  	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
2588  }
2589  
2590  static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
2591  {
2592  	struct hci_conn_params *params;
2593  	struct conn_params *p;
2594  	size_t i;
2595  
2596  	rcu_read_lock();
2597  
2598  	i = 0;
2599  	list_for_each_entry_rcu(params, list, action)
2600  		++i;
2601  	*n = i;
2602  
2603  	rcu_read_unlock();
2604  
2605  	p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
2606  	if (!p)
2607  		return NULL;
2608  
2609  	rcu_read_lock();
2610  
2611  	i = 0;
2612  	list_for_each_entry_rcu(params, list, action) {
2613  		/* Racing adds are handled in next scan update */
2614  		if (i >= *n)
2615  			break;
2616  
2617  		/* No hdev->lock, but: addr, addr_type are immutable.
2618  		 * privacy_mode is only written by us or in
2619  		 * hci_cc_le_set_privacy_mode that we wait for.
2620  		 * We should be idempotent so MGMT updating flags
2621  		 * while we are processing is OK.
2622  		 */
2623  		bacpy(&p[i].addr, &params->addr);
2624  		p[i].addr_type = params->addr_type;
2625  		p[i].flags = READ_ONCE(params->flags);
2626  		p[i].privacy_mode = READ_ONCE(params->privacy_mode);
2627  		++i;
2628  	}
2629  
2630  	rcu_read_unlock();
2631  
2632  	*n = i;
2633  	return p;
2634  }
2635  
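/* Example (illustrative, not part of the original file): consuming the
 * snapshot returned by conn_params_copy(). The copy lets the caller sleep
 * on HCI commands while iterating, without holding RCU or hdev->lock.
 * example_walk_pend_conns is a hypothetical name.
 */
static int example_walk_pend_conns(struct hci_dev *hdev)
{
	struct conn_params *params;
	u8 num_entries = 0;
	size_t i, n;
	int err = 0;

	params = conn_params_copy(&hdev->pend_le_conns, &n);
	if (!params)
		return -ENOMEM;

	for (i = 0; i < n && !err; ++i)
		err = hci_le_add_accept_list_sync(hdev, &params[i],
						  &num_entries);

	kvfree(params);

	return err;
}
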
2636  /* Clear LE Accept List */
2637  static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
2638  {
2639  	if (!(hdev->commands[26] & 0x80))
2640  		return 0;
2641  
2642  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
2643  				     HCI_CMD_TIMEOUT);
2644  }
2645  
2646  /* Device must not be scanning when updating the accept list.
2647   *
2648   * Update is done using the following sequence:
2649   *
2650   * ll_privacy_capable((Disable Advertising) -> Disable Resolving List) ->
2651   * Remove Devices From Accept List ->
2652   * (has IRK && ll_privacy_capable(Remove Devices From Resolving List))->
2653   * Add Devices to Accept List ->
2654   * (has IRK && ll_privacy_capable(Add Devices To Resolving List)) ->
2655   * ll_privacy_capable(Enable Resolving List -> (Enable Advertising)) ->
2656   * Enable Scanning
2657   *
2658   * In case of failure advertising shall be restored to its original state
2659   * and the returned filter policy shall disable use of the accept list,
2660   * since either the accept or the resolving list could not be programmed.
2661   *
2662   */
2663  static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
2664  {
2665  	struct conn_params *params;
2666  	struct bdaddr_list *b, *t;
2667  	u8 num_entries = 0;
2668  	bool pend_conn, pend_report;
2669  	u8 filter_policy;
2670  	size_t i, n;
2671  	int err;
2672  
2673  	/* Pause advertising if resolving list can be used as controllers
2674  	 * cannot accept resolving list modifications while advertising.
2675  	 */
2676  	if (ll_privacy_capable(hdev)) {
2677  		err = hci_pause_advertising_sync(hdev);
2678  		if (err) {
2679  			bt_dev_err(hdev, "pause advertising failed: %d", err);
2680  			return 0x00;
2681  		}
2682  	}
2683  
2684  	/* Disable address resolution while reprogramming accept list since
2685  	 * devices that do have an IRK will be programmed in the resolving list
2686  	 * when LL Privacy is enabled.
2687  	 */
2688  	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2689  	if (err) {
2690  		bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
2691  		goto done;
2692  	}
2693  
2694  	/* Force address filtering if PA Sync is in progress */
2695  	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
2696  		struct hci_cp_le_pa_create_sync *sent;
2697  
2698  		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC);
2699  		if (sent) {
2700  			struct conn_params pa;
2701  
2702  			memset(&pa, 0, sizeof(pa));
2703  
2704  			bacpy(&pa.addr, &sent->addr);
2705  			pa.addr_type = sent->addr_type;
2706  
2707  			/* Clear first since there could be addresses left
2708  			 * behind.
2709  			 */
2710  			hci_le_clear_accept_list_sync(hdev);
2711  
2712  			num_entries = 1;
2713  			err = hci_le_add_accept_list_sync(hdev, &pa,
2714  							  &num_entries);
2715  			goto done;
2716  		}
2717  	}
2718  
2719  	/* Go through the current accept list programmed into the
2720  	 * controller one by one and check if that address is connected or is
2721  	 * still in the list of pending connections or list of devices to
2722  	 * report. If not present in either list, then remove it from
2723  	 * the controller.
2724  	 */
2725  	list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
2726  		if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
2727  			continue;
2728  
2729  		/* Pointers not dereferenced, no locks needed */
2730  		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2731  						      &b->bdaddr,
2732  						      b->bdaddr_type);
2733  		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2734  							&b->bdaddr,
2735  							b->bdaddr_type);
2736  
2737  		/* If the device is not likely to connect or report,
2738  		 * remove it from the acceptlist.
2739  		 */
2740  		if (!pend_conn && !pend_report) {
2741  			hci_le_del_accept_list_sync(hdev, &b->bdaddr,
2742  						    b->bdaddr_type);
2743  			continue;
2744  		}
2745  
2746  		num_entries++;
2747  	}
2748  
2749  	/* Since all accept list entries that are no longer valid have been
2750  	 * removed, walk through the list of pending connections
2751  	 * and ensure that any new device gets programmed into
2752  	 * the controller.
2753  	 *
2754  	 * If the list of the devices is larger than the list of
2755  	 * available accept list entries in the controller, then
2756  	 * just abort and return a filter policy value to not use the
2757  	 * accept list.
2758  	 *
2759  	 * The list and params may be mutated while we wait for events,
2760  	 * so make a copy and iterate it.
2761  	 */
2762  
2763  	params = conn_params_copy(&hdev->pend_le_conns, &n);
2764  	if (!params) {
2765  		err = -ENOMEM;
2766  		goto done;
2767  	}
2768  
2769  	for (i = 0; i < n; ++i) {
2770  		err = hci_le_add_accept_list_sync(hdev, &params[i],
2771  						  &num_entries);
2772  		if (err) {
2773  			kvfree(params);
2774  			goto done;
2775  		}
2776  	}
2777  
2778  	kvfree(params);
2779  
2780  	/* After adding all new pending connections, walk through
2781  	 * the list of pending reports and also add these to the
2782  	 * accept list if there is still space. Abort if space runs out.
2783  	 */
2784  
2785  	params = conn_params_copy(&hdev->pend_le_reports, &n);
2786  	if (!params) {
2787  		err = -ENOMEM;
2788  		goto done;
2789  	}
2790  
2791  	for (i = 0; i < n; ++i) {
2792  		err = hci_le_add_accept_list_sync(hdev, &params[i],
2793  						  &num_entries);
2794  		if (err) {
2795  			kvfree(params);
2796  			goto done;
2797  		}
2798  	}
2799  
2800  	kvfree(params);
2801  
2802  	/* Use the allowlist unless the following conditions are all true:
2803  	 * - We are not currently suspending
2804  	 * - There are 1 or more ADV monitors registered and it's not offloaded
2805  	 * - Interleaved scanning is not currently using the allowlist
2806  	 */
2807  	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
2808  	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
2809  	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
2810  		err = -EINVAL;
2811  
2812  done:
2813  	filter_policy = err ? 0x00 : 0x01;
2814  
2815  	/* Enable address resolution when LL Privacy is enabled. */
2816  	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
2817  	if (err)
2818  		bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
2819  
2820  	/* Resume advertising if it was paused */
2821  	if (ll_privacy_capable(hdev))
2822  		hci_resume_advertising_sync(hdev);
2823  
2824  	/* Select filter policy to use accept list */
2825  	return filter_policy;
2826  }
2827  
2828  static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
2829  				   u8 type, u16 interval, u16 window)
2830  {
2831  	cp->type = type;
2832  	cp->interval = cpu_to_le16(interval);
2833  	cp->window = cpu_to_le16(window);
2834  }
2835  
2836  static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
2837  					  u16 interval, u16 window,
2838  					  u8 own_addr_type, u8 filter_policy)
2839  {
2840  	struct hci_cp_le_set_ext_scan_params *cp;
2841  	struct hci_cp_le_scan_phy_params *phy;
2842  	u8 data[sizeof(*cp) + sizeof(*phy) * 2];
2843  	u8 num_phy = 0x00;
2844  
2845  	cp = (void *)data;
2846  	phy = (void *)cp->data;
2847  
2848  	memset(data, 0, sizeof(data));
2849  
2850  	cp->own_addr_type = own_addr_type;
2851  	cp->filter_policy = filter_policy;
2852  
2853  	/* If PA Sync is in progress, select the PHY based on the
2854  	 * hci_conn.iso_qos.
2855  	 */
2856  	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
2857  		struct hci_cp_le_add_to_accept_list *sent;
2858  
2859  		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
2860  		if (sent) {
2861  			struct hci_conn *conn;
2862  
2863  			conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK,
2864  						       &sent->bdaddr);
2865  			if (conn) {
2866  				struct bt_iso_qos *qos = &conn->iso_qos;
2867  
2868  				if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
2869  				    qos->bcast.in.phy & BT_ISO_PHY_2M) {
2870  					cp->scanning_phys |= LE_SCAN_PHY_1M;
2871  					hci_le_scan_phy_params(phy, type,
2872  							       interval,
2873  							       window);
2874  					num_phy++;
2875  					phy++;
2876  				}
2877  
2878  				if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
2879  					cp->scanning_phys |= LE_SCAN_PHY_CODED;
2880  					hci_le_scan_phy_params(phy, type,
2881  							       interval * 3,
2882  							       window * 3);
2883  					num_phy++;
2884  					phy++;
2885  				}
2886  
2887  				if (num_phy)
2888  					goto done;
2889  			}
2890  		}
2891  	}
2892  
2893  	if (scan_1m(hdev) || scan_2m(hdev)) {
2894  		cp->scanning_phys |= LE_SCAN_PHY_1M;
2895  		hci_le_scan_phy_params(phy, type, interval, window);
2896  		num_phy++;
2897  		phy++;
2898  	}
2899  
2900  	if (scan_coded(hdev)) {
2901  		cp->scanning_phys |= LE_SCAN_PHY_CODED;
2902  		hci_le_scan_phy_params(phy, type, interval * 3, window * 3);
2903  		num_phy++;
2904  		phy++;
2905  	}
2906  
2907  done:
2908  	if (!num_phy)
2909  		return -EINVAL;
2910  
2911  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
2912  				     sizeof(*cp) + sizeof(*phy) * num_phy,
2913  				     data, HCI_CMD_TIMEOUT);
2914  }
2915  
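/* Layout note (editorial): the command payload built above is the fixed
 * header followed by one hci_cp_le_scan_phy_params entry per bit set in
 * scanning_phys, in ascending bit order -- hence the data[] buffer sized
 * for at most two PHY entries (1M and Coded).
 */
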
2916  static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
2917  				      u16 interval, u16 window,
2918  				      u8 own_addr_type, u8 filter_policy)
2919  {
2920  	struct hci_cp_le_set_scan_param cp;
2921  
2922  	if (use_ext_scan(hdev))
2923  		return hci_le_set_ext_scan_param_sync(hdev, type, interval,
2924  						      window, own_addr_type,
2925  						      filter_policy);
2926  
2927  	memset(&cp, 0, sizeof(cp));
2928  	cp.type = type;
2929  	cp.interval = cpu_to_le16(interval);
2930  	cp.window = cpu_to_le16(window);
2931  	cp.own_address_type = own_addr_type;
2932  	cp.filter_policy = filter_policy;
2933  
2934  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
2935  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2936  }
2937  
2938  static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
2939  			       u16 window, u8 own_addr_type, u8 filter_policy,
2940  			       u8 filter_dup)
2941  {
2942  	int err;
2943  
2944  	if (hdev->scanning_paused) {
2945  		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2946  		return 0;
2947  	}
2948  
2949  	err = hci_le_set_scan_param_sync(hdev, type, interval, window,
2950  					 own_addr_type, filter_policy);
2951  	if (err)
2952  		return err;
2953  
2954  	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
2955  }
2956  
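/* Example (illustrative sketch, not part of the original file): wiring the
 * accept list update into a passive scan. Interval/window values are
 * placeholders in 0.625 ms units (0x0060 = 60 ms, 0x0030 = 30 ms), and
 * example_passive_scan_with_list is a hypothetical name.
 */
static int example_passive_scan_with_list(struct hci_dev *hdev)
{
	/* 0x00 = accept all (host filters), 0x01 = use the accept list */
	u8 filter_policy = hci_update_accept_list_sync(hdev);

	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, 0x0060, 0x0030,
				   ADDR_LE_DEV_PUBLIC, filter_policy,
				   LE_SCAN_FILTER_DUP_ENABLE);
}
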
2957  static int hci_passive_scan_sync(struct hci_dev *hdev)
2958  {
2959  	u8 own_addr_type;
2960  	u8 filter_policy;
2961  	u16 window, interval;
2962  	u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE;
2963  	int err;
2964  
2965  	if (hdev->scanning_paused) {
2966  		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2967  		return 0;
2968  	}
2969  
2970  	err = hci_scan_disable_sync(hdev);
2971  	if (err) {
2972  		bt_dev_err(hdev, "disable scanning failed: %d", err);
2973  		return err;
2974  	}
2975  
2976  	/* Set require_privacy to false since no SCAN_REQ are sent
2977  	 * during passive scanning. Not using a non-resolvable address
2978  	 * here is important so that peer devices using direct
2979  	 * advertising with our address will be correctly reported
2980  	 * by the controller.
2981  	 */
2982  	if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
2983  					   &own_addr_type))
2984  		return 0;
2985  
2986  	if (hdev->enable_advmon_interleave_scan &&
2987  	    hci_update_interleaved_scan_sync(hdev))
2988  		return 0;
2989  
2990  	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
2991  
2992  	/* Adding or removing entries from the accept list must
2993  	 * happen before enabling scanning. The controller does
2994  	 * not allow accept list modification while scanning.
2995  	 */
2996  	filter_policy = hci_update_accept_list_sync(hdev);
2997  
2998  	/* If suspended and filter_policy set to 0x00 (no acceptlist) then
2999  	 * passive scanning cannot be started since that would require the host
3000  	 * to be woken up to process the reports.
3001  	 */
3002  	if (hdev->suspended && !filter_policy) {
3003  		/* If the accept list is empty there is no need to scan
3004  		 * while suspended.
3005  		 */
3006  		if (list_empty(&hdev->le_accept_list))
3007  			return 0;
3008  
3009  		/* If there are devices in the accept_list it means some
3010  		 * devices could not be programmed, which in the non-suspended
3011  		 * case means filter_policy needs to be set to 0x00 so the
3012  		 * host does the filtering. Since we are handling the suspended
3013  		 * case here, we can ignore the devices needing host filtering
3014  		 * and allow the devices in the acceptlist to wake up the system.
3015  		 */
3016  		filter_policy = 0x01;
3017  	}
3018  
3019  	/* When the controller is using resolvable random addresses, and
3020  	 * thus has LE privacy enabled, controllers that support the
3021  	 * Extended Scanner Filter Policies can also handle directed
3022  	 * advertising.
3023  	 *
3024  	 * So instead of using filter policies 0x00 (no acceptlist)
3025  	 * and 0x01 (acceptlist enabled) use the new filter policies
3026  	 * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
3027  	 */
3028  	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
3029  	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
3030  		filter_policy |= 0x02;
3031  
3032  	if (hdev->suspended) {
3033  		window = hdev->le_scan_window_suspend;
3034  		interval = hdev->le_scan_int_suspend;
3035  	} else if (hci_is_le_conn_scanning(hdev)) {
3036  		window = hdev->le_scan_window_connect;
3037  		interval = hdev->le_scan_int_connect;
3038  	} else if (hci_is_adv_monitoring(hdev)) {
3039  		window = hdev->le_scan_window_adv_monitor;
3040  		interval = hdev->le_scan_int_adv_monitor;
3041  
3042  		/* Disable duplicates filter when scanning for advertisement
3043  		 * monitors, for the following reasons.
3044  		 *
3045  		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
3046  		 * controllers ignore RSSI_Sampling_Period when the duplicates
3047  		 * filter is enabled.
3048  		 *
3049  		 * For SW pattern filtering, when we're not doing interleaved
3050  		 * scanning, it is necessary to disable duplicates filter,
3051  		 * otherwise hosts can only receive one advertisement and it's
3052  		 * impossible to know if a peer is still in range.
3053  		 */
3054  		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
3055  	} else {
3056  		window = hdev->le_scan_window;
3057  		interval = hdev->le_scan_interval;
3058  	}
3059  
3060  	/* Disable all filtering for Mesh */
3061  	if (hci_dev_test_flag(hdev, HCI_MESH)) {
3062  		filter_policy = 0;
3063  		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
3064  	}
3065  
3066  	bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
3067  
3068  	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
3069  				   own_addr_type, filter_policy, filter_dups);
3070  }
3071  
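/* Summary (editorial) of the scan parameter selection above:
 *   suspended            -> le_scan_int_suspend / le_scan_window_suspend
 *   LE connect scanning  -> le_scan_int_connect / le_scan_window_connect
 *   ADV monitor active   -> le_scan_int_adv_monitor / window_adv_monitor,
 *                           duplicates filter disabled
 *   otherwise            -> le_scan_interval / le_scan_window
 * Mesh additionally forces filter_policy = 0 and no duplicates filter.
 */
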
3072  /* This function controls the passive scanning based on hdev->pend_le_conns
3073   * list. If there are pending LE connections we start the background scanning,
3074   * otherwise we stop it in the following sequence:
3075   *
3076   * If there are devices to scan:
3077   *
3078   * Disable Scanning -> Update Accept List ->
3079   * ll_privacy_capable((Disable Advertising) -> Disable Resolving List ->
3080   * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
3081   * Enable Scanning
3082   *
3083   * Otherwise:
3084   *
3085   * Disable Scanning
3086   */
3087  int hci_update_passive_scan_sync(struct hci_dev *hdev)
3088  {
3089  	int err;
3090  
3091  	if (!test_bit(HCI_UP, &hdev->flags) ||
3092  	    test_bit(HCI_INIT, &hdev->flags) ||
3093  	    hci_dev_test_flag(hdev, HCI_SETUP) ||
3094  	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
3095  	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
3096  	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
3097  		return 0;
3098  
3099  	/* No point in doing scanning if LE support hasn't been enabled */
3100  	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
3101  		return 0;
3102  
3103  	/* If discovery is active don't interfere with it */
3104  	if (hdev->discovery.state != DISCOVERY_STOPPED)
3105  		return 0;
3106  
3107  	/* Reset RSSI and UUID filters when starting background scanning
3108  	 * since these filters are meant for service discovery only.
3109  	 *
3110  	 * The Start Discovery and Start Service Discovery operations
3111  	 * ensure to set proper values for RSSI threshold and UUID
3112  	 * filter list. So it is safe to just reset them here.
3113  	 */
3114  	hci_discovery_filter_clear(hdev);
3115  
3116  	bt_dev_dbg(hdev, "ADV monitoring is %s",
3117  		   hci_is_adv_monitoring(hdev) ? "on" : "off");
3118  
3119  	if (!hci_dev_test_flag(hdev, HCI_MESH) &&
3120  	    list_empty(&hdev->pend_le_conns) &&
3121  	    list_empty(&hdev->pend_le_reports) &&
3122  	    !hci_is_adv_monitoring(hdev) &&
3123  	    !hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
3124  		/* If there are no pending LE connections, no devices
3125  		 * to be scanned for and no ADV monitors, we should stop the
3126  		 * background scanning.
3127  		 */
3128  
3129  		bt_dev_dbg(hdev, "stopping background scanning");
3130  
3131  		err = hci_scan_disable_sync(hdev);
3132  		if (err)
3133  			bt_dev_err(hdev, "stop background scanning failed: %d",
3134  				   err);
3135  	} else {
3136  		/* If there is at least one pending LE connection, we should
3137  		 * keep the background scan running.
3138  		 */
3139  
3140  		/* If controller is connecting, we should not start scanning
3141  		 * since some controllers are not able to scan and connect at
3142  		 * the same time.
3143  		 */
3144  		if (hci_lookup_le_connect(hdev))
3145  			return 0;
3146  
3147  		bt_dev_dbg(hdev, "start background scanning");
3148  
3149  		err = hci_passive_scan_sync(hdev);
3150  		if (err)
3151  			bt_dev_err(hdev, "start background scanning failed: %d",
3152  				   err);
3153  	}
3154  
3155  	return err;
3156  }
3157  
3158  static int update_scan_sync(struct hci_dev *hdev, void *data)
3159  {
3160  	return hci_update_scan_sync(hdev);
3161  }
3162  
3163  int hci_update_scan(struct hci_dev *hdev)
3164  {
3165  	return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL);
3166  }
3167  
3168  static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
3169  {
3170  	return hci_update_passive_scan_sync(hdev);
3171  }
3172  
3173  int hci_update_passive_scan(struct hci_dev *hdev)
3174  {
3175  	/* Only queue if it would have any effect */
3176  	if (!test_bit(HCI_UP, &hdev->flags) ||
3177  	    test_bit(HCI_INIT, &hdev->flags) ||
3178  	    hci_dev_test_flag(hdev, HCI_SETUP) ||
3179  	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
3180  	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
3181  	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
3182  		return 0;
3183  
3184  	return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
3185  				       NULL);
3186  }
3187  
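/* Usage note (editorial): callers typically invoke hci_update_passive_scan()
 * right after mutating hdev->pend_le_conns or hdev->pend_le_reports;
 * hci_cmd_sync_queue_once() collapses duplicate requests so a batch of
 * changes results in a single passive scan update.
 */
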
3188  int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
3189  {
3190  	int err;
3191  
3192  	if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
3193  		return 0;
3194  
3195  	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
3196  				    sizeof(val), &val, HCI_CMD_TIMEOUT);
3197  
3198  	if (!err) {
3199  		if (val) {
3200  			hdev->features[1][0] |= LMP_HOST_SC;
3201  			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
3202  		} else {
3203  			hdev->features[1][0] &= ~LMP_HOST_SC;
3204  			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
3205  		}
3206  	}
3207  
3208  	return err;
3209  }
3210  
3211  int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
3212  {
3213  	int err;
3214  
3215  	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
3216  	    lmp_host_ssp_capable(hdev))
3217  		return 0;
3218  
3219  	if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
3220  		__hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
3221  				      sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3222  	}
3223  
3224  	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3225  				    sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3226  	if (err)
3227  		return err;
3228  
3229  	return hci_write_sc_support_sync(hdev, 0x01);
3230  }
3231  
3232  int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul)
3233  {
3234  	struct hci_cp_write_le_host_supported cp;
3235  
3236  	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
3237  	    !lmp_bredr_capable(hdev))
3238  		return 0;
3239  
3240  	/* Check first if we already have the right host state
3241  	 * (host features set)
3242  	 */
3243  	if (le == lmp_host_le_capable(hdev) &&
3244  	    simul == lmp_host_le_br_capable(hdev))
3245  		return 0;
3246  
3247  	memset(&cp, 0, sizeof(cp));
3248  
3249  	cp.le = le;
3250  	cp.simul = simul;
3251  
3252  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3253  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3254  }
3255  
3256  static int hci_powered_update_adv_sync(struct hci_dev *hdev)
3257  {
3258  	struct adv_info *adv, *tmp;
3259  	int err;
3260  
3261  	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
3262  		return 0;
3263  
3264  	/* If RPA Resolution has not been enabled yet it means the
3265  	 * resolving list is empty and we should attempt to program the
3266  	 * local IRK in order to support using own_addr_type
3267  	 * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
3268  	 */
3269  	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
3270  		hci_le_add_resolve_list_sync(hdev, NULL);
3271  		hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
3272  	}
3273  
3274  	/* Make sure the controller has a good default for
3275  	 * advertising data. This also applies to the case
3276  	 * where BR/EDR was toggled during the AUTO_OFF phase.
3277  	 */
3278  	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3279  	    list_empty(&hdev->adv_instances)) {
3280  		if (ext_adv_capable(hdev)) {
3281  			err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
3282  			if (!err)
3283  				hci_update_scan_rsp_data_sync(hdev, 0x00);
3284  		} else {
3285  			err = hci_update_adv_data_sync(hdev, 0x00);
3286  			if (!err)
3287  				hci_update_scan_rsp_data_sync(hdev, 0x00);
3288  		}
3289  
3290  		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
3291  			hci_enable_advertising_sync(hdev);
3292  	}
3293  
3294  	/* Call for each tracked instance to be scheduled */
3295  	list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
3296  		hci_schedule_adv_instance_sync(hdev, adv->instance, true);
3297  
3298  	return 0;
3299  }
3300  
3301  static int hci_write_auth_enable_sync(struct hci_dev *hdev)
3302  {
3303  	u8 link_sec;
3304  
3305  	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3306  	if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
3307  		return 0;
3308  
3309  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
3310  				     sizeof(link_sec), &link_sec,
3311  				     HCI_CMD_TIMEOUT);
3312  }
3313  
3314  int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
3315  {
3316  	struct hci_cp_write_page_scan_activity cp;
3317  	u8 type;
3318  	int err = 0;
3319  
3320  	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3321  		return 0;
3322  
3323  	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3324  		return 0;
3325  
3326  	memset(&cp, 0, sizeof(cp));
3327  
3328  	if (enable) {
3329  		type = PAGE_SCAN_TYPE_INTERLACED;
3330  
3331  		/* 160 msec page scan interval */
3332  		cp.interval = cpu_to_le16(0x0100);
3333  	} else {
3334  		type = hdev->def_page_scan_type;
3335  		cp.interval = cpu_to_le16(hdev->def_page_scan_int);
3336  	}
3337  
3338  	cp.window = cpu_to_le16(hdev->def_page_scan_window);
3339  
3340  	if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
3341  	    __cpu_to_le16(hdev->page_scan_window) != cp.window) {
3342  		err = __hci_cmd_sync_status(hdev,
3343  					    HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
3344  					    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3345  		if (err)
3346  			return err;
3347  	}
3348  
3349  	if (hdev->page_scan_type != type)
3350  		err = __hci_cmd_sync_status(hdev,
3351  					    HCI_OP_WRITE_PAGE_SCAN_TYPE,
3352  					    sizeof(type), &type,
3353  					    HCI_CMD_TIMEOUT);
3354  
3355  	return err;
3356  }
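
/* Worked example of the timing above (illustrative; assumes the standard
 * HCI page scan unit of 0.625 ms per slot):
 *
 *	interval = 0x0100 = 256 slots * 0.625 ms = 160 ms
 *
 * so enabling fast connectable pairs interlaced scanning with a short
 * 160 ms interval, trading power consumption for faster connection setup.
 */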
3357  
3358  static bool disconnected_accept_list_entries(struct hci_dev *hdev)
3359  {
3360  	struct bdaddr_list *b;
3361  
3362  	list_for_each_entry(b, &hdev->accept_list, list) {
3363  		struct hci_conn *conn;
3364  
3365  		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
3366  		if (!conn)
3367  			return true;
3368  
3369  		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3370  			return true;
3371  	}
3372  
3373  	return false;
3374  }
3375  
3376  static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
3377  {
3378  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3379  					    sizeof(val), &val,
3380  					    HCI_CMD_TIMEOUT);
3381  }
3382  
3383  int hci_update_scan_sync(struct hci_dev *hdev)
3384  {
3385  	u8 scan;
3386  
3387  	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3388  		return 0;
3389  
3390  	if (!hdev_is_powered(hdev))
3391  		return 0;
3392  
3393  	if (mgmt_powering_down(hdev))
3394  		return 0;
3395  
3396  	if (hdev->scanning_paused)
3397  		return 0;
3398  
3399  	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
3400  	    disconnected_accept_list_entries(hdev))
3401  		scan = SCAN_PAGE;
3402  	else
3403  		scan = SCAN_DISABLED;
3404  
3405  	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
3406  		scan |= SCAN_INQUIRY;
3407  
3408  	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
3409  	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
3410  		return 0;
3411  
3412  	return hci_write_scan_enable_sync(hdev, scan);
3413  }
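
/* Note on the scan value written above: it is a bit mask per the HCI
 * Write Scan Enable definition, with SCAN_INQUIRY (0x01) enabling
 * inquiry scan (discoverable) and SCAN_PAGE (0x02) enabling page scan
 * (connectable); a connectable and discoverable device therefore writes
 * SCAN_PAGE | SCAN_INQUIRY = 0x03.
 */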
3414  
3415  int hci_update_name_sync(struct hci_dev *hdev)
3416  {
3417  	struct hci_cp_write_local_name cp;
3418  
3419  	memset(&cp, 0, sizeof(cp));
3420  
3421  	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3422  
3423  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
3424  					    sizeof(cp), &cp,
3425  					    HCI_CMD_TIMEOUT);
3426  }
3427  
3428  /* This function performs the powered update HCI command sequence after the
3429   * HCI init sequence, which ends up resetting all states; the sequence is:
3430   *
3431   * HCI_SSP_ENABLED(Enable SSP)
3432   * HCI_LE_ENABLED(Enable LE)
3433   * HCI_LE_ENABLED(ll_privacy_capable(Add local IRK to Resolving List) ->
3434   * Update adv data)
3435   * Enable Authentication
3436   * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
3437   * Set Name -> Set EIR)
3438   * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address)
3439   */
3440  int hci_powered_update_sync(struct hci_dev *hdev)
3441  {
3442  	int err;
3443  
3444  	/* Register the available SMP channels (BR/EDR and LE) only when
3445  	 * successfully powering on the controller. This late
3446  	 * registration is required so that LE SMP can clearly decide if
3447  	 * the public address or static address is used.
3448  	 */
3449  	smp_register(hdev);
3450  
3451  	err = hci_write_ssp_mode_sync(hdev, 0x01);
3452  	if (err)
3453  		return err;
3454  
3455  	err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
3456  	if (err)
3457  		return err;
3458  
3459  	err = hci_powered_update_adv_sync(hdev);
3460  	if (err)
3461  		return err;
3462  
3463  	err = hci_write_auth_enable_sync(hdev);
3464  	if (err)
3465  		return err;
3466  
3467  	if (lmp_bredr_capable(hdev)) {
3468  		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3469  			hci_write_fast_connectable_sync(hdev, true);
3470  		else
3471  			hci_write_fast_connectable_sync(hdev, false);
3472  		hci_update_scan_sync(hdev);
3473  		hci_update_class_sync(hdev);
3474  		hci_update_name_sync(hdev);
3475  		hci_update_eir_sync(hdev);
3476  	}
3477  
3478  	/* If forcing static address is in use or there is no public
3479  	 * address use the static address as random address (but skip
3480  	 * the HCI command if the current random address is already the
3481  	 * static one.
3482  	 * static one).
3483  	 * In case BR/EDR has been disabled on a dual-mode controller
3484  	 * and a static address has been configured, then use that
3485  	 * address instead of the public BR/EDR address.
3486  	 */
3487  	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3488  	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3489  	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) {
3490  		if (bacmp(&hdev->static_addr, BDADDR_ANY))
3491  			return hci_set_random_addr_sync(hdev,
3492  							&hdev->static_addr);
3493  	}
3494  
3495  	return 0;
3496  }
3497  
3498  /**
3499   * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
3500   *				       (BD_ADDR) for a HCI device from
3501   *				       a firmware node property.
3502   * @hdev:	The HCI device
3503   *
3504   * Search the firmware node for 'local-bd-address'.
3505   *
3506   * All-zero BD addresses are rejected, because those could be properties
3507   * that exist in the firmware tables, but were not updated by the firmware. For
3508   * example, the DTS could define 'local-bd-address' with an all-zero BD address.
3509   */
3510  static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
3511  {
3512  	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
3513  	bdaddr_t ba;
3514  	int ret;
3515  
3516  	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
3517  					    (u8 *)&ba, sizeof(ba));
3518  	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
3519  		return;
3520  
3521  	if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks))
3522  		baswap(&hdev->public_addr, &ba);
3523  	else
3524  		bacpy(&hdev->public_addr, &ba);
3525  }
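
/* Illustrative devicetree fragment (hypothetical node; only the
 * 'local-bd-address' property matters here). The property is expected in
 * little-endian byte order and is byte-swapped when the controller sets
 * HCI_QUIRK_BDADDR_PROPERTY_BROKEN:
 *
 *	bluetooth {
 *		local-bd-address = [ 55 44 33 22 11 00 ];
 *	};
 */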
3526  
3527  struct hci_init_stage {
3528  	int (*func)(struct hci_dev *hdev);
3529  };
3530  
3531  /* Run an init stage's NULL-terminated function table */
3532  static int hci_init_stage_sync(struct hci_dev *hdev,
3533  			       const struct hci_init_stage *stage)
3534  {
3535  	size_t i;
3536  
3537  	for (i = 0; stage[i].func; i++) {
3538  		int err;
3539  
3540  		err = stage[i].func(hdev);
3541  		if (err)
3542  			return err;
3543  	}
3544  
3545  	return 0;
3546  }
3547  
3548  /* Read Local Version */
3549  static int hci_read_local_version_sync(struct hci_dev *hdev)
3550  {
3551  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION,
3552  				     0, NULL, HCI_CMD_TIMEOUT);
3553  }
3554  
3555  /* Read BD Address */
3556  static int hci_read_bd_addr_sync(struct hci_dev *hdev)
3557  {
3558  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR,
3559  				     0, NULL, HCI_CMD_TIMEOUT);
3560  }
3561  
3562  #define HCI_INIT(_func) \
3563  { \
3564  	.func = _func, \
3565  }
3566  
3567  static const struct hci_init_stage hci_init0[] = {
3568  	/* HCI_OP_READ_LOCAL_VERSION */
3569  	HCI_INIT(hci_read_local_version_sync),
3570  	/* HCI_OP_READ_BD_ADDR */
3571  	HCI_INIT(hci_read_bd_addr_sync),
3572  	{}
3573  };
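
/* Usage sketch for the stage-table pattern (illustrative only): a stage
 * is a NULL-terminated array of HCI_INIT() entries which
 * hci_init_stage_sync() runs in order, stopping at the first error:
 *
 *	static const struct hci_init_stage example_stage[] = {
 *		HCI_INIT(hci_read_local_version_sync),
 *		HCI_INIT(hci_read_bd_addr_sync),
 *		{}
 *	};
 *
 *	err = hci_init_stage_sync(hdev, example_stage);
 */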
3574  
3575  int hci_reset_sync(struct hci_dev *hdev)
3576  {
3577  	int err;
3578  
3579  	set_bit(HCI_RESET, &hdev->flags);
3580  
3581  	err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
3582  				    HCI_CMD_TIMEOUT);
3583  	if (err)
3584  		return err;
3585  
3586  	return 0;
3587  }
3588  
3589  static int hci_init0_sync(struct hci_dev *hdev)
3590  {
3591  	int err;
3592  
3593  	bt_dev_dbg(hdev, "");
3594  
3595  	/* Reset */
3596  	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3597  		err = hci_reset_sync(hdev);
3598  		if (err)
3599  			return err;
3600  	}
3601  
3602  	return hci_init_stage_sync(hdev, hci_init0);
3603  }
3604  
3605  static int hci_unconf_init_sync(struct hci_dev *hdev)
3606  {
3607  	int err;
3608  
3609  	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3610  		return 0;
3611  
3612  	err = hci_init0_sync(hdev);
3613  	if (err < 0)
3614  		return err;
3615  
3616  	if (hci_dev_test_flag(hdev, HCI_SETUP))
3617  		hci_debugfs_create_basic(hdev);
3618  
3619  	return 0;
3620  }
3621  
3622  /* Read Local Supported Features. */
3623  static int hci_read_local_features_sync(struct hci_dev *hdev)
3624  {
3625  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
3626  				     0, NULL, HCI_CMD_TIMEOUT);
3627  }
3628  
3629  /* BR Controller init stage 1 command sequence */
3630  static const struct hci_init_stage br_init1[] = {
3631  	/* HCI_OP_READ_LOCAL_FEATURES */
3632  	HCI_INIT(hci_read_local_features_sync),
3633  	/* HCI_OP_READ_LOCAL_VERSION */
3634  	HCI_INIT(hci_read_local_version_sync),
3635  	/* HCI_OP_READ_BD_ADDR */
3636  	HCI_INIT(hci_read_bd_addr_sync),
3637  	{}
3638  };
3639  
3640  /* Read Local Commands */
3641  static int hci_read_local_cmds_sync(struct hci_dev *hdev)
3642  {
3643  	/* All Bluetooth 1.2 and later controllers should support the
3644  	 * HCI command for reading the local supported commands.
3645  	 *
3646  	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
3647  	 * but do not have support for this command. If that is the case,
3648  	 * the driver can quirk the behavior and skip reading the local
3649  	 * supported commands.
3650  	 */
3651  	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
3652  	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
3653  		return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
3654  					     0, NULL, HCI_CMD_TIMEOUT);
3655  
3656  	return 0;
3657  }
3658  
3659  static int hci_init1_sync(struct hci_dev *hdev)
3660  {
3661  	int err;
3662  
3663  	bt_dev_dbg(hdev, "");
3664  
3665  	/* Reset */
3666  	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3667  		err = hci_reset_sync(hdev);
3668  		if (err)
3669  			return err;
3670  	}
3671  
3672  	return hci_init_stage_sync(hdev, br_init1);
3673  }
3674  
3675  /* Read Buffer Size (ACL mtu, max pkt, etc.) */
3676  static int hci_read_buffer_size_sync(struct hci_dev *hdev)
3677  {
3678  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
3679  				     0, NULL, HCI_CMD_TIMEOUT);
3680  }
3681  
3682  /* Read Class of Device */
3683  static int hci_read_dev_class_sync(struct hci_dev *hdev)
3684  {
3685  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
3686  				     0, NULL, HCI_CMD_TIMEOUT);
3687  }
3688  
3689  /* Read Local Name */
3690  static int hci_read_local_name_sync(struct hci_dev *hdev)
3691  {
3692  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
3693  				     0, NULL, HCI_CMD_TIMEOUT);
3694  }
3695  
3696  /* Read Voice Setting */
3697  static int hci_read_voice_setting_sync(struct hci_dev *hdev)
3698  {
3699  	if (!read_voice_setting_capable(hdev))
3700  		return 0;
3701  
3702  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
3703  				     0, NULL, HCI_CMD_TIMEOUT);
3704  }
3705  
3706  /* Read Number of Supported IAC */
3707  static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
3708  {
3709  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
3710  				     0, NULL, HCI_CMD_TIMEOUT);
3711  }
3712  
3713  /* Read Current IAC LAP */
3714  static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
3715  {
3716  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
3717  				     0, NULL, HCI_CMD_TIMEOUT);
3718  }
3719  
3720  static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
3721  				     u8 cond_type, bdaddr_t *bdaddr,
3722  				     u8 auto_accept)
3723  {
3724  	struct hci_cp_set_event_filter cp;
3725  
3726  	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3727  		return 0;
3728  
3729  	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3730  		return 0;
3731  
3732  	memset(&cp, 0, sizeof(cp));
3733  	cp.flt_type = flt_type;
3734  
3735  	if (flt_type != HCI_FLT_CLEAR_ALL) {
3736  		cp.cond_type = cond_type;
3737  		bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
3738  		cp.addr_conn_flt.auto_accept = auto_accept;
3739  	}
3740  
3741  	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
3742  				     flt_type == HCI_FLT_CLEAR_ALL ?
3743  				     sizeof(cp.flt_type) : sizeof(cp), &cp,
3744  				     HCI_CMD_TIMEOUT);
3745  }
3746  
3747  static int hci_clear_event_filter_sync(struct hci_dev *hdev)
3748  {
3749  	if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
3750  		return 0;
3751  
3752  	/* In theory the state machine should not reach here unless
3753  	 * a hci_set_event_filter_sync() call succeeds, but we do
3754  	 * the check both for parity and as a future reminder.
3755  	 */
3756  	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3757  		return 0;
3758  
3759  	return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
3760  					 BDADDR_ANY, 0x00);
3761  }
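
/* Illustrative call (a sketch modelled on how the suspend path uses this
 * helper later in this file): auto-accept incoming connections from one
 * specific peer:
 *
 *	hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
 *				  HCI_CONN_SETUP_ALLOW_BDADDR, &bdaddr,
 *				  HCI_CONN_SETUP_AUTO_ON);
 *
 * Only cp.flt_type is sent for HCI_FLT_CLEAR_ALL; every other filter type
 * sends the full parameter block, hence the variable command length.
 */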
3762  
3763  /* Connection accept timeout: 0x7d00 * 0.625 ms = 20 secs */
3764  static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
3765  {
3766  	__le16 param = cpu_to_le16(0x7d00);
3767  
3768  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
3769  				     sizeof(param), &param, HCI_CMD_TIMEOUT);
3770  }
3771  
3772  /* Enable SCO flow control if supported */
3773  static int hci_write_sync_flowctl_sync(struct hci_dev *hdev)
3774  {
3775  	struct hci_cp_write_sync_flowctl cp;
3776  	int err;
3777  
3778  	/* Check if the controller supports SCO and HCI_OP_WRITE_SYNC_FLOWCTL */
3779  	if (!lmp_sco_capable(hdev) || !(hdev->commands[10] & BIT(4)) ||
3780  	    !test_bit(HCI_QUIRK_SYNC_FLOWCTL_SUPPORTED, &hdev->quirks))
3781  		return 0;
3782  
3783  	memset(&cp, 0, sizeof(cp));
3784  	cp.enable = 0x01;
3785  
3786  	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SYNC_FLOWCTL,
3787  				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3788  	if (!err)
3789  		hci_dev_set_flag(hdev, HCI_SCO_FLOWCTL);
3790  
3791  	return err;
3792  }
3793  
3794  /* BR Controller init stage 2 command sequence */
3795  static const struct hci_init_stage br_init2[] = {
3796  	/* HCI_OP_READ_BUFFER_SIZE */
3797  	HCI_INIT(hci_read_buffer_size_sync),
3798  	/* HCI_OP_READ_CLASS_OF_DEV */
3799  	HCI_INIT(hci_read_dev_class_sync),
3800  	/* HCI_OP_READ_LOCAL_NAME */
3801  	HCI_INIT(hci_read_local_name_sync),
3802  	/* HCI_OP_READ_VOICE_SETTING */
3803  	HCI_INIT(hci_read_voice_setting_sync),
3804  	/* HCI_OP_READ_NUM_SUPPORTED_IAC */
3805  	HCI_INIT(hci_read_num_supported_iac_sync),
3806  	/* HCI_OP_READ_CURRENT_IAC_LAP */
3807  	HCI_INIT(hci_read_current_iac_lap_sync),
3808  	/* HCI_OP_SET_EVENT_FLT */
3809  	HCI_INIT(hci_clear_event_filter_sync),
3810  	/* HCI_OP_WRITE_CA_TIMEOUT */
3811  	HCI_INIT(hci_write_ca_timeout_sync),
3812  	/* HCI_OP_WRITE_SYNC_FLOWCTL */
3813  	HCI_INIT(hci_write_sync_flowctl_sync),
3814  	{}
3815  };
3816  
3817  static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
3818  {
3819  	u8 mode = 0x01;
3820  
3821  	if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3822  		return 0;
3823  
3824  	/* When SSP is available, the host features page should be
3825  	 * available as well. However, some controllers list the
3826  	 * max_page as 0 as long as SSP has not been enabled. To
3827  	 * achieve proper debugging output, force the minimum
3828  	 * max_page to 1.
3829  	 */
3830  	hdev->max_page = 0x01;
3831  
3832  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3833  				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3834  }
3835  
3836  static int hci_write_eir_sync(struct hci_dev *hdev)
3837  {
3838  	struct hci_cp_write_eir cp;
3839  
3840  	if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3841  		return 0;
3842  
3843  	memset(hdev->eir, 0, sizeof(hdev->eir));
3844  	memset(&cp, 0, sizeof(cp));
3845  
3846  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
3847  				     HCI_CMD_TIMEOUT);
3848  }
3849  
3850  static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
3851  {
3852  	u8 mode;
3853  
3854  	if (!lmp_inq_rssi_capable(hdev) &&
3855  	    !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
3856  		return 0;
3857  
3858  	/* If Extended Inquiry Result events are supported, then
3859  	 * they are clearly preferred over Inquiry Result with RSSI
3860  	 * events.
3861  	 */
3862  	mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
3863  
3864  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE,
3865  				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3866  }
3867  
3868  static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev)
3869  {
3870  	if (!lmp_inq_tx_pwr_capable(hdev))
3871  		return 0;
3872  
3873  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
3874  				     0, NULL, HCI_CMD_TIMEOUT);
3875  }
3876  
3877  static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page)
3878  {
3879  	struct hci_cp_read_local_ext_features cp;
3880  
3881  	if (!lmp_ext_feat_capable(hdev))
3882  		return 0;
3883  
3884  	memset(&cp, 0, sizeof(cp));
3885  	cp.page = page;
3886  
3887  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
3888  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3889  }
3890  
3891  static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev)
3892  {
3893  	return hci_read_local_ext_features_sync(hdev, 0x01);
3894  }
3895  
3896  /* HCI Controller init stage 2 command sequence */
3897  static const struct hci_init_stage hci_init2[] = {
3898  	/* HCI_OP_READ_LOCAL_COMMANDS */
3899  	HCI_INIT(hci_read_local_cmds_sync),
3900  	/* HCI_OP_WRITE_SSP_MODE */
3901  	HCI_INIT(hci_write_ssp_mode_1_sync),
3902  	/* HCI_OP_WRITE_EIR */
3903  	HCI_INIT(hci_write_eir_sync),
3904  	/* HCI_OP_WRITE_INQUIRY_MODE */
3905  	HCI_INIT(hci_write_inquiry_mode_sync),
3906  	/* HCI_OP_READ_INQ_RSP_TX_POWER */
3907  	HCI_INIT(hci_read_inq_rsp_tx_power_sync),
3908  	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
3909  	HCI_INIT(hci_read_local_ext_features_1_sync),
3910  	/* HCI_OP_WRITE_AUTH_ENABLE */
3911  	HCI_INIT(hci_write_auth_enable_sync),
3912  	{}
3913  };
3914  
3915  /* Read LE Buffer Size */
3916  static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
3917  {
3918  	/* Use Read LE Buffer Size V2 if supported */
3919  	if (iso_capable(hdev) && hdev->commands[41] & 0x20)
3920  		return __hci_cmd_sync_status(hdev,
3921  					     HCI_OP_LE_READ_BUFFER_SIZE_V2,
3922  					     0, NULL, HCI_CMD_TIMEOUT);
3923  
3924  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
3925  				     0, NULL, HCI_CMD_TIMEOUT);
3926  }
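
/* hdev->commands[] mirrors the controller's Supported Commands bit mask,
 * so "commands[41] & 0x20" tests octet 41 bit 5 (LE Read Buffer Size v2).
 * A minimal helper sketch for such checks (hypothetical, not part of this
 * file):
 *
 *	static inline bool hci_cmd_supported(struct hci_dev *hdev,
 *					     unsigned int octet,
 *					     unsigned int bit)
 *	{
 *		return hdev->commands[octet] & BIT(bit);
 *	}
 */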
3927  
3928  /* Read LE Local Supported Features */
3929  static int hci_le_read_local_features_sync(struct hci_dev *hdev)
3930  {
3931  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES,
3932  				     0, NULL, HCI_CMD_TIMEOUT);
3933  }
3934  
3935  /* Read LE Supported States */
3936  static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
3937  {
3938  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES,
3939  				     0, NULL, HCI_CMD_TIMEOUT);
3940  }
3941  
3942  /* LE Controller init stage 2 command sequence */
3943  static const struct hci_init_stage le_init2[] = {
3944  	/* HCI_OP_LE_READ_LOCAL_FEATURES */
3945  	HCI_INIT(hci_le_read_local_features_sync),
3946  	/* HCI_OP_LE_READ_BUFFER_SIZE */
3947  	HCI_INIT(hci_le_read_buffer_size_sync),
3948  	/* HCI_OP_LE_READ_SUPPORTED_STATES */
3949  	HCI_INIT(hci_le_read_supported_states_sync),
3950  	{}
3951  };
3952  
3953  static int hci_init2_sync(struct hci_dev *hdev)
3954  {
3955  	int err;
3956  
3957  	bt_dev_dbg(hdev, "");
3958  
3959  	err = hci_init_stage_sync(hdev, hci_init2);
3960  	if (err)
3961  		return err;
3962  
3963  	if (lmp_bredr_capable(hdev)) {
3964  		err = hci_init_stage_sync(hdev, br_init2);
3965  		if (err)
3966  			return err;
3967  	} else {
3968  		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
3969  	}
3970  
3971  	if (lmp_le_capable(hdev)) {
3972  		err = hci_init_stage_sync(hdev, le_init2);
3973  		if (err)
3974  			return err;
3975  		/* LE-only controllers have LE implicitly enabled */
3976  		if (!lmp_bredr_capable(hdev))
3977  			hci_dev_set_flag(hdev, HCI_LE_ENABLED);
3978  	}
3979  
3980  	return 0;
3981  }
3982  
3983  static int hci_set_event_mask_sync(struct hci_dev *hdev)
3984  {
3985  	/* The second byte is 0xff instead of 0x9f (two reserved bits
3986  	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
3987  	 * command otherwise.
3988  	 */
3989  	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
3990  
3991  	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
3992  	 * any event mask for pre 1.2 devices.
3993  	 */
3994  	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3995  		return 0;
3996  
3997  	if (lmp_bredr_capable(hdev)) {
3998  		events[4] |= 0x01; /* Flow Specification Complete */
3999  
4000  		/* Don't set Disconnect Complete and mode change when
4001  		 * suspended as that would wake up the host when disconnecting
4002  		 * due to suspend.
4003  		 */
4004  		if (hdev->suspended) {
4005  			events[0] &= 0xef;
4006  			events[2] &= 0xf7;
4007  		}
4008  	} else {
4009  		/* Use a different default for LE-only devices */
4010  		memset(events, 0, sizeof(events));
4011  		events[1] |= 0x20; /* Command Complete */
4012  		events[1] |= 0x40; /* Command Status */
4013  		events[1] |= 0x80; /* Hardware Error */
4014  
4015  		/* If the controller supports the Disconnect command, enable
4016  		 * the corresponding event. In addition enable packet flow
4017  		 * control related events.
4018  		 */
4019  		if (hdev->commands[0] & 0x20) {
4020  			/* Don't set Disconnect Complete when suspended as that
4021  			 * would wake up the host when disconnecting due to
4022  			 * suspend.
4023  			 */
4024  			if (!hdev->suspended)
4025  				events[0] |= 0x10; /* Disconnection Complete */
4026  			events[2] |= 0x04; /* Number of Completed Packets */
4027  			events[3] |= 0x02; /* Data Buffer Overflow */
4028  		}
4029  
4030  		/* If the controller supports the Read Remote Version
4031  		 * Information command, enable the corresponding event.
4032  		 */
4033  		if (hdev->commands[2] & 0x80)
4034  			events[1] |= 0x08; /* Read Remote Version Information
4035  					    * Complete
4036  					    */
4037  
4038  		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
4039  			events[0] |= 0x80; /* Encryption Change */
4040  			events[5] |= 0x80; /* Encryption Key Refresh Complete */
4041  		}
4042  	}
4043  
4044  	if (lmp_inq_rssi_capable(hdev) ||
4045  	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
4046  		events[4] |= 0x02; /* Inquiry Result with RSSI */
4047  
4048  	if (lmp_ext_feat_capable(hdev))
4049  		events[4] |= 0x04; /* Read Remote Extended Features Complete */
4050  
4051  	if (lmp_esco_capable(hdev)) {
4052  		events[5] |= 0x08; /* Synchronous Connection Complete */
4053  		events[5] |= 0x10; /* Synchronous Connection Changed */
4054  	}
4055  
4056  	if (lmp_sniffsubr_capable(hdev))
4057  		events[5] |= 0x20; /* Sniff Subrating */
4058  
4059  	if (lmp_pause_enc_capable(hdev))
4060  		events[5] |= 0x80; /* Encryption Key Refresh Complete */
4061  
4062  	if (lmp_ext_inq_capable(hdev))
4063  		events[5] |= 0x40; /* Extended Inquiry Result */
4064  
4065  	if (lmp_no_flush_capable(hdev))
4066  		events[7] |= 0x01; /* Enhanced Flush Complete */
4067  
4068  	if (lmp_lsto_capable(hdev))
4069  		events[6] |= 0x80; /* Link Supervision Timeout Changed */
4070  
4071  	if (lmp_ssp_capable(hdev)) {
4072  		events[6] |= 0x01;	/* IO Capability Request */
4073  		events[6] |= 0x02;	/* IO Capability Response */
4074  		events[6] |= 0x04;	/* User Confirmation Request */
4075  		events[6] |= 0x08;	/* User Passkey Request */
4076  		events[6] |= 0x10;	/* Remote OOB Data Request */
4077  		events[6] |= 0x20;	/* Simple Pairing Complete */
4078  		events[7] |= 0x04;	/* User Passkey Notification */
4079  		events[7] |= 0x08;	/* Keypress Notification */
4080  		events[7] |= 0x10;	/* Remote Host Supported
4081  					 * Features Notification
4082  					 */
4083  	}
4084  
4085  	if (lmp_le_capable(hdev))
4086  		events[7] |= 0x20;	/* LE Meta-Event */
4087  
4088  	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK,
4089  				     sizeof(events), events, HCI_CMD_TIMEOUT);
4090  }
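
/* Worked example of the mask layout above (illustrative): event mask bit
 * n is stored as events[n / 8] |= BIT(n % 8). The LE Meta event is bit 61
 * of the event mask, so 61 / 8 = 7 and 61 % 8 = 5, which yields the
 * "events[7] |= 0x20" assignment above.
 */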
4091  
4092  static int hci_read_stored_link_key_sync(struct hci_dev *hdev)
4093  {
4094  	struct hci_cp_read_stored_link_key cp;
4095  
4096  	if (!(hdev->commands[6] & 0x20) ||
4097  	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
4098  		return 0;
4099  
4100  	memset(&cp, 0, sizeof(cp));
4101  	bacpy(&cp.bdaddr, BDADDR_ANY);
4102  	cp.read_all = 0x01;
4103  
4104  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY,
4105  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4106  }
4107  
4108  static int hci_setup_link_policy_sync(struct hci_dev *hdev)
4109  {
4110  	struct hci_cp_write_def_link_policy cp;
4111  	u16 link_policy = 0;
4112  
4113  	if (!(hdev->commands[5] & 0x10))
4114  		return 0;
4115  
4116  	memset(&cp, 0, sizeof(cp));
4117  
4118  	if (lmp_rswitch_capable(hdev))
4119  		link_policy |= HCI_LP_RSWITCH;
4120  	if (lmp_hold_capable(hdev))
4121  		link_policy |= HCI_LP_HOLD;
4122  	if (lmp_sniff_capable(hdev))
4123  		link_policy |= HCI_LP_SNIFF;
4124  	if (lmp_park_capable(hdev))
4125  		link_policy |= HCI_LP_PARK;
4126  
4127  	cp.policy = cpu_to_le16(link_policy);
4128  
4129  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
4130  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4131  }
4132  
4133  static int hci_read_page_scan_activity_sync(struct hci_dev *hdev)
4134  {
4135  	if (!(hdev->commands[8] & 0x01))
4136  		return 0;
4137  
4138  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY,
4139  				     0, NULL, HCI_CMD_TIMEOUT);
4140  }
4141  
4142  static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
4143  {
4144  	if (!(hdev->commands[18] & 0x04) ||
4145  	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4146  	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4147  		return 0;
4148  
4149  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4150  				     0, NULL, HCI_CMD_TIMEOUT);
4151  }
4152  
4153  static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
4154  {
4155  	/* Some older Broadcom based Bluetooth 1.2 controllers do not
4156  	 * support the Read Page Scan Type command. Check support for
4157  	 * this command in the bit mask of supported commands.
4158  	 */
4159  	if (!(hdev->commands[13] & 0x01) ||
4160  	    test_bit(HCI_QUIRK_BROKEN_READ_PAGE_SCAN_TYPE, &hdev->quirks))
4161  		return 0;
4162  
4163  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
4164  				     0, NULL, HCI_CMD_TIMEOUT);
4165  }
4166  
4167  /* Read features beyond page 1 if available */
4168  static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev)
4169  {
4170  	u8 page;
4171  	int err;
4172  
4173  	if (!lmp_ext_feat_capable(hdev))
4174  		return 0;
4175  
4176  	for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page;
4177  	     page++) {
4178  		err = hci_read_local_ext_features_sync(hdev, page);
4179  		if (err)
4180  			return err;
4181  	}
4182  
4183  	return 0;
4184  }
4185  
4186  /* HCI Controller init stage 3 command sequence */
4187  static const struct hci_init_stage hci_init3[] = {
4188  	/* HCI_OP_SET_EVENT_MASK */
4189  	HCI_INIT(hci_set_event_mask_sync),
4190  	/* HCI_OP_READ_STORED_LINK_KEY */
4191  	HCI_INIT(hci_read_stored_link_key_sync),
4192  	/* HCI_OP_WRITE_DEF_LINK_POLICY */
4193  	HCI_INIT(hci_setup_link_policy_sync),
4194  	/* HCI_OP_READ_PAGE_SCAN_ACTIVITY */
4195  	HCI_INIT(hci_read_page_scan_activity_sync),
4196  	/* HCI_OP_READ_DEF_ERR_DATA_REPORTING */
4197  	HCI_INIT(hci_read_def_err_data_reporting_sync),
4198  	/* HCI_OP_READ_PAGE_SCAN_TYPE */
4199  	HCI_INIT(hci_read_page_scan_type_sync),
4200  	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
4201  	HCI_INIT(hci_read_local_ext_features_all_sync),
4202  	{}
4203  };
4204  
4205  static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
4206  {
4207  	u8 events[8];
4208  
4209  	if (!lmp_le_capable(hdev))
4210  		return 0;
4211  
4212  	memset(events, 0, sizeof(events));
4213  
4214  	if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
4215  		events[0] |= 0x10;	/* LE Long Term Key Request */
4216  
4217  	/* If the controller supports the Connection Parameters Request
4218  	 * Link Layer Procedure, enable the corresponding event.
4219  	 */
4220  	if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
4221  		/* LE Remote Connection Parameter Request */
4222  		events[0] |= 0x20;
4223  
4224  	/* If the controller supports the Data Length Extension
4225  	 * feature, enable the corresponding event.
4226  	 */
4227  	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
4228  		events[0] |= 0x40;	/* LE Data Length Change */
4229  
4230  	/* If the controller supports LL Privacy feature or LE Extended Adv,
4231  	 * enable the corresponding event.
4232  	 */
4233  	if (use_enhanced_conn_complete(hdev))
4234  		events[1] |= 0x02;	/* LE Enhanced Connection Complete */
4235  
4236  	/* Mark Device Privacy if Privacy Mode is supported */
4237  	if (privacy_mode_capable(hdev))
4238  		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4239  
4240  	/* Mark Address Resolution if LL Privacy is supported */
4241  	if (ll_privacy_capable(hdev))
4242  		hdev->conn_flags |= HCI_CONN_FLAG_ADDRESS_RESOLUTION;
4243  
4244  	/* If the controller supports Extended Scanner Filter
4245  	 * Policies, enable the corresponding event.
4246  	 */
4247  	if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
4248  		events[1] |= 0x04;	/* LE Direct Advertising Report */
4249  
4250  	/* If the controller supports Channel Selection Algorithm #2
4251  	 * feature, enable the corresponding event.
4252  	 */
4253  	if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
4254  		events[2] |= 0x08;	/* LE Channel Selection Algorithm */
4255  
4256  	/* If the controller supports the LE Set Scan Enable command,
4257  	 * enable the corresponding advertising report event.
4258  	 */
4259  	if (hdev->commands[26] & 0x08)
4260  		events[0] |= 0x02;	/* LE Advertising Report */
4261  
4262  	/* If the controller supports the LE Create Connection
4263  	 * command, enable the corresponding event.
4264  	 */
4265  	if (hdev->commands[26] & 0x10)
4266  		events[0] |= 0x01;	/* LE Connection Complete */
4267  
4268  	/* If the controller supports the LE Connection Update
4269  	 * command, enable the corresponding event.
4270  	 */
4271  	if (hdev->commands[27] & 0x04)
4272  		events[0] |= 0x04;	/* LE Connection Update Complete */
4273  
4274  	/* If the controller supports the LE Read Remote Used Features
4275  	 * command, enable the corresponding event.
4276  	 */
4277  	if (hdev->commands[27] & 0x20)
4278  		/* LE Read Remote Used Features Complete */
4279  		events[0] |= 0x08;
4280  
4281  	/* If the controller supports the LE Read Local P-256
4282  	 * Public Key command, enable the corresponding event.
4283  	 */
4284  	if (hdev->commands[34] & 0x02)
4285  		/* LE Read Local P-256 Public Key Complete */
4286  		events[0] |= 0x80;
4287  
4288  	/* If the controller supports the LE Generate DHKey
4289  	 * command, enable the corresponding event.
4290  	 */
4291  	if (hdev->commands[34] & 0x04)
4292  		events[1] |= 0x01;	/* LE Generate DHKey Complete */
4293  
4294  	/* If the controller supports the LE Set Default PHY or
4295  	 * LE Set PHY commands, enable the corresponding event.
4296  	 */
4297  	if (hdev->commands[35] & (0x20 | 0x40))
4298  		events[1] |= 0x08;        /* LE PHY Update Complete */
4299  
4300  	/* If the controller supports LE Set Extended Scan Parameters
4301  	 * and LE Set Extended Scan Enable commands, enable the
4302  	 * corresponding event.
4303  	 */
4304  	if (use_ext_scan(hdev))
4305  		events[1] |= 0x10;	/* LE Extended Advertising Report */
4306  
4307  	/* If the controller supports the LE Extended Advertising
4308  	 * command, enable the corresponding event.
4309  	 */
4310  	if (ext_adv_capable(hdev))
4311  		events[2] |= 0x02;	/* LE Advertising Set Terminated */
4312  
4313  	if (cis_capable(hdev)) {
4314  		events[3] |= 0x01;	/* LE CIS Established */
4315  		if (cis_peripheral_capable(hdev))
4316  			events[3] |= 0x02; /* LE CIS Request */
4317  	}
4318  
4319  	if (bis_capable(hdev)) {
4320  		events[1] |= 0x20;	/* LE PA Report */
4321  		events[1] |= 0x40;	/* LE PA Sync Established */
4322  		events[3] |= 0x04;	/* LE Create BIG Complete */
4323  		events[3] |= 0x08;	/* LE Terminate BIG Complete */
4324  		events[3] |= 0x10;	/* LE BIG Sync Established */
4325  		events[3] |= 0x20;	/* LE BIG Sync Loss */
4326  		events[4] |= 0x02;	/* LE BIG Info Advertising Report */
4327  	}
4328  
4329  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
4330  				     sizeof(events), events, HCI_CMD_TIMEOUT);
4331  }
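
/* Illustrative note on the LE mask layout: LE Meta subevent code N maps
 * to mask bit N - 1, i.e. events[(N - 1) / 8] |= BIT((N - 1) % 8). For
 * example, LE CIS Established has subevent code 0x19, giving bit 24 and
 * thus events[3] |= 0x01, matching the cis_capable() branch above.
 */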
4332  
4333  /* Read LE Advertising Channel TX Power */
4334  static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
4335  {
4336  	if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
4337  		/* The HCI spec forbids mixing legacy and extended
4338  		 * advertising commands; READ_ADV_TX_POWER is a legacy
4339  		 * command, so do not call it if extended advertising
4340  		 * is supported, otherwise the controller will return
4341  		 * COMMAND_DISALLOWED for the extended commands.
4342  		 */
4343  		return __hci_cmd_sync_status(hdev,
4344  					       HCI_OP_LE_READ_ADV_TX_POWER,
4345  					       0, NULL, HCI_CMD_TIMEOUT);
4346  	}
4347  
4348  	return 0;
4349  }
4350  
4351  /* Read LE Min/Max Tx Power */
4352  static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
4353  {
4354  	if (!(hdev->commands[38] & 0x80) ||
4355  	    test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks))
4356  		return 0;
4357  
4358  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
4359  				     0, NULL, HCI_CMD_TIMEOUT);
4360  }
4361  
4362  /* Read LE Accept List Size */
4363  static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
4364  {
4365  	if (!(hdev->commands[26] & 0x40))
4366  		return 0;
4367  
4368  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4369  				     0, NULL, HCI_CMD_TIMEOUT);
4370  }
4371  
4372  /* Read LE Resolving List Size */
4373  static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
4374  {
4375  	if (!(hdev->commands[34] & 0x40))
4376  		return 0;
4377  
4378  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
4379  				     0, NULL, HCI_CMD_TIMEOUT);
4380  }
4381  
4382  /* Clear LE Resolving List */
4383  static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev)
4384  {
4385  	if (!(hdev->commands[34] & 0x20))
4386  		return 0;
4387  
4388  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL,
4389  				     HCI_CMD_TIMEOUT);
4390  }
4391  
4392  /* Set RPA timeout */
4393  static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
4394  {
4395  	__le16 timeout = cpu_to_le16(hdev->rpa_timeout);
4396  
4397  	if (!(hdev->commands[35] & 0x04) ||
4398  	    test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks))
4399  		return 0;
4400  
4401  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
4402  				     sizeof(timeout), &timeout,
4403  				     HCI_CMD_TIMEOUT);
4404  }
4405  
4406  /* Read LE Maximum Data Length */
4407  static int hci_le_read_max_data_len_sync(struct hci_dev *hdev)
4408  {
4409  	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4410  		return 0;
4411  
4412  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL,
4413  				     HCI_CMD_TIMEOUT);
4414  }
4415  
4416  /* Read LE Suggested Default Data Length */
4417  static int hci_le_read_def_data_len_sync(struct hci_dev *hdev)
4418  {
4419  	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4420  		return 0;
4421  
4422  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL,
4423  				     HCI_CMD_TIMEOUT);
4424  }
4425  
4426  /* Read LE Number of Supported Advertising Sets */
4427  static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev)
4428  {
4429  	if (!ext_adv_capable(hdev))
4430  		return 0;
4431  
4432  	return __hci_cmd_sync_status(hdev,
4433  				     HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4434  				     0, NULL, HCI_CMD_TIMEOUT);
4435  }
4436  
4437  /* Write LE Host Supported */
4438  static int hci_set_le_support_sync(struct hci_dev *hdev)
4439  {
4440  	struct hci_cp_write_le_host_supported cp;
4441  
4442  	/* LE-only devices do not support explicit enablement */
4443  	if (!lmp_bredr_capable(hdev))
4444  		return 0;
4445  
4446  	memset(&cp, 0, sizeof(cp));
4447  
4448  	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
4449  		cp.le = 0x01;
4450  		cp.simul = 0x00;
4451  	}
4452  
4453  	if (cp.le == lmp_host_le_capable(hdev))
4454  		return 0;
4455  
4456  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4457  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4458  }
4459  
4460  /* LE Set Host Feature */
4461  static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
4462  {
4463  	struct hci_cp_le_set_host_feature cp;
4464  
4465  	if (!cis_capable(hdev))
4466  		return 0;
4467  
4468  	memset(&cp, 0, sizeof(cp));
4469  
4470  	/* Connected Isochronous Channels (Host Support) */
4471  	cp.bit_number = 32;
4472  	cp.bit_value = 1;
4473  
4474  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,
4475  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4476  }
4477  
4478  /* LE Controller init stage 3 command sequence */
4479  static const struct hci_init_stage le_init3[] = {
4480  	/* HCI_OP_LE_SET_EVENT_MASK */
4481  	HCI_INIT(hci_le_set_event_mask_sync),
4482  	/* HCI_OP_LE_READ_ADV_TX_POWER */
4483  	HCI_INIT(hci_le_read_adv_tx_power_sync),
4484  	/* HCI_OP_LE_READ_TRANSMIT_POWER */
4485  	HCI_INIT(hci_le_read_tx_power_sync),
4486  	/* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */
4487  	HCI_INIT(hci_le_read_accept_list_size_sync),
4488  	/* HCI_OP_LE_CLEAR_ACCEPT_LIST */
4489  	HCI_INIT(hci_le_clear_accept_list_sync),
4490  	/* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
4491  	HCI_INIT(hci_le_read_resolv_list_size_sync),
4492  	/* HCI_OP_LE_CLEAR_RESOLV_LIST */
4493  	HCI_INIT(hci_le_clear_resolv_list_sync),
4494  	/* HCI_OP_LE_SET_RPA_TIMEOUT */
4495  	HCI_INIT(hci_le_set_rpa_timeout_sync),
4496  	/* HCI_OP_LE_READ_MAX_DATA_LEN */
4497  	HCI_INIT(hci_le_read_max_data_len_sync),
4498  	/* HCI_OP_LE_READ_DEF_DATA_LEN */
4499  	HCI_INIT(hci_le_read_def_data_len_sync),
4500  	/* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
4501  	HCI_INIT(hci_le_read_num_support_adv_sets_sync),
4502  	/* HCI_OP_WRITE_LE_HOST_SUPPORTED */
4503  	HCI_INIT(hci_set_le_support_sync),
4504  	/* HCI_OP_LE_SET_HOST_FEATURE */
4505  	HCI_INIT(hci_le_set_host_feature_sync),
4506  	{}
4507  };
4508  
4509  static int hci_init3_sync(struct hci_dev *hdev)
4510  {
4511  	int err;
4512  
4513  	bt_dev_dbg(hdev, "");
4514  
4515  	err = hci_init_stage_sync(hdev, hci_init3);
4516  	if (err)
4517  		return err;
4518  
4519  	if (lmp_le_capable(hdev))
4520  		return hci_init_stage_sync(hdev, le_init3);
4521  
4522  	return 0;
4523  }
4524  
4525  static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
4526  {
4527  	struct hci_cp_delete_stored_link_key cp;
4528  
4529  	/* Some Broadcom based Bluetooth controllers do not support the
4530  	 * Delete Stored Link Key command. They are clearly indicating its
4531  	 * absence in the bit mask of supported commands.
4532  	 *
4533  	 * Check the supported commands and only if the command is marked
4534  	 * as supported send it. If not supported assume that the controller
4535  	 * does not have actual support for stored link keys which makes this
4536  	 * command redundant anyway.
4537  	 *
4538  	 * Some controllers indicate that they support deleting stored
4539  	 * link keys, but they don't. The quirk lets a driver
4540  	 * just disable this command.
4541  	 */
4542  	if (!(hdev->commands[6] & 0x80) ||
4543  	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
4544  		return 0;
4545  
4546  	memset(&cp, 0, sizeof(cp));
4547  	bacpy(&cp.bdaddr, BDADDR_ANY);
4548  	cp.delete_all = 0x01;
4549  
4550  	return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
4551  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4552  }
4553  
4554  static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
4555  {
4556  	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
4557  	bool changed = false;
4558  
4559  	/* Set event mask page 2 if the HCI command for it is supported */
4560  	if (!(hdev->commands[22] & 0x04))
4561  		return 0;
4562  
4563  	/* If Connectionless Peripheral Broadcast central role is supported,
4564  	 * enable all necessary events for it.
4565  	 */
4566  	if (lmp_cpb_central_capable(hdev)) {
4567  		events[1] |= 0x40;	/* Triggered Clock Capture */
4568  		events[1] |= 0x80;	/* Synchronization Train Complete */
4569  		events[2] |= 0x08;	/* Truncated Page Complete */
4570  		events[2] |= 0x20;	/* CPB Channel Map Change */
4571  		changed = true;
4572  	}
4573  
4574  	/* If Connectionless Peripheral Broadcast peripheral role is supported
4575  	/* If Connectionless Peripheral Broadcast peripheral role is supported,
4576  	 */
4577  	if (lmp_cpb_peripheral_capable(hdev)) {
4578  		events[2] |= 0x01;	/* Synchronization Train Received */
4579  		events[2] |= 0x02;	/* CPB Receive */
4580  		events[2] |= 0x04;	/* CPB Timeout */
4581  		events[2] |= 0x10;	/* Peripheral Page Response Timeout */
4582  		changed = true;
4583  	}
4584  
4585  	/* Enable Authenticated Payload Timeout Expired event if supported */
4586  	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
4587  		events[2] |= 0x80;
4588  		changed = true;
4589  	}
4590  
4591  	/* Some Broadcom based controllers indicate support for Set Event
4592  	 * Mask Page 2 command, but then actually do not support it. Since
4593  	 * the default value is all bits set to zero, the command is only
4594  	 * required if the event mask has to be changed. In case no change
4595  	 * to the event mask is needed, skip this command.
4596  	 */
4597  	if (!changed)
4598  		return 0;
4599  
4600  	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
4601  				     sizeof(events), events, HCI_CMD_TIMEOUT);
4602  }
4603  
4604  /* Read local codec list if the HCI command is supported */
4605  static int hci_read_local_codecs_sync(struct hci_dev *hdev)
4606  {
4607  	if (hdev->commands[45] & 0x04)
4608  		hci_read_supported_codecs_v2(hdev);
4609  	else if (hdev->commands[29] & 0x20)
4610  		hci_read_supported_codecs(hdev);
4611  
4612  	return 0;
4613  }
4614  
4615  /* Read local pairing options if the HCI command is supported */
4616  static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
4617  {
4618  	if (!(hdev->commands[41] & 0x08))
4619  		return 0;
4620  
4621  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
4622  				     0, NULL, HCI_CMD_TIMEOUT);
4623  }
4624  
4625  /* Get MWS transport configuration if the HCI command is supported */
4626  static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
4627  {
4628  	if (!mws_transport_config_capable(hdev))
4629  		return 0;
4630  
4631  	return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
4632  				     0, NULL, HCI_CMD_TIMEOUT);
4633  }
4634  
4635  /* Check for Synchronization Train support */
4636  static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
4637  {
4638  	if (!lmp_sync_train_capable(hdev))
4639  		return 0;
4640  
4641  	return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
4642  				     0, NULL, HCI_CMD_TIMEOUT);
4643  }
4644  
4645  /* Enable Secure Connections if supported and configured */
4646  static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
4647  {
4648  	u8 support = 0x01;
4649  
4650  	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
4651  	    !bredr_sc_enabled(hdev))
4652  		return 0;
4653  
4654  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
4655  				     sizeof(support), &support,
4656  				     HCI_CMD_TIMEOUT);
4657  }
4658  
4659  /* If supported, set erroneous data reporting to match the wideband
4660   * speech setting value.
4661   */
4662  static int hci_set_err_data_report_sync(struct hci_dev *hdev)
4663  {
4664  	struct hci_cp_write_def_err_data_reporting cp;
4665  	bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);
4666  
4667  	if (!(hdev->commands[18] & 0x08) ||
4668  	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4669  	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4670  		return 0;
4671  
4672  	if (enabled == hdev->err_data_reporting)
4673  		return 0;
4674  
4675  	memset(&cp, 0, sizeof(cp));
4676  	cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED :
4677  				ERR_DATA_REPORTING_DISABLED;
4678  
4679  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4680  				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4681  }
4682  
4683  static const struct hci_init_stage hci_init4[] = {
4684  	 /* HCI_OP_DELETE_STORED_LINK_KEY */
4685  	HCI_INIT(hci_delete_stored_link_key_sync),
4686  	/* HCI_OP_SET_EVENT_MASK_PAGE_2 */
4687  	HCI_INIT(hci_set_event_mask_page_2_sync),
4688  	/* HCI_OP_READ_LOCAL_CODECS */
4689  	HCI_INIT(hci_read_local_codecs_sync),
4690  	 /* HCI_OP_READ_LOCAL_PAIRING_OPTS */
4691  	HCI_INIT(hci_read_local_pairing_opts_sync),
4692  	 /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
4693  	HCI_INIT(hci_get_mws_transport_config_sync),
4694  	 /* HCI_OP_READ_SYNC_TRAIN_PARAMS */
4695  	HCI_INIT(hci_read_sync_train_params_sync),
4696  	/* HCI_OP_WRITE_SC_SUPPORT */
4697  	HCI_INIT(hci_write_sc_support_1_sync),
4698  	/* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
4699  	HCI_INIT(hci_set_err_data_report_sync),
4700  	{}
4701  };
4702  
4703  /* Set Suggested Default Data Length to maximum if supported */
4704  static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
4705  {
4706  	struct hci_cp_le_write_def_data_len cp;
4707  
4708  	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4709  		return 0;
4710  
4711  	memset(&cp, 0, sizeof(cp));
4712  	cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
4713  	cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
4714  
4715  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
4716  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4717  }
4718  
4719  /* Set Default PHY parameters if the command is supported; enable all
4720   * supported PHYs according to the LE feature bits.
4721   */
4722  static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
4723  {
4724  	struct hci_cp_le_set_default_phy cp;
4725  
4726  	if (!(hdev->commands[35] & 0x20)) {
4727  		/* If the command is not supported it means only 1M PHY is
4728  		 * supported.
4729  		 */
4730  		hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
4731  		hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
4732  		return 0;
4733  	}
4734  
4735  	memset(&cp, 0, sizeof(cp));
4736  	cp.all_phys = 0x00;
4737  	cp.tx_phys = HCI_LE_SET_PHY_1M;
4738  	cp.rx_phys = HCI_LE_SET_PHY_1M;
4739  
4740  	/* Enables 2M PHY if supported */
4741  	if (le_2m_capable(hdev)) {
4742  		cp.tx_phys |= HCI_LE_SET_PHY_2M;
4743  		cp.rx_phys |= HCI_LE_SET_PHY_2M;
4744  	}
4745  
4746  	/* Enables Coded PHY if supported */
4747  	if (le_coded_capable(hdev)) {
4748  		cp.tx_phys |= HCI_LE_SET_PHY_CODED;
4749  		cp.rx_phys |= HCI_LE_SET_PHY_CODED;
4750  	}
4751  
4752  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4753  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4754  }
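
/* The resulting masks are cumulative; e.g. on a hypothetical controller
 * supporting 1M and 2M but not Coded PHY, the command would carry:
 *
 *	cp.tx_phys = HCI_LE_SET_PHY_1M | HCI_LE_SET_PHY_2M;
 *	cp.rx_phys = HCI_LE_SET_PHY_1M | HCI_LE_SET_PHY_2M;
 *
 * while all_phys = 0x00 indicates the host expresses a preference for
 * both TX and RX rather than leaving either to the controller.
 */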
4755  
4756  static const struct hci_init_stage le_init4[] = {
4757  	/* HCI_OP_LE_WRITE_DEF_DATA_LEN */
4758  	HCI_INIT(hci_le_set_write_def_data_len_sync),
4759  	/* HCI_OP_LE_SET_DEFAULT_PHY */
4760  	HCI_INIT(hci_le_set_default_phy_sync),
4761  	{}
4762  };
4763  
4764  static int hci_init4_sync(struct hci_dev *hdev)
4765  {
4766  	int err;
4767  
4768  	bt_dev_dbg(hdev, "");
4769  
4770  	err = hci_init_stage_sync(hdev, hci_init4);
4771  	if (err)
4772  		return err;
4773  
4774  	if (lmp_le_capable(hdev))
4775  		return hci_init_stage_sync(hdev, le_init4);
4776  
4777  	return 0;
4778  }
4779  
4780  static int hci_init_sync(struct hci_dev *hdev)
4781  {
4782  	int err;
4783  
4784  	err = hci_init1_sync(hdev);
4785  	if (err < 0)
4786  		return err;
4787  
4788  	if (hci_dev_test_flag(hdev, HCI_SETUP))
4789  		hci_debugfs_create_basic(hdev);
4790  
4791  	err = hci_init2_sync(hdev);
4792  	if (err < 0)
4793  		return err;
4794  
4795  	err = hci_init3_sync(hdev);
4796  	if (err < 0)
4797  		return err;
4798  
4799  	err = hci_init4_sync(hdev);
4800  	if (err < 0)
4801  		return err;
4802  
4803  	/* This function is only called when the controller is actually in
4804  	 * configured state. When the controller is marked as unconfigured,
4805  	 * this initialization procedure is not run.
4806  	 *
4807  	 * It means that it is possible that a controller runs through its
4808  	 * setup phase and then discovers missing settings. If that is the
4809  	 * case, then this function will not be called. It then will only
4810  	 * be called during the config phase.
4811  	 *
4812  	 * So only when in setup phase or config phase, create the debugfs
4813  	 * entries and register the SMP channels.
4814  	 */
4815  	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4816  	    !hci_dev_test_flag(hdev, HCI_CONFIG))
4817  		return 0;
4818  
4819  	if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
4820  		return 0;
4821  
4822  	hci_debugfs_create_common(hdev);
4823  
4824  	if (lmp_bredr_capable(hdev))
4825  		hci_debugfs_create_bredr(hdev);
4826  
4827  	if (lmp_le_capable(hdev))
4828  		hci_debugfs_create_le(hdev);
4829  
4830  	return 0;
4831  }
4832  
4833  #define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc }
4834  
4835  static const struct {
4836  	unsigned long quirk;
4837  	const char *desc;
4838  } hci_broken_table[] = {
4839  	HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
4840  			 "HCI Read Local Supported Commands not supported"),
4841  	HCI_QUIRK_BROKEN(STORED_LINK_KEY,
4842  			 "HCI Delete Stored Link Key command is advertised, "
4843  			 "but not supported."),
4844  	HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
4845  			 "HCI Read Default Erroneous Data Reporting command is "
4846  			 "advertised, but not supported."),
4847  	HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
4848  			 "HCI Read Transmit Power Level command is advertised, "
4849  			 "but not supported."),
4850  	HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL,
4851  			 "HCI Set Event Filter command not supported."),
4852  	HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
4853  			 "HCI Enhanced Setup Synchronous Connection command is "
4854  			 "advertised, but not supported."),
4855  	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
4856  			 "HCI LE Set Random Private Address Timeout command is "
4857  			 "advertised, but not supported."),
4858  	HCI_QUIRK_BROKEN(EXT_CREATE_CONN,
4859  			 "HCI LE Extended Create Connection command is "
4860  			 "advertised, but not supported."),
4861  	HCI_QUIRK_BROKEN(WRITE_AUTH_PAYLOAD_TIMEOUT,
4862  			 "HCI WRITE AUTH PAYLOAD TIMEOUT command leads "
4863  			 "to unexpected SMP errors when pairing "
4864  			 "and will not be used."),
4865  	HCI_QUIRK_BROKEN(LE_CODED,
4866  			 "HCI LE Coded PHY feature bit is set, "
4867  			 "but its usage is not supported.")
4868  };
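
/* Extending the table is a single line thanks to the macro; e.g. a
 * hypothetical HCI_QUIRK_BROKEN_FOO quirk would be added as:
 *
 *	HCI_QUIRK_BROKEN(FOO, "HCI Foo command is advertised, "
 *			      "but not supported."),
 *
 * which expands to { HCI_QUIRK_BROKEN_FOO, "..." }.
 */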
4869  
4870  /* This function handles hdev setup stage:
4871   *
4872   * Calls hdev->setup
4873   * Setup address if HCI_QUIRK_USE_BDADDR_PROPERTY is set.
4874   * Sets up the address if HCI_QUIRK_USE_BDADDR_PROPERTY is set.
4875  static int hci_dev_setup_sync(struct hci_dev *hdev)
4876  {
4877  	int ret = 0;
4878  	bool invalid_bdaddr;
4879  	size_t i;
4880  
4881  	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4882  	    !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks))
4883  		return 0;
4884  
4885  	bt_dev_dbg(hdev, "");
4886  
4887  	hci_sock_dev_event(hdev, HCI_DEV_SETUP);
4888  
4889  	if (hdev->setup)
4890  		ret = hdev->setup(hdev);
4891  
4892  	for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) {
4893  		if (test_bit(hci_broken_table[i].quirk, &hdev->quirks))
4894  			bt_dev_warn(hdev, "%s", hci_broken_table[i].desc);
4895  	}
4896  
4897  	/* The transport driver can set the quirk to mark the
4898  	 * BD_ADDR invalid before creating the HCI device or in
4899  	 * its setup callback.
4900  	 */
4901  	invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
4902  			 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
4903  	if (!ret) {
4904  		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) &&
4905  		    !bacmp(&hdev->public_addr, BDADDR_ANY))
4906  			hci_dev_get_bd_addr_from_property(hdev);
4907  
4908  		if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
4909  		    hdev->set_bdaddr) {
4910  			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4911  			if (!ret)
4912  				invalid_bdaddr = false;
4913  		}
4914  	}
4915  
4916  	/* The transport driver can set these quirks before
4917  	 * creating the HCI device or in its setup callback.
4918  	 *
4919  	 * For the invalid BD_ADDR quirk it is possible that
4920  	 * it becomes a valid address if the bootloader does
4921  	 * provide it (see above).
4922  	 *
4923  	 * In case any of them is set, the controller has to
4924  	 * start up as unconfigured.
4925  	 */
4926  	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
4927  	    invalid_bdaddr)
4928  		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
4929  
4930  	/* For an unconfigured controller it is required to
4931  	 * read at least the version information provided by
4932  	 * the Read Local Version Information command.
4933  	 *
4934  	 * If the set_bdaddr driver callback is provided, then
4935  	 * also the original Bluetooth public device address
4936  	 * will be read using the Read BD Address command.
4937  	 */
4938  	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4939  		return hci_unconf_init_sync(hdev);
4940  
4941  	return ret;
4942  }
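/* Minimal usage sketch (hypothetical foo driver): a transport opts into the
 * firmware-provided address handling above by setting the quirk before
 * registering the HCI device and providing a set_bdaddr callback, e.g.:
 *
 *	set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
 *	hdev->setup = foo_setup;
 *	hdev->set_bdaddr = foo_set_bdaddr;
 *
 * With that, hci_dev_setup_sync() reads the firmware 'local-bd-address'
 * property into hdev->public_addr and programs it via set_bdaddr().
 */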
4943  
4944  /* This function handles hdev init stage:
4945   *
4946   * Calls hci_dev_setup_sync to perform setup stage
4947   * Calls hci_init_sync to perform HCI command init sequence
4948   */
4949  static int hci_dev_init_sync(struct hci_dev *hdev)
4950  {
4951  	int ret;
4952  
4953  	bt_dev_dbg(hdev, "");
4954  
4955  	atomic_set(&hdev->cmd_cnt, 1);
4956  	set_bit(HCI_INIT, &hdev->flags);
4957  
4958  	ret = hci_dev_setup_sync(hdev);
4959  
4960  	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
4961  		/* If public address change is configured, ensure that
4962  		 * the address gets programmed. If the driver does not
4963  		 * support changing the public address, fail the power
4964  		 * on procedure.
4965  		 */
4966  		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
4967  		    hdev->set_bdaddr)
4968  			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4969  		else
4970  			ret = -EADDRNOTAVAIL;
4971  	}
4972  
4973  	if (!ret) {
4974  		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
4975  		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4976  			ret = hci_init_sync(hdev);
4977  			if (!ret && hdev->post_init)
4978  				ret = hdev->post_init(hdev);
4979  		}
4980  	}
4981  
4982  	/* If the HCI Reset command is clearing all diagnostic settings,
4983  	 * then they need to be reprogrammed after the init procedure
4984  	 * completed.
4985  	 */
4986  	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
4987  	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4988  	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
4989  		ret = hdev->set_diag(hdev, true);
4990  
4991  	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4992  		msft_do_open(hdev);
4993  		aosp_do_open(hdev);
4994  	}
4995  
4996  	clear_bit(HCI_INIT, &hdev->flags);
4997  
4998  	return ret;
4999  }
5000  
5001  int hci_dev_open_sync(struct hci_dev *hdev)
5002  {
5003  	int ret;
5004  
5005  	bt_dev_dbg(hdev, "");
5006  
5007  	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
5008  		ret = -ENODEV;
5009  		goto done;
5010  	}
5011  
5012  	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
5013  	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
5014  		/* Check for rfkill but allow the HCI setup stage to
5015  		 * proceed (which in itself doesn't cause any RF activity).
5016  		 */
5017  		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
5018  			ret = -ERFKILL;
5019  			goto done;
5020  		}
5021  
5022  		/* Check for valid public address or a configured static
5023  		 * random address, but let the HCI setup proceed to
5024  		 * be able to determine if there is a public address
5025  		 * or not.
5026  		 *
5027  		 * In case of user channel usage, it is not important
5028  		 * if a public address or static random address is
5029  		 * available.
5030  		 */
5031  		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5032  		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
5033  		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
5034  			ret = -EADDRNOTAVAIL;
5035  			goto done;
5036  		}
5037  	}
5038  
5039  	if (test_bit(HCI_UP, &hdev->flags)) {
5040  		ret = -EALREADY;
5041  		goto done;
5042  	}
5043  
5044  	if (hdev->open(hdev)) {
5045  		ret = -EIO;
5046  		goto done;
5047  	}
5048  
5049  	hci_devcd_reset(hdev);
5050  
5051  	set_bit(HCI_RUNNING, &hdev->flags);
5052  	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
5053  
5054  	ret = hci_dev_init_sync(hdev);
5055  	if (!ret) {
5056  		hci_dev_hold(hdev);
5057  		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5058  		hci_adv_instances_set_rpa_expired(hdev, true);
5059  		set_bit(HCI_UP, &hdev->flags);
5060  		hci_sock_dev_event(hdev, HCI_DEV_UP);
5061  		hci_leds_update_powered(hdev, true);
5062  		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
5063  		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
5064  		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
5065  		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5066  		    hci_dev_test_flag(hdev, HCI_MGMT)) {
5067  			ret = hci_powered_update_sync(hdev);
5068  			mgmt_power_on(hdev, ret);
5069  		}
5070  	} else {
5071  		/* Init failed, cleanup */
5072  		flush_work(&hdev->tx_work);
5073  
5074  		/* Since hci_rx_work() may queue new cmd_work, it should
5075  		 * be flushed first to avoid an unexpected call of
5076  		 * hci_cmd_work().
5077  		 */
5078  		flush_work(&hdev->rx_work);
5079  		flush_work(&hdev->cmd_work);
5080  
5081  		skb_queue_purge(&hdev->cmd_q);
5082  		skb_queue_purge(&hdev->rx_q);
5083  
5084  		if (hdev->flush)
5085  			hdev->flush(hdev);
5086  
5087  		if (hdev->sent_cmd) {
5088  			cancel_delayed_work_sync(&hdev->cmd_timer);
5089  			kfree_skb(hdev->sent_cmd);
5090  			hdev->sent_cmd = NULL;
5091  		}
5092  
5093  		if (hdev->req_skb) {
5094  			kfree_skb(hdev->req_skb);
5095  			hdev->req_skb = NULL;
5096  		}
5097  
5098  		clear_bit(HCI_RUNNING, &hdev->flags);
5099  		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
5100  
5101  		hdev->close(hdev);
5102  		hdev->flags &= BIT(HCI_RAW);
5103  	}
5104  
5105  done:
5106  	return ret;
5107  }
5108  
5109  /* This function requires the caller holds hdev->lock */
5110  static void hci_pend_le_actions_clear(struct hci_dev *hdev)
5111  {
5112  	struct hci_conn_params *p;
5113  
5114  	list_for_each_entry(p, &hdev->le_conn_params, list) {
5115  		hci_pend_le_list_del_init(p);
5116  		if (p->conn) {
5117  			hci_conn_drop(p->conn);
5118  			hci_conn_put(p->conn);
5119  			p->conn = NULL;
5120  		}
5121  	}
5122  
5123  	BT_DBG("All LE pending actions cleared");
5124  }
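/* Usage note: per the lock annotation above, callers wrap this call in
 * hci_dev_lock()/hci_dev_unlock(), as hci_dev_close_sync() below does:
 *
 *	hci_dev_lock(hdev);
 *	hci_pend_le_actions_clear(hdev);
 *	hci_dev_unlock(hdev);
 */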
5125  
5126  static int hci_dev_shutdown(struct hci_dev *hdev)
5127  {
5128  	int err = 0;
5129  	/* Similar to how we first do setup and then set the exclusive access
5130  	 * bit for userspace, we must first unset userchannel and then clean up.
5131  	 * Otherwise, the kernel can't properly use the hci channel to clean up
5132  	 * the controller (some shutdown routines require sending additional
5133  	 * commands to the controller for example).
5134  	 */
5135  	bool was_userchannel =
5136  		hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL);
5137  
5138  	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
5139  	    test_bit(HCI_UP, &hdev->flags)) {
5140  		/* Execute vendor specific shutdown routine */
5141  		if (hdev->shutdown)
5142  			err = hdev->shutdown(hdev);
5143  	}
5144  
5145  	if (was_userchannel)
5146  		hci_dev_set_flag(hdev, HCI_USER_CHANNEL);
5147  
5148  	return err;
5149  }
5150  
5151  int hci_dev_close_sync(struct hci_dev *hdev)
5152  {
5153  	bool auto_off;
5154  	int err = 0;
5155  
5156  	bt_dev_dbg(hdev, "");
5157  
5158  	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
5159  		disable_delayed_work(&hdev->power_off);
5160  		disable_delayed_work(&hdev->ncmd_timer);
5161  		disable_delayed_work(&hdev->le_scan_disable);
5162  	} else {
5163  		cancel_delayed_work(&hdev->power_off);
5164  		cancel_delayed_work(&hdev->ncmd_timer);
5165  		cancel_delayed_work(&hdev->le_scan_disable);
5166  	}
5167  
5168  	hci_cmd_sync_cancel_sync(hdev, ENODEV);
5169  
5170  	cancel_interleave_scan(hdev);
5171  
5172  	if (hdev->adv_instance_timeout) {
5173  		cancel_delayed_work_sync(&hdev->adv_instance_expire);
5174  		hdev->adv_instance_timeout = 0;
5175  	}
5176  
5177  	err = hci_dev_shutdown(hdev);
5178  
5179  	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
5180  		cancel_delayed_work_sync(&hdev->cmd_timer);
5181  		return err;
5182  	}
5183  
5184  	hci_leds_update_powered(hdev, false);
5185  
5186  	/* Flush RX and TX works */
5187  	flush_work(&hdev->tx_work);
5188  	flush_work(&hdev->rx_work);
5189  
5190  	if (hdev->discov_timeout > 0) {
5191  		hdev->discov_timeout = 0;
5192  		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5193  		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
5194  	}
5195  
5196  	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
5197  		cancel_delayed_work(&hdev->service_cache);
5198  
5199  	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
5200  		struct adv_info *adv_instance;
5201  
5202  		cancel_delayed_work_sync(&hdev->rpa_expired);
5203  
5204  		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
5205  			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
5206  	}
5207  
5208  	/* Avoid potential lockdep warnings from the *_flush() calls by
5209  	 * ensuring the workqueue is empty up front.
5210  	 */
5211  	drain_workqueue(hdev->workqueue);
5212  
5213  	hci_dev_lock(hdev);
5214  
5215  	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5216  
5217  	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
5218  
5219  	if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5220  	    hci_dev_test_flag(hdev, HCI_MGMT))
5221  		__mgmt_power_off(hdev);
5222  
5223  	hci_inquiry_cache_flush(hdev);
5224  	hci_pend_le_actions_clear(hdev);
5225  	hci_conn_hash_flush(hdev);
5226  	/* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
5227  	smp_unregister(hdev);
5228  	hci_dev_unlock(hdev);
5229  
5230  	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
5231  
5232  	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5233  		aosp_do_close(hdev);
5234  		msft_do_close(hdev);
5235  	}
5236  
5237  	if (hdev->flush)
5238  		hdev->flush(hdev);
5239  
5240  	/* Reset device */
5241  	skb_queue_purge(&hdev->cmd_q);
5242  	atomic_set(&hdev->cmd_cnt, 1);
5243  	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
5244  	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5245  		set_bit(HCI_INIT, &hdev->flags);
5246  		hci_reset_sync(hdev);
5247  		clear_bit(HCI_INIT, &hdev->flags);
5248  	}
5249  
5250  	/* flush cmd  work */
5251  	flush_work(&hdev->cmd_work);
5252  
5253  	/* Drop queues */
5254  	skb_queue_purge(&hdev->rx_q);
5255  	skb_queue_purge(&hdev->cmd_q);
5256  	skb_queue_purge(&hdev->raw_q);
5257  
5258  	/* Drop last sent command */
5259  	if (hdev->sent_cmd) {
5260  		cancel_delayed_work_sync(&hdev->cmd_timer);
5261  		kfree_skb(hdev->sent_cmd);
5262  		hdev->sent_cmd = NULL;
5263  	}
5264  
5265  	/* Drop last request */
5266  	if (hdev->req_skb) {
5267  		kfree_skb(hdev->req_skb);
5268  		hdev->req_skb = NULL;
5269  	}
5270  
5271  	clear_bit(HCI_RUNNING, &hdev->flags);
5272  	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
5273  
5274  	/* After this point our queues are empty and no tasks are scheduled. */
5275  	hdev->close(hdev);
5276  
5277  	/* Clear flags */
5278  	hdev->flags &= BIT(HCI_RAW);
5279  	hci_dev_clear_volatile_flags(hdev);
5280  
5281  	memset(hdev->eir, 0, sizeof(hdev->eir));
5282  	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
5283  	bacpy(&hdev->random_addr, BDADDR_ANY);
5284  	hci_codec_list_clear(&hdev->local_codecs);
5285  
5286  	hci_dev_put(hdev);
5287  	return err;
5288  }
5289  
5290  /* This function performs the power-on HCI command sequence as follows:
5291   *
5292   * If controller is already up (HCI_UP) performs hci_powered_update_sync
5293   * sequence otherwise run hci_dev_open_sync which will follow with
5294   * hci_powered_update_sync after the init sequence is completed.
5295   */
5296  static int hci_power_on_sync(struct hci_dev *hdev)
5297  {
5298  	int err;
5299  
5300  	if (test_bit(HCI_UP, &hdev->flags) &&
5301  	    hci_dev_test_flag(hdev, HCI_MGMT) &&
5302  	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
5303  		cancel_delayed_work(&hdev->power_off);
5304  		return hci_powered_update_sync(hdev);
5305  	}
5306  
5307  	err = hci_dev_open_sync(hdev);
5308  	if (err < 0)
5309  		return err;
5310  
5311  	/* During the HCI setup phase, a few error conditions are
5312  	 * ignored and they need to be checked now. If they are still
5313  	 * valid, it is important to return the device back off.
5314  	 */
5315  	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
5316  	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
5317  	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
5318  	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
5319  		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
5320  		hci_dev_close_sync(hdev);
5321  	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
5322  		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
5323  				   HCI_AUTO_OFF_TIMEOUT);
5324  	}
5325  
5326  	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
5327  		/* For unconfigured devices, set the HCI_RAW flag
5328  		 * so that userspace can easily identify them.
5329  		 */
5330  		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5331  			set_bit(HCI_RAW, &hdev->flags);
5332  
5333  		/* For fully configured devices, this will send
5334  		 * the Index Added event. For unconfigured devices,
5335  		 * it will send the Unconfigured Index Added event.
5336  		 *
5337  		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
5338  		 * and no event will be sent.
5339  		 */
5340  		mgmt_index_added(hdev);
5341  	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5342  		/* Now that the controller is configured, it is
5343  		 * important to clear the HCI_RAW flag.
5344  		 */
5345  		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5346  			clear_bit(HCI_RAW, &hdev->flags);
5347  
5348  		/* Powering on the controller with HCI_CONFIG set only
5349  		 * happens with the transition from unconfigured to
5350  		 * configured. This will send the Index Added event.
5351  		 */
5352  		mgmt_index_added(hdev);
5353  	}
5354  
5355  	return 0;
5356  }
5357  
5358  static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
5359  {
5360  	struct hci_cp_remote_name_req_cancel cp;
5361  
5362  	memset(&cp, 0, sizeof(cp));
5363  	bacpy(&cp.bdaddr, addr);
5364  
5365  	return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
5366  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5367  }
5368  
5369  int hci_stop_discovery_sync(struct hci_dev *hdev)
5370  {
5371  	struct discovery_state *d = &hdev->discovery;
5372  	struct inquiry_entry *e;
5373  	int err;
5374  
5375  	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
5376  
5377  	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
5378  		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
5379  			err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
5380  						    0, NULL, HCI_CMD_TIMEOUT);
5381  			if (err)
5382  				return err;
5383  		}
5384  
5385  		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
5386  			cancel_delayed_work(&hdev->le_scan_disable);
5387  
5388  			err = hci_scan_disable_sync(hdev);
5389  			if (err)
5390  				return err;
5391  		}
5392  
5393  	} else {
5394  		err = hci_scan_disable_sync(hdev);
5395  		if (err)
5396  			return err;
5397  	}
5398  
5399  	/* Resume advertising if it was paused */
5400  	if (ll_privacy_capable(hdev))
5401  		hci_resume_advertising_sync(hdev);
5402  
5403  	/* No further actions needed for LE-only discovery */
5404  	if (d->type == DISCOV_TYPE_LE)
5405  		return 0;
5406  
5407  	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
5408  		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
5409  						     NAME_PENDING);
5410  		if (!e)
5411  			return 0;
5412  
5413  		/* Ignore cancel errors since they should not interfere with
5414  		 * stopping of the discovery.
5415  		 */
5416  		hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
5417  	}
5418  
5419  	return 0;
5420  }
5421  
5422  static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
5423  			       u8 reason)
5424  {
5425  	struct hci_cp_disconnect cp;
5426  
5427  	if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
5428  		/* This is a BIS connection, hci_conn_del will
5429  		 * do the necessary cleanup.
5430  		 */
5431  		hci_dev_lock(hdev);
5432  		hci_conn_failed(conn, reason);
5433  		hci_dev_unlock(hdev);
5434  
5435  		return 0;
5436  	}
5437  
5438  	memset(&cp, 0, sizeof(cp));
5439  	cp.handle = cpu_to_le16(conn->handle);
5440  	cp.reason = reason;
5441  
5442  	/* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5443  	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5444  	 * used when suspending or powering off, where we don't want to wait
5445  	 * for the peer's response.
5446  	 */
5447  	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5448  		return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
5449  						sizeof(cp), &cp,
5450  						HCI_EV_DISCONN_COMPLETE,
5451  						HCI_CMD_TIMEOUT, NULL);
5452  
5453  	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp,
5454  				     HCI_CMD_TIMEOUT);
5455  }
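/* Note on the two helpers used above: __hci_cmd_sync_status() completes as
 * soon as the Command Status/Complete for the opcode is seen, while
 * __hci_cmd_sync_status_sk() keeps waiting until the given event (here
 * HCI_EV_DISCONN_COMPLETE) arrives or the timeout expires.
 */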
5456  
5457  static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
5458  				      struct hci_conn *conn, u8 reason)
5459  {
5460  	/* Return reason if scanning since the connection shall probably be
5461  	 * cleaned up directly.
5462  	 */
5463  	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
5464  		return reason;
5465  
5466  	if (conn->role == HCI_ROLE_SLAVE ||
5467  	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
5468  		return 0;
5469  
5470  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
5471  				     0, NULL, HCI_CMD_TIMEOUT);
5472  }
5473  
5474  static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
5475  				   u8 reason)
5476  {
5477  	if (conn->type == LE_LINK)
5478  		return hci_le_connect_cancel_sync(hdev, conn, reason);
5479  
5480  	if (conn->type == ISO_LINK) {
5481  		/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
5482  		 * page 1857:
5483  		 *
5484  		 * If this command is issued for a CIS on the Central and the
5485  		 * CIS is successfully terminated before being established,
5486  		 * then an HCI_LE_CIS_Established event shall also be sent for
5487  		 * this CIS with the Status Operation Cancelled by Host (0x44).
5488  		 */
5489  		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
5490  			return hci_disconnect_sync(hdev, conn, reason);
5491  
5492  		/* A CIS for which no Create CIS was sent has nothing to cancel */
5493  		if (bacmp(&conn->dst, BDADDR_ANY))
5494  			return HCI_ERROR_LOCAL_HOST_TERM;
5495  
5496  		/* There is no way to cancel a BIS without terminating the BIG
5497  		 * which is done later on connection cleanup.
5498  		 */
5499  		return 0;
5500  	}
5501  
5502  	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
5503  		return 0;
5504  
5505  	/* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5506  	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5507  	 * used when suspending or powering off, where we don't want to wait
5508  	 * for the peer's response.
5509  	 */
5510  	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5511  		return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL,
5512  						6, &conn->dst,
5513  						HCI_EV_CONN_COMPLETE,
5514  						HCI_CMD_TIMEOUT, NULL);
5515  
5516  	return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
5517  				     6, &conn->dst, HCI_CMD_TIMEOUT);
5518  }
5519  
5520  static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
5521  			       u8 reason)
5522  {
5523  	struct hci_cp_reject_sync_conn_req cp;
5524  
5525  	memset(&cp, 0, sizeof(cp));
5526  	bacpy(&cp.bdaddr, &conn->dst);
5527  	cp.reason = reason;
5528  
5529  	/* SCO rejection has its own limited set of allowed error
5530  	 * values (0x0D-0x0F): limited resources (0x0D), security
5531  	 * reasons (0x0E) and unacceptable BD_ADDR (0x0F).
5532  	 */
5532  	if (reason < 0x0d || reason > 0x0f)
5533  		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
5534  
5535  	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
5536  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5537  }
5538  
5539  static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
5540  				  u8 reason)
5541  {
5542  	struct hci_cp_le_reject_cis cp;
5543  
5544  	memset(&cp, 0, sizeof(cp));
5545  	cp.handle = cpu_to_le16(conn->handle);
5546  	cp.reason = reason;
5547  
5548  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
5549  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5550  }
5551  
5552  static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
5553  				u8 reason)
5554  {
5555  	struct hci_cp_reject_conn_req cp;
5556  
5557  	if (conn->type == ISO_LINK)
5558  		return hci_le_reject_cis_sync(hdev, conn, reason);
5559  
5560  	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
5561  		return hci_reject_sco_sync(hdev, conn, reason);
5562  
5563  	memset(&cp, 0, sizeof(cp));
5564  	bacpy(&cp.bdaddr, &conn->dst);
5565  	cp.reason = reason;
5566  
5567  	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
5568  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5569  }
5570  
5571  int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
5572  {
5573  	int err = 0;
5574  	u16 handle = conn->handle;
5575  	bool disconnect = false;
5576  	struct hci_conn *c;
5577  
5578  	switch (conn->state) {
5579  	case BT_CONNECTED:
5580  	case BT_CONFIG:
5581  		err = hci_disconnect_sync(hdev, conn, reason);
5582  		break;
5583  	case BT_CONNECT:
5584  		err = hci_connect_cancel_sync(hdev, conn, reason);
5585  		break;
5586  	case BT_CONNECT2:
5587  		err = hci_reject_conn_sync(hdev, conn, reason);
5588  		break;
5589  	case BT_OPEN:
5590  	case BT_BOUND:
5591  		break;
5592  	default:
5593  		disconnect = true;
5594  		break;
5595  	}
5596  
5597  	hci_dev_lock(hdev);
5598  
5599  	/* Check if the connection has been cleaned up concurrently */
5600  	c = hci_conn_hash_lookup_handle(hdev, handle);
5601  	if (!c || c != conn) {
5602  		err = 0;
5603  		goto unlock;
5604  	}
5605  
5606  	/* Clean up the hci_conn object if it cannot be cancelled, as that
5607  	 * likely means the controller and host stack are out of sync, or,
5608  	 * in the LE case, it was still scanning so it can be cleaned up
5609  	 * safely.
5610  	 */
5611  	if (disconnect) {
5612  		conn->state = BT_CLOSED;
5613  		hci_disconn_cfm(conn, reason);
5614  		hci_conn_del(conn);
5615  	} else {
5616  		hci_conn_failed(conn, reason);
5617  	}
5618  
5619  unlock:
5620  	hci_dev_unlock(hdev);
5621  	return err;
5622  }
5623  
5624  static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
5625  {
5626  	struct list_head *head = &hdev->conn_hash.list;
5627  	struct hci_conn *conn;
5628  
5629  	rcu_read_lock();
5630  	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
5631  		/* Make sure the connection is not freed while unlocking */
5632  		conn = hci_conn_get(conn);
5633  		rcu_read_unlock();
5634  		/* Disregard possible errors since hci_conn_del shall have been
5635  		 * called even if an error occurred, as that would cause
5636  		 * hci_conn_failed to be invoked, which calls hci_conn_del
5637  		 * internally.
5638  		 */
5639  		hci_abort_conn_sync(hdev, conn, reason);
5640  		hci_conn_put(conn);
5641  		rcu_read_lock();
5642  	}
5643  	rcu_read_unlock();
5644  
5645  	return 0;
5646  }
5647  
5648  /* This function performs the power-off HCI command sequence as follows:
5649   *
5650   * Clear Advertising
5651   * Stop Discovery
5652   * Disconnect all connections
5653   * hci_dev_close_sync
5654   */
5655  static int hci_power_off_sync(struct hci_dev *hdev)
5656  {
5657  	int err;
5658  
5659  	/* If controller is already down there is nothing to do */
5660  	if (!test_bit(HCI_UP, &hdev->flags))
5661  		return 0;
5662  
5663  	hci_dev_set_flag(hdev, HCI_POWERING_DOWN);
5664  
5665  	if (test_bit(HCI_ISCAN, &hdev->flags) ||
5666  	    test_bit(HCI_PSCAN, &hdev->flags)) {
5667  		err = hci_write_scan_enable_sync(hdev, 0x00);
5668  		if (err)
5669  			goto out;
5670  	}
5671  
5672  	err = hci_clear_adv_sync(hdev, NULL, false);
5673  	if (err)
5674  		goto out;
5675  
5676  	err = hci_stop_discovery_sync(hdev);
5677  	if (err)
5678  		goto out;
5679  
5680  	/* Terminated due to Power Off */
5681  	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
5682  	if (err)
5683  		goto out;
5684  
5685  	err = hci_dev_close_sync(hdev);
5686  
5687  out:
5688  	hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
5689  	return err;
5690  }
5691  
5692  int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
5693  {
5694  	if (val)
5695  		return hci_power_on_sync(hdev);
5696  
5697  	return hci_power_off_sync(hdev);
5698  }
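/* Illustrative sketch (hypothetical wrapper, not from this file): power
 * changes are normally driven through the cmd_sync machinery rather than
 * by calling this helper directly, along the lines of:
 *
 *	static int set_powered_sync(struct hci_dev *hdev, void *data)
 *	{
 *		return hci_set_powered_sync(hdev, *(u8 *)data);
 *	}
 *
 *	hci_cmd_sync_queue(hdev, set_powered_sync, &val, NULL);
 */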
5699  
5700  static int hci_write_iac_sync(struct hci_dev *hdev)
5701  {
5702  	struct hci_cp_write_current_iac_lap cp;
5703  
5704  	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
5705  		return 0;
5706  
5707  	memset(&cp, 0, sizeof(cp));
5708  
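	/* The LAP bytes below are little-endian encodings of the reserved
	 * inquiry access codes: LIAC 0x9e8b00 and GIAC 0x9e8b33.
	 */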
5709  	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
5710  		/* Limited discoverable mode */
5711  		cp.num_iac = min_t(u8, hdev->num_iac, 2);
5712  		cp.iac_lap[0] = 0x00;	/* LIAC */
5713  		cp.iac_lap[1] = 0x8b;
5714  		cp.iac_lap[2] = 0x9e;
5715  		cp.iac_lap[3] = 0x33;	/* GIAC */
5716  		cp.iac_lap[4] = 0x8b;
5717  		cp.iac_lap[5] = 0x9e;
5718  	} else {
5719  		/* General discoverable mode */
5720  		cp.num_iac = 1;
5721  		cp.iac_lap[0] = 0x33;	/* GIAC */
5722  		cp.iac_lap[1] = 0x8b;
5723  		cp.iac_lap[2] = 0x9e;
5724  	}
5725  
5726  	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
5727  				     (cp.num_iac * 3) + 1, &cp,
5728  				     HCI_CMD_TIMEOUT);
5729  }
5730  
5731  int hci_update_discoverable_sync(struct hci_dev *hdev)
5732  {
5733  	int err = 0;
5734  
5735  	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5736  		err = hci_write_iac_sync(hdev);
5737  		if (err)
5738  			return err;
5739  
5740  		err = hci_update_scan_sync(hdev);
5741  		if (err)
5742  			return err;
5743  
5744  		err = hci_update_class_sync(hdev);
5745  		if (err)
5746  			return err;
5747  	}
5748  
5749  	/* Advertising instances don't use the global discoverable setting, so
5750  	 * only update AD if advertising was enabled using Set Advertising.
5751  	 */
5752  	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
5753  		err = hci_update_adv_data_sync(hdev, 0x00);
5754  		if (err)
5755  			return err;
5756  
5757  		/* Discoverable mode affects the local advertising
5758  		 * address in limited privacy mode.
5759  		 */
5760  		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
5761  			if (ext_adv_capable(hdev))
5762  				err = hci_start_ext_adv_sync(hdev, 0x00);
5763  			else
5764  				err = hci_enable_advertising_sync(hdev);
5765  		}
5766  	}
5767  
5768  	return err;
5769  }
5770  
5771  static int update_discoverable_sync(struct hci_dev *hdev, void *data)
5772  {
5773  	return hci_update_discoverable_sync(hdev);
5774  }
5775  
5776  int hci_update_discoverable(struct hci_dev *hdev)
5777  {
5778  	/* Only queue if it would have any effect */
5779  	if (hdev_is_powered(hdev) &&
5780  	    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5781  	    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
5782  	    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
5783  		return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
5784  					  NULL);
5785  
5786  	return 0;
5787  }
5788  
5789  int hci_update_connectable_sync(struct hci_dev *hdev)
5790  {
5791  	int err;
5792  
5793  	err = hci_update_scan_sync(hdev);
5794  	if (err)
5795  		return err;
5796  
5797  	/* If BR/EDR is not enabled and we disable advertising as a
5798  	 * by-product of disabling connectable, we need to update the
5799  	 * advertising flags.
5800  	 */
5801  	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
5802  		err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
5803  
5804  	/* Update the advertising parameters if necessary */
5805  	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5806  	    !list_empty(&hdev->adv_instances)) {
5807  		if (ext_adv_capable(hdev))
5808  			err = hci_start_ext_adv_sync(hdev,
5809  						     hdev->cur_adv_instance);
5810  		else
5811  			err = hci_enable_advertising_sync(hdev);
5812  
5813  		if (err)
5814  			return err;
5815  	}
5816  
5817  	return hci_update_passive_scan_sync(hdev);
5818  }
5819  
5820  int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp)
5821  {
5822  	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
5823  	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
5824  	struct hci_cp_inquiry cp;
5825  
5826  	bt_dev_dbg(hdev, "");
5827  
5828  	if (test_bit(HCI_INQUIRY, &hdev->flags))
5829  		return 0;
5830  
5831  	hci_dev_lock(hdev);
5832  	hci_inquiry_cache_flush(hdev);
5833  	hci_dev_unlock(hdev);
5834  
5835  	memset(&cp, 0, sizeof(cp));
5836  
5837  	if (hdev->discovery.limited)
5838  		memcpy(&cp.lap, liac, sizeof(cp.lap));
5839  	else
5840  		memcpy(&cp.lap, giac, sizeof(cp.lap));
5841  
5842  	cp.length = length;
5843  	cp.num_rsp = num_rsp;
5844  
5845  	return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
5846  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5847  }
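/* Note: cp.length above is the inquiry duration in units of 1.28 s (e.g.
 * DISCOV_BREDR_INQUIRY_LEN), and num_rsp == 0 requests an unlimited number
 * of responses.
 */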
5848  
5849  static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
5850  {
5851  	u8 own_addr_type;
5852  	/* Accept list is not used for discovery */
5853  	u8 filter_policy = 0x00;
5854  	/* Default is to enable duplicates filter */
5855  	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5856  	int err;
5857  
5858  	bt_dev_dbg(hdev, "");
5859  
5860  	/* If controller is scanning, it means the passive scanning is
5861  	 * running. Thus, we should temporarily stop it in order to set the
5862  	 * discovery scanning parameters.
5863  	 */
5864  	err = hci_scan_disable_sync(hdev);
5865  	if (err) {
5866  		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
5867  		return err;
5868  	}
5869  
5870  	cancel_interleave_scan(hdev);
5871  
5872  	/* Pause address resolution for active scan and stop advertising if
5873  	 * privacy is enabled.
5874  	 */
5875  	err = hci_pause_addr_resolution(hdev);
5876  	if (err)
5877  		goto failed;
5878  
5879  	/* All active scans will be done with either a resolvable private
5880  	 * address (when privacy feature has been enabled) or non-resolvable
5881  	 * private address.
5882  	 */
5883  	err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
5884  					     &own_addr_type);
5885  	if (err < 0)
5886  		own_addr_type = ADDR_LE_DEV_PUBLIC;
5887  
5888  	if (hci_is_adv_monitoring(hdev) ||
5889  	    (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
5890  	    hdev->discovery.result_filtering)) {
5891  		/* The duplicate filter should be disabled when an advertisement
5892  		 * monitor is activated, otherwise AdvMon can only receive one
5893  		 * advertisement per peer during active scanning, and might
5894  		 * falsely report loss of these peers.
5895  		 *
5896  		 * If the controller does strict duplicate filtering and the
5897  		 * discovery requires result filtering, disable controller-based
5898  		 * filtering since it can cause reports that would match the
5899  		 * host filter to not be reported.
5900  		 */
5901  		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5902  	}
5903  
5904  	err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
5905  				  hdev->le_scan_window_discovery,
5906  				  own_addr_type, filter_policy, filter_dup);
5907  	if (!err)
5908  		return err;
5909  
5910  failed:
5911  	/* Resume advertising if it was paused */
5912  	if (ll_privacy_capable(hdev))
5913  		hci_resume_advertising_sync(hdev);
5914  
5915  	/* Resume passive scanning */
5916  	hci_update_passive_scan_sync(hdev);
5917  	return err;
5918  }
5919  
5920  static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
5921  {
5922  	int err;
5923  
5924  	bt_dev_dbg(hdev, "");
5925  
5926  	err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
5927  	if (err)
5928  		return err;
5929  
5930  	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
5931  }
5932  
5933  int hci_start_discovery_sync(struct hci_dev *hdev)
5934  {
5935  	unsigned long timeout;
5936  	int err;
5937  
5938  	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
5939  
5940  	switch (hdev->discovery.type) {
5941  	case DISCOV_TYPE_BREDR:
5942  		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
5943  	case DISCOV_TYPE_INTERLEAVED:
5944  		/* When running simultaneous discovery, the LE scanning time
5945  		 * should occupy the whole discovery time since BR/EDR inquiry
5946  		 * and LE scanning are scheduled by the controller.
5947  		 *
5948  		 * For interleaving discovery in comparison, BR/EDR inquiry
5949  		 * and LE scanning are done sequentially with separate
5950  		 * timeouts.
5951  		 */
5952  		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
5953  			     &hdev->quirks)) {
5954  			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5955  			/* During simultaneous discovery, we double LE scan
5956  			 * interval. We must leave some time for the controller
5957  			 * to do BR/EDR inquiry.
5958  			 */
5959  			err = hci_start_interleaved_discovery_sync(hdev);
5960  			break;
5961  		}
5962  
5963  		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
5964  		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5965  		break;
5966  	case DISCOV_TYPE_LE:
5967  		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5968  		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5969  		break;
5970  	default:
5971  		return -EINVAL;
5972  	}
5973  
5974  	if (err)
5975  		return err;
5976  
5977  	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
5978  
5979  	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
5980  			   timeout);
5981  	return 0;
5982  }
5983  
5984  static void hci_suspend_monitor_sync(struct hci_dev *hdev)
5985  {
5986  	switch (hci_get_adv_monitor_offload_ext(hdev)) {
5987  	case HCI_ADV_MONITOR_EXT_MSFT:
5988  		msft_suspend_sync(hdev);
5989  		break;
5990  	default:
5991  		return;
5992  	}
5993  }
5994  
5995  /* This function disables discovery and marks it as paused */
5996  static int hci_pause_discovery_sync(struct hci_dev *hdev)
5997  {
5998  	int old_state = hdev->discovery.state;
5999  	int err;
6000  
6001  	/* If discovery is already stopped/stopping/paused there is nothing to do */
6002  	if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
6003  	    hdev->discovery_paused)
6004  		return 0;
6005  
6006  	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6007  	err = hci_stop_discovery_sync(hdev);
6008  	if (err)
6009  		return err;
6010  
6011  	hdev->discovery_paused = true;
6012  	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6013  
6014  	return 0;
6015  }
6016  
6017  static int hci_update_event_filter_sync(struct hci_dev *hdev)
6018  {
6019  	struct bdaddr_list_with_flags *b;
6020  	u8 scan = SCAN_DISABLED;
6021  	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
6022  	int err;
6023  
6024  	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6025  		return 0;
6026  
6027  	/* Some fake CSR controllers lock up after setting this type of
6028  	 * filter, so avoid sending the request altogether.
6029  	 */
6030  	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
6031  		return 0;
6032  
6033  	/* Always clear event filter when starting */
6034  	hci_clear_event_filter_sync(hdev);
6035  
6036  	list_for_each_entry(b, &hdev->accept_list, list) {
6037  		if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
6038  			continue;
6039  
6040  		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
6041  
6042  		err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
6043  						 HCI_CONN_SETUP_ALLOW_BDADDR,
6044  						 &b->bdaddr,
6045  						 HCI_CONN_SETUP_AUTO_ON);
6046  		if (err)
6047  			bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
6048  				   &b->bdaddr);
6049  		else
6050  			scan = SCAN_PAGE;
6051  	}
6052  
6053  	/* Write the scan enable only when the desired state differs */
6054  	if (!!scan != scanning)
6055  		hci_write_scan_enable_sync(hdev, scan);
6057  
6058  	return 0;
6059  }
6060  
6061  /* This function disables scanning (BR and LE) and marks it as paused */
6062  static int hci_pause_scan_sync(struct hci_dev *hdev)
6063  {
6064  	if (hdev->scanning_paused)
6065  		return 0;
6066  
6067  	/* Disable page scan if enabled */
6068  	if (test_bit(HCI_PSCAN, &hdev->flags))
6069  		hci_write_scan_enable_sync(hdev, SCAN_DISABLED);
6070  
6071  	hci_scan_disable_sync(hdev);
6072  
6073  	hdev->scanning_paused = true;
6074  
6075  	return 0;
6076  }
6077  
6078  /* This function performs the HCI suspend procedures in the following order:
6079   *
6080   * Pause discovery (active scanning/inquiry)
6081   * Pause Directed Advertising/Advertising
6082   * Pause Scanning (passive scanning in case discovery was not active)
6083   * Disconnect all connections
6084   * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup
6085   * otherwise:
6086   * Update event mask (only set events that are allowed to wake up the host)
6087   * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
6088   * Update passive scanning (lower duty cycle)
6089   * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
6090   */
6091  int hci_suspend_sync(struct hci_dev *hdev)
6092  {
6093  	int err;
6094  
6095  	/* If already marked as suspended there is nothing to do */
6096  	if (hdev->suspended)
6097  		return 0;
6098  
6099  	/* Mark device as suspended */
6100  	hdev->suspended = true;
6101  
6102  	/* Pause discovery if not already stopped */
6103  	hci_pause_discovery_sync(hdev);
6104  
6105  	/* Pause other advertisements */
6106  	hci_pause_advertising_sync(hdev);
6107  
6108  	/* Suspend monitor filters */
6109  	hci_suspend_monitor_sync(hdev);
6110  
6111  	/* Prevent disconnects from causing scanning to be re-enabled */
6112  	hci_pause_scan_sync(hdev);
6113  
6114  	if (hci_conn_count(hdev)) {
6115  		/* Soft disconnect everything (power off) */
6116  		err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
6117  		if (err) {
6118  			/* Set state to BT_RUNNING so resume doesn't notify */
6119  			hdev->suspend_state = BT_RUNNING;
6120  			hci_resume_sync(hdev);
6121  			return err;
6122  		}
6123  
6124  		/* Update the event mask so only the allowed events can wake up
6125  		 * the host.
6126  		 */
6127  		hci_set_event_mask_sync(hdev);
6128  	}
6129  
6130  	/* Only configure accept list if disconnect succeeded and wake
6131  	 * isn't being prevented.
6132  	 */
6133  	if (!hdev->wakeup || !hdev->wakeup(hdev)) {
6134  		hdev->suspend_state = BT_SUSPEND_DISCONNECT;
6135  		return 0;
6136  	}
6137  
6138  	/* Unpause to take care of updating scanning params */
6139  	hdev->scanning_paused = false;
6140  
6141  	/* Enable event filter for paired devices */
6142  	hci_update_event_filter_sync(hdev);
6143  
6144  	/* Update LE passive scan if enabled */
6145  	hci_update_passive_scan_sync(hdev);
6146  
6147  	/* Pause scan changes again. */
6148  	hdev->scanning_paused = true;
6149  
6150  	hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;
6151  
6152  	return 0;
6153  }
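/* Illustrative sketch (hypothetical, simplified from the real PM wiring in
 * hci_core.c, which dispatches through the cmd_sync queue): the
 * suspend/resume pair above is driven from a PM notifier roughly like:
 *
 *	static int foo_pm_notifier(struct notifier_block *nb,
 *				   unsigned long action, void *data)
 *	{
 *		struct hci_dev *hdev = container_of(nb, struct hci_dev,
 *						    suspend_notifier);
 *
 *		if (action == PM_SUSPEND_PREPARE)
 *			hci_suspend_sync(hdev);
 *		else if (action == PM_POST_SUSPEND)
 *			hci_resume_sync(hdev);
 *
 *		return NOTIFY_DONE;
 *	}
 */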
6154  
6155  /* This function resumes discovery */
6156  static int hci_resume_discovery_sync(struct hci_dev *hdev)
6157  {
6158  	int err;
6159  
6160  	/* If discovery was not paused there is nothing to do */
6161  	if (!hdev->discovery_paused)
6162  		return 0;
6163  
6164  	hdev->discovery_paused = false;
6165  
6166  	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6167  
6168  	err = hci_start_discovery_sync(hdev);
6169  
6170  	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
6171  				DISCOVERY_FINDING);
6172  
6173  	return err;
6174  }
6175  
6176  static void hci_resume_monitor_sync(struct hci_dev *hdev)
6177  {
6178  	switch (hci_get_adv_monitor_offload_ext(hdev)) {
6179  	case HCI_ADV_MONITOR_EXT_MSFT:
6180  		msft_resume_sync(hdev);
6181  		break;
6182  	default:
6183  		return;
6184  	}
6185  }
6186  
6187  /* This function resumes scanning and resets the paused flag */
6188  static int hci_resume_scan_sync(struct hci_dev *hdev)
6189  {
6190  	if (!hdev->scanning_paused)
6191  		return 0;
6192  
6193  	hdev->scanning_paused = false;
6194  
6195  	hci_update_scan_sync(hdev);
6196  
6197  	/* Reset passive scanning to normal */
6198  	hci_update_passive_scan_sync(hdev);
6199  
6200  	return 0;
6201  }
6202  
6203  /* This function performs the HCI resume procedures in the following order:
6204   *
6205   * Restore event mask
6206   * Clear event filter
6207   * Update passive scanning (normal duty cycle)
6208   * Resume Directed Advertising/Advertising
6209   * Resume discovery (active scanning/inquiry)
6210   */
6211  int hci_resume_sync(struct hci_dev *hdev)
6212  {
6213  	/* If not marked as suspended there is nothing to do */
6214  	if (!hdev->suspended)
6215  		return 0;
6216  
6217  	hdev->suspended = false;
6218  
6219  	/* Restore event mask */
6220  	hci_set_event_mask_sync(hdev);
6221  
6222  	/* Clear any event filters and restore scan state */
6223  	hci_clear_event_filter_sync(hdev);
6224  
6225  	/* Resume scanning */
6226  	hci_resume_scan_sync(hdev);
6227  
6228  	/* Resume monitor filters */
6229  	hci_resume_monitor_sync(hdev);
6230  
6231  	/* Resume other advertisements */
6232  	hci_resume_advertising_sync(hdev);
6233  
6234  	/* Resume discovery */
6235  	hci_resume_discovery_sync(hdev);
6236  
6237  	return 0;
6238  }
6239  
6240  static bool conn_use_rpa(struct hci_conn *conn)
6241  {
6242  	struct hci_dev *hdev = conn->hdev;
6243  
6244  	return hci_dev_test_flag(hdev, HCI_PRIVACY);
6245  }
6246  
6247  static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
6248  						struct hci_conn *conn)
6249  {
6250  	struct hci_cp_le_set_ext_adv_params cp;
6251  	int err;
6252  	bdaddr_t random_addr;
6253  	u8 own_addr_type;
6254  
6255  	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6256  					     &own_addr_type);
6257  	if (err)
6258  		return err;
6259  
6260  	/* Set require_privacy to false so that the remote device has a
6261  	 * chance of identifying us.
6262  	 */
6263  	err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
6264  				     &own_addr_type, &random_addr);
6265  	if (err)
6266  		return err;
6267  
6268  	memset(&cp, 0, sizeof(cp));
6269  
6270  	cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
6271  	cp.channel_map = hdev->le_adv_channel_map;
6272  	cp.tx_power = HCI_TX_POWER_INVALID;
6273  	cp.primary_phy = HCI_ADV_PHY_1M;
6274  	cp.secondary_phy = HCI_ADV_PHY_1M;
6275  	cp.handle = 0x00; /* Use instance 0 for directed adv */
6276  	cp.own_addr_type = own_addr_type;
6277  	cp.peer_addr_type = conn->dst_type;
6278  	bacpy(&cp.peer_addr, &conn->dst);
6279  
6280  	/* As per Core Spec 5.2, Vol 2, Part E, Sec 7.8.53, the
6281  	 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND
6282  	 * does not support advertising data, so if the advertising set
6283  	 * already contains some, the controller shall return the error
6284  	 * code 'Invalid HCI Command Parameters' (0x12).
6285  	 * It is therefore required to remove the adv set for handle 0x00,
6286  	 * since we use instance 0 for directed adv.
6287  	 */
6288  	err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
6289  	if (err)
6290  		return err;
6291  
6292  	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
6293  				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6294  	if (err)
6295  		return err;
6296  
6297  	/* Check if the random address needs to be updated */
6298  	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
6299  	    bacmp(&random_addr, BDADDR_ANY) &&
6300  	    bacmp(&random_addr, &hdev->random_addr)) {
6301  		err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
6302  						       &random_addr);
6303  		if (err)
6304  			return err;
6305  	}
6306  
6307  	return hci_enable_ext_advertising_sync(hdev, 0x00);
6308  }
6309  
6310  static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
6311  					    struct hci_conn *conn)
6312  {
6313  	struct hci_cp_le_set_adv_param cp;
6314  	u8 status;
6315  	u8 own_addr_type;
6316  	u8 enable;
6317  
6318  	if (ext_adv_capable(hdev))
6319  		return hci_le_ext_directed_advertising_sync(hdev, conn);
6320  
6321  	/* Clear the HCI_LE_ADV bit temporarily so that the
6322  	 * hci_update_random_address knows that it's safe to go ahead
6323  	 * and write a new random address. The flag will be set back on
6324  	 * as soon as the SET_ADV_ENABLE HCI command completes.
6325  	 */
6326  	hci_dev_clear_flag(hdev, HCI_LE_ADV);
6327  
6328  	/* Set require_privacy to false so that the remote device has a
6329  	 * chance of identifying us.
6330  	 */
6331  	status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6332  						&own_addr_type);
6333  	if (status)
6334  		return status;
6335  
6336  	memset(&cp, 0, sizeof(cp));
6337  
6338  	/* Some controllers might reject the command if the intervals are not
6339  	 * within the range allowed for undirected advertising; BCM20702A0 is
6340  	 * known to be affected. The value 0x0020 below is 32 * 0.625 ms = 20 ms.
6341  	 */
6342  	cp.min_interval = cpu_to_le16(0x0020);
6343  	cp.max_interval = cpu_to_le16(0x0020);
6344  
6345  	cp.type = LE_ADV_DIRECT_IND;
6346  	cp.own_address_type = own_addr_type;
6347  	cp.direct_addr_type = conn->dst_type;
6348  	bacpy(&cp.direct_addr, &conn->dst);
6349  	cp.channel_map = hdev->le_adv_channel_map;
6350  
6351  	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
6352  				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6353  	if (status)
6354  		return status;
6355  
6356  	enable = 0x01;
6357  
6358  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
6359  				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
6360  }
6361  
6362  static void set_ext_conn_params(struct hci_conn *conn,
6363  				struct hci_cp_le_ext_conn_param *p)
6364  {
6365  	struct hci_dev *hdev = conn->hdev;
6366  
6367  	memset(p, 0, sizeof(*p));
6368  
6369  	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6370  	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6371  	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6372  	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6373  	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
6374  	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6375  	p->min_ce_len = cpu_to_le16(0x0000);
6376  	p->max_ce_len = cpu_to_le16(0x0000);
6377  }
6378  
6379  static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
6380  				       struct hci_conn *conn, u8 own_addr_type)
6381  {
6382  	struct hci_cp_le_ext_create_conn *cp;
6383  	struct hci_cp_le_ext_conn_param *p;
6384  	u8 data[sizeof(*cp) + sizeof(*p) * 3];
6385  	u32 plen;
6386  
6387  	cp = (void *)data;
6388  	p = (void *)cp->data;
6389  
6390  	memset(cp, 0, sizeof(*cp));
6391  
6392  	bacpy(&cp->peer_addr, &conn->dst);
6393  	cp->peer_addr_type = conn->dst_type;
6394  	cp->own_addr_type = own_addr_type;
6395  
6396  	plen = sizeof(*cp);
6397  
6398  	if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M ||
6399  			      conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) {
6400  		cp->phys |= LE_SCAN_PHY_1M;
6401  		set_ext_conn_params(conn, p);
6402  
6403  		p++;
6404  		plen += sizeof(*p);
6405  	}
6406  
6407  	if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M ||
6408  			      conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) {
6409  		cp->phys |= LE_SCAN_PHY_2M;
6410  		set_ext_conn_params(conn, p);
6411  
6412  		p++;
6413  		plen += sizeof(*p);
6414  	}
6415  
6416  	if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED ||
6417  				 conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) {
6418  		cp->phys |= LE_SCAN_PHY_CODED;
6419  		set_ext_conn_params(conn, p);
6420  
6421  		plen += sizeof(*p);
6422  	}
6423  
6424  	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
6425  					plen, data,
6426  					HCI_EV_LE_ENHANCED_CONN_COMPLETE,
6427  					conn->conn_timeout, NULL);
6428  }
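/* Layout note for the command built above: LE Extended Create Connection
 * carries one hci_cp_le_ext_conn_param block per bit set in cp->phys, in
 * ascending PHY order, so plen ends up as
 *
 *	sizeof(*cp) + hweight8(cp->phys) * sizeof(*p)
 *
 * e.g. scanning on 1M + Coded yields two parameter blocks.
 */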
6429  
6430  static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
6431  {
6432  	struct hci_cp_le_create_conn cp;
6433  	struct hci_conn_params *params;
6434  	u8 own_addr_type;
6435  	int err;
6436  	struct hci_conn *conn = data;
6437  
6438  	if (!hci_conn_valid(hdev, conn))
6439  		return -ECANCELED;
6440  
6441  	bt_dev_dbg(hdev, "conn %p", conn);
6442  
6443  	clear_bit(HCI_CONN_SCANNING, &conn->flags);
6444  	conn->state = BT_CONNECT;
6445  
6446  	/* If requested to connect as peripheral use directed advertising */
6447  	if (conn->role == HCI_ROLE_SLAVE) {
6448  		/* If we're active scanning and simultaneous roles is not
6449  		 * enabled simply reject the attempt.
6450  		 */
6451  		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6452  		    hdev->le_scan_type == LE_SCAN_ACTIVE &&
6453  		    !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
6454  			hci_conn_del(conn);
6455  			return -EBUSY;
6456  		}
6457  
6458  		/* Pause advertising while doing directed advertising. */
6459  		hci_pause_advertising_sync(hdev);
6460  
6461  		err = hci_le_directed_advertising_sync(hdev, conn);
6462  		goto done;
6463  	}
6464  
6465  	/* Disable advertising if simultaneous roles is not in use. */
6466  	if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
6467  		hci_pause_advertising_sync(hdev);
6468  
6469  	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
6470  	if (params) {
6471  		conn->le_conn_min_interval = params->conn_min_interval;
6472  		conn->le_conn_max_interval = params->conn_max_interval;
6473  		conn->le_conn_latency = params->conn_latency;
6474  		conn->le_supv_timeout = params->supervision_timeout;
6475  	} else {
6476  		conn->le_conn_min_interval = hdev->le_conn_min_interval;
6477  		conn->le_conn_max_interval = hdev->le_conn_max_interval;
6478  		conn->le_conn_latency = hdev->le_conn_latency;
6479  		conn->le_supv_timeout = hdev->le_supv_timeout;
6480  	}
6481  
6482  	/* If controller is scanning, we stop it since some controllers are
6483  	 * not able to scan and connect at the same time. Also set the
6484  	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
6485  	 * handler for scan disabling knows to set the correct discovery
6486  	 * state.
6487  	 */
6488  	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
6489  		hci_scan_disable_sync(hdev);
6490  		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
6491  	}
6492  
6493  	/* Update random address, but set require_privacy to false so
6494  	 * that we never connect with a non-resolvable address.
6495  	 */
6496  	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6497  					     &own_addr_type);
6498  	if (err)
6499  		goto done;
6500  	/* Send command LE Extended Create Connection if supported */
6501  	if (use_ext_conn(hdev)) {
6502  		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
6503  		goto done;
6504  	}
6505  
6506  	memset(&cp, 0, sizeof(cp));
6507  
6508  	cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6509  	cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6510  
6511  	bacpy(&cp.peer_addr, &conn->dst);
6512  	cp.peer_addr_type = conn->dst_type;
6513  	cp.own_address_type = own_addr_type;
6514  	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6515  	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6516  	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
6517  	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6518  	cp.min_ce_len = cpu_to_le16(0x0000);
6519  	cp.max_ce_len = cpu_to_le16(0x0000);
6520  
6521  	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
6522  	 *
6523  	 * If this event is unmasked and the HCI_LE_Connection_Complete event
6524  	 * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
6525  	 * sent when a new connection has been created.
6526  	 */
6527  	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
6528  				       sizeof(cp), &cp,
6529  				       use_enhanced_conn_complete(hdev) ?
6530  				       HCI_EV_LE_ENHANCED_CONN_COMPLETE :
6531  				       HCI_EV_LE_CONN_COMPLETE,
6532  				       conn->conn_timeout, NULL);
6533  
6534  done:
6535  	if (err == -ETIMEDOUT)
6536  		hci_le_connect_cancel_sync(hdev, conn, 0x00);
6537  
6538  	/* Re-enable advertising after the connection attempt is finished. */
6539  	hci_resume_advertising_sync(hdev);
6540  	return err;
6541  }
6542  
6543  int hci_le_create_cis_sync(struct hci_dev *hdev)
6544  {
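	/* DEFINE_FLEX() declares a zero-initialized on-stack instance of the
	 * flex-array struct with storage for up to 0x1f CIS entries and the
	 * num_cis counter preset to that maximum; it is trimmed to the number
	 * of entries actually filled in before the command is sent.
	 */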
6545  	DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f);
6546  	size_t aux_num_cis = 0;
6547  	struct hci_conn *conn;
6548  	u8 cig = BT_ISO_QOS_CIG_UNSET;
6549  
6550  	/* The spec allows only one pending LE Create CIS command at a time. If
6551  	 * the command is pending now, don't do anything. We check for pending
6552  	 * connections after each CIS Established event.
6553  	 *
6554  	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6555  	 * page 2566:
6556  	 *
6557  	 * If the Host issues this command before all the
6558  	 * HCI_LE_CIS_Established events from the previous use of the
6559  	 * command have been generated, the Controller shall return the
6560  	 * error code Command Disallowed (0x0C).
6561  	 *
6562  	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6563  	 * page 2567:
6564  	 *
6565  	 * When the Controller receives the HCI_LE_Create_CIS command, the
6566  	 * Controller sends the HCI_Command_Status event to the Host. An
6567  	 * HCI_LE_CIS_Established event will be generated for each CIS when it
6568  	 * is established or if it is disconnected or considered lost before
6569  	 * being established; until all the events are generated, the command
6570  	 * remains pending.
6571  	 */
6572  
6573  	hci_dev_lock(hdev);
6574  
6575  	rcu_read_lock();
6576  
6577  	/* Wait until previous Create CIS has completed */
6578  	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6579  		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
6580  			goto done;
6581  	}
6582  
6583  	/* Find CIG with all CIS ready */
6584  	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6585  		struct hci_conn *link;
6586  
6587  		if (hci_conn_check_create_cis(conn))
6588  			continue;
6589  
6590  		cig = conn->iso_qos.ucast.cig;
6591  
6592  		list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) {
6593  			if (hci_conn_check_create_cis(link) > 0 &&
6594  			    link->iso_qos.ucast.cig == cig &&
6595  			    link->state != BT_CONNECTED) {
6596  				cig = BT_ISO_QOS_CIG_UNSET;
6597  				break;
6598  			}
6599  		}
6600  
6601  		if (cig != BT_ISO_QOS_CIG_UNSET)
6602  			break;
6603  	}
6604  
6605  	if (cig == BT_ISO_QOS_CIG_UNSET)
6606  		goto done;
6607  
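	/* Queue every ready CIS belonging to the chosen CIG, up to the
	 * capacity (num_cis) of the command.
	 */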
6608  	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6609  		struct hci_cis *cis = &cmd->cis[aux_num_cis];
6610  
6611  		if (hci_conn_check_create_cis(conn) ||
6612  		    conn->iso_qos.ucast.cig != cig)
6613  			continue;
6614  
6615  		set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6616  		cis->acl_handle = cpu_to_le16(conn->parent->handle);
6617  		cis->cis_handle = cpu_to_le16(conn->handle);
6618  		aux_num_cis++;
6619  
6620  		if (aux_num_cis >= cmd->num_cis)
6621  			break;
6622  	}
6623  	cmd->num_cis = aux_num_cis;
6624  
6625  done:
6626  	rcu_read_unlock();
6627  
6628  	hci_dev_unlock(hdev);
6629  
6630  	if (!aux_num_cis)
6631  		return 0;
6632  
6633  	/* Wait for HCI_LE_CIS_Established */
6634  	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
6635  					struct_size(cmd, cis, cmd->num_cis),
6636  					cmd, HCI_EVT_LE_CIS_ESTABLISHED,
6637  					conn->conn_timeout, NULL);
6638  }
6639  
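/* Remove the CIG identified by @handle from the controller. */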
6640  int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
6641  {
6642  	struct hci_cp_le_remove_cig cp;
6643  
6644  	memset(&cp, 0, sizeof(cp));
6645  	cp.cig_id = handle;
6646  
6647  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
6648  				     &cp, HCI_CMD_TIMEOUT);
6649  }
6650  
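/* Terminate synchronization with the BIG identified by @handle. */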
6651  int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
6652  {
6653  	struct hci_cp_le_big_term_sync cp;
6654  
6655  	memset(&cp, 0, sizeof(cp));
6656  	cp.handle = handle;
6657  
6658  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
6659  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6660  }
6661  
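/* Terminate synchronization with the periodic advertising train identified
 * by @handle.
 */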
6662  int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
6663  {
6664  	struct hci_cp_le_pa_term_sync cp;
6665  
6666  	memset(&cp, 0, sizeof(cp));
6667  	cp.handle = cpu_to_le16(handle);
6668  
6669  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
6670  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6671  }
6672  
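/* Select the own address type and, if a random address is needed, write it
 * to @rand_addr: an RPA when @use_rpa is set (regenerating it if expired),
 * a freshly generated NRPA when only @require_privacy is set, or the public
 * address otherwise.
 */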
6673  int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
6674  			   bool use_rpa, struct adv_info *adv_instance,
6675  			   u8 *own_addr_type, bdaddr_t *rand_addr)
6676  {
6677  	int err;
6678  
6679  	bacpy(rand_addr, BDADDR_ANY);
6680  
6681  	/* If privacy is enabled, use a resolvable private address. If the
6682  	 * current RPA has expired, generate a new one.
6683  	 */
6684  	if (use_rpa) {
6685  		/* If the Controller supports LL Privacy, use own address
6686  		 * type 0x03 (Resolvable Private Address).
6687  		 */
6688  		if (ll_privacy_capable(hdev))
6689  			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
6690  		else
6691  			*own_addr_type = ADDR_LE_DEV_RANDOM;
6692  
6693  		if (adv_instance) {
6694  			if (adv_rpa_valid(adv_instance))
6695  				return 0;
6696  		} else {
6697  			if (rpa_valid(hdev))
6698  				return 0;
6699  		}
6700  
6701  		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
6702  		if (err < 0) {
6703  			bt_dev_err(hdev, "failed to generate new RPA");
6704  			return err;
6705  		}
6706  
6707  		bacpy(rand_addr, &hdev->rpa);
6708  
6709  		return 0;
6710  	}
6711  
6712  	/* In case privacy is required but no resolvable private address is
6713  	 * available, use a non-resolvable private address. This is useful
6714  	 * for non-connectable advertising.
6715  	 */
6716  	if (require_privacy) {
6717  		bdaddr_t nrpa;
6718  
6719  		while (true) {
6720  			/* The non-resolvable private address is generated
6721  			 * from six random bytes with the two most significant
6722  			 * bits cleared.
6723  			 */
6724  			get_random_bytes(&nrpa, 6);
6725  			nrpa.b[5] &= 0x3f;
6726  
6727  			/* The non-resolvable private address shall not be
6728  			 * equal to the public address.
6729  			 */
6730  			if (bacmp(&hdev->bdaddr, &nrpa))
6731  				break;
6732  		}
6733  
6734  		*own_addr_type = ADDR_LE_DEV_RANDOM;
6735  		bacpy(rand_addr, &nrpa);
6736  
6737  		return 0;
6738  	}
6739  
6740  	/* No privacy so use a public address. */
6741  	/* No privacy, so use a public address. */
6742  
6743  	return 0;
6744  }
6745  
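/* hci_update_adv_data() runs hci_update_adv_data_sync() from the cmd_sync
 * context, packing the advertising instance into the opaque data pointer
 * with UINT_PTR and unpacking it again with PTR_UINT. A minimal usage
 * sketch (illustrative only):
 *
 *	hci_update_adv_data(hdev, 0x01);
 */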
6746  static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
6747  {
6748  	u8 instance = PTR_UINT(data);
6749  
6750  	return hci_update_adv_data_sync(hdev, instance);
6751  }
6752  
6753  int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
6754  {
6755  	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
6756  				  UINT_PTR(instance), NULL);
6757  }
6758  
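/* Issue HCI Create Connection for a BR/EDR ACL link and wait for
 * HCI_EV_CONN_COMPLETE. This runs from the cmd_sync context, so the conn
 * object is revalidated before use.
 */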
6759  static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
6760  {
6761  	struct hci_conn *conn = data;
6762  	struct inquiry_entry *ie;
6763  	struct hci_cp_create_conn cp;
6764  	int err;
6765  
6766  	if (!hci_conn_valid(hdev, conn))
6767  		return -ECANCELED;
6768  
6769  	/* Many controllers disallow HCI Create Connection while they are
6770  	 * performing HCI Inquiry, so cancel the Inquiry first before issuing
6771  	 * HCI Create Connection. This may cause the MGMT discovering state to
6772  	 * become false without user space's request, but that is okay since
6773  	 * the MGMT Discovery APIs do not promise that discovery runs forever.
6774  	 * Instead, user space monitors the MGMT discovering status and may
6775  	 * request discovery again once this flag becomes false.
6776  	 */
6777  	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
6778  		err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
6779  					    NULL, HCI_CMD_TIMEOUT);
6780  		if (err)
6781  			bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
6782  	}
6783  
6784  	conn->state = BT_CONNECT;
6785  	conn->out = true;
6786  	conn->role = HCI_ROLE_MASTER;
6787  
6788  	conn->attempt++;
6789  
6790  	conn->link_policy = hdev->link_policy;
6791  
6792  	memset(&cp, 0, sizeof(cp));
6793  	bacpy(&cp.bdaddr, &conn->dst);
6794  	cp.pscan_rep_mode = 0x02;
6795  
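	/* Seed the page scan parameters and clock offset from the inquiry
	 * cache if we have a sufficiently recent entry for this peer.
	 */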
6796  	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
6797  	if (ie) {
6798  		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
6799  			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
6800  			cp.pscan_mode     = ie->data.pscan_mode;
6801  			cp.clock_offset   = ie->data.clock_offset |
6802  					    cpu_to_le16(0x8000);
6803  		}
6804  
6805  		memcpy(conn->dev_class, ie->data.dev_class, 3);
6806  	}
6807  
6808  	cp.pkt_type = cpu_to_le16(conn->pkt_type);
6809  	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
6810  		cp.role_switch = 0x01;
6811  	else
6812  		cp.role_switch = 0x00;
6813  
6814  	return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
6815  					sizeof(cp), &cp,
6816  					HCI_EV_CONN_COMPLETE,
6817  					conn->conn_timeout, NULL);
6818  }
6819  
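/* Queue hci_acl_create_conn_sync() for @conn, at most once per connection
 * object.
 */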
6820  int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
6821  {
6822  	return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
6823  				       NULL);
6824  }
6825  
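/* Completion callback for hci_le_create_conn_sync(): on success clean up
 * the connect-by-scan state; on error, fail the connection if it is still
 * the pending LE connect attempt.
 */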
6826  static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
6827  {
6828  	struct hci_conn *conn = data;
6829  
6830  	bt_dev_dbg(hdev, "err %d", err);
6831  
6832  	if (err == -ECANCELED)
6833  		return;
6834  
6835  	hci_dev_lock(hdev);
6836  
6837  	if (!hci_conn_valid(hdev, conn))
6838  		goto done;
6839  
6840  	if (!err) {
6841  		hci_connect_le_scan_cleanup(conn, 0x00);
6842  		goto done;
6843  	}
6844  
6845  	/* Check if connection is still pending */
6846  	if (conn != hci_lookup_le_connect(hdev))
6847  		goto done;
6848  
6849  	/* Flush to make sure we send create conn cancel command if needed */
6850  	flush_delayed_work(&conn->le_conn_timeout);
6851  	hci_conn_failed(conn, bt_status(err));
6852  
6853  done:
6854  	hci_dev_unlock(hdev);
6855  }
6856  
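/* Queue hci_le_create_conn_sync() for @conn, at most once per connection
 * object, with create_le_conn_complete() as the completion callback.
 */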
6857  int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
6858  {
6859  	return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
6860  				       create_le_conn_complete);
6861  }
6862  
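/* Cancel a queued but not yet started connection attempt by dequeueing the
 * matching cmd_sync entry. Only valid while the connection is still in
 * BT_OPEN; returns 0 when an entry was found and dequeued.
 */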
6863  int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
6864  {
6865  	if (conn->state != BT_OPEN)
6866  		return -EINVAL;
6867  
6868  	switch (conn->type) {
6869  	case ACL_LINK:
6870  		return !hci_cmd_sync_dequeue_once(hdev,
6871  						  hci_acl_create_conn_sync,
6872  						  conn, NULL);
6873  	case LE_LINK:
6874  		return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
6875  						  conn, create_le_conn_complete);
6876  	}
6877  
6878  	return -ENOENT;
6879  }
6880  
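/* Issue HCI LE Connection Update with the intervals, latency and
 * supervision timeout taken from @params; the CE length range is left at
 * zero.
 */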
6881  int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
6882  			    struct hci_conn_params *params)
6883  {
6884  	struct hci_cp_le_conn_update cp;
6885  
6886  	memset(&cp, 0, sizeof(cp));
6887  	cp.handle		= cpu_to_le16(conn->handle);
6888  	cp.conn_interval_min	= cpu_to_le16(params->conn_min_interval);
6889  	cp.conn_interval_max	= cpu_to_le16(params->conn_max_interval);
6890  	cp.conn_latency		= cpu_to_le16(params->conn_latency);
6891  	cp.supervision_timeout	= cpu_to_le16(params->supervision_timeout);
6892  	cp.min_ce_len		= cpu_to_le16(0x0000);
6893  	cp.max_ce_len		= cpu_to_le16(0x0000);
6894  
6895  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
6896  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6897  }
6898