xref: /linux/net/bluetooth/hci_sync.c (revision ff30564411ffdcee49d579cb15eb13185a36e253)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * BlueZ - Bluetooth protocol stack for Linux
4  *
5  * Copyright (C) 2021 Intel Corporation
6  * Copyright 2023 NXP
7  */
8 
9 #include <linux/property.h>
10 
11 #include <net/bluetooth/bluetooth.h>
12 #include <net/bluetooth/hci_core.h>
13 #include <net/bluetooth/mgmt.h>
14 
15 #include "hci_codec.h"
16 #include "hci_debugfs.h"
17 #include "smp.h"
18 #include "eir.h"
19 #include "msft.h"
20 #include "aosp.h"
21 #include "leds.h"
22 
23 static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
24 				  struct sk_buff *skb)
25 {
26 	bt_dev_dbg(hdev, "result 0x%2.2x", result);
27 
28 	if (hdev->req_status != HCI_REQ_PEND)
29 		return;
30 
31 	hdev->req_result = result;
32 	hdev->req_status = HCI_REQ_DONE;
33 
34 	/* Free the request command so it is not used as the response */
35 	kfree_skb(hdev->req_skb);
36 	hdev->req_skb = NULL;
37 
38 	if (skb) {
39 		struct sock *sk = hci_skb_sk(skb);
40 
41 		/* Drop sk reference if set */
42 		if (sk)
43 			sock_put(sk);
44 
45 		hdev->req_rsp = skb_get(skb);
46 	}
47 
48 	wake_up_interruptible(&hdev->req_wait_q);
49 }
50 
51 struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
52 				   const void *param, struct sock *sk)
53 {
54 	int len = HCI_COMMAND_HDR_SIZE + plen;
55 	struct hci_command_hdr *hdr;
56 	struct sk_buff *skb;
57 
58 	skb = bt_skb_alloc(len, GFP_ATOMIC);
59 	if (!skb)
60 		return NULL;
61 
62 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
63 	hdr->opcode = cpu_to_le16(opcode);
64 	hdr->plen   = plen;
65 
66 	if (plen)
67 		skb_put_data(skb, param, plen);
68 
69 	bt_dev_dbg(hdev, "skb len %d", skb->len);
70 
71 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
72 	hci_skb_opcode(skb) = opcode;
73 
74 	/* Grab a reference if the command needs to be associated with a sock
75 	 * (e.g. the mgmt socket that initiated the command).
76 	 */
77 	if (sk) {
78 		hci_skb_sk(skb) = sk;
79 		sock_hold(sk);
80 	}
81 
82 	return skb;
83 }
84 
85 static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
86 			     const void *param, u8 event, struct sock *sk)
87 {
88 	struct hci_dev *hdev = req->hdev;
89 	struct sk_buff *skb;
90 
91 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
92 
93 	/* If an error occurred during request building, there is no point in
94 	 * queueing the HCI command. We can simply return.
95 	 */
96 	if (req->err)
97 		return;
98 
99 	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
100 	if (!skb) {
101 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
102 			   opcode);
103 		req->err = -ENOMEM;
104 		return;
105 	}
106 
107 	if (skb_queue_empty(&req->cmd_q))
108 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
109 
110 	hci_skb_event(skb) = event;
111 
112 	skb_queue_tail(&req->cmd_q, skb);
113 }
114 
115 static int hci_cmd_sync_run(struct hci_request *req)
116 {
117 	struct hci_dev *hdev = req->hdev;
118 	struct sk_buff *skb;
119 	unsigned long flags;
120 
121 	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
122 
123 	/* If an error occurred during request building, remove all HCI
124 	 * commands queued on the HCI request queue.
125 	 */
126 	if (req->err) {
127 		skb_queue_purge(&req->cmd_q);
128 		return req->err;
129 	}
130 
131 	/* Do not allow empty requests */
132 	if (skb_queue_empty(&req->cmd_q))
133 		return -ENODATA;
134 
135 	skb = skb_peek_tail(&req->cmd_q);
136 	bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
137 	bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
138 
139 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
140 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
141 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
142 
143 	queue_work(hdev->workqueue, &hdev->cmd_work);
144 
145 	return 0;
146 }
147 
148 static void hci_request_init(struct hci_request *req, struct hci_dev *hdev)
149 {
150 	skb_queue_head_init(&req->cmd_q);
151 	req->hdev = hdev;
152 	req->err = 0;
153 }
154 
155 /* This function requires the caller holds hdev->req_lock. */
156 struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
157 				  const void *param, u8 event, u32 timeout,
158 				  struct sock *sk)
159 {
160 	struct hci_request req;
161 	struct sk_buff *skb;
162 	int err = 0;
163 
164 	bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
165 
166 	hci_request_init(&req, hdev);
167 
168 	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
169 
170 	hdev->req_status = HCI_REQ_PEND;
171 
172 	err = hci_cmd_sync_run(&req);
173 	if (err < 0)
174 		return ERR_PTR(err);
175 
176 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
177 					       hdev->req_status != HCI_REQ_PEND,
178 					       timeout);
179 
180 	if (err == -ERESTARTSYS)
181 		return ERR_PTR(-EINTR);
182 
183 	switch (hdev->req_status) {
184 	case HCI_REQ_DONE:
185 		err = -bt_to_errno(hdev->req_result);
186 		break;
187 
188 	case HCI_REQ_CANCELED:
189 		err = -hdev->req_result;
190 		break;
191 
192 	default:
193 		err = -ETIMEDOUT;
194 		break;
195 	}
196 
197 	hdev->req_status = 0;
198 	hdev->req_result = 0;
199 	skb = hdev->req_rsp;
200 	hdev->req_rsp = NULL;
201 
202 	bt_dev_dbg(hdev, "end: err %d", err);
203 
204 	if (err < 0) {
205 		kfree_skb(skb);
206 		return ERR_PTR(err);
207 	}
208 
209 	return skb;
210 }
211 EXPORT_SYMBOL(__hci_cmd_sync_sk);
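
/* Note on the return contract of __hci_cmd_sync_sk(), as relied on by the
 * wrappers below: a response skb on success, NULL when the command
 * completed without response parameters, or an ERR_PTR() on failure or
 * timeout.
 */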
212 
213 /* This function requires the caller holds hdev->req_lock. */
214 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
215 			       const void *param, u32 timeout)
216 {
217 	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
218 }
219 EXPORT_SYMBOL(__hci_cmd_sync);
220 
221 /* Send HCI command and wait for command complete event */
222 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
223 			     const void *param, u32 timeout)
224 {
225 	struct sk_buff *skb;
226 
227 	if (!test_bit(HCI_UP, &hdev->flags))
228 		return ERR_PTR(-ENETDOWN);
229 
230 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
231 
232 	hci_req_sync_lock(hdev);
233 	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
234 	hci_req_sync_unlock(hdev);
235 
236 	return skb;
237 }
238 EXPORT_SYMBOL(hci_cmd_sync);
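
/* A minimal usage sketch (illustrative only, not part of this file):
 * issue Read BD_ADDR and consume the response skb. HCI_OP_READ_BD_ADDR
 * and struct hci_rp_read_bd_addr come from <net/bluetooth/hci.h>;
 * response length checking is abbreviated.
 *
 *	struct hci_rp_read_bd_addr *rp;
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	rp = (void *)skb->data;
 *	if (!rp->status)
 *		bacpy(&hdev->bdaddr, &rp->bdaddr);
 *	kfree_skb(skb);
 */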
239 
240 /* This function requires the caller holds hdev->req_lock. */
241 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
242 				  const void *param, u8 event, u32 timeout)
243 {
244 	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
245 				 NULL);
246 }
247 EXPORT_SYMBOL(__hci_cmd_sync_ev);
248 
249 /* This function requires the caller holds hdev->req_lock. */
250 int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
251 			     const void *param, u8 event, u32 timeout,
252 			     struct sock *sk)
253 {
254 	struct sk_buff *skb;
255 	u8 status;
256 
257 	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
258 	if (IS_ERR(skb)) {
259 		if (!event)
260 			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
261 				   PTR_ERR(skb));
262 		return PTR_ERR(skb);
263 	}
264 
265 	/* If the command returns a status event, skb will be set to NULL as
266 	 * there are no parameters; in case of failure IS_ERR(skb) is true and
267 	 * the actual error can be found with PTR_ERR(skb).
268 	 */
269 	if (!skb)
270 		return 0;
271 
272 	status = skb->data[0];
273 
274 	kfree_skb(skb);
275 
276 	return status;
277 }
278 EXPORT_SYMBOL(__hci_cmd_sync_status_sk);
279 
280 int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
281 			  const void *param, u32 timeout)
282 {
283 	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
284 					NULL);
285 }
286 EXPORT_SYMBOL(__hci_cmd_sync_status);
287 
288 int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
289 			const void *param, u32 timeout)
290 {
291 	int err;
292 
293 	hci_req_sync_lock(hdev);
294 	err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
295 	hci_req_sync_unlock(hdev);
296 
297 	return err;
298 }
299 EXPORT_SYMBOL(hci_cmd_sync_status);
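
/* A short sketch of the status-only variant: the return value is 0 on
 * success, a positive HCI status code on command failure, or a negative
 * errno on transport failure or timeout.
 *
 *	err = hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
 *				  HCI_CMD_TIMEOUT);
 *	if (err)
 *		bt_dev_err(hdev, "reset failed (%d)", err);
 */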
300 
301 static void hci_cmd_sync_work(struct work_struct *work)
302 {
303 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
304 
305 	bt_dev_dbg(hdev, "");
306 
307 	/* Dequeue all entries and run them */
308 	while (1) {
309 		struct hci_cmd_sync_work_entry *entry;
310 
311 		mutex_lock(&hdev->cmd_sync_work_lock);
312 		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
313 						 struct hci_cmd_sync_work_entry,
314 						 list);
315 		if (entry)
316 			list_del(&entry->list);
317 		mutex_unlock(&hdev->cmd_sync_work_lock);
318 
319 		if (!entry)
320 			break;
321 
322 		bt_dev_dbg(hdev, "entry %p", entry);
323 
324 		if (entry->func) {
325 			int err;
326 
327 			hci_req_sync_lock(hdev);
328 			err = entry->func(hdev, entry->data);
329 			if (entry->destroy)
330 				entry->destroy(hdev, entry->data, err);
331 			hci_req_sync_unlock(hdev);
332 		}
333 
334 		kfree(entry);
335 	}
336 }
337 
338 static void hci_cmd_sync_cancel_work(struct work_struct *work)
339 {
340 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);
341 
342 	cancel_delayed_work_sync(&hdev->cmd_timer);
343 	cancel_delayed_work_sync(&hdev->ncmd_timer);
344 	atomic_set(&hdev->cmd_cnt, 1);
345 
346 	wake_up_interruptible(&hdev->req_wait_q);
347 }
348 
349 static int hci_scan_disable_sync(struct hci_dev *hdev);
350 static int scan_disable_sync(struct hci_dev *hdev, void *data)
351 {
352 	return hci_scan_disable_sync(hdev);
353 }
354 
355 static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
356 {
357 	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0);
358 }
359 
360 static void le_scan_disable(struct work_struct *work)
361 {
362 	struct hci_dev *hdev = container_of(work, struct hci_dev,
363 					    le_scan_disable.work);
364 	int status;
365 
366 	bt_dev_dbg(hdev, "");
367 	hci_dev_lock(hdev);
368 
369 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
370 		goto _return;
371 
372 	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
373 	if (status) {
374 		bt_dev_err(hdev, "failed to disable LE scan: %d", status);
375 		goto _return;
376 	}
377 
378 	/* If we were running an LE-only scan, change the discovery state.
379 	 * If we were running both LE and BR/EDR inquiry simultaneously,
380 	 * and the BR/EDR inquiry has already finished, stop discovery;
381 	 * otherwise the BR/EDR inquiry will stop discovery when it finishes.
382 	 * If we are about to resolve a remote device name, do not change
383 	 * the discovery state.
384 	 */
385 
386 	if (hdev->discovery.type == DISCOV_TYPE_LE)
387 		goto discov_stopped;
388 
389 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
390 		goto _return;
391 
392 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
393 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
394 		    hdev->discovery.state != DISCOVERY_RESOLVING)
395 			goto discov_stopped;
396 
397 		goto _return;
398 	}
399 
400 	status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL);
401 	if (status) {
402 		bt_dev_err(hdev, "inquiry failed: status %d", status);
403 		goto discov_stopped;
404 	}
405 
406 	goto _return;
407 
408 discov_stopped:
409 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
410 
411 _return:
412 	hci_dev_unlock(hdev);
413 }
414 
415 static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
416 				       u8 filter_dup);
417 
418 static int reenable_adv_sync(struct hci_dev *hdev, void *data)
419 {
420 	bt_dev_dbg(hdev, "");
421 
422 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
423 	    list_empty(&hdev->adv_instances))
424 		return 0;
425 
426 	if (hdev->cur_adv_instance) {
427 		return hci_schedule_adv_instance_sync(hdev,
428 						      hdev->cur_adv_instance,
429 						      true);
430 	} else {
431 		if (ext_adv_capable(hdev)) {
432 			hci_start_ext_adv_sync(hdev, 0x00);
433 		} else {
434 			hci_update_adv_data_sync(hdev, 0x00);
435 			hci_update_scan_rsp_data_sync(hdev, 0x00);
436 			hci_enable_advertising_sync(hdev);
437 		}
438 	}
439 
440 	return 0;
441 }
442 
443 static void reenable_adv(struct work_struct *work)
444 {
445 	struct hci_dev *hdev = container_of(work, struct hci_dev,
446 					    reenable_adv_work);
447 	int status;
448 
449 	bt_dev_dbg(hdev, "");
450 
451 	hci_dev_lock(hdev);
452 
453 	status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL);
454 	if (status)
455 		bt_dev_err(hdev, "failed to reenable ADV: %d", status);
456 
457 	hci_dev_unlock(hdev);
458 }
459 
460 static void cancel_adv_timeout(struct hci_dev *hdev)
461 {
462 	if (hdev->adv_instance_timeout) {
463 		hdev->adv_instance_timeout = 0;
464 		cancel_delayed_work(&hdev->adv_instance_expire);
465 	}
466 }
467 
468 /* For a single instance:
469  * - force == true: The instance will be removed even when its remaining
470  *   lifetime is not zero.
471  * - force == false: the instance will be deactivated but kept stored unless
472  *   the remaining lifetime is zero.
473  *
474  * For instance == 0x00:
475  * - force == true: All instances will be removed regardless of their timeout
476  *   setting.
477  * - force == false: Only instances that have a timeout will be removed.
478  */
479 int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
480 				u8 instance, bool force)
481 {
482 	struct adv_info *adv_instance, *n, *next_instance = NULL;
483 	int err;
484 	u8 rem_inst;
485 
486 	/* Cancel any timeout concerning the removed instance(s). */
487 	if (!instance || hdev->cur_adv_instance == instance)
488 		cancel_adv_timeout(hdev);
489 
490 	/* Get the next instance to advertise BEFORE we remove
491 	 * the current one. This can be the same instance again
492 	 * if there is only one instance.
493 	 */
494 	if (instance && hdev->cur_adv_instance == instance)
495 		next_instance = hci_get_next_instance(hdev, instance);
496 
497 	if (instance == 0x00) {
498 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
499 					 list) {
500 			if (!(force || adv_instance->timeout))
501 				continue;
502 
503 			rem_inst = adv_instance->instance;
504 			err = hci_remove_adv_instance(hdev, rem_inst);
505 			if (!err)
506 				mgmt_advertising_removed(sk, hdev, rem_inst);
507 		}
508 	} else {
509 		adv_instance = hci_find_adv_instance(hdev, instance);
510 
511 		if (force || (adv_instance && adv_instance->timeout &&
512 			      !adv_instance->remaining_time)) {
513 			/* Don't advertise a removed instance. */
514 			if (next_instance &&
515 			    next_instance->instance == instance)
516 				next_instance = NULL;
517 
518 			err = hci_remove_adv_instance(hdev, instance);
519 			if (!err)
520 				mgmt_advertising_removed(sk, hdev, instance);
521 		}
522 	}
523 
524 	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
525 		return 0;
526 
527 	if (next_instance && !ext_adv_capable(hdev))
528 		return hci_schedule_adv_instance_sync(hdev,
529 						      next_instance->instance,
530 						      false);
531 
532 	return 0;
533 }
534 
535 static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data)
536 {
537 	u8 instance = *(u8 *)data;
538 
539 	kfree(data);
540 
541 	hci_clear_adv_instance_sync(hdev, NULL, instance, false);
542 
543 	if (list_empty(&hdev->adv_instances))
544 		return hci_disable_advertising_sync(hdev);
545 
546 	return 0;
547 }
548 
549 static void adv_timeout_expire(struct work_struct *work)
550 {
551 	u8 *inst_ptr;
552 	struct hci_dev *hdev = container_of(work, struct hci_dev,
553 					    adv_instance_expire.work);
554 
555 	bt_dev_dbg(hdev, "");
556 
557 	hci_dev_lock(hdev);
558 
559 	hdev->adv_instance_timeout = 0;
560 
561 	if (hdev->cur_adv_instance == 0x00)
562 		goto unlock;
563 
564 	inst_ptr = kmalloc(1, GFP_KERNEL);
565 	if (!inst_ptr)
566 		goto unlock;
567 
568 	*inst_ptr = hdev->cur_adv_instance;
569 	hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL);
570 
571 unlock:
572 	hci_dev_unlock(hdev);
573 }
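
/* The single-byte kmalloc() above is deliberate: adv_timeout_expire_sync()
 * runs later on the cmd_sync work queue, so the instance id is heap-copied
 * here and freed by the callback itself (no destroy callback is passed).
 */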
574 
575 static bool is_interleave_scanning(struct hci_dev *hdev)
576 {
577 	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
578 }
579 
580 static int hci_passive_scan_sync(struct hci_dev *hdev);
581 
582 static void interleave_scan_work(struct work_struct *work)
583 {
584 	struct hci_dev *hdev = container_of(work, struct hci_dev,
585 					    interleave_scan.work);
586 	unsigned long timeout;
587 
588 	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
589 		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
590 	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
591 		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
592 	} else {
593 		bt_dev_err(hdev, "unexpected error");
594 		return;
595 	}
596 
597 	hci_passive_scan_sync(hdev);
598 
599 	hci_dev_lock(hdev);
600 
601 	switch (hdev->interleave_scan_state) {
602 	case INTERLEAVE_SCAN_ALLOWLIST:
603 		bt_dev_dbg(hdev, "next state: no filter");
604 		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
605 		break;
606 	case INTERLEAVE_SCAN_NO_FILTER:
607 		bt_dev_dbg(hdev, "next state: allowlist");
608 		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
609 		break;
610 	case INTERLEAVE_SCAN_NONE:
611 		bt_dev_err(hdev, "unexpected error");
612 	}
613 
614 	hci_dev_unlock(hdev);
615 
616 	/* Don't continue interleaving if it was canceled */
617 	if (is_interleave_scanning(hdev))
618 		queue_delayed_work(hdev->req_workqueue,
619 				   &hdev->interleave_scan, timeout);
620 }
621 
622 void hci_cmd_sync_init(struct hci_dev *hdev)
623 {
624 	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
625 	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
626 	mutex_init(&hdev->cmd_sync_work_lock);
627 	mutex_init(&hdev->unregister_lock);
628 
629 	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
630 	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
631 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
632 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
633 	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
634 }
635 
636 static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
637 				       struct hci_cmd_sync_work_entry *entry,
638 				       int err)
639 {
640 	if (entry->destroy)
641 		entry->destroy(hdev, entry->data, err);
642 
643 	list_del(&entry->list);
644 	kfree(entry);
645 }
646 
647 void hci_cmd_sync_clear(struct hci_dev *hdev)
648 {
649 	struct hci_cmd_sync_work_entry *entry, *tmp;
650 
651 	cancel_work_sync(&hdev->cmd_sync_work);
652 	cancel_work_sync(&hdev->reenable_adv_work);
653 
654 	mutex_lock(&hdev->cmd_sync_work_lock);
655 	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
656 		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
657 	mutex_unlock(&hdev->cmd_sync_work_lock);
658 }
659 
660 void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
661 {
662 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
663 
664 	if (hdev->req_status == HCI_REQ_PEND) {
665 		hdev->req_result = err;
666 		hdev->req_status = HCI_REQ_CANCELED;
667 
668 		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
669 	}
670 }
671 EXPORT_SYMBOL(hci_cmd_sync_cancel);
672 
673 /* Cancel ongoing command request synchronously:
674  *
675  * - Set result and mark status to HCI_REQ_CANCELED
676  * - Wakeup command sync thread
677  */
678 void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
679 {
680 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
681 
682 	if (hdev->req_status == HCI_REQ_PEND) {
683 		/* req_result is __u32 so error must be positive to be properly
684 		 * propagated.
685 		 */
686 		hdev->req_result = err < 0 ? -err : err;
687 		hdev->req_status = HCI_REQ_CANCELED;
688 
689 		wake_up_interruptible(&hdev->req_wait_q);
690 	}
691 }
692 EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
693 
694 /* Submit HCI command to be run as cmd_sync_work:
695  *
696  * - hdev must _not_ be unregistered
697  */
698 int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
699 			void *data, hci_cmd_sync_work_destroy_t destroy)
700 {
701 	struct hci_cmd_sync_work_entry *entry;
702 	int err = 0;
703 
704 	mutex_lock(&hdev->unregister_lock);
705 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
706 		err = -ENODEV;
707 		goto unlock;
708 	}
709 
710 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
711 	if (!entry) {
712 		err = -ENOMEM;
713 		goto unlock;
714 	}
715 	entry->func = func;
716 	entry->data = data;
717 	entry->destroy = destroy;
718 
719 	mutex_lock(&hdev->cmd_sync_work_lock);
720 	list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
721 	mutex_unlock(&hdev->cmd_sync_work_lock);
722 
723 	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
724 
725 unlock:
726 	mutex_unlock(&hdev->unregister_lock);
727 	return err;
728 }
729 EXPORT_SYMBOL(hci_cmd_sync_submit);
730 
731 /* Queue HCI command:
732  *
733  * - hdev must be running
734  */
735 int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
736 		       void *data, hci_cmd_sync_work_destroy_t destroy)
737 {
738 	/* Only queue command if hdev is running which means it had been opened
739 	 * and is either on init phase or is already up.
740 	 */
741 	if (!test_bit(HCI_RUNNING, &hdev->flags))
742 		return -ENETDOWN;
743 
744 	return hci_cmd_sync_submit(hdev, func, data, destroy);
745 }
746 EXPORT_SYMBOL(hci_cmd_sync_queue);
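
/* A hedged usage sketch of the queueing API: a one-shot callback plus a
 * destroy handler that frees the context after the callback has run or
 * the entry is cancelled. set_name_sync(), set_name_destroy() and
 * struct set_name_data are hypothetical, not part of this file.
 *
 *	struct set_name_data { u8 name[HCI_MAX_NAME_LENGTH]; };
 *
 *	static int set_name_sync(struct hci_dev *hdev, void *data)
 *	{
 *		struct set_name_data *d = data;
 *
 *		return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
 *					     sizeof(d->name), d->name,
 *					     HCI_CMD_TIMEOUT);
 *	}
 *
 *	static void set_name_destroy(struct hci_dev *hdev, void *data, int err)
 *	{
 *		kfree(data);
 *	}
 *
 *	err = hci_cmd_sync_queue(hdev, set_name_sync, d, set_name_destroy);
 */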
747 
748 static struct hci_cmd_sync_work_entry *
749 _hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
750 			   void *data, hci_cmd_sync_work_destroy_t destroy)
751 {
752 	struct hci_cmd_sync_work_entry *entry, *tmp;
753 
754 	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
755 		if (func && entry->func != func)
756 			continue;
757 
758 		if (data && entry->data != data)
759 			continue;
760 
761 		if (destroy && entry->destroy != destroy)
762 			continue;
763 
764 		return entry;
765 	}
766 
767 	return NULL;
768 }
769 
770 /* Queue HCI command entry once:
771  *
772  * - Look up whether an entry already exists and, only if it doesn't, create
773  *   a new entry and queue it.
774  */
775 int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
776 			    void *data, hci_cmd_sync_work_destroy_t destroy)
777 {
778 	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
779 		return 0;
780 
781 	return hci_cmd_sync_queue(hdev, func, data, destroy);
782 }
783 EXPORT_SYMBOL(hci_cmd_sync_queue_once);
784 
785 /* Lookup HCI command entry:
786  *
787  * - Return the first entry that matches the function callback, data or
788  *   destroy callback.
789  */
790 struct hci_cmd_sync_work_entry *
791 hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
792 			  void *data, hci_cmd_sync_work_destroy_t destroy)
793 {
794 	struct hci_cmd_sync_work_entry *entry;
795 
796 	mutex_lock(&hdev->cmd_sync_work_lock);
797 	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
798 	mutex_unlock(&hdev->cmd_sync_work_lock);
799 
800 	return entry;
801 }
802 EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);
803 
804 /* Cancel HCI command entry */
805 void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
806 			       struct hci_cmd_sync_work_entry *entry)
807 {
808 	mutex_lock(&hdev->cmd_sync_work_lock);
809 	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
810 	mutex_unlock(&hdev->cmd_sync_work_lock);
811 }
812 EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);
813 
814 /* Dequeue one HCI command entry:
815  *
816  * - Look up and cancel the first entry that matches.
817  */
818 bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
819 			       hci_cmd_sync_work_func_t func,
820 			       void *data, hci_cmd_sync_work_destroy_t destroy)
821 {
822 	struct hci_cmd_sync_work_entry *entry;
823 
824 	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
825 	if (!entry)
826 		return false;
827 
828 	hci_cmd_sync_cancel_entry(hdev, entry);
829 
830 	return true;
831 }
832 EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
833 
834 /* Dequeue HCI command entry:
835  *
836  * - Look up and cancel any entry that matches the function callback, data
837  *   or destroy callback.
838  */
839 bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
840 			  void *data, hci_cmd_sync_work_destroy_t destroy)
841 {
842 	struct hci_cmd_sync_work_entry *entry;
843 	bool ret = false;
844 
845 	mutex_lock(&hdev->cmd_sync_work_lock);
846 	while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
847 						   destroy))) {
848 		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
849 		ret = true;
850 	}
851 	mutex_unlock(&hdev->cmd_sync_work_lock);
852 
853 	return ret;
854 }
855 EXPORT_SYMBOL(hci_cmd_sync_dequeue);
856 
857 int hci_update_eir_sync(struct hci_dev *hdev)
858 {
859 	struct hci_cp_write_eir cp;
860 
861 	bt_dev_dbg(hdev, "");
862 
863 	if (!hdev_is_powered(hdev))
864 		return 0;
865 
866 	if (!lmp_ext_inq_capable(hdev))
867 		return 0;
868 
869 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
870 		return 0;
871 
872 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
873 		return 0;
874 
875 	memset(&cp, 0, sizeof(cp));
876 
877 	eir_create(hdev, cp.data);
878 
879 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
880 		return 0;
881 
882 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
883 
884 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
885 				     HCI_CMD_TIMEOUT);
886 }
887 
888 static u8 get_service_classes(struct hci_dev *hdev)
889 {
890 	struct bt_uuid *uuid;
891 	u8 val = 0;
892 
893 	list_for_each_entry(uuid, &hdev->uuids, list)
894 		val |= uuid->svc_hint;
895 
896 	return val;
897 }
898 
899 int hci_update_class_sync(struct hci_dev *hdev)
900 {
901 	u8 cod[3];
902 
903 	bt_dev_dbg(hdev, "");
904 
905 	if (!hdev_is_powered(hdev))
906 		return 0;
907 
908 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
909 		return 0;
910 
911 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
912 		return 0;
913 
914 	cod[0] = hdev->minor_class;
915 	cod[1] = hdev->major_class;
916 	cod[2] = get_service_classes(hdev);
917 
918 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
919 		cod[1] |= 0x20;
920 
921 	if (memcmp(cod, hdev->dev_class, 3) == 0)
922 		return 0;
923 
924 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
925 				     sizeof(cod), cod, HCI_CMD_TIMEOUT);
926 }
927 
928 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
929 {
930 	/* If there is no connection we are OK to advertise. */
931 	if (hci_conn_num(hdev, LE_LINK) == 0)
932 		return true;
933 
934 	/* Check le_states if there is any connection in peripheral role. */
935 	if (hdev->conn_hash.le_num_peripheral > 0) {
936 		/* Peripheral connection state and non connectable mode
937 		 * bit 20.
938 		 */
939 		if (!connectable && !(hdev->le_states[2] & 0x10))
940 			return false;
941 
942 		/* Peripheral connection state and connectable mode bit 38
943 		 * and scannable bit 21.
944 		 */
945 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
946 				    !(hdev->le_states[2] & 0x20)))
947 			return false;
948 	}
949 
950 	/* Check le_states if there is any connection in central role. */
951 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
952 		/* Central connection state and non connectable mode bit 18. */
953 		if (!connectable && !(hdev->le_states[2] & 0x02))
954 			return false;
955 
956 		/* Central connection state and connectable mode bit 35 and
957 		 * scannable 19.
958 		 */
959 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
960 				    !(hdev->le_states[2] & 0x08)))
961 			return false;
962 	}
963 
964 	return true;
965 }
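
/* The masks above assume the usual LE supported-states layout where state
 * bit N lives in byte N / 8, bit N % 8 (e.g. bit 20 -> le_states[2] &
 * 0x10). A hypothetical helper making the mapping explicit:
 *
 *	static inline bool le_state_supported(const u8 *le_states, u8 bit)
 *	{
 *		return le_states[bit / 8] & BIT(bit % 8);
 *	}
 */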
966 
967 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
968 {
969 	/* If privacy is not enabled don't use RPA */
970 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
971 		return false;
972 
973 	/* If basic privacy mode is enabled use RPA */
974 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
975 		return true;
976 
977 	/* If limited privacy mode is enabled don't use RPA if we're
978 	 * both discoverable and bondable.
979 	 */
980 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
981 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
982 		return false;
983 
984 	/* We're neither bondable nor discoverable in the limited
985 	 * privacy mode, therefore use RPA.
986 	 */
987 	return true;
988 }
989 
990 static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
991 {
992 	/* If we're advertising or initiating an LE connection we can't
993 	 * go ahead and change the random address at this time. This is
994 	 * because the eventual initiator address used for the
995 	 * subsequently created connection will be undefined (some
996 	 * controllers use the new address and others the one we had
997 	 * when the operation started).
998 	 *
999 	 * In this kind of scenario skip the update and let the random
1000 	 * address be updated at the next cycle.
1001 	 */
1002 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1003 	    hci_lookup_le_connect(hdev)) {
1004 		bt_dev_dbg(hdev, "Deferring random address update");
1005 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1006 		return 0;
1007 	}
1008 
1009 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
1010 				     6, rpa, HCI_CMD_TIMEOUT);
1011 }
1012 
1013 int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
1014 				   bool rpa, u8 *own_addr_type)
1015 {
1016 	int err;
1017 
1018 	/* If privacy is enabled use a resolvable private address. If the
1019 	 * current RPA has expired or something other than the current
1020 	 * RPA is in use, then generate a new one.
1021 	 */
1022 	if (rpa) {
1023 		/* If the controller supports LL Privacy, use own address type
1024 		 * 0x03 (controller-based address resolution).
1025 		 */
1026 		if (use_ll_privacy(hdev))
1027 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1028 		else
1029 			*own_addr_type = ADDR_LE_DEV_RANDOM;
1030 
1031 		/* Check if RPA is valid */
1032 		if (rpa_valid(hdev))
1033 			return 0;
1034 
1035 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1036 		if (err < 0) {
1037 			bt_dev_err(hdev, "failed to generate new RPA");
1038 			return err;
1039 		}
1040 
1041 		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
1042 		if (err)
1043 			return err;
1044 
1045 		return 0;
1046 	}
1047 
1048 	/* In case of required privacy without resolvable private address,
1049 	 * use a non-resolvable private address. This is useful for active
1050 	 * scanning and non-connectable advertising.
1051 	 */
1052 	if (require_privacy) {
1053 		bdaddr_t nrpa;
1054 
1055 		while (true) {
1056 			/* The non-resolvable private address is generated
1057 			 * from random six bytes with the two most significant
1058 			 * bits cleared.
1059 			 */
1060 			get_random_bytes(&nrpa, 6);
1061 			nrpa.b[5] &= 0x3f;
1062 
1063 			/* The non-resolvable private address shall not be
1064 			 * equal to the public address.
1065 			 */
1066 			if (bacmp(&hdev->bdaddr, &nrpa))
1067 				break;
1068 		}
1069 
1070 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1071 
1072 		return hci_set_random_addr_sync(hdev, &nrpa);
1073 	}
1074 
1075 	/* If forcing static address is in use or there is no public
1076 	 * address, use the static address as the random address (but skip
1077 	 * the HCI command if the current random address is already the
1078 	 * static one).
1079 	 *
1080 	 * In case BR/EDR has been disabled on a dual-mode controller
1081 	 * and a static address has been configured, then use that
1082 	 * address instead of the public BR/EDR address.
1083 	 */
1084 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1085 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1086 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1087 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
1088 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1089 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
1090 			return hci_set_random_addr_sync(hdev,
1091 							&hdev->static_addr);
1092 		return 0;
1093 	}
1094 
1095 	/* Neither privacy nor static address is being used so use a
1096 	 * public address.
1097 	 */
1098 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1099 
1100 	return 0;
1101 }
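
/* Outcome summary, derived from the branches above: rpa -> RPA with
 * ADDR_LE_DEV_RANDOM or ADDR_LE_DEV_RANDOM_RESOLVED; require_privacy ->
 * NRPA with ADDR_LE_DEV_RANDOM (top two bits of nrpa.b[5] cleared, e.g.
 * 0xf3 becomes 0x33); forced/needed static address -> ADDR_LE_DEV_RANDOM;
 * otherwise ADDR_LE_DEV_PUBLIC.
 */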
1102 
1103 static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
1104 {
1105 	struct hci_cp_le_set_ext_adv_enable *cp;
1106 	struct hci_cp_ext_adv_set *set;
1107 	u8 data[sizeof(*cp) + sizeof(*set) * 1];
1108 	u8 size;
1109 	struct adv_info *adv = NULL;
1110 
1111 	/* If request specifies an instance that doesn't exist, fail */
1112 	if (instance > 0) {
1113 		adv = hci_find_adv_instance(hdev, instance);
1114 		if (!adv)
1115 			return -EINVAL;
1116 
1117 		/* If not enabled there is nothing to do */
1118 		if (!adv->enabled)
1119 			return 0;
1120 	}
1121 
1122 	memset(data, 0, sizeof(data));
1123 
1124 	cp = (void *)data;
1125 	set = (void *)cp->data;
1126 
1127 	/* Instance 0x00 indicates all advertising instances will be disabled */
1128 	cp->num_of_sets = !!instance;
1129 	cp->enable = 0x00;
1130 
1131 	set->handle = adv ? adv->handle : instance;
1132 
1133 	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;
1134 
1135 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1136 				     size, data, HCI_CMD_TIMEOUT);
1137 }
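
/* Wire format used above: struct hci_cp_le_set_ext_adv_enable is followed
 * by num_of_sets copies of struct hci_cp_ext_adv_set. With instance 0x00,
 * num_of_sets is 0 and only the header is sent, which disables all
 * advertising sets at once.
 */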
1138 
1139 static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
1140 					    bdaddr_t *random_addr)
1141 {
1142 	struct hci_cp_le_set_adv_set_rand_addr cp;
1143 	int err;
1144 
1145 	if (!instance) {
1146 		/* Instance 0x00 doesn't have an adv_info; instead it uses
1147 		 * hdev->random_addr to track its address, so whenever that
1148 		 * needs to be updated also set the random address, since
1149 		 * hdev->random_addr is shared with the scan state machine.
1150 		 */
1151 		err = hci_set_random_addr_sync(hdev, random_addr);
1152 		if (err)
1153 			return err;
1154 	}
1155 
1156 	memset(&cp, 0, sizeof(cp));
1157 
1158 	cp.handle = instance;
1159 	bacpy(&cp.bdaddr, random_addr);
1160 
1161 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1162 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1163 }
1164 
1165 int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
1166 {
1167 	struct hci_cp_le_set_ext_adv_params cp;
1168 	bool connectable;
1169 	u32 flags;
1170 	bdaddr_t random_addr;
1171 	u8 own_addr_type;
1172 	int err;
1173 	struct adv_info *adv;
1174 	bool secondary_adv;
1175 
1176 	if (instance > 0) {
1177 		adv = hci_find_adv_instance(hdev, instance);
1178 		if (!adv)
1179 			return -EINVAL;
1180 	} else {
1181 		adv = NULL;
1182 	}
1183 
1184 	/* Updating parameters of an active instance will return a
1185 	 * Command Disallowed error, so we must first disable the
1186 	 * instance if it is active.
1187 	 */
1188 	if (adv && !adv->pending) {
1189 		err = hci_disable_ext_adv_instance_sync(hdev, instance);
1190 		if (err)
1191 			return err;
1192 	}
1193 
1194 	flags = hci_adv_instance_flags(hdev, instance);
1195 
1196 	/* If the "connectable" instance flag was not set, then choose between
1197 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1198 	 */
1199 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1200 		      mgmt_get_connectable(hdev);
1201 
1202 	if (!is_advertising_allowed(hdev, connectable))
1203 		return -EPERM;
1204 
1205 	/* Set require_privacy to true only when non-connectable
1206 	 * advertising is used. In that case it is fine to use a
1207 	 * non-resolvable private address.
1208 	 */
1209 	err = hci_get_random_address(hdev, !connectable,
1210 				     adv_use_rpa(hdev, flags), adv,
1211 				     &own_addr_type, &random_addr);
1212 	if (err < 0)
1213 		return err;
1214 
1215 	memset(&cp, 0, sizeof(cp));
1216 
1217 	if (adv) {
1218 		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
1219 		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
1220 		cp.tx_power = adv->tx_power;
1221 	} else {
1222 		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1223 		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1224 		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1225 	}
1226 
1227 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1228 
1229 	if (connectable) {
1230 		if (secondary_adv)
1231 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1232 		else
1233 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1234 	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
1235 		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
1236 		if (secondary_adv)
1237 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1238 		else
1239 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1240 	} else {
1241 		if (secondary_adv)
1242 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1243 		else
1244 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1245 	}
1246 
1247 	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
1248 	 * contains the peer’s Identity Address and the Peer_Address_Type
1249 	 * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01).
1250 	 * These parameters are used to locate the corresponding local IRK in
1251 	 * the resolving list; this IRK is used to generate their own address
1252 	 * used in the advertisement.
1253 	 */
1254 	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
1255 		hci_copy_identity_address(hdev, &cp.peer_addr,
1256 					  &cp.peer_addr_type);
1257 
1258 	cp.own_addr_type = own_addr_type;
1259 	cp.channel_map = hdev->le_adv_channel_map;
1260 	cp.handle = adv ? adv->handle : instance;
1261 
1262 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
1263 		cp.primary_phy = HCI_ADV_PHY_1M;
1264 		cp.secondary_phy = HCI_ADV_PHY_2M;
1265 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1266 		cp.primary_phy = HCI_ADV_PHY_CODED;
1267 		cp.secondary_phy = HCI_ADV_PHY_CODED;
1268 	} else {
1269 		/* In all other cases use 1M */
1270 		cp.primary_phy = HCI_ADV_PHY_1M;
1271 		cp.secondary_phy = HCI_ADV_PHY_1M;
1272 	}
1273 
1274 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
1275 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1276 	if (err)
1277 		return err;
1278 
1279 	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
1280 	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
1281 	    bacmp(&random_addr, BDADDR_ANY)) {
1282 		/* Check if the random address needs to be updated */
1283 		if (adv) {
1284 			if (!bacmp(&random_addr, &adv->random_addr))
1285 				return 0;
1286 		} else {
1287 			if (!bacmp(&random_addr, &hdev->random_addr))
1288 				return 0;
1289 		}
1290 
1291 		return hci_set_adv_set_random_addr_sync(hdev, instance,
1292 							&random_addr);
1293 	}
1294 
1295 	return 0;
1296 }
1297 
1298 static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1299 {
1300 	DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length,
1301 		    HCI_MAX_EXT_AD_LENGTH);
1302 	u8 len;
1303 	struct adv_info *adv = NULL;
1304 	int err;
1305 
1306 	if (instance) {
1307 		adv = hci_find_adv_instance(hdev, instance);
1308 		if (!adv || !adv->scan_rsp_changed)
1309 			return 0;
1310 	}
1311 
1312 	len = eir_create_scan_rsp(hdev, instance, pdu->data);
1313 
1314 	pdu->handle = adv ? adv->handle : instance;
1315 	pdu->length = len;
1316 	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1317 	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1318 
1319 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1320 				    struct_size(pdu, data, len), pdu,
1321 				    HCI_CMD_TIMEOUT);
1322 	if (err)
1323 		return err;
1324 
1325 	if (adv) {
1326 		adv->scan_rsp_changed = false;
1327 	} else {
1328 		memcpy(hdev->scan_rsp_data, pdu->data, len);
1329 		hdev->scan_rsp_data_len = len;
1330 	}
1331 
1332 	return 0;
1333 }
1334 
1335 static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1336 {
1337 	struct hci_cp_le_set_scan_rsp_data cp;
1338 	u8 len;
1339 
1340 	memset(&cp, 0, sizeof(cp));
1341 
1342 	len = eir_create_scan_rsp(hdev, instance, cp.data);
1343 
1344 	if (hdev->scan_rsp_data_len == len &&
1345 	    !memcmp(cp.data, hdev->scan_rsp_data, len))
1346 		return 0;
1347 
1348 	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1349 	hdev->scan_rsp_data_len = len;
1350 
1351 	cp.length = len;
1352 
1353 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
1354 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1355 }
1356 
1357 int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1358 {
1359 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1360 		return 0;
1361 
1362 	if (ext_adv_capable(hdev))
1363 		return hci_set_ext_scan_rsp_data_sync(hdev, instance);
1364 
1365 	return __hci_set_scan_rsp_data_sync(hdev, instance);
1366 }
1367 
1368 int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
1369 {
1370 	struct hci_cp_le_set_ext_adv_enable *cp;
1371 	struct hci_cp_ext_adv_set *set;
1372 	u8 data[sizeof(*cp) + sizeof(*set) * 1];
1373 	struct adv_info *adv;
1374 
1375 	if (instance > 0) {
1376 		adv = hci_find_adv_instance(hdev, instance);
1377 		if (!adv)
1378 			return -EINVAL;
1379 		/* If already enabled there is nothing to do */
1380 		if (adv->enabled)
1381 			return 0;
1382 	} else {
1383 		adv = NULL;
1384 	}
1385 
1386 	cp = (void *)data;
1387 	set = (void *)cp->data;
1388 
1389 	memset(cp, 0, sizeof(*cp));
1390 
1391 	cp->enable = 0x01;
1392 	cp->num_of_sets = 0x01;
1393 
1394 	memset(set, 0, sizeof(*set));
1395 
1396 	set->handle = adv ? adv->handle : instance;
1397 
1398 	/* Set duration per instance since controller is responsible for
1399 	 * scheduling it.
1400 	 */
1401 	if (adv && adv->timeout) {
1402 		u16 duration = adv->timeout * MSEC_PER_SEC;
1403 
1404 		/* Time = N * 10 ms */
1405 		set->duration = cpu_to_le16(duration / 10);
1406 	}
1407 
1408 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1409 				     sizeof(*cp) +
1410 				     sizeof(*set) * cp->num_of_sets,
1411 				     data, HCI_CMD_TIMEOUT);
1412 }
1413 
1414 int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
1415 {
1416 	int err;
1417 
1418 	err = hci_setup_ext_adv_instance_sync(hdev, instance);
1419 	if (err)
1420 		return err;
1421 
1422 	err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
1423 	if (err)
1424 		return err;
1425 
1426 	return hci_enable_ext_advertising_sync(hdev, instance);
1427 }
1428 
1429 int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1430 {
1431 	struct hci_cp_le_set_per_adv_enable cp;
1432 	struct adv_info *adv = NULL;
1433 
1434 	/* If periodic advertising is already disabled there is nothing to do. */
1435 	adv = hci_find_adv_instance(hdev, instance);
1436 	if (!adv || !adv->periodic || !adv->enabled)
1437 		return 0;
1438 
1439 	memset(&cp, 0, sizeof(cp));
1440 
1441 	cp.enable = 0x00;
1442 	cp.handle = instance;
1443 
1444 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1445 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1446 }
1447 
1448 static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
1449 				       u16 min_interval, u16 max_interval)
1450 {
1451 	struct hci_cp_le_set_per_adv_params cp;
1452 
1453 	memset(&cp, 0, sizeof(cp));
1454 
1455 	if (!min_interval)
1456 		min_interval = DISCOV_LE_PER_ADV_INT_MIN;
1457 
1458 	if (!max_interval)
1459 		max_interval = DISCOV_LE_PER_ADV_INT_MAX;
1460 
1461 	cp.handle = instance;
1462 	cp.min_interval = cpu_to_le16(min_interval);
1463 	cp.max_interval = cpu_to_le16(max_interval);
1464 	cp.periodic_properties = 0x0000;
1465 
1466 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
1467 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1468 }
1469 
1470 static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
1471 {
1472 	DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length,
1473 		    HCI_MAX_PER_AD_LENGTH);
1474 	u8 len;
1475 	struct adv_info *adv = NULL;
1476 
1477 	if (instance) {
1478 		adv = hci_find_adv_instance(hdev, instance);
1479 		if (!adv || !adv->periodic)
1480 			return 0;
1481 	}
1482 
1483 	len = eir_create_per_adv_data(hdev, instance, pdu->data);
1484 
1485 	pdu->length = len;
1486 	pdu->handle = adv ? adv->handle : instance;
1487 	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1488 
1489 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
1490 				     struct_size(pdu, data, len), pdu,
1491 				     HCI_CMD_TIMEOUT);
1492 }
1493 
1494 static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1495 {
1496 	struct hci_cp_le_set_per_adv_enable cp;
1497 	struct adv_info *adv = NULL;
1498 
1499 	/* If periodic advertising is already enabled there is nothing to do. */
1500 	adv = hci_find_adv_instance(hdev, instance);
1501 	if (adv && adv->periodic && adv->enabled)
1502 		return 0;
1503 
1504 	memset(&cp, 0, sizeof(cp));
1505 
1506 	cp.enable = 0x01;
1507 	cp.handle = instance;
1508 
1509 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1510 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1511 }
1512 
1513 /* Checks if the periodic advertising data contains a Basic Audio Announcement
1514  * and, if it does, generates a Broadcast ID and adds a Broadcast Announcement.
1515  */
1516 static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv)
1517 {
1518 	u8 bid[3];
1519 	u8 ad[4 + 3];
1520 
1521 	/* Skip if adv is NULL, as instance 0x00 is used for general purpose
1522 	 * advertising, so it cannot be used for the likes of a Broadcast
1523 	 * Announcement since it can be overwritten at any point.
1524 	 */
1525 	if (!adv)
1526 		return 0;
1527 
1528 	/* If the PA data doesn't contain a Basic Audio Announcement then
1529 	 * there is nothing to do.
1530 	 */
1531 	if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
1532 				  0x1851, NULL))
1533 		return 0;
1534 
1535 	/* Check if the advertising data already has a Broadcast Announcement,
1536 	 * since the process may want to control the Broadcast ID directly and
1537 	 * in that case the kernel shall not interfere.
1538 	 */
1539 	if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852,
1540 				 NULL))
1541 		return 0;
1542 
1543 	/* Generate Broadcast ID */
1544 	get_random_bytes(bid, sizeof(bid));
1545 	eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
1546 	hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL);
1547 
1548 	return hci_update_adv_data_sync(hdev, adv->instance);
1549 }
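
/* For reference, assuming eir_append_service_data() emits the usual
 * length/type/UUID16 framing, the ad[] buffer built above is one Service
 * Data AD structure:
 *
 *	ad[0] = 0x06          length (type + UUID + 3 byte Broadcast ID)
 *	ad[1] = 0x16          AD type: Service Data - 16-bit UUID
 *	ad[2..3] = 0x52 0x18  Broadcast Audio Announcement (0x1852, LE)
 *	ad[4..6] = bid[0..2]  random Broadcast ID
 */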
1550 
1551 int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
1552 			   u8 *data, u32 flags, u16 min_interval,
1553 			   u16 max_interval, u16 sync_interval)
1554 {
1555 	struct adv_info *adv = NULL;
1556 	int err;
1557 	bool added = false;
1558 
1559 	hci_disable_per_advertising_sync(hdev, instance);
1560 
1561 	if (instance) {
1562 		adv = hci_find_adv_instance(hdev, instance);
1563 		/* Create an instance if one could not be found */
1564 		if (!adv) {
1565 			adv = hci_add_per_instance(hdev, instance, flags,
1566 						   data_len, data,
1567 						   sync_interval,
1568 						   sync_interval);
1569 			if (IS_ERR(adv))
1570 				return PTR_ERR(adv);
1571 			adv->pending = false;
1572 			added = true;
1573 		}
1574 	}
1575 
1576 	/* Start advertising */
1577 	err = hci_start_ext_adv_sync(hdev, instance);
1578 	if (err < 0)
1579 		goto fail;
1580 
1581 	err = hci_adv_bcast_annoucement(hdev, adv);
1582 	if (err < 0)
1583 		goto fail;
1584 
1585 	err = hci_set_per_adv_params_sync(hdev, instance, min_interval,
1586 					  max_interval);
1587 	if (err < 0)
1588 		goto fail;
1589 
1590 	err = hci_set_per_adv_data_sync(hdev, instance);
1591 	if (err < 0)
1592 		goto fail;
1593 
1594 	err = hci_enable_per_advertising_sync(hdev, instance);
1595 	if (err < 0)
1596 		goto fail;
1597 
1598 	return 0;
1599 
1600 fail:
1601 	if (added)
1602 		hci_remove_adv_instance(hdev, instance);
1603 
1604 	return err;
1605 }
1606 
1607 static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
1608 {
1609 	int err;
1610 
1611 	if (ext_adv_capable(hdev))
1612 		return hci_start_ext_adv_sync(hdev, instance);
1613 
1614 	err = hci_update_adv_data_sync(hdev, instance);
1615 	if (err)
1616 		return err;
1617 
1618 	err = hci_update_scan_rsp_data_sync(hdev, instance);
1619 	if (err)
1620 		return err;
1621 
1622 	return hci_enable_advertising_sync(hdev);
1623 }
1624 
1625 int hci_enable_advertising_sync(struct hci_dev *hdev)
1626 {
1627 	struct adv_info *adv_instance;
1628 	struct hci_cp_le_set_adv_param cp;
1629 	u8 own_addr_type, enable = 0x01;
1630 	bool connectable;
1631 	u16 adv_min_interval, adv_max_interval;
1632 	u32 flags;
1633 	u8 status;
1634 
1635 	if (ext_adv_capable(hdev))
1636 		return hci_enable_ext_advertising_sync(hdev,
1637 						       hdev->cur_adv_instance);
1638 
1639 	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
1640 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1641 
1642 	/* If the "connectable" instance flag was not set, then choose between
1643 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1644 	 */
1645 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1646 		      mgmt_get_connectable(hdev);
1647 
1648 	if (!is_advertising_allowed(hdev, connectable))
1649 		return -EINVAL;
1650 
1651 	status = hci_disable_advertising_sync(hdev);
1652 	if (status)
1653 		return status;
1654 
1655 	/* Clear the HCI_LE_ADV bit temporarily so that the
1656 	 * hci_update_random_address knows that it's safe to go ahead
1657 	 * and write a new random address. The flag will be set back on
1658 	 * as soon as the SET_ADV_ENABLE HCI command completes.
1659 	 */
1660 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1661 
1662 	/* Set require_privacy to true only when non-connectable
1663 	 * advertising is used. In that case it is fine to use a
1664 	 * non-resolvable private address.
1665 	 */
1666 	status = hci_update_random_address_sync(hdev, !connectable,
1667 						adv_use_rpa(hdev, flags),
1668 						&own_addr_type);
1669 	if (status)
1670 		return status;
1671 
1672 	memset(&cp, 0, sizeof(cp));
1673 
1674 	if (adv_instance) {
1675 		adv_min_interval = adv_instance->min_interval;
1676 		adv_max_interval = adv_instance->max_interval;
1677 	} else {
1678 		adv_min_interval = hdev->le_adv_min_interval;
1679 		adv_max_interval = hdev->le_adv_max_interval;
1680 	}
1681 
1682 	if (connectable) {
1683 		cp.type = LE_ADV_IND;
1684 	} else {
1685 		if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
1686 			cp.type = LE_ADV_SCAN_IND;
1687 		else
1688 			cp.type = LE_ADV_NONCONN_IND;
1689 
1690 		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1691 		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1692 			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1693 			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1694 		}
1695 	}
1696 
1697 	cp.min_interval = cpu_to_le16(adv_min_interval);
1698 	cp.max_interval = cpu_to_le16(adv_max_interval);
1699 	cp.own_address_type = own_addr_type;
1700 	cp.channel_map = hdev->le_adv_channel_map;
1701 
1702 	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
1703 				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1704 	if (status)
1705 		return status;
1706 
1707 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
1708 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
1709 }
1710 
1711 static int enable_advertising_sync(struct hci_dev *hdev, void *data)
1712 {
1713 	return hci_enable_advertising_sync(hdev);
1714 }
1715 
1716 int hci_enable_advertising(struct hci_dev *hdev)
1717 {
1718 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1719 	    list_empty(&hdev->adv_instances))
1720 		return 0;
1721 
1722 	return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
1723 }
1724 
1725 int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1726 				     struct sock *sk)
1727 {
1728 	int err;
1729 
1730 	if (!ext_adv_capable(hdev))
1731 		return 0;
1732 
1733 	err = hci_disable_ext_adv_instance_sync(hdev, instance);
1734 	if (err)
1735 		return err;
1736 
1737 	/* If request specifies an instance that doesn't exist, fail */
1738 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1739 		return -EINVAL;
1740 
1741 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
1742 					sizeof(instance), &instance, 0,
1743 					HCI_CMD_TIMEOUT, sk);
1744 }
1745 
1746 static int remove_ext_adv_sync(struct hci_dev *hdev, void *data)
1747 {
1748 	struct adv_info *adv = data;
1749 	u8 instance = 0;
1750 
1751 	if (adv)
1752 		instance = adv->instance;
1753 
1754 	return hci_remove_ext_adv_instance_sync(hdev, instance, NULL);
1755 }
1756 
1757 int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance)
1758 {
1759 	struct adv_info *adv = NULL;
1760 
1761 	if (instance) {
1762 		adv = hci_find_adv_instance(hdev, instance);
1763 		if (!adv)
1764 			return -EINVAL;
1765 	}
1766 
1767 	return hci_cmd_sync_queue(hdev, remove_ext_adv_sync, adv, NULL);
1768 }
1769 
1770 int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
1771 {
1772 	struct hci_cp_le_term_big cp;
1773 
1774 	memset(&cp, 0, sizeof(cp));
1775 	cp.handle = handle;
1776 	cp.reason = reason;
1777 
1778 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG,
1779 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1780 }
1781 
1782 static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
1783 {
1784 	DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
1785 		    HCI_MAX_EXT_AD_LENGTH);
1786 	u8 len;
1787 	struct adv_info *adv = NULL;
1788 	int err;
1789 
1790 	if (instance) {
1791 		adv = hci_find_adv_instance(hdev, instance);
1792 		if (!adv || !adv->adv_data_changed)
1793 			return 0;
1794 	}
1795 
1796 	len = eir_create_adv_data(hdev, instance, pdu->data);
1797 
1798 	pdu->length = len;
1799 	pdu->handle = adv ? adv->handle : instance;
1800 	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1801 	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1802 
1803 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
1804 				    struct_size(pdu, data, len), pdu,
1805 				    HCI_CMD_TIMEOUT);
1806 	if (err)
1807 		return err;
1808 
1809 	/* Update data if the command succeeds */
1810 	if (adv) {
1811 		adv->adv_data_changed = false;
1812 	} else {
1813 		memcpy(hdev->adv_data, pdu->data, len);
1814 		hdev->adv_data_len = len;
1815 	}
1816 
1817 	return 0;
1818 }
1819 
1820 static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
1821 {
1822 	struct hci_cp_le_set_adv_data cp;
1823 	u8 len;
1824 
1825 	memset(&cp, 0, sizeof(cp));
1826 
1827 	len = eir_create_adv_data(hdev, instance, cp.data);
1828 
1829 	/* There's nothing to do if the data hasn't changed */
1830 	if (hdev->adv_data_len == len &&
1831 	    memcmp(cp.data, hdev->adv_data, len) == 0)
1832 		return 0;
1833 
1834 	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1835 	hdev->adv_data_len = len;
1836 
1837 	cp.length = len;
1838 
1839 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
1840 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1841 }
1842 
1843 int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
1844 {
1845 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1846 		return 0;
1847 
1848 	if (ext_adv_capable(hdev))
1849 		return hci_set_ext_adv_data_sync(hdev, instance);
1850 
1851 	return hci_set_adv_data_sync(hdev, instance);
1852 }
1853 
1854 int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1855 				   bool force)
1856 {
1857 	struct adv_info *adv = NULL;
1858 	u16 timeout;
1859 
1860 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
1861 		return -EPERM;
1862 
1863 	if (hdev->adv_instance_timeout)
1864 		return -EBUSY;
1865 
1866 	adv = hci_find_adv_instance(hdev, instance);
1867 	if (!adv)
1868 		return -ENOENT;
1869 
1870 	/* A zero timeout means unlimited advertising. As long as there is
1871 	 * only one instance, duration should be ignored. We still set a timeout
1872 	 * in case further instances are being added later on.
1873 	 *
1874 	 * If the remaining lifetime of the instance is more than the duration
1875 	 * then the timeout corresponds to the duration, otherwise it will be
1876 	 * reduced to the remaining instance lifetime.
1877 	 */
1878 	if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
1879 		timeout = adv->duration;
1880 	else
1881 		timeout = adv->remaining_time;
1882 
1883 	/* The remaining time is being reduced unless the instance is being
1884 	 * advertised without time limit.
1885 	 */
1886 	if (adv->timeout)
1887 		adv->remaining_time = adv->remaining_time - timeout;
1888 
1889 	/* Only use work for scheduling instances with legacy advertising */
1890 	if (!ext_adv_capable(hdev)) {
1891 		hdev->adv_instance_timeout = timeout;
1892 		queue_delayed_work(hdev->req_workqueue,
1893 				   &hdev->adv_instance_expire,
1894 				   msecs_to_jiffies(timeout * 1000));
1895 	}
1896 
1897 	/* If we're just re-scheduling the same instance again then do not
1898 	 * execute any HCI commands. This happens when a single instance is
1899 	 * being advertised.
1900 	 */
1901 	if (!force && hdev->cur_adv_instance == instance &&
1902 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
1903 		return 0;
1904 
1905 	hdev->cur_adv_instance = instance;
1906 
1907 	return hci_start_adv_sync(hdev, instance);
1908 }
1909 
1910 static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
1911 {
1912 	int err;
1913 
1914 	if (!ext_adv_capable(hdev))
1915 		return 0;
1916 
1917 	/* Disable instance 0x00 to disable all instances */
1918 	err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
1919 	if (err)
1920 		return err;
1921 
1922 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
1923 					0, NULL, 0, HCI_CMD_TIMEOUT, sk);
1924 }
1925 
1926 static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
1927 {
1928 	struct adv_info *adv, *n;
1929 	int err = 0;
1930 
1931 	if (ext_adv_capable(hdev))
1932 		/* Remove all existing sets */
1933 		err = hci_clear_adv_sets_sync(hdev, sk);
1934 	if (ext_adv_capable(hdev))
1935 		return err;
1936 
1937 	/* This is safe as long as no command is sent while the lock is
1938 	 * held.
1939 	 */
1940 	hci_dev_lock(hdev);
1941 
1942 	/* Cleanup non-ext instances */
1943 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1944 		u8 instance = adv->instance;
1945 		int err;
1946 
1947 		if (!(force || adv->timeout))
1948 			continue;
1949 
1950 		err = hci_remove_adv_instance(hdev, instance);
1951 		if (!err)
1952 			mgmt_advertising_removed(sk, hdev, instance);
1953 	}
1954 
1955 	hci_dev_unlock(hdev);
1956 
1957 	return 0;
1958 }
1959 
1960 static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
1961 			       struct sock *sk)
1962 {
1963 	int err = 0;
1964 
1965 	/* If we use extended advertising, instance has to be removed first. */
1966 	if (ext_adv_capable(hdev))
1967 		err = hci_remove_ext_adv_instance_sync(hdev, instance, sk);
1968 	if (ext_adv_capable(hdev))
1969 		return err;
1970 
1971 	/* This is safe as long as no command is sent while the lock is
1972 	 * held.
1973 	 */
1974 	hci_dev_lock(hdev);
1975 
1976 	err = hci_remove_adv_instance(hdev, instance);
1977 	if (!err)
1978 		mgmt_advertising_removed(sk, hdev, instance);
1979 
1980 	hci_dev_unlock(hdev);
1981 
1982 	return err;
1983 }
1984 
1985 /* For a single instance:
1986  * - force == true: The instance will be removed even when its remaining
1987  *   lifetime is not zero.
1988  * - force == false: the instance will be deactivated but kept stored unless
1989  *   the remaining lifetime is zero.
1990  *
1991  * For instance == 0x00:
1992  * - force == true: All instances will be removed regardless of their timeout
1993  *   setting.
1994  * - force == false: Only instances that have a timeout will be removed.
1995  */
1996 int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
1997 				u8 instance, bool force)
1998 {
1999 	struct adv_info *next = NULL;
2000 	int err;
2001 
2002 	/* Cancel any timeout concerning the removed instance(s). */
2003 	if (!instance || hdev->cur_adv_instance == instance)
2004 		cancel_adv_timeout(hdev);
2005 
2006 	/* Get the next instance to advertise BEFORE we remove
2007 	 * the current one. This can be the same instance again
2008 	 * if there is only one instance.
2009 	 */
2010 	if (hdev->cur_adv_instance == instance)
2011 		next = hci_get_next_instance(hdev, instance);
2012 
2013 	if (!instance) {
2014 		err = hci_clear_adv_sync(hdev, sk, force);
2015 		if (err)
2016 			return err;
2017 	} else {
2018 		struct adv_info *adv = hci_find_adv_instance(hdev, instance);
2019 
2020 		if (force || (adv && adv->timeout && !adv->remaining_time)) {
2021 			/* Don't advertise a removed instance. */
2022 			if (next && next->instance == instance)
2023 				next = NULL;
2024 
2025 			err = hci_remove_adv_sync(hdev, instance, sk);
2026 			if (err)
2027 				return err;
2028 		}
2029 	}
2030 
2031 	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
2032 		return 0;
2033 
2034 	if (next && !ext_adv_capable(hdev))
2035 		hci_schedule_adv_instance_sync(hdev, next->instance, false);
2036 
2037 	return 0;
2038 }
2039 
2040 int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
2041 {
2042 	struct hci_cp_read_rssi cp;
2043 
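	/* The handle argument is __le16; callers pass it already
	 * converted, e.g. cpu_to_le16(conn->handle) (conn hypothetical).
	 */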
2044 	cp.handle = handle;
2045 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
2046 					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2047 }
2048 
2049 int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
2050 {
2051 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
2052 					sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2053 }
2054 
2055 int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
2056 {
2057 	struct hci_cp_read_tx_power cp;
2058 
2059 	cp.handle = handle;
2060 	cp.type = type;
2061 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
2062 					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2063 }
2064 
2065 int hci_disable_advertising_sync(struct hci_dev *hdev)
2066 {
2067 	u8 enable = 0x00;
2068 	int err = 0;
2069 
2070 	/* If controller is not advertising we are done. */
2071 	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
2072 		return 0;
2073 
2074 	if (ext_adv_capable(hdev))
2075 		err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
2076 	if (ext_adv_capable(hdev))
2077 		return err;
2078 
2079 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
2080 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
2081 }
2082 
2083 static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
2084 					   u8 filter_dup)
2085 {
2086 	struct hci_cp_le_set_ext_scan_enable cp;
2087 
2088 	memset(&cp, 0, sizeof(cp));
2089 	cp.enable = val;
2090 
2091 	if (hci_dev_test_flag(hdev, HCI_MESH))
2092 		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2093 	else
2094 		cp.filter_dup = filter_dup;
2095 
2096 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2097 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2098 }
2099 
2100 static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
2101 				       u8 filter_dup)
2102 {
2103 	struct hci_cp_le_set_scan_enable cp;
2104 
2105 	if (use_ext_scan(hdev))
2106 		return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);
2107 
2108 	memset(&cp, 0, sizeof(cp));
2109 	cp.enable = val;
2110 
2111 	if (val && hci_dev_test_flag(hdev, HCI_MESH))
2112 		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2113 	else
2114 		cp.filter_dup = filter_dup;
2115 
2116 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
2117 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2118 }
2119 
2120 static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
2121 {
2122 	if (!use_ll_privacy(hdev))
2123 		return 0;
2124 
2125 	/* If the controller is already in the requested state we are done. */
2126 	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2127 		return 0;
2128 
2129 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
2130 				     sizeof(val), &val, HCI_CMD_TIMEOUT);
2131 }
2132 
2133 static int hci_scan_disable_sync(struct hci_dev *hdev)
2134 {
2135 	int err;
2136 
2137 	/* If controller is not scanning we are done. */
2138 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2139 		return 0;
2140 
2141 	if (hdev->scanning_paused) {
2142 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2143 		return 0;
2144 	}
2145 
2146 	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
2147 	if (err) {
2148 		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
2149 		return err;
2150 	}
2151 
2152 	return err;
2153 }
2154 
2155 static bool scan_use_rpa(struct hci_dev *hdev)
2156 {
2157 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
2158 }
2159 
2160 static void hci_start_interleave_scan(struct hci_dev *hdev)
2161 {
2162 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
2163 	queue_delayed_work(hdev->req_workqueue,
2164 			   &hdev->interleave_scan, 0);
2165 }
2166 
2167 static void cancel_interleave_scan(struct hci_dev *hdev)
2168 {
2169 	bt_dev_dbg(hdev, "cancelling interleave scan");
2170 
2171 	cancel_delayed_work_sync(&hdev->interleave_scan);
2172 
2173 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
2174 }
2175 
2176 /* Return true if an interleave scan was started by this function,
2177  * otherwise return false.
2178  */
2179 static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
2180 {
2181 	/* Do interleaved scan only if all of the following are true:
2182 	 * - There is at least one ADV monitor
2183 	 * - At least one pending LE connection or one device to be scanned for
2184 	 * - Monitor offloading is not supported
2185 	 * If so, we should alternate between allowlist scan and one without
2186 	 * any filters to save power.
2187 	 */
2188 	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
2189 				!(list_empty(&hdev->pend_le_conns) &&
2190 				  list_empty(&hdev->pend_le_reports)) &&
2191 				hci_get_adv_monitor_offload_ext(hdev) ==
2192 				    HCI_ADV_MONITOR_EXT_NONE;
2193 	bool is_interleaving = is_interleave_scanning(hdev);
2194 
2195 	if (use_interleaving && !is_interleaving) {
2196 		hci_start_interleave_scan(hdev);
2197 		bt_dev_dbg(hdev, "starting interleave scan");
2198 		return true;
2199 	}
2200 
2201 	if (!use_interleaving && is_interleaving)
2202 		cancel_interleave_scan(hdev);
2203 
2204 	return false;
2205 }
2206 
2207 /* Removes a device from the resolving list if needed. */
2208 static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
2209 					bdaddr_t *bdaddr, u8 bdaddr_type)
2210 {
2211 	struct hci_cp_le_del_from_resolv_list cp;
2212 	struct bdaddr_list_with_irk *entry;
2213 
2214 	if (!use_ll_privacy(hdev))
2215 		return 0;
2216 
2217 	/* Check if the IRK has been programmed */
2218 	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
2219 						bdaddr_type);
2220 	if (!entry)
2221 		return 0;
2222 
2223 	cp.bdaddr_type = bdaddr_type;
2224 	bacpy(&cp.bdaddr, bdaddr);
2225 
2226 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
2227 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2228 }
2229 
2230 static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
2231 				       bdaddr_t *bdaddr, u8 bdaddr_type)
2232 {
2233 	struct hci_cp_le_del_from_accept_list cp;
2234 	int err;
2235 
2236 	/* Check if device is on accept list before removing it */
2237 	if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
2238 		return 0;
2239 
2240 	cp.bdaddr_type = bdaddr_type;
2241 	bacpy(&cp.bdaddr, bdaddr);
2242 
2243 	/* Ignore errors when removing from the resolving list as it is
2244 	 * likely that the device was never added.
2245 	 */
2246 	hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2247 
2248 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
2249 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2250 	if (err) {
2251 		bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
2252 		return err;
2253 	}
2254 
2255 	bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
2256 		   cp.bdaddr_type);
2257 
2258 	return 0;
2259 }
2260 
2261 struct conn_params {
2262 	bdaddr_t addr;
2263 	u8 addr_type;
2264 	hci_conn_flags_t flags;
2265 	u8 privacy_mode;
2266 };
2267 
2268 /* Adds a device to the resolving list if needed.
2269  * Setting params to NULL programs the local hdev->irk.
2270  */
2271 static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
2272 					struct conn_params *params)
2273 {
2274 	struct hci_cp_le_add_to_resolv_list cp;
2275 	struct smp_irk *irk;
2276 	struct bdaddr_list_with_irk *entry;
2277 	struct hci_conn_params *p;
2278 
2279 	if (!use_ll_privacy(hdev))
2280 		return 0;
2281 
2282 	/* Attempt to program local identity address, type and irk if params is
2283 	 * NULL.
2284 	 */
2285 	if (!params) {
2286 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
2287 			return 0;
2288 
2289 		hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
2290 		memcpy(cp.peer_irk, hdev->irk, 16);
2291 		goto done;
2292 	}
2293 
2294 	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2295 	if (!irk)
2296 		return 0;
2297 
2298 	/* Check if the IRK has _not_ been programmed yet. */
2299 	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
2300 						&params->addr,
2301 						params->addr_type);
2302 	if (entry)
2303 		return 0;
2304 
2305 	cp.bdaddr_type = params->addr_type;
2306 	bacpy(&cp.bdaddr, &params->addr);
2307 	memcpy(cp.peer_irk, irk->val, 16);
2308 
2309 	/* Default privacy mode is always Network */
2310 	params->privacy_mode = HCI_NETWORK_PRIVACY;
2311 
2312 	rcu_read_lock();
2313 	p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2314 				      &params->addr, params->addr_type);
2315 	if (!p)
2316 		p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2317 					      &params->addr, params->addr_type);
2318 	if (p)
2319 		WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY);
2320 	rcu_read_unlock();
2321 
2322 done:
2323 	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
2324 		memcpy(cp.local_irk, hdev->irk, 16);
2325 	else
2326 		memset(cp.local_irk, 0, 16);
2327 
2328 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
2329 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2330 }
2331 
2332 /* Set Device Privacy Mode. */
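/* In Device Privacy Mode the controller accepts both the peer's RPA and
 * its identity address, whereas in the default Network Privacy Mode only
 * a resolvable RPA is accepted once an IRK has been programmed.
 */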
2333 static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
2334 					struct conn_params *params)
2335 {
2336 	struct hci_cp_le_set_privacy_mode cp;
2337 	struct smp_irk *irk;
2338 
2339 	/* If device privacy mode has already been set there is nothing to do */
2340 	if (params->privacy_mode == HCI_DEVICE_PRIVACY)
2341 		return 0;
2342 
2343 	/* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
2344 	 * indicates that LL Privacy has been enabled and
2345 	 * HCI_OP_LE_SET_PRIVACY_MODE is supported.
2346 	 */
2347 	if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY))
2348 		return 0;
2349 
2350 	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2351 	if (!irk)
2352 		return 0;
2353 
2354 	memset(&cp, 0, sizeof(cp));
2355 	cp.bdaddr_type = irk->addr_type;
2356 	bacpy(&cp.bdaddr, &irk->bdaddr);
2357 	cp.mode = HCI_DEVICE_PRIVACY;
2358 
2359 	/* Note: params->privacy_mode is not updated since it is a copy */
2360 
2361 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
2362 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2363 }
2364 
2365 /* Adds a device to the allow list if needed. If the device uses an RPA
2366  * (has an IRK) this also attempts to program the device in the
2367  * resolving list and to set the privacy mode properly.
2368  */
2369 static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
2370 				       struct conn_params *params,
2371 				       u8 *num_entries)
2372 {
2373 	struct hci_cp_le_add_to_accept_list cp;
2374 	int err;
2375 
2376 	/* During suspend, only wakeable devices can be in acceptlist */
2377 	if (hdev->suspended &&
2378 	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
2379 		hci_le_del_accept_list_sync(hdev, &params->addr,
2380 					    params->addr_type);
2381 		return 0;
2382 	}
2383 
2384 	/* A full accept list forces fallback to the accept-all filter policy */
2385 	if (*num_entries >= hdev->le_accept_list_size)
2386 		return -ENOSPC;
2387 
2388 	/* Accept list cannot be used with RPAs */
2389 	if (!use_ll_privacy(hdev) &&
2390 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
2391 		return -EINVAL;
2392 
2393 	/* Attempt to program the device in the resolving list first to avoid
2394 	 * having to roll back in case it fails, since the resolving list is
2395 	 * dynamic and can probably be smaller than the accept list.
2396 	 */
2397 	err = hci_le_add_resolve_list_sync(hdev, params);
2398 	if (err) {
2399 		bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
2400 		return err;
2401 	}
2402 
2403 	/* Set Privacy Mode */
2404 	err = hci_le_set_privacy_mode_sync(hdev, params);
2405 	if (err) {
2406 		bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
2407 		return err;
2408 	}
2409 
2410 	/* Check if already in accept list */
2411 	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
2412 				   params->addr_type))
2413 		return 0;
2414 
2415 	*num_entries += 1;
2416 	cp.bdaddr_type = params->addr_type;
2417 	bacpy(&cp.bdaddr, &params->addr);
2418 
2419 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
2420 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2421 	if (err) {
2422 		bt_dev_err(hdev, "Unable to add to allow list: %d", err);
2423 		/* Rollback the device from the resolving list */
2424 		hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2425 		return err;
2426 	}
2427 
2428 	bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
2429 		   cp.bdaddr_type);
2430 
2431 	return 0;
2432 }
2433 
2434 /* This function disables/pauses all advertising instances */
2435 static int hci_pause_advertising_sync(struct hci_dev *hdev)
2436 {
2437 	int err;
2438 	int old_state;
2439 
2440 	/* If advertising has already been paused there is nothing to do. */
2441 	if (hdev->advertising_paused)
2442 		return 0;
2443 
2444 	bt_dev_dbg(hdev, "Pausing directed advertising");
2445 
2446 	/* Stop directed advertising */
2447 	old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
2448 	if (old_state) {
2449 		/* When the discoverable timeout triggers, just make sure
2450 		 * the limited discoverable flag is cleared. Even in the case
2451 		 * of a timeout triggered from general discoverable, it is
2452 		 * safe to unconditionally clear the flag.
2453 		 */
2454 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2455 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2456 		hdev->discov_timeout = 0;
2457 	}
2458 
2459 	bt_dev_dbg(hdev, "Pausing advertising instances");
2460 
2461 	/* Call to disable any advertisements active on the controller.
2462 	 * This will succeed even if no advertisements are configured.
2463 	 */
2464 	err = hci_disable_advertising_sync(hdev);
2465 	if (err)
2466 		return err;
2467 
2468 	/* If we are using software rotation, pause the loop */
2469 	if (!ext_adv_capable(hdev))
2470 		cancel_adv_timeout(hdev);
2471 
2472 	hdev->advertising_paused = true;
2473 	hdev->advertising_old_state = old_state;
2474 
2475 	return 0;
2476 }
2477 
2478 /* This function enables all user advertising instances */
2479 static int hci_resume_advertising_sync(struct hci_dev *hdev)
2480 {
2481 	struct adv_info *adv, *tmp;
2482 	int err;
2483 
2484 	/* If advertising has not been paused there is nothing to do. */
2485 	if (!hdev->advertising_paused)
2486 		return 0;
2487 
2488 	/* Resume directed advertising */
2489 	hdev->advertising_paused = false;
2490 	if (hdev->advertising_old_state) {
2491 		hci_dev_set_flag(hdev, HCI_ADVERTISING);
2492 		hdev->advertising_old_state = 0;
2493 	}
2494 
2495 	bt_dev_dbg(hdev, "Resuming advertising instances");
2496 
2497 	if (ext_adv_capable(hdev)) {
2498 		/* Call for each tracked instance to be re-enabled */
2499 		list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
2500 			err = hci_enable_ext_advertising_sync(hdev,
2501 							      adv->instance);
2502 			if (!err)
2503 				continue;
2504 
2505 			/* If the instance cannot be resumed remove it */
2506 			hci_remove_ext_adv_instance_sync(hdev, adv->instance,
2507 							 NULL);
2508 		}
2509 	} else {
2510 		/* Schedule for most recent instance to be restarted and begin
2511 		 * the software rotation loop
2512 		 */
2513 		err = hci_schedule_adv_instance_sync(hdev,
2514 						     hdev->cur_adv_instance,
2515 						     true);
2516 	}
2517 
2518 	hdev->advertising_paused = false;
2519 
2520 	return err;
2521 }
2522 
2523 static int hci_pause_addr_resolution(struct hci_dev *hdev)
2524 {
2525 	int err;
2526 
2527 	if (!use_ll_privacy(hdev))
2528 		return 0;
2529 
2530 	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2531 		return 0;
2532 
2533 	/* Cannot disable addr resolution if scanning is enabled or
2534 	 * when initiating an LE connection.
2535 	 */
2536 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2537 	    hci_lookup_le_connect(hdev)) {
2538 		bt_dev_err(hdev, "Command not allowed when scan/LE connect");
2539 		return -EPERM;
2540 	}
2541 
2542 	/* Cannot disable addr resolution if advertising is enabled. */
2543 	err = hci_pause_advertising_sync(hdev);
2544 	if (err) {
2545 		bt_dev_err(hdev, "Pause advertising failed: %d", err);
2546 		return err;
2547 	}
2548 
2549 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2550 	if (err)
2551 		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
2552 			   err);
2553 
2554 	/* Return if address resolution was disabled and an RPA is in use. */
2555 	if (!err && scan_use_rpa(hdev))
2556 		return 0;
2557 
2558 	hci_resume_advertising_sync(hdev);
2559 	return err;
2560 }
2561 
2562 struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
2563 					     bool extended, struct sock *sk)
2564 {
2565 	u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
2566 					HCI_OP_READ_LOCAL_OOB_DATA;
2567 
2568 	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
2569 }
2570 
2571 static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
2572 {
2573 	struct hci_conn_params *params;
2574 	struct conn_params *p;
2575 	size_t i;
2576 
2577 	rcu_read_lock();
2578 
2579 	i = 0;
2580 	list_for_each_entry_rcu(params, list, action)
2581 		++i;
2582 	*n = i;
2583 
2584 	rcu_read_unlock();
2585 
2586 	p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
2587 	if (!p)
2588 		return NULL;
2589 
2590 	rcu_read_lock();
2591 
2592 	i = 0;
2593 	list_for_each_entry_rcu(params, list, action) {
2594 		/* Racing adds are handled in next scan update */
2595 		if (i >= *n)
2596 			break;
2597 
2598 		/* No hdev->lock, but: addr, addr_type are immutable.
2599 		 * privacy_mode is only written by us or in
2600 		 * hci_cc_le_set_privacy_mode that we wait for.
2601 		 * We should be idempotent so MGMT updating flags
2602 		 * while we are processing is OK.
2603 		 */
2604 		bacpy(&p[i].addr, &params->addr);
2605 		p[i].addr_type = params->addr_type;
2606 		p[i].flags = READ_ONCE(params->flags);
2607 		p[i].privacy_mode = READ_ONCE(params->privacy_mode);
2608 		++i;
2609 	}
2610 
2611 	rcu_read_unlock();
2612 
2613 	*n = i;
2614 	return p;
2615 }
2616 
2617 /* Clear LE Accept List */
2618 static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
2619 {
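	/* Supported Commands bitmask octet 26 bit 7:
	 * LE Clear Filter Accept List is supported.
	 */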
2620 	if (!(hdev->commands[26] & 0x80))
2621 		return 0;
2622 
2623 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
2624 				     HCI_CMD_TIMEOUT);
2625 }
2626 
2627 /* Device must not be scanning when updating the accept list.
2628  *
2629  * Update is done using the following sequence:
2630  *
2631  * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
2632  * Remove Devices From Accept List ->
2633  * (has IRK && use_ll_privacy(Remove Devices From Resolving List))->
2634  * Add Devices to Accept List ->
2635  * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
2636  * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
2637  * Enable Scanning
2638  *
2639  * In case of failure advertising shall be restored to its original state
2640  * and the returned filter policy shall disable the accept list, since
2641  * either the accept or the resolving list could not be programmed.
2642  */
2644 static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
2645 {
2646 	struct conn_params *params;
2647 	struct bdaddr_list *b, *t;
2648 	u8 num_entries = 0;
2649 	bool pend_conn, pend_report;
2650 	u8 filter_policy;
2651 	size_t i, n;
2652 	int err;
2653 
2654 	/* Pause advertising if resolving list can be used as controllers
2655 	 * cannot accept resolving list modifications while advertising.
2656 	 */
2657 	if (use_ll_privacy(hdev)) {
2658 		err = hci_pause_advertising_sync(hdev);
2659 		if (err) {
2660 			bt_dev_err(hdev, "pause advertising failed: %d", err);
2661 			return 0x00;
2662 		}
2663 	}
2664 
2665 	/* Disable address resolution while reprogramming accept list since
2666 	 * devices that do have an IRK will be programmed in the resolving list
2667 	 * when LL Privacy is enabled.
2668 	 */
2669 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2670 	if (err) {
2671 		bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
2672 		goto done;
2673 	}
2674 
2675 	/* Force address filtering if PA Sync is in progress */
2676 	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
2677 		struct hci_cp_le_pa_create_sync *sent;
2678 
2679 		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC);
2680 		if (sent) {
2681 			struct conn_params pa;
2682 
2683 			memset(&pa, 0, sizeof(pa));
2684 
2685 			bacpy(&pa.addr, &sent->addr);
2686 			pa.addr_type = sent->addr_type;
2687 
2688 			/* Clear first since there could be addresses left
2689 			 * behind.
2690 			 */
2691 			hci_le_clear_accept_list_sync(hdev);
2692 
2693 			num_entries = 1;
2694 			err = hci_le_add_accept_list_sync(hdev, &pa,
2695 							  &num_entries);
2696 			goto done;
2697 		}
2698 	}
2699 
2700 	/* Go through the current accept list programmed into the
2701 	 * controller one by one and check if that address is connected or is
2702 	 * still in the list of pending connections or list of devices to
2703 	 * report. If not present in either list, then remove it from
2704 	 * the controller.
2705 	 */
2706 	list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
2707 		if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
2708 			continue;
2709 
2710 		/* Pointers not dereferenced, no locks needed */
2711 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2712 						      &b->bdaddr,
2713 						      b->bdaddr_type);
2714 		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2715 							&b->bdaddr,
2716 							b->bdaddr_type);
2717 
2718 		/* If the device is not likely to connect or report,
2719 		 * remove it from the acceptlist.
2720 		 */
2721 		if (!pend_conn && !pend_report) {
2722 			hci_le_del_accept_list_sync(hdev, &b->bdaddr,
2723 						    b->bdaddr_type);
2724 			continue;
2725 		}
2726 
2727 		num_entries++;
2728 	}
2729 
2730 	/* Since all no longer valid accept list entries have been
2731 	 * removed, walk through the list of pending connections
2732 	 * and ensure that any new device gets programmed into
2733 	 * the controller.
2734 	 *
2735 	 * If the list of devices is larger than the number of
2736 	 * available accept list entries in the controller, then
2737 	 * just abort and return a filter policy value that does not
2738 	 * use the accept list.
2739 	 *
2740 	 * The list and params may be mutated while we wait for events,
2741 	 * so make a copy and iterate it.
2742 	 */
2743 
2744 	params = conn_params_copy(&hdev->pend_le_conns, &n);
2745 	if (!params) {
2746 		err = -ENOMEM;
2747 		goto done;
2748 	}
2749 
2750 	for (i = 0; i < n; ++i) {
2751 		err = hci_le_add_accept_list_sync(hdev, &params[i],
2752 						  &num_entries);
2753 		if (err) {
2754 			kvfree(params);
2755 			goto done;
2756 		}
2757 	}
2758 
2759 	kvfree(params);
2760 
2761 	/* After adding all new pending connections, walk through
2762 	 * the list of pending reports and also add these to the
2763 	 * accept list if there is still space. Abort if space runs out.
2764 	 */
2765 
2766 	params = conn_params_copy(&hdev->pend_le_reports, &n);
2767 	if (!params) {
2768 		err = -ENOMEM;
2769 		goto done;
2770 	}
2771 
2772 	for (i = 0; i < n; ++i) {
2773 		err = hci_le_add_accept_list_sync(hdev, &params[i],
2774 						  &num_entries);
2775 		if (err) {
2776 			kvfree(params);
2777 			goto done;
2778 		}
2779 	}
2780 
2781 	kvfree(params);
2782 
2783 	/* Use the allowlist unless the following conditions are all true:
2784 	 * - We are not currently suspending
2785 	 * - One or more ADV monitors are registered and not offloaded
2786 	 * - Interleaved scanning is not currently using the allowlist
2787 	 */
2788 	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
2789 	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
2790 	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
2791 		err = -EINVAL;
2792 
2793 done:
2794 	filter_policy = err ? 0x00 : 0x01;
2795 
2796 	/* Enable address resolution when LL Privacy is enabled. */
2797 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
2798 	if (err)
2799 		bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
2800 
2801 	/* Resume advertising if it was paused */
2802 	if (use_ll_privacy(hdev))
2803 		hci_resume_advertising_sync(hdev);
2804 
2805 	/* Select filter policy to use accept list */
2806 	return filter_policy;
2807 }
2808 
2809 static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
2810 				   u8 type, u16 interval, u16 window)
2811 {
2812 	cp->type = type;
2813 	cp->interval = cpu_to_le16(interval);
2814 	cp->window = cpu_to_le16(window);
2815 }
2816 
2817 static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
2818 					  u16 interval, u16 window,
2819 					  u8 own_addr_type, u8 filter_policy)
2820 {
2821 	struct hci_cp_le_set_ext_scan_params *cp;
2822 	struct hci_cp_le_scan_phy_params *phy;
2823 	u8 data[sizeof(*cp) + sizeof(*phy) * 2];
2824 	u8 num_phy = 0x00;
2825 
2826 	cp = (void *)data;
2827 	phy = (void *)cp->data;
2828 
2829 	memset(data, 0, sizeof(data));
2830 
2831 	cp->own_addr_type = own_addr_type;
2832 	cp->filter_policy = filter_policy;
2833 
2834 	/* If PA Sync is in progress, select the PHY based on the
2835 	 * hci_conn.iso_qos.
2836 	 */
2837 	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
2838 		struct hci_cp_le_add_to_accept_list *sent;
2839 
2840 		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
2841 		if (sent) {
2842 			struct hci_conn *conn;
2843 
2844 			conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK,
2845 						       &sent->bdaddr);
2846 			if (conn) {
2847 				struct bt_iso_qos *qos = &conn->iso_qos;
2848 
2849 				if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
2850 				    qos->bcast.in.phy & BT_ISO_PHY_2M) {
2851 					cp->scanning_phys |= LE_SCAN_PHY_1M;
2852 					hci_le_scan_phy_params(phy, type,
2853 							       interval,
2854 							       window);
2855 					num_phy++;
2856 					phy++;
2857 				}
2858 
2859 				if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
2860 					cp->scanning_phys |= LE_SCAN_PHY_CODED;
2861 					hci_le_scan_phy_params(phy, type,
2862 							       interval * 3,
2863 							       window * 3);
2864 					num_phy++;
2865 					phy++;
2866 				}
2867 
2868 				if (num_phy)
2869 					goto done;
2870 			}
2871 		}
2872 	}
2873 
2874 	if (scan_1m(hdev) || scan_2m(hdev)) {
2875 		cp->scanning_phys |= LE_SCAN_PHY_1M;
2876 		hci_le_scan_phy_params(phy, type, interval, window);
2877 		num_phy++;
2878 		phy++;
2879 	}
2880 
2881 	if (scan_coded(hdev)) {
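	/* Scale interval and window by 3 for the coded PHY: coded
	 * advertising PDUs take much longer on air, so proportionally
	 * longer windows improve the chance of catching them (a driver
	 * heuristic, not mandated by the spec).
	 */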
2882 		cp->scanning_phys |= LE_SCAN_PHY_CODED;
2883 		hci_le_scan_phy_params(phy, type, interval * 3, window * 3);
2884 		num_phy++;
2885 		phy++;
2886 	}
2887 
2888 done:
2889 	if (!num_phy)
2890 		return -EINVAL;
2891 
2892 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
2893 				     sizeof(*cp) + sizeof(*phy) * num_phy,
2894 				     data, HCI_CMD_TIMEOUT);
2895 }
2896 
2897 static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
2898 				      u16 interval, u16 window,
2899 				      u8 own_addr_type, u8 filter_policy)
2900 {
2901 	struct hci_cp_le_set_scan_param cp;
2902 
2903 	if (use_ext_scan(hdev))
2904 		return hci_le_set_ext_scan_param_sync(hdev, type, interval,
2905 						      window, own_addr_type,
2906 						      filter_policy);
2907 
2908 	memset(&cp, 0, sizeof(cp));
2909 	cp.type = type;
2910 	cp.interval = cpu_to_le16(interval);
2911 	cp.window = cpu_to_le16(window);
2912 	cp.own_address_type = own_addr_type;
2913 	cp.filter_policy = filter_policy;
2914 
2915 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
2916 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2917 }
2918 
2919 static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
2920 			       u16 window, u8 own_addr_type, u8 filter_policy,
2921 			       u8 filter_dup)
2922 {
2923 	int err;
2924 
2925 	if (hdev->scanning_paused) {
2926 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2927 		return 0;
2928 	}
2929 
2930 	err = hci_le_set_scan_param_sync(hdev, type, interval, window,
2931 					 own_addr_type, filter_policy);
2932 	if (err)
2933 		return err;
2934 
2935 	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
2936 }
2937 
2938 static int hci_passive_scan_sync(struct hci_dev *hdev)
2939 {
2940 	u8 own_addr_type;
2941 	u8 filter_policy;
2942 	u16 window, interval;
2943 	u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE;
2944 	int err;
2945 
2946 	if (hdev->scanning_paused) {
2947 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2948 		return 0;
2949 	}
2950 
2951 	err = hci_scan_disable_sync(hdev);
2952 	if (err) {
2953 		bt_dev_err(hdev, "disable scanning failed: %d", err);
2954 		return err;
2955 	}
2956 
2957 	/* Set require_privacy to false since no SCAN_REQ is sent
2958 	 * during passive scanning. Not using a non-resolvable address
2959 	 * here is important so that peer devices using directed
2960 	 * advertising with our address will be correctly reported
2961 	 * by the controller.
2962 	 */
2963 	if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
2964 					   &own_addr_type))
2965 		return 0;
2966 
2967 	if (hdev->enable_advmon_interleave_scan &&
2968 	    hci_update_interleaved_scan_sync(hdev))
2969 		return 0;
2970 
2971 	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
2972 
2973 	/* Adding or removing entries from the accept list must
2974 	 * happen before enabling scanning. The controller does
2975 	 * not allow accept list modification while scanning.
2976 	 */
2977 	filter_policy = hci_update_accept_list_sync(hdev);
2978 
2979 	/* When the controller is using random resolvable addresses and
2980 	/* When the controller is using resolvable random addresses and
2981 	 * thereby has LE privacy enabled, controllers with Extended
2982 	 * Scanner Filter Policies support can enable handling of
2983 	 * directed advertising.
2984 	 *
2985 	 * So instead of using filter policies 0x00 (no acceptlist)
2986 	 * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
2987 	 */
2988 	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
2989 	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
2990 		filter_policy |= 0x02;
2991 
2992 	if (hdev->suspended) {
2993 		window = hdev->le_scan_window_suspend;
2994 		interval = hdev->le_scan_int_suspend;
2995 	} else if (hci_is_le_conn_scanning(hdev)) {
2996 		window = hdev->le_scan_window_connect;
2997 		interval = hdev->le_scan_int_connect;
2998 	} else if (hci_is_adv_monitoring(hdev)) {
2999 		window = hdev->le_scan_window_adv_monitor;
3000 		interval = hdev->le_scan_int_adv_monitor;
3001 	} else {
3002 		window = hdev->le_scan_window;
3003 		interval = hdev->le_scan_interval;
3004 	}
3005 
3006 	/* Disable all filtering for Mesh */
3007 	if (hci_dev_test_flag(hdev, HCI_MESH)) {
3008 		filter_policy = 0;
3009 		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
3010 	}
3011 
3012 	bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
3013 
3014 	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
3015 				   own_addr_type, filter_policy, filter_dups);
3016 }
3017 
3018 /* This function controls the passive scanning based on hdev->pend_le_conns
3019  * list. If there are pending LE connections we start the background scanning,
3020  * otherwise we stop it in the following sequence:
3021  *
3022  * If there are devices to scan:
3023  *
3024  * Disable Scanning -> Update Accept List ->
3025  * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
3026  * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
3027  * Enable Scanning
3028  *
3029  * Otherwise:
3030  *
3031  * Disable Scanning
3032  */
3033 int hci_update_passive_scan_sync(struct hci_dev *hdev)
3034 {
3035 	int err;
3036 
3037 	if (!test_bit(HCI_UP, &hdev->flags) ||
3038 	    test_bit(HCI_INIT, &hdev->flags) ||
3039 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
3040 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
3041 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
3042 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
3043 		return 0;
3044 
3045 	/* No point in doing scanning if LE support hasn't been enabled */
3046 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
3047 		return 0;
3048 
3049 	/* If discovery is active don't interfere with it */
3050 	if (hdev->discovery.state != DISCOVERY_STOPPED)
3051 		return 0;
3052 
3053 	/* Reset RSSI and UUID filters when starting background scanning
3054 	 * since these filters are meant for service discovery only.
3055 	 *
3056 	 * The Start Discovery and Start Service Discovery operations
3057 	 * ensure to set proper values for RSSI threshold and UUID
3058 	 * filter list. So it is safe to just reset them here.
3059 	 */
3060 	hci_discovery_filter_clear(hdev);
3061 
3062 	bt_dev_dbg(hdev, "ADV monitoring is %s",
3063 		   hci_is_adv_monitoring(hdev) ? "on" : "off");
3064 
3065 	if (!hci_dev_test_flag(hdev, HCI_MESH) &&
3066 	    list_empty(&hdev->pend_le_conns) &&
3067 	    list_empty(&hdev->pend_le_reports) &&
3068 	    !hci_is_adv_monitoring(hdev) &&
3069 	    !hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
3070 		/* If there are no pending LE connections, no devices to
3071 		 * be scanned for and no ADV monitors, we should stop the
3072 		 * background scanning.
3073 		 */
3074 
3075 		bt_dev_dbg(hdev, "stopping background scanning");
3076 
3077 		err = hci_scan_disable_sync(hdev);
3078 		if (err)
3079 			bt_dev_err(hdev, "stop background scanning failed: %d",
3080 				   err);
3081 	} else {
3082 		/* If there is at least one pending LE connection, we should
3083 		 * keep the background scan running.
3084 		 */
3085 
3086 		/* If controller is connecting, we should not start scanning
3087 		 * since some controllers are not able to scan and connect at
3088 		 * the same time.
3089 		 */
3090 		if (hci_lookup_le_connect(hdev))
3091 			return 0;
3092 
3093 		bt_dev_dbg(hdev, "start background scanning");
3094 
3095 		err = hci_passive_scan_sync(hdev);
3096 		if (err)
3097 			bt_dev_err(hdev, "start background scanning failed: %d",
3098 				   err);
3099 	}
3100 
3101 	return err;
3102 }
3103 
3104 static int update_scan_sync(struct hci_dev *hdev, void *data)
3105 {
3106 	return hci_update_scan_sync(hdev);
3107 }
3108 
3109 int hci_update_scan(struct hci_dev *hdev)
3110 {
3111 	return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL);
3112 }
3113 
3114 static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
3115 {
3116 	return hci_update_passive_scan_sync(hdev);
3117 }
3118 
3119 int hci_update_passive_scan(struct hci_dev *hdev)
3120 {
3121 	/* Only queue if it would have any effect */
3122 	if (!test_bit(HCI_UP, &hdev->flags) ||
3123 	    test_bit(HCI_INIT, &hdev->flags) ||
3124 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
3125 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
3126 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
3127 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
3128 		return 0;
3129 
3130 	return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
3131 				       NULL);
3132 }
3133 
3134 int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
3135 {
3136 	int err;
3137 
3138 	if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
3139 		return 0;
3140 
3141 	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
3142 				    sizeof(val), &val, HCI_CMD_TIMEOUT);
3143 
3144 	if (!err) {
3145 		if (val) {
3146 			hdev->features[1][0] |= LMP_HOST_SC;
3147 			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
3148 		} else {
3149 			hdev->features[1][0] &= ~LMP_HOST_SC;
3150 			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
3151 		}
3152 	}
3153 
3154 	return err;
3155 }
3156 
3157 int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
3158 {
3159 	int err;
3160 
3161 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
3162 	    lmp_host_ssp_capable(hdev))
3163 		return 0;
3164 
3165 	if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
3166 		__hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
3167 				      sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3168 	}
3169 
3170 	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3171 				    sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3172 	if (err)
3173 		return err;
3174 
3175 	return hci_write_sc_support_sync(hdev, 0x01);
3176 }
3177 
3178 int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul)
3179 {
3180 	struct hci_cp_write_le_host_supported cp;
3181 
3182 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
3183 	    !lmp_bredr_capable(hdev))
3184 		return 0;
3185 
3186 	/* Check first if we already have the right host state
3187 	 * (host features set)
3188 	 */
3189 	if (le == lmp_host_le_capable(hdev) &&
3190 	    simul == lmp_host_le_br_capable(hdev))
3191 		return 0;
3192 
3193 	memset(&cp, 0, sizeof(cp));
3194 
3195 	cp.le = le;
3196 	cp.simul = simul;
3197 
3198 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3199 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3200 }
3201 
3202 static int hci_powered_update_adv_sync(struct hci_dev *hdev)
3203 {
3204 	struct adv_info *adv, *tmp;
3205 	int err;
3206 
3207 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
3208 		return 0;
3209 
3210 	/* If RPA Resolution has not been enabled yet it means the
3211 	 * resolving list is empty and we should attempt to program the
3212 	 * local IRK in order to support using own_addr_type
3213 	 * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
3214 	 */
3215 	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
3216 		hci_le_add_resolve_list_sync(hdev, NULL);
3217 		hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
3218 	}
3219 
3220 	/* Make sure the controller has a good default for
3221 	 * advertising data. This also applies to the case
3222 	 * where BR/EDR was toggled during the AUTO_OFF phase.
3223 	 */
3224 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3225 	    list_empty(&hdev->adv_instances)) {
3226 		if (ext_adv_capable(hdev)) {
3227 			err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
3228 			if (!err)
3229 				hci_update_scan_rsp_data_sync(hdev, 0x00);
3230 		} else {
3231 			err = hci_update_adv_data_sync(hdev, 0x00);
3232 			if (!err)
3233 				hci_update_scan_rsp_data_sync(hdev, 0x00);
3234 		}
3235 
3236 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
3237 			hci_enable_advertising_sync(hdev);
3238 	}
3239 
3240 	/* Call for each tracked instance to be scheduled */
3241 	list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
3242 		hci_schedule_adv_instance_sync(hdev, adv->instance, true);
3243 
3244 	return 0;
3245 }
3246 
3247 static int hci_write_auth_enable_sync(struct hci_dev *hdev)
3248 {
3249 	u8 link_sec;
3250 
3251 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3252 	if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
3253 		return 0;
3254 
3255 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
3256 				     sizeof(link_sec), &link_sec,
3257 				     HCI_CMD_TIMEOUT);
3258 }
3259 
3260 int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
3261 {
3262 	struct hci_cp_write_page_scan_activity cp;
3263 	u8 type;
3264 	int err = 0;
3265 
3266 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3267 		return 0;
3268 
3269 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3270 		return 0;
3271 
3272 	memset(&cp, 0, sizeof(cp));
3273 
3274 	if (enable) {
3275 		type = PAGE_SCAN_TYPE_INTERLACED;
3276 
3277 		/* 160 msec page scan interval */
3278 		cp.interval = cpu_to_le16(0x0100);
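		/* 0x0100 = 256 slots * 0.625 ms = 160 ms */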
3279 	} else {
3280 		type = hdev->def_page_scan_type;
3281 		cp.interval = cpu_to_le16(hdev->def_page_scan_int);
3282 	}
3283 
3284 	cp.window = cpu_to_le16(hdev->def_page_scan_window);
3285 
3286 	if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
3287 	    __cpu_to_le16(hdev->page_scan_window) != cp.window) {
3288 		err = __hci_cmd_sync_status(hdev,
3289 					    HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
3290 					    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3291 		if (err)
3292 			return err;
3293 	}
3294 
3295 	if (hdev->page_scan_type != type)
3296 		err = __hci_cmd_sync_status(hdev,
3297 					    HCI_OP_WRITE_PAGE_SCAN_TYPE,
3298 					    sizeof(type), &type,
3299 					    HCI_CMD_TIMEOUT);
3300 
3301 	return err;
3302 }
3303 
3304 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
3305 {
3306 	struct bdaddr_list *b;
3307 
3308 	list_for_each_entry(b, &hdev->accept_list, list) {
3309 		struct hci_conn *conn;
3310 
3311 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
3312 		if (!conn)
3313 			return true;
3314 
3315 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3316 			return true;
3317 	}
3318 
3319 	return false;
3320 }
3321 
3322 static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
3323 {
3324 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3325 					    sizeof(val), &val,
3326 					    HCI_CMD_TIMEOUT);
3327 }
3328 
3329 int hci_update_scan_sync(struct hci_dev *hdev)
3330 {
3331 	u8 scan;
3332 
3333 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3334 		return 0;
3335 
3336 	if (!hdev_is_powered(hdev))
3337 		return 0;
3338 
3339 	if (mgmt_powering_down(hdev))
3340 		return 0;
3341 
3342 	if (hdev->scanning_paused)
3343 		return 0;
3344 
3345 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
3346 	    disconnected_accept_list_entries(hdev))
3347 		scan = SCAN_PAGE;
3348 	else
3349 		scan = SCAN_DISABLED;
3350 
3351 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
3352 		scan |= SCAN_INQUIRY;
3353 
3354 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
3355 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
3356 		return 0;
3357 
3358 	return hci_write_scan_enable_sync(hdev, scan);
3359 }
3360 
3361 int hci_update_name_sync(struct hci_dev *hdev)
3362 {
3363 	struct hci_cp_write_local_name cp;
3364 
3365 	memset(&cp, 0, sizeof(cp));
3366 
3367 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3368 
3369 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
3370 					    sizeof(cp), &cp,
3371 					    HCI_CMD_TIMEOUT);
3372 }
3373 
3374 /* This function performs the powered update HCI command sequence after the
3375  * HCI init sequence, which ends up resetting all states; the sequence is:
3376  *
3377  * HCI_SSP_ENABLED(Enable SSP)
3378  * HCI_LE_ENABLED(Enable LE)
3379  * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) ->
3380  * Update adv data)
3381  * Enable Authentication
3382  * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
3383  * Set Name -> Set EIR)
3384  * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address)
3385  */
3386 int hci_powered_update_sync(struct hci_dev *hdev)
3387 {
3388 	int err;
3389 
3390 	/* Register the available SMP channels (BR/EDR and LE) only when
3391 	 * successfully powering on the controller. This late
3392 	 * registration is required so that LE SMP can clearly decide if
3393 	 * the public address or static address is used.
3394 	 */
3395 	smp_register(hdev);
3396 
3397 	err = hci_write_ssp_mode_sync(hdev, 0x01);
3398 	if (err)
3399 		return err;
3400 
3401 	err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
3402 	if (err)
3403 		return err;
3404 
3405 	err = hci_powered_update_adv_sync(hdev);
3406 	if (err)
3407 		return err;
3408 
3409 	err = hci_write_auth_enable_sync(hdev);
3410 	if (err)
3411 		return err;
3412 
3413 	if (lmp_bredr_capable(hdev)) {
3414 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3415 			hci_write_fast_connectable_sync(hdev, true);
3416 		else
3417 			hci_write_fast_connectable_sync(hdev, false);
3418 		hci_update_scan_sync(hdev);
3419 		hci_update_class_sync(hdev);
3420 		hci_update_name_sync(hdev);
3421 		hci_update_eir_sync(hdev);
3422 	}
3423 
3424 	/* If forcing static address is in use or there is no public
3425 	 * address use the static address as random address (but skip
3426 	 * the HCI command if the current random address is already the
3427 	 * static one).
3428 	 *
3429 	 * In case BR/EDR has been disabled on a dual-mode controller
3430 	 * and a static address has been configured, then use that
3431 	 * address instead of the public BR/EDR address.
3432 	 */
3433 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3434 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3435 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) {
3436 		if (bacmp(&hdev->static_addr, BDADDR_ANY))
3437 			return hci_set_random_addr_sync(hdev,
3438 							&hdev->static_addr);
3439 	}
3440 
3441 	return 0;
3442 }
3443 
3444 /**
3445  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
3446  *				       (BD_ADDR) for a HCI device from
3447  *				       (BD_ADDR) for an HCI device from
3448  * @hdev:	The HCI device
3449  *
3450  * Search the firmware node for 'local-bd-address'.
3451  *
3452  * All-zero BD addresses are rejected, because those could be properties
3453  * that exist in the firmware tables, but were not updated by the firmware. For
3454  * example, the DTS could define 'local-bd-address', with zero BD addresses.
3455  * example, the DTS could define 'local-bd-address' with an all-zero value.
3456 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
3457 {
3458 	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
3459 	bdaddr_t ba;
3460 	int ret;
3461 
3462 	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
3463 					    (u8 *)&ba, sizeof(ba));
3464 	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
3465 		return;
3466 
3467 	if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks))
3468 		baswap(&hdev->public_addr, &ba);
3469 	else
3470 		bacpy(&hdev->public_addr, &ba);
3471 }
3472 
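/* Illustrative devicetree snippet (values hypothetical) that the lookup
 * above would pick up. Per the DT bindings the bytes of
 * 'local-bd-address' are stored little-endian, which is why
 * HCI_QUIRK_BDADDR_PROPERTY_BROKEN triggers a byte swap for firmware
 * that populated the property in big-endian order:
 *
 *	bluetooth {
 *		compatible = "qcom,wcn3990-bt";
 *		local-bd-address = [ 55 44 33 22 11 00 ];
 *	};
 */
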
3473 struct hci_init_stage {
3474 	int (*func)(struct hci_dev *hdev);
3475 };
3476 
3477 /* Run a NULL-terminated table of init stage functions */
3478 static int hci_init_stage_sync(struct hci_dev *hdev,
3479 			       const struct hci_init_stage *stage)
3480 {
3481 	size_t i;
3482 
3483 	for (i = 0; stage[i].func; i++) {
3484 		int err;
3485 
3486 		err = stage[i].func(hdev);
3487 		if (err)
3488 			return err;
3489 	}
3490 
3491 	return 0;
3492 }
3493 
3494 /* Read Local Version */
3495 static int hci_read_local_version_sync(struct hci_dev *hdev)
3496 {
3497 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION,
3498 				     0, NULL, HCI_CMD_TIMEOUT);
3499 }
3500 
3501 /* Read BD Address */
3502 static int hci_read_bd_addr_sync(struct hci_dev *hdev)
3503 {
3504 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR,
3505 				     0, NULL, HCI_CMD_TIMEOUT);
3506 }
3507 
3508 #define HCI_INIT(_func) \
3509 { \
3510 	.func = _func, \
3511 }
3512 
3513 static const struct hci_init_stage hci_init0[] = {
3514 	/* HCI_OP_READ_LOCAL_VERSION */
3515 	HCI_INIT(hci_read_local_version_sync),
3516 	/* HCI_OP_READ_BD_ADDR */
3517 	HCI_INIT(hci_read_bd_addr_sync),
3518 	{}
3519 };
3520 
3521 int hci_reset_sync(struct hci_dev *hdev)
3522 {
3523 	int err;
3524 
3525 	set_bit(HCI_RESET, &hdev->flags);
3526 
3527 	err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
3528 				    HCI_CMD_TIMEOUT);
3529 	if (err)
3530 		return err;
3531 
3532 	return 0;
3533 }
3534 
3535 static int hci_init0_sync(struct hci_dev *hdev)
3536 {
3537 	int err;
3538 
3539 	bt_dev_dbg(hdev, "");
3540 
3541 	/* Reset */
3542 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3543 		err = hci_reset_sync(hdev);
3544 		if (err)
3545 			return err;
3546 	}
3547 
3548 	return hci_init_stage_sync(hdev, hci_init0);
3549 }
3550 
3551 static int hci_unconf_init_sync(struct hci_dev *hdev)
3552 {
3553 	int err;
3554 
3555 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3556 		return 0;
3557 
3558 	err = hci_init0_sync(hdev);
3559 	if (err < 0)
3560 		return err;
3561 
3562 	if (hci_dev_test_flag(hdev, HCI_SETUP))
3563 		hci_debugfs_create_basic(hdev);
3564 
3565 	return 0;
3566 }
3567 
3568 /* Read Local Supported Features. */
3569 static int hci_read_local_features_sync(struct hci_dev *hdev)
3570 {
3571 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
3572 				     0, NULL, HCI_CMD_TIMEOUT);
3573 }
3574 
3575 /* BR Controller init stage 1 command sequence */
3576 static const struct hci_init_stage br_init1[] = {
3577 	/* HCI_OP_READ_LOCAL_FEATURES */
3578 	HCI_INIT(hci_read_local_features_sync),
3579 	/* HCI_OP_READ_LOCAL_VERSION */
3580 	HCI_INIT(hci_read_local_version_sync),
3581 	/* HCI_OP_READ_BD_ADDR */
3582 	HCI_INIT(hci_read_bd_addr_sync),
3583 	{}
3584 };
3585 
3586 /* Read Local Commands */
3587 static int hci_read_local_cmds_sync(struct hci_dev *hdev)
3588 {
3589 	/* All Bluetooth 1.2 and later controllers should support the
3590 	 * HCI command for reading the local supported commands.
3591 	 *
3592 	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
3593 	 * but do not have support for this command. If that is the case,
3594 	 * the driver can quirk the behavior and skip reading the local
3595 	 * supported commands.
3596 	 */
3597 	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
3598 	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
3599 		return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
3600 					     0, NULL, HCI_CMD_TIMEOUT);
3601 
3602 	return 0;
3603 }
3604 
3605 static int hci_init1_sync(struct hci_dev *hdev)
3606 {
3607 	int err;
3608 
3609 	bt_dev_dbg(hdev, "");
3610 
3611 	/* Reset */
3612 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3613 		err = hci_reset_sync(hdev);
3614 		if (err)
3615 			return err;
3616 	}
3617 
3618 	return hci_init_stage_sync(hdev, br_init1);
3619 }
3620 
3621 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
3622 static int hci_read_buffer_size_sync(struct hci_dev *hdev)
3623 {
3624 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
3625 				     0, NULL, HCI_CMD_TIMEOUT);
3626 }
3627 
3628 /* Read Class of Device */
3629 static int hci_read_dev_class_sync(struct hci_dev *hdev)
3630 {
3631 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
3632 				     0, NULL, HCI_CMD_TIMEOUT);
3633 }
3634 
3635 /* Read Local Name */
3636 static int hci_read_local_name_sync(struct hci_dev *hdev)
3637 {
3638 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
3639 				     0, NULL, HCI_CMD_TIMEOUT);
3640 }
3641 
3642 /* Read Voice Setting */
3643 static int hci_read_voice_setting_sync(struct hci_dev *hdev)
3644 {
3645 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
3646 				     0, NULL, HCI_CMD_TIMEOUT);
3647 }
3648 
3649 /* Read Number of Supported IAC */
3650 static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
3651 {
3652 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
3653 				     0, NULL, HCI_CMD_TIMEOUT);
3654 }
3655 
3656 /* Read Current IAC LAP */
3657 static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
3658 {
3659 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
3660 				     0, NULL, HCI_CMD_TIMEOUT);
3661 }
3662 
3663 static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
3664 				     u8 cond_type, bdaddr_t *bdaddr,
3665 				     u8 auto_accept)
3666 {
3667 	struct hci_cp_set_event_filter cp;
3668 
3669 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3670 		return 0;
3671 
3672 	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3673 		return 0;
3674 
3675 	memset(&cp, 0, sizeof(cp));
3676 	cp.flt_type = flt_type;
3677 
3678 	if (flt_type != HCI_FLT_CLEAR_ALL) {
3679 		cp.cond_type = cond_type;
3680 		bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
3681 		cp.addr_conn_flt.auto_accept = auto_accept;
3682 	}
3683 
3684 	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
3685 				     flt_type == HCI_FLT_CLEAR_ALL ?
3686 				     sizeof(cp.flt_type) : sizeof(cp), &cp,
3687 				     HCI_CMD_TIMEOUT);
3688 }
3689 
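/* Illustrative sketch (not part of this file): program an auto-accept
 * connection filter for a single peer. The HCI_FLT_CONN_SETUP,
 * HCI_CONN_SETUP_ALLOW_BDADDR and HCI_CONN_SETUP_AUTO_ON constants are
 * defined in hci.h; the wrapper name is hypothetical.
 */
static int __maybe_unused example_auto_accept_sync(struct hci_dev *hdev,
						   bdaddr_t *peer)
{
	return hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
					 HCI_CONN_SETUP_ALLOW_BDADDR,
					 peer, HCI_CONN_SETUP_AUTO_ON);
}
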
3690 static int hci_clear_event_filter_sync(struct hci_dev *hdev)
3691 {
3692 	if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
3693 		return 0;
3694 
3695 	/* In theory the state machine should not reach here unless
3696 	 * a hci_set_event_filter_sync() call succeeds, but we do
3697 	 * the check both for parity and as a future reminder.
3698 	 */
3699 	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3700 		return 0;
3701 
3702 	return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
3703 					 BDADDR_ANY, 0x00);
3704 }
3705 
3706 /* Connection accept timeout ~20 secs */
3707 static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
3708 {
3709 	__le16 param = cpu_to_le16(0x7d00);
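	/* 0x7d00 = 32000 baseband slots of 0.625 ms each, i.e. 20 seconds */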
3710 
3711 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
3712 				     sizeof(param), &param, HCI_CMD_TIMEOUT);
3713 }
3714 
3715 /* BR Controller init stage 2 command sequence */
3716 static const struct hci_init_stage br_init2[] = {
3717 	/* HCI_OP_READ_BUFFER_SIZE */
3718 	HCI_INIT(hci_read_buffer_size_sync),
3719 	/* HCI_OP_READ_CLASS_OF_DEV */
3720 	HCI_INIT(hci_read_dev_class_sync),
3721 	/* HCI_OP_READ_LOCAL_NAME */
3722 	HCI_INIT(hci_read_local_name_sync),
3723 	/* HCI_OP_READ_VOICE_SETTING */
3724 	HCI_INIT(hci_read_voice_setting_sync),
3725 	/* HCI_OP_READ_NUM_SUPPORTED_IAC */
3726 	HCI_INIT(hci_read_num_supported_iac_sync),
3727 	/* HCI_OP_READ_CURRENT_IAC_LAP */
3728 	HCI_INIT(hci_read_current_iac_lap_sync),
3729 	/* HCI_OP_SET_EVENT_FLT */
3730 	HCI_INIT(hci_clear_event_filter_sync),
3731 	/* HCI_OP_WRITE_CA_TIMEOUT */
3732 	HCI_INIT(hci_write_ca_timeout_sync),
3733 	{}
3734 };
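
/* A stage table such as br_init2 is a {}-terminated array of synchronous
 * callbacks; hci_init_stage_sync() invokes the entries in order and
 * aborts the whole sequence on the first non-zero return.
 */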
3735 
3736 static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
3737 {
3738 	u8 mode = 0x01;
3739 
3740 	if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3741 		return 0;
3742 
3743 	/* When SSP is available, the host features page should be
3744 	 * available as well. However, some controllers report
3745 	 * max_page as 0 as long as SSP has not been enabled.
3746 	 * To get proper debugging output, force max_page to
3747 	 * at least 1.
3748 	 */
3749 	hdev->max_page = 0x01;
3750 
3751 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3752 				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3753 }
3754 
3755 static int hci_write_eir_sync(struct hci_dev *hdev)
3756 {
3757 	struct hci_cp_write_eir cp;
3758 
3759 	if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3760 		return 0;
3761 
3762 	memset(hdev->eir, 0, sizeof(hdev->eir));
3763 	memset(&cp, 0, sizeof(cp));
3764 
3765 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
3766 				     HCI_CMD_TIMEOUT);
3767 }
3768 
3769 static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
3770 {
3771 	u8 mode;
3772 
3773 	if (!lmp_inq_rssi_capable(hdev) &&
3774 	    !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
3775 		return 0;
3776 
3777 	/* If Extended Inquiry Result events are supported, then
3778 	 * they are clearly preferred over Inquiry Result with RSSI
3779 	 * events.
3780 	 */
3781 	mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
3782 
3783 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE,
3784 				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3785 }
3786 
3787 static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev)
3788 {
3789 	if (!lmp_inq_tx_pwr_capable(hdev))
3790 		return 0;
3791 
3792 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
3793 				     0, NULL, HCI_CMD_TIMEOUT);
3794 }
3795 
3796 static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page)
3797 {
3798 	struct hci_cp_read_local_ext_features cp;
3799 
3800 	if (!lmp_ext_feat_capable(hdev))
3801 		return 0;
3802 
3803 	memset(&cp, 0, sizeof(cp));
3804 	cp.page = page;
3805 
3806 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
3807 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3808 }
3809 
3810 static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev)
3811 {
3812 	return hci_read_local_ext_features_sync(hdev, 0x01);
3813 }
3814 
3815 /* HCI Controller init stage 2 command sequence */
3816 static const struct hci_init_stage hci_init2[] = {
3817 	/* HCI_OP_READ_LOCAL_COMMANDS */
3818 	HCI_INIT(hci_read_local_cmds_sync),
3819 	/* HCI_OP_WRITE_SSP_MODE */
3820 	HCI_INIT(hci_write_ssp_mode_1_sync),
3821 	/* HCI_OP_WRITE_EIR */
3822 	HCI_INIT(hci_write_eir_sync),
3823 	/* HCI_OP_WRITE_INQUIRY_MODE */
3824 	HCI_INIT(hci_write_inquiry_mode_sync),
3825 	/* HCI_OP_READ_INQ_RSP_TX_POWER */
3826 	HCI_INIT(hci_read_inq_rsp_tx_power_sync),
3827 	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
3828 	HCI_INIT(hci_read_local_ext_features_1_sync),
3829 	/* HCI_OP_WRITE_AUTH_ENABLE */
3830 	HCI_INIT(hci_write_auth_enable_sync),
3831 	{}
3832 };
3833 
3834 /* Read LE Buffer Size */
3835 static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
3836 {
3837 	/* Use Read LE Buffer Size V2 if supported */
3838 	if (iso_capable(hdev) && hdev->commands[41] & 0x20)
3839 		return __hci_cmd_sync_status(hdev,
3840 					     HCI_OP_LE_READ_BUFFER_SIZE_V2,
3841 					     0, NULL, HCI_CMD_TIMEOUT);
3842 
3843 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
3844 				     0, NULL, HCI_CMD_TIMEOUT);
3845 }
3846 
3847 /* Read LE Local Supported Features */
3848 static int hci_le_read_local_features_sync(struct hci_dev *hdev)
3849 {
3850 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES,
3851 				     0, NULL, HCI_CMD_TIMEOUT);
3852 }
3853 
3854 /* Read LE Supported States */
3855 static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
3856 {
3857 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES,
3858 				     0, NULL, HCI_CMD_TIMEOUT);
3859 }
3860 
3861 /* LE Controller init stage 2 command sequence */
3862 static const struct hci_init_stage le_init2[] = {
3863 	/* HCI_OP_LE_READ_LOCAL_FEATURES */
3864 	HCI_INIT(hci_le_read_local_features_sync),
3865 	/* HCI_OP_LE_READ_BUFFER_SIZE */
3866 	HCI_INIT(hci_le_read_buffer_size_sync),
3867 	/* HCI_OP_LE_READ_SUPPORTED_STATES */
3868 	HCI_INIT(hci_le_read_supported_states_sync),
3869 	{}
3870 };
3871 
3872 static int hci_init2_sync(struct hci_dev *hdev)
3873 {
3874 	int err;
3875 
3876 	bt_dev_dbg(hdev, "");
3877 
3878 	err = hci_init_stage_sync(hdev, hci_init2);
3879 	if (err)
3880 		return err;
3881 
3882 	if (lmp_bredr_capable(hdev)) {
3883 		err = hci_init_stage_sync(hdev, br_init2);
3884 		if (err)
3885 			return err;
3886 	} else {
3887 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
3888 	}
3889 
3890 	if (lmp_le_capable(hdev)) {
3891 		err = hci_init_stage_sync(hdev, le_init2);
3892 		if (err)
3893 			return err;
3894 		/* LE-only controllers have LE implicitly enabled */
3895 		if (!lmp_bredr_capable(hdev))
3896 			hci_dev_set_flag(hdev, HCI_LE_ENABLED);
3897 	}
3898 
3899 	return 0;
3900 }
3901 
3902 static int hci_set_event_mask_sync(struct hci_dev *hdev)
3903 {
3904 	/* The second byte is 0xff instead of 0x9f (two reserved bits
3905 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
3906 	 * command otherwise.
3907 	 */
3908 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
3909 
3910 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to
3911 	 * set an event mask for pre-1.2 devices.
3912 	 */
3913 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3914 		return 0;
3915 
3916 	if (lmp_bredr_capable(hdev)) {
3917 		events[4] |= 0x01; /* Flow Specification Complete */
3918 
3919 		/* Don't set Disconnect Complete and Mode Change when
3920 		 * suspended, as that would wake up the host when
3921 		 * disconnecting due to suspend.
3922 		 */
3923 		if (hdev->suspended) {
3924 			events[0] &= 0xef;
3925 			events[2] &= 0xf7;
3926 		}
3927 	} else {
3928 		/* Use a different default for LE-only devices */
3929 		memset(events, 0, sizeof(events));
3930 		events[1] |= 0x20; /* Command Complete */
3931 		events[1] |= 0x40; /* Command Status */
3932 		events[1] |= 0x80; /* Hardware Error */
3933 
3934 		/* If the controller supports the Disconnect command, enable
3935 		 * the corresponding event. In addition enable packet flow
3936 		 * control related events.
3937 		 */
3938 		if (hdev->commands[0] & 0x20) {
3939 			/* Don't set Disconnect Complete when suspended, as
3940 			 * that would wake up the host when disconnecting
3941 			 * due to suspend.
3942 			 */
3943 			if (!hdev->suspended)
3944 				events[0] |= 0x10; /* Disconnection Complete */
3945 			events[2] |= 0x04; /* Number of Completed Packets */
3946 			events[3] |= 0x02; /* Data Buffer Overflow */
3947 		}
3948 
3949 		/* If the controller supports the Read Remote Version
3950 		 * Information command, enable the corresponding event.
3951 		 */
3952 		if (hdev->commands[2] & 0x80)
3953 			events[1] |= 0x08; /* Read Remote Version Information
3954 					    * Complete
3955 					    */
3956 
3957 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
3958 			events[0] |= 0x80; /* Encryption Change */
3959 			events[5] |= 0x80; /* Encryption Key Refresh Complete */
3960 		}
3961 	}
3962 
3963 	if (lmp_inq_rssi_capable(hdev) ||
3964 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
3965 		events[4] |= 0x02; /* Inquiry Result with RSSI */
3966 
3967 	if (lmp_ext_feat_capable(hdev))
3968 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
3969 
3970 	if (lmp_esco_capable(hdev)) {
3971 		events[5] |= 0x08; /* Synchronous Connection Complete */
3972 		events[5] |= 0x10; /* Synchronous Connection Changed */
3973 	}
3974 
3975 	if (lmp_sniffsubr_capable(hdev))
3976 		events[5] |= 0x20; /* Sniff Subrating */
3977 
3978 	if (lmp_pause_enc_capable(hdev))
3979 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
3980 
3981 	if (lmp_ext_inq_capable(hdev))
3982 		events[5] |= 0x40; /* Extended Inquiry Result */
3983 
3984 	if (lmp_no_flush_capable(hdev))
3985 		events[7] |= 0x01; /* Enhanced Flush Complete */
3986 
3987 	if (lmp_lsto_capable(hdev))
3988 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
3989 
3990 	if (lmp_ssp_capable(hdev)) {
3991 		events[6] |= 0x01;	/* IO Capability Request */
3992 		events[6] |= 0x02;	/* IO Capability Response */
3993 		events[6] |= 0x04;	/* User Confirmation Request */
3994 		events[6] |= 0x08;	/* User Passkey Request */
3995 		events[6] |= 0x10;	/* Remote OOB Data Request */
3996 		events[6] |= 0x20;	/* Simple Pairing Complete */
3997 		events[7] |= 0x04;	/* User Passkey Notification */
3998 		events[7] |= 0x08;	/* Keypress Notification */
3999 		events[7] |= 0x10;	/* Remote Host Supported
4000 					 * Features Notification
4001 					 */
4002 	}
4003 
4004 	if (lmp_le_capable(hdev))
4005 		events[7] |= 0x20;	/* LE Meta-Event */
4006 
4007 	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK,
4008 				     sizeof(events), events, HCI_CMD_TIMEOUT);
4009 }
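
/* In the event mask above, byte n bit b corresponds to mask bit 8 * n + b,
 * which enables the event with code 8 * n + b + 1. For example
 * events[4] |= 0x01 sets mask bit 32 and thereby enables event 0x21,
 * Flow Specification Complete.
 */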
4010 
4011 static int hci_read_stored_link_key_sync(struct hci_dev *hdev)
4012 {
4013 	struct hci_cp_read_stored_link_key cp;
4014 
4015 	if (!(hdev->commands[6] & 0x20) ||
4016 	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
4017 		return 0;
4018 
4019 	memset(&cp, 0, sizeof(cp));
4020 	bacpy(&cp.bdaddr, BDADDR_ANY);
4021 	cp.read_all = 0x01;
4022 
4023 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY,
4024 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4025 }
4026 
4027 static int hci_setup_link_policy_sync(struct hci_dev *hdev)
4028 {
4029 	struct hci_cp_write_def_link_policy cp;
4030 	u16 link_policy = 0;
4031 
4032 	if (!(hdev->commands[5] & 0x10))
4033 		return 0;
4034 
4035 	memset(&cp, 0, sizeof(cp));
4036 
4037 	if (lmp_rswitch_capable(hdev))
4038 		link_policy |= HCI_LP_RSWITCH;
4039 	if (lmp_hold_capable(hdev))
4040 		link_policy |= HCI_LP_HOLD;
4041 	if (lmp_sniff_capable(hdev))
4042 		link_policy |= HCI_LP_SNIFF;
4043 	if (lmp_park_capable(hdev))
4044 		link_policy |= HCI_LP_PARK;
4045 
4046 	cp.policy = cpu_to_le16(link_policy);
4047 
4048 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
4049 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4050 }
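
/* With all four LMP features present the policy written above is
 * HCI_LP_RSWITCH | HCI_LP_HOLD | HCI_LP_SNIFF | HCI_LP_PARK, i.e.
 * 0x0001 | 0x0002 | 0x0004 | 0x0008 = 0x000f, allowing every link
 * policy mode by default.
 */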
4051 
4052 static int hci_read_page_scan_activity_sync(struct hci_dev *hdev)
4053 {
4054 	if (!(hdev->commands[8] & 0x01))
4055 		return 0;
4056 
4057 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY,
4058 				     0, NULL, HCI_CMD_TIMEOUT);
4059 }
4060 
4061 static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
4062 {
4063 	if (!(hdev->commands[18] & 0x04) ||
4064 	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4065 	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4066 		return 0;
4067 
4068 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4069 				     0, NULL, HCI_CMD_TIMEOUT);
4070 }
4071 
4072 static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
4073 {
4074 	/* Some older Broadcom based Bluetooth 1.2 controllers do not
4075 	 * support the Read Page Scan Type command. Check support for
4076 	 * this command in the bit mask of supported commands.
4077 	 */
4078 	if (!(hdev->commands[13] & 0x01))
4079 		return 0;
4080 
4081 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
4082 				     0, NULL, HCI_CMD_TIMEOUT);
4083 }
4084 
4085 /* Read features beyond page 1 if available */
4086 static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev)
4087 {
4088 	u8 page;
4089 	int err;
4090 
4091 	if (!lmp_ext_feat_capable(hdev))
4092 		return 0;
4093 
4094 	for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page;
4095 	     page++) {
4096 		err = hci_read_local_ext_features_sync(hdev, page);
4097 		if (err)
4098 			return err;
4099 	}
4100 
4101 	return 0;
4102 }
4103 
4104 /* HCI Controller init stage 3 command sequence */
4105 static const struct hci_init_stage hci_init3[] = {
4106 	/* HCI_OP_SET_EVENT_MASK */
4107 	HCI_INIT(hci_set_event_mask_sync),
4108 	/* HCI_OP_READ_STORED_LINK_KEY */
4109 	HCI_INIT(hci_read_stored_link_key_sync),
4110 	/* HCI_OP_WRITE_DEF_LINK_POLICY */
4111 	HCI_INIT(hci_setup_link_policy_sync),
4112 	/* HCI_OP_READ_PAGE_SCAN_ACTIVITY */
4113 	HCI_INIT(hci_read_page_scan_activity_sync),
4114 	/* HCI_OP_READ_DEF_ERR_DATA_REPORTING */
4115 	HCI_INIT(hci_read_def_err_data_reporting_sync),
4116 	/* HCI_OP_READ_PAGE_SCAN_TYPE */
4117 	HCI_INIT(hci_read_page_scan_type_sync),
4118 	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
4119 	HCI_INIT(hci_read_local_ext_features_all_sync),
4120 	{}
4121 };
4122 
4123 static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
4124 {
4125 	u8 events[8];
4126 
4127 	if (!lmp_le_capable(hdev))
4128 		return 0;
4129 
4130 	memset(events, 0, sizeof(events));
4131 
4132 	if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
4133 		events[0] |= 0x10;	/* LE Long Term Key Request */
4134 
4135 	/* If the controller supports the Connection Parameters Request
4136 	 * Link Layer Procedure, enable the corresponding event.
4137 	 */
4138 	if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
4139 		/* LE Remote Connection Parameter Request */
4140 		events[0] |= 0x20;
4141 
4142 	/* If the controller supports the Data Length Extension
4143 	 * feature, enable the corresponding event.
4144 	 */
4145 	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
4146 		events[0] |= 0x40;	/* LE Data Length Change */
4147 
4148 	/* If the controller supports the LL Privacy feature or LE
4149 	 * Extended Advertising, enable the corresponding event.
4150 	 */
4151 	if (use_enhanced_conn_complete(hdev))
4152 		events[1] |= 0x02;	/* LE Enhanced Connection Complete */
4153 
4154 	/* If the controller supports Extended Scanner Filter
4155 	 * Policies, enable the corresponding event.
4156 	 */
4157 	if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
4158 		events[1] |= 0x04;	/* LE Direct Advertising Report */
4159 
4160 	/* If the controller supports Channel Selection Algorithm #2
4161 	 * feature, enable the corresponding event.
4162 	 */
4163 	if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
4164 		events[2] |= 0x08;	/* LE Channel Selection Algorithm */
4165 
4166 	/* If the controller supports the LE Set Scan Enable command,
4167 	 * enable the corresponding advertising report event.
4168 	 */
4169 	if (hdev->commands[26] & 0x08)
4170 		events[0] |= 0x02;	/* LE Advertising Report */
4171 
4172 	/* If the controller supports the LE Create Connection
4173 	 * command, enable the corresponding event.
4174 	 */
4175 	if (hdev->commands[26] & 0x10)
4176 		events[0] |= 0x01;	/* LE Connection Complete */
4177 
4178 	/* If the controller supports the LE Connection Update
4179 	 * command, enable the corresponding event.
4180 	 */
4181 	if (hdev->commands[27] & 0x04)
4182 		events[0] |= 0x04;	/* LE Connection Update Complete */
4183 
4184 	/* If the controller supports the LE Read Remote Used Features
4185 	 * command, enable the corresponding event.
4186 	 */
4187 	if (hdev->commands[27] & 0x20)
4188 		/* LE Read Remote Used Features Complete */
4189 		events[0] |= 0x08;
4190 
4191 	/* If the controller supports the LE Read Local P-256
4192 	 * Public Key command, enable the corresponding event.
4193 	 */
4194 	if (hdev->commands[34] & 0x02)
4195 		/* LE Read Local P-256 Public Key Complete */
4196 		events[0] |= 0x80;
4197 
4198 	/* If the controller supports the LE Generate DHKey
4199 	 * command, enable the corresponding event.
4200 	 */
4201 	if (hdev->commands[34] & 0x04)
4202 		events[1] |= 0x01;	/* LE Generate DHKey Complete */
4203 
4204 	/* If the controller supports the LE Set Default PHY or
4205 	 * LE Set PHY commands, enable the corresponding event.
4206 	 */
4207 	if (hdev->commands[35] & (0x20 | 0x40))
4208 		events[1] |= 0x08;	/* LE PHY Update Complete */
4209 
4210 	/* If the controller supports LE Set Extended Scan Parameters
4211 	 * and LE Set Extended Scan Enable commands, enable the
4212 	 * corresponding event.
4213 	 */
4214 	if (use_ext_scan(hdev))
4215 		events[1] |= 0x10;	/* LE Extended Advertising Report */
4216 
4217 	/* If the controller supports the LE Extended Advertising
4218 	 * command, enable the corresponding event.
4219 	 */
4220 	if (ext_adv_capable(hdev))
4221 		events[2] |= 0x02;	/* LE Advertising Set Terminated */
4222 
4223 	if (cis_capable(hdev)) {
4224 		events[3] |= 0x01;	/* LE CIS Established */
4225 		if (cis_peripheral_capable(hdev))
4226 			events[3] |= 0x02; /* LE CIS Request */
4227 	}
4228 
4229 	if (bis_capable(hdev)) {
4230 		events[1] |= 0x20;	/* LE PA Report */
4231 		events[1] |= 0x40;	/* LE PA Sync Established */
4232 		events[3] |= 0x04;	/* LE Create BIG Complete */
4233 		events[3] |= 0x08;	/* LE Terminate BIG Complete */
4234 		events[3] |= 0x10;	/* LE BIG Sync Established */
4235 		events[3] |= 0x20;	/* LE BIG Sync Loss */
4236 		events[4] |= 0x02;	/* LE BIG Info Advertising Report */
4237 	}
4238 
4239 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
4240 				     sizeof(events), events, HCI_CMD_TIMEOUT);
4241 }
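
/* The LE event mask uses the same byte n bit b -> mask bit 8 * n + b
 * layout, where mask bit N enables the LE subevent with code N + 1.
 * For instance events[0] |= 0x10 is mask bit 4, enabling subevent 0x05,
 * LE Long Term Key Request.
 */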
4242 
4243 /* Read LE Advertising Channel TX Power */
4244 static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
4245 {
4246 	if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
4247 		/* The HCI spec forbids mixing legacy and extended
4248 		 * advertising commands; READ_ADV_TX_POWER belongs to
4249 		 * the legacy set. Do not issue it when extended
4250 		 * advertising is supported, otherwise the controller
4251 		 * returns COMMAND_DISALLOWED for extended commands.
4252 		 */
4253 		return __hci_cmd_sync_status(hdev,
4254 					       HCI_OP_LE_READ_ADV_TX_POWER,
4255 					       0, NULL, HCI_CMD_TIMEOUT);
4256 	}
4257 
4258 	return 0;
4259 }
4260 
4261 /* Read LE Min/Max Tx Power */
4262 static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
4263 {
4264 	if (!(hdev->commands[38] & 0x80) ||
4265 	    test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks))
4266 		return 0;
4267 
4268 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
4269 				     0, NULL, HCI_CMD_TIMEOUT);
4270 }
4271 
4272 /* Read LE Accept List Size */
4273 static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
4274 {
4275 	if (!(hdev->commands[26] & 0x40))
4276 		return 0;
4277 
4278 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4279 				     0, NULL, HCI_CMD_TIMEOUT);
4280 }
4281 
4282 /* Read LE Resolving List Size */
4283 static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
4284 {
4285 	if (!(hdev->commands[34] & 0x40))
4286 		return 0;
4287 
4288 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
4289 				     0, NULL, HCI_CMD_TIMEOUT);
4290 }
4291 
4292 /* Clear LE Resolving List */
4293 static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev)
4294 {
4295 	if (!(hdev->commands[34] & 0x20))
4296 		return 0;
4297 
4298 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL,
4299 				     HCI_CMD_TIMEOUT);
4300 }
4301 
4302 /* Set RPA timeout */
4303 static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
4304 {
4305 	__le16 timeout = cpu_to_le16(hdev->rpa_timeout);
4306 
4307 	if (!(hdev->commands[35] & 0x04) ||
4308 	    test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks))
4309 		return 0;
4310 
4311 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
4312 				     sizeof(timeout), &timeout,
4313 				     HCI_CMD_TIMEOUT);
4314 }
4315 
4316 /* Read LE Maximum Data Length */
4317 static int hci_le_read_max_data_len_sync(struct hci_dev *hdev)
4318 {
4319 	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4320 		return 0;
4321 
4322 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL,
4323 				     HCI_CMD_TIMEOUT);
4324 }
4325 
4326 /* Read LE Suggested Default Data Length */
4327 static int hci_le_read_def_data_len_sync(struct hci_dev *hdev)
4328 {
4329 	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4330 		return 0;
4331 
4332 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL,
4333 				     HCI_CMD_TIMEOUT);
4334 }
4335 
4336 /* Read LE Number of Supported Advertising Sets */
4337 static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev)
4338 {
4339 	if (!ext_adv_capable(hdev))
4340 		return 0;
4341 
4342 	return __hci_cmd_sync_status(hdev,
4343 				     HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4344 				     0, NULL, HCI_CMD_TIMEOUT);
4345 }
4346 
4347 /* Write LE Host Supported */
4348 static int hci_set_le_support_sync(struct hci_dev *hdev)
4349 {
4350 	struct hci_cp_write_le_host_supported cp;
4351 
4352 	/* LE-only devices do not support explicit enablement */
4353 	if (!lmp_bredr_capable(hdev))
4354 		return 0;
4355 
4356 	memset(&cp, 0, sizeof(cp));
4357 
4358 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
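		/* Simultaneous LE and BR/EDR (simul) has been deprecated
		 * by later Core specifications, so it is always left 0x00.
		 */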
4359 		cp.le = 0x01;
4360 		cp.simul = 0x00;
4361 	}
4362 
4363 	if (cp.le == lmp_host_le_capable(hdev))
4364 		return 0;
4365 
4366 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4367 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4368 }
4369 
4370 /* LE Set Host Feature */
4371 static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
4372 {
4373 	struct hci_cp_le_set_host_feature cp;
4374 
4375 	if (!cis_capable(hdev))
4376 		return 0;
4377 
4378 	memset(&cp, 0, sizeof(cp));
4379 
4380 	/* Connected Isochronous Channels (Host Support) */
4381 	cp.bit_number = 32;
4382 	cp.bit_value = 1;
4383 
4384 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,
4385 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4386 }
4387 
4388 /* LE Controller init stage 3 command sequence */
4389 static const struct hci_init_stage le_init3[] = {
4390 	/* HCI_OP_LE_SET_EVENT_MASK */
4391 	HCI_INIT(hci_le_set_event_mask_sync),
4392 	/* HCI_OP_LE_READ_ADV_TX_POWER */
4393 	HCI_INIT(hci_le_read_adv_tx_power_sync),
4394 	/* HCI_OP_LE_READ_TRANSMIT_POWER */
4395 	HCI_INIT(hci_le_read_tx_power_sync),
4396 	/* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */
4397 	HCI_INIT(hci_le_read_accept_list_size_sync),
4398 	/* HCI_OP_LE_CLEAR_ACCEPT_LIST */
4399 	HCI_INIT(hci_le_clear_accept_list_sync),
4400 	/* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
4401 	HCI_INIT(hci_le_read_resolv_list_size_sync),
4402 	/* HCI_OP_LE_CLEAR_RESOLV_LIST */
4403 	HCI_INIT(hci_le_clear_resolv_list_sync),
4404 	/* HCI_OP_LE_SET_RPA_TIMEOUT */
4405 	HCI_INIT(hci_le_set_rpa_timeout_sync),
4406 	/* HCI_OP_LE_READ_MAX_DATA_LEN */
4407 	HCI_INIT(hci_le_read_max_data_len_sync),
4408 	/* HCI_OP_LE_READ_DEF_DATA_LEN */
4409 	HCI_INIT(hci_le_read_def_data_len_sync),
4410 	/* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
4411 	HCI_INIT(hci_le_read_num_support_adv_sets_sync),
4412 	/* HCI_OP_WRITE_LE_HOST_SUPPORTED */
4413 	HCI_INIT(hci_set_le_support_sync),
4414 	/* HCI_OP_LE_SET_HOST_FEATURE */
4415 	HCI_INIT(hci_le_set_host_feature_sync),
4416 	{}
4417 };
4418 
4419 static int hci_init3_sync(struct hci_dev *hdev)
4420 {
4421 	int err;
4422 
4423 	bt_dev_dbg(hdev, "");
4424 
4425 	err = hci_init_stage_sync(hdev, hci_init3);
4426 	if (err)
4427 		return err;
4428 
4429 	if (lmp_le_capable(hdev))
4430 		return hci_init_stage_sync(hdev, le_init3);
4431 
4432 	return 0;
4433 }
4434 
4435 static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
4436 {
4437 	struct hci_cp_delete_stored_link_key cp;
4438 
4439 	/* Some Broadcom based Bluetooth controllers do not support the
4440 	 * Delete Stored Link Key command. They clearly indicate its
4441 	 * absence in the bit mask of supported commands.
4442 	 *
4443 	 * Check the supported commands and send the command only if it
4444 	 * is marked as supported. If not supported, assume that the
4445 	 * controller has no actual support for stored link keys, which
4446 	 * makes this command redundant anyway.
4447 	 *
4448 	 * Some controllers indicate that they support handling deleting
4449 	 * stored link keys, but they don't. The quirk lets a driver
4450 	 * just disable this command.
4451 	 */
4452 	if (!(hdev->commands[6] & 0x80) ||
4453 	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
4454 		return 0;
4455 
4456 	memset(&cp, 0, sizeof(cp));
4457 	bacpy(&cp.bdaddr, BDADDR_ANY);
4458 	cp.delete_all = 0x01;
4459 
4460 	return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
4461 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4462 }
4463 
4464 static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
4465 {
4466 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
4467 	bool changed = false;
4468 
4469 	/* Set event mask page 2 if the HCI command for it is supported */
4470 	if (!(hdev->commands[22] & 0x04))
4471 		return 0;
4472 
4473 	/* If the Connectionless Peripheral Broadcast central role is
4474 	 * supported, enable all necessary events for it.
4475 	 */
4476 	if (lmp_cpb_central_capable(hdev)) {
4477 		events[1] |= 0x40;	/* Triggered Clock Capture */
4478 		events[1] |= 0x80;	/* Synchronization Train Complete */
4479 		events[2] |= 0x08;	/* Truncated Page Complete */
4480 		events[2] |= 0x20;	/* CPB Channel Map Change */
4481 		changed = true;
4482 	}
4483 
4484 	/* If the Connectionless Peripheral Broadcast peripheral role is
4485 	 * supported, enable all necessary events for it.
4486 	 */
4487 	if (lmp_cpb_peripheral_capable(hdev)) {
4488 		events[2] |= 0x01;	/* Synchronization Train Received */
4489 		events[2] |= 0x02;	/* CPB Receive */
4490 		events[2] |= 0x04;	/* CPB Timeout */
4491 		events[2] |= 0x10;	/* Peripheral Page Response Timeout */
4492 		changed = true;
4493 	}
4494 
4495 	/* Enable Authenticated Payload Timeout Expired event if supported */
4496 	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
4497 		events[2] |= 0x80;
4498 		changed = true;
4499 	}
4500 
4501 	/* Some Broadcom based controllers indicate support for Set Event
4502 	 * Mask Page 2 command, but then actually do not support it. Since
4503 	 * the default value is all bits set to zero, the command is only
4504 	 * required if the event mask has to be changed. In case no change
4505 	 * to the event mask is needed, skip this command.
4506 	 */
4507 	if (!changed)
4508 		return 0;
4509 
4510 	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
4511 				     sizeof(events), events, HCI_CMD_TIMEOUT);
4512 }
4513 
4514 /* Read local codec list if the HCI command is supported */
4515 static int hci_read_local_codecs_sync(struct hci_dev *hdev)
4516 {
4517 	if (hdev->commands[45] & 0x04)
4518 		hci_read_supported_codecs_v2(hdev);
4519 	else if (hdev->commands[29] & 0x20)
4520 		hci_read_supported_codecs(hdev);
4521 
4522 	return 0;
4523 }
4524 
4525 /* Read local pairing options if the HCI command is supported */
4526 static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
4527 {
4528 	if (!(hdev->commands[41] & 0x08))
4529 		return 0;
4530 
4531 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
4532 				     0, NULL, HCI_CMD_TIMEOUT);
4533 }
4534 
4535 /* Get MWS transport configuration if the HCI command is supported */
4536 static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
4537 {
4538 	if (!mws_transport_config_capable(hdev))
4539 		return 0;
4540 
4541 	return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
4542 				     0, NULL, HCI_CMD_TIMEOUT);
4543 }
4544 
4545 /* Check for Synchronization Train support */
4546 static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
4547 {
4548 	if (!lmp_sync_train_capable(hdev))
4549 		return 0;
4550 
4551 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
4552 				     0, NULL, HCI_CMD_TIMEOUT);
4553 }
4554 
4555 /* Enable Secure Connections if supported and configured */
4556 static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
4557 {
4558 	u8 support = 0x01;
4559 
4560 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
4561 	    !bredr_sc_enabled(hdev))
4562 		return 0;
4563 
4564 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
4565 				     sizeof(support), &support,
4566 				     HCI_CMD_TIMEOUT);
4567 }
4568 
4569 /* Set erroneous data reporting, if supported, to match the wideband
4570  * speech setting value.
4571  */
4572 static int hci_set_err_data_report_sync(struct hci_dev *hdev)
4573 {
4574 	struct hci_cp_write_def_err_data_reporting cp;
4575 	bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);
4576 
4577 	if (!(hdev->commands[18] & 0x08) ||
4578 	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4579 	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4580 		return 0;
4581 
4582 	if (enabled == hdev->err_data_reporting)
4583 		return 0;
4584 
4585 	memset(&cp, 0, sizeof(cp));
4586 	cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED :
4587 				ERR_DATA_REPORTING_DISABLED;
4588 
4589 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4590 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4591 }
4592 
4593 static const struct hci_init_stage hci_init4[] = {
4594 	/* HCI_OP_DELETE_STORED_LINK_KEY */
4595 	HCI_INIT(hci_delete_stored_link_key_sync),
4596 	/* HCI_OP_SET_EVENT_MASK_PAGE_2 */
4597 	HCI_INIT(hci_set_event_mask_page_2_sync),
4598 	/* HCI_OP_READ_LOCAL_CODECS */
4599 	HCI_INIT(hci_read_local_codecs_sync),
4600 	/* HCI_OP_READ_LOCAL_PAIRING_OPTS */
4601 	HCI_INIT(hci_read_local_pairing_opts_sync),
4602 	/* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
4603 	HCI_INIT(hci_get_mws_transport_config_sync),
4604 	/* HCI_OP_READ_SYNC_TRAIN_PARAMS */
4605 	HCI_INIT(hci_read_sync_train_params_sync),
4606 	/* HCI_OP_WRITE_SC_SUPPORT */
4607 	HCI_INIT(hci_write_sc_support_1_sync),
4608 	/* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
4609 	HCI_INIT(hci_set_err_data_report_sync),
4610 	{}
4611 };
4612 
4613 /* Set Suggested Default Data Length to maximum if supported */
4614 static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
4615 {
4616 	struct hci_cp_le_write_def_data_len cp;
4617 
4618 	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4619 		return 0;
4620 
4621 	memset(&cp, 0, sizeof(cp));
4622 	cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
4623 	cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
4624 
4625 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
4626 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4627 }
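
/* hdev->le_max_tx_len and le_max_tx_time come from the LE Read Maximum
 * Data Length reply issued in stage 3, so the suggested defaults written
 * back here are simply the controller's own maxima (the spec bounds them
 * to 0x001B-0x00FB octets and 0x0148-0x4290 microseconds).
 */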
4628 
4629 /* Set the default PHY parameters if the command is supported, enabling
4630  * all supported PHYs according to the LE feature bits.
4631  */
4632 static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
4633 {
4634 	struct hci_cp_le_set_default_phy cp;
4635 
4636 	if (!(hdev->commands[35] & 0x20)) {
4637 		/* If the command is not supported it means only 1M PHY is
4638 		 * supported.
4639 		 */
4640 		hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
4641 		hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
4642 		return 0;
4643 	}
4644 
4645 	memset(&cp, 0, sizeof(cp));
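	/* all_phys = 0x00: the host states a preference for both the TX
	 * and the RX PHY; setting bit 0 or bit 1 would instead mean "no
	 * preference" for the respective direction.
	 */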
4646 	cp.all_phys = 0x00;
4647 	cp.tx_phys = HCI_LE_SET_PHY_1M;
4648 	cp.rx_phys = HCI_LE_SET_PHY_1M;
4649 
4650 	/* Enables 2M PHY if supported */
4651 	if (le_2m_capable(hdev)) {
4652 		cp.tx_phys |= HCI_LE_SET_PHY_2M;
4653 		cp.rx_phys |= HCI_LE_SET_PHY_2M;
4654 	}
4655 
4656 	/* Enables Coded PHY if supported */
4657 	if (le_coded_capable(hdev)) {
4658 		cp.tx_phys |= HCI_LE_SET_PHY_CODED;
4659 		cp.rx_phys |= HCI_LE_SET_PHY_CODED;
4660 	}
4661 
4662 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4663 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4664 }
4665 
4666 static const struct hci_init_stage le_init4[] = {
4667 	/* HCI_OP_LE_WRITE_DEF_DATA_LEN */
4668 	HCI_INIT(hci_le_set_write_def_data_len_sync),
4669 	/* HCI_OP_LE_SET_DEFAULT_PHY */
4670 	HCI_INIT(hci_le_set_default_phy_sync),
4671 	{}
4672 };
4673 
4674 static int hci_init4_sync(struct hci_dev *hdev)
4675 {
4676 	int err;
4677 
4678 	bt_dev_dbg(hdev, "");
4679 
4680 	err = hci_init_stage_sync(hdev, hci_init4);
4681 	if (err)
4682 		return err;
4683 
4684 	if (lmp_le_capable(hdev))
4685 		return hci_init_stage_sync(hdev, le_init4);
4686 
4687 	return 0;
4688 }
4689 
4690 static int hci_init_sync(struct hci_dev *hdev)
4691 {
4692 	int err;
4693 
4694 	err = hci_init1_sync(hdev);
4695 	if (err < 0)
4696 		return err;
4697 
4698 	if (hci_dev_test_flag(hdev, HCI_SETUP))
4699 		hci_debugfs_create_basic(hdev);
4700 
4701 	err = hci_init2_sync(hdev);
4702 	if (err < 0)
4703 		return err;
4704 
4705 	err = hci_init3_sync(hdev);
4706 	if (err < 0)
4707 		return err;
4708 
4709 	err = hci_init4_sync(hdev);
4710 	if (err < 0)
4711 		return err;
4712 
4713 	/* This function is only called when the controller is actually in
4714 	 * configured state. When the controller is marked as unconfigured,
4715 	 * this initialization procedure is not run.
4716 	 *
4717 	 * This means a controller can run through its setup phase and
4718 	 * then discover missing settings. If that is the case, this
4719 	 * function will not be called; it will only be called during
4720 	 * the config phase.
4721 	 *
4722 	 * So only when in setup phase or config phase, create the debugfs
4723 	 * entries and register the SMP channels.
4724 	 */
4725 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4726 	    !hci_dev_test_flag(hdev, HCI_CONFIG))
4727 		return 0;
4728 
4729 	if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
4730 		return 0;
4731 
4732 	hci_debugfs_create_common(hdev);
4733 
4734 	if (lmp_bredr_capable(hdev))
4735 		hci_debugfs_create_bredr(hdev);
4736 
4737 	if (lmp_le_capable(hdev))
4738 		hci_debugfs_create_le(hdev);
4739 
4740 	return 0;
4741 }
4742 
4743 #define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc }
4744 
4745 static const struct {
4746 	unsigned long quirk;
4747 	const char *desc;
4748 } hci_broken_table[] = {
4749 	HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
4750 			 "HCI Read Local Supported Commands not supported"),
4751 	HCI_QUIRK_BROKEN(STORED_LINK_KEY,
4752 			 "HCI Delete Stored Link Key command is advertised, "
4753 			 "but not supported."),
4754 	HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
4755 			 "HCI Read Default Erroneous Data Reporting command is "
4756 			 "advertised, but not supported."),
4757 	HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
4758 			 "HCI Read Transmit Power Level command is advertised, "
4759 			 "but not supported."),
4760 	HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL,
4761 			 "HCI Set Event Filter command not supported."),
4762 	HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
4763 			 "HCI Enhanced Setup Synchronous Connection command is "
4764 			 "advertised, but not supported."),
4765 	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
4766 			 "HCI LE Set Random Private Address Timeout command is "
4767 			 "advertised, but not supported."),
4768 	HCI_QUIRK_BROKEN(LE_CODED,
4769 			 "HCI LE Coded PHY feature bit is set, "
4770 			 "but its usage is not supported.")
4771 };
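
/* A transport driver flags one of the breakages above either before
 * registering the HCI device or from its setup callback, for example
 * (illustrative only):
 *
 *	set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
 */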
4772 
4773 /* This function handles hdev setup stage:
4774  *
4775  * Calls hdev->setup
4776  * Setup address if HCI_QUIRK_USE_BDADDR_PROPERTY is set.
4777  */
4778 static int hci_dev_setup_sync(struct hci_dev *hdev)
4779 {
4780 	int ret = 0;
4781 	bool invalid_bdaddr;
4782 	size_t i;
4783 
4784 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4785 	    !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks))
4786 		return 0;
4787 
4788 	bt_dev_dbg(hdev, "");
4789 
4790 	hci_sock_dev_event(hdev, HCI_DEV_SETUP);
4791 
4792 	if (hdev->setup)
4793 		ret = hdev->setup(hdev);
4794 
4795 	for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) {
4796 		if (test_bit(hci_broken_table[i].quirk, &hdev->quirks))
4797 			bt_dev_warn(hdev, "%s", hci_broken_table[i].desc);
4798 	}
4799 
4800 	/* The transport driver can set the quirk to mark the
4801 	 * BD_ADDR invalid before creating the HCI device or in
4802 	 * its setup callback.
4803 	 */
4804 	invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
4805 			 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
4806 	if (!ret) {
4807 		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) &&
4808 		    !bacmp(&hdev->public_addr, BDADDR_ANY))
4809 			hci_dev_get_bd_addr_from_property(hdev);
4810 
4811 		if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
4812 		    hdev->set_bdaddr) {
4813 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4814 			if (!ret)
4815 				invalid_bdaddr = false;
4816 		}
4817 	}
4818 
4819 	/* The transport driver can set these quirks before
4820 	 * creating the HCI device or in its setup callback.
4821 	 *
4822 	 * For the invalid BD_ADDR quirk it is possible that
4823 	 * it becomes a valid address if the bootloader does
4824 	 * provide it (see above).
4825 	 *
4826 	 * In case any of them is set, the controller has to
4827 	 * start up as unconfigured.
4828 	 */
4829 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
4830 	    invalid_bdaddr)
4831 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
4832 
4833 	/* For an unconfigured controller it is required to
4834 	 * read at least the version information provided by
4835 	 * the Read Local Version Information command.
4836 	 *
4837 	 * If the set_bdaddr driver callback is provided, then
4838 	 * also the original Bluetooth public device address
4839 	 * will be read using the Read BD Address command.
4840 	 */
4841 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4842 		return hci_unconf_init_sync(hdev);
4843 
4844 	return ret;
4845 }
4846 
4847 /* This function handles hdev init stage:
4848  *
4849  * Calls hci_dev_setup_sync to perform setup stage
4850  * Calls hci_init_sync to perform HCI command init sequence
4851  */
4852 static int hci_dev_init_sync(struct hci_dev *hdev)
4853 {
4854 	int ret;
4855 
4856 	bt_dev_dbg(hdev, "");
4857 
4858 	atomic_set(&hdev->cmd_cnt, 1);
4859 	set_bit(HCI_INIT, &hdev->flags);
4860 
4861 	ret = hci_dev_setup_sync(hdev);
4862 
4863 	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
4864 		/* If public address change is configured, ensure that
4865 		 * the address gets programmed. If the driver does not
4866 		 * support changing the public address, fail the power
4867 		 * on procedure.
4868 		 */
4869 		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
4870 		    hdev->set_bdaddr)
4871 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4872 		else
4873 			ret = -EADDRNOTAVAIL;
4874 	}
4875 
4876 	if (!ret) {
4877 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
4878 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4879 			ret = hci_init_sync(hdev);
4880 			if (!ret && hdev->post_init)
4881 				ret = hdev->post_init(hdev);
4882 		}
4883 	}
4884 
4885 	/* If the HCI Reset command is clearing all diagnostic settings,
4886 	 * then they need to be reprogrammed after the init procedure
4887 	 * completed.
4888 	 */
4889 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
4890 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4891 	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
4892 		ret = hdev->set_diag(hdev, true);
4893 
4894 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4895 		msft_do_open(hdev);
4896 		aosp_do_open(hdev);
4897 	}
4898 
4899 	clear_bit(HCI_INIT, &hdev->flags);
4900 
4901 	return ret;
4902 }
4903 
4904 int hci_dev_open_sync(struct hci_dev *hdev)
4905 {
4906 	int ret;
4907 
4908 	bt_dev_dbg(hdev, "");
4909 
4910 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
4911 		ret = -ENODEV;
4912 		goto done;
4913 	}
4914 
4915 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4916 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4917 		/* Check for rfkill but allow the HCI setup stage to
4918 		 * proceed (which in itself doesn't cause any RF activity).
4919 		 */
4920 		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
4921 			ret = -ERFKILL;
4922 			goto done;
4923 		}
4924 
4925 		/* Check for valid public address or a configured static
4926 		 * random address, but let the HCI setup proceed to
4927 		 * be able to determine if there is a public address
4928 		 * or not.
4929 		 *
4930 		 * In case of user channel usage, it is not important
4931 		 * if a public address or static random address is
4932 		 * available.
4933 		 */
4934 		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4935 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
4936 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
4937 			ret = -EADDRNOTAVAIL;
4938 			goto done;
4939 		}
4940 	}
4941 
4942 	if (test_bit(HCI_UP, &hdev->flags)) {
4943 		ret = -EALREADY;
4944 		goto done;
4945 	}
4946 
4947 	if (hdev->open(hdev)) {
4948 		ret = -EIO;
4949 		goto done;
4950 	}
4951 
4952 	hci_devcd_reset(hdev);
4953 
4954 	set_bit(HCI_RUNNING, &hdev->flags);
4955 	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
4956 
4957 	ret = hci_dev_init_sync(hdev);
4958 	if (!ret) {
4959 		hci_dev_hold(hdev);
4960 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
4961 		hci_adv_instances_set_rpa_expired(hdev, true);
4962 		set_bit(HCI_UP, &hdev->flags);
4963 		hci_sock_dev_event(hdev, HCI_DEV_UP);
4964 		hci_leds_update_powered(hdev, true);
4965 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4966 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
4967 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
4968 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4969 		    hci_dev_test_flag(hdev, HCI_MGMT)) {
4970 			ret = hci_powered_update_sync(hdev);
4971 			mgmt_power_on(hdev, ret);
4972 		}
4973 	} else {
4974 		/* Init failed, cleanup */
4975 		flush_work(&hdev->tx_work);
4976 
4977 		/* Since hci_rx_work() may queue new cmd_work, it should
4978 		 * be flushed first to avoid an unexpected call of
4979 		 * hci_cmd_work().
4980 		 */
4981 		flush_work(&hdev->rx_work);
4982 		flush_work(&hdev->cmd_work);
4983 
4984 		skb_queue_purge(&hdev->cmd_q);
4985 		skb_queue_purge(&hdev->rx_q);
4986 
4987 		if (hdev->flush)
4988 			hdev->flush(hdev);
4989 
4990 		if (hdev->sent_cmd) {
4991 			cancel_delayed_work_sync(&hdev->cmd_timer);
4992 			kfree_skb(hdev->sent_cmd);
4993 			hdev->sent_cmd = NULL;
4994 		}
4995 
4996 		if (hdev->req_skb) {
4997 			kfree_skb(hdev->req_skb);
4998 			hdev->req_skb = NULL;
4999 		}
5000 
5001 		clear_bit(HCI_RUNNING, &hdev->flags);
5002 		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
5003 
5004 		hdev->close(hdev);
5005 		hdev->flags &= BIT(HCI_RAW);
5006 	}
5007 
5008 done:
5009 	return ret;
5010 }
5011 
5012 /* This function requires the caller holds hdev->lock */
5013 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
5014 {
5015 	struct hci_conn_params *p;
5016 
5017 	list_for_each_entry(p, &hdev->le_conn_params, list) {
5018 		hci_pend_le_list_del_init(p);
5019 		if (p->conn) {
5020 			hci_conn_drop(p->conn);
5021 			hci_conn_put(p->conn);
5022 			p->conn = NULL;
5023 		}
5024 	}
5025 
5026 	BT_DBG("All LE pending actions cleared");
5027 }
5028 
5029 static int hci_dev_shutdown(struct hci_dev *hdev)
5030 {
5031 	int err = 0;
5032 	/* Similar to how we first do setup and then set the exclusive access
5033 	 * bit for userspace, we must first unset userchannel and then clean up.
5034 	 * Otherwise, the kernel can't properly use the hci channel to clean up
5035 	 * the controller (some shutdown routines require sending additional
5036 	 * commands to the controller for example).
5037 	 */
5038 	bool was_userchannel =
5039 		hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL);
5040 
5041 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
5042 	    test_bit(HCI_UP, &hdev->flags)) {
5043 		/* Execute vendor specific shutdown routine */
5044 		if (hdev->shutdown)
5045 			err = hdev->shutdown(hdev);
5046 	}
5047 
5048 	if (was_userchannel)
5049 		hci_dev_set_flag(hdev, HCI_USER_CHANNEL);
5050 
5051 	return err;
5052 }
5053 
5054 int hci_dev_close_sync(struct hci_dev *hdev)
5055 {
5056 	bool auto_off;
5057 	int err = 0;
5058 
5059 	bt_dev_dbg(hdev, "");
5060 
5061 	cancel_delayed_work(&hdev->power_off);
5062 	cancel_delayed_work(&hdev->ncmd_timer);
5063 	cancel_delayed_work(&hdev->le_scan_disable);
5064 
5065 	hci_cmd_sync_cancel_sync(hdev, ENODEV);
5066 
5067 	cancel_interleave_scan(hdev);
5068 
5069 	if (hdev->adv_instance_timeout) {
5070 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
5071 		hdev->adv_instance_timeout = 0;
5072 	}
5073 
5074 	err = hci_dev_shutdown(hdev);
5075 
5076 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
5077 		cancel_delayed_work_sync(&hdev->cmd_timer);
5078 		return err;
5079 	}
5080 
5081 	hci_leds_update_powered(hdev, false);
5082 
5083 	/* Flush RX and TX works */
5084 	flush_work(&hdev->tx_work);
5085 	flush_work(&hdev->rx_work);
5086 
5087 	if (hdev->discov_timeout > 0) {
5088 		hdev->discov_timeout = 0;
5089 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5090 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
5091 	}
5092 
5093 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
5094 		cancel_delayed_work(&hdev->service_cache);
5095 
5096 	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
5097 		struct adv_info *adv_instance;
5098 
5099 		cancel_delayed_work_sync(&hdev->rpa_expired);
5100 
5101 		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
5102 			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
5103 	}
5104 
5105 	/* Avoid potential lockdep warnings from the *_flush() calls by
5106 	 * ensuring the workqueue is empty up front.
5107 	 */
5108 	drain_workqueue(hdev->workqueue);
5109 
5110 	hci_dev_lock(hdev);
5111 
5112 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5113 
5114 	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
5115 
5116 	if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5117 	    hci_dev_test_flag(hdev, HCI_MGMT))
5118 		__mgmt_power_off(hdev);
5119 
5120 	hci_inquiry_cache_flush(hdev);
5121 	hci_pend_le_actions_clear(hdev);
5122 	hci_conn_hash_flush(hdev);
5123 	/* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
5124 	smp_unregister(hdev);
5125 	hci_dev_unlock(hdev);
5126 
5127 	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
5128 
5129 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5130 		aosp_do_close(hdev);
5131 		msft_do_close(hdev);
5132 	}
5133 
5134 	if (hdev->flush)
5135 		hdev->flush(hdev);
5136 
5137 	/* Reset device */
5138 	skb_queue_purge(&hdev->cmd_q);
5139 	atomic_set(&hdev->cmd_cnt, 1);
5140 	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
5141 	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5142 		set_bit(HCI_INIT, &hdev->flags);
5143 		hci_reset_sync(hdev);
5144 		clear_bit(HCI_INIT, &hdev->flags);
5145 	}
5146 
5147 	/* Flush cmd work */
5148 	flush_work(&hdev->cmd_work);
5149 
5150 	/* Drop queues */
5151 	skb_queue_purge(&hdev->rx_q);
5152 	skb_queue_purge(&hdev->cmd_q);
5153 	skb_queue_purge(&hdev->raw_q);
5154 
5155 	/* Drop last sent command */
5156 	if (hdev->sent_cmd) {
5157 		cancel_delayed_work_sync(&hdev->cmd_timer);
5158 		kfree_skb(hdev->sent_cmd);
5159 		hdev->sent_cmd = NULL;
5160 	}
5161 
5162 	/* Drop last request */
5163 	if (hdev->req_skb) {
5164 		kfree_skb(hdev->req_skb);
5165 		hdev->req_skb = NULL;
5166 	}
5167 
5168 	clear_bit(HCI_RUNNING, &hdev->flags);
5169 	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
5170 
5171 	/* After this point our queues are empty and no tasks are scheduled. */
5172 	hdev->close(hdev);
5173 
5174 	/* Clear flags */
5175 	hdev->flags &= BIT(HCI_RAW);
5176 	hci_dev_clear_volatile_flags(hdev);
5177 
5178 	memset(hdev->eir, 0, sizeof(hdev->eir));
5179 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
5180 	bacpy(&hdev->random_addr, BDADDR_ANY);
5181 	hci_codec_list_clear(&hdev->local_codecs);
5182 
5183 	hci_dev_put(hdev);
5184 	return err;
5185 }
5186 
5187 /* This function performs the power-on HCI command sequence as follows:
5188  *
5189  * If the controller is already up (HCI_UP), perform the
5190  * hci_powered_update_sync sequence; otherwise run hci_dev_open_sync,
5191  * which follows up with hci_powered_update_sync once init completes.
5192  */
5193 static int hci_power_on_sync(struct hci_dev *hdev)
5194 {
5195 	int err;
5196 
5197 	if (test_bit(HCI_UP, &hdev->flags) &&
5198 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
5199 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
5200 		cancel_delayed_work(&hdev->power_off);
5201 		return hci_powered_update_sync(hdev);
5202 	}
5203 
5204 	err = hci_dev_open_sync(hdev);
5205 	if (err < 0)
5206 		return err;
5207 
5208 	/* During the HCI setup phase, a few error conditions are
5209 	 * ignored and they need to be checked now. If they are still
5210 	 * valid, it is important to turn the device back off.
5211 	 */
5212 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
5213 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
5214 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
5215 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
5216 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
5217 		hci_dev_close_sync(hdev);
5218 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
5219 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
5220 				   HCI_AUTO_OFF_TIMEOUT);
5221 	}
5222 
5223 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
5224 		/* For unconfigured devices, set the HCI_RAW flag
5225 		 * so that userspace can easily identify them.
5226 		 */
5227 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5228 			set_bit(HCI_RAW, &hdev->flags);
5229 
5230 		/* For fully configured devices, this will send
5231 		 * the Index Added event. For unconfigured devices,
5232 		 * it will send the Unconfigured Index Added event.
5233 		 *
5234 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
5235 		 * and no event will be sent.
5236 		 */
5237 		mgmt_index_added(hdev);
5238 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5239 		/* When the controller is now configured, then it
5240 		 * is important to clear the HCI_RAW flag.
5241 		 */
5242 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5243 			clear_bit(HCI_RAW, &hdev->flags);
5244 
5245 		/* Powering on the controller with HCI_CONFIG set only
5246 		 * happens with the transition from unconfigured to
5247 		 * configured. This will send the Index Added event.
5248 		 */
5249 		mgmt_index_added(hdev);
5250 	}
5251 
5252 	return 0;
5253 }
5254 
5255 static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
5256 {
5257 	struct hci_cp_remote_name_req_cancel cp;
5258 
5259 	memset(&cp, 0, sizeof(cp));
5260 	bacpy(&cp.bdaddr, addr);
5261 
5262 	return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
5263 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5264 }
5265 
5266 int hci_stop_discovery_sync(struct hci_dev *hdev)
5267 {
5268 	struct discovery_state *d = &hdev->discovery;
5269 	struct inquiry_entry *e;
5270 	int err;
5271 
5272 	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
5273 
5274 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
5275 		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
5276 			err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
5277 						    0, NULL, HCI_CMD_TIMEOUT);
5278 			if (err)
5279 				return err;
5280 		}
5281 
5282 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
5283 			cancel_delayed_work(&hdev->le_scan_disable);
5284 
5285 			err = hci_scan_disable_sync(hdev);
5286 			if (err)
5287 				return err;
5288 		}
5289 
5290 	} else {
5291 		err = hci_scan_disable_sync(hdev);
5292 		if (err)
5293 			return err;
5294 	}
5295 
5296 	/* Resume advertising if it was paused */
5297 	if (use_ll_privacy(hdev))
5298 		hci_resume_advertising_sync(hdev);
5299 
5300 	/* No further actions needed for LE-only discovery */
5301 	if (d->type == DISCOV_TYPE_LE)
5302 		return 0;
5303 
5304 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
5305 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
5306 						     NAME_PENDING);
5307 		if (!e)
5308 			return 0;
5309 
5310 		return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
5311 	}
5312 
5313 	return 0;
5314 }
5315 
5316 static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
5317 			       u8 reason)
5318 {
5319 	struct hci_cp_disconnect cp;
5320 
5321 	if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
5322 		/* This is a BIS connection, hci_conn_del will
5323 		 * do the necessary cleanup.
5324 		 */
5325 		hci_dev_lock(hdev);
5326 		hci_conn_failed(conn, reason);
5327 		hci_dev_unlock(hdev);
5328 
5329 		return 0;
5330 	}
5331 
5332 	memset(&cp, 0, sizeof(cp));
5333 	cp.handle = cpu_to_le16(conn->handle);
5334 	cp.reason = reason;
5335 
5336 	/* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5337 	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5338 	 * used when suspending or powering off, where we don't want to wait
5339 	 * for the peer's response.
5340 	 */
5341 	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5342 		return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
5343 						sizeof(cp), &cp,
5344 						HCI_EV_DISCONN_COMPLETE,
5345 						HCI_CMD_TIMEOUT, NULL);
5346 
5347 	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp,
5348 				     HCI_CMD_TIMEOUT);
5349 }
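
/* __hci_cmd_sync_status_sk(), used above, additionally blocks until the
 * given completion event (HCI_EV_DISCONN_COMPLETE here) arrives, whereas
 * plain __hci_cmd_sync_status() only waits for the command status or
 * command complete.
 */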
5350 
5351 static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
5352 				      struct hci_conn *conn, u8 reason)
5353 {
5354 	/* Return the reason if still scanning, since the connection will
5355 	 * probably be cleaned up directly.
5356 	 */
5357 	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
5358 		return reason;
5359 
5360 	if (conn->role == HCI_ROLE_SLAVE ||
5361 	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
5362 		return 0;
5363 
5364 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
5365 				     0, NULL, HCI_CMD_TIMEOUT);
5366 }
5367 
5368 static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
5369 				   u8 reason)
5370 {
5371 	if (conn->type == LE_LINK)
5372 		return hci_le_connect_cancel_sync(hdev, conn, reason);
5373 
5374 	if (conn->type == ISO_LINK) {
5375 		/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
5376 		 * page 1857:
5377 		 *
5378 		 * If this command is issued for a CIS on the Central and the
5379 		 * CIS is successfully terminated before being established,
5380 		 * then an HCI_LE_CIS_Established event shall also be sent for
5381 		 * this CIS with the Status Operation Cancelled by Host (0x44).
5382 		 */
5383 		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
5384 			return hci_disconnect_sync(hdev, conn, reason);
5385 
5386 		/* A CIS with no Create CIS sent has nothing to cancel */
5387 		if (bacmp(&conn->dst, BDADDR_ANY))
5388 			return HCI_ERROR_LOCAL_HOST_TERM;
5389 
5390 		/* There is no way to cancel a BIS without terminating the BIG
5391 		 * which is done later on connection cleanup.
5392 		 */
5393 		return 0;
5394 	}
5395 
5396 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
5397 		return 0;
5398 
5399 	/* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5400 	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5401 	 * used when suspending or powering off, where we don't want to wait
5402 	 * for the peer's response.
5403 	 */
5404 	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5405 		return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL,
5406 						6, &conn->dst,
5407 						HCI_EV_CONN_COMPLETE,
5408 						HCI_CMD_TIMEOUT, NULL);
5409 
5410 	return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
5411 				     6, &conn->dst, HCI_CMD_TIMEOUT);
5412 }
5413 
5414 static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
5415 			       u8 reason)
5416 {
5417 	struct hci_cp_reject_sync_conn_req cp;
5418 
5419 	memset(&cp, 0, sizeof(cp));
5420 	bacpy(&cp.bdaddr, &conn->dst);
5421 	cp.reason = reason;
5422 
5423 	/* SCO rejection has its own limited set of
5424 	 * allowed error values (0x0D-0x0F).
5425 	 */
5426 	if (reason < 0x0d || reason > 0x0f)
5427 		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
5428 
5429 	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
5430 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5431 }
5432 
5433 static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
5434 				  u8 reason)
5435 {
5436 	struct hci_cp_le_reject_cis cp;
5437 
5438 	memset(&cp, 0, sizeof(cp));
5439 	cp.handle = cpu_to_le16(conn->handle);
5440 	cp.reason = reason;
5441 
5442 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
5443 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5444 }
5445 
5446 static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
5447 				u8 reason)
5448 {
5449 	struct hci_cp_reject_conn_req cp;
5450 
5451 	if (conn->type == ISO_LINK)
5452 		return hci_le_reject_cis_sync(hdev, conn, reason);
5453 
5454 	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
5455 		return hci_reject_sco_sync(hdev, conn, reason);
5456 
5457 	memset(&cp, 0, sizeof(cp));
5458 	bacpy(&cp.bdaddr, &conn->dst);
5459 	cp.reason = reason;
5460 
5461 	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
5462 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5463 }
5464 
5465 int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
5466 {
5467 	int err = 0;
5468 	u16 handle = conn->handle;
5469 	bool disconnect = false;
5470 	struct hci_conn *c;
5471 
5472 	switch (conn->state) {
5473 	case BT_CONNECTED:
5474 	case BT_CONFIG:
5475 		err = hci_disconnect_sync(hdev, conn, reason);
5476 		break;
5477 	case BT_CONNECT:
5478 		err = hci_connect_cancel_sync(hdev, conn, reason);
5479 		break;
5480 	case BT_CONNECT2:
5481 		err = hci_reject_conn_sync(hdev, conn, reason);
5482 		break;
5483 	case BT_OPEN:
5484 	case BT_BOUND:
5485 		break;
5486 	default:
5487 		disconnect = true;
5488 		break;
5489 	}
5490 
5491 	hci_dev_lock(hdev);
5492 
5493 	/* Check if the connection has been cleaned up concurrently */
5494 	c = hci_conn_hash_lookup_handle(hdev, handle);
5495 	if (!c || c != conn) {
5496 		err = 0;
5497 		goto unlock;
5498 	}
5499 
5500 	/* Clean up the hci_conn object if it cannot be cancelled, as that
5501 	 * likely means the controller and host stack are out of sync, or,
5502 	 * in the LE case, it was still scanning, so it can be cleaned up
5503 	 * safely.
5504 	 */
5505 	if (disconnect) {
5506 		conn->state = BT_CLOSED;
5507 		hci_disconn_cfm(conn, reason);
5508 		hci_conn_del(conn);
5509 	} else {
5510 		hci_conn_failed(conn, reason);
5511 	}
5512 
5513 unlock:
5514 	hci_dev_unlock(hdev);
5515 	return err;
5516 }
5517 
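/* Example (illustrative sketch, not taken from this file): callers are
 * expected to run hci_abort_conn_sync() from the cmd_sync context,
 * typically via a small wrapper queued with hci_cmd_sync_queue_once(),
 * using a standard HCI error such as HCI_ERROR_REMOTE_USER_TERM (0x13):
 *
 *	static int abort_conn_sync(struct hci_dev *hdev, void *data)
 *	{
 *		struct hci_conn *conn = data;
 *
 *		if (!hci_conn_valid(hdev, conn))
 *			return -ECANCELED;
 *
 *		return hci_abort_conn_sync(hdev, conn,
 *					   HCI_ERROR_REMOTE_USER_TERM);
 *	}
 */
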
5518 static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
5519 {
5520 	struct list_head *head = &hdev->conn_hash.list;
5521 	struct hci_conn *conn;
5522 
5523 	rcu_read_lock();
5524 	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
5525 		/* Make sure the connection is not freed while unlocking */
5526 		conn = hci_conn_get(conn);
5527 		rcu_read_unlock();
5528 		/* Disregard possible errors since hci_conn_del shall have been
5529 		 * called even if an error occurred, as that would cause
5530 		 * hci_conn_failed to be invoked, which calls hci_conn_del
5531 		 * internally.
5532 		 */
5533 		hci_abort_conn_sync(hdev, conn, reason);
5534 		hci_conn_put(conn);
5535 		rcu_read_lock();
5536 	}
5537 	rcu_read_unlock();
5538 
5539 	return 0;
5540 }
5541 
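/* Note on the loop above: hci_abort_conn_sync() may sleep, so the
 * iteration cannot stay inside the RCU read-side section. The pattern
 * is to pin the entry, drop RCU, do the sleeping work, then re-enter
 * RCU and restart from the list head since the entry may be gone:
 *
 *	conn = hci_conn_get(conn);
 *	rcu_read_unlock();
 *	hci_abort_conn_sync(hdev, conn, reason);
 *	hci_conn_put(conn);
 *	rcu_read_lock();
 */
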
5542 /* This function performs the power off HCI command sequence as follows:
5543  *
5544  * Clear Advertising
5545  * Stop Discovery
5546  * Disconnect all connections
5547  * hci_dev_close_sync
5548  */
5549 static int hci_power_off_sync(struct hci_dev *hdev)
5550 {
5551 	int err;
5552 
5553 	/* If controller is already down there is nothing to do */
5554 	if (!test_bit(HCI_UP, &hdev->flags))
5555 		return 0;
5556 
5557 	hci_dev_set_flag(hdev, HCI_POWERING_DOWN);
5558 
5559 	if (test_bit(HCI_ISCAN, &hdev->flags) ||
5560 	    test_bit(HCI_PSCAN, &hdev->flags)) {
5561 		err = hci_write_scan_enable_sync(hdev, 0x00);
5562 		if (err)
5563 			goto out;
5564 	}
5565 
5566 	err = hci_clear_adv_sync(hdev, NULL, false);
5567 	if (err)
5568 		goto out;
5569 
5570 	err = hci_stop_discovery_sync(hdev);
5571 	if (err)
5572 		goto out;
5573 
5574 	/* Terminated due to Power Off */
5575 	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
5576 	if (err)
5577 		goto out;
5578 
5579 	err = hci_dev_close_sync(hdev);
5580 
5581 out:
5582 	hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
5583 	return err;
5584 }
5585 
5586 int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
5587 {
5588 	if (val)
5589 		return hci_power_on_sync(hdev);
5590 
5591 	return hci_power_off_sync(hdev);
5592 }
5593 
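/* Example (illustrative): from the cmd_sync context the power state is
 * toggled with a single call, with errors propagated from whichever
 * step of the underlying sequence failed:
 *
 *	err = hci_set_powered_sync(hdev, 0x01);
 *	if (err)
 *		bt_dev_err(hdev, "power on failed (%d)", err);
 */
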
5594 static int hci_write_iac_sync(struct hci_dev *hdev)
5595 {
5596 	struct hci_cp_write_current_iac_lap cp;
5597 
5598 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
5599 		return 0;
5600 
5601 	memset(&cp, 0, sizeof(cp));
5602 
5603 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
5604 		/* Limited discoverable mode */
5605 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
5606 		cp.iac_lap[0] = 0x00;	/* LIAC */
5607 		cp.iac_lap[1] = 0x8b;
5608 		cp.iac_lap[2] = 0x9e;
5609 		cp.iac_lap[3] = 0x33;	/* GIAC */
5610 		cp.iac_lap[4] = 0x8b;
5611 		cp.iac_lap[5] = 0x9e;
5612 	} else {
5613 		/* General discoverable mode */
5614 		cp.num_iac = 1;
5615 		cp.iac_lap[0] = 0x33;	/* GIAC */
5616 		cp.iac_lap[1] = 0x8b;
5617 		cp.iac_lap[2] = 0x9e;
5618 	}
5619 
5620 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
5621 				     (cp.num_iac * 3) + 1, &cp,
5622 				     HCI_CMD_TIMEOUT);
5623 }
5624 
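/* Worked example for the encoding above: LAPs are transmitted in
 * little-endian order, so GIAC 0x9E8B33 is sent as 33 8b 9e and LIAC
 * 0x9E8B00 as 00 8b 9e. In limited discoverable mode with num_iac = 2
 * the parameter length is (2 * 3) + 1 = 7 bytes:
 *
 *	num_iac: 02
 *	iac_lap: 00 8b 9e 33 8b 9e
 */
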
5625 int hci_update_discoverable_sync(struct hci_dev *hdev)
5626 {
5627 	int err = 0;
5628 
5629 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5630 		err = hci_write_iac_sync(hdev);
5631 		if (err)
5632 			return err;
5633 
5634 		err = hci_update_scan_sync(hdev);
5635 		if (err)
5636 			return err;
5637 
5638 		err = hci_update_class_sync(hdev);
5639 		if (err)
5640 			return err;
5641 	}
5642 
5643 	/* Advertising instances don't use the global discoverable setting, so
5644 	 * only update AD if advertising was enabled using Set Advertising.
5645 	 */
5646 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
5647 		err = hci_update_adv_data_sync(hdev, 0x00);
5648 		if (err)
5649 			return err;
5650 
5651 		/* Discoverable mode affects the local advertising
5652 		 * address in limited privacy mode.
5653 		 */
5654 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
5655 			if (ext_adv_capable(hdev))
5656 				err = hci_start_ext_adv_sync(hdev, 0x00);
5657 			else
5658 				err = hci_enable_advertising_sync(hdev);
5659 		}
5660 	}
5661 
5662 	return err;
5663 }
5664 
5665 static int update_discoverable_sync(struct hci_dev *hdev, void *data)
5666 {
5667 	return hci_update_discoverable_sync(hdev);
5668 }
5669 
5670 int hci_update_discoverable(struct hci_dev *hdev)
5671 {
5672 	/* Only queue if it would have any effect */
5673 	if (hdev_is_powered(hdev) &&
5674 	    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5675 	    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
5676 	    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
5677 		return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
5678 					  NULL);
5679 
5680 	return 0;
5681 }
5682 
5683 int hci_update_connectable_sync(struct hci_dev *hdev)
5684 {
5685 	int err;
5686 
5687 	err = hci_update_scan_sync(hdev);
5688 	if (err)
5689 		return err;
5690 
5691 	/* If BR/EDR is not enabled and we disable advertising as a
5692 	 * by-product of disabling connectable, we need to update the
5693 	 * advertising flags.
5694 	 */
5695 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
5696 		err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
5697 
5698 	/* Update the advertising parameters if necessary */
5699 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5700 	    !list_empty(&hdev->adv_instances)) {
5701 		if (ext_adv_capable(hdev))
5702 			err = hci_start_ext_adv_sync(hdev,
5703 						     hdev->cur_adv_instance);
5704 		else
5705 			err = hci_enable_advertising_sync(hdev);
5706 
5707 		if (err)
5708 			return err;
5709 	}
5710 
5711 	return hci_update_passive_scan_sync(hdev);
5712 }
5713 
5714 int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp)
5715 {
5716 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
5717 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
5718 	struct hci_cp_inquiry cp;
5719 
5720 	bt_dev_dbg(hdev, "");
5721 
5722 	if (test_bit(HCI_INQUIRY, &hdev->flags))
5723 		return 0;
5724 
5725 	hci_dev_lock(hdev);
5726 	hci_inquiry_cache_flush(hdev);
5727 	hci_dev_unlock(hdev);
5728 
5729 	memset(&cp, 0, sizeof(cp));
5730 
5731 	if (hdev->discovery.limited)
5732 		memcpy(&cp.lap, liac, sizeof(cp.lap));
5733 	else
5734 		memcpy(&cp.lap, giac, sizeof(cp.lap));
5735 
5736 	cp.length = length;
5737 	cp.num_rsp = num_rsp;
5738 
5739 	return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
5740 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5741 }
5742 
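/* Example (illustrative): the Inquiry_Length parameter is expressed in
 * units of 1.28 s and num_rsp = 0 means an unlimited number of
 * responses, so a standard discovery round with
 * DISCOV_BREDR_INQUIRY_LEN (0x08) runs for 8 * 1.28 s = 10.24 s:
 *
 *	err = hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
 */
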
5743 static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
5744 {
5745 	u8 own_addr_type;
5746 	/* Accept list is not used for discovery */
5747 	u8 filter_policy = 0x00;
5748 	/* Default is to enable duplicates filter */
5749 	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5750 	int err;
5751 
5752 	bt_dev_dbg(hdev, "");
5753 
5754 	/* If the controller is scanning, passive scanning is running.
5755 	 * Thus, temporarily stop it in order to set the discovery
5756 	 * scanning parameters.
5757 	 */
5758 	err = hci_scan_disable_sync(hdev);
5759 	if (err) {
5760 		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
5761 		return err;
5762 	}
5763 
5764 	cancel_interleave_scan(hdev);
5765 
5766 	/* Pause address resolution for active scan and stop advertising if
5767 	 * privacy is enabled.
5768 	 */
5769 	err = hci_pause_addr_resolution(hdev);
5770 	if (err)
5771 		goto failed;
5772 
5773 	/* All active scans will be done with either a resolvable private
5774 	 * address (when privacy feature has been enabled) or non-resolvable
5775 	 * private address.
5776 	 */
5777 	err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
5778 					     &own_addr_type);
5779 	if (err < 0)
5780 		own_addr_type = ADDR_LE_DEV_PUBLIC;
5781 
5782 	if (hci_is_adv_monitoring(hdev) ||
5783 	    (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
5784 	    hdev->discovery.result_filtering)) {
5785 		/* Duplicate filter should be disabled when some advertisement
5786 		 * monitor is activated, otherwise AdvMon can only receive one
5787 		 * advertisement for one peer(*) during active scanning, and
5788 		 * might report loss to these peers.
5789 		 *
5790 		 * If the controller does strict duplicate filtering and the
5791 		 * discovery requires result filtering, disable controller-based
5792 		 * filtering, since that can cause reports that would match the
5793 		 * host filter to not be reported.
5794 		 */
5795 		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5796 	}
5797 
5798 	err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
5799 				  hdev->le_scan_window_discovery,
5800 				  own_addr_type, filter_policy, filter_dup);
5801 	if (!err)
5802 		return err;
5803 
5804 failed:
5805 	/* Resume advertising if it was paused */
5806 	if (use_ll_privacy(hdev))
5807 		hci_resume_advertising_sync(hdev);
5808 
5809 	/* Resume passive scanning */
5810 	hci_update_passive_scan_sync(hdev);
5811 	return err;
5812 }
5813 
5814 static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
5815 {
5816 	int err;
5817 
5818 	bt_dev_dbg(hdev, "");
5819 
5820 	err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
5821 	if (err)
5822 		return err;
5823 
5824 	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
5825 }
5826 
5827 int hci_start_discovery_sync(struct hci_dev *hdev)
5828 {
5829 	unsigned long timeout;
5830 	int err;
5831 
5832 	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
5833 
5834 	switch (hdev->discovery.type) {
5835 	case DISCOV_TYPE_BREDR:
5836 		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
5837 	case DISCOV_TYPE_INTERLEAVED:
5838 		/* When running simultaneous discovery, the LE scanning time
5839 		 * should occupy the whole discovery time since BR/EDR inquiry
5840 		 * and LE scanning are scheduled by the controller.
5841 		 *
5842 		 * For interleaving discovery in comparison, BR/EDR inquiry
5843 		 * and LE scanning are done sequentially with separate
5844 		 * timeouts.
5845 		 */
5846 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
5847 			     &hdev->quirks)) {
5848 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5849 			/* During simultaneous discovery, we double LE scan
5850 			 * interval. We must leave some time for the controller
5851 			 * to do BR/EDR inquiry.
5852 			 */
5853 			err = hci_start_interleaved_discovery_sync(hdev);
5854 			break;
5855 		}
5856 
5857 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
5858 		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5859 		break;
5860 	case DISCOV_TYPE_LE:
5861 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5862 		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5863 		break;
5864 	default:
5865 		return -EINVAL;
5866 	}
5867 
5868 	if (err)
5869 		return err;
5870 
5871 	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
5872 
5873 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
5874 			   timeout);
5875 	return 0;
5876 }
5877 
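/* Example (illustrative): for DISCOV_TYPE_LE the active scan keeps
 * running until the le_scan_disable delayed work fires, i.e.
 * DISCOV_LE_TIMEOUT (10240 ms) after a successful start:
 *
 *	hdev->discovery.type = DISCOV_TYPE_LE;
 *	err = hci_start_discovery_sync(hdev);
 */
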
5878 static void hci_suspend_monitor_sync(struct hci_dev *hdev)
5879 {
5880 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
5881 	case HCI_ADV_MONITOR_EXT_MSFT:
5882 		msft_suspend_sync(hdev);
5883 		break;
5884 	default:
5885 		return;
5886 	}
5887 }
5888 
5889 /* This function disables discovery and marks it as paused */
5890 static int hci_pause_discovery_sync(struct hci_dev *hdev)
5891 {
5892 	int old_state = hdev->discovery.state;
5893 	int err;
5894 
5895 	/* If discovery is already stopped/stopping/paused there is nothing to do */
5896 	if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
5897 	    hdev->discovery_paused)
5898 		return 0;
5899 
5900 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5901 	err = hci_stop_discovery_sync(hdev);
5902 	if (err)
5903 		return err;
5904 
5905 	hdev->discovery_paused = true;
5906 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5907 
5908 	return 0;
5909 }
5910 
5911 static int hci_update_event_filter_sync(struct hci_dev *hdev)
5912 {
5913 	struct bdaddr_list_with_flags *b;
5914 	u8 scan = SCAN_DISABLED;
5915 	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
5916 	int err;
5917 
5918 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
5919 		return 0;
5920 
5921 	/* Some fake CSR controllers lock up after setting this type of
5922 	 * filter, so avoid sending the request altogether.
5923 	 */
5924 	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
5925 		return 0;
5926 
5927 	/* Always clear event filter when starting */
5928 	hci_clear_event_filter_sync(hdev);
5929 
5930 	list_for_each_entry(b, &hdev->accept_list, list) {
5931 		if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
5932 			continue;
5933 
5934 		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
5935 
5936 		err =  hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
5937 						 HCI_CONN_SETUP_ALLOW_BDADDR,
5938 						 &b->bdaddr,
5939 						 HCI_CONN_SETUP_AUTO_ON);
5940 		if (err)
5941 			bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
5942 				   &b->bdaddr);
5943 		else
5944 			scan = SCAN_PAGE;
5945 	}
5946 
5947 	/* Write the page scan setting only when it actually changes */
5948 	if ((scan == SCAN_PAGE && !scanning) ||
5949 	    (scan == SCAN_DISABLED && scanning))
5950 		hci_write_scan_enable_sync(hdev, scan);
5951 
5952 	return 0;
5953 }
5954 
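/* Note on the filter above: HCI_FLT_CONN_SETUP with
 * HCI_CONN_SETUP_ALLOW_BDADDR restricts Connection Setup events to the
 * given address, and HCI_CONN_SETUP_AUTO_ON lets the controller accept
 * the connection on its own, so during suspend only flagged devices can
 * page the controller and wake the host:
 *
 *	hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
 *				  HCI_CONN_SETUP_ALLOW_BDADDR, &b->bdaddr,
 *				  HCI_CONN_SETUP_AUTO_ON);
 */
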
5955 /* This function disables scanning (BR/EDR and LE) and marks it as paused */
5956 static int hci_pause_scan_sync(struct hci_dev *hdev)
5957 {
5958 	if (hdev->scanning_paused)
5959 		return 0;
5960 
5961 	/* Disable page scan if enabled */
5962 	if (test_bit(HCI_PSCAN, &hdev->flags))
5963 		hci_write_scan_enable_sync(hdev, SCAN_DISABLED);
5964 
5965 	hci_scan_disable_sync(hdev);
5966 
5967 	hdev->scanning_paused = true;
5968 
5969 	return 0;
5970 }
5971 
5972 /* This function performs the HCI suspend procedures in the following order:
5973  *
5974  * Pause discovery (active scanning/inquiry)
5975  * Pause Directed Advertising/Advertising
5976  * Pause Scanning (passive scanning in case discovery was not active)
5977  * Disconnect all connections
5978  * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup
5979  * otherwise:
5980  * Update event mask (only set events that are allowed to wake up the host)
5981  * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
5982  * Update passive scanning (lower duty cycle)
5983  * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
5984  */
5985 int hci_suspend_sync(struct hci_dev *hdev)
5986 {
5987 	int err;
5988 
5989 	/* If already marked as suspended there is nothing to do */
5990 	if (hdev->suspended)
5991 		return 0;
5992 
5993 	/* Mark device as suspended */
5994 	hdev->suspended = true;
5995 
5996 	/* Pause discovery if not already stopped */
5997 	hci_pause_discovery_sync(hdev);
5998 
5999 	/* Pause other advertisements */
6000 	hci_pause_advertising_sync(hdev);
6001 
6002 	/* Suspend monitor filters */
6003 	hci_suspend_monitor_sync(hdev);
6004 
6005 	/* Prevent disconnects from causing scanning to be re-enabled */
6006 	hci_pause_scan_sync(hdev);
6007 
6008 	if (hci_conn_count(hdev)) {
6009 		/* Soft disconnect everything (power off) */
6010 		err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
6011 		if (err) {
6012 			/* Set state to BT_RUNNING so resume doesn't notify */
6013 			hdev->suspend_state = BT_RUNNING;
6014 			hci_resume_sync(hdev);
6015 			return err;
6016 		}
6017 
6018 		/* Update the event mask so only the allowed events can wake
6019 		 * up the host.
6020 		 */
6021 		hci_set_event_mask_sync(hdev);
6022 	}
6023 
6024 	/* Only configure accept list if disconnect succeeded and wake
6025 	 * isn't being prevented.
6026 	 */
6027 	if (!hdev->wakeup || !hdev->wakeup(hdev)) {
6028 		hdev->suspend_state = BT_SUSPEND_DISCONNECT;
6029 		return 0;
6030 	}
6031 
6032 	/* Unpause to take care of updating scanning params */
6033 	hdev->scanning_paused = false;
6034 
6035 	/* Enable event filter for paired devices */
6036 	hci_update_event_filter_sync(hdev);
6037 
6038 	/* Update LE passive scan if enabled */
6039 	hci_update_passive_scan_sync(hdev);
6040 
6041 	/* Mark scanning as paused again so later scan updates stay blocked */
6042 	hdev->scanning_paused = true;
6043 
6044 	hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;
6045 
6046 	return 0;
6047 }
6048 
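/* Example (illustrative, assuming a PM notifier as the caller): suspend
 * and resume are paired operations, and a failed disconnect during
 * suspend rolls back via hci_resume_sync() as done above:
 *
 *	err = hci_suspend_sync(hdev);	(before system sleep)
 *	err = hci_resume_sync(hdev);	(after system wakeup)
 */
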
6049 /* This function resumes discovery */
6050 static int hci_resume_discovery_sync(struct hci_dev *hdev)
6051 {
6052 	int err;
6053 
6054 	/* If discovery was not paused there is nothing to do */
6055 	if (!hdev->discovery_paused)
6056 		return 0;
6057 
6058 	hdev->discovery_paused = false;
6059 
6060 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6061 
6062 	err = hci_start_discovery_sync(hdev);
6063 
6064 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
6065 				DISCOVERY_FINDING);
6066 
6067 	return err;
6068 }
6069 
6070 static void hci_resume_monitor_sync(struct hci_dev *hdev)
6071 {
6072 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
6073 	case HCI_ADV_MONITOR_EXT_MSFT:
6074 		msft_resume_sync(hdev);
6075 		break;
6076 	default:
6077 		return;
6078 	}
6079 }
6080 
6081 /* This function resumes scanning and resets the paused flag */
6082 static int hci_resume_scan_sync(struct hci_dev *hdev)
6083 {
6084 	if (!hdev->scanning_paused)
6085 		return 0;
6086 
6087 	hdev->scanning_paused = false;
6088 
6089 	hci_update_scan_sync(hdev);
6090 
6091 	/* Reset passive scanning to normal */
6092 	hci_update_passive_scan_sync(hdev);
6093 
6094 	return 0;
6095 }
6096 
6097 /* This function performs the HCI resume procedures in the following order:
6098  *
6099  * Restore event mask
6100  * Clear event filter
6101  * Update passive scanning (normal duty cycle)
6102  * Resume Directed Advertising/Advertising
6103  * Resume discovery (active scanning/inquiry)
6104  */
6105 int hci_resume_sync(struct hci_dev *hdev)
6106 {
6107 	/* If not marked as suspended there is nothing to do */
6108 	if (!hdev->suspended)
6109 		return 0;
6110 
6111 	hdev->suspended = false;
6112 
6113 	/* Restore event mask */
6114 	hci_set_event_mask_sync(hdev);
6115 
6116 	/* Clear any event filters and restore scan state */
6117 	hci_clear_event_filter_sync(hdev);
6118 
6119 	/* Resume scanning */
6120 	hci_resume_scan_sync(hdev);
6121 
6122 	/* Resume monitor filters */
6123 	hci_resume_monitor_sync(hdev);
6124 
6125 	/* Resume other advertisements */
6126 	hci_resume_advertising_sync(hdev);
6127 
6128 	/* Resume discovery */
6129 	hci_resume_discovery_sync(hdev);
6130 
6131 	return 0;
6132 }
6133 
6134 static bool conn_use_rpa(struct hci_conn *conn)
6135 {
6136 	struct hci_dev *hdev = conn->hdev;
6137 
6138 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
6139 }
6140 
6141 static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
6142 						struct hci_conn *conn)
6143 {
6144 	struct hci_cp_le_set_ext_adv_params cp;
6145 	int err;
6146 	bdaddr_t random_addr;
6147 	u8 own_addr_type;
6148 
6149 	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6150 					     &own_addr_type);
6151 	if (err)
6152 		return err;
6153 
6154 	/* Set require_privacy to false so that the remote device has a
6155 	 * chance of identifying us.
6156 	 */
6157 	err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
6158 				     &own_addr_type, &random_addr);
6159 	if (err)
6160 		return err;
6161 
6162 	memset(&cp, 0, sizeof(cp));
6163 
6164 	cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
6165 	cp.channel_map = hdev->le_adv_channel_map;
6166 	cp.tx_power = HCI_TX_POWER_INVALID;
6167 	cp.primary_phy = HCI_ADV_PHY_1M;
6168 	cp.secondary_phy = HCI_ADV_PHY_1M;
6169 	cp.handle = 0x00; /* Use instance 0 for directed adv */
6170 	cp.own_addr_type = own_addr_type;
6171 	cp.peer_addr_type = conn->dst_type;
6172 	bacpy(&cp.peer_addr, &conn->dst);
6173 
6174 	/* As per Core Spec 5.2, Vol 2, Part E, Sec 7.8.53, the
6175 	 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND does not
6176 	 * support advertising data. When the advertising set already
6177 	 * contains some, the controller shall return the error code
6178 	 * 'Invalid HCI Command Parameters' (0x12). It is therefore
6179 	 * required to remove the adv set for handle 0x00, since instance
6180 	 * 0 is used for directed advertising.
6181 	 */
6182 	err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
6183 	if (err)
6184 		return err;
6185 
6186 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
6187 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6188 	if (err)
6189 		return err;
6190 
6191 	/* Check if the random address needs to be updated */
6192 	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
6193 	    bacmp(&random_addr, BDADDR_ANY) &&
6194 	    bacmp(&random_addr, &hdev->random_addr)) {
6195 		err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
6196 						       &random_addr);
6197 		if (err)
6198 			return err;
6199 	}
6200 
6201 	return hci_enable_ext_advertising_sync(hdev, 0x00);
6202 }
6203 
6204 static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
6205 					    struct hci_conn *conn)
6206 {
6207 	struct hci_cp_le_set_adv_param cp;
6208 	u8 status;
6209 	u8 own_addr_type;
6210 	u8 enable;
6211 
6212 	if (ext_adv_capable(hdev))
6213 		return hci_le_ext_directed_advertising_sync(hdev, conn);
6214 
6215 	/* Clear the HCI_LE_ADV bit temporarily so that the
6216 	 * hci_update_random_address knows that it's safe to go ahead
6217 	 * and write a new random address. The flag will be set back on
6218 	 * as soon as the SET_ADV_ENABLE HCI command completes.
6219 	 */
6220 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
6221 
6222 	/* Set require_privacy to false so that the remote device has a
6223 	 * chance of identifying us.
6224 	 */
6225 	status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6226 						&own_addr_type);
6227 	if (status)
6228 		return status;
6229 
6230 	memset(&cp, 0, sizeof(cp));
6231 
6232 	/* Some controllers might reject the command if the intervals are not
6233 	 * within the range allowed for undirected advertising.
6234 	 * BCM20702A0 is known to be affected by this.
6235 	 */
6236 	cp.min_interval = cpu_to_le16(0x0020);
6237 	cp.max_interval = cpu_to_le16(0x0020);
6238 
6239 	cp.type = LE_ADV_DIRECT_IND;
6240 	cp.own_address_type = own_addr_type;
6241 	cp.direct_addr_type = conn->dst_type;
6242 	bacpy(&cp.direct_addr, &conn->dst);
6243 	cp.channel_map = hdev->le_adv_channel_map;
6244 
6245 	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
6246 				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6247 	if (status)
6248 		return status;
6249 
6250 	enable = 0x01;
6251 
6252 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
6253 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
6254 }
6255 
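/* Worked example for the intervals above: legacy advertising intervals
 * use units of 0.625 ms, so 0x0020 = 32 * 0.625 ms = 20 ms, the lower
 * bound of the valid undirected range (0x0020..0x4000). Compliant
 * controllers ignore these fields for high duty cycle directed
 * advertising, but controllers like the BCM20702A0 noted above validate
 * them anyway.
 */
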
6256 static void set_ext_conn_params(struct hci_conn *conn,
6257 				struct hci_cp_le_ext_conn_param *p)
6258 {
6259 	struct hci_dev *hdev = conn->hdev;
6260 
6261 	memset(p, 0, sizeof(*p));
6262 
6263 	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6264 	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6265 	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6266 	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6267 	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
6268 	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6269 	p->min_ce_len = cpu_to_le16(0x0000);
6270 	p->max_ce_len = cpu_to_le16(0x0000);
6271 }
6272 
6273 static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
6274 				       struct hci_conn *conn, u8 own_addr_type)
6275 {
6276 	struct hci_cp_le_ext_create_conn *cp;
6277 	struct hci_cp_le_ext_conn_param *p;
6278 	u8 data[sizeof(*cp) + sizeof(*p) * 3];
6279 	u32 plen;
6280 
6281 	cp = (void *)data;
6282 	p = (void *)cp->data;
6283 
6284 	memset(cp, 0, sizeof(*cp));
6285 
6286 	bacpy(&cp->peer_addr, &conn->dst);
6287 	cp->peer_addr_type = conn->dst_type;
6288 	cp->own_addr_type = own_addr_type;
6289 
6290 	plen = sizeof(*cp);
6291 
6292 	if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M ||
6293 			      conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) {
6294 		cp->phys |= LE_SCAN_PHY_1M;
6295 		set_ext_conn_params(conn, p);
6296 
6297 		p++;
6298 		plen += sizeof(*p);
6299 	}
6300 
6301 	if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M ||
6302 			      conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) {
6303 		cp->phys |= LE_SCAN_PHY_2M;
6304 		set_ext_conn_params(conn, p);
6305 
6306 		p++;
6307 		plen += sizeof(*p);
6308 	}
6309 
6310 	if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED ||
6311 				 conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) {
6312 		cp->phys |= LE_SCAN_PHY_CODED;
6313 		set_ext_conn_params(conn, p);
6314 
6315 		plen += sizeof(*p);
6316 	}
6317 
6318 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
6319 					plen, data,
6320 					HCI_EV_LE_ENHANCED_CONN_COMPLETE,
6321 					conn->conn_timeout, NULL);
6322 }
6323 
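/* Worked example for the layout above: HCI_OP_LE_EXT_CREATE_CONN takes
 * one hci_cp_le_ext_conn_param block per bit set in cp->phys, in
 * ascending PHY order with no gaps. E.g. with
 * LE_SCAN_PHY_1M | LE_SCAN_PHY_CODED:
 *
 *	plen = sizeof(*cp) + 2 * sizeof(*p);
 *
 * and the Coded PHY parameters directly follow the 1M parameters since
 * the 2M slot is simply absent.
 */
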
6324 static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
6325 {
6326 	struct hci_cp_le_create_conn cp;
6327 	struct hci_conn_params *params;
6328 	u8 own_addr_type;
6329 	int err;
6330 	struct hci_conn *conn = data;
6331 
6332 	if (!hci_conn_valid(hdev, conn))
6333 		return -ECANCELED;
6334 
6335 	bt_dev_dbg(hdev, "conn %p", conn);
6336 
6337 	clear_bit(HCI_CONN_SCANNING, &conn->flags);
6338 	conn->state = BT_CONNECT;
6339 
6340 	/* If requested to connect as peripheral use directed advertising */
6341 	if (conn->role == HCI_ROLE_SLAVE) {
6342 		/* If we're active scanning and simultaneous roles is not
6343 		 * enabled simply reject the attempt.
6344 		 */
6345 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6346 		    hdev->le_scan_type == LE_SCAN_ACTIVE &&
6347 		    !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
6348 			hci_conn_del(conn);
6349 			return -EBUSY;
6350 		}
6351 
6352 		/* Pause advertising while doing directed advertising. */
6353 		hci_pause_advertising_sync(hdev);
6354 
6355 		err = hci_le_directed_advertising_sync(hdev, conn);
6356 		goto done;
6357 	}
6358 
6359 	/* Disable advertising if simultaneous roles is not in use. */
6360 	if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
6361 		hci_pause_advertising_sync(hdev);
6362 
6363 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
6364 	if (params) {
6365 		conn->le_conn_min_interval = params->conn_min_interval;
6366 		conn->le_conn_max_interval = params->conn_max_interval;
6367 		conn->le_conn_latency = params->conn_latency;
6368 		conn->le_supv_timeout = params->supervision_timeout;
6369 	} else {
6370 		conn->le_conn_min_interval = hdev->le_conn_min_interval;
6371 		conn->le_conn_max_interval = hdev->le_conn_max_interval;
6372 		conn->le_conn_latency = hdev->le_conn_latency;
6373 		conn->le_supv_timeout = hdev->le_supv_timeout;
6374 	}
6375 
6376 	/* If controller is scanning, we stop it since some controllers are
6377 	 * not able to scan and connect at the same time. Also set the
6378 	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
6379 	 * handler for scan disabling knows to set the correct discovery
6380 	 * state.
6381 	 */
6382 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
6383 		hci_scan_disable_sync(hdev);
6384 		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
6385 	}
6386 
6387 	/* Update random address, but set require_privacy to false so
6388 	 * that we never connect with a non-resolvable address.
6389 	 */
6390 	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6391 					     &own_addr_type);
6392 	if (err)
6393 		goto done;
6394 
6395 	if (use_ext_conn(hdev)) {
6396 		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
6397 		goto done;
6398 	}
6399 
6400 	memset(&cp, 0, sizeof(cp));
6401 
6402 	cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6403 	cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6404 
6405 	bacpy(&cp.peer_addr, &conn->dst);
6406 	cp.peer_addr_type = conn->dst_type;
6407 	cp.own_address_type = own_addr_type;
6408 	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6409 	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6410 	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
6411 	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6412 	cp.min_ce_len = cpu_to_le16(0x0000);
6413 	cp.max_ce_len = cpu_to_le16(0x0000);
6414 
6415 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
6416 	 *
6417 	 * If this event is unmasked and the HCI_LE_Connection_Complete event
6418 	 * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
6419 	 * sent when a new connection has been created.
6420 	 */
6421 	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
6422 				       sizeof(cp), &cp,
6423 				       use_enhanced_conn_complete(hdev) ?
6424 				       HCI_EV_LE_ENHANCED_CONN_COMPLETE :
6425 				       HCI_EV_LE_CONN_COMPLETE,
6426 				       conn->conn_timeout, NULL);
6427 
6428 done:
6429 	if (err == -ETIMEDOUT)
6430 		hci_le_connect_cancel_sync(hdev, conn, 0x00);
6431 
6432 	/* Re-enable advertising after the connection attempt is finished. */
6433 	hci_resume_advertising_sync(hdev);
6434 	return err;
6435 }
6436 
6437 int hci_le_create_cis_sync(struct hci_dev *hdev)
6438 {
6439 	DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f);
6440 	size_t aux_num_cis = 0;
6441 	struct hci_conn *conn;
6442 	u8 cig = BT_ISO_QOS_CIG_UNSET;
6443 
6444 	/* The spec allows only one pending LE Create CIS command at a time. If
6445 	 * the command is pending now, don't do anything. We check for pending
6446 	 * connections after each CIS Established event.
6447 	 *
6448 	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6449 	 * page 2566:
6450 	 *
6451 	 * If the Host issues this command before all the
6452 	 * HCI_LE_CIS_Established events from the previous use of the
6453 	 * command have been generated, the Controller shall return the
6454 	 * error code Command Disallowed (0x0C).
6455 	 *
6456 	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6457 	 * page 2567:
6458 	 *
6459 	 * When the Controller receives the HCI_LE_Create_CIS command, the
6460 	 * Controller sends the HCI_Command_Status event to the Host. An
6461 	 * HCI_LE_CIS_Established event will be generated for each CIS when it
6462 	 * is established or if it is disconnected or considered lost before
6463 	 * being established; until all the events are generated, the command
6464 	 * remains pending.
6465 	 */
6466 
6467 	hci_dev_lock(hdev);
6468 
6469 	rcu_read_lock();
6470 
6471 	/* Wait until previous Create CIS has completed */
6472 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6473 		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
6474 			goto done;
6475 	}
6476 
6477 	/* Find CIG with all CIS ready */
6478 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6479 		struct hci_conn *link;
6480 
6481 		if (hci_conn_check_create_cis(conn))
6482 			continue;
6483 
6484 		cig = conn->iso_qos.ucast.cig;
6485 
6486 		list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) {
6487 			if (hci_conn_check_create_cis(link) > 0 &&
6488 			    link->iso_qos.ucast.cig == cig &&
6489 			    link->state != BT_CONNECTED) {
6490 				cig = BT_ISO_QOS_CIG_UNSET;
6491 				break;
6492 			}
6493 		}
6494 
6495 		if (cig != BT_ISO_QOS_CIG_UNSET)
6496 			break;
6497 	}
6498 
6499 	if (cig == BT_ISO_QOS_CIG_UNSET)
6500 		goto done;
6501 
6502 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6503 		struct hci_cis *cis = &cmd->cis[aux_num_cis];
6504 
6505 		if (hci_conn_check_create_cis(conn) ||
6506 		    conn->iso_qos.ucast.cig != cig)
6507 			continue;
6508 
6509 		set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6510 		cis->acl_handle = cpu_to_le16(conn->parent->handle);
6511 		cis->cis_handle = cpu_to_le16(conn->handle);
6512 		aux_num_cis++;
6513 
6514 		if (aux_num_cis >= cmd->num_cis)
6515 			break;
6516 	}
6517 	cmd->num_cis = aux_num_cis;
6518 
6519 done:
6520 	rcu_read_unlock();
6521 
6522 	hci_dev_unlock(hdev);
6523 
6524 	if (!aux_num_cis)
6525 		return 0;
6526 
6527 	/* Wait for HCI_LE_CIS_Established */
6528 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
6529 					struct_size(cmd, cis, cmd->num_cis),
6530 					cmd, HCI_EVT_LE_CIS_ESTABLISHED,
6531 					conn->conn_timeout, NULL);
6532 }
6533 
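/* Worked example for the command size above (assuming the usual hci.h
 * layout of a 1-byte num_cis followed by 4-byte ACL/CIS handle pairs):
 * with two ready CIS in the chosen CIG the payload is
 *
 *	plen = struct_size(cmd, cis, 2) = 1 + 2 * 4 = 9 bytes
 *
 * well within the 0x1f entries reserved by DEFINE_FLEX() above.
 */
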
6534 int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
6535 {
6536 	struct hci_cp_le_remove_cig cp;
6537 
6538 	memset(&cp, 0, sizeof(cp));
6539 	cp.cig_id = handle;
6540 
6541 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
6542 				     &cp, HCI_CMD_TIMEOUT);
6543 }
6544 
6545 int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
6546 {
6547 	struct hci_cp_le_big_term_sync cp;
6548 
6549 	memset(&cp, 0, sizeof(cp));
6550 	cp.handle = handle;
6551 
6552 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
6553 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6554 }
6555 
6556 int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
6557 {
6558 	struct hci_cp_le_pa_term_sync cp;
6559 
6560 	memset(&cp, 0, sizeof(cp));
6561 	cp.handle = cpu_to_le16(handle);
6562 
6563 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
6564 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6565 }
6566 
6567 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
6568 			   bool use_rpa, struct adv_info *adv_instance,
6569 			   u8 *own_addr_type, bdaddr_t *rand_addr)
6570 {
6571 	int err;
6572 
6573 	bacpy(rand_addr, BDADDR_ANY);
6574 
6575 	/* If privacy is enabled use a resolvable private address. If
6576 	 * current RPA has expired then generate a new one.
6577 	 */
6578 	if (use_rpa) {
6579 		/* If the controller supports LL Privacy, use own address
6580 		 * type 0x03 (RPA generated and resolved by the controller).
6581 		 */
6582 		if (use_ll_privacy(hdev))
6583 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
6584 		else
6585 			*own_addr_type = ADDR_LE_DEV_RANDOM;
6586 
6587 		if (adv_instance) {
6588 			if (adv_rpa_valid(adv_instance))
6589 				return 0;
6590 		} else {
6591 			if (rpa_valid(hdev))
6592 				return 0;
6593 		}
6594 
6595 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
6596 		if (err < 0) {
6597 			bt_dev_err(hdev, "failed to generate new RPA");
6598 			return err;
6599 		}
6600 
6601 		bacpy(rand_addr, &hdev->rpa);
6602 
6603 		return 0;
6604 	}
6605 
6606 	/* In case of required privacy without resolvable private address,
6607 	 * use a non-resolvable private address. This is useful for
6608 	 * non-connectable advertising.
6609 	 */
6610 	if (require_privacy) {
6611 		bdaddr_t nrpa;
6612 
6613 		while (true) {
6614 			/* The non-resolvable private address is generated
6615 			 * from six random bytes with the two most significant
6616 			 * bits cleared.
6617 			 */
6618 			get_random_bytes(&nrpa, 6);
6619 			nrpa.b[5] &= 0x3f;
6620 
6621 			/* The non-resolvable private address shall not be
6622 			 * equal to the public address.
6623 			 */
6624 			if (bacmp(&hdev->bdaddr, &nrpa))
6625 				break;
6626 		}
6627 
6628 		*own_addr_type = ADDR_LE_DEV_RANDOM;
6629 		bacpy(rand_addr, &nrpa);
6630 
6631 		return 0;
6632 	}
6633 
6634 	/* No privacy so use a public address. */
6635 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
6636 
6637 	return 0;
6638 }
6639 
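/* Worked example for the masking above: the random address sub-type
 * lives in the two most significant bits of the most significant byte,
 * which is b[5] in the little-endian bdaddr_t storage:
 *
 *	nrpa.b[5] &= 0x3f;	00xxxxxx = non-resolvable private
 *
 * while 01xxxxxx (0x40) denotes a resolvable private address and
 * 11xxxxxx (0xc0) a static random address.
 */
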
6640 static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
6641 {
6642 	u8 instance = PTR_UINT(data);
6643 
6644 	return hci_update_adv_data_sync(hdev, instance);
6645 }
6646 
6647 int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
6648 {
6649 	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
6650 				  UINT_PTR(instance), NULL);
6651 }
6652 
6653 static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
6654 {
6655 	struct hci_conn *conn = data;
6656 	struct inquiry_entry *ie;
6657 	struct hci_cp_create_conn cp;
6658 	int err;
6659 
6660 	if (!hci_conn_valid(hdev, conn))
6661 		return -ECANCELED;
6662 
6663 	/* Many controllers disallow HCI Create Connection while an HCI Inquiry
6664 	 * is in progress, so we cancel the Inquiry first before issuing HCI Create
6665 	 * Connection. This may cause the MGMT discovering state to become false
6666 	 * without user space's request but it is okay since the MGMT Discovery
6667 	 * APIs do not promise that discovery should be done forever. Instead,
6668 	 * the user space monitors the status of MGMT discovering and it may
6669 	 * request for discovery again when this flag becomes false.
6670 	 */
6671 	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
6672 		err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
6673 					    NULL, HCI_CMD_TIMEOUT);
6674 		if (err)
6675 			bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
6676 	}
6677 
6678 	conn->state = BT_CONNECT;
6679 	conn->out = true;
6680 	conn->role = HCI_ROLE_MASTER;
6681 
6682 	conn->attempt++;
6683 
6684 	conn->link_policy = hdev->link_policy;
6685 
6686 	memset(&cp, 0, sizeof(cp));
6687 	bacpy(&cp.bdaddr, &conn->dst);
6688 	cp.pscan_rep_mode = 0x02;
6689 
6690 	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
6691 	if (ie) {
6692 		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
6693 			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
6694 			cp.pscan_mode     = ie->data.pscan_mode;
6695 			cp.clock_offset   = ie->data.clock_offset |
6696 					    cpu_to_le16(0x8000);
6697 		}
6698 
6699 		memcpy(conn->dev_class, ie->data.dev_class, 3);
6700 	}
6701 
6702 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
6703 	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
6704 		cp.role_switch = 0x01;
6705 	else
6706 		cp.role_switch = 0x00;
6707 
6708 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
6709 					sizeof(cp), &cp,
6710 					HCI_EV_CONN_COMPLETE,
6711 					conn->conn_timeout, NULL);
6712 }
6713 
6714 int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
6715 {
6716 	return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
6717 				       NULL);
6718 }
6719 
6720 static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
6721 {
6722 	struct hci_conn *conn = data;
6723 
6724 	bt_dev_dbg(hdev, "err %d", err);
6725 
6726 	if (err == -ECANCELED)
6727 		return;
6728 
6729 	hci_dev_lock(hdev);
6730 
6731 	if (!hci_conn_valid(hdev, conn))
6732 		goto done;
6733 
6734 	if (!err) {
6735 		hci_connect_le_scan_cleanup(conn, 0x00);
6736 		goto done;
6737 	}
6738 
6739 	/* Check if connection is still pending */
6740 	if (conn != hci_lookup_le_connect(hdev))
6741 		goto done;
6742 
6743 	/* Flush to make sure we send create conn cancel command if needed */
6744 	flush_delayed_work(&conn->le_conn_timeout);
6745 	hci_conn_failed(conn, bt_status(err));
6746 
6747 done:
6748 	hci_dev_unlock(hdev);
6749 }
6750 
6751 int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
6752 {
6753 	return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
6754 				       create_le_conn_complete);
6755 }
6756 
6757 int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
6758 {
6759 	if (conn->state != BT_OPEN)
6760 		return -EINVAL;
6761 
6762 	switch (conn->type) {
6763 	case ACL_LINK:
6764 		return !hci_cmd_sync_dequeue_once(hdev,
6765 						  hci_acl_create_conn_sync,
6766 						  conn, NULL);
6767 	case LE_LINK:
6768 		return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
6769 						  conn, create_le_conn_complete);
6770 	}
6771 
6772 	return -ENOENT;
6773 }
6774 
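/* Note on the pairing above: hci_cmd_sync_dequeue_once() cancels a
 * pending instance by looking it up with the same function, data and
 * completion callback that were used when queueing, so queue and
 * dequeue calls must match:
 *
 *	hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
 *				create_le_conn_complete);
 *	hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync, conn,
 *				  create_le_conn_complete);
 */
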
6775 int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
6776 			    struct hci_conn_params *params)
6777 {
6778 	struct hci_cp_le_conn_update cp;
6779 
6780 	memset(&cp, 0, sizeof(cp));
6781 	cp.handle		= cpu_to_le16(conn->handle);
6782 	cp.conn_interval_min	= cpu_to_le16(params->conn_min_interval);
6783 	cp.conn_interval_max	= cpu_to_le16(params->conn_max_interval);
6784 	cp.conn_latency		= cpu_to_le16(params->conn_latency);
6785 	cp.supervision_timeout	= cpu_to_le16(params->supervision_timeout);
6786 	cp.min_ce_len		= cpu_to_le16(0x0000);
6787 	cp.max_ce_len		= cpu_to_le16(0x0000);
6788 
6789 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
6790 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6791 }
6792
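/* Worked example for the units above (per the Core spec):
 * conn_interval_min/max are in 1.25 ms units, conn_latency counts
 * connection events the peripheral may skip, and supervision_timeout is
 * in 10 ms units. A 50 ms interval with a 5 s timeout encodes as:
 *
 *	params->conn_min_interval = 0x0028;	40 * 1.25 ms = 50 ms
 *	params->conn_max_interval = 0x0028;
 *	params->supervision_timeout = 0x01f4;	500 * 10 ms = 5 s
 */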