xref: /linux/net/bluetooth/hci_sync.c (revision 4b132aacb0768ac1e652cf517097ea6f237214b9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * BlueZ - Bluetooth protocol stack for Linux
4  *
5  * Copyright (C) 2021 Intel Corporation
6  * Copyright 2023 NXP
7  */
8 
9 #include <linux/property.h>
10 
11 #include <net/bluetooth/bluetooth.h>
12 #include <net/bluetooth/hci_core.h>
13 #include <net/bluetooth/mgmt.h>
14 
15 #include "hci_codec.h"
16 #include "hci_debugfs.h"
17 #include "smp.h"
18 #include "eir.h"
19 #include "msft.h"
20 #include "aosp.h"
21 #include "leds.h"
22 
23 static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
24 				  struct sk_buff *skb)
25 {
26 	bt_dev_dbg(hdev, "result 0x%2.2x", result);
27 
28 	if (hdev->req_status != HCI_REQ_PEND)
29 		return;
30 
31 	hdev->req_result = result;
32 	hdev->req_status = HCI_REQ_DONE;
33 
34 	/* Free the request command so it is not used as a response */
35 	kfree_skb(hdev->req_skb);
36 	hdev->req_skb = NULL;
37 
38 	if (skb) {
39 		struct sock *sk = hci_skb_sk(skb);
40 
41 		/* Drop sk reference if set */
42 		if (sk)
43 			sock_put(sk);
44 
45 		hdev->req_rsp = skb_get(skb);
46 	}
47 
48 	wake_up_interruptible(&hdev->req_wait_q);
49 }
50 
51 struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
52 				   const void *param, struct sock *sk)
53 {
54 	int len = HCI_COMMAND_HDR_SIZE + plen;
55 	struct hci_command_hdr *hdr;
56 	struct sk_buff *skb;
57 
58 	skb = bt_skb_alloc(len, GFP_ATOMIC);
59 	if (!skb)
60 		return NULL;
61 
62 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
63 	hdr->opcode = cpu_to_le16(opcode);
64 	hdr->plen   = plen;
65 
66 	if (plen)
67 		skb_put_data(skb, param, plen);
68 
69 	bt_dev_dbg(hdev, "skb len %d", skb->len);
70 
71 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
72 	hci_skb_opcode(skb) = opcode;
73 
74 	/* Grab a reference if the command needs to be associated with a socket
75 	 * (e.g. the mgmt socket that initiated the command).
76 	 */
77 	if (sk) {
78 		hci_skb_sk(skb) = sk;
79 		sock_hold(sk);
80 	}
81 
82 	return skb;
83 }
84 
85 static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
86 			     const void *param, u8 event, struct sock *sk)
87 {
88 	struct hci_dev *hdev = req->hdev;
89 	struct sk_buff *skb;
90 
91 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
92 
93 	/* If an error occurred during request building, there is no point in
94 	 * queueing the HCI command. We can simply return.
95 	 */
96 	if (req->err)
97 		return;
98 
99 	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
100 	if (!skb) {
101 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
102 			   opcode);
103 		req->err = -ENOMEM;
104 		return;
105 	}
106 
107 	if (skb_queue_empty(&req->cmd_q))
108 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
109 
110 	hci_skb_event(skb) = event;
111 
112 	skb_queue_tail(&req->cmd_q, skb);
113 }
114 
115 static int hci_cmd_sync_run(struct hci_request *req)
116 {
117 	struct hci_dev *hdev = req->hdev;
118 	struct sk_buff *skb;
119 	unsigned long flags;
120 
121 	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
122 
123 	/* If an error occurred during request building, remove all HCI
124 	 * commands queued on the HCI request queue.
125 	 */
126 	if (req->err) {
127 		skb_queue_purge(&req->cmd_q);
128 		return req->err;
129 	}
130 
131 	/* Do not allow empty requests */
132 	if (skb_queue_empty(&req->cmd_q))
133 		return -ENODATA;
134 
135 	skb = skb_peek_tail(&req->cmd_q);
136 	bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
137 	bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
138 
139 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
140 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
141 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
142 
143 	queue_work(hdev->workqueue, &hdev->cmd_work);
144 
145 	return 0;
146 }
147 
148 static void hci_request_init(struct hci_request *req, struct hci_dev *hdev)
149 {
150 	skb_queue_head_init(&req->cmd_q);
151 	req->hdev = hdev;
152 	req->err = 0;
153 }
154 
155 /* This function requires the caller holds hdev->req_lock. */
156 struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
157 				  const void *param, u8 event, u32 timeout,
158 				  struct sock *sk)
159 {
160 	struct hci_request req;
161 	struct sk_buff *skb;
162 	int err = 0;
163 
164 	bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
165 
166 	hci_request_init(&req, hdev);
167 
168 	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
169 
170 	hdev->req_status = HCI_REQ_PEND;
171 
172 	err = hci_cmd_sync_run(&req);
173 	if (err < 0)
174 		return ERR_PTR(err);
175 
176 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
177 					       hdev->req_status != HCI_REQ_PEND,
178 					       timeout);
179 
180 	if (err == -ERESTARTSYS)
181 		return ERR_PTR(-EINTR);
182 
183 	switch (hdev->req_status) {
184 	case HCI_REQ_DONE:
185 		err = -bt_to_errno(hdev->req_result);
186 		break;
187 
188 	case HCI_REQ_CANCELED:
189 		err = -hdev->req_result;
190 		break;
191 
192 	default:
193 		err = -ETIMEDOUT;
194 		break;
195 	}
196 
197 	hdev->req_status = 0;
198 	hdev->req_result = 0;
199 	skb = hdev->req_rsp;
200 	hdev->req_rsp = NULL;
201 
202 	bt_dev_dbg(hdev, "end: err %d", err);
203 
204 	if (err < 0) {
205 		kfree_skb(skb);
206 		return ERR_PTR(err);
207 	}
208 
209 	return skb;
210 }
211 EXPORT_SYMBOL(__hci_cmd_sync_sk);
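
/* Example (illustrative sketch, not part of the original file): issuing a
 * command from a context that already holds hdev->req_lock, e.g. inside
 * another *_sync helper. The opcode choice is arbitrary.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *				0, HCI_CMD_TIMEOUT, NULL);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data, then drop the reference ...
 *	kfree_skb(skb);
 */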
212 
213 /* This function requires the caller holds hdev->req_lock. */
214 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
215 			       const void *param, u32 timeout)
216 {
217 	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
218 }
219 EXPORT_SYMBOL(__hci_cmd_sync);
220 
221 /* Send HCI command and wait for command complete event */
222 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
223 			     const void *param, u32 timeout)
224 {
225 	struct sk_buff *skb;
226 
227 	if (!test_bit(HCI_UP, &hdev->flags))
228 		return ERR_PTR(-ENETDOWN);
229 
230 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
231 
232 	hci_req_sync_lock(hdev);
233 	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
234 	hci_req_sync_unlock(hdev);
235 
236 	return skb;
237 }
238 EXPORT_SYMBOL(hci_cmd_sync);
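
/* Example (illustrative sketch): the unlocked variant for callers outside
 * hdev->req_lock, e.g. a vendor driver setup path. The returned skb holds
 * the Command Complete parameters and must be freed by the caller (a real
 * caller would also validate skb->len before parsing):
 *
 *	struct hci_rp_read_bd_addr *rp;
 *	struct sk_buff *skb;
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	rp = (struct hci_rp_read_bd_addr *)skb->data;
 *	bacpy(&hdev->public_addr, &rp->bdaddr);
 *	kfree_skb(skb);
 */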
239 
240 /* This function requires the caller holds hdev->req_lock. */
241 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
242 				  const void *param, u8 event, u32 timeout)
243 {
244 	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
245 				 NULL);
246 }
247 EXPORT_SYMBOL(__hci_cmd_sync_ev);
248 
249 /* This function requires the caller holds hdev->req_lock. */
250 int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
251 			     const void *param, u8 event, u32 timeout,
252 			     struct sock *sk)
253 {
254 	struct sk_buff *skb;
255 	u8 status;
256 
257 	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
258 	if (IS_ERR(skb)) {
259 		if (!event)
260 			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
261 				   PTR_ERR(skb));
262 		return PTR_ERR(skb);
263 	}
264 
265 	/* If the command only returns a Command Status event, skb is set to
266 	 * NULL since there are no return parameters. On failure IS_ERR(skb)
267 	 * is true and the actual error can be retrieved with PTR_ERR(skb).
268 	 */
269 	if (!skb)
270 		return 0;
271 
272 	status = skb->data[0];
273 
274 	kfree_skb(skb);
275 
276 	return status;
277 }
278 EXPORT_SYMBOL(__hci_cmd_sync_status_sk);
279 
280 int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
281 			  const void *param, u32 timeout)
282 {
283 	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
284 					NULL);
285 }
286 EXPORT_SYMBOL(__hci_cmd_sync_status);
287 
288 int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
289 			const void *param, u32 timeout)
290 {
291 	int err;
292 
293 	hci_req_sync_lock(hdev);
294 	err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
295 	hci_req_sync_unlock(hdev);
296 
297 	return err;
298 }
299 EXPORT_SYMBOL(hci_cmd_sync_status);
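
/* Example (illustrative): when only success/failure matters, the *_status
 * variants avoid any skb handling; the return value is 0 on success, a
 * positive HCI status code, or a negative errno:
 *
 *	err = hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
 *				  HCI_CMD_TIMEOUT);
 *	if (err)
 *		bt_dev_err(hdev, "HCI_Reset failed (%d)", err);
 */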
300 
301 static void hci_cmd_sync_work(struct work_struct *work)
302 {
303 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
304 
305 	bt_dev_dbg(hdev, "");
306 
307 	/* Dequeue all entries and run them */
308 	while (1) {
309 		struct hci_cmd_sync_work_entry *entry;
310 
311 		mutex_lock(&hdev->cmd_sync_work_lock);
312 		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
313 						 struct hci_cmd_sync_work_entry,
314 						 list);
315 		if (entry)
316 			list_del(&entry->list);
317 		mutex_unlock(&hdev->cmd_sync_work_lock);
318 
319 		if (!entry)
320 			break;
321 
322 		bt_dev_dbg(hdev, "entry %p", entry);
323 
324 		if (entry->func) {
325 			int err;
326 
327 			hci_req_sync_lock(hdev);
328 			err = entry->func(hdev, entry->data);
329 			if (entry->destroy)
330 				entry->destroy(hdev, entry->data, err);
331 			hci_req_sync_unlock(hdev);
332 		}
333 
334 		kfree(entry);
335 	}
336 }
337 
338 static void hci_cmd_sync_cancel_work(struct work_struct *work)
339 {
340 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);
341 
342 	cancel_delayed_work_sync(&hdev->cmd_timer);
343 	cancel_delayed_work_sync(&hdev->ncmd_timer);
344 	atomic_set(&hdev->cmd_cnt, 1);
345 
346 	wake_up_interruptible(&hdev->req_wait_q);
347 }
348 
349 static int hci_scan_disable_sync(struct hci_dev *hdev);
350 static int scan_disable_sync(struct hci_dev *hdev, void *data)
351 {
352 	return hci_scan_disable_sync(hdev);
353 }
354 
355 static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
356 {
357 	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0);
358 }
359 
360 static void le_scan_disable(struct work_struct *work)
361 {
362 	struct hci_dev *hdev = container_of(work, struct hci_dev,
363 					    le_scan_disable.work);
364 	int status;
365 
366 	bt_dev_dbg(hdev, "");
367 	hci_dev_lock(hdev);
368 
369 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
370 		goto _return;
371 
372 	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
373 	if (status) {
374 		bt_dev_err(hdev, "failed to disable LE scan: %d", status);
375 		goto _return;
376 	}
377 
378 	/* If we were running an LE-only scan, change the discovery state. If
379 	 * we were running both LE and BR/EDR inquiry simultaneously,
380 	 * and BR/EDR inquiry is already finished, stop discovery,
381 	 * otherwise BR/EDR inquiry will stop discovery when finished.
382 	 * If we are about to resolve a remote device name, do not change
383 	 * the discovery state.
384 	 */
385 
386 	if (hdev->discovery.type == DISCOV_TYPE_LE)
387 		goto discov_stopped;
388 
389 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
390 		goto _return;
391 
392 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
393 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
394 		    hdev->discovery.state != DISCOVERY_RESOLVING)
395 			goto discov_stopped;
396 
397 		goto _return;
398 	}
399 
400 	status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL);
401 	if (status) {
402 		bt_dev_err(hdev, "inquiry failed: status %d", status);
403 		goto discov_stopped;
404 	}
405 
406 	goto _return;
407 
408 discov_stopped:
409 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
410 
411 _return:
412 	hci_dev_unlock(hdev);
413 }
414 
415 static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
416 				       u8 filter_dup);
417 
418 static int reenable_adv_sync(struct hci_dev *hdev, void *data)
419 {
420 	bt_dev_dbg(hdev, "");
421 
422 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
423 	    list_empty(&hdev->adv_instances))
424 		return 0;
425 
426 	if (hdev->cur_adv_instance) {
427 		return hci_schedule_adv_instance_sync(hdev,
428 						      hdev->cur_adv_instance,
429 						      true);
430 	} else {
431 		if (ext_adv_capable(hdev)) {
432 			hci_start_ext_adv_sync(hdev, 0x00);
433 		} else {
434 			hci_update_adv_data_sync(hdev, 0x00);
435 			hci_update_scan_rsp_data_sync(hdev, 0x00);
436 			hci_enable_advertising_sync(hdev);
437 		}
438 	}
439 
440 	return 0;
441 }
442 
443 static void reenable_adv(struct work_struct *work)
444 {
445 	struct hci_dev *hdev = container_of(work, struct hci_dev,
446 					    reenable_adv_work);
447 	int status;
448 
449 	bt_dev_dbg(hdev, "");
450 
451 	hci_dev_lock(hdev);
452 
453 	status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL);
454 	if (status)
455 		bt_dev_err(hdev, "failed to reenable ADV: %d", status);
456 
457 	hci_dev_unlock(hdev);
458 }
459 
460 static void cancel_adv_timeout(struct hci_dev *hdev)
461 {
462 	if (hdev->adv_instance_timeout) {
463 		hdev->adv_instance_timeout = 0;
464 		cancel_delayed_work(&hdev->adv_instance_expire);
465 	}
466 }
467 
468 /* For a single instance:
469  * - force == true: The instance will be removed even when its remaining
470  *   lifetime is not zero.
471  * - force == false: the instance will be deactivated but kept stored unless
472  *   the remaining lifetime is zero.
473  *
474  * For instance == 0x00:
475  * - force == true: All instances will be removed regardless of their timeout
476  *   setting.
477  * - force == false: Only instances that have a timeout will be removed.
478  */
479 int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
480 				u8 instance, bool force)
481 {
482 	struct adv_info *adv_instance, *n, *next_instance = NULL;
483 	int err;
484 	u8 rem_inst;
485 
486 	/* Cancel any timeout concerning the removed instance(s). */
487 	if (!instance || hdev->cur_adv_instance == instance)
488 		cancel_adv_timeout(hdev);
489 
490 	/* Get the next instance to advertise BEFORE we remove
491 	 * the current one. This can be the same instance again
492 	 * if there is only one instance.
493 	 */
494 	if (instance && hdev->cur_adv_instance == instance)
495 		next_instance = hci_get_next_instance(hdev, instance);
496 
497 	if (instance == 0x00) {
498 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
499 					 list) {
500 			if (!(force || adv_instance->timeout))
501 				continue;
502 
503 			rem_inst = adv_instance->instance;
504 			err = hci_remove_adv_instance(hdev, rem_inst);
505 			if (!err)
506 				mgmt_advertising_removed(sk, hdev, rem_inst);
507 		}
508 	} else {
509 		adv_instance = hci_find_adv_instance(hdev, instance);
510 
511 		if (force || (adv_instance && adv_instance->timeout &&
512 			      !adv_instance->remaining_time)) {
513 			/* Don't advertise a removed instance. */
514 			if (next_instance &&
515 			    next_instance->instance == instance)
516 				next_instance = NULL;
517 
518 			err = hci_remove_adv_instance(hdev, instance);
519 			if (!err)
520 				mgmt_advertising_removed(sk, hdev, instance);
521 		}
522 	}
523 
524 	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
525 		return 0;
526 
527 	if (next_instance && !ext_adv_capable(hdev))
528 		return hci_schedule_adv_instance_sync(hdev,
529 						      next_instance->instance,
530 						      false);
531 
532 	return 0;
533 }
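
/* Example (illustrative): clearing instances according to the rules above.
 * With instance == 0x00 and force == false only instances that have a
 * timeout set are removed; force == true wipes every instance:
 *
 *	hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
 */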
534 
535 static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data)
536 {
537 	u8 instance = *(u8 *)data;
538 
539 	kfree(data);
540 
541 	hci_clear_adv_instance_sync(hdev, NULL, instance, false);
542 
543 	if (list_empty(&hdev->adv_instances))
544 		return hci_disable_advertising_sync(hdev);
545 
546 	return 0;
547 }
548 
549 static void adv_timeout_expire(struct work_struct *work)
550 {
551 	u8 *inst_ptr;
552 	struct hci_dev *hdev = container_of(work, struct hci_dev,
553 					    adv_instance_expire.work);
554 
555 	bt_dev_dbg(hdev, "");
556 
557 	hci_dev_lock(hdev);
558 
559 	hdev->adv_instance_timeout = 0;
560 
561 	if (hdev->cur_adv_instance == 0x00)
562 		goto unlock;
563 
564 	inst_ptr = kmalloc(1, GFP_KERNEL);
565 	if (!inst_ptr)
566 		goto unlock;
567 
568 	*inst_ptr = hdev->cur_adv_instance;
569 	hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL);
570 
571 unlock:
572 	hci_dev_unlock(hdev);
573 }
574 
575 static bool is_interleave_scanning(struct hci_dev *hdev)
576 {
577 	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
578 }
579 
580 static int hci_passive_scan_sync(struct hci_dev *hdev);
581 
582 static void interleave_scan_work(struct work_struct *work)
583 {
584 	struct hci_dev *hdev = container_of(work, struct hci_dev,
585 					    interleave_scan.work);
586 	unsigned long timeout;
587 
588 	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
589 		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
590 	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
591 		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
592 	} else {
593 		bt_dev_err(hdev, "unexpected interleave scan state");
594 		return;
595 	}
596 
597 	hci_passive_scan_sync(hdev);
598 
599 	hci_dev_lock(hdev);
600 
601 	switch (hdev->interleave_scan_state) {
602 	case INTERLEAVE_SCAN_ALLOWLIST:
603 		bt_dev_dbg(hdev, "next state: no filter");
604 		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
605 		break;
606 	case INTERLEAVE_SCAN_NO_FILTER:
607 		bt_dev_dbg(hdev, "next state: allowlist");
608 		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
609 		break;
610 	case INTERLEAVE_SCAN_NONE:
611 		bt_dev_err(hdev, "unexpected interleave scan state");
612 	}
613 
614 	hci_dev_unlock(hdev);
615 
616 	/* Don't continue interleaving if it was canceled */
617 	if (is_interleave_scanning(hdev))
618 		queue_delayed_work(hdev->req_workqueue,
619 				   &hdev->interleave_scan, timeout);
620 }
621 
622 void hci_cmd_sync_init(struct hci_dev *hdev)
623 {
624 	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
625 	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
626 	mutex_init(&hdev->cmd_sync_work_lock);
627 	mutex_init(&hdev->unregister_lock);
628 
629 	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
630 	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
631 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
632 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
633 	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
634 }
635 
636 static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
637 				       struct hci_cmd_sync_work_entry *entry,
638 				       int err)
639 {
640 	if (entry->destroy)
641 		entry->destroy(hdev, entry->data, err);
642 
643 	list_del(&entry->list);
644 	kfree(entry);
645 }
646 
647 void hci_cmd_sync_clear(struct hci_dev *hdev)
648 {
649 	struct hci_cmd_sync_work_entry *entry, *tmp;
650 
651 	cancel_work_sync(&hdev->cmd_sync_work);
652 	cancel_work_sync(&hdev->reenable_adv_work);
653 
654 	mutex_lock(&hdev->cmd_sync_work_lock);
655 	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
656 		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
657 	mutex_unlock(&hdev->cmd_sync_work_lock);
658 }
659 
660 void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
661 {
662 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
663 
664 	if (hdev->req_status == HCI_REQ_PEND) {
665 		hdev->req_result = err;
666 		hdev->req_status = HCI_REQ_CANCELED;
667 
668 		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
669 	}
670 }
671 EXPORT_SYMBOL(hci_cmd_sync_cancel);
672 
673 /* Cancel ongoing command request synchronously:
674  *
675  * - Set the result and mark the status as HCI_REQ_CANCELED
676  * - Wake up the command sync thread
677  */
678 void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
679 {
680 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
681 
682 	if (hdev->req_status == HCI_REQ_PEND) {
683 		/* req_result is __u32 so error must be positive to be properly
684 		 * propagated.
685 		 */
686 		hdev->req_result = err < 0 ? -err : err;
687 		hdev->req_status = HCI_REQ_CANCELED;
688 
689 		wake_up_interruptible(&hdev->req_wait_q);
690 	}
691 }
692 EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
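
/* Example (illustrative): a transport error path could abort a pending
 * request so the waiter in __hci_cmd_sync_sk() returns promptly; the error
 * is negated into req_result and surfaces there via HCI_REQ_CANCELED:
 *
 *	hci_cmd_sync_cancel_sync(hdev, -EIO);
 */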
693 
694 /* Submit HCI command to be run in cmd_sync_work context:
695  *
696  * - hdev must _not_ be unregistered
697  */
698 int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
699 			void *data, hci_cmd_sync_work_destroy_t destroy)
700 {
701 	struct hci_cmd_sync_work_entry *entry;
702 	int err = 0;
703 
704 	mutex_lock(&hdev->unregister_lock);
705 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
706 		err = -ENODEV;
707 		goto unlock;
708 	}
709 
710 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
711 	if (!entry) {
712 		err = -ENOMEM;
713 		goto unlock;
714 	}
715 	entry->func = func;
716 	entry->data = data;
717 	entry->destroy = destroy;
718 
719 	mutex_lock(&hdev->cmd_sync_work_lock);
720 	list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
721 	mutex_unlock(&hdev->cmd_sync_work_lock);
722 
723 	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
724 
725 unlock:
726 	mutex_unlock(&hdev->unregister_lock);
727 	return err;
728 }
729 EXPORT_SYMBOL(hci_cmd_sync_submit);
730 
731 /* Queue HCI command:
732  *
733  * - hdev must be running
734  */
735 int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
736 		       void *data, hci_cmd_sync_work_destroy_t destroy)
737 {
738 	/* Only queue the command if hdev is running, which means it has been
739 	 * opened and is either in the init phase or already up.
740 	 */
741 	if (!test_bit(HCI_RUNNING, &hdev->flags))
742 		return -ENETDOWN;
743 
744 	return hci_cmd_sync_submit(hdev, func, data, destroy);
745 }
746 EXPORT_SYMBOL(hci_cmd_sync_queue);
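
/* Example (illustrative sketch): deferring work to the cmd_sync context.
 * Both callbacks are hypothetical; the function runs with hdev->req_lock
 * held, and the destroy callback releases the data whether the entry ran
 * or was canceled:
 *
 *	static int set_name_sync(struct hci_dev *hdev, void *data)
 *	{
 *		struct mgmt_cp_set_local_name *cp = data;
 *
 *		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
 *		return hci_update_name_sync(hdev);
 *	}
 *
 *	static void set_name_destroy(struct hci_dev *hdev, void *data, int err)
 *	{
 *		kfree(data);
 *	}
 *
 *	err = hci_cmd_sync_queue(hdev, set_name_sync, cp, set_name_destroy);
 */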
747 
748 static struct hci_cmd_sync_work_entry *
749 _hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
750 			   void *data, hci_cmd_sync_work_destroy_t destroy)
751 {
752 	struct hci_cmd_sync_work_entry *entry, *tmp;
753 
754 	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
755 		if (func && entry->func != func)
756 			continue;
757 
758 		if (data && entry->data != data)
759 			continue;
760 
761 		if (destroy && entry->destroy != destroy)
762 			continue;
763 
764 		return entry;
765 	}
766 
767 	return NULL;
768 }
769 
770 /* Queue HCI command entry once:
771  *
772  * - Look up whether an entry already exists and, only if it doesn't, create
773  *   a new entry and queue it.
774  */
775 int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
776 			    void *data, hci_cmd_sync_work_destroy_t destroy)
777 {
778 	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
779 		return 0;
780 
781 	return hci_cmd_sync_queue(hdev, func, data, destroy);
782 }
783 EXPORT_SYMBOL(hci_cmd_sync_queue_once);
784 
785 /* Lookup HCI command entry:
786  *
787  * - Return the first entry that matches the function callback, data or
788  *   destroy callback.
789  */
790 struct hci_cmd_sync_work_entry *
791 hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
792 			  void *data, hci_cmd_sync_work_destroy_t destroy)
793 {
794 	struct hci_cmd_sync_work_entry *entry;
795 
796 	mutex_lock(&hdev->cmd_sync_work_lock);
797 	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
798 	mutex_unlock(&hdev->cmd_sync_work_lock);
799 
800 	return entry;
801 }
802 EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);
803 
804 /* Cancel HCI command entry */
805 void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
806 			       struct hci_cmd_sync_work_entry *entry)
807 {
808 	mutex_lock(&hdev->cmd_sync_work_lock);
809 	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
810 	mutex_unlock(&hdev->cmd_sync_work_lock);
811 }
812 EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);
813 
814 /* Dequeue one HCI command entry:
815  *
816  * - Look up and cancel the first entry that matches.
817  */
818 bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
819 			       hci_cmd_sync_work_func_t func,
820 			       void *data, hci_cmd_sync_work_destroy_t destroy)
821 {
822 	struct hci_cmd_sync_work_entry *entry;
823 
824 	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
825 	if (!entry)
826 		return false;
827 
828 	hci_cmd_sync_cancel_entry(hdev, entry);
829 
830 	return true;
831 }
832 EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
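
/* Example (illustrative): queue_once/dequeue_once pair up to keep at most
 * one copy of a deferred operation pending, keyed here by the function
 * pointer alone (NULL data and destroy act as wildcards in the lookup):
 *
 *	hci_cmd_sync_queue_once(hdev, reenable_adv_sync, NULL, NULL);
 *	...
 *	hci_cmd_sync_dequeue_once(hdev, reenable_adv_sync, NULL, NULL);
 */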
833 
834 /* Dequeue HCI command entry:
835  *
836  * - Look up and cancel every entry that matches the function callback, data
837  *   or destroy callback.
838  */
839 bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
840 			  void *data, hci_cmd_sync_work_destroy_t destroy)
841 {
842 	struct hci_cmd_sync_work_entry *entry;
843 	bool ret = false;
844 
845 	mutex_lock(&hdev->cmd_sync_work_lock);
846 	while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
847 						   destroy))) {
848 		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
849 		ret = true;
850 	}
851 	mutex_unlock(&hdev->cmd_sync_work_lock);
852 
853 	return ret;
854 }
855 EXPORT_SYMBOL(hci_cmd_sync_dequeue);
856 
857 int hci_update_eir_sync(struct hci_dev *hdev)
858 {
859 	struct hci_cp_write_eir cp;
860 
861 	bt_dev_dbg(hdev, "");
862 
863 	if (!hdev_is_powered(hdev))
864 		return 0;
865 
866 	if (!lmp_ext_inq_capable(hdev))
867 		return 0;
868 
869 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
870 		return 0;
871 
872 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
873 		return 0;
874 
875 	memset(&cp, 0, sizeof(cp));
876 
877 	eir_create(hdev, cp.data);
878 
879 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
880 		return 0;
881 
882 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
883 
884 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
885 				     HCI_CMD_TIMEOUT);
886 }
887 
888 static u8 get_service_classes(struct hci_dev *hdev)
889 {
890 	struct bt_uuid *uuid;
891 	u8 val = 0;
892 
893 	list_for_each_entry(uuid, &hdev->uuids, list)
894 		val |= uuid->svc_hint;
895 
896 	return val;
897 }
898 
899 int hci_update_class_sync(struct hci_dev *hdev)
900 {
901 	u8 cod[3];
902 
903 	bt_dev_dbg(hdev, "");
904 
905 	if (!hdev_is_powered(hdev))
906 		return 0;
907 
908 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
909 		return 0;
910 
911 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
912 		return 0;
913 
914 	cod[0] = hdev->minor_class;
915 	cod[1] = hdev->major_class;
916 	cod[2] = get_service_classes(hdev);
917 
918 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
919 		cod[1] |= 0x20;
920 
921 	if (memcmp(cod, hdev->dev_class, 3) == 0)
922 		return 0;
923 
924 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
925 				     sizeof(cod), cod, HCI_CMD_TIMEOUT);
926 }
927 
928 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
929 {
930 	/* If there is no connection we are OK to advertise. */
931 	if (hci_conn_num(hdev, LE_LINK) == 0)
932 		return true;
933 
934 	/* Check le_states if there is any connection in peripheral role. */
935 	if (hdev->conn_hash.le_num_peripheral > 0) {
936 		/* Peripheral connection state and non-connectable mode,
937 		 * bit 20.
938 		 */
939 		if (!connectable && !(hdev->le_states[2] & 0x10))
940 			return false;
941 
942 		/* Peripheral connection state and connectable mode bit 38
943 		 * and scannable bit 21.
944 		 */
945 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
946 				    !(hdev->le_states[2] & 0x20)))
947 			return false;
948 	}
949 
950 	/* Check le_states if there is any connection in central role. */
951 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
952 		/* Central connection state and non-connectable mode, bit 18. */
953 		if (!connectable && !(hdev->le_states[2] & 0x02))
954 			return false;
955 
956 		/* Central connection state and connectable mode bit 35 and
957 		 * scannable bit 19.
958 		 */
959 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
960 				    !(hdev->le_states[2] & 0x08)))
961 			return false;
962 	}
963 
964 	return true;
965 }
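
/* As a worked example of the bit numbering used above (assuming the
 * conventional little-endian layout of the LE Read Supported States
 * bitmask, i.e. bit N maps to le_states[N / 8] & BIT(N % 8)):
 *
 *	bit 20 -> le_states[2] & 0x10
 *	bit 21 -> le_states[2] & 0x20
 *	bit 35 -> le_states[4] & 0x08
 *	bit 38 -> le_states[4] & 0x40
 */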
966 
967 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
968 {
969 	/* If privacy is not enabled don't use RPA */
970 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
971 		return false;
972 
973 	/* If basic privacy mode is enabled use RPA */
974 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
975 		return true;
976 
977 	/* If limited privacy mode is enabled don't use RPA if we're
978 	 * both discoverable and bondable.
979 	 */
980 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
981 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
982 		return false;
983 
984 	/* We're neither bondable nor discoverable in the limited
985 	 * privacy mode, therefore use RPA.
986 	 */
987 	return true;
988 }
989 
990 static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
991 {
992 	/* If we're advertising or initiating an LE connection we can't
993 	 * go ahead and change the random address at this time. This is
994 	 * because the eventual initiator address used for the
995 	 * subsequently created connection will be undefined (some
996 	 * controllers use the new address and others the one we had
997 	 * when the operation started).
998 	 *
999 	 * In this kind of scenario skip the update and let the random
1000 	 * address be updated at the next cycle.
1001 	 */
1002 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1003 	    hci_lookup_le_connect(hdev)) {
1004 		bt_dev_dbg(hdev, "Deferring random address update");
1005 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1006 		return 0;
1007 	}
1008 
1009 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
1010 				     6, rpa, HCI_CMD_TIMEOUT);
1011 }
1012 
1013 int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
1014 				   bool rpa, u8 *own_addr_type)
1015 {
1016 	int err;
1017 
1018 	/* If privacy is enabled use a resolvable private address. If
1019 	 * current RPA has expired or something other than the current
1020 	 * RPA is in use, then generate a new one.
1021 	 */
1022 	if (rpa) {
1023 		/* If the controller supports LL Privacy, use own address
1024 		 * type 0x03 (resolvable private address).
1025 		 */
1026 		if (use_ll_privacy(hdev))
1027 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1028 		else
1029 			*own_addr_type = ADDR_LE_DEV_RANDOM;
1030 
1031 		/* Check if RPA is valid */
1032 		if (rpa_valid(hdev))
1033 			return 0;
1034 
1035 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1036 		if (err < 0) {
1037 			bt_dev_err(hdev, "failed to generate new RPA");
1038 			return err;
1039 		}
1040 
1041 		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
1042 		if (err)
1043 			return err;
1044 
1045 		return 0;
1046 	}
1047 
1048 	/* In case of required privacy without resolvable private address,
1049 	 * use a non-resolvable private address. This is useful for active
1050 	 * scanning and non-connectable advertising.
1051 	 */
1052 	if (require_privacy) {
1053 		bdaddr_t nrpa;
1054 
1055 		while (true) {
1056 			/* The non-resolvable private address is generated
1057 			 * from six random bytes with the two most significant
1058 			 * bits cleared.
1059 			 */
1060 			get_random_bytes(&nrpa, 6);
1061 			nrpa.b[5] &= 0x3f;
1062 
1063 			/* The non-resolvable private address shall not be
1064 			 * equal to the public address.
1065 			 */
1066 			if (bacmp(&hdev->bdaddr, &nrpa))
1067 				break;
1068 		}
1069 
1070 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1071 
1072 		return hci_set_random_addr_sync(hdev, &nrpa);
1073 	}
1074 
1075 	/* If forcing static address is in use or there is no public
1076 	 * address, use the static address as the random address (but skip
1077 	 * the HCI command if the current random address is already the
1078 	 * static one).
1079 	 *
1080 	 * In case BR/EDR has been disabled on a dual-mode controller
1081 	 * and a static address has been configured, then use that
1082 	 * address instead of the public BR/EDR address.
1083 	 */
1084 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1085 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1086 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1087 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
1088 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1089 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
1090 			return hci_set_random_addr_sync(hdev,
1091 							&hdev->static_addr);
1092 		return 0;
1093 	}
1094 
1095 	/* Neither privacy nor static address is being used so use a
1096 	 * public address.
1097 	 */
1098 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1099 
1100 	return 0;
1101 }
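
/* Example (illustrative sketch): a typical caller resolves own_addr_type
 * right before building LE parameters, with the second and third arguments
 * derived from the privacy rules above:
 *
 *	u8 own_addr_type;
 *	int err;
 *
 *	err = hci_update_random_address_sync(hdev, true, false,
 *					     &own_addr_type);
 *	if (err)
 *		return err;
 *	cp.own_address_type = own_addr_type;
 */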
1102 
1103 static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
1104 {
1105 	struct hci_cp_le_set_ext_adv_enable *cp;
1106 	struct hci_cp_ext_adv_set *set;
1107 	u8 data[sizeof(*cp) + sizeof(*set) * 1];
1108 	u8 size;
1109 	struct adv_info *adv = NULL;
1110 
1111 	/* If request specifies an instance that doesn't exist, fail */
1112 	if (instance > 0) {
1113 		adv = hci_find_adv_instance(hdev, instance);
1114 		if (!adv)
1115 			return -EINVAL;
1116 
1117 		/* If not enabled there is nothing to do */
1118 		if (!adv->enabled)
1119 			return 0;
1120 	}
1121 
1122 	memset(data, 0, sizeof(data));
1123 
1124 	cp = (void *)data;
1125 	set = (void *)cp->data;
1126 
1127 	/* Instance 0x00 indicates all advertising instances will be disabled */
1128 	cp->num_of_sets = !!instance;
1129 	cp->enable = 0x00;
1130 
1131 	set->handle = adv ? adv->handle : instance;
1132 
1133 	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;
1134 
1135 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1136 				     size, data, HCI_CMD_TIMEOUT);
1137 }
1138 
1139 static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
1140 					    bdaddr_t *random_addr)
1141 {
1142 	struct hci_cp_le_set_adv_set_rand_addr cp;
1143 	int err;
1144 
1145 	if (!instance) {
1146 		/* Instance 0x00 doesn't have an adv_info; instead it uses
1147 		 * hdev->random_addr to track its address, so whenever that
1148 		 * needs updating also set the random address, since
1149 		 * hdev->random_addr is shared with the scan state machine.
1150 		 */
1151 		err = hci_set_random_addr_sync(hdev, random_addr);
1152 		if (err)
1153 			return err;
1154 	}
1155 
1156 	memset(&cp, 0, sizeof(cp));
1157 
1158 	cp.handle = instance;
1159 	bacpy(&cp.bdaddr, random_addr);
1160 
1161 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1162 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1163 }
1164 
1165 int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
1166 {
1167 	struct hci_cp_le_set_ext_adv_params cp;
1168 	bool connectable;
1169 	u32 flags;
1170 	bdaddr_t random_addr;
1171 	u8 own_addr_type;
1172 	int err;
1173 	struct adv_info *adv;
1174 	bool secondary_adv;
1175 
1176 	if (instance > 0) {
1177 		adv = hci_find_adv_instance(hdev, instance);
1178 		if (!adv)
1179 			return -EINVAL;
1180 	} else {
1181 		adv = NULL;
1182 	}
1183 
1184 	/* Updating parameters of an active instance will return a
1185 	 * Command Disallowed error, so we must first disable the
1186 	 * instance if it is active.
1187 	 */
1188 	if (adv && !adv->pending) {
1189 		err = hci_disable_ext_adv_instance_sync(hdev, instance);
1190 		if (err)
1191 			return err;
1192 	}
1193 
1194 	flags = hci_adv_instance_flags(hdev, instance);
1195 
1196 	/* If the "connectable" instance flag was not set, then choose between
1197 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1198 	 */
1199 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1200 		      mgmt_get_connectable(hdev);
1201 
1202 	if (!is_advertising_allowed(hdev, connectable))
1203 		return -EPERM;
1204 
1205 	/* Set require_privacy to true only when non-connectable
1206 	 * advertising is used. In that case it is fine to use a
1207 	 * non-resolvable private address.
1208 	 */
1209 	err = hci_get_random_address(hdev, !connectable,
1210 				     adv_use_rpa(hdev, flags), adv,
1211 				     &own_addr_type, &random_addr);
1212 	if (err < 0)
1213 		return err;
1214 
1215 	memset(&cp, 0, sizeof(cp));
1216 
1217 	if (adv) {
1218 		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
1219 		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
1220 		cp.tx_power = adv->tx_power;
1221 	} else {
1222 		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1223 		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1224 		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1225 	}
1226 
1227 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1228 
1229 	if (connectable) {
1230 		if (secondary_adv)
1231 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1232 		else
1233 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1234 	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
1235 		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
1236 		if (secondary_adv)
1237 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1238 		else
1239 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1240 	} else {
1241 		if (secondary_adv)
1242 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1243 		else
1244 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1245 	}
1246 
1247 	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
1248 	 * contains the peer’s Identity Address and the Peer_Address_Type
1249 	 * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01).
1250 	 * These parameters are used to locate the corresponding local IRK in
1251 	 * the resolving list; this IRK is used to generate their own address
1252 	 * used in the advertisement.
1253 	 */
1254 	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
1255 		hci_copy_identity_address(hdev, &cp.peer_addr,
1256 					  &cp.peer_addr_type);
1257 
1258 	cp.own_addr_type = own_addr_type;
1259 	cp.channel_map = hdev->le_adv_channel_map;
1260 	cp.handle = adv ? adv->handle : instance;
1261 
1262 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
1263 		cp.primary_phy = HCI_ADV_PHY_1M;
1264 		cp.secondary_phy = HCI_ADV_PHY_2M;
1265 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1266 		cp.primary_phy = HCI_ADV_PHY_CODED;
1267 		cp.secondary_phy = HCI_ADV_PHY_CODED;
1268 	} else {
1269 		/* In all other cases use 1M */
1270 		cp.primary_phy = HCI_ADV_PHY_1M;
1271 		cp.secondary_phy = HCI_ADV_PHY_1M;
1272 	}
1273 
1274 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
1275 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1276 	if (err)
1277 		return err;
1278 
1279 	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
1280 	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
1281 	    bacmp(&random_addr, BDADDR_ANY)) {
1282 		/* Check if the random address needs to be updated */
1283 		if (adv) {
1284 			if (!bacmp(&random_addr, &adv->random_addr))
1285 				return 0;
1286 		} else {
1287 			if (!bacmp(&random_addr, &hdev->random_addr))
1288 				return 0;
1289 		}
1290 
1291 		return hci_set_adv_set_random_addr_sync(hdev, instance,
1292 							&random_addr);
1293 	}
1294 
1295 	return 0;
1296 }
1297 
1298 static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1299 {
1300 	DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length,
1301 		    HCI_MAX_EXT_AD_LENGTH);
1302 	u8 len;
1303 	struct adv_info *adv = NULL;
1304 	int err;
1305 
1306 	if (instance) {
1307 		adv = hci_find_adv_instance(hdev, instance);
1308 		if (!adv || !adv->scan_rsp_changed)
1309 			return 0;
1310 	}
1311 
1312 	len = eir_create_scan_rsp(hdev, instance, pdu->data);
1313 
1314 	pdu->handle = adv ? adv->handle : instance;
1315 	pdu->length = len;
1316 	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1317 	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1318 
1319 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1320 				    struct_size(pdu, data, len), pdu,
1321 				    HCI_CMD_TIMEOUT);
1322 	if (err)
1323 		return err;
1324 
1325 	if (adv) {
1326 		adv->scan_rsp_changed = false;
1327 	} else {
1328 		memcpy(hdev->scan_rsp_data, pdu->data, len);
1329 		hdev->scan_rsp_data_len = len;
1330 	}
1331 
1332 	return 0;
1333 }
1334 
1335 static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1336 {
1337 	struct hci_cp_le_set_scan_rsp_data cp;
1338 	u8 len;
1339 
1340 	memset(&cp, 0, sizeof(cp));
1341 
1342 	len = eir_create_scan_rsp(hdev, instance, cp.data);
1343 
1344 	if (hdev->scan_rsp_data_len == len &&
1345 	    !memcmp(cp.data, hdev->scan_rsp_data, len))
1346 		return 0;
1347 
1348 	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1349 	hdev->scan_rsp_data_len = len;
1350 
1351 	cp.length = len;
1352 
1353 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
1354 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1355 }
1356 
1357 int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1358 {
1359 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1360 		return 0;
1361 
1362 	if (ext_adv_capable(hdev))
1363 		return hci_set_ext_scan_rsp_data_sync(hdev, instance);
1364 
1365 	return __hci_set_scan_rsp_data_sync(hdev, instance);
1366 }
1367 
1368 int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
1369 {
1370 	struct hci_cp_le_set_ext_adv_enable *cp;
1371 	struct hci_cp_ext_adv_set *set;
1372 	u8 data[sizeof(*cp) + sizeof(*set) * 1];
1373 	struct adv_info *adv;
1374 
1375 	if (instance > 0) {
1376 		adv = hci_find_adv_instance(hdev, instance);
1377 		if (!adv)
1378 			return -EINVAL;
1379 		/* If already enabled there is nothing to do */
1380 		if (adv->enabled)
1381 			return 0;
1382 	} else {
1383 		adv = NULL;
1384 	}
1385 
1386 	cp = (void *)data;
1387 	set = (void *)cp->data;
1388 
1389 	memset(cp, 0, sizeof(*cp));
1390 
1391 	cp->enable = 0x01;
1392 	cp->num_of_sets = 0x01;
1393 
1394 	memset(set, 0, sizeof(*set));
1395 
1396 	set->handle = adv ? adv->handle : instance;
1397 
1398 	/* Set duration per instance since controller is responsible for
1399 	 * scheduling it.
1400 	 */
1401 	if (adv && adv->timeout) {
1402 		u16 duration = adv->timeout * MSEC_PER_SEC;
1403 
1404 		/* Time = N * 10 ms */
1405 		set->duration = cpu_to_le16(duration / 10);
1406 	}
1407 
1408 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1409 				     sizeof(*cp) +
1410 				     sizeof(*set) * cp->num_of_sets,
1411 				     data, HCI_CMD_TIMEOUT);
1412 }
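
/* Worked example (illustrative): the duration above is programmed in units
 * of 10 ms, so an instance timeout of 5 seconds becomes
 * 5 * MSEC_PER_SEC / 10 = 500 on the wire. Note that the intermediate
 * value is held in a u16, so timeouts beyond roughly 65 seconds would wrap.
 */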
1413 
1414 int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
1415 {
1416 	int err;
1417 
1418 	err = hci_setup_ext_adv_instance_sync(hdev, instance);
1419 	if (err)
1420 		return err;
1421 
1422 	err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
1423 	if (err)
1424 		return err;
1425 
1426 	return hci_enable_ext_advertising_sync(hdev, instance);
1427 }
1428 
1429 int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1430 {
1431 	struct hci_cp_le_set_per_adv_enable cp;
1432 	struct adv_info *adv = NULL;
1433 
1434 	/* If periodic advertising is already disabled there is nothing to do. */
1435 	adv = hci_find_adv_instance(hdev, instance);
1436 	if (!adv || !adv->periodic || !adv->enabled)
1437 		return 0;
1438 
1439 	memset(&cp, 0, sizeof(cp));
1440 
1441 	cp.enable = 0x00;
1442 	cp.handle = instance;
1443 
1444 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1445 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1446 }
1447 
1448 static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
1449 				       u16 min_interval, u16 max_interval)
1450 {
1451 	struct hci_cp_le_set_per_adv_params cp;
1452 
1453 	memset(&cp, 0, sizeof(cp));
1454 
1455 	if (!min_interval)
1456 		min_interval = DISCOV_LE_PER_ADV_INT_MIN;
1457 
1458 	if (!max_interval)
1459 		max_interval = DISCOV_LE_PER_ADV_INT_MAX;
1460 
1461 	cp.handle = instance;
1462 	cp.min_interval = cpu_to_le16(min_interval);
1463 	cp.max_interval = cpu_to_le16(max_interval);
1464 	cp.periodic_properties = 0x0000;
1465 
1466 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
1467 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1468 }
1469 
1470 static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
1471 {
1472 	DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length,
1473 		    HCI_MAX_PER_AD_LENGTH);
1474 	u8 len;
1475 	struct adv_info *adv = NULL;
1476 
1477 	if (instance) {
1478 		adv = hci_find_adv_instance(hdev, instance);
1479 		if (!adv || !adv->periodic)
1480 			return 0;
1481 	}
1482 
1483 	len = eir_create_per_adv_data(hdev, instance, pdu->data);
1484 
1485 	pdu->length = len;
1486 	pdu->handle = adv ? adv->handle : instance;
1487 	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1488 
1489 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
1490 				     struct_size(pdu, data, len), pdu,
1491 				     HCI_CMD_TIMEOUT);
1492 }
1493 
1494 static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1495 {
1496 	struct hci_cp_le_set_per_adv_enable cp;
1497 	struct adv_info *adv = NULL;
1498 
1499 	/* If periodic advertising is already enabled there is nothing to do. */
1500 	adv = hci_find_adv_instance(hdev, instance);
1501 	if (adv && adv->periodic && adv->enabled)
1502 		return 0;
1503 
1504 	memset(&cp, 0, sizeof(cp));
1505 
1506 	cp.enable = 0x01;
1507 	cp.handle = instance;
1508 
1509 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1510 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1511 }
1512 
1513 /* Check if the periodic advertising data contains a Basic Audio Announcement
1514  * and, if it does, generate a Broadcast ID and add a Broadcast Announcement.
1515  */
1516 static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv)
1517 {
1518 	u8 bid[3];
1519 	u8 ad[4 + 3];
1520 
1521 	/* Skip if adv is NULL: instance 0x00 is used for general purpose
1522 	 * advertising, so it cannot be used for the likes of a Broadcast
1523 	 * Announcement as it can be overwritten at any point.
1524 	 */
1525 	if (!adv)
1526 		return 0;
1527 
1528 	/* If the PA data doesn't contain a Basic Audio Announcement there is
1529 	 * nothing to do.
1530 	 */
1531 	if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
1532 				  0x1851, NULL))
1533 		return 0;
1534 
1535 	/* Check if advertising data already has a Broadcast Announcement since
1536 	 * the process may want to control the Broadcast ID directly and in that
1537 	 * case the kernel shall not interfere.
1538 	 */
1539 	if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852,
1540 				 NULL))
1541 		return 0;
1542 
1543 	/* Generate Broadcast ID */
1544 	get_random_bytes(bid, sizeof(bid));
1545 	eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
1546 	hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL);
1547 
1548 	return hci_update_adv_data_sync(hdev, adv->instance);
1549 }
1550 
1551 int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
1552 			   u8 *data, u32 flags, u16 min_interval,
1553 			   u16 max_interval, u16 sync_interval)
1554 {
1555 	struct adv_info *adv = NULL;
1556 	int err;
1557 	bool added = false;
1558 
1559 	hci_disable_per_advertising_sync(hdev, instance);
1560 
1561 	if (instance) {
1562 		adv = hci_find_adv_instance(hdev, instance);
1563 		/* Create an instance if one could not be found */
1564 		if (!adv) {
1565 			adv = hci_add_per_instance(hdev, instance, flags,
1566 						   data_len, data,
1567 						   sync_interval,
1568 						   sync_interval);
1569 			if (IS_ERR(adv))
1570 				return PTR_ERR(adv);
1571 			adv->pending = false;
1572 			added = true;
1573 		}
1574 	}
1575 
1576 	/* Start advertising */
1577 	err = hci_start_ext_adv_sync(hdev, instance);
1578 	if (err < 0)
1579 		goto fail;
1580 
1581 	err = hci_adv_bcast_annoucement(hdev, adv);
1582 	if (err < 0)
1583 		goto fail;
1584 
1585 	err = hci_set_per_adv_params_sync(hdev, instance, min_interval,
1586 					  max_interval);
1587 	if (err < 0)
1588 		goto fail;
1589 
1590 	err = hci_set_per_adv_data_sync(hdev, instance);
1591 	if (err < 0)
1592 		goto fail;
1593 
1594 	err = hci_enable_per_advertising_sync(hdev, instance);
1595 	if (err < 0)
1596 		goto fail;
1597 
1598 	return 0;
1599 
1600 fail:
1601 	if (added)
1602 		hci_remove_adv_instance(hdev, instance);
1603 
1604 	return err;
1605 }
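
/* Example (illustrative sketch): starting a periodic advertising train on
 * instance 0x01 for a broadcast source; passing 0 for min/max lets
 * hci_set_per_adv_params_sync() fall back to DISCOV_LE_PER_ADV_INT_MIN/MAX,
 * and base/base_len stand in for hypothetical BASE data:
 *
 *	err = hci_start_per_adv_sync(hdev, 0x01, base_len, base, flags,
 *				     0, 0, 0x08);
 */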
1606 
1607 static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
1608 {
1609 	int err;
1610 
1611 	if (ext_adv_capable(hdev))
1612 		return hci_start_ext_adv_sync(hdev, instance);
1613 
1614 	err = hci_update_adv_data_sync(hdev, instance);
1615 	if (err)
1616 		return err;
1617 
1618 	err = hci_update_scan_rsp_data_sync(hdev, instance);
1619 	if (err)
1620 		return err;
1621 
1622 	return hci_enable_advertising_sync(hdev);
1623 }
1624 
1625 int hci_enable_advertising_sync(struct hci_dev *hdev)
1626 {
1627 	struct adv_info *adv_instance;
1628 	struct hci_cp_le_set_adv_param cp;
1629 	u8 own_addr_type, enable = 0x01;
1630 	bool connectable;
1631 	u16 adv_min_interval, adv_max_interval;
1632 	u32 flags;
1633 	u8 status;
1634 
1635 	if (ext_adv_capable(hdev))
1636 		return hci_enable_ext_advertising_sync(hdev,
1637 						       hdev->cur_adv_instance);
1638 
1639 	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
1640 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1641 
1642 	/* If the "connectable" instance flag was not set, then choose between
1643 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1644 	 */
1645 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1646 		      mgmt_get_connectable(hdev);
1647 
1648 	if (!is_advertising_allowed(hdev, connectable))
1649 		return -EINVAL;
1650 
1651 	status = hci_disable_advertising_sync(hdev);
1652 	if (status)
1653 		return status;
1654 
1655 	/* Clear the HCI_LE_ADV bit temporarily so that the
1656 	 * hci_update_random_address_sync knows that it's safe to go ahead
1657 	 * and write a new random address. The flag will be set back on
1658 	 * as soon as the SET_ADV_ENABLE HCI command completes.
1659 	 */
1660 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1661 
1662 	/* Set require_privacy to true only when non-connectable
1663 	 * advertising is used. In that case it is fine to use a
1664 	 * non-resolvable private address.
1665 	 */
1666 	status = hci_update_random_address_sync(hdev, !connectable,
1667 						adv_use_rpa(hdev, flags),
1668 						&own_addr_type);
1669 	if (status)
1670 		return status;
1671 
1672 	memset(&cp, 0, sizeof(cp));
1673 
1674 	if (adv_instance) {
1675 		adv_min_interval = adv_instance->min_interval;
1676 		adv_max_interval = adv_instance->max_interval;
1677 	} else {
1678 		adv_min_interval = hdev->le_adv_min_interval;
1679 		adv_max_interval = hdev->le_adv_max_interval;
1680 	}
1681 
1682 	if (connectable) {
1683 		cp.type = LE_ADV_IND;
1684 	} else {
1685 		if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
1686 			cp.type = LE_ADV_SCAN_IND;
1687 		else
1688 			cp.type = LE_ADV_NONCONN_IND;
1689 
1690 		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1691 		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1692 			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1693 			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1694 		}
1695 	}
1696 
1697 	cp.min_interval = cpu_to_le16(adv_min_interval);
1698 	cp.max_interval = cpu_to_le16(adv_max_interval);
1699 	cp.own_address_type = own_addr_type;
1700 	cp.channel_map = hdev->le_adv_channel_map;
1701 
1702 	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
1703 				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1704 	if (status)
1705 		return status;
1706 
1707 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
1708 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
1709 }
1710 
1711 static int enable_advertising_sync(struct hci_dev *hdev, void *data)
1712 {
1713 	return hci_enable_advertising_sync(hdev);
1714 }
1715 
1716 int hci_enable_advertising(struct hci_dev *hdev)
1717 {
1718 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1719 	    list_empty(&hdev->adv_instances))
1720 		return 0;
1721 
1722 	return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
1723 }
1724 
1725 int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1726 				     struct sock *sk)
1727 {
1728 	int err;
1729 
1730 	if (!ext_adv_capable(hdev))
1731 		return 0;
1732 
1733 	err = hci_disable_ext_adv_instance_sync(hdev, instance);
1734 	if (err)
1735 		return err;
1736 
1737 	/* If request specifies an instance that doesn't exist, fail */
1738 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1739 		return -EINVAL;
1740 
1741 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
1742 					sizeof(instance), &instance, 0,
1743 					HCI_CMD_TIMEOUT, sk);
1744 }
1745 
1746 static int remove_ext_adv_sync(struct hci_dev *hdev, void *data)
1747 {
1748 	struct adv_info *adv = data;
1749 	u8 instance = 0;
1750 
1751 	if (adv)
1752 		instance = adv->instance;
1753 
1754 	return hci_remove_ext_adv_instance_sync(hdev, instance, NULL);
1755 }
1756 
1757 int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance)
1758 {
1759 	struct adv_info *adv = NULL;
1760 
1761 	if (instance) {
1762 		adv = hci_find_adv_instance(hdev, instance);
1763 		if (!adv)
1764 			return -EINVAL;
1765 	}
1766 
1767 	return hci_cmd_sync_queue(hdev, remove_ext_adv_sync, adv, NULL);
1768 }
1769 
1770 int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
1771 {
1772 	struct hci_cp_le_term_big cp;
1773 
1774 	memset(&cp, 0, sizeof(cp));
1775 	cp.handle = handle;
1776 	cp.reason = reason;
1777 
1778 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG,
1779 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1780 }
1781 
1782 static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
1783 {
1784 	DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
1785 		    HCI_MAX_EXT_AD_LENGTH);
1786 	u8 len;
1787 	struct adv_info *adv = NULL;
1788 	int err;
1789 
1790 	if (instance) {
1791 		adv = hci_find_adv_instance(hdev, instance);
1792 		if (!adv || !adv->adv_data_changed)
1793 			return 0;
1794 	}
1795 
1796 	len = eir_create_adv_data(hdev, instance, pdu->data);
1797 
1798 	pdu->length = len;
1799 	pdu->handle = adv ? adv->handle : instance;
1800 	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1801 	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1802 
1803 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
1804 				    struct_size(pdu, data, len), pdu,
1805 				    HCI_CMD_TIMEOUT);
1806 	if (err)
1807 		return err;
1808 
1809 	/* Update data if the command succeeded */
1810 	if (adv) {
1811 		adv->adv_data_changed = false;
1812 	} else {
1813 		memcpy(hdev->adv_data, pdu->data, len);
1814 		hdev->adv_data_len = len;
1815 	}
1816 
1817 	return 0;
1818 }
1819 
1820 static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
1821 {
1822 	struct hci_cp_le_set_adv_data cp;
1823 	u8 len;
1824 
1825 	memset(&cp, 0, sizeof(cp));
1826 
1827 	len = eir_create_adv_data(hdev, instance, cp.data);
1828 
1829 	/* There's nothing to do if the data hasn't changed */
1830 	if (hdev->adv_data_len == len &&
1831 	    memcmp(cp.data, hdev->adv_data, len) == 0)
1832 		return 0;
1833 
1834 	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1835 	hdev->adv_data_len = len;
1836 
1837 	cp.length = len;
1838 
1839 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
1840 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1841 }
1842 
1843 int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
1844 {
1845 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1846 		return 0;
1847 
1848 	if (ext_adv_capable(hdev))
1849 		return hci_set_ext_adv_data_sync(hdev, instance);
1850 
1851 	return hci_set_adv_data_sync(hdev, instance);
1852 }
1853 
1854 int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1855 				   bool force)
1856 {
1857 	struct adv_info *adv = NULL;
1858 	u16 timeout;
1859 
1860 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
1861 		return -EPERM;
1862 
1863 	if (hdev->adv_instance_timeout)
1864 		return -EBUSY;
1865 
1866 	adv = hci_find_adv_instance(hdev, instance);
1867 	if (!adv)
1868 		return -ENOENT;
1869 
1870 	/* A zero timeout means unlimited advertising. As long as there is
1871 	 * only one instance, duration should be ignored. We still set a timeout
1872 	 * in case further instances are being added later on.
1873 	 *
1874 	 * If the remaining lifetime of the instance is more than the duration
1875 	 * then the timeout corresponds to the duration, otherwise it will be
1876 	 * reduced to the remaining instance lifetime.
1877 	 */
1878 	if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
1879 		timeout = adv->duration;
1880 	else
1881 		timeout = adv->remaining_time;
1882 
1883 	/* The remaining time is being reduced unless the instance is being
1884 	 * advertised without time limit.
1885 	 */
1886 	if (adv->timeout)
1887 		adv->remaining_time = adv->remaining_time - timeout;
1888 
1889 	/* Only use work for scheduling instances with legacy advertising */
1890 	if (!ext_adv_capable(hdev)) {
1891 		hdev->adv_instance_timeout = timeout;
1892 		queue_delayed_work(hdev->req_workqueue,
1893 				   &hdev->adv_instance_expire,
1894 				   msecs_to_jiffies(timeout * 1000));
1895 	}
1896 
1897 	/* If we're just re-scheduling the same instance again then do not
1898 	 * execute any HCI commands. This happens when a single instance is
1899 	 * being advertised.
1900 	 */
1901 	if (!force && hdev->cur_adv_instance == instance &&
1902 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
1903 		return 0;
1904 
1905 	hdev->cur_adv_instance = instance;
1906 
1907 	return hci_start_adv_sync(hdev, instance);
1908 }
1909 
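/* Clear all extended advertising sets. Advertising set 0x00 (i.e. all
 * instances) is disabled first since, per the Core specification, the
 * LE Clear Advertising Sets command is rejected with Command Disallowed
 * while any set is still enabled.
 */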
1910 static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
1911 {
1912 	int err;
1913 
1914 	if (!ext_adv_capable(hdev))
1915 		return 0;
1916 
1917 	/* Disable instance 0x00 to disable all instances */
1918 	err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
1919 	if (err)
1920 		return err;
1921 
1922 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
1923 					0, NULL, 0, HCI_CMD_TIMEOUT, sk);
1924 }
1925 
1926 static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
1927 {
1928 	struct adv_info *adv, *n;
1929 
1930 	/* Remove all existing sets */
1931 	if (ext_adv_capable(hdev))
1932 		return hci_clear_adv_sets_sync(hdev, sk);
1936 
1937 	/* This is safe as long as no command is sent while the lock is
1938 	 * held.
1939 	 */
1940 	hci_dev_lock(hdev);
1941 
1942 	/* Cleanup non-ext instances */
1943 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1944 		u8 instance = adv->instance;
1945 		int err;
1946 
1947 		if (!(force || adv->timeout))
1948 			continue;
1949 
1950 		err = hci_remove_adv_instance(hdev, instance);
1951 		if (!err)
1952 			mgmt_advertising_removed(sk, hdev, instance);
1953 	}
1954 
1955 	hci_dev_unlock(hdev);
1956 
1957 	return 0;
1958 }
1959 
1960 static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
1961 			       struct sock *sk)
1962 {
1963 	int err = 0;
1964 
1965 	/* If we use extended advertising, the instance has to be removed first. */
1966 	if (ext_adv_capable(hdev))
1967 		return hci_remove_ext_adv_instance_sync(hdev, instance, sk);
1970 
1971 	/* This is safe as long as no command is sent while the lock is
1972 	 * held.
1973 	 */
1974 	hci_dev_lock(hdev);
1975 
1976 	err = hci_remove_adv_instance(hdev, instance);
1977 	if (!err)
1978 		mgmt_advertising_removed(sk, hdev, instance);
1979 
1980 	hci_dev_unlock(hdev);
1981 
1982 	return err;
1983 }
1984 
1985 /* For a single instance:
1986  * - force == true: The instance will be removed even when its remaining
1987  *   lifetime is not zero.
1988  * - force == false: the instance will be deactivated but kept stored unless
1989  *   the remaining lifetime is zero.
1990  *
1991  * For instance == 0x00:
1992  * - force == true: All instances will be removed regardless of their timeout
1993  *   setting.
1994  * - force == false: Only instances that have a timeout will be removed.
1995  */
1996 int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
1997 				u8 instance, bool force)
1998 {
1999 	struct adv_info *next = NULL;
2000 	int err;
2001 
2002 	/* Cancel any timeout concerning the removed instance(s). */
2003 	if (!instance || hdev->cur_adv_instance == instance)
2004 		cancel_adv_timeout(hdev);
2005 
2006 	/* Get the next instance to advertise BEFORE we remove
2007 	 * the current one. This can be the same instance again
2008 	 * if there is only one instance.
2009 	 */
2010 	if (hdev->cur_adv_instance == instance)
2011 		next = hci_get_next_instance(hdev, instance);
2012 
2013 	if (!instance) {
2014 		err = hci_clear_adv_sync(hdev, sk, force);
2015 		if (err)
2016 			return err;
2017 	} else {
2018 		struct adv_info *adv = hci_find_adv_instance(hdev, instance);
2019 
2020 		if (force || (adv && adv->timeout && !adv->remaining_time)) {
2021 			/* Don't advertise a removed instance. */
2022 			if (next && next->instance == instance)
2023 				next = NULL;
2024 
2025 			err = hci_remove_adv_sync(hdev, instance, sk);
2026 			if (err)
2027 				return err;
2028 		}
2029 	}
2030 
2031 	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
2032 		return 0;
2033 
2034 	if (next && !ext_adv_capable(hdev))
2035 		hci_schedule_adv_instance_sync(hdev, next->instance, false);
2036 
2037 	return 0;
2038 }
2039 
2040 int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
2041 {
2042 	struct hci_cp_read_rssi cp;
2043 
2044 	cp.handle = handle;
2045 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
2046 					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2047 }
2048 
2049 int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
2050 {
2051 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
2052 					sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2053 }
2054 
2055 int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
2056 {
2057 	struct hci_cp_read_tx_power cp;
2058 
2059 	cp.handle = handle;
2060 	cp.type = type;
2061 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
2062 					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2063 }
2064 
2065 int hci_disable_advertising_sync(struct hci_dev *hdev)
2066 {
2067 	u8 enable = 0x00;
2069 
2070 	/* If controller is not advertising we are done. */
2071 	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
2072 		return 0;
2073 
2074 	if (ext_adv_capable(hdev))
2075 		return hci_disable_ext_adv_instance_sync(hdev, 0x00);
2078 
2079 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
2080 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
2081 }
2082 
2083 static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
2084 					   u8 filter_dup)
2085 {
2086 	struct hci_cp_le_set_ext_scan_enable cp;
2087 
2088 	memset(&cp, 0, sizeof(cp));
2089 	cp.enable = val;
2090 
2091 	if (hci_dev_test_flag(hdev, HCI_MESH))
2092 		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2093 	else
2094 		cp.filter_dup = filter_dup;
2095 
2096 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2097 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2098 }
2099 
2100 static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
2101 				       u8 filter_dup)
2102 {
2103 	struct hci_cp_le_set_scan_enable cp;
2104 
2105 	if (use_ext_scan(hdev))
2106 		return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);
2107 
2108 	memset(&cp, 0, sizeof(cp));
2109 	cp.enable = val;
2110 
2111 	if (val && hci_dev_test_flag(hdev, HCI_MESH))
2112 		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2113 	else
2114 		cp.filter_dup = filter_dup;
2115 
2116 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
2117 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2118 }
2119 
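/* Enable or disable LE address resolution in the controller. The
 * requested value is compared against the HCI_LL_RPA_RESOLUTION flag
 * so the command is only sent when the state actually changes.
 */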
2120 static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
2121 {
2122 	if (!use_ll_privacy(hdev))
2123 		return 0;
2124 
2125 	/* If the controller is already in the requested state we are done. */
2126 	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2127 		return 0;
2128 
2129 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
2130 				     sizeof(val), &val, HCI_CMD_TIMEOUT);
2131 }
2132 
2133 static int hci_scan_disable_sync(struct hci_dev *hdev)
2134 {
2135 	int err;
2136 
2137 	/* If controller is not scanning we are done. */
2138 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2139 		return 0;
2140 
2141 	if (hdev->scanning_paused) {
2142 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2143 		return 0;
2144 	}
2145 
2146 	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
2147 	if (err) {
2148 		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
2149 		return err;
2150 	}
2151 
2152 	return err;
2153 }
2154 
2155 static bool scan_use_rpa(struct hci_dev *hdev)
2156 {
2157 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
2158 }
2159 
2160 static void hci_start_interleave_scan(struct hci_dev *hdev)
2161 {
2162 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
2163 	queue_delayed_work(hdev->req_workqueue,
2164 			   &hdev->interleave_scan, 0);
2165 }
2166 
2167 static void cancel_interleave_scan(struct hci_dev *hdev)
2168 {
2169 	bt_dev_dbg(hdev, "cancelling interleave scan");
2170 
2171 	cancel_delayed_work_sync(&hdev->interleave_scan);
2172 
2173 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
2174 }
2175 
2176 /* Return true if this call started the interleaved scan, otherwise
2177  * return false.
2178  */
2179 static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
2180 {
2181 	/* Do interleaved scan only if all of the following are true:
2182 	 * - There is at least one ADV monitor
2183 	 * - At least one pending LE connection or one device to be scanned for
2184 	 * - Monitor offloading is not supported
2185 	 * If so, we should alternate between allowlist scan and one without
2186 	 * any filters to save power.
2187 	 */
2188 	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
2189 				!(list_empty(&hdev->pend_le_conns) &&
2190 				  list_empty(&hdev->pend_le_reports)) &&
2191 				hci_get_adv_monitor_offload_ext(hdev) ==
2192 				    HCI_ADV_MONITOR_EXT_NONE;
2193 	bool is_interleaving = is_interleave_scanning(hdev);
2194 
2195 	if (use_interleaving && !is_interleaving) {
2196 		hci_start_interleave_scan(hdev);
2197 		bt_dev_dbg(hdev, "starting interleave scan");
2198 		return true;
2199 	}
2200 
2201 	if (!use_interleaving && is_interleaving)
2202 		cancel_interleave_scan(hdev);
2203 
2204 	return false;
2205 }
2206 
2207 /* Removes a device from the resolving list if needed. */
2208 static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
2209 					bdaddr_t *bdaddr, u8 bdaddr_type)
2210 {
2211 	struct hci_cp_le_del_from_resolv_list cp;
2212 	struct bdaddr_list_with_irk *entry;
2213 
2214 	if (!use_ll_privacy(hdev))
2215 		return 0;
2216 
2217 	/* Check if the IRK has been programmed */
2218 	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
2219 						bdaddr_type);
2220 	if (!entry)
2221 		return 0;
2222 
2223 	cp.bdaddr_type = bdaddr_type;
2224 	bacpy(&cp.bdaddr, bdaddr);
2225 
2226 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
2227 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2228 }
2229 
2230 static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
2231 				       bdaddr_t *bdaddr, u8 bdaddr_type)
2232 {
2233 	struct hci_cp_le_del_from_accept_list cp;
2234 	int err;
2235 
2236 	/* Check if device is on accept list before removing it */
2237 	if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
2238 		return 0;
2239 
2240 	cp.bdaddr_type = bdaddr_type;
2241 	bacpy(&cp.bdaddr, bdaddr);
2242 
2243 	/* Ignore errors when removing from the resolving list, as it is
2244 	 * likely that the device was never added.
2245 	 */
2246 	hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2247 
2248 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
2249 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2250 	if (err) {
2251 		bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
2252 		return err;
2253 	}
2254 
2255 	bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
2256 		   cp.bdaddr_type);
2257 
2258 	return 0;
2259 }
2260 
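/* Stable snapshot of the scheduling-relevant fields of
 * hci_conn_params, taken under RCU by conn_params_copy() so that HCI
 * commands can be issued without holding hdev->lock or the RCU read
 * lock while waiting for command completion.
 */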
2261 struct conn_params {
2262 	bdaddr_t addr;
2263 	u8 addr_type;
2264 	hci_conn_flags_t flags;
2265 	u8 privacy_mode;
2266 };
2267 
2268 /* Adds a device to the resolving list if needed.
2269  * Setting params to NULL programs local hdev->irk
2270  */
2271 static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
2272 					struct conn_params *params)
2273 {
2274 	struct hci_cp_le_add_to_resolv_list cp;
2275 	struct smp_irk *irk;
2276 	struct bdaddr_list_with_irk *entry;
2277 	struct hci_conn_params *p;
2278 
2279 	if (!use_ll_privacy(hdev))
2280 		return 0;
2281 
2282 	/* Attempt to program local identity address, type and IRK if params is
2283 	 * NULL.
2284 	 */
2285 	if (!params) {
2286 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
2287 			return 0;
2288 
2289 		hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
2290 		memcpy(cp.peer_irk, hdev->irk, 16);
2291 		goto done;
2292 	}
2293 
2294 	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2295 	if (!irk)
2296 		return 0;
2297 
2298 	/* Check if the IRK has _not_ been programmed yet. */
2299 	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
2300 						&params->addr,
2301 						params->addr_type);
2302 	if (entry)
2303 		return 0;
2304 
2305 	cp.bdaddr_type = params->addr_type;
2306 	bacpy(&cp.bdaddr, &params->addr);
2307 	memcpy(cp.peer_irk, irk->val, 16);
2308 
2309 	/* Default privacy mode is always Network */
2310 	params->privacy_mode = HCI_NETWORK_PRIVACY;
2311 
2312 	rcu_read_lock();
2313 	p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2314 				      &params->addr, params->addr_type);
2315 	if (!p)
2316 		p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2317 					      &params->addr, params->addr_type);
2318 	if (p)
2319 		WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY);
2320 	rcu_read_unlock();
2321 
2322 done:
2323 	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
2324 		memcpy(cp.local_irk, hdev->irk, 16);
2325 	else
2326 		memset(cp.local_irk, 0, 16);
2327 
2328 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
2329 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2330 }
2331 
2332 /* Set Device Privacy Mode. */
2333 static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
2334 					struct conn_params *params)
2335 {
2336 	struct hci_cp_le_set_privacy_mode cp;
2337 	struct smp_irk *irk;
2338 
2339 	/* If device privacy mode has already been set there is nothing to do */
2340 	if (params->privacy_mode == HCI_DEVICE_PRIVACY)
2341 		return 0;
2342 
2343 	/* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
2344 	 * indicates that LL Privacy has been enabled and
2345 	 * HCI_OP_LE_SET_PRIVACY_MODE is supported.
2346 	 */
2347 	if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY))
2348 		return 0;
2349 
2350 	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2351 	if (!irk)
2352 		return 0;
2353 
2354 	memset(&cp, 0, sizeof(cp));
2355 	cp.bdaddr_type = irk->addr_type;
2356 	bacpy(&cp.bdaddr, &irk->bdaddr);
2357 	cp.mode = HCI_DEVICE_PRIVACY;
2358 
2359 	/* Note: params->privacy_mode is not updated since it is a copy */
2360 
2361 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
2362 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2363 }
2364 
2365 /* Adds a device to the accept list if needed. If the device uses an RPA
2366  * (has an IRK) this also attempts to program it into the resolving list
2367  * and to set the privacy mode properly.
2368  */
2369 static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
2370 				       struct conn_params *params,
2371 				       u8 *num_entries)
2372 {
2373 	struct hci_cp_le_add_to_accept_list cp;
2374 	int err;
2375 
2376 	/* During suspend, only wakeable devices can be in acceptlist */
2377 	if (hdev->suspended &&
2378 	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
2379 		hci_le_del_accept_list_sync(hdev, &params->addr,
2380 					    params->addr_type);
2381 		return 0;
2382 	}
2383 
2384 	/* If the accept list is full the caller falls back to accepting all advertising */
2385 	if (*num_entries >= hdev->le_accept_list_size)
2386 		return -ENOSPC;
2387 
2388 	/* The accept list cannot be used with RPAs */
2389 	if (!use_ll_privacy(hdev) &&
2390 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
2391 		return -EINVAL;
2392 
2393 	/* Attempt to program the device in the resolving list first, to avoid
2394 	 * having to roll back in case it fails, since the resolving list is
2395 	 * dynamic and can probably be smaller than the accept list.
2396 	 */
2397 	err = hci_le_add_resolve_list_sync(hdev, params);
2398 	if (err) {
2399 		bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
2400 		return err;
2401 	}
2402 
2403 	/* Set Privacy Mode */
2404 	err = hci_le_set_privacy_mode_sync(hdev, params);
2405 	if (err) {
2406 		bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
2407 		return err;
2408 	}
2409 
2410 	/* Check if already in accept list */
2411 	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
2412 				   params->addr_type))
2413 		return 0;
2414 
2415 	*num_entries += 1;
2416 	cp.bdaddr_type = params->addr_type;
2417 	bacpy(&cp.bdaddr, &params->addr);
2418 
2419 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
2420 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2421 	if (err) {
2422 		bt_dev_err(hdev, "Unable to add to allow list: %d", err);
2423 		/* Rollback the device from the resolving list */
2424 		hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2425 		return err;
2426 	}
2427 
2428 	bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
2429 		   cp.bdaddr_type);
2430 
2431 	return 0;
2432 }
2433 
2434 /* This function disables/pauses all advertising instances */
2435 static int hci_pause_advertising_sync(struct hci_dev *hdev)
2436 {
2437 	int err;
2438 	int old_state;
2439 
2440 	/* If advertising has already been paused there is nothing to do. */
2441 	if (hdev->advertising_paused)
2442 		return 0;
2443 
2444 	bt_dev_dbg(hdev, "Pausing directed advertising");
2445 
2446 	/* Stop directed advertising */
2447 	old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
2448 	if (old_state) {
2449 		/* When discoverable timeout triggers, then just make sure
2450 		 * the limited discoverable flag is cleared. Even in the case
2451 		 * of a timeout triggered from general discoverable, it is
2452 		 * safe to unconditionally clear the flag.
2453 		 */
2454 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2455 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2456 		hdev->discov_timeout = 0;
2457 	}
2458 
2459 	bt_dev_dbg(hdev, "Pausing advertising instances");
2460 
2461 	/* Call to disable any advertisements active on the controller.
2462 	 * This will succeed even if no advertisements are configured.
2463 	 */
2464 	err = hci_disable_advertising_sync(hdev);
2465 	if (err)
2466 		return err;
2467 
2468 	/* If we are using software rotation, pause the loop */
2469 	if (!ext_adv_capable(hdev))
2470 		cancel_adv_timeout(hdev);
2471 
2472 	hdev->advertising_paused = true;
2473 	hdev->advertising_old_state = old_state;
2474 
2475 	return 0;
2476 }
2477 
2478 /* This function enables all user advertising instances */
2479 static int hci_resume_advertising_sync(struct hci_dev *hdev)
2480 {
2481 	struct adv_info *adv, *tmp;
2482 	int err = 0;
2483 
2484 	/* If advertising has not been paused there is nothing to do. */
2485 	if (!hdev->advertising_paused)
2486 		return 0;
2487 
2488 	/* Resume directed advertising */
2489 	hdev->advertising_paused = false;
2490 	if (hdev->advertising_old_state) {
2491 		hci_dev_set_flag(hdev, HCI_ADVERTISING);
2492 		hdev->advertising_old_state = 0;
2493 	}
2494 
2495 	bt_dev_dbg(hdev, "Resuming advertising instances");
2496 
2497 	if (ext_adv_capable(hdev)) {
2498 		/* Call for each tracked instance to be re-enabled */
2499 		list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
2500 			err = hci_enable_ext_advertising_sync(hdev,
2501 							      adv->instance);
2502 			if (!err)
2503 				continue;
2504 
2505 			/* If the instance cannot be resumed remove it */
2506 			hci_remove_ext_adv_instance_sync(hdev, adv->instance,
2507 							 NULL);
2508 		}
2509 	} else {
2510 		/* Schedule the most recent instance to be restarted and begin
2511 		 * the software rotation loop
2512 		 */
2513 		err = hci_schedule_adv_instance_sync(hdev,
2514 						     hdev->cur_adv_instance,
2515 						     true);
2516 	}
2517 
2518 	hdev->advertising_paused = false;
2519 
2520 	return err;
2521 }
2522 
2523 static int hci_pause_addr_resolution(struct hci_dev *hdev)
2524 {
2525 	int err;
2526 
2527 	if (!use_ll_privacy(hdev))
2528 		return 0;
2529 
2530 	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2531 		return 0;
2532 
2533 	/* Cannot disable addr resolution if scanning is enabled or
2534 	 * when initiating an LE connection.
2535 	 */
2536 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2537 	    hci_lookup_le_connect(hdev)) {
2538 		bt_dev_err(hdev, "Command not allowed when scan/LE connect");
2539 		return -EPERM;
2540 	}
2541 
2542 	/* Cannot disable addr resolution if advertising is enabled. */
2543 	err = hci_pause_advertising_sync(hdev);
2544 	if (err) {
2545 		bt_dev_err(hdev, "Pause advertising failed: %d", err);
2546 		return err;
2547 	}
2548 
2549 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2550 	if (err)
2551 		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
2552 			   err);
2553 
2554 	/* Keep advertising paused if resolution was disabled and RPAs are in use. */
2555 	if (!err && scan_use_rpa(hdev))
2556 		return 0;
2557 
2558 	hci_resume_advertising_sync(hdev);
2559 	return err;
2560 }
2561 
2562 struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
2563 					     bool extended, struct sock *sk)
2564 {
2565 	u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
2566 					HCI_OP_READ_LOCAL_OOB_DATA;
2567 
2568 	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
2569 }
2570 
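/* Two-pass snapshot: count the list entries under RCU, allocate with
 * kvcalloc() (the pending lists can be large), then copy the fields
 * under RCU again. Entries added between the two passes are skipped
 * here and picked up by the next scan update.
 */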
2571 static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
2572 {
2573 	struct hci_conn_params *params;
2574 	struct conn_params *p;
2575 	size_t i;
2576 
2577 	rcu_read_lock();
2578 
2579 	i = 0;
2580 	list_for_each_entry_rcu(params, list, action)
2581 		++i;
2582 	*n = i;
2583 
2584 	rcu_read_unlock();
2585 
2586 	p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
2587 	if (!p)
2588 		return NULL;
2589 
2590 	rcu_read_lock();
2591 
2592 	i = 0;
2593 	list_for_each_entry_rcu(params, list, action) {
2594 		/* Racing adds are handled in next scan update */
2595 		if (i >= *n)
2596 			break;
2597 
2598 		/* No hdev->lock, but: addr, addr_type are immutable.
2599 		 * privacy_mode is only written by us or in
2600 		 * hci_cc_le_set_privacy_mode that we wait for.
2601 		 * We should be idempotent so MGMT updating flags
2602 		 * while we are processing is OK.
2603 		 */
2604 		bacpy(&p[i].addr, &params->addr);
2605 		p[i].addr_type = params->addr_type;
2606 		p[i].flags = READ_ONCE(params->flags);
2607 		p[i].privacy_mode = READ_ONCE(params->privacy_mode);
2608 		++i;
2609 	}
2610 
2611 	rcu_read_unlock();
2612 
2613 	*n = i;
2614 	return p;
2615 }
2616 
2617 /* Clear LE Accept List */
2618 static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
2619 {
2620 	if (!(hdev->commands[26] & 0x80))
2621 		return 0;
2622 
2623 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
2624 				     HCI_CMD_TIMEOUT);
2625 }
2626 
2627 /* Device must not be scanning when updating the accept list.
2628  *
2629  * Update is done using the following sequence:
2630  *
2631  * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
2632  * Remove Devices From Accept List ->
2633  * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
2634  * Add Devices to Accept List ->
2635  * (has IRK && use_ll_privacy(Add Devices to Resolving List)) ->
2636  * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
2637  * Enable Scanning
2638  *
2639  * In case of failure advertising shall be restored to its original state
2640  * and the returned filter policy disables the accept list, since either
2641  * the accept list or the resolving list could not be programmed.
2642  *
2643  */
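/* Returns the filter policy to use: 0x01 if the accept list was fully
 * programmed, 0x00 (accept all advertising, host filters) if it could
 * not be used.
 */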
2644 static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
2645 {
2646 	struct conn_params *params;
2647 	struct bdaddr_list *b, *t;
2648 	u8 num_entries = 0;
2649 	bool pend_conn, pend_report;
2650 	u8 filter_policy;
2651 	size_t i, n;
2652 	int err;
2653 
2654 	/* Pause advertising if resolving list can be used as controllers
2655 	 * cannot accept resolving list modifications while advertising.
2656 	 */
2657 	if (use_ll_privacy(hdev)) {
2658 		err = hci_pause_advertising_sync(hdev);
2659 		if (err) {
2660 			bt_dev_err(hdev, "pause advertising failed: %d", err);
2661 			return 0x00;
2662 		}
2663 	}
2664 
2665 	/* Disable address resolution while reprogramming accept list since
2666 	 * devices that do have an IRK will be programmed in the resolving list
2667 	 * when LL Privacy is enabled.
2668 	 */
2669 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2670 	if (err) {
2671 		bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
2672 		goto done;
2673 	}
2674 
2675 	/* Force address filtering if PA Sync is in progress */
2676 	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
2677 		struct hci_cp_le_pa_create_sync *sent;
2678 
2679 		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC);
2680 		if (sent) {
2681 			struct conn_params pa;
2682 
2683 			memset(&pa, 0, sizeof(pa));
2684 
2685 			bacpy(&pa.addr, &sent->addr);
2686 			pa.addr_type = sent->addr_type;
2687 
2688 			/* Clear first since there could be addresses left
2689 			 * behind.
2690 			 */
2691 			hci_le_clear_accept_list_sync(hdev);
2692 
2693 			num_entries = 1;
2694 			err = hci_le_add_accept_list_sync(hdev, &pa,
2695 							  &num_entries);
2696 			goto done;
2697 		}
2698 	}
2699 
2700 	/* Go through the current accept list programmed into the
2701 	 * controller one by one and check if that address is connected or is
2702 	 * still in the list of pending connections or list of devices to
2703 	 * report. If not present in either list, then remove it from
2704 	 * the controller.
2705 	 */
2706 	list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
2707 		if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
2708 			continue;
2709 
2710 		/* Pointers not dereferenced, no locks needed */
2711 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2712 						      &b->bdaddr,
2713 						      b->bdaddr_type);
2714 		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2715 							&b->bdaddr,
2716 							b->bdaddr_type);
2717 
2718 		/* If the device is not likely to connect or report,
2719 		 * remove it from the acceptlist.
2720 		 */
2721 		if (!pend_conn && !pend_report) {
2722 			hci_le_del_accept_list_sync(hdev, &b->bdaddr,
2723 						    b->bdaddr_type);
2724 			continue;
2725 		}
2726 
2727 		num_entries++;
2728 	}
2729 
2730 	/* Since all accept list entries that are no longer valid have
2731 	 * been removed, walk through the list of pending connections
2732 	 * and ensure that any new device gets programmed into
2733 	 * the controller.
2734 	 *
2735 	 * If the list of devices is larger than the number of
2736 	 * available accept list entries in the controller, then
2737 	 * just abort and return a filter policy value that does
2738 	 * not use the accept list.
2739 	 *
2740 	 * The list and params may be mutated while we wait for events,
2741 	 * so make a copy and iterate it.
2742 	 */
2743 
2744 	params = conn_params_copy(&hdev->pend_le_conns, &n);
2745 	if (!params) {
2746 		err = -ENOMEM;
2747 		goto done;
2748 	}
2749 
2750 	for (i = 0; i < n; ++i) {
2751 		err = hci_le_add_accept_list_sync(hdev, &params[i],
2752 						  &num_entries);
2753 		if (err) {
2754 			kvfree(params);
2755 			goto done;
2756 		}
2757 	}
2758 
2759 	kvfree(params);
2760 
2761 	/* After adding all new pending connections, walk through
2762 	 * the list of pending reports and also add these to the
2763 	 * accept list if there is still space. Abort if space runs out.
2764 	 */
2765 
2766 	params = conn_params_copy(&hdev->pend_le_reports, &n);
2767 	if (!params) {
2768 		err = -ENOMEM;
2769 		goto done;
2770 	}
2771 
2772 	for (i = 0; i < n; ++i) {
2773 		err = hci_le_add_accept_list_sync(hdev, &params[i],
2774 						  &num_entries);
2775 		if (err) {
2776 			kvfree(params);
2777 			goto done;
2778 		}
2779 	}
2780 
2781 	kvfree(params);
2782 
2783 	/* Use the allowlist unless the following conditions are all true:
2784 	 * - We are not currently suspending
2785 	 * - There are 1 or more ADV monitors registered and it's not offloaded
2786 	 * - Interleaved scanning is not currently using the allowlist
2787 	 */
2788 	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
2789 	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
2790 	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
2791 		err = -EINVAL;
2792 
2793 done:
2794 	filter_policy = err ? 0x00 : 0x01;
2795 
2796 	/* Enable address resolution when LL Privacy is enabled. */
2797 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
2798 	if (err)
2799 		bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
2800 
2801 	/* Resume advertising if it was paused */
2802 	if (use_ll_privacy(hdev))
2803 		hci_resume_advertising_sync(hdev);
2804 
2805 	/* Return the selected filter policy */
2806 	return filter_policy;
2807 }
2808 
2809 static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
2810 				   u8 type, u16 interval, u16 window)
2811 {
2812 	cp->type = type;
2813 	cp->interval = cpu_to_le16(interval);
2814 	cp->window = cpu_to_le16(window);
2815 }
2816 
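/* Build a variable length LE Set Extended Scan Parameters PDU with one
 * hci_cp_le_scan_phy_params block per scanning PHY (1M and/or Coded).
 * Interval and window are tripled for the Coded PHY, presumably to
 * compensate for the longer airtime of coded packets.
 */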
2817 static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
2818 					  u16 interval, u16 window,
2819 					  u8 own_addr_type, u8 filter_policy)
2820 {
2821 	struct hci_cp_le_set_ext_scan_params *cp;
2822 	struct hci_cp_le_scan_phy_params *phy;
2823 	u8 data[sizeof(*cp) + sizeof(*phy) * 2];
2824 	u8 num_phy = 0x00;
2825 
2826 	cp = (void *)data;
2827 	phy = (void *)cp->data;
2828 
2829 	memset(data, 0, sizeof(data));
2830 
2831 	cp->own_addr_type = own_addr_type;
2832 	cp->filter_policy = filter_policy;
2833 
2834 	/* If PA Sync is in progress, select the PHYs based on the
2835 	 * hci_conn.iso_qos of the ISO connection.
2836 	 */
2837 	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
2838 		struct hci_cp_le_add_to_accept_list *sent;
2839 
2840 		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
2841 		if (sent) {
2842 			struct hci_conn *conn;
2843 
2844 			conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK,
2845 						       &sent->bdaddr);
2846 			if (conn) {
2847 				struct bt_iso_qos *qos = &conn->iso_qos;
2848 
2849 				if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
2850 				    qos->bcast.in.phy & BT_ISO_PHY_2M) {
2851 					cp->scanning_phys |= LE_SCAN_PHY_1M;
2852 					hci_le_scan_phy_params(phy, type,
2853 							       interval,
2854 							       window);
2855 					num_phy++;
2856 					phy++;
2857 				}
2858 
2859 				if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
2860 					cp->scanning_phys |= LE_SCAN_PHY_CODED;
2861 					hci_le_scan_phy_params(phy, type,
2862 							       interval * 3,
2863 							       window * 3);
2864 					num_phy++;
2865 					phy++;
2866 				}
2867 
2868 				if (num_phy)
2869 					goto done;
2870 			}
2871 		}
2872 	}
2873 
2874 	if (scan_1m(hdev) || scan_2m(hdev)) {
2875 		cp->scanning_phys |= LE_SCAN_PHY_1M;
2876 		hci_le_scan_phy_params(phy, type, interval, window);
2877 		num_phy++;
2878 		phy++;
2879 	}
2880 
2881 	if (scan_coded(hdev)) {
2882 		cp->scanning_phys |= LE_SCAN_PHY_CODED;
2883 		hci_le_scan_phy_params(phy, type, interval * 3, window * 3);
2884 		num_phy++;
2885 		phy++;
2886 	}
2887 
2888 done:
2889 	if (!num_phy)
2890 		return -EINVAL;
2891 
2892 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
2893 				     sizeof(*cp) + sizeof(*phy) * num_phy,
2894 				     data, HCI_CMD_TIMEOUT);
2895 }
2896 
2897 static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
2898 				      u16 interval, u16 window,
2899 				      u8 own_addr_type, u8 filter_policy)
2900 {
2901 	struct hci_cp_le_set_scan_param cp;
2902 
2903 	if (use_ext_scan(hdev))
2904 		return hci_le_set_ext_scan_param_sync(hdev, type, interval,
2905 						      window, own_addr_type,
2906 						      filter_policy);
2907 
2908 	memset(&cp, 0, sizeof(cp));
2909 	cp.type = type;
2910 	cp.interval = cpu_to_le16(interval);
2911 	cp.window = cpu_to_le16(window);
2912 	cp.own_address_type = own_addr_type;
2913 	cp.filter_policy = filter_policy;
2914 
2915 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
2916 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2917 }
2918 
2919 static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
2920 			       u16 window, u8 own_addr_type, u8 filter_policy,
2921 			       u8 filter_dup)
2922 {
2923 	int err;
2924 
2925 	if (hdev->scanning_paused) {
2926 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2927 		return 0;
2928 	}
2929 
2930 	err = hci_le_set_scan_param_sync(hdev, type, interval, window,
2931 					 own_addr_type, filter_policy);
2932 	if (err)
2933 		return err;
2934 
2935 	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
2936 }
2937 
2938 static int hci_passive_scan_sync(struct hci_dev *hdev)
2939 {
2940 	u8 own_addr_type;
2941 	u8 filter_policy;
2942 	u16 window, interval;
2943 	u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE;
2944 	int err;
2945 
2946 	if (hdev->scanning_paused) {
2947 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2948 		return 0;
2949 	}
2950 
2951 	err = hci_scan_disable_sync(hdev);
2952 	if (err) {
2953 		bt_dev_err(hdev, "disable scanning failed: %d", err);
2954 		return err;
2955 	}
2956 
2957 	/* Set require_privacy to false since no SCAN_REQ are sent
2958 	 * during passive scanning. Not using a non-resolvable address
2959 	 * here is important so that peer devices using direct
2960 	 * advertising with our address will be correctly reported
2961 	 * by the controller.
2962 	 */
2963 	if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
2964 					   &own_addr_type))
2965 		return 0;
2966 
2967 	if (hdev->enable_advmon_interleave_scan &&
2968 	    hci_update_interleaved_scan_sync(hdev))
2969 		return 0;
2970 
2971 	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
2972 
2973 	/* Adding or removing entries from the accept list must
2974 	 * happen before enabling scanning. The controller does
2975 	 * not allow accept list modification while scanning.
2976 	 */
2977 	filter_policy = hci_update_accept_list_sync(hdev);
2978 
2979 	/* If suspended and filter_policy set to 0x00 (no acceptlist) then
2980 	 * passive scanning cannot be started since that would require the host
2981 	 * to be woken up to process the reports.
2982 	 */
2983 	if (hdev->suspended && !filter_policy) {
2984 		/* Check if accept list is empty then there is no need to scan
2985 		 * while suspended.
2986 		 */
2987 		if (list_empty(&hdev->le_accept_list))
2988 			return 0;
2989 
2990 		/* If there are devices in the accept_list then some devices
2991 		 * could not be programmed, which in the non-suspended case
2992 		 * means filter_policy needs to be set to 0x00 so the host
2993 		 * filters instead. Since this is the suspended case, ignore
2994 		 * the devices needing host filtering so that the devices in
2995 		 * the accept list are able to wake up the system.
2996 		 */
2997 		filter_policy = 0x01;
2998 	}
2999 
3000 	/* When the controller is using random resolvable addresses and
3001 	 * thus has LE privacy enabled, controllers with Extended Scanner
3002 	 * Filter Policies support can now enable support for handling
3003 	 * directed advertising.
3004 	 *
3005 	 * So instead of using filter polices 0x00 (no acceptlist)
3006 	 * and 0x01 (acceptlist enabled) use the new filter policies
3007 	 * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
3008 	 */
3009 	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
3010 	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
3011 		filter_policy |= 0x02;
3012 
3013 	if (hdev->suspended) {
3014 		window = hdev->le_scan_window_suspend;
3015 		interval = hdev->le_scan_int_suspend;
3016 	} else if (hci_is_le_conn_scanning(hdev)) {
3017 		window = hdev->le_scan_window_connect;
3018 		interval = hdev->le_scan_int_connect;
3019 	} else if (hci_is_adv_monitoring(hdev)) {
3020 		window = hdev->le_scan_window_adv_monitor;
3021 		interval = hdev->le_scan_int_adv_monitor;
3022 
3023 		/* Disable duplicates filter when scanning for advertisement
3024 		 * monitor for the following reasons.
3025 		 *
3026 		 * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm
3027 		 * controllers ignore RSSI_Sampling_Period when the duplicates
3028 		 * filter is enabled.
3029 		 *
3030 		 * For SW pattern filtering, when we're not doing interleaved
3031 		 * scanning, it is necessary to disable duplicates filter,
3032 		 * otherwise hosts can only receive one advertisement and it's
3033 		 * impossible to know if a peer is still in range.
3034 		 */
3035 		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
3036 	} else {
3037 		window = hdev->le_scan_window;
3038 		interval = hdev->le_scan_interval;
3039 	}
3040 
3041 	/* Disable all filtering for Mesh */
3042 	if (hci_dev_test_flag(hdev, HCI_MESH)) {
3043 		filter_policy = 0;
3044 		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
3045 	}
3046 
3047 	bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
3048 
3049 	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
3050 				   own_addr_type, filter_policy, filter_dups);
3051 }
3052 
3053 /* This function controls the passive scanning based on the
3054  * hdev->pend_le_conns list. If there are pending LE connections we start
3055  * the background scanning, otherwise we stop it, in the following sequence:
3056  *
3057  * If there are devices to scan:
3058  *
3059  * Disable Scanning -> Update Accept List ->
3060  * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
3061  * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
3062  * Enable Scanning
3063  *
3064  * Otherwise:
3065  *
3066  * Disable Scanning
3067  */
3068 int hci_update_passive_scan_sync(struct hci_dev *hdev)
3069 {
3070 	int err;
3071 
3072 	if (!test_bit(HCI_UP, &hdev->flags) ||
3073 	    test_bit(HCI_INIT, &hdev->flags) ||
3074 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
3075 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
3076 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
3077 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
3078 		return 0;
3079 
3080 	/* No point in doing scanning if LE support hasn't been enabled */
3081 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
3082 		return 0;
3083 
3084 	/* If discovery is active don't interfere with it */
3085 	if (hdev->discovery.state != DISCOVERY_STOPPED)
3086 		return 0;
3087 
3088 	/* Reset RSSI and UUID filters when starting background scanning
3089 	 * since these filters are meant for service discovery only.
3090 	 *
3091 	 * The Start Discovery and Start Service Discovery operations
3092 	 * ensure to set proper values for RSSI threshold and UUID
3093 	 * filter list. So it is safe to just reset them here.
3094 	 */
3095 	hci_discovery_filter_clear(hdev);
3096 
3097 	bt_dev_dbg(hdev, "ADV monitoring is %s",
3098 		   hci_is_adv_monitoring(hdev) ? "on" : "off");
3099 
3100 	if (!hci_dev_test_flag(hdev, HCI_MESH) &&
3101 	    list_empty(&hdev->pend_le_conns) &&
3102 	    list_empty(&hdev->pend_le_reports) &&
3103 	    !hci_is_adv_monitoring(hdev) &&
3104 	    !hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
3105 		/* If there are no pending LE connections, no devices to
3106 		 * be scanned for and no ADV monitors, we should stop the
3107 		 * background scanning.
3108 		 */
3109 
3110 		bt_dev_dbg(hdev, "stopping background scanning");
3111 
3112 		err = hci_scan_disable_sync(hdev);
3113 		if (err)
3114 			bt_dev_err(hdev, "stop background scanning failed: %d",
3115 				   err);
3116 	} else {
3117 		/* If there is at least one pending LE connection, we should
3118 		 * keep the background scan running.
3119 		 */
3120 
3121 		/* If controller is connecting, we should not start scanning
3122 		 * since some controllers are not able to scan and connect at
3123 		 * the same time.
3124 		 */
3125 		if (hci_lookup_le_connect(hdev))
3126 			return 0;
3127 
3128 		bt_dev_dbg(hdev, "start background scanning");
3129 
3130 		err = hci_passive_scan_sync(hdev);
3131 		if (err)
3132 			bt_dev_err(hdev, "start background scanning failed: %d",
3133 				   err);
3134 	}
3135 
3136 	return err;
3137 }
3138 
3139 static int update_scan_sync(struct hci_dev *hdev, void *data)
3140 {
3141 	return hci_update_scan_sync(hdev);
3142 }
3143 
3144 int hci_update_scan(struct hci_dev *hdev)
3145 {
3146 	return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL);
3147 }
3148 
3149 static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
3150 {
3151 	return hci_update_passive_scan_sync(hdev);
3152 }
3153 
3154 int hci_update_passive_scan(struct hci_dev *hdev)
3155 {
3156 	/* Only queue if it would have any effect */
3157 	if (!test_bit(HCI_UP, &hdev->flags) ||
3158 	    test_bit(HCI_INIT, &hdev->flags) ||
3159 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
3160 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
3161 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
3162 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
3163 		return 0;
3164 
3165 	return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
3166 				       NULL);
3167 }
3168 
3169 int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
3170 {
3171 	int err;
3172 
3173 	if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
3174 		return 0;
3175 
3176 	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
3177 				    sizeof(val), &val, HCI_CMD_TIMEOUT);
3178 
3179 	if (!err) {
3180 		if (val) {
3181 			hdev->features[1][0] |= LMP_HOST_SC;
3182 			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
3183 		} else {
3184 			hdev->features[1][0] &= ~LMP_HOST_SC;
3185 			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
3186 		}
3187 	}
3188 
3189 	return err;
3190 }
3191 
3192 int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
3193 {
3194 	int err;
3195 
3196 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
3197 	    lmp_host_ssp_capable(hdev))
3198 		return 0;
3199 
3200 	if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
3201 		__hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
3202 				      sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3203 	}
3204 
3205 	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3206 				    sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3207 	if (err)
3208 		return err;
3209 
3210 	return hci_write_sc_support_sync(hdev, 0x01);
3211 }
3212 
3213 int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul)
3214 {
3215 	struct hci_cp_write_le_host_supported cp;
3216 
3217 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
3218 	    !lmp_bredr_capable(hdev))
3219 		return 0;
3220 
3221 	/* Check first if we already have the right host state
3222 	 * (host features set)
3223 	 */
3224 	if (le == lmp_host_le_capable(hdev) &&
3225 	    simul == lmp_host_le_br_capable(hdev))
3226 		return 0;
3227 
3228 	memset(&cp, 0, sizeof(cp));
3229 
3230 	cp.le = le;
3231 	cp.simul = simul;
3232 
3233 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3234 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3235 }
3236 
3237 static int hci_powered_update_adv_sync(struct hci_dev *hdev)
3238 {
3239 	struct adv_info *adv, *tmp;
3240 	int err;
3241 
3242 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
3243 		return 0;
3244 
3245 	/* If RPA Resolution has not been enabled yet it means the
3246 	 * resolving list is empty and we should attempt to program the
3247 	 * local IRK in order to support using own_addr_type
3248 	 * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
3249 	 */
3250 	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
3251 		hci_le_add_resolve_list_sync(hdev, NULL);
3252 		hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
3253 	}
3254 
3255 	/* Make sure the controller has a good default for
3256 	 * advertising data. This also applies to the case
3257 	 * where BR/EDR was toggled during the AUTO_OFF phase.
3258 	 */
3259 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3260 	    list_empty(&hdev->adv_instances)) {
3261 		if (ext_adv_capable(hdev)) {
3262 			err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
3263 			if (!err)
3264 				hci_update_scan_rsp_data_sync(hdev, 0x00);
3265 		} else {
3266 			err = hci_update_adv_data_sync(hdev, 0x00);
3267 			if (!err)
3268 				hci_update_scan_rsp_data_sync(hdev, 0x00);
3269 		}
3270 
3271 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
3272 			hci_enable_advertising_sync(hdev);
3273 	}
3274 
3275 	/* Call for each tracked instance to be scheduled */
3276 	list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
3277 		hci_schedule_adv_instance_sync(hdev, adv->instance, true);
3278 
3279 	return 0;
3280 }
3281 
3282 static int hci_write_auth_enable_sync(struct hci_dev *hdev)
3283 {
3284 	u8 link_sec;
3285 
3286 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3287 	if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
3288 		return 0;
3289 
3290 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
3291 				     sizeof(link_sec), &link_sec,
3292 				     HCI_CMD_TIMEOUT);
3293 }
3294 
3295 int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
3296 {
3297 	struct hci_cp_write_page_scan_activity cp;
3298 	u8 type;
3299 	int err = 0;
3300 
3301 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3302 		return 0;
3303 
3304 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3305 		return 0;
3306 
3307 	memset(&cp, 0, sizeof(cp));
3308 
3309 	if (enable) {
3310 		type = PAGE_SCAN_TYPE_INTERLACED;
3311 
3312 		/* 160 msec page scan interval (0x0100 * 0.625 ms) */
3313 		cp.interval = cpu_to_le16(0x0100);
3314 	} else {
3315 		type = hdev->def_page_scan_type;
3316 		cp.interval = cpu_to_le16(hdev->def_page_scan_int);
3317 	}
3318 
3319 	cp.window = cpu_to_le16(hdev->def_page_scan_window);
3320 
3321 	if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
3322 	    __cpu_to_le16(hdev->page_scan_window) != cp.window) {
3323 		err = __hci_cmd_sync_status(hdev,
3324 					    HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
3325 					    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3326 		if (err)
3327 			return err;
3328 	}
3329 
3330 	if (hdev->page_scan_type != type)
3331 		err = __hci_cmd_sync_status(hdev,
3332 					    HCI_OP_WRITE_PAGE_SCAN_TYPE,
3333 					    sizeof(type), &type,
3334 					    HCI_CMD_TIMEOUT);
3335 
3336 	return err;
3337 }
3338 
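/* Returns true if any device on the BR/EDR accept list is currently
 * disconnected (or not yet fully connected), in which case page scan
 * must remain enabled so those devices are able to reconnect.
 */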
3339 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
3340 {
3341 	struct bdaddr_list *b;
3342 
3343 	list_for_each_entry(b, &hdev->accept_list, list) {
3344 		struct hci_conn *conn;
3345 
3346 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
3347 		if (!conn)
3348 			return true;
3349 
3350 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3351 			return true;
3352 	}
3353 
3354 	return false;
3355 }
3356 
3357 static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
3358 {
3359 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3360 					    sizeof(val), &val,
3361 					    HCI_CMD_TIMEOUT);
3362 }
3363 
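/* Recompute the BR/EDR scan mode: page scan stays on while the device
 * is connectable or an accept list entry is disconnected, inquiry scan
 * is added while discoverable, and the command is skipped when the
 * requested mode already matches the HCI_PSCAN/HCI_ISCAN flags.
 */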
3364 int hci_update_scan_sync(struct hci_dev *hdev)
3365 {
3366 	u8 scan;
3367 
3368 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3369 		return 0;
3370 
3371 	if (!hdev_is_powered(hdev))
3372 		return 0;
3373 
3374 	if (mgmt_powering_down(hdev))
3375 		return 0;
3376 
3377 	if (hdev->scanning_paused)
3378 		return 0;
3379 
3380 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
3381 	    disconnected_accept_list_entries(hdev))
3382 		scan = SCAN_PAGE;
3383 	else
3384 		scan = SCAN_DISABLED;
3385 
3386 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
3387 		scan |= SCAN_INQUIRY;
3388 
3389 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
3390 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
3391 		return 0;
3392 
3393 	return hci_write_scan_enable_sync(hdev, scan);
3394 }
3395 
3396 int hci_update_name_sync(struct hci_dev *hdev)
3397 {
3398 	struct hci_cp_write_local_name cp;
3399 
3400 	memset(&cp, 0, sizeof(cp));
3401 
3402 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3403 
3404 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
3405 					    sizeof(cp), &cp,
3406 					    HCI_CMD_TIMEOUT);
3407 }
3408 
3409 /* This function performs the powered update HCI command sequence after the
3410  * HCI init sequence, which ends up resetting all states; the sequence is:
3411  *
3412  * HCI_SSP_ENABLED(Enable SSP)
3413  * HCI_LE_ENABLED(Enable LE)
3414  * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) ->
3415  * Update adv data)
3416  * Enable Authentication
3417  * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
3418  * Set Name -> Set EIR)
3419  * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address)
3420  */
3421 int hci_powered_update_sync(struct hci_dev *hdev)
3422 {
3423 	int err;
3424 
3425 	/* Register the available SMP channels (BR/EDR and LE) only when
3426 	 * successfully powering on the controller. This late
3427 	 * registration is required so that LE SMP can clearly decide if
3428 	 * the public address or static address is used.
3429 	 */
3430 	smp_register(hdev);
3431 
3432 	err = hci_write_ssp_mode_sync(hdev, 0x01);
3433 	if (err)
3434 		return err;
3435 
3436 	err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
3437 	if (err)
3438 		return err;
3439 
3440 	err = hci_powered_update_adv_sync(hdev);
3441 	if (err)
3442 		return err;
3443 
3444 	err = hci_write_auth_enable_sync(hdev);
3445 	if (err)
3446 		return err;
3447 
3448 	if (lmp_bredr_capable(hdev)) {
3449 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3450 			hci_write_fast_connectable_sync(hdev, true);
3451 		else
3452 			hci_write_fast_connectable_sync(hdev, false);
3453 		hci_update_scan_sync(hdev);
3454 		hci_update_class_sync(hdev);
3455 		hci_update_name_sync(hdev);
3456 		hci_update_eir_sync(hdev);
3457 	}
3458 
3459 	/* If forcing static address is in use or there is no public
3460 	 * address, use the static address as random address (but skip
3461 	 * the HCI command if the current random address is already the
3462 	 * static one).
3463 	 *
3464 	 * In case BR/EDR has been disabled on a dual-mode controller
3465 	 * and a static address has been configured, then use that
3466 	 * address instead of the public BR/EDR address.
3467 	 */
3468 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3469 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3470 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) {
3471 		if (bacmp(&hdev->static_addr, BDADDR_ANY))
3472 			return hci_set_random_addr_sync(hdev,
3473 							&hdev->static_addr);
3474 	}
3475 
3476 	return 0;
3477 }
3478 
3479 /**
3480  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
3481  *				       (BD_ADDR) for a HCI device from
3482  *				       a firmware node property.
3483  * @hdev:	The HCI device
3484  *
3485  * Search the firmware node for 'local-bd-address'.
3486  *
3487  * All-zero BD addresses are rejected, because those could be properties
3488  * that exist in the firmware tables, but were not updated by the firmware. For
3489  * example, the DTS could define 'local-bd-address', with zero BD addresses.
3490  */
3491 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
3492 {
3493 	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
3494 	bdaddr_t ba;
3495 	int ret;
3496 
3497 	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
3498 					    (u8 *)&ba, sizeof(ba));
3499 	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
3500 		return;
3501 
3502 	if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks))
3503 		baswap(&hdev->public_addr, &ba);
3504 	else
3505 		bacpy(&hdev->public_addr, &ba);
3506 }
3507 
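/* A single step of a controller init sequence. Tables of these are
 * executed in order by hci_init_stage_sync() and are terminated by an
 * entry with a NULL func, typically built with the HCI_INIT() macro
 * defined below.
 */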
3508 struct hci_init_stage {
3509 	int (*func)(struct hci_dev *hdev);
3510 };
3511 
3512 /* Run a NULL-terminated table of init stage functions */
3513 static int hci_init_stage_sync(struct hci_dev *hdev,
3514 			       const struct hci_init_stage *stage)
3515 {
3516 	size_t i;
3517 
3518 	for (i = 0; stage[i].func; i++) {
3519 		int err;
3520 
3521 		err = stage[i].func(hdev);
3522 		if (err)
3523 			return err;
3524 	}
3525 
3526 	return 0;
3527 }
3528 
3529 /* Read Local Version */
3530 static int hci_read_local_version_sync(struct hci_dev *hdev)
3531 {
3532 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION,
3533 				     0, NULL, HCI_CMD_TIMEOUT);
3534 }
3535 
3536 /* Read BD Address */
3537 static int hci_read_bd_addr_sync(struct hci_dev *hdev)
3538 {
3539 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR,
3540 				     0, NULL, HCI_CMD_TIMEOUT);
3541 }
3542 
3543 #define HCI_INIT(_func) \
3544 { \
3545 	.func = _func, \
3546 }
3547 
3548 static const struct hci_init_stage hci_init0[] = {
3549 	/* HCI_OP_READ_LOCAL_VERSION */
3550 	HCI_INIT(hci_read_local_version_sync),
3551 	/* HCI_OP_READ_BD_ADDR */
3552 	HCI_INIT(hci_read_bd_addr_sync),
3553 	{}
3554 };
3555 
3556 int hci_reset_sync(struct hci_dev *hdev)
3557 {
3558 	int err;
3559 
3560 	set_bit(HCI_RESET, &hdev->flags);
3561 
3562 	err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
3563 				    HCI_CMD_TIMEOUT);
3564 	if (err)
3565 		return err;
3566 
3567 	return 0;
3568 }
3569 
3570 static int hci_init0_sync(struct hci_dev *hdev)
3571 {
3572 	int err;
3573 
3574 	bt_dev_dbg(hdev, "");
3575 
3576 	/* Reset */
3577 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3578 		err = hci_reset_sync(hdev);
3579 		if (err)
3580 			return err;
3581 	}
3582 
3583 	return hci_init_stage_sync(hdev, hci_init0);
3584 }
3585 
3586 static int hci_unconf_init_sync(struct hci_dev *hdev)
3587 {
3588 	int err;
3589 
3590 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3591 		return 0;
3592 
3593 	err = hci_init0_sync(hdev);
3594 	if (err < 0)
3595 		return err;
3596 
3597 	if (hci_dev_test_flag(hdev, HCI_SETUP))
3598 		hci_debugfs_create_basic(hdev);
3599 
3600 	return 0;
3601 }
3602 
3603 /* Read Local Supported Features. */
3604 static int hci_read_local_features_sync(struct hci_dev *hdev)
3605 {
3606 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
3607 				     0, NULL, HCI_CMD_TIMEOUT);
3608 }
3609 
3610 /* BR Controller init stage 1 command sequence */
3611 static const struct hci_init_stage br_init1[] = {
3612 	/* HCI_OP_READ_LOCAL_FEATURES */
3613 	HCI_INIT(hci_read_local_features_sync),
3614 	/* HCI_OP_READ_LOCAL_VERSION */
3615 	HCI_INIT(hci_read_local_version_sync),
3616 	/* HCI_OP_READ_BD_ADDR */
3617 	HCI_INIT(hci_read_bd_addr_sync),
3618 	{}
3619 };
3620 
3621 /* Read Local Commands */
3622 static int hci_read_local_cmds_sync(struct hci_dev *hdev)
3623 {
3624 	/* All Bluetooth 1.2 and later controllers should support the
3625 	 * HCI command for reading the local supported commands.
3626 	 *
3627 	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
3628 	 * but do not have support for this command. If that is the case,
3629 	 * the driver can quirk the behavior and skip reading the local
3630 	 * supported commands.
3631 	 */
3632 	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
3633 	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
3634 		return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
3635 					     0, NULL, HCI_CMD_TIMEOUT);
3636 
3637 	return 0;
3638 }
3639 
3640 static int hci_init1_sync(struct hci_dev *hdev)
3641 {
3642 	int err;
3643 
3644 	bt_dev_dbg(hdev, "");
3645 
3646 	/* Reset */
3647 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3648 		err = hci_reset_sync(hdev);
3649 		if (err)
3650 			return err;
3651 	}
3652 
3653 	return hci_init_stage_sync(hdev, br_init1);
3654 }
3655 
3656 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
3657 static int hci_read_buffer_size_sync(struct hci_dev *hdev)
3658 {
3659 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
3660 				     0, NULL, HCI_CMD_TIMEOUT);
3661 }
3662 
3663 /* Read Class of Device */
3664 static int hci_read_dev_class_sync(struct hci_dev *hdev)
3665 {
3666 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
3667 				     0, NULL, HCI_CMD_TIMEOUT);
3668 }
3669 
3670 /* Read Local Name */
3671 static int hci_read_local_name_sync(struct hci_dev *hdev)
3672 {
3673 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
3674 				     0, NULL, HCI_CMD_TIMEOUT);
3675 }
3676 
3677 /* Read Voice Setting */
3678 static int hci_read_voice_setting_sync(struct hci_dev *hdev)
3679 {
3680 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
3681 				     0, NULL, HCI_CMD_TIMEOUT);
3682 }
3683 
3684 /* Read Number of Supported IAC */
3685 static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
3686 {
3687 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
3688 				     0, NULL, HCI_CMD_TIMEOUT);
3689 }
3690 
3691 /* Read Current IAC LAP */
3692 static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
3693 {
3694 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
3695 				     0, NULL, HCI_CMD_TIMEOUT);
3696 }
3697 
3698 static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
3699 				     u8 cond_type, bdaddr_t *bdaddr,
3700 				     u8 auto_accept)
3701 {
3702 	struct hci_cp_set_event_filter cp;
3703 
3704 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3705 		return 0;
3706 
3707 	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3708 		return 0;
3709 
3710 	memset(&cp, 0, sizeof(cp));
3711 	cp.flt_type = flt_type;
3712 
3713 	if (flt_type != HCI_FLT_CLEAR_ALL) {
3714 		cp.cond_type = cond_type;
3715 		bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
3716 		cp.addr_conn_flt.auto_accept = auto_accept;
3717 	}
3718 
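	/* Clear All requires only the filter type byte; every other
	 * filter type also carries its condition.
	 */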
3719 	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
3720 				     flt_type == HCI_FLT_CLEAR_ALL ?
3721 				     sizeof(cp.flt_type) : sizeof(cp), &cp,
3722 				     HCI_CMD_TIMEOUT);
3723 }
3724 
3725 static int hci_clear_event_filter_sync(struct hci_dev *hdev)
3726 {
3727 	if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
3728 		return 0;
3729 
3730 	/* In theory the state machine should not reach here unless
3731 	 * a hci_set_event_filter_sync() call succeeds, but we do
3732 	 * the check both for parity and as a future reminder.
3733 	 */
3734 	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3735 		return 0;
3736 
3737 	return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
3738 					 BDADDR_ANY, 0x00);
3739 }
3740 
3741 /* Connection accept timeout ~20 secs */
3742 static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
3743 {
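	/* 0x7d00 (32000) slots * 0.625 ms per slot = 20000 ms (~20 secs) */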
3744 	__le16 param = cpu_to_le16(0x7d00);
3745 
3746 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
3747 				     sizeof(param), &param, HCI_CMD_TIMEOUT);
3748 }
3749 
3750 /* BR Controller init stage 2 command sequence */
3751 static const struct hci_init_stage br_init2[] = {
3752 	/* HCI_OP_READ_BUFFER_SIZE */
3753 	HCI_INIT(hci_read_buffer_size_sync),
3754 	/* HCI_OP_READ_CLASS_OF_DEV */
3755 	HCI_INIT(hci_read_dev_class_sync),
3756 	/* HCI_OP_READ_LOCAL_NAME */
3757 	HCI_INIT(hci_read_local_name_sync),
3758 	/* HCI_OP_READ_VOICE_SETTING */
3759 	HCI_INIT(hci_read_voice_setting_sync),
3760 	/* HCI_OP_READ_NUM_SUPPORTED_IAC */
3761 	HCI_INIT(hci_read_num_supported_iac_sync),
3762 	/* HCI_OP_READ_CURRENT_IAC_LAP */
3763 	HCI_INIT(hci_read_current_iac_lap_sync),
3764 	/* HCI_OP_SET_EVENT_FLT */
3765 	HCI_INIT(hci_clear_event_filter_sync),
3766 	/* HCI_OP_WRITE_CA_TIMEOUT */
3767 	HCI_INIT(hci_write_ca_timeout_sync),
3768 	{}
3769 };
3770 
3771 static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
3772 {
3773 	u8 mode = 0x01;
3774 
3775 	if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3776 		return 0;
3777 
3778 	/* When SSP is available, the host features page
3779 	 * should be available as well. However, some
3780 	 * controllers list the max_page as 0 as long as SSP
3781 	 * has not been enabled. To achieve proper debugging
3782 	 * output, force max_page to at least 1.
3783 	 */
3784 	hdev->max_page = 0x01;
3785 
3786 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3787 				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3788 }
3789 
3790 static int hci_write_eir_sync(struct hci_dev *hdev)
3791 {
3792 	struct hci_cp_write_eir cp;
3793 
3794 	if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3795 		return 0;
3796 
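	/* Clear any stale EIR data with an all-zero write */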
3797 	memset(hdev->eir, 0, sizeof(hdev->eir));
3798 	memset(&cp, 0, sizeof(cp));
3799 
3800 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
3801 				     HCI_CMD_TIMEOUT);
3802 }
3803 
3804 static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
3805 {
3806 	u8 mode;
3807 
3808 	if (!lmp_inq_rssi_capable(hdev) &&
3809 	    !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
3810 		return 0;
3811 
3812 	/* If Extended Inquiry Result events are supported, then
3813 	 * they are clearly preferred over Inquiry Result with RSSI
3814 	 * events.
3815 	 */
3816 	mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
3817 
3818 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE,
3819 				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3820 }
3821 
3822 static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev)
3823 {
3824 	if (!lmp_inq_tx_pwr_capable(hdev))
3825 		return 0;
3826 
3827 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
3828 				     0, NULL, HCI_CMD_TIMEOUT);
3829 }
3830 
3831 static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page)
3832 {
3833 	struct hci_cp_read_local_ext_features cp;
3834 
3835 	if (!lmp_ext_feat_capable(hdev))
3836 		return 0;
3837 
3838 	memset(&cp, 0, sizeof(cp));
3839 	cp.page = page;
3840 
3841 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
3842 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3843 }
3844 
3845 static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev)
3846 {
3847 	return hci_read_local_ext_features_sync(hdev, 0x01);
3848 }
3849 
3850 /* HCI Controller init stage 2 command sequence */
3851 static const struct hci_init_stage hci_init2[] = {
3852 	/* HCI_OP_READ_LOCAL_COMMANDS */
3853 	HCI_INIT(hci_read_local_cmds_sync),
3854 	/* HCI_OP_WRITE_SSP_MODE */
3855 	HCI_INIT(hci_write_ssp_mode_1_sync),
3856 	/* HCI_OP_WRITE_EIR */
3857 	HCI_INIT(hci_write_eir_sync),
3858 	/* HCI_OP_WRITE_INQUIRY_MODE */
3859 	HCI_INIT(hci_write_inquiry_mode_sync),
3860 	/* HCI_OP_READ_INQ_RSP_TX_POWER */
3861 	HCI_INIT(hci_read_inq_rsp_tx_power_sync),
3862 	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
3863 	HCI_INIT(hci_read_local_ext_features_1_sync),
3864 	/* HCI_OP_WRITE_AUTH_ENABLE */
3865 	HCI_INIT(hci_write_auth_enable_sync),
3866 	{}
3867 };
3868 
3869 /* Read LE Buffer Size */
3870 static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
3871 {
3872 	/* Use Read LE Buffer Size V2 if supported */
3873 	if (iso_capable(hdev) && hdev->commands[41] & 0x20)
3874 		return __hci_cmd_sync_status(hdev,
3875 					     HCI_OP_LE_READ_BUFFER_SIZE_V2,
3876 					     0, NULL, HCI_CMD_TIMEOUT);
3877 
3878 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
3879 				     0, NULL, HCI_CMD_TIMEOUT);
3880 }
3881 
3882 /* Read LE Local Supported Features */
3883 static int hci_le_read_local_features_sync(struct hci_dev *hdev)
3884 {
3885 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES,
3886 				     0, NULL, HCI_CMD_TIMEOUT);
3887 }
3888 
3889 /* Read LE Supported States */
3890 static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
3891 {
3892 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES,
3893 				     0, NULL, HCI_CMD_TIMEOUT);
3894 }
3895 
3896 /* LE Controller init stage 2 command sequence */
3897 static const struct hci_init_stage le_init2[] = {
3898 	/* HCI_OP_LE_READ_LOCAL_FEATURES */
3899 	HCI_INIT(hci_le_read_local_features_sync),
3900 	/* HCI_OP_LE_READ_BUFFER_SIZE */
3901 	HCI_INIT(hci_le_read_buffer_size_sync),
3902 	/* HCI_OP_LE_READ_SUPPORTED_STATES */
3903 	HCI_INIT(hci_le_read_supported_states_sync),
3904 	{}
3905 };
3906 
3907 static int hci_init2_sync(struct hci_dev *hdev)
3908 {
3909 	int err;
3910 
3911 	bt_dev_dbg(hdev, "");
3912 
3913 	err = hci_init_stage_sync(hdev, hci_init2);
3914 	if (err)
3915 		return err;
3916 
3917 	if (lmp_bredr_capable(hdev)) {
3918 		err = hci_init_stage_sync(hdev, br_init2);
3919 		if (err)
3920 			return err;
3921 	} else {
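		/* BR/EDR is not supported, so it cannot be enabled */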
3922 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
3923 	}
3924 
3925 	if (lmp_le_capable(hdev)) {
3926 		err = hci_init_stage_sync(hdev, le_init2);
3927 		if (err)
3928 			return err;
3929 		/* LE-only controllers have LE implicitly enabled */
3930 		if (!lmp_bredr_capable(hdev))
3931 			hci_dev_set_flag(hdev, HCI_LE_ENABLED);
3932 	}
3933 
3934 	return 0;
3935 }
3936 
3937 static int hci_set_event_mask_sync(struct hci_dev *hdev)
3938 {
3939 	/* The second byte is 0xff instead of 0x9f (two reserved bits
3940 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
3941 	 * command otherwise.
3942 	 */
3943 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
3944 
3945 	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
3946 	 * any event mask for pre-1.2 devices.
3947 	 */
3948 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3949 		return 0;
3950 
3951 	if (lmp_bredr_capable(hdev)) {
3952 		events[4] |= 0x01; /* Flow Specification Complete */
3953 
3954 		/* Don't set Disconnection Complete and Mode Change when
3955 		 * suspended as that would wake up the host when disconnecting
3956 		 * due to suspend.
3957 		 */
3958 		if (hdev->suspended) {
3959 			events[0] &= 0xef;
3960 			events[2] &= 0xf7;
3961 		}
3962 	} else {
3963 		/* Use a different default for LE-only devices */
3964 		memset(events, 0, sizeof(events));
3965 		events[1] |= 0x20; /* Command Complete */
3966 		events[1] |= 0x40; /* Command Status */
3967 		events[1] |= 0x80; /* Hardware Error */
3968 
3969 		/* If the controller supports the Disconnect command, enable
3970 		 * the corresponding event. In addition enable packet flow
3971 		 * control related events.
3972 		 */
3973 		if (hdev->commands[0] & 0x20) {
3974 			/* Don't set Disconnection Complete when suspended as
3975 			 * that would wake up the host when disconnecting due to
3976 			 * suspend.
3977 			 */
3978 			if (!hdev->suspended)
3979 				events[0] |= 0x10; /* Disconnection Complete */
3980 			events[2] |= 0x04; /* Number of Completed Packets */
3981 			events[3] |= 0x02; /* Data Buffer Overflow */
3982 		}
3983 
3984 		/* If the controller supports the Read Remote Version
3985 		 * Information command, enable the corresponding event.
3986 		 */
3987 		if (hdev->commands[2] & 0x80)
3988 			events[1] |= 0x08; /* Read Remote Version Information
3989 					    * Complete
3990 					    */
3991 
3992 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
3993 			events[0] |= 0x80; /* Encryption Change */
3994 			events[5] |= 0x80; /* Encryption Key Refresh Complete */
3995 		}
3996 	}
3997 
3998 	if (lmp_inq_rssi_capable(hdev) ||
3999 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
4000 		events[4] |= 0x02; /* Inquiry Result with RSSI */
4001 
4002 	if (lmp_ext_feat_capable(hdev))
4003 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
4004 
4005 	if (lmp_esco_capable(hdev)) {
4006 		events[5] |= 0x08; /* Synchronous Connection Complete */
4007 		events[5] |= 0x10; /* Synchronous Connection Changed */
4008 	}
4009 
4010 	if (lmp_sniffsubr_capable(hdev))
4011 		events[5] |= 0x20; /* Sniff Subrating */
4012 
4013 	if (lmp_pause_enc_capable(hdev))
4014 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
4015 
4016 	if (lmp_ext_inq_capable(hdev))
4017 		events[5] |= 0x40; /* Extended Inquiry Result */
4018 
4019 	if (lmp_no_flush_capable(hdev))
4020 		events[7] |= 0x01; /* Enhanced Flush Complete */
4021 
4022 	if (lmp_lsto_capable(hdev))
4023 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
4024 
4025 	if (lmp_ssp_capable(hdev)) {
4026 		events[6] |= 0x01;	/* IO Capability Request */
4027 		events[6] |= 0x02;	/* IO Capability Response */
4028 		events[6] |= 0x04;	/* User Confirmation Request */
4029 		events[6] |= 0x08;	/* User Passkey Request */
4030 		events[6] |= 0x10;	/* Remote OOB Data Request */
4031 		events[6] |= 0x20;	/* Simple Pairing Complete */
4032 		events[7] |= 0x04;	/* User Passkey Notification */
4033 		events[7] |= 0x08;	/* Keypress Notification */
4034 		events[7] |= 0x10;	/* Remote Host Supported
4035 					 * Features Notification
4036 					 */
4037 	}
4038 
4039 	if (lmp_le_capable(hdev))
4040 		events[7] |= 0x20;	/* LE Meta-Event */
4041 
4042 	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK,
4043 				     sizeof(events), events, HCI_CMD_TIMEOUT);
4044 }
4045 
4046 static int hci_read_stored_link_key_sync(struct hci_dev *hdev)
4047 {
4048 	struct hci_cp_read_stored_link_key cp;
4049 
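	/* Read Stored Link Key is octet 6 bit 5 in the supported commands
	 * bitmask; skip it when absent or marked broken by a quirk.
	 */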
4050 	if (!(hdev->commands[6] & 0x20) ||
4051 	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
4052 		return 0;
4053 
4054 	memset(&cp, 0, sizeof(cp));
4055 	bacpy(&cp.bdaddr, BDADDR_ANY);
4056 	cp.read_all = 0x01;
4057 
4058 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY,
4059 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4060 }
4061 
4062 static int hci_setup_link_policy_sync(struct hci_dev *hdev)
4063 {
4064 	struct hci_cp_write_def_link_policy cp;
4065 	u16 link_policy = 0;
4066 
4067 	if (!(hdev->commands[5] & 0x10))
4068 		return 0;
4069 
4070 	memset(&cp, 0, sizeof(cp));
4071 
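	/* Enable each link policy mode the local LMP features support */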
4072 	if (lmp_rswitch_capable(hdev))
4073 		link_policy |= HCI_LP_RSWITCH;
4074 	if (lmp_hold_capable(hdev))
4075 		link_policy |= HCI_LP_HOLD;
4076 	if (lmp_sniff_capable(hdev))
4077 		link_policy |= HCI_LP_SNIFF;
4078 	if (lmp_park_capable(hdev))
4079 		link_policy |= HCI_LP_PARK;
4080 
4081 	cp.policy = cpu_to_le16(link_policy);
4082 
4083 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
4084 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4085 }
4086 
4087 static int hci_read_page_scan_activity_sync(struct hci_dev *hdev)
4088 {
4089 	if (!(hdev->commands[8] & 0x01))
4090 		return 0;
4091 
4092 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY,
4093 				     0, NULL, HCI_CMD_TIMEOUT);
4094 }
4095 
4096 static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
4097 {
4098 	if (!(hdev->commands[18] & 0x04) ||
4099 	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4100 	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4101 		return 0;
4102 
4103 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4104 				     0, NULL, HCI_CMD_TIMEOUT);
4105 }
4106 
4107 static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
4108 {
4109 	/* Some older Broadcom based Bluetooth 1.2 controllers do not
4110 	 * support the Read Page Scan Type command. Check support for
4111 	 * this command in the bit mask of supported commands.
4112 	 */
4113 	if (!(hdev->commands[13] & 0x01))
4114 		return 0;
4115 
4116 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
4117 				     0, NULL, HCI_CMD_TIMEOUT);
4118 }
4119 
4120 /* Read features beyond page 1 if available */
4121 static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev)
4122 {
4123 	u8 page;
4124 	int err;
4125 
4126 	if (!lmp_ext_feat_capable(hdev))
4127 		return 0;
4128 
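	/* Pages 0 and 1 were already read during the earlier init stages */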
4129 	for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page;
4130 	     page++) {
4131 		err = hci_read_local_ext_features_sync(hdev, page);
4132 		if (err)
4133 			return err;
4134 	}
4135 
4136 	return 0;
4137 }
4138 
4139 /* HCI Controller init stage 3 command sequence */
4140 static const struct hci_init_stage hci_init3[] = {
4141 	/* HCI_OP_SET_EVENT_MASK */
4142 	HCI_INIT(hci_set_event_mask_sync),
4143 	/* HCI_OP_READ_STORED_LINK_KEY */
4144 	HCI_INIT(hci_read_stored_link_key_sync),
4145 	/* HCI_OP_WRITE_DEF_LINK_POLICY */
4146 	HCI_INIT(hci_setup_link_policy_sync),
4147 	/* HCI_OP_READ_PAGE_SCAN_ACTIVITY */
4148 	HCI_INIT(hci_read_page_scan_activity_sync),
4149 	/* HCI_OP_READ_DEF_ERR_DATA_REPORTING */
4150 	HCI_INIT(hci_read_def_err_data_reporting_sync),
4151 	/* HCI_OP_READ_PAGE_SCAN_TYPE */
4152 	HCI_INIT(hci_read_page_scan_type_sync),
4153 	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
4154 	HCI_INIT(hci_read_local_ext_features_all_sync),
4155 	{}
4156 };
4157 
4158 static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
4159 {
4160 	u8 events[8];
4161 
4162 	if (!lmp_le_capable(hdev))
4163 		return 0;
4164 
4165 	memset(events, 0, sizeof(events));
4166 
4167 	if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
4168 		events[0] |= 0x10;	/* LE Long Term Key Request */
4169 
4170 	/* If the controller supports the Connection Parameters Request
4171 	 * Link Layer Procedure, enable the corresponding event.
4172 	 */
4173 	if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
4174 		/* LE Remote Connection Parameter Request */
4175 		events[0] |= 0x20;
4176 
4177 	/* If the controller supports the Data Length Extension
4178 	 * feature, enable the corresponding event.
4179 	 */
4180 	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
4181 		events[0] |= 0x40;	/* LE Data Length Change */
4182 
4183 	/* If the controller supports the LL Privacy feature or LE
4184 	 * Extended Advertising, enable the corresponding event.
4185 	 */
4186 	if (use_enhanced_conn_complete(hdev))
4187 		events[1] |= 0x02;	/* LE Enhanced Connection Complete */
4188 
4189 	/* If the controller supports Extended Scanner Filter
4190 	 * Policies, enable the corresponding event.
4191 	 */
4192 	if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
4193 		events[1] |= 0x04;	/* LE Direct Advertising Report */
4194 
4195 	/* If the controller supports Channel Selection Algorithm #2
4196 	 * feature, enable the corresponding event.
4197 	 */
4198 	if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
4199 		events[2] |= 0x08;	/* LE Channel Selection Algorithm */
4200 
4201 	/* If the controller supports the LE Set Scan Enable command,
4202 	 * enable the corresponding advertising report event.
4203 	 */
4204 	if (hdev->commands[26] & 0x08)
4205 		events[0] |= 0x02;	/* LE Advertising Report */
4206 
4207 	/* If the controller supports the LE Create Connection
4208 	 * command, enable the corresponding event.
4209 	 */
4210 	if (hdev->commands[26] & 0x10)
4211 		events[0] |= 0x01;	/* LE Connection Complete */
4212 
4213 	/* If the controller supports the LE Connection Update
4214 	 * command, enable the corresponding event.
4215 	 */
4216 	if (hdev->commands[27] & 0x04)
4217 		events[0] |= 0x04;	/* LE Connection Update Complete */
4218 
4219 	/* If the controller supports the LE Read Remote Used Features
4220 	 * command, enable the corresponding event.
4221 	 */
4222 	if (hdev->commands[27] & 0x20)
4223 		/* LE Read Remote Used Features Complete */
4224 		events[0] |= 0x08;
4225 
4226 	/* If the controller supports the LE Read Local P-256
4227 	 * Public Key command, enable the corresponding event.
4228 	 */
4229 	if (hdev->commands[34] & 0x02)
4230 		/* LE Read Local P-256 Public Key Complete */
4231 		events[0] |= 0x80;
4232 
4233 	/* If the controller supports the LE Generate DHKey
4234 	 * command, enable the corresponding event.
4235 	 */
4236 	if (hdev->commands[34] & 0x04)
4237 		events[1] |= 0x01;	/* LE Generate DHKey Complete */
4238 
4239 	/* If the controller supports the LE Set Default PHY or
4240 	 * LE Set PHY commands, enable the corresponding event.
4241 	 */
4242 	if (hdev->commands[35] & (0x20 | 0x40))
4243 		events[1] |= 0x08;        /* LE PHY Update Complete */
4244 
4245 	/* If the controller supports LE Set Extended Scan Parameters
4246 	 * and LE Set Extended Scan Enable commands, enable the
4247 	 * corresponding event.
4248 	 */
4249 	if (use_ext_scan(hdev))
4250 		events[1] |= 0x10;	/* LE Extended Advertising Report */
4251 
4252 	/* If the controller supports the LE Extended Advertising
4253 	 * command, enable the corresponding event.
4254 	 */
4255 	if (ext_adv_capable(hdev))
4256 		events[2] |= 0x02;	/* LE Advertising Set Terminated */
4257 
4258 	if (cis_capable(hdev)) {
4259 		events[3] |= 0x01;	/* LE CIS Established */
4260 		if (cis_peripheral_capable(hdev))
4261 			events[3] |= 0x02; /* LE CIS Request */
4262 	}
4263 
4264 	if (bis_capable(hdev)) {
4265 		events[1] |= 0x20;	/* LE PA Report */
4266 		events[1] |= 0x40;	/* LE PA Sync Established */
4267 		events[3] |= 0x04;	/* LE Create BIG Complete */
4268 		events[3] |= 0x08;	/* LE Terminate BIG Complete */
4269 		events[3] |= 0x10;	/* LE BIG Sync Established */
4270 		events[3] |= 0x20;	/* LE BIG Sync Loss */
4271 		events[4] |= 0x02;	/* LE BIG Info Advertising Report */
4272 	}
4273 
4274 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
4275 				     sizeof(events), events, HCI_CMD_TIMEOUT);
4276 }
4277 
4278 /* Read LE Advertising Channel TX Power */
4279 static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
4280 {
4281 	if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
4282 		/* The HCI specification forbids mixing legacy and extended
4283 		 * advertising commands, and READ_ADV_TX_POWER is one of the
4284 		 * legacy commands. So do not call it if extended advertising
4285 		 * is supported, otherwise the controller will return
4286 		 * COMMAND_DISALLOWED for the extended commands.
4287 		 */
4288 		return __hci_cmd_sync_status(hdev,
4289 					       HCI_OP_LE_READ_ADV_TX_POWER,
4290 					       0, NULL, HCI_CMD_TIMEOUT);
4291 	}
4292 
4293 	return 0;
4294 }
4295 
4296 /* Read LE Min/Max Tx Power */
4297 static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
4298 {
4299 	if (!(hdev->commands[38] & 0x80) ||
4300 	    test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks))
4301 		return 0;
4302 
4303 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
4304 				     0, NULL, HCI_CMD_TIMEOUT);
4305 }
4306 
4307 /* Read LE Accept List Size */
4308 static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
4309 {
4310 	if (!(hdev->commands[26] & 0x40))
4311 		return 0;
4312 
4313 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4314 				     0, NULL, HCI_CMD_TIMEOUT);
4315 }
4316 
4317 /* Read LE Resolving List Size */
4318 static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
4319 {
4320 	if (!(hdev->commands[34] & 0x40))
4321 		return 0;
4322 
4323 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
4324 				     0, NULL, HCI_CMD_TIMEOUT);
4325 }
4326 
4327 /* Clear LE Resolving List */
4328 static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev)
4329 {
4330 	if (!(hdev->commands[34] & 0x20))
4331 		return 0;
4332 
4333 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL,
4334 				     HCI_CMD_TIMEOUT);
4335 }
4336 
4337 /* Set RPA timeout */
4338 static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
4339 {
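	/* The RPA timeout parameter is expressed in seconds */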
4340 	__le16 timeout = cpu_to_le16(hdev->rpa_timeout);
4341 
4342 	if (!(hdev->commands[35] & 0x04) ||
4343 	    test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks))
4344 		return 0;
4345 
4346 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
4347 				     sizeof(timeout), &timeout,
4348 				     HCI_CMD_TIMEOUT);
4349 }
4350 
4351 /* Read LE Maximum Data Length */
4352 static int hci_le_read_max_data_len_sync(struct hci_dev *hdev)
4353 {
4354 	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4355 		return 0;
4356 
4357 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL,
4358 				     HCI_CMD_TIMEOUT);
4359 }
4360 
4361 /* Read LE Suggested Default Data Length */
4362 static int hci_le_read_def_data_len_sync(struct hci_dev *hdev)
4363 {
4364 	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4365 		return 0;
4366 
4367 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL,
4368 				     HCI_CMD_TIMEOUT);
4369 }
4370 
4371 /* Read LE Number of Supported Advertising Sets */
4372 static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev)
4373 {
4374 	if (!ext_adv_capable(hdev))
4375 		return 0;
4376 
4377 	return __hci_cmd_sync_status(hdev,
4378 				     HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4379 				     0, NULL, HCI_CMD_TIMEOUT);
4380 }
4381 
4382 /* Write LE Host Supported */
4383 static int hci_set_le_support_sync(struct hci_dev *hdev)
4384 {
4385 	struct hci_cp_write_le_host_supported cp;
4386 
4387 	/* LE-only devices do not support explicit enablement */
4388 	if (!lmp_bredr_capable(hdev))
4389 		return 0;
4390 
4391 	memset(&cp, 0, sizeof(cp));
4392 
4393 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
4394 		cp.le = 0x01;
4395 		cp.simul = 0x00;
4396 	}
4397 
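	/* Skip the command if the host LE setting is already in place */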
4398 	if (cp.le == lmp_host_le_capable(hdev))
4399 		return 0;
4400 
4401 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4402 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4403 }
4404 
4405 /* LE Set Host Feature */
4406 static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
4407 {
4408 	struct hci_cp_le_set_host_feature cp;
4409 
4410 	if (!cis_capable(hdev))
4411 		return 0;
4412 
4413 	memset(&cp, 0, sizeof(cp));
4414 
4415 	/* Connected Isochronous Channels (Host Support) */
4416 	cp.bit_number = 32;
4417 	cp.bit_value = 1;
4418 
4419 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,
4420 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4421 }
4422 
4423 /* LE Controller init stage 3 command sequence */
4424 static const struct hci_init_stage le_init3[] = {
4425 	/* HCI_OP_LE_SET_EVENT_MASK */
4426 	HCI_INIT(hci_le_set_event_mask_sync),
4427 	/* HCI_OP_LE_READ_ADV_TX_POWER */
4428 	HCI_INIT(hci_le_read_adv_tx_power_sync),
4429 	/* HCI_OP_LE_READ_TRANSMIT_POWER */
4430 	HCI_INIT(hci_le_read_tx_power_sync),
4431 	/* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */
4432 	HCI_INIT(hci_le_read_accept_list_size_sync),
4433 	/* HCI_OP_LE_CLEAR_ACCEPT_LIST */
4434 	HCI_INIT(hci_le_clear_accept_list_sync),
4435 	/* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
4436 	HCI_INIT(hci_le_read_resolv_list_size_sync),
4437 	/* HCI_OP_LE_CLEAR_RESOLV_LIST */
4438 	HCI_INIT(hci_le_clear_resolv_list_sync),
4439 	/* HCI_OP_LE_SET_RPA_TIMEOUT */
4440 	HCI_INIT(hci_le_set_rpa_timeout_sync),
4441 	/* HCI_OP_LE_READ_MAX_DATA_LEN */
4442 	HCI_INIT(hci_le_read_max_data_len_sync),
4443 	/* HCI_OP_LE_READ_DEF_DATA_LEN */
4444 	HCI_INIT(hci_le_read_def_data_len_sync),
4445 	/* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
4446 	HCI_INIT(hci_le_read_num_support_adv_sets_sync),
4447 	/* HCI_OP_WRITE_LE_HOST_SUPPORTED */
4448 	HCI_INIT(hci_set_le_support_sync),
4449 	/* HCI_OP_LE_SET_HOST_FEATURE */
4450 	HCI_INIT(hci_le_set_host_feature_sync),
4451 	{}
4452 };
4453 
4454 static int hci_init3_sync(struct hci_dev *hdev)
4455 {
4456 	int err;
4457 
4458 	bt_dev_dbg(hdev, "");
4459 
4460 	err = hci_init_stage_sync(hdev, hci_init3);
4461 	if (err)
4462 		return err;
4463 
4464 	if (lmp_le_capable(hdev))
4465 		return hci_init_stage_sync(hdev, le_init3);
4466 
4467 	return 0;
4468 }
4469 
4470 static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
4471 {
4472 	struct hci_cp_delete_stored_link_key cp;
4473 
4474 	/* Some Broadcom based Bluetooth controllers do not support the
4475 	 * Delete Stored Link Key command. They clearly indicate its
4476 	 * absence in the bit mask of supported commands.
4477 	 *
4478 	 * Check the supported commands and send this one only if it is
4479 	 * marked as supported. If not, assume that the controller has no
4480 	 * actual support for stored link keys, which makes this command
4481 	 * redundant anyway.
4482 	 *
4483 	 * Some controllers indicate that they support handling deleting
4484 	 * stored link keys, but they don't. The quirk lets a driver
4485 	 * just disable this command.
4486 	 */
4487 	if (!(hdev->commands[6] & 0x80) ||
4488 	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
4489 		return 0;
4490 
4491 	memset(&cp, 0, sizeof(cp));
4492 	bacpy(&cp.bdaddr, BDADDR_ANY);
4493 	cp.delete_all = 0x01;
4494 
4495 	return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
4496 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4497 }
4498 
4499 static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
4500 {
4501 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
4502 	bool changed = false;
4503 
4504 	/* Set event mask page 2 if the HCI command for it is supported */
4505 	if (!(hdev->commands[22] & 0x04))
4506 		return 0;
4507 
4508 	/* If the Connectionless Peripheral Broadcast central role is
4509 	 * supported, enable all necessary events for it.
4510 	 */
4511 	if (lmp_cpb_central_capable(hdev)) {
4512 		events[1] |= 0x40;	/* Triggered Clock Capture */
4513 		events[1] |= 0x80;	/* Synchronization Train Complete */
4514 		events[2] |= 0x08;	/* Truncated Page Complete */
4515 		events[2] |= 0x20;	/* CPB Channel Map Change */
4516 		changed = true;
4517 	}
4518 
4519 	/* If the Connectionless Peripheral Broadcast peripheral role is
4520 	 * supported, enable all necessary events for it.
4521 	 */
4522 	if (lmp_cpb_peripheral_capable(hdev)) {
4523 		events[2] |= 0x01;	/* Synchronization Train Received */
4524 		events[2] |= 0x02;	/* CPB Receive */
4525 		events[2] |= 0x04;	/* CPB Timeout */
4526 		events[2] |= 0x10;	/* Peripheral Page Response Timeout */
4527 		changed = true;
4528 	}
4529 
4530 	/* Enable Authenticated Payload Timeout Expired event if supported */
4531 	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
4532 		events[2] |= 0x80;
4533 		changed = true;
4534 	}
4535 
4536 	/* Some Broadcom based controllers indicate support for the Set
4537 	 * Event Mask Page 2 command, but do not actually support it. Since
4538 	 * the default value is all bits set to zero, the command is only
4539 	 * required if the event mask has to be changed. In case no change
4540 	 * to the event mask is needed, skip this command.
4541 	 */
4542 	if (!changed)
4543 		return 0;
4544 
4545 	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
4546 				     sizeof(events), events, HCI_CMD_TIMEOUT);
4547 }
4548 
4549 /* Read local codec list if the HCI command is supported */
4550 static int hci_read_local_codecs_sync(struct hci_dev *hdev)
4551 {
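	/* Prefer the v2 command, which also reports codec transports */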
4552 	if (hdev->commands[45] & 0x04)
4553 		hci_read_supported_codecs_v2(hdev);
4554 	else if (hdev->commands[29] & 0x20)
4555 		hci_read_supported_codecs(hdev);
4556 
4557 	return 0;
4558 }
4559 
4560 /* Read local pairing options if the HCI command is supported */
4561 static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
4562 {
4563 	if (!(hdev->commands[41] & 0x08))
4564 		return 0;
4565 
4566 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
4567 				     0, NULL, HCI_CMD_TIMEOUT);
4568 }
4569 
4570 /* Get MWS transport configuration if the HCI command is supported */
4571 static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
4572 {
4573 	if (!mws_transport_config_capable(hdev))
4574 		return 0;
4575 
4576 	return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
4577 				     0, NULL, HCI_CMD_TIMEOUT);
4578 }
4579 
4580 /* Check for Synchronization Train support */
4581 static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
4582 {
4583 	if (!lmp_sync_train_capable(hdev))
4584 		return 0;
4585 
4586 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
4587 				     0, NULL, HCI_CMD_TIMEOUT);
4588 }
4589 
4590 /* Enable Secure Connections if supported and configured */
4591 static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
4592 {
4593 	u8 support = 0x01;
4594 
4595 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
4596 	    !bredr_sc_enabled(hdev))
4597 		return 0;
4598 
4599 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
4600 				     sizeof(support), &support,
4601 				     HCI_CMD_TIMEOUT);
4602 }
4603 
4604 /* If supported, set erroneous data reporting to match the wideband
4605  * speech setting value.
4606  */
4607 static int hci_set_err_data_report_sync(struct hci_dev *hdev)
4608 {
4609 	struct hci_cp_write_def_err_data_reporting cp;
4610 	bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);
4611 
4612 	if (!(hdev->commands[18] & 0x08) ||
4613 	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4614 	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4615 		return 0;
4616 
4617 	if (enabled == hdev->err_data_reporting)
4618 		return 0;
4619 
4620 	memset(&cp, 0, sizeof(cp));
4621 	cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED :
4622 				ERR_DATA_REPORTING_DISABLED;
4623 
4624 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4625 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4626 }
4627 
4628 static const struct hci_init_stage hci_init4[] = {
4629 	 /* HCI_OP_DELETE_STORED_LINK_KEY */
4630 	HCI_INIT(hci_delete_stored_link_key_sync),
4631 	/* HCI_OP_SET_EVENT_MASK_PAGE_2 */
4632 	HCI_INIT(hci_set_event_mask_page_2_sync),
4633 	/* HCI_OP_READ_LOCAL_CODECS */
4634 	HCI_INIT(hci_read_local_codecs_sync),
4635 	 /* HCI_OP_READ_LOCAL_PAIRING_OPTS */
4636 	HCI_INIT(hci_read_local_pairing_opts_sync),
4637 	 /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
4638 	HCI_INIT(hci_get_mws_transport_config_sync),
4639 	 /* HCI_OP_READ_SYNC_TRAIN_PARAMS */
4640 	HCI_INIT(hci_read_sync_train_params_sync),
4641 	/* HCI_OP_WRITE_SC_SUPPORT */
4642 	HCI_INIT(hci_write_sc_support_1_sync),
4643 	/* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
4644 	HCI_INIT(hci_set_err_data_report_sync),
4645 	{}
4646 };
4647 
4648 /* Set Suggested Default Data Length to maximum if supported */
4649 static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
4650 {
4651 	struct hci_cp_le_write_def_data_len cp;
4652 
4653 	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4654 		return 0;
4655 
4656 	memset(&cp, 0, sizeof(cp));
4657 	cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
4658 	cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
4659 
4660 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
4661 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4662 }
4663 
4664 /* Set Default PHY parameters if the command is supported; enable all
4665  * supported PHYs according to the LE feature bits.
4666  */
4667 static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
4668 {
4669 	struct hci_cp_le_set_default_phy cp;
4670 
4671 	if (!(hdev->commands[35] & 0x20)) {
4672 		/* If the command is not supported it means only 1M PHY is
4673 		 * supported.
4674 		 */
4675 		hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
4676 		hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
4677 		return 0;
4678 	}
4679 
4680 	memset(&cp, 0, sizeof(cp));
4681 	cp.all_phys = 0x00;
4682 	cp.tx_phys = HCI_LE_SET_PHY_1M;
4683 	cp.rx_phys = HCI_LE_SET_PHY_1M;
4684 
4685 	/* Enables 2M PHY if supported */
4686 	if (le_2m_capable(hdev)) {
4687 		cp.tx_phys |= HCI_LE_SET_PHY_2M;
4688 		cp.rx_phys |= HCI_LE_SET_PHY_2M;
4689 	}
4690 
4691 	/* Enables Coded PHY if supported */
4692 	if (le_coded_capable(hdev)) {
4693 		cp.tx_phys |= HCI_LE_SET_PHY_CODED;
4694 		cp.rx_phys |= HCI_LE_SET_PHY_CODED;
4695 	}
4696 
4697 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4698 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4699 }
4700 
4701 static const struct hci_init_stage le_init4[] = {
4702 	/* HCI_OP_LE_WRITE_DEF_DATA_LEN */
4703 	HCI_INIT(hci_le_set_write_def_data_len_sync),
4704 	/* HCI_OP_LE_SET_DEFAULT_PHY */
4705 	HCI_INIT(hci_le_set_default_phy_sync),
4706 	{}
4707 };
4708 
4709 static int hci_init4_sync(struct hci_dev *hdev)
4710 {
4711 	int err;
4712 
4713 	bt_dev_dbg(hdev, "");
4714 
4715 	err = hci_init_stage_sync(hdev, hci_init4);
4716 	if (err)
4717 		return err;
4718 
4719 	if (lmp_le_capable(hdev))
4720 		return hci_init_stage_sync(hdev, le_init4);
4721 
4722 	return 0;
4723 }
4724 
4725 static int hci_init_sync(struct hci_dev *hdev)
4726 {
4727 	int err;
4728 
4729 	err = hci_init1_sync(hdev);
4730 	if (err < 0)
4731 		return err;
4732 
4733 	if (hci_dev_test_flag(hdev, HCI_SETUP))
4734 		hci_debugfs_create_basic(hdev);
4735 
4736 	err = hci_init2_sync(hdev);
4737 	if (err < 0)
4738 		return err;
4739 
4740 	err = hci_init3_sync(hdev);
4741 	if (err < 0)
4742 		return err;
4743 
4744 	err = hci_init4_sync(hdev);
4745 	if (err < 0)
4746 		return err;
4747 
4748 	/* This function is only called when the controller is actually in
4749 	 * the configured state. When the controller is marked as
4750 	 * unconfigured, this initialization procedure is not run.
4751 	 *
4752 	 * This means that a controller may run through its setup phase
4753 	 * and then discover missing settings. If that is the case, this
4754 	 * function will not be called at that point; it will only be
4755 	 * called later, during the config phase.
4756 	 *
4757 	 * So create the debugfs entries and register the SMP channels
4758 	 * only when in the setup phase or the config phase.
4759 	 */
4760 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4761 	    !hci_dev_test_flag(hdev, HCI_CONFIG))
4762 		return 0;
4763 
4764 	if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
4765 		return 0;
4766 
4767 	hci_debugfs_create_common(hdev);
4768 
4769 	if (lmp_bredr_capable(hdev))
4770 		hci_debugfs_create_bredr(hdev);
4771 
4772 	if (lmp_le_capable(hdev))
4773 		hci_debugfs_create_le(hdev);
4774 
4775 	return 0;
4776 }
4777 
4778 #define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc }
4779 
4780 static const struct {
4781 	unsigned long quirk;
4782 	const char *desc;
4783 } hci_broken_table[] = {
4784 	HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
4785 			 "HCI Read Local Supported Commands not supported"),
4786 	HCI_QUIRK_BROKEN(STORED_LINK_KEY,
4787 			 "HCI Delete Stored Link Key command is advertised, "
4788 			 "but not supported."),
4789 	HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
4790 			 "HCI Read Default Erroneous Data Reporting command is "
4791 			 "advertised, but not supported."),
4792 	HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
4793 			 "HCI Read Transmit Power Level command is advertised, "
4794 			 "but not supported."),
4795 	HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL,
4796 			 "HCI Set Event Filter command not supported."),
4797 	HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
4798 			 "HCI Enhanced Setup Synchronous Connection command is "
4799 			 "advertised, but not supported."),
4800 	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
4801 			 "HCI LE Set Random Private Address Timeout command is "
4802 			 "advertised, but not supported."),
4803 	HCI_QUIRK_BROKEN(LE_CODED,
4804 			 "HCI LE Coded PHY feature bit is set, "
4805 			 "but its usage is not supported.")
4806 };
4807 
4808 /* This function handles the hdev setup stage:
4809  *
4810  * Calls hdev->setup
4811  * Sets up the address if HCI_QUIRK_USE_BDADDR_PROPERTY is set.
4812  */
4813 static int hci_dev_setup_sync(struct hci_dev *hdev)
4814 {
4815 	int ret = 0;
4816 	bool invalid_bdaddr;
4817 	size_t i;
4818 
4819 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4820 	    !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks))
4821 		return 0;
4822 
4823 	bt_dev_dbg(hdev, "");
4824 
4825 	hci_sock_dev_event(hdev, HCI_DEV_SETUP);
4826 
4827 	if (hdev->setup)
4828 		ret = hdev->setup(hdev);
4829 
4830 	for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) {
4831 		if (test_bit(hci_broken_table[i].quirk, &hdev->quirks))
4832 			bt_dev_warn(hdev, "%s", hci_broken_table[i].desc);
4833 	}
4834 
4835 	/* The transport driver can set the quirk to mark the
4836 	 * BD_ADDR invalid before creating the HCI device or in
4837 	 * its setup callback.
4838 	 */
4839 	invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
4840 			 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
4841 	if (!ret) {
4842 		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) &&
4843 		    !bacmp(&hdev->public_addr, BDADDR_ANY))
4844 			hci_dev_get_bd_addr_from_property(hdev);
4845 
4846 		if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
4847 		    hdev->set_bdaddr) {
4848 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4849 			if (!ret)
4850 				invalid_bdaddr = false;
4851 		}
4852 	}
4853 
4854 	/* The transport driver can set these quirks before
4855 	 * creating the HCI device or in its setup callback.
4856 	 *
4857 	 * For the invalid BD_ADDR quirk it is possible that
4858 	 * it becomes a valid address if the bootloader does
4859 	 * provide it (see above).
4860 	 *
4861 	 * In case any of them is set, the controller has to
4862 	 * start up as unconfigured.
4863 	 */
4864 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
4865 	    invalid_bdaddr)
4866 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
4867 
4868 	/* For an unconfigured controller it is required to
4869 	 * read at least the version information provided by
4870 	 * the Read Local Version Information command.
4871 	 *
4872 	 * If the set_bdaddr driver callback is provided, then
4873 	 * also the original Bluetooth public device address
4874 	 * will be read using the Read BD Address command.
4875 	 */
4876 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4877 		return hci_unconf_init_sync(hdev);
4878 
4879 	return ret;
4880 }
4881 
4882 /* This function handles hdev init stage:
4883  *
4884  * Calls hci_dev_setup_sync to perform setup stage
4885  * Calls hci_init_sync to perform HCI command init sequence
4886  */
4887 static int hci_dev_init_sync(struct hci_dev *hdev)
4888 {
4889 	int ret;
4890 
4891 	bt_dev_dbg(hdev, "");
4892 
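	/* Start with room for a single outstanding HCI command */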
4893 	atomic_set(&hdev->cmd_cnt, 1);
4894 	set_bit(HCI_INIT, &hdev->flags);
4895 
4896 	ret = hci_dev_setup_sync(hdev);
4897 
4898 	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
4899 		/* If public address change is configured, ensure that
4900 		 * the address gets programmed. If the driver does not
4901 		 * support changing the public address, fail the power
4902 		 * on procedure.
4903 		 */
4904 		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
4905 		    hdev->set_bdaddr)
4906 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4907 		else
4908 			ret = -EADDRNOTAVAIL;
4909 	}
4910 
4911 	if (!ret) {
4912 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
4913 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4914 			ret = hci_init_sync(hdev);
4915 			if (!ret && hdev->post_init)
4916 				ret = hdev->post_init(hdev);
4917 		}
4918 	}
4919 
4920 	/* If the HCI Reset command is clearing all diagnostic settings,
4921 	 * then they need to be reprogrammed after the init procedure
4922 	 * completed.
4923 	 */
4924 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
4925 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4926 	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
4927 		ret = hdev->set_diag(hdev, true);
4928 
4929 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4930 		msft_do_open(hdev);
4931 		aosp_do_open(hdev);
4932 	}
4933 
4934 	clear_bit(HCI_INIT, &hdev->flags);
4935 
4936 	return ret;
4937 }
4938 
4939 int hci_dev_open_sync(struct hci_dev *hdev)
4940 {
4941 	int ret;
4942 
4943 	bt_dev_dbg(hdev, "");
4944 
4945 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
4946 		ret = -ENODEV;
4947 		goto done;
4948 	}
4949 
4950 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4951 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4952 		/* Check for rfkill but allow the HCI setup stage to
4953 		 * proceed (which in itself doesn't cause any RF activity).
4954 		 */
4955 		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
4956 			ret = -ERFKILL;
4957 			goto done;
4958 		}
4959 
4960 		/* Check for a valid public address or a configured static
4961 		 * random address, but let the HCI setup proceed so it can
4962 		 * determine whether a public address is available.
4964 		 *
4965 		 * In case of user channel usage, it is not important
4966 		 * if a public address or static random address is
4967 		 * available.
4968 		 */
4969 		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4970 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
4971 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
4972 			ret = -EADDRNOTAVAIL;
4973 			goto done;
4974 		}
4975 	}
4976 
4977 	if (test_bit(HCI_UP, &hdev->flags)) {
4978 		ret = -EALREADY;
4979 		goto done;
4980 	}
4981 
4982 	if (hdev->open(hdev)) {
4983 		ret = -EIO;
4984 		goto done;
4985 	}
4986 
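	/* Reset the device coredump state before bringing the device up */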
4987 	hci_devcd_reset(hdev);
4988 
4989 	set_bit(HCI_RUNNING, &hdev->flags);
4990 	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
4991 
4992 	ret = hci_dev_init_sync(hdev);
4993 	if (!ret) {
4994 		hci_dev_hold(hdev);
4995 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
4996 		hci_adv_instances_set_rpa_expired(hdev, true);
4997 		set_bit(HCI_UP, &hdev->flags);
4998 		hci_sock_dev_event(hdev, HCI_DEV_UP);
4999 		hci_leds_update_powered(hdev, true);
5000 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
5001 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
5002 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
5003 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5004 		    hci_dev_test_flag(hdev, HCI_MGMT)) {
5005 			ret = hci_powered_update_sync(hdev);
5006 			mgmt_power_on(hdev, ret);
5007 		}
5008 	} else {
5009 		/* Init failed, cleanup */
5010 		flush_work(&hdev->tx_work);
5011 
5012 		/* Since hci_rx_work() may queue new cmd_work, it should
5013 		 * be flushed first to avoid an unexpected call of
5014 		 * hci_cmd_work().
5015 		 */
5016 		flush_work(&hdev->rx_work);
5017 		flush_work(&hdev->cmd_work);
5018 
5019 		skb_queue_purge(&hdev->cmd_q);
5020 		skb_queue_purge(&hdev->rx_q);
5021 
5022 		if (hdev->flush)
5023 			hdev->flush(hdev);
5024 
5025 		if (hdev->sent_cmd) {
5026 			cancel_delayed_work_sync(&hdev->cmd_timer);
5027 			kfree_skb(hdev->sent_cmd);
5028 			hdev->sent_cmd = NULL;
5029 		}
5030 
5031 		if (hdev->req_skb) {
5032 			kfree_skb(hdev->req_skb);
5033 			hdev->req_skb = NULL;
5034 		}
5035 
5036 		clear_bit(HCI_RUNNING, &hdev->flags);
5037 		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
5038 
5039 		hdev->close(hdev);
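		/* Clear all flags except HCI_RAW */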
5040 		hdev->flags &= BIT(HCI_RAW);
5041 	}
5042 
5043 done:
5044 	return ret;
5045 }
5046 
5047 /* This function requires the caller holds hdev->lock */
5048 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
5049 {
5050 	struct hci_conn_params *p;
5051 
5052 	list_for_each_entry(p, &hdev->le_conn_params, list) {
5053 		hci_pend_le_list_del_init(p);
5054 		if (p->conn) {
5055 			hci_conn_drop(p->conn);
5056 			hci_conn_put(p->conn);
5057 			p->conn = NULL;
5058 		}
5059 	}
5060 
5061 	BT_DBG("All LE pending actions cleared");
5062 }
5063 
5064 static int hci_dev_shutdown(struct hci_dev *hdev)
5065 {
5066 	int err = 0;
5067 	/* Similar to how we first do setup and then set the exclusive access
5068 	 * bit for userspace, we must first unset userchannel and then clean up.
5069 	 * Otherwise, the kernel can't properly use the hci channel to clean up
5070 	 * the controller (some shutdown routines require sending additional
5071 	 * commands to the controller for example).
5072 	 */
5073 	bool was_userchannel =
5074 		hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL);
5075 
5076 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
5077 	    test_bit(HCI_UP, &hdev->flags)) {
5078 		/* Execute vendor specific shutdown routine */
5079 		if (hdev->shutdown)
5080 			err = hdev->shutdown(hdev);
5081 	}
5082 
5083 	if (was_userchannel)
5084 		hci_dev_set_flag(hdev, HCI_USER_CHANNEL);
5085 
5086 	return err;
5087 }
5088 
5089 int hci_dev_close_sync(struct hci_dev *hdev)
5090 {
5091 	bool auto_off;
5092 	int err = 0;
5093 
5094 	bt_dev_dbg(hdev, "");
5095 
5096 	cancel_delayed_work(&hdev->power_off);
5097 	cancel_delayed_work(&hdev->ncmd_timer);
5098 	cancel_delayed_work(&hdev->le_scan_disable);
5099 
5100 	hci_cmd_sync_cancel_sync(hdev, ENODEV);
5101 
5102 	cancel_interleave_scan(hdev);
5103 
5104 	if (hdev->adv_instance_timeout) {
5105 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
5106 		hdev->adv_instance_timeout = 0;
5107 	}
5108 
5109 	err = hci_dev_shutdown(hdev);
5110 
5111 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
5112 		cancel_delayed_work_sync(&hdev->cmd_timer);
5113 		return err;
5114 	}
5115 
5116 	hci_leds_update_powered(hdev, false);
5117 
5118 	/* Flush the RX and TX work items */
5119 	flush_work(&hdev->tx_work);
5120 	flush_work(&hdev->rx_work);
5121 
5122 	if (hdev->discov_timeout > 0) {
5123 		hdev->discov_timeout = 0;
5124 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5125 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
5126 	}
5127 
5128 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
5129 		cancel_delayed_work(&hdev->service_cache);
5130 
5131 	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
5132 		struct adv_info *adv_instance;
5133 
5134 		cancel_delayed_work_sync(&hdev->rpa_expired);
5135 
5136 		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
5137 			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
5138 	}
5139 
5140 	/* Avoid potential lockdep warnings from the *_flush() calls by
5141 	 * ensuring the workqueue is empty up front.
5142 	 */
5143 	drain_workqueue(hdev->workqueue);
5144 
5145 	hci_dev_lock(hdev);
5146 
5147 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5148 
5149 	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
5150 
5151 	if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5152 	    hci_dev_test_flag(hdev, HCI_MGMT))
5153 		__mgmt_power_off(hdev);
5154 
5155 	hci_inquiry_cache_flush(hdev);
5156 	hci_pend_le_actions_clear(hdev);
5157 	hci_conn_hash_flush(hdev);
5158 	/* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
5159 	smp_unregister(hdev);
5160 	hci_dev_unlock(hdev);
5161 
5162 	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
5163 
5164 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5165 		aosp_do_close(hdev);
5166 		msft_do_close(hdev);
5167 	}
5168 
5169 	if (hdev->flush)
5170 		hdev->flush(hdev);
5171 
5172 	/* Reset device */
5173 	skb_queue_purge(&hdev->cmd_q);
5174 	atomic_set(&hdev->cmd_cnt, 1);
5175 	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
5176 	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5177 		set_bit(HCI_INIT, &hdev->flags);
5178 		hci_reset_sync(hdev);
5179 		clear_bit(HCI_INIT, &hdev->flags);
5180 	}
5181 
5182 	/* Flush cmd work */
5183 	flush_work(&hdev->cmd_work);
5184 
5185 	/* Drop queues */
5186 	skb_queue_purge(&hdev->rx_q);
5187 	skb_queue_purge(&hdev->cmd_q);
5188 	skb_queue_purge(&hdev->raw_q);
5189 
5190 	/* Drop last sent command */
5191 	if (hdev->sent_cmd) {
5192 		cancel_delayed_work_sync(&hdev->cmd_timer);
5193 		kfree_skb(hdev->sent_cmd);
5194 		hdev->sent_cmd = NULL;
5195 	}
5196 
5197 	/* Drop last request */
5198 	if (hdev->req_skb) {
5199 		kfree_skb(hdev->req_skb);
5200 		hdev->req_skb = NULL;
5201 	}
5202 
5203 	clear_bit(HCI_RUNNING, &hdev->flags);
5204 	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
5205 
5206 	/* After this point our queues are empty and no tasks are scheduled. */
5207 	hdev->close(hdev);
5208 
5209 	/* Clear flags */
5210 	hdev->flags &= BIT(HCI_RAW);
5211 	hci_dev_clear_volatile_flags(hdev);
5212 
5213 	memset(hdev->eir, 0, sizeof(hdev->eir));
5214 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
5215 	bacpy(&hdev->random_addr, BDADDR_ANY);
5216 	hci_codec_list_clear(&hdev->local_codecs);
5217 
5218 	hci_dev_put(hdev);
5219 	return err;
5220 }
5221 
5222 /* This function performs the power-on HCI command sequence as follows:
5223  *
5224  * If the controller is already up (HCI_UP), it performs the
5225  * hci_powered_update_sync sequence; otherwise it runs hci_dev_open_sync,
5226  * which follows up with hci_powered_update_sync once init has completed.
5227  */
5228 static int hci_power_on_sync(struct hci_dev *hdev)
5229 {
5230 	int err;
5231 
5232 	if (test_bit(HCI_UP, &hdev->flags) &&
5233 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
5234 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
5235 		cancel_delayed_work(&hdev->power_off);
5236 		return hci_powered_update_sync(hdev);
5237 	}
5238 
5239 	err = hci_dev_open_sync(hdev);
5240 	if (err < 0)
5241 		return err;
5242 
5243 	/* During the HCI setup phase, a few error conditions are
5244 	 * ignored and they need to be checked now. If they are still
5245 	 * valid, it is important to turn the device back off.
5246 	 */
5247 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
5248 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
5249 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
5250 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
5251 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
5252 		hci_dev_close_sync(hdev);
5253 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
5254 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
5255 				   HCI_AUTO_OFF_TIMEOUT);
5256 	}
5257 
5258 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
5259 		/* For unconfigured devices, set the HCI_RAW flag
5260 		 * so that userspace can easily identify them.
5261 		 */
5262 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5263 			set_bit(HCI_RAW, &hdev->flags);
5264 
5265 		/* For fully configured devices, this will send
5266 		 * the Index Added event. For unconfigured devices,
5267 		 * it will send the Unconfigured Index Added event.
5268 		 *
5269 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
5270 		 * and no event will be sent.
5271 		 */
5272 		mgmt_index_added(hdev);
5273 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5274 		/* Now that the controller is configured, it is
5275 		 * important to clear the HCI_RAW flag.
5276 		 */
5277 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5278 			clear_bit(HCI_RAW, &hdev->flags);
5279 
5280 		/* Powering on the controller with HCI_CONFIG set only
5281 		 * happens with the transition from unconfigured to
5282 		 * configured. This will send the Index Added event.
5283 		 */
5284 		mgmt_index_added(hdev);
5285 	}
5286 
5287 	return 0;
5288 }
5289 
5290 static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
5291 {
5292 	struct hci_cp_remote_name_req_cancel cp;
5293 
5294 	memset(&cp, 0, sizeof(cp));
5295 	bacpy(&cp.bdaddr, addr);
5296 
5297 	return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
5298 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5299 }
5300 
5301 int hci_stop_discovery_sync(struct hci_dev *hdev)
5302 {
5303 	struct discovery_state *d = &hdev->discovery;
5304 	struct inquiry_entry *e;
5305 	int err;
5306 
5307 	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
5308 
5309 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
5310 		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
5311 			err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
5312 						    0, NULL, HCI_CMD_TIMEOUT);
5313 			if (err)
5314 				return err;
5315 		}
5316 
5317 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
5318 			cancel_delayed_work(&hdev->le_scan_disable);
5319 
5320 			err = hci_scan_disable_sync(hdev);
5321 			if (err)
5322 				return err;
5323 		}
5324 
5325 	} else {
5326 		err = hci_scan_disable_sync(hdev);
5327 		if (err)
5328 			return err;
5329 	}
5330 
5331 	/* Resume advertising if it was paused */
5332 	if (use_ll_privacy(hdev))
5333 		hci_resume_advertising_sync(hdev);
5334 
5335 	/* No further actions needed for LE-only discovery */
5336 	if (d->type == DISCOV_TYPE_LE)
5337 		return 0;
5338 
5339 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
5340 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
5341 						     NAME_PENDING);
5342 		if (!e)
5343 			return 0;
5344 
5345 		return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
5346 	}
5347 
5348 	return 0;
5349 }
5350 
5351 static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
5352 			       u8 reason)
5353 {
5354 	struct hci_cp_disconnect cp;
5355 
5356 	if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
5357 		/* This is a BIS connection, hci_conn_del will
5358 		 * do the necessary cleanup.
5359 		 */
5360 		hci_dev_lock(hdev);
5361 		hci_conn_failed(conn, reason);
5362 		hci_dev_unlock(hdev);
5363 
5364 		return 0;
5365 	}
5366 
5367 	memset(&cp, 0, sizeof(cp));
5368 	cp.handle = cpu_to_le16(conn->handle);
5369 	cp.reason = reason;
5370 
5371 	/* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5372 	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5373 	 * used when suspending or powering off, where we don't want to wait
5374 	 * for the peer's response.
5375 	 */
5376 	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5377 		return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
5378 						sizeof(cp), &cp,
5379 						HCI_EV_DISCONN_COMPLETE,
5380 						HCI_CMD_TIMEOUT, NULL);
5381 
5382 	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp,
5383 				     HCI_CMD_TIMEOUT);
5384 }
5385 
5386 static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
5387 				      struct hci_conn *conn, u8 reason)
5388 {
5389 	/* Return the reason if still scanning, since the connection will
5390 	 * probably be cleaned up directly.
5391 	 */
5392 	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
5393 		return reason;
5394 
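	/* Only the connection initiator can cancel, and only once per attempt */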
5395 	if (conn->role == HCI_ROLE_SLAVE ||
5396 	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
5397 		return 0;
5398 
5399 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
5400 				     0, NULL, HCI_CMD_TIMEOUT);
5401 }
5402 
5403 static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
5404 				   u8 reason)
5405 {
5406 	if (conn->type == LE_LINK)
5407 		return hci_le_connect_cancel_sync(hdev, conn, reason);
5408 
5409 	if (conn->type == ISO_LINK) {
5410 		/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
5411 		 * page 1857:
5412 		 *
5413 		 * If this command is issued for a CIS on the Central and the
5414 		 * CIS is successfully terminated before being established,
5415 		 * then an HCI_LE_CIS_Established event shall also be sent for
5416 		 * this CIS with the Status Operation Cancelled by Host (0x44).
5417 		 */
5418 		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
5419 			return hci_disconnect_sync(hdev, conn, reason);
5420 
5421 		/* A CIS with no Create CIS sent has nothing to cancel */
5422 		if (bacmp(&conn->dst, BDADDR_ANY))
5423 			return HCI_ERROR_LOCAL_HOST_TERM;
5424 
5425 		/* There is no way to cancel a BIS without terminating the BIG,
5426 		 * which is done later on connection cleanup.
5427 		 */
5428 		return 0;
5429 	}
5430 
5431 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
5432 		return 0;
5433 
5434 	/* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5435 	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5436 	 * used when suspending or powering off, where we don't want to wait
5437 	 * for the peer's response.
5438 	 */
5439 	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5440 		return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL,
5441 						6, &conn->dst,
5442 						HCI_EV_CONN_COMPLETE,
5443 						HCI_CMD_TIMEOUT, NULL);
5444 
5445 	return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
5446 				     6, &conn->dst, HCI_CMD_TIMEOUT);
5447 }
5448 
5449 static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
5450 			       u8 reason)
5451 {
5452 	struct hci_cp_reject_sync_conn_req cp;
5453 
5454 	memset(&cp, 0, sizeof(cp));
5455 	bacpy(&cp.bdaddr, &conn->dst);
5456 	cp.reason = reason;
5457 
5458 	/* SCO rejection has its own limited set of
5459 	 * allowed error values (0x0D-0x0F).
5460 	 */
5461 	if (reason < 0x0d || reason > 0x0f)
5462 		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
5463 
5464 	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
5465 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5466 }
5467 
5468 static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
5469 				  u8 reason)
5470 {
5471 	struct hci_cp_le_reject_cis cp;
5472 
5473 	memset(&cp, 0, sizeof(cp));
5474 	cp.handle = cpu_to_le16(conn->handle);
5475 	cp.reason = reason;
5476 
5477 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
5478 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5479 }
5480 
5481 static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
5482 				u8 reason)
5483 {
5484 	struct hci_cp_reject_conn_req cp;
5485 
5486 	if (conn->type == ISO_LINK)
5487 		return hci_le_reject_cis_sync(hdev, conn, reason);
5488 
5489 	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
5490 		return hci_reject_sco_sync(hdev, conn, reason);
5491 
5492 	memset(&cp, 0, sizeof(cp));
5493 	bacpy(&cp.bdaddr, &conn->dst);
5494 	cp.reason = reason;
5495 
5496 	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
5497 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5498 }
5499 
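/* Abort a connection according to its state: disconnect established
 * links, cancel pending outgoing ones, reject incoming requests, and
 * otherwise fall back to cleaning up the hci_conn object directly.
 */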
5500 int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
5501 {
5502 	int err = 0;
5503 	u16 handle = conn->handle;
5504 	bool disconnect = false;
5505 	struct hci_conn *c;
5506 
5507 	switch (conn->state) {
5508 	case BT_CONNECTED:
5509 	case BT_CONFIG:
5510 		err = hci_disconnect_sync(hdev, conn, reason);
5511 		break;
5512 	case BT_CONNECT:
5513 		err = hci_connect_cancel_sync(hdev, conn, reason);
5514 		break;
5515 	case BT_CONNECT2:
5516 		err = hci_reject_conn_sync(hdev, conn, reason);
5517 		break;
5518 	case BT_OPEN:
5519 	case BT_BOUND:
5520 		break;
5521 	default:
5522 		disconnect = true;
5523 		break;
5524 	}
5525 
5526 	hci_dev_lock(hdev);
5527 
5528 	/* Check if the connection has been cleaned up concurrently */
5529 	c = hci_conn_hash_lookup_handle(hdev, handle);
5530 	if (!c || c != conn) {
5531 		err = 0;
5532 		goto unlock;
5533 	}
5534 
5535 	/* Clean up the hci_conn object if it cannot be cancelled, as that
5536 	 * likely means the controller and host stack are out of sync, or,
5537 	 * in the LE case, it was still scanning, so it can be cleaned up
5538 	 * safely.
5539 	 */
5540 	if (disconnect) {
5541 		conn->state = BT_CLOSED;
5542 		hci_disconn_cfm(conn, reason);
5543 		hci_conn_del(conn);
5544 	} else {
5545 		hci_conn_failed(conn, reason);
5546 	}
5547 
5548 unlock:
5549 	hci_dev_unlock(hdev);
5550 	return err;
5551 }
5552 
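/* Abort every connection in the conn hash. The RCU lock cannot be held
 * across hci_abort_conn_sync(), so each connection is pinned with
 * hci_conn_get() while the lock is dropped.
 */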
5553 static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
5554 {
5555 	struct list_head *head = &hdev->conn_hash.list;
5556 	struct hci_conn *conn;
5557 
5558 	rcu_read_lock();
5559 	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
5560 		/* Make sure the connection is not freed while unlocking */
5561 		conn = hci_conn_get(conn);
5562 		rcu_read_unlock();
5563 		/* Disregard possible errors since hci_conn_del shall have been
5564 		 * called even if errors occurred: they would cause
5565 		 * hci_conn_failed to be called, which calls hci_conn_del
5566 		 * internally.
5567 		 */
5568 		hci_abort_conn_sync(hdev, conn, reason);
5569 		hci_conn_put(conn);
5570 		rcu_read_lock();
5571 	}
5572 	rcu_read_unlock();
5573 
5574 	return 0;
5575 }
5576 
5577 /* This function performs the power off HCI command sequence as follows:
5578  *
5579  * Clear Advertising
5580  * Stop Discovery
5581  * Disconnect all connections
5582  * hci_dev_close_sync
5583  */
5584 static int hci_power_off_sync(struct hci_dev *hdev)
5585 {
5586 	int err;
5587 
5588 	/* If controller is already down there is nothing to do */
5589 	if (!test_bit(HCI_UP, &hdev->flags))
5590 		return 0;
5591 
5592 	hci_dev_set_flag(hdev, HCI_POWERING_DOWN);
5593 
5594 	if (test_bit(HCI_ISCAN, &hdev->flags) ||
5595 	    test_bit(HCI_PSCAN, &hdev->flags)) {
5596 		err = hci_write_scan_enable_sync(hdev, 0x00);
5597 		if (err)
5598 			goto out;
5599 	}
5600 
5601 	err = hci_clear_adv_sync(hdev, NULL, false);
5602 	if (err)
5603 		goto out;
5604 
5605 	err = hci_stop_discovery_sync(hdev);
5606 	if (err)
5607 		goto out;
5608 
5609 	/* Terminated due to Power Off */
5610 	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
5611 	if (err)
5612 		goto out;
5613 
5614 	err = hci_dev_close_sync(hdev);
5615 
5616 out:
5617 	hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
5618 	return err;
5619 }
5620 
5621 int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
5622 {
5623 	if (val)
5624 		return hci_power_on_sync(hdev);
5625 
5626 	return hci_power_off_sync(hdev);
5627 }
5628 
5629 static int hci_write_iac_sync(struct hci_dev *hdev)
5630 {
5631 	struct hci_cp_write_current_iac_lap cp;
5632 
5633 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
5634 		return 0;
5635 
5636 	memset(&cp, 0, sizeof(cp));
5637 
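	/* The IACs below are the reserved LAPs GIAC 0x9E8B33 and LIAC
	 * 0x9E8B00, stored least significant byte first; the command
	 * payload is one num_iac byte plus 3 bytes per LAP.
	 */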
5638 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
5639 		/* Limited discoverable mode */
5640 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
5641 		cp.iac_lap[0] = 0x00;	/* LIAC */
5642 		cp.iac_lap[1] = 0x8b;
5643 		cp.iac_lap[2] = 0x9e;
5644 		cp.iac_lap[3] = 0x33;	/* GIAC */
5645 		cp.iac_lap[4] = 0x8b;
5646 		cp.iac_lap[5] = 0x9e;
5647 	} else {
5648 		/* General discoverable mode */
5649 		cp.num_iac = 1;
5650 		cp.iac_lap[0] = 0x33;	/* GIAC */
5651 		cp.iac_lap[1] = 0x8b;
5652 		cp.iac_lap[2] = 0x9e;
5653 	}
5654 
5655 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
5656 				     (cp.num_iac * 3) + 1, &cp,
5657 				     HCI_CMD_TIMEOUT);
5658 }
5659 
5660 int hci_update_discoverable_sync(struct hci_dev *hdev)
5661 {
5662 	int err = 0;
5663 
5664 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5665 		err = hci_write_iac_sync(hdev);
5666 		if (err)
5667 			return err;
5668 
5669 		err = hci_update_scan_sync(hdev);
5670 		if (err)
5671 			return err;
5672 
5673 		err = hci_update_class_sync(hdev);
5674 		if (err)
5675 			return err;
5676 	}
5677 
5678 	/* Advertising instances don't use the global discoverable setting, so
5679 	 * only update AD if advertising was enabled using Set Advertising.
5680 	 */
5681 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
5682 		err = hci_update_adv_data_sync(hdev, 0x00);
5683 		if (err)
5684 			return err;
5685 
5686 		/* Discoverable mode affects the local advertising
5687 		 * address in limited privacy mode.
5688 		 */
5689 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
5690 			if (ext_adv_capable(hdev))
5691 				err = hci_start_ext_adv_sync(hdev, 0x00);
5692 			else
5693 				err = hci_enable_advertising_sync(hdev);
5694 		}
5695 	}
5696 
5697 	return err;
5698 }
5699 
5700 static int update_discoverable_sync(struct hci_dev *hdev, void *data)
5701 {
5702 	return hci_update_discoverable_sync(hdev);
5703 }
5704 
5705 int hci_update_discoverable(struct hci_dev *hdev)
5706 {
5707 	/* Only queue if it would have any effect */
5708 	if (hdev_is_powered(hdev) &&
5709 	    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5710 	    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
5711 	    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
5712 		return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
5713 					  NULL);
5714 
5715 	return 0;
5716 }
5717 
5718 int hci_update_connectable_sync(struct hci_dev *hdev)
5719 {
5720 	int err;
5721 
5722 	err = hci_update_scan_sync(hdev);
5723 	if (err)
5724 		return err;
5725 
5726 	/* If BR/EDR is not enabled and we disable advertising as a
5727 	 * by-product of disabling connectable, we need to update the
5728 	 * advertising flags.
5729 	 */
5730 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
5731 		err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
5732 
5733 	/* Update the advertising parameters if necessary */
5734 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5735 	    !list_empty(&hdev->adv_instances)) {
5736 		if (ext_adv_capable(hdev))
5737 			err = hci_start_ext_adv_sync(hdev,
5738 						     hdev->cur_adv_instance);
5739 		else
5740 			err = hci_enable_advertising_sync(hdev);
5741 
5742 		if (err)
5743 			return err;
5744 	}
5745 
5746 	return hci_update_passive_scan_sync(hdev);
5747 }
5748 
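/* Start BR/EDR inquiry using the limited or general IAC depending on
 * hdev->discovery.limited. length is the inquiry duration in units of
 * 1.28 s and num_rsp limits the number of responses (0x00 means
 * unlimited).
 */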
5749 int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp)
5750 {
5751 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
5752 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
5753 	struct hci_cp_inquiry cp;
5754 
5755 	bt_dev_dbg(hdev, "");
5756 
5757 	if (test_bit(HCI_INQUIRY, &hdev->flags))
5758 		return 0;
5759 
5760 	hci_dev_lock(hdev);
5761 	hci_inquiry_cache_flush(hdev);
5762 	hci_dev_unlock(hdev);
5763 
5764 	memset(&cp, 0, sizeof(cp));
5765 
5766 	if (hdev->discovery.limited)
5767 		memcpy(&cp.lap, liac, sizeof(cp.lap));
5768 	else
5769 		memcpy(&cp.lap, giac, sizeof(cp.lap));
5770 
5771 	cp.length = length;
5772 	cp.num_rsp = num_rsp;
5773 
5774 	return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
5775 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5776 }
5777 
5778 static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
5779 {
5780 	u8 own_addr_type;
5781 	/* Accept list is not used for discovery */
5782 	u8 filter_policy = 0x00;
5783 	/* Default is to enable duplicates filter */
5784 	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5785 	int err;
5786 
5787 	bt_dev_dbg(hdev, "");
5788 
5789 	/* If controller is scanning, it means the passive scanning is
5790 	 * running. Thus, we should temporarily stop it in order to set the
5791 	 * discovery scanning parameters.
5792 	 */
5793 	err = hci_scan_disable_sync(hdev);
5794 	if (err) {
5795 		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
5796 		return err;
5797 	}
5798 
5799 	cancel_interleave_scan(hdev);
5800 
5801 	/* Pause address resolution for active scan and stop advertising if
5802 	 * privacy is enabled.
5803 	 */
5804 	err = hci_pause_addr_resolution(hdev);
5805 	if (err)
5806 		goto failed;
5807 
5808 	/* All active scans will be done with either a resolvable private
5809 	 * address (when privacy feature has been enabled) or non-resolvable
5810 	 * private address.
5811 	 */
5812 	err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
5813 					     &own_addr_type);
5814 	if (err < 0)
5815 		own_addr_type = ADDR_LE_DEV_PUBLIC;
5816 
5817 	if (hci_is_adv_monitoring(hdev) ||
5818 	    (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
5819 	    hdev->discovery.result_filtering)) {
5820 		/* The duplicate filter should be disabled when an advertisement
5821 		 * monitor is activated, otherwise AdvMon can only receive one
5822 		 * advertisement per peer during active scanning, and might
5823 		 * report loss to these peers.
5824 		 *
5825 		 * Likewise, if the controller does strict duplicate filtering
5826 		 * and the discovery requires result filtering, disable the
5827 		 * controller-based filtering, since it could suppress reports
5828 		 * that would match the host filter.
5829 		 */
5830 		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5831 	}
5832 
5833 	err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
5834 				  hdev->le_scan_window_discovery,
5835 				  own_addr_type, filter_policy, filter_dup);
5836 	if (!err)
5837 		return err;
5838 
5839 failed:
5840 	/* Resume advertising if it was paused */
5841 	if (use_ll_privacy(hdev))
5842 		hci_resume_advertising_sync(hdev);
5843 
5844 	/* Resume passive scanning */
5845 	hci_update_passive_scan_sync(hdev);
5846 	return err;
5847 }
5848 
5849 static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
5850 {
5851 	int err;
5852 
5853 	bt_dev_dbg(hdev, "");
5854 
5855 	err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
5856 	if (err)
5857 		return err;
5858 
5859 	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
5860 }
5861 
5862 int hci_start_discovery_sync(struct hci_dev *hdev)
5863 {
5864 	unsigned long timeout;
5865 	int err;
5866 
5867 	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
5868 
5869 	switch (hdev->discovery.type) {
5870 	case DISCOV_TYPE_BREDR:
5871 		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
5872 	case DISCOV_TYPE_INTERLEAVED:
5873 		/* When running simultaneous discovery, the LE scanning time
5874 		 * should occupy the whole discovery time since BR/EDR inquiry
5875 		 * and LE scanning are scheduled by the controller.
5876 		 *
5877 		 * For interleaving discovery in comparison, BR/EDR inquiry
5878 		 * and LE scanning are done sequentially with separate
5879 		 * timeouts.
5880 		 */
5881 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
5882 			     &hdev->quirks)) {
5883 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5884 			/* During simultaneous discovery, we double the LE scan
5885 			 * interval. We must leave some time for the controller
5886 			 * to do BR/EDR inquiry.
5887 			 */
5888 			err = hci_start_interleaved_discovery_sync(hdev);
5889 			break;
5890 		}
5891 
5892 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
5893 		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5894 		break;
5895 	case DISCOV_TYPE_LE:
5896 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5897 		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5898 		break;
5899 	default:
5900 		return -EINVAL;
5901 	}
5902 
5903 	if (err)
5904 		return err;
5905 
5906 	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
5907 
5908 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
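	/* Arm the le_scan_disable delayed work so that LE scanning is
	 * turned off again once the discovery timeout expires.
	 */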
5909 			   timeout);
5910 	return 0;
5911 }
5912 
5913 static void hci_suspend_monitor_sync(struct hci_dev *hdev)
5914 {
5915 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
5916 	case HCI_ADV_MONITOR_EXT_MSFT:
5917 		msft_suspend_sync(hdev);
5918 		break;
5919 	default:
5920 		return;
5921 	}
5922 }
5923 
5924 /* This function disables discovery and marks it as paused */
5925 static int hci_pause_discovery_sync(struct hci_dev *hdev)
5926 {
5927 	int old_state = hdev->discovery.state;
5928 	int err;
5929 
5930 	/* Nothing to do if discovery is already stopped/stopping or paused */
5931 	if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
5932 	    hdev->discovery_paused)
5933 		return 0;
5934 
5935 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5936 	err = hci_stop_discovery_sync(hdev);
5937 	if (err)
5938 		return err;
5939 
5940 	hdev->discovery_paused = true;
5941 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5942 
5943 	return 0;
5944 }
5945 
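/* Install a connection-setup event filter for every accept list entry
 * flagged for remote wakeup, then enable page scan if at least one
 * filter was set and disable it otherwise.
 */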
5946 static int hci_update_event_filter_sync(struct hci_dev *hdev)
5947 {
5948 	struct bdaddr_list_with_flags *b;
5949 	u8 scan = SCAN_DISABLED;
5950 	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
5951 	int err;
5952 
5953 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
5954 		return 0;
5955 
5956 	/* Some fake CSR controllers lock up after setting this type of
5957 	 * filter, so avoid sending the request altogether.
5958 	 */
5959 	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
5960 		return 0;
5961 
5962 	/* Always clear event filter when starting */
5963 	hci_clear_event_filter_sync(hdev);
5964 
5965 	list_for_each_entry(b, &hdev->accept_list, list) {
5966 		if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
5967 			continue;
5968 
5969 		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
5970 
5971 		err =  hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
5972 						 HCI_CONN_SETUP_ALLOW_BDADDR,
5973 						 &b->bdaddr,
5974 						 HCI_CONN_SETUP_AUTO_ON);
5975 		if (err)
5976 			bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
5977 				   &b->bdaddr);
5978 		else
5979 			scan = SCAN_PAGE;
5980 	}
5981 
5982 	/* Update the controller only if the desired scan state changed */
5983 	if (!!scan != scanning)
5984 		hci_write_scan_enable_sync(hdev, scan);
5986 
5987 	return 0;
5988 }
5989 
5990 /* This function disables scanning (BR/EDR and LE) and marks it as paused */
5991 static int hci_pause_scan_sync(struct hci_dev *hdev)
5992 {
5993 	if (hdev->scanning_paused)
5994 		return 0;
5995 
5996 	/* Disable page scan if enabled */
5997 	if (test_bit(HCI_PSCAN, &hdev->flags))
5998 		hci_write_scan_enable_sync(hdev, SCAN_DISABLED);
5999 
6000 	hci_scan_disable_sync(hdev);
6001 
6002 	hdev->scanning_paused = true;
6003 
6004 	return 0;
6005 }
6006 
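/* hci_suspend_sync() below is normally driven from hci_suspend_dev()
 * on system suspend rather than being called directly.
 */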
6007 /* This function performs the HCI suspend procedures in the following order:
6008  *
6009  * Pause discovery (active scanning/inquiry)
6010  * Pause Directed Advertising/Advertising
6011  * Pause Scanning (passive scanning in case discovery was not active)
6012  * Disconnect all connections
6013  * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup
6014  * otherwise:
6015  * Update event mask (only set events that are allowed to wake up the host)
6016  * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
6017  * Update passive scanning (lower duty cycle)
6018  * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
6019  */
6020 int hci_suspend_sync(struct hci_dev *hdev)
6021 {
6022 	int err;
6023 
6024 	/* If already marked as suspended there is nothing to do */
6025 	if (hdev->suspended)
6026 		return 0;
6027 
6028 	/* Mark device as suspended */
6029 	hdev->suspended = true;
6030 
6031 	/* Pause discovery if not already stopped */
6032 	hci_pause_discovery_sync(hdev);
6033 
6034 	/* Pause other advertisements */
6035 	hci_pause_advertising_sync(hdev);
6036 
6037 	/* Suspend monitor filters */
6038 	hci_suspend_monitor_sync(hdev);
6039 
6040 	/* Prevent disconnects from causing scanning to be re-enabled */
6041 	hci_pause_scan_sync(hdev);
6042 
6043 	if (hci_conn_count(hdev)) {
6044 		/* Soft disconnect everything (power off) */
6045 		err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
6046 		if (err) {
6047 			/* Set state to BT_RUNNING so resume doesn't notify */
6048 			hdev->suspend_state = BT_RUNNING;
6049 			hci_resume_sync(hdev);
6050 			return err;
6051 		}
6052 
6053 		/* Update the event mask so only the allowed events can wake
6054 		 * up the host.
6055 		 */
6056 		hci_set_event_mask_sync(hdev);
6057 	}
6058 
6059 	/* Only configure accept list if disconnect succeeded and wake
6060 	 * isn't being prevented.
6061 	 */
6062 	if (!hdev->wakeup || !hdev->wakeup(hdev)) {
6063 		hdev->suspend_state = BT_SUSPEND_DISCONNECT;
6064 		return 0;
6065 	}
6066 
6067 	/* Unpause to take care of updating scanning params */
6068 	hdev->scanning_paused = false;
6069 
6070 	/* Enable event filter for paired devices */
6071 	hci_update_event_filter_sync(hdev);
6072 
6073 	/* Update LE passive scan if enabled */
6074 	hci_update_passive_scan_sync(hdev);
6075 
6076 	/* Pause scan changes again. */
6077 	hdev->scanning_paused = true;
6078 
6079 	hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;
6080 
6081 	return 0;
6082 }
6083 
6084 /* This function resumes discovery */
6085 static int hci_resume_discovery_sync(struct hci_dev *hdev)
6086 {
6087 	int err;
6088 
6089 	/* If discovery was not paused there is nothing to do */
6090 	if (!hdev->discovery_paused)
6091 		return 0;
6092 
6093 	hdev->discovery_paused = false;
6094 
6095 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6096 
6097 	err = hci_start_discovery_sync(hdev);
6098 
6099 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
6100 				DISCOVERY_FINDING);
6101 
6102 	return err;
6103 }
6104 
6105 static void hci_resume_monitor_sync(struct hci_dev *hdev)
6106 {
6107 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
6108 	case HCI_ADV_MONITOR_EXT_MSFT:
6109 		msft_resume_sync(hdev);
6110 		break;
6111 	default:
6112 		return;
6113 	}
6114 }
6115 
6116 /* This function resumes scanning and resets the paused flag */
6117 static int hci_resume_scan_sync(struct hci_dev *hdev)
6118 {
6119 	if (!hdev->scanning_paused)
6120 		return 0;
6121 
6122 	hdev->scanning_paused = false;
6123 
6124 	hci_update_scan_sync(hdev);
6125 
6126 	/* Reset passive scanning to normal */
6127 	hci_update_passive_scan_sync(hdev);
6128 
6129 	return 0;
6130 }
6131 
6132 /* This function performs the HCI resume procedures in the following order:
6133  *
6134  * Restore event mask
6135  * Clear event filter
6136  * Update passive scanning (normal duty cycle)
6137  * Resume Directed Advertising/Advertising
6138  * Resume discovery (active scanning/inquiry)
6139  */
6140 int hci_resume_sync(struct hci_dev *hdev)
6141 {
6142 	/* If not marked as suspended there is nothing to do */
6143 	if (!hdev->suspended)
6144 		return 0;
6145 
6146 	hdev->suspended = false;
6147 
6148 	/* Restore event mask */
6149 	hci_set_event_mask_sync(hdev);
6150 
6151 	/* Clear any event filters and restore scan state */
6152 	hci_clear_event_filter_sync(hdev);
6153 
6154 	/* Resume scanning */
6155 	hci_resume_scan_sync(hdev);
6156 
6157 	/* Resume monitor filters */
6158 	hci_resume_monitor_sync(hdev);
6159 
6160 	/* Resume other advertisements */
6161 	hci_resume_advertising_sync(hdev);
6162 
6163 	/* Resume discovery */
6164 	hci_resume_discovery_sync(hdev);
6165 
6166 	return 0;
6167 }
6168 
6169 static bool conn_use_rpa(struct hci_conn *conn)
6170 {
6171 	struct hci_dev *hdev = conn->hdev;
6172 
6173 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
6174 }
6175 
6176 static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
6177 						struct hci_conn *conn)
6178 {
6179 	struct hci_cp_le_set_ext_adv_params cp;
6180 	int err;
6181 	bdaddr_t random_addr;
6182 	u8 own_addr_type;
6183 
6184 	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6185 					     &own_addr_type);
6186 	if (err)
6187 		return err;
6188 
6189 	/* Set require_privacy to false so that the remote device has a
6190 	 * chance of identifying us.
6191 	 */
6192 	err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
6193 				     &own_addr_type, &random_addr);
6194 	if (err)
6195 		return err;
6196 
6197 	memset(&cp, 0, sizeof(cp));
6198 
6199 	cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
6200 	cp.channel_map = hdev->le_adv_channel_map;
6201 	cp.tx_power = HCI_TX_POWER_INVALID;
6202 	cp.primary_phy = HCI_ADV_PHY_1M;
6203 	cp.secondary_phy = HCI_ADV_PHY_1M;
6204 	cp.handle = 0x00; /* Use instance 0 for directed adv */
6205 	cp.own_addr_type = own_addr_type;
6206 	cp.peer_addr_type = conn->dst_type;
6207 	bacpy(&cp.peer_addr, &conn->dst);
6208 
6209 	/* As per Core Spec 5.2 Vol 2, Part E, Sec 7.8.53, the advertising
6210 	 * event property LE_LEGACY_ADV_DIRECT_IND does not support
6211 	 * advertising data; if the advertising set already contains some,
6212 	 * the controller shall return the error code Invalid HCI Command
6213 	 * Parameters (0x12).
6214 	 * It is therefore required to remove the adv set for handle 0x00,
6215 	 * since instance 0 is used for directed adv.
6216 	 */
6217 	err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
6218 	if (err)
6219 		return err;
6220 
6221 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
6222 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6223 	if (err)
6224 		return err;
6225 
6226 	/* Check if the random address needs to be updated */
6227 	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
6228 	    bacmp(&random_addr, BDADDR_ANY) &&
6229 	    bacmp(&random_addr, &hdev->random_addr)) {
6230 		err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
6231 						       &random_addr);
6232 		if (err)
6233 			return err;
6234 	}
6235 
6236 	return hci_enable_ext_advertising_sync(hdev, 0x00);
6237 }
6238 
6239 static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
6240 					    struct hci_conn *conn)
6241 {
6242 	struct hci_cp_le_set_adv_param cp;
6243 	u8 status;
6244 	u8 own_addr_type;
6245 	u8 enable;
6246 
6247 	if (ext_adv_capable(hdev))
6248 		return hci_le_ext_directed_advertising_sync(hdev, conn);
6249 
6250 	/* Clear the HCI_LE_ADV bit temporarily so that the
6251 	 * hci_update_random_address knows that it's safe to go ahead
6252 	 * and write a new random address. The flag will be set back on
6253 	 * as soon as the SET_ADV_ENABLE HCI command completes.
6254 	 */
6255 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
6256 
6257 	/* Set require_privacy to false so that the remote device has a
6258 	 * chance of identifying us.
6259 	 */
6260 	status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6261 						&own_addr_type);
6262 	if (status)
6263 		return status;
6264 
6265 	memset(&cp, 0, sizeof(cp));
6266 
6267 	/* Some controllers might reject command if intervals are not
6268 	 * within range for undirected advertising.
6269 	 * BCM20702A0 is known to be affected by this.
6270 	 */
6271 	cp.min_interval = cpu_to_le16(0x0020);
6272 	cp.max_interval = cpu_to_le16(0x0020);
6273 
6274 	cp.type = LE_ADV_DIRECT_IND;
6275 	cp.own_address_type = own_addr_type;
6276 	cp.direct_addr_type = conn->dst_type;
6277 	bacpy(&cp.direct_addr, &conn->dst);
6278 	cp.channel_map = hdev->le_adv_channel_map;
6279 
6280 	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
6281 				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6282 	if (status)
6283 		return status;
6284 
6285 	enable = 0x01;
6286 
6287 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
6288 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
6289 }
6290 
6291 static void set_ext_conn_params(struct hci_conn *conn,
6292 				struct hci_cp_le_ext_conn_param *p)
6293 {
6294 	struct hci_dev *hdev = conn->hdev;
6295 
6296 	memset(p, 0, sizeof(*p));
6297 
6298 	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6299 	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6300 	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6301 	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6302 	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
6303 	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6304 	p->min_ce_len = cpu_to_le16(0x0000);
6305 	p->max_ce_len = cpu_to_le16(0x0000);
6306 }
6307 
6308 static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
6309 				       struct hci_conn *conn, u8 own_addr_type)
6310 {
6311 	struct hci_cp_le_ext_create_conn *cp;
6312 	struct hci_cp_le_ext_conn_param *p;
6313 	u8 data[sizeof(*cp) + sizeof(*p) * 3];
6314 	u32 plen;
6315 
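	/* data has room for one hci_cp_le_ext_conn_param block per
	 * supported PHY (1M, 2M and Coded); plen tracks how much of it
	 * is actually used.
	 */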
6316 	cp = (void *)data;
6317 	p = (void *)cp->data;
6318 
6319 	memset(cp, 0, sizeof(*cp));
6320 
6321 	bacpy(&cp->peer_addr, &conn->dst);
6322 	cp->peer_addr_type = conn->dst_type;
6323 	cp->own_addr_type = own_addr_type;
6324 
6325 	plen = sizeof(*cp);
6326 
6327 	if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M ||
6328 			      conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) {
6329 		cp->phys |= LE_SCAN_PHY_1M;
6330 		set_ext_conn_params(conn, p);
6331 
6332 		p++;
6333 		plen += sizeof(*p);
6334 	}
6335 
6336 	if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M ||
6337 			      conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) {
6338 		cp->phys |= LE_SCAN_PHY_2M;
6339 		set_ext_conn_params(conn, p);
6340 
6341 		p++;
6342 		plen += sizeof(*p);
6343 	}
6344 
6345 	if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED ||
6346 				 conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) {
6347 		cp->phys |= LE_SCAN_PHY_CODED;
6348 		set_ext_conn_params(conn, p);
6349 
6350 		plen += sizeof(*p);
6351 	}
6352 
6353 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
6354 					plen, data,
6355 					HCI_EV_LE_ENHANCED_CONN_COMPLETE,
6356 					conn->conn_timeout, NULL);
6357 }
6358 
6359 static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
6360 {
6361 	struct hci_cp_le_create_conn cp;
6362 	struct hci_conn_params *params;
6363 	u8 own_addr_type;
6364 	int err;
6365 	struct hci_conn *conn = data;
6366 
6367 	if (!hci_conn_valid(hdev, conn))
6368 		return -ECANCELED;
6369 
6370 	bt_dev_dbg(hdev, "conn %p", conn);
6371 
6372 	clear_bit(HCI_CONN_SCANNING, &conn->flags);
6373 	conn->state = BT_CONNECT;
6374 
6375 	/* If requested to connect as peripheral use directed advertising */
6376 	if (conn->role == HCI_ROLE_SLAVE) {
6377 		/* If we're active scanning and simultaneous roles is not
6378 		 * enabled simply reject the attempt.
6379 		 */
6380 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6381 		    hdev->le_scan_type == LE_SCAN_ACTIVE &&
6382 		    !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
6383 			hci_conn_del(conn);
6384 			return -EBUSY;
6385 		}
6386 
6387 		/* Pause advertising while doing directed advertising. */
6388 		hci_pause_advertising_sync(hdev);
6389 
6390 		err = hci_le_directed_advertising_sync(hdev, conn);
6391 		goto done;
6392 	}
6393 
6394 	/* Disable advertising if simultaneous roles is not in use. */
6395 	if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
6396 		hci_pause_advertising_sync(hdev);
6397 
6398 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
6399 	if (params) {
6400 		conn->le_conn_min_interval = params->conn_min_interval;
6401 		conn->le_conn_max_interval = params->conn_max_interval;
6402 		conn->le_conn_latency = params->conn_latency;
6403 		conn->le_supv_timeout = params->supervision_timeout;
6404 	} else {
6405 		conn->le_conn_min_interval = hdev->le_conn_min_interval;
6406 		conn->le_conn_max_interval = hdev->le_conn_max_interval;
6407 		conn->le_conn_latency = hdev->le_conn_latency;
6408 		conn->le_supv_timeout = hdev->le_supv_timeout;
6409 	}
6410 
6411 	/* If controller is scanning, we stop it since some controllers are
6412 	 * not able to scan and connect at the same time. Also set the
6413 	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
6414 	 * handler for scan disabling knows to set the correct discovery
6415 	 * state.
6416 	 */
6417 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
6418 		hci_scan_disable_sync(hdev);
6419 		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
6420 	}
6421 
6422 	/* Update random address, but set require_privacy to false so
6423 	 * that we never connect with a non-resolvable address.
6424 	 */
6425 	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6426 					     &own_addr_type);
6427 	if (err)
6428 		goto done;
6429 
6430 	if (use_ext_conn(hdev)) {
6431 		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
6432 		goto done;
6433 	}
6434 
6435 	memset(&cp, 0, sizeof(cp));
6436 
6437 	cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6438 	cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6439 
6440 	bacpy(&cp.peer_addr, &conn->dst);
6441 	cp.peer_addr_type = conn->dst_type;
6442 	cp.own_address_type = own_addr_type;
6443 	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6444 	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6445 	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
6446 	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6447 	cp.min_ce_len = cpu_to_le16(0x0000);
6448 	cp.max_ce_len = cpu_to_le16(0x0000);
6449 
6450 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
6451 	 *
6452 	 * If this event is unmasked and the HCI_LE_Connection_Complete event
6453 	 * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
6454 	 * sent when a new connection has been created.
6455 	 */
6456 	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
6457 				       sizeof(cp), &cp,
6458 				       use_enhanced_conn_complete(hdev) ?
6459 				       HCI_EV_LE_ENHANCED_CONN_COMPLETE :
6460 				       HCI_EV_LE_CONN_COMPLETE,
6461 				       conn->conn_timeout, NULL);
6462 
6463 done:
6464 	if (err == -ETIMEDOUT)
6465 		hci_le_connect_cancel_sync(hdev, conn, 0x00);
6466 
6467 	/* Re-enable advertising after the connection attempt is finished. */
6468 	hci_resume_advertising_sync(hdev);
6469 	return err;
6470 }
6471 
6472 int hci_le_create_cis_sync(struct hci_dev *hdev)
6473 {
6474 	DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f);
6475 	size_t aux_num_cis = 0;
6476 	struct hci_conn *conn;
6477 	u8 cig = BT_ISO_QOS_CIG_UNSET;
6478 
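	/* cmd is an on-stack LE Create CIS command sized for the spec
	 * maximum of 0x1f CIS entries; num_cis is trimmed down below to
	 * the number of entries actually filled in.
	 */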
6479 	/* The spec allows only one pending LE Create CIS command at a time. If
6480 	 * the command is pending now, don't do anything. We check for pending
6481 	 * connections after each CIS Established event.
6482 	 *
6483 	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6484 	 * page 2566:
6485 	 *
6486 	 * If the Host issues this command before all the
6487 	 * HCI_LE_CIS_Established events from the previous use of the
6488 	 * command have been generated, the Controller shall return the
6489 	 * error code Command Disallowed (0x0C).
6490 	 *
6491 	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6492 	 * page 2567:
6493 	 *
6494 	 * When the Controller receives the HCI_LE_Create_CIS command, the
6495 	 * Controller sends the HCI_Command_Status event to the Host. An
6496 	 * HCI_LE_CIS_Established event will be generated for each CIS when it
6497 	 * is established or if it is disconnected or considered lost before
6498 	 * being established; until all the events are generated, the command
6499 	 * remains pending.
6500 	 */
6501 
6502 	hci_dev_lock(hdev);
6503 
6504 	rcu_read_lock();
6505 
6506 	/* Wait until previous Create CIS has completed */
6507 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6508 		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
6509 			goto done;
6510 	}
6511 
6512 	/* Find CIG with all CIS ready */
6513 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6514 		struct hci_conn *link;
6515 
6516 		if (hci_conn_check_create_cis(conn))
6517 			continue;
6518 
6519 		cig = conn->iso_qos.ucast.cig;
6520 
6521 		list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) {
6522 			if (hci_conn_check_create_cis(link) > 0 &&
6523 			    link->iso_qos.ucast.cig == cig &&
6524 			    link->state != BT_CONNECTED) {
6525 				cig = BT_ISO_QOS_CIG_UNSET;
6526 				break;
6527 			}
6528 		}
6529 
6530 		if (cig != BT_ISO_QOS_CIG_UNSET)
6531 			break;
6532 	}
6533 
6534 	if (cig == BT_ISO_QOS_CIG_UNSET)
6535 		goto done;
6536 
6537 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6538 		struct hci_cis *cis = &cmd->cis[aux_num_cis];
6539 
6540 		if (hci_conn_check_create_cis(conn) ||
6541 		    conn->iso_qos.ucast.cig != cig)
6542 			continue;
6543 
6544 		set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6545 		cis->acl_handle = cpu_to_le16(conn->parent->handle);
6546 		cis->cis_handle = cpu_to_le16(conn->handle);
6547 		aux_num_cis++;
6548 
6549 		if (aux_num_cis >= cmd->num_cis)
6550 			break;
6551 	}
6552 	cmd->num_cis = aux_num_cis;
6553 
6554 done:
6555 	rcu_read_unlock();
6556 
6557 	hci_dev_unlock(hdev);
6558 
6559 	if (!aux_num_cis)
6560 		return 0;
6561 
6562 	/* Wait for HCI_LE_CIS_Established */
6563 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
6564 					struct_size(cmd, cis, cmd->num_cis),
6565 					cmd, HCI_EVT_LE_CIS_ESTABLISHED,
6566 					conn->conn_timeout, NULL);
6567 }
6568 
6569 int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
6570 {
6571 	struct hci_cp_le_remove_cig cp;
6572 
6573 	memset(&cp, 0, sizeof(cp));
6574 	cp.cig_id = handle;
6575 
6576 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
6577 				     &cp, HCI_CMD_TIMEOUT);
6578 }
6579 
6580 int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
6581 {
6582 	struct hci_cp_le_big_term_sync cp;
6583 
6584 	memset(&cp, 0, sizeof(cp));
6585 	cp.handle = handle;
6586 
6587 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
6588 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6589 }
6590 
6591 int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
6592 {
6593 	struct hci_cp_le_pa_term_sync cp;
6594 
6595 	memset(&cp, 0, sizeof(cp));
6596 	cp.handle = cpu_to_le16(handle);
6597 
6598 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
6599 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6600 }
6601 
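/* Select the own address type, and for random types the address to
 * program, to be used while advertising or scanning: a resolvable
 * private address when use_rpa is set, a freshly generated
 * non-resolvable private address when only require_privacy is set, and
 * the public address otherwise.
 */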
6602 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
6603 			   bool use_rpa, struct adv_info *adv_instance,
6604 			   u8 *own_addr_type, bdaddr_t *rand_addr)
6605 {
6606 	int err;
6607 
6608 	bacpy(rand_addr, BDADDR_ANY);
6609 
6610 	/* If privacy is enabled use a resolvable private address. If
6611 	 * current RPA has expired then generate a new one.
6612 	 */
6613 	if (use_rpa) {
6614 		/* If the controller supports LL Privacy, use own address
6615 		 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
6616 		 */
6617 		if (use_ll_privacy(hdev))
6618 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
6619 		else
6620 			*own_addr_type = ADDR_LE_DEV_RANDOM;
6621 
6622 		if (adv_instance) {
6623 			if (adv_rpa_valid(adv_instance))
6624 				return 0;
6625 		} else {
6626 			if (rpa_valid(hdev))
6627 				return 0;
6628 		}
6629 
6630 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
6631 		if (err < 0) {
6632 			bt_dev_err(hdev, "failed to generate new RPA");
6633 			return err;
6634 		}
6635 
6636 		bacpy(rand_addr, &hdev->rpa);
6637 
6638 		return 0;
6639 	}
6640 
6641 	/* In case of required privacy without resolvable private address,
6642 	 * use a non-resolvable private address. This is useful for
6643 	 * non-connectable advertising.
6644 	 */
6645 	if (require_privacy) {
6646 		bdaddr_t nrpa;
6647 
6648 		while (true) {
6649 			/* The non-resolvable private address is generated
6650 			 * from random six bytes with the two most significant
6651 			 * from six random bytes with the two most significant
6652 			 */
6653 			get_random_bytes(&nrpa, 6);
6654 			nrpa.b[5] &= 0x3f;
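			/* nrpa.b[5] is the most significant byte, so
			 * this leaves the two top bits of the address
			 * 00, as required for a non-resolvable private
			 * address.
			 */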
6655 
6656 			/* The non-resolvable private address shall not be
6657 			 * equal to the public address.
6658 			 */
6659 			if (bacmp(&hdev->bdaddr, &nrpa))
6660 				break;
6661 		}
6662 
6663 		*own_addr_type = ADDR_LE_DEV_RANDOM;
6664 		bacpy(rand_addr, &nrpa);
6665 
6666 		return 0;
6667 	}
6668 
6669 	/* No privacy so use a public address. */
6670 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
6671 
6672 	return 0;
6673 }
6674 
6675 static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
6676 {
6677 	u8 instance = PTR_UINT(data);
6678 
6679 	return hci_update_adv_data_sync(hdev, instance);
6680 }
6681 
6682 int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
6683 {
6684 	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
6685 				  UINT_PTR(instance), NULL);
6686 }
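
/* The UINT_PTR()/PTR_UINT() helpers encode the instance id directly in
 * the opaque data pointer, so queueing the update needs no allocation.
 */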
6687 
6688 static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
6689 {
6690 	struct hci_conn *conn = data;
6691 	struct inquiry_entry *ie;
6692 	struct hci_cp_create_conn cp;
6693 	int err;
6694 
6695 	if (!hci_conn_valid(hdev, conn))
6696 		return -ECANCELED;
6697 
6698 	/* Many controllers disallow HCI Create Connection while it is doing
6699 	 * HCI Inquiry. So we cancel the Inquiry before issuing HCI Create
6700 	 * Connection. This may cause the MGMT discovering state to become false
6701 	 * without user space's request but it is okay since the MGMT Discovery
6702 	 * APIs do not promise that discovery should be done forever. Instead,
6703 	 * the user space monitors the status of MGMT discovering and it may
6704 	 * request for discovery again when this flag becomes false.
6705 	 */
6706 	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
6707 		err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
6708 					    NULL, HCI_CMD_TIMEOUT);
6709 		if (err)
6710 			bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
6711 	}
6712 
6713 	conn->state = BT_CONNECT;
6714 	conn->out = true;
6715 	conn->role = HCI_ROLE_MASTER;
6716 
6717 	conn->attempt++;
6718 
6719 	conn->link_policy = hdev->link_policy;
6720 
6721 	memset(&cp, 0, sizeof(cp));
6722 	bacpy(&cp.bdaddr, &conn->dst);
6723 	cp.pscan_rep_mode = 0x02;
6724 
6725 	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
6726 	if (ie) {
6727 		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
6728 			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
6729 			cp.pscan_mode     = ie->data.pscan_mode;
6730 			cp.clock_offset   = ie->data.clock_offset |
6731 					    cpu_to_le16(0x8000);
6732 		}
6733 
6734 		memcpy(conn->dev_class, ie->data.dev_class, 3);
6735 	}
6736 
6737 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
6738 	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
6739 		cp.role_switch = 0x01;
6740 	else
6741 		cp.role_switch = 0x00;
6742 
6743 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
6744 					sizeof(cp), &cp,
6745 					HCI_EV_CONN_COMPLETE,
6746 					conn->conn_timeout, NULL);
6747 }
6748 
6749 int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
6750 {
6751 	return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
6752 				       NULL);
6753 }
6754 
6755 static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
6756 {
6757 	struct hci_conn *conn = data;
6758 
6759 	bt_dev_dbg(hdev, "err %d", err);
6760 
6761 	if (err == -ECANCELED)
6762 		return;
6763 
6764 	hci_dev_lock(hdev);
6765 
6766 	if (!hci_conn_valid(hdev, conn))
6767 		goto done;
6768 
6769 	if (!err) {
6770 		hci_connect_le_scan_cleanup(conn, 0x00);
6771 		goto done;
6772 	}
6773 
6774 	/* Check if connection is still pending */
6775 	if (conn != hci_lookup_le_connect(hdev))
6776 		goto done;
6777 
6778 	/* Flush to make sure we send create conn cancel command if needed */
6779 	flush_delayed_work(&conn->le_conn_timeout);
6780 	hci_conn_failed(conn, bt_status(err));
6781 
6782 done:
6783 	hci_dev_unlock(hdev);
6784 }
6785 
6786 int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
6787 {
6788 	return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
6789 				       create_le_conn_complete);
6790 }
6791 
6792 int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
6793 {
6794 	if (conn->state != BT_OPEN)
6795 		return -EINVAL;
6796 
6797 	switch (conn->type) {
6798 	case ACL_LINK:
6799 		return !hci_cmd_sync_dequeue_once(hdev,
6800 						  hci_acl_create_conn_sync,
6801 						  conn, NULL);
6802 	case LE_LINK:
6803 		return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
6804 						  conn, create_le_conn_complete);
6805 	}
6806 
6807 	return -ENOENT;
6808 }
6809 
6810 int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
6811 			    struct hci_conn_params *params)
6812 {
6813 	struct hci_cp_le_conn_update cp;
6814 
6815 	memset(&cp, 0, sizeof(cp));
6816 	cp.handle		= cpu_to_le16(conn->handle);
6817 	cp.conn_interval_min	= cpu_to_le16(params->conn_min_interval);
6818 	cp.conn_interval_max	= cpu_to_le16(params->conn_max_interval);
6819 	cp.conn_latency		= cpu_to_le16(params->conn_latency);
6820 	cp.supervision_timeout	= cpu_to_le16(params->supervision_timeout);
6821 	cp.min_ce_len		= cpu_to_le16(0x0000);
6822 	cp.max_ce_len		= cpu_to_le16(0x0000);
6823 
6824 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
6825 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6826 }
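
/* Illustrative use only (assumes an established conn and populated
 * params, and that the caller runs from the cmd_sync context):
 *
 *	err = hci_le_conn_update_sync(hdev, conn, params);
 *	if (err)
 *		bt_dev_err(hdev, "conn update failed: %d", err);
 */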
6827