Lines Matching +full:cpb +full:- +full:codec
1 // SPDX-License-Identifier: GPL-2.0
3 * BlueZ - Bluetooth protocol stack for Linux
28 if (hdev->req_status != HCI_REQ_PEND) in hci_cmd_sync_complete()
31 hdev->req_result = result; in hci_cmd_sync_complete()
32 hdev->req_status = HCI_REQ_DONE; in hci_cmd_sync_complete()
35 kfree_skb(hdev->req_skb); in hci_cmd_sync_complete()
36 hdev->req_skb = NULL; in hci_cmd_sync_complete()
45 hdev->req_rsp = skb_get(skb); in hci_cmd_sync_complete()
48 wake_up_interruptible(&hdev->req_wait_q); in hci_cmd_sync_complete()
63 hdr->opcode = cpu_to_le16(opcode); in hci_cmd_sync_alloc()
64 hdr->plen = plen; in hci_cmd_sync_alloc()
69 bt_dev_dbg(hdev, "skb len %d", skb->len); in hci_cmd_sync_alloc()
88 struct hci_dev *hdev = req->hdev; in hci_cmd_sync_add()
96 if (req->err) in hci_cmd_sync_add()
103 req->err = -ENOMEM; in hci_cmd_sync_add()
107 if (skb_queue_empty(&req->cmd_q)) in hci_cmd_sync_add()
108 bt_cb(skb)->hci.req_flags |= HCI_REQ_START; in hci_cmd_sync_add()
112 skb_queue_tail(&req->cmd_q, skb); in hci_cmd_sync_add()
117 struct hci_dev *hdev = req->hdev; in hci_req_sync_run()
121 bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); in hci_req_sync_run()
126 if (req->err) { in hci_req_sync_run()
127 skb_queue_purge(&req->cmd_q); in hci_req_sync_run()
128 return req->err; in hci_req_sync_run()
132 if (skb_queue_empty(&req->cmd_q)) in hci_req_sync_run()
133 return -ENODATA; in hci_req_sync_run()
135 skb = skb_peek_tail(&req->cmd_q); in hci_req_sync_run()
136 bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete; in hci_req_sync_run()
137 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB; in hci_req_sync_run()
139 spin_lock_irqsave(&hdev->cmd_q.lock, flags); in hci_req_sync_run()
140 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); in hci_req_sync_run()
141 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); in hci_req_sync_run()
143 queue_work(hdev->workqueue, &hdev->cmd_work); in hci_req_sync_run()
150 skb_queue_head_init(&req->cmd_q); in hci_request_init()
151 req->hdev = hdev; in hci_request_init()
152 req->err = 0; in hci_request_init()
155 /* This function requires the caller holds hdev->req_lock. */
170 hdev->req_status = HCI_REQ_PEND; in __hci_cmd_sync_sk()
176 err = wait_event_interruptible_timeout(hdev->req_wait_q, in __hci_cmd_sync_sk()
177 hdev->req_status != HCI_REQ_PEND, in __hci_cmd_sync_sk()
180 if (err == -ERESTARTSYS) in __hci_cmd_sync_sk()
181 return ERR_PTR(-EINTR); in __hci_cmd_sync_sk()
183 switch (hdev->req_status) { in __hci_cmd_sync_sk()
185 err = -bt_to_errno(hdev->req_result); in __hci_cmd_sync_sk()
189 err = -hdev->req_result; in __hci_cmd_sync_sk()
193 err = -ETIMEDOUT; in __hci_cmd_sync_sk()
197 hdev->req_status = 0; in __hci_cmd_sync_sk()
198 hdev->req_result = 0; in __hci_cmd_sync_sk()
199 skb = hdev->req_rsp; in __hci_cmd_sync_sk()
200 hdev->req_rsp = NULL; in __hci_cmd_sync_sk()
213 return ERR_PTR(-ENODATA); in __hci_cmd_sync_sk()
219 /* This function requires the caller holds hdev->req_lock. */
233 if (!test_bit(HCI_UP, &hdev->flags)) in hci_cmd_sync()
234 return ERR_PTR(-ENETDOWN); in hci_cmd_sync()
246 /* This function requires the caller holds hdev->req_lock. */
255 /* This function requires the caller holds hdev->req_lock. */
265 /* If the command returns a status event, skb will be set to -ENODATA */ in __hci_cmd_sync_status_sk()
266 if (skb == ERR_PTR(-ENODATA)) in __hci_cmd_sync_status_sk()
276 status = skb->data[0]; in __hci_cmd_sync_status_sk()
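The helpers above (__hci_cmd_sync_sk() and its wrappers) send one HCI command and sleep on req_wait_q until the matching Command Complete/Status event arrives; the __-prefixed variants expect the caller to already hold hdev->req_lock, while hci_cmd_sync() takes the lock itself and rejects devices that are not HCI_UP. A minimal caller sketch, assuming the usual include/net/bluetooth declarations (hci_cmd_sync(), HCI_OP_READ_BD_ADDR, struct hci_rp_read_bd_addr, HCI_CMD_TIMEOUT); example_read_bd_addr() is a hypothetical name, not code from this file:

	/* Kernel context assumed: net/bluetooth headers already included. */
	static int example_read_bd_addr(struct hci_dev *hdev, bdaddr_t *dst)
	{
		struct hci_rp_read_bd_addr *rp;
		struct sk_buff *skb;
		int err = 0;

		/* Blocks until the Command Complete event or the timeout;
		 * returns ERR_PTR(-ENETDOWN) when the device is not up and
		 * ERR_PTR(-ENODATA) when only a status event was generated.
		 */
		skb = hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
				   HCI_CMD_TIMEOUT);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		rp = (void *)skb->data;
		if (rp->status)
			err = -bt_to_errno(rp->status);
		else
			bacpy(dst, &rp->bdaddr);

		kfree_skb(skb);
		return err;
	}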
315 mutex_lock(&hdev->cmd_sync_work_lock); in hci_cmd_sync_work()
316 entry = list_first_entry_or_null(&hdev->cmd_sync_work_list, in hci_cmd_sync_work()
320 list_del(&entry->list); in hci_cmd_sync_work()
321 mutex_unlock(&hdev->cmd_sync_work_lock); in hci_cmd_sync_work()
328 if (entry->func) { in hci_cmd_sync_work()
332 err = entry->func(hdev, entry->data); in hci_cmd_sync_work()
333 if (entry->destroy) in hci_cmd_sync_work()
334 entry->destroy(hdev, entry->data, err); in hci_cmd_sync_work()
346 cancel_delayed_work_sync(&hdev->cmd_timer); in hci_cmd_sync_cancel_work()
347 cancel_delayed_work_sync(&hdev->ncmd_timer); in hci_cmd_sync_cancel_work()
348 atomic_set(&hdev->cmd_cnt, 1); in hci_cmd_sync_cancel_work()
350 wake_up_interruptible(&hdev->req_wait_q); in hci_cmd_sync_cancel_work()
390 if (hdev->discovery.type == DISCOV_TYPE_LE) in le_scan_disable()
393 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) in le_scan_disable()
396 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { in le_scan_disable()
397 if (!test_bit(HCI_INQUIRY, &hdev->flags) && in le_scan_disable()
398 hdev->discovery.state != DISCOVERY_RESOLVING) in le_scan_disable()
427 list_empty(&hdev->adv_instances)) in reenable_adv_sync()
430 if (hdev->cur_adv_instance) { in reenable_adv_sync()
432 hdev->cur_adv_instance, in reenable_adv_sync()
466 if (hdev->adv_instance_timeout) { in cancel_adv_timeout()
467 hdev->adv_instance_timeout = 0; in cancel_adv_timeout()
468 cancel_delayed_work(&hdev->adv_instance_expire); in cancel_adv_timeout()
473 * - force == true: The instance will be removed even when its remaining
475 * - force == false: the instance will be deactivated but kept stored unless
479 * - force == true: All instances will be removed regardless of their timeout
481 * - force == false: Only instances that have a timeout will be removed.
491 if (!instance || hdev->cur_adv_instance == instance) in hci_clear_adv_instance_sync()
498 if (instance && hdev->cur_adv_instance == instance) in hci_clear_adv_instance_sync()
502 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, in hci_clear_adv_instance_sync()
504 if (!(force || adv_instance->timeout)) in hci_clear_adv_instance_sync()
507 rem_inst = adv_instance->instance; in hci_clear_adv_instance_sync()
515 if (force || (adv_instance && adv_instance->timeout && in hci_clear_adv_instance_sync()
516 !adv_instance->remaining_time)) { in hci_clear_adv_instance_sync()
519 next_instance->instance == instance) in hci_clear_adv_instance_sync()
533 next_instance->instance, in hci_clear_adv_instance_sync()
547 if (list_empty(&hdev->adv_instances)) in adv_timeout_expire_sync()
563 hdev->adv_instance_timeout = 0; in adv_timeout_expire()
565 if (hdev->cur_adv_instance == 0x00) in adv_timeout_expire()
572 *inst_ptr = hdev->cur_adv_instance; in adv_timeout_expire()
581 return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; in is_interleave_scanning()
592 if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { in interleave_scan_work()
593 timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); in interleave_scan_work()
594 } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { in interleave_scan_work()
595 timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration); in interleave_scan_work()
605 switch (hdev->interleave_scan_state) { in interleave_scan_work()
608 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; in interleave_scan_work()
612 hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; in interleave_scan_work()
622 queue_delayed_work(hdev->req_workqueue, in interleave_scan_work()
623 &hdev->interleave_scan, timeout); in interleave_scan_work()
628 INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); in hci_cmd_sync_init()
629 INIT_LIST_HEAD(&hdev->cmd_sync_work_list); in hci_cmd_sync_init()
630 mutex_init(&hdev->cmd_sync_work_lock); in hci_cmd_sync_init()
631 mutex_init(&hdev->unregister_lock); in hci_cmd_sync_init()
633 INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work); in hci_cmd_sync_init()
634 INIT_WORK(&hdev->reenable_adv_work, reenable_adv); in hci_cmd_sync_init()
635 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable); in hci_cmd_sync_init()
636 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); in hci_cmd_sync_init()
637 INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work); in hci_cmd_sync_init()
644 if (entry->destroy) in _hci_cmd_sync_cancel_entry()
645 entry->destroy(hdev, entry->data, err); in _hci_cmd_sync_cancel_entry()
647 list_del(&entry->list); in _hci_cmd_sync_cancel_entry()
655 cancel_work_sync(&hdev->cmd_sync_work); in hci_cmd_sync_clear()
656 cancel_work_sync(&hdev->reenable_adv_work); in hci_cmd_sync_clear()
658 mutex_lock(&hdev->cmd_sync_work_lock); in hci_cmd_sync_clear()
659 list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) in hci_cmd_sync_clear()
660 _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); in hci_cmd_sync_clear()
661 mutex_unlock(&hdev->cmd_sync_work_lock); in hci_cmd_sync_clear()
668 if (hdev->req_status == HCI_REQ_PEND) { in hci_cmd_sync_cancel()
669 hdev->req_result = err; in hci_cmd_sync_cancel()
670 hdev->req_status = HCI_REQ_CANCELED; in hci_cmd_sync_cancel()
672 queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work); in hci_cmd_sync_cancel()
679 * - Set result and mark status to HCI_REQ_CANCELED
680 * - Wake up the command sync thread
686 if (hdev->req_status == HCI_REQ_PEND) { in hci_cmd_sync_cancel_sync()
690 hdev->req_result = err < 0 ? -err : err; in hci_cmd_sync_cancel_sync()
691 hdev->req_status = HCI_REQ_CANCELED; in hci_cmd_sync_cancel_sync()
693 wake_up_interruptible(&hdev->req_wait_q); in hci_cmd_sync_cancel_sync()
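hci_cmd_sync_cancel() defers the cleanup to cmd_sync_cancel_work (which resets cmd_cnt and the command timers, lines 346-350), whereas hci_cmd_sync_cancel_sync() normalizes the sign of err, stores it in req_result and wakes req_wait_q directly (lines 690-693). A hypothetical error-path caller might look like the sketch below; only the wrapper name is invented, the cancel helper is the one defined above:

	/* Kernel context assumed: net/bluetooth headers already included. */
	static void example_abort_pending_request(struct hci_dev *hdev)
	{
		/* Wakes the waiter in __hci_cmd_sync_sk() instead of letting
		 * it sit out the full HCI_CMD_TIMEOUT; the sign of -EIO is
		 * normalized before it lands in hdev->req_result.
		 */
		hci_cmd_sync_cancel_sync(hdev, -EIO);
	}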
700 * - hdev must _not_ be unregistered
708 mutex_lock(&hdev->unregister_lock); in hci_cmd_sync_submit()
710 err = -ENODEV; in hci_cmd_sync_submit()
716 err = -ENOMEM; in hci_cmd_sync_submit()
719 entry->func = func; in hci_cmd_sync_submit()
720 entry->data = data; in hci_cmd_sync_submit()
721 entry->destroy = destroy; in hci_cmd_sync_submit()
723 mutex_lock(&hdev->cmd_sync_work_lock); in hci_cmd_sync_submit()
724 list_add_tail(&entry->list, &hdev->cmd_sync_work_list); in hci_cmd_sync_submit()
725 mutex_unlock(&hdev->cmd_sync_work_lock); in hci_cmd_sync_submit()
727 queue_work(hdev->req_workqueue, &hdev->cmd_sync_work); in hci_cmd_sync_submit()
730 mutex_unlock(&hdev->unregister_lock); in hci_cmd_sync_submit()
737 * - hdev must be running
745 if (!test_bit(HCI_RUNNING, &hdev->flags)) in hci_cmd_sync_queue()
746 return -ENETDOWN; in hci_cmd_sync_queue()
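hci_cmd_sync_submit() only requires that hdev is not unregistered, while hci_cmd_sync_queue() additionally rejects devices that are not HCI_RUNNING. A queued entry carries a work callback, an opaque data pointer and an optional destroy callback that is invoked after the work runs (or with -ECANCELED if the entry is cancelled). A minimal sketch of that pattern, assuming the hci_sync.h API this file implements; the example_* names and the choice of HCI_OP_WRITE_LOCAL_NAME are illustrative, not code from this file:

	/* Kernel context assumed: net/bluetooth headers already included. */
	static void example_destroy(struct hci_dev *hdev, void *data, int err)
	{
		kfree(data);	/* runs after the work callback, even on error */
	}

	static int example_set_name_sync(struct hci_dev *hdev, void *data)
	{
		const char *name = data;
		struct hci_cp_write_local_name cp;

		memset(&cp, 0, sizeof(cp));
		memcpy(cp.name, name, min_t(size_t, strlen(name), sizeof(cp.name)));

		/* Runs on hdev->req_workqueue, dispatched by hci_cmd_sync_work() */
		return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
					     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	}

	static int example_set_name(struct hci_dev *hdev, const char *name)
	{
		char *data = kstrdup(name, GFP_KERNEL);
		int err;

		if (!data)
			return -ENOMEM;

		/* Fails with -ENETDOWN when HCI_RUNNING is not set (see above);
		 * on failure nothing was queued, so free the payload here.
		 */
		err = hci_cmd_sync_queue(hdev, example_set_name_sync, data,
					 example_destroy);
		if (err)
			kfree(data);

		return err;
	}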
758 list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) { in _hci_cmd_sync_lookup_entry()
759 if (func && entry->func != func) in _hci_cmd_sync_lookup_entry()
762 if (data && entry->data != data) in _hci_cmd_sync_lookup_entry()
765 if (destroy && entry->destroy != destroy) in _hci_cmd_sync_lookup_entry()
776 * - Look up whether an entry already exists and, only if it doesn't, create a new entry
791 * - hdev must be running
792 * - if on cmd_sync_work then run immediately otherwise queue
800 if (!test_bit(HCI_RUNNING, &hdev->flags)) in hci_cmd_sync_run()
801 return -ENETDOWN; in hci_cmd_sync_run()
804 if (current_work() == &hdev->cmd_sync_work) in hci_cmd_sync_run()
813 * - Look up whether an entry already exists and, only if it doesn't, create a new entry
815 * - if on cmd_sync_work then run immediately otherwise queue
829 * - Return the first entry that matches by function callback or data or
838 mutex_lock(&hdev->cmd_sync_work_lock); in hci_cmd_sync_lookup_entry()
840 mutex_unlock(&hdev->cmd_sync_work_lock); in hci_cmd_sync_lookup_entry()
850 mutex_lock(&hdev->cmd_sync_work_lock); in hci_cmd_sync_cancel_entry()
851 _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); in hci_cmd_sync_cancel_entry()
852 mutex_unlock(&hdev->cmd_sync_work_lock); in hci_cmd_sync_cancel_entry()
858 * - Look up and cancel the first entry that matches.
878 * - Look up and cancel any entry that matches by function callback or data or
887 mutex_lock(&hdev->cmd_sync_work_lock); in hci_cmd_sync_dequeue()
890 _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); in hci_cmd_sync_dequeue()
893 mutex_unlock(&hdev->cmd_sync_work_lock); in hci_cmd_sync_dequeue()
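The lookup and dequeue helpers match queued entries by func, data and destroy, with NULL acting as a wildcard (lines 758-765), which enables a queue-once/cancel-later pattern on top of hci_cmd_sync_queue_once() and hci_cmd_sync_dequeue(). A small sketch under those assumptions; example_rescan_sync(), example_arm() and example_disarm() are hypothetical, while the queue/dequeue helpers and hci_update_passive_scan_sync() are existing API shown in this file:

	/* Kernel context assumed: net/bluetooth headers already included. */
	static int example_rescan_sync(struct hci_dev *hdev, void *data)
	{
		return hci_update_passive_scan_sync(hdev);
	}

	static void example_arm(struct hci_dev *hdev)
	{
		/* No-op if an identical entry is already on cmd_sync_work_list */
		hci_cmd_sync_queue_once(hdev, example_rescan_sync, NULL, NULL);
	}

	static void example_disarm(struct hci_dev *hdev)
	{
		/* Cancels every queued entry whose callback is
		 * example_rescan_sync; each destroy callback (none here)
		 * would be invoked with -ECANCELED.
		 */
		hci_cmd_sync_dequeue(hdev, example_rescan_sync, NULL, NULL);
	}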
921 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) in hci_update_eir_sync()
924 memcpy(hdev->eir, cp.data, sizeof(cp.data)); in hci_update_eir_sync()
935 list_for_each_entry(uuid, &hdev->uuids, list) in get_service_classes()
936 val |= uuid->svc_hint; in get_service_classes()
956 cod[0] = hdev->minor_class; in hci_update_class_sync()
957 cod[1] = hdev->major_class; in hci_update_class_sync()
963 if (memcmp(cod, hdev->dev_class, 3) == 0) in hci_update_class_sync()
977 if (hdev->conn_hash.le_num_peripheral > 0) { in is_advertising_allowed()
981 if (!connectable && !(hdev->le_states[2] & 0x10)) in is_advertising_allowed()
987 if (connectable && (!(hdev->le_states[4] & 0x40) || in is_advertising_allowed()
988 !(hdev->le_states[2] & 0x20))) in is_advertising_allowed()
993 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) { in is_advertising_allowed()
995 if (!connectable && !(hdev->le_states[2] & 0x02)) in is_advertising_allowed()
1001 if (connectable && (!(hdev->le_states[4] & 0x08) || in is_advertising_allowed()
1002 !(hdev->le_states[2] & 0x08))) in is_advertising_allowed()
1044 if (bacmp(&hdev->random_addr, BDADDR_ANY) && in hci_set_random_addr_sync()
1078 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); in hci_update_random_address_sync()
1084 err = hci_set_random_addr_sync(hdev, &hdev->rpa); in hci_update_random_address_sync()
1092 * use a non-resolvable private address. This is useful for active in hci_update_random_address_sync()
1093 * scanning and non-connectable advertising. in hci_update_random_address_sync()
1099 /* The non-resolvable private address is generated in hci_update_random_address_sync()
1106 /* The non-resolvable private address shall not be in hci_update_random_address_sync()
1109 if (bacmp(&hdev->bdaddr, &nrpa)) in hci_update_random_address_sync()
1123 * In case BR/EDR has been disabled on a dual-mode controller in hci_update_random_address_sync()
1128 !bacmp(&hdev->bdaddr, BDADDR_ANY) || in hci_update_random_address_sync()
1130 bacmp(&hdev->static_addr, BDADDR_ANY))) { in hci_update_random_address_sync()
1132 if (bacmp(&hdev->static_addr, &hdev->random_addr)) in hci_update_random_address_sync()
1134 &hdev->static_addr); in hci_update_random_address_sync()
1158 return -EINVAL; in hci_disable_ext_adv_instance_sync()
1161 if (!adv->enabled) in hci_disable_ext_adv_instance_sync()
1168 set = (void *)cp->data; in hci_disable_ext_adv_instance_sync()
1171 cp->num_of_sets = !!instance; in hci_disable_ext_adv_instance_sync()
1172 cp->enable = 0x00; in hci_disable_ext_adv_instance_sync()
1174 set->handle = adv ? adv->handle : instance; in hci_disable_ext_adv_instance_sync()
1176 size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets; in hci_disable_ext_adv_instance_sync()
1190 * hdev->random_addr to track its address so whenever it needs in hci_set_adv_set_random_addr_sync()
1192 * hdev->random_addr is shared with scan state machine. in hci_set_adv_set_random_addr_sync()
1222 return -EINVAL; in hci_setup_ext_adv_instance_sync()
1231 if (adv && !adv->pending) { in hci_setup_ext_adv_instance_sync()
1246 return -EPERM; in hci_setup_ext_adv_instance_sync()
1248 /* Set require_privacy to true only when non-connectable in hci_setup_ext_adv_instance_sync()
1250 * non-resolvable private address. in hci_setup_ext_adv_instance_sync()
1261 hci_cpu_to_le24(adv->min_interval, cp.min_interval); in hci_setup_ext_adv_instance_sync()
1262 hci_cpu_to_le24(adv->max_interval, cp.max_interval); in hci_setup_ext_adv_instance_sync()
1263 cp.tx_power = adv->tx_power; in hci_setup_ext_adv_instance_sync()
1265 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); in hci_setup_ext_adv_instance_sync()
1266 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); in hci_setup_ext_adv_instance_sync()
1302 cp.channel_map = hdev->le_adv_channel_map; in hci_setup_ext_adv_instance_sync()
1303 cp.handle = adv ? adv->handle : instance; in hci_setup_ext_adv_instance_sync()
1327 if (!bacmp(&random_addr, &adv->random_addr)) in hci_setup_ext_adv_instance_sync()
1330 if (!bacmp(&random_addr, &hdev->random_addr)) in hci_setup_ext_adv_instance_sync()
1351 if (!adv || !adv->scan_rsp_changed) in hci_set_ext_scan_rsp_data_sync()
1355 len = eir_create_scan_rsp(hdev, instance, pdu->data); in hci_set_ext_scan_rsp_data_sync()
1357 pdu->handle = adv ? adv->handle : instance; in hci_set_ext_scan_rsp_data_sync()
1358 pdu->length = len; in hci_set_ext_scan_rsp_data_sync()
1359 pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE; in hci_set_ext_scan_rsp_data_sync()
1360 pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG; in hci_set_ext_scan_rsp_data_sync()
1369 adv->scan_rsp_changed = false; in hci_set_ext_scan_rsp_data_sync()
1371 memcpy(hdev->scan_rsp_data, pdu->data, len); in hci_set_ext_scan_rsp_data_sync()
1372 hdev->scan_rsp_data_len = len; in hci_set_ext_scan_rsp_data_sync()
1387 if (hdev->scan_rsp_data_len == len && in __hci_set_scan_rsp_data_sync()
1388 !memcmp(cp.data, hdev->scan_rsp_data, len)) in __hci_set_scan_rsp_data_sync()
1391 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); in __hci_set_scan_rsp_data_sync()
1392 hdev->scan_rsp_data_len = len; in __hci_set_scan_rsp_data_sync()
1421 return -EINVAL; in hci_enable_ext_advertising_sync()
1423 if (adv->enabled) in hci_enable_ext_advertising_sync()
1430 set = (void *)cp->data; in hci_enable_ext_advertising_sync()
1434 cp->enable = 0x01; in hci_enable_ext_advertising_sync()
1435 cp->num_of_sets = 0x01; in hci_enable_ext_advertising_sync()
1439 set->handle = adv ? adv->handle : instance; in hci_enable_ext_advertising_sync()
1444 if (adv && adv->timeout) { in hci_enable_ext_advertising_sync()
1445 u16 duration = adv->timeout * MSEC_PER_SEC; in hci_enable_ext_advertising_sync()
1448 set->duration = cpu_to_le16(duration / 10); in hci_enable_ext_advertising_sync()
1453 sizeof(*set) * cp->num_of_sets, in hci_enable_ext_advertising_sync()
1479 if (!adv || !adv->periodic || !adv->enabled) in hci_disable_per_advertising_sync()
1522 if (!adv || !adv->periodic) in hci_set_per_adv_data_sync()
1526 len = eir_create_per_adv_data(hdev, instance, pdu->data); in hci_set_per_adv_data_sync()
1528 pdu->length = len; in hci_set_per_adv_data_sync()
1529 pdu->handle = adv ? adv->handle : instance; in hci_set_per_adv_data_sync()
1530 pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE; in hci_set_per_adv_data_sync()
1544 if (adv && adv->periodic && adv->enabled) in hci_enable_per_advertising_sync()
1574 if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len, in hci_adv_bcast_annoucement()
1582 if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852, in hci_adv_bcast_annoucement()
1589 hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL); in hci_adv_bcast_annoucement()
1591 return hci_update_adv_data_sync(hdev, adv->instance); in hci_adv_bcast_annoucement()
1614 adv->pending = false; in hci_start_per_adv_sync()
1680 hdev->cur_adv_instance); in hci_enable_advertising_sync()
1682 flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance); in hci_enable_advertising_sync()
1683 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); in hci_enable_advertising_sync()
1692 return -EINVAL; in hci_enable_advertising_sync()
1705 /* Set require_privacy to true only when non-connectable in hci_enable_advertising_sync()
1707 * non-resolvable private address. in hci_enable_advertising_sync()
1718 adv_min_interval = adv_instance->min_interval; in hci_enable_advertising_sync()
1719 adv_max_interval = adv_instance->max_interval; in hci_enable_advertising_sync()
1721 adv_min_interval = hdev->le_adv_min_interval; in hci_enable_advertising_sync()
1722 adv_max_interval = hdev->le_adv_max_interval; in hci_enable_advertising_sync()
1728 if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance)) in hci_enable_advertising_sync()
1743 cp.channel_map = hdev->le_adv_channel_map; in hci_enable_advertising_sync()
1762 list_empty(&hdev->adv_instances)) in hci_enable_advertising()
1782 return -EINVAL; in hci_remove_ext_adv_instance_sync()
1811 if (!adv || !adv->adv_data_changed) in hci_set_ext_adv_data_sync()
1815 len = eir_create_adv_data(hdev, instance, pdu->data); in hci_set_ext_adv_data_sync()
1817 pdu->length = len; in hci_set_ext_adv_data_sync()
1818 pdu->handle = adv ? adv->handle : instance; in hci_set_ext_adv_data_sync()
1819 pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE; in hci_set_ext_adv_data_sync()
1820 pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG; in hci_set_ext_adv_data_sync()
1830 adv->adv_data_changed = false; in hci_set_ext_adv_data_sync()
1832 memcpy(hdev->adv_data, pdu->data, len); in hci_set_ext_adv_data_sync()
1833 hdev->adv_data_len = len; in hci_set_ext_adv_data_sync()
1849 if (hdev->adv_data_len == len && in hci_set_adv_data_sync()
1850 memcmp(cp.data, hdev->adv_data, len) == 0) in hci_set_adv_data_sync()
1853 memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); in hci_set_adv_data_sync()
1854 hdev->adv_data_len = len; in hci_set_adv_data_sync()
1880 return -EPERM; in hci_schedule_adv_instance_sync()
1882 if (hdev->adv_instance_timeout) in hci_schedule_adv_instance_sync()
1883 return -EBUSY; in hci_schedule_adv_instance_sync()
1887 return -ENOENT; in hci_schedule_adv_instance_sync()
1897 if (adv->timeout == 0 || adv->duration <= adv->remaining_time) in hci_schedule_adv_instance_sync()
1898 timeout = adv->duration; in hci_schedule_adv_instance_sync()
1900 timeout = adv->remaining_time; in hci_schedule_adv_instance_sync()
1905 if (adv->timeout) in hci_schedule_adv_instance_sync()
1906 adv->remaining_time = adv->remaining_time - timeout; in hci_schedule_adv_instance_sync()
1910 hdev->adv_instance_timeout = timeout; in hci_schedule_adv_instance_sync()
1911 queue_delayed_work(hdev->req_workqueue, in hci_schedule_adv_instance_sync()
1912 &hdev->adv_instance_expire, in hci_schedule_adv_instance_sync()
1916 /* If we're just re-scheduling the same instance again then do not in hci_schedule_adv_instance_sync()
1920 if (!force && hdev->cur_adv_instance == instance && in hci_schedule_adv_instance_sync()
1924 hdev->cur_adv_instance = instance; in hci_schedule_adv_instance_sync()
1961 /* Clean up non-ext instances */ in hci_clear_adv_sync()
1962 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { in hci_clear_adv_sync()
1963 u8 instance = adv->instance; in hci_clear_adv_sync()
1966 if (!(force || adv->timeout)) in hci_clear_adv_sync()
2005 * - force == true: The instance will be removed even when its remaining
2007 * - force == false: the instance will be deactivated but kept stored unless
2011 * - force == true: All instances will be removed regardless of their timeout
2013 * - force == false: Only instances that have a timeout will be removed.
2022 if (!instance || hdev->cur_adv_instance == instance) in hci_remove_advertising_sync()
2029 if (hdev->cur_adv_instance == instance) in hci_remove_advertising_sync()
2039 if (force || (adv && adv->timeout && !adv->remaining_time)) { in hci_remove_advertising_sync()
2041 if (next && next->instance == instance) in hci_remove_advertising_sync()
2054 hci_schedule_adv_instance_sync(hdev, next->instance, false); in hci_remove_advertising_sync()
2160 if (hdev->scanning_paused) { in hci_scan_disable_sync()
2181 hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; in hci_start_interleave_scan()
2182 queue_delayed_work(hdev->req_workqueue, in hci_start_interleave_scan()
2183 &hdev->interleave_scan, 0); in hci_start_interleave_scan()
2190 cancel_delayed_work_sync(&hdev->interleave_scan); in cancel_interleave_scan()
2192 hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE; in cancel_interleave_scan()
2201 * - There is at least one ADV monitor in hci_update_interleaved_scan_sync()
2202 * - At least one pending LE connection or one device to be scanned for in hci_update_interleaved_scan_sync()
2203 * - Monitor offloading is not supported in hci_update_interleaved_scan_sync()
2208 !(list_empty(&hdev->pend_le_conns) && in hci_update_interleaved_scan_sync()
2209 list_empty(&hdev->pend_le_reports)) && in hci_update_interleaved_scan_sync()
2237 entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr, in hci_le_del_resolve_list_sync()
2256 if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type)) in hci_le_del_accept_list_sync()
2288 * Setting params to NULL programs local hdev->irk
2309 memcpy(cp.peer_irk, hdev->irk, 16); in hci_le_add_resolve_list_sync()
2311 } else if (!(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION)) in hci_le_add_resolve_list_sync()
2314 irk = hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type); in hci_le_add_resolve_list_sync()
2319 entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, in hci_le_add_resolve_list_sync()
2320 ¶ms->addr, in hci_le_add_resolve_list_sync()
2321 params->addr_type); in hci_le_add_resolve_list_sync()
2325 cp.bdaddr_type = params->addr_type; in hci_le_add_resolve_list_sync()
2326 bacpy(&cp.bdaddr, ¶ms->addr); in hci_le_add_resolve_list_sync()
2327 memcpy(cp.peer_irk, irk->val, 16); in hci_le_add_resolve_list_sync()
2330 params->privacy_mode = HCI_NETWORK_PRIVACY; in hci_le_add_resolve_list_sync()
2333 p = hci_pend_le_action_lookup(&hdev->pend_le_conns, in hci_le_add_resolve_list_sync()
2334 ¶ms->addr, params->addr_type); in hci_le_add_resolve_list_sync()
2336 p = hci_pend_le_action_lookup(&hdev->pend_le_reports, in hci_le_add_resolve_list_sync()
2337 ¶ms->addr, params->addr_type); in hci_le_add_resolve_list_sync()
2339 WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY); in hci_le_add_resolve_list_sync()
2344 memcpy(cp.local_irk, hdev->irk, 16); in hci_le_add_resolve_list_sync()
2360 !(params->flags & HCI_CONN_FLAG_ADDRESS_RESOLUTION)) in hci_le_set_privacy_mode_sync()
2364 if (params->privacy_mode == HCI_DEVICE_PRIVACY) in hci_le_set_privacy_mode_sync()
2371 if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)) in hci_le_set_privacy_mode_sync()
2374 irk = hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type); in hci_le_set_privacy_mode_sync()
2379 cp.bdaddr_type = irk->addr_type; in hci_le_set_privacy_mode_sync()
2380 bacpy(&cp.bdaddr, &irk->bdaddr); in hci_le_set_privacy_mode_sync()
2383 /* Note: params->privacy_mode is not updated since it is a copy */ in hci_le_set_privacy_mode_sync()
2401 if (hdev->suspended && in hci_le_add_accept_list_sync()
2402 !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) { in hci_le_add_accept_list_sync()
2403 hci_le_del_accept_list_sync(hdev, ¶ms->addr, in hci_le_add_accept_list_sync()
2404 params->addr_type); in hci_le_add_accept_list_sync()
2409 if (*num_entries >= hdev->le_accept_list_size) in hci_le_add_accept_list_sync()
2410 return -ENOSPC; in hci_le_add_accept_list_sync()
2430 if (hci_bdaddr_list_lookup(&hdev->le_accept_list, ¶ms->addr, in hci_le_add_accept_list_sync()
2431 params->addr_type)) in hci_le_add_accept_list_sync()
2435 cp.bdaddr_type = params->addr_type; in hci_le_add_accept_list_sync()
2436 bacpy(&cp.bdaddr, ¶ms->addr); in hci_le_add_accept_list_sync()
2460 if (hdev->advertising_paused) in hci_pause_advertising_sync()
2475 hdev->discov_timeout = 0; in hci_pause_advertising_sync()
2491 hdev->advertising_paused = true; in hci_pause_advertising_sync()
2492 hdev->advertising_old_state = old_state; in hci_pause_advertising_sync()
2504 if (!hdev->advertising_paused) in hci_resume_advertising_sync()
2508 hdev->advertising_paused = false; in hci_resume_advertising_sync()
2509 if (hdev->advertising_old_state) { in hci_resume_advertising_sync()
2511 hdev->advertising_old_state = 0; in hci_resume_advertising_sync()
2517 /* Call for each tracked instance to be re-enabled */ in hci_resume_advertising_sync()
2518 list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) { in hci_resume_advertising_sync()
2520 adv->instance); in hci_resume_advertising_sync()
2525 hci_remove_ext_adv_instance_sync(hdev, adv->instance, in hci_resume_advertising_sync()
2533 hdev->cur_adv_instance, in hci_resume_advertising_sync()
2537 hdev->advertising_paused = false; in hci_resume_advertising_sync()
2558 return -EPERM; in hci_pause_addr_resolution()
2617 /* No hdev->lock, but: addr, addr_type are immutable. in conn_params_copy()
2623 bacpy(&p[i].addr, ¶ms->addr); in conn_params_copy()
2624 p[i].addr_type = params->addr_type; in conn_params_copy()
2625 p[i].flags = READ_ONCE(params->flags); in conn_params_copy()
2626 p[i].privacy_mode = READ_ONCE(params->privacy_mode); in conn_params_copy()
2639 if (!(hdev->commands[26] & 0x80)) in hci_le_clear_accept_list_sync()
2650 * ll_privacy_capable((Disable Advertising) -> Disable Resolving List) ->
2651 * Remove Devices From Accept List ->
2652 * (has IRK && ll_privacy_capable(Remove Devices From Resolving List))->
2653 * Add Devices to Accept List ->
2654 * (has IRK && ll_privacy_capable(Remove Devices From Resolving List)) ->
2655 * ll_privacy_capable(Enable Resolving List -> (Enable Advertising)) ->
2704 bacpy(&pa.addr, &sent->addr); in hci_update_accept_list_sync()
2705 pa.addr_type = sent->addr_type; in hci_update_accept_list_sync()
2725 list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) { in hci_update_accept_list_sync()
2726 if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type)) in hci_update_accept_list_sync()
2730 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, in hci_update_accept_list_sync()
2731 &b->bdaddr, in hci_update_accept_list_sync()
2732 b->bdaddr_type); in hci_update_accept_list_sync()
2733 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports, in hci_update_accept_list_sync()
2734 &b->bdaddr, in hci_update_accept_list_sync()
2735 b->bdaddr_type); in hci_update_accept_list_sync()
2741 hci_le_del_accept_list_sync(hdev, &b->bdaddr, in hci_update_accept_list_sync()
2742 b->bdaddr_type); in hci_update_accept_list_sync()
2763 params = conn_params_copy(&hdev->pend_le_conns, &n); in hci_update_accept_list_sync()
2765 err = -ENOMEM; in hci_update_accept_list_sync()
2785 params = conn_params_copy(&hdev->pend_le_reports, &n); in hci_update_accept_list_sync()
2787 err = -ENOMEM; in hci_update_accept_list_sync()
2803 * - We are not currently suspending in hci_update_accept_list_sync()
2804 * - There are 1 or more ADV monitors registered and it's not offloaded in hci_update_accept_list_sync()
2805 * - Interleaved scanning is not currently using the allowlist in hci_update_accept_list_sync()
2807 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended && in hci_update_accept_list_sync()
2809 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST) in hci_update_accept_list_sync()
2810 err = -EINVAL; in hci_update_accept_list_sync()
2831 cp->type = type; in hci_le_scan_phy_params()
2832 cp->interval = cpu_to_le16(interval); in hci_le_scan_phy_params()
2833 cp->window = cpu_to_le16(window); in hci_le_scan_phy_params()
2846 phy = (void *)cp->data; in hci_le_set_ext_scan_param_sync()
2850 cp->own_addr_type = own_addr_type; in hci_le_set_ext_scan_param_sync()
2851 cp->filter_policy = filter_policy; in hci_le_set_ext_scan_param_sync()
2864 &sent->bdaddr); in hci_le_set_ext_scan_param_sync()
2866 struct bt_iso_qos *qos = &conn->iso_qos; in hci_le_set_ext_scan_param_sync()
2868 if (qos->bcast.in.phy & BT_ISO_PHY_1M || in hci_le_set_ext_scan_param_sync()
2869 qos->bcast.in.phy & BT_ISO_PHY_2M) { in hci_le_set_ext_scan_param_sync()
2870 cp->scanning_phys |= LE_SCAN_PHY_1M; in hci_le_set_ext_scan_param_sync()
2878 if (qos->bcast.in.phy & BT_ISO_PHY_CODED) { in hci_le_set_ext_scan_param_sync()
2879 cp->scanning_phys |= LE_SCAN_PHY_CODED; in hci_le_set_ext_scan_param_sync()
2894 cp->scanning_phys |= LE_SCAN_PHY_1M; in hci_le_set_ext_scan_param_sync()
2901 cp->scanning_phys |= LE_SCAN_PHY_CODED; in hci_le_set_ext_scan_param_sync()
2909 return -EINVAL; in hci_le_set_ext_scan_param_sync()
2944 if (hdev->scanning_paused) { in hci_start_scan_sync()
2965 if (hdev->scanning_paused) { in hci_passive_scan_sync()
2977 * during passive scanning. Not using a non-resolvable address in hci_passive_scan_sync()
2986 if (hdev->enable_advmon_interleave_scan && in hci_passive_scan_sync()
2990 bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state); in hci_passive_scan_sync()
3002 if (hdev->suspended && !filter_policy) { in hci_passive_scan_sync()
3006 if (list_empty(&hdev->le_accept_list)) in hci_passive_scan_sync()
3010 * devices could not be programmed which in non-suspended case in hci_passive_scan_sync()
3029 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) in hci_passive_scan_sync()
3032 if (hdev->suspended) { in hci_passive_scan_sync()
3033 window = hdev->le_scan_window_suspend; in hci_passive_scan_sync()
3034 interval = hdev->le_scan_int_suspend; in hci_passive_scan_sync()
3036 window = hdev->le_scan_window_connect; in hci_passive_scan_sync()
3037 interval = hdev->le_scan_int_connect; in hci_passive_scan_sync()
3039 window = hdev->le_scan_window_adv_monitor; in hci_passive_scan_sync()
3040 interval = hdev->le_scan_int_adv_monitor; in hci_passive_scan_sync()
3056 window = hdev->le_scan_window; in hci_passive_scan_sync()
3057 interval = hdev->le_scan_interval; in hci_passive_scan_sync()
3072 /* This function controls the passive scanning based on hdev->pend_le_conns
3078 * Disable Scanning -> Update Accept List ->
3079 * ll_privacy_capable((Disable Advertising) -> Disable Resolving List ->
3080 * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
3091 if (!test_bit(HCI_UP, &hdev->flags) || in hci_update_passive_scan_sync()
3092 test_bit(HCI_INIT, &hdev->flags) || in hci_update_passive_scan_sync()
3104 if (hdev->discovery.state != DISCOVERY_STOPPED) in hci_update_passive_scan_sync()
3120 list_empty(&hdev->pend_le_conns) && in hci_update_passive_scan_sync()
3121 list_empty(&hdev->pend_le_reports) && in hci_update_passive_scan_sync()
3176 if (!test_bit(HCI_UP, &hdev->flags) || in hci_update_passive_scan()
3177 test_bit(HCI_INIT, &hdev->flags) || in hci_update_passive_scan()
3200 hdev->features[1][0] |= LMP_HOST_SC; in hci_write_sc_support_sync()
3203 hdev->features[1][0] &= ~LMP_HOST_SC; in hci_write_sc_support_sync()
3279 list_empty(&hdev->adv_instances)) { in hci_powered_update_adv_sync()
3295 list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) in hci_powered_update_adv_sync()
3296 hci_schedule_adv_instance_sync(hdev, adv->instance, true); in hci_powered_update_adv_sync()
3306 if (link_sec == test_bit(HCI_AUTH, &hdev->flags)) in hci_write_auth_enable_sync()
3323 if (hdev->hci_ver < BLUETOOTH_VER_1_2) in hci_write_fast_connectable_sync()
3334 type = hdev->def_page_scan_type; in hci_write_fast_connectable_sync()
3335 cp.interval = cpu_to_le16(hdev->def_page_scan_int); in hci_write_fast_connectable_sync()
3338 cp.window = cpu_to_le16(hdev->def_page_scan_window); in hci_write_fast_connectable_sync()
3340 if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval || in hci_write_fast_connectable_sync()
3341 __cpu_to_le16(hdev->page_scan_window) != cp.window) { in hci_write_fast_connectable_sync()
3349 if (hdev->page_scan_type != type) in hci_write_fast_connectable_sync()
3362 list_for_each_entry(b, &hdev->accept_list, list) { in disconnected_accept_list_entries()
3365 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); in disconnected_accept_list_entries()
3369 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) in disconnected_accept_list_entries()
3396 if (hdev->scanning_paused) in hci_update_scan_sync()
3408 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && in hci_update_scan_sync()
3409 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) in hci_update_scan_sync()
3421 memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); in hci_update_name_sync()
3433 * HCI_LE_ENABLED(ll_privacy_capable(Add local IRK to Resolving List) ->
3436 * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
3437 * Set Name -> Set EIR)
3483 * In case BR/EDR has been disabled on a dual-mode controller in hci_powered_update_sync()
3488 (!bacmp(&hdev->bdaddr, BDADDR_ANY) && in hci_powered_update_sync()
3490 if (bacmp(&hdev->static_addr, BDADDR_ANY)) in hci_powered_update_sync()
3492 &hdev->static_addr); in hci_powered_update_sync()
3499 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
3504 * Search the firmware node for 'local-bd-address'.
3506 * All-zero BD addresses are rejected, because those could be properties
3508 * example, the DTS could define 'local-bd-address', with zero BD addresses.
3512 struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent); in hci_dev_get_bd_addr_from_property()
3516 ret = fwnode_property_read_u8_array(fwnode, "local-bd-address", in hci_dev_get_bd_addr_from_property()
3521 if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks)) in hci_dev_get_bd_addr_from_property()
3522 baswap(&hdev->public_addr, &ba); in hci_dev_get_bd_addr_from_property()
3524 bacpy(&hdev->public_addr, &ba); in hci_dev_get_bd_addr_from_property()
3579 set_bit(HCI_RESET, &hdev->flags); in hci_reset_sync()
3596 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { in hci_init0_sync()
3609 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) in hci_unconf_init_sync()
3651 if (hdev->hci_ver > BLUETOOTH_VER_1_1 && in hci_read_local_cmds_sync()
3652 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) in hci_read_local_cmds_sync()
3666 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { in hci_init1_sync()
3726 if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) in hci_set_event_filter_sync()
3753 if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) in hci_clear_event_filter_sync()
3803 hdev->max_page = 0x01; in hci_write_ssp_mode_1_sync()
3816 memset(hdev->eir, 0, sizeof(hdev->eir)); in hci_write_eir_sync()
3828 !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) in hci_write_inquiry_mode_sync()
3892 if (iso_capable(hdev) && hdev->commands[41] & 0x20) in hci_le_read_buffer_size_sync()
3948 /* LE-only controllers have LE implicitly enabled */ in hci_init2_sync()
3967 if (hdev->hci_ver < BLUETOOTH_VER_1_2) in hci_set_event_mask_sync()
3977 if (hdev->suspended) { in hci_set_event_mask_sync()
3982 /* Use a different default for LE-only devices */ in hci_set_event_mask_sync()
3992 if (hdev->commands[0] & 0x20) { in hci_set_event_mask_sync()
3997 if (!hdev->suspended) in hci_set_event_mask_sync()
4006 if (hdev->commands[2] & 0x80) in hci_set_event_mask_sync()
4011 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { in hci_set_event_mask_sync()
4018 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) in hci_set_event_mask_sync()
4059 events[7] |= 0x20; /* LE Meta-Event */ in hci_set_event_mask_sync()
4069 if (!(hdev->commands[6] & 0x20) || in hci_read_stored_link_key_sync()
4070 test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) in hci_read_stored_link_key_sync()
4086 if (!(hdev->commands[5] & 0x10)) in hci_setup_link_policy_sync()
4108 if (!(hdev->commands[8] & 0x01)) in hci_read_page_scan_activity_sync()
4117 if (!(hdev->commands[18] & 0x04) || in hci_read_def_err_data_reporting_sync()
4118 !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || in hci_read_def_err_data_reporting_sync()
4119 test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) in hci_read_def_err_data_reporting_sync()
4132 if (!(hdev->commands[13] & 0x01)) in hci_read_page_scan_type_sync()
4148 for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page; in hci_read_local_ext_features_all_sync()
4186 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) in hci_le_set_event_mask_sync()
4192 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC) in hci_le_set_event_mask_sync()
4199 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) in hci_le_set_event_mask_sync()
4210 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY; in hci_le_set_event_mask_sync()
4214 hdev->conn_flags |= HCI_CONN_FLAG_ADDRESS_RESOLUTION; in hci_le_set_event_mask_sync()
4219 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) in hci_le_set_event_mask_sync()
4225 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2) in hci_le_set_event_mask_sync()
4231 if (hdev->commands[26] & 0x08) in hci_le_set_event_mask_sync()
4237 if (hdev->commands[26] & 0x10) in hci_le_set_event_mask_sync()
4243 if (hdev->commands[27] & 0x04) in hci_le_set_event_mask_sync()
4249 if (hdev->commands[27] & 0x20) in hci_le_set_event_mask_sync()
4253 /* If the controller supports the LE Read Local P-256 in hci_le_set_event_mask_sync()
4256 if (hdev->commands[34] & 0x02) in hci_le_set_event_mask_sync()
4257 /* LE Read Local P-256 Public Key Complete */ in hci_le_set_event_mask_sync()
4263 if (hdev->commands[34] & 0x04) in hci_le_set_event_mask_sync()
4269 if (hdev->commands[35] & (0x20 | 0x40)) in hci_le_set_event_mask_sync()
4308 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { in hci_le_read_adv_tx_power_sync()
4326 if (!(hdev->commands[38] & 0x80) || in hci_le_read_tx_power_sync()
4327 test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) in hci_le_read_tx_power_sync()
4337 if (!(hdev->commands[26] & 0x40)) in hci_le_read_accept_list_size_sync()
4347 if (!(hdev->commands[34] & 0x40)) in hci_le_read_resolv_list_size_sync()
4357 if (!(hdev->commands[34] & 0x20)) in hci_le_clear_resolv_list_sync()
4367 __le16 timeout = cpu_to_le16(hdev->rpa_timeout); in hci_le_set_rpa_timeout_sync()
4369 if (!(hdev->commands[35] & 0x04) || in hci_le_set_rpa_timeout_sync()
4370 test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks)) in hci_le_set_rpa_timeout_sync()
4381 if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) in hci_le_read_max_data_len_sync()
4391 if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) in hci_le_read_def_data_len_sync()
4414 /* LE-only devices do not support explicit enablement */ in hci_set_le_support_sync()
4514 if (!(hdev->commands[6] & 0x80) || in hci_delete_stored_link_key_sync()
4515 test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) in hci_delete_stored_link_key_sync()
4532 if (!(hdev->commands[22] & 0x04)) in hci_set_event_mask_page_2_sync()
4542 events[2] |= 0x20; /* CPB Channel Map Change */ in hci_set_event_mask_page_2_sync()
4551 events[2] |= 0x02; /* CPB Receive */ in hci_set_event_mask_page_2_sync()
4552 events[2] |= 0x04; /* CPB Timeout */ in hci_set_event_mask_page_2_sync()
4558 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { in hci_set_event_mask_page_2_sync()
4576 /* Read local codec list if the HCI command is supported */
4579 if (hdev->commands[45] & 0x04) in hci_read_local_codecs_sync()
4581 else if (hdev->commands[29] & 0x20) in hci_read_local_codecs_sync()
4590 if (!(hdev->commands[41] & 0x08)) in hci_read_local_pairing_opts_sync()
4639 if (!(hdev->commands[18] & 0x08) || in hci_set_err_data_report_sync()
4640 !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || in hci_set_err_data_report_sync()
4641 test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) in hci_set_err_data_report_sync()
4644 if (enabled == hdev->err_data_reporting) in hci_set_err_data_report_sync()
4680 if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) in hci_le_set_write_def_data_len_sync()
4684 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len); in hci_le_set_write_def_data_len_sync()
4685 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time); in hci_le_set_write_def_data_len_sync()
4698 if (!(hdev->commands[35] & 0x20)) { in hci_le_set_default_phy_sync()
4702 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; in hci_le_set_default_phy_sync()
4703 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; in hci_le_set_default_phy_sync()
4844 * Calls hdev->setup
4854 !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) in hci_dev_setup_sync()
4861 if (hdev->setup) in hci_dev_setup_sync()
4862 ret = hdev->setup(hdev); in hci_dev_setup_sync()
4865 if (test_bit(hci_broken_table[i].quirk, &hdev->quirks)) in hci_dev_setup_sync()
4873 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) || in hci_dev_setup_sync()
4874 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); in hci_dev_setup_sync()
4876 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) && in hci_dev_setup_sync()
4877 !bacmp(&hdev->public_addr, BDADDR_ANY)) in hci_dev_setup_sync()
4880 if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) && in hci_dev_setup_sync()
4881 hdev->set_bdaddr) { in hci_dev_setup_sync()
4882 ret = hdev->set_bdaddr(hdev, &hdev->public_addr); in hci_dev_setup_sync()
4898 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || in hci_dev_setup_sync()
4927 atomic_set(&hdev->cmd_cnt, 1); in hci_dev_init_sync()
4928 set_bit(HCI_INIT, &hdev->flags); in hci_dev_init_sync()
4938 if (bacmp(&hdev->public_addr, BDADDR_ANY) && in hci_dev_init_sync()
4939 hdev->set_bdaddr) in hci_dev_init_sync()
4940 ret = hdev->set_bdaddr(hdev, &hdev->public_addr); in hci_dev_init_sync()
4942 ret = -EADDRNOTAVAIL; in hci_dev_init_sync()
4949 if (!ret && hdev->post_init) in hci_dev_init_sync()
4950 ret = hdev->post_init(hdev); in hci_dev_init_sync()
4958 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && in hci_dev_init_sync()
4960 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag) in hci_dev_init_sync()
4961 ret = hdev->set_diag(hdev, true); in hci_dev_init_sync()
4968 clear_bit(HCI_INIT, &hdev->flags); in hci_dev_init_sync()
4980 ret = -ENODEV; in hci_dev_open_sync()
4990 ret = -ERFKILL; in hci_dev_open_sync()
5004 !bacmp(&hdev->bdaddr, BDADDR_ANY) && in hci_dev_open_sync()
5005 !bacmp(&hdev->static_addr, BDADDR_ANY)) { in hci_dev_open_sync()
5006 ret = -EADDRNOTAVAIL; in hci_dev_open_sync()
5011 if (test_bit(HCI_UP, &hdev->flags)) { in hci_dev_open_sync()
5012 ret = -EALREADY; in hci_dev_open_sync()
5016 if (hdev->open(hdev)) { in hci_dev_open_sync()
5017 ret = -EIO; in hci_dev_open_sync()
5023 set_bit(HCI_RUNNING, &hdev->flags); in hci_dev_open_sync()
5031 set_bit(HCI_UP, &hdev->flags); in hci_dev_open_sync()
5044 flush_work(&hdev->tx_work); in hci_dev_open_sync()
5050 flush_work(&hdev->rx_work); in hci_dev_open_sync()
5051 flush_work(&hdev->cmd_work); in hci_dev_open_sync()
5053 skb_queue_purge(&hdev->cmd_q); in hci_dev_open_sync()
5054 skb_queue_purge(&hdev->rx_q); in hci_dev_open_sync()
5056 if (hdev->flush) in hci_dev_open_sync()
5057 hdev->flush(hdev); in hci_dev_open_sync()
5059 if (hdev->sent_cmd) { in hci_dev_open_sync()
5060 cancel_delayed_work_sync(&hdev->cmd_timer); in hci_dev_open_sync()
5061 kfree_skb(hdev->sent_cmd); in hci_dev_open_sync()
5062 hdev->sent_cmd = NULL; in hci_dev_open_sync()
5065 if (hdev->req_skb) { in hci_dev_open_sync()
5066 kfree_skb(hdev->req_skb); in hci_dev_open_sync()
5067 hdev->req_skb = NULL; in hci_dev_open_sync()
5070 clear_bit(HCI_RUNNING, &hdev->flags); in hci_dev_open_sync()
5073 hdev->close(hdev); in hci_dev_open_sync()
5074 hdev->flags &= BIT(HCI_RAW); in hci_dev_open_sync()
5081 /* This function requires the caller holds hdev->lock */
5086 list_for_each_entry(p, &hdev->le_conn_params, list) { in hci_pend_le_actions_clear()
5088 if (p->conn) { in hci_pend_le_actions_clear()
5089 hci_conn_drop(p->conn); in hci_pend_le_actions_clear()
5090 hci_conn_put(p->conn); in hci_pend_le_actions_clear()
5091 p->conn = NULL; in hci_pend_le_actions_clear()
5111 test_bit(HCI_UP, &hdev->flags)) { in hci_dev_shutdown()
5113 if (hdev->shutdown) in hci_dev_shutdown()
5114 err = hdev->shutdown(hdev); in hci_dev_shutdown()
5131 disable_delayed_work(&hdev->power_off); in hci_dev_close_sync()
5132 disable_delayed_work(&hdev->ncmd_timer); in hci_dev_close_sync()
5133 disable_delayed_work(&hdev->le_scan_disable); in hci_dev_close_sync()
5135 cancel_delayed_work(&hdev->power_off); in hci_dev_close_sync()
5136 cancel_delayed_work(&hdev->ncmd_timer); in hci_dev_close_sync()
5137 cancel_delayed_work(&hdev->le_scan_disable); in hci_dev_close_sync()
5144 if (hdev->adv_instance_timeout) { in hci_dev_close_sync()
5145 cancel_delayed_work_sync(&hdev->adv_instance_expire); in hci_dev_close_sync()
5146 hdev->adv_instance_timeout = 0; in hci_dev_close_sync()
5151 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { in hci_dev_close_sync()
5152 cancel_delayed_work_sync(&hdev->cmd_timer); in hci_dev_close_sync()
5159 flush_work(&hdev->tx_work); in hci_dev_close_sync()
5160 flush_work(&hdev->rx_work); in hci_dev_close_sync()
5162 if (hdev->discov_timeout > 0) { in hci_dev_close_sync()
5163 hdev->discov_timeout = 0; in hci_dev_close_sync()
5169 cancel_delayed_work(&hdev->service_cache); in hci_dev_close_sync()
5174 cancel_delayed_work_sync(&hdev->rpa_expired); in hci_dev_close_sync()
5176 list_for_each_entry(adv_instance, &hdev->adv_instances, list) in hci_dev_close_sync()
5177 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); in hci_dev_close_sync()
5183 drain_workqueue(hdev->workqueue); in hci_dev_close_sync()
5198 /* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */ in hci_dev_close_sync()
5209 if (hdev->flush) in hci_dev_close_sync()
5210 hdev->flush(hdev); in hci_dev_close_sync()
5213 skb_queue_purge(&hdev->cmd_q); in hci_dev_close_sync()
5214 atomic_set(&hdev->cmd_cnt, 1); in hci_dev_close_sync()
5215 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && in hci_dev_close_sync()
5217 set_bit(HCI_INIT, &hdev->flags); in hci_dev_close_sync()
5219 clear_bit(HCI_INIT, &hdev->flags); in hci_dev_close_sync()
5223 flush_work(&hdev->cmd_work); in hci_dev_close_sync()
5226 skb_queue_purge(&hdev->rx_q); in hci_dev_close_sync()
5227 skb_queue_purge(&hdev->cmd_q); in hci_dev_close_sync()
5228 skb_queue_purge(&hdev->raw_q); in hci_dev_close_sync()
5231 if (hdev->sent_cmd) { in hci_dev_close_sync()
5232 cancel_delayed_work_sync(&hdev->cmd_timer); in hci_dev_close_sync()
5233 kfree_skb(hdev->sent_cmd); in hci_dev_close_sync()
5234 hdev->sent_cmd = NULL; in hci_dev_close_sync()
5238 if (hdev->req_skb) { in hci_dev_close_sync()
5239 kfree_skb(hdev->req_skb); in hci_dev_close_sync()
5240 hdev->req_skb = NULL; in hci_dev_close_sync()
5243 clear_bit(HCI_RUNNING, &hdev->flags); in hci_dev_close_sync()
5247 hdev->close(hdev); in hci_dev_close_sync()
5250 hdev->flags &= BIT(HCI_RAW); in hci_dev_close_sync()
5253 memset(hdev->eir, 0, sizeof(hdev->eir)); in hci_dev_close_sync()
5254 memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); in hci_dev_close_sync()
5255 bacpy(&hdev->random_addr, BDADDR_ANY); in hci_dev_close_sync()
5256 hci_codec_list_clear(&hdev->local_codecs); in hci_dev_close_sync()
5272 if (test_bit(HCI_UP, &hdev->flags) && in hci_power_on_sync()
5275 cancel_delayed_work(&hdev->power_off); in hci_power_on_sync()
5289 (!bacmp(&hdev->bdaddr, BDADDR_ANY) && in hci_power_on_sync()
5290 !bacmp(&hdev->static_addr, BDADDR_ANY))) { in hci_power_on_sync()
5294 queue_delayed_work(hdev->req_workqueue, &hdev->power_off, in hci_power_on_sync()
5303 set_bit(HCI_RAW, &hdev->flags); in hci_power_on_sync()
5318 clear_bit(HCI_RAW, &hdev->flags); in hci_power_on_sync()
5343 struct discovery_state *d = &hdev->discovery; in hci_stop_discovery_sync()
5347 bt_dev_dbg(hdev, "state %u", hdev->discovery.state); in hci_stop_discovery_sync()
5349 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { in hci_stop_discovery_sync()
5350 if (test_bit(HCI_INQUIRY, &hdev->flags)) { in hci_stop_discovery_sync()
5358 cancel_delayed_work(&hdev->le_scan_disable); in hci_stop_discovery_sync()
5375 /* No further actions needed for LE-only discovery */ in hci_stop_discovery_sync()
5376 if (d->type == DISCOV_TYPE_LE) in hci_stop_discovery_sync()
5379 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { in hci_stop_discovery_sync()
5388 hci_remote_name_cancel_sync(hdev, &e->data.bdaddr); in hci_stop_discovery_sync()
5399 if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) { in hci_disconnect_sync()
5411 cp.handle = cpu_to_le16(conn->handle); in hci_disconnect_sync()
5435 if (test_bit(HCI_CONN_SCANNING, &conn->flags)) in hci_le_connect_cancel_sync()
5438 if (conn->role == HCI_ROLE_SLAVE || in hci_le_connect_cancel_sync()
5439 test_and_set_bit(HCI_CONN_CANCEL, &conn->flags)) in hci_le_connect_cancel_sync()
5449 if (conn->type == LE_LINK) in hci_connect_cancel_sync()
5452 if (conn->type == ISO_LINK) { in hci_connect_cancel_sync()
5461 if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) in hci_connect_cancel_sync()
5465 if (bacmp(&conn->dst, BDADDR_ANY)) in hci_connect_cancel_sync()
5474 if (hdev->hci_ver < BLUETOOTH_VER_1_2) in hci_connect_cancel_sync()
5484 6, &conn->dst, in hci_connect_cancel_sync()
5489 6, &conn->dst, HCI_CMD_TIMEOUT); in hci_connect_cancel_sync()
5498 bacpy(&cp.bdaddr, &conn->dst); in hci_reject_sco_sync()
5502 * allowed error values (0x0D-0x0F). in hci_reject_sco_sync()
5517 cp.handle = cpu_to_le16(conn->handle); in hci_le_reject_cis_sync()
5529 if (conn->type == ISO_LINK) in hci_reject_conn_sync()
5532 if (conn->type == SCO_LINK || conn->type == ESCO_LINK) in hci_reject_conn_sync()
5536 bacpy(&cp.bdaddr, &conn->dst); in hci_reject_conn_sync()
5546 u16 handle = conn->handle; in hci_abort_conn_sync()
5550 switch (conn->state) { in hci_abort_conn_sync()
5584 conn->state = BT_CLOSED; in hci_abort_conn_sync()
5598 struct list_head *head = &hdev->conn_hash.list; in hci_disconnect_all_sync()
5632 if (!test_bit(HCI_UP, &hdev->flags)) in hci_power_off_sync()
5637 if (test_bit(HCI_ISCAN, &hdev->flags) || in hci_power_off_sync()
5638 test_bit(HCI_PSCAN, &hdev->flags)) { in hci_power_off_sync()
5683 cp.num_iac = min_t(u8, hdev->num_iac, 2); in hci_write_iac_sync()
5770 * by-product of disabling connectable, we need to update the in hci_update_connectable_sync()
5774 err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance); in hci_update_connectable_sync()
5778 !list_empty(&hdev->adv_instances)) { in hci_update_connectable_sync()
5781 hdev->cur_adv_instance); in hci_update_connectable_sync()
5800 if (test_bit(HCI_INQUIRY, &hdev->flags)) in hci_inquiry_sync()
5809 if (hdev->discovery.limited) in hci_inquiry_sync()
5852 * address (when privacy feature has been enabled) or non-resolvable in hci_active_scan_sync()
5861 (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && in hci_active_scan_sync()
5862 hdev->discovery.result_filtering)) { in hci_active_scan_sync()
5877 hdev->le_scan_window_discovery, in hci_active_scan_sync()
5898 err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2); in hci_start_interleaved_discovery_sync()
5910 bt_dev_dbg(hdev, "type %u", hdev->discovery.type); in hci_start_discovery_sync()
5912 switch (hdev->discovery.type) { in hci_start_discovery_sync()
5925 &hdev->quirks)) { in hci_start_discovery_sync()
5935 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); in hci_start_discovery_sync()
5936 err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery); in hci_start_discovery_sync()
5940 err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery); in hci_start_discovery_sync()
5943 return -EINVAL; in hci_start_discovery_sync()
5951 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, in hci_start_discovery_sync()
5970 int old_state = hdev->discovery.state; in hci_pause_discovery_sync()
5975 hdev->discovery_paused) in hci_pause_discovery_sync()
5983 hdev->discovery_paused = true; in hci_pause_discovery_sync()
5993 bool scanning = test_bit(HCI_PSCAN, &hdev->flags); in hci_update_event_filter_sync()
6002 if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) in hci_update_event_filter_sync()
6008 list_for_each_entry(b, &hdev->accept_list, list) { in hci_update_event_filter_sync()
6009 if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) in hci_update_event_filter_sync()
6012 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); in hci_update_event_filter_sync()
6016 &b->bdaddr, in hci_update_event_filter_sync()
6020 &b->bdaddr); in hci_update_event_filter_sync()
6036 if (hdev->scanning_paused) in hci_pause_scan_sync()
6040 if (test_bit(HCI_PSCAN, &hdev->flags)) in hci_pause_scan_sync()
6045 hdev->scanning_paused = true; in hci_pause_scan_sync()
6068 if (hdev->suspended) in hci_suspend_sync()
6072 hdev->suspended = true; in hci_suspend_sync()
6083 /* Prevent disconnects from causing scanning to be re-enabled */ in hci_suspend_sync()
6091 hdev->suspend_state = BT_RUNNING; in hci_suspend_sync()
6105 if (!hdev->wakeup || !hdev->wakeup(hdev)) { in hci_suspend_sync()
6106 hdev->suspend_state = BT_SUSPEND_DISCONNECT; in hci_suspend_sync()
6111 hdev->scanning_paused = false; in hci_suspend_sync()
6120 hdev->scanning_paused = true; in hci_suspend_sync()
6122 hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE; in hci_suspend_sync()
6133 if (!hdev->discovery_paused) in hci_resume_discovery_sync()
6136 hdev->discovery_paused = false; in hci_resume_discovery_sync()
6162 if (!hdev->scanning_paused) in hci_resume_scan_sync()
6165 hdev->scanning_paused = false; in hci_resume_scan_sync()
6186 if (!hdev->suspended) in hci_resume_sync()
6189 hdev->suspended = false; in hci_resume_sync()
6214 struct hci_dev *hdev = conn->hdev; in conn_use_rpa()
6243 cp.channel_map = hdev->le_adv_channel_map; in hci_le_ext_directed_advertising_sync()
6249 cp.peer_addr_type = conn->dst_type; in hci_le_ext_directed_advertising_sync()
6250 bacpy(&cp.peer_addr, &conn->dst); in hci_le_ext_directed_advertising_sync()
6272 bacmp(&random_addr, &hdev->random_addr)) { in hci_le_ext_directed_advertising_sync()
6319 cp.direct_addr_type = conn->dst_type; in hci_le_directed_advertising_sync()
6320 bacpy(&cp.direct_addr, &conn->dst); in hci_le_directed_advertising_sync()
6321 cp.channel_map = hdev->le_adv_channel_map; in hci_le_directed_advertising_sync()
6337 struct hci_dev *hdev = conn->hdev; in set_ext_conn_params()
6341 p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect); in set_ext_conn_params()
6342 p->scan_window = cpu_to_le16(hdev->le_scan_window_connect); in set_ext_conn_params()
6343 p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); in set_ext_conn_params()
6344 p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); in set_ext_conn_params()
6345 p->conn_latency = cpu_to_le16(conn->le_conn_latency); in set_ext_conn_params()
6346 p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout); in set_ext_conn_params()
6347 p->min_ce_len = cpu_to_le16(0x0000); in set_ext_conn_params()
6348 p->max_ce_len = cpu_to_le16(0x0000); in set_ext_conn_params()
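For orientation, the fields filled above use the standard HCI units: scan interval and window in 0.625 ms steps, connection interval in 1.25 ms steps, latency in connection events, and supervision timeout in 10 ms steps. A minimal stand-alone sketch of filling one such per-PHY block, using a hypothetical struct rather than the kernel's own type:

/* Illustrative sketch only: hypothetical stand-in for the kernel struct. */
#include <stdint.h>
#include <string.h>

struct ext_conn_phy_params {
	uint16_t scan_interval;		/* 0.625 ms units */
	uint16_t scan_window;		/* 0.625 ms units */
	uint16_t conn_interval_min;	/* 1.25 ms units */
	uint16_t conn_interval_max;	/* 1.25 ms units */
	uint16_t conn_latency;		/* connection events */
	uint16_t supervision_timeout;	/* 10 ms units */
	uint16_t min_ce_len;
	uint16_t max_ce_len;
};

static void fill_phy_params(struct ext_conn_phy_params *p)
{
	memset(p, 0, sizeof(*p));
	p->scan_interval = 0x0060;		/* 60 ms */
	p->scan_window = 0x0060;		/* 60 ms */
	p->conn_interval_min = 0x0018;		/* 30 ms */
	p->conn_interval_max = 0x0028;		/* 50 ms */
	p->conn_latency = 0x0000;
	p->supervision_timeout = 0x002a;	/* 420 ms */
	/* min/max CE length left at zero, as in the excerpt above */
}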
6360 p = (void *)cp->data; in hci_le_ext_create_conn_sync()
6364 bacpy(&cp->peer_addr, &conn->dst); in hci_le_ext_create_conn_sync()
6365 cp->peer_addr_type = conn->dst_type; in hci_le_ext_create_conn_sync()
6366 cp->own_addr_type = own_addr_type; in hci_le_ext_create_conn_sync()
6370 if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M || in hci_le_ext_create_conn_sync()
6371 conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) { in hci_le_ext_create_conn_sync()
6372 cp->phys |= LE_SCAN_PHY_1M; in hci_le_ext_create_conn_sync()
6379 if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M || in hci_le_ext_create_conn_sync()
6380 conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) { in hci_le_ext_create_conn_sync()
6381 cp->phys |= LE_SCAN_PHY_2M; in hci_le_ext_create_conn_sync()
6388 if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED || in hci_le_ext_create_conn_sync()
6389 conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) { in hci_le_ext_create_conn_sync()
6390 cp->phys |= LE_SCAN_PHY_CODED; in hci_le_ext_create_conn_sync()
6399 conn->conn_timeout, NULL); in hci_le_ext_create_conn_sync()
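The PHY-selection logic above sets one bit per supported PHY and appends a matching parameter block; in the extended create-connection command the blocks follow in ascending bit order of the PHY mask. A rough userspace sketch of that packing, with hypothetical constants and types (not the kernel's LE_SCAN_PHY_* definitions):

/* Illustrative sketch: one parameter block per bit set in the PHY mask. */
#include <stddef.h>
#include <stdint.h>

#define PHY_1M		0x01
#define PHY_2M		0x02
#define PHY_CODED	0x04

struct phy_block { uint16_t scan_interval, scan_window; /* ... */ };

static size_t pack_phy_blocks(uint8_t *mask, struct phy_block *out,
			      const struct phy_block *tmpl, uint8_t wanted)
{
	size_t n = 0;

	*mask = 0;
	for (uint8_t bit = PHY_1M; bit <= PHY_CODED; bit <<= 1) {
		if (!(wanted & bit))
			continue;
		*mask |= bit;		/* advertise this PHY in the mask */
		out[n++] = *tmpl;	/* append its parameter block */
	}
	return n;
}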
6411 return -ECANCELED; in hci_le_create_conn_sync()
6415 clear_bit(HCI_CONN_SCANNING, &conn->flags); in hci_le_create_conn_sync()
6416 conn->state = BT_CONNECT; in hci_le_create_conn_sync()
6419 if (conn->role == HCI_ROLE_SLAVE) { in hci_le_create_conn_sync()
6424 hdev->le_scan_type == LE_SCAN_ACTIVE && in hci_le_create_conn_sync()
6427 return -EBUSY; in hci_le_create_conn_sync()
6441 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); in hci_le_create_conn_sync()
6443 conn->le_conn_min_interval = params->conn_min_interval; in hci_le_create_conn_sync()
6444 conn->le_conn_max_interval = params->conn_max_interval; in hci_le_create_conn_sync()
6445 conn->le_conn_latency = params->conn_latency; in hci_le_create_conn_sync()
6446 conn->le_supv_timeout = params->supervision_timeout; in hci_le_create_conn_sync()
6448 conn->le_conn_min_interval = hdev->le_conn_min_interval; in hci_le_create_conn_sync()
6449 conn->le_conn_max_interval = hdev->le_conn_max_interval; in hci_le_create_conn_sync()
6450 conn->le_conn_latency = hdev->le_conn_latency; in hci_le_create_conn_sync()
6451 conn->le_supv_timeout = hdev->le_supv_timeout; in hci_le_create_conn_sync()
6466 * that we never connect with a non-resolvable address. in hci_le_create_conn_sync()
6480 cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect); in hci_le_create_conn_sync()
6481 cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect); in hci_le_create_conn_sync()
6483 bacpy(&cp.peer_addr, &conn->dst); in hci_le_create_conn_sync()
6484 cp.peer_addr_type = conn->dst_type; in hci_le_create_conn_sync()
6486 cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); in hci_le_create_conn_sync()
6487 cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); in hci_le_create_conn_sync()
6488 cp.conn_latency = cpu_to_le16(conn->le_conn_latency); in hci_le_create_conn_sync()
6489 cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout); in hci_le_create_conn_sync()
6504 conn->conn_timeout, NULL); in hci_le_create_conn_sync()
6507 if (err == -ETIMEDOUT) in hci_le_create_conn_sync()
6510 /* Re-enable advertising after the connection attempt is finished. */ in hci_le_create_conn_sync()
6550 list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { in hci_le_create_cis_sync()
6551 if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) in hci_le_create_cis_sync()
6556 list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { in hci_le_create_cis_sync()
6562 cig = conn->iso_qos.ucast.cig; in hci_le_create_cis_sync()
6564 list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) { in hci_le_create_cis_sync()
6566 link->iso_qos.ucast.cig == cig && in hci_le_create_cis_sync()
6567 link->state != BT_CONNECTED) { in hci_le_create_cis_sync()
6580 list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { in hci_le_create_cis_sync()
6581 struct hci_cis *cis = &cmd->cis[aux_num_cis]; in hci_le_create_cis_sync()
6584 conn->iso_qos.ucast.cig != cig) in hci_le_create_cis_sync()
6587 set_bit(HCI_CONN_CREATE_CIS, &conn->flags); in hci_le_create_cis_sync()
6588 cis->acl_handle = cpu_to_le16(conn->parent->handle); in hci_le_create_cis_sync()
6589 cis->cis_handle = cpu_to_le16(conn->handle); in hci_le_create_cis_sync()
6592 if (aux_num_cis >= cmd->num_cis) in hci_le_create_cis_sync()
6595 cmd->num_cis = aux_num_cis; in hci_le_create_cis_sync()
6607 struct_size(cmd, cis, cmd->num_cis), in hci_le_create_cis_sync()
6609 conn->conn_timeout, NULL); in hci_le_create_cis_sync()
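The loop above fills a variable-length command: a CIS count followed by one {ACL handle, CIS handle} pair per CIS, sized with struct_size(). A self-contained sketch of building such a buffer, using hypothetical stand-in types rather than the kernel's command structs:

/* Illustrative sketch: count plus flexible array of handle pairs. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct cis_pair {
	uint16_t acl_handle;
	uint16_t cis_handle;
};

struct create_cis_cmd {
	uint8_t num_cis;
	struct cis_pair cis[];
};

static struct create_cis_cmd *build_create_cis(const struct cis_pair *pairs,
					       uint8_t n)
{
	/* Userspace equivalent of struct_size(cmd, cis, n): header + array */
	struct create_cis_cmd *cmd =
		malloc(sizeof(*cmd) + (size_t)n * sizeof(pairs[0]));

	if (!cmd)
		return NULL;

	cmd->num_cis = n;
	memcpy(cmd->cis, pairs, (size_t)n * sizeof(pairs[0]));
	return cmd;
}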
6673 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); in hci_get_random_address()
6679 bacpy(rand_addr, &hdev->rpa); in hci_get_random_address()
6685 * use a non-resolvable private address. This is useful for in hci_get_random_address()
6686 * non-connectable advertising. in hci_get_random_address()
6692 /* The non-resolvable private address is generated in hci_get_random_address()
6699 /* The non-resolvable private address shall not be in hci_get_random_address()
6702 if (bacmp(&hdev->bdaddr, &nrpa)) in hci_get_random_address()
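The comments above describe the non-resolvable private address rules: its two most significant bits are cleared, and it must not match the controller's public address. A hedged stand-alone sketch of that generation (not the kernel helper itself), assuming the usual little-endian address storage where b[5] is the most significant byte:

/* Illustrative sketch of generating a non-resolvable private address. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct { uint8_t b[6]; } bdaddr;

static int make_nrpa(bdaddr *nrpa, const bdaddr *public_addr)
{
	for (int i = 0; i < 6; i++)
		nrpa->b[i] = (uint8_t)rand();	/* placeholder RNG */

	nrpa->b[5] &= 0x3f;	/* clear the two top bits: non-resolvable */

	/* Reject the (unlikely) collision with the public address */
	return memcmp(nrpa->b, public_addr->b, sizeof(nrpa->b)) ? 0 : -1;
}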
6739 return -ECANCELED; in hci_acl_create_conn_sync()
6749 if (test_bit(HCI_INQUIRY, &hdev->flags)) { in hci_acl_create_conn_sync()
6756 conn->state = BT_CONNECT; in hci_acl_create_conn_sync()
6757 conn->out = true; in hci_acl_create_conn_sync()
6758 conn->role = HCI_ROLE_MASTER; in hci_acl_create_conn_sync()
6760 conn->attempt++; in hci_acl_create_conn_sync()
6762 conn->link_policy = hdev->link_policy; in hci_acl_create_conn_sync()
6765 bacpy(&cp.bdaddr, &conn->dst); in hci_acl_create_conn_sync()
6768 ie = hci_inquiry_cache_lookup(hdev, &conn->dst); in hci_acl_create_conn_sync()
6771 cp.pscan_rep_mode = ie->data.pscan_rep_mode; in hci_acl_create_conn_sync()
6772 cp.pscan_mode = ie->data.pscan_mode; in hci_acl_create_conn_sync()
6773 cp.clock_offset = ie->data.clock_offset | in hci_acl_create_conn_sync()
6777 memcpy(conn->dev_class, ie->data.dev_class, 3); in hci_acl_create_conn_sync()
6780 cp.pkt_type = cpu_to_le16(conn->pkt_type); in hci_acl_create_conn_sync()
6781 if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) in hci_acl_create_conn_sync()
6789 conn->conn_timeout, NULL); in hci_acl_create_conn_sync()
6804 if (err == -ECANCELED) in create_le_conn_complete()
6822 flush_delayed_work(&conn->le_conn_timeout); in create_le_conn_complete()
6837 if (conn->state != BT_OPEN) in hci_cancel_connect_sync()
6838 return -EINVAL; in hci_cancel_connect_sync()
6840 switch (conn->type) { in hci_cancel_connect_sync()
6850 return -ENOENT; in hci_cancel_connect_sync()
6859 cp.handle = cpu_to_le16(conn->handle); in hci_le_conn_update_sync()
6860 cp.conn_interval_min = cpu_to_le16(params->conn_min_interval); in hci_le_conn_update_sync()
6861 cp.conn_interval_max = cpu_to_le16(params->conn_max_interval); in hci_le_conn_update_sync()
6862 cp.conn_latency = cpu_to_le16(params->conn_latency); in hci_le_conn_update_sync()
6863 cp.supervision_timeout = cpu_to_le16(params->supervision_timeout); in hci_le_conn_update_sync()
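The update parameters above are subject to the usual constraint that the supervision timeout exceeds (1 + latency) * interval_max * 2. A small sanity-check sketch, assuming standard HCI units (interval in 1.25 ms steps, timeout in 10 ms steps):

/* Illustrative check of the LE connection-parameter timeout rule. */
#include <stdbool.h>
#include <stdint.h>

static bool le_conn_params_valid(uint16_t interval_max, uint16_t latency,
				 uint16_t timeout)
{
	/* Compare in 0.25 ms units to stay in integer arithmetic:
	 *   timeout * 10 ms  >  (1 + latency) * interval_max * 1.25 ms * 2
	 *   timeout * 40     >  (1 + latency) * interval_max * 10
	 */
	uint32_t timeout_q = (uint32_t)timeout * 40;
	uint32_t floor_q = ((uint32_t)latency + 1) * interval_max * 10;

	return timeout_q > floor_q;
}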