/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
   Copyright 2023-2024 NXP

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/errqueue.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/iso.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "eir.h"

struct sco_param {
	u16 pkt_type;
	u16 max_latency;
	u8  retrans_effort;
};

struct conn_handle_t {
	struct hci_conn *conn;
	__u16 handle;
};

static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007, 0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff, 0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff, 0x01 }, /* D0 */
};

static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff, 0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff, 0xff }, /* D0 */
};

static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008, 0x02 }, /* T1 */
};
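
/* The tables above encode the alternate settings of the HFP
 * specification: S1-S3 for CVSD over eSCO, T1/T2 for mSBC and D0/D1
 * for legacy SCO. conn->attempt serves as a 1-based index into them,
 * so each failed setup attempt falls back to the next, more
 * conservative entry; an illustrative lookup, as done by the setup
 * helpers further below:
 *
 *	param = &esco_param_cvsd[conn->attempt - 1];	(S3 on attempt 1)
 */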

/* This function requires the caller holds hdev->lock */
void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status)
{
	struct hci_conn_params *params;
	struct hci_dev *hdev = conn->hdev;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
					   bdaddr_type);
	if (!params)
		return;

	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	if (!params->explicit_connect)
		return;

	/* If the status indicates successful cancellation of
	 * the attempt (i.e. Unknown Connection Id) there's no point in
	 * notifying failure since we'll go back to keep trying to
	 * connect. The only exception is explicit connect requests
	 * where a timeout + cancel does indicate an actual failure.
	 */
	if (status && status != HCI_ERROR_UNKNOWN_CONN_ID)
		mgmt_connect_failed(hdev, conn, status);

	/* The connection attempt was doing a scan for a new RPA, and is
	 * in the scan phase. If params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just
	 * unmark them as waiting for connection, by clearing the
	 * explicit_connect field.
	 */
	params->explicit_connect = false;

	hci_pend_le_list_del_init(params);

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_EXPLICIT:
		hci_conn_params_del(hdev, bdaddr, bdaddr_type);
		/* return instead of break to avoid duplicate scan update */
		return;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		hci_pend_le_list_add(params, &hdev->pend_le_reports);
		break;
	default:
		break;
	}

	hci_update_passive_scan(hdev);
}

static void hci_conn_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	hci_chan_list_flush(conn);

	hci_conn_hash_del(hdev, conn);

	if (HCI_CONN_HANDLE_UNSET(conn->handle))
		ida_free(&hdev->unset_handle_ida, conn->handle);

	if (conn->cleanup)
		conn->cleanup(conn);

	if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
		case SCO_AIRMODE_TRANSP:
			if (hdev->notify)
				hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO);
			break;
		}
	} else {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
	}

	debugfs_remove_recursive(conn->debugfs);

	hci_conn_del_sysfs(conn);

	hci_dev_put(hdev);
}

int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	BT_DBG("hcon %p", conn);

	/* When we are the central of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset. Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
	    (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	return hci_abort_conn(conn, reason);
}

static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

static bool find_next_esco_param(struct hci_conn *conn,
				 const struct sco_param *esco_param, int size)
{
	if (!conn->parent)
		return false;

	for (; conn->attempt <= size; conn->attempt++) {
		if (lmp_esco_2m_capable(conn->parent) ||
		    (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3))
			break;
		BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported",
		       conn, conn->attempt);
	}

	return conn->attempt <= size;
}

static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)
{
	int err;
	__u8 vnd_len, *vnd_data = NULL;
	struct hci_op_configure_data_path *cmd = NULL;

	/* Do not treat the two checks below as errors: the first means the
	 * user does not want to use HFP offload mode, and the second means
	 * the vendor controller does not need the HCI command below for
	 * offload mode.
	 */
	if (!codec->data_path || !hdev->get_codec_config_data)
		return 0;

	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len,
					  &vnd_data);
	if (err < 0)
		goto error;

	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL);
	if (!cmd) {
		err = -ENOMEM;
		goto error;
	}

	err = hdev->get_data_path_id(hdev, &cmd->data_path_id);
	if (err < 0)
		goto error;

	cmd->vnd_len = vnd_len;
	memcpy(cmd->vnd_data, vnd_data, vnd_len);

	cmd->direction = 0x00;
	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT);

	cmd->direction = 0x01;
	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH,
				    sizeof(*cmd) + vnd_len, cmd,
				    HCI_CMD_TIMEOUT);
error:
	kfree(cmd);
	kfree(vnd_data);
	return err;
}
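
/* The coding format IDs programmed below follow the HCI coding formats
 * from the Bluetooth Assigned Numbers: 0x02 = CVSD, 0x03 = Transparent,
 * 0x04 = Linear PCM, 0x05 = mSBC.
 */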

static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
{
	struct conn_handle_t *conn_handle = data;
	struct hci_conn *conn = conn_handle->conn;
	__u16 handle = conn_handle->handle;
	struct hci_cp_enhanced_setup_sync_conn cp;
	const struct sco_param *param;

	kfree(conn_handle);

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	bt_dev_dbg(hdev, "hcon %p", conn);

	configure_datapath_sync(hdev, &conn->codec);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	memset(&cp, 0x00, sizeof(cp));

	cp.handle = cpu_to_le16(handle);

	cp.tx_bandwidth = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = cpu_to_le32(0x00001f40);

	switch (conn->codec.id) {
	case BT_CODEC_MSBC:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return -EINVAL;

		param = &esco_param_msbc[conn->attempt - 1];
		cp.tx_coding_format.id = 0x05;
		cp.rx_coding_format.id = 0x05;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(32000);
		cp.out_bandwidth = __cpu_to_le32(32000);
		cp.in_coding_format.id = 0x04;
		cp.out_coding_format.id = 0x04;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 1;
		cp.out_transport_unit_size = 1;
		break;

	case BT_CODEC_TRANSPARENT:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return -EINVAL;

		param = &esco_param_msbc[conn->attempt - 1];
		cp.tx_coding_format.id = 0x03;
		cp.rx_coding_format.id = 0x03;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(0x1f40);
		cp.out_bandwidth = __cpu_to_le32(0x1f40);
		cp.in_coding_format.id = 0x03;
		cp.out_coding_format.id = 0x03;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 1;
		cp.out_transport_unit_size = 1;
		break;

	case BT_CODEC_CVSD:
		if (conn->parent && lmp_esco_capable(conn->parent)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return -EINVAL;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return -EINVAL;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		cp.tx_coding_format.id = 2;
		cp.rx_coding_format.id = 2;
		cp.tx_codec_frame_size = __cpu_to_le16(60);
		cp.rx_codec_frame_size = __cpu_to_le16(60);
		cp.in_bandwidth = __cpu_to_le32(16000);
		cp.out_bandwidth = __cpu_to_le32(16000);
		cp.in_coding_format.id = 4;
		cp.out_coding_format.id = 4;
		cp.in_coded_data_size = __cpu_to_le16(16);
		cp.out_coded_data_size = __cpu_to_le16(16);
		cp.in_pcm_data_format = 2;
		cp.out_pcm_data_format = 2;
		cp.in_pcm_sample_payload_msb_pos = 0;
		cp.out_pcm_sample_payload_msb_pos = 0;
		cp.in_data_path = conn->codec.data_path;
		cp.out_data_path = conn->codec.data_path;
		cp.in_transport_unit_size = 16;
		cp.out_transport_unit_size = 16;
		break;
	default:
		return -EINVAL;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return -EIO;

	return 0;
}

static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	bt_dev_dbg(hdev, "hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);

	cp.tx_bandwidth = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = cpu_to_le32(0x00001f40);
	cp.voice_setting = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (!find_next_esco_param(conn, esco_param_msbc,
					  ARRAY_SIZE(esco_param_msbc)))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		if (conn->parent && lmp_esco_capable(conn->parent)) {
			if (!find_next_esco_param(conn, esco_param_cvsd,
						  ARRAY_SIZE(esco_param_cvsd)))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}

bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	int result;
	struct conn_handle_t *conn_handle;

	if (enhanced_sync_conn_capable(conn->hdev)) {
		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL);

		if (!conn_handle)
			return false;

		conn_handle->conn = conn;
		conn_handle->handle = handle;
		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync,
					    conn_handle, NULL);
		if (result < 0)
			kfree(conn_handle);

		return result == 0;
	}

	return hci_setup_sync_conn(conn, handle);
}

u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.conn_interval_min = cpu_to_le16(min);
	cp.conn_interval_max = cpu_to_le16(max);
	cp.conn_latency = cpu_to_le16(latency);
	cp.supervision_timeout = cpu_to_le16(to_multiplier);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}

void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.rand = rand;
	cp.ediv = ediv;
	memcpy(cp.ltk, ltk, key_size);

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_link *link;

	link = list_first_entry_or_null(&conn->link_list, struct hci_link, list);
	if (!link || !link->conn)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(link->conn, conn->handle);
		else
			hci_add_sco(link->conn, conn->handle);
	} else {
		hci_connect_cfm(link->conn, status);
		hci_conn_del(link->conn);
	}
}
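
/* Refcounting sketch (illustrative): users pin a connection with
 * hci_conn_hold() and release it with hci_conn_drop(); dropping the
 * last reference arms disc_work, so hci_conn_timeout() below only
 * tears the link down once no user is left:
 *
 *	hci_conn_hold(conn);	// refcnt++, pending disconnect cancelled
 *	...use conn...
 *	hci_conn_drop(conn);	// refcnt--, disconnect timer (re)armed
 */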

static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher
	 * than 0, otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}

/* Enter sniff mode */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.max_latency = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt = cpu_to_le16(4);
		cp.timeout = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

static void hci_conn_auto_accept(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     auto_accept_work.work);

	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}

static void le_disable_advertising(struct hci_dev *hdev)
{
	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_enable cp;

		cp.enable = 0x00;
		cp.num_of_sets = 0x00;

		hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
			     &cp);
	} else {
		u8 enable = 0x00;

		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			     &enable);
	}
}

static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* Disable LE Advertising */
		le_disable_advertising(hdev);
		hci_dev_lock(hdev);
		hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		hci_dev_unlock(hdev);
		return;
	}

	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}

struct iso_list_data {
	union {
		u8  cig;
		u8  big;
	};
	union {
		u8  cis;
		u8  bis;
		u16 sync_handle;
	};
	int count;
	bool big_term;
	bool pa_sync_term;
	bool big_sync_term;
};

static void bis_list(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if not broadcast/ANY address */
	if (bacmp(&conn->dst, BDADDR_ANY))
		return;

	if (d->big != conn->iso_qos.bcast.big ||
	    d->bis == BT_ISO_QOS_BIS_UNSET ||
	    d->bis != conn->iso_qos.bcast.bis)
		return;

	d->count++;
}

static int terminate_big_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis);

	hci_disable_per_advertising_sync(hdev, d->bis);
	hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL);

	/* Only terminate BIG if it has been created */
	if (!d->big_term)
		return 0;

	return hci_le_terminate_big_sync(hdev, d->big,
					 HCI_ERROR_LOCAL_HOST_TERM);
}

static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}

static int hci_le_terminate_big(struct hci_dev *hdev, struct hci_conn *conn)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", conn->iso_qos.bcast.big,
		   conn->iso_qos.bcast.bis);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = conn->iso_qos.bcast.big;
	d->bis = conn->iso_qos.bcast.bis;
	d->big_term = test_and_clear_bit(HCI_CONN_BIG_CREATED, &conn->flags);

	ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}

static int big_terminate_sync(struct hci_dev *hdev, void *data)
{
	struct iso_list_data *d = data;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big,
		   d->sync_handle);

	if (d->big_sync_term)
		hci_le_big_terminate_sync(hdev, d->big);

	if (d->pa_sync_term)
		return hci_le_pa_terminate_sync(hdev, d->sync_handle);

	return 0;
}

static void find_bis(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Ignore if BIG doesn't match */
	if (d->big != conn->iso_qos.bcast.big)
		return;

	d->count++;
}

static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *conn)
{
	struct iso_list_data *d;
	int ret;

	bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, conn->sync_handle);

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->big = big;
	d->sync_handle = conn->sync_handle;

	if (test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags)) {
		hci_conn_hash_list_flag(hdev, find_bis, BIS_LINK,
					HCI_CONN_PA_SYNC, d);

		if (!d->count)
			d->pa_sync_term = true;

		d->count = 0;
	}

	if (test_and_clear_bit(HCI_CONN_BIG_SYNC, &conn->flags)) {
		hci_conn_hash_list_flag(hdev, find_bis, BIS_LINK,
					HCI_CONN_BIG_SYNC, d);

		if (!d->count)
			d->big_sync_term = true;
	}

	ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
				 terminate_big_destroy);
	if (ret)
		kfree(d);

	return ret;
}

/* Cleanup BIS connection
 *
 * Detects if there are any BIS left connected in a BIG
 * broadcaster: Remove advertising instance and terminate BIG.
 * broadcast receiver: Terminate BIG sync and terminate PA sync.
 */
static void bis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn *bis;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (conn->role == HCI_ROLE_MASTER) {
		if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags))
			return;

		/* Check if ISO connection is a BIS and terminate advertising
		 * set and BIG if there are no other connections using it.
		 */
		bis = hci_conn_hash_lookup_big(hdev, conn->iso_qos.bcast.big);
		if (bis)
			return;

		hci_le_terminate_big(hdev, conn);
	} else {
		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
				     conn);
	}
}

static int remove_cig_sync(struct hci_dev *hdev, void *data)
{
	u8 handle = PTR_UINT(data);

	return hci_le_remove_cig_sync(hdev, handle);
}

static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle)
{
	bt_dev_dbg(hdev, "handle 0x%2.2x", handle);

	return hci_cmd_sync_queue(hdev, remove_cig_sync, UINT_PTR(handle),
				  NULL);
}

static void find_cis(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Ignore broadcast or if the CIG doesn't match */
	if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
		return;

	d->count++;
}

/* Cleanup CIS connection:
 *
 * Detects if there are any CIS left connected in a CIG and removes the
 * CIG if there are none.
 */
static void cis_cleanup(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data d;

	if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
		return;

	memset(&d, 0, sizeof(d));
	d.cig = conn->iso_qos.ucast.cig;

	/* Check if ISO connection is a CIS and remove CIG if there are
	 * no other connections using it.
	 */
	hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_BOUND, &d);
	hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_CONNECT, &d);
	hci_conn_hash_list_state(hdev, find_cis, CIS_LINK, BT_CONNECTED, &d);
	if (d.count)
		return;

	hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig);
}

static int hci_conn_hash_alloc_unset(struct hci_dev *hdev)
{
	return ida_alloc_range(&hdev->unset_handle_ida, HCI_CONN_HANDLE_MAX + 1,
			       U16_MAX, GFP_ATOMIC);
}
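
/* Handle note: until the controller assigns a real handle (at most
 * HCI_CONN_HANDLE_MAX), a new connection carries a placeholder taken
 * from unset_handle_ida in the [HCI_CONN_HANDLE_MAX + 1, U16_MAX]
 * range; HCI_CONN_HANDLE_UNSET() detects it, and the placeholder is
 * released again in hci_conn_cleanup() and hci_conn_set_handle().
 */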

static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
				       u8 role, u16 handle)
{
	struct hci_conn *conn;

	switch (type) {
	case ACL_LINK:
		if (!hdev->acl_mtu)
			return ERR_PTR(-ECONNREFUSED);
		break;
	case CIS_LINK:
	case BIS_LINK:
		if (hdev->iso_mtu)
			/* Dedicated ISO Buffer exists */
			break;
		fallthrough;
	case LE_LINK:
		if (hdev->le_mtu && hdev->le_mtu < HCI_MIN_LE_MTU)
			return ERR_PTR(-ECONNREFUSED);
		if (!hdev->le_mtu && hdev->acl_mtu < HCI_MIN_LE_MTU)
			return ERR_PTR(-ECONNREFUSED);
		break;
	case SCO_LINK:
	case ESCO_LINK:
		if (!hdev->sco_pkts)
			/* Controller does not support SCO or eSCO over HCI */
			return ERR_PTR(-ECONNREFUSED);
		break;
	default:
		return ERR_PTR(-ECONNREFUSED);
	}

	bt_dev_dbg(hdev, "dst %pMR handle 0x%4.4x", dst, handle);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->handle = handle;
	conn->hdev = hdev;
	conn->type = type;
	conn->role = role;
	conn->mode = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;
	conn->sync_handle = HCI_SYNC_HANDLE_INVALID;
	conn->sid = HCI_SID_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Set Default Authenticated payload timeout to 30s */
	conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		conn->mtu = hdev->acl_mtu;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		conn->mtu = hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
		break;
	case CIS_LINK:
	case BIS_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);

		/* set proper cleanup function */
		if (!bacmp(dst, BDADDR_ANY))
			conn->cleanup = bis_cleanup;
		else if (conn->role == HCI_ROLE_MASTER)
			conn->cleanup = cis_cleanup;

		conn->mtu = hdev->iso_mtu ? hdev->iso_mtu :
			    hdev->le_mtu ? hdev->le_mtu : hdev->acl_mtu;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;

		conn->mtu = hdev->sco_mtu;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		conn->mtu = hdev->sco_mtu;
		break;
	}

	skb_queue_head_init(&conn->data_q);
	skb_queue_head_init(&conn->tx_q.queue);

	INIT_LIST_HEAD(&conn->chan_list);
	INIT_LIST_HEAD(&conn->link_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);

	/* The SCO and eSCO connections will only be notified when their
	 * setup has been completed. This is different from ACL links which
	 * can be notified right away.
	 */
	if (conn->type != SCO_LINK && conn->type != ESCO_LINK) {
		if (hdev->notify)
			hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
	}

	hci_conn_init_sysfs(conn);

	return conn;
}

struct hci_conn *hci_conn_add_unset(struct hci_dev *hdev, int type,
				    bdaddr_t *dst, u8 role)
{
	int handle;

	bt_dev_dbg(hdev, "dst %pMR", dst);

	handle = hci_conn_hash_alloc_unset(hdev);
	if (unlikely(handle < 0))
		return ERR_PTR(-ECONNREFUSED);

	return __hci_conn_add(hdev, type, dst, role, handle);
}

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role, u16 handle)
{
	if (handle > HCI_CONN_HANDLE_MAX)
		return ERR_PTR(-EINVAL);

	return __hci_conn_add(hdev, type, dst, role, handle);
}

static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
{
	if (!reason)
		reason = HCI_ERROR_REMOTE_USER_TERM;

	/* Due to race, SCO/ISO conn might be not established yet at this point,
	 * and nothing else will clean it up. In other cases it is done via HCI
	 * events.
	 */
	switch (conn->type) {
	case SCO_LINK:
	case ESCO_LINK:
		if (HCI_CONN_HANDLE_UNSET(conn->handle))
			hci_conn_failed(conn, reason);
		break;
	case CIS_LINK:
	case BIS_LINK:
		if ((conn->state != BT_CONNECTED &&
		     !test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) ||
		    test_bit(HCI_CONN_BIG_CREATED, &conn->flags))
			hci_conn_failed(conn, reason);
		break;
	}
}
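
/* Link topology note: an ACL or LE connection acts as the parent and
 * keeps its SCO/eSCO or CIS/BIS children on conn->link_list through
 * struct hci_link, while each child points back via conn->parent.
 * hci_conn_unlink() below walks both directions of that relationship.
 */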

static void hci_conn_unlink(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p", conn);

	if (!conn->parent) {
		struct hci_link *link, *t;

		list_for_each_entry_safe(link, t, &conn->link_list, list) {
			struct hci_conn *child = link->conn;

			hci_conn_unlink(child);

			/* If hdev is down it means
			 * hci_dev_close_sync/hci_conn_hash_flush is in
			 * progress and links don't need to be cleaned up
			 * as all connections will be cleaned up anyway.
			 */
			if (!test_bit(HCI_UP, &hdev->flags))
				continue;

			hci_conn_cleanup_child(child, conn->abort_reason);
		}

		return;
	}

	if (!conn->link)
		return;

	list_del_rcu(&conn->link->list);
	synchronize_rcu();

	hci_conn_drop(conn->parent);
	hci_conn_put(conn->parent);
	conn->parent = NULL;

	kfree(conn->link);
	conn->link = NULL;
}

void hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	hci_conn_unlink(conn);

	disable_delayed_work_sync(&conn->disc_work);
	disable_delayed_work_sync(&conn->auto_accept_work);
	disable_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* Unacked ISO frames */
		if (conn->type == CIS_LINK ||
		    conn->type == BIS_LINK) {
			if (hdev->iso_pkts)
				hdev->iso_cnt += conn->sent;
			else if (hdev->le_pkts)
				hdev->le_cnt += conn->sent;
			else
				hdev->acl_cnt += conn->sent;
		}
	}

	skb_queue_purge(&conn->data_q);
	skb_queue_purge(&conn->tx_q.queue);

	/* Remove the connection from the list and cleanup its remaining
	 * state. This is a separate function since for some cases like
	 * BT_CONNECT_SCAN we *only* want the cleanup part without the
	 * rest of hci_conn_del.
	 */
	hci_conn_cleanup(conn);

	/* Dequeue callbacks using connection pointer as data */
	hci_cmd_sync_dequeue(hdev, NULL, conn, NULL);
}

struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			bdaddr_t id_addr;
			u8 id_addr_type;

			if (src_type == BDADDR_BREDR) {
				if (!lmp_bredr_capable(d))
					continue;
				bacpy(&id_addr, &d->bdaddr);
				id_addr_type = BDADDR_BREDR;
			} else {
				if (!lmp_le_capable(d))
					continue;

				hci_copy_identity_address(d, &id_addr,
							  &id_addr_type);

				/* Convert from HCI to three-value type */
				if (id_addr_type == ADDR_LE_DEV_PUBLIC)
					id_addr_type = BDADDR_LE_PUBLIC;
				else
					id_addr_type = BDADDR_LE_RANDOM;
			}

			if (!bacmp(&id_addr, src) && id_addr_type == src_type) {
				hdev = d;
				break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d;
				break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* This function requires the caller holds hdev->lock */
static void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	hci_connect_le_scan_cleanup(conn, status);

	/* Enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	hci_enable_advertising(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	switch (conn->type) {
	case LE_LINK:
		hci_le_conn_failed(conn, status);
		break;
	case ACL_LINK:
		mgmt_connect_failed(hdev, conn, status);
		break;
	}

	/* In case of BIG/PA sync failed, clear conn flags so that
	 * the conns will be correctly cleaned up by the ISO layer
	 */
	test_and_clear_bit(HCI_CONN_BIG_SYNC_FAILED, &conn->flags);
	test_and_clear_bit(HCI_CONN_PA_SYNC_FAILED, &conn->flags);

	conn->state = BT_CLOSED;
	hci_connect_cfm(conn, status);
	hci_conn_del(conn);
}

/* This function requires the caller holds hdev->lock */
u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
{
	struct hci_dev *hdev = conn->hdev;

	bt_dev_dbg(hdev, "hcon %p handle 0x%4.4x", conn, handle);

	if (conn->handle == handle)
		return 0;

	if (handle > HCI_CONN_HANDLE_MAX) {
		bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
			   handle, HCI_CONN_HANDLE_MAX);
		return HCI_ERROR_INVALID_PARAMETERS;
	}

	/* If abort_reason has been set it means the connection is being
	 * aborted and the handle shall not be changed.
	 */
	if (conn->abort_reason)
		return conn->abort_reason;

	if (HCI_CONN_HANDLE_UNSET(conn->handle))
		ida_free(&hdev->unset_handle_ida, conn->handle);

	conn->handle = handle;

	return 0;
}
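
/* Illustrative call as central (hypothetical values; hci_connect_cis()
 * below shows the in-tree peripheral-role variant):
 *
 *	conn = hci_connect_le(hdev, dst, dst_type, false, BT_SECURITY_LOW,
 *			      HCI_LE_CONN_TIMEOUT, HCI_ROLE_MASTER, 0, 0);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 */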

struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, bool dst_resolved, u8 sec_level,
				u16 conn_timeout, u8 role, u8 phy, u8 sec_phy)
{
	struct hci_conn *conn;
	struct smp_irk *irk;
	int err;

	/* Let's make sure that LE is enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	if (hci_lookup_le_connect(hdev))
		return ERR_PTR(-EBUSY);

	/* If there's already a connection object but it's not in
	 * scanning state it means it must already be established, in
	 * which case we can't do anything else except report a failure
	 * to connect.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags))
		return ERR_PTR(-EBUSY);

	/* Check if the destination address has been resolved by the controller
	 * since if it did then the identity address shall be used.
	 */
	if (!dst_resolved) {
		/* When given an identity address with existing identity
		 * resolving key, the connection needs to be established
		 * to a resolvable random address.
		 *
		 * Storing the resolvable random address is required here
		 * to handle connection failures. The address will later
		 * be resolved back into the original identity address
		 * from the connect request.
		 */
		irk = hci_find_irk_by_addr(hdev, dst, dst_type);
		if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
			dst = &irk->rpa;
			dst_type = ADDR_LE_DEV_RANDOM;
		}
	}

	if (conn) {
		bacpy(&conn->dst, dst);
	} else {
		conn = hci_conn_add_unset(hdev, LE_LINK, dst, role);
		if (IS_ERR(conn))
			return conn;
		hci_conn_hold(conn);
		conn->pending_sec_level = sec_level;
	}

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->conn_timeout = conn_timeout;
	conn->le_adv_phy = phy;
	conn->le_adv_sec_phy = sec_phy;

	err = hci_connect_le_sync(hdev, conn);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

	return conn;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, addr, type);
	if (!conn)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
static int hci_explicit_conn_params_set(struct hci_dev *hdev,
					bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (is_connected(hdev, addr, addr_type))
		return -EISCONN;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params) {
		params = hci_conn_params_add(hdev, addr, addr_type);
		if (!params)
			return -ENOMEM;

		/* If we created new params, mark them to be deleted in
		 * hci_connect_le_scan_cleanup. It's a different case from
		 * existing disabled params, those will stay after cleanup.
		 */
		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
	}

	/* We're trying to connect, so make sure params are at pend_le_conns */
	if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
	    params->auto_connect == HCI_AUTO_CONN_REPORT ||
	    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
		hci_pend_le_list_del_init(params);
		hci_pend_le_list_add(params, &hdev->pend_le_conns);
	}

	params->explicit_connect = true;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       params->auto_connect);

	return 0;
}

static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos)
{
	struct hci_conn *conn;
	u8 big;

	/* Allocate a BIG if not set */
	if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) {
		for (big = 0x00; big < 0xef; big++) {
			conn = hci_conn_hash_lookup_big(hdev, big);
			if (!conn)
				break;
		}

		if (big == 0xef)
			return -EADDRNOTAVAIL;

		/* Update BIG */
		qos->bcast.big = big;
	}

	return 0;
}
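
/* Allocation note: BIG handles are limited to 0x00-0xEF, so
 * qos_set_big() above simply picks the first value with no existing
 * connection; qos_set_bis() below does the same for the advertising
 * set that will carry the BIS, skipping instance 0x00 which is
 * reserved as the general purpose set.
 */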

static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos)
{
	struct hci_conn *conn;
	u8 bis;

	/* Allocate BIS if not set */
	if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) {
		if (qos->bcast.big != BT_ISO_QOS_BIG_UNSET) {
			conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);

			if (conn) {
				/* If the BIG handle is already matched to an
				 * advertising handle, do not allocate a new
				 * one.
				 */
				qos->bcast.bis = conn->iso_qos.bcast.bis;
				return 0;
			}
		}

		/* Find an unused adv set to advertise BIS, skip instance 0x00
		 * since it is reserved as the general purpose set.
		 */
		for (bis = 0x01; bis < hdev->le_num_of_adv_sets;
		     bis++) {
			conn = hci_conn_hash_lookup_bis(hdev, BDADDR_ANY, bis);
			if (!conn)
				break;
		}

		if (bis == hdev->le_num_of_adv_sets)
			return -EADDRNOTAVAIL;

		/* Update BIS */
		qos->bcast.bis = bis;
	}

	return 0;
}

/* This function requires the caller holds hdev->lock */
static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst,
				    struct bt_iso_qos *qos, __u8 base_len,
				    __u8 *base)
{
	struct hci_conn *conn;
	int err;

	/* Let's make sure that LE is enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);
		return ERR_PTR(-EOPNOTSUPP);
	}

	err = qos_set_big(hdev, qos);
	if (err)
		return ERR_PTR(err);

	err = qos_set_bis(hdev, qos);
	if (err)
		return ERR_PTR(err);

	/* Check if the LE Create BIG command has already been sent */
	conn = hci_conn_hash_lookup_per_adv_bis(hdev, dst, qos->bcast.big,
						qos->bcast.big);
	if (conn)
		return ERR_PTR(-EADDRINUSE);

	/* Check BIS settings against other bound BISes, since all
	 * BISes in a BIG must have the same value for all parameters
	 */
	conn = hci_conn_hash_lookup_big(hdev, qos->bcast.big);

	if (conn && (memcmp(qos, &conn->iso_qos, sizeof(*qos)) ||
		     base_len != conn->le_per_adv_data_len ||
		     memcmp(conn->le_per_adv_data, base, base_len)))
		return ERR_PTR(-EADDRINUSE);

	conn = hci_conn_add_unset(hdev, BIS_LINK, dst, HCI_ROLE_MASTER);
	if (IS_ERR(conn))
		return conn;

	conn->state = BT_CONNECT;

	hci_conn_hold(conn);
	return conn;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
				     u8 dst_type, u8 sec_level,
				     u16 conn_timeout,
				     enum conn_reasons conn_reason)
{
	struct hci_conn *conn;

	/* Let's make sure that LE is enabled. */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
	if (conn) {
		if (conn->pending_sec_level < sec_level)
			conn->pending_sec_level = sec_level;
		goto done;
	}

	BT_DBG("requesting refresh of dst_addr");

	conn = hci_conn_add_unset(hdev, LE_LINK, dst, HCI_ROLE_MASTER);
	if (IS_ERR(conn))
		return conn;

	if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) {
		hci_conn_del(conn);
		return ERR_PTR(-EBUSY);
	}

	conn->state = BT_CONNECT;
	set_bit(HCI_CONN_SCANNING, &conn->flags);
	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;
	conn->conn_reason = conn_reason;

	hci_update_passive_scan(hdev);

done:
	hci_conn_hold(conn);
	return conn;
}

struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type,
				 enum conn_reasons conn_reason, u16 timeout)
{
	struct hci_conn *acl;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (lmp_bredr_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Reject outgoing connection to device with same BD ADDR against
	 * CVE-2020-26555
	 */
	if (!bacmp(&hdev->bdaddr, dst)) {
		bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
			   dst);
		return ERR_PTR(-ECONNREFUSED);
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add_unset(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (IS_ERR(acl))
			return acl;
	}

	hci_conn_hold(acl);

	acl->conn_reason = conn_reason;
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		int err;

		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		acl->conn_timeout = timeout;

		err = hci_connect_acl_sync(hdev, acl);
		if (err) {
			hci_conn_del(acl);
			return ERR_PTR(err);
		}
	}

	return acl;
}

static struct hci_link *hci_conn_link(struct hci_conn *parent,
				      struct hci_conn *conn)
{
	struct hci_dev *hdev = parent->hdev;
	struct hci_link *link;

	bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn);

	if (conn->link)
		return conn->link;

	if (conn->parent)
		return NULL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return NULL;

	link->conn = hci_conn_hold(conn);
	conn->link = link;
	conn->parent = hci_conn_get(parent);

	/* Use list_add_tail_rcu append to the list */
	list_add_tail_rcu(&link->list, &parent->link_list);

	return link;
}
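
/* Illustrative flow (sketch): hci_connect_sco() below is typically
 * reached from the SCO socket layer; the ACL is created (or reused)
 * first and the SCO/eSCO child is linked under it, e.g.:
 *
 *	sco = hci_connect_sco(hdev, ESCO_LINK, dst, setting, &codec,
 *			      timeout);
 */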

struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting, struct bt_codec *codec,
				 u16 timeout)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_link *link;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING,
			      CONN_REASON_SCO_CONNECT, timeout);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add_unset(hdev, type, dst, HCI_ROLE_MASTER);
		if (IS_ERR(sco)) {
			hci_conn_drop(acl);
			return sco;
		}
	}

	link = hci_conn_link(acl, sco);
	if (!link) {
		hci_conn_drop(acl);
		hci_conn_drop(sco);
		return ERR_PTR(-ENOLINK);
	}

	sco->setting = setting;
	sco->codec = *codec;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}

static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_big cp;
	struct iso_list_data data;

	memset(&cp, 0, sizeof(cp));

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;
	data.count = 0;

	/* Create a BIS for each bound connection */
	hci_conn_hash_list_state(hdev, bis_list, BIS_LINK,
				 BT_BOUND, &data);

	cp.handle = qos->bcast.big;
	cp.adv_handle = qos->bcast.bis;
	cp.num_bis = data.count;
	hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval);
	cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu);
	cp.bis.latency = cpu_to_le16(qos->bcast.out.latency);
	cp.bis.rtn = qos->bcast.out.rtn;
	cp.bis.phy = qos->bcast.out.phy;
	cp.bis.packing = qos->bcast.packing;
	cp.bis.framing = qos->bcast.framing;
	cp.bis.encryption = qos->bcast.encryption;
	memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode));

	return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp);
}

static int set_cig_params_sync(struct hci_dev *hdev, void *data)
{
	DEFINE_FLEX(struct hci_cp_le_set_cig_params, pdu, cis, num_cis, 0x1f);
	u8 cig_id = PTR_UINT(data);
	struct hci_conn *conn;
	struct bt_iso_qos *qos;
	u8 aux_num_cis = 0;
	u8 cis_id;

	conn = hci_conn_hash_lookup_cig(hdev, cig_id);
	if (!conn)
		return 0;

	qos = &conn->iso_qos;
	pdu->cig_id = cig_id;
	hci_cpu_to_le24(qos->ucast.out.interval, pdu->c_interval);
	hci_cpu_to_le24(qos->ucast.in.interval, pdu->p_interval);
	pdu->sca = qos->ucast.sca;
	pdu->packing = qos->ucast.packing;
	pdu->framing = qos->ucast.framing;
	pdu->c_latency = cpu_to_le16(qos->ucast.out.latency);
	pdu->p_latency = cpu_to_le16(qos->ucast.in.latency);

	/* Reprogram all CIS(s) with the same CIG, valid ranges are:
	 * num_cis: 0x00 to 0x1F
	 * cis_id: 0x00 to 0xEF
	 */
	for (cis_id = 0x00; cis_id < 0xf0 &&
	     aux_num_cis < pdu->num_cis; cis_id++) {
		struct hci_cis_params *cis;

		conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, cig_id, cis_id);
		if (!conn)
			continue;

		qos = &conn->iso_qos;

		cis = &pdu->cis[aux_num_cis++];
		cis->cis_id = cis_id;
		cis->c_sdu = cpu_to_le16(conn->iso_qos.ucast.out.sdu);
		cis->p_sdu = cpu_to_le16(conn->iso_qos.ucast.in.sdu);
		cis->c_phy = qos->ucast.out.phy ? qos->ucast.out.phy :
			     qos->ucast.in.phy;
		cis->p_phy = qos->ucast.in.phy ? qos->ucast.in.phy :
			     qos->ucast.out.phy;
		cis->c_rtn = qos->ucast.out.rtn;
		cis->p_rtn = qos->ucast.in.rtn;
	}
	pdu->num_cis = aux_num_cis;

	if (!pdu->num_cis)
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS,
				     struct_size(pdu, cis, pdu->num_cis),
				     pdu, HCI_CMD_TIMEOUT);
}

static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
{
	struct hci_dev *hdev = conn->hdev;
	struct iso_list_data data;

	memset(&data, 0, sizeof(data));

	/* Allocate first still reconfigurable CIG if not set */
	if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
		for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
			data.count = 0;

			hci_conn_hash_list_state(hdev, find_cis, CIS_LINK,
						 BT_CONNECT, &data);
			if (data.count)
				continue;

			hci_conn_hash_list_state(hdev, find_cis, CIS_LINK,
						 BT_CONNECTED, &data);
			if (!data.count)
				break;
		}

		if (data.cig == 0xf0)
			return false;

		/* Update CIG */
		qos->ucast.cig = data.cig;
	}

	if (qos->ucast.cis != BT_ISO_QOS_CIS_UNSET) {
		if (hci_conn_hash_lookup_cis(hdev, NULL, 0, qos->ucast.cig,
					     qos->ucast.cis))
			return false;
		goto done;
	}

	/* Allocate first available CIS if not set */
	for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0xf0;
	     data.cis++) {
		if (!hci_conn_hash_lookup_cis(hdev, NULL, 0, data.cig,
					      data.cis)) {
			/* Update CIS */
			qos->ucast.cis = data.cis;
			break;
		}
	}

	if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET)
		return false;

done:
	if (hci_cmd_sync_queue(hdev, set_cig_params_sync,
			       UINT_PTR(qos->ucast.cig), NULL) < 0)
		return false;

	return true;
}
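
/* Typical unicast flow (sketch): the ISO layer first binds a CIS with
 * hci_bind_cis() below, which programs the CIG parameters; the actual
 * LE Create CIS is only queued later, via hci_connect_cis() and
 * hci_le_create_cis_pending().
 */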

struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst,
			      __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *cis;

	cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig,
				       qos->ucast.cis);
	if (!cis) {
		cis = hci_conn_add_unset(hdev, CIS_LINK, dst,
					 HCI_ROLE_MASTER);
		if (IS_ERR(cis))
			return cis;
		cis->cleanup = cis_cleanup;
		cis->dst_type = dst_type;
		cis->iso_qos.ucast.cig = BT_ISO_QOS_CIG_UNSET;
		cis->iso_qos.ucast.cis = BT_ISO_QOS_CIS_UNSET;
	}

	if (cis->state == BT_CONNECTED)
		return cis;

	/* Check if CIS has been set and the settings matches */
	if (cis->state == BT_BOUND &&
	    !memcmp(&cis->iso_qos, qos, sizeof(*qos)))
		return cis;

	/* Update LINK PHYs according to QoS preference */
	cis->le_tx_phy = qos->ucast.out.phy;
	cis->le_rx_phy = qos->ucast.in.phy;

	/* If output interval is not set use the input interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.out.interval)
		qos->ucast.out.interval = qos->ucast.in.interval;

	/* If input interval is not set use the output interval as it cannot be
	 * 0x000000.
	 */
	if (!qos->ucast.in.interval)
		qos->ucast.in.interval = qos->ucast.out.interval;

	/* If output latency is not set use the input latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.out.latency)
		qos->ucast.out.latency = qos->ucast.in.latency;

	/* If input latency is not set use the output latency as it cannot be
	 * 0x0000.
	 */
	if (!qos->ucast.in.latency)
		qos->ucast.in.latency = qos->ucast.out.latency;

	if (!hci_le_set_cig_params(cis, qos)) {
		hci_conn_drop(cis);
		return ERR_PTR(-EINVAL);
	}

	hci_conn_hold(cis);

	cis->iso_qos = *qos;
	cis->state = BT_BOUND;

	return cis;
}

bool hci_iso_setup_path(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_setup_iso_path cmd;

	memset(&cmd, 0, sizeof(cmd));

	if (conn->iso_qos.ucast.out.sdu) {
		cmd.handle = cpu_to_le16(conn->handle);
		cmd.direction = 0x00; /* Input (Host to Controller) */
		cmd.path = 0x00; /* HCI path if enabled */
		cmd.codec = 0x03; /* Transparent Data */

		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
				 &cmd) < 0)
			return false;
	}

	if (conn->iso_qos.ucast.in.sdu) {
		cmd.handle = cpu_to_le16(conn->handle);
		cmd.direction = 0x01; /* Output (Controller to Host) */
		cmd.path = 0x00; /* HCI path if enabled */
		cmd.codec = 0x03; /* Transparent Data */

		if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd),
				 &cmd) < 0)
			return false;
	}

	return true;
}

int hci_conn_check_create_cis(struct hci_conn *conn)
{
	if (conn->type != CIS_LINK)
		return -EINVAL;

	if (!conn->parent || conn->parent->state != BT_CONNECTED ||
	    conn->state != BT_CONNECT || HCI_CONN_HANDLE_UNSET(conn->handle))
		return 1;

	return 0;
}

static int hci_create_cis_sync(struct hci_dev *hdev, void *data)
{
	return hci_le_create_cis_sync(hdev);
}

int hci_le_create_cis_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	bool pending = false;

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) {
			rcu_read_unlock();
			return -EBUSY;
		}

		if (!hci_conn_check_create_cis(conn))
			pending = true;
	}

	rcu_read_unlock();

	if (!pending)
		return 0;

	/* Queue Create CIS */
	return hci_cmd_sync_queue(hdev, hci_create_cis_sync, NULL, NULL);
}

static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn,
			      struct bt_iso_io_qos *qos, __u8 phy)
{
	/* Only set MTU if PHY is enabled */
	if (!qos->sdu && qos->phy)
		qos->sdu = conn->mtu;

	/* Use the same PHY as ACL if set to any */
	if (qos->phy == BT_ISO_PHY_ANY)
		qos->phy = phy;

	/* Use LE ACL connection interval if not set */
	if (!qos->interval)
		/* ACL interval unit in 1.25 ms to us */
		qos->interval = conn->le_conn_interval * 1250;

	/* Use LE ACL connection latency if not set */
	if (!qos->latency)
		qos->latency = conn->le_conn_latency;
}
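
/* Unit note on the interval arithmetic in create_big_sync() below:
 * qos->bcast.out.interval is an SDU interval in microseconds while
 * periodic advertising intervals are expressed in 1.25 ms units, hence
 * the division by 1250; the result is then scaled by sync_factor (and
 * by 4 for the sync interval when a BIS is set).
 */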
static int create_big_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;
	struct bt_iso_qos *qos = &conn->iso_qos;
	u16 interval, sync_interval = 0;
	u32 flags = 0;
	int err;

	if (qos->bcast.out.phy == 0x02)
		flags |= MGMT_ADV_FLAG_SEC_2M;

	/* Align the advertising interval with the SDU interval: convert from
	 * us to units of 1.25 ms and scale by sync_factor.
	 */
	interval = (qos->bcast.out.interval / 1250) * qos->bcast.sync_factor;

	if (qos->bcast.bis)
		sync_interval = interval * 4;

	err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len,
				     conn->le_per_adv_data, flags, interval,
				     interval, sync_interval);
	if (err)
		return err;

	return hci_le_create_big(conn, &conn->iso_qos);
}

struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
				    __u8 dst_type, __u8 sid,
				    struct bt_iso_qos *qos)
{
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "dst %pMR type %d sid %d", dst, dst_type, sid);

	conn = hci_conn_add_unset(hdev, BIS_LINK, dst, HCI_ROLE_SLAVE);
	if (IS_ERR(conn))
		return conn;

	conn->iso_qos = *qos;
	conn->dst_type = dst_type;
	conn->sid = sid;
	conn->state = BT_LISTEN;
	conn->conn_timeout = msecs_to_jiffies(qos->bcast.sync_timeout * 10);

	hci_conn_hold(conn);

	hci_connect_pa_sync(hdev, conn);

	return conn;
}

int hci_conn_big_create_sync(struct hci_dev *hdev, struct hci_conn *hcon,
			     struct bt_iso_qos *qos, __u16 sync_handle,
			     __u8 num_bis, __u8 bis[])
{
	int err;

	if (num_bis < 0x01 || num_bis > ISO_MAX_NUM_BIS)
		return -EINVAL;

	err = qos_set_big(hdev, qos);
	if (err)
		return err;

	if (hcon) {
		/* Update hcon QoS */
		hcon->iso_qos = *qos;

		hcon->num_bis = num_bis;
		memcpy(hcon->bis, bis, num_bis);
		hcon->conn_timeout = msecs_to_jiffies(qos->bcast.timeout * 10);
	}

	return hci_connect_big_sync(hdev, hcon);
}

static void create_big_complete(struct hci_dev *hdev, void *data, int err)
{
	struct hci_conn *conn = data;

	bt_dev_dbg(hdev, "conn %p", conn);

	if (err) {
		bt_dev_err(hdev, "Unable to create BIG: %d", err);
		hci_connect_cfm(conn, err);
		hci_conn_del(conn);
	}
}

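/* Example (hypothetical broadcast-sink flow, identifiers are placeholders):
 * a listener first synchronizes to the broadcaster's periodic advertising,
 * then, once the PA Sync Established event delivers a sync_handle, creates
 * the BIG sync for the selected BIS indices:
 *
 *	conn = hci_pa_create_sync(hdev, &dst, ADDR_LE_DEV_PUBLIC, sid, &qos);
 *	(wait for the PA sync established event carrying sync_handle)
 *	err = hci_conn_big_create_sync(hdev, conn, &qos, sync_handle,
 *				       num_bis, bis);
 */
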
struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
			      struct bt_iso_qos *qos,
			      __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	struct hci_conn *parent;
	__u8 eir[HCI_MAX_PER_AD_LENGTH];
	struct hci_link *link;

	/* Look for any BIS that is open for rebinding */
	conn = hci_conn_hash_lookup_big_state(hdev, qos->bcast.big, BT_OPEN);
	if (conn) {
		memcpy(qos, &conn->iso_qos, sizeof(*qos));
		conn->state = BT_CONNECTED;
		return conn;
	}

	if (base_len && base)
		base_len = eir_append_service_data(eir, 0, 0x1851,
						   base, base_len);

	/* We need a hci_conn object using BDADDR_ANY as dst */
	conn = hci_add_bis(hdev, dst, qos, base_len, eir);
	if (IS_ERR(conn))
		return conn;

	/* Update LINK PHY according to QoS preference */
	conn->le_tx_phy = qos->bcast.out.phy;

	/* Add Basic Announcement into Periodic Adv Data if BASE is set */
	if (base_len && base) {
		memcpy(conn->le_per_adv_data, eir, sizeof(eir));
		conn->le_per_adv_data_len = base_len;
	}

	hci_iso_qos_setup(hdev, conn, &qos->bcast.out,
			  conn->le_tx_phy ? conn->le_tx_phy :
			  hdev->le_tx_def_phys);

	conn->iso_qos = *qos;
	conn->state = BT_BOUND;

	/* Link BISes together */
	parent = hci_conn_hash_lookup_big(hdev,
					  conn->iso_qos.bcast.big);
	if (parent && parent != conn) {
		link = hci_conn_link(parent, conn);
		hci_conn_drop(conn);
		if (!link)
			return ERR_PTR(-ENOLINK);
	}

	return conn;
}

static void bis_mark_per_adv(struct hci_conn *conn, void *data)
{
	struct iso_list_data *d = data;

	/* Skip if not a broadcast/ANY address */
	if (bacmp(&conn->dst, BDADDR_ANY))
		return;

	if (d->big != conn->iso_qos.bcast.big ||
	    d->bis == BT_ISO_QOS_BIS_UNSET ||
	    d->bis != conn->iso_qos.bcast.bis)
		return;

	set_bit(HCI_CONN_PER_ADV, &conn->flags);
}

struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos,
				 __u8 base_len, __u8 *base)
{
	struct hci_conn *conn;
	int err;
	struct iso_list_data data;

	conn = hci_bind_bis(hdev, dst, qos, base_len, base);
	if (IS_ERR(conn))
		return conn;

	if (conn->state == BT_CONNECTED)
		return conn;

	data.big = qos->bcast.big;
	data.bis = qos->bcast.bis;

	/* Set HCI_CONN_PER_ADV for all bound connections, to mark that
	 * the start periodic advertising and create BIG commands have
	 * been queued
	 */
	hci_conn_hash_list_state(hdev, bis_mark_per_adv, BIS_LINK,
				 BT_BOUND, &data);

	/* Queue start periodic advertising and create BIG */
	err = hci_cmd_sync_queue(hdev, create_big_sync, conn,
				 create_big_complete);
	if (err < 0) {
		hci_conn_drop(conn);
		return ERR_PTR(err);
	}

	return conn;
}

struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
				 __u8 dst_type, struct bt_iso_qos *qos)
{
	struct hci_conn *le;
	struct hci_conn *cis;
	struct hci_link *link;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		le = hci_connect_le(hdev, dst, dst_type, false,
				    BT_SECURITY_LOW,
				    HCI_LE_CONN_TIMEOUT,
				    HCI_ROLE_SLAVE, 0, 0);
	else
		le = hci_connect_le_scan(hdev, dst, dst_type,
					 BT_SECURITY_LOW,
					 HCI_LE_CONN_TIMEOUT,
					 CONN_REASON_ISO_CONNECT);
	if (IS_ERR(le))
		return le;

	hci_iso_qos_setup(hdev, le, &qos->ucast.out,
			  le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys);
	hci_iso_qos_setup(hdev, le, &qos->ucast.in,
			  le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys);

	cis = hci_bind_cis(hdev, dst, dst_type, qos);
	if (IS_ERR(cis)) {
		hci_conn_drop(le);
		return cis;
	}

	link = hci_conn_link(le, cis);
	hci_conn_drop(cis);
	if (!link) {
		hci_conn_drop(le);
		return ERR_PTR(-ENOLINK);
	}

	cis->state = BT_CONNECT;

	hci_le_create_cis_pending(hdev);

	return cis;
}

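/* Example (illustrative sketch of the central-side CIS flow): a unicast
 * client connects the underlying LE ACL, binds the CIS and lets the queued
 * Create CIS run once both links are ready:
 *
 *	cis = hci_connect_cis(hdev, &dst, ADDR_LE_DEV_PUBLIC, &qos);
 *	if (IS_ERR(cis))
 *		return PTR_ERR(cis);
 *	(completion is reported through hci_connect_cfm() when the
 *	 CIS Established event arrives)
 */
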
/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	/* In Secure Connections Only mode, it is required that Secure
	 * Connections is used and the link is encrypted with AES-CCM
	 * using a P-256 authenticated combination key.
	 */
	if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
		if (!hci_conn_sc_enabled(conn) ||
		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
			return 0;
	}

	/* AES encryption is required for Level 4:
	 *
	 * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C
	 * page 1319:
	 *
	 * 128-bit equivalent strength for link and encryption keys
	 * required using FIPS approved algorithms (E0 not allowed,
	 * SAFER+ not allowed, and P-192 not allowed; encryption key
	 * not shortened)
	 */
	if (conn->sec_level == BT_SECURITY_FIPS &&
	    !test_bit(HCI_CONN_AES_CCM, &conn->flags)) {
		bt_dev_err(conn->hdev,
			   "Invalid security: Missing AES-CCM usage");
		return 0;
	}

	if (hci_conn_ssp_enabled(conn) &&
	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 0;

	return 1;
}

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* Set the ENCRYPT_PEND to trigger encryption after
		 * authentication.
		 */
		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}

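/* Informational sketch of the resulting command/event sequence (not code in
 * this file): raising security on a BR/EDR link typically looks like
 *
 *	HCI_OP_AUTH_REQUESTED    ->  Authentication Complete event
 *	HCI_OP_SET_CONN_ENCRYPT  ->  Encryption Change event
 *
 * hci_conn_auth() sets HCI_CONN_ENCRYPT_PEND so that the event handler can
 * continue with hci_conn_encrypt() once authentication succeeds.
 */
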
/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non-2.1 devices and a low security level we don't need the
	 * link key.
	 */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	switch (conn->key_type) {
	case HCI_LK_AUTH_COMBINATION_P256:
		/* An authenticated FIPS approved combination key has
		 * sufficient security for security level 4 or lower.
		 */
		if (sec_level <= BT_SECURITY_FIPS)
			goto encrypt;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		/* An authenticated combination key has sufficient security for
		 * security level 3 or lower.
		 */
		if (sec_level <= BT_SECURITY_HIGH)
			goto encrypt;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		/* An unauthenticated combination key has sufficient security
		 * for security level 2 or lower.
		 */
		if (sec_level <= BT_SECURITY_MEDIUM)
			goto encrypt;
		break;
	case HCI_LK_COMBINATION:
		/* A combination key always has sufficient security for
		 * security level 2 or lower. A high security level requires
		 * that the combination key was generated using the maximum
		 * PIN code length (16). This applies to pre-2.1 units.
		 */
		if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16)
			goto encrypt;
		break;
	default:
		break;
	}

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) {
		/* Ensure that the encryption key size has been read,
		 * otherwise stall the upper layer responses.
		 */
		if (!conn->enc_key_size)
			return 0;

		/* Nothing else needed, all requirements are met */
		return 1;
	}

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

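/* Example (hypothetical caller, mirroring how an upper layer such as L2CAP
 * might use this API): request high security as initiator and, if the
 * requirements cannot be satisfied immediately, wait for the auth/encrypt
 * events instead of proceeding:
 *
 *	if (hci_conn_security(conn, BT_SECURITY_HIGH,
 *			      HCI_AT_GENERAL_BONDING, true))
 *		;  (requirements already met, proceed now)
 *	else
 *		;  (0 returned, security is pending - wait for events)
 */
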
/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	BT_DBG("hcon %p", conn);

	/* Accept if non-secure or higher security level is required */
	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
		return 1;

	/* Accept if secure or higher security level is already present */
	if (conn->sec_level == BT_SECURITY_HIGH ||
	    conn->sec_level == BT_SECURITY_FIPS)
		return 1;

	/* Reject not secure link */
	return 0;
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("hcon %p", conn);

	if (role == conn->role)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct list_head *head = &hdev->conn_hash.list;
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	/* We should not traverse the list here, because hci_conn_del
	 * can remove extra links, which may cause the list traversal
	 * to hit items that have already been released.
	 */
	while ((conn = list_first_entry_or_null(head,
						struct hci_conn,
						list)) != NULL) {
		conn->state = BT_CLOSED;
		hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(conn);
	}
}

static u32 get_link_mode(struct hci_conn *conn)
{
	u32 link_mode = 0;

	if (conn->role == HCI_ROLE_MASTER)
		link_mode |= HCI_LM_MASTER;

	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		link_mode |= HCI_LM_ENCRYPT;

	if (test_bit(HCI_CONN_AUTH, &conn->flags))
		link_mode |= HCI_LM_AUTH;

	if (test_bit(HCI_CONN_SECURE, &conn->flags))
		link_mode |= HCI_LM_SECURE;

	if (test_bit(HCI_CONN_FIPS, &conn->flags))
		link_mode |= HCI_LM_FIPS;

	return link_mode;
}

int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = get_link_mode(c);
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = get_link_mode(conn);
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

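/* Example (userspace sketch, assuming the raw HCI socket ioctl ABI that
 * backs these handlers): HCIGETCONNLIST copies back at most conn_num
 * entries for the requested device:
 *
 *	struct {
 *		struct hci_conn_list_req req;
 *		struct hci_conn_info ci[8];
 *	} buf = { .req = { .dev_id = 0, .conn_num = 8 } };
 *
 *	ioctl(hci_sock_fd, HCIGETCONNLIST, &buf);
 *	(buf.req.conn_num now holds the number of entries filled in)
 */
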
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
		BT_DBG("Refusing to create new hci_chan");
		return NULL;
	}

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = hci_conn_get(conn);
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}

void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	/* Prevent new hci_chan's from being created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}

void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("hcon %p", conn);

	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}

static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
						 __u16 handle)
{
	struct hci_chan *hchan;

	list_for_each_entry(hchan, &hcon->chan_list, list) {
		if (hchan->handle == handle)
			return hchan;
	}

	return NULL;
}

struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *hcon;
	struct hci_chan *hchan = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(hcon, &h->list, list) {
		hchan = __hci_chan_lookup_handle(hcon, handle);
		if (hchan)
			break;
	}

	rcu_read_unlock();

	return hchan;
}

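/* Example (illustrative sketch): an upper-layer user of the channel API,
 * such as L2CAP, creates one hci_chan per logical channel and tears it
 * down again when the channel closes:
 *
 *	chan = hci_chan_create(conn);
 *	if (!chan)
 *		return -ENOMEM;
 *	skb_queue_tail(&chan->data_q, skb);   (queue outgoing data)
 *	...
 *	hci_chan_del(chan);   (drops the conn reference taken at create time)
 */
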
u32 hci_conn_get_phy(struct hci_conn *conn)
{
	u32 phys = 0;

	/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
	 * Table 6.2: Packets defined for synchronous, asynchronous, and
	 * CPB logical transport types.
	 */
	switch (conn->type) {
	case SCO_LINK:
		/* SCO logical transport (1 Mb/s):
		 * HV1, HV2, HV3 and DV.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		break;

	case ACL_LINK:
		/* ACL logical transport (1 Mb/s) ptt=0:
		 * DH1, DM3, DH3, DM5 and DH5.
		 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
			phys |= BT_PHY_BR_1M_3SLOT;

		if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
			phys |= BT_PHY_BR_1M_5SLOT;

		/* ACL logical transport (2 Mb/s) ptt=1:
		 * 2-DH1, 2-DH3 and 2-DH5.
		 */
		if (!(conn->pkt_type & HCI_2DH1))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & HCI_2DH3))
			phys |= BT_PHY_EDR_2M_3SLOT;

		if (!(conn->pkt_type & HCI_2DH5))
			phys |= BT_PHY_EDR_2M_5SLOT;

		/* ACL logical transport (3 Mb/s) ptt=1:
		 * 3-DH1, 3-DH3 and 3-DH5.
		 */
		if (!(conn->pkt_type & HCI_3DH1))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & HCI_3DH3))
			phys |= BT_PHY_EDR_3M_3SLOT;

		if (!(conn->pkt_type & HCI_3DH5))
			phys |= BT_PHY_EDR_3M_5SLOT;

		break;

	case ESCO_LINK:
		/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
		phys |= BT_PHY_BR_1M_1SLOT;

		if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
			phys |= BT_PHY_BR_1M_3SLOT;

		/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
		if (!(conn->pkt_type & ESCO_2EV3))
			phys |= BT_PHY_EDR_2M_1SLOT;

		if (!(conn->pkt_type & ESCO_2EV5))
			phys |= BT_PHY_EDR_2M_3SLOT;

		/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
		if (!(conn->pkt_type & ESCO_3EV3))
			phys |= BT_PHY_EDR_3M_1SLOT;

		if (!(conn->pkt_type & ESCO_3EV5))
			phys |= BT_PHY_EDR_3M_3SLOT;

		break;

	case LE_LINK:
		if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
			phys |= BT_PHY_LE_1M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
			phys |= BT_PHY_LE_2M_RX;

		if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_TX;

		if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
			phys |= BT_PHY_LE_CODED_RX;

		break;
	}

	return phys;
}

static int abort_conn_sync(struct hci_dev *hdev, void *data)
{
	struct hci_conn *conn = data;

	if (!hci_conn_valid(hdev, conn))
		return -ECANCELED;

	return hci_abort_conn_sync(hdev, conn, conn->abort_reason);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_dev *hdev = conn->hdev;

	/* If abort_reason has already been set it means the connection is
	 * already being aborted so don't attempt to overwrite it.
	 */
	if (conn->abort_reason)
		return 0;

	bt_dev_dbg(hdev, "handle 0x%2.2x reason 0x%2.2x", conn->handle, reason);

	conn->abort_reason = reason;

	/* If the connection is pending, check the command opcode, since it
	 * might be blocking on hci_cmd_sync_work while waiting for its
	 * respective event, in which case hci_cmd_sync_cancel is needed to
	 * cancel it.
	 *
	 * hci_connect_le serializes the connection attempts so only one
	 * connection can be in BT_CONNECT at a time.
	 */
	if (conn->state == BT_CONNECT && hdev->req_status == HCI_REQ_PEND) {
		switch (hci_skb_event(hdev->sent_cmd)) {
		case HCI_EV_CONN_COMPLETE:
		case HCI_EV_LE_CONN_COMPLETE:
		case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		case HCI_EVT_LE_CIS_ESTABLISHED:
			hci_cmd_sync_cancel(hdev, ECANCELED);
			break;
		}
	/* Cancel connect attempt if still queued/pending */
	} else if (!hci_cancel_connect_sync(hdev, conn)) {
		return 0;
	}

	/* Run immediately if already on cmd_sync_work, since this may be
	 * called as a result of MGMT_OP_DISCONNECT/MGMT_OP_UNPAIR, which
	 * already queues its callback on cmd_sync_work.
	 */
	return hci_cmd_sync_run_once(hdev, abort_conn_sync, conn, NULL);
}

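/* Example (illustrative only): the mask returned by hci_conn_get_phy() is
 * tested against the BT_PHY_* bits, e.g. to check whether an LE connection
 * transmits on the 2M PHY:
 *
 *	if (hci_conn_get_phy(conn) & BT_PHY_LE_2M_TX)
 *		;  (2M PHY in use for TX)
 */
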
void hci_setup_tx_timestamp(struct sk_buff *skb, size_t key_offset,
			    const struct sockcm_cookie *sockc)
{
	struct sock *sk = skb ? skb->sk : NULL;
	int key;

	/* This shall be called on exactly one of the skbs generated by a user
	 * sendmsg() call, and only when that sendmsg() does not return an
	 * error to the user. This is required to keep the tskey incremented
	 * here in sync with any sendmsg() counting done by the user.
	 *
	 * Stream sockets shall set key_offset to the sendmsg() length in
	 * bytes and call this with the last fragment; other sockets set it
	 * to 1 and call with the first fragment.
	 */

	if (!skb || !sockc || !sk || !key_offset)
		return;

	sock_tx_timestamp(sk, sockc, &skb_shinfo(skb)->tx_flags);

	if (sk->sk_type == SOCK_STREAM)
		key = atomic_add_return(key_offset, &sk->sk_tskey);

	if (sockc->tsflags & SOF_TIMESTAMPING_OPT_ID &&
	    sockc->tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) {
		if (sockc->tsflags & SOCKCM_FLAG_TS_OPT_ID) {
			skb_shinfo(skb)->tskey = sockc->ts_opt_id;
		} else {
			if (sk->sk_type != SOCK_STREAM)
				key = atomic_inc_return(&sk->sk_tskey);
			skb_shinfo(skb)->tskey = key - 1;
		}
	}
}

void hci_conn_tx_queue(struct hci_conn *conn, struct sk_buff *skb)
{
	struct tx_queue *comp = &conn->tx_q;
	bool track = false;

	/* Emit SND now, i.e. just before sending to the driver */
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP)
		__skb_tstamp_tx(skb, NULL, NULL, skb->sk, SCM_TSTAMP_SND);

	/* The COMPLETION tstamp is emitted for tracked skbs later, from the
	 * Number of Completed Packets event. It is available only for the
	 * flow-controlled cases.
	 *
	 * TODO: SCO support without flowctl (needs to be done in drivers)
	 */
	switch (conn->type) {
	case CIS_LINK:
	case BIS_LINK:
	case ACL_LINK:
	case LE_LINK:
		break;
	case SCO_LINK:
	case ESCO_LINK:
		if (!hci_dev_test_flag(conn->hdev, HCI_SCO_FLOWCTL))
			return;
		break;
	default:
		return;
	}

	if (skb->sk && (skb_shinfo(skb)->tx_flags & SKBTX_COMPLETION_TSTAMP))
		track = true;

	/* If nothing is tracked, just count extra skbs at the queue head */
	if (!track && !comp->tracked) {
		comp->extra++;
		return;
	}

	if (track) {
		skb = skb_clone_sk(skb);
		if (!skb)
			goto count_only;

		comp->tracked++;
	} else {
		skb = skb_clone(skb, GFP_KERNEL);
		if (!skb)
			goto count_only;
	}

	skb_queue_tail(&comp->queue, skb);
	return;

count_only:
	/* Stop tracking skbs, and only count. This will not emit timestamps
	 * for the packets, but if we get here, something more serious is
	 * already wrong.
	 */
	comp->tracked = 0;
	comp->extra += skb_queue_len(&comp->queue) + 1;
	skb_queue_purge(&comp->queue);
}

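/* Example (illustrative flow, helper names as defined in this file): a
 * protocol sendmsg() path producing N fragments on a stream socket would do
 *
 *	hci_setup_tx_timestamp(last_skb, total_len, &sockc);
 *	(then, per fragment, just before handing it to the driver)
 *	hci_conn_tx_queue(conn, skb);
 *
 * and each completed packet reported by the Number of Completed Packets
 * event leads to a matching hci_conn_tx_dequeue(conn) below, which emits
 * SCM_TSTAMP_COMPLETION for tracked skbs.
 */
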
void hci_conn_tx_dequeue(struct hci_conn *conn)
{
	struct tx_queue *comp = &conn->tx_q;
	struct sk_buff *skb;

	/* If there are tracked skbs, the counted extras go before dequeuing
	 * real skbs, to keep ordering. When nothing is tracked, the ordering
	 * doesn't matter, so dequeue real skbs first to get rid of them ASAP.
	 */
	if (comp->extra && (comp->tracked || skb_queue_empty(&comp->queue))) {
		comp->extra--;
		return;
	}

	skb = skb_dequeue(&comp->queue);
	if (!skb)
		return;

	if (skb->sk) {
		comp->tracked--;
		__skb_tstamp_tx(skb, NULL, NULL, skb->sk,
				SCM_TSTAMP_COMPLETION);
	}

	kfree_skb(skb);
}

u8 *hci_conn_key_enc_size(struct hci_conn *conn)
{
	if (conn->type == ACL_LINK) {
		struct link_key *key;

		key = hci_find_link_key(conn->hdev, &conn->dst);
		if (!key)
			return NULL;

		return &key->pin_len;
	} else if (conn->type == LE_LINK) {
		struct smp_ltk *ltk;

		ltk = hci_find_ltk(conn->hdev, &conn->dst, conn->dst_type,
				   conn->role);
		if (!ltk)
			return NULL;

		return &ltk->enc_size;
	}

	return NULL;
}

int hci_ethtool_ts_info(unsigned int index, int sk_proto,
			struct kernel_ethtool_ts_info *info)
{
	struct hci_dev *hdev;

	hdev = hci_dev_get(index);
	if (!hdev)
		return -ENODEV;

	info->so_timestamping =
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	info->tx_types = BIT(HWTSTAMP_TX_OFF);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);

	switch (sk_proto) {
	case BTPROTO_ISO:
	case BTPROTO_L2CAP:
		info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
		info->so_timestamping |= SOF_TIMESTAMPING_TX_COMPLETION;
		break;
	case BTPROTO_SCO:
		info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (hci_dev_test_flag(hdev, HCI_SCO_FLOWCTL))
			info->so_timestamping |= SOF_TIMESTAMPING_TX_COMPLETION;
		break;
	}

	hci_dev_put(hdev);
	return 0;
}

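/* Example (hypothetical event-handler use, not a call made in this file):
 * the pointer returned by hci_conn_key_enc_size() lets an encryption-change
 * path persist the negotiated key size next to the stored key:
 *
 *	u8 *size = hci_conn_key_enc_size(conn);
 *
 *	if (size)
 *		*size = conn->enc_key_size;
 */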