/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
        return 0;
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

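/* Note: the *_init request builders above and below only queue HCI
 * commands on a struct hci_request; nothing is transmitted from here.
 * They are run synchronously through __hci_req_sync() by __hci_init()
 * and __hci_unconf_init() further down in this file, while HCI_INIT
 * is set on the device.
 */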
static int amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_PRIMARY:
                bredr_init(req);
                break;
        case HCI_AMP:
                amp_init1(req);
                break;
        default:
                bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
                break;
        }

        return 0;
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */

                /* If the controller supports the Disconnect command, enable
                 * the corresponding event. In addition enable packet flow
                 * control related events.
227 */ 228 if (hdev->commands[0] & 0x20) { 229 events[0] |= 0x10; /* Disconnection Complete */ 230 events[2] |= 0x04; /* Number of Completed Packets */ 231 events[3] |= 0x02; /* Data Buffer Overflow */ 232 } 233 234 /* If the controller supports the Read Remote Version 235 * Information command, enable the corresponding event. 236 */ 237 if (hdev->commands[2] & 0x80) 238 events[1] |= 0x08; /* Read Remote Version Information 239 * Complete 240 */ 241 242 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { 243 events[0] |= 0x80; /* Encryption Change */ 244 events[5] |= 0x80; /* Encryption Key Refresh Complete */ 245 } 246 } 247 248 if (lmp_inq_rssi_capable(hdev) || 249 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) 250 events[4] |= 0x02; /* Inquiry Result with RSSI */ 251 252 if (lmp_ext_feat_capable(hdev)) 253 events[4] |= 0x04; /* Read Remote Extended Features Complete */ 254 255 if (lmp_esco_capable(hdev)) { 256 events[5] |= 0x08; /* Synchronous Connection Complete */ 257 events[5] |= 0x10; /* Synchronous Connection Changed */ 258 } 259 260 if (lmp_sniffsubr_capable(hdev)) 261 events[5] |= 0x20; /* Sniff Subrating */ 262 263 if (lmp_pause_enc_capable(hdev)) 264 events[5] |= 0x80; /* Encryption Key Refresh Complete */ 265 266 if (lmp_ext_inq_capable(hdev)) 267 events[5] |= 0x40; /* Extended Inquiry Result */ 268 269 if (lmp_no_flush_capable(hdev)) 270 events[7] |= 0x01; /* Enhanced Flush Complete */ 271 272 if (lmp_lsto_capable(hdev)) 273 events[6] |= 0x80; /* Link Supervision Timeout Changed */ 274 275 if (lmp_ssp_capable(hdev)) { 276 events[6] |= 0x01; /* IO Capability Request */ 277 events[6] |= 0x02; /* IO Capability Response */ 278 events[6] |= 0x04; /* User Confirmation Request */ 279 events[6] |= 0x08; /* User Passkey Request */ 280 events[6] |= 0x10; /* Remote OOB Data Request */ 281 events[6] |= 0x20; /* Simple Pairing Complete */ 282 events[7] |= 0x04; /* User Passkey Notification */ 283 events[7] |= 0x08; /* Keypress Notification */ 284 events[7] |= 0x10; /* Remote Host Supported 285 * Features Notification 286 */ 287 } 288 289 if (lmp_le_capable(hdev)) 290 events[7] |= 0x20; /* LE Meta-Event */ 291 292 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events); 293 } 294 295 static int hci_init2_req(struct hci_request *req, unsigned long opt) 296 { 297 struct hci_dev *hdev = req->hdev; 298 299 if (hdev->dev_type == HCI_AMP) 300 return amp_init2(req); 301 302 if (lmp_bredr_capable(hdev)) 303 bredr_setup(req); 304 else 305 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); 306 307 if (lmp_le_capable(hdev)) 308 le_setup(req); 309 310 /* All Bluetooth 1.2 and later controllers should support the 311 * HCI command for reading the local supported commands. 312 * 313 * Unfortunately some controllers indicate Bluetooth 1.2 support, 314 * but do not have support for this command. If that is the case, 315 * the driver can quirk the behavior and skip reading the local 316 * supported commands. 317 */ 318 if (hdev->hci_ver > BLUETOOTH_VER_1_1 && 319 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) 320 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); 321 322 if (lmp_ssp_capable(hdev)) { 323 /* When SSP is available, then the host features page 324 * should also be available as well. However some 325 * controllers list the max_page as 0 as long as SSP 326 * has not been enabled. To achieve proper debugging 327 * output, force the minimum max_page to 1 at least. 
328 */ 329 hdev->max_page = 0x01; 330 331 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { 332 u8 mode = 0x01; 333 334 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, 335 sizeof(mode), &mode); 336 } else { 337 struct hci_cp_write_eir cp; 338 339 memset(hdev->eir, 0, sizeof(hdev->eir)); 340 memset(&cp, 0, sizeof(cp)); 341 342 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); 343 } 344 } 345 346 if (lmp_inq_rssi_capable(hdev) || 347 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) { 348 u8 mode; 349 350 /* If Extended Inquiry Result events are supported, then 351 * they are clearly preferred over Inquiry Result with RSSI 352 * events. 353 */ 354 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01; 355 356 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode); 357 } 358 359 if (lmp_inq_tx_pwr_capable(hdev)) 360 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); 361 362 if (lmp_ext_feat_capable(hdev)) { 363 struct hci_cp_read_local_ext_features cp; 364 365 cp.page = 0x01; 366 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES, 367 sizeof(cp), &cp); 368 } 369 370 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) { 371 u8 enable = 1; 372 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable), 373 &enable); 374 } 375 376 return 0; 377 } 378 379 static void hci_setup_link_policy(struct hci_request *req) 380 { 381 struct hci_dev *hdev = req->hdev; 382 struct hci_cp_write_def_link_policy cp; 383 u16 link_policy = 0; 384 385 if (lmp_rswitch_capable(hdev)) 386 link_policy |= HCI_LP_RSWITCH; 387 if (lmp_hold_capable(hdev)) 388 link_policy |= HCI_LP_HOLD; 389 if (lmp_sniff_capable(hdev)) 390 link_policy |= HCI_LP_SNIFF; 391 if (lmp_park_capable(hdev)) 392 link_policy |= HCI_LP_PARK; 393 394 cp.policy = cpu_to_le16(link_policy); 395 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); 396 } 397 398 static void hci_set_le_support(struct hci_request *req) 399 { 400 struct hci_dev *hdev = req->hdev; 401 struct hci_cp_write_le_host_supported cp; 402 403 /* LE-only devices do not support explicit enablement */ 404 if (!lmp_bredr_capable(hdev)) 405 return; 406 407 memset(&cp, 0, sizeof(cp)); 408 409 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { 410 cp.le = 0x01; 411 cp.simul = 0x00; 412 } 413 414 if (cp.le != lmp_host_le_capable(hdev)) 415 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), 416 &cp); 417 } 418 419 static void hci_set_event_mask_page_2(struct hci_request *req) 420 { 421 struct hci_dev *hdev = req->hdev; 422 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 423 bool changed = false; 424 425 /* If Connectionless Peripheral Broadcast central role is supported 426 * enable all necessary events for it. 427 */ 428 if (lmp_cpb_central_capable(hdev)) { 429 events[1] |= 0x40; /* Triggered Clock Capture */ 430 events[1] |= 0x80; /* Synchronization Train Complete */ 431 events[2] |= 0x10; /* Peripheral Page Response Timeout */ 432 events[2] |= 0x20; /* CPB Channel Map Change */ 433 changed = true; 434 } 435 436 /* If Connectionless Peripheral Broadcast peripheral role is supported 437 * enable all necessary events for it. 
438 */ 439 if (lmp_cpb_peripheral_capable(hdev)) { 440 events[2] |= 0x01; /* Synchronization Train Received */ 441 events[2] |= 0x02; /* CPB Receive */ 442 events[2] |= 0x04; /* CPB Timeout */ 443 events[2] |= 0x08; /* Truncated Page Complete */ 444 changed = true; 445 } 446 447 /* Enable Authenticated Payload Timeout Expired event if supported */ 448 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { 449 events[2] |= 0x80; 450 changed = true; 451 } 452 453 /* Some Broadcom based controllers indicate support for Set Event 454 * Mask Page 2 command, but then actually do not support it. Since 455 * the default value is all bits set to zero, the command is only 456 * required if the event mask has to be changed. In case no change 457 * to the event mask is needed, skip this command. 458 */ 459 if (changed) 460 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, 461 sizeof(events), events); 462 } 463 464 static int hci_init3_req(struct hci_request *req, unsigned long opt) 465 { 466 struct hci_dev *hdev = req->hdev; 467 u8 p; 468 469 hci_setup_event_mask(req); 470 471 if (hdev->commands[6] & 0x20 && 472 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) { 473 struct hci_cp_read_stored_link_key cp; 474 475 bacpy(&cp.bdaddr, BDADDR_ANY); 476 cp.read_all = 0x01; 477 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp); 478 } 479 480 if (hdev->commands[5] & 0x10) 481 hci_setup_link_policy(req); 482 483 if (hdev->commands[8] & 0x01) 484 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL); 485 486 if (hdev->commands[18] & 0x04 && 487 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) 488 hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL); 489 490 /* Some older Broadcom based Bluetooth 1.2 controllers do not 491 * support the Read Page Scan Type command. Check support for 492 * this command in the bit mask of supported commands. 493 */ 494 if (hdev->commands[13] & 0x01) 495 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL); 496 497 if (lmp_le_capable(hdev)) { 498 u8 events[8]; 499 500 memset(events, 0, sizeof(events)); 501 502 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) 503 events[0] |= 0x10; /* LE Long Term Key Request */ 504 505 /* If controller supports the Connection Parameters Request 506 * Link Layer Procedure, enable the corresponding event. 507 */ 508 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC) 509 events[0] |= 0x20; /* LE Remote Connection 510 * Parameter Request 511 */ 512 513 /* If the controller supports the Data Length Extension 514 * feature, enable the corresponding event. 515 */ 516 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) 517 events[0] |= 0x40; /* LE Data Length Change */ 518 519 /* If the controller supports LL Privacy feature, enable 520 * the corresponding event. 521 */ 522 if (hdev->le_features[0] & HCI_LE_LL_PRIVACY) 523 events[1] |= 0x02; /* LE Enhanced Connection 524 * Complete 525 */ 526 527 /* If the controller supports Extended Scanner Filter 528 * Policies, enable the corresponding event. 529 */ 530 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) 531 events[1] |= 0x04; /* LE Direct Advertising 532 * Report 533 */ 534 535 /* If the controller supports Channel Selection Algorithm #2 536 * feature, enable the corresponding event. 537 */ 538 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2) 539 events[2] |= 0x08; /* LE Channel Selection 540 * Algorithm 541 */ 542 543 /* If the controller supports the LE Set Scan Enable command, 544 * enable the corresponding advertising report event. 
545 */ 546 if (hdev->commands[26] & 0x08) 547 events[0] |= 0x02; /* LE Advertising Report */ 548 549 /* If the controller supports the LE Create Connection 550 * command, enable the corresponding event. 551 */ 552 if (hdev->commands[26] & 0x10) 553 events[0] |= 0x01; /* LE Connection Complete */ 554 555 /* If the controller supports the LE Connection Update 556 * command, enable the corresponding event. 557 */ 558 if (hdev->commands[27] & 0x04) 559 events[0] |= 0x04; /* LE Connection Update 560 * Complete 561 */ 562 563 /* If the controller supports the LE Read Remote Used Features 564 * command, enable the corresponding event. 565 */ 566 if (hdev->commands[27] & 0x20) 567 events[0] |= 0x08; /* LE Read Remote Used 568 * Features Complete 569 */ 570 571 /* If the controller supports the LE Read Local P-256 572 * Public Key command, enable the corresponding event. 573 */ 574 if (hdev->commands[34] & 0x02) 575 events[0] |= 0x80; /* LE Read Local P-256 576 * Public Key Complete 577 */ 578 579 /* If the controller supports the LE Generate DHKey 580 * command, enable the corresponding event. 581 */ 582 if (hdev->commands[34] & 0x04) 583 events[1] |= 0x01; /* LE Generate DHKey Complete */ 584 585 /* If the controller supports the LE Set Default PHY or 586 * LE Set PHY commands, enable the corresponding event. 587 */ 588 if (hdev->commands[35] & (0x20 | 0x40)) 589 events[1] |= 0x08; /* LE PHY Update Complete */ 590 591 /* If the controller supports LE Set Extended Scan Parameters 592 * and LE Set Extended Scan Enable commands, enable the 593 * corresponding event. 594 */ 595 if (use_ext_scan(hdev)) 596 events[1] |= 0x10; /* LE Extended Advertising 597 * Report 598 */ 599 600 /* If the controller supports the LE Extended Advertising 601 * command, enable the corresponding event. 602 */ 603 if (ext_adv_capable(hdev)) 604 events[2] |= 0x02; /* LE Advertising Set 605 * Terminated 606 */ 607 608 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events), 609 events); 610 611 /* Read LE Advertising Channel TX Power */ 612 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { 613 /* HCI TS spec forbids mixing of legacy and extended 614 * advertising commands wherein READ_ADV_TX_POWER is 615 * also included. So do not call it if extended adv 616 * is supported otherwise controller will return 617 * COMMAND_DISALLOWED for extended commands. 
618 */ 619 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL); 620 } 621 622 if (hdev->commands[38] & 0x80) { 623 /* Read LE Min/Max Tx Power*/ 624 hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER, 625 0, NULL); 626 } 627 628 if (hdev->commands[26] & 0x40) { 629 /* Read LE Accept List Size */ 630 hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE, 631 0, NULL); 632 } 633 634 if (hdev->commands[26] & 0x80) { 635 /* Clear LE Accept List */ 636 hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL); 637 } 638 639 if (hdev->commands[34] & 0x40) { 640 /* Read LE Resolving List Size */ 641 hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE, 642 0, NULL); 643 } 644 645 if (hdev->commands[34] & 0x20) { 646 /* Clear LE Resolving List */ 647 hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL); 648 } 649 650 if (hdev->commands[35] & 0x04) { 651 __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout); 652 653 /* Set RPA timeout */ 654 hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2, 655 &rpa_timeout); 656 } 657 658 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { 659 /* Read LE Maximum Data Length */ 660 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL); 661 662 /* Read LE Suggested Default Data Length */ 663 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL); 664 } 665 666 if (ext_adv_capable(hdev)) { 667 /* Read LE Number of Supported Advertising Sets */ 668 hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, 669 0, NULL); 670 } 671 672 hci_set_le_support(req); 673 } 674 675 /* Read features beyond page 1 if available */ 676 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) { 677 struct hci_cp_read_local_ext_features cp; 678 679 cp.page = p; 680 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES, 681 sizeof(cp), &cp); 682 } 683 684 return 0; 685 } 686 687 static int hci_init4_req(struct hci_request *req, unsigned long opt) 688 { 689 struct hci_dev *hdev = req->hdev; 690 691 /* Some Broadcom based Bluetooth controllers do not support the 692 * Delete Stored Link Key command. They are clearly indicating its 693 * absence in the bit mask of supported commands. 694 * 695 * Check the supported commands and only if the command is marked 696 * as supported send it. If not supported assume that the controller 697 * does not have actual support for stored link keys which makes this 698 * command redundant anyway. 699 * 700 * Some controllers indicate that they support handling deleting 701 * stored link keys, but they don't. The quirk lets a driver 702 * just disable this command. 
703 */ 704 if (hdev->commands[6] & 0x80 && 705 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) { 706 struct hci_cp_delete_stored_link_key cp; 707 708 bacpy(&cp.bdaddr, BDADDR_ANY); 709 cp.delete_all = 0x01; 710 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, 711 sizeof(cp), &cp); 712 } 713 714 /* Set event mask page 2 if the HCI command for it is supported */ 715 if (hdev->commands[22] & 0x04) 716 hci_set_event_mask_page_2(req); 717 718 /* Read local pairing options if the HCI command is supported */ 719 if (hdev->commands[41] & 0x08) 720 hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL); 721 722 /* Get MWS transport configuration if the HCI command is supported */ 723 if (hdev->commands[30] & 0x08) 724 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL); 725 726 /* Check for Synchronization Train support */ 727 if (lmp_sync_train_capable(hdev)) 728 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL); 729 730 /* Enable Secure Connections if supported and configured */ 731 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && 732 bredr_sc_enabled(hdev)) { 733 u8 support = 0x01; 734 735 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, 736 sizeof(support), &support); 737 } 738 739 /* Set erroneous data reporting if supported to the wideband speech 740 * setting value 741 */ 742 if (hdev->commands[18] & 0x08 && 743 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) { 744 bool enabled = hci_dev_test_flag(hdev, 745 HCI_WIDEBAND_SPEECH_ENABLED); 746 747 if (enabled != 748 (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) { 749 struct hci_cp_write_def_err_data_reporting cp; 750 751 cp.err_data_reporting = enabled ? 752 ERR_DATA_REPORTING_ENABLED : 753 ERR_DATA_REPORTING_DISABLED; 754 755 hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, 756 sizeof(cp), &cp); 757 } 758 } 759 760 /* Set Suggested Default Data Length to maximum if supported */ 761 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) { 762 struct hci_cp_le_write_def_data_len cp; 763 764 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len); 765 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time); 766 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp); 767 } 768 769 /* Set Default PHY parameters if command is supported */ 770 if (hdev->commands[35] & 0x20) { 771 struct hci_cp_le_set_default_phy cp; 772 773 cp.all_phys = 0x00; 774 cp.tx_phys = hdev->le_tx_def_phys; 775 cp.rx_phys = hdev->le_rx_def_phys; 776 777 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp); 778 } 779 780 return 0; 781 } 782 783 static int __hci_init(struct hci_dev *hdev) 784 { 785 int err; 786 787 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL); 788 if (err < 0) 789 return err; 790 791 if (hci_dev_test_flag(hdev, HCI_SETUP)) 792 hci_debugfs_create_basic(hdev); 793 794 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL); 795 if (err < 0) 796 return err; 797 798 /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode 799 * BR/EDR/LE type controllers. AMP controllers only need the 800 * first two stages of init. 
801 */ 802 if (hdev->dev_type != HCI_PRIMARY) 803 return 0; 804 805 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL); 806 if (err < 0) 807 return err; 808 809 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL); 810 if (err < 0) 811 return err; 812 813 /* Read local codec list if the HCI command is supported */ 814 if (hdev->commands[45] & 0x04) 815 hci_read_supported_codecs_v2(hdev); 816 else if (hdev->commands[29] & 0x20) 817 hci_read_supported_codecs(hdev); 818 819 /* This function is only called when the controller is actually in 820 * configured state. When the controller is marked as unconfigured, 821 * this initialization procedure is not run. 822 * 823 * It means that it is possible that a controller runs through its 824 * setup phase and then discovers missing settings. If that is the 825 * case, then this function will not be called. It then will only 826 * be called during the config phase. 827 * 828 * So only when in setup phase or config phase, create the debugfs 829 * entries and register the SMP channels. 830 */ 831 if (!hci_dev_test_flag(hdev, HCI_SETUP) && 832 !hci_dev_test_flag(hdev, HCI_CONFIG)) 833 return 0; 834 835 hci_debugfs_create_common(hdev); 836 837 if (lmp_bredr_capable(hdev)) 838 hci_debugfs_create_bredr(hdev); 839 840 if (lmp_le_capable(hdev)) 841 hci_debugfs_create_le(hdev); 842 843 return 0; 844 } 845 846 static int hci_init0_req(struct hci_request *req, unsigned long opt) 847 { 848 struct hci_dev *hdev = req->hdev; 849 850 BT_DBG("%s %ld", hdev->name, opt); 851 852 /* Reset */ 853 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) 854 hci_reset_req(req, 0); 855 856 /* Read Local Version */ 857 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); 858 859 /* Read BD Address */ 860 if (hdev->set_bdaddr) 861 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL); 862 863 return 0; 864 } 865 866 static int __hci_unconf_init(struct hci_dev *hdev) 867 { 868 int err; 869 870 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 871 return 0; 872 873 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL); 874 if (err < 0) 875 return err; 876 877 if (hci_dev_test_flag(hdev, HCI_SETUP)) 878 hci_debugfs_create_basic(hdev); 879 880 return 0; 881 } 882 883 static int hci_scan_req(struct hci_request *req, unsigned long opt) 884 { 885 __u8 scan = opt; 886 887 BT_DBG("%s %x", req->hdev->name, scan); 888 889 /* Inquiry and Page scans */ 890 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 891 return 0; 892 } 893 894 static int hci_auth_req(struct hci_request *req, unsigned long opt) 895 { 896 __u8 auth = opt; 897 898 BT_DBG("%s %x", req->hdev->name, auth); 899 900 /* Authentication */ 901 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); 902 return 0; 903 } 904 905 static int hci_encrypt_req(struct hci_request *req, unsigned long opt) 906 { 907 __u8 encrypt = opt; 908 909 BT_DBG("%s %x", req->hdev->name, encrypt); 910 911 /* Encryption */ 912 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); 913 return 0; 914 } 915 916 static int hci_linkpol_req(struct hci_request *req, unsigned long opt) 917 { 918 __le16 policy = cpu_to_le16(opt); 919 920 BT_DBG("%s %x", req->hdev->name, policy); 921 922 /* Default link policy */ 923 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); 924 return 0; 925 } 926 927 /* Get HCI device by index. 928 * Device is held on return. 
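 * The caller must release this reference with hci_dev_put() when done.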
*/ 929 struct hci_dev *hci_dev_get(int index) 930 { 931 struct hci_dev *hdev = NULL, *d; 932 933 BT_DBG("%d", index); 934 935 if (index < 0) 936 return NULL; 937 938 read_lock(&hci_dev_list_lock); 939 list_for_each_entry(d, &hci_dev_list, list) { 940 if (d->id == index) { 941 hdev = hci_dev_hold(d); 942 break; 943 } 944 } 945 read_unlock(&hci_dev_list_lock); 946 return hdev; 947 } 948 949 /* ---- Inquiry support ---- */ 950 951 bool hci_discovery_active(struct hci_dev *hdev) 952 { 953 struct discovery_state *discov = &hdev->discovery; 954 955 switch (discov->state) { 956 case DISCOVERY_FINDING: 957 case DISCOVERY_RESOLVING: 958 return true; 959 960 default: 961 return false; 962 } 963 } 964 965 void hci_discovery_set_state(struct hci_dev *hdev, int state) 966 { 967 int old_state = hdev->discovery.state; 968 969 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state); 970 971 if (old_state == state) 972 return; 973 974 hdev->discovery.state = state; 975 976 switch (state) { 977 case DISCOVERY_STOPPED: 978 hci_update_background_scan(hdev); 979 980 if (old_state != DISCOVERY_STARTING) 981 mgmt_discovering(hdev, 0); 982 break; 983 case DISCOVERY_STARTING: 984 break; 985 case DISCOVERY_FINDING: 986 mgmt_discovering(hdev, 1); 987 break; 988 case DISCOVERY_RESOLVING: 989 break; 990 case DISCOVERY_STOPPING: 991 break; 992 } 993 } 994 995 void hci_inquiry_cache_flush(struct hci_dev *hdev) 996 { 997 struct discovery_state *cache = &hdev->discovery; 998 struct inquiry_entry *p, *n; 999 1000 list_for_each_entry_safe(p, n, &cache->all, all) { 1001 list_del(&p->all); 1002 kfree(p); 1003 } 1004 1005 INIT_LIST_HEAD(&cache->unknown); 1006 INIT_LIST_HEAD(&cache->resolve); 1007 } 1008 1009 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, 1010 bdaddr_t *bdaddr) 1011 { 1012 struct discovery_state *cache = &hdev->discovery; 1013 struct inquiry_entry *e; 1014 1015 BT_DBG("cache %p, %pMR", cache, bdaddr); 1016 1017 list_for_each_entry(e, &cache->all, all) { 1018 if (!bacmp(&e->data.bdaddr, bdaddr)) 1019 return e; 1020 } 1021 1022 return NULL; 1023 } 1024 1025 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, 1026 bdaddr_t *bdaddr) 1027 { 1028 struct discovery_state *cache = &hdev->discovery; 1029 struct inquiry_entry *e; 1030 1031 BT_DBG("cache %p, %pMR", cache, bdaddr); 1032 1033 list_for_each_entry(e, &cache->unknown, list) { 1034 if (!bacmp(&e->data.bdaddr, bdaddr)) 1035 return e; 1036 } 1037 1038 return NULL; 1039 } 1040 1041 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, 1042 bdaddr_t *bdaddr, 1043 int state) 1044 { 1045 struct discovery_state *cache = &hdev->discovery; 1046 struct inquiry_entry *e; 1047 1048 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state); 1049 1050 list_for_each_entry(e, &cache->resolve, list) { 1051 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state) 1052 return e; 1053 if (!bacmp(&e->data.bdaddr, bdaddr)) 1054 return e; 1055 } 1056 1057 return NULL; 1058 } 1059 1060 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, 1061 struct inquiry_entry *ie) 1062 { 1063 struct discovery_state *cache = &hdev->discovery; 1064 struct list_head *pos = &cache->resolve; 1065 struct inquiry_entry *p; 1066 1067 list_del(&ie->list); 1068 1069 list_for_each_entry(p, &cache->resolve, list) { 1070 if (p->name_state != NAME_PENDING && 1071 abs(p->data.rssi) >= abs(ie->data.rssi)) 1072 break; 1073 pos = &p->list; 1074 } 1075 1076 list_add(&ie->list, pos); 1077 } 1078 1079 u32 
hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, 1080 bool name_known) 1081 { 1082 struct discovery_state *cache = &hdev->discovery; 1083 struct inquiry_entry *ie; 1084 u32 flags = 0; 1085 1086 BT_DBG("cache %p, %pMR", cache, &data->bdaddr); 1087 1088 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR); 1089 1090 if (!data->ssp_mode) 1091 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; 1092 1093 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr); 1094 if (ie) { 1095 if (!ie->data.ssp_mode) 1096 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; 1097 1098 if (ie->name_state == NAME_NEEDED && 1099 data->rssi != ie->data.rssi) { 1100 ie->data.rssi = data->rssi; 1101 hci_inquiry_cache_update_resolve(hdev, ie); 1102 } 1103 1104 goto update; 1105 } 1106 1107 /* Entry not in the cache. Add new one. */ 1108 ie = kzalloc(sizeof(*ie), GFP_KERNEL); 1109 if (!ie) { 1110 flags |= MGMT_DEV_FOUND_CONFIRM_NAME; 1111 goto done; 1112 } 1113 1114 list_add(&ie->all, &cache->all); 1115 1116 if (name_known) { 1117 ie->name_state = NAME_KNOWN; 1118 } else { 1119 ie->name_state = NAME_NOT_KNOWN; 1120 list_add(&ie->list, &cache->unknown); 1121 } 1122 1123 update: 1124 if (name_known && ie->name_state != NAME_KNOWN && 1125 ie->name_state != NAME_PENDING) { 1126 ie->name_state = NAME_KNOWN; 1127 list_del(&ie->list); 1128 } 1129 1130 memcpy(&ie->data, data, sizeof(*data)); 1131 ie->timestamp = jiffies; 1132 cache->timestamp = jiffies; 1133 1134 if (ie->name_state == NAME_NOT_KNOWN) 1135 flags |= MGMT_DEV_FOUND_CONFIRM_NAME; 1136 1137 done: 1138 return flags; 1139 } 1140 1141 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) 1142 { 1143 struct discovery_state *cache = &hdev->discovery; 1144 struct inquiry_info *info = (struct inquiry_info *) buf; 1145 struct inquiry_entry *e; 1146 int copied = 0; 1147 1148 list_for_each_entry(e, &cache->all, all) { 1149 struct inquiry_data *data = &e->data; 1150 1151 if (copied >= num) 1152 break; 1153 1154 bacpy(&info->bdaddr, &data->bdaddr); 1155 info->pscan_rep_mode = data->pscan_rep_mode; 1156 info->pscan_period_mode = data->pscan_period_mode; 1157 info->pscan_mode = data->pscan_mode; 1158 memcpy(info->dev_class, data->dev_class, 3); 1159 info->clock_offset = data->clock_offset; 1160 1161 info++; 1162 copied++; 1163 } 1164 1165 BT_DBG("cache %p, copied %d", cache, copied); 1166 return copied; 1167 } 1168 1169 static int hci_inq_req(struct hci_request *req, unsigned long opt) 1170 { 1171 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; 1172 struct hci_dev *hdev = req->hdev; 1173 struct hci_cp_inquiry cp; 1174 1175 BT_DBG("%s", hdev->name); 1176 1177 if (test_bit(HCI_INQUIRY, &hdev->flags)) 1178 return 0; 1179 1180 /* Start Inquiry */ 1181 memcpy(&cp.lap, &ir->lap, 3); 1182 cp.length = ir->length; 1183 cp.num_rsp = ir->num_rsp; 1184 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); 1185 1186 return 0; 1187 } 1188 1189 int hci_inquiry(void __user *arg) 1190 { 1191 __u8 __user *ptr = arg; 1192 struct hci_inquiry_req ir; 1193 struct hci_dev *hdev; 1194 int err = 0, do_inquiry = 0, max_rsp; 1195 long timeo; 1196 __u8 *buf; 1197 1198 if (copy_from_user(&ir, ptr, sizeof(ir))) 1199 return -EFAULT; 1200 1201 hdev = hci_dev_get(ir.dev_id); 1202 if (!hdev) 1203 return -ENODEV; 1204 1205 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 1206 err = -EBUSY; 1207 goto done; 1208 } 1209 1210 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 1211 err = -EOPNOTSUPP; 1212 goto done; 1213 } 1214 1215 if (hdev->dev_type != HCI_PRIMARY) { 1216 err = 
-EOPNOTSUPP; 1217 goto done; 1218 } 1219 1220 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { 1221 err = -EOPNOTSUPP; 1222 goto done; 1223 } 1224 1225 /* Restrict maximum inquiry length to 60 seconds */ 1226 if (ir.length > 60) { 1227 err = -EINVAL; 1228 goto done; 1229 } 1230 1231 hci_dev_lock(hdev); 1232 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || 1233 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { 1234 hci_inquiry_cache_flush(hdev); 1235 do_inquiry = 1; 1236 } 1237 hci_dev_unlock(hdev); 1238 1239 timeo = ir.length * msecs_to_jiffies(2000); 1240 1241 if (do_inquiry) { 1242 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir, 1243 timeo, NULL); 1244 if (err < 0) 1245 goto done; 1246 1247 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is 1248 * cleared). If it is interrupted by a signal, return -EINTR. 1249 */ 1250 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, 1251 TASK_INTERRUPTIBLE)) { 1252 err = -EINTR; 1253 goto done; 1254 } 1255 } 1256 1257 /* for unlimited number of responses we will use buffer with 1258 * 255 entries 1259 */ 1260 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; 1261 1262 /* cache_dump can't sleep. Therefore we allocate temp buffer and then 1263 * copy it to the user space. 1264 */ 1265 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL); 1266 if (!buf) { 1267 err = -ENOMEM; 1268 goto done; 1269 } 1270 1271 hci_dev_lock(hdev); 1272 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf); 1273 hci_dev_unlock(hdev); 1274 1275 BT_DBG("num_rsp %d", ir.num_rsp); 1276 1277 if (!copy_to_user(ptr, &ir, sizeof(ir))) { 1278 ptr += sizeof(ir); 1279 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * 1280 ir.num_rsp)) 1281 err = -EFAULT; 1282 } else 1283 err = -EFAULT; 1284 1285 kfree(buf); 1286 1287 done: 1288 hci_dev_put(hdev); 1289 return err; 1290 } 1291 1292 /** 1293 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address 1294 * (BD_ADDR) for a HCI device from 1295 * a firmware node property. 1296 * @hdev: The HCI device 1297 * 1298 * Search the firmware node for 'local-bd-address'. 1299 * 1300 * All-zero BD addresses are rejected, because those could be properties 1301 * that exist in the firmware tables, but were not updated by the firmware. For 1302 * example, the DTS could define 'local-bd-address', with zero BD addresses. 1303 */ 1304 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev) 1305 { 1306 struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent); 1307 bdaddr_t ba; 1308 int ret; 1309 1310 ret = fwnode_property_read_u8_array(fwnode, "local-bd-address", 1311 (u8 *)&ba, sizeof(ba)); 1312 if (ret < 0 || !bacmp(&ba, BDADDR_ANY)) 1313 return; 1314 1315 bacpy(&hdev->public_addr, &ba); 1316 } 1317 1318 static int hci_dev_do_open(struct hci_dev *hdev) 1319 { 1320 int ret = 0; 1321 1322 BT_DBG("%s %p", hdev->name, hdev); 1323 1324 hci_req_sync_lock(hdev); 1325 1326 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { 1327 ret = -ENODEV; 1328 goto done; 1329 } 1330 1331 if (!hci_dev_test_flag(hdev, HCI_SETUP) && 1332 !hci_dev_test_flag(hdev, HCI_CONFIG)) { 1333 /* Check for rfkill but allow the HCI setup stage to 1334 * proceed (which in itself doesn't cause any RF activity). 1335 */ 1336 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { 1337 ret = -ERFKILL; 1338 goto done; 1339 } 1340 1341 /* Check for valid public address or a configured static 1342 * random address, but let the HCI setup proceed to 1343 * be able to determine if there is a public address 1344 * or not. 
1345 * 1346 * In case of user channel usage, it is not important 1347 * if a public address or static random address is 1348 * available. 1349 * 1350 * This check is only valid for BR/EDR controllers 1351 * since AMP controllers do not have an address. 1352 */ 1353 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 1354 hdev->dev_type == HCI_PRIMARY && 1355 !bacmp(&hdev->bdaddr, BDADDR_ANY) && 1356 !bacmp(&hdev->static_addr, BDADDR_ANY)) { 1357 ret = -EADDRNOTAVAIL; 1358 goto done; 1359 } 1360 } 1361 1362 if (test_bit(HCI_UP, &hdev->flags)) { 1363 ret = -EALREADY; 1364 goto done; 1365 } 1366 1367 if (hdev->open(hdev)) { 1368 ret = -EIO; 1369 goto done; 1370 } 1371 1372 set_bit(HCI_RUNNING, &hdev->flags); 1373 hci_sock_dev_event(hdev, HCI_DEV_OPEN); 1374 1375 atomic_set(&hdev->cmd_cnt, 1); 1376 set_bit(HCI_INIT, &hdev->flags); 1377 1378 if (hci_dev_test_flag(hdev, HCI_SETUP) || 1379 test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) { 1380 bool invalid_bdaddr; 1381 1382 hci_sock_dev_event(hdev, HCI_DEV_SETUP); 1383 1384 if (hdev->setup) 1385 ret = hdev->setup(hdev); 1386 1387 /* The transport driver can set the quirk to mark the 1388 * BD_ADDR invalid before creating the HCI device or in 1389 * its setup callback. 1390 */ 1391 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, 1392 &hdev->quirks); 1393 1394 if (ret) 1395 goto setup_failed; 1396 1397 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) { 1398 if (!bacmp(&hdev->public_addr, BDADDR_ANY)) 1399 hci_dev_get_bd_addr_from_property(hdev); 1400 1401 if (bacmp(&hdev->public_addr, BDADDR_ANY) && 1402 hdev->set_bdaddr) { 1403 ret = hdev->set_bdaddr(hdev, 1404 &hdev->public_addr); 1405 1406 /* If setting of the BD_ADDR from the device 1407 * property succeeds, then treat the address 1408 * as valid even if the invalid BD_ADDR 1409 * quirk indicates otherwise. 1410 */ 1411 if (!ret) 1412 invalid_bdaddr = false; 1413 } 1414 } 1415 1416 setup_failed: 1417 /* The transport driver can set these quirks before 1418 * creating the HCI device or in its setup callback. 1419 * 1420 * For the invalid BD_ADDR quirk it is possible that 1421 * it becomes a valid address if the bootloader does 1422 * provide it (see above). 1423 * 1424 * In case any of them is set, the controller has to 1425 * start up as unconfigured. 1426 */ 1427 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || 1428 invalid_bdaddr) 1429 hci_dev_set_flag(hdev, HCI_UNCONFIGURED); 1430 1431 /* For an unconfigured controller it is required to 1432 * read at least the version information provided by 1433 * the Read Local Version Information command. 1434 * 1435 * If the set_bdaddr driver callback is provided, then 1436 * also the original Bluetooth public device address 1437 * will be read using the Read BD Address command. 1438 */ 1439 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 1440 ret = __hci_unconf_init(hdev); 1441 } 1442 1443 if (hci_dev_test_flag(hdev, HCI_CONFIG)) { 1444 /* If public address change is configured, ensure that 1445 * the address gets programmed. If the driver does not 1446 * support changing the public address, fail the power 1447 * on procedure. 
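 * (The else branch below fails it with -EADDRNOTAVAIL.)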
1448 */ 1449 if (bacmp(&hdev->public_addr, BDADDR_ANY) && 1450 hdev->set_bdaddr) 1451 ret = hdev->set_bdaddr(hdev, &hdev->public_addr); 1452 else 1453 ret = -EADDRNOTAVAIL; 1454 } 1455 1456 if (!ret) { 1457 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && 1458 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 1459 ret = __hci_init(hdev); 1460 if (!ret && hdev->post_init) 1461 ret = hdev->post_init(hdev); 1462 } 1463 } 1464 1465 /* If the HCI Reset command is clearing all diagnostic settings, 1466 * then they need to be reprogrammed after the init procedure 1467 * completed. 1468 */ 1469 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && 1470 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 1471 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag) 1472 ret = hdev->set_diag(hdev, true); 1473 1474 msft_do_open(hdev); 1475 aosp_do_open(hdev); 1476 1477 clear_bit(HCI_INIT, &hdev->flags); 1478 1479 if (!ret) { 1480 hci_dev_hold(hdev); 1481 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 1482 hci_adv_instances_set_rpa_expired(hdev, true); 1483 set_bit(HCI_UP, &hdev->flags); 1484 hci_sock_dev_event(hdev, HCI_DEV_UP); 1485 hci_leds_update_powered(hdev, true); 1486 if (!hci_dev_test_flag(hdev, HCI_SETUP) && 1487 !hci_dev_test_flag(hdev, HCI_CONFIG) && 1488 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && 1489 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 1490 hci_dev_test_flag(hdev, HCI_MGMT) && 1491 hdev->dev_type == HCI_PRIMARY) { 1492 ret = __hci_req_hci_power_on(hdev); 1493 mgmt_power_on(hdev, ret); 1494 } 1495 } else { 1496 /* Init failed, cleanup */ 1497 flush_work(&hdev->tx_work); 1498 1499 /* Since hci_rx_work() is possible to awake new cmd_work 1500 * it should be flushed first to avoid unexpected call of 1501 * hci_cmd_work() 1502 */ 1503 flush_work(&hdev->rx_work); 1504 flush_work(&hdev->cmd_work); 1505 1506 skb_queue_purge(&hdev->cmd_q); 1507 skb_queue_purge(&hdev->rx_q); 1508 1509 if (hdev->flush) 1510 hdev->flush(hdev); 1511 1512 if (hdev->sent_cmd) { 1513 kfree_skb(hdev->sent_cmd); 1514 hdev->sent_cmd = NULL; 1515 } 1516 1517 clear_bit(HCI_RUNNING, &hdev->flags); 1518 hci_sock_dev_event(hdev, HCI_DEV_CLOSE); 1519 1520 hdev->close(hdev); 1521 hdev->flags &= BIT(HCI_RAW); 1522 } 1523 1524 done: 1525 hci_req_sync_unlock(hdev); 1526 return ret; 1527 } 1528 1529 /* ---- HCI ioctl helpers ---- */ 1530 1531 int hci_dev_open(__u16 dev) 1532 { 1533 struct hci_dev *hdev; 1534 int err; 1535 1536 hdev = hci_dev_get(dev); 1537 if (!hdev) 1538 return -ENODEV; 1539 1540 /* Devices that are marked as unconfigured can only be powered 1541 * up as user channel. Trying to bring them up as normal devices 1542 * will result into a failure. Only user channel operation is 1543 * possible. 1544 * 1545 * When this function is called for a user channel, the flag 1546 * HCI_USER_CHANNEL will be set first before attempting to 1547 * open the device. 1548 */ 1549 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && 1550 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 1551 err = -EOPNOTSUPP; 1552 goto done; 1553 } 1554 1555 /* We need to ensure that no other power on/off work is pending 1556 * before proceeding to call hci_dev_do_open. This is 1557 * particularly important if the setup procedure has not yet 1558 * completed. 1559 */ 1560 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) 1561 cancel_delayed_work(&hdev->power_off); 1562 1563 /* After this call it is guaranteed that the setup procedure 1564 * has finished. 
This means that error conditions like RFKILL 1565 * or no valid public or static random address apply. 1566 */ 1567 flush_workqueue(hdev->req_workqueue); 1568 1569 /* For controllers not using the management interface and that 1570 * are brought up using legacy ioctl, set the HCI_BONDABLE bit 1571 * so that pairing works for them. Once the management interface 1572 * is in use this bit will be cleared again and userspace has 1573 * to explicitly enable it. 1574 */ 1575 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 1576 !hci_dev_test_flag(hdev, HCI_MGMT)) 1577 hci_dev_set_flag(hdev, HCI_BONDABLE); 1578 1579 err = hci_dev_do_open(hdev); 1580 1581 done: 1582 hci_dev_put(hdev); 1583 return err; 1584 } 1585 1586 /* This function requires the caller holds hdev->lock */ 1587 static void hci_pend_le_actions_clear(struct hci_dev *hdev) 1588 { 1589 struct hci_conn_params *p; 1590 1591 list_for_each_entry(p, &hdev->le_conn_params, list) { 1592 if (p->conn) { 1593 hci_conn_drop(p->conn); 1594 hci_conn_put(p->conn); 1595 p->conn = NULL; 1596 } 1597 list_del_init(&p->action); 1598 } 1599 1600 BT_DBG("All LE pending actions cleared"); 1601 } 1602 1603 int hci_dev_do_close(struct hci_dev *hdev) 1604 { 1605 bool auto_off; 1606 int err = 0; 1607 1608 BT_DBG("%s %p", hdev->name, hdev); 1609 1610 cancel_delayed_work(&hdev->power_off); 1611 cancel_delayed_work(&hdev->ncmd_timer); 1612 1613 hci_request_cancel_all(hdev); 1614 hci_req_sync_lock(hdev); 1615 1616 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && 1617 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 1618 test_bit(HCI_UP, &hdev->flags)) { 1619 /* Execute vendor specific shutdown routine */ 1620 if (hdev->shutdown) 1621 err = hdev->shutdown(hdev); 1622 } 1623 1624 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { 1625 cancel_delayed_work_sync(&hdev->cmd_timer); 1626 hci_req_sync_unlock(hdev); 1627 return err; 1628 } 1629 1630 hci_leds_update_powered(hdev, false); 1631 1632 /* Flush RX and TX works */ 1633 flush_work(&hdev->tx_work); 1634 flush_work(&hdev->rx_work); 1635 1636 if (hdev->discov_timeout > 0) { 1637 hdev->discov_timeout = 0; 1638 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); 1639 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); 1640 } 1641 1642 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) 1643 cancel_delayed_work(&hdev->service_cache); 1644 1645 if (hci_dev_test_flag(hdev, HCI_MGMT)) { 1646 struct adv_info *adv_instance; 1647 1648 cancel_delayed_work_sync(&hdev->rpa_expired); 1649 1650 list_for_each_entry(adv_instance, &hdev->adv_instances, list) 1651 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); 1652 } 1653 1654 /* Avoid potential lockdep warnings from the *_flush() calls by 1655 * ensuring the workqueue is empty up front. 
1656 */ 1657 drain_workqueue(hdev->workqueue); 1658 1659 hci_dev_lock(hdev); 1660 1661 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1662 1663 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); 1664 1665 if (!auto_off && hdev->dev_type == HCI_PRIMARY && 1666 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 1667 hci_dev_test_flag(hdev, HCI_MGMT)) 1668 __mgmt_power_off(hdev); 1669 1670 hci_inquiry_cache_flush(hdev); 1671 hci_pend_le_actions_clear(hdev); 1672 hci_conn_hash_flush(hdev); 1673 hci_dev_unlock(hdev); 1674 1675 smp_unregister(hdev); 1676 1677 hci_sock_dev_event(hdev, HCI_DEV_DOWN); 1678 1679 aosp_do_close(hdev); 1680 msft_do_close(hdev); 1681 1682 if (hdev->flush) 1683 hdev->flush(hdev); 1684 1685 /* Reset device */ 1686 skb_queue_purge(&hdev->cmd_q); 1687 atomic_set(&hdev->cmd_cnt, 1); 1688 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && 1689 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 1690 set_bit(HCI_INIT, &hdev->flags); 1691 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL); 1692 clear_bit(HCI_INIT, &hdev->flags); 1693 } 1694 1695 /* flush cmd work */ 1696 flush_work(&hdev->cmd_work); 1697 1698 /* Drop queues */ 1699 skb_queue_purge(&hdev->rx_q); 1700 skb_queue_purge(&hdev->cmd_q); 1701 skb_queue_purge(&hdev->raw_q); 1702 1703 /* Drop last sent command */ 1704 if (hdev->sent_cmd) { 1705 cancel_delayed_work_sync(&hdev->cmd_timer); 1706 kfree_skb(hdev->sent_cmd); 1707 hdev->sent_cmd = NULL; 1708 } 1709 1710 clear_bit(HCI_RUNNING, &hdev->flags); 1711 hci_sock_dev_event(hdev, HCI_DEV_CLOSE); 1712 1713 if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks)) 1714 wake_up(&hdev->suspend_wait_q); 1715 1716 /* After this point our queues are empty 1717 * and no tasks are scheduled. */ 1718 hdev->close(hdev); 1719 1720 /* Clear flags */ 1721 hdev->flags &= BIT(HCI_RAW); 1722 hci_dev_clear_volatile_flags(hdev); 1723 1724 /* Controller radio is available but is currently powered down */ 1725 hdev->amp_status = AMP_STATUS_POWERED_DOWN; 1726 1727 memset(hdev->eir, 0, sizeof(hdev->eir)); 1728 memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); 1729 bacpy(&hdev->random_addr, BDADDR_ANY); 1730 hci_codec_list_clear(&hdev->local_codecs); 1731 1732 hci_req_sync_unlock(hdev); 1733 1734 hci_dev_put(hdev); 1735 return err; 1736 } 1737 1738 int hci_dev_close(__u16 dev) 1739 { 1740 struct hci_dev *hdev; 1741 int err; 1742 1743 hdev = hci_dev_get(dev); 1744 if (!hdev) 1745 return -ENODEV; 1746 1747 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 1748 err = -EBUSY; 1749 goto done; 1750 } 1751 1752 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) 1753 cancel_delayed_work(&hdev->power_off); 1754 1755 err = hci_dev_do_close(hdev); 1756 1757 done: 1758 hci_dev_put(hdev); 1759 return err; 1760 } 1761 1762 static int hci_dev_do_reset(struct hci_dev *hdev) 1763 { 1764 int ret; 1765 1766 BT_DBG("%s %p", hdev->name, hdev); 1767 1768 hci_req_sync_lock(hdev); 1769 1770 /* Drop queues */ 1771 skb_queue_purge(&hdev->rx_q); 1772 skb_queue_purge(&hdev->cmd_q); 1773 1774 /* Avoid potential lockdep warnings from the *_flush() calls by 1775 * ensuring the workqueue is empty up front. 
1776 */ 1777 drain_workqueue(hdev->workqueue); 1778 1779 hci_dev_lock(hdev); 1780 hci_inquiry_cache_flush(hdev); 1781 hci_conn_hash_flush(hdev); 1782 hci_dev_unlock(hdev); 1783 1784 if (hdev->flush) 1785 hdev->flush(hdev); 1786 1787 atomic_set(&hdev->cmd_cnt, 1); 1788 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; 1789 1790 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL); 1791 1792 hci_req_sync_unlock(hdev); 1793 return ret; 1794 } 1795 1796 int hci_dev_reset(__u16 dev) 1797 { 1798 struct hci_dev *hdev; 1799 int err; 1800 1801 hdev = hci_dev_get(dev); 1802 if (!hdev) 1803 return -ENODEV; 1804 1805 if (!test_bit(HCI_UP, &hdev->flags)) { 1806 err = -ENETDOWN; 1807 goto done; 1808 } 1809 1810 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 1811 err = -EBUSY; 1812 goto done; 1813 } 1814 1815 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 1816 err = -EOPNOTSUPP; 1817 goto done; 1818 } 1819 1820 err = hci_dev_do_reset(hdev); 1821 1822 done: 1823 hci_dev_put(hdev); 1824 return err; 1825 } 1826 1827 int hci_dev_reset_stat(__u16 dev) 1828 { 1829 struct hci_dev *hdev; 1830 int ret = 0; 1831 1832 hdev = hci_dev_get(dev); 1833 if (!hdev) 1834 return -ENODEV; 1835 1836 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 1837 ret = -EBUSY; 1838 goto done; 1839 } 1840 1841 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 1842 ret = -EOPNOTSUPP; 1843 goto done; 1844 } 1845 1846 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 1847 1848 done: 1849 hci_dev_put(hdev); 1850 return ret; 1851 } 1852 1853 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan) 1854 { 1855 bool conn_changed, discov_changed; 1856 1857 BT_DBG("%s scan 0x%02x", hdev->name, scan); 1858 1859 if ((scan & SCAN_PAGE)) 1860 conn_changed = !hci_dev_test_and_set_flag(hdev, 1861 HCI_CONNECTABLE); 1862 else 1863 conn_changed = hci_dev_test_and_clear_flag(hdev, 1864 HCI_CONNECTABLE); 1865 1866 if ((scan & SCAN_INQUIRY)) { 1867 discov_changed = !hci_dev_test_and_set_flag(hdev, 1868 HCI_DISCOVERABLE); 1869 } else { 1870 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); 1871 discov_changed = hci_dev_test_and_clear_flag(hdev, 1872 HCI_DISCOVERABLE); 1873 } 1874 1875 if (!hci_dev_test_flag(hdev, HCI_MGMT)) 1876 return; 1877 1878 if (conn_changed || discov_changed) { 1879 /* In case this was disabled through mgmt */ 1880 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); 1881 1882 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) 1883 hci_req_update_adv_data(hdev, hdev->cur_adv_instance); 1884 1885 mgmt_new_settings(hdev); 1886 } 1887 } 1888 1889 int hci_dev_cmd(unsigned int cmd, void __user *arg) 1890 { 1891 struct hci_dev *hdev; 1892 struct hci_dev_req dr; 1893 int err = 0; 1894 1895 if (copy_from_user(&dr, arg, sizeof(dr))) 1896 return -EFAULT; 1897 1898 hdev = hci_dev_get(dr.dev_id); 1899 if (!hdev) 1900 return -ENODEV; 1901 1902 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 1903 err = -EBUSY; 1904 goto done; 1905 } 1906 1907 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 1908 err = -EOPNOTSUPP; 1909 goto done; 1910 } 1911 1912 if (hdev->dev_type != HCI_PRIMARY) { 1913 err = -EOPNOTSUPP; 1914 goto done; 1915 } 1916 1917 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { 1918 err = -EOPNOTSUPP; 1919 goto done; 1920 } 1921 1922 switch (cmd) { 1923 case HCISETAUTH: 1924 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, 1925 HCI_INIT_TIMEOUT, NULL); 1926 break; 1927 1928 case HCISETENCRYPT: 1929 if (!lmp_encrypt_capable(hdev)) { 1930 err = -EOPNOTSUPP; 1931 break; 1932 } 1933 1934 if (!test_bit(HCI_AUTH, 
&hdev->flags)) { 1935 /* Auth must be enabled first */ 1936 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, 1937 HCI_INIT_TIMEOUT, NULL); 1938 if (err) 1939 break; 1940 } 1941 1942 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt, 1943 HCI_INIT_TIMEOUT, NULL); 1944 break; 1945 1946 case HCISETSCAN: 1947 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt, 1948 HCI_INIT_TIMEOUT, NULL); 1949 1950 /* Ensure that the connectable and discoverable states 1951 * get correctly modified as this was a non-mgmt change. 1952 */ 1953 if (!err) 1954 hci_update_scan_state(hdev, dr.dev_opt); 1955 break; 1956 1957 case HCISETLINKPOL: 1958 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt, 1959 HCI_INIT_TIMEOUT, NULL); 1960 break; 1961 1962 case HCISETLINKMODE: 1963 hdev->link_mode = ((__u16) dr.dev_opt) & 1964 (HCI_LM_MASTER | HCI_LM_ACCEPT); 1965 break; 1966 1967 case HCISETPTYPE: 1968 if (hdev->pkt_type == (__u16) dr.dev_opt) 1969 break; 1970 1971 hdev->pkt_type = (__u16) dr.dev_opt; 1972 mgmt_phy_configuration_changed(hdev, NULL); 1973 break; 1974 1975 case HCISETACLMTU: 1976 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1); 1977 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0); 1978 break; 1979 1980 case HCISETSCOMTU: 1981 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1); 1982 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0); 1983 break; 1984 1985 default: 1986 err = -EINVAL; 1987 break; 1988 } 1989 1990 done: 1991 hci_dev_put(hdev); 1992 return err; 1993 } 1994 1995 int hci_get_dev_list(void __user *arg) 1996 { 1997 struct hci_dev *hdev; 1998 struct hci_dev_list_req *dl; 1999 struct hci_dev_req *dr; 2000 int n = 0, size, err; 2001 __u16 dev_num; 2002 2003 if (get_user(dev_num, (__u16 __user *) arg)) 2004 return -EFAULT; 2005 2006 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) 2007 return -EINVAL; 2008 2009 size = sizeof(*dl) + dev_num * sizeof(*dr); 2010 2011 dl = kzalloc(size, GFP_KERNEL); 2012 if (!dl) 2013 return -ENOMEM; 2014 2015 dr = dl->dev_req; 2016 2017 read_lock(&hci_dev_list_lock); 2018 list_for_each_entry(hdev, &hci_dev_list, list) { 2019 unsigned long flags = hdev->flags; 2020 2021 /* When the auto-off is configured it means the transport 2022 * is running, but in that case still indicate that the 2023 * device is actually down. 2024 */ 2025 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) 2026 flags &= ~BIT(HCI_UP); 2027 2028 (dr + n)->dev_id = hdev->id; 2029 (dr + n)->dev_opt = flags; 2030 2031 if (++n >= dev_num) 2032 break; 2033 } 2034 read_unlock(&hci_dev_list_lock); 2035 2036 dl->dev_num = n; 2037 size = sizeof(*dl) + n * sizeof(*dr); 2038 2039 err = copy_to_user(arg, dl, size); 2040 kfree(dl); 2041 2042 return err ? -EFAULT : 0; 2043 } 2044 2045 int hci_get_dev_info(void __user *arg) 2046 { 2047 struct hci_dev *hdev; 2048 struct hci_dev_info di; 2049 unsigned long flags; 2050 int err = 0; 2051 2052 if (copy_from_user(&di, arg, sizeof(di))) 2053 return -EFAULT; 2054 2055 hdev = hci_dev_get(di.dev_id); 2056 if (!hdev) 2057 return -ENODEV; 2058 2059 /* When the auto-off is configured it means the transport 2060 * is running, but in that case still indicate that the 2061 * device is actually down. 
2062 */ 2063 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) 2064 flags = hdev->flags & ~BIT(HCI_UP); 2065 else 2066 flags = hdev->flags; 2067 2068 strcpy(di.name, hdev->name); 2069 di.bdaddr = hdev->bdaddr; 2070 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4); 2071 di.flags = flags; 2072 di.pkt_type = hdev->pkt_type; 2073 if (lmp_bredr_capable(hdev)) { 2074 di.acl_mtu = hdev->acl_mtu; 2075 di.acl_pkts = hdev->acl_pkts; 2076 di.sco_mtu = hdev->sco_mtu; 2077 di.sco_pkts = hdev->sco_pkts; 2078 } else { 2079 di.acl_mtu = hdev->le_mtu; 2080 di.acl_pkts = hdev->le_pkts; 2081 di.sco_mtu = 0; 2082 di.sco_pkts = 0; 2083 } 2084 di.link_policy = hdev->link_policy; 2085 di.link_mode = hdev->link_mode; 2086 2087 memcpy(&di.stat, &hdev->stat, sizeof(di.stat)); 2088 memcpy(&di.features, &hdev->features, sizeof(di.features)); 2089 2090 if (copy_to_user(arg, &di, sizeof(di))) 2091 err = -EFAULT; 2092 2093 hci_dev_put(hdev); 2094 2095 return err; 2096 } 2097 2098 /* ---- Interface to HCI drivers ---- */ 2099 2100 static int hci_rfkill_set_block(void *data, bool blocked) 2101 { 2102 struct hci_dev *hdev = data; 2103 2104 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); 2105 2106 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) 2107 return -EBUSY; 2108 2109 if (blocked) { 2110 hci_dev_set_flag(hdev, HCI_RFKILLED); 2111 if (!hci_dev_test_flag(hdev, HCI_SETUP) && 2112 !hci_dev_test_flag(hdev, HCI_CONFIG)) 2113 hci_dev_do_close(hdev); 2114 } else { 2115 hci_dev_clear_flag(hdev, HCI_RFKILLED); 2116 } 2117 2118 return 0; 2119 } 2120 2121 static const struct rfkill_ops hci_rfkill_ops = { 2122 .set_block = hci_rfkill_set_block, 2123 }; 2124 2125 static void hci_power_on(struct work_struct *work) 2126 { 2127 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); 2128 int err; 2129 2130 BT_DBG("%s", hdev->name); 2131 2132 if (test_bit(HCI_UP, &hdev->flags) && 2133 hci_dev_test_flag(hdev, HCI_MGMT) && 2134 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { 2135 cancel_delayed_work(&hdev->power_off); 2136 hci_req_sync_lock(hdev); 2137 err = __hci_req_hci_power_on(hdev); 2138 hci_req_sync_unlock(hdev); 2139 mgmt_power_on(hdev, err); 2140 return; 2141 } 2142 2143 err = hci_dev_do_open(hdev); 2144 if (err < 0) { 2145 hci_dev_lock(hdev); 2146 mgmt_set_powered_failed(hdev, err); 2147 hci_dev_unlock(hdev); 2148 return; 2149 } 2150 2151 /* During the HCI setup phase, a few error conditions are 2152 * ignored and they need to be checked now. If they are still 2153 * valid, it is important to turn the device back off. 2154 */ 2155 if (hci_dev_test_flag(hdev, HCI_RFKILLED) || 2156 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || 2157 (hdev->dev_type == HCI_PRIMARY && 2158 !bacmp(&hdev->bdaddr, BDADDR_ANY) && 2159 !bacmp(&hdev->static_addr, BDADDR_ANY))) { 2160 hci_dev_clear_flag(hdev, HCI_AUTO_OFF); 2161 hci_dev_do_close(hdev); 2162 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { 2163 queue_delayed_work(hdev->req_workqueue, &hdev->power_off, 2164 HCI_AUTO_OFF_TIMEOUT); 2165 } 2166 2167 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { 2168 /* For unconfigured devices, set the HCI_RAW flag 2169 * so that userspace can easily identify them. 2170 */ 2171 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 2172 set_bit(HCI_RAW, &hdev->flags); 2173 2174 /* For fully configured devices, this will send 2175 * the Index Added event. For unconfigured devices, 2176 * it will send the Unconfigured Index Added event. 2177 * 2178 * Devices with HCI_QUIRK_RAW_DEVICE are ignored 2179 * and no event will be sent.
2180 */ 2181 mgmt_index_added(hdev); 2182 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { 2183 /* When the controller is now configured, then it 2184 * is important to clear the HCI_RAW flag. 2185 */ 2186 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 2187 clear_bit(HCI_RAW, &hdev->flags); 2188 2189 /* Powering on the controller with HCI_CONFIG set only 2190 * happens with the transition from unconfigured to 2191 * configured. This will send the Index Added event. 2192 */ 2193 mgmt_index_added(hdev); 2194 } 2195 } 2196 2197 static void hci_power_off(struct work_struct *work) 2198 { 2199 struct hci_dev *hdev = container_of(work, struct hci_dev, 2200 power_off.work); 2201 2202 BT_DBG("%s", hdev->name); 2203 2204 hci_dev_do_close(hdev); 2205 } 2206 2207 static void hci_error_reset(struct work_struct *work) 2208 { 2209 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset); 2210 2211 BT_DBG("%s", hdev->name); 2212 2213 if (hdev->hw_error) 2214 hdev->hw_error(hdev, hdev->hw_error_code); 2215 else 2216 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code); 2217 2218 if (hci_dev_do_close(hdev)) 2219 return; 2220 2221 hci_dev_do_open(hdev); 2222 } 2223 2224 void hci_uuids_clear(struct hci_dev *hdev) 2225 { 2226 struct bt_uuid *uuid, *tmp; 2227 2228 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) { 2229 list_del(&uuid->list); 2230 kfree(uuid); 2231 } 2232 } 2233 2234 void hci_link_keys_clear(struct hci_dev *hdev) 2235 { 2236 struct link_key *key; 2237 2238 list_for_each_entry(key, &hdev->link_keys, list) { 2239 list_del_rcu(&key->list); 2240 kfree_rcu(key, rcu); 2241 } 2242 } 2243 2244 void hci_smp_ltks_clear(struct hci_dev *hdev) 2245 { 2246 struct smp_ltk *k; 2247 2248 list_for_each_entry(k, &hdev->long_term_keys, list) { 2249 list_del_rcu(&k->list); 2250 kfree_rcu(k, rcu); 2251 } 2252 } 2253 2254 void hci_smp_irks_clear(struct hci_dev *hdev) 2255 { 2256 struct smp_irk *k; 2257 2258 list_for_each_entry(k, &hdev->identity_resolving_keys, list) { 2259 list_del_rcu(&k->list); 2260 kfree_rcu(k, rcu); 2261 } 2262 } 2263 2264 void hci_blocked_keys_clear(struct hci_dev *hdev) 2265 { 2266 struct blocked_key *b; 2267 2268 list_for_each_entry(b, &hdev->blocked_keys, list) { 2269 list_del_rcu(&b->list); 2270 kfree_rcu(b, rcu); 2271 } 2272 } 2273 2274 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]) 2275 { 2276 bool blocked = false; 2277 struct blocked_key *b; 2278 2279 rcu_read_lock(); 2280 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) { 2281 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) { 2282 blocked = true; 2283 break; 2284 } 2285 } 2286 2287 rcu_read_unlock(); 2288 return blocked; 2289 } 2290 2291 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 2292 { 2293 struct link_key *k; 2294 2295 rcu_read_lock(); 2296 list_for_each_entry_rcu(k, &hdev->link_keys, list) { 2297 if (bacmp(bdaddr, &k->bdaddr) == 0) { 2298 rcu_read_unlock(); 2299 2300 if (hci_is_blocked_key(hdev, 2301 HCI_BLOCKED_KEY_TYPE_LINKKEY, 2302 k->val)) { 2303 bt_dev_warn_ratelimited(hdev, 2304 "Link key blocked for %pMR", 2305 &k->bdaddr); 2306 return NULL; 2307 } 2308 2309 return k; 2310 } 2311 } 2312 rcu_read_unlock(); 2313 2314 return NULL; 2315 } 2316 2317 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, 2318 u8 key_type, u8 old_key_type) 2319 { 2320 /* Legacy key */ 2321 if (key_type < 0x03) 2322 return true; 2323 2324 /* Debug keys are insecure so don't store them persistently */ 2325 if (key_type 
== HCI_LK_DEBUG_COMBINATION) 2326 return false; 2327 2328 /* Changed combination key and there's no previous one */ 2329 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) 2330 return false; 2331 2332 /* Security mode 3 case */ 2333 if (!conn) 2334 return true; 2335 2336 /* BR/EDR key derived using SC from an LE link */ 2337 if (conn->type == LE_LINK) 2338 return true; 2339 2340 /* Neither local nor remote side had no-bonding as requirement */ 2341 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) 2342 return true; 2343 2344 /* Local side had dedicated bonding as requirement */ 2345 if (conn->auth_type == 0x02 || conn->auth_type == 0x03) 2346 return true; 2347 2348 /* Remote side had dedicated bonding as requirement */ 2349 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) 2350 return true; 2351 2352 /* If none of the above criteria match, then don't store the key 2353 * persistently */ 2354 return false; 2355 } 2356 2357 static u8 ltk_role(u8 type) 2358 { 2359 if (type == SMP_LTK) 2360 return HCI_ROLE_MASTER; 2361 2362 return HCI_ROLE_SLAVE; 2363 } 2364 2365 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, 2366 u8 addr_type, u8 role) 2367 { 2368 struct smp_ltk *k; 2369 2370 rcu_read_lock(); 2371 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { 2372 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr)) 2373 continue; 2374 2375 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) { 2376 rcu_read_unlock(); 2377 2378 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK, 2379 k->val)) { 2380 bt_dev_warn_ratelimited(hdev, 2381 "LTK blocked for %pMR", 2382 &k->bdaddr); 2383 return NULL; 2384 } 2385 2386 return k; 2387 } 2388 } 2389 rcu_read_unlock(); 2390 2391 return NULL; 2392 } 2393 2394 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) 2395 { 2396 struct smp_irk *irk_to_return = NULL; 2397 struct smp_irk *irk; 2398 2399 rcu_read_lock(); 2400 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { 2401 if (!bacmp(&irk->rpa, rpa)) { 2402 irk_to_return = irk; 2403 goto done; 2404 } 2405 } 2406 2407 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { 2408 if (smp_irk_matches(hdev, irk->val, rpa)) { 2409 bacpy(&irk->rpa, rpa); 2410 irk_to_return = irk; 2411 goto done; 2412 } 2413 } 2414 2415 done: 2416 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, 2417 irk_to_return->val)) { 2418 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", 2419 &irk_to_return->bdaddr); 2420 irk_to_return = NULL; 2421 } 2422 2423 rcu_read_unlock(); 2424 2425 return irk_to_return; 2426 } 2427 2428 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, 2429 u8 addr_type) 2430 { 2431 struct smp_irk *irk_to_return = NULL; 2432 struct smp_irk *irk; 2433 2434 /* Identity Address must be public or static random */ 2435 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0) 2436 return NULL; 2437 2438 rcu_read_lock(); 2439 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { 2440 if (addr_type == irk->addr_type && 2441 bacmp(bdaddr, &irk->bdaddr) == 0) { 2442 irk_to_return = irk; 2443 goto done; 2444 } 2445 } 2446 2447 done: 2448 2449 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, 2450 irk_to_return->val)) { 2451 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", 2452 &irk_to_return->bdaddr); 2453 irk_to_return = NULL; 2454 } 2455 2456 rcu_read_unlock(); 2457 2458 return irk_to_return; 
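/* Both IRK lookup helpers above honour hdev->blocked_keys before
 * returning a key.  For illustration only (hypothetical caller, not an
 * actual call site in this file), resolving an incoming resolvable
 * private address could look like:
 *
 *	struct smp_irk *irk = hci_find_irk_by_rpa(hdev, &rpa);
 *
 *	if (irk)
 *		the identity is irk->bdaddr with type irk->addr_type;
 */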
2459 } 2460 2461 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, 2462 bdaddr_t *bdaddr, u8 *val, u8 type, 2463 u8 pin_len, bool *persistent) 2464 { 2465 struct link_key *key, *old_key; 2466 u8 old_key_type; 2467 2468 old_key = hci_find_link_key(hdev, bdaddr); 2469 if (old_key) { 2470 old_key_type = old_key->type; 2471 key = old_key; 2472 } else { 2473 old_key_type = conn ? conn->key_type : 0xff; 2474 key = kzalloc(sizeof(*key), GFP_KERNEL); 2475 if (!key) 2476 return NULL; 2477 list_add_rcu(&key->list, &hdev->link_keys); 2478 } 2479 2480 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type); 2481 2482 /* Some buggy controller combinations generate a changed 2483 * combination key for legacy pairing even when there's no 2484 * previous key */ 2485 if (type == HCI_LK_CHANGED_COMBINATION && 2486 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) { 2487 type = HCI_LK_COMBINATION; 2488 if (conn) 2489 conn->key_type = type; 2490 } 2491 2492 bacpy(&key->bdaddr, bdaddr); 2493 memcpy(key->val, val, HCI_LINK_KEY_SIZE); 2494 key->pin_len = pin_len; 2495 2496 if (type == HCI_LK_CHANGED_COMBINATION) 2497 key->type = old_key_type; 2498 else 2499 key->type = type; 2500 2501 if (persistent) 2502 *persistent = hci_persistent_key(hdev, conn, type, 2503 old_key_type); 2504 2505 return key; 2506 } 2507 2508 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, 2509 u8 addr_type, u8 type, u8 authenticated, 2510 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) 2511 { 2512 struct smp_ltk *key, *old_key; 2513 u8 role = ltk_role(type); 2514 2515 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role); 2516 if (old_key) 2517 key = old_key; 2518 else { 2519 key = kzalloc(sizeof(*key), GFP_KERNEL); 2520 if (!key) 2521 return NULL; 2522 list_add_rcu(&key->list, &hdev->long_term_keys); 2523 } 2524 2525 bacpy(&key->bdaddr, bdaddr); 2526 key->bdaddr_type = addr_type; 2527 memcpy(key->val, tk, sizeof(key->val)); 2528 key->authenticated = authenticated; 2529 key->ediv = ediv; 2530 key->rand = rand; 2531 key->enc_size = enc_size; 2532 key->type = type; 2533 2534 return key; 2535 } 2536 2537 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, 2538 u8 addr_type, u8 val[16], bdaddr_t *rpa) 2539 { 2540 struct smp_irk *irk; 2541 2542 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type); 2543 if (!irk) { 2544 irk = kzalloc(sizeof(*irk), GFP_KERNEL); 2545 if (!irk) 2546 return NULL; 2547 2548 bacpy(&irk->bdaddr, bdaddr); 2549 irk->addr_type = addr_type; 2550 2551 list_add_rcu(&irk->list, &hdev->identity_resolving_keys); 2552 } 2553 2554 memcpy(irk->val, val, 16); 2555 bacpy(&irk->rpa, rpa); 2556 2557 return irk; 2558 } 2559 2560 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 2561 { 2562 struct link_key *key; 2563 2564 key = hci_find_link_key(hdev, bdaddr); 2565 if (!key) 2566 return -ENOENT; 2567 2568 BT_DBG("%s removing %pMR", hdev->name, bdaddr); 2569 2570 list_del_rcu(&key->list); 2571 kfree_rcu(key, rcu); 2572 2573 return 0; 2574 } 2575 2576 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) 2577 { 2578 struct smp_ltk *k; 2579 int removed = 0; 2580 2581 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { 2582 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) 2583 continue; 2584 2585 BT_DBG("%s removing %pMR", hdev->name, bdaddr); 2586 2587 list_del_rcu(&k->list); 2588 kfree_rcu(k, rcu); 2589 removed++; 2590 } 2591 2592 return removed ? 
0 : -ENOENT; 2593 } 2594 2595 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) 2596 { 2597 struct smp_irk *k; 2598 2599 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) { 2600 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) 2601 continue; 2602 2603 BT_DBG("%s removing %pMR", hdev->name, bdaddr); 2604 2605 list_del_rcu(&k->list); 2606 kfree_rcu(k, rcu); 2607 } 2608 } 2609 2610 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) 2611 { 2612 struct smp_ltk *k; 2613 struct smp_irk *irk; 2614 u8 addr_type; 2615 2616 if (type == BDADDR_BREDR) { 2617 if (hci_find_link_key(hdev, bdaddr)) 2618 return true; 2619 return false; 2620 } 2621 2622 /* Convert to HCI addr type which struct smp_ltk uses */ 2623 if (type == BDADDR_LE_PUBLIC) 2624 addr_type = ADDR_LE_DEV_PUBLIC; 2625 else 2626 addr_type = ADDR_LE_DEV_RANDOM; 2627 2628 irk = hci_get_irk(hdev, bdaddr, addr_type); 2629 if (irk) { 2630 bdaddr = &irk->bdaddr; 2631 addr_type = irk->addr_type; 2632 } 2633 2634 rcu_read_lock(); 2635 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { 2636 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) { 2637 rcu_read_unlock(); 2638 return true; 2639 } 2640 } 2641 rcu_read_unlock(); 2642 2643 return false; 2644 } 2645 2646 /* HCI command timer function */ 2647 static void hci_cmd_timeout(struct work_struct *work) 2648 { 2649 struct hci_dev *hdev = container_of(work, struct hci_dev, 2650 cmd_timer.work); 2651 2652 if (hdev->sent_cmd) { 2653 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data; 2654 u16 opcode = __le16_to_cpu(sent->opcode); 2655 2656 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode); 2657 } else { 2658 bt_dev_err(hdev, "command tx timeout"); 2659 } 2660 2661 if (hdev->cmd_timeout) 2662 hdev->cmd_timeout(hdev); 2663 2664 atomic_set(&hdev->cmd_cnt, 1); 2665 queue_work(hdev->workqueue, &hdev->cmd_work); 2666 } 2667 2668 /* HCI ncmd timer function */ 2669 static void hci_ncmd_timeout(struct work_struct *work) 2670 { 2671 struct hci_dev *hdev = container_of(work, struct hci_dev, 2672 ncmd_timer.work); 2673 2674 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0"); 2675 2676 /* During HCI_INIT phase no events can be injected if the ncmd timer 2677 * triggers since the procedure has its own timeout handling. 
2678 */ 2679 if (test_bit(HCI_INIT, &hdev->flags)) 2680 return; 2681 2682 /* This is an irrecoverable state, inject hardware error event */ 2683 hci_reset_dev(hdev); 2684 } 2685 2686 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, 2687 bdaddr_t *bdaddr, u8 bdaddr_type) 2688 { 2689 struct oob_data *data; 2690 2691 list_for_each_entry(data, &hdev->remote_oob_data, list) { 2692 if (bacmp(bdaddr, &data->bdaddr) != 0) 2693 continue; 2694 if (data->bdaddr_type != bdaddr_type) 2695 continue; 2696 return data; 2697 } 2698 2699 return NULL; 2700 } 2701 2702 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, 2703 u8 bdaddr_type) 2704 { 2705 struct oob_data *data; 2706 2707 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); 2708 if (!data) 2709 return -ENOENT; 2710 2711 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type); 2712 2713 list_del(&data->list); 2714 kfree(data); 2715 2716 return 0; 2717 } 2718 2719 void hci_remote_oob_data_clear(struct hci_dev *hdev) 2720 { 2721 struct oob_data *data, *n; 2722 2723 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) { 2724 list_del(&data->list); 2725 kfree(data); 2726 } 2727 } 2728 2729 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, 2730 u8 bdaddr_type, u8 *hash192, u8 *rand192, 2731 u8 *hash256, u8 *rand256) 2732 { 2733 struct oob_data *data; 2734 2735 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); 2736 if (!data) { 2737 data = kmalloc(sizeof(*data), GFP_KERNEL); 2738 if (!data) 2739 return -ENOMEM; 2740 2741 bacpy(&data->bdaddr, bdaddr); 2742 data->bdaddr_type = bdaddr_type; 2743 list_add(&data->list, &hdev->remote_oob_data); 2744 } 2745 2746 if (hash192 && rand192) { 2747 memcpy(data->hash192, hash192, sizeof(data->hash192)); 2748 memcpy(data->rand192, rand192, sizeof(data->rand192)); 2749 if (hash256 && rand256) 2750 data->present = 0x03; 2751 } else { 2752 memset(data->hash192, 0, sizeof(data->hash192)); 2753 memset(data->rand192, 0, sizeof(data->rand192)); 2754 if (hash256 && rand256) 2755 data->present = 0x02; 2756 else 2757 data->present = 0x00; 2758 } 2759 2760 if (hash256 && rand256) { 2761 memcpy(data->hash256, hash256, sizeof(data->hash256)); 2762 memcpy(data->rand256, rand256, sizeof(data->rand256)); 2763 } else { 2764 memset(data->hash256, 0, sizeof(data->hash256)); 2765 memset(data->rand256, 0, sizeof(data->rand256)); 2766 if (hash192 && rand192) 2767 data->present = 0x01; 2768 } 2769 2770 BT_DBG("%s for %pMR", hdev->name, bdaddr); 2771 2772 return 0; 2773 } 2774 2775 /* This function requires the caller holds hdev->lock */ 2776 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance) 2777 { 2778 struct adv_info *adv_instance; 2779 2780 list_for_each_entry(adv_instance, &hdev->adv_instances, list) { 2781 if (adv_instance->instance == instance) 2782 return adv_instance; 2783 } 2784 2785 return NULL; 2786 } 2787 2788 /* This function requires the caller holds hdev->lock */ 2789 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) 2790 { 2791 struct adv_info *cur_instance; 2792 2793 cur_instance = hci_find_adv_instance(hdev, instance); 2794 if (!cur_instance) 2795 return NULL; 2796 2797 if (cur_instance == list_last_entry(&hdev->adv_instances, 2798 struct adv_info, list)) 2799 return list_first_entry(&hdev->adv_instances, 2800 struct adv_info, list); 2801 else 2802 return list_next_entry(cur_instance, list); 2803 } 2804 2805 /* This function requires the caller holds hdev->lock */ 2806 int 
hci_remove_adv_instance(struct hci_dev *hdev, u8 instance) 2807 { 2808 struct adv_info *adv_instance; 2809 2810 adv_instance = hci_find_adv_instance(hdev, instance); 2811 if (!adv_instance) 2812 return -ENOENT; 2813 2814 BT_DBG("%s removing %dMR", hdev->name, instance); 2815 2816 if (hdev->cur_adv_instance == instance) { 2817 if (hdev->adv_instance_timeout) { 2818 cancel_delayed_work(&hdev->adv_instance_expire); 2819 hdev->adv_instance_timeout = 0; 2820 } 2821 hdev->cur_adv_instance = 0x00; 2822 } 2823 2824 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); 2825 2826 list_del(&adv_instance->list); 2827 kfree(adv_instance); 2828 2829 hdev->adv_instance_cnt--; 2830 2831 return 0; 2832 } 2833 2834 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired) 2835 { 2836 struct adv_info *adv_instance, *n; 2837 2838 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) 2839 adv_instance->rpa_expired = rpa_expired; 2840 } 2841 2842 /* This function requires the caller holds hdev->lock */ 2843 void hci_adv_instances_clear(struct hci_dev *hdev) 2844 { 2845 struct adv_info *adv_instance, *n; 2846 2847 if (hdev->adv_instance_timeout) { 2848 cancel_delayed_work(&hdev->adv_instance_expire); 2849 hdev->adv_instance_timeout = 0; 2850 } 2851 2852 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { 2853 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); 2854 list_del(&adv_instance->list); 2855 kfree(adv_instance); 2856 } 2857 2858 hdev->adv_instance_cnt = 0; 2859 hdev->cur_adv_instance = 0x00; 2860 } 2861 2862 static void adv_instance_rpa_expired(struct work_struct *work) 2863 { 2864 struct adv_info *adv_instance = container_of(work, struct adv_info, 2865 rpa_expired_cb.work); 2866 2867 BT_DBG(""); 2868 2869 adv_instance->rpa_expired = true; 2870 } 2871 2872 /* This function requires the caller holds hdev->lock */ 2873 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, 2874 u16 adv_data_len, u8 *adv_data, 2875 u16 scan_rsp_len, u8 *scan_rsp_data, 2876 u16 timeout, u16 duration, s8 tx_power, 2877 u32 min_interval, u32 max_interval) 2878 { 2879 struct adv_info *adv_instance; 2880 2881 adv_instance = hci_find_adv_instance(hdev, instance); 2882 if (adv_instance) { 2883 memset(adv_instance->adv_data, 0, 2884 sizeof(adv_instance->adv_data)); 2885 memset(adv_instance->scan_rsp_data, 0, 2886 sizeof(adv_instance->scan_rsp_data)); 2887 } else { 2888 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets || 2889 instance < 1 || instance > hdev->le_num_of_adv_sets) 2890 return -EOVERFLOW; 2891 2892 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL); 2893 if (!adv_instance) 2894 return -ENOMEM; 2895 2896 adv_instance->pending = true; 2897 adv_instance->instance = instance; 2898 list_add(&adv_instance->list, &hdev->adv_instances); 2899 hdev->adv_instance_cnt++; 2900 } 2901 2902 adv_instance->flags = flags; 2903 adv_instance->adv_data_len = adv_data_len; 2904 adv_instance->scan_rsp_len = scan_rsp_len; 2905 adv_instance->min_interval = min_interval; 2906 adv_instance->max_interval = max_interval; 2907 adv_instance->tx_power = tx_power; 2908 2909 if (adv_data_len) 2910 memcpy(adv_instance->adv_data, adv_data, adv_data_len); 2911 2912 if (scan_rsp_len) 2913 memcpy(adv_instance->scan_rsp_data, 2914 scan_rsp_data, scan_rsp_len); 2915 2916 adv_instance->timeout = timeout; 2917 adv_instance->remaining_time = timeout; 2918 2919 if (duration == 0) 2920 adv_instance->duration = hdev->def_multi_adv_rotation_duration; 2921 else 2922 
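/* A non-zero duration was requested by the caller; use it instead of
 * the controller's default rotation duration.
 */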
adv_instance->duration = duration; 2923 2924 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb, 2925 adv_instance_rpa_expired); 2926 2927 BT_DBG("%s for %dMR", hdev->name, instance); 2928 2929 return 0; 2930 } 2931 2932 /* This function requires the caller holds hdev->lock */ 2933 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, 2934 u16 adv_data_len, u8 *adv_data, 2935 u16 scan_rsp_len, u8 *scan_rsp_data) 2936 { 2937 struct adv_info *adv_instance; 2938 2939 adv_instance = hci_find_adv_instance(hdev, instance); 2940 2941 /* If advertisement doesn't exist, we can't modify its data */ 2942 if (!adv_instance) 2943 return -ENOENT; 2944 2945 if (adv_data_len) { 2946 memset(adv_instance->adv_data, 0, 2947 sizeof(adv_instance->adv_data)); 2948 memcpy(adv_instance->adv_data, adv_data, adv_data_len); 2949 adv_instance->adv_data_len = adv_data_len; 2950 } 2951 2952 if (scan_rsp_len) { 2953 memset(adv_instance->scan_rsp_data, 0, 2954 sizeof(adv_instance->scan_rsp_data)); 2955 memcpy(adv_instance->scan_rsp_data, 2956 scan_rsp_data, scan_rsp_len); 2957 adv_instance->scan_rsp_len = scan_rsp_len; 2958 } 2959 2960 return 0; 2961 } 2962 2963 /* This function requires the caller holds hdev->lock */ 2964 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance) 2965 { 2966 u32 flags; 2967 struct adv_info *adv; 2968 2969 if (instance == 0x00) { 2970 /* Instance 0 always manages the "Tx Power" and "Flags" 2971 * fields 2972 */ 2973 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; 2974 2975 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting 2976 * corresponds to the "connectable" instance flag. 2977 */ 2978 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) 2979 flags |= MGMT_ADV_FLAG_CONNECTABLE; 2980 2981 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) 2982 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; 2983 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) 2984 flags |= MGMT_ADV_FLAG_DISCOV; 2985 2986 return flags; 2987 } 2988 2989 adv = hci_find_adv_instance(hdev, instance); 2990 2991 /* Return 0 when we got an invalid instance identifier. */ 2992 if (!adv) 2993 return 0; 2994 2995 return adv->flags; 2996 } 2997 2998 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance) 2999 { 3000 struct adv_info *adv; 3001 3002 /* Instance 0x00 always set local name */ 3003 if (instance == 0x00) 3004 return true; 3005 3006 adv = hci_find_adv_instance(hdev, instance); 3007 if (!adv) 3008 return false; 3009 3010 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE || 3011 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) 3012 return true; 3013 3014 return adv->scan_rsp_len ? true : false; 3015 } 3016 3017 /* This function requires the caller holds hdev->lock */ 3018 void hci_adv_monitors_clear(struct hci_dev *hdev) 3019 { 3020 struct adv_monitor *monitor; 3021 int handle; 3022 3023 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) 3024 hci_free_adv_monitor(hdev, monitor); 3025 3026 idr_destroy(&hdev->adv_monitors_idr); 3027 } 3028 3029 /* Frees the monitor structure and do some bookkeepings. 3030 * This function requires the caller holds hdev->lock. 
3031 */ 3032 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) 3033 { 3034 struct adv_pattern *pattern; 3035 struct adv_pattern *tmp; 3036 3037 if (!monitor) 3038 return; 3039 3040 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) { 3041 list_del(&pattern->list); 3042 kfree(pattern); 3043 } 3044 3045 if (monitor->handle) 3046 idr_remove(&hdev->adv_monitors_idr, monitor->handle); 3047 3048 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) { 3049 hdev->adv_monitors_cnt--; 3050 mgmt_adv_monitor_removed(hdev, monitor->handle); 3051 } 3052 3053 kfree(monitor); 3054 } 3055 3056 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status) 3057 { 3058 return mgmt_add_adv_patterns_monitor_complete(hdev, status); 3059 } 3060 3061 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status) 3062 { 3063 return mgmt_remove_adv_monitor_complete(hdev, status); 3064 } 3065 3066 /* Assigns handle to a monitor, and if offloading is supported and power is on, 3067 * also attempts to forward the request to the controller. 3068 * Returns true if request is forwarded (result is pending), false otherwise. 3069 * This function requires the caller holds hdev->lock. 3070 */ 3071 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor, 3072 int *err) 3073 { 3074 int min, max, handle; 3075 3076 *err = 0; 3077 3078 if (!monitor) { 3079 *err = -EINVAL; 3080 return false; 3081 } 3082 3083 min = HCI_MIN_ADV_MONITOR_HANDLE; 3084 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; 3085 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, 3086 GFP_KERNEL); 3087 if (handle < 0) { 3088 *err = handle; 3089 return false; 3090 } 3091 3092 monitor->handle = handle; 3093 3094 if (!hdev_is_powered(hdev)) 3095 return false; 3096 3097 switch (hci_get_adv_monitor_offload_ext(hdev)) { 3098 case HCI_ADV_MONITOR_EXT_NONE: 3099 hci_update_background_scan(hdev); 3100 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err); 3101 /* Message was not forwarded to controller - not an error */ 3102 return false; 3103 case HCI_ADV_MONITOR_EXT_MSFT: 3104 *err = msft_add_monitor_pattern(hdev, monitor); 3105 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name, 3106 *err); 3107 break; 3108 } 3109 3110 return (*err == 0); 3111 } 3112 3113 /* Attempts to tell the controller and free the monitor. If somehow the 3114 * controller doesn't have a corresponding handle, remove anyway. 3115 * Returns true if request is forwarded (result is pending), false otherwise. 3116 * This function requires the caller holds hdev->lock. 3117 */ 3118 static bool hci_remove_adv_monitor(struct hci_dev *hdev, 3119 struct adv_monitor *monitor, 3120 u16 handle, int *err) 3121 { 3122 *err = 0; 3123 3124 switch (hci_get_adv_monitor_offload_ext(hdev)) { 3125 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */ 3126 goto free_monitor; 3127 case HCI_ADV_MONITOR_EXT_MSFT: 3128 *err = msft_remove_monitor(hdev, monitor, handle); 3129 break; 3130 } 3131 3132 /* In case no matching handle registered, just free the monitor */ 3133 if (*err == -ENOENT) 3134 goto free_monitor; 3135 3136 return (*err == 0); 3137 3138 free_monitor: 3139 if (*err == -ENOENT) 3140 bt_dev_warn(hdev, "Removing monitor with no matching handle %d", 3141 monitor->handle); 3142 hci_free_adv_monitor(hdev, monitor); 3143 3144 *err = 0; 3145 return false; 3146 } 3147 3148 /* Returns true if request is forwarded (result is pending), false otherwise. 
3149 * This function requires the caller holds hdev->lock. 3150 */ 3151 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err) 3152 { 3153 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle); 3154 bool pending; 3155 3156 if (!monitor) { 3157 *err = -EINVAL; 3158 return false; 3159 } 3160 3161 pending = hci_remove_adv_monitor(hdev, monitor, handle, err); 3162 if (!*err && !pending) 3163 hci_update_background_scan(hdev); 3164 3165 bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending", 3166 hdev->name, handle, *err, pending ? "" : "not "); 3167 3168 return pending; 3169 } 3170 3171 /* Returns true if request is forwarded (result is pending), false otherwise. 3172 * This function requires the caller holds hdev->lock. 3173 */ 3174 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err) 3175 { 3176 struct adv_monitor *monitor; 3177 int idr_next_id = 0; 3178 bool pending = false; 3179 bool update = false; 3180 3181 *err = 0; 3182 3183 while (!*err && !pending) { 3184 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id); 3185 if (!monitor) 3186 break; 3187 3188 pending = hci_remove_adv_monitor(hdev, monitor, 0, err); 3189 3190 if (!*err && !pending) 3191 update = true; 3192 } 3193 3194 if (update) 3195 hci_update_background_scan(hdev); 3196 3197 bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending", 3198 hdev->name, *err, pending ? "" : "not "); 3199 3200 return pending; 3201 } 3202 3203 /* This function requires the caller holds hdev->lock */ 3204 bool hci_is_adv_monitoring(struct hci_dev *hdev) 3205 { 3206 return !idr_is_empty(&hdev->adv_monitors_idr); 3207 } 3208 3209 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev) 3210 { 3211 if (msft_monitor_supported(hdev)) 3212 return HCI_ADV_MONITOR_EXT_MSFT; 3213 3214 return HCI_ADV_MONITOR_EXT_NONE; 3215 } 3216 3217 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, 3218 bdaddr_t *bdaddr, u8 type) 3219 { 3220 struct bdaddr_list *b; 3221 3222 list_for_each_entry(b, bdaddr_list, list) { 3223 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) 3224 return b; 3225 } 3226 3227 return NULL; 3228 } 3229 3230 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( 3231 struct list_head *bdaddr_list, bdaddr_t *bdaddr, 3232 u8 type) 3233 { 3234 struct bdaddr_list_with_irk *b; 3235 3236 list_for_each_entry(b, bdaddr_list, list) { 3237 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) 3238 return b; 3239 } 3240 3241 return NULL; 3242 } 3243 3244 struct bdaddr_list_with_flags * 3245 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list, 3246 bdaddr_t *bdaddr, u8 type) 3247 { 3248 struct bdaddr_list_with_flags *b; 3249 3250 list_for_each_entry(b, bdaddr_list, list) { 3251 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) 3252 return b; 3253 } 3254 3255 return NULL; 3256 } 3257 3258 void hci_bdaddr_list_clear(struct list_head *bdaddr_list) 3259 { 3260 struct bdaddr_list *b, *n; 3261 3262 list_for_each_entry_safe(b, n, bdaddr_list, list) { 3263 list_del(&b->list); 3264 kfree(b); 3265 } 3266 } 3267 3268 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type) 3269 { 3270 struct bdaddr_list *entry; 3271 3272 if (!bacmp(bdaddr, BDADDR_ANY)) 3273 return -EBADF; 3274 3275 if (hci_bdaddr_list_lookup(list, bdaddr, type)) 3276 return -EEXIST; 3277 3278 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 3279 if (!entry) 3280 return -ENOMEM; 3281 3282 bacpy(&entry->bdaddr, bdaddr); 3283 entry->bdaddr_type = type; 
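/* The entry is fully initialised before it is linked into the caller
 * supplied list.  Illustrative (hypothetical) use, e.g. adding a device
 * to the LE accept list:
 *
 *	err = hci_bdaddr_list_add(&hdev->le_accept_list, &bdaddr,
 *				  ADDR_LE_DEV_PUBLIC);
 */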
3284 3285 list_add(&entry->list, list); 3286 3287 return 0; 3288 } 3289 3290 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, 3291 u8 type, u8 *peer_irk, u8 *local_irk) 3292 { 3293 struct bdaddr_list_with_irk *entry; 3294 3295 if (!bacmp(bdaddr, BDADDR_ANY)) 3296 return -EBADF; 3297 3298 if (hci_bdaddr_list_lookup(list, bdaddr, type)) 3299 return -EEXIST; 3300 3301 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 3302 if (!entry) 3303 return -ENOMEM; 3304 3305 bacpy(&entry->bdaddr, bdaddr); 3306 entry->bdaddr_type = type; 3307 3308 if (peer_irk) 3309 memcpy(entry->peer_irk, peer_irk, 16); 3310 3311 if (local_irk) 3312 memcpy(entry->local_irk, local_irk, 16); 3313 3314 list_add(&entry->list, list); 3315 3316 return 0; 3317 } 3318 3319 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, 3320 u8 type, u32 flags) 3321 { 3322 struct bdaddr_list_with_flags *entry; 3323 3324 if (!bacmp(bdaddr, BDADDR_ANY)) 3325 return -EBADF; 3326 3327 if (hci_bdaddr_list_lookup(list, bdaddr, type)) 3328 return -EEXIST; 3329 3330 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 3331 if (!entry) 3332 return -ENOMEM; 3333 3334 bacpy(&entry->bdaddr, bdaddr); 3335 entry->bdaddr_type = type; 3336 entry->current_flags = flags; 3337 3338 list_add(&entry->list, list); 3339 3340 return 0; 3341 } 3342 3343 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) 3344 { 3345 struct bdaddr_list *entry; 3346 3347 if (!bacmp(bdaddr, BDADDR_ANY)) { 3348 hci_bdaddr_list_clear(list); 3349 return 0; 3350 } 3351 3352 entry = hci_bdaddr_list_lookup(list, bdaddr, type); 3353 if (!entry) 3354 return -ENOENT; 3355 3356 list_del(&entry->list); 3357 kfree(entry); 3358 3359 return 0; 3360 } 3361 3362 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, 3363 u8 type) 3364 { 3365 struct bdaddr_list_with_irk *entry; 3366 3367 if (!bacmp(bdaddr, BDADDR_ANY)) { 3368 hci_bdaddr_list_clear(list); 3369 return 0; 3370 } 3371 3372 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type); 3373 if (!entry) 3374 return -ENOENT; 3375 3376 list_del(&entry->list); 3377 kfree(entry); 3378 3379 return 0; 3380 } 3381 3382 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, 3383 u8 type) 3384 { 3385 struct bdaddr_list_with_flags *entry; 3386 3387 if (!bacmp(bdaddr, BDADDR_ANY)) { 3388 hci_bdaddr_list_clear(list); 3389 return 0; 3390 } 3391 3392 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type); 3393 if (!entry) 3394 return -ENOENT; 3395 3396 list_del(&entry->list); 3397 kfree(entry); 3398 3399 return 0; 3400 } 3401 3402 /* This function requires the caller holds hdev->lock */ 3403 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, 3404 bdaddr_t *addr, u8 addr_type) 3405 { 3406 struct hci_conn_params *params; 3407 3408 list_for_each_entry(params, &hdev->le_conn_params, list) { 3409 if (bacmp(&params->addr, addr) == 0 && 3410 params->addr_type == addr_type) { 3411 return params; 3412 } 3413 } 3414 3415 return NULL; 3416 } 3417 3418 /* This function requires the caller holds hdev->lock */ 3419 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, 3420 bdaddr_t *addr, u8 addr_type) 3421 { 3422 struct hci_conn_params *param; 3423 3424 list_for_each_entry(param, list, action) { 3425 if (bacmp(&param->addr, addr) == 0 && 3426 param->addr_type == addr_type) 3427 return param; 3428 } 3429 3430 return NULL; 3431 } 3432 3433 /* This function requires the caller holds hdev->lock */ 3434 struct hci_conn_params
*hci_conn_params_add(struct hci_dev *hdev, 3435 bdaddr_t *addr, u8 addr_type) 3436 { 3437 struct hci_conn_params *params; 3438 3439 params = hci_conn_params_lookup(hdev, addr, addr_type); 3440 if (params) 3441 return params; 3442 3443 params = kzalloc(sizeof(*params), GFP_KERNEL); 3444 if (!params) { 3445 bt_dev_err(hdev, "out of memory"); 3446 return NULL; 3447 } 3448 3449 bacpy(&params->addr, addr); 3450 params->addr_type = addr_type; 3451 3452 list_add(&params->list, &hdev->le_conn_params); 3453 INIT_LIST_HEAD(&params->action); 3454 3455 params->conn_min_interval = hdev->le_conn_min_interval; 3456 params->conn_max_interval = hdev->le_conn_max_interval; 3457 params->conn_latency = hdev->le_conn_latency; 3458 params->supervision_timeout = hdev->le_supv_timeout; 3459 params->auto_connect = HCI_AUTO_CONN_DISABLED; 3460 3461 BT_DBG("addr %pMR (type %u)", addr, addr_type); 3462 3463 return params; 3464 } 3465 3466 static void hci_conn_params_free(struct hci_conn_params *params) 3467 { 3468 if (params->conn) { 3469 hci_conn_drop(params->conn); 3470 hci_conn_put(params->conn); 3471 } 3472 3473 list_del(&params->action); 3474 list_del(&params->list); 3475 kfree(params); 3476 } 3477 3478 /* This function requires the caller holds hdev->lock */ 3479 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) 3480 { 3481 struct hci_conn_params *params; 3482 3483 params = hci_conn_params_lookup(hdev, addr, addr_type); 3484 if (!params) 3485 return; 3486 3487 hci_conn_params_free(params); 3488 3489 hci_update_background_scan(hdev); 3490 3491 BT_DBG("addr %pMR (type %u)", addr, addr_type); 3492 } 3493 3494 /* This function requires the caller holds hdev->lock */ 3495 void hci_conn_params_clear_disabled(struct hci_dev *hdev) 3496 { 3497 struct hci_conn_params *params, *tmp; 3498 3499 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { 3500 if (params->auto_connect != HCI_AUTO_CONN_DISABLED) 3501 continue; 3502 3503 /* If trying to establish one time connection to disabled 3504 * device, leave the params, but mark them as just once. 3505 */ 3506 if (params->explicit_connect) { 3507 params->auto_connect = HCI_AUTO_CONN_EXPLICIT; 3508 continue; 3509 } 3510 3511 list_del(&params->list); 3512 kfree(params); 3513 } 3514 3515 BT_DBG("All LE disabled connection parameters were removed"); 3516 } 3517 3518 /* This function requires the caller holds hdev->lock */ 3519 static void hci_conn_params_clear_all(struct hci_dev *hdev) 3520 { 3521 struct hci_conn_params *params, *tmp; 3522 3523 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) 3524 hci_conn_params_free(params); 3525 3526 BT_DBG("All LE connection parameters were removed"); 3527 } 3528 3529 /* Copy the Identity Address of the controller. 3530 * 3531 * If the controller has a public BD_ADDR, then by default use that one. 3532 * If this is a LE only controller without a public address, default to 3533 * the static random address. 3534 * 3535 * For debugging purposes it is possible to force controllers with a 3536 * public address to use the static random address instead. 3537 * 3538 * In case BR/EDR has been disabled on a dual-mode controller and 3539 * userspace has configured a static address, then that address 3540 * becomes the identity address instead of the public BR/EDR address.
3541 */ 3542 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, 3543 u8 *bdaddr_type) 3544 { 3545 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || 3546 !bacmp(&hdev->bdaddr, BDADDR_ANY) || 3547 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && 3548 bacmp(&hdev->static_addr, BDADDR_ANY))) { 3549 bacpy(bdaddr, &hdev->static_addr); 3550 *bdaddr_type = ADDR_LE_DEV_RANDOM; 3551 } else { 3552 bacpy(bdaddr, &hdev->bdaddr); 3553 *bdaddr_type = ADDR_LE_DEV_PUBLIC; 3554 } 3555 } 3556 3557 static void hci_suspend_clear_tasks(struct hci_dev *hdev) 3558 { 3559 int i; 3560 3561 for (i = 0; i < __SUSPEND_NUM_TASKS; i++) 3562 clear_bit(i, hdev->suspend_tasks); 3563 3564 wake_up(&hdev->suspend_wait_q); 3565 } 3566 3567 static int hci_suspend_wait_event(struct hci_dev *hdev) 3568 { 3569 #define WAKE_COND \ 3570 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \ 3571 __SUSPEND_NUM_TASKS) 3572 3573 int i; 3574 int ret = wait_event_timeout(hdev->suspend_wait_q, 3575 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT); 3576 3577 if (ret == 0) { 3578 bt_dev_err(hdev, "Timed out waiting for suspend events"); 3579 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) { 3580 if (test_bit(i, hdev->suspend_tasks)) 3581 bt_dev_err(hdev, "Suspend timeout bit: %d", i); 3582 clear_bit(i, hdev->suspend_tasks); 3583 } 3584 3585 ret = -ETIMEDOUT; 3586 } else { 3587 ret = 0; 3588 } 3589 3590 return ret; 3591 } 3592 3593 static void hci_prepare_suspend(struct work_struct *work) 3594 { 3595 struct hci_dev *hdev = 3596 container_of(work, struct hci_dev, suspend_prepare); 3597 3598 hci_dev_lock(hdev); 3599 hci_req_prepare_suspend(hdev, hdev->suspend_state_next); 3600 hci_dev_unlock(hdev); 3601 } 3602 3603 static int hci_change_suspend_state(struct hci_dev *hdev, 3604 enum suspended_state next) 3605 { 3606 hdev->suspend_state_next = next; 3607 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks); 3608 queue_work(hdev->req_workqueue, &hdev->suspend_prepare); 3609 return hci_suspend_wait_event(hdev); 3610 } 3611 3612 static void hci_clear_wake_reason(struct hci_dev *hdev) 3613 { 3614 hci_dev_lock(hdev); 3615 3616 hdev->wake_reason = 0; 3617 bacpy(&hdev->wake_addr, BDADDR_ANY); 3618 hdev->wake_addr_type = 0; 3619 3620 hci_dev_unlock(hdev); 3621 } 3622 3623 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, 3624 void *data) 3625 { 3626 struct hci_dev *hdev = 3627 container_of(nb, struct hci_dev, suspend_notifier); 3628 int ret = 0; 3629 3630 if (action == PM_SUSPEND_PREPARE) 3631 ret = hci_suspend_dev(hdev); 3632 else if (action == PM_POST_SUSPEND) 3633 ret = hci_resume_dev(hdev); 3634 3635 if (ret) 3636 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", 3637 action, ret); 3638 3639 return NOTIFY_DONE; 3640 } 3641 3642 /* Alloc HCI device */ 3643 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) 3644 { 3645 struct hci_dev *hdev; 3646 unsigned int alloc_size; 3647 3648 alloc_size = sizeof(*hdev); 3649 if (sizeof_priv) { 3650 /* Fixme: May need ALIGN-ment? 
*/ 3651 alloc_size += sizeof_priv; 3652 } 3653 3654 hdev = kzalloc(alloc_size, GFP_KERNEL); 3655 if (!hdev) 3656 return NULL; 3657 3658 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); 3659 hdev->esco_type = (ESCO_HV1); 3660 hdev->link_mode = (HCI_LM_ACCEPT); 3661 hdev->num_iac = 0x01; /* One IAC support is mandatory */ 3662 hdev->io_capability = 0x03; /* No Input No Output */ 3663 hdev->manufacturer = 0xffff; /* Default to internal use */ 3664 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 3665 hdev->adv_tx_power = HCI_TX_POWER_INVALID; 3666 hdev->adv_instance_cnt = 0; 3667 hdev->cur_adv_instance = 0x00; 3668 hdev->adv_instance_timeout = 0; 3669 3670 hdev->advmon_allowlist_duration = 300; 3671 hdev->advmon_no_filter_duration = 500; 3672 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */ 3673 3674 hdev->sniff_max_interval = 800; 3675 hdev->sniff_min_interval = 80; 3676 3677 hdev->le_adv_channel_map = 0x07; 3678 hdev->le_adv_min_interval = 0x0800; 3679 hdev->le_adv_max_interval = 0x0800; 3680 hdev->le_scan_interval = 0x0060; 3681 hdev->le_scan_window = 0x0030; 3682 hdev->le_scan_int_suspend = 0x0400; 3683 hdev->le_scan_window_suspend = 0x0012; 3684 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT; 3685 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN; 3686 hdev->le_scan_int_adv_monitor = 0x0060; 3687 hdev->le_scan_window_adv_monitor = 0x0030; 3688 hdev->le_scan_int_connect = 0x0060; 3689 hdev->le_scan_window_connect = 0x0060; 3690 hdev->le_conn_min_interval = 0x0018; 3691 hdev->le_conn_max_interval = 0x0028; 3692 hdev->le_conn_latency = 0x0000; 3693 hdev->le_supv_timeout = 0x002a; 3694 hdev->le_def_tx_len = 0x001b; 3695 hdev->le_def_tx_time = 0x0148; 3696 hdev->le_max_tx_len = 0x001b; 3697 hdev->le_max_tx_time = 0x0148; 3698 hdev->le_max_rx_len = 0x001b; 3699 hdev->le_max_rx_time = 0x0148; 3700 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE; 3701 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE; 3702 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; 3703 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; 3704 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; 3705 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION; 3706 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT; 3707 hdev->min_le_tx_power = HCI_TX_POWER_INVALID; 3708 hdev->max_le_tx_power = HCI_TX_POWER_INVALID; 3709 3710 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; 3711 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; 3712 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; 3713 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; 3714 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; 3715 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; 3716 3717 /* default 1.28 sec page scan */ 3718 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD; 3719 hdev->def_page_scan_int = 0x0800; 3720 hdev->def_page_scan_window = 0x0012; 3721 3722 mutex_init(&hdev->lock); 3723 mutex_init(&hdev->req_lock); 3724 3725 INIT_LIST_HEAD(&hdev->mgmt_pending); 3726 INIT_LIST_HEAD(&hdev->reject_list); 3727 INIT_LIST_HEAD(&hdev->accept_list); 3728 INIT_LIST_HEAD(&hdev->uuids); 3729 INIT_LIST_HEAD(&hdev->link_keys); 3730 INIT_LIST_HEAD(&hdev->long_term_keys); 3731 INIT_LIST_HEAD(&hdev->identity_resolving_keys); 3732 INIT_LIST_HEAD(&hdev->remote_oob_data); 3733 INIT_LIST_HEAD(&hdev->le_accept_list); 3734 INIT_LIST_HEAD(&hdev->le_resolv_list); 3735 INIT_LIST_HEAD(&hdev->le_conn_params); 3736 INIT_LIST_HEAD(&hdev->pend_le_conns); 3737 INIT_LIST_HEAD(&hdev->pend_le_reports); 3738 INIT_LIST_HEAD(&hdev->conn_hash.list); 3739 
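/* Per-controller lists for LE advertising instances, blocked keys and
 * locally supported codecs are initialised below.
 */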
INIT_LIST_HEAD(&hdev->adv_instances); 3740 INIT_LIST_HEAD(&hdev->blocked_keys); 3741 3742 INIT_LIST_HEAD(&hdev->local_codecs); 3743 INIT_WORK(&hdev->rx_work, hci_rx_work); 3744 INIT_WORK(&hdev->cmd_work, hci_cmd_work); 3745 INIT_WORK(&hdev->tx_work, hci_tx_work); 3746 INIT_WORK(&hdev->power_on, hci_power_on); 3747 INIT_WORK(&hdev->error_reset, hci_error_reset); 3748 INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend); 3749 3750 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); 3751 3752 skb_queue_head_init(&hdev->rx_q); 3753 skb_queue_head_init(&hdev->cmd_q); 3754 skb_queue_head_init(&hdev->raw_q); 3755 3756 init_waitqueue_head(&hdev->req_wait_q); 3757 init_waitqueue_head(&hdev->suspend_wait_q); 3758 3759 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); 3760 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); 3761 3762 hci_request_setup(hdev); 3763 3764 hci_init_sysfs(hdev); 3765 discovery_init(hdev); 3766 3767 return hdev; 3768 } 3769 EXPORT_SYMBOL(hci_alloc_dev_priv); 3770 3771 /* Free HCI device */ 3772 void hci_free_dev(struct hci_dev *hdev) 3773 { 3774 /* will free via device release */ 3775 put_device(&hdev->dev); 3776 } 3777 EXPORT_SYMBOL(hci_free_dev); 3778 3779 /* Register HCI device */ 3780 int hci_register_dev(struct hci_dev *hdev) 3781 { 3782 int id, error; 3783 3784 if (!hdev->open || !hdev->close || !hdev->send) 3785 return -EINVAL; 3786 3787 /* Do not allow HCI_AMP devices to register at index 0, 3788 * so the index can be used as the AMP controller ID. 3789 */ 3790 switch (hdev->dev_type) { 3791 case HCI_PRIMARY: 3792 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL); 3793 break; 3794 case HCI_AMP: 3795 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL); 3796 break; 3797 default: 3798 return -EINVAL; 3799 } 3800 3801 if (id < 0) 3802 return id; 3803 3804 sprintf(hdev->name, "hci%d", id); 3805 hdev->id = id; 3806 3807 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 3808 3809 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name); 3810 if (!hdev->workqueue) { 3811 error = -ENOMEM; 3812 goto err; 3813 } 3814 3815 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, 3816 hdev->name); 3817 if (!hdev->req_workqueue) { 3818 destroy_workqueue(hdev->workqueue); 3819 error = -ENOMEM; 3820 goto err; 3821 } 3822 3823 if (!IS_ERR_OR_NULL(bt_debugfs)) 3824 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); 3825 3826 dev_set_name(&hdev->dev, "%s", hdev->name); 3827 3828 error = device_add(&hdev->dev); 3829 if (error < 0) 3830 goto err_wqueue; 3831 3832 hci_leds_init(hdev); 3833 3834 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 3835 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, 3836 hdev); 3837 if (hdev->rfkill) { 3838 if (rfkill_register(hdev->rfkill) < 0) { 3839 rfkill_destroy(hdev->rfkill); 3840 hdev->rfkill = NULL; 3841 } 3842 } 3843 3844 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) 3845 hci_dev_set_flag(hdev, HCI_RFKILLED); 3846 3847 hci_dev_set_flag(hdev, HCI_SETUP); 3848 hci_dev_set_flag(hdev, HCI_AUTO_OFF); 3849 3850 if (hdev->dev_type == HCI_PRIMARY) { 3851 /* Assume BR/EDR support until proven otherwise (such as 3852 * through reading supported features during init. 3853 */ 3854 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); 3855 } 3856 3857 write_lock(&hci_dev_list_lock); 3858 list_add(&hdev->list, &hci_dev_list); 3859 write_unlock(&hci_dev_list_lock); 3860 3861 /* Devices that are marked for raw-only usage are unconfigured 3862 * and should not be included in normal operation. 
3863 */ 3864 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 3865 hci_dev_set_flag(hdev, HCI_UNCONFIGURED); 3866 3867 hci_sock_dev_event(hdev, HCI_DEV_REG); 3868 hci_dev_hold(hdev); 3869 3870 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { 3871 hdev->suspend_notifier.notifier_call = hci_suspend_notifier; 3872 error = register_pm_notifier(&hdev->suspend_notifier); 3873 if (error) 3874 goto err_wqueue; 3875 } 3876 3877 queue_work(hdev->req_workqueue, &hdev->power_on); 3878 3879 idr_init(&hdev->adv_monitors_idr); 3880 msft_register(hdev); 3881 3882 return id; 3883 3884 err_wqueue: 3885 debugfs_remove_recursive(hdev->debugfs); 3886 destroy_workqueue(hdev->workqueue); 3887 destroy_workqueue(hdev->req_workqueue); 3888 err: 3889 ida_simple_remove(&hci_index_ida, hdev->id); 3890 3891 return error; 3892 } 3893 EXPORT_SYMBOL(hci_register_dev); 3894 3895 /* Unregister HCI device */ 3896 void hci_unregister_dev(struct hci_dev *hdev) 3897 { 3898 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 3899 3900 hci_dev_set_flag(hdev, HCI_UNREGISTER); 3901 3902 write_lock(&hci_dev_list_lock); 3903 list_del(&hdev->list); 3904 write_unlock(&hci_dev_list_lock); 3905 3906 cancel_work_sync(&hdev->power_on); 3907 3908 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { 3909 hci_suspend_clear_tasks(hdev); 3910 unregister_pm_notifier(&hdev->suspend_notifier); 3911 cancel_work_sync(&hdev->suspend_prepare); 3912 } 3913 3914 msft_unregister(hdev); 3915 3916 hci_dev_do_close(hdev); 3917 3918 if (!test_bit(HCI_INIT, &hdev->flags) && 3919 !hci_dev_test_flag(hdev, HCI_SETUP) && 3920 !hci_dev_test_flag(hdev, HCI_CONFIG)) { 3921 hci_dev_lock(hdev); 3922 mgmt_index_removed(hdev); 3923 hci_dev_unlock(hdev); 3924 } 3925 3926 /* mgmt_index_removed should take care of emptying the 3927 * pending list */ 3928 BUG_ON(!list_empty(&hdev->mgmt_pending)); 3929 3930 hci_sock_dev_event(hdev, HCI_DEV_UNREG); 3931 3932 if (hdev->rfkill) { 3933 rfkill_unregister(hdev->rfkill); 3934 rfkill_destroy(hdev->rfkill); 3935 } 3936 3937 device_del(&hdev->dev); 3938 /* Actual cleanup is deferred until hci_release_dev(). */ 3939 hci_dev_put(hdev); 3940 } 3941 EXPORT_SYMBOL(hci_unregister_dev); 3942 3943 /* Release HCI device */ 3944 void hci_release_dev(struct hci_dev *hdev) 3945 { 3946 debugfs_remove_recursive(hdev->debugfs); 3947 kfree_const(hdev->hw_info); 3948 kfree_const(hdev->fw_info); 3949 3950 destroy_workqueue(hdev->workqueue); 3951 destroy_workqueue(hdev->req_workqueue); 3952 3953 hci_dev_lock(hdev); 3954 hci_bdaddr_list_clear(&hdev->reject_list); 3955 hci_bdaddr_list_clear(&hdev->accept_list); 3956 hci_uuids_clear(hdev); 3957 hci_link_keys_clear(hdev); 3958 hci_smp_ltks_clear(hdev); 3959 hci_smp_irks_clear(hdev); 3960 hci_remote_oob_data_clear(hdev); 3961 hci_adv_instances_clear(hdev); 3962 hci_adv_monitors_clear(hdev); 3963 hci_bdaddr_list_clear(&hdev->le_accept_list); 3964 hci_bdaddr_list_clear(&hdev->le_resolv_list); 3965 hci_conn_params_clear_all(hdev); 3966 hci_discovery_filter_clear(hdev); 3967 hci_blocked_keys_clear(hdev); 3968 hci_dev_unlock(hdev); 3969 3970 ida_simple_remove(&hci_index_ida, hdev->id); 3971 kfree(hdev); 3972 } 3973 EXPORT_SYMBOL(hci_release_dev); 3974 3975 /* Suspend HCI device */ 3976 int hci_suspend_dev(struct hci_dev *hdev) 3977 { 3978 int ret; 3979 u8 state = BT_RUNNING; 3980 3981 bt_dev_dbg(hdev, ""); 3982 3983 /* Suspend should only act on when powered. 
*/ 3984 if (!hdev_is_powered(hdev) || 3985 hci_dev_test_flag(hdev, HCI_UNREGISTER)) 3986 return 0; 3987 3988 /* If powering down, wait for completion. */ 3989 if (mgmt_powering_down(hdev)) { 3990 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks); 3991 ret = hci_suspend_wait_event(hdev); 3992 if (ret) 3993 goto done; 3994 } 3995 3996 /* Suspend consists of two actions: 3997 * - First, disconnect everything and make the controller not 3998 * connectable (disabling scanning) 3999 * - Second, program event filter/accept list and enable scan 4000 */ 4001 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT); 4002 if (ret) 4003 goto clear; 4004 4005 state = BT_SUSPEND_DISCONNECT; 4006 4007 /* Only configure accept list if device may wakeup. */ 4008 if (hdev->wakeup && hdev->wakeup(hdev)) { 4009 ret = hci_change_suspend_state(hdev, BT_SUSPEND_CONFIGURE_WAKE); 4010 if (!ret) 4011 state = BT_SUSPEND_CONFIGURE_WAKE; 4012 } 4013 4014 clear: 4015 hci_clear_wake_reason(hdev); 4016 mgmt_suspending(hdev, state); 4017 4018 done: 4019 /* We always allow suspend even if suspend preparation failed and 4020 * attempt to recover in resume. 4021 */ 4022 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); 4023 return ret; 4024 } 4025 EXPORT_SYMBOL(hci_suspend_dev); 4026 4027 /* Resume HCI device */ 4028 int hci_resume_dev(struct hci_dev *hdev) 4029 { 4030 int ret; 4031 4032 bt_dev_dbg(hdev, ""); 4033 4034 /* Resume should only act on when powered. */ 4035 if (!hdev_is_powered(hdev) || 4036 hci_dev_test_flag(hdev, HCI_UNREGISTER)) 4037 return 0; 4038 4039 /* If powering down don't attempt to resume */ 4040 if (mgmt_powering_down(hdev)) 4041 return 0; 4042 4043 ret = hci_change_suspend_state(hdev, BT_RUNNING); 4044 4045 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, 4046 hdev->wake_addr_type); 4047 4048 hci_sock_dev_event(hdev, HCI_DEV_RESUME); 4049 return ret; 4050 } 4051 EXPORT_SYMBOL(hci_resume_dev); 4052 4053 /* Reset HCI device */ 4054 int hci_reset_dev(struct hci_dev *hdev) 4055 { 4056 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 }; 4057 struct sk_buff *skb; 4058 4059 skb = bt_skb_alloc(3, GFP_ATOMIC); 4060 if (!skb) 4061 return -ENOMEM; 4062 4063 hci_skb_pkt_type(skb) = HCI_EVENT_PKT; 4064 skb_put_data(skb, hw_err, 3); 4065 4066 bt_dev_err(hdev, "Injecting HCI hardware error event"); 4067 4068 /* Send Hardware Error to upper stack */ 4069 return hci_recv_frame(hdev, skb); 4070 } 4071 EXPORT_SYMBOL(hci_reset_dev); 4072 4073 /* Receive frame from HCI drivers */ 4074 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) 4075 { 4076 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) 4077 && !test_bit(HCI_INIT, &hdev->flags))) { 4078 kfree_skb(skb); 4079 return -ENXIO; 4080 } 4081 4082 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT && 4083 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && 4084 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && 4085 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) { 4086 kfree_skb(skb); 4087 return -EINVAL; 4088 } 4089 4090 /* Incoming skb */ 4091 bt_cb(skb)->incoming = 1; 4092 4093 /* Time stamp */ 4094 __net_timestamp(skb); 4095 4096 skb_queue_tail(&hdev->rx_q, skb); 4097 queue_work(hdev->workqueue, &hdev->rx_work); 4098 4099 return 0; 4100 } 4101 EXPORT_SYMBOL(hci_recv_frame); 4102 4103 /* Receive diagnostic message from HCI drivers */ 4104 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb) 4105 { 4106 /* Mark as diagnostic packet */ 4107 hci_skb_pkt_type(skb) = HCI_DIAG_PKT; 4108 4109 /* Time stamp */ 4110 __net_timestamp(skb); 4111 4112 
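/* Diagnostic frames take the normal RX path; hci_rx_work will pick
 * them up from rx_q like any other received packet.
 */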
skb_queue_tail(&hdev->rx_q, skb); 4113 queue_work(hdev->workqueue, &hdev->rx_work); 4114 4115 return 0; 4116 } 4117 EXPORT_SYMBOL(hci_recv_diag); 4118 4119 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...) 4120 { 4121 va_list vargs; 4122 4123 va_start(vargs, fmt); 4124 kfree_const(hdev->hw_info); 4125 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); 4126 va_end(vargs); 4127 } 4128 EXPORT_SYMBOL(hci_set_hw_info); 4129 4130 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...) 4131 { 4132 va_list vargs; 4133 4134 va_start(vargs, fmt); 4135 kfree_const(hdev->fw_info); 4136 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); 4137 va_end(vargs); 4138 } 4139 EXPORT_SYMBOL(hci_set_fw_info); 4140 4141 /* ---- Interface to upper protocols ---- */ 4142 4143 int hci_register_cb(struct hci_cb *cb) 4144 { 4145 BT_DBG("%p name %s", cb, cb->name); 4146 4147 mutex_lock(&hci_cb_list_lock); 4148 list_add_tail(&cb->list, &hci_cb_list); 4149 mutex_unlock(&hci_cb_list_lock); 4150 4151 return 0; 4152 } 4153 EXPORT_SYMBOL(hci_register_cb); 4154 4155 int hci_unregister_cb(struct hci_cb *cb) 4156 { 4157 BT_DBG("%p name %s", cb, cb->name); 4158 4159 mutex_lock(&hci_cb_list_lock); 4160 list_del(&cb->list); 4161 mutex_unlock(&hci_cb_list_lock); 4162 4163 return 0; 4164 } 4165 EXPORT_SYMBOL(hci_unregister_cb); 4166 4167 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) 4168 { 4169 int err; 4170 4171 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb), 4172 skb->len); 4173 4174 /* Time stamp */ 4175 __net_timestamp(skb); 4176 4177 /* Send copy to monitor */ 4178 hci_send_to_monitor(hdev, skb); 4179 4180 if (atomic_read(&hdev->promisc)) { 4181 /* Send copy to the sockets */ 4182 hci_send_to_sock(hdev, skb); 4183 } 4184 4185 /* Get rid of skb owner, prior to sending to the driver. */ 4186 skb_orphan(skb); 4187 4188 if (!test_bit(HCI_RUNNING, &hdev->flags)) { 4189 kfree_skb(skb); 4190 return; 4191 } 4192 4193 err = hdev->send(hdev, skb); 4194 if (err < 0) { 4195 bt_dev_err(hdev, "sending frame failed (%d)", err); 4196 kfree_skb(skb); 4197 } 4198 } 4199 4200 /* Send HCI command */ 4201 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, 4202 const void *param) 4203 { 4204 struct sk_buff *skb; 4205 4206 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); 4207 4208 skb = hci_prepare_cmd(hdev, opcode, plen, param); 4209 if (!skb) { 4210 bt_dev_err(hdev, "no memory for command"); 4211 return -ENOMEM; 4212 } 4213 4214 /* Stand-alone HCI commands must be flagged as 4215 * single-command requests. 4216 */ 4217 bt_cb(skb)->hci.req_flags |= HCI_REQ_START; 4218 4219 skb_queue_tail(&hdev->cmd_q, skb); 4220 queue_work(hdev->workqueue, &hdev->cmd_work); 4221 4222 return 0; 4223 } 4224 4225 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, 4226 const void *param) 4227 { 4228 struct sk_buff *skb; 4229 4230 if (hci_opcode_ogf(opcode) != 0x3f) { 4231 /* A controller receiving a command shall respond with either 4232 * a Command Status Event or a Command Complete Event. 4233 * Therefore, all standard HCI commands must be sent via the 4234 * standard API, using hci_send_cmd or hci_cmd_sync helpers. 4235 * Some vendors do not comply with this rule for vendor-specific 4236 * commands and do not return any event. We want to support 4237 * unresponded commands for such cases only. 
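 *
 * A minimal sketch of the intended use from a driver, assuming a
 * hypothetical vendor command with OCF 0x000f and a one-byte payload
 * (both the opcode and the parameter below are illustrative only):
 *
 *	static const u8 param = 0x01;
 *	int err;
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x000f),
 *			     sizeof(param), &param);
 *	if (err)
 *		bt_dev_err(hdev, "vendor command failed (%d)", err);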
4238 */ 4239 bt_dev_err(hdev, "unresponded command not supported"); 4240 return -EINVAL; 4241 } 4242 4243 skb = hci_prepare_cmd(hdev, opcode, plen, param); 4244 if (!skb) { 4245 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", 4246 opcode); 4247 return -ENOMEM; 4248 } 4249 4250 hci_send_frame(hdev, skb); 4251 4252 return 0; 4253 } 4254 EXPORT_SYMBOL(__hci_cmd_send); 4255 4256 /* Get data from the previously sent command */ 4257 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) 4258 { 4259 struct hci_command_hdr *hdr; 4260 4261 if (!hdev->sent_cmd) 4262 return NULL; 4263 4264 hdr = (void *) hdev->sent_cmd->data; 4265 4266 if (hdr->opcode != cpu_to_le16(opcode)) 4267 return NULL; 4268 4269 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); 4270 4271 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; 4272 } 4273 4274 /* Send HCI command and wait for command complete event */ 4275 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, 4276 const void *param, u32 timeout) 4277 { 4278 struct sk_buff *skb; 4279 4280 if (!test_bit(HCI_UP, &hdev->flags)) 4281 return ERR_PTR(-ENETDOWN); 4282 4283 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); 4284 4285 hci_req_sync_lock(hdev); 4286 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout); 4287 hci_req_sync_unlock(hdev); 4288 4289 return skb; 4290 } 4291 EXPORT_SYMBOL(hci_cmd_sync); 4292 4293 /* Send ACL data */ 4294 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) 4295 { 4296 struct hci_acl_hdr *hdr; 4297 int len = skb->len; 4298 4299 skb_push(skb, HCI_ACL_HDR_SIZE); 4300 skb_reset_transport_header(skb); 4301 hdr = (struct hci_acl_hdr *)skb_transport_header(skb); 4302 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); 4303 hdr->dlen = cpu_to_le16(len); 4304 } 4305 4306 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, 4307 struct sk_buff *skb, __u16 flags) 4308 { 4309 struct hci_conn *conn = chan->conn; 4310 struct hci_dev *hdev = conn->hdev; 4311 struct sk_buff *list; 4312 4313 skb->len = skb_headlen(skb); 4314 skb->data_len = 0; 4315 4316 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; 4317 4318 switch (hdev->dev_type) { 4319 case HCI_PRIMARY: 4320 hci_add_acl_hdr(skb, conn->handle, flags); 4321 break; 4322 case HCI_AMP: 4323 hci_add_acl_hdr(skb, chan->handle, flags); 4324 break; 4325 default: 4326 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); 4327 return; 4328 } 4329 4330 list = skb_shinfo(skb)->frag_list; 4331 if (!list) { 4332 /* Non fragmented */ 4333 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); 4334 4335 skb_queue_tail(queue, skb); 4336 } else { 4337 /* Fragmented */ 4338 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 4339 4340 skb_shinfo(skb)->frag_list = NULL; 4341 4342 /* Queue all fragments atomically. We need to use spin_lock_bh 4343 * here because of 6LoWPAN links, as there this function is 4344 * called from softirq and using normal spin lock could cause 4345 * deadlocks. 
4346 */ 4347 spin_lock_bh(&queue->lock); 4348 4349 __skb_queue_tail(queue, skb); 4350 4351 flags &= ~ACL_START; 4352 flags |= ACL_CONT; 4353 do { 4354 skb = list; list = list->next; 4355 4356 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; 4357 hci_add_acl_hdr(skb, conn->handle, flags); 4358 4359 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 4360 4361 __skb_queue_tail(queue, skb); 4362 } while (list); 4363 4364 spin_unlock_bh(&queue->lock); 4365 } 4366 } 4367 4368 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) 4369 { 4370 struct hci_dev *hdev = chan->conn->hdev; 4371 4372 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); 4373 4374 hci_queue_acl(chan, &chan->data_q, skb, flags); 4375 4376 queue_work(hdev->workqueue, &hdev->tx_work); 4377 } 4378 4379 /* Send SCO data */ 4380 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) 4381 { 4382 struct hci_dev *hdev = conn->hdev; 4383 struct hci_sco_hdr hdr; 4384 4385 BT_DBG("%s len %d", hdev->name, skb->len); 4386 4387 hdr.handle = cpu_to_le16(conn->handle); 4388 hdr.dlen = skb->len; 4389 4390 skb_push(skb, HCI_SCO_HDR_SIZE); 4391 skb_reset_transport_header(skb); 4392 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); 4393 4394 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT; 4395 4396 skb_queue_tail(&conn->data_q, skb); 4397 queue_work(hdev->workqueue, &hdev->tx_work); 4398 } 4399 4400 /* ---- HCI TX task (outgoing data) ---- */ 4401 4402 /* HCI Connection scheduler */ 4403 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, 4404 int *quote) 4405 { 4406 struct hci_conn_hash *h = &hdev->conn_hash; 4407 struct hci_conn *conn = NULL, *c; 4408 unsigned int num = 0, min = ~0; 4409 4410 /* We don't have to lock device here. Connections are always 4411 * added and removed with TX task disabled. */ 4412 4413 rcu_read_lock(); 4414 4415 list_for_each_entry_rcu(c, &h->list, list) { 4416 if (c->type != type || skb_queue_empty(&c->data_q)) 4417 continue; 4418 4419 if (c->state != BT_CONNECTED && c->state != BT_CONFIG) 4420 continue; 4421 4422 num++; 4423 4424 if (c->sent < min) { 4425 min = c->sent; 4426 conn = c; 4427 } 4428 4429 if (hci_conn_num(hdev, type) == num) 4430 break; 4431 } 4432 4433 rcu_read_unlock(); 4434 4435 if (conn) { 4436 int cnt, q; 4437 4438 switch (conn->type) { 4439 case ACL_LINK: 4440 cnt = hdev->acl_cnt; 4441 break; 4442 case SCO_LINK: 4443 case ESCO_LINK: 4444 cnt = hdev->sco_cnt; 4445 break; 4446 case LE_LINK: 4447 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; 4448 break; 4449 default: 4450 cnt = 0; 4451 bt_dev_err(hdev, "unknown link type %d", conn->type); 4452 } 4453 4454 q = cnt / num; 4455 *quote = q ? 
q : 1; 4456 } else 4457 *quote = 0; 4458 4459 BT_DBG("conn %p quote %d", conn, *quote); 4460 return conn; 4461 } 4462 4463 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) 4464 { 4465 struct hci_conn_hash *h = &hdev->conn_hash; 4466 struct hci_conn *c; 4467 4468 bt_dev_err(hdev, "link tx timeout"); 4469 4470 rcu_read_lock(); 4471 4472 /* Kill stalled connections */ 4473 list_for_each_entry_rcu(c, &h->list, list) { 4474 if (c->type == type && c->sent) { 4475 bt_dev_err(hdev, "killing stalled connection %pMR", 4476 &c->dst); 4477 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM); 4478 } 4479 } 4480 4481 rcu_read_unlock(); 4482 } 4483 4484 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, 4485 int *quote) 4486 { 4487 struct hci_conn_hash *h = &hdev->conn_hash; 4488 struct hci_chan *chan = NULL; 4489 unsigned int num = 0, min = ~0, cur_prio = 0; 4490 struct hci_conn *conn; 4491 int cnt, q, conn_num = 0; 4492 4493 BT_DBG("%s", hdev->name); 4494 4495 rcu_read_lock(); 4496 4497 list_for_each_entry_rcu(conn, &h->list, list) { 4498 struct hci_chan *tmp; 4499 4500 if (conn->type != type) 4501 continue; 4502 4503 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) 4504 continue; 4505 4506 conn_num++; 4507 4508 list_for_each_entry_rcu(tmp, &conn->chan_list, list) { 4509 struct sk_buff *skb; 4510 4511 if (skb_queue_empty(&tmp->data_q)) 4512 continue; 4513 4514 skb = skb_peek(&tmp->data_q); 4515 if (skb->priority < cur_prio) 4516 continue; 4517 4518 if (skb->priority > cur_prio) { 4519 num = 0; 4520 min = ~0; 4521 cur_prio = skb->priority; 4522 } 4523 4524 num++; 4525 4526 if (conn->sent < min) { 4527 min = conn->sent; 4528 chan = tmp; 4529 } 4530 } 4531 4532 if (hci_conn_num(hdev, type) == conn_num) 4533 break; 4534 } 4535 4536 rcu_read_unlock(); 4537 4538 if (!chan) 4539 return NULL; 4540 4541 switch (chan->conn->type) { 4542 case ACL_LINK: 4543 cnt = hdev->acl_cnt; 4544 break; 4545 case AMP_LINK: 4546 cnt = hdev->block_cnt; 4547 break; 4548 case SCO_LINK: 4549 case ESCO_LINK: 4550 cnt = hdev->sco_cnt; 4551 break; 4552 case LE_LINK: 4553 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; 4554 break; 4555 default: 4556 cnt = 0; 4557 bt_dev_err(hdev, "unknown link type %d", chan->conn->type); 4558 } 4559 4560 q = cnt / num; 4561 *quote = q ? 
q : 1; 4562 BT_DBG("chan %p quote %d", chan, *quote); 4563 return chan; 4564 } 4565 4566 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) 4567 { 4568 struct hci_conn_hash *h = &hdev->conn_hash; 4569 struct hci_conn *conn; 4570 int num = 0; 4571 4572 BT_DBG("%s", hdev->name); 4573 4574 rcu_read_lock(); 4575 4576 list_for_each_entry_rcu(conn, &h->list, list) { 4577 struct hci_chan *chan; 4578 4579 if (conn->type != type) 4580 continue; 4581 4582 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) 4583 continue; 4584 4585 num++; 4586 4587 list_for_each_entry_rcu(chan, &conn->chan_list, list) { 4588 struct sk_buff *skb; 4589 4590 if (chan->sent) { 4591 chan->sent = 0; 4592 continue; 4593 } 4594 4595 if (skb_queue_empty(&chan->data_q)) 4596 continue; 4597 4598 skb = skb_peek(&chan->data_q); 4599 if (skb->priority >= HCI_PRIO_MAX - 1) 4600 continue; 4601 4602 skb->priority = HCI_PRIO_MAX - 1; 4603 4604 BT_DBG("chan %p skb %p promoted to %d", chan, skb, 4605 skb->priority); 4606 } 4607 4608 if (hci_conn_num(hdev, type) == num) 4609 break; 4610 } 4611 4612 rcu_read_unlock(); 4613 4614 } 4615 4616 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb) 4617 { 4618 /* Calculate count of blocks used by this packet */ 4619 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); 4620 } 4621 4622 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt) 4623 { 4624 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 4625 /* ACL tx timeout must be longer than maximum 4626 * link supervision timeout (40.9 seconds) */ 4627 if (!cnt && time_after(jiffies, hdev->acl_last_tx + 4628 HCI_ACL_TX_TIMEOUT)) 4629 hci_link_tx_to(hdev, ACL_LINK); 4630 } 4631 } 4632 4633 /* Schedule SCO */ 4634 static void hci_sched_sco(struct hci_dev *hdev) 4635 { 4636 struct hci_conn *conn; 4637 struct sk_buff *skb; 4638 int quote; 4639 4640 BT_DBG("%s", hdev->name); 4641 4642 if (!hci_conn_num(hdev, SCO_LINK)) 4643 return; 4644 4645 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) { 4646 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 4647 BT_DBG("skb %p len %d", skb, skb->len); 4648 hci_send_frame(hdev, skb); 4649 4650 conn->sent++; 4651 if (conn->sent == ~0) 4652 conn->sent = 0; 4653 } 4654 } 4655 } 4656 4657 static void hci_sched_esco(struct hci_dev *hdev) 4658 { 4659 struct hci_conn *conn; 4660 struct sk_buff *skb; 4661 int quote; 4662 4663 BT_DBG("%s", hdev->name); 4664 4665 if (!hci_conn_num(hdev, ESCO_LINK)) 4666 return; 4667 4668 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, 4669 "e))) { 4670 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 4671 BT_DBG("skb %p len %d", skb, skb->len); 4672 hci_send_frame(hdev, skb); 4673 4674 conn->sent++; 4675 if (conn->sent == ~0) 4676 conn->sent = 0; 4677 } 4678 } 4679 } 4680 4681 static void hci_sched_acl_pkt(struct hci_dev *hdev) 4682 { 4683 unsigned int cnt = hdev->acl_cnt; 4684 struct hci_chan *chan; 4685 struct sk_buff *skb; 4686 int quote; 4687 4688 __check_timeout(hdev, cnt); 4689 4690 while (hdev->acl_cnt && 4691 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { 4692 u32 priority = (skb_peek(&chan->data_q))->priority; 4693 while (quote-- && (skb = skb_peek(&chan->data_q))) { 4694 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 4695 skb->len, skb->priority); 4696 4697 /* Stop if priority has changed */ 4698 if (skb->priority < priority) 4699 break; 4700 4701 skb = skb_dequeue(&chan->data_q); 4702 4703 hci_conn_enter_active_mode(chan->conn, 4704 
bt_cb(skb)->force_active); 4705 4706 hci_send_frame(hdev, skb); 4707 hdev->acl_last_tx = jiffies; 4708 4709 hdev->acl_cnt--; 4710 chan->sent++; 4711 chan->conn->sent++; 4712 4713 /* Send pending SCO packets right away */ 4714 hci_sched_sco(hdev); 4715 hci_sched_esco(hdev); 4716 } 4717 } 4718 4719 if (cnt != hdev->acl_cnt) 4720 hci_prio_recalculate(hdev, ACL_LINK); 4721 } 4722 4723 static void hci_sched_acl_blk(struct hci_dev *hdev) 4724 { 4725 unsigned int cnt = hdev->block_cnt; 4726 struct hci_chan *chan; 4727 struct sk_buff *skb; 4728 int quote; 4729 u8 type; 4730 4731 __check_timeout(hdev, cnt); 4732 4733 BT_DBG("%s", hdev->name); 4734 4735 if (hdev->dev_type == HCI_AMP) 4736 type = AMP_LINK; 4737 else 4738 type = ACL_LINK; 4739 4740 while (hdev->block_cnt > 0 && 4741 (chan = hci_chan_sent(hdev, type, "e))) { 4742 u32 priority = (skb_peek(&chan->data_q))->priority; 4743 while (quote > 0 && (skb = skb_peek(&chan->data_q))) { 4744 int blocks; 4745 4746 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 4747 skb->len, skb->priority); 4748 4749 /* Stop if priority has changed */ 4750 if (skb->priority < priority) 4751 break; 4752 4753 skb = skb_dequeue(&chan->data_q); 4754 4755 blocks = __get_blocks(hdev, skb); 4756 if (blocks > hdev->block_cnt) 4757 return; 4758 4759 hci_conn_enter_active_mode(chan->conn, 4760 bt_cb(skb)->force_active); 4761 4762 hci_send_frame(hdev, skb); 4763 hdev->acl_last_tx = jiffies; 4764 4765 hdev->block_cnt -= blocks; 4766 quote -= blocks; 4767 4768 chan->sent += blocks; 4769 chan->conn->sent += blocks; 4770 } 4771 } 4772 4773 if (cnt != hdev->block_cnt) 4774 hci_prio_recalculate(hdev, type); 4775 } 4776 4777 static void hci_sched_acl(struct hci_dev *hdev) 4778 { 4779 BT_DBG("%s", hdev->name); 4780 4781 /* No ACL link over BR/EDR controller */ 4782 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY) 4783 return; 4784 4785 /* No AMP link over AMP controller */ 4786 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP) 4787 return; 4788 4789 switch (hdev->flow_ctl_mode) { 4790 case HCI_FLOW_CTL_MODE_PACKET_BASED: 4791 hci_sched_acl_pkt(hdev); 4792 break; 4793 4794 case HCI_FLOW_CTL_MODE_BLOCK_BASED: 4795 hci_sched_acl_blk(hdev); 4796 break; 4797 } 4798 } 4799 4800 static void hci_sched_le(struct hci_dev *hdev) 4801 { 4802 struct hci_chan *chan; 4803 struct sk_buff *skb; 4804 int quote, cnt, tmp; 4805 4806 BT_DBG("%s", hdev->name); 4807 4808 if (!hci_conn_num(hdev, LE_LINK)) 4809 return; 4810 4811 cnt = hdev->le_pkts ? 
hdev->le_cnt : hdev->acl_cnt; 4812 4813 __check_timeout(hdev, cnt); 4814 4815 tmp = cnt; 4816 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) { 4817 u32 priority = (skb_peek(&chan->data_q))->priority; 4818 while (quote-- && (skb = skb_peek(&chan->data_q))) { 4819 BT_DBG("chan %p skb %p len %d priority %u", chan, skb, 4820 skb->len, skb->priority); 4821 4822 /* Stop if priority has changed */ 4823 if (skb->priority < priority) 4824 break; 4825 4826 skb = skb_dequeue(&chan->data_q); 4827 4828 hci_send_frame(hdev, skb); 4829 hdev->le_last_tx = jiffies; 4830 4831 cnt--; 4832 chan->sent++; 4833 chan->conn->sent++; 4834 4835 /* Send pending SCO packets right away */ 4836 hci_sched_sco(hdev); 4837 hci_sched_esco(hdev); 4838 } 4839 } 4840 4841 if (hdev->le_pkts) 4842 hdev->le_cnt = cnt; 4843 else 4844 hdev->acl_cnt = cnt; 4845 4846 if (cnt != tmp) 4847 hci_prio_recalculate(hdev, LE_LINK); 4848 } 4849 4850 static void hci_tx_work(struct work_struct *work) 4851 { 4852 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work); 4853 struct sk_buff *skb; 4854 4855 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, 4856 hdev->sco_cnt, hdev->le_cnt); 4857 4858 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 4859 /* Schedule queues and send stuff to HCI driver */ 4860 hci_sched_sco(hdev); 4861 hci_sched_esco(hdev); 4862 hci_sched_acl(hdev); 4863 hci_sched_le(hdev); 4864 } 4865 4866 /* Send next queued raw (unknown type) packet */ 4867 while ((skb = skb_dequeue(&hdev->raw_q))) 4868 hci_send_frame(hdev, skb); 4869 } 4870 4871 /* ----- HCI RX task (incoming data processing) ----- */ 4872 4873 /* ACL data packet */ 4874 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) 4875 { 4876 struct hci_acl_hdr *hdr = (void *) skb->data; 4877 struct hci_conn *conn; 4878 __u16 handle, flags; 4879 4880 skb_pull(skb, HCI_ACL_HDR_SIZE); 4881 4882 handle = __le16_to_cpu(hdr->handle); 4883 flags = hci_flags(handle); 4884 handle = hci_handle(handle); 4885 4886 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, 4887 handle, flags); 4888 4889 hdev->stat.acl_rx++; 4890 4891 hci_dev_lock(hdev); 4892 conn = hci_conn_hash_lookup_handle(hdev, handle); 4893 hci_dev_unlock(hdev); 4894 4895 if (conn) { 4896 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); 4897 4898 /* Send to upper protocol */ 4899 l2cap_recv_acldata(conn, skb, flags); 4900 return; 4901 } else { 4902 bt_dev_err(hdev, "ACL packet for unknown connection handle %d", 4903 handle); 4904 } 4905 4906 kfree_skb(skb); 4907 } 4908 4909 /* SCO data packet */ 4910 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) 4911 { 4912 struct hci_sco_hdr *hdr = (void *) skb->data; 4913 struct hci_conn *conn; 4914 __u16 handle, flags; 4915 4916 skb_pull(skb, HCI_SCO_HDR_SIZE); 4917 4918 handle = __le16_to_cpu(hdr->handle); 4919 flags = hci_flags(handle); 4920 handle = hci_handle(handle); 4921 4922 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, 4923 handle, flags); 4924 4925 hdev->stat.sco_rx++; 4926 4927 hci_dev_lock(hdev); 4928 conn = hci_conn_hash_lookup_handle(hdev, handle); 4929 hci_dev_unlock(hdev); 4930 4931 if (conn) { 4932 /* Send to upper protocol */ 4933 bt_cb(skb)->sco.pkt_status = flags & 0x03; 4934 sco_recv_scodata(conn, skb); 4935 return; 4936 } else { 4937 bt_dev_err(hdev, "SCO packet for unknown connection handle %d", 4938 handle); 4939 } 4940 4941 kfree_skb(skb); 4942 } 4943 4944 static bool hci_req_is_complete(struct hci_dev *hdev) 4945 { 4946 
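	/* The command queue is drained in request order: if the queue is
	 * empty, or the next queued command is flagged HCI_REQ_START (i.e. it
	 * opens a new request), the previously sent request has no commands
	 * left outstanding.
	 */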
struct sk_buff *skb; 4947 4948 skb = skb_peek(&hdev->cmd_q); 4949 if (!skb) 4950 return true; 4951 4952 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START); 4953 } 4954 4955 static void hci_resend_last(struct hci_dev *hdev) 4956 { 4957 struct hci_command_hdr *sent; 4958 struct sk_buff *skb; 4959 u16 opcode; 4960 4961 if (!hdev->sent_cmd) 4962 return; 4963 4964 sent = (void *) hdev->sent_cmd->data; 4965 opcode = __le16_to_cpu(sent->opcode); 4966 if (opcode == HCI_OP_RESET) 4967 return; 4968 4969 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); 4970 if (!skb) 4971 return; 4972 4973 skb_queue_head(&hdev->cmd_q, skb); 4974 queue_work(hdev->workqueue, &hdev->cmd_work); 4975 } 4976 4977 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, 4978 hci_req_complete_t *req_complete, 4979 hci_req_complete_skb_t *req_complete_skb) 4980 { 4981 struct sk_buff *skb; 4982 unsigned long flags; 4983 4984 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status); 4985 4986 /* If the completed command doesn't match the last one that was 4987 * sent we need to do special handling of it. 4988 */ 4989 if (!hci_sent_cmd_data(hdev, opcode)) { 4990 /* Some CSR based controllers generate a spontaneous 4991 * reset complete event during init and any pending 4992 * command will never be completed. In such a case we 4993 * need to resend whatever was the last sent 4994 * command. 4995 */ 4996 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET) 4997 hci_resend_last(hdev); 4998 4999 return; 5000 } 5001 5002 /* If we reach this point this event matches the last command sent */ 5003 hci_dev_clear_flag(hdev, HCI_CMD_PENDING); 5004 5005 /* If the command succeeded and there's still more commands in 5006 * this request the request is not yet complete. 5007 */ 5008 if (!status && !hci_req_is_complete(hdev)) 5009 return; 5010 5011 /* If this was the last command in a request the complete 5012 * callback would be found in hdev->sent_cmd instead of the 5013 * command queue (hdev->cmd_q). 5014 */ 5015 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) { 5016 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb; 5017 return; 5018 } 5019 5020 if (bt_cb(hdev->sent_cmd)->hci.req_complete) { 5021 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete; 5022 return; 5023 } 5024 5025 /* Remove all pending commands belonging to this request */ 5026 spin_lock_irqsave(&hdev->cmd_q.lock, flags); 5027 while ((skb = __skb_dequeue(&hdev->cmd_q))) { 5028 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) { 5029 __skb_queue_head(&hdev->cmd_q, skb); 5030 break; 5031 } 5032 5033 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) 5034 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb; 5035 else 5036 *req_complete = bt_cb(skb)->hci.req_complete; 5037 kfree_skb(skb); 5038 } 5039 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); 5040 } 5041 5042 static void hci_rx_work(struct work_struct *work) 5043 { 5044 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work); 5045 struct sk_buff *skb; 5046 5047 BT_DBG("%s", hdev->name); 5048 5049 while ((skb = skb_dequeue(&hdev->rx_q))) { 5050 /* Send copy to monitor */ 5051 hci_send_to_monitor(hdev, skb); 5052 5053 if (atomic_read(&hdev->promisc)) { 5054 /* Send copy to the sockets */ 5055 hci_send_to_sock(hdev, skb); 5056 } 5057 5058 /* If the device has been opened in HCI_USER_CHANNEL, 5059 * the userspace has exclusive access to device. 5060 * When device is HCI_INIT, we still need to process 5061 * the data packets to the driver in order 5062 * to complete its setup(). 
5063 */ 5064 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 5065 !test_bit(HCI_INIT, &hdev->flags)) { 5066 kfree_skb(skb); 5067 continue; 5068 } 5069 5070 if (test_bit(HCI_INIT, &hdev->flags)) { 5071 /* Don't process data packets in this state. */ 5072 switch (hci_skb_pkt_type(skb)) { 5073 case HCI_ACLDATA_PKT: 5074 case HCI_SCODATA_PKT: 5075 case HCI_ISODATA_PKT: 5076 kfree_skb(skb); 5077 continue; 5078 } 5079 } 5080 5081 /* Process frame */ 5082 switch (hci_skb_pkt_type(skb)) { 5083 case HCI_EVENT_PKT: 5084 BT_DBG("%s Event packet", hdev->name); 5085 hci_event_packet(hdev, skb); 5086 break; 5087 5088 case HCI_ACLDATA_PKT: 5089 BT_DBG("%s ACL data packet", hdev->name); 5090 hci_acldata_packet(hdev, skb); 5091 break; 5092 5093 case HCI_SCODATA_PKT: 5094 BT_DBG("%s SCO data packet", hdev->name); 5095 hci_scodata_packet(hdev, skb); 5096 break; 5097 5098 default: 5099 kfree_skb(skb); 5100 break; 5101 } 5102 } 5103 } 5104 5105 static void hci_cmd_work(struct work_struct *work) 5106 { 5107 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); 5108 struct sk_buff *skb; 5109 5110 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name, 5111 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q)); 5112 5113 /* Send queued commands */ 5114 if (atomic_read(&hdev->cmd_cnt)) { 5115 skb = skb_dequeue(&hdev->cmd_q); 5116 if (!skb) 5117 return; 5118 5119 kfree_skb(hdev->sent_cmd); 5120 5121 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL); 5122 if (hdev->sent_cmd) { 5123 if (hci_req_status_pend(hdev)) 5124 hci_dev_set_flag(hdev, HCI_CMD_PENDING); 5125 atomic_dec(&hdev->cmd_cnt); 5126 hci_send_frame(hdev, skb); 5127 if (test_bit(HCI_RESET, &hdev->flags)) 5128 cancel_delayed_work(&hdev->cmd_timer); 5129 else 5130 schedule_delayed_work(&hdev->cmd_timer, 5131 HCI_CMD_TIMEOUT); 5132 } else { 5133 skb_queue_head(&hdev->cmd_q, skb); 5134 queue_work(hdev->workqueue, &hdev->cmd_work); 5135 } 5136 } 5137 } 5138
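/* A minimal sketch, for illustration only, of how a transport driver is
 * expected to hand a completed frame to the core.  The driver function and
 * buffer below are hypothetical; the only requirements that hci_recv_frame()
 * enforces are that the skb carries one of the four known packet types and
 * that the device is up (or still running its HCI_INIT stage):
 *
 *	static int example_drv_rx_event(struct hci_dev *hdev, const u8 *buf,
 *					size_t len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = bt_skb_alloc(len, GFP_ATOMIC);
 *		if (!skb)
 *			return -ENOMEM;
 *
 *		hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *		skb_put_data(skb, buf, len);
 *
 *		return hci_recv_frame(hdev, skb);
 *	}
 */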