/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
    struct hci_dev *hdev = file->private_data;
    char buf[3];

    buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ?
             'Y' : 'N';
    buf[1] = '\n';
    buf[2] = '\0';
    return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
    struct hci_dev *hdev = file->private_data;
    struct sk_buff *skb;
    bool enable;
    int err;

    if (!test_bit(HCI_UP, &hdev->flags))
        return -ENETDOWN;

    err = kstrtobool_from_user(user_buf, count, &enable);
    if (err)
        return err;

    if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
        return -EALREADY;

    hci_req_sync_lock(hdev);
    if (enable)
        skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                             HCI_CMD_TIMEOUT);
    else
        skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                             HCI_CMD_TIMEOUT);
    hci_req_sync_unlock(hdev);

    if (IS_ERR(skb))
        return PTR_ERR(skb);

    kfree_skb(skb);

    hci_dev_change_flag(hdev, HCI_DUT_MODE);

    return count;
}

static const struct file_operations dut_mode_fops = {
    .open = simple_open,
    .read = dut_mode_read,
    .write = dut_mode_write,
    .llseek = default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
                                size_t count, loff_t *ppos)
{
    struct hci_dev *hdev = file->private_data;
    char buf[3];

    buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
    buf[1] = '\n';
    buf[2] = '\0';
    return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
    struct hci_dev *hdev = file->private_data;
    bool enable;
    int err;

    err = kstrtobool_from_user(user_buf, count, &enable);
    if (err)
        return err;

    /* When the diagnostic flags are not persistent and the transport
     * is not active or in user channel operation, then there is no need
     * for the vendor callback. Instead just store the desired value and
     * the setting will be programmed when the controller gets powered on.
     */
    if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
        (!test_bit(HCI_RUNNING, &hdev->flags) ||
         hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
        goto done;

    hci_req_sync_lock(hdev);
    err = hdev->set_diag(hdev, enable);
    hci_req_sync_unlock(hdev);

    if (err < 0)
        return err;

done:
    if (enable)
        hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
    else
        hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

    return count;
}

static const struct file_operations vendor_diag_fops = {
    .open = simple_open,
    .read = vendor_diag_read,
    .write = vendor_diag_write,
    .llseek = default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
    debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                        &dut_mode_fops);

    if (hdev->set_diag)
        debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
                            &vendor_diag_fops);
}

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
    BT_DBG("%s %ld", req->hdev->name, opt);

    /* Reset device */
    set_bit(HCI_RESET, &req->hdev->flags);
    hci_req_add(req, HCI_OP_RESET, 0, NULL);
    return 0;
}

static void bredr_init(struct hci_request *req)
{
    req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

    /* Read Local Supported Features */
    hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

    /* Read Local Version */
    hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

    /* Read BD Address */
    hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
    req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

    /* Read Local Version */
    hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

    /* Read Local Supported Commands */
    hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

    /* Read Local AMP Info */
    hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

    /* Read Data Blk size */
    hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

    /* Read Flow Control Mode */
    hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

    /* Read Location Data */
    hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
    /* Read Local Supported Features. Not all AMP controllers
     * support this so it's placed conditionally in the second
     * stage init.
     */
    if (req->hdev->commands[14] & 0x20)
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

    return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
    struct hci_dev *hdev = req->hdev;

    BT_DBG("%s %ld", hdev->name, opt);

    /* Reset */
    if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
        hci_reset_req(req, 0);

    switch (hdev->dev_type) {
    case HCI_PRIMARY:
        bredr_init(req);
        break;
    case HCI_AMP:
        amp_init1(req);
        break;
    default:
        bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
        break;
    }

    return 0;
}

static void bredr_setup(struct hci_request *req)
{
    __le16 param;
    __u8 flt_type;

    /* Read Buffer Size (ACL mtu, max pkt, etc.)
     */
    hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

    /* Read Class of Device */
    hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

    /* Read Local Name */
    hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

    /* Read Voice Setting */
    hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

    /* Read Number of Supported IAC */
    hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

    /* Read Current IAC LAP */
    hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

    /* Clear Event Filters */
    flt_type = HCI_FLT_CLEAR_ALL;
    hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

    /* Connection accept timeout ~20 secs */
    param = cpu_to_le16(0x7d00);
    hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
    struct hci_dev *hdev = req->hdev;

    /* Read LE Buffer Size */
    hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

    /* Read LE Local Supported Features */
    hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

    /* Read LE Supported States */
    hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

    /* LE-only controllers have LE implicitly enabled */
    if (!lmp_bredr_capable(hdev))
        hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
    struct hci_dev *hdev = req->hdev;

    /* The second byte is 0xff instead of 0x9f (two reserved bits
     * disabled) since a Broadcom 1.2 dongle doesn't respond to the
     * command otherwise.
     */
    u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

    /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
     * any event mask for pre 1.2 devices.
     */
    if (hdev->hci_ver < BLUETOOTH_VER_1_2)
        return;

    if (lmp_bredr_capable(hdev)) {
        events[4] |= 0x01; /* Flow Specification Complete */
    } else {
        /* Use a different default for LE-only devices */
        memset(events, 0, sizeof(events));
        events[1] |= 0x20; /* Command Complete */
        events[1] |= 0x40; /* Command Status */
        events[1] |= 0x80; /* Hardware Error */

        /* If the controller supports the Disconnect command, enable
         * the corresponding event. In addition enable packet flow
         * control related events.
         */
        if (hdev->commands[0] & 0x20) {
            events[0] |= 0x10; /* Disconnection Complete */
            events[2] |= 0x04; /* Number of Completed Packets */
            events[3] |= 0x02; /* Data Buffer Overflow */
        }

        /* If the controller supports the Read Remote Version
         * Information command, enable the corresponding event.
         */
        if (hdev->commands[2] & 0x80)
            events[1] |= 0x08; /* Read Remote Version Information
                                * Complete
                                */

        if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
            events[0] |= 0x80; /* Encryption Change */
            events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }
    }

    if (lmp_inq_rssi_capable(hdev) ||
        test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
        events[4] |= 0x02; /* Inquiry Result with RSSI */

    if (lmp_ext_feat_capable(hdev))
        events[4] |= 0x04; /* Read Remote Extended Features Complete */

    if (lmp_esco_capable(hdev)) {
        events[5] |= 0x08; /* Synchronous Connection Complete */
        events[5] |= 0x10; /* Synchronous Connection Changed */
    }

    if (lmp_sniffsubr_capable(hdev))
        events[5] |= 0x20; /* Sniff Subrating */

    if (lmp_pause_enc_capable(hdev))
        events[5] |= 0x80; /* Encryption Key Refresh Complete */

    if (lmp_ext_inq_capable(hdev))
        events[5] |= 0x40; /* Extended Inquiry Result */

    if (lmp_no_flush_capable(hdev))
        events[7] |= 0x01; /* Enhanced Flush Complete */

    if (lmp_lsto_capable(hdev))
        events[6] |= 0x80; /* Link Supervision Timeout Changed */

    if (lmp_ssp_capable(hdev)) {
        events[6] |= 0x01; /* IO Capability Request */
        events[6] |= 0x02; /* IO Capability Response */
        events[6] |= 0x04; /* User Confirmation Request */
        events[6] |= 0x08; /* User Passkey Request */
        events[6] |= 0x10; /* Remote OOB Data Request */
        events[6] |= 0x20; /* Simple Pairing Complete */
        events[7] |= 0x04; /* User Passkey Notification */
        events[7] |= 0x08; /* Keypress Notification */
        events[7] |= 0x10; /* Remote Host Supported
                            * Features Notification
                            */
    }

    if (lmp_le_capable(hdev))
        events[7] |= 0x20; /* LE Meta-Event */

    hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
    struct hci_dev *hdev = req->hdev;

    if (hdev->dev_type == HCI_AMP)
        return amp_init2(req);

    if (lmp_bredr_capable(hdev))
        bredr_setup(req);
    else
        hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

    if (lmp_le_capable(hdev))
        le_setup(req);

    /* All Bluetooth 1.2 and later controllers should support the
     * HCI command for reading the local supported commands.
     *
     * Unfortunately some controllers indicate Bluetooth 1.2 support,
     * but do not have support for this command. If that is the case,
     * the driver can quirk the behavior and skip reading the local
     * supported commands.
     */
    if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
        !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

    if (lmp_ssp_capable(hdev)) {
        /* When SSP is available, then the host features page
         * should also be available. However some controllers
         * list the max_page as 0 as long as SSP has not been
         * enabled. To achieve proper debugging output, force
         * the minimum max_page to 1 at least.
         */
        hdev->max_page = 0x01;

        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
            u8 mode = 0x01;

            hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                        sizeof(mode), &mode);
        } else {
            struct hci_cp_write_eir cp;

            memset(hdev->eir, 0, sizeof(hdev->eir));
            memset(&cp, 0, sizeof(cp));

            hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
        }
    }

    if (lmp_inq_rssi_capable(hdev) ||
        test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
        u8 mode;

        /* If Extended Inquiry Result events are supported, then
         * they are clearly preferred over Inquiry Result with RSSI
         * events.
         */
        mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
    }

    if (lmp_inq_tx_pwr_capable(hdev))
        hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

    if (lmp_ext_feat_capable(hdev)) {
        struct hci_cp_read_local_ext_features cp;

        cp.page = 0x01;
        hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                    sizeof(cp), &cp);
    }

    if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
        u8 enable = 1;
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                    &enable);
    }

    return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
    struct hci_dev *hdev = req->hdev;
    struct hci_cp_write_def_link_policy cp;
    u16 link_policy = 0;

    if (lmp_rswitch_capable(hdev))
        link_policy |= HCI_LP_RSWITCH;
    if (lmp_hold_capable(hdev))
        link_policy |= HCI_LP_HOLD;
    if (lmp_sniff_capable(hdev))
        link_policy |= HCI_LP_SNIFF;
    if (lmp_park_capable(hdev))
        link_policy |= HCI_LP_PARK;

    cp.policy = cpu_to_le16(link_policy);
    hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
    struct hci_dev *hdev = req->hdev;
    struct hci_cp_write_le_host_supported cp;

    /* LE-only devices do not support explicit enablement */
    if (!lmp_bredr_capable(hdev))
        return;

    memset(&cp, 0, sizeof(cp));

    if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
        cp.le = 0x01;
        cp.simul = 0x00;
    }

    if (cp.le != lmp_host_le_capable(hdev))
        hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
    struct hci_dev *hdev = req->hdev;
    u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
    bool changed = false;

    /* If Connectionless Slave Broadcast master role is supported
     * enable all necessary events for it.
     */
    if (lmp_csb_master_capable(hdev)) {
        events[1] |= 0x40; /* Triggered Clock Capture */
        events[1] |= 0x80; /* Synchronization Train Complete */
        events[2] |= 0x10; /* Slave Page Response Timeout */
        events[2] |= 0x20; /* CSB Channel Map Change */
        changed = true;
    }

    /* If Connectionless Slave Broadcast slave role is supported
     * enable all necessary events for it.
     */
    if (lmp_csb_slave_capable(hdev)) {
        events[2] |= 0x01; /* Synchronization Train Received */
        events[2] |= 0x02; /* CSB Receive */
        events[2] |= 0x04; /* CSB Timeout */
        events[2] |= 0x08; /* Truncated Page Complete */
        changed = true;
    }

    /* Enable Authenticated Payload Timeout Expired event if supported */
    if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
        events[2] |= 0x80;
        changed = true;
    }

    /* Some Broadcom based controllers indicate support for Set Event
     * Mask Page 2 command, but then actually do not support it. Since
     * the default value is all bits set to zero, the command is only
     * required if the event mask has to be changed. In case no change
     * to the event mask is needed, skip this command.
     */
    if (changed)
        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
                    sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
    struct hci_dev *hdev = req->hdev;
    u8 p;

    hci_setup_event_mask(req);

    if (hdev->commands[6] & 0x20 &&
        !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
        struct hci_cp_read_stored_link_key cp;

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.read_all = 0x01;
        hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
    }

    if (hdev->commands[5] & 0x10)
        hci_setup_link_policy(req);

    if (hdev->commands[8] & 0x01)
        hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

    if (hdev->commands[18] & 0x04)
        hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);

    /* Some older Broadcom based Bluetooth 1.2 controllers do not
     * support the Read Page Scan Type command. Check support for
     * this command in the bit mask of supported commands.
     */
    if (hdev->commands[13] & 0x01)
        hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

    if (lmp_le_capable(hdev)) {
        u8 events[8];

        memset(events, 0, sizeof(events));

        if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
            events[0] |= 0x10; /* LE Long Term Key Request */

        /* If controller supports the Connection Parameters Request
         * Link Layer Procedure, enable the corresponding event.
         */
        if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
            events[0] |= 0x20; /* LE Remote Connection
                                * Parameter Request
                                */

        /* If the controller supports the Data Length Extension
         * feature, enable the corresponding event.
         */
        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
            events[0] |= 0x40; /* LE Data Length Change */

        /* If the controller supports Extended Scanner Filter
         * Policies, enable the corresponding event.
         */
        if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
            events[1] |= 0x04; /* LE Direct Advertising
                                * Report
                                */

        /* If the controller supports Channel Selection Algorithm #2
         * feature, enable the corresponding event.
         */
        if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
            events[2] |= 0x08; /* LE Channel Selection
                                * Algorithm
                                */

        /* If the controller supports the LE Set Scan Enable command,
         * enable the corresponding advertising report event.
         */
        if (hdev->commands[26] & 0x08)
            events[0] |= 0x02; /* LE Advertising Report */

        /* If the controller supports the LE Create Connection
         * command, enable the corresponding event.
         */
        if (hdev->commands[26] & 0x10)
            events[0] |= 0x01; /* LE Connection Complete */

        /* If the controller supports the LE Connection Update
         * command, enable the corresponding event.
         */
        if (hdev->commands[27] & 0x04)
            events[0] |= 0x04; /* LE Connection Update
                                * Complete
                                */

        /* If the controller supports the LE Read Remote Used Features
         * command, enable the corresponding event.
         */
        if (hdev->commands[27] & 0x20)
            events[0] |= 0x08; /* LE Read Remote Used
                                * Features Complete
                                */

        /* If the controller supports the LE Read Local P-256
         * Public Key command, enable the corresponding event.
         */
        if (hdev->commands[34] & 0x02)
            events[0] |= 0x80; /* LE Read Local P-256
                                * Public Key Complete
                                */

        /* If the controller supports the LE Generate DHKey
         * command, enable the corresponding event.
         */
        if (hdev->commands[34] & 0x04)
            events[1] |= 0x01; /* LE Generate DHKey Complete */

        /* If the controller supports the LE Set Default PHY or
         * LE Set PHY commands, enable the corresponding event.
         */
        if (hdev->commands[35] & (0x20 | 0x40))
            events[1] |= 0x08; /* LE PHY Update Complete */

        /* If the controller supports LE Set Extended Scan Parameters
         * and LE Set Extended Scan Enable commands, enable the
         * corresponding event.
         */
        if (use_ext_scan(hdev))
            events[1] |= 0x10; /* LE Extended Advertising
                                * Report
                                */

        /* If the controller supports the LE Extended Create Connection
         * command, enable the corresponding event.
         */
        if (use_ext_conn(hdev))
            events[1] |= 0x02; /* LE Enhanced Connection
                                * Complete
                                */

        /* If the controller supports the LE Extended Advertising
         * command, enable the corresponding event.
         */
        if (ext_adv_capable(hdev))
            events[2] |= 0x02; /* LE Advertising Set
                                * Terminated
                                */

        hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                    events);

        /* Read LE Advertising Channel TX Power */
        if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
            /* HCI TS spec forbids mixing of legacy and extended
             * advertising commands wherein READ_ADV_TX_POWER is
             * also included. So do not call it if extended adv
             * is supported otherwise controller will return
             * COMMAND_DISALLOWED for extended commands.
             */
            hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
        }

        if (hdev->commands[26] & 0x40) {
            /* Read LE White List Size */
            hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
                        0, NULL);
        }

        if (hdev->commands[26] & 0x80) {
            /* Clear LE White List */
            hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
        }

        if (hdev->commands[34] & 0x40) {
            /* Read LE Resolving List Size */
            hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
                        0, NULL);
        }

        if (hdev->commands[34] & 0x20) {
            /* Clear LE Resolving List */
            hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
        }

        if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
            /* Read LE Maximum Data Length */
            hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

            /* Read LE Suggested Default Data Length */
            hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
        }

        if (ext_adv_capable(hdev)) {
            /* Read LE Number of Supported Advertising Sets */
            hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
                        0, NULL);
        }

        hci_set_le_support(req);
    }

    /* Read features beyond page 1 if available */
    for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
        struct hci_cp_read_local_ext_features cp;

        cp.page = p;
        hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                    sizeof(cp), &cp);
    }

    return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
    struct hci_dev *hdev = req->hdev;

    /* Some Broadcom based Bluetooth controllers do not support the
     * Delete Stored Link Key command. They are clearly indicating its
     * absence in the bit mask of supported commands.
     *
     * Check the supported commands and only if the command is marked
     * as supported send it. If not supported assume that the controller
     * does not have actual support for stored link keys which makes this
     * command redundant anyway.
     *
     * Some controllers indicate that they support handling deleting
     * stored link keys, but they don't. The quirk lets a driver
     * just disable this command.
     */
    if (hdev->commands[6] & 0x80 &&
        !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
        struct hci_cp_delete_stored_link_key cp;

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 0x01;
        hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                    sizeof(cp), &cp);
    }

    /* Set event mask page 2 if the HCI command for it is supported */
    if (hdev->commands[22] & 0x04)
        hci_set_event_mask_page_2(req);

    /* Read local codec list if the HCI command is supported */
    if (hdev->commands[29] & 0x20)
        hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

    /* Get MWS transport configuration if the HCI command is supported */
    if (hdev->commands[30] & 0x08)
        hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

    /* Check for Synchronization Train support */
    if (lmp_sync_train_capable(hdev))
        hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

    /* Enable Secure Connections if supported and configured */
    if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
        bredr_sc_enabled(hdev)) {
        u8 support = 0x01;

        hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                    sizeof(support), &support);
    }

    /* Set erroneous data reporting if supported to the wideband speech
     * setting value
     */
    if (hdev->commands[18] & 0x08) {
        bool enabled = hci_dev_test_flag(hdev,
                                         HCI_WIDEBAND_SPEECH_ENABLED);

        if (enabled !=
            (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
            struct hci_cp_write_def_err_data_reporting cp;

            cp.err_data_reporting = enabled ?
                                    ERR_DATA_REPORTING_ENABLED :
                                    ERR_DATA_REPORTING_DISABLED;

            hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
                        sizeof(cp), &cp);
        }
    }

    /* Set Suggested Default Data Length to maximum if supported */
    if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
        struct hci_cp_le_write_def_data_len cp;

        cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
        cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
        hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
    }

    /* Set Default PHY parameters if command is supported */
    if (hdev->commands[35] & 0x20) {
        struct hci_cp_le_set_default_phy cp;

        cp.all_phys = 0x00;
        cp.tx_phys = hdev->le_tx_def_phys;
        cp.rx_phys = hdev->le_rx_def_phys;

        hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
    }

    return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
    int err;

    err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
    if (err < 0)
        return err;

    if (hci_dev_test_flag(hdev, HCI_SETUP))
        hci_debugfs_create_basic(hdev);

    err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
    if (err < 0)
        return err;

    /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
     * BR/EDR/LE type controllers. AMP controllers only need the
     * first two stages of init.
     */
    if (hdev->dev_type != HCI_PRIMARY)
        return 0;

    err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
    if (err < 0)
        return err;

    err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
    if (err < 0)
        return err;

    /* This function is only called when the controller is actually in
     * configured state. When the controller is marked as unconfigured,
     * this initialization procedure is not run.
     *
     * It means that it is possible that a controller runs through its
     * setup phase and then discovers missing settings. If that is the
     * case, then this function will not be called. It then will only
     * be called during the config phase.
     *
     * So only when in setup phase or config phase, create the debugfs
     * entries and register the SMP channels.
     */
    if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
        !hci_dev_test_flag(hdev, HCI_CONFIG))
        return 0;

    hci_debugfs_create_common(hdev);

    if (lmp_bredr_capable(hdev))
        hci_debugfs_create_bredr(hdev);

    if (lmp_le_capable(hdev))
        hci_debugfs_create_le(hdev);

    return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
    struct hci_dev *hdev = req->hdev;

    BT_DBG("%s %ld", hdev->name, opt);

    /* Reset */
    if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
        hci_reset_req(req, 0);

    /* Read Local Version */
    hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

    /* Read BD Address */
    if (hdev->set_bdaddr)
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

    return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
    int err;

    if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
        return 0;

    err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
    if (err < 0)
        return err;

    if (hci_dev_test_flag(hdev, HCI_SETUP))
        hci_debugfs_create_basic(hdev);

    return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
    __u8 scan = opt;

    BT_DBG("%s %x", req->hdev->name, scan);

    /* Inquiry and Page scans */
    hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
    return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
    __u8 auth = opt;

    BT_DBG("%s %x", req->hdev->name, auth);

    /* Authentication */
    hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
    return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
    __u8 encrypt = opt;

    BT_DBG("%s %x", req->hdev->name, encrypt);

    /* Encryption */
    hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
    return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
    __le16 policy = cpu_to_le16(opt);

    BT_DBG("%s %x", req->hdev->name, policy);

    /* Default link policy */
    hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
    return 0;
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
    struct hci_dev *hdev = NULL, *d;

    BT_DBG("%d", index);

    if (index < 0)
        return NULL;

    read_lock(&hci_dev_list_lock);
    list_for_each_entry(d, &hci_dev_list, list) {
        if (d->id == index) {
            hdev = hci_dev_hold(d);
            break;
        }
    }
    read_unlock(&hci_dev_list_lock);
    return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
    struct discovery_state *discov = &hdev->discovery;

    switch (discov->state) {
    case DISCOVERY_FINDING:
    case DISCOVERY_RESOLVING:
        return true;

    default:
        return false;
    }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
    int old_state = hdev->discovery.state;

    BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

    if (old_state == state)
        return;

    hdev->discovery.state = state;

    switch (state) {
    case DISCOVERY_STOPPED:
        hci_update_background_scan(hdev);

        if (old_state != DISCOVERY_STARTING)
            mgmt_discovering(hdev, 0);
        break;
    case DISCOVERY_STARTING:
        break;
    case DISCOVERY_FINDING:
        mgmt_discovering(hdev, 1);
        break;
    case DISCOVERY_RESOLVING:
        break;
    case DISCOVERY_STOPPING:
        break;
    }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
    struct discovery_state *cache = &hdev->discovery;
    struct inquiry_entry *p, *n;

    list_for_each_entry_safe(p, n, &cache->all, all) {
        list_del(&p->all);
        kfree(p);
    }

    INIT_LIST_HEAD(&cache->unknown);
    INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
    struct discovery_state *cache = &hdev->discovery;
    struct inquiry_entry *e;

    BT_DBG("cache %p, %pMR", cache, bdaddr);

    list_for_each_entry(e, &cache->all, all) {
        if (!bacmp(&e->data.bdaddr, bdaddr))
            return e;
    }

    return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
    struct discovery_state *cache = &hdev->discovery;
    struct inquiry_entry *e;

    BT_DBG("cache %p, %pMR", cache, bdaddr);

    list_for_each_entry(e, &cache->unknown, list) {
        if (!bacmp(&e->data.bdaddr, bdaddr))
            return e;
    }

    return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
    struct discovery_state *cache = &hdev->discovery;
    struct inquiry_entry *e;

    BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

    list_for_each_entry(e, &cache->resolve, list) {
        if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
            return e;
        if (!bacmp(&e->data.bdaddr, bdaddr))
            return e;
    }

    return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
    struct discovery_state *cache = &hdev->discovery;
    struct list_head *pos = &cache->resolve;
    struct inquiry_entry *p;

    list_del(&ie->list);

    list_for_each_entry(p, &cache->resolve, list) {
        if (p->name_state != NAME_PENDING &&
            abs(p->data.rssi) >= abs(ie->data.rssi))
            break;
        pos = &p->list;
    }

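    /* Re-insert the entry after "pos" so the resolve list stays ordered
     * by signal strength: entries with a smaller absolute RSSI (stronger
     * signal) come first and get their names resolved before weaker ones,
     * while entries already in NAME_PENDING keep their position.
     */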
    list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
    struct discovery_state *cache = &hdev->discovery;
    struct inquiry_entry *ie;
    u32 flags = 0;

    BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

    hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

    if (!data->ssp_mode)
        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

    ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
    if (ie) {
        if (!ie->data.ssp_mode)
            flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        if (ie->name_state == NAME_NEEDED &&
            data->rssi != ie->data.rssi) {
            ie->data.rssi = data->rssi;
            hci_inquiry_cache_update_resolve(hdev, ie);
        }

        goto update;
    }

    /* Entry not in the cache. Add new one. */
    ie = kzalloc(sizeof(*ie), GFP_KERNEL);
    if (!ie) {
        flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
        goto done;
    }

    list_add(&ie->all, &cache->all);

    if (name_known) {
        ie->name_state = NAME_KNOWN;
    } else {
        ie->name_state = NAME_NOT_KNOWN;
        list_add(&ie->list, &cache->unknown);
    }

update:
    if (name_known && ie->name_state != NAME_KNOWN &&
        ie->name_state != NAME_PENDING) {
        ie->name_state = NAME_KNOWN;
        list_del(&ie->list);
    }

    memcpy(&ie->data, data, sizeof(*data));
    ie->timestamp = jiffies;
    cache->timestamp = jiffies;

    if (ie->name_state == NAME_NOT_KNOWN)
        flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
    return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
    struct discovery_state *cache = &hdev->discovery;
    struct inquiry_info *info = (struct inquiry_info *) buf;
    struct inquiry_entry *e;
    int copied = 0;

    list_for_each_entry(e, &cache->all, all) {
        struct inquiry_data *data = &e->data;

        if (copied >= num)
            break;

        bacpy(&info->bdaddr, &data->bdaddr);
        info->pscan_rep_mode = data->pscan_rep_mode;
        info->pscan_period_mode = data->pscan_period_mode;
        info->pscan_mode = data->pscan_mode;
        memcpy(info->dev_class, data->dev_class, 3);
        info->clock_offset = data->clock_offset;

        info++;
        copied++;
    }

    BT_DBG("cache %p, copied %d", cache, copied);
    return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
    struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
    struct hci_dev *hdev = req->hdev;
    struct hci_cp_inquiry cp;

    BT_DBG("%s", hdev->name);

    if (test_bit(HCI_INQUIRY, &hdev->flags))
        return 0;

    /* Start Inquiry */
    memcpy(&cp.lap, &ir->lap, 3);
    cp.length = ir->length;
    cp.num_rsp = ir->num_rsp;
    hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

    return 0;
}

int hci_inquiry(void __user *arg)
{
    __u8 __user *ptr = arg;
    struct hci_inquiry_req ir;
    struct hci_dev *hdev;
    int err = 0, do_inquiry = 0, max_rsp;
    long timeo;
    __u8 *buf;

    if (copy_from_user(&ir, ptr, sizeof(ir)))
        return -EFAULT;

    hdev = hci_dev_get(ir.dev_id);
    if (!hdev)
        return -ENODEV;

    if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
        err = -EBUSY;
        goto done;
    }

    if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
        err = -EOPNOTSUPP;
        goto done;
    }

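    /* Inquiry is a BR/EDR procedure, so it is not available on AMP
     * controllers or on controllers without BR/EDR enabled.
     */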
    if (hdev->dev_type != HCI_PRIMARY) {
        err = -EOPNOTSUPP;
        goto done;
    }

    if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
        err = -EOPNOTSUPP;
        goto done;
    }

    hci_dev_lock(hdev);
    if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
        inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
        hci_inquiry_cache_flush(hdev);
        do_inquiry = 1;
    }
    hci_dev_unlock(hdev);

    timeo = ir.length * msecs_to_jiffies(2000);

    if (do_inquiry) {
        err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                           timeo, NULL);
        if (err < 0)
            goto done;

        /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
         * cleared). If it is interrupted by a signal, return -EINTR.
         */
        if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                        TASK_INTERRUPTIBLE))
            return -EINTR;
    }

    /* for unlimited number of responses we will use buffer with
     * 255 entries
     */
    max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

    /* cache_dump can't sleep. Therefore we allocate temp buffer and then
     * copy it to the user space.
     */
    buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
    if (!buf) {
        err = -ENOMEM;
        goto done;
    }

    hci_dev_lock(hdev);
    ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
    hci_dev_unlock(hdev);

    BT_DBG("num_rsp %d", ir.num_rsp);

    if (!copy_to_user(ptr, &ir, sizeof(ir))) {
        ptr += sizeof(ir);
        if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                         ir.num_rsp))
            err = -EFAULT;
    } else
        err = -EFAULT;

    kfree(buf);

done:
    hci_dev_put(hdev);
    return err;
}

/**
 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
 *                                     (BD_ADDR) for a HCI device from
 *                                     a firmware node property.
 * @hdev: The HCI device
 *
 * Search the firmware node for 'local-bd-address'.
 *
 * All-zero BD addresses are rejected, because those could be properties
 * that exist in the firmware tables, but were not updated by the firmware. For
 * example, the DTS could define 'local-bd-address', with zero BD addresses.
 */
static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
{
    struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
    bdaddr_t ba;
    int ret;

    ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
                                        (u8 *)&ba, sizeof(ba));
    if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
        return;

    bacpy(&hdev->public_addr, &ba);
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
    int ret = 0;

    BT_DBG("%s %p", hdev->name, hdev);

    hci_req_sync_lock(hdev);

    if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
        ret = -ENODEV;
        goto done;
    }

    if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
        !hci_dev_test_flag(hdev, HCI_CONFIG)) {
        /* Check for rfkill but allow the HCI setup stage to
         * proceed (which in itself doesn't cause any RF activity).
         */
        if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
            ret = -ERFKILL;
            goto done;
        }

        /* Check for valid public address or a configured static
         * random address, but let the HCI setup proceed to
         * be able to determine if there is a public address
         * or not.
         *
         * In case of user channel usage, it is not important
         * if a public address or static random address is
         * available.
         *
         * This check is only valid for BR/EDR controllers
         * since AMP controllers do not have an address.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            hdev->dev_type == HCI_PRIMARY &&
            !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
            !bacmp(&hdev->static_addr, BDADDR_ANY)) {
            ret = -EADDRNOTAVAIL;
            goto done;
        }
    }

    if (test_bit(HCI_UP, &hdev->flags)) {
        ret = -EALREADY;
        goto done;
    }

    if (hdev->open(hdev)) {
        ret = -EIO;
        goto done;
    }

    set_bit(HCI_RUNNING, &hdev->flags);
    hci_sock_dev_event(hdev, HCI_DEV_OPEN);

    atomic_set(&hdev->cmd_cnt, 1);
    set_bit(HCI_INIT, &hdev->flags);

    if (hci_dev_test_flag(hdev, HCI_SETUP) ||
        test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
        bool invalid_bdaddr;

        hci_sock_dev_event(hdev, HCI_DEV_SETUP);

        if (hdev->setup)
            ret = hdev->setup(hdev);

        /* The transport driver can set the quirk to mark the
         * BD_ADDR invalid before creating the HCI device or in
         * its setup callback.
         */
        invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
                                  &hdev->quirks);

        if (ret)
            goto setup_failed;

        if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
            if (!bacmp(&hdev->public_addr, BDADDR_ANY))
                hci_dev_get_bd_addr_from_property(hdev);

            if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                hdev->set_bdaddr) {
                ret = hdev->set_bdaddr(hdev,
                                       &hdev->public_addr);

                /* If setting of the BD_ADDR from the device
                 * property succeeds, then treat the address
                 * as valid even if the invalid BD_ADDR
                 * quirk indicates otherwise.
                 */
                if (!ret)
                    invalid_bdaddr = false;
            }
        }

setup_failed:
        /* The transport driver can set these quirks before
         * creating the HCI device or in its setup callback.
         *
         * For the invalid BD_ADDR quirk it is possible that
         * it becomes a valid address if the bootloader does
         * provide it (see above).
         *
         * In case any of them is set, the controller has to
         * start up as unconfigured.
         */
        if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
            invalid_bdaddr)
            hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

        /* For an unconfigured controller it is required to
         * read at least the version information provided by
         * the Read Local Version Information command.
         *
         * If the set_bdaddr driver callback is provided, then
         * also the original Bluetooth public device address
         * will be read using the Read BD Address command.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
            ret = __hci_unconf_init(hdev);
    }

    if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
        /* If public address change is configured, ensure that
         * the address gets programmed. If the driver does not
         * support changing the public address, fail the power
         * on procedure.
         */
        if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
            hdev->set_bdaddr)
            ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
        else
            ret = -EADDRNOTAVAIL;
    }

    if (!ret) {
        if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
            ret = __hci_init(hdev);
            if (!ret && hdev->post_init)
                ret = hdev->post_init(hdev);
        }
    }

    /* If the HCI Reset command is clearing all diagnostic settings,
     * then they need to be reprogrammed after the init procedure
     * completed.
     */
    if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
        !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
        hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
        ret = hdev->set_diag(hdev, true);

    clear_bit(HCI_INIT, &hdev->flags);

    if (!ret) {
        hci_dev_hold(hdev);
        hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
        hci_adv_instances_set_rpa_expired(hdev, true);
        set_bit(HCI_UP, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_UP);
        hci_leds_update_powered(hdev, true);
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG) &&
            !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            hci_dev_test_flag(hdev, HCI_MGMT) &&
            hdev->dev_type == HCI_PRIMARY) {
            ret = __hci_req_hci_power_on(hdev);
            mgmt_power_on(hdev, ret);
        }
    } else {
        /* Init failed, cleanup */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->cmd_work);
        flush_work(&hdev->rx_work);

        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->rx_q);

        if (hdev->flush)
            hdev->flush(hdev);

        if (hdev->sent_cmd) {
            kfree_skb(hdev->sent_cmd);
            hdev->sent_cmd = NULL;
        }

        clear_bit(HCI_RUNNING, &hdev->flags);
        hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

        hdev->close(hdev);
        hdev->flags &= BIT(HCI_RAW);
    }

done:
    hci_req_sync_unlock(hdev);
    return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
    struct hci_dev *hdev;
    int err;

    hdev = hci_dev_get(dev);
    if (!hdev)
        return -ENODEV;

    /* Devices that are marked as unconfigured can only be powered
     * up as user channel. Trying to bring them up as normal devices
     * will result in a failure. Only user channel operation is
     * possible.
     *
     * When this function is called for a user channel, the flag
     * HCI_USER_CHANNEL will be set first before attempting to
     * open the device.
     */
    if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
        !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
        err = -EOPNOTSUPP;
        goto done;
    }

    /* We need to ensure that no other power on/off work is pending
     * before proceeding to call hci_dev_do_open. This is
     * particularly important if the setup procedure has not yet
     * completed.
     */
    if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
        cancel_delayed_work(&hdev->power_off);

    /* After this call it is guaranteed that the setup procedure
     * has finished. This means that error conditions like RFKILL
     * or no valid public or static random address apply.
     */
    flush_workqueue(hdev->req_workqueue);

    /* For controllers not using the management interface and that
     * are brought up using legacy ioctl, set the HCI_BONDABLE bit
     * so that pairing works for them. Once the management interface
     * is in use this bit will be cleared again and userspace has
     * to explicitly enable it.
     */
    if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
        !hci_dev_test_flag(hdev, HCI_MGMT))
        hci_dev_set_flag(hdev, HCI_BONDABLE);

    err = hci_dev_do_open(hdev);

done:
    hci_dev_put(hdev);
    return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
    struct hci_conn_params *p;

    list_for_each_entry(p, &hdev->le_conn_params, list) {
        if (p->conn) {
            hci_conn_drop(p->conn);
            hci_conn_put(p->conn);
            p->conn = NULL;
        }
        list_del_init(&p->action);
    }

    BT_DBG("All LE pending actions cleared");
}

int hci_dev_do_close(struct hci_dev *hdev)
{
    bool auto_off;

    BT_DBG("%s %p", hdev->name, hdev);

    if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
        !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
        test_bit(HCI_UP, &hdev->flags)) {
        /* Execute vendor specific shutdown routine */
        if (hdev->shutdown)
            hdev->shutdown(hdev);
    }

    cancel_delayed_work(&hdev->power_off);

    hci_request_cancel_all(hdev);
    hci_req_sync_lock(hdev);

    if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
        cancel_delayed_work_sync(&hdev->cmd_timer);
        hci_req_sync_unlock(hdev);
        return 0;
    }

    hci_leds_update_powered(hdev, false);

    /* Flush RX and TX works */
    flush_work(&hdev->tx_work);
    flush_work(&hdev->rx_work);

    if (hdev->discov_timeout > 0) {
        hdev->discov_timeout = 0;
        hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
        hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
    }

    if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
        cancel_delayed_work(&hdev->service_cache);

    if (hci_dev_test_flag(hdev, HCI_MGMT)) {
        struct adv_info *adv_instance;

        cancel_delayed_work_sync(&hdev->rpa_expired);

        list_for_each_entry(adv_instance, &hdev->adv_instances, list)
            cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
    }

    /* Avoid potential lockdep warnings from the *_flush() calls by
     * ensuring the workqueue is empty up front.
     */
    drain_workqueue(hdev->workqueue);

    hci_dev_lock(hdev);

    hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

    auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

    if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
        !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
        hci_dev_test_flag(hdev, HCI_MGMT))
        __mgmt_power_off(hdev);

    hci_inquiry_cache_flush(hdev);
    hci_pend_le_actions_clear(hdev);
    hci_conn_hash_flush(hdev);
    hci_dev_unlock(hdev);

    smp_unregister(hdev);

    hci_sock_dev_event(hdev, HCI_DEV_DOWN);

    if (hdev->flush)
        hdev->flush(hdev);

    /* Reset device */
    skb_queue_purge(&hdev->cmd_q);
    atomic_set(&hdev->cmd_cnt, 1);
    if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
        !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
        set_bit(HCI_INIT, &hdev->flags);
        __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
        clear_bit(HCI_INIT, &hdev->flags);
    }

    /* flush cmd work */
    flush_work(&hdev->cmd_work);

    /* Drop queues */
    skb_queue_purge(&hdev->rx_q);
    skb_queue_purge(&hdev->cmd_q);
    skb_queue_purge(&hdev->raw_q);

    /* Drop last sent command */
    if (hdev->sent_cmd) {
        cancel_delayed_work_sync(&hdev->cmd_timer);
        kfree_skb(hdev->sent_cmd);
        hdev->sent_cmd = NULL;
    }

    clear_bit(HCI_RUNNING, &hdev->flags);
    hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

    if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
        wake_up(&hdev->suspend_wait_q);

    /* After this point our queues are empty
     * and no tasks are scheduled.
     */
    hdev->close(hdev);

    /* Clear flags */
    hdev->flags &= BIT(HCI_RAW);
    hci_dev_clear_volatile_flags(hdev);

    /* Controller radio is available but is currently powered down */
    hdev->amp_status = AMP_STATUS_POWERED_DOWN;

    memset(hdev->eir, 0, sizeof(hdev->eir));
    memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
    bacpy(&hdev->random_addr, BDADDR_ANY);

    hci_req_sync_unlock(hdev);

    hci_dev_put(hdev);
    return 0;
}

int hci_dev_close(__u16 dev)
{
    struct hci_dev *hdev;
    int err;

    hdev = hci_dev_get(dev);
    if (!hdev)
        return -ENODEV;

    if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
        err = -EBUSY;
        goto done;
    }

    if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
        cancel_delayed_work(&hdev->power_off);

    err = hci_dev_do_close(hdev);

done:
    hci_dev_put(hdev);
    return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
    int ret;

    BT_DBG("%s %p", hdev->name, hdev);

    hci_req_sync_lock(hdev);

    /* Drop queues */
    skb_queue_purge(&hdev->rx_q);
    skb_queue_purge(&hdev->cmd_q);

    /* Avoid potential lockdep warnings from the *_flush() calls by
     * ensuring the workqueue is empty up front.
     */
    drain_workqueue(hdev->workqueue);

    hci_dev_lock(hdev);
    hci_inquiry_cache_flush(hdev);
    hci_conn_hash_flush(hdev);
    hci_dev_unlock(hdev);

    if (hdev->flush)
        hdev->flush(hdev);

    atomic_set(&hdev->cmd_cnt, 1);
    hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

    ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

    hci_req_sync_unlock(hdev);
    return ret;
}

int hci_dev_reset(__u16 dev)
{
    struct hci_dev *hdev;
    int err;

    hdev = hci_dev_get(dev);
    if (!hdev)
        return -ENODEV;

    if (!test_bit(HCI_UP, &hdev->flags)) {
        err = -ENETDOWN;
        goto done;
    }

    if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
        err = -EBUSY;
        goto done;
    }

    if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
        err = -EOPNOTSUPP;
        goto done;
    }

    err = hci_dev_do_reset(hdev);

done:
    hci_dev_put(hdev);
    return err;
}

int hci_dev_reset_stat(__u16 dev)
{
    struct hci_dev *hdev;
    int ret = 0;

    hdev = hci_dev_get(dev);
    if (!hdev)
        return -ENODEV;

    if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
        ret = -EBUSY;
        goto done;
    }

    if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
        ret = -EOPNOTSUPP;
        goto done;
    }

    memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
    hci_dev_put(hdev);
    return ret;
}

static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
    bool conn_changed, discov_changed;

    BT_DBG("%s scan 0x%02x", hdev->name, scan);

    if ((scan & SCAN_PAGE))
        conn_changed = !hci_dev_test_and_set_flag(hdev,
                                                  HCI_CONNECTABLE);
    else
        conn_changed = hci_dev_test_and_clear_flag(hdev,
                                                   HCI_CONNECTABLE);

    if ((scan & SCAN_INQUIRY)) {
        discov_changed = !hci_dev_test_and_set_flag(hdev,
                                                    HCI_DISCOVERABLE);
    } else {
        hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        discov_changed = hci_dev_test_and_clear_flag(hdev,
                                                     HCI_DISCOVERABLE);
    }

    if (!hci_dev_test_flag(hdev, HCI_MGMT))
        return;

    if (conn_changed || discov_changed) {
        /* In case this was disabled through mgmt */
        hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
            hci_req_update_adv_data(hdev, hdev->cur_adv_instance);

        mgmt_new_settings(hdev);
    }
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
    struct hci_dev *hdev;
    struct hci_dev_req dr;
    int err = 0;

    if (copy_from_user(&dr, arg, sizeof(dr)))
        return -EFAULT;

    hdev = hci_dev_get(dr.dev_id);
    if (!hdev)
        return -ENODEV;

    if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
        err = -EBUSY;
        goto done;
    }

    if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
        err = -EOPNOTSUPP;
        goto done;
    }

    if (hdev->dev_type != HCI_PRIMARY) {
        err = -EOPNOTSUPP;
        goto done;
    }

    if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
        err = -EOPNOTSUPP;
        goto done;
    }

    switch (cmd) {
    case HCISETAUTH:
        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                           HCI_INIT_TIMEOUT, NULL);
        break;

    case HCISETENCRYPT:
        if (!lmp_encrypt_capable(hdev)) {
            err = -EOPNOTSUPP;
            break;
        }

        if (!test_bit(HCI_AUTH,
&hdev->flags)) { 2013 /* Auth must be enabled first */ 2014 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, 2015 HCI_INIT_TIMEOUT, NULL); 2016 if (err) 2017 break; 2018 } 2019 2020 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt, 2021 HCI_INIT_TIMEOUT, NULL); 2022 break; 2023 2024 case HCISETSCAN: 2025 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt, 2026 HCI_INIT_TIMEOUT, NULL); 2027 2028 /* Ensure that the connectable and discoverable states 2029 * get correctly modified as this was a non-mgmt change. 2030 */ 2031 if (!err) 2032 hci_update_scan_state(hdev, dr.dev_opt); 2033 break; 2034 2035 case HCISETLINKPOL: 2036 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt, 2037 HCI_INIT_TIMEOUT, NULL); 2038 break; 2039 2040 case HCISETLINKMODE: 2041 hdev->link_mode = ((__u16) dr.dev_opt) & 2042 (HCI_LM_MASTER | HCI_LM_ACCEPT); 2043 break; 2044 2045 case HCISETPTYPE: 2046 if (hdev->pkt_type == (__u16) dr.dev_opt) 2047 break; 2048 2049 hdev->pkt_type = (__u16) dr.dev_opt; 2050 mgmt_phy_configuration_changed(hdev, NULL); 2051 break; 2052 2053 case HCISETACLMTU: 2054 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1); 2055 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0); 2056 break; 2057 2058 case HCISETSCOMTU: 2059 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1); 2060 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0); 2061 break; 2062 2063 default: 2064 err = -EINVAL; 2065 break; 2066 } 2067 2068 done: 2069 hci_dev_put(hdev); 2070 return err; 2071 } 2072 2073 int hci_get_dev_list(void __user *arg) 2074 { 2075 struct hci_dev *hdev; 2076 struct hci_dev_list_req *dl; 2077 struct hci_dev_req *dr; 2078 int n = 0, size, err; 2079 __u16 dev_num; 2080 2081 if (get_user(dev_num, (__u16 __user *) arg)) 2082 return -EFAULT; 2083 2084 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) 2085 return -EINVAL; 2086 2087 size = sizeof(*dl) + dev_num * sizeof(*dr); 2088 2089 dl = kzalloc(size, GFP_KERNEL); 2090 if (!dl) 2091 return -ENOMEM; 2092 2093 dr = dl->dev_req; 2094 2095 read_lock(&hci_dev_list_lock); 2096 list_for_each_entry(hdev, &hci_dev_list, list) { 2097 unsigned long flags = hdev->flags; 2098 2099 /* When the auto-off is configured it means the transport 2100 * is running, but in that case still indicate that the 2101 * device is actually down. 2102 */ 2103 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) 2104 flags &= ~BIT(HCI_UP); 2105 2106 (dr + n)->dev_id = hdev->id; 2107 (dr + n)->dev_opt = flags; 2108 2109 if (++n >= dev_num) 2110 break; 2111 } 2112 read_unlock(&hci_dev_list_lock); 2113 2114 dl->dev_num = n; 2115 size = sizeof(*dl) + n * sizeof(*dr); 2116 2117 err = copy_to_user(arg, dl, size); 2118 kfree(dl); 2119 2120 return err ? -EFAULT : 0; 2121 } 2122 2123 int hci_get_dev_info(void __user *arg) 2124 { 2125 struct hci_dev *hdev; 2126 struct hci_dev_info di; 2127 unsigned long flags; 2128 int err = 0; 2129 2130 if (copy_from_user(&di, arg, sizeof(di))) 2131 return -EFAULT; 2132 2133 hdev = hci_dev_get(di.dev_id); 2134 if (!hdev) 2135 return -ENODEV; 2136 2137 /* When the auto-off is configured it means the transport 2138 * is running, but in that case still indicate that the 2139 * device is actually down. 
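 * In practice this means that while the auto power-off timer is still
 * pending, the HCIGETDEVLIST and HCIGETDEVINFO ioctls report the
 * adapter with the HCI_UP bit masked out, so ioctl-based tools see the
 * device as down even though the init sequence has already run.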
2140 */ 2141 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) 2142 flags = hdev->flags & ~BIT(HCI_UP); 2143 else 2144 flags = hdev->flags; 2145 2146 strcpy(di.name, hdev->name); 2147 di.bdaddr = hdev->bdaddr; 2148 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4); 2149 di.flags = flags; 2150 di.pkt_type = hdev->pkt_type; 2151 if (lmp_bredr_capable(hdev)) { 2152 di.acl_mtu = hdev->acl_mtu; 2153 di.acl_pkts = hdev->acl_pkts; 2154 di.sco_mtu = hdev->sco_mtu; 2155 di.sco_pkts = hdev->sco_pkts; 2156 } else { 2157 di.acl_mtu = hdev->le_mtu; 2158 di.acl_pkts = hdev->le_pkts; 2159 di.sco_mtu = 0; 2160 di.sco_pkts = 0; 2161 } 2162 di.link_policy = hdev->link_policy; 2163 di.link_mode = hdev->link_mode; 2164 2165 memcpy(&di.stat, &hdev->stat, sizeof(di.stat)); 2166 memcpy(&di.features, &hdev->features, sizeof(di.features)); 2167 2168 if (copy_to_user(arg, &di, sizeof(di))) 2169 err = -EFAULT; 2170 2171 hci_dev_put(hdev); 2172 2173 return err; 2174 } 2175 2176 /* ---- Interface to HCI drivers ---- */ 2177 2178 static int hci_rfkill_set_block(void *data, bool blocked) 2179 { 2180 struct hci_dev *hdev = data; 2181 2182 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); 2183 2184 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) 2185 return -EBUSY; 2186 2187 if (blocked) { 2188 hci_dev_set_flag(hdev, HCI_RFKILLED); 2189 if (!hci_dev_test_flag(hdev, HCI_SETUP) && 2190 !hci_dev_test_flag(hdev, HCI_CONFIG)) 2191 hci_dev_do_close(hdev); 2192 } else { 2193 hci_dev_clear_flag(hdev, HCI_RFKILLED); 2194 } 2195 2196 return 0; 2197 } 2198 2199 static const struct rfkill_ops hci_rfkill_ops = { 2200 .set_block = hci_rfkill_set_block, 2201 }; 2202 2203 static void hci_power_on(struct work_struct *work) 2204 { 2205 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); 2206 int err; 2207 2208 BT_DBG("%s", hdev->name); 2209 2210 if (test_bit(HCI_UP, &hdev->flags) && 2211 hci_dev_test_flag(hdev, HCI_MGMT) && 2212 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { 2213 cancel_delayed_work(&hdev->power_off); 2214 hci_req_sync_lock(hdev); 2215 err = __hci_req_hci_power_on(hdev); 2216 hci_req_sync_unlock(hdev); 2217 mgmt_power_on(hdev, err); 2218 return; 2219 } 2220 2221 err = hci_dev_do_open(hdev); 2222 if (err < 0) { 2223 hci_dev_lock(hdev); 2224 mgmt_set_powered_failed(hdev, err); 2225 hci_dev_unlock(hdev); 2226 return; 2227 } 2228 2229 /* During the HCI setup phase, a few error conditions are 2230 * ignored and they need to be checked now. If they are still 2231 * valid, it is important to turn the device back off. 2232 */ 2233 if (hci_dev_test_flag(hdev, HCI_RFKILLED) || 2234 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || 2235 (hdev->dev_type == HCI_PRIMARY && 2236 !bacmp(&hdev->bdaddr, BDADDR_ANY) && 2237 !bacmp(&hdev->static_addr, BDADDR_ANY))) { 2238 hci_dev_clear_flag(hdev, HCI_AUTO_OFF); 2239 hci_dev_do_close(hdev); 2240 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { 2241 queue_delayed_work(hdev->req_workqueue, &hdev->power_off, 2242 HCI_AUTO_OFF_TIMEOUT); 2243 } 2244 2245 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { 2246 /* For unconfigured devices, set the HCI_RAW flag 2247 * so that userspace can easily identify them. 2248 */ 2249 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 2250 set_bit(HCI_RAW, &hdev->flags); 2251 2252 /* For fully configured devices, this will send 2253 * the Index Added event. For unconfigured devices, 2254 * it will send Unconfigued Index Added event. 2255 * 2256 * Devices with HCI_QUIRK_RAW_DEVICE are ignored 2257 * and no event will be send. 
2258 */ 2259 mgmt_index_added(hdev); 2260 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { 2261 /* When the controller is now configured, then it 2262 * is important to clear the HCI_RAW flag. 2263 */ 2264 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 2265 clear_bit(HCI_RAW, &hdev->flags); 2266 2267 /* Powering on the controller with HCI_CONFIG set only 2268 * happens with the transition from unconfigured to 2269 * configured. This will send the Index Added event. 2270 */ 2271 mgmt_index_added(hdev); 2272 } 2273 } 2274 2275 static void hci_power_off(struct work_struct *work) 2276 { 2277 struct hci_dev *hdev = container_of(work, struct hci_dev, 2278 power_off.work); 2279 2280 BT_DBG("%s", hdev->name); 2281 2282 hci_dev_do_close(hdev); 2283 } 2284 2285 static void hci_error_reset(struct work_struct *work) 2286 { 2287 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset); 2288 2289 BT_DBG("%s", hdev->name); 2290 2291 if (hdev->hw_error) 2292 hdev->hw_error(hdev, hdev->hw_error_code); 2293 else 2294 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code); 2295 2296 if (hci_dev_do_close(hdev)) 2297 return; 2298 2299 hci_dev_do_open(hdev); 2300 } 2301 2302 void hci_uuids_clear(struct hci_dev *hdev) 2303 { 2304 struct bt_uuid *uuid, *tmp; 2305 2306 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) { 2307 list_del(&uuid->list); 2308 kfree(uuid); 2309 } 2310 } 2311 2312 void hci_link_keys_clear(struct hci_dev *hdev) 2313 { 2314 struct link_key *key; 2315 2316 list_for_each_entry(key, &hdev->link_keys, list) { 2317 list_del_rcu(&key->list); 2318 kfree_rcu(key, rcu); 2319 } 2320 } 2321 2322 void hci_smp_ltks_clear(struct hci_dev *hdev) 2323 { 2324 struct smp_ltk *k; 2325 2326 list_for_each_entry(k, &hdev->long_term_keys, list) { 2327 list_del_rcu(&k->list); 2328 kfree_rcu(k, rcu); 2329 } 2330 } 2331 2332 void hci_smp_irks_clear(struct hci_dev *hdev) 2333 { 2334 struct smp_irk *k; 2335 2336 list_for_each_entry(k, &hdev->identity_resolving_keys, list) { 2337 list_del_rcu(&k->list); 2338 kfree_rcu(k, rcu); 2339 } 2340 } 2341 2342 void hci_blocked_keys_clear(struct hci_dev *hdev) 2343 { 2344 struct blocked_key *b; 2345 2346 list_for_each_entry(b, &hdev->blocked_keys, list) { 2347 list_del_rcu(&b->list); 2348 kfree_rcu(b, rcu); 2349 } 2350 } 2351 2352 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]) 2353 { 2354 bool blocked = false; 2355 struct blocked_key *b; 2356 2357 rcu_read_lock(); 2358 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) { 2359 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) { 2360 blocked = true; 2361 break; 2362 } 2363 } 2364 2365 rcu_read_unlock(); 2366 return blocked; 2367 } 2368 2369 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 2370 { 2371 struct link_key *k; 2372 2373 rcu_read_lock(); 2374 list_for_each_entry_rcu(k, &hdev->link_keys, list) { 2375 if (bacmp(bdaddr, &k->bdaddr) == 0) { 2376 rcu_read_unlock(); 2377 2378 if (hci_is_blocked_key(hdev, 2379 HCI_BLOCKED_KEY_TYPE_LINKKEY, 2380 k->val)) { 2381 bt_dev_warn_ratelimited(hdev, 2382 "Link key blocked for %pMR", 2383 &k->bdaddr); 2384 return NULL; 2385 } 2386 2387 return k; 2388 } 2389 } 2390 rcu_read_unlock(); 2391 2392 return NULL; 2393 } 2394 2395 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, 2396 u8 key_type, u8 old_key_type) 2397 { 2398 /* Legacy key */ 2399 if (key_type < 0x03) 2400 return true; 2401 2402 /* Debug keys are insecure so don't store them persistently */ 2403 if (key_type 
== HCI_LK_DEBUG_COMBINATION) 2404 return false; 2405 2406 /* Changed combination key and there's no previous one */ 2407 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) 2408 return false; 2409 2410 /* Security mode 3 case */ 2411 if (!conn) 2412 return true; 2413 2414 /* BR/EDR key derived using SC from an LE link */ 2415 if (conn->type == LE_LINK) 2416 return true; 2417 2418 /* Neither local nor remote side had no-bonding as requirement */ 2419 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) 2420 return true; 2421 2422 /* Local side had dedicated bonding as requirement */ 2423 if (conn->auth_type == 0x02 || conn->auth_type == 0x03) 2424 return true; 2425 2426 /* Remote side had dedicated bonding as requirement */ 2427 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) 2428 return true; 2429 2430 /* If none of the above criteria match, then don't store the key 2431 * persistently */ 2432 return false; 2433 } 2434 2435 static u8 ltk_role(u8 type) 2436 { 2437 if (type == SMP_LTK) 2438 return HCI_ROLE_MASTER; 2439 2440 return HCI_ROLE_SLAVE; 2441 } 2442 2443 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, 2444 u8 addr_type, u8 role) 2445 { 2446 struct smp_ltk *k; 2447 2448 rcu_read_lock(); 2449 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { 2450 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr)) 2451 continue; 2452 2453 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) { 2454 rcu_read_unlock(); 2455 2456 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK, 2457 k->val)) { 2458 bt_dev_warn_ratelimited(hdev, 2459 "LTK blocked for %pMR", 2460 &k->bdaddr); 2461 return NULL; 2462 } 2463 2464 return k; 2465 } 2466 } 2467 rcu_read_unlock(); 2468 2469 return NULL; 2470 } 2471 2472 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) 2473 { 2474 struct smp_irk *irk_to_return = NULL; 2475 struct smp_irk *irk; 2476 2477 rcu_read_lock(); 2478 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { 2479 if (!bacmp(&irk->rpa, rpa)) { 2480 irk_to_return = irk; 2481 goto done; 2482 } 2483 } 2484 2485 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { 2486 if (smp_irk_matches(hdev, irk->val, rpa)) { 2487 bacpy(&irk->rpa, rpa); 2488 irk_to_return = irk; 2489 goto done; 2490 } 2491 } 2492 2493 done: 2494 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, 2495 irk_to_return->val)) { 2496 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", 2497 &irk_to_return->bdaddr); 2498 irk_to_return = NULL; 2499 } 2500 2501 rcu_read_unlock(); 2502 2503 return irk_to_return; 2504 } 2505 2506 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, 2507 u8 addr_type) 2508 { 2509 struct smp_irk *irk_to_return = NULL; 2510 struct smp_irk *irk; 2511 2512 /* Identity Address must be public or static random */ 2513 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0) 2514 return NULL; 2515 2516 rcu_read_lock(); 2517 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { 2518 if (addr_type == irk->addr_type && 2519 bacmp(bdaddr, &irk->bdaddr) == 0) { 2520 irk_to_return = irk; 2521 goto done; 2522 } 2523 } 2524 2525 done: 2526 2527 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, 2528 irk_to_return->val)) { 2529 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", 2530 &irk_to_return->bdaddr); 2531 irk_to_return = NULL; 2532 } 2533 2534 rcu_read_unlock(); 2535 2536 return irk_to_return; 
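/* Note on the identity address check above: a random identity address
 * must be a static random address, whose two most significant bits are
 * both one. bdaddr_t is stored little-endian, so b[5] holds the most
 * significant byte and (b[5] & 0xc0) == 0xc0 (top byte 0xC0-0xFF)
 * accepts static addresses while rejecting resolvable (01) and
 * non-resolvable (00) private addresses, which do not identify a peer.
 */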
2537 } 2538 2539 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, 2540 bdaddr_t *bdaddr, u8 *val, u8 type, 2541 u8 pin_len, bool *persistent) 2542 { 2543 struct link_key *key, *old_key; 2544 u8 old_key_type; 2545 2546 old_key = hci_find_link_key(hdev, bdaddr); 2547 if (old_key) { 2548 old_key_type = old_key->type; 2549 key = old_key; 2550 } else { 2551 old_key_type = conn ? conn->key_type : 0xff; 2552 key = kzalloc(sizeof(*key), GFP_KERNEL); 2553 if (!key) 2554 return NULL; 2555 list_add_rcu(&key->list, &hdev->link_keys); 2556 } 2557 2558 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type); 2559 2560 /* Some buggy controller combinations generate a changed 2561 * combination key for legacy pairing even when there's no 2562 * previous key */ 2563 if (type == HCI_LK_CHANGED_COMBINATION && 2564 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) { 2565 type = HCI_LK_COMBINATION; 2566 if (conn) 2567 conn->key_type = type; 2568 } 2569 2570 bacpy(&key->bdaddr, bdaddr); 2571 memcpy(key->val, val, HCI_LINK_KEY_SIZE); 2572 key->pin_len = pin_len; 2573 2574 if (type == HCI_LK_CHANGED_COMBINATION) 2575 key->type = old_key_type; 2576 else 2577 key->type = type; 2578 2579 if (persistent) 2580 *persistent = hci_persistent_key(hdev, conn, type, 2581 old_key_type); 2582 2583 return key; 2584 } 2585 2586 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, 2587 u8 addr_type, u8 type, u8 authenticated, 2588 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) 2589 { 2590 struct smp_ltk *key, *old_key; 2591 u8 role = ltk_role(type); 2592 2593 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role); 2594 if (old_key) 2595 key = old_key; 2596 else { 2597 key = kzalloc(sizeof(*key), GFP_KERNEL); 2598 if (!key) 2599 return NULL; 2600 list_add_rcu(&key->list, &hdev->long_term_keys); 2601 } 2602 2603 bacpy(&key->bdaddr, bdaddr); 2604 key->bdaddr_type = addr_type; 2605 memcpy(key->val, tk, sizeof(key->val)); 2606 key->authenticated = authenticated; 2607 key->ediv = ediv; 2608 key->rand = rand; 2609 key->enc_size = enc_size; 2610 key->type = type; 2611 2612 return key; 2613 } 2614 2615 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, 2616 u8 addr_type, u8 val[16], bdaddr_t *rpa) 2617 { 2618 struct smp_irk *irk; 2619 2620 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type); 2621 if (!irk) { 2622 irk = kzalloc(sizeof(*irk), GFP_KERNEL); 2623 if (!irk) 2624 return NULL; 2625 2626 bacpy(&irk->bdaddr, bdaddr); 2627 irk->addr_type = addr_type; 2628 2629 list_add_rcu(&irk->list, &hdev->identity_resolving_keys); 2630 } 2631 2632 memcpy(irk->val, val, 16); 2633 bacpy(&irk->rpa, rpa); 2634 2635 return irk; 2636 } 2637 2638 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 2639 { 2640 struct link_key *key; 2641 2642 key = hci_find_link_key(hdev, bdaddr); 2643 if (!key) 2644 return -ENOENT; 2645 2646 BT_DBG("%s removing %pMR", hdev->name, bdaddr); 2647 2648 list_del_rcu(&key->list); 2649 kfree_rcu(key, rcu); 2650 2651 return 0; 2652 } 2653 2654 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) 2655 { 2656 struct smp_ltk *k; 2657 int removed = 0; 2658 2659 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { 2660 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) 2661 continue; 2662 2663 BT_DBG("%s removing %pMR", hdev->name, bdaddr); 2664 2665 list_del_rcu(&k->list); 2666 kfree_rcu(k, rcu); 2667 removed++; 2668 } 2669 2670 return removed ? 
0 : -ENOENT; 2671 } 2672 2673 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) 2674 { 2675 struct smp_irk *k; 2676 2677 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) { 2678 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) 2679 continue; 2680 2681 BT_DBG("%s removing %pMR", hdev->name, bdaddr); 2682 2683 list_del_rcu(&k->list); 2684 kfree_rcu(k, rcu); 2685 } 2686 } 2687 2688 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) 2689 { 2690 struct smp_ltk *k; 2691 struct smp_irk *irk; 2692 u8 addr_type; 2693 2694 if (type == BDADDR_BREDR) { 2695 if (hci_find_link_key(hdev, bdaddr)) 2696 return true; 2697 return false; 2698 } 2699 2700 /* Convert to HCI addr type which struct smp_ltk uses */ 2701 if (type == BDADDR_LE_PUBLIC) 2702 addr_type = ADDR_LE_DEV_PUBLIC; 2703 else 2704 addr_type = ADDR_LE_DEV_RANDOM; 2705 2706 irk = hci_get_irk(hdev, bdaddr, addr_type); 2707 if (irk) { 2708 bdaddr = &irk->bdaddr; 2709 addr_type = irk->addr_type; 2710 } 2711 2712 rcu_read_lock(); 2713 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { 2714 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) { 2715 rcu_read_unlock(); 2716 return true; 2717 } 2718 } 2719 rcu_read_unlock(); 2720 2721 return false; 2722 } 2723 2724 /* HCI command timer function */ 2725 static void hci_cmd_timeout(struct work_struct *work) 2726 { 2727 struct hci_dev *hdev = container_of(work, struct hci_dev, 2728 cmd_timer.work); 2729 2730 if (hdev->sent_cmd) { 2731 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data; 2732 u16 opcode = __le16_to_cpu(sent->opcode); 2733 2734 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode); 2735 } else { 2736 bt_dev_err(hdev, "command tx timeout"); 2737 } 2738 2739 if (hdev->cmd_timeout) 2740 hdev->cmd_timeout(hdev); 2741 2742 atomic_set(&hdev->cmd_cnt, 1); 2743 queue_work(hdev->workqueue, &hdev->cmd_work); 2744 } 2745 2746 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, 2747 bdaddr_t *bdaddr, u8 bdaddr_type) 2748 { 2749 struct oob_data *data; 2750 2751 list_for_each_entry(data, &hdev->remote_oob_data, list) { 2752 if (bacmp(bdaddr, &data->bdaddr) != 0) 2753 continue; 2754 if (data->bdaddr_type != bdaddr_type) 2755 continue; 2756 return data; 2757 } 2758 2759 return NULL; 2760 } 2761 2762 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, 2763 u8 bdaddr_type) 2764 { 2765 struct oob_data *data; 2766 2767 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); 2768 if (!data) 2769 return -ENOENT; 2770 2771 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type); 2772 2773 list_del(&data->list); 2774 kfree(data); 2775 2776 return 0; 2777 } 2778 2779 void hci_remote_oob_data_clear(struct hci_dev *hdev) 2780 { 2781 struct oob_data *data, *n; 2782 2783 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) { 2784 list_del(&data->list); 2785 kfree(data); 2786 } 2787 } 2788 2789 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, 2790 u8 bdaddr_type, u8 *hash192, u8 *rand192, 2791 u8 *hash256, u8 *rand256) 2792 { 2793 struct oob_data *data; 2794 2795 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); 2796 if (!data) { 2797 data = kmalloc(sizeof(*data), GFP_KERNEL); 2798 if (!data) 2799 return -ENOMEM; 2800 2801 bacpy(&data->bdaddr, bdaddr); 2802 data->bdaddr_type = bdaddr_type; 2803 list_add(&data->list, &hdev->remote_oob_data); 2804 } 2805 2806 if (hash192 && rand192) { 2807 memcpy(data->hash192, hash192, 
sizeof(data->hash192)); 2808 memcpy(data->rand192, rand192, sizeof(data->rand192)); 2809 if (hash256 && rand256) 2810 data->present = 0x03; 2811 } else { 2812 memset(data->hash192, 0, sizeof(data->hash192)); 2813 memset(data->rand192, 0, sizeof(data->rand192)); 2814 if (hash256 && rand256) 2815 data->present = 0x02; 2816 else 2817 data->present = 0x00; 2818 } 2819 2820 if (hash256 && rand256) { 2821 memcpy(data->hash256, hash256, sizeof(data->hash256)); 2822 memcpy(data->rand256, rand256, sizeof(data->rand256)); 2823 } else { 2824 memset(data->hash256, 0, sizeof(data->hash256)); 2825 memset(data->rand256, 0, sizeof(data->rand256)); 2826 if (hash192 && rand192) 2827 data->present = 0x01; 2828 } 2829 2830 BT_DBG("%s for %pMR", hdev->name, bdaddr); 2831 2832 return 0; 2833 } 2834 2835 /* This function requires the caller holds hdev->lock */ 2836 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance) 2837 { 2838 struct adv_info *adv_instance; 2839 2840 list_for_each_entry(adv_instance, &hdev->adv_instances, list) { 2841 if (adv_instance->instance == instance) 2842 return adv_instance; 2843 } 2844 2845 return NULL; 2846 } 2847 2848 /* This function requires the caller holds hdev->lock */ 2849 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) 2850 { 2851 struct adv_info *cur_instance; 2852 2853 cur_instance = hci_find_adv_instance(hdev, instance); 2854 if (!cur_instance) 2855 return NULL; 2856 2857 if (cur_instance == list_last_entry(&hdev->adv_instances, 2858 struct adv_info, list)) 2859 return list_first_entry(&hdev->adv_instances, 2860 struct adv_info, list); 2861 else 2862 return list_next_entry(cur_instance, list); 2863 } 2864 2865 /* This function requires the caller holds hdev->lock */ 2866 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance) 2867 { 2868 struct adv_info *adv_instance; 2869 2870 adv_instance = hci_find_adv_instance(hdev, instance); 2871 if (!adv_instance) 2872 return -ENOENT; 2873 2874 BT_DBG("%s removing %dMR", hdev->name, instance); 2875 2876 if (hdev->cur_adv_instance == instance) { 2877 if (hdev->adv_instance_timeout) { 2878 cancel_delayed_work(&hdev->adv_instance_expire); 2879 hdev->adv_instance_timeout = 0; 2880 } 2881 hdev->cur_adv_instance = 0x00; 2882 } 2883 2884 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); 2885 2886 list_del(&adv_instance->list); 2887 kfree(adv_instance); 2888 2889 hdev->adv_instance_cnt--; 2890 2891 return 0; 2892 } 2893 2894 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired) 2895 { 2896 struct adv_info *adv_instance, *n; 2897 2898 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) 2899 adv_instance->rpa_expired = rpa_expired; 2900 } 2901 2902 /* This function requires the caller holds hdev->lock */ 2903 void hci_adv_instances_clear(struct hci_dev *hdev) 2904 { 2905 struct adv_info *adv_instance, *n; 2906 2907 if (hdev->adv_instance_timeout) { 2908 cancel_delayed_work(&hdev->adv_instance_expire); 2909 hdev->adv_instance_timeout = 0; 2910 } 2911 2912 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { 2913 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); 2914 list_del(&adv_instance->list); 2915 kfree(adv_instance); 2916 } 2917 2918 hdev->adv_instance_cnt = 0; 2919 hdev->cur_adv_instance = 0x00; 2920 } 2921 2922 static void adv_instance_rpa_expired(struct work_struct *work) 2923 { 2924 struct adv_info *adv_instance = container_of(work, struct adv_info, 2925 rpa_expired_cb.work); 2926 2927 BT_DBG(""); 2928 2929 
adv_instance->rpa_expired = true; 2930 } 2931 2932 /* This function requires the caller holds hdev->lock */ 2933 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, 2934 u16 adv_data_len, u8 *adv_data, 2935 u16 scan_rsp_len, u8 *scan_rsp_data, 2936 u16 timeout, u16 duration) 2937 { 2938 struct adv_info *adv_instance; 2939 2940 adv_instance = hci_find_adv_instance(hdev, instance); 2941 if (adv_instance) { 2942 memset(adv_instance->adv_data, 0, 2943 sizeof(adv_instance->adv_data)); 2944 memset(adv_instance->scan_rsp_data, 0, 2945 sizeof(adv_instance->scan_rsp_data)); 2946 } else { 2947 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets || 2948 instance < 1 || instance > HCI_MAX_ADV_INSTANCES) 2949 return -EOVERFLOW; 2950 2951 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL); 2952 if (!adv_instance) 2953 return -ENOMEM; 2954 2955 adv_instance->pending = true; 2956 adv_instance->instance = instance; 2957 list_add(&adv_instance->list, &hdev->adv_instances); 2958 hdev->adv_instance_cnt++; 2959 } 2960 2961 adv_instance->flags = flags; 2962 adv_instance->adv_data_len = adv_data_len; 2963 adv_instance->scan_rsp_len = scan_rsp_len; 2964 2965 if (adv_data_len) 2966 memcpy(adv_instance->adv_data, adv_data, adv_data_len); 2967 2968 if (scan_rsp_len) 2969 memcpy(adv_instance->scan_rsp_data, 2970 scan_rsp_data, scan_rsp_len); 2971 2972 adv_instance->timeout = timeout; 2973 adv_instance->remaining_time = timeout; 2974 2975 if (duration == 0) 2976 adv_instance->duration = HCI_DEFAULT_ADV_DURATION; 2977 else 2978 adv_instance->duration = duration; 2979 2980 adv_instance->tx_power = HCI_TX_POWER_INVALID; 2981 2982 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb, 2983 adv_instance_rpa_expired); 2984 2985 BT_DBG("%s for %dMR", hdev->name, instance); 2986 2987 return 0; 2988 } 2989 2990 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, 2991 bdaddr_t *bdaddr, u8 type) 2992 { 2993 struct bdaddr_list *b; 2994 2995 list_for_each_entry(b, bdaddr_list, list) { 2996 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) 2997 return b; 2998 } 2999 3000 return NULL; 3001 } 3002 3003 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( 3004 struct list_head *bdaddr_list, bdaddr_t *bdaddr, 3005 u8 type) 3006 { 3007 struct bdaddr_list_with_irk *b; 3008 3009 list_for_each_entry(b, bdaddr_list, list) { 3010 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) 3011 return b; 3012 } 3013 3014 return NULL; 3015 } 3016 3017 void hci_bdaddr_list_clear(struct list_head *bdaddr_list) 3018 { 3019 struct bdaddr_list *b, *n; 3020 3021 list_for_each_entry_safe(b, n, bdaddr_list, list) { 3022 list_del(&b->list); 3023 kfree(b); 3024 } 3025 } 3026 3027 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type) 3028 { 3029 struct bdaddr_list *entry; 3030 3031 if (!bacmp(bdaddr, BDADDR_ANY)) 3032 return -EBADF; 3033 3034 if (hci_bdaddr_list_lookup(list, bdaddr, type)) 3035 return -EEXIST; 3036 3037 entry = kzalloc(sizeof(*entry), GFP_KERNEL); 3038 if (!entry) 3039 return -ENOMEM; 3040 3041 bacpy(&entry->bdaddr, bdaddr); 3042 entry->bdaddr_type = type; 3043 3044 list_add(&entry->list, list); 3045 3046 return 0; 3047 } 3048 3049 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, 3050 u8 type, u8 *peer_irk, u8 *local_irk) 3051 { 3052 struct bdaddr_list_with_irk *entry; 3053 3054 if (!bacmp(bdaddr, BDADDR_ANY)) 3055 return -EBADF; 3056 3057 if (hci_bdaddr_list_lookup(list, bdaddr, type)) 3058 return -EEXIST; 3059 3060 entry = 
kzalloc(sizeof(*entry), GFP_KERNEL); 3061 if (!entry) 3062 return -ENOMEM; 3063 3064 bacpy(&entry->bdaddr, bdaddr); 3065 entry->bdaddr_type = type; 3066 3067 if (peer_irk) 3068 memcpy(entry->peer_irk, peer_irk, 16); 3069 3070 if (local_irk) 3071 memcpy(entry->local_irk, local_irk, 16); 3072 3073 list_add(&entry->list, list); 3074 3075 return 0; 3076 } 3077 3078 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) 3079 { 3080 struct bdaddr_list *entry; 3081 3082 if (!bacmp(bdaddr, BDADDR_ANY)) { 3083 hci_bdaddr_list_clear(list); 3084 return 0; 3085 } 3086 3087 entry = hci_bdaddr_list_lookup(list, bdaddr, type); 3088 if (!entry) 3089 return -ENOENT; 3090 3091 list_del(&entry->list); 3092 kfree(entry); 3093 3094 return 0; 3095 } 3096 3097 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, 3098 u8 type) 3099 { 3100 struct bdaddr_list_with_irk *entry; 3101 3102 if (!bacmp(bdaddr, BDADDR_ANY)) { 3103 hci_bdaddr_list_clear(list); 3104 return 0; 3105 } 3106 3107 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type); 3108 if (!entry) 3109 return -ENOENT; 3110 3111 list_del(&entry->list); 3112 kfree(entry); 3113 3114 return 0; 3115 } 3116 3117 /* This function requires the caller holds hdev->lock */ 3118 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, 3119 bdaddr_t *addr, u8 addr_type) 3120 { 3121 struct hci_conn_params *params; 3122 3123 list_for_each_entry(params, &hdev->le_conn_params, list) { 3124 if (bacmp(&params->addr, addr) == 0 && 3125 params->addr_type == addr_type) { 3126 return params; 3127 } 3128 } 3129 3130 return NULL; 3131 } 3132 3133 /* This function requires the caller holds hdev->lock */ 3134 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, 3135 bdaddr_t *addr, u8 addr_type) 3136 { 3137 struct hci_conn_params *param; 3138 3139 list_for_each_entry(param, list, action) { 3140 if (bacmp(&param->addr, addr) == 0 && 3141 param->addr_type == addr_type) 3142 return param; 3143 } 3144 3145 return NULL; 3146 } 3147 3148 /* This function requires the caller holds hdev->lock */ 3149 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, 3150 bdaddr_t *addr, u8 addr_type) 3151 { 3152 struct hci_conn_params *params; 3153 3154 params = hci_conn_params_lookup(hdev, addr, addr_type); 3155 if (params) 3156 return params; 3157 3158 params = kzalloc(sizeof(*params), GFP_KERNEL); 3159 if (!params) { 3160 bt_dev_err(hdev, "out of memory"); 3161 return NULL; 3162 } 3163 3164 bacpy(&params->addr, addr); 3165 params->addr_type = addr_type; 3166 3167 list_add(&params->list, &hdev->le_conn_params); 3168 INIT_LIST_HEAD(&params->action); 3169 3170 params->conn_min_interval = hdev->le_conn_min_interval; 3171 params->conn_max_interval = hdev->le_conn_max_interval; 3172 params->conn_latency = hdev->le_conn_latency; 3173 params->supervision_timeout = hdev->le_supv_timeout; 3174 params->auto_connect = HCI_AUTO_CONN_DISABLED; 3175 3176 BT_DBG("addr %pMR (type %u)", addr, addr_type); 3177 3178 return params; 3179 } 3180 3181 static void hci_conn_params_free(struct hci_conn_params *params) 3182 { 3183 if (params->conn) { 3184 hci_conn_drop(params->conn); 3185 hci_conn_put(params->conn); 3186 } 3187 3188 list_del(&params->action); 3189 list_del(&params->list); 3190 kfree(params); 3191 } 3192 3193 /* This function requires the caller holds hdev->lock */ 3194 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) 3195 { 3196 struct hci_conn_params *params; 3197 3198 params = hci_conn_params_lookup(hdev, addr,
addr_type); 3199 if (!params) 3200 return; 3201 3202 hci_conn_params_free(params); 3203 3204 hci_update_background_scan(hdev); 3205 3206 BT_DBG("addr %pMR (type %u)", addr, addr_type); 3207 } 3208 3209 /* This function requires the caller holds hdev->lock */ 3210 void hci_conn_params_clear_disabled(struct hci_dev *hdev) 3211 { 3212 struct hci_conn_params *params, *tmp; 3213 3214 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { 3215 if (params->auto_connect != HCI_AUTO_CONN_DISABLED) 3216 continue; 3217 3218 /* If trying to establish a one-time connection to a disabled 3219 * device, leave the params, but mark them as just once. 3220 */ 3221 if (params->explicit_connect) { 3222 params->auto_connect = HCI_AUTO_CONN_EXPLICIT; 3223 continue; 3224 } 3225 3226 list_del(&params->list); 3227 kfree(params); 3228 } 3229 3230 BT_DBG("All LE disabled connection parameters were removed"); 3231 } 3232 3233 /* This function requires the caller holds hdev->lock */ 3234 static void hci_conn_params_clear_all(struct hci_dev *hdev) 3235 { 3236 struct hci_conn_params *params, *tmp; 3237 3238 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) 3239 hci_conn_params_free(params); 3240 3241 BT_DBG("All LE connection parameters were removed"); 3242 } 3243 3244 /* Copy the Identity Address of the controller. 3245 * 3246 * If the controller has a public BD_ADDR, then by default use that one. 3247 * If this is a LE only controller without a public address, default to 3248 * the static random address. 3249 * 3250 * For debugging purposes it is possible to force controllers with a 3251 * public address to use the static random address instead. 3252 * 3253 * In case BR/EDR has been disabled on a dual-mode controller and 3254 * userspace has configured a static address, then that address 3255 * becomes the identity address instead of the public BR/EDR address.
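 *
 * For example, a dual-mode controller with BR/EDR disabled through
 * mgmt and a user-configured static address reports that static
 * address with type ADDR_LE_DEV_RANDOM, whereas a controller with a
 * public BD_ADDR and BR/EDR enabled keeps reporting the public address
 * with type ADDR_LE_DEV_PUBLIC.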
3256 */ 3257 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, 3258 u8 *bdaddr_type) 3259 { 3260 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || 3261 !bacmp(&hdev->bdaddr, BDADDR_ANY) || 3262 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && 3263 bacmp(&hdev->static_addr, BDADDR_ANY))) { 3264 bacpy(bdaddr, &hdev->static_addr); 3265 *bdaddr_type = ADDR_LE_DEV_RANDOM; 3266 } else { 3267 bacpy(bdaddr, &hdev->bdaddr); 3268 *bdaddr_type = ADDR_LE_DEV_PUBLIC; 3269 } 3270 } 3271 3272 static int hci_suspend_wait_event(struct hci_dev *hdev) 3273 { 3274 #define WAKE_COND \ 3275 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \ 3276 __SUSPEND_NUM_TASKS) 3277 3278 int i; 3279 int ret = wait_event_timeout(hdev->suspend_wait_q, 3280 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT); 3281 3282 if (ret == 0) { 3283 bt_dev_dbg(hdev, "Timed out waiting for suspend"); 3284 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) { 3285 if (test_bit(i, hdev->suspend_tasks)) 3286 bt_dev_dbg(hdev, "Bit %d is set", i); 3287 clear_bit(i, hdev->suspend_tasks); 3288 } 3289 3290 ret = -ETIMEDOUT; 3291 } else { 3292 ret = 0; 3293 } 3294 3295 return ret; 3296 } 3297 3298 static void hci_prepare_suspend(struct work_struct *work) 3299 { 3300 struct hci_dev *hdev = 3301 container_of(work, struct hci_dev, suspend_prepare); 3302 3303 hci_dev_lock(hdev); 3304 hci_req_prepare_suspend(hdev, hdev->suspend_state_next); 3305 hci_dev_unlock(hdev); 3306 } 3307 3308 static int hci_change_suspend_state(struct hci_dev *hdev, 3309 enum suspended_state next) 3310 { 3311 hdev->suspend_state_next = next; 3312 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks); 3313 queue_work(hdev->req_workqueue, &hdev->suspend_prepare); 3314 return hci_suspend_wait_event(hdev); 3315 } 3316 3317 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, 3318 void *data) 3319 { 3320 struct hci_dev *hdev = 3321 container_of(nb, struct hci_dev, suspend_notifier); 3322 int ret = 0; 3323 3324 /* If powering down, wait for completion. */ 3325 if (mgmt_powering_down(hdev)) { 3326 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks); 3327 ret = hci_suspend_wait_event(hdev); 3328 if (ret) 3329 goto done; 3330 } 3331 3332 /* Suspend notifier should only act on events when powered. */ 3333 if (!hdev_is_powered(hdev)) 3334 goto done; 3335 3336 if (action == PM_SUSPEND_PREPARE) { 3337 /* Suspend consists of two actions: 3338 * - First, disconnect everything and make the controller not 3339 * connectable (disabling scanning) 3340 * - Second, program event filter/whitelist and enable scan 3341 */ 3342 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT); 3343 3344 /* Only configure whitelist if disconnect succeeded */ 3345 if (!ret) 3346 ret = hci_change_suspend_state(hdev, 3347 BT_SUSPEND_COMPLETE); 3348 } else if (action == PM_POST_SUSPEND) { 3349 ret = hci_change_suspend_state(hdev, BT_RUNNING); 3350 } 3351 3352 /* If suspend failed, restore it to running */ 3353 if (ret && action == PM_SUSPEND_PREPARE) 3354 hci_change_suspend_state(hdev, BT_RUNNING); 3355 3356 done: 3357 return ret ? 
notifier_from_errno(-EBUSY) : NOTIFY_STOP; 3358 } 3359 3360 /* Alloc HCI device */ 3361 struct hci_dev *hci_alloc_dev(void) 3362 { 3363 struct hci_dev *hdev; 3364 3365 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL); 3366 if (!hdev) 3367 return NULL; 3368 3369 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); 3370 hdev->esco_type = (ESCO_HV1); 3371 hdev->link_mode = (HCI_LM_ACCEPT); 3372 hdev->num_iac = 0x01; /* One IAC support is mandatory */ 3373 hdev->io_capability = 0x03; /* No Input No Output */ 3374 hdev->manufacturer = 0xffff; /* Default to internal use */ 3375 hdev->inq_tx_power = HCI_TX_POWER_INVALID; 3376 hdev->adv_tx_power = HCI_TX_POWER_INVALID; 3377 hdev->adv_instance_cnt = 0; 3378 hdev->cur_adv_instance = 0x00; 3379 hdev->adv_instance_timeout = 0; 3380 3381 hdev->sniff_max_interval = 800; 3382 hdev->sniff_min_interval = 80; 3383 3384 hdev->le_adv_channel_map = 0x07; 3385 hdev->le_adv_min_interval = 0x0800; 3386 hdev->le_adv_max_interval = 0x0800; 3387 hdev->le_scan_interval = 0x0060; 3388 hdev->le_scan_window = 0x0030; 3389 hdev->le_conn_min_interval = 0x0018; 3390 hdev->le_conn_max_interval = 0x0028; 3391 hdev->le_conn_latency = 0x0000; 3392 hdev->le_supv_timeout = 0x002a; 3393 hdev->le_def_tx_len = 0x001b; 3394 hdev->le_def_tx_time = 0x0148; 3395 hdev->le_max_tx_len = 0x001b; 3396 hdev->le_max_tx_time = 0x0148; 3397 hdev->le_max_rx_len = 0x001b; 3398 hdev->le_max_rx_time = 0x0148; 3399 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE; 3400 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE; 3401 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; 3402 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; 3403 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; 3404 3405 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; 3406 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; 3407 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; 3408 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; 3409 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; 3410 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; 3411 3412 mutex_init(&hdev->lock); 3413 mutex_init(&hdev->req_lock); 3414 3415 INIT_LIST_HEAD(&hdev->mgmt_pending); 3416 INIT_LIST_HEAD(&hdev->blacklist); 3417 INIT_LIST_HEAD(&hdev->whitelist); 3418 INIT_LIST_HEAD(&hdev->wakeable); 3419 INIT_LIST_HEAD(&hdev->uuids); 3420 INIT_LIST_HEAD(&hdev->link_keys); 3421 INIT_LIST_HEAD(&hdev->long_term_keys); 3422 INIT_LIST_HEAD(&hdev->identity_resolving_keys); 3423 INIT_LIST_HEAD(&hdev->remote_oob_data); 3424 INIT_LIST_HEAD(&hdev->le_white_list); 3425 INIT_LIST_HEAD(&hdev->le_resolv_list); 3426 INIT_LIST_HEAD(&hdev->le_conn_params); 3427 INIT_LIST_HEAD(&hdev->pend_le_conns); 3428 INIT_LIST_HEAD(&hdev->pend_le_reports); 3429 INIT_LIST_HEAD(&hdev->conn_hash.list); 3430 INIT_LIST_HEAD(&hdev->adv_instances); 3431 INIT_LIST_HEAD(&hdev->blocked_keys); 3432 3433 INIT_WORK(&hdev->rx_work, hci_rx_work); 3434 INIT_WORK(&hdev->cmd_work, hci_cmd_work); 3435 INIT_WORK(&hdev->tx_work, hci_tx_work); 3436 INIT_WORK(&hdev->power_on, hci_power_on); 3437 INIT_WORK(&hdev->error_reset, hci_error_reset); 3438 INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend); 3439 3440 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); 3441 3442 skb_queue_head_init(&hdev->rx_q); 3443 skb_queue_head_init(&hdev->cmd_q); 3444 skb_queue_head_init(&hdev->raw_q); 3445 3446 init_waitqueue_head(&hdev->req_wait_q); 3447 init_waitqueue_head(&hdev->suspend_wait_q); 3448 3449 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); 3450 3451 hci_request_setup(hdev); 3452 3453 hci_init_sysfs(hdev); 3454 
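/* Note on the defaults assigned above: the LE parameters use the
 * standard HCI units, i.e. advertising and scan values in 0.625 ms
 * steps (0x0800 = 1.28 s advertising interval, 0x0060/0x0030 = 60 ms
 * scan interval / 30 ms scan window), connection intervals in 1.25 ms
 * steps (0x0018 = 30 ms, 0x0028 = 50 ms) and the supervision timeout
 * in 10 ms steps (0x002a = 420 ms); the sniff limits are in 0.625 ms
 * baseband slots.
 */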
discovery_init(hdev); 3455 3456 return hdev; 3457 } 3458 EXPORT_SYMBOL(hci_alloc_dev); 3459 3460 /* Free HCI device */ 3461 void hci_free_dev(struct hci_dev *hdev) 3462 { 3463 /* will free via device release */ 3464 put_device(&hdev->dev); 3465 } 3466 EXPORT_SYMBOL(hci_free_dev); 3467 3468 /* Register HCI device */ 3469 int hci_register_dev(struct hci_dev *hdev) 3470 { 3471 int id, error; 3472 3473 if (!hdev->open || !hdev->close || !hdev->send) 3474 return -EINVAL; 3475 3476 /* Do not allow HCI_AMP devices to register at index 0, 3477 * so the index can be used as the AMP controller ID. 3478 */ 3479 switch (hdev->dev_type) { 3480 case HCI_PRIMARY: 3481 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL); 3482 break; 3483 case HCI_AMP: 3484 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL); 3485 break; 3486 default: 3487 return -EINVAL; 3488 } 3489 3490 if (id < 0) 3491 return id; 3492 3493 sprintf(hdev->name, "hci%d", id); 3494 hdev->id = id; 3495 3496 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 3497 3498 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name); 3499 if (!hdev->workqueue) { 3500 error = -ENOMEM; 3501 goto err; 3502 } 3503 3504 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, 3505 hdev->name); 3506 if (!hdev->req_workqueue) { 3507 destroy_workqueue(hdev->workqueue); 3508 error = -ENOMEM; 3509 goto err; 3510 } 3511 3512 if (!IS_ERR_OR_NULL(bt_debugfs)) 3513 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); 3514 3515 dev_set_name(&hdev->dev, "%s", hdev->name); 3516 3517 error = device_add(&hdev->dev); 3518 if (error < 0) 3519 goto err_wqueue; 3520 3521 hci_leds_init(hdev); 3522 3523 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 3524 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, 3525 hdev); 3526 if (hdev->rfkill) { 3527 if (rfkill_register(hdev->rfkill) < 0) { 3528 rfkill_destroy(hdev->rfkill); 3529 hdev->rfkill = NULL; 3530 } 3531 } 3532 3533 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) 3534 hci_dev_set_flag(hdev, HCI_RFKILLED); 3535 3536 hci_dev_set_flag(hdev, HCI_SETUP); 3537 hci_dev_set_flag(hdev, HCI_AUTO_OFF); 3538 3539 if (hdev->dev_type == HCI_PRIMARY) { 3540 /* Assume BR/EDR support until proven otherwise (such as 3541 * through reading supported features during init. 3542 */ 3543 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); 3544 } 3545 3546 write_lock(&hci_dev_list_lock); 3547 list_add(&hdev->list, &hci_dev_list); 3548 write_unlock(&hci_dev_list_lock); 3549 3550 /* Devices that are marked for raw-only usage are unconfigured 3551 * and should not be included in normal operation. 
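 * Setting HCI_UNCONFIGURED below makes them show up to userspace via
 * the unconfigured mgmt events instead of as regular controllers.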
3552 */ 3553 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 3554 hci_dev_set_flag(hdev, HCI_UNCONFIGURED); 3555 3556 hci_sock_dev_event(hdev, HCI_DEV_REG); 3557 hci_dev_hold(hdev); 3558 3559 hdev->suspend_notifier.notifier_call = hci_suspend_notifier; 3560 error = register_pm_notifier(&hdev->suspend_notifier); 3561 if (error) 3562 goto err_wqueue; 3563 3564 queue_work(hdev->req_workqueue, &hdev->power_on); 3565 3566 return id; 3567 3568 err_wqueue: 3569 destroy_workqueue(hdev->workqueue); 3570 destroy_workqueue(hdev->req_workqueue); 3571 err: 3572 ida_simple_remove(&hci_index_ida, hdev->id); 3573 3574 return error; 3575 } 3576 EXPORT_SYMBOL(hci_register_dev); 3577 3578 /* Unregister HCI device */ 3579 void hci_unregister_dev(struct hci_dev *hdev) 3580 { 3581 int id; 3582 3583 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 3584 3585 hci_dev_set_flag(hdev, HCI_UNREGISTER); 3586 3587 id = hdev->id; 3588 3589 write_lock(&hci_dev_list_lock); 3590 list_del(&hdev->list); 3591 write_unlock(&hci_dev_list_lock); 3592 3593 cancel_work_sync(&hdev->power_on); 3594 3595 hci_dev_do_close(hdev); 3596 3597 unregister_pm_notifier(&hdev->suspend_notifier); 3598 3599 if (!test_bit(HCI_INIT, &hdev->flags) && 3600 !hci_dev_test_flag(hdev, HCI_SETUP) && 3601 !hci_dev_test_flag(hdev, HCI_CONFIG)) { 3602 hci_dev_lock(hdev); 3603 mgmt_index_removed(hdev); 3604 hci_dev_unlock(hdev); 3605 } 3606 3607 /* mgmt_index_removed should take care of emptying the 3608 * pending list */ 3609 BUG_ON(!list_empty(&hdev->mgmt_pending)); 3610 3611 hci_sock_dev_event(hdev, HCI_DEV_UNREG); 3612 3613 if (hdev->rfkill) { 3614 rfkill_unregister(hdev->rfkill); 3615 rfkill_destroy(hdev->rfkill); 3616 } 3617 3618 device_del(&hdev->dev); 3619 3620 debugfs_remove_recursive(hdev->debugfs); 3621 kfree_const(hdev->hw_info); 3622 kfree_const(hdev->fw_info); 3623 3624 destroy_workqueue(hdev->workqueue); 3625 destroy_workqueue(hdev->req_workqueue); 3626 3627 hci_dev_lock(hdev); 3628 hci_bdaddr_list_clear(&hdev->blacklist); 3629 hci_bdaddr_list_clear(&hdev->whitelist); 3630 hci_uuids_clear(hdev); 3631 hci_link_keys_clear(hdev); 3632 hci_smp_ltks_clear(hdev); 3633 hci_smp_irks_clear(hdev); 3634 hci_remote_oob_data_clear(hdev); 3635 hci_adv_instances_clear(hdev); 3636 hci_bdaddr_list_clear(&hdev->le_white_list); 3637 hci_bdaddr_list_clear(&hdev->le_resolv_list); 3638 hci_conn_params_clear_all(hdev); 3639 hci_discovery_filter_clear(hdev); 3640 hci_blocked_keys_clear(hdev); 3641 hci_dev_unlock(hdev); 3642 3643 hci_dev_put(hdev); 3644 3645 ida_simple_remove(&hci_index_ida, id); 3646 } 3647 EXPORT_SYMBOL(hci_unregister_dev); 3648 3649 /* Suspend HCI device */ 3650 int hci_suspend_dev(struct hci_dev *hdev) 3651 { 3652 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); 3653 return 0; 3654 } 3655 EXPORT_SYMBOL(hci_suspend_dev); 3656 3657 /* Resume HCI device */ 3658 int hci_resume_dev(struct hci_dev *hdev) 3659 { 3660 hci_sock_dev_event(hdev, HCI_DEV_RESUME); 3661 return 0; 3662 } 3663 EXPORT_SYMBOL(hci_resume_dev); 3664 3665 /* Reset HCI device */ 3666 int hci_reset_dev(struct hci_dev *hdev) 3667 { 3668 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 }; 3669 struct sk_buff *skb; 3670 3671 skb = bt_skb_alloc(3, GFP_ATOMIC); 3672 if (!skb) 3673 return -ENOMEM; 3674 3675 hci_skb_pkt_type(skb) = HCI_EVENT_PKT; 3676 skb_put_data(skb, hw_err, 3); 3677 3678 /* Send Hardware Error to upper stack */ 3679 return hci_recv_frame(hdev, skb); 3680 } 3681 EXPORT_SYMBOL(hci_reset_dev); 3682 3683 /* Receive frame from HCI drivers */ 3684 int 
hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) 3685 { 3686 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) 3687 && !test_bit(HCI_INIT, &hdev->flags))) { 3688 kfree_skb(skb); 3689 return -ENXIO; 3690 } 3691 3692 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT && 3693 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && 3694 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && 3695 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) { 3696 kfree_skb(skb); 3697 return -EINVAL; 3698 } 3699 3700 /* Incoming skb */ 3701 bt_cb(skb)->incoming = 1; 3702 3703 /* Time stamp */ 3704 __net_timestamp(skb); 3705 3706 skb_queue_tail(&hdev->rx_q, skb); 3707 queue_work(hdev->workqueue, &hdev->rx_work); 3708 3709 return 0; 3710 } 3711 EXPORT_SYMBOL(hci_recv_frame); 3712 3713 /* Receive diagnostic message from HCI drivers */ 3714 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb) 3715 { 3716 /* Mark as diagnostic packet */ 3717 hci_skb_pkt_type(skb) = HCI_DIAG_PKT; 3718 3719 /* Time stamp */ 3720 __net_timestamp(skb); 3721 3722 skb_queue_tail(&hdev->rx_q, skb); 3723 queue_work(hdev->workqueue, &hdev->rx_work); 3724 3725 return 0; 3726 } 3727 EXPORT_SYMBOL(hci_recv_diag); 3728 3729 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...) 3730 { 3731 va_list vargs; 3732 3733 va_start(vargs, fmt); 3734 kfree_const(hdev->hw_info); 3735 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); 3736 va_end(vargs); 3737 } 3738 EXPORT_SYMBOL(hci_set_hw_info); 3739 3740 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...) 3741 { 3742 va_list vargs; 3743 3744 va_start(vargs, fmt); 3745 kfree_const(hdev->fw_info); 3746 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); 3747 va_end(vargs); 3748 } 3749 EXPORT_SYMBOL(hci_set_fw_info); 3750 3751 /* ---- Interface to upper protocols ---- */ 3752 3753 int hci_register_cb(struct hci_cb *cb) 3754 { 3755 BT_DBG("%p name %s", cb, cb->name); 3756 3757 mutex_lock(&hci_cb_list_lock); 3758 list_add_tail(&cb->list, &hci_cb_list); 3759 mutex_unlock(&hci_cb_list_lock); 3760 3761 return 0; 3762 } 3763 EXPORT_SYMBOL(hci_register_cb); 3764 3765 int hci_unregister_cb(struct hci_cb *cb) 3766 { 3767 BT_DBG("%p name %s", cb, cb->name); 3768 3769 mutex_lock(&hci_cb_list_lock); 3770 list_del(&cb->list); 3771 mutex_unlock(&hci_cb_list_lock); 3772 3773 return 0; 3774 } 3775 EXPORT_SYMBOL(hci_unregister_cb); 3776 3777 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) 3778 { 3779 int err; 3780 3781 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb), 3782 skb->len); 3783 3784 /* Time stamp */ 3785 __net_timestamp(skb); 3786 3787 /* Send copy to monitor */ 3788 hci_send_to_monitor(hdev, skb); 3789 3790 if (atomic_read(&hdev->promisc)) { 3791 /* Send copy to the sockets */ 3792 hci_send_to_sock(hdev, skb); 3793 } 3794 3795 /* Get rid of skb owner, prior to sending to the driver. 
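 * skb_orphan() runs the buffer's destructor and clears its socket
 * owner, so a socket that queued this frame is no longer charged for
 * the memory once the driver takes ownership of the buffer.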
*/ 3796 skb_orphan(skb); 3797 3798 if (!test_bit(HCI_RUNNING, &hdev->flags)) { 3799 kfree_skb(skb); 3800 return; 3801 } 3802 3803 err = hdev->send(hdev, skb); 3804 if (err < 0) { 3805 bt_dev_err(hdev, "sending frame failed (%d)", err); 3806 kfree_skb(skb); 3807 } 3808 } 3809 3810 /* Send HCI command */ 3811 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, 3812 const void *param) 3813 { 3814 struct sk_buff *skb; 3815 3816 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); 3817 3818 skb = hci_prepare_cmd(hdev, opcode, plen, param); 3819 if (!skb) { 3820 bt_dev_err(hdev, "no memory for command"); 3821 return -ENOMEM; 3822 } 3823 3824 /* Stand-alone HCI commands must be flagged as 3825 * single-command requests. 3826 */ 3827 bt_cb(skb)->hci.req_flags |= HCI_REQ_START; 3828 3829 skb_queue_tail(&hdev->cmd_q, skb); 3830 queue_work(hdev->workqueue, &hdev->cmd_work); 3831 3832 return 0; 3833 } 3834 3835 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, 3836 const void *param) 3837 { 3838 struct sk_buff *skb; 3839 3840 if (hci_opcode_ogf(opcode) != 0x3f) { 3841 /* A controller receiving a command shall respond with either 3842 * a Command Status Event or a Command Complete Event. 3843 * Therefore, all standard HCI commands must be sent via the 3844 * standard API, using hci_send_cmd or hci_cmd_sync helpers. 3845 * Some vendors do not comply with this rule for vendor-specific 3846 * commands and do not return any event. We want to support 3847 * unresponded commands for such cases only. 3848 */ 3849 bt_dev_err(hdev, "unresponded command not supported"); 3850 return -EINVAL; 3851 } 3852 3853 skb = hci_prepare_cmd(hdev, opcode, plen, param); 3854 if (!skb) { 3855 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", 3856 opcode); 3857 return -ENOMEM; 3858 } 3859 3860 hci_send_frame(hdev, skb); 3861 3862 return 0; 3863 } 3864 EXPORT_SYMBOL(__hci_cmd_send); 3865 3866 /* Get data from the previously sent command */ 3867 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) 3868 { 3869 struct hci_command_hdr *hdr; 3870 3871 if (!hdev->sent_cmd) 3872 return NULL; 3873 3874 hdr = (void *) hdev->sent_cmd->data; 3875 3876 if (hdr->opcode != cpu_to_le16(opcode)) 3877 return NULL; 3878 3879 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); 3880 3881 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; 3882 } 3883 3884 /* Send HCI command and wait for command commplete event */ 3885 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, 3886 const void *param, u32 timeout) 3887 { 3888 struct sk_buff *skb; 3889 3890 if (!test_bit(HCI_UP, &hdev->flags)) 3891 return ERR_PTR(-ENETDOWN); 3892 3893 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); 3894 3895 hci_req_sync_lock(hdev); 3896 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout); 3897 hci_req_sync_unlock(hdev); 3898 3899 return skb; 3900 } 3901 EXPORT_SYMBOL(hci_cmd_sync); 3902 3903 /* Send ACL data */ 3904 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) 3905 { 3906 struct hci_acl_hdr *hdr; 3907 int len = skb->len; 3908 3909 skb_push(skb, HCI_ACL_HDR_SIZE); 3910 skb_reset_transport_header(skb); 3911 hdr = (struct hci_acl_hdr *)skb_transport_header(skb); 3912 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); 3913 hdr->dlen = cpu_to_le16(len); 3914 } 3915 3916 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, 3917 struct sk_buff *skb, __u16 flags) 3918 { 3919 struct hci_conn *conn = chan->conn; 3920 struct hci_dev *hdev 
= conn->hdev; 3921 struct sk_buff *list; 3922 3923 skb->len = skb_headlen(skb); 3924 skb->data_len = 0; 3925 3926 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; 3927 3928 switch (hdev->dev_type) { 3929 case HCI_PRIMARY: 3930 hci_add_acl_hdr(skb, conn->handle, flags); 3931 break; 3932 case HCI_AMP: 3933 hci_add_acl_hdr(skb, chan->handle, flags); 3934 break; 3935 default: 3936 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); 3937 return; 3938 } 3939 3940 list = skb_shinfo(skb)->frag_list; 3941 if (!list) { 3942 /* Non fragmented */ 3943 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); 3944 3945 skb_queue_tail(queue, skb); 3946 } else { 3947 /* Fragmented */ 3948 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3949 3950 skb_shinfo(skb)->frag_list = NULL; 3951 3952 /* Queue all fragments atomically. We need to use spin_lock_bh 3953 * here because of 6LoWPAN links, as there this function is 3954 * called from softirq and using normal spin lock could cause 3955 * deadlocks. 3956 */ 3957 spin_lock_bh(&queue->lock); 3958 3959 __skb_queue_tail(queue, skb); 3960 3961 flags &= ~ACL_START; 3962 flags |= ACL_CONT; 3963 do { 3964 skb = list; list = list->next; 3965 3966 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; 3967 hci_add_acl_hdr(skb, conn->handle, flags); 3968 3969 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); 3970 3971 __skb_queue_tail(queue, skb); 3972 } while (list); 3973 3974 spin_unlock_bh(&queue->lock); 3975 } 3976 } 3977 3978 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) 3979 { 3980 struct hci_dev *hdev = chan->conn->hdev; 3981 3982 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); 3983 3984 hci_queue_acl(chan, &chan->data_q, skb, flags); 3985 3986 queue_work(hdev->workqueue, &hdev->tx_work); 3987 } 3988 3989 /* Send SCO data */ 3990 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) 3991 { 3992 struct hci_dev *hdev = conn->hdev; 3993 struct hci_sco_hdr hdr; 3994 3995 BT_DBG("%s len %d", hdev->name, skb->len); 3996 3997 hdr.handle = cpu_to_le16(conn->handle); 3998 hdr.dlen = skb->len; 3999 4000 skb_push(skb, HCI_SCO_HDR_SIZE); 4001 skb_reset_transport_header(skb); 4002 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); 4003 4004 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT; 4005 4006 skb_queue_tail(&conn->data_q, skb); 4007 queue_work(hdev->workqueue, &hdev->tx_work); 4008 } 4009 4010 /* ---- HCI TX task (outgoing data) ---- */ 4011 4012 /* HCI Connection scheduler */ 4013 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, 4014 int *quote) 4015 { 4016 struct hci_conn_hash *h = &hdev->conn_hash; 4017 struct hci_conn *conn = NULL, *c; 4018 unsigned int num = 0, min = ~0; 4019 4020 /* We don't have to lock device here. Connections are always 4021 * added and removed with TX task disabled. */ 4022 4023 rcu_read_lock(); 4024 4025 list_for_each_entry_rcu(c, &h->list, list) { 4026 if (c->type != type || skb_queue_empty(&c->data_q)) 4027 continue; 4028 4029 if (c->state != BT_CONNECTED && c->state != BT_CONFIG) 4030 continue; 4031 4032 num++; 4033 4034 if (c->sent < min) { 4035 min = c->sent; 4036 conn = c; 4037 } 4038 4039 if (hci_conn_num(hdev, type) == num) 4040 break; 4041 } 4042 4043 rcu_read_unlock(); 4044 4045 if (conn) { 4046 int cnt, q; 4047 4048 switch (conn->type) { 4049 case ACL_LINK: 4050 cnt = hdev->acl_cnt; 4051 break; 4052 case SCO_LINK: 4053 case ESCO_LINK: 4054 cnt = hdev->sco_cnt; 4055 break; 4056 case LE_LINK: 4057 cnt = hdev->le_mtu ? 
hdev->le_cnt : hdev->acl_cnt; 4058 break; 4059 default: 4060 cnt = 0; 4061 bt_dev_err(hdev, "unknown link type %d", conn->type); 4062 } 4063 4064 q = cnt / num; 4065 *quote = q ? q : 1; 4066 } else 4067 *quote = 0; 4068 4069 BT_DBG("conn %p quote %d", conn, *quote); 4070 return conn; 4071 } 4072 4073 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) 4074 { 4075 struct hci_conn_hash *h = &hdev->conn_hash; 4076 struct hci_conn *c; 4077 4078 bt_dev_err(hdev, "link tx timeout"); 4079 4080 rcu_read_lock(); 4081 4082 /* Kill stalled connections */ 4083 list_for_each_entry_rcu(c, &h->list, list) { 4084 if (c->type == type && c->sent) { 4085 bt_dev_err(hdev, "killing stalled connection %pMR", 4086 &c->dst); 4087 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM); 4088 } 4089 } 4090 4091 rcu_read_unlock(); 4092 } 4093 4094 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, 4095 int *quote) 4096 { 4097 struct hci_conn_hash *h = &hdev->conn_hash; 4098 struct hci_chan *chan = NULL; 4099 unsigned int num = 0, min = ~0, cur_prio = 0; 4100 struct hci_conn *conn; 4101 int cnt, q, conn_num = 0; 4102 4103 BT_DBG("%s", hdev->name); 4104 4105 rcu_read_lock(); 4106 4107 list_for_each_entry_rcu(conn, &h->list, list) { 4108 struct hci_chan *tmp; 4109 4110 if (conn->type != type) 4111 continue; 4112 4113 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) 4114 continue; 4115 4116 conn_num++; 4117 4118 list_for_each_entry_rcu(tmp, &conn->chan_list, list) { 4119 struct sk_buff *skb; 4120 4121 if (skb_queue_empty(&tmp->data_q)) 4122 continue; 4123 4124 skb = skb_peek(&tmp->data_q); 4125 if (skb->priority < cur_prio) 4126 continue; 4127 4128 if (skb->priority > cur_prio) { 4129 num = 0; 4130 min = ~0; 4131 cur_prio = skb->priority; 4132 } 4133 4134 num++; 4135 4136 if (conn->sent < min) { 4137 min = conn->sent; 4138 chan = tmp; 4139 } 4140 } 4141 4142 if (hci_conn_num(hdev, type) == conn_num) 4143 break; 4144 } 4145 4146 rcu_read_unlock(); 4147 4148 if (!chan) 4149 return NULL; 4150 4151 switch (chan->conn->type) { 4152 case ACL_LINK: 4153 cnt = hdev->acl_cnt; 4154 break; 4155 case AMP_LINK: 4156 cnt = hdev->block_cnt; 4157 break; 4158 case SCO_LINK: 4159 case ESCO_LINK: 4160 cnt = hdev->sco_cnt; 4161 break; 4162 case LE_LINK: 4163 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; 4164 break; 4165 default: 4166 cnt = 0; 4167 bt_dev_err(hdev, "unknown link type %d", chan->conn->type); 4168 } 4169 4170 q = cnt / num; 4171 *quote = q ? 
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *c;

        bt_dev_err(hdev, "link tx timeout");

        rcu_read_lock();

        /* Kill stalled connections */
        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == type && c->sent) {
                        bt_dev_err(hdev, "killing stalled connection %pMR",
                                   &c->dst);
                        hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
                }
        }

        rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
                                      int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_chan *chan = NULL;
        unsigned int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
        int cnt, q, conn_num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *tmp;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                conn_num++;

                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&tmp->data_q))
                                continue;

                        skb = skb_peek(&tmp->data_q);
                        if (skb->priority < cur_prio)
                                continue;

                        if (skb->priority > cur_prio) {
                                num = 0;
                                min = ~0;
                                cur_prio = skb->priority;
                        }

                        num++;

                        if (conn->sent < min) {
                                min = conn->sent;
                                chan = tmp;
                        }
                }

                if (hci_conn_num(hdev, type) == conn_num)
                        break;
        }

        rcu_read_unlock();

        if (!chan)
                return NULL;

        switch (chan->conn->type) {
        case ACL_LINK:
                cnt = hdev->acl_cnt;
                break;
        case AMP_LINK:
                cnt = hdev->block_cnt;
                break;
        case SCO_LINK:
        case ESCO_LINK:
                cnt = hdev->sco_cnt;
                break;
        case LE_LINK:
                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                break;
        default:
                cnt = 0;
                bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
        }

        q = cnt / num;
        *quote = q ? q : 1;
        BT_DBG("chan %p quote %d", chan, *quote);
        return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;
        int num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *chan;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                num++;

                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (chan->sent) {
                                chan->sent = 0;
                                continue;
                        }

                        if (skb_queue_empty(&chan->data_q))
                                continue;

                        skb = skb_peek(&chan->data_q);
                        if (skb->priority >= HCI_PRIO_MAX - 1)
                                continue;

                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
                               skb->priority);
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
        if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds)
                 */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
                                       HCI_ACL_TX_TIMEOUT))
                        hci_link_tx_to(hdev, ACL_LINK);
        }
}
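/* Editor's note: a minimal worked example of the block accounting used by
 * __get_blocks() under block-based flow control, with hypothetical numbers:
 * for block_len = 64, a 300 byte ACL frame (4 byte header included) costs
 * DIV_ROUND_UP(300 - 4, 64) = 5 blocks of the controller's block pool.
 * The example_* name is hypothetical.
 */
#if 0
static int example_blocks(unsigned int frame_len, unsigned int block_len)
{
        /* round the payload up to whole controller data blocks */
        return DIV_ROUND_UP(frame_len - HCI_ACL_HDR_SIZE, block_len);
}
#endif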
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;

        __check_timeout(hdev, cnt);

        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt)
                                return;

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(hdev, skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* No ACL link over BR/EDR controller */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
                return;

        /* No AMP link over AMP controller */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}
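/* Editor's note: a minimal sketch of the starvation-avoidance rule that the
 * ACL schedulers above rely on.  hci_chan_sent() always serves the highest
 * queued priority first and the inner loops stop as soon as a lower-priority
 * skb is reached; hci_prio_recalculate() then promotes channels that sent
 * nothing in this round so they compete again next time.  The example_*
 * helper is hypothetical and mirrors only the promotion step.
 */
#if 0
static void example_promote_starved(struct hci_chan *chan)
{
        struct sk_buff *skb = skb_peek(&chan->data_q);

        /* queued but unserved: raise to just below the maximum priority */
        if (skb && !chan->sent && skb->priority < HCI_PRIO_MAX - 1)
                skb->priority = HCI_PRIO_MAX - 1;
}
#endif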
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(hdev, skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

        __check_timeout(hdev, cnt);

        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(hdev, skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                /* Schedule queues and send stuff to HCI driver */
                hci_sched_acl(hdev);
                hci_sched_sco(hdev);
                hci_sched_esco(hdev);
                hci_sched_le(hdev);
        }

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        } else {
                bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
                           handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        } else {
                bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
                           handle);
        }

        kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}
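/* Editor's note: a minimal sketch, assuming the request layering from
 * hci_request.c, of why the peek above is enough.  Commands of one request
 * sit back to back on cmd_q and only the first carries HCI_REQ_START, so if
 * the command at the head of the queue starts a new request, every command
 * of the current one has already been sent.  The example_* helper is
 * hypothetical.
 */
#if 0
static void example_mark_request_start(struct sk_buff *skb)
{
        /* only the first queued command of a request gets this bit */
        bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
}
#endif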
static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
                          hci_req_complete_t *req_complete,
                          hci_req_complete_skb_t *req_complete_skb)
{
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If we reach this point this event matches the last command sent */
        hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request the complete
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
                *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
                return;
        }

        if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
                *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
                return;
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
                        *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
                else
                        *req_complete = bt_cb(skb)->hci.req_complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
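/* Editor's note: a minimal illustrative sketch of the 16-bit opcode layout
 * that the HCI_OP_RESET comparisons above rely on: a 10-bit OCF in the low
 * bits and a 6-bit OGF in the upper bits.  For example OGF 0x03 with OCF
 * 0x0003 packs to 0x0c03, which is HCI_OP_RESET.  The example_* helper is
 * hypothetical.
 */
#if 0
static __u16 example_opcode_pack(__u16 ogf, __u16 ocf)
{
        return (ocf & 0x03ff) | (ogf << 10);
}
#endif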
4664 */ 4665 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 4666 !test_bit(HCI_INIT, &hdev->flags)) { 4667 kfree_skb(skb); 4668 continue; 4669 } 4670 4671 if (test_bit(HCI_INIT, &hdev->flags)) { 4672 /* Don't process data packets in this states. */ 4673 switch (hci_skb_pkt_type(skb)) { 4674 case HCI_ACLDATA_PKT: 4675 case HCI_SCODATA_PKT: 4676 case HCI_ISODATA_PKT: 4677 kfree_skb(skb); 4678 continue; 4679 } 4680 } 4681 4682 /* Process frame */ 4683 switch (hci_skb_pkt_type(skb)) { 4684 case HCI_EVENT_PKT: 4685 BT_DBG("%s Event packet", hdev->name); 4686 hci_event_packet(hdev, skb); 4687 break; 4688 4689 case HCI_ACLDATA_PKT: 4690 BT_DBG("%s ACL data packet", hdev->name); 4691 hci_acldata_packet(hdev, skb); 4692 break; 4693 4694 case HCI_SCODATA_PKT: 4695 BT_DBG("%s SCO data packet", hdev->name); 4696 hci_scodata_packet(hdev, skb); 4697 break; 4698 4699 default: 4700 kfree_skb(skb); 4701 break; 4702 } 4703 } 4704 } 4705 4706 static void hci_cmd_work(struct work_struct *work) 4707 { 4708 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work); 4709 struct sk_buff *skb; 4710 4711 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name, 4712 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q)); 4713 4714 /* Send queued commands */ 4715 if (atomic_read(&hdev->cmd_cnt)) { 4716 skb = skb_dequeue(&hdev->cmd_q); 4717 if (!skb) 4718 return; 4719 4720 kfree_skb(hdev->sent_cmd); 4721 4722 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL); 4723 if (hdev->sent_cmd) { 4724 if (hci_req_status_pend(hdev)) 4725 hci_dev_set_flag(hdev, HCI_CMD_PENDING); 4726 atomic_dec(&hdev->cmd_cnt); 4727 hci_send_frame(hdev, skb); 4728 if (test_bit(HCI_RESET, &hdev->flags)) 4729 cancel_delayed_work(&hdev->cmd_timer); 4730 else 4731 schedule_delayed_work(&hdev->cmd_timer, 4732 HCI_CMD_TIMEOUT); 4733 } else { 4734 skb_queue_head(&hdev->cmd_q, skb); 4735 queue_work(hdev->workqueue, &hdev->cmd_work); 4736 } 4737 } 4738 } 4739