/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
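/*
 * Illustrative sketch, not part of the original file and not compiled
 * (hence the #if 0): how another module might hook the notifier chain
 * above to hear about device state changes.  The callback and
 * notifier_block names are hypothetical; the HCI_DEV_* event codes are
 * the ones passed to hci_notify() elsewhere in this file.
 */
#if 0
static int example_hci_notify(struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	struct hci_dev *hdev = ptr;

	if (event == HCI_DEV_REG)
		BT_DBG("%s registered", hdev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_hci_notify,
};

/* in module init/exit: */
hci_register_notifier(&example_nb);
hci_unregister_notifier(&example_nb);
#endif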
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If the request has set req_last_cmd (typical for multi-HCI
	 * command requests) check if the completed command matches
	 * this, and if not just return. Single HCI command requests
	 * typically leave req_last_cmd as 0 */
	if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
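/*
 * Illustrative sketch, not compiled: the request machinery above is
 * driven by a callback that queues HCI commands; __hci_request() then
 * sleeps until hci_req_complete() fires or the timeout expires.  The
 * callback below is hypothetical; hci_reset_req() and hci_init_req()
 * that follow are the real in-tree users of this pattern.
 */
#if 0
static void example_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);

	/* For multi-command requests, record the last opcode so that
	 * hci_req_complete() only wakes the waiter on the final reply: */
	hdev->req_last_cmd = HCI_OP_WRITE_AUTH_ENABLE;
}

/* fragment from a hypothetical caller: */
err = hci_request(hdev, example_req, 1,
			msecs_to_jiffies(HCI_INIT_TIMEOUT));
#endif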
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT;
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
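/*
 * Illustrative sketch, not compiled: hci_dev_get() above returns the
 * device with a reference held, so every successful lookup must be
 * balanced by hci_dev_put(), exactly as hci_inquiry() below does.
 */
#if 0
struct hci_dev *hdev = hci_dev_get(0);	/* hci0, hypothetical caller */

if (hdev) {
	/* ... use hdev ... */
	hci_dev_put(hdev);
}
#endif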
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
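/*
 * Illustrative sketch, not compiled: for HCISETACLMTU and HCISETSCOMTU
 * the 32-bit dev_opt carries two 16-bit values, which the pointer
 * arithmetic in hci_dev_cmd() above unpacks as packet count in the
 * first half-word and MTU in the second (i.e. on a little-endian
 * machine the MTU sits in the high word).  The user-space fragment
 * below is hypothetical.
 */
#if 0
struct hci_dev_req dr;

dr.dev_id = 0;				/* hci0 */
dr.dev_opt = (1024 << 16) | 8;		/* ACL MTU 1024, 8 packets (LE) */

ioctl(hci_sock_fd, HCISETACLMTU, &dr);	/* from user space */
#endif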
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
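/*
 * Illustrative sketch, not compiled: the life cycle a transport driver
 * follows with the helpers above and hci_register_dev() /
 * hci_unregister_dev() below.  The my_* callbacks are hypothetical
 * driver functions; open, close and destruct are mandatory, as the
 * check in hci_register_dev() enforces.
 */
#if 0
struct hci_dev *hdev = hci_alloc_dev();

if (!hdev)
	return -ENOMEM;

hdev->bus = HCI_USB;
hdev->owner = THIS_MODULE;
hdev->open = my_open;
hdev->close = my_close;
hdev->flush = my_flush;
hdev->send = my_send;
hdev->destruct = my_destruct;

if (hci_register_dev(hdev) < 0) {
	hci_free_dev(hdev);
	return -ENODEV;
}

/* ... on driver detach ... */
hci_unregister_dev(hdev);
hci_free_dev(hdev);
#endif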
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	mgmt_index_added(hdev->id);
	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	mgmt_index_removed(hdev->id);
	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
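/*
 * Illustrative sketch, not compiled: how a driver hands a complete
 * frame to the core via hci_recv_frame() above.  The packet type goes
 * in the skb control block and skb->dev points back at the hci_dev,
 * mirroring what hci_reassembly() below sets up internally.  'buf' and
 * 'len' are hypothetical driver-side variables.
 */
#if 0
struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

if (!skb)
	return -ENOMEM;

memcpy(skb_put(skb, len), buf, len);
skb->dev = (void *) hdev;
bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

hci_recv_frame(skb);
#endif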
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
				int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
							type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
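/*
 * Illustrative sketch, not compiled: a byte-stream transport (a UART
 * driver, say) can push raw bytes at the core and let
 * hci_recv_stream_fragment() above recover packet boundaries, since
 * the first byte of each frame selects the packet type.  The function
 * name below is hypothetical.
 */
#if 0
static void example_uart_rx(struct hci_dev *hdev, const u8 *buf, int count)
{
	int err = hci_recv_stream_fragment(hdev, (void *) buf, count);

	if (err < 0)
		BT_ERR("%s corrupted stream", hdev->name);
}
#endif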
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
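/*
 * Illustrative sketch, not compiled: an upper protocol plugs into one
 * of the hci_proto slots guarded by hci_task_lock above.  Only the
 * fields this file actually dereferences (name, id, recv_acldata,
 * recv_scodata) are shown; the callback signature is assumed to match
 * the calls in hci_acldata_packet()/hci_scodata_packet() below.
 */
#if 0
static int example_recv_acldata(struct hci_conn *conn,
					struct sk_buff *skb, __u16 flags)
{
	kfree_skb(skb);
	return 0;
}

static struct hci_proto example_proto = {
	.name		= "Example",
	.id		= HCI_PROTO_L2CAP,
	.recv_acldata	= example_recv_acldata,
};

err = hci_register_proto(&example_proto);
/* ... */
hci_unregister_proto(&example_proto);
#endif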
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
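/*
 * Illustrative sketch, not compiled: queuing a command with a
 * parameter block through hci_send_cmd() above.  The opcode and
 * parameter struct are the ones hci_inq_req() earlier in this file
 * already uses; 'lap' is a hypothetical 3-byte inquiry access code.
 */
#if 0
struct hci_cp_inquiry cp;

memset(&cp, 0, sizeof(cp));
memcpy(&cp.lap, lap, 3);
cp.length = 8;		/* inquiry length, x 1.28 s */
cp.num_rsp = 0;		/* unlimited responses */

hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
#endif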
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
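/*
 * Illustrative sketch, not compiled: how upper layers feed the TX path
 * defined above.  hci_send_acl() adds the ACL header and the
 * ACL_START/ACL_CONT flags itself, so a caller typically passes 0 and
 * lets the TX tasklet below drain conn->data_q.
 */
#if 0
hci_send_acl(conn, acl_skb, 0);		/* core sets ACL_START/ACL_CONT */
hci_send_sco(conn, sco_skb);		/* sco_skb sized to the SCO MTU */
#endif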
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}