/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Initiate an outgoing ACL connection (HCI Create Connection) */
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	/* Reuse page scan parameters and clock offset from a recent
	 * inquiry result, if available; bit 15 (0x8000) marks the
	 * clock offset as valid. */
	if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst))) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
							cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		conn->ssp_mode = ie->data.ssp_mode;
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

/* Cancel an outgoing ACL connection attempt. The Create Connection
 * Cancel command only exists on Bluetooth 1.2 (HCI version 2) and
 * later controllers. */
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < 2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

/* Disconnect an established link with the given HCI reason code */
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}
/* Set up a (legacy) SCO link on top of an existing ACL connection */
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

/* Set up a synchronous (eSCO) connection on top of an existing ACL link */
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = cpu_to_le32(0x00001f40);
	cp.max_latency = cpu_to_le16(0xffff);
	cp.voice_setting = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

/* Disconnect timer: fires once the last reference to the connection
 * has been dropped and disc_timeout has expired */
static void hci_conn_timeout(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;
	__u8 reason;

	BT_DBG("conn %p state %d", conn, conn->state);

	if (atomic_read(&conn->refcnt))
		return;

	hci_dev_lock(hdev);

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->type == ACL_LINK && conn->out)
			hci_acl_connect_cancel(conn);
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		reason = hci_proto_disconn_ind(conn);
		hci_acl_disconn(conn, reason);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_dev_unlock(hdev);
}

/* Idle timer: put the connection into sniff mode after a period of
 * inactivity on the data path */
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev = hdev;
	conn->type = type;
	conn->mode = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;

	conn->power_save = 1;
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	atomic_set(&conn->devref, 0);

	hci_conn_init_sysfs(conn);

	tasklet_enable(&hdev->tx_task);

	return conn;
}
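
/*
 * Illustrative sketch of the usual lifetime pattern for a struct hci_conn
 * created above, mirroring what hci_connect() later in this file does.
 * hci_conn_hold() and hci_conn_put() are the refcount helpers from
 * hci_core.h; dropping the last reference is what arms disc_timer, whose
 * expiry runs hci_conn_timeout() and cancels or tears down the link.
 *
 *	struct hci_conn *conn = hci_conn_add(hdev, ACL_LINK, dst);
 *	if (!conn)
 *		return -ENOMEM;
 *
 *	hci_conn_hold(conn);		// take a reference, stop disc_timer
 *	hci_acl_connect(conn);		// issue HCI Create Connection
 *	...				// use the link
 *	hci_conn_put(conn);		// drop the reference; disc_timer is
 *					// armed and hci_conn_timeout() will
 *					// later cancel or disconnect the link
 */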
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	del_timer(&conn->disc_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Return unacked frames to the ACL flow control count */
		hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	tasklet_enable(&hdev->tx_task);

	skb_queue_purge(&conn->data_q);

	hci_conn_put_device(conn);

	hci_dev_put(hdev);

	return 0;
}

struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock_bh(&hci_dev_list_lock);

	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);

		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock_bh(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO, eSCO or ACL connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) {
		if (!(acl = hci_conn_add(hdev, ACL_LINK, dst)))
			return NULL;
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	if (!(sco = hci_conn_hash_lookup_ba(hdev, type, dst))) {
		if (!(sco = hci_conn_add(hdev, type, dst))) {
			hci_conn_put(acl);
			return NULL;
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		if (lmp_esco_capable(hdev))
			hci_setup_sync(sco, acl->handle);
		else
			hci_add_sco(sco, acl->handle);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);
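
/*
 * Illustrative sketch of how an upper layer typically reaches
 * hci_connect(), loosely following what the L2CAP and SCO socket code
 * does.  The device lock required above is taken with hci_dev_lock_bh();
 * "src", "dst", "sec_level", "auth_type" and "err" are the caller's own
 * variables.
 *
 *	struct hci_dev *hdev;
 *	struct hci_conn *hcon;
 *
 *	hdev = hci_get_route(dst, src);
 *	if (!hdev)
 *		return -EHOSTUNREACH;
 *
 *	hci_dev_lock_bh(hdev);
 *
 *	hcon = hci_connect(hdev, ACL_LINK, dst, sec_level, auth_type);
 *	if (!hcon)
 *		err = -ENOMEM;
 *
 *	hci_dev_unlock_bh(hdev);
 *	hci_dev_put(hdev);
 */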
/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	/* When both sides support Secure Simple Pairing, the link
	 * must be encrypted before it may be used. */
	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
					!(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (sec_level > conn->sec_level)
		conn->sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
							sizeof(cp), &cp);
	}

	return 0;
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (sec_level == BT_SECURITY_SDP)
		return 1;

	if (sec_level == BT_SECURITY_LOW &&
				(!conn->ssp_mode || !conn->hdev->ssp_mode))
		return 1;

	if (conn->link_mode & HCI_LM_ENCRYPT)
		return hci_conn_auth(conn, sec_level, auth_type);

	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return 0;

	if (hci_conn_auth(conn, sec_level, auth_type)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.encrypt = 1;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
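
/*
 * Illustrative sketch of how a protocol layer typically consumes
 * hci_conn_security().  A return of 1 means the requested level is
 * already satisfied; a return of 0 means an authentication and/or
 * encryption request has been queued and the outcome arrives later
 * through the protocol's security confirmation callbacks.  "hcon" is
 * assumed to be the caller's struct hci_conn pointer.
 *
 *	if (hci_conn_security(hcon, BT_SECURITY_MEDIUM,
 *					HCI_AT_GENERAL_BONDING)) {
 *		// requirement already met: continue connection setup now
 *	} else {
 *		// wait; the confirmation callback fires once the
 *		// authentication/encryption procedure completes
 *	}
 */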
/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
							sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_latency = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt = cpu_to_le16(4);
		cp.timeout = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_DBG("hdev %s", hdev->name);

	p = h->list.next;
	while (p != &h->list) {
		struct hci_conn *c;

		c = list_entry(p, struct hci_conn, list);
		p = p->next;

		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, 0x16);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}

void hci_conn_hold_device(struct hci_conn *conn)
{
	atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->devref))
		hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct list_head *p;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	if (!(cl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	if (!(hdev = hci_dev_get(req.dev_id))) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock_bh(hdev);
	list_for_each(p, &hdev->conn_hash.list) {
		register struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type = c->type;
		(ci + n)->out = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock_bh(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type = conn->type;
		ci.out = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
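
/*
 * Illustrative sketch: the three ioctl helpers above back the
 * HCIGETCONNLIST, HCIGETCONNINFO and HCIGETAUTHINFO requests issued on a
 * raw HCI socket.  Assuming the struct definitions from the BlueZ
 * user-space headers, a caller might look roughly like this (error
 * handling trimmed):
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	struct hci_conn_list_req *cl;
 *	struct hci_conn_info *ci;
 *	int i, max = 10;
 *
 *	cl = malloc(sizeof(*cl) + max * sizeof(*ci));
 *	cl->dev_id = 0;			// hci0
 *	cl->conn_num = max;
 *	ci = cl->conn_info;
 *
 *	if (ioctl(dd, HCIGETCONNLIST, (void *) cl) == 0)
 *		for (i = 0; i < cl->conn_num; i++)
 *			// inspect ci[i].handle, ci[i].state, ci[i].link_mode
 */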