/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif

static void hci_acl_connect(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct inquiry_entry *ie;
        struct hci_cp_create_conn cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;
        conn->link_mode = HCI_LM_MASTER;

        memset(&cp, 0, sizeof(cp));
        bacpy(&cp.bdaddr, &conn->dst);
        cp.pscan_rep_mode = 0x02;

        if ((ie = hci_inquiry_cache_lookup(hdev, &conn->dst)) &&
                        inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
                cp.pscan_rep_mode = ie->data.pscan_rep_mode;
                cp.pscan_mode = ie->data.pscan_mode;
                cp.clock_offset = ie->data.clock_offset | __cpu_to_le16(0x8000);
                memcpy(conn->dev_class, ie->data.dev_class, 3);
        }

        cp.pkt_type = __cpu_to_le16(hdev->pkt_type & ACL_PTYPE_MASK);
        if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
                cp.role_switch = 0x01;
        else
                cp.role_switch = 0x00;

        hci_send_cmd(hdev, OGF_LINK_CTL, OCF_CREATE_CONN, sizeof(cp), &cp);
}

void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
        struct hci_cp_disconnect cp;

        BT_DBG("%p", conn);

        conn->state = BT_DISCONN;

        cp.handle = __cpu_to_le16(conn->handle);
        cp.reason = reason;
        hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_DISCONNECT, sizeof(cp), &cp);
}

void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_add_sco cp;

        BT_DBG("%p", conn);

        conn->state = BT_CONNECT;
        conn->out = 1;

        cp.pkt_type = __cpu_to_le16(hdev->pkt_type & SCO_PTYPE_MASK);
        cp.handle = __cpu_to_le16(handle);

        hci_send_cmd(hdev, OGF_LINK_CTL, OCF_ADD_SCO, sizeof(cp), &cp);
}

static void hci_conn_timeout(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p state %d", conn, conn->state);

        if (atomic_read(&conn->refcnt))
                return;

        hci_dev_lock(hdev);
        if (conn->state == BT_CONNECTED)
                hci_acl_disconn(conn, 0x13);
        else
                conn->state = BT_CLOSED;
        hci_dev_unlock(hdev);
        return;
}

static void hci_conn_idle(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        hci_conn_enter_sniff_mode(conn);
}

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
        struct hci_conn *conn;

        BT_DBG("%s dst %s", hdev->name, batostr(dst));

        conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
        if (!conn)
                return NULL;

        bacpy(&conn->dst, dst);
        conn->hdev = hdev;
        conn->type = type;
        conn->mode = HCI_CM_ACTIVE;
        conn->state = BT_OPEN;

        conn->power_save = 1;

        skb_queue_head_init(&conn->data_q);

        init_timer(&conn->disc_timer);
        conn->disc_timer.function = hci_conn_timeout;
        conn->disc_timer.data = (unsigned long) conn;

        init_timer(&conn->idle_timer);
        conn->idle_timer.function = hci_conn_idle;
        conn->idle_timer.data = (unsigned long) conn;

        atomic_set(&conn->refcnt, 0);

        hci_dev_hold(hdev);

        tasklet_disable(&hdev->tx_task);

        hci_conn_hash_add(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

        tasklet_enable(&hdev->tx_task);

        return conn;
}

int hci_conn_del(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

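        /* Cancel any pending idle and disconnect timers before the
         * connection is removed from the hash and freed. */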
        del_timer(&conn->idle_timer);

        del_timer(&conn->disc_timer);

        if (conn->type == SCO_LINK) {
                struct hci_conn *acl = conn->link;
                if (acl) {
                        acl->link = NULL;
                        hci_conn_put(acl);
                }
        } else {
                struct hci_conn *sco = conn->link;
                if (sco)
                        sco->link = NULL;

                /* Unacked frames */
                hdev->acl_cnt += conn->sent;
        }

        tasklet_disable(&hdev->tx_task);

        hci_conn_hash_del(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

        tasklet_enable(&hdev->tx_task);

        skb_queue_purge(&conn->data_q);

        hci_dev_put(hdev);

        kfree(conn);
        return 0;
}

struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
        int use_src = bacmp(src, BDADDR_ANY);
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%s -> %s", batostr(src), batostr(dst));

        read_lock_bh(&hci_dev_list_lock);

        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);

                if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
                        continue;

                /* Simple routing:
                 *   No source address - find interface with bdaddr != dst
                 *   Source address    - find interface with bdaddr == src
                 */

                if (use_src) {
                        if (!bacmp(&d->bdaddr, src)) {
                                hdev = d; break;
                        }
                } else {
                        if (bacmp(&d->bdaddr, dst)) {
                                hdev = d; break;
                        }
                }
        }

        if (hdev)
                hdev = hci_dev_hold(hdev);

        read_unlock_bh(&hci_dev_list_lock);
        return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO or ACL connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
        struct hci_conn *acl;

        BT_DBG("%s dst %s", hdev->name, batostr(dst));

        if (!(acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst))) {
                if (!(acl = hci_conn_add(hdev, ACL_LINK, dst)))
                        return NULL;
        }

        hci_conn_hold(acl);

        if (acl->state == BT_OPEN || acl->state == BT_CLOSED)
                hci_acl_connect(acl);

        if (type == SCO_LINK) {
                struct hci_conn *sco;

                if (!(sco = hci_conn_hash_lookup_ba(hdev, SCO_LINK, dst))) {
                        if (!(sco = hci_conn_add(hdev, SCO_LINK, dst))) {
                                hci_conn_put(acl);
                                return NULL;
                        }
                }
                acl->link = sco;
                sco->link = acl;

                hci_conn_hold(sco);

                if (acl->state == BT_CONNECTED &&
                                (sco->state == BT_OPEN || sco->state == BT_CLOSED))
                        hci_add_sco(sco, acl->handle);

                return sco;
        } else {
                return acl;
        }
}
EXPORT_SYMBOL(hci_connect);

/* Authenticate remote device */
int hci_conn_auth(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (conn->link_mode & HCI_LM_AUTH)
                return 1;

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
                struct hci_cp_auth_requested cp;
                cp.handle = __cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_AUTH_REQUESTED, sizeof(cp), &cp);
        }
        return 0;
}
EXPORT_SYMBOL(hci_conn_auth);

/* Enable encryption */
int hci_conn_encrypt(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (conn->link_mode & HCI_LM_ENCRYPT)
                return 1;

        if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return 0;

        if (hci_conn_auth(conn)) {
                struct hci_cp_set_conn_encrypt cp;
                cp.handle = __cpu_to_le16(conn->handle);
                cp.encrypt = 1;
                hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_SET_CONN_ENCRYPT,
                                sizeof(cp), &cp);
        }
        return 0;
}
EXPORT_SYMBOL(hci_conn_encrypt);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
        BT_DBG("conn %p", conn);

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
                struct hci_cp_change_conn_link_key cp;
                cp.handle = __cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_CHANGE_CONN_LINK_KEY, sizeof(cp), &cp);
        }
        return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, uint8_t role)
{
        BT_DBG("conn %p", conn);

        if (!role && conn->link_mode & HCI_LM_MASTER)
                return 1;

        if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
                struct hci_cp_switch_role cp;
                bacpy(&cp.bdaddr, &conn->dst);
                cp.role = role;
                hci_send_cmd(conn->hdev, OGF_LINK_POLICY, OCF_SWITCH_ROLE, sizeof(cp), &cp);
        }
        return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
                goto timer;

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
                struct hci_cp_exit_sniff_mode cp;
                cp.handle = __cpu_to_le16(conn->handle);
                hci_send_cmd(hdev, OGF_LINK_POLICY,
                                OCF_EXIT_SNIFF_MODE, sizeof(cp), &cp);
        }

timer:
        if (hdev->idle_timeout > 0)
                mod_timer(&conn->idle_timer,
                                jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("conn %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
                return;

        if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
                return;

        if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
                struct hci_cp_sniff_subrate cp;
                cp.handle = __cpu_to_le16(conn->handle);
                cp.max_latency = __constant_cpu_to_le16(0);
                cp.min_remote_timeout = __constant_cpu_to_le16(0);
                cp.min_local_timeout = __constant_cpu_to_le16(0);
                hci_send_cmd(hdev, OGF_LINK_POLICY,
                                OCF_SNIFF_SUBRATE, sizeof(cp), &cp);
        }

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
                struct hci_cp_sniff_mode cp;
                cp.handle = __cpu_to_le16(conn->handle);
                cp.max_interval = __cpu_to_le16(hdev->sniff_max_interval);
                cp.min_interval = __cpu_to_le16(hdev->sniff_min_interval);
                cp.attempt = __constant_cpu_to_le16(4);
                cp.timeout = __constant_cpu_to_le16(1);
                hci_send_cmd(hdev, OGF_LINK_POLICY,
                                OCF_SNIFF_MODE, sizeof(cp), &cp);
        }
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;

        BT_DBG("hdev %s", hdev->name);

        p = h->list.next;
        while (p != &h->list) {
                struct hci_conn *c;

                c = list_entry(p, struct hci_conn, list);
                p = p->next;

                c->state = BT_CLOSED;

                hci_proto_disconn_ind(c, 0x16);
                hci_conn_del(c);
        }
}

int hci_get_conn_list(void __user *arg)
{
        struct hci_conn_list_req req, *cl;
        struct hci_conn_info *ci;
        struct hci_dev *hdev;
        struct list_head *p;
        int n = 0, size, err;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
                return -EINVAL;

        size = sizeof(req) + req.conn_num * sizeof(*ci);

        if (!(cl = kmalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        if (!(hdev = hci_dev_get(req.dev_id))) {
                kfree(cl);
                return -ENODEV;
        }

        ci = cl->conn_info;

        hci_dev_lock_bh(hdev);
        list_for_each(p, &hdev->conn_hash.list) {
                register struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                bacpy(&(ci + n)->bdaddr, &c->dst);
                (ci + n)->handle = c->handle;
                (ci + n)->type = c->type;
                (ci + n)->out = c->out;
                (ci + n)->state = c->state;
                (ci + n)->link_mode = c->link_mode;
                if (++n >= req.conn_num)
                        break;
        }
        hci_dev_unlock_bh(hdev);

        cl->dev_id = hdev->id;
        cl->conn_num = n;
        size = sizeof(req) + n * sizeof(*ci);

        hci_dev_put(hdev);

        err = copy_to_user(arg, cl, size);
        kfree(cl);

        return err ? -EFAULT : 0;
}

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_conn_info_req req;
        struct hci_conn_info ci;
        struct hci_conn *conn;
        char __user *ptr = arg + sizeof(req);

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock_bh(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
        if (conn) {
                bacpy(&ci.bdaddr, &conn->dst);
                ci.handle = conn->handle;
                ci.type = conn->type;
                ci.out = conn->out;
                ci.state = conn->state;
                ci.link_mode = conn->link_mode;
        }
        hci_dev_unlock_bh(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}